Skip to content

On Demand self profile data #1367

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jul 26, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 20 additions & 13 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

62 changes: 10 additions & 52 deletions database/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -448,16 +448,6 @@ pub struct QueryDatum {
pub invocation_count: u32,
}

// `SeriesType` lookup for self-profile query data: forwards to the
// connection's `get_self_profile_query` for the given series id and
// artifact row id. Yields `None` when no matching row exists
// (per the `Option` return of the underlying connection method).
#[async_trait::async_trait]
impl SeriesType for QueryDatum {
async fn get(
conn: &dyn pool::Connection,
series: u32,
artifact_row_id: ArtifactIdNumber,
) -> Option<Self> {
conn.get_self_profile_query(series, artifact_row_id).await
}
}
#[derive(Hash, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct LabelId(pub u8, pub u32);

Expand All @@ -481,8 +471,6 @@ pub struct Index {
/// Id lookup of stat description ids
/// For legacy reasons called `pstat_series` in the database, and so the name is kept here.
pstat_series: Indexed<(Benchmark, Profile, Scenario, Metric)>,
/// Id lookup of a given process query label
queries: Indexed<(Benchmark, Profile, Scenario, QueryLabel)>,
}

/// An index lookup
Expand Down Expand Up @@ -605,12 +593,6 @@ pub enum DbLabel {
scenario: Scenario,
metric: Metric,
},
SelfProfileQuery {
benchmark: Benchmark,
profile: Profile,
scenario: Scenario,
query: QueryLabel,
},
}

pub trait Lookup {
Expand All @@ -631,14 +613,6 @@ impl Lookup for DbLabel {
} => index
.pstat_series
.get(&(*benchmark, *profile, *scenario, *metric)),
DbLabel::SelfProfileQuery {
benchmark,
profile,
scenario,
query,
} => index
.queries
.get(&(*benchmark, *profile, *scenario, *query)),
}
}
}
Expand Down Expand Up @@ -717,32 +691,16 @@ impl Index {
self.pstat_series.map.keys()
}

// FIXME: in theory this won't scale indefinitely as there's potentially
// millions of queries and labels and iterating all of them is eventually
// going to be impractical. But for now it performs quite well, so we'll go
// for it as keeping indices around would be annoying.
pub fn all_query_series(
&self,
) -> impl Iterator<Item = &'_ (Benchmark, Profile, Scenario, QueryLabel)> + '_ {
self.queries.map.keys()
}

// FIXME: in theory this won't scale indefinitely as there's potentially
// millions of queries and labels and iterating all of them is eventually
// going to be impractical. But for now it performs quite well, so we'll go
// for it as keeping indices around would be annoying.
pub fn filtered_queries(
&self,
benchmark: Benchmark,
profile: Profile,
scenario: Scenario,
) -> impl Iterator<Item = QueryLabel> + '_ {
self.queries
.map
.keys()
.filter(move |&&(b, p, s, _)| b == benchmark && p == profile && s == scenario)
.map(|&(_, _, _, q)| q)
.filter(|q| !q.as_str().starts_with("codegen passes ["))
/// Resolves a commit SHA or artifact tag string to its `ArtifactId`.
///
/// First scans the known commits for one whose `sha` equals `commit`;
/// if none matches, falls back to scanning artifact tags for an exact
/// match. Returns `None` when the string matches neither.
pub fn artifact_id_for_commit(&self, commit: &str) -> Option<ArtifactId> {
self.commits()
.into_iter()
.find(|c| c.sha == *commit)
// Pass the enum variant constructor directly instead of the
// redundant closure `|c| ArtifactId::Commit(c)` (clippy: redundant_closure).
.map(ArtifactId::Commit)
.or_else(|| {
self.artifacts()
.find(|a| *a == commit)
.map(|a| ArtifactId::Tag(a.to_owned()))
})
}
}

Expand Down
16 changes: 2 additions & 14 deletions database/src/pool.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use crate::{ArtifactId, ArtifactIdNumber, BenchmarkData};
use crate::{CollectionId, Index, Profile, QueryDatum, QueuedCommit, Scenario, Step};
use crate::{CollectionId, Index, Profile, QueuedCommit, Scenario, Step};
use chrono::{DateTime, Utc};
use hashbrown::HashMap;
use std::sync::{Arc, Mutex};
Expand Down Expand Up @@ -58,7 +58,7 @@ pub trait Connection: Send + Sync {
profile: Profile,
scenario: Scenario,
query: &str,
qd: QueryDatum,
qd: crate::QueryDatum,
);
async fn record_error(&self, artifact: ArtifactIdNumber, krate: &str, error: &str);
async fn record_rustc_crate(
Expand Down Expand Up @@ -92,18 +92,6 @@ pub trait Connection: Send + Sync {
pstat_series_row_ids: &[u32],
artifact_row_id: &[Option<ArtifactIdNumber>],
) -> Vec<Vec<Option<f64>>>;
async fn get_self_profile(
&self,
artifact_row_id: ArtifactIdNumber,
crate_: &str,
profile: &str,
cache: &str,
) -> HashMap<crate::QueryLabel, QueryDatum>;
async fn get_self_profile_query(
&self,
series: u32,
artifact_row_id: ArtifactIdNumber,
) -> Option<QueryDatum>;
async fn get_error(&self, artifact_row_id: ArtifactIdNumber) -> HashMap<String, String>;

async fn queue_pr(
Expand Down
108 changes: 0 additions & 108 deletions database/src/pool/postgres.rs
Original file line number Diff line number Diff line change
Expand Up @@ -276,8 +276,6 @@ pub struct CachedStatements {
get_rustc_compilation_by_crate: Statement,
insert_pstat: Statement,
insert_rustc: Statement,
get_self_profile_query: Statement,
get_self_profile: Statement,
insert_self_profile_query: Statement,
select_self_query_series: Statement,
insert_self_query_series: Statement,
Expand Down Expand Up @@ -393,27 +391,6 @@ impl PostgresConnection {
.prepare("insert into rustc_compilation (aid, cid, crate, duration) VALUES ($1, $2, $3, $4)")
.await
.unwrap(),
get_self_profile_query: conn
.prepare(
"select
self_time, blocked_time, incremental_load_time, number_of_cache_hits, invocation_count
from self_profile_query
where series = $1 and aid = $2 order by self_time asc;
",
)
.await
.unwrap(),
get_self_profile: conn.prepare("
select
query, self_time, blocked_time, incremental_load_time, number_of_cache_hits, invocation_count
from self_profile_query_series
join self_profile_query on self_profile_query_series.id = self_profile_query.series
where
crate = $1
and profile = $2
and cache = $3
and aid = $4
").await.unwrap(),
insert_self_profile_query: conn
.prepare(
"insert into self_profile_query(
Expand Down Expand Up @@ -577,33 +554,6 @@ where
)
})
.collect(),
queries: self
.conn()
.query(
"select id, crate, profile, cache, query from self_profile_query_series;",
&[],
)
.await
.unwrap()
.into_iter()
.map(|row| {
(
row.get::<_, i32>(0) as u32,
(
Benchmark::from(row.get::<_, String>(1).as_str()),
match row.get::<_, String>(2).as_str() {
"check" => Profile::Check,
"opt" => Profile::Opt,
"debug" => Profile::Debug,
"doc" => Profile::Doc,
o => unreachable!("{}: not a profile", o),
},
row.get::<_, String>(3).as_str().parse().unwrap(),
row.get::<_, String>(4).as_str().into(),
),
)
})
.collect(),
}
}
async fn get_benchmarks(&self) -> Vec<BenchmarkData> {
Expand Down Expand Up @@ -645,64 +595,6 @@ where
.map(|row| row.get::<_, Vec<Option<f64>>>(0))
.collect()
}
// Fetches the single self-profile datum for one (series, artifact) pair
// using the prepared `get_self_profile_query` statement. Returns `None`
// when no row matches; panics (`unwrap`) on a database error.
async fn get_self_profile_query(
&self,
pstat_series_row_id: u32,
artifact_row_id: crate::ArtifactIdNumber,
) -> Option<crate::QueryDatum> {
let row = self
.conn()
.query_opt(
&self.statements().get_self_profile_query,
// ids are stored as i32 in postgres; cast from the u32 wrappers
&[&(pstat_series_row_id as i32), &(artifact_row_id.0 as i32)],
)
.await
.unwrap()?;
// Durations are stored as nanosecond counts in i64 columns.
let self_time: i64 = row.get(0);
let blocked_time: i64 = row.get(1);
let incremental_load_time: i64 = row.get(2);
Some(crate::QueryDatum {
self_time: Duration::from_nanos(self_time as u64),
blocked_time: Duration::from_nanos(blocked_time as u64),
incremental_load_time: Duration::from_nanos(incremental_load_time as u64),
number_of_cache_hits: row.get::<_, i32>(3) as u32,
invocation_count: row.get::<_, i32>(4) as u32,
})
}
// Fetches all self-profile query data for one artifact of a given
// (crate, profile, scenario) triple, keyed by query label, using the
// prepared `get_self_profile` statement. Panics (`unwrap`) on a
// database error; an empty map means no rows matched.
// NOTE(review): the parameter is named `scenario` here but is bound to
// the statement's `cache` column — presumably the legacy column name.
async fn get_self_profile(
&self,
artifact_row_id: ArtifactIdNumber,
crate_: &str,
profile: &str,
scenario: &str,
) -> HashMap<crate::QueryLabel, crate::QueryDatum> {
let rows = self
.conn()
.query(
&self.statements().get_self_profile,
&[&crate_, &profile, &scenario, &(artifact_row_id.0 as i32)],
)
.await
.unwrap();

rows.into_iter()
.map(|r| {
// Columns: 0 = query label, 1-3 = nanosecond durations (i64),
// 4 = cache hits, 5 = invocation count.
let self_time: i64 = r.get(1);
let blocked_time: i64 = r.get(2);
let incremental_load_time: i64 = r.get(3);
(
r.get::<_, &str>(0).into(),
crate::QueryDatum {
self_time: Duration::from_nanos(self_time as u64),
blocked_time: Duration::from_nanos(blocked_time as u64),
incremental_load_time: Duration::from_nanos(incremental_load_time as u64),
number_of_cache_hits: r.get::<_, i32>(4) as u32,
invocation_count: r.get::<_, i32>(5) as u32,
},
)
})
.collect()
}
async fn get_error(&self, artifact_row_id: crate::ArtifactIdNumber) -> HashMap<String, String> {
let rows = self
.conn()
Expand Down
Loading