Skip to content

Speed up self-profile-query view #723

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jul 27, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions database/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -671,6 +671,7 @@ impl Index {
.keys()
.filter(move |path| path.0 == krate && path.1 == profile && path.2 == cache)
.map(|path| path.3)
.filter(|q| !q.as_str().starts_with("codegen passes ["))
}
}

Expand Down
7 changes: 7 additions & 0 deletions database/src/pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,13 @@ pub trait Connection: Send + Sync {
series: &[u32],
cid: &[Option<ArtifactIdNumber>],
) -> Vec<Vec<Option<f64>>>;
/// Fetches all self-profile query data recorded for a single artifact and
/// benchmark/profile/cache combination in one round-trip, keyed by query label.
///
/// `cid` selects the artifact (collection) the measurements belong to;
/// `crate_`, `profile`, and `cache` select the self-profile series.
/// Bulk counterpart of `get_self_profile_query`, which loads one series at a
/// time; implementations issue a single SQL query instead of one per label.
async fn get_self_profile(
    &self,
    cid: ArtifactIdNumber,
    crate_: &str,
    profile: &str,
    cache: &str,
) -> HashMap<crate::QueryLabel, QueryDatum>;
async fn get_self_profile_query(
&self,
series: u32,
Expand Down
46 changes: 46 additions & 0 deletions database/src/pool/postgres.rs
Original file line number Diff line number Diff line change
Expand Up @@ -232,6 +232,7 @@ pub struct CachedStatements {
get_pstat: Statement,
insert_pstat: Statement,
get_self_profile_query: Statement,
get_self_profile: Statement,
insert_self_profile_query: Statement,
select_self_query_series: Statement,
insert_self_query_series: Statement,
Expand Down Expand Up @@ -333,6 +334,17 @@ impl PostgresConnection {
)
.await
.unwrap(),
get_self_profile: conn.prepare("
select
query, self_time, blocked_time, incremental_load_time, number_of_cache_hits, invocation_count
from self_profile_query_series
join self_profile_query on self_profile_query_series.id = self_profile_query.series
where
crate = $1
and profile = $2
and cache = $3
and aid = $4
").await.unwrap(),
insert_self_profile_query: conn
.prepare(
"insert into self_profile_query(
Expand Down Expand Up @@ -563,6 +575,40 @@ where
invocation_count: row.get::<_, i32>(4) as u32,
})
}
async fn get_self_profile(
&self,
cid: ArtifactIdNumber,
crate_: &str,
profile: &str,
cache: &str,
) -> HashMap<crate::QueryLabel, crate::QueryDatum> {
let rows = self
.conn()
.query(
&self.statements().get_self_profile,
&[&crate_, &profile, &cache, &(cid.0 as i16)],
)
.await
.unwrap();

rows.into_iter()
.map(|r| {
let self_time: i64 = r.get(1);
let blocked_time: i64 = r.get(2);
let incremental_load_time: i64 = r.get(3);
(
r.get::<_, &str>(0).into(),
crate::QueryDatum {
self_time: Duration::from_nanos(self_time as u64),
blocked_time: Duration::from_nanos(blocked_time as u64),
incremental_load_time: Duration::from_nanos(incremental_load_time as u64),
number_of_cache_hits: r.get::<_, i32>(4) as u32,
invocation_count: r.get::<_, i32>(5) as u32,
},
)
})
.collect()
}
async fn get_error(&self, cid: crate::ArtifactIdNumber) -> HashMap<String, Option<String>> {
let rows = self
.conn()
Expand Down
39 changes: 39 additions & 0 deletions database/src/pool/sqlite.rs
Original file line number Diff line number Diff line change
Expand Up @@ -397,6 +397,45 @@ impl Connection for SqliteConnection {
.optional()
.unwrap()
}
/// Loads every self-profile query datum for one artifact/benchmark/profile/cache
/// combination with a single cached statement, returning them keyed by label.
async fn get_self_profile(
    &self,
    cid: ArtifactIdNumber,
    crate_: &str,
    profile: &str,
    cache: &str,
) -> HashMap<crate::QueryLabel, crate::QueryDatum> {
    let conn = self.raw_ref();
    let mut statement = conn
        .prepare_cached("
            select
                query, self_time, blocked_time, incremental_load_time, number_of_cache_hits, invocation_count
            from self_profile_query_series
            join self_profile_query on self_profile_query_series.id = self_profile_query.series
            where
                crate = ?
                and profile = ?
                and cache = ?
                and aid = ?
        ")
        .unwrap();

    // Durations are persisted as nanosecond integer counts; widen each back
    // into a Duration while folding the rows into the result map.
    let rows = statement
        .query_map(params![&crate_, &profile, &cache, &cid.0], |row| {
            let label: crate::QueryLabel = row.get::<_, String>(0)?.as_str().into();
            let datum = crate::QueryDatum {
                self_time: Duration::from_nanos(row.get::<_, i64>(1)? as u64),
                blocked_time: Duration::from_nanos(row.get::<_, i64>(2)? as u64),
                incremental_load_time: Duration::from_nanos(row.get::<_, i64>(3)? as u64),
                number_of_cache_hits: row.get::<_, i32>(4)? as u32,
                invocation_count: row.get::<_, i32>(5)? as u32,
            };
            Ok((label, datum))
        })
        .unwrap();

    // Like the original `collect::<Result<_, _>>().unwrap()`, any row error
    // aborts the whole load.
    let mut result = HashMap::new();
    for row in rows {
        let (label, datum) = row.unwrap();
        result.insert(label, datum);
    }
    result
}
async fn get_error(&self, cid: crate::ArtifactIdNumber) -> HashMap<String, Option<String>> {
self.raw_ref()
.prepare_cached(
Expand Down
49 changes: 25 additions & 24 deletions site/src/selector.rs
Original file line number Diff line number Diff line change
Expand Up @@ -665,30 +665,31 @@ impl SelfProfile {
.collect::<Vec<_>>();
for cid in cids.iter() {
let mut queries = Vec::new();
for label in labels.iter() {
let query = crate::db::DbLabel::SelfProfileQuery {
krate,
profile,
cache,
query: *label,
};
if let Some(qd) = idx
.get::<crate::db::QueryDatum>(tx.conn(), &query, cid)
.await
{
queries.push(QueryData {
label: *label,
self_time: qd.self_time.as_nanos().try_into().unwrap(),
number_of_cache_hits: qd.number_of_cache_hits,
invocation_count: qd.invocation_count,
blocked_time: qd.blocked_time.as_nanos().try_into().unwrap(),
incremental_load_time: qd
.incremental_load_time
.as_nanos()
.try_into()
.unwrap(),
});
}
log::trace!("Fetching {} self-profile-query series", labels.len());
let conn = tx.conn();
let cid_id = if let Some(c) = cid.lookup(&idx) {
c
} else {
res.push(None);
continue;
};
let cid_data = conn
.get_self_profile(
cid_id,
krate.as_str(),
&profile.to_string(),
&cache.to_string(),
)
.await;
for (label, qd) in cid_data {
queries.push(QueryData {
label,
self_time: qd.self_time.as_nanos().try_into().unwrap(),
number_of_cache_hits: qd.number_of_cache_hits,
invocation_count: qd.invocation_count,
blocked_time: qd.blocked_time.as_nanos().try_into().unwrap(),
incremental_load_time: qd.incremental_load_time.as_nanos().try_into().unwrap(),
});
}
if queries.is_empty() {
res.push(None);
Expand Down