
Commit b374a11

move Selector to database crate
1 parent 64003a0 commit b374a11

File tree

6 files changed

+347 -343 lines changed


database/src/selector.rs

Lines changed: 335 additions & 2 deletions
@@ -1,6 +1,38 @@
-use std::fmt;
+//! Selector API for returning subset of series which will be rendered in some
+//! format.
+//!
+//! We have the following expected paths:
+//!
+//! * :benchmark/:profile/:scenario/:metric => [cid => u64]
+//! * :crate/:profile/:scenario/:self_profile_query/:stat (SelfProfileTime, SelfProfileCacheHits, ...)
+//!     :stat = time => Duration,
+//!     :stat = cache hits => u32,
+//!     :stat = invocation count => u32,
+//!     :stat = blocked time => Duration,
+//!     :stat = incremental load time => Duration,
+//!
+//! Note that the returned series always have a "simple" type of a small set --
+//! things like arrays, integers. We aggregate into higher level types above the
+//! primitive series readers.
+//!
+//! We specify a single struct per path style above.
+//!
+//! `Option<T>` in the path either specifies a specific T to filter by, or
+//! requests that all are provided. Note that this is a cartesian product if
+//! there are multiple `None`s.
 
-use crate::{ArtifactId, ArtifactIdIter};
+use std::{
+    fmt::{self, Debug},
+    hash::Hash,
+    sync::Arc,
+};
+
+use async_trait::async_trait;
+
+use crate::{
+    comparison::Metric, interpolate::Interpolate, ArtifactId, ArtifactIdIter, Benchmark,
+    CodegenBackend, Connection, Index, Lookup, Profile, Scenario,
+};
 
 #[derive(Debug)]
 pub struct StatisticSeries {
@@ -75,3 +107,304 @@ impl<T: Clone + PartialEq + fmt::Debug> Point for (T, f64) {
         // no-op
     }
 }
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub enum Selector<T> {
+    All,
+    Subset(Vec<T>),
+    One(T),
+}
+
+impl<T> Selector<T> {
+    fn map<U>(self, mut f: impl FnMut(T) -> U) -> Selector<U> {
+        match self {
+            Selector::All => Selector::All,
+            Selector::Subset(subset) => Selector::Subset(subset.into_iter().map(f).collect()),
+            Selector::One(o) => Selector::One(f(o)),
+        }
+    }
+    pub fn try_map<U, E>(self, mut f: impl FnMut(T) -> Result<U, E>) -> Result<Selector<U>, E> {
+        Ok(match self {
+            Selector::All => Selector::All,
+            Selector::Subset(subset) => {
+                Selector::Subset(subset.into_iter().map(f).collect::<Result<_, _>>()?)
+            }
+            Selector::One(o) => Selector::One(f(o)?),
+        })
+    }
+
+    fn matches<U>(&self, other: U) -> bool
+    where
+        U: PartialEq<T>,
+    {
+        match self {
+            Selector::One(c) => other == *c,
+            Selector::Subset(subset) => subset.iter().any(|c| other == *c),
+            Selector::All => true,
+        }
+    }
+}
+
+/// Represents the parameters of a single benchmark execution that collects a set of statistics.
+pub trait TestCase: Debug + Clone + Hash + PartialEq + Eq + PartialOrd + Ord {}
+
+#[derive(Debug)]
+pub struct SeriesResponse<Case, T> {
+    pub test_case: Case,
+    pub series: T,
+}
+
+impl<TestCase, T> SeriesResponse<TestCase, T> {
+    pub fn map<U>(self, m: impl FnOnce(T) -> U) -> SeriesResponse<TestCase, U> {
+        let SeriesResponse {
+            test_case: key,
+            series,
+        } = self;
+        SeriesResponse {
+            test_case: key,
+            series: m(series),
+        }
+    }
+
+    pub fn interpolate(self) -> SeriesResponse<TestCase, Interpolate<T>>
+    where
+        T: Iterator,
+        T::Item: Point,
+    {
+        self.map(|s| Interpolate::new(s))
+    }
+}
+
+#[async_trait]
+pub trait BenchmarkQuery: Debug + Clone {
+    type TestCase: TestCase;
+
+    async fn execute(
+        &self,
+        connection: &mut dyn Connection,
+        index: &Index,
+        artifact_ids: Arc<Vec<ArtifactId>>,
+    ) -> Result<Vec<SeriesResponse<Self::TestCase, StatisticSeries>>, String>;
+}
+
+// Compile benchmarks querying
+#[derive(Clone, Hash, Eq, PartialEq, Debug)]
+pub struct CompileBenchmarkQuery {
+    benchmark: Selector<String>,
+    scenario: Selector<Scenario>,
+    profile: Selector<Profile>,
+    backend: Selector<CodegenBackend>,
+    metric: Selector<crate::Metric>,
+}
+
+impl CompileBenchmarkQuery {
+    pub fn benchmark(mut self, selector: Selector<String>) -> Self {
+        self.benchmark = selector;
+        self
+    }
+
+    pub fn profile(mut self, selector: Selector<Profile>) -> Self {
+        self.profile = selector;
+        self
+    }
+
+    pub fn scenario(mut self, selector: Selector<Scenario>) -> Self {
+        self.scenario = selector;
+        self
+    }
+
+    pub fn metric(mut self, selector: Selector<Metric>) -> Self {
+        self.metric = selector.map(|v| v.as_str().into());
+        self
+    }
+
+    pub fn all_for_metric(metric: Metric) -> Self {
+        Self {
+            benchmark: Selector::All,
+            profile: Selector::All,
+            scenario: Selector::All,
+            backend: Selector::All,
+            metric: Selector::One(metric.as_str().into()),
+        }
+    }
+}
+
+impl Default for CompileBenchmarkQuery {
+    fn default() -> Self {
+        Self {
+            benchmark: Selector::All,
+            scenario: Selector::All,
+            profile: Selector::All,
+            backend: Selector::All,
+            metric: Selector::All,
+        }
+    }
+}
+
+#[async_trait]
+impl BenchmarkQuery for CompileBenchmarkQuery {
+    type TestCase = CompileTestCase;
+
+    async fn execute(
+        &self,
+        conn: &mut dyn Connection,
+        index: &Index,
+        artifact_ids: Arc<Vec<ArtifactId>>,
+    ) -> Result<Vec<SeriesResponse<Self::TestCase, StatisticSeries>>, String> {
+        let mut statistic_descriptions: Vec<_> = index
+            .compile_statistic_descriptions()
+            .filter(|(&(b, p, s, backend, m), _)| {
+                self.benchmark.matches(b)
+                    && self.profile.matches(p)
+                    && self.scenario.matches(s)
+                    && self.backend.matches(backend)
+                    && self.metric.matches(m)
+            })
+            .map(|(&(benchmark, profile, scenario, backend, metric), sid)| {
+                (
+                    CompileTestCase {
+                        benchmark,
+                        profile,
+                        scenario,
+                        backend,
+                    },
+                    metric,
+                    sid,
+                )
+            })
+            .collect();
+
+        statistic_descriptions.sort_unstable();
+
+        let sids: Vec<_> = statistic_descriptions
+            .iter()
+            .map(|(_, _, sid)| *sid)
+            .collect();
+
+        let aids = artifact_ids
+            .iter()
+            .map(|aid| aid.lookup(index))
+            .collect::<Vec<_>>();
+
+        Ok(conn
+            .get_pstats(&sids, &aids)
+            .await
+            .into_iter()
+            .zip(statistic_descriptions)
+            .filter(|(points, _)| points.iter().any(|value| value.is_some()))
+            .map(|(points, (test_case, metric, _))| {
+                SeriesResponse {
+                    series: StatisticSeries {
+                        artifact_ids: ArtifactIdIter::new(artifact_ids.clone()),
+                        points: if *metric == *"cpu-clock" || *metric == *"task-clock" {
+                            // Convert to seconds -- perf reports these measurements in
+                            // milliseconds
+                            points
+                                .into_iter()
+                                .map(|p| p.map(|v| v / 1000.0))
+                                .collect::<Vec<_>>()
+                                .into_iter()
+                        } else {
+                            points.into_iter()
+                        },
+                    },
+                    test_case,
+                }
+            })
+            .collect::<Vec<_>>())
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct CompileTestCase {
+    pub benchmark: Benchmark,
+    pub profile: Profile,
+    pub scenario: Scenario,
+    pub backend: CodegenBackend,
+}
+
+impl TestCase for CompileTestCase {}
+
+// Runtime benchmarks querying
+#[derive(Clone, Hash, Eq, PartialEq, Debug)]
+pub struct RuntimeBenchmarkQuery {
+    benchmark: Selector<String>,
+    metric: Selector<crate::Metric>,
+}
+
+impl RuntimeBenchmarkQuery {
+    pub fn benchmark(mut self, selector: Selector<String>) -> Self {
+        self.benchmark = selector;
+        self
+    }
+
+    pub fn metric(mut self, selector: Selector<Metric>) -> Self {
+        self.metric = selector.map(|v| v.as_str().into());
+        self
+    }
+
+    pub fn all_for_metric(metric: Metric) -> Self {
+        Self {
+            benchmark: Selector::All,
+            metric: Selector::One(metric.as_str().into()),
+        }
+    }
+}
+
+impl Default for RuntimeBenchmarkQuery {
+    fn default() -> Self {
+        Self {
+            benchmark: Selector::All,
+            metric: Selector::All,
+        }
+    }
+}
+
+#[async_trait]
+impl BenchmarkQuery for RuntimeBenchmarkQuery {
+    type TestCase = RuntimeTestCase;
+
+    async fn execute(
+        &self,
+        conn: &mut dyn Connection,
+        index: &Index,
+        artifact_ids: Arc<Vec<ArtifactId>>,
+    ) -> Result<Vec<SeriesResponse<Self::TestCase, StatisticSeries>>, String> {
+        let mut statistic_descriptions: Vec<_> = index
+            .runtime_statistic_descriptions()
+            .filter(|(&(b, m), _)| self.benchmark.matches(b) && self.metric.matches(m))
+            .map(|(&(benchmark, _), sid)| (RuntimeTestCase { benchmark }, sid))
+            .collect();
+
+        statistic_descriptions.sort_unstable();
+
+        let sids: Vec<_> = statistic_descriptions.iter().map(|(_, sid)| *sid).collect();
+
+        let aids = artifact_ids
+            .iter()
+            .map(|aid| aid.lookup(index))
+            .collect::<Vec<_>>();
+
+        Ok(conn
+            .get_runtime_pstats(&sids, &aids)
+            .await
+            .into_iter()
+            .zip(statistic_descriptions)
+            .filter(|(points, _)| points.iter().any(|value| value.is_some()))
+            .map(|(points, (test_case, _))| SeriesResponse {
+                series: StatisticSeries {
+                    artifact_ids: ArtifactIdIter::new(artifact_ids.clone()),
+                    points: points.into_iter(),
+                },
+                test_case,
+            })
+            .collect::<Vec<_>>())
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct RuntimeTestCase {
+    pub benchmark: Benchmark,
+}
+
+impl TestCase for RuntimeTestCase {}
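
For context, here is a minimal sketch of how a caller can drive the relocated API now that it lives in the database crate. This is not part of the commit: it assumes a `Connection`, `Index`, and artifact list are already available (as they are in the site crate), and the benchmark name "syn" and the `fetch_series` function name are purely illustrative.

    use std::sync::Arc;

    use database::{
        selector::{BenchmarkQuery, CompileBenchmarkQuery, Selector},
        ArtifactId, Connection, Index, Profile,
    };

    // Hypothetical caller: narrow the compile-time series to one benchmark and
    // two profiles, leave scenario/backend/metric as Selector::All, and run the query.
    async fn fetch_series(
        conn: &mut dyn Connection,
        index: &Index,
        artifact_ids: Arc<Vec<ArtifactId>>,
    ) -> Result<(), String> {
        let query = CompileBenchmarkQuery::default()
            .benchmark(Selector::One("syn".to_string()))
            .profile(Selector::Subset(vec![Profile::Check, Profile::Debug]));

        // Each SeriesResponse pairs a concrete CompileTestCase with its StatisticSeries.
        for response in query.execute(conn, index, artifact_ids).await? {
            let _case = response.test_case;
            let _series = response.series;
        }
        Ok(())
    }

From there, `SeriesResponse::map` and `SeriesResponse::interpolate` (both defined in this file) can post-process each series before rendering.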

site/src/comparison.rs

Lines changed: 4 additions & 4 deletions
@@ -6,13 +6,13 @@ use crate::api;
 use crate::db::{ArtifactId, Benchmark, Lookup, Profile, Scenario};
 use crate::github;
 use crate::load::SiteCtxt;
-use crate::selector::{
-    self, BenchmarkQuery, CompileBenchmarkQuery, RuntimeBenchmarkQuery, TestCase,
-};
 
 use collector::compile::benchmark::category::Category;
 use collector::Bound;
-use database::comparison::Metric;
+use database::{
+    comparison::Metric,
+    selector::{self, BenchmarkQuery, CompileBenchmarkQuery, RuntimeBenchmarkQuery, TestCase},
+};
 use serde::Serialize;
 
 use crate::api::comparison::CompileBenchmarkMetadata;

site/src/request_handlers/dashboard.rs

Lines changed: 1 addition & 1 deletion
@@ -1,12 +1,12 @@
 use std::sync::Arc;
 
+use database::selector;
 use lazy_static::lazy_static;
 
 use crate::api::{dashboard, ServerResult};
 use crate::benchmark_metadata::get_stable_benchmark_names;
 use crate::db::{self, comparison::Metric, ArtifactId, Profile, Scenario};
 use crate::load::SiteCtxt;
-use crate::selector;
 
 pub async fn handle_dashboard(ctxt: Arc<SiteCtxt>) -> ServerResult<dashboard::Response> {
     let index = ctxt.index.load();

site/src/request_handlers/graph.rs

Lines changed: 3 additions & 3 deletions
@@ -8,12 +8,12 @@ use crate::api::graphs::GraphKind;
 use crate::api::{detail_graphs, detail_sections, graphs, runtime_detail_graphs, ServerResult};
 use crate::db::{self, ArtifactId, Profile, Scenario};
 use crate::load::SiteCtxt;
-use crate::selector::{
-    CompileBenchmarkQuery, CompileTestCase, RuntimeBenchmarkQuery, Selector, SeriesResponse,
-};
 use crate::self_profile::get_or_download_self_profile;
 
 use database::interpolate::IsInterpolated;
+use database::selector::{
+    CompileBenchmarkQuery, CompileTestCase, RuntimeBenchmarkQuery, Selector, SeriesResponse,
+};
 
 /// Returns data for before/after graphs when comparing a single test result comparison
 /// for a compile-time benchmark.
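
The `Option<T>` convention described in the selector module docs maps directly onto these imports: request handlers turn optional query parameters into `Selector` values before building a query. The helper below is a hypothetical illustration of that pattern, not code from this commit; an absent parameter selects everything, while a present one narrows the selector to a single value.

    use database::selector::Selector;

    // Hypothetical helper: translate an optional request parameter into a selector.
    fn param_to_selector<T>(param: Option<T>) -> Selector<T> {
        match param {
            Some(value) => Selector::One(value),
            None => Selector::All,
        }
    }

    fn main() {
        // No profile requested => keep all profiles (Selector::All).
        let profile: Option<String> = None;
        assert!(matches!(param_to_selector(profile), Selector::All));
    }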
