Catch panics in collector #1800

Merged
merged 3 commits on Jan 15, 2024
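The change below wraps the artifact benchmarking step in `std::panic::catch_unwind` so that the `/perf/onpush` notification is sent even when the collector panics, and a panic is logged instead of aborting the command. A minimal, self-contained sketch of that pattern, assuming the `anyhow` and `log` crates the collector already uses; `run_benchmarks_once`, `notify_site`, `bench_next`, and the site URL are illustrative stand-ins, not the collector's real API:

```rust
use std::panic;

// Hypothetical stand-ins for the collector's real steps; only the control
// flow mirrors the diff.
fn run_benchmarks_once() -> anyhow::Result<()> {
    // ...run the benchmark suite here; it may return Err or panic...
    Ok(())
}

fn notify_site(site_url: &str) -> anyhow::Result<()> {
    // The collector posts to `{site_url}/perf/onpush` here.
    println!("POST {}/perf/onpush", site_url);
    Ok(())
}

fn bench_next(site_url: &str) -> anyhow::Result<i32> {
    // Run the benchmarks inside catch_unwind so a panic cannot skip the
    // notification below.
    let res = panic::catch_unwind(|| run_benchmarks_once());

    // Sent whether the run finished, returned an error, or panicked.
    notify_site(site_url)?;

    match res {
        // An ordinary error still propagates to the caller.
        Ok(res) => res?,
        // A panic is only logged; the command then returns 0 as before.
        Err(error) => log::error!("The collector has crashed\n{error:?}"),
    }
    Ok(0)
}

fn main() {
    // "https://site.example" is a placeholder for the configured site URL.
    std::process::exit(bench_next("https://site.example").unwrap_or(1));
}
```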
193 changes: 104 additions & 89 deletions collector/src/bin/collector.rs
@@ -645,15 +645,6 @@ fn main_result() -> anyhow::Result<i32> {
runtime: &runtime_benchmark_dir,
};

let mut builder = tokio::runtime::Builder::new_multi_thread();
// We want to minimize noise from the runtime
builder
.worker_threads(1)
.max_blocking_threads(1)
.enable_time()
.enable_io();
let mut rt = builder.build().expect("built runtime");

// XXX: This doesn't necessarily work for all archs
let target_triple = format!("{}-unknown-linux-gnu", std::env::consts::ARCH);

@@ -765,6 +756,7 @@ fn main_result() -> anyhow::Result<i32> {
CargoIsolationMode::Isolated
};

let mut rt = build_async_runtime();
let mut conn = rt.block_on(pool.connection());
let artifact_id = ArtifactId::Tag(toolchain.id.clone());
rt.block_on(purge_old_data(conn.as_mut(), &artifact_id, purge.purge));
@@ -916,6 +908,7 @@ fn main_result() -> anyhow::Result<i32> {
benchmarks.retain(|b| local.category.0.contains(&b.category()));

let artifact_id = ArtifactId::Tag(toolchain.id.clone());
let mut rt = build_async_runtime();
let mut conn = rt.block_on(pool.connection());
rt.block_on(purge_old_data(conn.as_mut(), &artifact_id, purge.purge));

@@ -964,96 +957,106 @@ fn main_result() -> anyhow::Result<i32> {
return Ok(0);
};

let pool = database::Pool::open(&db.db);

match next {
NextArtifact::Release(tag) => {
let toolchain = create_toolchain_from_published_version(&tag, &target_triple)?;
let res = bench_published_artifact(
rt.block_on(pool.connection()),
&mut rt,
toolchain,
&benchmark_dirs,
);

client.post(format!("{}/perf/onpush", site_url)).send()?;

res?;
let res = std::panic::catch_unwind(|| {
let pool = database::Pool::open(&db.db);
let mut rt = build_async_runtime();

match next {
NextArtifact::Release(tag) => {
let toolchain =
create_toolchain_from_published_version(&tag, &target_triple)?;
bench_published_artifact(
rt.block_on(pool.connection()),
&mut rt,
toolchain,
&benchmark_dirs,
)
}
NextArtifact::Commit {
commit,
include,
exclude,
runs,
} => {
let sha = commit.sha.to_string();
let sysroot = Sysroot::install(
sha.clone(),
&target_triple,
vec![CodegenBackend::Llvm],
)
.with_context(|| format!("failed to install sysroot for {:?}", commit))?;

let mut benchmarks = get_compile_benchmarks(
&compile_benchmark_dir,
include.as_deref(),
exclude.as_deref(),
None,
)?;
benchmarks.retain(|b| b.category().is_primary_or_secondary());

let artifact_id = ArtifactId::Commit(commit);
let mut conn = rt.block_on(pool.connection());
let toolchain = Toolchain::from_sysroot(&sysroot, sha);

let compile_config = CompileBenchmarkConfig {
benchmarks,
profiles: vec![
Profile::Check,
Profile::Debug,
Profile::Doc,
Profile::Opt,
],
scenarios: Scenario::all(),
backends: vec![CodegenBackend::Llvm],
iterations: runs.map(|v| v as usize),
is_self_profile: self_profile.self_profile,
bench_rustc: bench_rustc.bench_rustc,
};
let runtime_suite = rt.block_on(load_runtime_benchmarks(
conn.as_mut(),
&runtime_benchmark_dir,
CargoIsolationMode::Isolated,
None,
&toolchain,
&artifact_id,
))?;

let runtime_config = RuntimeBenchmarkConfig {
runtime_suite,
filter: BenchmarkFilter::keep_all(),
iterations: DEFAULT_RUNTIME_ITERATIONS,
};
let shared = SharedBenchmarkConfig {
artifact_id,
toolchain,
};

run_benchmarks(
&mut rt,
conn,
shared,
Some(compile_config),
Some(runtime_config),
)
}
}
NextArtifact::Commit {
commit,
include,
exclude,
runs,
} => {
let sha = commit.sha.to_string();
let sysroot = Sysroot::install(
sha.clone(),
&target_triple,
vec![CodegenBackend::Llvm],
)
.with_context(|| format!("failed to install sysroot for {:?}", commit))?;

let mut benchmarks = get_compile_benchmarks(
&compile_benchmark_dir,
include.as_deref(),
exclude.as_deref(),
None,
)?;
benchmarks.retain(|b| b.category().is_primary_or_secondary());

let artifact_id = ArtifactId::Commit(commit);
let mut conn = rt.block_on(pool.connection());
let toolchain = Toolchain::from_sysroot(&sysroot, sha);

let compile_config = CompileBenchmarkConfig {
benchmarks,
profiles: vec![Profile::Check, Profile::Debug, Profile::Doc, Profile::Opt],
scenarios: Scenario::all(),
backends: vec![CodegenBackend::Llvm],
iterations: runs.map(|v| v as usize),
is_self_profile: self_profile.self_profile,
bench_rustc: bench_rustc.bench_rustc,
};
let runtime_suite = rt.block_on(load_runtime_benchmarks(
conn.as_mut(),
&runtime_benchmark_dir,
CargoIsolationMode::Isolated,
None,
&toolchain,
&artifact_id,
))?;

let runtime_config = RuntimeBenchmarkConfig {
runtime_suite,
filter: BenchmarkFilter::keep_all(),
iterations: DEFAULT_RUNTIME_ITERATIONS,
};
let shared = SharedBenchmarkConfig {
artifact_id,
toolchain,
};

let res = run_benchmarks(
&mut rt,
conn,
shared,
Some(compile_config),
Some(runtime_config),
);

client.post(format!("{}/perf/onpush", site_url)).send()?;
});
// We need to send a message to this endpoint even if the collector panics
client.post(format!("{}/perf/onpush", site_url)).send()?;

res?;
match res {
Ok(res) => res?,
Err(error) => {
log::error!("The collector has crashed\n{error:?}");
}
}

Ok(0)
}

Commands::BenchPublished { toolchain, db } => {
log_db(&db);
let pool = database::Pool::open(&db.db);
let mut rt = build_async_runtime();
let conn = rt.block_on(pool.connection());
let toolchain = create_toolchain_from_published_version(&toolchain, &target_triple)?;
bench_published_artifact(conn, &mut rt, toolchain, &benchmark_dirs)?;
@@ -1211,6 +1214,7 @@ Make sure to modify `{dir}/perf-config.json` if the category/artifact don't matc
}
Commands::PurgeArtifact { name, db } => {
let pool = Pool::open(&db.db);
let rt = build_async_runtime();
let conn = rt.block_on(pool.connection());
rt.block_on(conn.purge_artifact(&ArtifactId::Tag(name.clone())));

@@ -1220,6 +1224,17 @@ Make sure to modify `{dir}/perf-config.json` if the category/artifact don't matc
}
}

fn build_async_runtime() -> Runtime {
let mut builder = tokio::runtime::Builder::new_multi_thread();
// We want to minimize noise from the runtime
builder
.worker_threads(1)
.max_blocking_threads(1)
.enable_time()
.enable_io();
builder.build().expect("built runtime")
}

fn print_binary_stats(
name_header: &str,
items: HashMap<String, u64>,
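The other half of this diff moves the tokio runtime construction out of the top of `main_result` into the `build_async_runtime` helper shown above, so commands that need no runtime never build one and each command that does gets a fresh, low-noise runtime. A small self-contained sketch of that pattern with the same builder settings; the `main` body and the `sleep` call are illustrative only:

```rust
use tokio::runtime::{Builder, Runtime};

// Same settings as the helper in the diff: a multi-thread runtime capped at
// one worker and one blocking thread, to keep runtime-induced noise out of
// the measurements.
fn build_async_runtime() -> Runtime {
    let mut builder = Builder::new_multi_thread();
    builder
        .worker_threads(1)
        .max_blocking_threads(1)
        .enable_time()
        .enable_io();
    builder.build().expect("built runtime")
}

fn main() {
    // Each subcommand now builds its own runtime on demand (instead of one
    // shared runtime created at the top of `main_result`) and drives async
    // work through `block_on`, e.g. `rt.block_on(pool.connection())`.
    let rt = build_async_runtime();
    rt.block_on(async {
        // Illustrative async work standing in for the collector's database calls.
        tokio::time::sleep(std::time::Duration::from_millis(1)).await;
    });
}
```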