Commit 81df0a7

tests/unhealthy_database: Refactor wait_until_healthy() to call pool.get() multiple times until timeout (#8449)
1 parent 1af5ce3 commit 81df0a7

1 file changed: +30 -16 lines changed

src/tests/unhealthy_database.rs

Lines changed: 30 additions & 16 deletions
@@ -1,20 +1,30 @@
 use crate::util::{RequestHelper, TestApp};
 use deadpool_diesel::postgres::Pool;
-use deadpool_diesel::Timeouts;
 use http::StatusCode;
-use std::time::Duration;
+use std::time::{Duration, Instant};
+use tracing::info;
 
 const DB_HEALTHY_TIMEOUT: Duration = Duration::from_millis(2000);
 
-fn default_timeouts() -> Timeouts {
-    Timeouts::wait_millis(DB_HEALTHY_TIMEOUT.as_millis() as u64)
-}
-
-fn wait_until_healthy(pool: &Pool, app: &TestApp) {
-    let _ = app
-        .runtime()
-        .block_on(pool.timeout_get(&default_timeouts()))
-        .expect("the database did not return healthy");
+async fn wait_until_healthy(pool: &Pool) {
+    info!("Waiting for the database to become healthy…");
+
+    let start_time = Instant::now();
+    loop {
+        let result = pool.get().await;
+        if result.is_ok() {
+            info!("Database is healthy now");
+            return;
+        }
+
+        if start_time.elapsed() < DB_HEALTHY_TIMEOUT {
+            info!("Database is not healthy yet, retrying…");
+            tokio::time::sleep(Duration::from_millis(100)).await;
+        } else {
+            info!("Database did not become healthy within the timeout");
+            let _ = result.expect("the database did not return healthy");
+        }
+    }
 }
 
 #[test]
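
The refactored helper above no longer relies on deadpool's built-in wait timeout; it polls pool.get() in a loop, sleeping 100 ms between attempts and panicking once DB_HEALTHY_TIMEOUT elapses. The same retry-until-deadline pattern can be written as a small generic helper. The sketch below is illustrative only (poll_until_ok and its toy check are hypothetical names, not part of this commit or the crates.io test suite), and assumes a tokio runtime with the time and macros features:

use std::time::{Duration, Instant};

/// Runs `check` repeatedly until it returns Ok, sleeping 100 ms between
/// attempts and panicking with `msg` once `timeout` has elapsed.
async fn poll_until_ok<E, F, Fut>(mut check: F, timeout: Duration, msg: &str)
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), E>>,
    E: std::fmt::Debug,
{
    let start_time = Instant::now();
    loop {
        let result = check().await;
        if result.is_ok() {
            return;
        }

        if start_time.elapsed() < timeout {
            // Not ready yet: back off briefly and try again.
            tokio::time::sleep(Duration::from_millis(100)).await;
        } else {
            // Deadline reached: `expect` panics and attaches the last error.
            let _ = result.expect(msg);
        }
    }
}

#[tokio::main]
async fn main() {
    // Toy check that only starts succeeding ~300 ms after startup.
    let start = Instant::now();
    poll_until_ok(
        move || async move {
            if start.elapsed() >= Duration::from_millis(300) {
                Ok(())
            } else {
                Err("not ready yet")
            }
        },
        Duration::from_secs(2),
        "the check did not succeed within the timeout",
    )
    .await;
    println!("check succeeded after {:?}", start.elapsed());
}
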
@@ -30,7 +40,8 @@ fn http_error_with_unhealthy_database() {
     assert_eq!(response.status(), StatusCode::SERVICE_UNAVAILABLE);
 
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
+    app.runtime()
+        .block_on(wait_until_healthy(&app.as_inner().deadpool_primary));
 
     let response = anon.get::<()>("/api/v1/summary");
     assert_eq!(response.status(), StatusCode::OK);
@@ -53,7 +64,8 @@ fn fallback_to_replica_returns_user_info() {
 
     // restore primary database connection
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
+    app.runtime()
+        .block_on(wait_until_healthy(&app.as_inner().deadpool_primary));
 }
 
 #[test]
@@ -79,14 +91,15 @@ fn restored_replica_returns_user_info() {
         .deadpool_replica
         .as_ref()
         .expect("no replica database configured");
-    wait_until_healthy(replica, &app);
+    app.runtime().block_on(wait_until_healthy(replica));
 
     let response = owner.get::<()>(URL);
     assert_eq!(response.status(), StatusCode::OK);
 
     // restore connection
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
+    app.runtime()
+        .block_on(wait_until_healthy(&app.as_inner().deadpool_primary));
 }
 
 #[test]
@@ -107,7 +120,8 @@ fn restored_primary_returns_user_info() {
 
     // Once the replica database is restored, it should serve as a fallback again
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
+    app.runtime()
+        .block_on(wait_until_healthy(&app.as_inner().deadpool_primary));
 
     let response = owner.get::<()>(URL);
     assert_eq!(response.status(), StatusCode::OK);
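
Since wait_until_healthy() is now an async fn, each synchronous #[test] drives it to completion with the test app's Tokio runtime via block_on, as the call sites above show. Outside the crates.io test harness the same pattern looks roughly like the sketch below (the runtime setup and the wait_until_ready stand-in are assumptions for illustration, not the TestApp API):

use std::time::Duration;
use tokio::runtime::Runtime;

// Stand-in for an async readiness check such as `pool.get().await`.
async fn wait_until_ready() {
    tokio::time::sleep(Duration::from_millis(50)).await;
}

#[test]
fn sync_test_drives_async_helper() {
    // A synchronous test owns (or borrows) a Tokio runtime and uses
    // `block_on` to run the async helper to completion, mirroring the
    // `app.runtime().block_on(wait_until_healthy(...))` calls above.
    let runtime = Runtime::new().unwrap();
    runtime.block_on(wait_until_ready());
}
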

0 commit comments