1
1
use crate :: util:: { RequestHelper , TestApp } ;
2
2
use deadpool_diesel:: postgres:: Pool ;
3
- use deadpool_diesel:: Timeouts ;
4
3
use http:: StatusCode ;
5
- use std:: time:: Duration ;
4
+ use std:: time:: { Duration , Instant } ;
5
+ use tracing:: info;
6
6
7
7
/// How long `wait_until_healthy()` keeps retrying before giving up
/// and panicking the test.
const DB_HEALTHY_TIMEOUT: Duration = Duration::from_secs(2);
8
8
9
- fn default_timeouts ( ) -> Timeouts {
10
- Timeouts :: wait_millis ( DB_HEALTHY_TIMEOUT . as_millis ( ) as u64 )
11
- }
12
-
13
- fn wait_until_healthy ( pool : & Pool , app : & TestApp ) {
14
- let _ = app
15
- . runtime ( )
16
- . block_on ( pool. timeout_get ( & default_timeouts ( ) ) )
17
- . expect ( "the database did not return healthy" ) ;
9
+ async fn wait_until_healthy ( pool : & Pool ) {
10
+ info ! ( "Waiting for the database to become healthy…" ) ;
11
+
12
+ let start_time = Instant :: now ( ) ;
13
+ loop {
14
+ let result = pool. get ( ) . await ;
15
+ if result. is_ok ( ) {
16
+ info ! ( "Database is healthy now" ) ;
17
+ return ;
18
+ }
19
+
20
+ if start_time. elapsed ( ) < DB_HEALTHY_TIMEOUT {
21
+ info ! ( "Database is not healthy yet, retrying…" ) ;
22
+ tokio:: time:: sleep ( Duration :: from_millis ( 100 ) ) . await ;
23
+ } else {
24
+ info ! ( "Database did not become healthy within the timeout" ) ;
25
+ let _ = result. expect ( "the database did not return healthy" ) ;
26
+ }
27
+ }
18
28
}
19
29
20
30
#[ test]
@@ -30,7 +40,8 @@ fn http_error_with_unhealthy_database() {
30
40
assert_eq ! ( response. status( ) , StatusCode :: SERVICE_UNAVAILABLE ) ;
31
41
32
42
app. primary_db_chaosproxy ( ) . restore_networking ( ) . unwrap ( ) ;
33
- wait_until_healthy ( & app. as_inner ( ) . deadpool_primary , & app) ;
43
+ app. runtime ( )
44
+ . block_on ( wait_until_healthy ( & app. as_inner ( ) . deadpool_primary ) ) ;
34
45
35
46
let response = anon. get :: < ( ) > ( "/api/v1/summary" ) ;
36
47
assert_eq ! ( response. status( ) , StatusCode :: OK ) ;
@@ -53,7 +64,8 @@ fn fallback_to_replica_returns_user_info() {
53
64
54
65
// restore primary database connection
55
66
app. primary_db_chaosproxy ( ) . restore_networking ( ) . unwrap ( ) ;
56
- wait_until_healthy ( & app. as_inner ( ) . deadpool_primary , & app) ;
67
+ app. runtime ( )
68
+ . block_on ( wait_until_healthy ( & app. as_inner ( ) . deadpool_primary ) ) ;
57
69
}
58
70
59
71
#[ test]
@@ -79,14 +91,15 @@ fn restored_replica_returns_user_info() {
79
91
. deadpool_replica
80
92
. as_ref ( )
81
93
. expect ( "no replica database configured" ) ;
82
- wait_until_healthy ( replica, & app ) ;
94
+ app . runtime ( ) . block_on ( wait_until_healthy ( replica) ) ;
83
95
84
96
let response = owner. get :: < ( ) > ( URL ) ;
85
97
assert_eq ! ( response. status( ) , StatusCode :: OK ) ;
86
98
87
99
// restore connection
88
100
app. primary_db_chaosproxy ( ) . restore_networking ( ) . unwrap ( ) ;
89
- wait_until_healthy ( & app. as_inner ( ) . deadpool_primary , & app) ;
101
+ app. runtime ( )
102
+ . block_on ( wait_until_healthy ( & app. as_inner ( ) . deadpool_primary ) ) ;
90
103
}
91
104
92
105
#[ test]
@@ -107,7 +120,8 @@ fn restored_primary_returns_user_info() {
107
120
108
121
// Once the replica database is restored, it should serve as a fallback again
109
122
app. primary_db_chaosproxy ( ) . restore_networking ( ) . unwrap ( ) ;
110
- wait_until_healthy ( & app. as_inner ( ) . deadpool_primary , & app) ;
123
+ app. runtime ( )
124
+ . block_on ( wait_until_healthy ( & app. as_inner ( ) . deadpool_primary ) ) ;
111
125
112
126
let response = owner. get :: < ( ) > ( URL ) ;
113
127
assert_eq ! ( response. status( ) , StatusCode :: OK ) ;
0 commit comments