@@ -26,8 +26,9 @@
 from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const


-def check_for_pending_or_processing_ops(session, object_uuid, operation=None):
-    q = session.query(sdn_journal_db.SdnJournal).filter(
+@db_api.CONTEXT_READER
+def check_for_pending_or_processing_ops(context, object_uuid, operation=None):
+    q = context.session.query(sdn_journal_db.SdnJournal).filter(
         or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING,
             sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING),
         sdn_journal_db.SdnJournal.object_uuid == object_uuid)
@@ -36,11 +37,12 @@ def check_for_pending_or_processing_ops(session, object_uuid, operation=None):
             q = q.filter(sdn_journal_db.SdnJournal.operation.in_(operation))
         else:
             q = q.filter(sdn_journal_db.SdnJournal.operation == operation)
-    return session.query(q.exists()).scalar()
+    return context.session.query(q.exists()).scalar()


-def check_for_pending_delete_ops_with_parent(session, object_type, parent_id):
-    rows = session.query(sdn_journal_db.SdnJournal).filter(
+@db_api.CONTEXT_READER
+def check_for_pending_delete_ops_with_parent(context, object_type, parent_id):
+    rows = context.session.query(sdn_journal_db.SdnJournal).filter(
         or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING,
             sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING),
         sdn_journal_db.SdnJournal.object_type == object_type,
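
The db_api.CONTEXT_READER and db_api.CONTEXT_WRITER decorators introduced throughout this change replace explicit session handling: the decorated function takes a context as its first argument, and the decorator supplies context.session bound to an open reader or writer transaction. Below is a minimal standalone sketch of that pattern built directly on oslo.db's enginefacade; it mirrors, but is not, the project's actual db_api module, and the connection string and function names are placeholders.

# Standalone sketch (assumes oslo.db, oslo.context and SQLAlchemy are
# installed); illustrates the reader/writer decorator pattern only.
from oslo_context import context as oslo_ctx
from oslo_db.sqlalchemy import enginefacade
from sqlalchemy import text

context_manager = enginefacade.transaction_context()
context_manager.configure(connection='sqlite://')  # placeholder connection

CONTEXT_READER = context_manager.reader
CONTEXT_WRITER = context_manager.writer


@CONTEXT_READER
def sample_read(context):
    # The decorator opens a reader transaction and exposes it as
    # context.session, so the function body never calls session.begin().
    return context.session.execute(text('SELECT 1')).scalar()


print(sample_read(oslo_ctx.RequestContext()))
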
@@ -54,123 +56,137 @@ def check_for_pending_delete_ops_with_parent(session, object_type, parent_id):
     return False


-def check_for_older_ops(session, row):
-    q = session.query(sdn_journal_db.SdnJournal).filter(
+@db_api.CONTEXT_READER
+def check_for_older_ops(context, row):
+    q = context.session.query(sdn_journal_db.SdnJournal).filter(
         or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING,
             sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING),
         sdn_journal_db.SdnJournal.object_uuid == row.object_uuid,
         sdn_journal_db.SdnJournal.created_at < row.created_at,
         sdn_journal_db.SdnJournal.id != row.id)
-    return session.query(q.exists()).scalar()
+    return context.session.query(q.exists()).scalar()


-def get_all_db_rows(session):
-    return session.query(sdn_journal_db.SdnJournal).all()
+@db_api.CONTEXT_READER
+def get_all_db_rows(context):
+    return context.session.query(sdn_journal_db.SdnJournal).all()


-def get_all_db_rows_by_state(session, state):
-    return session.query(sdn_journal_db.SdnJournal).filter_by(
+@db_api.CONTEXT_READER
+def get_all_db_rows_by_state(context, state):
+    return context.session.query(sdn_journal_db.SdnJournal).filter_by(
         state=state).all()


+def _get_row_with_lock(session):
+    row = session.query(sdn_journal_db.SdnJournal).filter_by(
+        state=sdn_const.PENDING).order_by(
+        asc(sdn_journal_db.SdnJournal.last_retried)).with_for_update(
+        ).first()
+    return row
+
+
 # Retry deadlock exception for Galera DB.
 # If two (or more) different threads call this method at the same time, they
 # might both succeed in changing the same row to pending, but at least one
 # of them will get a deadlock from Galera and will have to retry the operation.
 @db_api.retry_db_errors
-def get_oldest_pending_db_row_with_lock(session):
-    with session.begin():
-        row = session.query(sdn_journal_db.SdnJournal).filter_by(
-            state=sdn_const.PENDING).order_by(
-            asc(sdn_journal_db.SdnJournal.last_retried)).with_for_update(
-            ).first()
-        if row:
-            update_db_row_state(session, row, sdn_const.PROCESSING)
-
+@db_api.CONTEXT_WRITER
+def get_oldest_pending_db_row_with_lock(context):
+    row = _get_row_with_lock(context.session)
+    if row:
+        _update_db_row_state(context.session, row, sdn_const.PROCESSING)
     return row


 @db_api.retry_db_errors
-def get_all_monitoring_db_row_by_oldest(session):
-    with session.begin():
-        rows = session.query(sdn_journal_db.SdnJournal).filter_by(
-            state=sdn_const.MONITORING).order_by(
-            asc(sdn_journal_db.SdnJournal.last_retried)).all()
+@db_api.CONTEXT_READER
+def get_all_monitoring_db_row_by_oldest(context):
+    rows = context.session.query(sdn_journal_db.SdnJournal).filter_by(
+        state=sdn_const.MONITORING).order_by(
+        asc(sdn_journal_db.SdnJournal.last_retried)).all()
     return rows


 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def update_db_row_state(session, row, state):
+@db_api.CONTEXT_WRITER
+def update_db_row_state(context, row, state):
+    _update_db_row_state(context.session, row, state)
+
+
+def _update_db_row_state(session, row, state):
     row.state = state
     session.merge(row)
-    session.flush()


 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def update_db_row_job_id(session, row, job_id):
+@db_api.CONTEXT_WRITER
+def update_db_row_job_id(context, row, job_id):
     row.job_id = job_id
-    session.merge(row)
-    session.flush()
+    context.session.merge(row)


-def update_pending_db_row_retry(session, row, retry_count):
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
+@db_api.CONTEXT_WRITER
+def update_pending_db_row_retry(context, row, retry_count):
     if row.retry_count >= retry_count and retry_count != -1:
-        update_db_row_state(session, row, sdn_const.FAILED)
+        _update_db_row_state(context.session, row, sdn_const.FAILED)
     else:
         row.retry_count += 1
-        update_db_row_state(session, row, sdn_const.PENDING)
+        _update_db_row_state(context.session, row, sdn_const.PENDING)


 # This function is currently not used.
 # Deleted resources are marked as 'deleted' in the database.
 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def delete_row(session, row=None, row_id=None):
+@db_api.CONTEXT_WRITER
+def delete_row(context, row=None, row_id=None):
     if row_id:
-        row = session.query(sdn_journal_db.SdnJournal).filter_by(
+        row = context.session.query(sdn_journal_db.SdnJournal).filter_by(
             id=row_id).one()
     if row:
-        session.delete(row)
-        session.flush()
+        context.session.delete(row)


 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def create_pending_row(session, object_type, object_uuid,
+@db_api.CONTEXT_WRITER
+def create_pending_row(context, object_type, object_uuid,
                        operation, data):
     data = jsonutils.dumps(data)
     row = sdn_journal_db.SdnJournal(object_type=object_type,
                                     object_uuid=object_uuid,
                                     operation=operation, data=data,
                                     created_at=func.now(),
                                     state=sdn_const.PENDING)
-    session.add(row)
-    # Keep session flush for unit tests. NOOP for L2/L3 events since calls are
-    # made inside database session transaction with subtransactions=True.
-    session.flush()
+    context.session.add(row)


-@db_api.retry_db_errors
 def _update_maintenance_state(session, expected_state, state):
-    with session.begin():
-        row = session.query(sdn_maintenance_db.SdnMaintenance).filter_by(
-            state=expected_state).with_for_update().one_or_none()
-        if row is None:
-            return False
+    row = session.query(sdn_maintenance_db.SdnMaintenance).filter_by(
+        state=expected_state).with_for_update().one_or_none()
+    if row is None:
+        return False

-        row.state = state
-        return True
+    row.state = state
+    return True


-def lock_maintenance(session):
-    return _update_maintenance_state(session, sdn_const.PENDING,
+@db_api.retry_db_errors
+@db_api.CONTEXT_WRITER
+def lock_maintenance(context):
+    return _update_maintenance_state(context.session, sdn_const.PENDING,
                                      sdn_const.PROCESSING)


-def unlock_maintenance(session):
-    return _update_maintenance_state(session, sdn_const.PROCESSING,
+@db_api.retry_db_errors
+@db_api.CONTEXT_WRITER
+def unlock_maintenance(context):
+    return _update_maintenance_state(context.session, sdn_const.PROCESSING,
                                      sdn_const.PENDING)


-def update_maintenance_operation(session, operation=None):
+@db_api.CONTEXT_WRITER
+def update_maintenance_operation(context, operation=None):
     """Update the current maintenance operation details.

     The function assumes the lock is held, so it mustn't be run outside of a
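
The Galera comment above get_oldest_pending_db_row_with_lock describes the claim-the-oldest-pending-row pattern that the new _get_row_with_lock helper isolates: SELECT ... FOR UPDATE on the oldest PENDING row, then flip it to PROCESSING inside the same transaction. A minimal standalone SQLAlchemy sketch of that query shape follows; SdnJournalDemo is a stand-in model, not the project's SdnJournal, and SQLite silently drops FOR UPDATE, so the sketch illustrates the query, not Galera's locking behaviour.

# Standalone sketch (assumes SQLAlchemy >= 1.4); SdnJournalDemo stands in
# for the real SdnJournal model.
import datetime

from sqlalchemy import Column, DateTime, Integer, String, asc, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class SdnJournalDemo(Base):
    __tablename__ = 'sdn_journal_demo'
    id = Column(Integer, primary_key=True)
    state = Column(String(16), default='pending')
    last_retried = Column(DateTime, default=datetime.datetime.utcnow)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session, session.begin():
    session.add(SdnJournalDemo(state='pending'))

with Session(engine) as session, session.begin():
    # SELECT ... FOR UPDATE on the oldest pending row: a competing worker
    # either blocks on the row lock or, on Galera, hits a deadlock error and
    # retries, so only one worker flips the row to 'processing'.
    row = (session.query(SdnJournalDemo)
           .filter_by(state='pending')
           .order_by(asc(SdnJournalDemo.last_retried))
           .with_for_update()
           .first())
    if row:
        row.state = 'processing'
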
@@ -180,28 +196,28 @@ def update_maintenance_operation(session, operation=None):
     if operation:
         op_text = operation.__name__

-    with session.begin():
-        row = session.query(sdn_maintenance_db.SdnMaintenance).one_or_none()
-        row.processing_operation = op_text
-
-
-def delete_rows_by_state_and_time(session, state, time_delta):
-    with session.begin():
-        now = session.execute(func.now()).scalar()
-        session.query(sdn_journal_db.SdnJournal).filter(
-            sdn_journal_db.SdnJournal.state == state,
-            sdn_journal_db.SdnJournal.last_retried < now - time_delta).delete(
-            synchronize_session=False)
-        session.expire_all()
-
-
-def reset_processing_rows(session, max_timedelta):
-    with session.begin():
-        now = session.execute(func.now()).scalar()
-        max_timedelta = datetime.timedelta(seconds=max_timedelta)
-        rows = session.query(sdn_journal_db.SdnJournal).filter(
-            sdn_journal_db.SdnJournal.last_retried < now - max_timedelta,
-            sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING,
-        ).update({'state': sdn_const.PENDING})
+    row = context.session.query(
+        sdn_maintenance_db.SdnMaintenance).one_or_none()
+    row.processing_operation = op_text
+
+
+@db_api.CONTEXT_WRITER
+def delete_rows_by_state_and_time(context, state, time_delta):
+    now = context.session.execute(func.now()).scalar()
+    context.session.query(sdn_journal_db.SdnJournal).filter(
+        sdn_journal_db.SdnJournal.state == state,
+        sdn_journal_db.SdnJournal.last_retried < now - time_delta).delete(
+        synchronize_session=False)
+    context.session.expire_all()
+
+
+@db_api.CONTEXT_WRITER
+def reset_processing_rows(context, max_timedelta):
+    now = context.session.execute(func.now()).scalar()
+    max_timedelta = datetime.timedelta(seconds=max_timedelta)
+    rows = context.session.query(sdn_journal_db.SdnJournal).filter(
+        sdn_journal_db.SdnJournal.last_retried < now - max_timedelta,
+        sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING,
+    ).update({'state': sdn_const.PENDING})

     return rows