@@ -168,7 +168,7 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 
 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 {
-	INIT_LIST_HEAD(&work->node);
+	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 	work->fn = fn;
 	init_waitqueue_head(&work->done);
 }
@@ -246,23 +246,24 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
-	unsigned long flags;
+	if (!dev->worker)
+		return;
 
-	spin_lock_irqsave(&dev->work_lock, flags);
-	if (list_empty(&work->node)) {
-		list_add_tail(&work->node, &dev->work_list);
-		spin_unlock_irqrestore(&dev->work_lock, flags);
+	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+		/* We can only add the work to the list after we're
+		 * sure it was not in the list.
+		 */
+		smp_mb();
+		llist_add(&work->node, &dev->work_list);
 		wake_up_process(dev->worker);
-	} else {
-		spin_unlock_irqrestore(&dev->work_lock, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_has_work(struct vhost_dev *dev)
 {
-	return !list_empty(&dev->work_list);
+	return !llist_empty(&dev->work_list);
 }
 EXPORT_SYMBOL_GPL(vhost_has_work);
 
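The enqueue side above replaces the spinlocked list with two lock-free primitives: test_and_set_bit() deduplicates the item, and llist_add() pushes it onto the list with a single atomic compare-and-swap. Below is a minimal userspace sketch of the same pattern using C11 atomics rather than the kernel's bitops and llist API; all names in it are illustrative, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct work_node {
	struct work_node *next;
};

struct work_item {
	struct work_node node;
	atomic_bool queued;	/* plays the role of VHOST_WORK_QUEUED */
};

/* Lock-free stack head; userspace analogue of struct llist_head. */
struct work_list {
	_Atomic(struct work_node *) first;
};

/* Push one node; returns true if the list was empty beforehand,
 * matching llist_add()'s return convention. */
static bool work_list_add(struct work_list *list, struct work_node *node)
{
	struct work_node *first = atomic_load(&list->first);

	do {
		node->next = first;
		/* CAS retries until no other producer raced with us;
		 * on failure, 'first' is reloaded with the new head. */
	} while (!atomic_compare_exchange_weak(&list->first, &first, node));

	return first == NULL;
}

/* Enqueue only if not already pending, mirroring vhost_work_queue(). */
static bool work_queue(struct work_list *list, struct work_item *work)
{
	/* The seq_cst exchange orders the flag update before the push,
	 * the job smp_mb() does in the kernel version. */
	if (atomic_exchange(&work->queued, true))
		return false;	/* already queued; the consumer will run it */

	work_list_add(list, &work->node);
	return true;		/* caller would wake the worker here */
}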
@@ -305,7 +306,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 static int vhost_worker(void *data)
 {
 	struct vhost_dev *dev = data;
-	struct vhost_work *work = NULL;
+	struct vhost_work *work, *work_next;
+	struct llist_node *node;
 	mm_segment_t oldfs = get_fs();
 
 	set_fs(USER_DS);
@@ -315,29 +317,25 @@ static int vhost_worker(void *data)
 		/* mb paired w/ kthread_stop */
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		spin_lock_irq(&dev->work_lock);
-
 		if (kthread_should_stop()) {
-			spin_unlock_irq(&dev->work_lock);
 			__set_current_state(TASK_RUNNING);
 			break;
 		}
-		if (!list_empty(&dev->work_list)) {
-			work = list_first_entry(&dev->work_list,
-						struct vhost_work, node);
-			list_del_init(&work->node);
-		} else
-			work = NULL;
-		spin_unlock_irq(&dev->work_lock);
 
-		if (work) {
+		node = llist_del_all(&dev->work_list);
+		if (!node)
+			schedule();
+
+		node = llist_reverse_order(node);
+		/* make sure flag is seen after deletion */
+		smp_wmb();
+		llist_for_each_entry_safe(work, work_next, node, node) {
+			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 			__set_current_state(TASK_RUNNING);
 			work->fn(work);
 			if (need_resched())
 				schedule();
-		} else
-			schedule();
-
+		}
 	}
 	unuse_mm(dev->mm);
 	set_fs(oldfs);
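On the consumer side, llist_del_all() detaches the entire pending list in one atomic exchange, and llist_reverse_order() restores FIFO order before the entries run; note that llist_reverse_order() accepts NULL, so falling through after schedule() when the list was empty is harmless. Here is a companion sketch of that drain loop, reusing the illustrative types from the enqueue sketch above (again not the kernel API).

/* Detach every queued node at once; producers can keep pushing onto
 * the now-empty head, just as with llist_del_all(). */
static struct work_node *work_list_del_all(struct work_list *list)
{
	return atomic_exchange(&list->first, NULL);
}

/* Pushes build a LIFO stack; reverse it so work runs in submission
 * order, as llist_reverse_order() does. NULL in, NULL out. */
static struct work_node *work_list_reverse(struct work_node *node)
{
	struct work_node *prev = NULL;

	while (node) {
		struct work_node *next = node->next;

		node->next = prev;
		prev = node;
		node = next;
	}
	return prev;
}

/* Drain and run everything, mirroring the new vhost_worker() body. */
static void work_run_all(struct work_list *list,
			 void (*fn)(struct work_item *))
{
	struct work_node *node = work_list_reverse(work_list_del_all(list));

	while (node) {
		/* Valid because 'node' is the first member of work_item. */
		struct work_item *work = (struct work_item *)node;
		/* Save the link first: fn() may requeue 'work', which
		 * would rewrite node->next via a concurrent push. */
		struct work_node *next = node->next;

		/* Clear the flag before running so the item can be
		 * requeued from inside fn(), as clear_bit() allows. */
		atomic_store(&work->queued, false);
		fn(work);
		node = next;
	}
}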
@@ -398,9 +396,9 @@ void vhost_dev_init(struct vhost_dev *dev,
 	dev->log_file = NULL;
 	dev->memory = NULL;
 	dev->mm = NULL;
-	spin_lock_init(&dev->work_lock);
-	INIT_LIST_HEAD(&dev->work_list);
 	dev->worker = NULL;
+	init_llist_head(&dev->work_list);
+
 
 	for (i = 0; i < dev->nvqs; ++i) {
 		vq = dev->vqs[i];
@@ -566,7 +564,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	/* No one will access memory at this point */
 	kvfree(dev->memory);
 	dev->memory = NULL;
-	WARN_ON(!list_empty(&dev->work_list));
+	WARN_ON(!llist_empty(&dev->work_list));
 	if (dev->worker) {
 		kthread_stop(dev->worker);
 		dev->worker = NULL;
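Taken together, the change follows the standard llist usage contract: any number of producers may llist_add() concurrently while the single worker kthread drains with llist_del_all(), so neither side needs dev->work_lock any more. The VHOST_WORK_QUEUED bit, ordered by smp_mb() on the producer and smp_wmb() on the consumer, replaces the old list_empty(&work->node) check as the "already pending" test: it keeps an item from being queued twice while still letting work->fn() requeue it once the bit has been cleared.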