@@ -409,6 +409,8 @@ struct io_ring_ctx {
 	struct {
 		spinlock_t		completion_lock;
 
+		spinlock_t		timeout_lock;
+
 		/*
 		 * ->iopoll_list is protected by the ctx->uring_lock for
 		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -1188,6 +1190,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	mutex_init(&ctx->uring_lock);
 	init_waitqueue_head(&ctx->cq_wait);
 	spin_lock_init(&ctx->completion_lock);
+	spin_lock_init(&ctx->timeout_lock);
 	INIT_LIST_HEAD(&ctx->iopoll_list);
 	INIT_LIST_HEAD(&ctx->defer_list);
 	INIT_LIST_HEAD(&ctx->timeout_list);
@@ -1328,6 +1331,7 @@ static void io_queue_async_work(struct io_kiocb *req)
 
 static void io_kill_timeout(struct io_kiocb *req, int status)
 	__must_hold(&req->ctx->completion_lock)
+	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_timeout_data *io = req->async_data;
 
@@ -1355,9 +1359,12 @@ static void io_queue_deferred(struct io_ring_ctx *ctx)
 }
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
+	__must_hold(&ctx->completion_lock)
 {
 	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+	unsigned long flags;
 
+	spin_lock_irqsave(&ctx->timeout_lock, flags);
 	while (!list_empty(&ctx->timeout_list)) {
 		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
@@ -1382,6 +1389,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		io_kill_timeout(req, 0);
 	}
 	ctx->cq_last_tm_flush = seq;
+	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 }
 
 static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
@@ -5455,6 +5463,20 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
+static void io_req_task_timeout(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	spin_lock_irq(&ctx->completion_lock);
+	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+
+	io_cqring_ev_posted(ctx);
+	req_set_fail(req);
+	io_put_req(req);
+}
+
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
 	struct io_timeout_data *data = container_of(timer,
@@ -5463,24 +5485,20 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->completion_lock, flags);
+	spin_lock_irqsave(&ctx->timeout_lock, flags);
 	list_del_init(&req->timeout.list);
 	atomic_set(&req->ctx->cq_timeouts,
 		atomic_read(&req->ctx->cq_timeouts) + 1);
+	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
-	io_commit_cqring(ctx);
-	spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
-	io_cqring_ev_posted(ctx);
-	req_set_fail(req);
-	io_put_req(req);
+	req->io_task_work.func = io_req_task_timeout;
+	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
 
 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 					   __u64 user_data)
-	__must_hold(&ctx->completion_lock)
+	__must_hold(&ctx->timeout_lock)
 {
 	struct io_timeout_data *io;
 	struct io_kiocb *req;
@@ -5502,7 +5520,7 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 }
 
 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
-	__must_hold(&ctx->completion_lock)
+	__must_hold(&ctx->timeout_lock)
 {
 	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
 
@@ -5517,7 +5535,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 
 static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
 			     struct timespec64 *ts, enum hrtimer_mode mode)
-	__must_hold(&ctx->completion_lock)
+	__must_hold(&ctx->timeout_lock)
 {
 	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
 	struct io_timeout_data *data;
@@ -5576,13 +5594,15 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	spin_lock_irq(&ctx->completion_lock);
+	spin_lock_irq(&ctx->timeout_lock);
 	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
 		ret = io_timeout_cancel(ctx, tr->addr);
 	else
 		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
 					io_translate_timeout_mode(tr->flags));
+	spin_unlock_irq(&ctx->timeout_lock);
 
+	spin_lock_irq(&ctx->completion_lock);
 	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -5637,7 +5657,7 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	struct list_head *entry;
 	u32 tail, off = req->timeout.off;
 
-	spin_lock_irq(&ctx->completion_lock);
+	spin_lock_irq(&ctx->timeout_lock);
 
 	/*
 	 * sqe->off holds how many events that need to occur for this
@@ -5676,7 +5696,7 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	list_add(&req->timeout.list, entry);
 	data->timer.function = io_timeout_fn;
 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-	spin_unlock_irq(&ctx->completion_lock);
+	spin_unlock_irq(&ctx->timeout_lock);
 	return 0;
 }
 
@@ -5730,7 +5750,9 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 	if (ret != -ENOENT)
 		goto done;
+	spin_lock(&ctx->timeout_lock);
 	ret = io_timeout_cancel(ctx, sqe_addr);
+	spin_unlock(&ctx->timeout_lock);
 	if (ret != -ENOENT)
 		goto done;
 	ret = io_poll_cancel(ctx, sqe_addr, false);
@@ -5772,7 +5794,9 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	spin_lock_irq(&ctx->completion_lock);
 	if (ret != -ENOENT)
 		goto done;
+	spin_lock(&ctx->timeout_lock);
 	ret = io_timeout_cancel(ctx, sqe_addr);
+	spin_unlock(&ctx->timeout_lock);
 	if (ret != -ENOENT)
 		goto done;
 	ret = io_poll_cancel(ctx, sqe_addr, false);
@@ -8801,12 +8825,14 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	int canceled = 0;
 
 	spin_lock_irq(&ctx->completion_lock);
+	spin_lock(&ctx->timeout_lock);
 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		if (io_match_task(req, tsk, cancel_all)) {
 			io_kill_timeout(req, -ECANCELED);
 			canceled++;
 		}
 	}
+	spin_unlock(&ctx->timeout_lock);
 	if (canceled != 0)
 		io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
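The net effect of the diff above is a two-level locking scheme: ctx->timeout_lock now guards the timeout list and is the only lock taken on the hrtimer side (io_timeout_fn() defers CQE posting to task work), while ctx->completion_lock is kept for posting completions, and in the paths that need both (io_kill_timeouts(), the async-cancel helpers) completion_lock is the outer lock with timeout_lock nested inside. The following is a minimal userspace sketch of that nesting order only, not kernel code: the pthread mutexes, the timeout_entries counter and the arm_timeout()/kill_timeouts() helpers are illustrative stand-ins, not io_uring internals.

/* Userspace model of the lock nesting introduced by this patch. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t timeout_lock    = PTHREAD_MUTEX_INITIALIZER;
static int timeout_entries;	/* stands in for ctx->timeout_list */

/* Timer-side path: only timeout_lock is needed (cf. io_timeout_fn()). */
static void arm_timeout(void)
{
	pthread_mutex_lock(&timeout_lock);
	timeout_entries++;
	pthread_mutex_unlock(&timeout_lock);
}

/*
 * Cancel path: completion_lock is taken first, timeout_lock nests
 * inside it (cf. io_kill_timeouts() above).
 */
static int kill_timeouts(void)
{
	int canceled;

	pthread_mutex_lock(&completion_lock);
	pthread_mutex_lock(&timeout_lock);
	canceled = timeout_entries;
	timeout_entries = 0;
	pthread_mutex_unlock(&timeout_lock);
	/* CQE posting would happen here, still under completion_lock. */
	pthread_mutex_unlock(&completion_lock);
	return canceled;
}

int main(void)
{
	arm_timeout();
	printf("canceled %d timeout(s)\n", kill_timeouts());
	return 0;
}

Keeping the order fixed (completion_lock outer, timeout_lock inner) is what lets the timer path grab timeout_lock alone without risking an ABBA deadlock against the cancel paths.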