
Commit 6294f36

isilence authored and axboe committed
io_uring: clean up tctx_task_work()
After recent fixes, tctx_task_work() always does proper spinlocking before looking into ->task_list, so we no longer need atomics for ->task_state; replace it with a non-atomic task_running flag protected by the same critical section. Tidy it up: combine the two separately locked blocks, and always try to splice the list in there, so we take the lock less often when new requests arrive while the function is running.

Signed-off-by: Pavel Begunkov <[email protected]>
[axboe: fix missing ->task_running reset on task_work_add() failure]
Signed-off-by: Jens Axboe <[email protected]>
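To make the idea concrete, here is a minimal userspace sketch (not the kernel code) of the pattern this commit adopts: a plain bool flag that is read and written only under the same lock that protects the work list, replacing atomic bit operations on a separate ->task_state word. A pthread mutex stands in for tctx->task_lock; all names (worker_ctx, add_work, run_works) are illustrative, and the list is simplified to LIFO push-at-head where the kernel appends at the tail.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct work {
	struct work *next;
	int id;
};

struct worker_ctx {
	pthread_mutex_t lock;	/* stands in for tctx->task_lock */
	struct work *head;	/* stands in for tctx->task_list */
	bool running;		/* stands in for tctx->task_running */
};

/* Producer side, mirroring io_req_task_work_add(): queue the item and
 * read/set the flag inside the same critical section as the list add.
 * Returns true when the caller needs to wake the worker. */
static bool add_work(struct worker_ctx *ctx, struct work *w)
{
	bool was_running;

	pthread_mutex_lock(&ctx->lock);
	w->next = ctx->head;
	ctx->head = w;
	was_running = ctx->running;
	ctx->running = true;
	pthread_mutex_unlock(&ctx->lock);

	return !was_running;
}

/* Consumer side, mirroring tctx_task_work(): splice the whole list out
 * under the lock, and clear "running" only when the list is seen empty
 * inside that same critical section, so a concurrent add_work() cannot
 * have its wakeup lost. */
static void run_works(struct worker_ctx *ctx)
{
	for (;;) {
		struct work *node;

		pthread_mutex_lock(&ctx->lock);
		node = ctx->head;
		ctx->head = NULL;
		if (!node)
			ctx->running = false;
		pthread_mutex_unlock(&ctx->lock);
		if (!node)
			break;

		do {
			struct work *next = node->next;

			printf("running work %d\n", node->id);
			node = next;
		} while (node);
	}
}

int main(void)
{
	struct worker_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, NULL, false };
	struct work w1 = { NULL, 1 }, w2 = { NULL, 2 };

	if (add_work(&ctx, &w1))	/* worker was idle: run it inline here */
		run_works(&ctx);
	if (add_work(&ctx, &w2))
		run_works(&ctx);
	return 0;
}

Because the flag and the list change under one lock, "running == true" always implies a worker is (or is about to be) draining the list, which is exactly the invariant the old clear_bit()/test_and_set_bit() dance maintained with more locking traffic.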
1 parent 5d70904 commit 6294f36


fs/io_uring.c

Lines changed: 14 additions & 18 deletions
@@ -476,8 +476,8 @@ struct io_uring_task {
 
 	spinlock_t		task_lock;
 	struct io_wq_work_list	task_list;
-	unsigned long		task_state;
 	struct callback_head	task_work;
+	bool			task_running;
 };
 
 /*
@@ -1960,9 +1960,13 @@ static void tctx_task_work(struct callback_head *cb)
 		spin_lock_irq(&tctx->task_lock);
 		node = tctx->task_list.first;
 		INIT_WQ_LIST(&tctx->task_list);
+		if (!node)
+			tctx->task_running = false;
 		spin_unlock_irq(&tctx->task_lock);
+		if (!node)
+			break;
 
-		while (node) {
+		do {
 			struct io_wq_work_node *next = node->next;
 			struct io_kiocb *req = container_of(node, struct io_kiocb,
 							    io_task_work.node);
@@ -1974,19 +1978,8 @@ static void tctx_task_work(struct callback_head *cb)
 			}
 			req->io_task_work.func(req);
 			node = next;
-		}
-		if (wq_list_empty(&tctx->task_list)) {
-			spin_lock_irq(&tctx->task_lock);
-			clear_bit(0, &tctx->task_state);
-			if (wq_list_empty(&tctx->task_list)) {
-				spin_unlock_irq(&tctx->task_lock);
-				break;
-			}
-			spin_unlock_irq(&tctx->task_lock);
-			/* another tctx_task_work() is enqueued, yield */
-			if (test_and_set_bit(0, &tctx->task_state))
-				break;
-		}
+		} while (node);
+
 		cond_resched();
 	}
 
@@ -2000,16 +1993,19 @@ static void io_req_task_work_add(struct io_kiocb *req)
 	enum task_work_notify_mode notify;
 	struct io_wq_work_node *node;
 	unsigned long flags;
+	bool running;
 
 	WARN_ON_ONCE(!tctx);
 
 	spin_lock_irqsave(&tctx->task_lock, flags);
 	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+	running = tctx->task_running;
+	if (!running)
+		tctx->task_running = true;
 	spin_unlock_irqrestore(&tctx->task_lock, flags);
 
 	/* task_work already pending, we're done */
-	if (test_bit(0, &tctx->task_state) ||
-	    test_and_set_bit(0, &tctx->task_state))
+	if (running)
 		return;
 
 	/*
@@ -2024,8 +2020,8 @@ static void io_req_task_work_add(struct io_kiocb *req)
 		return;
 	}
 
-	clear_bit(0, &tctx->task_state);
 	spin_lock_irqsave(&tctx->task_lock, flags);
+	tctx->task_running = false;
 	node = tctx->task_list.first;
 	INIT_WQ_LIST(&tctx->task_list);
 	spin_unlock_irqrestore(&tctx->task_lock, flags);
