@@ -218,6 +218,7 @@ struct io_rsrc_put {
 	union {
 		void *rsrc;
 		struct file *file;
+		struct io_mapped_ubuf *buf;
 	};
 };
 
@@ -404,6 +405,7 @@ struct io_ring_ctx {
 	unsigned		nr_user_files;
 
 	/* if used, fixed mapped user buffers */
+	struct io_rsrc_data	*buf_data;
 	unsigned		nr_user_bufs;
 	struct io_mapped_ubuf	**user_bufs;
 
@@ -5927,7 +5929,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
 			req->opcode);
-	return-EINVAL;
+	return -EINVAL;
 }
 
 static int io_req_prep_async(struct io_kiocb *req)
@@ -8110,19 +8112,36 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
 	*slot = NULL;
 }
 
-static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
 {
-	unsigned int i;
+	/* no updates yet, so not used */
+	WARN_ON_ONCE(1);
+}
 
-	if (!ctx->user_bufs)
-		return -ENXIO;
+static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+{
+	unsigned int i;
 
 	for (i = 0; i < ctx->nr_user_bufs; i++)
 		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
 	kfree(ctx->user_bufs);
+	kfree(ctx->buf_data);
 	ctx->user_bufs = NULL;
+	ctx->buf_data = NULL;
 	ctx->nr_user_bufs = 0;
-	return 0;
+}
+
+static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+{
+	int ret;
+
+	if (!ctx->buf_data)
+		return -ENXIO;
+
+	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
+	if (!ret)
+		__io_sqe_buffers_unregister(ctx);
+	return ret;
 }
 
 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
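Note that the unregister path is now split in two: __io_sqe_buffers_unregister() does the actual teardown, while io_sqe_buffers_unregister() first quiesces ctx->buf_data via io_rsrc_ref_quiesce() so no in-flight request can still be using a fixed buffer, mirroring the fixed-file path. As a rough illustration of that quiesce-then-free discipline, here is a userland sketch using C11 atomics and a pthread condition variable; all names (rsrc_data, rsrc_put, rsrc_quiesce) are invented for the example, and this is not the kernel implementation of io_rsrc_ref_quiesce().

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct rsrc_data {
	atomic_int	refs;		/* 1 initial ref + 1 per in-flight user */
	pthread_mutex_t	lock;
	pthread_cond_t	done;
	int		completed;	/* set when the last reference drops */
};

static struct rsrc_data *rsrc_data_alloc(void)
{
	struct rsrc_data *data = calloc(1, sizeof(*data));

	if (!data)
		return NULL;
	atomic_store(&data->refs, 1);
	pthread_mutex_init(&data->lock, NULL);
	pthread_cond_init(&data->done, NULL);
	return data;
}

/* Each in-flight user calls this when it is done with the resource. */
static void rsrc_put(struct rsrc_data *data)
{
	if (atomic_fetch_sub(&data->refs, 1) == 1) {
		pthread_mutex_lock(&data->lock);
		data->completed = 1;
		pthread_cond_signal(&data->done);
		pthread_mutex_unlock(&data->lock);
	}
}

/* Drop the initial reference and sleep until every user is gone;
 * only then may the caller free the backing resources. */
static void rsrc_quiesce(struct rsrc_data *data)
{
	if (atomic_fetch_sub(&data->refs, 1) > 1) {
		pthread_mutex_lock(&data->lock);
		while (!data->completed)
			pthread_cond_wait(&data->done, &data->lock);
		pthread_mutex_unlock(&data->lock);
	}
}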
@@ -8342,17 +8361,26 @@ static int io_buffer_validate(struct iovec *iov)
 static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 				   unsigned int nr_args)
 {
+	struct page *last_hpage = NULL;
+	struct io_rsrc_data *data;
 	int i, ret;
 	struct iovec iov;
-	struct page *last_hpage = NULL;
 
 	if (ctx->user_bufs)
 		return -EBUSY;
 	if (!nr_args || nr_args > UIO_MAXIOV)
 		return -EINVAL;
-	ret = io_buffers_map_alloc(ctx, nr_args);
+	ret = io_rsrc_node_switch_start(ctx);
 	if (ret)
 		return ret;
+	data = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, nr_args);
+	if (!data)
+		return -ENOMEM;
+	ret = io_buffers_map_alloc(ctx, nr_args);
+	if (ret) {
+		kfree(data);
+		return ret;
+	}
 
 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
 		ret = io_copy_iov(ctx, &iov, arg, i);
@@ -8368,9 +8396,13 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 			break;
 	}
 
-	if (ret)
-		io_sqe_buffers_unregister(ctx);
+	WARN_ON_ONCE(ctx->buf_data);
 
+	ctx->buf_data = data;
+	if (ret)
+		__io_sqe_buffers_unregister(ctx);
+	else
+		io_rsrc_node_switch(ctx, NULL);
 	return ret;
 }
 
@@ -8445,22 +8477,29 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
+static bool io_wait_rsrc_data(struct io_rsrc_data *data)
+{
+	if (!data)
+		return false;
+	if (!atomic_dec_and_test(&data->refs))
+		wait_for_completion(&data->done);
+	return true;
+}
+
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
-	io_sqe_buffers_unregister(ctx);
 
 	if (ctx->mm_account) {
 		mmdrop(ctx->mm_account);
 		ctx->mm_account = NULL;
 	}
 
 	mutex_lock(&ctx->uring_lock);
-	if (ctx->file_data) {
-		if (!atomic_dec_and_test(&ctx->file_data->refs))
-			wait_for_completion(&ctx->file_data->done);
+	if (io_wait_rsrc_data(ctx->buf_data))
+		__io_sqe_buffers_unregister(ctx);
+	if (io_wait_rsrc_data(ctx->file_data))
 		__io_sqe_files_unregister(ctx);
-	}
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
 	mutex_unlock(&ctx->uring_lock);
@@ -9825,6 +9864,8 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
 static bool io_register_op_must_quiesce(int op)
 {
 	switch (op) {
+	case IORING_REGISTER_BUFFERS:
+	case IORING_UNREGISTER_BUFFERS:
 	case IORING_REGISTER_FILES:
 	case IORING_UNREGISTER_FILES:
 	case IORING_REGISTER_FILES_UPDATE:
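For a sense of the user-visible surface this commit touches, below is a minimal liburing consumer exercising IORING_REGISTER_BUFFERS and IORING_UNREGISTER_BUFFERS (and therefore io_sqe_buffers_register() and io_sqe_buffers_unregister() above) with a fixed-buffer read. It is a sketch assuming liburing is installed; the file path and buffer size are arbitrary.

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	fd = open("/etc/hostname", O_RDONLY);	/* arbitrary test file */
	if (fd < 0)
		return 1;

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base)
		return 1;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* IORING_REGISTER_BUFFERS: pins iov and populates ctx->user_bufs */
	ret = io_uring_register_buffers(&ring, &iov, 1);
	if (ret) {
		fprintf(stderr, "register: %d\n", ret);
		return 1;
	}

	/* use the registered buffer by index with a fixed read */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read %d bytes\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	/* IORING_UNREGISTER_BUFFERS: quiesces, then tears down the table */
	io_uring_unregister_buffers(&ring);
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}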