@@ -8114,8 +8114,8 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
 
 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
 {
-	/* no updates yet, so not used */
-	WARN_ON_ONCE(1);
+	io_buffer_unmap(ctx, &prsrc->buf);
+	prsrc->buf = NULL;
 }
 
 static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
@@ -8359,7 +8359,7 @@ static int io_buffer_validate(struct iovec *iov)
 }
 
 static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
-				   unsigned int nr_args)
+				   unsigned int nr_args, u64 __user *tags)
 {
 	struct page *last_hpage = NULL;
 	struct io_rsrc_data *data;
@@ -8383,6 +8383,12 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 	}
 
 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
+		u64 tag = 0;
+
+		if (tags && copy_from_user(&tag, &tags[i], sizeof(tag))) {
+			ret = -EFAULT;
+			break;
+		}
 		ret = io_copy_iov(ctx, &iov, arg, i);
 		if (ret)
 			break;
@@ -8394,6 +8400,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 					     &last_hpage);
 		if (ret)
 			break;
+		data->tags[i] = tag;
 	}
 
 	WARN_ON_ONCE(ctx->buf_data);
@@ -8406,6 +8413,62 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 	return ret;
 }
 
+static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
+				   struct io_uring_rsrc_update2 *up,
+				   unsigned int nr_args)
+{
+	u64 __user *tags = u64_to_user_ptr(up->tags);
+	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
+	struct io_mapped_ubuf *imu;
+	struct page *last_hpage = NULL;
+	bool needs_switch = false;
+	__u32 done;
+	int i, err;
+
+	if (!ctx->buf_data)
+		return -ENXIO;
+	if (up->offset + nr_args > ctx->nr_user_bufs)
+		return -EINVAL;
+
+	for (done = 0; done < nr_args; done++) {
+		u64 tag = 0;
+
+		err = io_copy_iov(ctx, &iov, iovs, done);
+		if (err)
+			break;
+		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
+			err = -EFAULT;
+			break;
+		}
+
+		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
+		imu = ctx->user_bufs[i];
+		if (imu) {
+			err = io_queue_rsrc_removal(ctx->buf_data, up->offset + done,
+						    ctx->rsrc_node, imu);
+			if (err)
+				break;
+			ctx->user_bufs[i] = NULL;
+			needs_switch = true;
+		}
+
+		if (iov.iov_base || iov.iov_len) {
+			err = io_buffer_validate(&iov);
+			if (err)
+				break;
+			err = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
+						     &last_hpage);
+			if (err)
+				break;
+			ctx->buf_data->tags[up->offset + done] = tag;
+		}
+	}
+
+	if (needs_switch)
+		io_rsrc_node_switch(ctx, ctx->buf_data);
+	return done ? done : err;
+}
+
 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
 {
 	__s32 __user *fds = arg;
@@ -9807,6 +9870,8 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
 	switch (type) {
 	case IORING_RSRC_FILE:
 		return __io_sqe_files_update(ctx, up, nr_args);
+	case IORING_RSRC_BUFFER:
+		return __io_sqe_buffers_update(ctx, up, nr_args);
 	}
 	return -EINVAL;
 }
@@ -9857,6 +9922,9 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
 	case IORING_RSRC_FILE:
 		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
 					     rr.nr, u64_to_user_ptr(rr.tags));
+	case IORING_RSRC_BUFFER:
+		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
+					       rr.nr, u64_to_user_ptr(rr.tags));
 	}
 	return -EINVAL;
 }
@@ -9933,7 +10001,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 
 	switch (opcode) {
 	case IORING_REGISTER_BUFFERS:
-		ret = io_sqe_buffers_register(ctx, arg, nr_args);
+		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
 		break;
 	case IORING_UNREGISTER_BUFFERS:
 		ret = -EINVAL;
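
For context, a minimal userspace sketch of how the registration path patched above is typically driven. It is not part of the kernel patch; it assumes the liburing helpers io_uring_register_buffers_tags() and io_uring_register_buffers_update_tag() (liburing 2.1+), which wrap the tagged register/update calls for IORING_RSRC_BUFFER, and the buffer names, sizes, and tag values are illustrative only:

#include <string.h>
#include <liburing.h>

/* Sketch only, not the kernel patch: register two fixed buffers with tags,
 * then replace slot 1 with a new buffer. Assumes liburing >= 2.1. */
int example_register_tagged_buffers(struct io_uring *ring)
{
	static char buf0[4096], buf1[4096], buf2[4096];
	struct iovec iovs[2] = {
		{ .iov_base = buf0, .iov_len = sizeof(buf0) },
		{ .iov_base = buf1, .iov_len = sizeof(buf1) },
	};
	__u64 tags[2] = { 0x100, 0x101 };	/* arbitrary per-buffer tags */
	int ret;

	/* Tagged registration: ends up in io_sqe_buffers_register() with a
	 * non-NULL tags pointer, so data->tags[i] is filled per buffer. */
	ret = io_uring_register_buffers_tags(ring, iovs, tags, 2);
	if (ret < 0)
		return ret;

	/* Update slot 1: the old buffer is queued for removal and dropped via
	 * io_rsrc_buf_put(); once released, a CQE carrying its old tag is
	 * posted (same tagging scheme as fixed files). */
	struct iovec new_iov = { .iov_base = buf2, .iov_len = sizeof(buf2) };
	__u64 new_tag = 0x102;

	return io_uring_register_buffers_update_tag(ring, 1, &new_iov, &new_tag, 1);
}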