@@ -1251,75 +1251,43 @@ static int loop_clr_fd(struct loop_device *lo)
 	return __loop_clr_fd(lo, false);
 }
 
+/**
+ * loop_set_status_from_info - configure device from loop_info
+ * @lo: struct loop_device to configure
+ * @info: struct loop_info64 to configure the device with
+ *
+ * Configures the loop device parameters according to the passed
+ * in loop_info64 configuration.
+ */
 static int
-loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+loop_set_status_from_info(struct loop_device *lo,
+			  const struct loop_info64 *info)
 {
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
-	struct block_device *bdev;
-	bool partscan = false;
-	bool size_changed = false;
-
-	err = mutex_lock_killable(&loop_ctl_mutex);
-	if (err)
-		return err;
-	if (lo->lo_encrypt_key_size &&
-	    !uid_eq(lo->lo_key_owner, uid) &&
-	    !capable(CAP_SYS_ADMIN)) {
-		err = -EPERM;
-		goto out_unlock;
-	}
-	if (lo->lo_state != Lo_bound) {
-		err = -ENXIO;
-		goto out_unlock;
-	}
-	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
-		err = -EINVAL;
-		goto out_unlock;
-	}
-
-	if (lo->lo_offset != info->lo_offset ||
-	    lo->lo_sizelimit != info->lo_sizelimit) {
-		size_changed = true;
-		sync_blockdev(lo->lo_device);
-		kill_bdev(lo->lo_device);
-	}
 
-	/* I/O need to be drained during transfer transition */
-	blk_mq_freeze_queue(lo->lo_queue);
-
-	if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
-		/* If any pages were dirtied after kill_bdev(), try again */
-		err = -EAGAIN;
-		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
-			__func__, lo->lo_number, lo->lo_file_name,
-			lo->lo_device->bd_inode->i_mapping->nrpages);
-		goto out_unfreeze;
-	}
+	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
+		return -EINVAL;
 
 	err = loop_release_xfer(lo);
 	if (err)
-		goto out_unfreeze;
+		return err;
 
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;
 
-		if (type >= MAX_LO_CRYPT) {
-			err = -EINVAL;
-			goto out_unfreeze;
-		}
+		if (type >= MAX_LO_CRYPT)
+			return -EINVAL;
 		xfer = xfer_funcs[type];
-		if (xfer == NULL) {
-			err = -EINVAL;
-			goto out_unfreeze;
-		}
+		if (xfer == NULL)
+			return -EINVAL;
 	} else
 		xfer = NULL;
 
 	err = loop_init_xfer(lo, xfer, info);
 	if (err)
-		goto out_unfreeze;
+		return err;
 
 	lo->lo_offset = info->lo_offset;
 	lo->lo_sizelimit = info->lo_sizelimit;
@@ -1346,6 +1314,55 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 		lo->lo_key_owner = uid;
 	}
 
+	return 0;
+}
+
+static int
+loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+{
+	int err;
+	struct block_device *bdev;
+	kuid_t uid = current_uid();
+	bool partscan = false;
+	bool size_changed = false;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	if (lo->lo_encrypt_key_size &&
+	    !uid_eq(lo->lo_key_owner, uid) &&
+	    !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out_unlock;
+	}
+	if (lo->lo_state != Lo_bound) {
+		err = -ENXIO;
+		goto out_unlock;
+	}
+
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		size_changed = true;
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
+	/* I/O need to be drained during transfer transition */
+	blk_mq_freeze_queue(lo->lo_queue);
+
+	if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
+		/* If any pages were dirtied after kill_bdev(), try again */
+		err = -EAGAIN;
+		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+			__func__, lo->lo_number, lo->lo_file_name,
+			lo->lo_device->bd_inode->i_mapping->nrpages);
+		goto out_unfreeze;
+	}
+
+	err = loop_set_status_from_info(lo, info);
+	if (err)
+		goto out_unfreeze;
+
 	if (size_changed) {
 		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
 					   lo->lo_backing_file);