@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
 	struct list_head *tmp, *next;
 
 	list_for_each_safe(tmp, next, devices) {
-		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		struct dm_dev_internal *dd =
+		    list_entry(tmp, struct dm_dev_internal, list);
 		kfree(dd);
 	}
 }
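The hunks that follow all reach fields through dd->dm_dev.<field>, which implies the patch wraps the old public struct in a core-private one. A rough sketch of the two structures, inferred from the accesses in this diff (the real definitions presumably live in include/linux/device-mapper.h and drivers/md/dm.h; the name[] size is illustrative):

/* Public handle that targets receive (inferred shape). */
struct dm_dev {
	struct block_device *bdev;
	int mode;
	char name[16];		/* filled by format_dev_t() below */
};

/* Core-private wrapper around the public part. */
struct dm_dev_internal {
	struct list_head list;	/* linkage on the table's device list */
	atomic_t count;		/* reference count, see dm_put_device() */
	struct dm_dev dm_dev;	/* embedded public handle */
};

Targets keep seeing only struct dm_dev; the list and count bookkeeping stays private to the device-mapper core.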
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
 /*
  * See if we've already got a device in the list.
  */
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 
 	list_for_each_entry(dd, l, list)
-		if (dd->bdev->bd_dev == dev)
+		if (dd->dm_dev.bdev->bd_dev == dev)
 			return dd;
 
 	return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+		    struct mapped_device *md)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
-	BUG_ON(d->bdev);
+	BUG_ON(d->dm_dev.bdev);
 
-	bdev = open_by_devnum(dev, d->mode);
+	bdev = open_by_devnum(dev, d->dm_dev.mode);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
 	if (r)
 		blkdev_put(bdev);
 	else
-		d->bdev = bdev;
+		d->dm_dev.bdev = bdev;
 	return r;
 }
 
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 {
-	if (!d->bdev)
+	if (!d->dm_dev.bdev)
 		return;
 
-	bd_release_from_disk(d->bdev, dm_disk(md));
-	blkdev_put(d->bdev);
-	d->bdev = NULL;
+	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+	blkdev_put(d->dm_dev.bdev);
+	d->dm_dev.bdev = NULL;
 }
 
 /*
  * If possible, this checks an area of a destination device is valid.
  */
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+			     sector_t len)
 {
-	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
 
 	if (!dev_size)
 		return 1;
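For orientation: open_dev() and close_dev() are strict inverses, opening by device number and claiming the bdev on behalf of device-mapper, then releasing the claim, dropping the reference, and clearing the pointer. A condensed, hypothetical round trip under the 2008-era block API these hunks assume (attach_detach_example is not in the patch):

/* Hypothetical caller; error handling condensed. */
static int attach_detach_example(struct dm_dev_internal *dd, dev_t dev,
				 struct mapped_device *md)
{
	int r;

	dd->dm_dev.bdev = NULL;		/* open_dev() BUG_ONs a non-NULL bdev */
	r = open_dev(dd, dev, md);	/* open_by_devnum() + bd_claim_by_disk() */
	if (r)
		return r;		/* on failure, bdev is left NULL */

	/* ... I/O through dd->dm_dev.bdev ... */

	close_dev(dd, md);		/* bd_release_from_disk() + blkdev_put() */
	return 0;
}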
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+			struct mapped_device *md)
 {
 	int r;
-	struct dm_dev dd_copy;
-	dev_t dev = dd->bdev->bd_dev;
+	struct dm_dev_internal dd_copy;
+	dev_t dev = dd->dm_dev.bdev->bd_dev;
 
 	dd_copy = *dd;
 
-	dd->mode |= new_mode;
-	dd->bdev = NULL;
+	dd->dm_dev.mode |= new_mode;
+	dd->dm_dev.bdev = NULL;
 	r = open_dev(dd, dev, md);
 	if (!r)
 		close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 {
 	int r;
 	dev_t uninitialized_var(dev);
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	unsigned int major, minor;
 
 	BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		if (!dd)
 			return -ENOMEM;
 
-		dd->mode = mode;
-		dd->bdev = NULL;
+		dd->dm_dev.mode = mode;
+		dd->dm_dev.bdev = NULL;
 
 		if ((r = open_dev(dd, dev, t->md))) {
 			kfree(dd);
 			return r;
 		}
 
-		format_dev_t(dd->name, dev);
+		format_dev_t(dd->dm_dev.name, dev);
 
 		atomic_set(&dd->count, 0);
 		list_add(&dd->list, &t->devices);
 
-	} else if (dd->mode != (mode | dd->mode)) {
+	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
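The reuse branch's condition is a bitwise idiom: mode | dd->dm_dev.mode differs from the stored mode exactly when the request carries permission bits the existing open lacks, and only then is upgrade_mode() needed. The same check in isolation (helper name hypothetical):

/* True iff 'want' has bits not already present in 'have'. */
static inline int mode_needs_upgrade(int have, int want)
{
	return have != (want | have);
}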
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 	if (!check_device_area(dd, start, len)) {
 		DMWARN("device %s too small for target", path);
-		dm_put_device(ti, dd);
+		dm_put_device(ti, &dd->dm_dev);
 		return -EINVAL;
 	}
 
-	*result = dd;
+	*result = &dd->dm_dev;
 
 	return 0;
 }
@@ -540,8 +544,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 /*
  * Decrement a devices use count and remove it if necessary.
  */
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
+	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+						  dm_dev);
+
 	if (atomic_dec_and_test(&dd->count)) {
 		close_dev(dd, ti->table->md);
 		list_del(&dd->list);
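dm_put_device() now accepts the public struct dm_dev and steps back to the enclosing wrapper with container_of(). A simplified sketch of that macro (the kernel's version in linux/kernel.h adds a typeof-based type check):

#include <stddef.h>

/* Recover the enclosing structure from a pointer to one of its
 * members by subtracting the member's offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

Since __table_get_device() hands out &dd->dm_dev, the subtraction lands exactly on the dm_dev_internal that owns count and list.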
@@ -937,12 +944,12 @@ int dm_table_resume_targets(struct dm_table *t)
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}
 
@@ -951,11 +958,11 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 
 void dm_table_unplug_all(struct dm_table *t)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
 
 		blk_unplug(q);
 	}
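From a target's perspective the API is unchanged: it still requests and releases devices through struct dm_dev. A hypothetical constructor to illustrate (example_ctr and its argument handling are not part of this patch; signatures follow the era's dm_get_device() as seen in the hunk headers above):

/* Hypothetical target ctr; the wrapper's bookkeeping is invisible here. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	if (argc < 1) {
		ti->error = "device path required";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r)
		return r;

	ti->private = dev;
	return 0;
}

The matching destructor would call dm_put_device(ti, dev), which is where the container_of() shown earlier recovers the refcount.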