@@ -110,24 +110,20 @@ EXPORT_SYMBOL(invalidate_bdev);
 int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
 		loff_t lstart, loff_t lend)
 {
-	struct block_device *claimed_bdev = NULL;
-	int err;
-
 	/*
 	 * If we don't hold exclusive handle for the device, upgrade to it
 	 * while we discard the buffer cache to avoid discarding buffers
 	 * under live filesystem.
 	 */
 	if (!(mode & FMODE_EXCL)) {
-		claimed_bdev = bdev_whole(bdev);
-		err = bd_prepare_to_claim(bdev, claimed_bdev,
-					  truncate_bdev_range);
+		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
 		if (err)
 			return err;
 	}
+
 	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
-	if (claimed_bdev)
-		bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range);
+	if (!(mode & FMODE_EXCL))
+		bd_abort_claiming(bdev, truncate_bdev_range);
 	return 0;
 }
 EXPORT_SYMBOL(truncate_bdev_range);
@@ -978,7 +974,6 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
 /**
  * bd_prepare_to_claim - claim a block device
  * @bdev: block device of interest
- * @whole: the whole device containing @bdev, may equal @bdev
  * @holder: holder trying to claim @bdev
  *
  * Claim @bdev. This function fails if @bdev is already claimed by another
@@ -988,9 +983,12 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
  * RETURNS:
  * 0 if @bdev can be claimed, -EBUSY otherwise.
  */
-int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
-		void *holder)
+int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 {
+	struct block_device *whole = bdev_whole(bdev);
+
+	if (WARN_ON_ONCE(!holder))
+		return -EINVAL;
 retry:
 	spin_lock(&bdev_lock);
 	/* if someone else claimed, fail */
@@ -1030,15 +1028,15 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
 /**
  * bd_finish_claiming - finish claiming of a block device
  * @bdev: block device of interest
- * @whole: whole block device
  * @holder: holder that has claimed @bdev
  *
  * Finish exclusive open of a block device. Mark the device as exclusively
  * open by the holder and wake up all waiters for exclusive open to finish.
  */
-static void bd_finish_claiming(struct block_device *bdev,
-		struct block_device *whole, void *holder)
+static void bd_finish_claiming(struct block_device *bdev, void *holder)
 {
+	struct block_device *whole = bdev_whole(bdev);
+
 	spin_lock(&bdev_lock);
 	BUG_ON(!bd_may_claim(bdev, whole, holder));
 	/*
@@ -1063,11 +1061,10 @@ static void bd_finish_claiming(struct block_device *bdev,
  * also used when exclusive open is not actually desired and we just needed
  * to block other exclusive openers for a while.
  */
-void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
-		void *holder)
+void bd_abort_claiming(struct block_device *bdev, void *holder)
 {
 	spin_lock(&bdev_lock);
-	bd_clear_claiming(whole, holder);
+	bd_clear_claiming(bdev_whole(bdev), holder);
 	spin_unlock(&bdev_lock);
 }
 EXPORT_SYMBOL(bd_abort_claiming);
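
With the three helpers above, claiming is now driven by just the block device and a holder cookie; the whole-device lookup happens internally via bdev_whole(). A minimal sketch of the resulting pattern, modelled on truncate_bdev_range() in the first hunk (example_flush_range() is a hypothetical wrapper invented for illustration, not part of this patch; it assumes the usual fs/block_dev.c context):

	/*
	 * Sketch only: temporarily block other exclusive openers while
	 * touching the page cache, then drop the pending claim again.
	 */
	static int example_flush_range(struct block_device *bdev,
			loff_t lstart, loff_t lend)
	{
		int err;

		/* claim against the whole device; -EBUSY if already held */
		err = bd_prepare_to_claim(bdev, example_flush_range);
		if (err)
			return err;

		truncate_inode_pages_range(bdev->bd_inode->i_mapping,
					   lstart, lend);

		/* the claim was never finished, so abort it to wake waiters */
		bd_abort_claiming(bdev, example_flush_range);
		return 0;
	}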
@@ -1487,7 +1484,6 @@ void blkdev_put_no_open(struct block_device *bdev)
  */
 struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
 {
-	struct block_device *claiming;
 	bool unblock_events = true;
 	struct block_device *bdev;
 	struct gendisk *disk;
@@ -1510,15 +1506,9 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
 	disk = bdev->bd_disk;
 
 	if (mode & FMODE_EXCL) {
-		WARN_ON_ONCE(!holder);
-
-		ret = -ENOMEM;
-		claiming = bdget_disk(disk, 0);
-		if (!claiming)
-			goto put_blkdev;
-		ret = bd_prepare_to_claim(bdev, claiming, holder);
+		ret = bd_prepare_to_claim(bdev, holder);
 		if (ret)
-			goto put_claiming;
+			goto put_blkdev;
 	}
 
 	disk_block_events(disk);
@@ -1528,7 +1518,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
 	if (ret)
 		goto abort_claiming;
 	if (mode & FMODE_EXCL) {
-		bd_finish_claiming(bdev, claiming, holder);
+		bd_finish_claiming(bdev, holder);
 
 		/*
 		 * Block event polling for write claims if requested. Any write
@@ -1547,18 +1537,13 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
 
 	if (unblock_events)
 		disk_unblock_events(disk);
-	if (mode & FMODE_EXCL)
-		bdput(claiming);
 	return bdev;
 
 abort_claiming:
 	if (mode & FMODE_EXCL)
-		bd_abort_claiming(bdev, claiming, holder);
+		bd_abort_claiming(bdev, holder);
 	mutex_unlock(&bdev->bd_mutex);
 	disk_unblock_events(disk);
-put_claiming:
-	if (mode & FMODE_EXCL)
-		bdput(claiming);
 put_blkdev:
 	blkdev_put_no_open(bdev);
 	if (ret == -ERESTARTSYS)
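
For callers outside fs/block_dev.c the visible change is only that an exclusive open must still pass a non-NULL holder (now enforced in bd_prepare_to_claim() with -EINVAL). A hedged sketch of the usual open/close pairing around the updated blkdev_get_by_dev(); example_open_excl() is illustrative only, and the blkdev_put() pairing follows the existing convention of this kernel era rather than anything introduced by this patch:

	/* Sketch only: exclusive open of a block device by dev_t. */
	static int example_open_excl(dev_t devt, void *example_holder)
	{
		struct block_device *bdev;

		/* FMODE_EXCL requires a holder cookie identifying the claimer */
		bdev = blkdev_get_by_dev(devt,
				FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				example_holder);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		/* ... use the exclusively claimed device ... */

		/* matching release with the same mode drops the claim */
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		return 0;
	}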