@@ -489,6 +489,13 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * devices.
  */
 struct new_mapping;
+
+struct pool_features {
+	unsigned zero_new_blocks:1;
+	unsigned discard_enabled:1;
+	unsigned discard_passdown:1;
+};
+
 struct pool {
 	struct list_head list;
 	struct dm_target *ti;	/* Only set if a pool target is bound */
@@ -502,7 +509,7 @@ struct pool {
 	dm_block_t offset_mask;
 	dm_block_t low_water_blocks;
 
-	unsigned zero_new_blocks:1;
+	struct pool_features pf;
 	unsigned low_water_triggered:1;	/* A dm event has been sent */
 	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */
 
@@ -543,7 +550,7 @@ struct pool_c {
 	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
-	unsigned zero_new_blocks:1;
+	struct pool_features pf;
 };
 
 /*
@@ -1051,7 +1058,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	 * zeroing pre-existing data, we can issue the bio immediately.
 	 * Otherwise we use kcopyd to zero the data first.
 	 */
-	if (!pool->zero_new_blocks)
+	if (!pool->pf.zero_new_blocks)
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
@@ -1202,7 +1209,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 */
 			m = get_next_mapping(pool);
 			m->tc = tc;
-			m->pass_discard = !lookup_result.shared;
+			m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown;
 			m->virt_block = block;
 			m->data_block = lookup_result.block;
 			m->cell = cell;
@@ -1617,7 +1624,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 
 	pool->ti = ti;
 	pool->low_water_blocks = pt->low_water_blocks;
-	pool->zero_new_blocks = pt->zero_new_blocks;
+	pool->pf = pt->pf;
 
 	return 0;
 }
@@ -1631,6 +1638,14 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 /*----------------------------------------------------------------
  * Pool creation
  *--------------------------------------------------------------*/
+/* Initialize pool features. */
+static void pool_features_init(struct pool_features *pf)
+{
+	pf->zero_new_blocks = 1;
+	pf->discard_enabled = 1;
+	pf->discard_passdown = 1;
+}
+
 static void __pool_destroy(struct pool *pool)
 {
 	__pool_table_remove(pool);
@@ -1678,7 +1693,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->block_shift = ffs(block_size) - 1;
 	pool->offset_mask = block_size - 1;
 	pool->low_water_blocks = 0;
-	pool->zero_new_blocks = 1;
+	pool_features_init(&pool->pf);
 	pool->prison = prison_create(PRISON_CELLS);
 	if (!pool->prison) {
 		*error = "Error creating pool's bio prison";
@@ -1775,7 +1790,8 @@ static void __pool_dec(struct pool *pool)
 
 static struct pool *__pool_find(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
-				unsigned long block_size, char **error)
+				unsigned long block_size, char **error,
+				int *created)
 {
 	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
 
@@ -1791,8 +1807,10 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
 				return ERR_PTR(-EINVAL);
 			__pool_inc(pool);
 
-		} else
+		} else {
 			pool = pool_create(pool_md, metadata_dev, block_size, error);
+			*created = 1;
+		}
 	}
 
 	return pool;
@@ -1816,10 +1834,6 @@ static void pool_dtr(struct dm_target *ti)
 	mutex_unlock(&dm_thin_pool_table.mutex);
 }
 
-struct pool_features {
-	unsigned zero_new_blocks:1;
-};
-
 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 			       struct dm_target *ti)
 {
@@ -1828,7 +1842,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 	const char *arg_name;
 
 	static struct dm_arg _args[] = {
-		{0, 1, "Invalid number of pool feature arguments"},
+		{0, 3, "Invalid number of pool feature arguments"},
 	};
 
 	/*
@@ -1848,6 +1862,12 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 		if (!strcasecmp(arg_name, "skip_block_zeroing")) {
 			pf->zero_new_blocks = 0;
 			continue;
+		} else if (!strcasecmp(arg_name, "ignore_discard")) {
+			pf->discard_enabled = 0;
+			continue;
+		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
+			pf->discard_passdown = 0;
+			continue;
 		}
 
 		ti->error = "Unrecognised pool feature requested";
@@ -1865,10 +1885,12 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
  *
  * Optional feature arguments are:
  *	 skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
+ *	 ignore_discard: disable discard
+ *	 no_discard_passdown: don't pass discards down to the data device
  */
 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
-	int r;
+	int r, pool_created = 0;
 	struct pool_c *pt;
 	struct pool *pool;
 	struct pool_features pf;
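
Note: for illustration, a first table load that uses the new feature arguments
might look like the following (device paths, pool size, and low-water mark are
hypothetical):

    # 10 GiB pool, 64 KiB data blocks (128 sectors), low-water mark of 32768
    # blocks; the trailing "2" announces two feature arguments
    dmsetup create pool --table \
        "0 20971520 thin-pool /dev/sdb1 /dev/sdb2 128 32768 2 ignore_discard no_discard_passdown"
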
@@ -1928,8 +1950,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	/*
 	 * Set default pool features.
 	 */
-	memset(&pf, 0, sizeof(pf));
-	pf.zero_new_blocks = 1;
+	pool_features_init(&pf);
 
 	dm_consume_args(&as, 4);
 	r = parse_pool_features(&as, &pf, ti);
@@ -1943,21 +1964,58 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
-			   block_size, &ti->error);
+			   block_size, &ti->error, &pool_created);
 	if (IS_ERR(pool)) {
 		r = PTR_ERR(pool);
 		goto out_free_pt;
 	}
 
+	/*
+	 * 'pool_created' reflects whether this is the first table load.
+	 * Top level discard support is not allowed to be changed after
+	 * initial load.  This would require a pool reload to trigger thin
+	 * device changes.
+	 */
+	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
+		ti->error = "Discard support cannot be disabled once enabled";
+		r = -EINVAL;
+		goto out_flags_changed;
+	}
+
+	/*
+	 * If discard_passdown was enabled verify that the data device
+	 * supports discards.  Disable discard_passdown if not; otherwise
+	 * -EOPNOTSUPP will be returned.
+	 */
+	if (pf.discard_passdown) {
+		struct request_queue *q = bdev_get_queue(data_dev->bdev);
+		if (!q || !blk_queue_discard(q)) {
+			DMWARN("Discard unsupported by data device: Disabling discard passdown.");
+			pf.discard_passdown = 0;
+		}
+	}
+
 	pt->pool = pool;
 	pt->ti = ti;
 	pt->metadata_dev = metadata_dev;
 	pt->data_dev = data_dev;
 	pt->low_water_blocks = low_water_blocks;
-	pt->zero_new_blocks = pf.zero_new_blocks;
+	pt->pf = pf;
 	ti->num_flush_requests = 1;
-	ti->num_discard_requests = 1;
-	ti->discards_supported = 1;
+	/*
+	 * Only need to enable discards if the pool should pass
+	 * them down to the data device.  The thin device's discard
+	 * processing will cause mappings to be removed from the btree.
+	 */
+	if (pf.discard_enabled && pf.discard_passdown) {
+		ti->num_discard_requests = 1;
+		/*
+		 * Setting 'discards_supported' circumvents the normal
+		 * stacking of discard limits (this keeps the pool and
+		 * thin devices' discard limits consistent).
+		 */
+		ti->discards_supported = 1;
+	}
 	ti->private = pt;
 
 	pt->callbacks.congested_fn = pool_is_congested;
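
Note: when discard_passdown is requested but the data device cannot discard,
the DMWARN above surfaces in the kernel log; with dm-thin's message prefix it
would read roughly as follows (the exact prefix is an assumption):

    device-mapper: thin: Discard unsupported by data device: Disabling discard passdown.
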
@@ -1967,6 +2025,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	return 0;
 
+out_flags_changed:
+	__pool_dec(pool);
 out_free_pt:
 	kfree(pt);
 out:
@@ -2255,7 +2315,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 static int pool_status(struct dm_target *ti, status_type_t type,
 		       char *result, unsigned maxlen)
 {
-	int r;
+	int r, count;
 	unsigned sz = 0;
 	uint64_t transaction_id;
 	dm_block_t nr_free_blocks_data;
@@ -2318,10 +2378,19 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		       (unsigned long)pool->sectors_per_block,
 		       (unsigned long long)pt->low_water_blocks);
 
-		DMEMIT("%u ", !pool->zero_new_blocks);
+		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
+			!pool->pf.discard_passdown;
+		DMEMIT("%u ", count);
 
-		if (!pool->zero_new_blocks)
+		if (!pool->pf.zero_new_blocks)
 			DMEMIT("skip_block_zeroing ");
+
+		if (!pool->pf.discard_enabled)
+			DMEMIT("ignore_discard ");
+
+		if (!pool->pf.discard_passdown)
+			DMEMIT("no_discard_passdown ");
+
 		break;
 	}
 
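
Note: as a hypothetical example, a pool loaded with all three feature
arguments would report them back through "dmsetup table" along these lines
(device numbers and sizes are illustrative):

    $ dmsetup table pool
    0 20971520 thin-pool 253:0 253:1 128 32768 3 skip_block_zeroing ignore_discard no_discard_passdown
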
@@ -2352,13 +2421,17 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 
 static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
 {
+	/*
+	 * FIXME: these limits may be incompatible with the pool's data device
+	 */
 	limits->max_discard_sectors = pool->sectors_per_block;
 
 	/*
 	 * This is just a hint, and not enforced.  We have to cope with
 	 * bios that overlap 2 blocks.
 	 */
 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+	limits->discard_zeroes_data = pool->pf.zero_new_blocks;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
static void pool_io_hints (struct dm_target * ti , struct queue_limits * limits )
@@ -2368,14 +2441,15 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	blk_limits_io_min(limits, 0);
 	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-	set_discard_limits(pool, limits);
+	if (pool->pf.discard_enabled)
+		set_discard_limits(pool, limits);
 }
 
 static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2417,6 +2491,9 @@ static void thin_dtr(struct dm_target *ti)
  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
  * dev_id: the internal device identifier
  * origin_dev: a device external to the pool that should act as the origin
+ *
+ * If the pool device has discards disabled, they get disabled for the thin
+ * device as well.
  */
 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
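
Note: a thin device stacked on such a pool picks up the pool's discard
settings automatically at thin_ctr time; a sketch of creating and activating
one (device id and sizes are illustrative):

    # allocate thin device id 0 inside the pool, then activate a 1 GiB volume
    dmsetup message /dev/mapper/pool 0 "create_thin 0"
    dmsetup create thin1 --table "0 2097152 thin /dev/mapper/pool 0"
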
@@ -2485,8 +2562,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	ti->split_io = tc->pool->sectors_per_block;
 	ti->num_flush_requests = 1;
-	ti->num_discard_requests = 1;
-	ti->discards_supported = 1;
+
+	/* In case the pool supports discards, pass them on. */
+	if (tc->pool->pf.discard_enabled) {
+		ti->discards_supported = 1;
+		ti->num_discard_requests = 1;
+	}
 
 	dm_put(pool_md);
 