Skip to content

Commit eb2aa48

Browse files
jthornber authored and kergon committed
dm thin: prepare to support discard
This patch contains the ground work needed for dm-thin to support discard. - Adds endio function that replaces shared_read_endio. - Introduce an explicit 'quiesced' flag into the new_mapping structure. Before, this was implicitly indicated by m->list being empty. - The map_info->ptr remains constant for the duration of a bio's trip through the thin target. Make it easier to reason about it. Signed-off-by: Joe Thornber <[email protected]> Signed-off-by: Mike Snitzer <[email protected]> Signed-off-by: Alasdair G Kergon <[email protected]>
1 parent 6efd6e8 commit eb2aa48

File tree

1 file changed

+72
-53
lines changed

1 file changed

+72
-53
lines changed

drivers/md/dm-thin.c

Lines changed: 72 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -523,7 +523,7 @@ struct pool {
523523

524524
struct bio_list retry_on_resume_list;
525525

526-
struct deferred_set ds; /* FIXME: move to thin_c */
526+
struct deferred_set shared_read_ds;
527527

528528
struct new_mapping *next_mapping;
529529
mempool_t *mapping_pool;
@@ -618,6 +618,12 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
618618

619619
/*----------------------------------------------------------------*/
620620

621+
struct endio_hook {
622+
struct thin_c *tc;
623+
struct deferred_entry *shared_read_entry;
624+
struct new_mapping *overwrite_mapping;
625+
};
626+
621627
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
622628
{
623629
struct bio *bio;
@@ -628,7 +634,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
628634
bio_list_init(master);
629635

630636
while ((bio = bio_list_pop(&bios))) {
631-
if (dm_get_mapinfo(bio)->ptr == tc)
637+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
638+
if (h->tc == tc)
632639
bio_endio(bio, DM_ENDIO_REQUEUE);
633640
else
634641
bio_list_add(master, bio);
@@ -716,16 +723,11 @@ static void wake_worker(struct pool *pool)
716723
/*
717724
* Bio endio functions.
718725
*/
719-
struct endio_hook {
720-
struct thin_c *tc;
721-
bio_end_io_t *saved_bi_end_io;
722-
struct deferred_entry *entry;
723-
};
724-
725726
struct new_mapping {
726727
struct list_head list;
727728

728-
int prepared;
729+
unsigned quiesced:1;
730+
unsigned prepared:1;
729731

730732
struct thin_c *tc;
731733
dm_block_t virt_block;
@@ -747,7 +749,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
747749
{
748750
struct pool *pool = m->tc->pool;
749751

750-
if (list_empty(&m->list) && m->prepared) {
752+
if (m->quiesced && m->prepared) {
751753
list_add(&m->list, &pool->prepared_mappings);
752754
wake_worker(pool);
753755
}
@@ -770,7 +772,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
770772
static void overwrite_endio(struct bio *bio, int err)
771773
{
772774
unsigned long flags;
773-
struct new_mapping *m = dm_get_mapinfo(bio)->ptr;
775+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
776+
struct new_mapping *m = h->overwrite_mapping;
774777
struct pool *pool = m->tc->pool;
775778

776779
m->err = err;
@@ -781,31 +784,6 @@ static void overwrite_endio(struct bio *bio, int err)
781784
spin_unlock_irqrestore(&pool->lock, flags);
782785
}
783786

784-
static void shared_read_endio(struct bio *bio, int err)
785-
{
786-
struct list_head mappings;
787-
struct new_mapping *m, *tmp;
788-
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
789-
unsigned long flags;
790-
struct pool *pool = h->tc->pool;
791-
792-
bio->bi_end_io = h->saved_bi_end_io;
793-
bio_endio(bio, err);
794-
795-
INIT_LIST_HEAD(&mappings);
796-
ds_dec(h->entry, &mappings);
797-
798-
spin_lock_irqsave(&pool->lock, flags);
799-
list_for_each_entry_safe(m, tmp, &mappings, list) {
800-
list_del(&m->list);
801-
INIT_LIST_HEAD(&m->list);
802-
__maybe_add_mapping(m);
803-
}
804-
spin_unlock_irqrestore(&pool->lock, flags);
805-
806-
mempool_free(h, pool->endio_hook_pool);
807-
}
808-
809787
/*----------------------------------------------------------------*/
810788

811789
/*
@@ -957,6 +935,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
957935
struct new_mapping *m = get_next_mapping(pool);
958936

959937
INIT_LIST_HEAD(&m->list);
938+
m->quiesced = 0;
960939
m->prepared = 0;
961940
m->tc = tc;
962941
m->virt_block = virt_block;
@@ -965,7 +944,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
965944
m->err = 0;
966945
m->bio = NULL;
967946

968-
ds_add_work(&pool->ds, &m->list);
947+
if (!ds_add_work(&pool->shared_read_ds, &m->list))
948+
m->quiesced = 1;
969949

970950
/*
971951
* IO to pool_dev remaps to the pool target's data_dev.
@@ -974,9 +954,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
974954
* bio immediately. Otherwise we use kcopyd to clone the data first.
975955
*/
976956
if (io_overwrites_block(pool, bio)) {
957+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
958+
h->overwrite_mapping = m;
977959
m->bio = bio;
978960
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
979-
dm_get_mapinfo(bio)->ptr = m;
980961
remap_and_issue(tc, bio, data_dest);
981962
} else {
982963
struct dm_io_region from, to;
@@ -1023,6 +1004,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
10231004
struct new_mapping *m = get_next_mapping(pool);
10241005

10251006
INIT_LIST_HEAD(&m->list);
1007+
m->quiesced = 1;
10261008
m->prepared = 0;
10271009
m->tc = tc;
10281010
m->virt_block = virt_block;
@@ -1040,9 +1022,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
10401022
process_prepared_mapping(m);
10411023

10421024
else if (io_overwrites_block(pool, bio)) {
1025+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
1026+
h->overwrite_mapping = m;
10431027
m->bio = bio;
10441028
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1045-
dm_get_mapinfo(bio)->ptr = m;
10461029
remap_and_issue(tc, bio, data_block);
10471030

10481031
} else {
@@ -1129,7 +1112,8 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
11291112
*/
11301113
static void retry_on_resume(struct bio *bio)
11311114
{
1132-
struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
1115+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
1116+
struct thin_c *tc = h->tc;
11331117
struct pool *pool = tc->pool;
11341118
unsigned long flags;
11351119

@@ -1195,13 +1179,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
11951179
if (bio_data_dir(bio) == WRITE)
11961180
break_sharing(tc, bio, block, &key, lookup_result, cell);
11971181
else {
1198-
struct endio_hook *h;
1199-
h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1182+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
12001183

1201-
h->tc = tc;
1202-
h->entry = ds_inc(&pool->ds);
1203-
save_and_set_endio(bio, &h->saved_bi_end_io, shared_read_endio);
1204-
dm_get_mapinfo(bio)->ptr = h;
1184+
h->shared_read_entry = ds_inc(&pool->shared_read_ds);
12051185

12061186
cell_release_singleton(cell, bio);
12071187
remap_and_issue(tc, bio, lookup_result->block);
@@ -1325,7 +1305,9 @@ static void process_deferred_bios(struct pool *pool)
13251305
spin_unlock_irqrestore(&pool->lock, flags);
13261306

13271307
while ((bio = bio_list_pop(&bios))) {
1328-
struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
1308+
struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
1309+
struct thin_c *tc = h->tc;
1310+
13291311
/*
13301312
* If we've got no free new_mapping structs, and processing
13311313
* this bio might require one, we pause until there are some
@@ -1408,6 +1390,18 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
14081390
wake_worker(pool);
14091391
}
14101392

1393+
static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1394+
{
1395+
struct pool *pool = tc->pool;
1396+
struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1397+
1398+
h->tc = tc;
1399+
h->shared_read_entry = NULL;
1400+
h->overwrite_mapping = NULL;
1401+
1402+
return h;
1403+
}
1404+
14111405
/*
14121406
* Non-blocking function called from the thin target's map function.
14131407
*/
@@ -1420,11 +1414,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
14201414
struct dm_thin_device *td = tc->td;
14211415
struct dm_thin_lookup_result result;
14221416

1423-
/*
1424-
* Save the thin context for easy access from the deferred bio later.
1425-
*/
1426-
map_context->ptr = tc;
1427-
1417+
map_context->ptr = thin_hook_bio(tc, bio);
14281418
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
14291419
thin_defer_bio(tc, bio);
14301420
return DM_MAPIO_SUBMITTED;
@@ -1604,7 +1594,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
16041594
pool->low_water_triggered = 0;
16051595
pool->no_free_space = 0;
16061596
bio_list_init(&pool->retry_on_resume_list);
1607-
ds_init(&pool->ds);
1597+
ds_init(&pool->shared_read_ds);
16081598

16091599
pool->next_mapping = NULL;
16101600
pool->mapping_pool =
@@ -2394,6 +2384,34 @@ static int thin_map(struct dm_target *ti, struct bio *bio,
23942384
return thin_bio_map(ti, bio, map_context);
23952385
}
23962386

2387+
static int thin_endio(struct dm_target *ti,
2388+
struct bio *bio, int err,
2389+
union map_info *map_context)
2390+
{
2391+
unsigned long flags;
2392+
struct endio_hook *h = map_context->ptr;
2393+
struct list_head work;
2394+
struct new_mapping *m, *tmp;
2395+
struct pool *pool = h->tc->pool;
2396+
2397+
if (h->shared_read_entry) {
2398+
INIT_LIST_HEAD(&work);
2399+
ds_dec(h->shared_read_entry, &work);
2400+
2401+
spin_lock_irqsave(&pool->lock, flags);
2402+
list_for_each_entry_safe(m, tmp, &work, list) {
2403+
list_del(&m->list);
2404+
m->quiesced = 1;
2405+
__maybe_add_mapping(m);
2406+
}
2407+
spin_unlock_irqrestore(&pool->lock, flags);
2408+
}
2409+
2410+
mempool_free(h, pool->endio_hook_pool);
2411+
2412+
return 0;
2413+
}
2414+
23972415
static void thin_postsuspend(struct dm_target *ti)
23982416
{
23992417
if (dm_noflush_suspending(ti))
@@ -2481,6 +2499,7 @@ static struct target_type thin_target = {
24812499
.ctr = thin_ctr,
24822500
.dtr = thin_dtr,
24832501
.map = thin_map,
2502+
.end_io = thin_endio,
24842503
.postsuspend = thin_postsuspend,
24852504
.status = thin_status,
24862505
.iterate_devices = thin_iterate_devices,

0 commit comments

Comments (0)