Commit ae1093b

Mikulas Patocka authored and snitm committed
dm snapshot: use mutex instead of rw_semaphore
The rw_semaphore is acquired for read only in two places, neither is
performance-critical. So replace it with a mutex -- which is more
efficient.

Signed-off-by: Mikulas Patocka <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
1 parent 7690e25 commit ae1093b
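The conversion is mechanical: init_rwsem() becomes mutex_init(), down_read()/down_write() become mutex_lock(), up_read()/up_write() become mutex_unlock(), and a matching mutex_destroy() is added on the teardown path. A minimal sketch of the pattern (hypothetical struct and function names, not code from dm-snap.c):

#include <linux/mutex.h>

struct foo {				/* hypothetical example structure */
	struct mutex lock;		/* was: struct rw_semaphore lock; */
	int active;
};

static void foo_init(struct foo *f)
{
	mutex_init(&f->lock);		/* was: init_rwsem(&f->lock); */
}

static int foo_is_active(struct foo *f)
{
	int active;

	mutex_lock(&f->lock);		/* was: down_read(&f->lock); */
	active = f->active;
	mutex_unlock(&f->lock);		/* was: up_read(&f->lock); */

	return active;
}

static void foo_exit(struct foo *f)
{
	mutex_destroy(&f->lock);	/* pairs with mutex_init() */
}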

File tree

1 file changed: +43 -41 lines changed

drivers/md/dm-snap.c

Lines changed: 43 additions & 41 deletions
@@ -47,7 +47,7 @@ struct dm_exception_table {
 };
 
 struct dm_snapshot {
-	struct rw_semaphore lock;
+	struct mutex lock;
 
 	struct dm_dev *origin;
 	struct dm_dev *cow;
@@ -439,9 +439,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
 		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
 			continue;
 
-		down_read(&s->lock);
+		mutex_lock(&s->lock);
 		active = s->active;
-		up_read(&s->lock);
+		mutex_unlock(&s->lock);
 
 		if (active) {
 			if (snap_src)
@@ -909,7 +909,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
 	int r;
 	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
 
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 
 	/*
	 * Process chunks (and associated exceptions) in reverse order
@@ -924,7 +924,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
 	b = __release_queued_bios_after_merge(s);
 
 out:
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 	if (b)
 		flush_bios(b);
 
@@ -983,9 +983,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 		if (linear_chunks < 0) {
 			DMERR("Read error in exception store: "
 			      "shutting down merge");
-			down_write(&s->lock);
+			mutex_lock(&s->lock);
 			s->merge_failed = 1;
-			up_write(&s->lock);
+			mutex_unlock(&s->lock);
 		}
 		goto shut;
 	}
@@ -1026,10 +1026,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 		previous_count = read_pending_exceptions_done_count();
 	}
 
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 	s->first_merging_chunk = old_chunk;
 	s->num_merging_chunks = linear_chunks;
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 
 	/* Wait until writes to all 'linear_chunks' drain */
 	for (i = 0; i < linear_chunks; i++)
@@ -1071,10 +1071,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
 	return;
 
 shut:
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 	s->merge_failed = 1;
 	b = __release_queued_bios_after_merge(s);
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 	error_bios(b);
 
 	merge_shutdown(s);
@@ -1173,7 +1173,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	s->exception_start_sequence = 0;
 	s->exception_complete_sequence = 0;
 	INIT_LIST_HEAD(&s->out_of_order_list);
-	init_rwsem(&s->lock);
+	mutex_init(&s->lock);
 	INIT_LIST_HEAD(&s->list);
 	spin_lock_init(&s->pe_lock);
 	s->state_bits = 0;
@@ -1338,9 +1338,9 @@ static void snapshot_dtr(struct dm_target *ti)
 	/* Check whether exception handover must be cancelled */
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest && (s == snap_src)) {
-		down_write(&snap_dest->lock);
+		mutex_lock(&snap_dest->lock);
 		snap_dest->valid = 0;
-		up_write(&snap_dest->lock);
+		mutex_unlock(&snap_dest->lock);
 		DMERR("Cancelling snapshot handover.");
 	}
 	up_read(&_origins_lock);
@@ -1371,6 +1371,8 @@ static void snapshot_dtr(struct dm_target *ti)
 
 	dm_exception_store_destroy(s->store);
 
+	mutex_destroy(&s->lock);
+
 	dm_put_device(ti, s->cow);
 
 	dm_put_device(ti, s->origin);
@@ -1458,22 +1460,22 @@ static void pending_complete(void *context, int success)
 
 	if (!success) {
 		/* Read/write error - snapshot is unusable */
-		down_write(&s->lock);
+		mutex_lock(&s->lock);
 		__invalidate_snapshot(s, -EIO);
 		error = 1;
 		goto out;
 	}
 
 	e = alloc_completed_exception(GFP_NOIO);
 	if (!e) {
-		down_write(&s->lock);
+		mutex_lock(&s->lock);
 		__invalidate_snapshot(s, -ENOMEM);
 		error = 1;
 		goto out;
 	}
 	*e = pe->e;
 
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 	if (!s->valid) {
 		free_completed_exception(e);
 		error = 1;
@@ -1498,7 +1500,7 @@ static void pending_complete(void *context, int success)
 		full_bio->bi_end_io = pe->full_bio_end_io;
 	increment_pending_exceptions_done_count();
 
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 
 	/* Submit any pending write bios */
 	if (error) {
@@ -1694,7 +1696,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 	/* FIXME: should only take write lock if we need
	 * to copy an exception */
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 
 	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
 	    bio_data_dir(bio) == WRITE)) {
@@ -1717,9 +1719,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	if (bio_data_dir(bio) == WRITE) {
 		pe = __lookup_pending_exception(s, chunk);
 		if (!pe) {
-			up_write(&s->lock);
+			mutex_unlock(&s->lock);
 			pe = alloc_pending_exception(s);
-			down_write(&s->lock);
+			mutex_lock(&s->lock);
 
 			if (!s->valid || s->snapshot_overflowed) {
 				free_pending_exception(pe);
@@ -1754,7 +1756,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 		    bio->bi_iter.bi_size ==
 		    (s->store->chunk_size << SECTOR_SHIFT)) {
 			pe->started = 1;
-			up_write(&s->lock);
+			mutex_unlock(&s->lock);
 			start_full_bio(pe, bio);
 			goto out;
 		}
@@ -1764,7 +1766,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 		if (!pe->started) {
 			/* this is protected by snap->lock */
 			pe->started = 1;
-			up_write(&s->lock);
+			mutex_unlock(&s->lock);
 			start_copy(pe);
 			goto out;
 		}
@@ -1774,7 +1776,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	}
 
 out_unlock:
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 out:
 	return r;
 }
@@ -1810,7 +1812,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
 	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 
 	/* Full merging snapshots are redirected to the origin */
 	if (!s->valid)
@@ -1841,12 +1843,12 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 	bio_set_dev(bio, s->origin->bdev);
 
 	if (bio_data_dir(bio) == WRITE) {
-		up_write(&s->lock);
+		mutex_unlock(&s->lock);
 		return do_origin(s->origin, bio);
 	}
 
 out_unlock:
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 
 	return r;
 }
@@ -1878,7 +1880,7 @@ static int snapshot_preresume(struct dm_target *ti)
 	down_read(&_origins_lock);
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest) {
-		down_read(&snap_src->lock);
+		mutex_lock(&snap_src->lock);
 		if (s == snap_src) {
 			DMERR("Unable to resume snapshot source until "
 			      "handover completes.");
@@ -1888,7 +1890,7 @@ static int snapshot_preresume(struct dm_target *ti)
 			      "source is suspended.");
 			r = -EINVAL;
 		}
-		up_read(&snap_src->lock);
+		mutex_unlock(&snap_src->lock);
 	}
 	up_read(&_origins_lock);
 
@@ -1934,11 +1936,11 @@ static void snapshot_resume(struct dm_target *ti)
 
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest) {
-		down_write(&snap_src->lock);
-		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+		mutex_lock(&snap_src->lock);
+		mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
 		__handover_exceptions(snap_src, snap_dest);
-		up_write(&snap_dest->lock);
-		up_write(&snap_src->lock);
+		mutex_unlock(&snap_dest->lock);
+		mutex_unlock(&snap_src->lock);
 	}
 
 	up_read(&_origins_lock);
@@ -1953,9 +1955,9 @@ static void snapshot_resume(struct dm_target *ti)
 	/* Now we have correct chunk size, reregister */
 	reregister_snapshot(s);
 
-	down_write(&s->lock);
+	mutex_lock(&s->lock);
 	s->active = 1;
-	up_write(&s->lock);
+	mutex_unlock(&s->lock);
 }
 
 static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -1995,7 +1997,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
 	switch (type) {
 	case STATUSTYPE_INFO:
 
-		down_write(&snap->lock);
+		mutex_lock(&snap->lock);
 
 		if (!snap->valid)
 			DMEMIT("Invalid");
@@ -2020,7 +2022,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
 			DMEMIT("Unknown");
 		}
 
-		up_write(&snap->lock);
+		mutex_unlock(&snap->lock);
 
 		break;
 
@@ -2086,7 +2088,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
 		if (dm_target_is_snapshot_merge(snap->ti))
 			continue;
 
-		down_write(&snap->lock);
+		mutex_lock(&snap->lock);
 
 		/* Only deal with valid and active snapshots */
 		if (!snap->valid || !snap->active)
@@ -2113,9 +2115,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
 
 		pe = __lookup_pending_exception(snap, chunk);
 		if (!pe) {
-			up_write(&snap->lock);
+			mutex_unlock(&snap->lock);
 			pe = alloc_pending_exception(snap);
-			down_write(&snap->lock);
+			mutex_lock(&snap->lock);
 
 			if (!snap->valid) {
 				free_pending_exception(pe);
@@ -2158,7 +2160,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
 	}
 
 next_snapshot:
-	up_write(&snap->lock);
+	mutex_unlock(&snap->lock);
 
 	if (pe_to_start_now) {
 		start_copy(pe_to_start_now);
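One non-mechanical detail survives in snapshot_resume(): the two dm_snapshot mutexes belong to the same lock class, so the second acquisition uses mutex_lock_nested() with SINGLE_DEPTH_NESTING (mirroring the old down_write_nested()) to keep lockdep from reporting a false recursive-locking deadlock. A minimal sketch of that idiom, reusing the hypothetical struct foo from the sketch above:

#include <linux/mutex.h>

/* Hypothetical: hand state over while holding two locks of the same class. */
static void foo_handover(struct foo *src, struct foo *dest)
{
	mutex_lock(&src->lock);
	/* Second lock in the same class: annotate the nesting for lockdep. */
	mutex_lock_nested(&dest->lock, SINGLE_DEPTH_NESTING);

	dest->active = src->active;
	src->active = 0;

	mutex_unlock(&dest->lock);
	mutex_unlock(&src->lock);
}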
