Commit 9583f1c

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull MD fixes from Shaohua Li:
 "This fixes several bugs, three of them are marked for stable:
  - an initialization issue fixed by Ming
  - a bio clone race issue fixed by me
  - an async tx flush issue fixed by Ofer
  - other cleanups"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  MD: fix warnning for UP case
  md/raid5: add thread_group worker async_tx_issue_pending_all
  md: simplify code with bio_io_error
  md/raid1: fix writebehind bio clone
  md: raid1-10: move raid1/raid10 common code into raid1-10.c
  md: raid1/raid10: initialize bvec table via bio_add_page()
  md: remove 'idx' from 'struct resync_pages'
2 parents 1731a47 + ed9b66d

6 files changed: +115, -126 lines

drivers/md/md.c

Lines changed: 1 addition & 1 deletion
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
 
 static bool set_in_sync(struct mddev *mddev)
 {
-	WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
+	WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
 	if (!mddev->in_sync) {
 		mddev->sync_checkers++;
 		spin_unlock(&mddev->lock);
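
This is the "MD: fix warnning for UP case" patch from the shortlog. On a uniprocessor (!CONFIG_SMP) build, spinlocks carry no lock state, so spin_is_locked() can report 0 even while the caller holds mddev->lock, and the unguarded WARN_ON_ONCE() fires on every call. Testing NR_CPUS != 1 first short-circuits the assertion on UP kernels. A minimal user-space sketch of the failure and the fix (up_spinlock, up_spin_is_locked and the hard-coded NR_CPUS are invented stand-ins, not kernel code):

	#include <stdio.h>

	#define NR_CPUS 1	/* model a UP build */

	/* On UP the lock holds no state, so "is it locked?" reports 0. */
	struct up_spinlock { char unused; };

	static int up_spin_is_locked(const struct up_spinlock *l)
	{
		(void)l;
		return 0;
	}

	int main(void)
	{
		struct up_spinlock lock;

		/* Old form: trips on every call on UP. */
		if (!up_spin_is_locked(&lock))
			printf("unguarded assertion would fire\n");

		/* Fixed form: the NR_CPUS != 1 guard short-circuits on UP. */
		if (NR_CPUS != 1 && !up_spin_is_locked(&lock))
			printf("guarded assertion fires\n");
		else
			printf("guarded assertion is a no-op on UP\n");
		return 0;
	}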

drivers/md/md.h

Lines changed: 0 additions & 54 deletions
@@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
 		mddev->queue->limits.max_write_zeroes_sectors = 0;
 }
-
-/* Maximum size of each resync request */
-#define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-
-/* for managing resync I/O pages */
-struct resync_pages {
-	unsigned	idx;	/* for get/put page from the pool */
-	void		*raid_bio;
-	struct page	*pages[RESYNC_PAGES];
-};
-
-static inline int resync_alloc_pages(struct resync_pages *rp,
-				     gfp_t gfp_flags)
-{
-	int i;
-
-	for (i = 0; i < RESYNC_PAGES; i++) {
-		rp->pages[i] = alloc_page(gfp_flags);
-		if (!rp->pages[i])
-			goto out_free;
-	}
-
-	return 0;
-
-out_free:
-	while (--i >= 0)
-		put_page(rp->pages[i]);
-	return -ENOMEM;
-}
-
-static inline void resync_free_pages(struct resync_pages *rp)
-{
-	int i;
-
-	for (i = 0; i < RESYNC_PAGES; i++)
-		put_page(rp->pages[i]);
-}
-
-static inline void resync_get_all_pages(struct resync_pages *rp)
-{
-	int i;
-
-	for (i = 0; i < RESYNC_PAGES; i++)
-		get_page(rp->pages[i]);
-}
-
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
-					     unsigned idx)
-{
-	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
-		return NULL;
-	return rp->pages[idx];
-}
 #endif /* _MD_MD_H */
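
Nothing is lost here: everything deleted reappears, minus the idx member, in the new drivers/md/raid1-10.c below, giving RAID1 and RAID10 one shared copy instead of inline helpers in md.h. One detail worth noting in resync_alloc_pages() is the partial-failure unwind: i indexes the slot that failed, so while (--i >= 0) releases exactly the pages that were successfully allocated and nothing else. A standalone sketch of that idiom, with plain malloc/free standing in for alloc_page/put_page:

	#include <stdlib.h>

	#define NPAGES 16

	/* All-or-nothing allocation: on failure, free only what succeeded. */
	static int alloc_all(void *bufs[NPAGES], size_t len)
	{
		int i;

		for (i = 0; i < NPAGES; i++) {
			bufs[i] = malloc(len);
			if (!bufs[i])
				goto out_free;
		}
		return 0;

	out_free:
		/* i points at the failed slot; --i steps back to the last success. */
		while (--i >= 0)
			free(bufs[i]);
		return -1;
	}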

drivers/md/raid1-10.c

Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
+/* Maximum size of each resync request */
+#define RESYNC_BLOCK_SIZE (64*1024)
+#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+/* for managing resync I/O pages */
+struct resync_pages {
+	void		*raid_bio;
+	struct page	*pages[RESYNC_PAGES];
+};
+
+static inline int resync_alloc_pages(struct resync_pages *rp,
+				     gfp_t gfp_flags)
+{
+	int i;
+
+	for (i = 0; i < RESYNC_PAGES; i++) {
+		rp->pages[i] = alloc_page(gfp_flags);
+		if (!rp->pages[i])
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	while (--i >= 0)
+		put_page(rp->pages[i]);
+	return -ENOMEM;
+}
+
+static inline void resync_free_pages(struct resync_pages *rp)
+{
+	int i;
+
+	for (i = 0; i < RESYNC_PAGES; i++)
+		put_page(rp->pages[i]);
+}
+
+static inline void resync_get_all_pages(struct resync_pages *rp)
+{
+	int i;
+
+	for (i = 0; i < RESYNC_PAGES; i++)
+		get_page(rp->pages[i]);
+}
+
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
+					     unsigned idx)
+{
+	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+		return NULL;
+	return rp->pages[idx];
+}
+
+/*
+ * 'strct resync_pages' stores actual pages used for doing the resync
+ * IO, and it is per-bio, so make .bi_private points to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+	return bio->bi_private;
+}
+
+/* generally called after bio_reset() for reseting bvec */
+static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+				      int size)
+{
+	int idx = 0;
+
+	/* initialize bvec table again */
+	do {
+		struct page *page = resync_fetch_page(rp, idx);
+		int len = min_t(int, size, PAGE_SIZE);
+
+		/*
+		 * won't fail because the vec table is big
+		 * enough to hold all these pages
+		 */
+		bio_add_page(bio, page, len, 0);
+		size -= len;
+	} while (idx++ < RESYNC_PAGES && size > 0);
+}
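
Two notes on the new file. It is not compiled standalone: raid1.c (below) pulls it in with #include "raid1-10.c", which is why there are no header includes here. And md_bio_reset_resync_pages() is the common helper behind the "initialize bvec table via bio_add_page()" patch in the shortlog: after bio_reset() wipes a bio's bvec table and iterator, the helper rebuilds both by re-adding the preallocated resync pages through bio_add_page(), splitting size bytes into at most RESYNC_PAGES page-sized segments. A user-space model of just that chunking arithmetic (the function name and the 10000-byte example are invented for illustration):

	#include <stdio.h>

	#define PAGE_SIZE	4096
	#define RESYNC_PAGES	16	/* 64 KiB / 4 KiB */

	/* One segment per page; only the last segment may be short. */
	static void split_into_pages(int size)
	{
		int idx = 0;

		do {
			int len = size < PAGE_SIZE ? size : PAGE_SIZE;

			printf("segment %d: %d bytes\n", idx, len);
			size -= len;
		} while (idx++ < RESYNC_PAGES && size > 0);
	}

	int main(void)
	{
		split_into_pages(10000);	/* prints 4096, 4096, 1808 */
		return 0;
	}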

drivers/md/raid1.c

Lines changed: 20 additions & 48 deletions
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
 #define raid1_log(md, fmt, args...) \
 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
 
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
-	return bio->bi_private;
-}
+#include "raid1-10.c"
 
 /*
  * for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		resync_get_all_pages(rp);
 	}
 
-	rp->idx = 0;
 	rp->raid_bio = r1_bio;
 	bio->bi_private = rp;
 }
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
 	}
 
 	if (behind) {
-		/* we release behind master bio when all write are done */
-		if (r1_bio->behind_master_bio == bio)
-			to_put = NULL;
-
 		if (test_bit(WriteMostly, &rdev->flags))
 			atomic_dec(&r1_bio->behind_remaining);
 
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
+			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
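
In the last hunk, the two-line error completion is folded into bio_io_error(), the block layer's existing shorthand for failing a bio; after the recent blk_status_t conversion it is defined in include/linux/bio.h as essentially:

	static inline void bio_io_error(struct bio *bio)
	{
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

This is the "md: simplify code with bio_io_error" cleanup from the shortlog; behavior is unchanged.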
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
 	wake_up(&conf->wait_barrier);
 }
 
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
 					   struct bio *bio)
 {
 	int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 
 	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
 	if (!behind_bio)
-		goto fail;
+		return;
 
 	/* discard op, we don't support writezero/writesame yet */
-	if (!bio_has_data(bio))
+	if (!bio_has_data(bio)) {
+		behind_bio->bi_iter.bi_size = size;
 		goto skip_copy;
+	}
 
 	while (i < vcnt && size) {
 		struct page *page;
@@ -1123,14 +1112,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 	r1_bio->behind_master_bio = behind_bio;;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
 
-	return behind_bio;
+	return;
 
 free_pages:
 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
 		 bio->bi_iter.bi_size);
 	bio_free_pages(behind_bio);
-fail:
-	return behind_bio;
+	bio_put(behind_bio);
 }
 
 struct raid1_plug_cb {
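
The hunks above carry the stable-marked write-behind fix ("md/raid1: fix writebehind bio clone" in the shortlog). Before the change, alloc_behind_master_bio() returned the behind bio and raid1_write_request() submitted it directly as the first per-device write, cloning it only for the remaining devices; the problem, as these hunks read, is that a bio must not be cloned once it may already be in flight, since the driver can be advancing its iterator concurrently. Now the behind bio is stored only in r1_bio->behind_master_bio and serves purely as a clone template (see the raid1_write_request hunks below): every submitted mbio is a bio_clone_fast() copy, raid1_end_write_request() no longer needs the "don't put the master bio" special case, and the allocation-failure path simply bio_put()s the half-built bio and falls back to synchronous, non-behind I/O, as the pr_debug message notes.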
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			    (atomic_read(&bitmap->behind_writes)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
-				mbio = alloc_behind_master_bio(r1_bio, bio);
+				alloc_behind_master_bio(r1_bio, bio);
 			}
 
 			bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			first_clone = 0;
 		}
 
-		if (!mbio) {
-			if (r1_bio->behind_master_bio)
-				mbio = bio_clone_fast(r1_bio->behind_master_bio,
-						      GFP_NOIO,
-						      mddev->bio_set);
-			else
-				mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
-		}
+		if (r1_bio->behind_master_bio)
+			mbio = bio_clone_fast(r1_bio->behind_master_bio,
+					      GFP_NOIO, mddev->bio_set);
+		else
+			mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 
 		if (r1_bio->behind_master_bio) {
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
 	/* Fix variable parts of all bios */
 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
-		int j;
-		int size;
 		blk_status_t status;
-		struct bio_vec *bi;
 		struct bio *b = r1_bio->bios[i];
 		struct resync_pages *rp = get_resync_pages(b);
 		if (b->bi_end_io != end_sync_read)
@@ -2098,24 +2080,15 @@ static void process_checks(struct r1bio *r1_bio)
 		status = b->bi_status;
 		bio_reset(b);
 		b->bi_status = status;
-		b->bi_vcnt = vcnt;
-		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
 		b->bi_end_io = end_sync_read;
 		rp->raid_bio = r1_bio;
 		b->bi_private = rp;
 
-		size = b->bi_iter.bi_size;
-		bio_for_each_segment_all(bi, b, j) {
-			bi->bv_offset = 0;
-			if (size > PAGE_SIZE)
-				bi->bv_len = PAGE_SIZE;
-			else
-				bi->bv_len = size;
-			size -= PAGE_SIZE;
-		}
+		/* initialize bvec table again */
+		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
 	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		wbio = bio_clone_fast(r1_bio->behind_master_bio,
 				      GFP_NOIO,
 				      mddev->bio_set);
-		/* We really need a _all clone */
-		wbio->bi_iter = (struct bvec_iter){ 0 };
 	} else {
 		wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
 				      mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int good_sectors = RESYNC_SECTORS;
 	int min_bad = 0; /* number of sectors that are bad in all devices */
 	int idx = sector_to_idx(sector_nr);
+	int page_idx = 0;
 
 	if (!conf->r1buf_pool)
 		if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		bio = r1_bio->bios[i];
 		rp = get_resync_pages(bio);
 		if (bio->bi_end_io) {
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 
 			/*
 			 * won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
-	} while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 
 	r1_bio->sectors = nr_sectors;
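
With idx removed from struct resync_pages, raid1_sync_request() now tracks its position in the page array with a plain local, page_idx. The shared per-bio structure stays read-only while I/O is in flight, and the loop's exit test becomes self-contained instead of reaching through one bio's bi_private on every pass, as the old get_resync_pages(...)->idx < RESYNC_PAGES condition did.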
