Skip to content

Commit b0af205

Browse files
committed
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm: detect lost queue
  dm: publish dm_vcalloc
  dm: publish dm_table_unplug_all
  dm: publish dm_get_mapinfo
  dm: export struct dm_dev
  dm crypt: avoid unnecessary wait when splitting bio
  dm crypt: tidy ctx pending
  dm crypt: fix async inc_pending
  dm crypt: move dec_pending on error into write_io_submit
  dm crypt: remove inc_pending from write_io_submit
  dm crypt: tidy write loop pending
  dm crypt: tidy crypt alloc
  dm crypt: tidy inc pending
  dm exception store: use chunk_t for_areas
  dm exception store: introduce area_location function
  dm raid1: kcopyd should stop on error if errors handled
  dm mpath: remove is_active from struct dm_path
  dm mpath: use more error codes

Fixed up trivial conflict in drivers/md/dm-mpath.c manually.
2 parents 73f6aa4 + 0c2322e commit b0af205

File tree

9 files changed

+190
-118
lines changed

9 files changed

+190
-118
lines changed

drivers/md/dm-crypt.c

Lines changed: 66 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
333333
ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
334334
ctx->sector = sector + cc->iv_offset;
335335
init_completion(&ctx->restart);
336-
atomic_set(&ctx->pending, 1);
337336
}
338337

339338
static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
408407
{
409408
int r;
410409

410+
atomic_set(&ctx->pending, 1);
411+
411412
while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
412413
ctx->idx_out < ctx->bio_out->bi_vcnt) {
413414

@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
456457
/*
457458
* Generate a new unfragmented bio with the given size
458459
* This should never violate the device limitations
459-
* May return a smaller bio when running out of pages
460+
* May return a smaller bio when running out of pages, indicated by
461+
* *out_of_pages set to 1.
460462
*/
461-
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
463+
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
464+
unsigned *out_of_pages)
462465
{
463466
struct crypt_config *cc = io->target->private;
464467
struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
472475
return NULL;
473476

474477
clone_init(io, clone);
478+
*out_of_pages = 0;
475479

476480
for (i = 0; i < nr_iovecs; i++) {
477481
page = mempool_alloc(cc->page_pool, gfp_mask);
478-
if (!page)
482+
if (!page) {
483+
*out_of_pages = 1;
479484
break;
485+
}
480486

481487
/*
482488
* if additional pages cannot be allocated without waiting,
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
517523
}
518524
}
519525

526+
static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
527+
struct bio *bio, sector_t sector)
528+
{
529+
struct crypt_config *cc = ti->private;
530+
struct dm_crypt_io *io;
531+
532+
io = mempool_alloc(cc->io_pool, GFP_NOIO);
533+
io->target = ti;
534+
io->base_bio = bio;
535+
io->sector = sector;
536+
io->error = 0;
537+
atomic_set(&io->pending, 0);
538+
539+
return io;
540+
}
541+
542+
static void crypt_inc_pending(struct dm_crypt_io *io)
543+
{
544+
atomic_inc(&io->pending);
545+
}
546+
520547
/*
521548
* One of the bios was finished. Check for completion of
522549
* the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
591618
struct bio *base_bio = io->base_bio;
592619
struct bio *clone;
593620

594-
atomic_inc(&io->pending);
621+
crypt_inc_pending(io);
595622

596623
/*
597624
* The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
653680
crypt_free_buffer_pages(cc, clone);
654681
bio_put(clone);
655682
io->error = -EIO;
683+
crypt_dec_pending(io);
656684
return;
657685
}
658686

@@ -664,66 +692,67 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
664692

665693
if (async)
666694
kcryptd_queue_io(io);
667-
else {
668-
atomic_inc(&io->pending);
695+
else
669696
generic_make_request(clone);
670-
}
671697
}
672698

673-
static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
699+
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
674700
{
675701
struct crypt_config *cc = io->target->private;
676702
struct bio *clone;
703+
int crypt_finished;
704+
unsigned out_of_pages = 0;
677705
unsigned remaining = io->base_bio->bi_size;
678706
int r;
679707

708+
/*
709+
* Prevent io from disappearing until this function completes.
710+
*/
711+
crypt_inc_pending(io);
712+
crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
713+
680714
/*
681715
* The allocated buffers can be smaller than the whole bio,
682716
* so repeat the whole process until all the data can be handled.
683717
*/
684718
while (remaining) {
685-
clone = crypt_alloc_buffer(io, remaining);
719+
clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
686720
if (unlikely(!clone)) {
687721
io->error = -ENOMEM;
688-
return;
722+
break;
689723
}
690724

691725
io->ctx.bio_out = clone;
692726
io->ctx.idx_out = 0;
693727

694728
remaining -= clone->bi_size;
695729

730+
crypt_inc_pending(io);
696731
r = crypt_convert(cc, &io->ctx);
732+
crypt_finished = atomic_dec_and_test(&io->ctx.pending);
697733

698-
if (atomic_dec_and_test(&io->ctx.pending)) {
699-
/* processed, no running async crypto */
734+
/* Encryption was already finished, submit io now */
735+
if (crypt_finished) {
700736
kcryptd_crypt_write_io_submit(io, r, 0);
701-
if (unlikely(r < 0))
702-
return;
703-
} else
704-
atomic_inc(&io->pending);
705737

706-
/* out of memory -> run queues */
707-
if (unlikely(remaining)) {
708-
/* wait for async crypto then reinitialize pending */
709-
wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
710-
atomic_set(&io->ctx.pending, 1);
711-
congestion_wait(WRITE, HZ/100);
738+
/*
739+
* If there was an error, do not try next fragments.
740+
* For async, error is processed in async handler.
741+
*/
742+
if (unlikely(r < 0))
743+
break;
712744
}
713-
}
714-
}
715745

716-
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
717-
{
718-
struct crypt_config *cc = io->target->private;
719-
720-
/*
721-
* Prevent io from disappearing until this function completes.
722-
*/
723-
atomic_inc(&io->pending);
746+
/*
747+
* Out of memory -> run queues
748+
* But don't wait if split was due to the io size restriction
749+
*/
750+
if (unlikely(out_of_pages))
751+
congestion_wait(WRITE, HZ/100);
724752

725-
crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
726-
kcryptd_crypt_write_convert_loop(io);
753+
if (unlikely(remaining))
754+
wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
755+
}
727756

728757
crypt_dec_pending(io);
729758
}
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
741770
struct crypt_config *cc = io->target->private;
742771
int r = 0;
743772

744-
atomic_inc(&io->pending);
773+
crypt_inc_pending(io);
745774

746775
crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
747776
io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
11081137
static int crypt_map(struct dm_target *ti, struct bio *bio,
11091138
union map_info *map_context)
11101139
{
1111-
struct crypt_config *cc = ti->private;
11121140
struct dm_crypt_io *io;
11131141

1114-
io = mempool_alloc(cc->io_pool, GFP_NOIO);
1115-
io->target = ti;
1116-
io->base_bio = bio;
1117-
io->sector = bio->bi_sector - ti->begin;
1118-
io->error = 0;
1119-
atomic_set(&io->pending, 0);
1142+
io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
11201143

11211144
if (bio_data_dir(io->base_bio) == READ)
11221145
kcryptd_queue_io(io);

drivers/md/dm-exception-store.c

Lines changed: 19 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -108,12 +108,12 @@ struct pstore {
108108
* Used to keep track of which metadata area the data in
109109
* 'chunk' refers to.
110110
*/
111-
uint32_t current_area;
111+
chunk_t current_area;
112112

113113
/*
114114
* The next free chunk for an exception.
115115
*/
116-
uint32_t next_free;
116+
chunk_t next_free;
117117

118118
/*
119119
* The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
175175
/*
176176
* Read or write a chunk aligned and sized block of data from a device.
177177
*/
178-
static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
178+
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
179179
{
180180
struct dm_io_region where = {
181181
.bdev = ps->snap->cow->bdev,
@@ -208,17 +208,24 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
208208
return req.result;
209209
}
210210

211+
/*
212+
* Convert a metadata area index to a chunk index.
213+
*/
214+
static chunk_t area_location(struct pstore *ps, chunk_t area)
215+
{
216+
return 1 + ((ps->exceptions_per_area + 1) * area);
217+
}
218+
211219
/*
212220
* Read or write a metadata area. Remembering to skip the first
213221
* chunk which holds the header.
214222
*/
215-
static int area_io(struct pstore *ps, uint32_t area, int rw)
223+
static int area_io(struct pstore *ps, chunk_t area, int rw)
216224
{
217225
int r;
218-
uint32_t chunk;
226+
chunk_t chunk;
219227

220-
/* convert a metadata area index to a chunk index */
221-
chunk = 1 + ((ps->exceptions_per_area + 1) * area);
228+
chunk = area_location(ps, area);
222229

223230
r = chunk_io(ps, chunk, rw, 0);
224231
if (r)
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
228235
return 0;
229236
}
230237

231-
static int zero_area(struct pstore *ps, uint32_t area)
238+
static int zero_area(struct pstore *ps, chunk_t area)
232239
{
233240
memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
234241
return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
404411

405412
static int read_exceptions(struct pstore *ps)
406413
{
407-
uint32_t area;
414+
chunk_t area;
408415
int r, full = 1;
409416

410417
/*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
517524
{
518525
struct pstore *ps = get_info(store);
519526
uint32_t stride;
527+
chunk_t next_free;
520528
sector_t size = get_dev_size(store->snap->cow->bdev);
521529

522530
/* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
530538
* into account the location of the metadata chunks.
531539
*/
532540
stride = (ps->exceptions_per_area + 1);
533-
if ((++ps->next_free % stride) == 1)
541+
next_free = ++ps->next_free;
542+
if (sector_div(next_free, stride) == 1)
534543
ps->next_free++;
535544

536545
atomic_inc(&ps->pending_count);

drivers/md/dm-ioctl.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
11311131
unsigned int count = 0;
11321132
struct list_head *tmp;
11331133
size_t len, needed;
1134-
struct dm_dev *dd;
1134+
struct dm_dev_internal *dd;
11351135
struct dm_target_deps *deps;
11361136

11371137
deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
11571157
deps->count = count;
11581158
count = 0;
11591159
list_for_each_entry (dd, dm_table_get_devices(table), list)
1160-
deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
1160+
deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
11611161

11621162
param->data_size = param->data_start + needed;
11631163
}

0 commit comments

Comments (0)