
Commit 933f01d

Milan Broz authored and Alasdair G Kergon committed
dm crypt: avoid unnecessary wait when splitting bio
Don't wait between submitting crypt requests for a bio unless we are short of memory. There are two situations when we must split an encrypted bio:

1) there are no free pages;
2) the new bio would violate underlying device restrictions (e.g. max hw segments).

In case (2) we do not need to wait.

Add an output variable to crypt_alloc_buffer() to distinguish between these cases.

Signed-off-by: Milan Broz <[email protected]>
Signed-off-by: Alasdair G Kergon <[email protected]>
1 parent c808161 commit 933f01d

File tree

1 file changed: +18 -8 lines

drivers/md/dm-crypt.c

Lines changed: 18 additions & 8 deletions
@@ -457,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -473,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -696,6 +701,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
@@ -710,7 +716,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
 			break;
@@ -737,11 +743,15 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 			break;
 		}
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);
-		}
+
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
 	}
 
 	crypt_dec_pending(io);
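
For readability, the resulting control flow of the write path can be summarized as below. This is a simplified paraphrase of kcryptd_crypt_write_convert() as it stands after this commit, not a literal excerpt: the encryption/submission step is elided and error handling is abbreviated.

	/*
	 * Simplified paraphrase (not a literal excerpt) of the patched
	 * loop; the crypt conversion and submission step is elided.
	 */
	while (remaining) {
		/*
		 * May return a bio smaller than requested; *out_of_pages
		 * records whether the shortfall came from memory pressure
		 * rather than device restrictions.
		 */
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		/* ... encrypt and submit this clone, updating remaining ... */

		/* Back off only when the split was caused by memory pressure. */
		if (unlikely(out_of_pages))
			congestion_wait(WRITE, HZ/100);

		/* Before looping, let in-flight crypt requests drain. */
		if (unlikely(remaining))
			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
	}

The effect is that a split forced purely by device limits (case 2 in the commit message) proceeds to the next iteration immediately, while genuine page-pool exhaustion (case 1) still throttles the writer via congestion_wait().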
