@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
 	ctx->sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
-	atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
 	int r;
 
+	atomic_set(&ctx->pending, 1);
+
 	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	       ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
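These first two hunks move the initialization of ctx->pending from crypt_convert_init() into crypt_convert() itself, so every conversion pass starts with its own base reference, which is dropped once all blocks of that pass have been issued. The following userspace sketch only models that completion-counting pattern with C11 atomics; the names and structure are illustrative, not dm-crypt's code.

#include <stdatomic.h>
#include <stdio.h>

/* Toy conversion context: "pending" counts a base reference plus one
 * reference per block whose processing completes asynchronously. */
struct toy_ctx {
	atomic_int pending;
};

/* Each block takes a reference; a synchronous completion drops it at
 * once, an asynchronous one would drop it from a completion handler. */
static void process_block(struct toy_ctx *ctx, int async)
{
	atomic_fetch_add(&ctx->pending, 1);
	if (!async)
		atomic_fetch_sub(&ctx->pending, 1);
}

static int convert(struct toy_ctx *ctx, int nr_blocks)
{
	int i;

	/* Base reference, taken at the start of every pass (this is what
	 * the patch moves out of the one-time init function). */
	atomic_store(&ctx->pending, 1);

	for (i = 0; i < nr_blocks; i++)
		process_block(ctx, 0);

	/* Drop the base reference; if it was the last one, every block
	 * finished synchronously and the caller may complete the request. */
	return atomic_fetch_sub(&ctx->pending, 1) == 1;
}

int main(void)
{
	struct toy_ctx ctx = { 0 };

	printf("finished synchronously: %d\n", convert(&ctx, 4));
	return 0;
}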
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
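With the new out_of_pages argument, crypt_alloc_buffer() can tell its caller whether a shorter-than-requested bio was due to page-pool exhaustion, so the write path later throttles only in that case instead of after every split. Below is a small, self-contained sketch of the same reporting idea in plain C; the allocator and its limits are made up for illustration.

#include <stdio.h>

/* Toy allocator: hands out up to "want" pages but may return fewer.
 * *out_of_pages is set only when the pool is genuinely exhausted, so
 * the caller can distinguish memory pressure from an ordinary limit. */
static unsigned alloc_pages_partial(unsigned want, unsigned pool,
				    unsigned *out_of_pages)
{
	unsigned got = want;

	*out_of_pages = 0;
	if (got > pool) {
		got = pool;
		*out_of_pages = 1;
	}
	return got;
}

int main(void)
{
	unsigned remaining = 10, pool = 4, out_of_pages;

	while (remaining) {
		unsigned got = alloc_pages_partial(remaining, pool,
						   &out_of_pages);

		if (!got)
			return 1;	/* nothing at all: hard failure */
		remaining -= got;

		/* Throttle (dm-crypt calls congestion_wait()) only when
		 * the shortfall was caused by memory pressure. */
		if (out_of_pages)
			printf("short allocation, throttling\n");
	}
	printf("buffer fully allocated\n");
	return 0;
}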
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+					  struct bio *bio, sector_t sector)
+{
+	struct crypt_config *cc = ti->private;
+	struct dm_crypt_io *io;
+
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
+	io->target = ti;
+	io->base_bio = bio;
+	io->sector = sector;
+	io->error = 0;
+	atomic_set(&io->pending, 0);
+
+	return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
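crypt_io_alloc() and crypt_inc_pending() centralize the per-request setup and reference-taking, so crypt_map() and the kcryptd paths all manipulate io->pending through the same helpers. Here is a rough userspace model of that lifetime, assuming (as the existing crypt_dec_pending() path suggests) that dropping the last reference releases the io; the names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the dm_crypt_io lifetime: the constructor leaves the
 * count at 0 and every path that starts work takes its own reference. */
struct toy_io {
	atomic_int pending;
	int error;
};

static struct toy_io *io_alloc(void)
{
	struct toy_io *io = malloc(sizeof(*io));

	if (!io)
		return NULL;
	io->error = 0;
	atomic_init(&io->pending, 0);
	return io;
}

static void io_get(struct toy_io *io)
{
	atomic_fetch_add(&io->pending, 1);
}

static void io_put(struct toy_io *io)
{
	/* Dropping the last reference releases the request (the driver
	 * also ends the original bio at this point). */
	if (atomic_fetch_sub(&io->pending, 1) == 1)
		free(io);
}

int main(void)
{
	struct toy_io *io = io_alloc();

	if (!io)
		return 1;
	io_get(io);	/* issuing path */
	io_get(io);	/* an in-flight fragment */
	io_put(io);	/* fragment completes */
	io_put(io);	/* issuing path done: last put frees the io */
	printf("io released\n");
	return 0;
}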
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	/*
 	 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		io->error = -EIO;
+		crypt_dec_pending(io);
 		return;
 	}
 
@@ -664,66 +692,67 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 
 	if (async)
 		kcryptd_queue_io(io);
-	else {
-		atomic_inc(&io->pending);
+	else
 		generic_make_request(clone);
-	}
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
+	/*
+	 * Prevent io from disappearing until this function completes.
+	 */
+	crypt_inc_pending(io);
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
-			return;
+			break;
 		}
 
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
 		remaining -= clone->bi_size;
 
+		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
+		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-		if (atomic_dec_and_test(&io->ctx.pending)) {
-			/* processed, no running async crypto */
+		/* Encryption was already finished, submit io now */
+		if (crypt_finished) {
 			kcryptd_crypt_write_io_submit(io, r, 0);
-			if (unlikely(r < 0))
-				return;
-		} else
-			atomic_inc(&io->pending);
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			/* wait for async crypto then reinitialize pending */
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-			atomic_set(&io->ctx.pending, 1);
-			congestion_wait(WRITE, HZ/100);
+			/*
+			 * If there was an error, do not try next fragments.
+			 * For async, error is processed in async handler.
+			 */
+			if (unlikely(r < 0))
+				break;
 		}
-	}
-}
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	/*
-	 * Prevent io from disappearing until this function completes.
-	 */
-	atomic_inc(&io->pending);
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
+			congestion_wait(WRITE, HZ/100);
 
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-	kcryptd_crypt_write_convert_loop(io);
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+	}
 
 	crypt_dec_pending(io);
 }
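The rewritten kcryptd_crypt_write_convert() now calls congestion_wait() only when the fragment was short because of out_of_pages, and otherwise uses wait_event() so outstanding asynchronous crypto drains before the context is reused for the next fragment. The pthread sketch below models only that wait-for-zero step; the names are made up, and the kernel uses wait_event() on cc->writeq with an atomic_t rather than a mutex and condition variable.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Toy context: "pending" counts references held by in-flight async
 * work; the writer waits for it to reach zero before reusing it. */
struct toy_ctx {
	int pending;
	pthread_mutex_t lock;
	pthread_cond_t idle;
};

static void ctx_put(struct toy_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (--ctx->pending == 0)
		pthread_cond_broadcast(&ctx->idle);
	pthread_mutex_unlock(&ctx->lock);
}

/* Stands in for an asynchronous crypto completion handler. */
static void *async_completion(void *arg)
{
	usleep(10000);		/* pretend the crypto engine is busy */
	ctx_put(arg);
	return NULL;
}

int main(void)
{
	struct toy_ctx ctx = {
		.pending = 1,	/* base reference for this pass */
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.idle = PTHREAD_COND_INITIALIZER,
	};
	pthread_t worker;

	/* One block went asynchronous and holds an extra reference. */
	pthread_mutex_lock(&ctx.lock);
	ctx.pending++;
	pthread_mutex_unlock(&ctx.lock);
	pthread_create(&worker, NULL, async_completion, &ctx);

	/* Drop the base reference, then wait until the async completion
	 * has dropped its reference too before starting the next fragment. */
	ctx_put(&ctx);
	pthread_mutex_lock(&ctx.lock);
	while (ctx.pending)
		pthread_cond_wait(&ctx.idle, &ctx.lock);
	pthread_mutex_unlock(&ctx.lock);
	printf("context idle, next fragment may start\n");

	pthread_join(worker, NULL);
	return 0;
}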
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
 
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
-	io->base_bio = bio;
-	io->sector = bio->bi_sector - ti->begin;
-	io->error = 0;
-	atomic_set(&io->pending, 0);
+	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);