@@ -454,6 +454,95 @@ void zram_page_end_io(struct bio *bio)
 	bio_put(bio);
 }
 
+/*
+ * Returns 1 if the submission is successful.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *parent)
+{
+	struct bio *bio;
+
+	bio = bio_alloc(GFP_ATOMIC, 1);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+	bio->bi_bdev = zram->bdev;
+	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+		bio_put(bio);
+		return -EIO;
+	}
+
+	if (!parent) {
+		bio->bi_opf = REQ_OP_READ;
+		bio->bi_end_io = zram_page_end_io;
+	} else {
+		bio->bi_opf = parent->bi_opf;
+		bio_chain(bio, parent);
+	}
+
+	submit_bio(bio);
+	return 1;
+}
+
+struct zram_work {
+	struct work_struct work;
+	struct zram *zram;
+	unsigned long entry;
+	struct bio *bio;
+};
+
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
+{
+	struct bio_vec bvec;
+	struct zram_work *zw = container_of(work, struct zram_work, work);
+	struct zram *zram = zw->zram;
+	unsigned long entry = zw->entry;
+	struct bio *bio = zw->bio;
+
+	read_from_bdev_async(zram, &bvec, entry, bio);
+}
+
+/*
+ * The block layer wants one ->make_request_fn active at a time, so chaining
+ * this IO to a parent IO submitted from the same context would deadlock.
+ * To avoid that, the read is issued from worker thread context.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *bio)
+{
+	struct zram_work work;
+
+	work.zram = zram;
+	work.entry = entry;
+	work.bio = bio;
+
+	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+	queue_work(system_unbound_wq, &work.work);
+	flush_work(&work.work);
+	destroy_work_on_stack(&work.work);
+
+	return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *bio)
+{
+	WARN_ON(1);
+	return -EIO;
+}
+#endif
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *parent, bool sync)
+{
+	if (sync)
+		return read_from_bdev_sync(zram, bvec, entry, parent);
+	else
+		return read_from_bdev_async(zram, bvec, entry, parent);
+}
+
 static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
 				u32 index, struct bio *parent,
 				unsigned long *pentry)
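The synchronous path above leans on the on-stack work item pattern: a work_struct that lives on the caller's stack is queued, and flush_work() waits for the handler, so the bio is submitted from worker-thread context rather than from the current ->make_request_fn context. Below is a minimal, self-contained sketch of that pattern in isolation; the struct and function names are illustrative and not part of the patch.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative payload: run a function in worker context and wait for it. */
struct sync_work {
        struct work_struct work;
        int arg;
        int result;
};

static void sync_work_fn(struct work_struct *work)
{
        struct sync_work *sw = container_of(work, struct sync_work, work);

        sw->result = sw->arg * 2;       /* stands in for the real bio submission */
}

static int run_in_worker(int arg)
{
        struct sync_work sw = { .arg = arg };

        INIT_WORK_ONSTACK(&sw.work, sync_work_fn);
        queue_work(system_unbound_wq, &sw.work);
        flush_work(&sw.work);           /* handler has finished once this returns */
        destroy_work_on_stack(&sw.work);

        return sw.result;
}

flush_work() only returns after the handler has run to completion, which is what makes it safe for the work item to live on the caller's stack frame.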
@@ -514,6 +603,12 @@ static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
 {
 	return -EIO;
 }
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *parent, bool sync)
+{
+	return -EIO;
+}
 static void zram_wb_clear(struct zram *zram, u32 index) {}
 #endif
 
@@ -773,13 +868,31 @@ static void zram_free_page(struct zram *zram, size_t index)
 	zram_set_obj_size(zram, index, 0);
 }
 
-static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+			struct bio *bio, bool partial_io)
 {
 	int ret;
 	unsigned long handle;
 	unsigned int size;
 	void *src, *dst;
 
+	if (zram_wb_enabled(zram)) {
+		zram_slot_lock(zram, index);
+		if (zram_test_flag(zram, index, ZRAM_WB)) {
+			struct bio_vec bvec;
+
+			zram_slot_unlock(zram, index);
+
+			bvec.bv_page = page;
+			bvec.bv_len = PAGE_SIZE;
+			bvec.bv_offset = 0;
+			return read_from_bdev(zram, &bvec,
+					zram_get_element(zram, index),
+					bio, partial_io);
+		}
+		zram_slot_unlock(zram, index);
+	}
+
 	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
 		return 0;
 
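When the ZRAM_WB flag is set, the slot's element field no longer refers to compressed data in zsmalloc; as the hunk shows, zram_get_element() yields the block index recorded when the page was written back, and read_from_bdev_async() turns that index into a starting sector. A worked example of that arithmetic, assuming 4 KiB pages (the numbers are illustrative):

/* Worked example, assuming PAGE_SIZE == 4096: */
unsigned long entry = 10;                    /* block index from zram_get_element() */
sector_t sector = entry * (PAGE_SIZE >> 9);  /* 4096 >> 9 == 8, so sector 80 */
/* The bio then reads PAGE_SIZE bytes starting at byte offset 80 * 512 == 40960. */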
@@ -812,7 +925,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index)
 }
 
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-				u32 index, int offset)
+				u32 index, int offset, struct bio *bio)
 {
 	int ret;
 	struct page *page;
@@ -825,7 +938,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return -ENOMEM;
 	}
 
-	ret = __zram_bvec_read(zram, page, index);
+	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
 	if (unlikely(ret))
 		goto out;
 
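is_partial_io(bvec) is passed through as the sync flag because of what happens right after this call: for a partial request the page is read into a bounce page and the requested sub-range is copied out immediately, so the backing-device read needs to be finished before that copy runs; a plain chained bio would only signal completion through the parent. A condensed sketch of that surrounding flow, with details assumed since the full function body is not part of this hunk (it relies on the types and helpers already present in zram_drv.c):

/* Sketch only: mirrors the partial-read handling around the call above. */
static int partial_read_sketch(struct zram *zram, struct bio_vec *bvec,
                               u32 index, int offset, struct bio *bio)
{
        struct page *page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); /* bounce page */
        void *src, *dst;
        int ret;

        if (!page)
                return -ENOMEM;

        /* partial_io == true requests the synchronous path: the copy below
         * assumes the bounce page has already been filled. */
        ret = __zram_bvec_read(zram, page, index, bio, true);
        if (ret)
                goto out;

        dst = kmap_atomic(bvec->bv_page);
        src = kmap_atomic(page);
        memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
        kunmap_atomic(src);
        kunmap_atomic(dst);
out:
        __free_page(page);
        return ret;
}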
@@ -988,7 +1101,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 	if (!page)
 		return -ENOMEM;
 
-	ret = __zram_bvec_read(zram, page, index);
+	ret = __zram_bvec_read(zram, page, index, bio, true);
 	if (ret)
 		goto out;
 
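Here the partial_io argument is forced to true for the same reason: a partial write is read-modify-write, and the merge of the caller's bytes into the old page can only run once the full page has been read back. A small sketch of that merge step, with the surrounding zram_bvec_write() details assumed (again relying on zram_drv.c's existing headers and types):

/* Sketch only: merging the caller's bytes into the freshly read old page. */
static void merge_partial_write(struct page *old_page, struct bio_vec *bvec,
                                int offset)
{
        void *src = kmap_atomic(bvec->bv_page); /* new data from the caller  */
        void *dst = kmap_atomic(old_page);      /* full page read back above */

        memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
        kunmap_atomic(dst);
        kunmap_atomic(src);
        /* The merged page is then compressed and stored as a full-page write. */
}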
@@ -1065,7 +1178,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 	if (!is_write) {
 		atomic64_inc(&zram->stats.num_reads);
-		ret = zram_bvec_read(zram, bvec, index, offset);
+		ret = zram_bvec_read(zram, bvec, index, offset, bio);
 		flush_dcache_page(bvec->bv_page);
 	} else {
 		atomic64_inc(&zram->stats.num_writes);