@@ -445,9 +445,76 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
445
445
WARN_ON_ONCE (!was_set );
446
446
}
447
447
448
+ void zram_page_end_io (struct bio * bio )
449
+ {
450
+ struct page * page = bio -> bi_io_vec [0 ].bv_page ;
451
+
452
+ page_endio (page , op_is_write (bio_op (bio )),
453
+ blk_status_to_errno (bio -> bi_status ));
454
+ bio_put (bio );
455
+ }
456
+
457
/*
 * Write the page described by @bvec to the backing device, at a freshly
 * allocated backing-device slot.
 *
 * @parent == NULL means the caller has no bio to chain to (the rw_page
 * path): the write is submitted as a standalone REQ_SYNC bio completed by
 * zram_page_end_io().  Otherwise the new bio inherits @parent's op flags
 * and is chained to it, so @parent completes only after this writeback
 * finishes.
 *
 * On success returns 0 and stores the allocated slot number in *@pentry
 * so the caller can record it in the zram table.  On failure every
 * resource acquired so far is released:
 *   -ENOMEM  no bio could be allocated
 *   -ENOSPC  the backing-device bitmap has no free slot
 *   -EIO     the page could not be added to the bio
 */
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
			u32 index, struct bio *parent,
			unsigned long *pentry)
{
	struct bio *bio;
	unsigned long entry;

	/* GFP_ATOMIC: presumably callable from atomic context — TODO confirm */
	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	entry = get_entry_bdev(zram);
	if (!entry) {
		bio_put(bio);
		return -ENOSPC;
	}

	/* One slot covers one page; convert slot number to a 512B sector. */
	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio->bi_bdev = zram->bdev;
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
					bvec->bv_offset)) {
		/* Unwind in reverse order of acquisition. */
		bio_put(bio);
		put_entry_bdev(zram, entry);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	*pentry = entry;

	return 0;
}
496
+
497
/*
 * Tear down the writeback state of table slot @index: drop the ZRAM_WB
 * flag, fetch the backing-device slot number stored in the table element,
 * zero the element, and return the slot to the backing-device bitmap.
 *
 * Called from zram_free_page(), which runs under zram_slot_lock() — the
 * slot lock for @index is presumably required here; verify against callers.
 */
static void zram_wb_clear(struct zram *zram, u32 index)
{
	unsigned long entry;

	zram_clear_flag(zram, index, ZRAM_WB);
	entry = zram_get_element(zram, index);
	zram_set_element(zram, index, 0);
	put_entry_bdev(zram, entry);
}
506
+
448
507
#else
449
508
/*
 * CONFIG_ZRAM_WRITEBACK=n stubs: no backing device can ever be
 * configured, so writeback is always disabled and reset is a no-op.
 */
static bool zram_wb_enabled(struct zram *zram) { return false; }
static inline void reset_bdev(struct zram *zram) {};
510
/*
 * CONFIG_ZRAM_WRITEBACK=n stub: there is no backing device, so any
 * attempt to write a page out fails; callers fall back accordingly.
 */
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
			u32 index, struct bio *parent,
			unsigned long *pentry)
{
	return -EIO;
}
517
/* CONFIG_ZRAM_WRITEBACK=n stub: no writeback state exists to clear. */
static void zram_wb_clear(struct zram *zram, u32 index) {}
451
518
#endif
452
519
453
520
@@ -672,7 +739,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
672
739
*/
673
740
static void zram_free_page (struct zram * zram , size_t index )
674
741
{
675
- unsigned long handle = zram_get_handle (zram , index );
742
+ unsigned long handle ;
743
+
744
+ if (zram_wb_enabled (zram ) && zram_test_flag (zram , index , ZRAM_WB )) {
745
+ zram_wb_clear (zram , index );
746
+ atomic64_dec (& zram -> stats .pages_stored );
747
+ return ;
748
+ }
676
749
677
750
/*
678
751
* No memory is allocated for same element filled pages.
@@ -686,6 +759,7 @@ static void zram_free_page(struct zram *zram, size_t index)
686
759
return ;
687
760
}
688
761
762
+ handle = zram_get_handle (zram , index );
689
763
if (!handle )
690
764
return ;
691
765
@@ -770,7 +844,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
770
844
return ret ;
771
845
}
772
846
773
- static int __zram_bvec_write (struct zram * zram , struct bio_vec * bvec , u32 index )
847
+ static int __zram_bvec_write (struct zram * zram , struct bio_vec * bvec ,
848
+ u32 index , struct bio * bio )
774
849
{
775
850
int ret = 0 ;
776
851
unsigned long alloced_pages ;
@@ -781,6 +856,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
781
856
struct page * page = bvec -> bv_page ;
782
857
unsigned long element = 0 ;
783
858
enum zram_pageflags flags = 0 ;
859
+ bool allow_wb = true;
784
860
785
861
mem = kmap_atomic (page );
786
862
if (page_same_filled (mem , & element )) {
@@ -805,8 +881,20 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
805
881
return ret ;
806
882
}
807
883
808
- if (unlikely (comp_len > max_zpage_size ))
884
+ if (unlikely (comp_len > max_zpage_size )) {
885
+ if (zram_wb_enabled (zram ) && allow_wb ) {
886
+ zcomp_stream_put (zram -> comp );
887
+ ret = write_to_bdev (zram , bvec , index , bio , & element );
888
+ if (!ret ) {
889
+ flags = ZRAM_WB ;
890
+ ret = 1 ;
891
+ goto out ;
892
+ }
893
+ allow_wb = false;
894
+ goto compress_again ;
895
+ }
809
896
comp_len = PAGE_SIZE ;
897
+ }
810
898
811
899
/*
812
900
* handle allocation has 2 paths:
@@ -866,10 +954,11 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
866
954
*/
867
955
zram_slot_lock (zram , index );
868
956
zram_free_page (zram , index );
869
- if (flags == ZRAM_SAME ) {
870
- zram_set_flag (zram , index , ZRAM_SAME );
957
+
958
+ if (flags ) {
959
+ zram_set_flag (zram , index , flags );
871
960
zram_set_element (zram , index , element );
872
- } else {
961
+ } else {
873
962
zram_set_handle (zram , index , handle );
874
963
zram_set_obj_size (zram , index , comp_len );
875
964
}
@@ -881,7 +970,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
881
970
}
882
971
883
972
static int zram_bvec_write (struct zram * zram , struct bio_vec * bvec ,
884
- u32 index , int offset )
973
+ u32 index , int offset , struct bio * bio )
885
974
{
886
975
int ret ;
887
976
struct page * page = NULL ;
@@ -914,7 +1003,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
914
1003
vec .bv_offset = 0 ;
915
1004
}
916
1005
917
- ret = __zram_bvec_write (zram , & vec , index );
1006
+ ret = __zram_bvec_write (zram , & vec , index , bio );
918
1007
out :
919
1008
if (is_partial_io (bvec ))
920
1009
__free_page (page );
@@ -965,7 +1054,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
965
1054
* Returns 1 if IO request was successfully submitted.
966
1055
*/
967
1056
static int zram_bvec_rw (struct zram * zram , struct bio_vec * bvec , u32 index ,
968
- int offset , bool is_write )
1057
+ int offset , bool is_write , struct bio * bio )
969
1058
{
970
1059
unsigned long start_time = jiffies ;
971
1060
int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ ;
@@ -980,7 +1069,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
980
1069
flush_dcache_page (bvec -> bv_page );
981
1070
} else {
982
1071
atomic64_inc (& zram -> stats .num_writes );
983
- ret = zram_bvec_write (zram , bvec , index , offset );
1072
+ ret = zram_bvec_write (zram , bvec , index , offset , bio );
984
1073
}
985
1074
986
1075
generic_end_io_acct (rw_acct , & zram -> disk -> part0 , start_time );
@@ -1024,7 +1113,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
1024
1113
bv .bv_len = min_t (unsigned int , PAGE_SIZE - offset ,
1025
1114
unwritten );
1026
1115
if (zram_bvec_rw (zram , & bv , index , offset ,
1027
- op_is_write (bio_op (bio ))) < 0 )
1116
+ op_is_write (bio_op (bio )), bio ) < 0 )
1028
1117
goto out ;
1029
1118
1030
1119
bv .bv_offset += bv .bv_len ;
@@ -1098,7 +1187,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
1098
1187
bv .bv_len = PAGE_SIZE ;
1099
1188
bv .bv_offset = 0 ;
1100
1189
1101
- ret = zram_bvec_rw (zram , & bv , index , offset , is_write );
1190
+ ret = zram_bvec_rw (zram , & bv , index , offset , is_write , NULL );
1102
1191
out :
1103
1192
/*
1104
1193
* If I/O fails, just return error(ie, non-zero) without
0 commit comments