@@ -68,6 +68,7 @@ enum nullb_device_flags {
 	NULLB_DEV_FL_CACHE = 3,
 };
 
+#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
 /*
  * nullb_page is a page in memory for nullb devices.
  *
@@ -82,10 +83,10 @@ enum nullb_device_flags {
  */
 struct nullb_page {
 	struct page *page;
-	unsigned long bitmap;
+	DECLARE_BITMAP(bitmap, MAP_SZ);
 };
-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+#define NULLB_PAGE_LOCK (MAP_SZ - 1)
+#define NULLB_PAGE_FREE (MAP_SZ - 2)
 
 struct nullb_device {
 	struct nullb *nullb;
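
A note on the sizing, not part of the patch itself: MAP_SZ reserves one bit per 512-byte sector in a page plus two extra bits, because the NULLB_PAGE_LOCK and NULLB_PAGE_FREE flags live at the top of the same map. On a 4K-page kernel that is (4096 >> 9) + 2 = 10 bits, which fits in a single unsigned long; on a 64K-page kernel it is (65536 >> 9) + 2 = 130 bits, which is why the old single-word bitmap could not work there. DECLARE_BITMAP sizes an unsigned long array at compile time, roughly:

	/* What DECLARE_BITMAP(bitmap, MAP_SZ) expands to (see
	 * include/linux/types.h): enough longs to hold MAP_SZ bits. */
	unsigned long bitmap[BITS_TO_LONGS(MAP_SZ)];

	/* 4K pages:  BITS_TO_LONGS(10)  == 1 long on 64-bit
	 * 64K pages: BITS_TO_LONGS(130) == 3 longs on 64-bit */
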
@@ -725,7 +726,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 	if (!t_page->page)
 		goto out_freepage;
 
-	t_page->bitmap = 0;
+	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
 	return t_page;
 out_freepage:
 	kfree(t_page);
@@ -735,13 +736,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 
 static void null_free_page(struct nullb_page *t_page)
 {
-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
+	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
 		return;
 	__free_page(t_page->page);
 	kfree(t_page);
 }
 
+static bool null_page_empty(struct nullb_page *page)
+{
+	int size = MAP_SZ - 2;
+
+	return find_first_bit(page->bitmap, size) == size;
+}
+
 static void null_free_sector(struct nullb *nullb, sector_t sector,
 	bool is_cache)
 {
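
The new null_page_empty() helper replaces the old `!t_page->bitmap` test, which cannot be carried over once bitmap is an array (the array name decays to a pointer that is never NULL). It scans only the low MAP_SZ - 2 data bits, so the LOCK and FREE flag bits at the top of the map never make a data-empty page look occupied; find_first_bit() returns its size argument when no bit in the range is set. An equivalent formulation using the generic bitmap helper, shown here only for comparison:

	/* Not from the patch: bitmap_empty(src, nbits) is defined in
	 * terms of find_first_bit(src, nbits) == nbits, so this is the
	 * same test as null_page_empty() above. */
	static bool null_page_empty_alt(struct nullb_page *page)
	{
		return bitmap_empty(page->bitmap, MAP_SZ - 2);
	}
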
@@ -756,9 +764,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 
 	t_page = radix_tree_lookup(root, idx);
 	if (t_page) {
-		__clear_bit(sector_bit, &t_page->bitmap);
+		__clear_bit(sector_bit, t_page->bitmap);
 
-		if (!t_page->bitmap) {
+		if (null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(root, idx, t_page);
 			WARN_ON(ret != t_page);
 			null_free_page(ret);
@@ -829,7 +837,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
 	t_page = radix_tree_lookup(root, idx);
 	WARN_ON(t_page && t_page->page->index != idx);
 
-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
 		return t_page;
 
 	return NULL;
@@ -892,10 +900,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
 
-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
+	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
 		null_free_page(c_page);
-		if (t_page && t_page->bitmap == 0) {
+		if (t_page && null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(&nullb->dev->data,
 				idx, t_page);
 			null_free_page(t_page);
@@ -911,11 +919,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	for (i = 0; i < PAGE_SECTORS;
 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
-		if (test_bit(i, &c_page->bitmap)) {
+		if (test_bit(i, c_page->bitmap)) {
 			offset = (i << SECTOR_SHIFT);
 			memcpy(dst + offset, src + offset,
 				nullb->dev->blocksize);
-			__set_bit(i, &t_page->bitmap);
+			__set_bit(i, t_page->bitmap);
 		}
 	}
 
@@ -952,10 +960,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
 		 * We found the page which is being flushed to disk by other
 		 * threads
 		 */
-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
 			c_pages[i] = NULL;
 		else
-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
 	}
 
 	one_round = 0;
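
Read together with the null_free_page() and null_flush_cache_page() hunks above, the two flag bits form a small hand-off protocol: null_make_cache_space() treats NULLB_PAGE_LOCK as a flush-in-progress marker, skipping cache pages another thread has already claimed and setting the bit on the ones it takes, while null_free_page() sets NULLB_PAGE_FREE and, if the page is still locked, leaves the actual free to the flusher, which re-checks the FREE bit after clearing the lock.
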
@@ -1008,7 +1016,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
 		kunmap_atomic(dst);
 		kunmap_atomic(src);
 
-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
 
 		if (is_fua)
 			null_free_sector(nullb, sector, true);
@@ -1922,10 +1930,6 @@ static int __init null_init(void)
 	struct nullb *nullb;
 	struct nullb_device *dev;
 
-	/* check for nullb_page.bitmap */
-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
-		return -EINVAL;
-
 	if (g_bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);