@@ -74,6 +74,7 @@ enum nullb_device_flags {
 	NULLB_DEV_FL_CACHE = 3,
 };
 
+#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
 /*
  * nullb_page is a page in memory for nullb devices.
  *
@@ -88,10 +89,10 @@ enum nullb_device_flags {
  */
 struct nullb_page {
 	struct page *page;
-	unsigned long bitmap;
+	DECLARE_BITMAP(bitmap, MAP_SZ);
 };
-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+#define NULLB_PAGE_LOCK (MAP_SZ - 1)
+#define NULLB_PAGE_FREE (MAP_SZ - 2)
 
 struct nullb_device {
 	struct nullb *nullb;
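Note on the new layout: DECLARE_BITMAP(name, bits) expands to an unsigned long array sized to hold 'bits' bits, so MAP_SZ reserves one bit per sector in a page plus the two flag bits NULLB_PAGE_LOCK and NULLB_PAGE_FREE at the top of the map. A minimal userspace sketch of the sizing, assuming a 4 KiB page and 512-byte sectors (the macros mirror the kernel names but are re-declared here, not taken from the patch):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SECTOR_SHIFT	9
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]
#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)

struct nullb_page_sketch {
	void *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);	/* 8 sector bits + LOCK + FREE */
};

int main(void)
{
	struct nullb_page_sketch p;

	/* 10 bits fit in one long here; a 64 KiB page would need 130 bits,
	 * i.e. three longs -- exactly what the old single 'unsigned long
	 * bitmap' field could not represent. */
	printf("MAP_SZ = %lu bits, bitmap = %zu word(s)\n",
	       MAP_SZ, sizeof(p.bitmap) / sizeof(unsigned long));
	return 0;
}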
@@ -733,7 +734,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 	if (!t_page->page)
 		goto out_freepage;
 
-	t_page->bitmap = 0;
+	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
 	return t_page;
 out_freepage:
 	kfree(t_page);
@@ -743,13 +744,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 
 static void null_free_page(struct nullb_page *t_page)
 {
-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
+	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
 		return;
 	__free_page(t_page->page);
 	kfree(t_page);
 }
 
+static bool null_page_empty(struct nullb_page *page)
+{
+	int size = MAP_SZ - 2;
+
+	return find_first_bit(page->bitmap, size) == size;
+}
+
 static void null_free_sector(struct nullb *nullb, sector_t sector,
 	bool is_cache)
 {
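The new null_page_empty() helper leans on a find_first_bit() convention worth spelling out: when no bit below 'size' is set, the function returns 'size' itself. Scanning only MAP_SZ - 2 bits deliberately excludes the NULLB_PAGE_LOCK and NULLB_PAGE_FREE flag bits, so a page still counts as empty while either flag is set. A self-contained userspace approximation (the find_first_bit() here is a naive stand-in for the kernel's word-at-a-time version, and MAP_SZ assumes the 4 KiB-page value from the earlier sketch):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define MAP_SZ		10	/* (4096 >> 9) + 2, as above */

static size_t find_first_bit(const unsigned long *map, size_t size)
{
	for (size_t i = 0; i < size; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;	/* no bit set below 'size' */
}

static bool null_page_empty(const unsigned long *bitmap)
{
	size_t size = MAP_SZ - 2;	/* ignore the LOCK and FREE bits */

	return find_first_bit(bitmap, size) == size;
}

int main(void)
{
	unsigned long bitmap[1] = { 0 };

	assert(null_page_empty(bitmap));	/* no data bits set */
	bitmap[0] |= 1UL << (MAP_SZ - 1);	/* LOCK bit alone ... */
	assert(null_page_empty(bitmap));	/* ... still "empty" */
	bitmap[0] |= 1UL << 3;			/* sector 3 now has data */
	assert(!null_page_empty(bitmap));
	return 0;
}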
@@ -764,9 +772,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 
 	t_page = radix_tree_lookup(root, idx);
 	if (t_page) {
-		__clear_bit(sector_bit, &t_page->bitmap);
+		__clear_bit(sector_bit, t_page->bitmap);
 
-		if (!t_page->bitmap) {
+		if (null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(root, idx, t_page);
 			WARN_ON(ret != t_page);
 			null_free_page(ret);
@@ -837,7 +845,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
 	t_page = radix_tree_lookup(root, idx);
 	WARN_ON(t_page && t_page->page->index != idx);
 
-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
 		return t_page;
 
 	return NULL;
@@ -900,10 +908,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
 
-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
+	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
 		null_free_page(c_page);
-		if (t_page && t_page->bitmap == 0) {
+		if (t_page && null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(&nullb->dev->data,
 				idx, t_page);
 			null_free_page(t_page);
@@ -919,11 +927,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	for (i = 0; i < PAGE_SECTORS;
 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
-		if (test_bit(i, &c_page->bitmap)) {
+		if (test_bit(i, c_page->bitmap)) {
 			offset = (i << SECTOR_SHIFT);
 			memcpy(dst + offset, src + offset,
 				nullb->dev->blocksize);
-			__set_bit(i, &t_page->bitmap);
+			__set_bit(i, t_page->bitmap);
 		}
 	}
 
@@ -960,10 +968,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
 		 * We found the page which is being flushed to disk by other
 		 * threads
 		 */
-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
 			c_pages[i] = NULL;
 		else
-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
 	}
 
 	one_round = 0;
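The __-prefixed helpers used throughout are the kernel's non-atomic bit operations, which suffice because these paths are serialized elsewhere in the driver (under nullb->lock). A rough userspace sketch of their semantics on an unsigned long array, for orientation only:

#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Non-atomic: plain read-modify-write, safe only under external locking. */
static void __set_bit(unsigned long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void __clear_bit(unsigned long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static bool test_bit(unsigned long nr, const unsigned long *addr)
{
	return addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

This also shows why the patch drops every '&': bitmap is now an array, which decays to the unsigned long * these helpers expect, whereas the old scalar field needed its address taken.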
@@ -1016,7 +1024,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
 		kunmap_atomic(dst);
 		kunmap_atomic(src);
 
-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
 
 		if (is_fua)
 			null_free_sector(nullb, sector, true);
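For context, the bit index written above is the sector's offset within its page. A quick standalone check of that mapping, assuming 4 KiB pages and 512-byte sectors (the SECTOR_MASK derivation below mirrors the driver's but is re-declared here):

#include <assert.h>

#define SECTOR_SHIFT		9
#define PAGE_SHIFT		12	/* 4 KiB pages assumed */
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1UL << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

int main(void)
{
	/* Sector 8 is the first sector of the second page: bit 0 there. */
	assert((8 & SECTOR_MASK) == 0);
	/* Sector 11 sits three sectors into that page: bit 3. */
	assert((11 & SECTOR_MASK) == 3);
	return 0;
}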
@@ -1846,10 +1854,6 @@ static int __init null_init(void)
 	struct nullb *nullb;
 	struct nullb_device *dev;
 
-	/* check for nullb_page.bitmap */
-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
-		return -EINVAL;
-
 	if (g_bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
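The deleted guard existed because a single-word bitmap can only track sizeof(unsigned long) * 8 - 2 sectors per page; with DECLARE_BITMAP() the array is sized at compile time, so module init no longer has to refuse such configurations. The arithmetic for a 64 KiB-page build (e.g. arm64 or ppc64 with 64K pages), as a quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 65536, sector_shift = 9;
	unsigned long usable = sizeof(unsigned long) * 8 - 2;	/* 62 on LP64 */
	unsigned long needed = page_size >> sector_shift;	/* 128 */

	/* 128 > 62: the removed check would have returned -EINVAL here. */
	printf("need %lu sector bits, old bitmap held %lu\n", needed, usable);
	return 0;
}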