
Commit a649488

Ming Lei authored and gregkh committed
block: null_blk: fix 'Invalid parameters' when loading module
[ Upstream commit 66231ad ]

On ARM64, the default page size is 64K on some distributions, and we should allow ARM64 people to play with null_blk. This patch fixes the issue by extending the page bitmap size to support non-4KB PAGE_SIZE.

Cc: Bart Van Assche <[email protected]>
Cc: Shaohua Li <[email protected]>
Cc: Kyungchan Koh <[email protected]>
Cc: weiping zhang <[email protected]>
Cc: Yi Zhang <[email protected]>
Reported-by: Yi Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
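The arithmetic behind the failure: with 512-byte sectors, a 64K page needs PAGE_SIZE >> SECTOR_SHIFT = 128 per-sector bits plus the two LOCK/FREE flag bits, which cannot fit in the single unsigned long the old struct nullb_page carried, so the sanity check removed below in null_init() rejected the module with -EINVAL. A minimal userspace sketch of that check follows; PAGE_SIZE and SECTOR_SHIFT here are hard-coded stand-ins for the kernel macros, assuming a 64K-page ARM64 system.

#include <stdio.h>

/* Hard-coded stand-ins for the kernel's PAGE_SIZE and SECTOR_SHIFT:
 * a 64K page and 512-byte sectors, as on some ARM64 distributions. */
#define PAGE_SIZE    65536UL
#define SECTOR_SHIFT 9

int main(void)
{
        unsigned long sector_bits  = PAGE_SIZE >> SECTOR_SHIFT;      /* 128 */
        unsigned long old_capacity = sizeof(unsigned long) * 8 - 2;  /* 62 on 64-bit */

        /* Mirrors the check this patch removes from null_init(). */
        if (old_capacity < sector_bits)
                printf("old layout: need %lu bits, have %lu -> -EINVAL\n",
                       sector_bits, old_capacity);

        /* The patch instead sizes the bitmap from PAGE_SIZE itself. */
        printf("new MAP_SZ = %lu bits (sector bits + LOCK + FREE)\n",
               sector_bits + 2);
        return 0;
}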
1 parent f0078d2 commit a649488

File tree: 1 file changed, +25 −21 lines


drivers/block/null_blk.c

Lines changed: 25 additions & 21 deletions
@@ -68,6 +68,7 @@ enum nullb_device_flags {
 	NULLB_DEV_FL_CACHE = 3,
 };
 
+#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
 /*
  * nullb_page is a page in memory for nullb devices.
  *
@@ -82,10 +83,10 @@ enum nullb_device_flags {
  */
 struct nullb_page {
 	struct page *page;
-	unsigned long bitmap;
+	DECLARE_BITMAP(bitmap, MAP_SZ);
 };
-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+#define NULLB_PAGE_LOCK (MAP_SZ - 1)
+#define NULLB_PAGE_FREE (MAP_SZ - 2)
 
 struct nullb_device {
 	struct nullb *nullb;
@@ -725,7 +726,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 	if (!t_page->page)
 		goto out_freepage;
 
-	t_page->bitmap = 0;
+	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
 	return t_page;
 out_freepage:
 	kfree(t_page);
@@ -735,13 +736,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 
 static void null_free_page(struct nullb_page *t_page)
 {
-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
+	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
 		return;
 	__free_page(t_page->page);
 	kfree(t_page);
 }
 
+static bool null_page_empty(struct nullb_page *page)
+{
+	int size = MAP_SZ - 2;
+
+	return find_first_bit(page->bitmap, size) == size;
+}
+
 static void null_free_sector(struct nullb *nullb, sector_t sector,
 	bool is_cache)
 {
@@ -756,9 +764,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 
 	t_page = radix_tree_lookup(root, idx);
 	if (t_page) {
-		__clear_bit(sector_bit, &t_page->bitmap);
+		__clear_bit(sector_bit, t_page->bitmap);
 
-		if (!t_page->bitmap) {
+		if (null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(root, idx, t_page);
 			WARN_ON(ret != t_page);
 			null_free_page(ret);
@@ -829,7 +837,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
 	t_page = radix_tree_lookup(root, idx);
 	WARN_ON(t_page && t_page->page->index != idx);
 
-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
 		return t_page;
 
 	return NULL;
@@ -892,10 +900,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
 
-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
+	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
 		null_free_page(c_page);
-		if (t_page && t_page->bitmap == 0) {
+		if (t_page && null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(&nullb->dev->data,
 				idx, t_page);
 			null_free_page(t_page);
@@ -911,11 +919,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	for (i = 0; i < PAGE_SECTORS;
 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
-		if (test_bit(i, &c_page->bitmap)) {
+		if (test_bit(i, c_page->bitmap)) {
 			offset = (i << SECTOR_SHIFT);
 			memcpy(dst + offset, src + offset,
 				nullb->dev->blocksize);
-			__set_bit(i, &t_page->bitmap);
+			__set_bit(i, t_page->bitmap);
 		}
 	}
 
@@ -952,10 +960,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
 		 * We found the page which is being flushed to disk by other
		 * threads
		 */
-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
 	}
 
 	one_round = 0;
@@ -1008,7 +1016,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
 		kunmap_atomic(dst);
 		kunmap_atomic(src);
 
-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
 
 		if (is_fua)
			null_free_sector(nullb, sector, true);
@@ -1922,10 +1930,6 @@ static int __init null_init(void)
 	struct nullb *nullb;
 	struct nullb_device *dev;
 
-	/* check for nullb_page.bitmap */
-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
-		return -EINVAL;
-
 	if (g_bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);

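To see how the new layout behaves, here is a small userspace sketch of the same bitmap scheme; the helpers are simplified stand-ins for the kernel's __set_bit()/test_bit()/find_first_bit(), and the 64K page / 512-byte sector values are assumptions, not taken from the commit. The point is that the LOCK and FREE flags occupy the top two bits of a MAP_SZ-sized bitmap, so null_page_empty() only scans the first MAP_SZ - 2 bits and ignores them.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE     65536UL                             /* stand-in: 64K page */
#define SECTOR_SHIFT  9                                   /* 512-byte sectors */
#define MAP_SZ        ((PAGE_SIZE >> SECTOR_SHIFT) + 2)   /* 128 sector bits + LOCK + FREE */
#define BITS_PER_LONG (sizeof(unsigned long) * 8)
#define BITMAP_LONGS  ((MAP_SZ + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define PAGE_LOCK (MAP_SZ - 1)
#define PAGE_FREE (MAP_SZ - 2)

/* Simplified stand-ins for the kernel's bitmap helpers. */
static void set_bit_(unsigned long nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit_(unsigned long nr, const unsigned long *map)
{
        return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

/* Like find_first_bit(): returns 'size' when no bit below 'size' is set. */
static unsigned long find_first_bit_(const unsigned long *map, unsigned long size)
{
        for (unsigned long i = 0; i < size; i++)
                if (test_bit_(i, map))
                        return i;
        return size;
}

int main(void)
{
        unsigned long bitmap[BITMAP_LONGS];
        unsigned long data_bits = MAP_SZ - 2;   /* what null_page_empty() scans */

        memset(bitmap, 0, sizeof(bitmap));

        /* Mark the page locked: this must not make it look non-empty. */
        set_bit_(PAGE_LOCK, bitmap);
        printf("empty (ignoring LOCK/FREE): %s\n",
               find_first_bit_(bitmap, data_bits) == data_bits ? "yes" : "no");

        set_bit_(3, bitmap);                    /* sector 3 now holds data */
        printf("empty after writing sector 3: %s\n",
               find_first_bit_(bitmap, data_bits) == data_bits ? "yes" : "no");
        return 0;
}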
0 commit comments
