
Commit 49d1ec8

Ming Lei authored and Jens Axboe (axboe) committed
block: manage bio slab cache by xarray

Manage the bio slab cache via an xarray, using the slab cache size as the
xarray index and storing the 'struct bio_slab' instance in the xarray. This
simplifies the code considerably and makes it more readable than before.

Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Reviewed-by: Pavel Begunkov <[email protected]>
Tested-by: Pavel Begunkov <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 1a23e06 commit 49d1ec8
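
The core of the change is a lookup-or-create pattern keyed by allocation size,
serialized by the existing bio_slab_lock mutex: xa_load() finds a previously
published entry, and a failed lookup triggers creation plus xa_store() under
the same lock. A condensed sketch of that pattern using the real xarray API
follows; the names my_cache, my_caches, and my_cache_get are illustrative and
not part of the patch:

	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/xarray.h>

	/* Illustrative only: a size-indexed, refcounted lookup table. */
	struct my_cache {
		unsigned int size;
		unsigned int ref;
	};

	static DEFINE_XARRAY(my_caches);	/* index: object size */
	static DEFINE_MUTEX(my_caches_lock);

	/* Find the entry for @size, creating and publishing it on first use. */
	static struct my_cache *my_cache_get(unsigned int size)
	{
		struct my_cache *c;

		mutex_lock(&my_caches_lock);
		c = xa_load(&my_caches, size);		/* NULL when absent */
		if (c) {
			c->ref++;
		} else {
			c = kzalloc(sizeof(*c), GFP_KERNEL);
			if (c) {
				c->size = size;
				c->ref = 1;
				/* xa_store() returns an xa_err()-encoded value on failure */
				if (xa_err(xa_store(&my_caches, size, c, GFP_KERNEL))) {
					kfree(c);
					c = NULL;
				}
			}
		}
		mutex_unlock(&my_caches_lock);
		return c;
	}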

File tree

1 file changed: +49 −67 lines changed

block/bio.c

Lines changed: 49 additions & 67 deletions

@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
+#include <linux/xarray.h>
 
 #include <trace/events/block.h>
 #include "blk.h"
@@ -58,89 +59,80 @@ struct bio_slab {
 	char name[8];
 };
 static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
 
-static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+static struct bio_slab *create_bio_slab(unsigned int size)
 {
-	unsigned int sz = sizeof(struct bio) + extra_size;
-	struct kmem_cache *slab = NULL;
-	struct bio_slab *bslab, *new_bio_slabs;
-	unsigned int new_bio_slab_max;
-	unsigned int i, entry = -1;
+	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
 
-	mutex_lock(&bio_slab_lock);
+	if (!bslab)
+		return NULL;
 
-	i = 0;
-	while (i < bio_slab_nr) {
-		bslab = &bio_slabs[i];
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+	bslab->slab = kmem_cache_create(bslab->name, size,
+			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+	if (!bslab->slab)
+		goto fail_alloc_slab;
 
-		if (!bslab->slab && entry == -1)
-			entry = i;
-		else if (bslab->slab_size == sz) {
-			slab = bslab->slab;
-			bslab->slab_ref++;
-			break;
-		}
-		i++;
-	}
+	bslab->slab_ref = 1;
+	bslab->slab_size = size;
 
-	if (slab)
-		goto out_unlock;
-
-	if (bio_slab_nr == bio_slab_max && entry == -1) {
-		new_bio_slab_max = bio_slab_max << 1;
-		new_bio_slabs = krealloc(bio_slabs,
-					 new_bio_slab_max * sizeof(struct bio_slab),
-					 GFP_KERNEL);
-		if (!new_bio_slabs)
-			goto out_unlock;
-		bio_slab_max = new_bio_slab_max;
-		bio_slabs = new_bio_slabs;
-	}
-	if (entry == -1)
-		entry = bio_slab_nr++;
+	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
+		return bslab;
 
-	bslab = &bio_slabs[entry];
+	kmem_cache_destroy(bslab->slab);
 
-	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
-	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
-				 SLAB_HWCACHE_ALIGN, NULL);
-	if (!slab)
-		goto out_unlock;
+fail_alloc_slab:
+	kfree(bslab);
+	return NULL;
+}
 
-	bslab->slab = slab;
-	bslab->slab_ref = 1;
-	bslab->slab_size = sz;
-out_unlock:
+static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
+{
+	return bs->front_pad + sizeof(struct bio) +
+			BIO_INLINE_VECS * sizeof(struct bio_vec);
+}
+
+static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
+{
+	unsigned int size = bs_bio_slab_size(bs);
+	struct bio_slab *bslab;
+
+	mutex_lock(&bio_slab_lock);
+	bslab = xa_load(&bio_slabs, size);
+	if (bslab)
+		bslab->slab_ref++;
+	else
+		bslab = create_bio_slab(size);
 	mutex_unlock(&bio_slab_lock);
-	return slab;
+
+	if (bslab)
+		return bslab->slab;
+	return NULL;
 }
 
 static void bio_put_slab(struct bio_set *bs)
 {
 	struct bio_slab *bslab = NULL;
-	unsigned int i;
+	unsigned int slab_size = bs_bio_slab_size(bs);
 
 	mutex_lock(&bio_slab_lock);
 
-	for (i = 0; i < bio_slab_nr; i++) {
-		if (bs->bio_slab == bio_slabs[i].slab) {
-			bslab = &bio_slabs[i];
-			break;
-		}
-	}
-
+	bslab = xa_load(&bio_slabs, slab_size);
 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;
 
+	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
 	WARN_ON(!bslab->slab_ref);
 
 	if (--bslab->slab_ref)
 		goto out;
 
+	xa_erase(&bio_slabs, slab_size);
+
 	kmem_cache_destroy(bslab->slab);
-	bslab->slab = NULL;
+	kfree(bslab);
 
 out:
 	mutex_unlock(&bio_slab_lock);
@@ -1570,15 +1562,13 @@ int bioset_init(struct bio_set *bs,
 		unsigned int front_pad,
 		int flags)
 {
-	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
-
 	bs->front_pad = front_pad;
 
 	spin_lock_init(&bs->rescue_lock);
 	bio_list_init(&bs->rescue_list);
 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
 
-	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+	bs->bio_slab = bio_find_or_create_slab(bs);
 	if (!bs->bio_slab)
 		return -ENOMEM;
 
@@ -1642,16 +1632,8 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab_max = 2;
-	bio_slab_nr = 0;
-	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
-			    GFP_KERNEL);
-
 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
 
-	if (!bio_slabs)
-		panic("bio: can't allocate bios\n");
-
 	bio_integrity_init();
 	biovec_init_slabs();
 

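Net effect: a bio slab is now keyed by the full per-bio allocation size
(front_pad + sizeof(struct bio) + the inline bio_vecs), so bio_sets with
identical geometry share a single kmem_cache; slab_ref counts the sharers, and
the final bio_put_slab() erases the xarray entry and destroys the cache. A
minimal caller-side sketch, assuming the existing bioset_init()/bioset_exit()
API; my_bs, my_setup, and my_teardown are illustrative:

	#include <linux/bio.h>

	static struct bio_set my_bs;

	static int my_setup(void)
	{
		/*
		 * bioset_init() now derives the slab size from the bio_set
		 * itself (front_pad + sizeof(struct bio) + inline bio_vecs)
		 * and takes a reference on the matching size-indexed slab,
		 * creating it on first use.
		 */
		return bioset_init(&my_bs, 64, 16, BIOSET_NEED_BVECS);
	}

	static void my_teardown(void)
	{
		/* Drops the reference; the last bio_put_slab() frees the cache. */
		bioset_exit(&my_bs);
	}

Keying by total size rather than by array slot also changes the slab name as a
side effect: it is now "bio-<size>" instead of "bio-<index>".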