@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
+#include <linux/xarray.h>
 
 #include <trace/events/block.h>
 #include "blk.h"
@@ -58,89 +59,80 @@ struct bio_slab {
 	char name[8];
 };
 static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
 
-static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+static struct bio_slab *create_bio_slab(unsigned int size)
 {
-	unsigned int sz = sizeof(struct bio) + extra_size;
-	struct kmem_cache *slab = NULL;
-	struct bio_slab *bslab, *new_bio_slabs;
-	unsigned int new_bio_slab_max;
-	unsigned int i, entry = -1;
+	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
 
-	mutex_lock(&bio_slab_lock);
+	if (!bslab)
+		return NULL;
 
-	i = 0;
-	while (i < bio_slab_nr) {
-		bslab = &bio_slabs[i];
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+	bslab->slab = kmem_cache_create(bslab->name, size,
+			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+	if (!bslab->slab)
+		goto fail_alloc_slab;
 
-		if (!bslab->slab && entry == -1)
-			entry = i;
-		else if (bslab->slab_size == sz) {
-			slab = bslab->slab;
-			bslab->slab_ref++;
-			break;
-		}
-		i++;
-	}
+	bslab->slab_ref = 1;
+	bslab->slab_size = size;
 
-	if (slab)
-		goto out_unlock;
-
-	if (bio_slab_nr == bio_slab_max && entry == -1) {
-		new_bio_slab_max = bio_slab_max << 1;
-		new_bio_slabs = krealloc(bio_slabs,
-					 new_bio_slab_max * sizeof(struct bio_slab),
-					 GFP_KERNEL);
-		if (!new_bio_slabs)
-			goto out_unlock;
-		bio_slab_max = new_bio_slab_max;
-		bio_slabs = new_bio_slabs;
-	}
-	if (entry == -1)
-		entry = bio_slab_nr++;
+	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
+		return bslab;
 
-	bslab = &bio_slabs[entry];
+	kmem_cache_destroy(bslab->slab);
 
-	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
-	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
-				 SLAB_HWCACHE_ALIGN, NULL);
-	if (!slab)
-		goto out_unlock;
+fail_alloc_slab:
+	kfree(bslab);
+	return NULL;
+}
 
-	bslab->slab = slab;
-	bslab->slab_ref = 1;
-	bslab->slab_size = sz;
-out_unlock:
+static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
+{
+	return bs->front_pad + sizeof(struct bio) +
+		BIO_INLINE_VECS * sizeof(struct bio_vec);
+}
+
+static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
+{
+	unsigned int size = bs_bio_slab_size(bs);
+	struct bio_slab *bslab;
+
+	mutex_lock(&bio_slab_lock);
+	bslab = xa_load(&bio_slabs, size);
+	if (bslab)
+		bslab->slab_ref++;
+	else
+		bslab = create_bio_slab(size);
 	mutex_unlock(&bio_slab_lock);
-	return slab;
+
+	if (bslab)
+		return bslab->slab;
+	return NULL;
 }
 
 static void bio_put_slab(struct bio_set *bs)
 {
 	struct bio_slab *bslab = NULL;
-	unsigned int i;
+	unsigned int slab_size = bs_bio_slab_size(bs);
 
 	mutex_lock(&bio_slab_lock);
 
-	for (i = 0; i < bio_slab_nr; i++) {
-		if (bs->bio_slab == bio_slabs[i].slab) {
-			bslab = &bio_slabs[i];
-			break;
-		}
-	}
-
+	bslab = xa_load(&bio_slabs, slab_size);
 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 		goto out;
 
+	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
 	WARN_ON(!bslab->slab_ref);
 
 	if (--bslab->slab_ref)
 		goto out;
 
+	xa_erase(&bio_slabs, slab_size);
+
 	kmem_cache_destroy(bslab->slab);
-	bslab->slab = NULL;
+	kfree(bslab);
 
 out:
 	mutex_unlock(&bio_slab_lock);
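The hunk above replaces a linear scan over a manually grown array with a direct xarray lookup keyed by slab size, and lets the refcount decide when an entry is created or destroyed; because the size is the key, bio_put_slab() can now find its bio_slab without scanning. As a reading aid only, here is a minimal sketch of that mutex-serialized lookup-or-create pattern. Everything here is hypothetical (sized_cache, struct sized_entry, cache_get, cache_put are made-up names), and the real code additionally manages a kmem_cache per entry:

	/*
	 * Illustrative sketch only -- not part of this commit. An xarray
	 * keyed by size holds refcounted entries; one mutex serializes
	 * creation, reuse, and teardown.
	 */
	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/xarray.h>

	struct sized_entry {
		unsigned int size;	/* the xarray key, kept for the put path */
		unsigned int refs;
	};

	static DEFINE_XARRAY(sized_cache);
	static DEFINE_MUTEX(sized_cache_lock);

	static struct sized_entry *cache_get(unsigned int size)
	{
		struct sized_entry *e;

		mutex_lock(&sized_cache_lock);
		e = xa_load(&sized_cache, size);	/* direct lookup by key */
		if (e) {
			e->refs++;			/* reuse the shared entry */
			goto out;
		}
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out;
		e->size = size;
		e->refs = 1;
		/* xa_store() returns an xa_err()-encoded pointer on failure */
		if (xa_err(xa_store(&sized_cache, size, e, GFP_KERNEL))) {
			kfree(e);
			e = NULL;
		}
	out:
		mutex_unlock(&sized_cache_lock);
		return e;
	}

	static void cache_put(struct sized_entry *e)
	{
		mutex_lock(&sized_cache_lock);
		if (--e->refs == 0) {
			xa_erase(&sized_cache, e->size);	/* unpublish, then free */
			kfree(e);
		}
		mutex_unlock(&sized_cache_lock);
	}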
@@ -1570,15 +1562,13 @@ int bioset_init(struct bio_set *bs,
 		unsigned int front_pad,
 		int flags)
 {
-	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
-
 	bs->front_pad = front_pad;
 
 	spin_lock_init(&bs->rescue_lock);
 	bio_list_init(&bs->rescue_list);
 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
 
-	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+	bs->bio_slab = bio_find_or_create_slab(bs);
 	if (!bs->bio_slab)
 		return -ENOMEM;
 
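Since the slab size is now derived from the bio_set itself (front_pad plus the fixed bio and inline-bvec footprint in bs_bio_slab_size()), callers simply hand the set to bio_find_or_create_slab(). A hypothetical usage sketch follows; the driver names and numbers are invented, while bioset_init() and bioset_exit() are the real entry points:

	/* Hypothetical caller: a driver reserving 32 bytes of per-bio
	 * context via front_pad. Every bio_set whose computed slab size
	 * matches shares one "bio-N" kmem_cache through the xarray. */
	static struct bio_set my_bioset;

	static int my_driver_init(void)
	{
		/* 64 bios kept in the mempool, 32 bytes of front padding */
		return bioset_init(&my_bioset, 64, 32, 0);
	}

	static void my_driver_exit(void)
	{
		/* drops the slab reference; the last put erases and frees it */
		bioset_exit(&my_bioset);
	}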
@@ -1642,16 +1632,8 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab_max = 2;
-	bio_slab_nr = 0;
-	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
-			    GFP_KERNEL);
-
 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
 
-	if (!bio_slabs)
-		panic("bio: can't allocate bios\n");
-
 	bio_integrity_init();
 	biovec_init_slabs();
 