
Commit 062eacf

urezki authored and akpm00 committed
mm: vmalloc: remove a global vmap_blocks xarray
A global vmap_blocks xarray can be contended under heavy usage of the
vm_map_ram()/vm_unmap_ram() APIs. lock_stat shows that the
"vmap_blocks.xa_lock" lock is second in the top list of contention points:

<snip>
----------------------------------------
class name              con-bounces  contentions ...
----------------------------------------
vmap_area_lock:             2554079      2554276 ...
  --------------
  vmap_area_lock            1297948  [<00000000dd41cbaa>] alloc_vmap_area+0x1c7/0x910
  vmap_area_lock            1256330  [<000000009d927bf3>] free_vmap_block+0x4a/0xe0
  vmap_area_lock                  1  [<00000000c95c05a7>] find_vm_area+0x16/0x70
  --------------
  vmap_area_lock            1738590  [<00000000dd41cbaa>] alloc_vmap_area+0x1c7/0x910
  vmap_area_lock             815688  [<000000009d927bf3>] free_vmap_block+0x4a/0xe0
  vmap_area_lock                  1  [<00000000c1d619d7>] __get_vm_area_node+0xd2/0x170

vmap_blocks.xa_lock:         862689       862698 ...
  -------------------
  vmap_blocks.xa_lock        378418  [<00000000625a5626>] vm_map_ram+0x359/0x4a0
  vmap_blocks.xa_lock        484280  [<00000000caa2ef03>] xa_erase+0xe/0x30
  -------------------
  vmap_blocks.xa_lock        576226  [<00000000caa2ef03>] xa_erase+0xe/0x30
  vmap_blocks.xa_lock        286472  [<00000000625a5626>] vm_map_ram+0x359/0x4a0
...
<snip>

This is the result of running vm_map_ram()/vm_unmap_ram() in a loop. The
test creates 64 threads (on a 64-CPU system) and each one maps/unmaps 1
page.

After this change the "xa_lock" can be considered noise under the same
test conditions:

<snip>
...
&xa->xa_lock#1:               10333        10394 ...
  --------------
  &xa->xa_lock#1               5349  [<00000000bbbc9751>] xa_erase+0xe/0x30
  &xa->xa_lock#1               5045  [<0000000018def45d>] vm_map_ram+0x3a4/0x4f0
  --------------
  &xa->xa_lock#1               7326  [<0000000018def45d>] vm_map_ram+0x3a4/0x4f0
  &xa->xa_lock#1               3068  [<00000000bbbc9751>] xa_erase+0xe/0x30
...
<snip>

Running test_vmalloc.sh with run_test_mask=1024 nr_threads=64 nr_pages=5
shows around 8 percent throughput improvement for the vm_map_ram() and
vm_unmap_ram() APIs.

This patch does not address the vmap_area_lock/free_vmap_area_lock and
purge_vmap_area_lock bottlenecks; that is a separate rework.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
Reviewed-by: Lorenzo Stoakes <[email protected]>
Reviewed-by: Baoquan He <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Oleksiy Avramchenko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
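To make the contended pattern concrete: each test thread repeatedly maps and
unmaps a single page, and both calls go through the vmap_block lookup that
this patch spreads across per-CPU xarrays. The snippet below is a
hypothetical, simplified kernel-side sketch of such a loop (the function name
is ours; it is not the actual test_vmalloc.c source):

/*
 * Hypothetical sketch (not the test_vmalloc.c code) of the per-thread loop
 * that produces the vmap_blocks.xa_lock contention quoted above. Each
 * iteration maps and unmaps one page, so both calls look up a vmap_block,
 * which after this patch goes through a per-CPU xarray instead of the
 * single global one.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

static int toy_map_unmap_loop(unsigned long iterations)
{
        struct page *page = alloc_page(GFP_KERNEL);
        unsigned long i;

        if (!page)
                return -ENOMEM;

        for (i = 0; i < iterations; i++) {
                void *va = vm_map_ram(&page, 1, NUMA_NO_NODE);

                if (!va)
                        break;
                vm_unmap_ram(va, 1);
        }

        __free_page(page);
        return 0;
}

With the patch applied, two threads whose blocks hash to different per-CPU
xarrays no longer serialize on a single xa_lock.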
1 parent 62f31bd commit 062eacf

File tree

1 file changed: +63 -8 lines changed

mm/vmalloc.c

Lines changed: 63 additions & 8 deletions
@@ -1915,6 +1915,13 @@ static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
 struct vmap_block_queue {
         spinlock_t lock;
         struct list_head free;
+
+        /*
+         * An xarray requires an extra memory dynamically to
+         * be allocated. If it is an issue, we can use rb-tree
+         * instead.
+         */
+        struct xarray vmap_blocks;
 };
 
 struct vmap_block {
@@ -1932,11 +1939,48 @@ struct vmap_block {
 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
 
 /*
- * XArray of vmap blocks, indexed by address, to quickly find a vmap block
- * in the free path. Could get rid of this if we change the API to return a
- * "cookie" from alloc, to be passed to free. But no big deal yet.
+ * In order to fast access to any "vmap_block" associated with a
+ * specific address, we use a hash.
+ *
+ * A per-cpu vmap_block_queue is used in both ways, to serialize
+ * an access to free block chains among CPUs(alloc path) and it
+ * also acts as a vmap_block hash(alloc/free paths). It means we
+ * overload it, since we already have the per-cpu array which is
+ * used as a hash table. When used as a hash a 'cpu' passed to
+ * per_cpu() is not actually a CPU but rather a hash index.
+ *
+ * A hash function is addr_to_vb_xarray() which hashes any address
+ * to a specific index(in a hash) it belongs to. This then uses a
+ * per_cpu() macro to access an array with generated index.
+ *
+ * An example:
+ *
+ *  CPU_1  CPU_2  CPU_0
+ *    |      |      |
+ *    V      V      V
+ * 0     10     20     30     40     50     60
+ * |------|------|------|------|------|------|...<vmap address space>
+ *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
+ *
+ * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
+ *   it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
+ *
+ * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
+ *   it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
+ *
+ * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
+ *   it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
+ *
+ * This technique almost always avoids lock contention on insert/remove,
+ * however xarray spinlocks protect against any contention that remains.
  */
-static DEFINE_XARRAY(vmap_blocks);
+static struct xarray *
+addr_to_vb_xarray(unsigned long addr)
+{
+        int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
+
+        return &per_cpu(vmap_block_queue, index).vmap_blocks;
+}
 
 /*
  * We should probably have a fallback mechanism to allocate virtual memory
@@ -1974,6 +2018,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
         struct vmap_block_queue *vbq;
         struct vmap_block *vb;
         struct vmap_area *va;
+        struct xarray *xa;
         unsigned long vb_idx;
         int node, err;
         void *vaddr;
@@ -2007,8 +2052,9 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
         bitmap_set(vb->used_map, 0, (1UL << order));
         INIT_LIST_HEAD(&vb->free_list);
 
+        xa = addr_to_vb_xarray(va->va_start);
         vb_idx = addr_to_vb_idx(va->va_start);
-        err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
+        err = xa_insert(xa, vb_idx, vb, gfp_mask);
         if (err) {
                 kfree(vb);
                 free_vmap_area(va);
@@ -2026,8 +2072,10 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 static void free_vmap_block(struct vmap_block *vb)
 {
         struct vmap_block *tmp;
+        struct xarray *xa;
 
-        tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
+        xa = addr_to_vb_xarray(vb->va->va_start);
+        tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
         BUG_ON(tmp != vb);
 
         spin_lock(&vmap_area_lock);
@@ -2139,6 +2187,7 @@ static void vb_free(unsigned long addr, unsigned long size)
         unsigned long offset;
         unsigned int order;
         struct vmap_block *vb;
+        struct xarray *xa;
 
         BUG_ON(offset_in_page(size));
         BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -2147,7 +2196,10 @@ static void vb_free(unsigned long addr, unsigned long size)
 
         order = get_order(size);
         offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
-        vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
+
+        xa = addr_to_vb_xarray(addr);
+        vb = xa_load(xa, addr_to_vb_idx(addr));
+
         spin_lock(&vb->lock);
         bitmap_clear(vb->used_map, offset, (1UL << order));
         spin_unlock(&vb->lock);
@@ -3525,6 +3577,7 @@ static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
 {
         char *start;
         struct vmap_block *vb;
+        struct xarray *xa;
         unsigned long offset;
         unsigned int rs, re;
         size_t remains, n;
@@ -3543,7 +3596,8 @@ static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
          * Area is split into regions and tracked with vmap_block, read out
          * each region and zero fill the hole between regions.
          */
-        vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
+        xa = addr_to_vb_xarray((unsigned long) addr);
+        vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
         if (!vb)
                 goto finished_zero;
 
@@ -4337,6 +4391,7 @@ void __init vmalloc_init(void)
                 p = &per_cpu(vfree_deferred, i);
                 init_llist_head(&p->list);
                 INIT_WORK(&p->wq, delayed_vfree_work);
+                xa_init(&vbq->vmap_blocks);
         }
 
         /* Import existing vmlist entries. */
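The address-to-index mapping used by the new addr_to_vb_xarray() helper can be
sanity-checked with a small user-space sketch; the block size and CPU count
below are toy values chosen to match the 0..60 diagram in the new comment
block, not the real VMAP_BLOCK_SIZE or num_possible_cpus():

/*
 * Stand-alone user-space sketch of the hashing in addr_to_vb_xarray().
 * Toy constants only, mirroring the commit's diagram.
 */
#include <stdio.h>

#define TOY_BLOCK_SIZE 10
#define TOY_NR_CPUS     3

static int toy_addr_to_index(unsigned long addr)
{
        return (int)((addr / TOY_BLOCK_SIZE) % TOY_NR_CPUS);
}

int main(void)
{
        unsigned long addrs[] = { 6, 11, 20 };
        int i;

        for (i = 0; i < 3; i++)
                printf("addr %lu -> per-CPU bucket %d\n",
                       addrs[i], toy_addr_to_index(addrs[i]));

        /* Prints buckets 0, 1 and 2, matching the CPU0/CPU1/CPU2 example. */
        return 0;
}

Because consecutive block-sized zones hash to different indices, neighbouring
blocks naturally spread across the per-CPU xarrays and their locks.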
