
Commit db06d75

Merge branch 'for-4.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu fixes from Tejun Heo:
 "While adding GFP_ATOMIC support to the percpu allocator, the
  synchronization for the fast-path which doesn't require external
  allocations was separated into pcpu_lock.

  Unfortunately, it incorrectly decoupled async paths and percpu chunks
  could get destroyed while still being operated on.  This contains two
  patches to fix the bug"

* 'for-4.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: fix synchronization between synchronous map extension and chunk destruction
  percpu: fix synchronization between chunk->map_extend_work and chunk destruction
2 parents 35398ee + 6710e59 commit db06d75
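
The fix replaces a per-chunk work item with a global pending list: a work_struct embedded in each chunk can still be pending or running after the chunk is freed, whereas a list protected by pcpu_lock lets chunk destruction unlink the chunk under the same lock the worker takes, so the worker can never touch a freed chunk. Below is a minimal userspace C sketch of that pattern, using pthreads in place of pcpu_lock and a hand-rolled singly linked list in place of the kernel's list_head; the names (request_extend, balance_worker, destroy_chunk) and the main() driver are illustrative only, not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
        struct chunk *next;     /* link on the pending list */
        int queued;             /* plays !list_empty(&chunk->map_extend_list) */
        int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays pcpu_lock */
static struct chunk *pending;   /* plays pcpu_map_extend_chunks */

/* queue a chunk for async service, as pcpu_need_to_extend() now does */
static void request_extend(struct chunk *c)
{
        pthread_mutex_lock(&lock);
        if (!c->queued) {
                c->next = pending;
                pending = c;
                c->queued = 1;
        }
        pthread_mutex_unlock(&lock);
}

/* unlink a chunk from the pending list; caller must hold the lock */
static void unlink_locked(struct chunk *c)
{
        struct chunk **pp;

        for (pp = &pending; *pp; pp = &(*pp)->next) {
                if (*pp == c) {
                        *pp = c->next;
                        c->queued = 0;
                        break;
                }
        }
}

/* drain loop, mirroring the new do/while in pcpu_balance_workfn() */
static void *balance_worker(void *arg)
{
        struct chunk *c;

        do {
                pthread_mutex_lock(&lock);
                c = pending;    /* like list_first_entry_or_null() */
                if (c)
                        unlink_locked(c);
                pthread_mutex_unlock(&lock);

                if (c)
                        printf("extending map of chunk %d\n", c->id);
        } while (c);
        return NULL;
}

/*
 * Destruction takes the same lock and unlinks first, so the worker can
 * never pick up a chunk that is about to be freed -- the invariant the
 * old per-chunk work_struct could not provide.
 */
static void destroy_chunk(struct chunk *c)
{
        pthread_mutex_lock(&lock);
        if (c->queued)
                unlink_locked(c);
        pthread_mutex_unlock(&lock);
        free(c);
}

int main(void)
{
        pthread_t worker;
        struct chunk *a = calloc(1, sizeof(*a));
        struct chunk *b = calloc(1, sizeof(*b));

        a->id = 1;
        b->id = 2;
        request_extend(a);
        request_extend(b);
        destroy_chunk(b);       /* safely dequeued before the worker runs */

        pthread_create(&worker, NULL, balance_worker, NULL);
        pthread_join(worker, NULL);
        destroy_chunk(a);
        return 0;
}

In the actual patch, the drain loop additionally re-evaluates pcpu_need_to_extend() under pcpu_lock before extending, since a synchronous allocation may already have extended the chunk's map by the time the worker runs.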

File tree

1 file changed

+44 -29 lines changed

mm/percpu.c

Lines changed: 44 additions & 29 deletions
@@ -112,7 +112,7 @@ struct pcpu_chunk {
         int                     map_used;       /* # of map entries used before the sentry */
         int                     map_alloc;      /* # of map entries allocated */
         int                     *map;           /* allocation map */
-        struct work_struct      map_extend_work;/* async ->map[] extension */
+        struct list_head        map_extend_list;/* on pcpu_map_extend_chunks */
 
         void                    *data;          /* chunk data */
         int                     first_free;     /* no free below this */
@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
 static DEFINE_SPINLOCK(pcpu_lock);      /* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex);  /* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex);  /* chunk create/destroy, [de]pop, map ext */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
 /*
  * The number of empty populated pages, protected by pcpu_lock.  The
  * reserved chunk doesn't contribute to the count.
@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
         int margin, new_alloc;
 
+        lockdep_assert_held(&pcpu_lock);
+
         if (is_atomic) {
                 margin = 3;
 
                 if (chunk->map_alloc <
-                    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
-                    pcpu_async_enabled)
-                        schedule_work(&chunk->map_extend_work);
+                    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+                        if (list_empty(&chunk->map_extend_list)) {
+                                list_add_tail(&chunk->map_extend_list,
+                                              &pcpu_map_extend_chunks);
+                                pcpu_schedule_balance_work();
+                        }
+                }
         } else {
                 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
         }
@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
         size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
         unsigned long flags;
 
+        lockdep_assert_held(&pcpu_alloc_mutex);
+
         new = pcpu_mem_zalloc(new_size);
         if (!new)
                 return -ENOMEM;
@@ -467,20 +478,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
         return 0;
 }
 
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
-        struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
-                                                map_extend_work);
-        int new_alloc;
-
-        spin_lock_irq(&pcpu_lock);
-        new_alloc = pcpu_need_to_extend(chunk, false);
-        spin_unlock_irq(&pcpu_lock);
-
-        if (new_alloc)
-                pcpu_extend_area_map(chunk, new_alloc);
-}
-
 /**
  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  * @chunk: chunk the candidate area belongs to
@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
         chunk->map_used = 1;
 
         INIT_LIST_HEAD(&chunk->list);
-        INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+        INIT_LIST_HEAD(&chunk->map_extend_list);
         chunk->free_size = pcpu_unit_size;
         chunk->contig_hint = pcpu_unit_size;
 
@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                 return NULL;
         }
 
+        if (!is_atomic)
+                mutex_lock(&pcpu_alloc_mutex);
+
         spin_lock_irqsave(&pcpu_lock, flags);
 
         /* serve reserved allocations from the reserved chunk if available */
@@ -967,12 +967,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
         if (is_atomic)
                 goto fail;
 
-        mutex_lock(&pcpu_alloc_mutex);
-
         if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
                 chunk = pcpu_create_chunk();
                 if (!chunk) {
-                        mutex_unlock(&pcpu_alloc_mutex);
                         err = "failed to allocate new chunk";
                         goto fail;
                 }
@@ -983,7 +980,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                 spin_lock_irqsave(&pcpu_lock, flags);
         }
 
-        mutex_unlock(&pcpu_alloc_mutex);
         goto restart;
 
 area_found:
@@ -993,8 +989,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
         if (!is_atomic) {
                 int page_start, page_end, rs, re;
 
-                mutex_lock(&pcpu_alloc_mutex);
-
                 page_start = PFN_DOWN(off);
                 page_end = PFN_UP(off + size);
 
@@ -1005,7 +999,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
                 spin_lock_irqsave(&pcpu_lock, flags);
                 if (ret) {
-                        mutex_unlock(&pcpu_alloc_mutex);
                         pcpu_free_area(chunk, off, &occ_pages);
                         err = "failed to populate";
                         goto fail_unlock;
@@ -1045,6 +1038,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                 /* see the flag handling in pcpu_blance_workfn() */
                 pcpu_atomic_alloc_failed = true;
                 pcpu_schedule_balance_work();
+        } else {
+                mutex_unlock(&pcpu_alloc_mutex);
         }
         return NULL;
 }
@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
                 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
                         continue;
 
+                list_del_init(&chunk->map_extend_list);
                 list_move(&chunk->list, &to_free);
         }
 
@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
                 pcpu_destroy_chunk(chunk);
         }
 
+        /* service chunks which requested async area map extension */
+        do {
+                int new_alloc = 0;
+
+                spin_lock_irq(&pcpu_lock);
+
+                chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+                                        struct pcpu_chunk, map_extend_list);
+                if (chunk) {
+                        list_del_init(&chunk->map_extend_list);
+                        new_alloc = pcpu_need_to_extend(chunk, false);
+                }
+
+                spin_unlock_irq(&pcpu_lock);
+
+                if (new_alloc)
+                        pcpu_extend_area_map(chunk, new_alloc);
+        } while (chunk);
+
         /*
          * Ensure there are certain number of free populated pages for
          * atomic allocs.  Fill up from the most packed so that atomic
@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
          */
         schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
         INIT_LIST_HEAD(&schunk->list);
-        INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+        INIT_LIST_HEAD(&schunk->map_extend_list);
         schunk->base_addr = base_addr;
         schunk->map = smap;
         schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         if (dyn_size) {
                 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
                 INIT_LIST_HEAD(&dchunk->list);
-                INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+                INIT_LIST_HEAD(&dchunk->map_extend_list);
                 dchunk->base_addr = base_addr;
                 dchunk->map = dmap;
                 dchunk->map_alloc = ARRAY_SIZE(dmap);
