
Commit 815b196

Merge tag 'mm-hotfixes-stable-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull mm hotfixes from Andrew Morton:
 "Fixups for various recently-added and longer-term issues and a few
  minor tweaks:

   - fixes for material merged during this merge window

   - cc:stable fixes for more longstanding issues

   - minor mailmap and MAINTAINERS updates"

* tag 'mm-hotfixes-stable-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/oom_kill.c: fix vm_oom_kill_table[] ifdeffery
  x86/kexec: fix memory leak of elf header buffer
  mm/memremap: fix missing call to untrack_pfn() in pagemap_range()
  mm: page_isolation: use compound_nr() correctly in isolate_single_pageblock()
  mm: hugetlb_vmemmap: fix CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON
  MAINTAINERS: add maintainer information for z3fold
  mailmap: update Josh Poimboeuf's email
2 parents: e17fee8 + a19cad0

7 files changed: 50 additions, 35 deletions

.mailmap

Lines changed: 2 additions & 0 deletions
@@ -201,6 +201,8 @@ Jordan Crouse <[email protected]> <[email protected]>
 [three unchanged .mailmap entries not captured in this view]
+Josh Poimboeuf <[email protected]> <[email protected]>
+Josh Poimboeuf <[email protected]> <[email protected]>
 Juha Yrjola <at solidboot.com>
 Juha Yrjola <[email protected]>
 Juha Yrjola <[email protected]>

MAINTAINERS

Lines changed: 7 additions & 0 deletions
@@ -22002,6 +22002,13 @@ L: [email protected]
 S: Maintained
 F: mm/zbud.c
 
+Z3FOLD COMPRESSED PAGE ALLOCATOR
+M: Vitaly Wool <[email protected]>
+R: Miaohe Lin <[email protected]>
+L: [email protected]
+S: Maintained
+F: mm/z3fold.c
+
 ZD1211RW WIRELESS DRIVER
 M: Ulrich Kunitz <[email protected]>

arch/x86/kernel/machine_kexec_64.c

Lines changed: 9 additions & 3 deletions
@@ -376,9 +376,6 @@ void machine_kexec(struct kimage *image)
 #ifdef CONFIG_KEXEC_FILE
 void *arch_kexec_kernel_image_load(struct kimage *image)
 {
-        vfree(image->elf_headers);
-        image->elf_headers = NULL;
-
         if (!image->fops || !image->fops->load)
                 return ERR_PTR(-ENOEXEC);
 
@@ -514,6 +511,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                         (int)ELF64_R_TYPE(rel[i].r_info), value);
                 return -ENOEXEC;
         }
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+        vfree(image->elf_headers);
+        image->elf_headers = NULL;
+        image->elf_headers_sz = 0;
+
+        return kexec_image_post_load_cleanup_default(image);
+}
 #endif /* CONFIG_KEXEC_FILE */
 
 static int
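Reading the two hunks together: the ELF header buffer was previously freed only at the top of arch_kexec_kernel_image_load(), i.e. only when a later load happened to replace the image, which is the leak named in the merge summary. Below is an annotated copy of the hook the patch adds; the comments are editorial assumptions drawn from the diff, not part of the patch.

/* x86 override of the post-load cleanup hook, as added above. */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
        /* Free the arch-private ELF header buffer together with the
         * image it belongs to, instead of waiting for a subsequent
         * arch_kexec_kernel_image_load() call that may never come. */
        vfree(image->elf_headers);
        image->elf_headers = NULL;
        image->elf_headers_sz = 0;

        /* Still run whatever the generic default cleanup does. */
        return kexec_image_post_load_cleanup_default(image);
}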

mm/hugetlb_vmemmap.c

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
 static enum vmemmap_optimize_mode vmemmap_optimize_mode =
-        IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);
+        IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 
 static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
 {
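The one-line change matters because IS_ENABLED() never errors out on an unknown symbol: the Kconfig option was renamed to the OPTIMIZE_VMEMMAP form (visible in the DEFINE_STATIC_KEY_MAYBE() context line above), and IS_ENABLED() of the stale FREE_VMEMMAP name silently evaluates to 0, so the "default on" build option had no effect. A minimal userspace model of that failure mode, assuming only the usual defined-to-1-or-undefined Kconfig convention; this mimics IS_ENABLED() in spirit only, not the kernel's actual macro in include/linux/kconfig.h:

/* Model: a config macro is either defined to 1 (enabled) or not defined
 * at all. A helper that maps "not defined" to 0 also maps a *renamed*
 * option to 0 without any compiler diagnostic. CONFIG_OLD_NAME and
 * CONFIG_NEW_NAME are hypothetical names for illustration. */
#include <stdio.h>

#define CONFIG_NEW_NAME 1          /* the option, under its current name */
/* CONFIG_OLD_NAME is intentionally left undefined (it was renamed away). */

#ifdef CONFIG_OLD_NAME
#define OLD_ENABLED 1
#else
#define OLD_ENABLED 0
#endif

#ifdef CONFIG_NEW_NAME
#define NEW_ENABLED 1
#else
#define NEW_ENABLED 0
#endif

int main(void)
{
        /* Prints 0 then 1: the stale name quietly disables the feature. */
        printf("old name: %d\n", OLD_ENABLED);
        printf("new name: %d\n", NEW_ENABLED);
        return 0;
}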

mm/memremap.c

Lines changed: 1 addition & 1 deletion
@@ -214,7 +214,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 
         if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
                 error = -EINVAL;
-                goto err_pfn_remap;
+                goto err_kasan;
         }
 
         mem_hotplug_begin();
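The relabelled goto is an instance of the usual error-unwind ladder: each label undoes the setup steps that completed before the failure, so jumping to a label too far down the ladder skips an undo step; per the merge summary, the skipped step here was untrack_pfn(). A standalone sketch of the idiom follows, with hypothetical setup_a/setup_b/check_c names rather than the real pagemap_range() code:

/* Sketch of the goto error-unwind idiom. A failure after setup_b() must
 * jump to err_b so that undo_b() (the untrack_pfn() analogue) still runs. */
#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }    /* think: track_pfn_remap()  */
static int check_c(void) { return -1; }   /* the check that fails here */
static void undo_b(void) { puts("undo_b (untrack)"); }
static void undo_a(void) { puts("undo_a"); }

static int bring_up(void)
{
        int error;

        error = setup_a();
        if (error)
                return error;

        error = setup_b();
        if (error)
                goto err_a;

        error = check_c();
        if (error)
                goto err_b;     /* jumping to err_a here would leak b's state */

        return 0;

err_b:
        undo_b();
err_a:
        undo_a();
        return error;
}

int main(void)
{
        return bring_up() ? 1 : 0;
}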

mm/oom_kill.c

Lines changed: 29 additions & 29 deletions
@@ -56,35 +56,6 @@ static int sysctl_panic_on_oom;
 static int sysctl_oom_kill_allocating_task;
 static int sysctl_oom_dump_tasks = 1;
 
-#ifdef CONFIG_SYSCTL
-static struct ctl_table vm_oom_kill_table[] = {
-        {
-                .procname       = "panic_on_oom",
-                .data           = &sysctl_panic_on_oom,
-                .maxlen         = sizeof(sysctl_panic_on_oom),
-                .mode           = 0644,
-                .proc_handler   = proc_dointvec_minmax,
-                .extra1         = SYSCTL_ZERO,
-                .extra2         = SYSCTL_TWO,
-        },
-        {
-                .procname       = "oom_kill_allocating_task",
-                .data           = &sysctl_oom_kill_allocating_task,
-                .maxlen         = sizeof(sysctl_oom_kill_allocating_task),
-                .mode           = 0644,
-                .proc_handler   = proc_dointvec,
-        },
-        {
-                .procname       = "oom_dump_tasks",
-                .data           = &sysctl_oom_dump_tasks,
-                .maxlen         = sizeof(sysctl_oom_dump_tasks),
-                .mode           = 0644,
-                .proc_handler   = proc_dointvec,
-        },
-        {}
-};
-#endif
-
 /*
  * Serializes oom killer invocations (out_of_memory()) from all contexts to
  * prevent from over eager oom killing (e.g. when the oom killer is invoked
@@ -729,6 +700,35 @@ static void queue_oom_reaper(struct task_struct *tsk)
         add_timer(&tsk->oom_reaper_timer);
 }
 
+#ifdef CONFIG_SYSCTL
+static struct ctl_table vm_oom_kill_table[] = {
+        {
+                .procname       = "panic_on_oom",
+                .data           = &sysctl_panic_on_oom,
+                .maxlen         = sizeof(sysctl_panic_on_oom),
+                .mode           = 0644,
+                .proc_handler   = proc_dointvec_minmax,
+                .extra1         = SYSCTL_ZERO,
+                .extra2         = SYSCTL_TWO,
+        },
+        {
+                .procname       = "oom_kill_allocating_task",
+                .data           = &sysctl_oom_kill_allocating_task,
+                .maxlen         = sizeof(sysctl_oom_kill_allocating_task),
+                .mode           = 0644,
+                .proc_handler   = proc_dointvec,
+        },
+        {
+                .procname       = "oom_dump_tasks",
+                .data           = &sysctl_oom_dump_tasks,
+                .maxlen         = sizeof(sysctl_oom_dump_tasks),
+                .mode           = 0644,
+                .proc_handler   = proc_dointvec,
+        },
+        {}
+};
+#endif
+
 static int __init oom_init(void)
 {
         oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
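The move changes no table contents; it only relocates the #ifdef CONFIG_SYSCTL block down next to oom_init(), the code that consumes it. For context, a ctl_table like this is typically wired up with register_sysctl_init() from an init function; the sketch below illustrates that usual pattern and is an assumption about the surrounding code, since the registration call is not part of the hunks shown here, and the oom_sysctl_init() helper name is hypothetical.

/* Hedged sketch of the usual consumer of such a table: an init function
 * that registers it under /proc/sys/vm. Illustration of the pattern,
 * not a quote of mm/oom_kill.c. */
#ifdef CONFIG_SYSCTL
static void __init oom_sysctl_init(void)
{
        /* Exposes /proc/sys/vm/panic_on_oom and friends. */
        register_sysctl_init("vm", vm_oom_kill_table);
}
#else
static inline void oom_sysctl_init(void) { }
#endif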

mm/page_isolation.c

Lines changed: 1 addition & 1 deletion
@@ -385,9 +385,9 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
          * above do the rest. If migration is not possible, just fail.
          */
         if (PageCompound(page)) {
-                unsigned long nr_pages = compound_nr(page);
                 struct page *head = compound_head(page);
                 unsigned long head_pfn = page_to_pfn(head);
+                unsigned long nr_pages = compound_nr(head);
 
                 if (head_pfn + nr_pages <= boundary_pfn) {
                         pfn = head_pfn + nr_pages;