Skip to content

Commit 18bf340

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "15 fixes"

* emailed patches from Andrew Morton <[email protected]>:
  tools/vm: fix cross-compile build
  coredump: fix null pointer dereference on coredump
  mm: shmem: disable interrupt when acquiring info->lock in userfaultfd_copy path
  shmem: fix possible deadlocks on shmlock_user_lock
  vmalloc: fix remap_vmalloc_range() bounds checks
  mm/shmem: fix build without THP
  mm/ksm: fix NULL pointer dereference when KSM zero page is enabled
  tools/build: tweak unused value workaround
  checkpatch: fix a typo in the regex for $allocFunctions
  mm, gup: return EINTR when gup is interrupted by fatal signals
  mm/hugetlb: fix a addressing exception caused by huge_pte_offset
  MAINTAINERS: add an entry for kfifo
  mm/userfaultfd: disable userfaultfd-wp on x86_32
  slub: avoid redzone when choosing freepointer location
  sh: fix build error in mm/init.c
2 parents 8160a56 + cf01699 commit 18bf340

File tree

16 files changed

+70
-27
lines changed

16 files changed

+70
-27
lines changed

MAINTAINERS

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9417,6 +9417,13 @@ F: include/linux/keyctl.h
94179417
F: include/uapi/linux/keyctl.h
94189418
F: security/keys/
94199419

9420+
KFIFO
9421+
M: Stefani Seibold <[email protected]>
9422+
S: Maintained
9423+
F: include/linux/kfifo.h
9424+
F: lib/kfifo.c
9425+
F: samples/kfifo/
9426+
94209427
KGDB / KDB /debug_core
94219428
M: Jason Wessel <[email protected]>
94229429
M: Daniel Thompson <[email protected]>

arch/sh/mm/init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -412,7 +412,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
412412
unsigned long nr_pages = size >> PAGE_SHIFT;
413413
int ret;
414414

415-
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)
415+
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
416416
return -EINVAL;
417417

418418
/* We only have ZONE_NORMAL, so this is easy.. */

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ config X86
149149
select HAVE_ARCH_TRACEHOOK
150150
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
151151
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
152-
select HAVE_ARCH_USERFAULTFD_WP if USERFAULTFD
152+
select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD
153153
select HAVE_ARCH_VMAP_STACK if X86_64
154154
select HAVE_ARCH_WITHIN_STACK_FRAMES
155155
select HAVE_ASM_MODVERSIONS

fs/coredump.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -211,6 +211,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
211211
return -ENOMEM;
212212
(*argv)[(*argc)++] = 0;
213213
++pat_ptr;
214+
if (!(*pat_ptr))
215+
return -ENOMEM;
214216
}
215217

216218
/* Repeat as long as we have more pattern to process and more output

fs/proc/vmcore.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
266266
if (start < offset + dump->size) {
267267
tsz = min(offset + (u64)dump->size - start, (u64)size);
268268
buf = dump->buf + start - offset;
269-
if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
269+
if (remap_vmalloc_range_partial(vma, dst, buf, 0,
270+
tsz)) {
270271
ret = -EFAULT;
271272
goto out_unlock;
272273
}
@@ -624,7 +625,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
624625
tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
625626
kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
626627
if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
627-
kaddr, tsz))
628+
kaddr, 0, tsz))
628629
goto fail;
629630

630631
size -= tsz;

include/linux/vmalloc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ extern void vunmap(const void *addr);
137137

138138
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
139139
unsigned long uaddr, void *kaddr,
140-
unsigned long size);
140+
unsigned long pgoff, unsigned long size);
141141

142142
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
143143
unsigned long pgoff);

mm/gup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1088,7 +1088,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
10881088
* potentially allocating memory.
10891089
*/
10901090
if (fatal_signal_pending(current)) {
1091-
ret = -ERESTARTSYS;
1091+
ret = -EINTR;
10921092
goto out;
10931093
}
10941094
cond_resched();

mm/hugetlb.c

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5365,8 +5365,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
53655365
{
53665366
pgd_t *pgd;
53675367
p4d_t *p4d;
5368-
pud_t *pud;
5369-
pmd_t *pmd;
5368+
pud_t *pud, pud_entry;
5369+
pmd_t *pmd, pmd_entry;
53705370

53715371
pgd = pgd_offset(mm, addr);
53725372
if (!pgd_present(*pgd))
@@ -5376,17 +5376,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
53765376
return NULL;
53775377

53785378
pud = pud_offset(p4d, addr);
5379-
if (sz != PUD_SIZE && pud_none(*pud))
5379+
pud_entry = READ_ONCE(*pud);
5380+
if (sz != PUD_SIZE && pud_none(pud_entry))
53805381
return NULL;
53815382
/* hugepage or swap? */
5382-
if (pud_huge(*pud) || !pud_present(*pud))
5383+
if (pud_huge(pud_entry) || !pud_present(pud_entry))
53835384
return (pte_t *)pud;
53845385

53855386
pmd = pmd_offset(pud, addr);
5386-
if (sz != PMD_SIZE && pmd_none(*pmd))
5387+
pmd_entry = READ_ONCE(*pmd);
5388+
if (sz != PMD_SIZE && pmd_none(pmd_entry))
53875389
return NULL;
53885390
/* hugepage or swap? */
5389-
if (pmd_huge(*pmd) || !pmd_present(*pmd))
5391+
if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
53905392
return (pte_t *)pmd;
53915393

53925394
return NULL;

mm/ksm.c

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2112,8 +2112,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
21122112

21132113
down_read(&mm->mmap_sem);
21142114
vma = find_mergeable_vma(mm, rmap_item->address);
2115-
err = try_to_merge_one_page(vma, page,
2116-
ZERO_PAGE(rmap_item->address));
2115+
if (vma) {
2116+
err = try_to_merge_one_page(vma, page,
2117+
ZERO_PAGE(rmap_item->address));
2118+
} else {
2119+
/*
2120+
* If the vma is out of date, we do not need to
2121+
* continue.
2122+
*/
2123+
err = 0;
2124+
}
21172125
up_read(&mm->mmap_sem);
21182126
/*
21192127
* In case of failure, the page was not really empty, so we

mm/shmem.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -952,7 +952,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
952952
VM_BUG_ON_PAGE(PageWriteback(page), page);
953953
if (shmem_punch_compound(page, start, end))
954954
truncate_inode_page(mapping, page);
955-
else {
955+
else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
956956
/* Wipe the page and don't get stuck */
957957
clear_highpage(page);
958958
flush_dcache_page(page);
@@ -2179,7 +2179,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
21792179
struct shmem_inode_info *info = SHMEM_I(inode);
21802180
int retval = -ENOMEM;
21812181

2182-
spin_lock_irq(&info->lock);
2182+
/*
2183+
* What serializes the accesses to info->flags?
2184+
* ipc_lock_object() when called from shmctl_do_lock(),
2185+
* no serialization needed when called from shm_destroy().
2186+
*/
21832187
if (lock && !(info->flags & VM_LOCKED)) {
21842188
if (!user_shm_lock(inode->i_size, user))
21852189
goto out_nomem;
@@ -2194,7 +2198,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
21942198
retval = 0;
21952199

21962200
out_nomem:
2197-
spin_unlock_irq(&info->lock);
21982201
return retval;
21992202
}
22002203

@@ -2399,11 +2402,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23992402

24002403
lru_cache_add_anon(page);
24012404

2402-
spin_lock(&info->lock);
2405+
spin_lock_irq(&info->lock);
24032406
info->alloced++;
24042407
inode->i_blocks += BLOCKS_PER_PAGE;
24052408
shmem_recalc_inode(inode);
2406-
spin_unlock(&info->lock);
2409+
spin_unlock_irq(&info->lock);
24072410

24082411
inc_mm_counter(dst_mm, mm_counter_file(page));
24092412
page_add_file_rmap(page, false);

mm/slub.c

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3533,6 +3533,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
35333533
{
35343534
slab_flags_t flags = s->flags;
35353535
unsigned int size = s->object_size;
3536+
unsigned int freepointer_area;
35363537
unsigned int order;
35373538

35383539
/*
@@ -3541,6 +3542,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
35413542
* the possible location of the free pointer.
35423543
*/
35433544
size = ALIGN(size, sizeof(void *));
3545+
/*
3546+
* This is the area of the object where a freepointer can be
3547+
* safely written. If redzoning adds more to the inuse size, we
3548+
* can't use that portion for writing the freepointer, so
3549+
* s->offset must be limited within this for the general case.
3550+
*/
3551+
freepointer_area = size;
35443552

35453553
#ifdef CONFIG_SLUB_DEBUG
35463554
/*
@@ -3582,13 +3590,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
35823590
*/
35833591
s->offset = size;
35843592
size += sizeof(void *);
3585-
} else if (size > sizeof(void *)) {
3593+
} else if (freepointer_area > sizeof(void *)) {
35863594
/*
35873595
* Store freelist pointer near middle of object to keep
35883596
* it away from the edges of the object to avoid small
35893597
* sized over/underflows from neighboring allocations.
35903598
*/
3591-
s->offset = ALIGN(size / 2, sizeof(void *));
3599+
s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
35923600
}
35933601

35943602
#ifdef CONFIG_SLUB_DEBUG

mm/vmalloc.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include <linux/llist.h>
3535
#include <linux/bitops.h>
3636
#include <linux/rbtree_augmented.h>
37+
#include <linux/overflow.h>
3738

3839
#include <linux/uaccess.h>
3940
#include <asm/tlbflush.h>
@@ -3054,6 +3055,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
30543055
* @vma: vma to cover
30553056
* @uaddr: target user address to start at
30563057
* @kaddr: virtual address of vmalloc kernel memory
3058+
* @pgoff: offset from @kaddr to start at
30573059
* @size: size of map area
30583060
*
30593061
* Returns: 0 for success, -Exxx on failure
@@ -3066,9 +3068,15 @@ long vwrite(char *buf, char *addr, unsigned long count)
30663068
* Similar to remap_pfn_range() (see mm/memory.c)
30673069
*/
30683070
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3069-
void *kaddr, unsigned long size)
3071+
void *kaddr, unsigned long pgoff,
3072+
unsigned long size)
30703073
{
30713074
struct vm_struct *area;
3075+
unsigned long off;
3076+
unsigned long end_index;
3077+
3078+
if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3079+
return -EINVAL;
30723080

30733081
size = PAGE_ALIGN(size);
30743082

@@ -3082,8 +3090,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
30823090
if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
30833091
return -EINVAL;
30843092

3085-
if (kaddr + size > area->addr + get_vm_area_size(area))
3093+
if (check_add_overflow(size, off, &end_index) ||
3094+
end_index > get_vm_area_size(area))
30863095
return -EINVAL;
3096+
kaddr += off;
30873097

30883098
do {
30893099
struct page *page = vmalloc_to_page(kaddr);
@@ -3122,7 +3132,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
31223132
unsigned long pgoff)
31233133
{
31243134
return remap_vmalloc_range_partial(vma, vma->vm_start,
3125-
addr + (pgoff << PAGE_SHIFT),
3135+
addr, pgoff,
31263136
vma->vm_end - vma->vm_start);
31273137
}
31283138
EXPORT_SYMBOL(remap_vmalloc_range);

samples/vfio-mdev/mdpy.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
418418
return -EINVAL;
419419

420420
return remap_vmalloc_range_partial(vma, vma->vm_start,
421-
mdev_state->memblk,
421+
mdev_state->memblk, 0,
422422
vma->vm_end - vma->vm_start);
423423
}
424424

scripts/checkpatch.pl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -479,7 +479,7 @@ sub hash_show_words {
479479
(?:kv|k|v)[czm]alloc(?:_node|_array)? |
480480
kstrdup(?:_const)? |
481481
kmemdup(?:_nul)?) |
482-
(?:\w+)?alloc_skb(?:ip_align)? |
482+
(?:\w+)?alloc_skb(?:_ip_align)? |
483483
# dev_alloc_skb/netdev_alloc_skb, et al
484484
dma_alloc_coherent
485485
)};

tools/build/feature/test-sync-compare-and-swap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ int main(int argc, char *argv[])
77
{
88
uint64_t old, new = argc;
99

10-
argv = argv;
10+
(void)argv;
1111
do {
1212
old = __sync_val_compare_and_swap(&x, 0, 0);
1313
} while (!__sync_bool_compare_and_swap(&x, old, new));

tools/vm/Makefile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
# SPDX-License-Identifier: GPL-2.0
22
# Makefile for vm tools
33
#
4+
include ../scripts/Makefile.include
5+
46
TARGETS=page-types slabinfo page_owner_sort
57

68
LIB_DIR = ../lib/api

0 commit comments

Comments
 (0)