Commit 553c89e
Merge tag 'mm-hotfixes-stable-2024-12-07-22-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "24 hotfixes. 17 are cc:stable. 15 are MM and 9 are non-MM.

  The usual bunch of singletons - please see the relevant changelogs
  for details"

* tag 'mm-hotfixes-stable-2024-12-07-22-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (24 commits)
  iio: magnetometer: yas530: use signed integer type for clamp limits
  sched/numa: fix memory leak due to the overwritten vma->numab_state
  mm/damon: fix order of arguments in damos_before_apply tracepoint
  lib: stackinit: hide never-taken branch from compiler
  mm/filemap: don't call folio_test_locked() without a reference in next_uptodate_folio()
  scatterlist: fix incorrect func name in kernel-doc
  mm: correct typo in MMAP_STATE() macro
  mm: respect mmap hint address when aligning for THP
  mm: memcg: declare do_memsw_account inline
  mm/codetag: swap tags when migrate pages
  ocfs2: update seq_file index in ocfs2_dlm_seq_next
  stackdepot: fix stack_depot_save_flags() in NMI context
  mm: open-code page_folio() in dump_page()
  mm: open-code PageTail in folio_flags() and const_folio_flags()
  mm: fix vrealloc()'s KASAN poisoning logic
  Revert "readahead: properly shorten readahead when falling back to do_page_cache_ra()"
  selftests/damon: add _damon_sysfs.py to TEST_FILES
  selftest: hugetlb_dio: fix test naming
  ocfs2: free inode when ocfs2_get_init_inode() fails
  nilfs2: fix potential out-of-bounds memory access in nilfs_find_entry()
  ...
2 parents 62b5a46 + f1ee548 commit 553c89e

27 files changed: 128 additions, 88 deletions

drivers/iio/magnetometer/yamaha-yas530.c

Lines changed: 7 additions & 6 deletions
@@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
 	u8 data[8];
 	u16 xy1y2[3];
 	s32 h[3], s[3];
+	int half_range = BIT(13);
 	int i, ret;
 
 	mutex_lock(&yas5xx->lock);
@@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
 	/* The second version of YAS537 needs to include calibration coefficients */
 	if (yas5xx->version == YAS537_VERSION_1) {
 		for (i = 0; i < 3; i++)
-			s[i] = xy1y2[i] - BIT(13);
-		h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
-		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
-		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
+			s[i] = xy1y2[i] - half_range;
+		h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range;
+		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range;
+		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range;
 		for (i = 0; i < 3; i++) {
-			clamp_val(h[i], -BIT(13), BIT(13) - 1);
-			xy1y2[i] = h[i] + BIT(13);
+			h[i] = clamp(h[i], -half_range, half_range - 1);
+			xy1y2[i] = h[i] + half_range;
 		}
 	}
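Editor's note, not part of the commit: the old code passed the unsigned BIT(13) directly as a clamp limit and discarded the value returned by clamp_val(), so out-of-range readings were never actually clamped; the fix uses a signed limit and assigns the clamp result. A minimal, standalone C sketch of clamping a signed reading into a signed half-range window (names and values are illustrative only):

#include <stdio.h>

/* Illustrative only: clamp a signed sensor value into [lo, hi]. */
static int clamp_signed(int val, int lo, int hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}

int main(void)
{
	int half_range = 1 << 13;          /* 8192, kept as a signed int */
	int raw = -20000;                  /* hypothetical out-of-range reading */
	int clamped = clamp_signed(raw, -half_range, half_range - 1);

	/* The result must be assigned; a bare clamp_signed(raw, ...) would be a no-op. */
	printf("raw=%d clamped=%d offset=%d\n", raw, clamped, clamped + half_range);
	return 0;
}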

fs/nilfs2/dir.c

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode)
  */
 static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
-	unsigned int last_byte = inode->i_size;
+	u64 last_byte = inode->i_size;
 
 	last_byte -= page_nr << PAGE_SHIFT;
 	if (last_byte > PAGE_SIZE)
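Editorial aside, not from the commit: the change widens last_byte so the 64-bit inode size is no longer truncated to 32 bits before the page-offset subtraction. A standalone sketch of that truncation class of bug, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t i_size = 5368709120LL;        /* hypothetical 5 GiB file size */
	uint32_t narrow = (uint32_t)i_size;   /* wraps to i_size mod 2^32 = 1 GiB */
	uint64_t wide = (uint64_t)i_size;     /* keeps the full value */

	printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}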

fs/ocfs2/dlmglue.c

Lines changed: 1 addition & 0 deletions
@@ -3110,6 +3110,7 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
 	struct ocfs2_lock_res *iter = v;
 	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
 
+	(*pos)++;
 	spin_lock(&ocfs2_dlm_tracking_lock);
 	iter = ocfs2_dlm_next_res(iter, priv);
 	list_del_init(&dummy->l_debug_list);

fs/ocfs2/namei.c

Lines changed: 3 additions & 1 deletion
@@ -200,8 +200,10 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
 	mode = mode_strip_sgid(&nop_mnt_idmap, dir, mode);
 	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
 	status = dquot_initialize(inode);
-	if (status)
+	if (status) {
+		iput(inode);
 		return ERR_PTR(status);
+	}
 
 	return inode;
 }

fs/proc/vmcore.c

Lines changed: 28 additions & 28 deletions
@@ -414,6 +414,34 @@ static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
 	return __read_vmcore(iter, &iocb->ki_pos);
 }
 
+/**
+ * vmcore_alloc_buf - allocate buffer in vmalloc memory
+ * @size: size of buffer
+ *
+ * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
+ * the buffer to user-space by means of remap_vmalloc_range().
+ *
+ * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
+ * disabled and there's no need to allow users to mmap the buffer.
+ */
+static inline char *vmcore_alloc_buf(size_t size)
+{
+#ifdef CONFIG_MMU
+	return vmalloc_user(size);
+#else
+	return vzalloc(size);
+#endif
+}
+
+/*
+ * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
+ * essential for mmap_vmcore() in order to map physically
+ * non-contiguous objects (ELF header, ELF note segment and memory
+ * regions in the 1st kernel pointed to by PT_LOAD entries) into
+ * virtually contiguous user-space in ELF layout.
+ */
+#ifdef CONFIG_MMU
+
 /*
  * The vmcore fault handler uses the page cache and fills data using the
  * standard __read_vmcore() function.
@@ -457,34 +485,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 #endif
 }
 
-/**
- * vmcore_alloc_buf - allocate buffer in vmalloc memory
- * @size: size of buffer
- *
- * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
- * the buffer to user-space by means of remap_vmalloc_range().
- *
- * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
- * disabled and there's no need to allow users to mmap the buffer.
- */
-static inline char *vmcore_alloc_buf(size_t size)
-{
-#ifdef CONFIG_MMU
-	return vmalloc_user(size);
-#else
-	return vzalloc(size);
-#endif
-}
-
-/*
- * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
- * essential for mmap_vmcore() in order to map physically
- * non-contiguous objects (ELF header, ELF note segment and memory
- * regions in the 1st kernel pointed to by PT_LOAD entries) into
- * virtually contiguous user-space in ELF layout.
- */
-#ifdef CONFIG_MMU
-
 static const struct vm_operations_struct vmcore_mmap_ops = {
 	.fault = mmap_vmcore_fault,
 };

include/linux/page-flags.h

Lines changed: 2 additions & 2 deletions
@@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
 {
 	const struct page *page = &folio->page;
 
-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
 	return &page[n].flags;
 }
@@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
 {
 	struct page *page = &folio->page;
 
-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
 	return &page[n].flags;
 }
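For readers unfamiliar with the open-coded check: a tail page is marked by setting bit 0 of page->compound_head, which otherwise holds an aligned pointer to the head page (hence the (struct folio *)(head - 1) in the mm/debug.c change below), so compound_head & 1 is equivalent to PageTail() without pulling in the folio helpers. A standalone C sketch of the same low-bit tagged-pointer idea; the types and names here are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	long payload;
};

/* Encode "this entry refers to a head node" by setting bit 0 of an aligned pointer. */
static uintptr_t make_tail_ref(struct node *head)
{
	return (uintptr_t)head | 1;
}

static int ref_is_tail(uintptr_t ref)
{
	return ref & 1;          /* the open-coded "PageTail" test */
}

static struct node *ref_to_head(uintptr_t ref)
{
	return (struct node *)(ref - 1);
}

int main(void)
{
	struct node head = { .payload = 42 };
	uintptr_t ref = make_tail_ref(&head);

	assert(ref_is_tail(ref));
	printf("payload via tagged ref: %ld\n", ref_to_head(ref)->payload);
	return 0;
}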

include/linux/pgalloc_tag.h

Lines changed: 2 additions & 2 deletions
@@ -231,7 +231,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
 }
 
 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
-void pgalloc_tag_copy(struct folio *new, struct folio *old);
+void pgalloc_tag_swap(struct folio *new, struct folio *old);
 
 void __init alloc_tag_sec_init(void);
 
@@ -245,7 +245,7 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL
 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
 static inline void alloc_tag_sec_init(void) {}
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}
+static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */

include/linux/scatterlist.h

Lines changed: 1 addition & 1 deletion
@@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
 }
 
 /**
- * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
  * @sg: SG entry
  *
  * Description:

include/linux/stackdepot.h

Lines changed: 3 additions & 3 deletions
@@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; }
  * If the provided stack trace comes from the interrupt context, only the part
  * up to the interrupt entry is saved.
  *
- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
  * alloc_pages() cannot be used from the current context. Currently
  * this is the case for contexts where neither %GFP_ATOMIC nor
  * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; }
  */
 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 					    unsigned int nr_entries,
-					    gfp_t gfp_flags,
+					    gfp_t alloc_flags,
 					    depot_flags_t depot_flags);
 
 /**
@@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 * Return: Handle of the stack trace stored in depot, 0 on failure
 */
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
-				      unsigned int nr_entries, gfp_t gfp_flags);
+				      unsigned int nr_entries, gfp_t alloc_flags);
 
 /**
  * __stack_depot_get_stack_record - Get a pointer to a stack_record struct

include/trace/events/damon.h

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply,
 			unsigned int target_idx, struct damon_region *r,
 			unsigned int nr_regions, bool do_trace),
 
-	TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
+	TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
 
 	TP_CONDITION(do_trace),

kernel/sched/fair.c

Lines changed: 9 additions & 3 deletions
@@ -3399,10 +3399,16 @@ static void task_numa_work(struct callback_head *work)
 
 		/* Initialise new per-VMA NUMAB state. */
 		if (!vma->numab_state) {
-			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
-				GFP_KERNEL);
-			if (!vma->numab_state)
+			struct vma_numab_state *ptr;
+
+			ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+			if (!ptr)
+				continue;
+
+			if (cmpxchg(&vma->numab_state, NULL, ptr)) {
+				kfree(ptr);
 				continue;
+			}
 
 			vma->numab_state->start_scan_seq = mm->numa_scan_seq;
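Editor's illustration of the publish-or-free idiom used above (the kernel's cmpxchg() on vma->numab_state): each racing thread allocates privately, installs its pointer only if the field is still NULL, and frees its private copy if another thread won, so nothing leaks. A userspace sketch using C11 atomics instead of the kernel primitives; all names here are hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct state { int scan_seq; };

static _Atomic(struct state *) shared_state;

/* Returns the installed state, whether ours or one published by a racing thread. */
static struct state *get_or_init_state(void)
{
	struct state *cur = atomic_load(&shared_state);
	struct state *ours;

	if (cur)
		return cur;

	ours = calloc(1, sizeof(*ours));
	if (!ours)
		return NULL;

	/* Publish only if still NULL; on failure 'cur' is updated to the winner's pointer. */
	cur = NULL;
	if (!atomic_compare_exchange_strong(&shared_state, &cur, ours)) {
		free(ours);        /* lost the race: drop the private copy instead of leaking it */
		return cur;
	}
	return ours;
}

int main(void)
{
	struct state *s = get_or_init_state();

	if (s)
		printf("state at %p, scan_seq=%d\n", (void *)s, s->scan_seq);
	return 0;
}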

lib/alloc_tag.c

Lines changed: 22 additions & 14 deletions
@@ -189,26 +189,34 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
 	}
 }
 
-void pgalloc_tag_copy(struct folio *new, struct folio *old)
+void pgalloc_tag_swap(struct folio *new, struct folio *old)
 {
-	union pgtag_ref_handle handle;
-	union codetag_ref ref;
-	struct alloc_tag *tag;
+	union pgtag_ref_handle handle_old, handle_new;
+	union codetag_ref ref_old, ref_new;
+	struct alloc_tag *tag_old, *tag_new;
 
-	tag = pgalloc_tag_get(&old->page);
-	if (!tag)
+	tag_old = pgalloc_tag_get(&old->page);
+	if (!tag_old)
+		return;
+	tag_new = pgalloc_tag_get(&new->page);
+	if (!tag_new)
 		return;
 
-	if (!get_page_tag_ref(&new->page, &ref, &handle))
+	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
 		return;
+	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
+		put_page_tag_ref(handle_old);
+		return;
+	}
+
+	/* swap tags */
+	__alloc_tag_ref_set(&ref_old, tag_new);
+	update_page_tag_ref(handle_old, &ref_old);
+	__alloc_tag_ref_set(&ref_new, tag_old);
+	update_page_tag_ref(handle_new, &ref_new);
 
-	/* Clear the old ref to the original allocation tag. */
-	clear_page_tag_ref(&old->page);
-	/* Decrement the counters of the tag on get_new_folio. */
-	alloc_tag_sub(&ref, folio_size(new));
-	__alloc_tag_ref_set(&ref, tag);
-	update_page_tag_ref(handle, &ref);
-	put_page_tag_ref(handle);
+	put_page_tag_ref(handle_old);
+	put_page_tag_ref(handle_new);
 }
 
 static void shutdown_mem_profiling(bool remove_file)

lib/stackdepot.c

Lines changed: 9 additions & 1 deletion
@@ -630,7 +630,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 		prealloc = page_address(page);
 	}
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
+	if (in_nmi()) {
+		/* We can never allocate in NMI context. */
+		WARN_ON_ONCE(can_alloc);
+		/* Best effort; bail if we fail to take the lock. */
+		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+			goto exit;
+	} else {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+	}
 	printk_deferred_enter();
 
 	/* Try to find again, to avoid concurrently inserting duplicates. */
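Editorial context, not from the commit: the fix follows a common pattern for contexts that must never block, such as NMI here: detect the restricted context, use a trylock, and degrade gracefully instead of spinning on a lock the interrupted code may already hold. A userspace analogue using pthread_mutex_trylock(); the names are purely illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Best-effort save: callers that cannot block skip the work when the lock is busy. */
static int save_entry(int value, int may_block)
{
	if (may_block) {
		pthread_mutex_lock(&pool_lock);
	} else if (pthread_mutex_trylock(&pool_lock) != 0) {
		return -EBUSY;          /* bail out rather than spin or deadlock */
	}

	printf("saved %d\n", value);    /* stand-in for the real critical section */
	pthread_mutex_unlock(&pool_lock);
	return 0;
}

int main(void)
{
	save_entry(1, 1);               /* normal context: may block */
	save_entry(2, 0);               /* restricted context: best effort only */
	return 0;
}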

lib/stackinit_kunit.c

Lines changed: 1 addition & 0 deletions
@@ -212,6 +212,7 @@ static noinline void test_ ## name (struct kunit *test) \
 static noinline DO_NOTHING_TYPE_ ## which(var_type)		\
 do_nothing_ ## name(var_type *ptr)				\
 {								\
+	OPTIMIZER_HIDE_VAR(ptr);				\
 	/* Will always be true, but compiler doesn't know. */	\
 	if ((unsigned long)ptr > 0x2)				\
 		return DO_NOTHING_RETURN_ ## which(ptr);	\

mm/debug.c

Lines changed: 5 additions & 2 deletions
@@ -124,19 +124,22 @@ static void __dump_page(const struct page *page)
 {
 	struct folio *foliop, folio;
 	struct page precise;
+	unsigned long head;
 	unsigned long pfn = page_to_pfn(page);
 	unsigned long idx, nr_pages = 1;
 	int loops = 5;
 
 again:
 	memcpy(&precise, page, sizeof(*page));
-	foliop = page_folio(&precise);
-	if (foliop == (struct folio *)&precise) {
+	head = precise.compound_head;
+	if ((head & 1) == 0) {
+		foliop = (struct folio *)&precise;
 		idx = 0;
 		if (!folio_test_large(foliop))
 			goto dump;
 		foliop = (struct folio *)page;
 	} else {
+		foliop = (struct folio *)(head - 1);
 		idx = folio_page_idx(foliop, page);
 	}

mm/filemap.c

Lines changed: 2 additions & 2 deletions
@@ -3501,10 +3501,10 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
 			continue;
 		if (xa_is_value(folio))
 			continue;
-		if (folio_test_locked(folio))
-			continue;
 		if (!folio_try_get(folio))
 			continue;
+		if (folio_test_locked(folio))
+			goto skip;
 		/* Has the page moved or been split? */
 		if (unlikely(folio != xas_reload(xas)))
 			goto skip;

mm/gup.c

Lines changed: 10 additions & 1 deletion
@@ -52,7 +52,12 @@ static inline void sanity_check_pinned_pages(struct page **pages,
 	 */
 	for (; npages; npages--, pages++) {
 		struct page *page = *pages;
-		struct folio *folio = page_folio(page);
+		struct folio *folio;
+
+		if (!page)
+			continue;
+
+		folio = page_folio(page);
 
 		if (is_zero_page(page) ||
 		    !folio_test_anon(folio))
@@ -409,6 +414,10 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
 
 	sanity_check_pinned_pages(pages, npages);
 	for (i = 0; i < npages; i += nr) {
+		if (!pages[i]) {
+			nr = 1;
+			continue;
+		}
 		folio = gup_folio_next(pages, npages, i, &nr);
 		gup_put_folio(folio, nr, FOLL_PIN);
 	}

mm/kasan/report.c

Lines changed: 3 additions & 3 deletions
@@ -201,7 +201,7 @@ static inline void fail_non_kasan_kunit_test(void) { }
 
 #endif /* CONFIG_KUNIT */
 
-static DEFINE_SPINLOCK(report_lock);
+static DEFINE_RAW_SPINLOCK(report_lock);
 
 static void start_report(unsigned long *flags, bool sync)
 {
@@ -212,7 +212,7 @@ static void start_report(unsigned long *flags, bool sync)
 	lockdep_off();
 	/* Make sure we don't end up in loop. */
 	report_suppress_start();
-	spin_lock_irqsave(&report_lock, *flags);
+	raw_spin_lock_irqsave(&report_lock, *flags);
 	pr_err("==================================================================\n");
 }
 
@@ -222,7 +222,7 @@ static void end_report(unsigned long *flags, const void *addr, bool is_write)
 	trace_error_report_end(ERROR_DETECTOR_KASAN,
 			       (unsigned long)addr);
 	pr_err("==================================================================\n");
-	spin_unlock_irqrestore(&report_lock, *flags);
+	raw_spin_unlock_irqrestore(&report_lock, *flags);
 	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
 		check_panic_on_warn("KASAN");
 	switch (kasan_arg_fault) {
