Skip to content

Commit ac46d4f

Browse files
Jérôme Glisse authored and torvalds committed
mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a parameter use a structure to group all parameters for the mmu_notifier invalidate_range_start/end calls. No functional changes with this patch. [[email protected]: coding style fixes] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Jérôme Glisse <[email protected]> Acked-by: Christian König <[email protected]> Acked-by: Jan Kara <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Ross Zwisler <[email protected]> Cc: Dan Williams <[email protected]> Cc: Paolo Bonzini <[email protected]> Cc: Radim Krcmar <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Felix Kuehling <[email protected]> Cc: Ralph Campbell <[email protected]> Cc: John Hubbard <[email protected]> From: Jérôme Glisse <[email protected]> Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3 fix build warning in migrate.c when CONFIG_MMU_NOTIFIER=n Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Jérôme Glisse <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 5d6527a commit ac46d4f

File tree

17 files changed

+262
-250
lines changed

17 files changed

+262
-250
lines changed

fs/dax.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -779,7 +779,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
779779

780780
i_mmap_lock_read(mapping);
781781
vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
782-
unsigned long address, start, end;
782+
struct mmu_notifier_range range;
783+
unsigned long address;
783784

784785
cond_resched();
785786

@@ -793,7 +794,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
793794
* call mmu_notifier_invalidate_range_start() on our behalf
794795
* before taking any lock.
795796
*/
796-
if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
797+
if (follow_pte_pmd(vma->vm_mm, address, &range,
798+
&ptep, &pmdp, &ptl))
797799
continue;
798800

799801
/*
@@ -835,7 +837,7 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
835837
pte_unmap_unlock(ptep, ptl);
836838
}
837839

838-
mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
840+
mmu_notifier_invalidate_range_end(&range);
839841
}
840842
i_mmap_unlock_read(mapping);
841843
}

fs/proc/task_mmu.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1096,6 +1096,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
10961096
return -ESRCH;
10971097
mm = get_task_mm(task);
10981098
if (mm) {
1099+
struct mmu_notifier_range range;
10991100
struct clear_refs_private cp = {
11001101
.type = type,
11011102
};
@@ -1139,11 +1140,13 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
11391140
downgrade_write(&mm->mmap_sem);
11401141
break;
11411142
}
1142-
mmu_notifier_invalidate_range_start(mm, 0, -1);
1143+
1144+
mmu_notifier_range_init(&range, mm, 0, -1UL);
1145+
mmu_notifier_invalidate_range_start(&range);
11431146
}
11441147
walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
11451148
if (type == CLEAR_REFS_SOFT_DIRTY)
1146-
mmu_notifier_invalidate_range_end(mm, 0, -1);
1149+
mmu_notifier_invalidate_range_end(&range);
11471150
tlb_finish_mmu(&tlb, 0, -1);
11481151
up_read(&mm->mmap_sem);
11491152
out_mm:

include/linux/mm.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1451,6 +1451,8 @@ struct mm_walk {
14511451
void *private;
14521452
};
14531453

1454+
struct mmu_notifier_range;
1455+
14541456
int walk_page_range(unsigned long addr, unsigned long end,
14551457
struct mm_walk *walk);
14561458
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
@@ -1459,8 +1461,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
14591461
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
14601462
struct vm_area_struct *vma);
14611463
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1462-
unsigned long *start, unsigned long *end,
1463-
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1464+
struct mmu_notifier_range *range,
1465+
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
14641466
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
14651467
unsigned long *pfn);
14661468
int follow_phys(struct vm_area_struct *vma, unsigned long address,

include/linux/mmu_notifier.h

Lines changed: 58 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -220,11 +220,8 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
220220
unsigned long address);
221221
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
222222
unsigned long address, pte_t pte);
223-
extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
224-
unsigned long start, unsigned long end,
225-
bool blockable);
226-
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
227-
unsigned long start, unsigned long end,
223+
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
224+
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
228225
bool only_end);
229226
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
230227
unsigned long start, unsigned long end);
@@ -268,33 +265,37 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
268265
__mmu_notifier_change_pte(mm, address, pte);
269266
}
270267

271-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
272-
unsigned long start, unsigned long end)
268+
static inline void
269+
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
273270
{
274-
if (mm_has_notifiers(mm))
275-
__mmu_notifier_invalidate_range_start(mm, start, end, true);
271+
if (mm_has_notifiers(range->mm)) {
272+
range->blockable = true;
273+
__mmu_notifier_invalidate_range_start(range);
274+
}
276275
}
277276

278-
static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
279-
unsigned long start, unsigned long end)
277+
static inline int
278+
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
280279
{
281-
if (mm_has_notifiers(mm))
282-
return __mmu_notifier_invalidate_range_start(mm, start, end, false);
280+
if (mm_has_notifiers(range->mm)) {
281+
range->blockable = false;
282+
return __mmu_notifier_invalidate_range_start(range);
283+
}
283284
return 0;
284285
}
285286

286-
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
287-
unsigned long start, unsigned long end)
287+
static inline void
288+
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
288289
{
289-
if (mm_has_notifiers(mm))
290-
__mmu_notifier_invalidate_range_end(mm, start, end, false);
290+
if (mm_has_notifiers(range->mm))
291+
__mmu_notifier_invalidate_range_end(range, false);
291292
}
292293

293-
static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
294-
unsigned long start, unsigned long end)
294+
static inline void
295+
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
295296
{
296-
if (mm_has_notifiers(mm))
297-
__mmu_notifier_invalidate_range_end(mm, start, end, true);
297+
if (mm_has_notifiers(range->mm))
298+
__mmu_notifier_invalidate_range_end(range, true);
298299
}
299300

300301
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -315,6 +316,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
315316
__mmu_notifier_mm_destroy(mm);
316317
}
317318

319+
320+
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
321+
struct mm_struct *mm,
322+
unsigned long start,
323+
unsigned long end)
324+
{
325+
range->mm = mm;
326+
range->start = start;
327+
range->end = end;
328+
}
329+
318330
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
319331
({ \
320332
int __young; \
@@ -427,6 +439,23 @@ extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
427439

428440
#else /* CONFIG_MMU_NOTIFIER */
429441

442+
struct mmu_notifier_range {
443+
unsigned long start;
444+
unsigned long end;
445+
};
446+
447+
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
448+
unsigned long start,
449+
unsigned long end)
450+
{
451+
range->start = start;
452+
range->end = end;
453+
}
454+
455+
#define mmu_notifier_range_init(range, mm, start, end) \
456+
_mmu_notifier_range_init(range, start, end)
457+
458+
430459
static inline int mm_has_notifiers(struct mm_struct *mm)
431460
{
432461
return 0;
@@ -454,24 +483,24 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
454483
{
455484
}
456485

457-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
458-
unsigned long start, unsigned long end)
486+
static inline void
487+
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
459488
{
460489
}
461490

462-
static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
463-
unsigned long start, unsigned long end)
491+
static inline int
492+
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
464493
{
465494
return 0;
466495
}
467496

468-
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
469-
unsigned long start, unsigned long end)
497+
static inline
498+
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
470499
{
471500
}
472501

473-
static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
474-
unsigned long start, unsigned long end)
502+
static inline void
503+
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
475504
{
476505
}
477506

kernel/events/uprobes.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -171,11 +171,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
171171
.address = addr,
172172
};
173173
int err;
174-
/* For mmu_notifiers */
175-
const unsigned long mmun_start = addr;
176-
const unsigned long mmun_end = addr + PAGE_SIZE;
174+
struct mmu_notifier_range range;
177175
struct mem_cgroup *memcg;
178176

177+
mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);
178+
179179
VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
180180

181181
err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
@@ -186,7 +186,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
186186
/* For try_to_free_swap() and munlock_vma_page() below */
187187
lock_page(old_page);
188188

189-
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
189+
mmu_notifier_invalidate_range_start(&range);
190190
err = -EAGAIN;
191191
if (!page_vma_mapped_walk(&pvmw)) {
192192
mem_cgroup_cancel_charge(new_page, memcg, false);
@@ -220,7 +220,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
220220

221221
err = 0;
222222
unlock:
223-
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
223+
mmu_notifier_invalidate_range_end(&range);
224224
unlock_page(old_page);
225225
return err;
226226
}

mm/huge_memory.c

Lines changed: 25 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1134,8 +1134,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
11341134
int i;
11351135
vm_fault_t ret = 0;
11361136
struct page **pages;
1137-
unsigned long mmun_start; /* For mmu_notifiers */
1138-
unsigned long mmun_end; /* For mmu_notifiers */
1137+
struct mmu_notifier_range range;
11391138

11401139
pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
11411140
GFP_KERNEL);
@@ -1173,9 +1172,9 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
11731172
cond_resched();
11741173
}
11751174

1176-
mmun_start = haddr;
1177-
mmun_end = haddr + HPAGE_PMD_SIZE;
1178-
mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
1175+
mmu_notifier_range_init(&range, vma->vm_mm, haddr,
1176+
haddr + HPAGE_PMD_SIZE);
1177+
mmu_notifier_invalidate_range_start(&range);
11791178

11801179
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
11811180
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
@@ -1220,8 +1219,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
12201219
* No need to double call mmu_notifier->invalidate_range() callback as
12211220
* the above pmdp_huge_clear_flush_notify() did already call it.
12221221
*/
1223-
mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
1224-
mmun_end);
1222+
mmu_notifier_invalidate_range_only_end(&range);
12251223

12261224
ret |= VM_FAULT_WRITE;
12271225
put_page(page);
@@ -1231,7 +1229,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
12311229

12321230
out_free_pages:
12331231
spin_unlock(vmf->ptl);
1234-
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1232+
mmu_notifier_invalidate_range_end(&range);
12351233
for (i = 0; i < HPAGE_PMD_NR; i++) {
12361234
memcg = (void *)page_private(pages[i]);
12371235
set_page_private(pages[i], 0);
@@ -1248,8 +1246,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
12481246
struct page *page = NULL, *new_page;
12491247
struct mem_cgroup *memcg;
12501248
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1251-
unsigned long mmun_start; /* For mmu_notifiers */
1252-
unsigned long mmun_end; /* For mmu_notifiers */
1249+
struct mmu_notifier_range range;
12531250
gfp_t huge_gfp; /* for allocation and charge */
12541251
vm_fault_t ret = 0;
12551252

@@ -1338,9 +1335,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
13381335
vma, HPAGE_PMD_NR);
13391336
__SetPageUptodate(new_page);
13401337

1341-
mmun_start = haddr;
1342-
mmun_end = haddr + HPAGE_PMD_SIZE;
1343-
mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
1338+
mmu_notifier_range_init(&range, vma->vm_mm, haddr,
1339+
haddr + HPAGE_PMD_SIZE);
1340+
mmu_notifier_invalidate_range_start(&range);
13441341

13451342
spin_lock(vmf->ptl);
13461343
if (page)
@@ -1375,8 +1372,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
13751372
* No need to double call mmu_notifier->invalidate_range() callback as
13761373
* the above pmdp_huge_clear_flush_notify() did already call it.
13771374
*/
1378-
mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
1379-
mmun_end);
1375+
mmu_notifier_invalidate_range_only_end(&range);
13801376
out:
13811377
return ret;
13821378
out_unlock:
@@ -2015,23 +2011,23 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
20152011
unsigned long address)
20162012
{
20172013
spinlock_t *ptl;
2018-
struct mm_struct *mm = vma->vm_mm;
2019-
unsigned long haddr = address & HPAGE_PUD_MASK;
2014+
struct mmu_notifier_range range;
20202015

2021-
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
2022-
ptl = pud_lock(mm, pud);
2016+
mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK,
2017+
(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2018+
mmu_notifier_invalidate_range_start(&range);
2019+
ptl = pud_lock(vma->vm_mm, pud);
20232020
if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
20242021
goto out;
2025-
__split_huge_pud_locked(vma, pud, haddr);
2022+
__split_huge_pud_locked(vma, pud, range.start);
20262023

20272024
out:
20282025
spin_unlock(ptl);
20292026
/*
20302027
* No need to double call mmu_notifier->invalidate_range() callback as
20312028
* the above pudp_huge_clear_flush_notify() did already call it.
20322029
*/
2033-
mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
2034-
HPAGE_PUD_SIZE);
2030+
mmu_notifier_invalidate_range_only_end(&range);
20352031
}
20362032
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
20372033

@@ -2233,11 +2229,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
22332229
unsigned long address, bool freeze, struct page *page)
22342230
{
22352231
spinlock_t *ptl;
2236-
struct mm_struct *mm = vma->vm_mm;
2237-
unsigned long haddr = address & HPAGE_PMD_MASK;
2232+
struct mmu_notifier_range range;
22382233

2239-
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2240-
ptl = pmd_lock(mm, pmd);
2234+
mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK,
2235+
(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2236+
mmu_notifier_invalidate_range_start(&range);
2237+
ptl = pmd_lock(vma->vm_mm, pmd);
22412238

22422239
/*
22432240
* If caller asks to setup a migration entries, we need a page to check
@@ -2253,7 +2250,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
22532250
clear_page_mlock(page);
22542251
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
22552252
goto out;
2256-
__split_huge_pmd_locked(vma, pmd, haddr, freeze);
2253+
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
22572254
out:
22582255
spin_unlock(ptl);
22592256
/*
@@ -2269,8 +2266,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
22692266
* any further changes to individual pte will notify. So no need
22702267
* to call mmu_notifier->invalidate_range()
22712268
*/
2272-
mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
2273-
HPAGE_PMD_SIZE);
2269+
mmu_notifier_invalidate_range_only_end(&range);
22742270
}
22752271

22762272
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,

0 commit comments

Comments (0)