Skip to content

Commit 013339d

Browse files
shakeelb
authored and torvalds committed
mm/rmap: always do TTU_IGNORE_ACCESS
Since commit 369ea82 ("mm/rmap: update to new mmu_notifier semantic v2"), the code to check the secondary MMU's page table access bit is broken for !(TTU_IGNORE_ACCESS) because the page is unmapped from the secondary MMU's page table before the check. More specifically for those secondary MMUs which unmap the memory in mmu_notifier_invalidate_range_start() like kvm. However memory reclaim is the only user of !(TTU_IGNORE_ACCESS) or the absence of TTU_IGNORE_ACCESS and it explicitly performs the page table access check before trying to unmap the page. So, at worst the reclaim will miss accesses in a very short window if we remove page table access check in unmapping code. There is an unintended consequence of !(TTU_IGNORE_ACCESS) for the memcg reclaim. From memcg reclaim the page_referenced() only accounts the accesses from the processes which are in the same memcg of the target page but the unmapping code is considering accesses from all the processes, so, decreasing the effectiveness of memcg reclaim. The simplest solution is to always assume TTU_IGNORE_ACCESS in unmapping code. Link: https://lkml.kernel.org/r/[email protected] Fixes: 369ea82 ("mm/rmap: update to new mmu_notifier semantic v2") Signed-off-by: Shakeel Butt <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Jerome Glisse <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Dan Williams <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent eefbfa7 commit 013339d

File tree

7 files changed

+11
-27
lines changed

7 files changed

+11
-27
lines changed

include/linux/rmap.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,6 @@ enum ttu_flags {
9191

9292
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
9393
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
94-
TTU_IGNORE_ACCESS = 0x10, /* don't age */
9594
TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
9695
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
9796
* and caller guarantees they will

mm/huge_memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2321,7 +2321,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
23212321

23222322
static void unmap_page(struct page *page)
23232323
{
2324-
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2324+
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
23252325
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
23262326
bool unmap_success;
23272327

mm/memory-failure.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -989,7 +989,7 @@ static int get_hwpoison_page(struct page *page)
989989
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
990990
int flags, struct page **hpagep)
991991
{
992-
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
992+
enum ttu_flags ttu = TTU_IGNORE_MLOCK;
993993
struct address_space *mapping;
994994
LIST_HEAD(tokill);
995995
bool unmap_success = true;

mm/memory_hotplug.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1304,7 +1304,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
13041304
if (WARN_ON(PageLRU(page)))
13051305
isolate_lru_page(page);
13061306
if (page_mapped(page))
1307-
try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
1307+
try_to_unmap(page, TTU_IGNORE_MLOCK);
13081308
continue;
13091309
}
13101310

mm/migrate.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1122,8 +1122,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
11221122
/* Establish migration ptes */
11231123
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
11241124
page);
1125-
try_to_unmap(page,
1126-
TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1125+
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK);
11271126
page_was_mapped = 1;
11281127
}
11291128

@@ -1329,8 +1328,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
13291328

13301329
if (page_mapped(hpage)) {
13311330
bool mapping_locked = false;
1332-
enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK|
1333-
TTU_IGNORE_ACCESS;
1331+
enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK;
13341332

13351333
if (!PageAnon(hpage)) {
13361334
/*
@@ -2688,7 +2686,7 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
26882686
*/
26892687
static void migrate_vma_unmap(struct migrate_vma *migrate)
26902688
{
2691-
int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2689+
int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;
26922690
const unsigned long npages = migrate->npages;
26932691
const unsigned long start = migrate->start;
26942692
unsigned long addr, i, restore = 0;

mm/rmap.c

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1533,15 +1533,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
15331533
goto discard;
15341534
}
15351535

1536-
if (!(flags & TTU_IGNORE_ACCESS)) {
1537-
if (ptep_clear_flush_young_notify(vma, address,
1538-
pvmw.pte)) {
1539-
ret = false;
1540-
page_vma_mapped_walk_done(&pvmw);
1541-
break;
1542-
}
1543-
}
1544-
15451536
/* Nuke the page table entry. */
15461537
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
15471538
if (should_defer_flush(mm, flags)) {

mm/vmscan.c

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1072,7 +1072,6 @@ static void page_check_dirty_writeback(struct page *page,
10721072
static unsigned int shrink_page_list(struct list_head *page_list,
10731073
struct pglist_data *pgdat,
10741074
struct scan_control *sc,
1075-
enum ttu_flags ttu_flags,
10761075
struct reclaim_stat *stat,
10771076
bool ignore_references)
10781077
{
@@ -1297,7 +1296,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
12971296
* processes. Try to unmap it here.
12981297
*/
12991298
if (page_mapped(page)) {
1300-
enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
1299+
enum ttu_flags flags = TTU_BATCH_FLUSH;
13011300
bool was_swapbacked = PageSwapBacked(page);
13021301

13031302
if (unlikely(PageTransHuge(page)))
@@ -1514,7 +1513,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
15141513
}
15151514

15161515
nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1517-
TTU_IGNORE_ACCESS, &stat, true);
1516+
&stat, true);
15181517
list_splice(&clean_pages, page_list);
15191518
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
15201519
-(long)nr_reclaimed);
@@ -1958,8 +1957,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
19581957
if (nr_taken == 0)
19591958
return 0;
19601959

1961-
nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
1962-
&stat, false);
1960+
nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
19631961

19641962
spin_lock_irq(&pgdat->lru_lock);
19651963

@@ -2131,8 +2129,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
21312129

21322130
nr_reclaimed += shrink_page_list(&node_page_list,
21332131
NODE_DATA(nid),
2134-
&sc, 0,
2135-
&dummy_stat, false);
2132+
&sc, &dummy_stat, false);
21362133
while (!list_empty(&node_page_list)) {
21372134
page = lru_to_page(&node_page_list);
21382135
list_del(&page->lru);
@@ -2145,8 +2142,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
21452142
if (!list_empty(&node_page_list)) {
21462143
nr_reclaimed += shrink_page_list(&node_page_list,
21472144
NODE_DATA(nid),
2148-
&sc, 0,
2149-
&dummy_stat, false);
2145+
&sc, &dummy_stat, false);
21502146
while (!list_empty(&node_page_list)) {
21512147
page = lru_to_page(&node_page_list);
21522148
list_del(&page->lru);

0 commit comments

Comments
 (0)