
Commit b1b0dea

Chen Gang authored and torvalds committed
mm: memcontrol: let mem_cgroup_move_account() have effect only if MMU enabled
When !MMU, the build reports a warning. The related warning with allmodconfig under c6x:

  CC      mm/memcontrol.o
mm/memcontrol.c:2802:12: warning: 'mem_cgroup_move_account' defined but not used [-Wunused-function]
 static int mem_cgroup_move_account(struct page *page,
            ^

Signed-off-by: Chen Gang <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
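To see why the move silences the warning, here is a minimal standalone sketch (not the kernel source; the file name and the helper/caller names are made up for illustration). A static function whose only caller lives inside an #ifdef CONFIG_MMU block is still compiled in !MMU builds but never referenced, so gcc -Wall emits -Wunused-function; relocating the definition into the same #ifdef, which is what this commit does for mem_cgroup_move_account(), makes the warning disappear.

/* sketch.c - illustrative only, NOT kernel code.
 *
 *   gcc -Wall -c sketch.c                ->  warning: 'move_account' defined but not used
 *   gcc -Wall -DCONFIG_MMU -c sketch.c   ->  no warning
 */

/* "Before" layout: the helper is defined unconditionally ... */
static int move_account(int nr_pages)
{
        return nr_pages;
}

#ifdef CONFIG_MMU
/* ... but its only caller is compiled only when CONFIG_MMU is set, so a
 * !MMU build sees a static function with no users and warns. */
int move_charge(int nr_pages)
{
        return move_account(nr_pages);
}
#endif

/* The commit applies the "after" layout: the helper is moved inside the
 * #ifdef CONFIG_MMU section, next to its only caller, so it is simply
 * not compiled in !MMU builds and the warning goes away. */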
1 parent 6b63783 commit b1b0dea

File tree

1 file changed: +86, -86 lines


mm/memcontrol.c

Lines changed: 86 additions & 86 deletions
@@ -2785,92 +2785,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-/**
- * mem_cgroup_move_account - move account of the page
- * @page: the page
- * @nr_pages: number of regular pages (>1 for huge pages)
- * @from: mem_cgroup which the page is moved from.
- * @to: mem_cgroup which the page is moved to. @from != @to.
- *
- * The caller must confirm following.
- * - page is not on LRU (isolate_page() is useful.)
- * - compound_lock is held when nr_pages > 1
- *
- * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
- * from old cgroup.
- */
-static int mem_cgroup_move_account(struct page *page,
-                                   unsigned int nr_pages,
-                                   struct mem_cgroup *from,
-                                   struct mem_cgroup *to)
-{
-        unsigned long flags;
-        int ret;
-
-        VM_BUG_ON(from == to);
-        VM_BUG_ON_PAGE(PageLRU(page), page);
-        /*
-         * The page is isolated from LRU. So, collapse function
-         * will not handle this page. But page splitting can happen.
-         * Do this check under compound_page_lock(). The caller should
-         * hold it.
-         */
-        ret = -EBUSY;
-        if (nr_pages > 1 && !PageTransHuge(page))
-                goto out;
-
-        /*
-         * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
-         * of its source page while we change it: page migration takes
-         * both pages off the LRU, but page cache replacement doesn't.
-         */
-        if (!trylock_page(page))
-                goto out;
-
-        ret = -EINVAL;
-        if (page->mem_cgroup != from)
-                goto out_unlock;
-
-        spin_lock_irqsave(&from->move_lock, flags);
-
-        if (!PageAnon(page) && page_mapped(page)) {
-                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-                               nr_pages);
-                __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-                               nr_pages);
-        }
-
-        if (PageWriteback(page)) {
-                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-                               nr_pages);
-                __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-                               nr_pages);
-        }
-
-        /*
-         * It is safe to change page->mem_cgroup here because the page
-         * is referenced, charged, and isolated - we can't race with
-         * uncharging, charging, migration, or LRU putback.
-         */
-
-        /* caller should have done css_get */
-        page->mem_cgroup = to;
-        spin_unlock_irqrestore(&from->move_lock, flags);
-
-        ret = 0;
-
-        local_irq_disable();
-        mem_cgroup_charge_statistics(to, page, nr_pages);
-        memcg_check_events(to, page);
-        mem_cgroup_charge_statistics(from, page, -nr_pages);
-        memcg_check_events(from, page);
-        local_irq_enable();
-out_unlock:
-        unlock_page(page);
-out:
-        return ret;
-}
-
 #ifdef CONFIG_MEMCG_SWAP
 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
                                         bool charge)
@@ -4822,6 +4736,92 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
         return page;
 }
 
+/**
+ * mem_cgroup_move_account - move account of the page
+ * @page: the page
+ * @nr_pages: number of regular pages (>1 for huge pages)
+ * @from: mem_cgroup which the page is moved from.
+ * @to: mem_cgroup which the page is moved to. @from != @to.
+ *
+ * The caller must confirm following.
+ * - page is not on LRU (isolate_page() is useful.)
+ * - compound_lock is held when nr_pages > 1
+ *
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
+ */
+static int mem_cgroup_move_account(struct page *page,
+                                   unsigned int nr_pages,
+                                   struct mem_cgroup *from,
+                                   struct mem_cgroup *to)
+{
+        unsigned long flags;
+        int ret;
+
+        VM_BUG_ON(from == to);
+        VM_BUG_ON_PAGE(PageLRU(page), page);
+        /*
+         * The page is isolated from LRU. So, collapse function
+         * will not handle this page. But page splitting can happen.
+         * Do this check under compound_page_lock(). The caller should
+         * hold it.
+         */
+        ret = -EBUSY;
+        if (nr_pages > 1 && !PageTransHuge(page))
+                goto out;
+
+        /*
+         * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
+         * of its source page while we change it: page migration takes
+         * both pages off the LRU, but page cache replacement doesn't.
+         */
+        if (!trylock_page(page))
+                goto out;
+
+        ret = -EINVAL;
+        if (page->mem_cgroup != from)
+                goto out_unlock;
+
+        spin_lock_irqsave(&from->move_lock, flags);
+
+        if (!PageAnon(page) && page_mapped(page)) {
+                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+                               nr_pages);
+                __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+                               nr_pages);
+        }
+
+        if (PageWriteback(page)) {
+                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+                               nr_pages);
+                __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+                               nr_pages);
+        }
+
+        /*
+         * It is safe to change page->mem_cgroup here because the page
+         * is referenced, charged, and isolated - we can't race with
+         * uncharging, charging, migration, or LRU putback.
+         */
+
+        /* caller should have done css_get */
+        page->mem_cgroup = to;
+        spin_unlock_irqrestore(&from->move_lock, flags);
+
+        ret = 0;
+
+        local_irq_disable();
+        mem_cgroup_charge_statistics(to, page, nr_pages);
+        memcg_check_events(to, page);
+        mem_cgroup_charge_statistics(from, page, -nr_pages);
+        memcg_check_events(from, page);
+        local_irq_enable();
+out_unlock:
+        unlock_page(page);
+out:
+        return ret;
+}
+
 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
                 unsigned long addr, pte_t ptent, union mc_target *target)
 {

0 commit comments
