@@ -4839,7 +4839,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 					unsigned long addr, unsigned long end,
 					struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 
@@ -4865,20 +4865,13 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 {
 	unsigned long precharge;
-	struct vm_area_struct *vma;
 
+	struct mm_walk mem_cgroup_count_precharge_walk = {
+		.pmd_entry = mem_cgroup_count_precharge_pte_range,
+		.mm = mm,
+	};
 	down_read(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		struct mm_walk mem_cgroup_count_precharge_walk = {
-			.pmd_entry = mem_cgroup_count_precharge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		walk_page_range(vma->vm_start, vma->vm_end,
-					&mem_cgroup_count_precharge_walk);
-	}
+	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
 	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
@@ -5011,7 +5004,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 				struct mm_walk *walk)
 {
 	int ret = 0;
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 	enum mc_target_type target_type;
@@ -5107,7 +5100,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 
 static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
-	struct vm_area_struct *vma;
+	struct mm_walk mem_cgroup_move_charge_walk = {
+		.pmd_entry = mem_cgroup_move_charge_pte_range,
+		.mm = mm,
+	};
 
 	lru_add_drain_all();
 	/*
@@ -5130,24 +5126,11 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 		cond_resched();
 		goto retry;
 	}
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		int ret;
-		struct mm_walk mem_cgroup_move_charge_walk = {
-			.pmd_entry = mem_cgroup_move_charge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		ret = walk_page_range(vma->vm_start, vma->vm_end,
-					&mem_cgroup_move_charge_walk);
-		if (ret)
-			/*
-			 * means we have consumed all precharges and failed in
-			 * doing additional charge. Just abandon here.
-			 */
-			break;
-	}
+	/*
+	 * When we have consumed all precharges and failed in doing
+	 * additional charge, the page walk just aborts.
+	 */
+	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
 	up_read(&mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }
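
For context, a minimal sketch of the usage pattern this diff converts to, assuming the reworked pagewalk API in which walk_page_range() iterates over the VMAs of walk->mm itself and publishes the current VMA in walk->vma (so callers no longer loop over VMAs and pass each one through .private). The example_* names are hypothetical, not part of this commit; the callback body mirrors the pte_offset_map_lock() pattern already visible in the memcg callbacks above.

#include <linux/mm.h>

/* Sketch of a pmd_entry callback under the post-rework pagewalk API. */
static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	/* The core walker sets walk->vma before invoking the callback,
	 * so the VMA no longer travels through walk->private. */
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		;	/* inspect *pte here */
	pte_unmap_unlock(pte - 1, ptl);
	return 0;	/* a nonzero return aborts the walk */
}

/* One struct mm_walk and one call now cover the whole address space. */
static void example_walk(struct mm_struct *mm)
{
	struct mm_walk walk = {
		.pmd_entry = example_pte_range,
		.mm = mm,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(0, ~0UL, &walk);
	up_read(&mm->mmap_sem);
}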