Skip to content

Commit a9b3f86

Browse files
mina authored and torvalds committed
hugetlb: support file_region coalescing again
An earlier patch in this series disabled file_region coalescing in order to hang the hugetlb_cgroup uncharge info on the file_region entries. This patch re-adds support for coalescing of file_region entries. Essentially every time we add an entry, we call a recursive function that tries to coalesce the added region with the regions next to it. The worst case call depth for this function is 3: one to coalesce with the next region, one to coalesce with the previous region, and one to reach the base case. This is an important performance optimization as private mappings add their entries page by page, and we could incur big performance costs for large mappings with lots of file_region entries in their resv_map. [[email protected]: fix CONFIG_CGROUP_HUGETLB ifdefs] Link: http://lkml.kernel.org/r/[email protected] [[email protected]: remove check_coalesce_bug debug code] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Mina Almasry <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Reviewed-by: Mike Kravetz <[email protected]> Acked-by: David Rientjes <[email protected]> Cc: Greg Thelen <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Sandipan Das <[email protected]> Cc: Shakeel Butt <[email protected]> Cc: Shuah Khan <[email protected]> Cc: Randy Dunlap <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent 08cf9fa commit a9b3f86

File tree

1 file changed

+44
-0
lines changed

1 file changed

+44
-0
lines changed

mm/hugetlb.c

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -276,6 +276,48 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
276276
#endif
277277
}
278278

279+
/*
 * Return true if the two file_region entries carry identical hugetlb_cgroup
 * uncharge information, i.e. merging them would not lose accounting data.
 *
 * Both arguments may be NULL; NULL compares unequal (no coalescing).
 * Without CONFIG_CGROUP_HUGETLB there is no per-region uncharge info, so
 * any two adjacent regions are trivially mergeable and we return true.
 */
static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	/* Regions match only when both counter pointer and css agree. */
	return rg && org &&
	       rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}
291+
292+
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
293+
{
294+
struct file_region *nrg = NULL, *prg = NULL;
295+
296+
prg = list_prev_entry(rg, link);
297+
if (&prg->link != &resv->regions && prg->to == rg->from &&
298+
has_same_uncharge_info(prg, rg)) {
299+
prg->to = rg->to;
300+
301+
list_del(&rg->link);
302+
kfree(rg);
303+
304+
coalesce_file_region(resv, prg);
305+
return;
306+
}
307+
308+
nrg = list_next_entry(rg, link);
309+
if (&nrg->link != &resv->regions && nrg->from == rg->to &&
310+
has_same_uncharge_info(nrg, rg)) {
311+
nrg->from = rg->from;
312+
313+
list_del(&rg->link);
314+
kfree(rg);
315+
316+
coalesce_file_region(resv, nrg);
317+
return;
318+
}
319+
}
320+
279321
/* Must be called with resv->lock held. Calling this with count_only == true
280322
* will count the number of pages to be added but will not modify the linked
281323
* list. If regions_needed != NULL and count_only == true, then regions_needed
@@ -327,6 +369,7 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
327369
record_hugetlb_cgroup_uncharge_info(h_cg, h,
328370
resv, nrg);
329371
list_add(&nrg->link, rg->link.prev);
372+
coalesce_file_region(resv, nrg);
330373
} else if (regions_needed)
331374
*regions_needed += 1;
332375
}
@@ -344,6 +387,7 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
344387
resv, last_accounted_offset, t);
345388
record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
346389
list_add(&nrg->link, rg->link.prev);
390+
coalesce_file_region(resv, nrg);
347391
} else if (regions_needed)
348392
*regions_needed += 1;
349393
}

0 commit comments

Comments
 (0)