Commit ef6a3c6

Miklos Szeredi authored and torvalds committed
mm: add replace_page_cache_page() function
This function basically does:

        remove_from_page_cache(old);
        page_cache_release(old);
        add_to_page_cache_locked(new);

Except it does this atomically, so there's no possibility for the "add" to fail because of a race.

If memory cgroups are enabled, then the memory cgroup charge is also moved from the old page to the new.

This function is currently used by fuse to move pages into the page cache on read, instead of copying the page contents.

[[email protected]: add freepage() hook to replace_page_cache_page()]
Signed-off-by: Miklos Szeredi <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Acked-by: KAMEZAWA Hiroyuki <[email protected]>
Cc: Mel Gorman <[email protected]>
Signed-off-by: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 318b275 commit ef6a3c6

6 files changed: 80 additions & 11 deletions

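For context before the diffs, a minimal sketch of the race the new helper closes (illustrative only, not code from this commit; oldpage, newpage, mapping and index are as in the fuse caller below):

        /* Old three-step sequence: the pagecache slot is briefly empty. */
        remove_from_page_cache(oldpage);
        page_cache_release(oldpage);
        /* Another task can instantiate a page at 'index' here, so... */
        err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
        /* ...the add can fail with -EEXIST purely because of the race. */

        /* New sequence: the remove and add happen under mapping->tree_lock, */
        /* so the only remaining failure mode is memory allocation failure.  */
        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);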

fs/fuse/dev.c

Lines changed: 4 additions & 6 deletions
@@ -737,14 +737,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
         if (WARN_ON(PageMlocked(oldpage)))
                 goto out_fallback_unlock;
 
-        remove_from_page_cache(oldpage);
-        page_cache_release(oldpage);
-
-        err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
+        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
         if (err) {
-                printk(KERN_WARNING "fuse_try_move_page: failed to add page");
-                goto out_fallback_unlock;
+                unlock_page(newpage);
+                return err;
         }
+
         page_cache_get(newpage);
 
         if (!(buf->flags & PIPE_BUF_FLAG_LRU))
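Note the changed error handling in fuse: the old code printed a warning and fell back to copying the page contents when the re-add lost a race; since the atomic helper can only fail on memory allocation, the new code simply unlocks the new page and propagates the error.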

include/linux/memcontrol.h

Lines changed: 2 additions & 2 deletions
@@ -96,7 +96,7 @@ extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
 
 extern int
 mem_cgroup_prepare_migration(struct page *page,
-        struct page *newpage, struct mem_cgroup **ptr);
+        struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
 extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
         struct page *oldpage, struct page *newpage, bool migration_ok);
 
@@ -249,7 +249,7 @@ static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
 
 static inline int
 mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
-        struct mem_cgroup **ptr)
+        struct mem_cgroup **ptr, gfp_t gfp_mask)
 {
         return 0;
 }
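The new gfp_t argument lets the caller pick the allocation mode used when charging the replacement page: replace_page_cache_page() forwards its own gfp_mask, where the charge in mm/memcontrol.c was previously hard-coded to GFP_KERNEL (see the __mem_cgroup_try_charge() change below).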

include/linux/pagemap.h

Lines changed: 1 addition & 0 deletions
@@ -457,6 +457,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                 pgoff_t index, gfp_t gfp_mask);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
+int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:

mm/filemap.c

Lines changed: 70 additions & 0 deletions
@@ -386,6 +386,76 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(filemap_write_and_wait_range);
 
+/**
+ * replace_page_cache_page - replace a pagecache page with a new one
+ * @old:        page to be replaced
+ * @new:        page to replace with
+ * @gfp_mask:   allocation mode
+ *
+ * This function replaces a page in the pagecache with a new one.  On
+ * success it acquires the pagecache reference for the new page and
+ * drops it for the old page.  Both the old and new pages must be
+ * locked.  This function does not add the new page to the LRU, the
+ * caller must do that.
+ *
+ * The remove + add is atomic.  The only way this function can fail is
+ * memory allocation failure.
+ */
+int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+{
+        int error;
+        struct mem_cgroup *memcg = NULL;
+
+        VM_BUG_ON(!PageLocked(old));
+        VM_BUG_ON(!PageLocked(new));
+        VM_BUG_ON(new->mapping);
+
+        /*
+         * This is not page migration, but prepare_migration and
+         * end_migration does enough work for charge replacement.
+         *
+         * In the longer term we probably want a specialized function
+         * for moving the charge from old to new in a more efficient
+         * manner.
+         */
+        error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+        if (error)
+                return error;
+
+        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+        if (!error) {
+                struct address_space *mapping = old->mapping;
+                void (*freepage)(struct page *);
+
+                pgoff_t offset = old->index;
+                freepage = mapping->a_ops->freepage;
+
+                page_cache_get(new);
+                new->mapping = mapping;
+                new->index = offset;
+
+                spin_lock_irq(&mapping->tree_lock);
+                __remove_from_page_cache(old);
+                error = radix_tree_insert(&mapping->page_tree, offset, new);
+                BUG_ON(error);
+                mapping->nrpages++;
+                __inc_zone_page_state(new, NR_FILE_PAGES);
+                if (PageSwapBacked(new))
+                        __inc_zone_page_state(new, NR_SHMEM);
+                spin_unlock_irq(&mapping->tree_lock);
+                radix_tree_preload_end();
+                if (freepage)
+                        freepage(old);
+                page_cache_release(old);
+                mem_cgroup_end_migration(memcg, old, new, true);
+        } else {
+                mem_cgroup_end_migration(memcg, old, new, false);
+        }
+
+        return error;
+}
+EXPORT_SYMBOL_GPL(replace_page_cache_page);
+
 /**
  * add_to_page_cache_locked - add a locked page to the pagecache
  * @page:       page to add
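To make the kernel-doc contract above concrete, a hedged usage sketch (not part of this commit): both pages are assumed already locked, and LRU placement is left to the caller as documented. lru_cache_add_file() is assumed as the LRU hook for a file-backed page, mirroring what the fuse caller does:

        /*
         * Sketch only: caller holds locks on both pages.  On success the
         * pagecache reference moves from oldpage to newpage; putting the
         * new page on the LRU is deliberately left to the caller.
         */
        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
        if (err) {
                unlock_page(newpage);   /* oldpage is still in the pagecache */
                return err;
        }
        page_cache_get(newpage);        /* caller's own reference */
        lru_cache_add_file(newpage);    /* assumed file-backed */
        unlock_page(oldpage);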

mm/memcontrol.c

Lines changed: 2 additions & 2 deletions
@@ -2883,7 +2883,7 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
  * page belongs to.
  */
 int mem_cgroup_prepare_migration(struct page *page,
-        struct page *newpage, struct mem_cgroup **ptr)
+        struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
 {
         struct page_cgroup *pc;
         struct mem_cgroup *mem = NULL;
@@ -2940,7 +2940,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                 return 0;
 
         *ptr = mem;
-        ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
+        ret = __mem_cgroup_try_charge(NULL, gfp_mask, ptr, false, PAGE_SIZE);
         css_put(&mem->css);     /* drop extra refcnt */
         if (ret || *ptr == NULL) {
                 if (PageAnon(page)) {

mm/migrate.c

Lines changed: 1 addition & 1 deletion
@@ -678,7 +678,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         }
 
         /* charge against new page */
-        charge = mem_cgroup_prepare_migration(page, newpage, &mem);
+        charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
         if (charge == -ENOMEM) {
                 rc = -ENOMEM;
                 goto unlock;
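Passing GFP_KERNEL here keeps the migration path's behaviour unchanged: before this patch, the charge in mem_cgroup_prepare_migration() was hard-coded to GFP_KERNEL anyway.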
