 #include <linux/mm_inline.h>
 #include <linux/mmu_notifier.h>
 #include <linux/printk.h>
+#include <linux/swapops.h>
 
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
@@ -426,6 +427,49 @@ static inline bool queue_pages_required(struct page *page,
 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	int ret = 0;
+	struct page *page;
+	struct queue_pages *qp = walk->private;
+	unsigned long flags;
+
+	if (unlikely(is_pmd_migration_entry(*pmd))) {
+		ret = 1;
+		goto unlock;
+	}
+	page = pmd_page(*pmd);
+	if (is_huge_zero_page(page)) {
+		spin_unlock(ptl);
+		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		goto out;
+	}
+	if (!thp_migration_supported()) {
+		get_page(page);
+		spin_unlock(ptl);
+		lock_page(page);
+		ret = split_huge_page(page);
+		unlock_page(page);
+		put_page(page);
+		goto out;
+	}
+	if (!queue_pages_required(page, qp)) {
+		ret = 1;
+		goto unlock;
+	}
+
+	ret = 1;
+	flags = qp->flags;
+	/* go to thp migration */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		migrate_page_add(page, qp->pagelist, flags);
+unlock:
+	spin_unlock(ptl);
+out:
+	return ret;
+}
+
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
@@ -437,30 +481,15 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid, ret;
+	int ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(walk->mm, pmd);
-		if (pmd_trans_huge(*pmd)) {
-			page = pmd_page(*pmd);
-			if (is_huge_zero_page(page)) {
-				spin_unlock(ptl);
-				__split_huge_pmd(vma, pmd, addr, false, NULL);
-			} else {
-				get_page(page);
-				spin_unlock(ptl);
-				lock_page(page);
-				ret = split_huge_page(page);
-				unlock_page(page);
-				put_page(page);
-				if (ret)
-					return 0;
-			}
-		} else {
-			spin_unlock(ptl);
-		}
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
+		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
+		if (ret)
+			return 0;
 	}
 
 	if (pmd_trans_unstable(pmd))
@@ -481,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			continue;
 		if (!queue_pages_required(page, qp))
 			continue;
-		if (PageTransCompound(page)) {
+		if (PageTransCompound(page) && !thp_migration_supported()) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
@@ -893,19 +922,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
 #ifdef CONFIG_MIGRATION
 /*
- * page migration
+ * page migration, thp tail pages can be passed.
  */
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	struct page *head = compound_head(page);
 	/*
 	 * Avoid migrating a page that is shared with others.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
-		if (!isolate_lru_page(page)) {
-			list_add_tail(&page->lru, pagelist);
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-					page_is_file_cache(page));
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
+		if (!isolate_lru_page(head)) {
+			list_add_tail(&head->lru, pagelist);
+			mod_node_page_state(page_pgdat(head),
+				NR_ISOLATED_ANON + page_is_file_cache(head),
+				hpage_nr_pages(head));
 		}
 	}
 }
@@ -915,7 +946,17 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
 	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					node);
-	else
+	else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_pages_node(node,
+			(GFP_TRANSHUGE | __GFP_THISNODE),
+			HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
+	} else
 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
 						__GFP_THISNODE, 0);
 }
@@ -1081,6 +1122,15 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
 	if (PageHuge(page)) {
 		BUG_ON(!vma);
 		return alloc_huge_page_noerr(vma, address, 1);
+	} else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+					 HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
 	}
 	/*
 	 * if !vma, alloc_page_vma() will use task or system default policy