@@ -1031,32 +1031,23 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
 	int ret = 0;

-	spin_lock(&tlb->mm->page_table_lock);
-	if (likely(pmd_trans_huge(*pmd))) {
-		if (unlikely(pmd_trans_splitting(*pmd))) {
-			spin_unlock(&tlb->mm->page_table_lock);
-			wait_split_huge_page(vma->anon_vma,
-					     pmd);
-		} else {
-			struct page *page;
-			pgtable_t pgtable;
-			pgtable = get_pmd_huge_pte(tlb->mm);
-			page = pmd_page(*pmd);
-			pmd_clear(pmd);
-			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-			page_remove_rmap(page);
-			VM_BUG_ON(page_mapcount(page) < 0);
-			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-			VM_BUG_ON(!PageHead(page));
-			tlb->mm->nr_ptes--;
-			spin_unlock(&tlb->mm->page_table_lock);
-			tlb_remove_page(tlb, page);
-			pte_free(tlb->mm, pgtable);
-			ret = 1;
-		}
-	} else
+	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+		struct page *page;
+		pgtable_t pgtable;
+		pgtable = get_pmd_huge_pte(tlb->mm);
+		page = pmd_page(*pmd);
+		pmd_clear(pmd);
+		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+		page_remove_rmap(page);
+		VM_BUG_ON(page_mapcount(page) < 0);
+		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+		VM_BUG_ON(!PageHead(page));
+		tlb->mm->nr_ptes--;
 		spin_unlock(&tlb->mm->page_table_lock);
-
+		tlb_remove_page(tlb, page);
+		pte_free(tlb->mm, pgtable);
+		ret = 1;
+	}

 	return ret;
 }
@@ -1066,21 +1057,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	int ret = 0;

-	spin_lock(&vma->vm_mm->page_table_lock);
-	if (likely(pmd_trans_huge(*pmd))) {
-		ret = !pmd_trans_splitting(*pmd);
-		spin_unlock(&vma->vm_mm->page_table_lock);
-		if (unlikely(!ret))
-			wait_split_huge_page(vma->anon_vma, pmd);
-		else {
-			/*
-			 * All logical pages in the range are present
-			 * if backed by a huge page.
-			 */
-			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-		}
-	} else
+	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+		/*
+		 * All logical pages in the range are present
+		 * if backed by a huge page.
+		 */
 		spin_unlock(&vma->vm_mm->page_table_lock);
+		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		ret = 1;
+	}

 	return ret;
 }
@@ -1110,20 +1095,11 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		goto out;
 	}

-	spin_lock(&mm->page_table_lock);
-	if (likely(pmd_trans_huge(*old_pmd))) {
-		if (pmd_trans_splitting(*old_pmd)) {
-			spin_unlock(&mm->page_table_lock);
-			wait_split_huge_page(vma->anon_vma, old_pmd);
-			ret = -1;
-		} else {
-			pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
-			VM_BUG_ON(!pmd_none(*new_pmd));
-			set_pmd_at(mm, new_addr, new_pmd, pmd);
-			spin_unlock(&mm->page_table_lock);
-			ret = 1;
-		}
-	} else {
+	ret = __pmd_trans_huge_lock(old_pmd, vma);
+	if (ret == 1) {
+		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+		VM_BUG_ON(!pmd_none(*new_pmd));
+		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		spin_unlock(&mm->page_table_lock);
 	}
 out:
@@ -1136,24 +1112,41 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	struct mm_struct *mm = vma->vm_mm;
 	int ret = 0;

-	spin_lock(&mm->page_table_lock);
+	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+		pmd_t entry;
+		entry = pmdp_get_and_clear(mm, addr, pmd);
+		entry = pmd_modify(entry, newprot);
+		set_pmd_at(mm, addr, pmd, entry);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * Returns 1 if a given pmd maps a stable (not under splitting) thp.
+ * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ *
+ * Note that if it returns 1, this routine returns without unlocking page
+ * table locks. So callers must unlock them.
+ */
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+{
+	spin_lock(&vma->vm_mm->page_table_lock);
 	if (likely(pmd_trans_huge(*pmd))) {
 		if (unlikely(pmd_trans_splitting(*pmd))) {
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(&vma->vm_mm->page_table_lock);
 			wait_split_huge_page(vma->anon_vma, pmd);
+			return -1;
 		} else {
-			pmd_t entry;
-
-			entry = pmdp_get_and_clear(mm, addr, pmd);
-			entry = pmd_modify(entry, newprot);
-			set_pmd_at(mm, addr, pmd, entry);
-			spin_unlock(&vma->vm_mm->page_table_lock);
-			ret = 1;
+			/* Thp mapped by 'pmd' is stable, so we can
+			 * handle it as it is. */
+			return 1;
 		}
-	} else
-		spin_unlock(&vma->vm_mm->page_table_lock);
-
-	return ret;
+	}
+	spin_unlock(&vma->vm_mm->page_table_lock);
+	return 0;
 }

 pmd_t *page_check_address_pmd(struct page *page,
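
For reference, the calling convention established by this change can be illustrated with a minimal sketch (not part of the commit; the caller name example_huge_pmd_op is hypothetical). Per the comment added above, a return value of 1 from __pmd_trans_huge_lock() means the pmd maps a stable thp and page_table_lock is still held, so the caller must drop it; -1 means the helper already unlocked and waited for the split to finish; 0 means there is no huge pmd to handle.

/*
 * Hypothetical caller, mirroring the pattern zap_huge_pmd() and
 * mincore_huge_pmd() follow after this patch: only the "== 1" path
 * still holds page_table_lock when the helper returns, so only that
 * path unlocks it.
 */
static int example_huge_pmd_op(struct vm_area_struct *vma, pmd_t *pmd)
{
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
		/* operate on the stable huge pmd under page_table_lock */
		spin_unlock(&vma->vm_mm->page_table_lock);
		ret = 1;
	}

	return ret;
}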