@@ -2200,33 +2200,41 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
 	}
 
 	src_page = pmd_page(src_pmdval);
-	if (unlikely(!PageAnonExclusive(src_page))) {
-		spin_unlock(src_ptl);
-		return -EBUSY;
-	}
 
-	src_folio = page_folio(src_page);
-	folio_get(src_folio);
+	if (!is_huge_zero_pmd(src_pmdval)) {
+		if (unlikely(!PageAnonExclusive(src_page))) {
+			spin_unlock(src_ptl);
+			return -EBUSY;
+		}
+
+		src_folio = page_folio(src_page);
+		folio_get(src_folio);
+	} else
+		src_folio = NULL;
+
 	spin_unlock(src_ptl);
 
 	flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
				src_addr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
-	folio_lock(src_folio);
+	if (src_folio) {
+		folio_lock(src_folio);
 
-	/*
-	 * split_huge_page walks the anon_vma chain without the page
-	 * lock. Serialize against it with the anon_vma lock, the page
-	 * lock is not enough.
-	 */
-	src_anon_vma = folio_get_anon_vma(src_folio);
-	if (!src_anon_vma) {
-		err = -EAGAIN;
-		goto unlock_folio;
-	}
-	anon_vma_lock_write(src_anon_vma);
+		/*
+		 * split_huge_page walks the anon_vma chain without the page
+		 * lock. Serialize against it with the anon_vma lock, the page
+		 * lock is not enough.
+		 */
+		src_anon_vma = folio_get_anon_vma(src_folio);
+		if (!src_anon_vma) {
+			err = -EAGAIN;
+			goto unlock_folio;
+		}
+		anon_vma_lock_write(src_anon_vma);
+	} else
+		src_anon_vma = NULL;
 
 	dst_ptl = pmd_lockptr(mm, dst_pmd);
 	double_pt_lock(src_ptl, dst_ptl);
@@ -2235,45 +2243,54 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
 		err = -EAGAIN;
 		goto unlock_ptls;
 	}
-	if (folio_maybe_dma_pinned(src_folio) ||
-	    !PageAnonExclusive(&src_folio->page)) {
-		err = -EBUSY;
-		goto unlock_ptls;
-	}
+	if (src_folio) {
+		if (folio_maybe_dma_pinned(src_folio) ||
+		    !PageAnonExclusive(&src_folio->page)) {
+			err = -EBUSY;
+			goto unlock_ptls;
+		}
 
-	if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
-	    WARN_ON_ONCE(!folio_test_anon(src_folio))) {
-		err = -EBUSY;
-		goto unlock_ptls;
-	}
+		if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
+		    WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+			err = -EBUSY;
+			goto unlock_ptls;
+		}
 
-	folio_move_anon_rmap(src_folio, dst_vma);
-	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+		folio_move_anon_rmap(src_folio, dst_vma);
+		WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
 
-	src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
-	/* Folio got pinned from under us. Put it back and fail the move. */
-	if (folio_maybe_dma_pinned(src_folio)) {
-		set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
-		err = -EBUSY;
-		goto unlock_ptls;
-	}
+		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+		/* Folio got pinned from under us. Put it back and fail the move. */
+		if (folio_maybe_dma_pinned(src_folio)) {
+			set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
+			err = -EBUSY;
+			goto unlock_ptls;
+		}
 
-	_dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
-	/* Follow mremap() behavior and treat the entry dirty after the move */
-	_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+		_dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+		/* Follow mremap() behavior and treat the entry dirty after the move */
+		_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+	} else {
+		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+		_dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
+	}
 	set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
 
 	src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
 	pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
 unlock_ptls:
 	double_pt_unlock(src_ptl, dst_ptl);
-	anon_vma_unlock_write(src_anon_vma);
-	put_anon_vma(src_anon_vma);
+	if (src_anon_vma) {
+		anon_vma_unlock_write(src_anon_vma);
+		put_anon_vma(src_anon_vma);
+	}
 unlock_folio:
 	/* unblock rmap walks */
-	folio_unlock(src_folio);
+	if (src_folio)
+		folio_unlock(src_folio);
 	mmu_notifier_invalidate_range_end(&range);
-	folio_put(src_folio);
+	if (src_folio)
+		folio_put(src_folio);
 	return err;
 }
 #endif /* CONFIG_USERFAULTFD */
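
For reference, move_pages_huge_pmd() is the PMD-level backend of the userfaultfd UFFDIO_MOVE ioctl; the change above lets it accept a huge-zero-page PMD by leaving src_folio and src_anon_vma NULL and skipping the folio and anon_vma locking for that case. Below is a minimal userspace sketch, not part of this commit, of how UFFDIO_MOVE is driven. It assumes Linux 6.8+ UAPI headers (UFFDIO_MOVE, UFFD_FEATURE_MOVE), omits most error handling, and only reaches the huge-PMD path when src and dst happen to be PMD-aligned and THP-backed; otherwise the kernel falls back to moving the range page by page.

/*
 * Minimal sketch (not from this commit): moving one 2 MiB region with
 * UFFDIO_MOVE from userspace.  Error handling is mostly omitted.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LEN (2UL << 20)		/* one PMD-sized (2 MiB) range on x86-64 */

int main(void)
{
	int uffd = syscall(__NR_userfaultfd,
			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
	struct uffdio_api api = { .api = UFFD_API, .features = UFFD_FEATURE_MOVE };

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;	/* no userfaultfd or no MOVE support */

	char *src = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *dst = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	madvise(src, LEN, MADV_HUGEPAGE);	/* best effort: back src with a THP */
	memset(src, 0x5a, LEN);			/* populate src; dst stays unpopulated */

	/* UFFDIO_MOVE requires the destination range to be registered. */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)dst, .len = LEN },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	/* Remap src's pages into dst instead of copying them. */
	struct uffdio_move mv = {
		.dst = (unsigned long)dst,
		.src = (unsigned long)src,
		.len = LEN,
		.mode = 0,
	};
	if (ioctl(uffd, UFFDIO_MOVE, &mv))
		perror("UFFDIO_MOVE");
	else
		printf("moved %lld bytes, dst[0] = 0x%x\n",
		       (long long)mv.move, (unsigned char)dst[0]);

	close(uffd);
	return 0;
}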