@@ -1040,27 +1040,90 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_MEMCG
-static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+static inline struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+		struct deferred_split *queue)
+{
+	if (mem_cgroup_disabled())
+		return NULL;
+	if (&NODE_DATA(folio_nid(folio))->deferred_split_queue == queue)
+		return NULL;
+	return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
+
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
 {
 	struct mem_cgroup *memcg = folio_memcg(folio);
-	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
-	if (memcg)
-		return &memcg->deferred_split_queue;
-	else
-		return &pgdat->deferred_split_queue;
+	return memcg ? &memcg->deferred_split_queue : NULL;
 }
 #else
-static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+static inline struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+		struct deferred_split *queue)
 {
-	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
+	return NULL;
+}
 
-	return &pgdat->deferred_split_queue;
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
+{
+	return NULL;
 }
 #endif
 
+static struct deferred_split *folio_split_queue(struct folio *folio)
+{
+	struct deferred_split *queue = folio_memcg_split_queue(folio);
+
+	return queue ?: &NODE_DATA(folio_nid(folio))->deferred_split_queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+	struct deferred_split *queue;
+
+	rcu_read_lock();
+retry:
+	queue = folio_split_queue(folio);
+	spin_lock(&queue->split_queue_lock);
+
+	if (unlikely(folio_split_queue_memcg(folio, queue) != folio_memcg(folio))) {
+		spin_unlock(&queue->split_queue_lock);
+		goto retry;
+	}
+	rcu_read_unlock();
+
+	return queue;
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+	struct deferred_split *queue;
+
+	rcu_read_lock();
+retry:
+	queue = folio_split_queue(folio);
+	spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+	if (unlikely(folio_split_queue_memcg(folio, queue) != folio_memcg(folio))) {
+		spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+		goto retry;
+	}
+	rcu_read_unlock();
+
+	return queue;
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+	spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+		unsigned long flags)
+{
+	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
 static inline bool is_transparent_hugepage(const struct folio *folio)
 {
 	if (!folio_test_large(folio))
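The helpers added above resolve a folio's deferred-split queue and take its lock in a single step, re-checking the choice once the lock is held. As a reading aid, here is an annotated restatement of folio_split_queue_lock() from the hunk above (not additional code in the patch; the remark about reparenting is an assumption about why the memcg association can change):

static struct deferred_split *annotated_folio_split_queue_lock(struct folio *folio)
{
	struct deferred_split *queue;

	/* Keep memcg structures stable while the queue is looked up. */
	rcu_read_lock();
retry:
	/* Memcg queue if the folio is charged, otherwise the node queue. */
	queue = folio_split_queue(folio);
	spin_lock(&queue->split_queue_lock);

	/*
	 * The folio's memcg may have changed between the lookup and taking
	 * the lock (e.g. if the folio was reparented); if the locked queue
	 * no longer matches folio_memcg(), drop the lock and retry.
	 */
	if (unlikely(folio_split_queue_memcg(folio, queue) != folio_memcg(folio))) {
		spin_unlock(&queue->split_queue_lock);
		goto retry;
	}
	rcu_read_unlock();

	return queue;
}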
@@ -3361,7 +3424,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order)
 {
 	struct folio *folio = page_folio(page);
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	/* reset xarray order to new order after split */
 	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
 	bool is_anon = folio_test_anon(folio);
@@ -3509,7 +3572,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	}
 
 	/* Prevent deferred_split_scan() touching ->_refcount */
-	spin_lock(&ds_queue->split_queue_lock);
+	ds_queue = folio_split_queue_lock(folio);
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
 		if (folio_order(folio) > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
@@ -3527,7 +3590,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 			 */
 			list_del_init(&folio->_deferred_list);
 		}
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 		if (mapping) {
 			int nr = folio_nr_pages(folio);
 
@@ -3552,7 +3615,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		__split_huge_page(page, list, end, new_order);
 		ret = 0;
 	} else {
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 fail:
 		if (mapping)
 			xas_unlock(&xas);
@@ -3622,8 +3685,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 	WARN_ON_ONCE(folio_ref_count(folio));
 	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio));
 
-	ds_queue = get_deferred_split_queue(folio);
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
 		if (folio_test_partially_mapped(folio)) {
@@ -3634,18 +3696,15 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 		list_del_init(&folio->_deferred_list);
 		unqueued = true;
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 
 	return unqueued;	/* useful for debug warnings */
 }
 
 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
 void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
-#ifdef CONFIG_MEMCG
-	struct mem_cgroup *memcg = folio_memcg(folio);
-#endif
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -3668,7 +3727,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 	if (folio_test_swapcache(folio))
 		return;
 
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
 	if (partially_mapped) {
 		if (!folio_test_partially_mapped(folio)) {
 			folio_set_partially_mapped(folio);
@@ -3683,15 +3742,15 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 		VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
 	}
 	if (list_empty(&folio->_deferred_list)) {
+		struct mem_cgroup *memcg;
+		memcg = folio_split_queue_memcg(folio, ds_queue);
 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
 		ds_queue->split_queue_len++;
-#ifdef CONFIG_MEMCG
 		if (memcg)
 			set_shrinker_bit(memcg, folio_nid(folio),
-					 deferred_split_shrinker->id);
-#endif
+					 shrinker_id(deferred_split_shrinker));
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
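Taken together, callers such as deferred_split_folio() and __folio_unqueue_deferred_split() no longer compute the queue themselves: they obtain the already-locked queue from the folio and release it through the matching helper. A minimal caller-side sketch, assuming only the helpers introduced in this patch; the function name and body are hypothetical and only illustrate the intended lock/unlock pairing:

static void example_unqueue_folio(struct folio *folio)
{
	struct deferred_split *ds_queue;
	unsigned long flags;

	/* Resolve the folio's split queue (memcg or node) and lock it with IRQs disabled. */
	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);

	if (!list_empty(&folio->_deferred_list)) {
		ds_queue->split_queue_len--;
		list_del_init(&folio->_deferred_list);
	}

	/* Unlock through the helper that matches the lock variant used above. */
	split_queue_unlock_irqrestore(ds_queue, flags);
}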