@@ -168,9 +168,9 @@ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
  */
 static int pcpu_nr_empty_pop_pages;
 
-/* reclaim work to release fully free chunks, scheduled from free path */
-static void pcpu_reclaim(struct work_struct *work);
-static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+/* balance work is used to populate or destroy chunks asynchronously */
+static void pcpu_balance_workfn(struct work_struct *work);
+static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 
 static bool pcpu_addr_in_first_chunk(void *addr)
 {
@@ -1080,36 +1080,33 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * pcpu_balance_workfn - reclaim fully free chunks, workqueue function
  * @work: unused
  *
  * Reclaim all fully free chunks except for the first one.
- *
- * CONTEXT:
- * workqueue context.
  */
-static void pcpu_reclaim(struct work_struct *work)
+static void pcpu_balance_workfn(struct work_struct *work)
 {
-	LIST_HEAD(todo);
-	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+	LIST_HEAD(to_free);
+	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
 
 	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, head, list) {
+	list_for_each_entry_safe(chunk, next, free_head, list) {
 		WARN_ON(chunk->immutable);
 
 		/* spare the first one */
-		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
-		list_move(&chunk->list, &todo);
+		list_move(&chunk->list, &to_free);
 	}
 
 	spin_unlock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, &todo, list) {
+	list_for_each_entry_safe(chunk, next, &to_free, list) {
 		int rs, re;
 
 		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
@@ -1163,7 +1160,7 @@ void free_percpu(void __percpu *ptr)
 
 	list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 		if (pos != chunk) {
-			schedule_work(&pcpu_reclaim_work);
+			schedule_work(&pcpu_balance_work);
 			break;
 		}
 	}
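
The pattern the renamed work item relies on is the standard deferred-work idiom: the free path only marks state and calls schedule_work(); the work function later splices victims onto a private list under the lock and releases them after the lock is dropped. Below is a minimal, self-contained sketch of that idiom under hypothetical names (demo_*); it is not code from this patch, and it uses a plain spinlock where the real pcpu_balance_workfn also takes pcpu_alloc_mutex.

/*
 * Sketch of the deferred-free pattern used by pcpu_balance_work
 * (hypothetical demo_* names, not part of the patch).
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_item {
	struct list_head list;
	bool in_use;
};

static LIST_HEAD(demo_items);		/* all items, protected by demo_lock */
static DEFINE_SPINLOCK(demo_lock);

static void demo_balance_workfn(struct work_struct *work);
static DECLARE_WORK(demo_balance_work, demo_balance_workfn);

static void demo_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct demo_item *item, *next;

	/* collect unused items while holding the lock... */
	spin_lock_irq(&demo_lock);
	list_for_each_entry_safe(item, next, &demo_items, list)
		if (!item->in_use)
			list_move(&item->list, &to_free);
	spin_unlock_irq(&demo_lock);

	/* ...and release them outside of it, where sleeping is allowed */
	list_for_each_entry_safe(item, next, &to_free, list) {
		list_del(&item->list);
		kfree(item);
	}
}

/* called from a possibly atomic free path: defer the actual cleanup */
static void demo_item_unused(struct demo_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	item->in_use = false;
	spin_unlock_irqrestore(&demo_lock, flags);
	schedule_work(&demo_balance_work);
}

The two-phase drain mirrors the patched code: the spinlock is held only long enough to move chunks onto to_free, and the expensive depopulate/destroy work happens afterwards in workqueue (process) context.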