
Commit 0a4954a

ftang1 authored and torvalds committed
percpu_counter: add percpu_counter_sync()
A percpu_counter's accuracy is related to its batch size. For a percpu_counter with a big batch, its deviation could be big, so when the counter's batch is changed at runtime to a smaller value for better accuracy, there can also be a requirement to reduce the big deviation that has already accumulated. So add a percpu_counter sync function to be run on each CPU.

Reported-by: kernel test robot <[email protected]>
Signed-off-by: Feng Tang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Cc: Dennis Zhou <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Qian Cai <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Huang Ying <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Haiyang Zhang <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: "K. Y. Srinivasan" <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Tim Chen <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
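As a usage illustration only, not part of this commit: a caller that shrinks a counter's batch at runtime could then run the new sync hook once on every CPU, for example via schedule_on_each_cpu(). Apart from percpu_counter_sync() itself, everything in the sketch below is an assumption (the counter my_counter, the caller-maintained batch my_counter_batch, the helper names, and the choice of schedule_on_each_cpu()):

#include <linux/percpu_counter.h>
#include <linux/workqueue.h>

/*
 * Hypothetical counter and the batch value its users would pass to
 * percpu_counter_add_batch(); both are assumptions for this sketch.
 */
static struct percpu_counter my_counter;
static s32 my_counter_batch = 64;

/* Work callback: folds the local count of whichever CPU runs it. */
static void my_counter_sync_work(struct work_struct *dummy)
{
	percpu_counter_sync(&my_counter);
}

/*
 * Switch to a smaller batch for better accuracy, then drain the
 * deviation that built up under the old, larger batch.
 */
static void my_counter_shrink_batch(s32 new_batch)
{
	my_counter_batch = new_batch;
	schedule_on_each_cpu(my_counter_sync_work);
}

schedule_on_each_cpu() runs the callback synchronously on every online CPU, so by the time my_counter_shrink_batch() returns, each CPU's local delta has been folded into the global count.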
1 parent 4e2ee51 commit 0a4954a

2 files changed: +23, -0 lines

include/linux/percpu_counter.h

Lines changed: 4 additions & 0 deletions
@@ -44,6 +44,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
 			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+void percpu_counter_sync(struct percpu_counter *fbc);
 
 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 {
@@ -172,6 +173,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 	return true;
 }
 
+static inline void percpu_counter_sync(struct percpu_counter *fbc)
+{
+}
 #endif	/* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)

lib/percpu_counter.c

Lines changed: 19 additions & 0 deletions
@@ -98,6 +98,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 }
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
+/*
+ * For percpu_counter with a big batch, the deviation of its count could
+ * be big, and there is a requirement to reduce the deviation, like when the
+ * counter's batch could be runtime decreased to get a better accuracy,
+ * which can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+	unsigned long flags;
+	s64 count;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count;
+	__this_cpu_sub(*fbc->counters, count);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
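A side note added here for intuition, not text from the commit: percpu_counter_add_batch() keeps each CPU's local part smaller than the batch in absolute value, so with a big batch the cheap percpu_counter_read() can drift from the exact value by up to roughly the number of CPUs times the batch. Once percpu_counter_sync() has run on every CPU and updaters are quiescent, that drift is gone. A hypothetical check (the helper name and the WARN_ON are assumptions for this sketch):

#include <linux/bug.h>
#include <linux/percpu_counter.h>

/*
 * Hypothetical sanity check: with updaters quiesced and the sync run on
 * every CPU, the approximate read and the exact per-CPU sum should agree.
 */
static void check_counter_drained(struct percpu_counter *fbc)
{
	WARN_ON(percpu_counter_read(fbc) != percpu_counter_sum(fbc));
}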
