Skip to content

Commit c7138f3

Browse files
Peter Zijlstra authored and Ingo Molnar committed
perf_counter: fix perf_poll()
Impact: fix kerneltop 100% CPU usage Only return a poll event when there's actually been one, poll_wait() doesn't actually wait for the waitq you pass it, it only enqueues you on it. Only once all FDs have been iterated and none of them returned a poll-event will it schedule(). Also make it return POLL_HUP when there's no mmap() area to read from. Further, fix a silly bug in the write code. Reported-by: Mike Galbraith <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: Paul Mackerras <[email protected]> Cc: Arjan van de Ven <[email protected]> Orig-LKML-Reference: <1237897096.24918.181.camel@twins> Signed-off-by: Ingo Molnar <[email protected]>
1 parent f66c6b2 commit c7138f3

File tree

2 files changed

+13
-2
lines changed

2 files changed

+13
-2
lines changed

include/linux/perf_counter.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -246,6 +246,7 @@ struct file;
246246
struct perf_mmap_data {
247247
struct rcu_head rcu_head;
248248
int nr_pages;
249+
atomic_t wakeup;
249250
atomic_t head;
250251
struct perf_counter_mmap_page *user_page;
251252
void *data_pages[0];

kernel/perf_counter.c

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1161,7 +1161,16 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
11611161
static unsigned int perf_poll(struct file *file, poll_table *wait)
11621162
{
11631163
struct perf_counter *counter = file->private_data;
1164-
unsigned int events = POLLIN;
1164+
struct perf_mmap_data *data;
1165+
unsigned int events;
1166+
1167+
rcu_read_lock();
1168+
data = rcu_dereference(counter->data);
1169+
if (data)
1170+
events = atomic_xchg(&data->wakeup, 0);
1171+
else
1172+
events = POLL_HUP;
1173+
rcu_read_unlock();
11651174

11661175
poll_wait(file, &counter->waitq, wait);
11671176

@@ -1425,7 +1434,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
14251434

14261435
do {
14271436
offset = head = atomic_read(&data->head);
1428-
head += sizeof(u64);
1437+
head += size;
14291438
} while (atomic_cmpxchg(&data->head, offset, head) != offset);
14301439

14311440
wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
@@ -1446,6 +1455,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
14461455
* generate a poll() wakeup for every page boundary crossed
14471456
*/
14481457
if (wakeup) {
1458+
atomic_xchg(&data->wakeup, POLL_IN);
14491459
__perf_counter_update_userpage(counter, data);
14501460
if (nmi) {
14511461
counter->wakeup_pending = 1;

0 commit comments

Comments
 (0)