
Commit 50f16a8

Peter Zijlstra authored and Ingo Molnar committed
perf: Remove type specific target pointers
The only reason CQM had to use a hard-coded pmu type was so it could use cqm_target in hw_perf_event.

Do away with the {tp,bp,cqm}_target pointers and provide a non type specific one.

This allows us to do away with that silly pmu type as well.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Vince Weaver <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 4e16ed9 commit 50f16a8

File tree: 8 files changed (+19, -29 lines)
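Before the per-file hunks, a minimal sketch (not part of the patch; the pmu and callback names are made up) of what the unified field buys a pmu driver: ->event_init can read the target task directly from event->hw.target instead of picking tp_target, bp_target or cqm_target based on a hard-coded event type.

/*
 * Sketch only -- hypothetical pmu callback, not code from this commit.
 */
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

static int example_pmu_event_init(struct perf_event *event)
{
	/*
	 * After this patch every task-bound event carries its task in
	 * event->hw.target, so no type-specific pointer and no fixed
	 * PERF_TYPE_* id is required to find it.
	 */
	struct task_struct *task = event->hw.target;

	if (!task)
		return -EINVAL;	/* this example only handles per-task events */

	/* per-task setup using 'task' would go here */
	return 0;
}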

arch/arm/kernel/hw_breakpoint.c

Lines changed: 1 addition & 1 deletion
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Per-cpu breakpoints are not supported by our stepping
 	 * mechanism.
 	 */
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		return -EINVAL;
 
 	/*

arch/arm64/kernel/hw_breakpoint.c

Lines changed: 1 addition & 1 deletion
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Disallow per-task kernel breakpoints since these would
 	 * complicate the stepping code.
 	 */
-	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
 		return -EINVAL;
 
 	return 0;

arch/x86/kernel/cpu/perf_event_intel_cqm.c

Lines changed: 3 additions & 4 deletions
@@ -263,7 +263,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 	/*
 	 * Events that target same task are placed into the same cache group.
 	 */
-	if (a->hw.cqm_target == b->hw.cqm_target)
+	if (a->hw.target == b->hw.target)
 		return true;
 
 	/*
@@ -279,7 +279,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.cqm_target);
+		return perf_cgroup_from_task(event->hw.target);
 
 	return event->cgrp;
 }
@@ -1365,8 +1365,7 @@ static int __init intel_cqm_init(void)
 
 	__perf_cpu_notifier(intel_cqm_cpu_notifier);
 
-	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm",
-				PERF_TYPE_INTEL_CQM);
+	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
 	if (ret)
 		pr_err("Intel CQM perf registration failed: %d\n", ret);
 	else
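A note on the "-1" above: perf_pmu_register() treats a negative type as a request for a dynamically allocated pmu type id, so intel_cqm no longer needs the reserved PERF_TYPE_INTEL_CQM slot that is removed from the uapi header below. A minimal registration sketch, using a hypothetical pmu (not code from this commit):

/* Sketch only -- hypothetical module registering a pmu with a dynamic type. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/perf_event.h>

static int demo_pmu_event_init(struct perf_event *event)
{
	return -ENOENT;	/* placeholder: accept no events */
}

static struct pmu demo_pmu = {
	.event_init	= demo_pmu_event_init,
	/* .add, .del, .start, .stop, .read elided for brevity */
};

static int __init demo_pmu_init(void)
{
	/* type == -1: let the perf core hand out a dynamic type id */
	return perf_pmu_register(&demo_pmu, "demo_pmu", -1);
}
device_initcall(demo_pmu_init);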

include/linux/perf_event.h

Lines changed: 1 addition & 3 deletions
@@ -119,7 +119,6 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 		struct { /* tracepoint */
-			struct task_struct	*tp_target;
 			/* for tp_event->class */
 			struct list_head	tp_list;
 		};
@@ -129,7 +128,6 @@ struct hw_perf_event {
 			struct list_head	cqm_events_entry;
 			struct list_head	cqm_groups_entry;
 			struct list_head	cqm_group_entry;
-			struct task_struct	*cqm_target;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 		struct { /* breakpoint */
@@ -138,12 +136,12 @@ struct hw_perf_event {
 			 * problem hw_breakpoint has with context
 			 * creation and event initalization.
 			 */
-			struct task_struct		*bp_target;
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
 		};
 #endif
 	};
+	struct task_struct		*target;
 	int				state;
 	local64_t			prev_count;
 	u64				sample_period;

include/uapi/linux/perf_event.h

Lines changed: 0 additions & 1 deletion
@@ -32,7 +32,6 @@ enum perf_type_id {
 	PERF_TYPE_HW_CACHE		= 3,
 	PERF_TYPE_RAW			= 4,
 	PERF_TYPE_BREAKPOINT		= 5,
-	PERF_TYPE_INTEL_CQM		= 6,
 
 	PERF_TYPE_MAX,			/* non-ABI */
 };

kernel/events/core.c

Lines changed: 4 additions & 10 deletions
@@ -7171,18 +7171,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
-
-		if (attr->type == PERF_TYPE_TRACEPOINT)
-			event->hw.tp_target = task;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
 		/*
-		 * hw_breakpoint is a bit difficult here..
+		 * XXX pmu::event_init needs to know what task to account to
+		 * and we cannot use the ctx information because we need the
+		 * pmu before we get a ctx.
 		 */
-		else if (attr->type == PERF_TYPE_BREAKPOINT)
-			event->hw.bp_target = task;
-#endif
-		else if (attr->type == PERF_TYPE_INTEL_CQM)
-			event->hw.cqm_target = task;
+		event->hw.target = task;
 	}
 
 	if (!overflow_handler && parent_event) {

kernel/events/hw_breakpoint.c

Lines changed: 4 additions & 4 deletions
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
-	struct task_struct *tsk = bp->hw.bp_target;
+	struct task_struct *tsk = bp->hw.target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk &&
+		if (iter->hw.target == tsk &&
 		    find_slot_idx(iter) == type &&
 		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 	int nr;
 
 	nr = info->cpu_pinned;
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		nr += max_task_bp_pinned(cpu, type);
 	else
 		nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!bp->hw.bp_target) {
+	if (!bp->hw.target) {
 		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
 		return;
 	}

kernel/trace/trace_uprobe.c

Lines changed: 5 additions & 5 deletions
@@ -1005,7 +1005,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 		return true;
 
 	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
-		if (event->hw.tp_target->mm == mm)
+		if (event->hw.target->mm == mm)
 			return true;
 	}
 
@@ -1015,18 +1015,18 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 static inline bool
 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
 {
-	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
 }
 
 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 {
 	bool done;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		list_del(&event->hw.tp_list);
 		done = tu->filter.nr_systemwide ||
-			(event->hw.tp_target->flags & PF_EXITING) ||
+			(event->hw.target->flags & PF_EXITING) ||
 			uprobe_filter_event(tu, event);
 	} else {
 		tu->filter.nr_systemwide--;
@@ -1046,7 +1046,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 	int err;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		/*
 		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
