@@ -35,27 +35,52 @@ void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 	spin_unlock_irq(&task->sighand->siglock);
 }
 
-static int check_clock(const clockid_t which_clock)
+/*
+ * Functions for validating access to tasks.
+ */
+static struct task_struct *lookup_task(const pid_t pid, bool thread)
 {
-	int error = 0;
 	struct task_struct *p;
-	const pid_t pid = CPUCLOCK_PID(which_clock);
 
-	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
-		return -EINVAL;
+	if (!pid)
+		return thread ? current : current->group_leader;
+
+	p = find_task_by_vpid(pid);
+	if (!p || p == current)
+		return p;
+	if (thread)
+		return same_thread_group(p, current) ? p : NULL;
+	if (p == current)
+		return p;
+	return has_group_leader_pid(p) ? p : NULL;
+}
+
+static struct task_struct *__get_task_for_clock(const clockid_t clock,
+						bool getref)
+{
+	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
+	const pid_t pid = CPUCLOCK_PID(clock);
+	struct task_struct *p;
 
-	if (pid == 0)
-		return 0;
+	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
+		return NULL;
 
 	rcu_read_lock();
-	p = find_task_by_vpid(pid);
-	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
-		   same_thread_group(p, current) : has_group_leader_pid(p))) {
-		error = -EINVAL;
-	}
+	p = lookup_task(pid, thread);
+	if (p && getref)
+		get_task_struct(p);
 	rcu_read_unlock();
+	return p;
+}
 
-	return error;
+static inline struct task_struct *get_task_for_clock(const clockid_t clock)
+{
+	return __get_task_for_clock(clock, true);
+}
+
+static inline int validate_clock_permissions(const clockid_t clock)
+{
+	return __get_task_for_clock(clock, false) ? 0 : -EINVAL;
 }
 
 /*
@@ -125,7 +150,8 @@ static inline u64 virt_ticks(struct task_struct *p)
 static int
 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
 {
-	int error = check_clock(which_clock);
+	int error = validate_clock_permissions(which_clock);
+
 	if (!error) {
 		tp->tv_sec = 0;
 		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
@@ -142,20 +168,17 @@ posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
 }
 
 static int
-posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
+posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
 {
+	int error = validate_clock_permissions(clock);
+
 	/*
 	 * You can never reset a CPU clock, but we check for other errors
 	 * in the call before failing with EPERM.
 	 */
-	int error = check_clock(which_clock);
-	if (error == 0) {
-		error = -EPERM;
-	}
-	return error;
+	return error ? : -EPERM;
 }
 
-
 /*
  * Sample a per-thread clock for the given task.
  */
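
A minimal caller sketch, for illustration only: it is not part of this patch, and example_clock_user() is a hypothetical function. It shows the intended split between the two helpers added above: validate_clock_permissions() for a one-shot check that takes no reference, and get_task_for_clock() when the task pointer outlives the RCU section, in which case the caller is expected to drop the reference with put_task_struct() when done.

	/* Hypothetical caller, assuming the helpers introduced by this patch. */
	static int example_clock_user(const clockid_t clock)
	{
		struct task_struct *p;

		/* One-shot permission check: no reference is taken or held. */
		if (validate_clock_permissions(clock))
			return -EINVAL;

		/* Long-lived use: __get_task_for_clock(clock, true) took a ref ... */
		p = get_task_for_clock(clock);
		if (!p)
			return -EINVAL;

		/* ... which must be dropped once the task pointer is no longer used. */
		put_task_struct(p);
		return 0;
	}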