Commit 789f90f

Peter Zijlstra authored and Ingo Molnar committed
perf_counter: per user mlock gift
Instead of a per-process mlock gift for perf-counters, use a per-user
gift so that there is less of a DoS potential.

[ Impact: allow less worst-case unprivileged memory consumption ]

Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Corey Ashford <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 548e1dd commit 789f90f

2 files changed: +19, -7 lines changed
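
The core of the change: the mlock "gift" for perf-counter buffers is now accounted per user rather than per process, so all of a user's counters share one allowance and only the spill-over is charged against the mapping process's RLIMIT_MEMLOCK. The sketch below models that accounting in plain C11 as a compilable illustration; the names (user_account, charge_mmap, release_mmap) and the 4 KiB page size are assumptions for the example, not identifiers from the patch, which operates on user_struct in kernel/perf_counter.c.

/*
 * Userspace model of the per-user mlock gift (illustrative only).
 * charge_mmap() mirrors the accounting added to perf_mmap(),
 * release_mmap() mirrors the undo added to perf_mmap_close().
 */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */

static int sysctl_perf_counter_mlock = 512;	/* 'free' kb per user (new default) */

struct user_account {
	atomic_long locked_vm;			/* pages charged to this user's gift */
};

/*
 * Charge nr_pages data pages plus one control page to the user.
 * The amount exceeding the per-user gift is returned as "extra";
 * the real code charges that part to vma->vm_mm->locked_vm and
 * checks it against RLIMIT_MEMLOCK.
 */
static long charge_mmap(struct user_account *user, unsigned long nr_pages)
{
	long user_extra = nr_pages + 1;
	unsigned long user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
	unsigned long user_locked = atomic_load(&user->locked_vm) + user_extra;
	long extra = 0;

	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	atomic_fetch_add(&user->locked_vm, user_extra);
	return extra;
}

/* Give the pages back to the user's gift, as perf_mmap_close() now does. */
static void release_mmap(struct user_account *user, unsigned long nr_pages)
{
	atomic_fetch_sub(&user->locked_vm, nr_pages + 1);
}

int main(void)
{
	struct user_account user = { 0 };

	/* 512 kb gift = 128 pages; the first 64+1 pages fit, the second mapping spills 2 pages. */
	printf("extra after 1st mmap: %ld pages\n", charge_mmap(&user, 64));
	printf("extra after 2nd mmap: %ld pages\n", charge_mmap(&user, 64));

	release_mmap(&user, 64);
	release_mmap(&user, 64);
	return 0;
}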

include/linux/sched.h

Lines changed: 4 additions & 0 deletions

@@ -674,6 +674,10 @@ struct user_struct {
 	struct work_struct work;
 #endif
 #endif
+
+#ifdef CONFIG_PERF_COUNTERS
+	atomic_long_t locked_vm;
+#endif
 };
 
 extern int uids_sysfs_init(void);

kernel/perf_counter.c

Lines changed: 15 additions & 7 deletions

@@ -45,7 +45,7 @@ static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
-int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1522,6 +1522,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
 	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
 				      &counter->mmap_mutex)) {
+		struct user_struct *user = current_user();
+
+		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
 		vma->vm_mm->locked_vm -= counter->data->nr_locked;
 		perf_mmap_data_free(counter);
 		mutex_unlock(&counter->mmap_mutex);
@@ -1537,11 +1540,13 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_counter *counter = file->private_data;
+	struct user_struct *user = current_user();
 	unsigned long vma_size;
 	unsigned long nr_pages;
+	unsigned long user_locked, user_lock_limit;
 	unsigned long locked, lock_limit;
+	long user_extra, extra;
 	int ret = 0;
-	long extra;
 
 	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
 		return -EINVAL;
@@ -1569,15 +1574,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 	}
 
-	extra = nr_pages /* + 1 only account the data pages */;
-	extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
-	if (extra < 0)
-		extra = 0;
+	user_extra = nr_pages + 1;
+	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-	locked = vma->vm_mm->locked_vm + extra;
+	extra = 0;
+	if (user_locked > user_lock_limit)
+		extra = user_locked - user_lock_limit;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
+	locked = vma->vm_mm->locked_vm + extra;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		ret = -EPERM;
@@ -1590,6 +1597,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 
 	atomic_set(&counter->mmap_count, 1);
+	atomic_long_add(user_extra, &user->locked_vm);
 	vma->vm_mm->locked_vm += extra;
 	counter->data->nr_locked = extra;
 unlock:
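
For concreteness (assuming 4 KiB pages, so PAGE_SHIFT = 12): the new default of 512 'free' kb per user gives user_lock_limit = 512 >> (12 - 10) = 128 pages. A user's first mmap of 128 data pages plus the control page charges user_extra = 129 pages to user->locked_vm; the spill-over extra = 129 - 128 = 1 page goes into vma->vm_mm->locked_vm, is checked against RLIMIT_MEMLOCK, and is recorded in counter->data->nr_locked so perf_mmap_close() can undo exactly that part. Under the old code each counter received its own 128 kb gift, so a process creating many counters could keep many times that amount locked without ever hitting RLIMIT_MEMLOCK; with the per-user gift the free allowance no longer scales with the number of counters, which is the reduced DoS potential the changelog refers to. (The page size and mapping size here are illustrative assumptions, not values from the patch.)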
