
Commit a91dbff

wdebruij authored and davem330 committed
sock: ulimit on MSG_ZEROCOPY pages
Bound the number of pages that a user may pin.

Follow the lead of perf tools to maintain a per-user bound on memory
locked pages, as in commit 789f90f ("perf_counter: per user mlock gift").

Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 4ab6c99 commit a91dbff
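
For context beyond the patch itself: once this bound is in place, an unprivileged sender whose zerocopy pages would push the per-user locked_vm total past RLIMIT_MEMLOCK has the charge refused (the helper returns -ENOBUFS), unless the task holds CAP_IPC_LOCK. The sketch below is a minimal, hypothetical userspace sender showing where that limit becomes visible; the loopback address and port, the buffer size, and the assumption that the refusal surfaces as ENOBUFS from send() are illustrative, not taken from this commit.

/* Minimal MSG_ZEROCOPY sender sketch (not part of this patch).
 * Assumes a v4.14+ kernel with SO_ZEROCOPY/MSG_ZEROCOPY and a TCP
 * listener on 127.0.0.1:8000 (placeholder address and port).
 */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8000),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	static char buf[1 << 20];	/* 1 MB payload: many pinned pages */
	int fd, one = 1;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)))
		exit(1);

	/* Opt in per socket; MSG_ZEROCOPY alone is ignored otherwise. */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		exit(1);

	/* The pages backing buf stay pinned until the kernel signals
	 * completion; the pinned total is charged to this user's
	 * locked_vm and bounded by RLIMIT_MEMLOCK (CAP_IPC_LOCK skips
	 * the charge).
	 */
	if (send(fd, buf, sizeof(buf), MSG_ZEROCOPY) < 0 && errno == ENOBUFS)
		fprintf(stderr, "zerocopy refused; raise RLIMIT_MEMLOCK?\n");

	close(fd);
	return 0;
}

A real sender would also drain completion notifications from the socket error queue (recvmsg with MSG_ERRQUEUE) before reusing buf; the completion is the signal that the kernel has released the pages and returned the charge to locked_vm.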

File tree: 3 files changed (+55 −1 lines)


include/linux/sched/user.h

Lines changed: 2 additions & 1 deletion
@@ -36,7 +36,8 @@ struct user_struct {
 	struct hlist_node uidhash_node;
 	kuid_t uid;
 
-#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
+    defined(CONFIG_NET)
 	atomic_long_t locked_vm;
 #endif
 };

include/linux/skbuff.h

Lines changed: 5 additions & 0 deletions
@@ -457,6 +457,11 @@ struct ubuf_info {
 		};
 	};
 	atomic_t refcnt;
+
+	struct mmpin {
+		struct user_struct *user;
+		unsigned int num_pg;
+	} mmp;
 };
 
 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

net/core/skbuff.c

Lines changed: 48 additions & 0 deletions
@@ -897,6 +897,44 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
+static int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
+{
+	unsigned long max_pg, num_pg, new_pg, old_pg;
+	struct user_struct *user;
+
+	if (capable(CAP_IPC_LOCK) || !size)
+		return 0;
+
+	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
+	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	user = mmp->user ? : current_user();
+
+	do {
+		old_pg = atomic_long_read(&user->locked_vm);
+		new_pg = old_pg + num_pg;
+		if (new_pg > max_pg)
+			return -ENOBUFS;
+	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
+		 old_pg);
+
+	if (!mmp->user) {
+		mmp->user = get_uid(user);
+		mmp->num_pg = num_pg;
+	} else {
+		mmp->num_pg += num_pg;
+	}
+
+	return 0;
+}
+
+static void mm_unaccount_pinned_pages(struct mmpin *mmp)
+{
+	if (mmp->user) {
+		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
+		free_uid(mmp->user);
+	}
+}
+
 struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 {
 	struct ubuf_info *uarg;
@@ -913,6 +951,12 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 
 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
 	uarg = (void *)skb->cb;
+	uarg->mmp.user = NULL;
+
+	if (mm_account_pinned_pages(&uarg->mmp, size)) {
+		kfree_skb(skb);
+		return NULL;
+	}
 
 	uarg->callback = sock_zerocopy_callback;
 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
@@ -956,6 +1000,8 @@ struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
 
 		next = (u32)atomic_read(&sk->sk_zckey);
 		if ((u32)(uarg->id + uarg->len) == next) {
+			if (mm_account_pinned_pages(&uarg->mmp, size))
+				return NULL;
 			uarg->len++;
 			uarg->bytelen = bytelen;
 			atomic_set(&sk->sk_zckey, ++next);
@@ -1038,6 +1084,8 @@ EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
 void sock_zerocopy_put(struct ubuf_info *uarg)
 {
 	if (uarg && atomic_dec_and_test(&uarg->refcnt)) {
+		mm_unaccount_pinned_pages(&uarg->mmp);
+
 		if (uarg->callback)
 			uarg->callback(uarg, uarg->zerocopy);
 		else
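
A note on the accounting helper above, with an illustrative sketch that is not kernel code: the charge is computed pessimistically as (size >> PAGE_SHIFT) + 2 pages, since a buffer of size bytes at an arbitrary offset can straddle one extra partial page at each end, and the shared locked_vm counter is updated with a lock-free compare-and-swap retry loop so that concurrent senders under the same user never push the total past RLIMIT_MEMLOCK. The fragment below re-creates that pattern with C11 atomics in userspace; the helper names, the 4 KiB page-shift, and the caller-supplied limit are assumptions for the sketch, not kernel interfaces.

/* Userspace analogue of mm_account/unaccount_pinned_pages using C11
 * atomics. Hypothetical helper names; page size assumed to be 4 KiB.
 */
#include <stdatomic.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12		/* assume 4 KiB pages */

static atomic_ulong locked_pages;	/* stands in for user->locked_vm */

/* Charge the worst-case page count for a buffer of `size` bytes.
 * Returns the number of pages charged, or 0 if the limit would be
 * exceeded (the kernel helper returns -ENOBUFS in that case).
 */
static unsigned long charge_pages(size_t size, unsigned long limit_pages)
{
	/* An unaligned buffer of `size` bytes can touch
	 * (size >> PAGE_SHIFT) + 2 pages: one extra partial page at
	 * each end.
	 */
	unsigned long num_pg = (size >> SKETCH_PAGE_SHIFT) + 2;
	unsigned long old_pg = atomic_load(&locked_pages);
	unsigned long new_pg;

	do {
		new_pg = old_pg + num_pg;
		if (new_pg > limit_pages)
			return 0;
		/* On CAS failure, old_pg is refreshed with the current
		 * value and the bound is re-checked before retrying.
		 */
	} while (!atomic_compare_exchange_weak(&locked_pages, &old_pg,
					       new_pg));

	return num_pg;	/* the patch remembers this in mmp->num_pg */
}

/* Release exactly what was charged, as mm_unaccount_pinned_pages does
 * with the count stored alongside the pinned user.
 */
static void uncharge_pages(unsigned long num_pg)
{
	atomic_fetch_sub(&locked_pages, num_pg);
}

In this sketch a caller would pass something like rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT as limit_pages and skip the charge entirely when it holds CAP_IPC_LOCK, mirroring the early return in the kernel helper.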
