Skip to content

Commit c7cf89b

Browse files
committed
Merge branch 'sctp-fully-support-memory-accounting'
Xin Long says: ==================== sctp: fully support memory accounting sctp memory accounting is added in this patchset by using these kernel APIs on send side: - sk_mem_charge() - sk_mem_uncharge() - sk_wmem_schedule() - sk_under_memory_pressure() - sk_mem_reclaim() and these on receive side: - sk_mem_charge() - sk_mem_uncharge() - sk_rmem_schedule() - sk_under_memory_pressure() - sk_mem_reclaim() With sctp memory accounting, we can limit the memory allocation by either sysctl: # sysctl -w net.sctp.sctp_mem="10 20 50" or cgroup: # echo $((8<<14)) > \ /sys/fs/cgroup/memory/sctp_mem/memory.kmem.tcp.limit_in_bytes When the socket is under memory pressure, the send side will block and wait, while the receive side will renege or drop. v1->v2: - add the missing Reported/Tested/Acked-bys. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 93144b0 + 9dde27d commit c7cf89b

File tree

5 files changed

+23
-17
lines changed

5 files changed

+23
-17
lines changed

include/net/sctp/sctp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
421421
/*
422422
* This mimics the behavior of skb_set_owner_r
423423
*/
424-
sk->sk_forward_alloc -= event->rmem_len;
424+
sk_mem_charge(sk, event->rmem_len);
425425
}
426426

427427
/* Tests if the list has one and only one entry. */

net/sctp/sm_statefuns.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
64126412
* in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
64136413
* memory usage too much
64146414
*/
6415-
if (*sk->sk_prot_creator->memory_pressure) {
6415+
if (sk_under_memory_pressure(sk)) {
64166416
if (sctp_tsnmap_has_gap(map) &&
64176417
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
64186418
pr_debug("%s: under pressure, reneging for tsn:%u\n",
64196419
__func__, tsn);
64206420
deliver = SCTP_CMD_RENEGE;
6421-
}
6421+
} else {
6422+
sk_mem_reclaim(sk);
6423+
}
64226424
}
64236425

64246426
/*

net/sctp/socket.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1913,7 +1913,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
19131913
if (sctp_wspace(asoc) < (int)msg_len)
19141914
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
19151915

1916-
if (sctp_wspace(asoc) <= 0) {
1916+
if (sk_under_memory_pressure(sk))
1917+
sk_mem_reclaim(sk);
1918+
1919+
if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
19171920
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
19181921
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
19191922
if (err)
@@ -8930,7 +8933,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
89308933
goto do_error;
89318934
if (signal_pending(current))
89328935
goto do_interrupted;
8933-
if ((int)msg_len <= sctp_wspace(asoc))
8936+
if (sk_under_memory_pressure(sk))
8937+
sk_mem_reclaim(sk);
8938+
if ((int)msg_len <= sctp_wspace(asoc) &&
8939+
sk_wmem_schedule(sk, msg_len))
89348940
break;
89358941

89368942
/* Let another process have a go. Since we are going

net/sctp/ulpevent.c

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
634634
gfp_t gfp)
635635
{
636636
struct sctp_ulpevent *event = NULL;
637-
struct sk_buff *skb;
638-
size_t padding, len;
637+
struct sk_buff *skb = chunk->skb;
638+
struct sock *sk = asoc->base.sk;
639+
size_t padding, datalen;
639640
int rx_count;
640641

641642
/*
@@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
646647
if (asoc->ep->rcvbuf_policy)
647648
rx_count = atomic_read(&asoc->rmem_alloc);
648649
else
649-
rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
650+
rx_count = atomic_read(&sk->sk_rmem_alloc);
650651

651-
if (rx_count >= asoc->base.sk->sk_rcvbuf) {
652+
datalen = ntohs(chunk->chunk_hdr->length);
652653

653-
if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
654-
(!sk_rmem_schedule(asoc->base.sk, chunk->skb,
655-
chunk->skb->truesize)))
656-
goto fail;
657-
}
654+
if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
655+
goto fail;
658656

659657
/* Clone the original skb, sharing the data. */
660658
skb = skb_clone(chunk->skb, gfp);
@@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
681679
* The sender should never pad with more than 3 bytes. The receiver
682680
* MUST ignore the padding bytes.
683681
*/
684-
len = ntohs(chunk->chunk_hdr->length);
685-
padding = SCTP_PAD4(len) - len;
682+
padding = SCTP_PAD4(datalen) - datalen;
686683

687684
/* Fixup cloned skb with just this chunks data. */
688685
skb_trim(skb, chunk->chunk_end - padding - skb->data);

net/sctp/ulpqueue.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1104,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
11041104
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
11051105
}
11061106
/* If able to free enough room, accept this chunk. */
1107-
if (freed >= needed) {
1107+
if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
1108+
freed >= needed) {
11081109
int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
11091110
/*
11101111
* Enter partial delivery if chunk has not been

0 commit comments

Comments (0)