
Commit 7354c8c

Florian Westphal authored and davem330 committed
net: tcp: split ack slow/fast events from cwnd_event
The congestion control ops "cwnd_event" currently supports CA_EVENT_FAST_ACK and CA_EVENT_SLOW_ACK events (among others). Both FAST and SLOW_ACK are only used by the Westwood congestion control algorithm.

This removes both flags from cwnd_event and adds a new in_ack_event callback instead. The goal is to be able to provide more detailed information about ACKs, such as whether the ECE flag was set or whether the ACK resulted in a window update.

It is required for the DataCenter TCP (DCTCP) congestion control algorithm, as it makes a different choice depending on whether ECE is set or not.

Joint work with Daniel Borkmann and Glenn Judd.

Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Glenn Judd <[email protected]>
Acked-by: Stephen Hemminger <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 30e502a commit 7354c8c

File tree: 3 files changed, +32 additions, -16 deletions


include/net/tcp.h

Lines changed: 6 additions & 2 deletions
@@ -763,8 +763,10 @@ enum tcp_ca_event {
 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
 	CA_EVENT_LOSS,		/* loss timeout */
-	CA_EVENT_FAST_ACK,	/* in sequence ack */
-	CA_EVENT_SLOW_ACK,	/* other ack */
+};
+
+enum tcp_ca_ack_event_flags {
+	CA_ACK_SLOWPATH = (1 << 0),
 };
 
 /*
@@ -796,6 +798,8 @@ struct tcp_congestion_ops {
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+	/* call when ack arrives (optional) */
+	void (*in_ack_event)(struct sock *sk, u32 flags);
 	/* new value of cwnd after loss (optional) */
 	u32 (*undo_cwnd)(struct sock *sk);
 	/* hook for packet ack accounting (optional) */
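For a congestion control module outside this patch, the new member slots in like the other optional hooks above. The sketch below is not part of this commit: the module name "example", its reaction to CA_ACK_SLOWPATH, and the reuse of the Reno helpers are illustrative assumptions; only the in_ack_event() signature and the CA_ACK_SLOWPATH flag come from the header change.

#include <linux/module.h>
#include <net/tcp.h>

/* Illustrative sketch only -- not part of this commit. Everything except
 * the in_ack_event() signature and CA_ACK_SLOWPATH is assumed here.
 */
static void example_in_ack_event(struct sock *sk, u32 flags)
{
	if (flags & CA_ACK_SLOWPATH) {
		/* ACK went through the slow path in tcp_ack(); do any
		 * per-ACK bookkeeping that needs the full processing here.
		 */
	} else {
		/* Header-predicted, in-sequence ACK taken on the fast path. */
	}
}

static struct tcp_congestion_ops example_ca __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* reuse Reno behaviour elsewhere */
	.cong_avoid	= tcp_reno_cong_avoid,
	.in_ack_event	= example_in_ack_event,	/* the new optional hook */
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init example_ca_register(void)
{
	return tcp_register_congestion_control(&example_ca);
}
module_init(example_ca_register);
MODULE_LICENSE("GPL");

With such a module in place, tcp_ack() (see the tcp_input.c hunk below) invokes the hook with flags == 0 on the fast path and with CA_ACK_SLOWPATH on the slow path.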

net/ipv4/tcp_input.c

Lines changed: 10 additions & 2 deletions
@@ -3362,6 +3362,14 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 	}
 }
 
+static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
+{
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (icsk->icsk_ca_ops->in_ack_event)
+		icsk->icsk_ca_ops->in_ack_event(sk, flags);
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3421,7 +3429,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		tp->snd_una = ack;
 		flag |= FLAG_WIN_UPDATE;
 
-		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
+		tcp_in_ack_event(sk, 0);
 
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
@@ -3439,7 +3447,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
 			flag |= FLAG_ECE;
 
-		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
+		tcp_in_ack_event(sk, CA_ACK_SLOWPATH);
 	}
 
 	/* We passed data and got it acked, remove any soft error

net/ipv4/tcp_westwood.c

Lines changed: 16 additions & 12 deletions
@@ -220,32 +220,35 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
+static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
+{
+	if (ack_flags & CA_ACK_SLOWPATH) {
+		struct westwood *w = inet_csk_ca(sk);
+
+		westwood_update_window(sk);
+		w->bk += westwood_acked_count(sk);
+
+		update_rtt_min(w);
+		return;
+	}
+
+	westwood_fast_bw(sk);
+}
+
 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
 
 	switch (event) {
-	case CA_EVENT_FAST_ACK:
-		westwood_fast_bw(sk);
-		break;
-
 	case CA_EVENT_COMPLETE_CWR:
 		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
-
 	case CA_EVENT_LOSS:
 		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		/* Update RTT_min when next ack arrives */
 		w->reset_rtt_min = 1;
 		break;
-
-	case CA_EVENT_SLOW_ACK:
-		westwood_update_window(sk);
-		w->bk += westwood_acked_count(sk);
-		update_rtt_min(w);
-		break;
-
 	default:
 		/* don't care */
 		break;
@@ -274,6 +277,7 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
 	.cwnd_event	= tcp_westwood_event,
+	.in_ack_event	= tcp_westwood_ack,
 	.get_info	= tcp_westwood_info,
 	.pkts_acked	= tcp_westwood_pkts_acked,
 
