Commit c410bf0

rxrpc: Fix the excessive initial retransmission timeout
rxrpc currently uses a fixed 4s retransmission timeout until the RTT is
sufficiently sampled.  This can cause problems with some fileservers with
calls to the cache manager in the afs filesystem being dropped from the
fileserver because a packet goes missing and the retransmission timeout is
greater than the call expiry timeout.

Fix this by:

 (1) Copying the RTT/RTO calculation code from Linux's TCP implementation
     and altering it to fit rxrpc.

 (2) Altering the various users of the RTT to make use of the new SRTT
     value.

 (3) Replacing the use of rxrpc_resend_timeout to use the calculated RTO
     value instead (which is needed in jiffies), along with a backoff.

Notes:

 (1) rxrpc provides RTT samples by matching the serial numbers on outgoing
     DATA packets that have the RXRPC_REQUEST_ACK set and PING ACK packets
     against the reference serial number in incoming REQUESTED ACK and
     PING-RESPONSE ACK packets.

 (2) Each packet that is transmitted on an rxrpc connection gets a new
     per-connection serial number, even for retransmissions, so an ACK can
     be cross-referenced to a specific trigger packet.  This allows RTT
     information to be drawn from retransmitted DATA packets also.

 (3) rxrpc maintains the RTT/RTO state on the rxrpc_peer record rather
     than on an rxrpc_call because many RPC calls won't live long enough
     to generate more than one sample.

 (4) The calculated SRTT value is in units of 8ths of a microsecond rather
     than nanoseconds.

The (S)RTT and RTO values are displayed in /proc/net/rxrpc/peers.

Fixes: 17926a7 ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <[email protected]>
1 parent 42c556f commit c410bf0
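
The commit message describes the new per-peer RTT tracking as a copy of Linux's TCP estimator, with SRTT kept in units of 1/8 of a microsecond and the RTO derived in jiffies. As background, here is a minimal sketch of that Van Jacobson / RFC 6298 style calculation under those unit conventions; the struct and function names are illustrative only, not the code this commit adds in net/rxrpc/rtt.c.

/* Sketch only -- not the code added by this commit.  It shows the shape of
 * the RFC 6298 estimator that Linux TCP uses and that the commit message
 * says was copied and adapted: SRTT is kept left-shifted by 3 (units of
 * 1/8 us), the deviation term is kept scaled by 4, and the RTO is derived
 * in jiffies.  Names here are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/types.h>

struct example_rtt_state {
	u32	srtt_us;	/* smoothed RTT << 3, in usecs */
	u32	rttvar_us;	/* ~4 * mean deviation, in usecs */
	u32	rto_j;		/* retransmission timeout, in jiffies */
};

static void example_rtt_sample(struct example_rtt_state *p, long rtt_us)
{
	long m = rtt_us;

	if (p->srtt_us == 0) {
		/* First sample: SRTT = R, RTTVAR = R/2 (stored x4, so 2R) */
		p->srtt_us = m << 3;
		p->rttvar_us = m << 1;
	} else {
		/* SRTT <- 7/8 SRTT + 1/8 R, on the <<3 representation */
		m -= (p->srtt_us >> 3);
		p->srtt_us += m;
		if (m < 0)
			m = -m;
		/* RTTVAR <- 3/4 RTTVAR + 1/4 |err|, on the x4 representation */
		m -= (p->rttvar_us >> 2);
		p->rttvar_us += m;
	}

	/* RTO = SRTT + 4*RTTVAR; rttvar_us already carries the factor of 4 */
	p->rto_j = usecs_to_jiffies((p->srtt_us >> 3) + p->rttvar_us);
}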

File tree

17 files changed: 266 additions & 155 deletions


fs/afs/fs_probe.c

Lines changed: 5 additions & 13 deletions
@@ -32,9 +32,8 @@ void afs_fileserver_probe_result(struct afs_call *call)
 	struct afs_server *server = call->server;
 	unsigned int server_index = call->server_index;
 	unsigned int index = call->addr_ix;
-	unsigned int rtt = UINT_MAX;
+	unsigned int rtt_us;
 	bool have_result = false;
-	u64 _rtt;
 	int ret = call->error;
 
 	_enter("%pU,%u", &server->uuid, index);
@@ -93,15 +92,9 @@ void afs_fileserver_probe_result(struct afs_call *call)
 		}
 	}
 
-	/* Get the RTT and scale it to fit into a 32-bit value that represents
-	 * over a minute of time so that we can access it with one instruction
-	 * on a 32-bit system.
-	 */
-	_rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-	_rtt /= 64;
-	rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
-	if (rtt < server->probe.rtt) {
-		server->probe.rtt = rtt;
+	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
+	if (rtt_us < server->probe.rtt) {
+		server->probe.rtt = rtt_us;
 		alist->preferred = index;
 		have_result = true;
 	}
@@ -113,8 +106,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
 	spin_unlock(&server->probe_lock);
 
 	_debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
-	       server_index, index, &alist->addrs[index].transport,
-	       (unsigned int)rtt, ret);
+	       server_index, index, &alist->addrs[index].transport, rtt_us, ret);
 
 	have_result |= afs_fs_probe_done(server);
 	if (have_result)

fs/afs/vl_probe.c

Lines changed: 5 additions & 13 deletions
@@ -31,10 +31,9 @@ void afs_vlserver_probe_result(struct afs_call *call)
 	struct afs_addr_list *alist = call->alist;
 	struct afs_vlserver *server = call->vlserver;
 	unsigned int server_index = call->server_index;
+	unsigned int rtt_us = 0;
 	unsigned int index = call->addr_ix;
-	unsigned int rtt = UINT_MAX;
 	bool have_result = false;
-	u64 _rtt;
 	int ret = call->error;
 
 	_enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code);
@@ -93,15 +92,9 @@ void afs_vlserver_probe_result(struct afs_call *call)
 		}
 	}
 
-	/* Get the RTT and scale it to fit into a 32-bit value that represents
-	 * over a minute of time so that we can access it with one instruction
-	 * on a 32-bit system.
-	 */
-	_rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-	_rtt /= 64;
-	rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
-	if (rtt < server->probe.rtt) {
-		server->probe.rtt = rtt;
+	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
+	if (rtt_us < server->probe.rtt) {
+		server->probe.rtt = rtt_us;
 		alist->preferred = index;
 		have_result = true;
 	}
@@ -113,8 +106,7 @@ void afs_vlserver_probe_result(struct afs_call *call)
 	spin_unlock(&server->probe_lock);
 
 	_debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
-	       server_index, index, &alist->addrs[index].transport,
-	       (unsigned int)rtt, ret);
+	       server_index, index, &alist->addrs[index].transport, rtt_us, ret);
 
 	have_result |= afs_vl_probe_done(server);
 	if (have_result) {

include/net/af_rxrpc.h

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
 			   struct sockaddr_rxrpc *);
-u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
 			       rxrpc_user_attach_call_t, unsigned long, gfp_t,
 			       unsigned int);

include/trace/events/rxrpc.h

Lines changed: 7 additions & 10 deletions
@@ -1112,18 +1112,17 @@ TRACE_EVENT(rxrpc_rtt_tx,
 TRACE_EVENT(rxrpc_rtt_rx,
 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 		     rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
-		     s64 rtt, u8 nr, s64 avg),
+		     u32 rtt, u32 rto),
 
-	    TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+	    TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int,		call		)
 		    __field(enum rxrpc_rtt_rx_trace,	why		)
-		    __field(u8,				nr		)
 		    __field(rxrpc_serial_t,		send_serial	)
 		    __field(rxrpc_serial_t,		resp_serial	)
-		    __field(s64,			rtt		)
-		    __field(u64,			avg		)
+		    __field(u32,			rtt		)
+		    __field(u32,			rto		)
 		    ),
 
 	    TP_fast_assign(
@@ -1132,18 +1131,16 @@ TRACE_EVENT(rxrpc_rtt_rx,
 		    __entry->send_serial = send_serial;
 		    __entry->resp_serial = resp_serial;
 		    __entry->rtt = rtt;
-		    __entry->nr = nr;
-		    __entry->avg = avg;
+		    __entry->rto = rto;
 		    ),
 
-	    TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+	    TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u",
 		      __entry->call,
 		      __print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
 		      __entry->send_serial,
 		      __entry->resp_serial,
 		      __entry->rtt,
-		      __entry->nr,
-		      __entry->avg)
+		      __entry->rto)
 	    );
 
 TRACE_EVENT(rxrpc_timer,

net/rxrpc/Makefile

Lines changed: 1 addition & 0 deletions
@@ -25,6 +25,7 @@ rxrpc-y := \
 	peer_event.o \
 	peer_object.o \
 	recvmsg.o \
+	rtt.o \
 	security.o \
 	sendmsg.o \
 	skbuff.o \

net/rxrpc/ar-internal.h

Lines changed: 17 additions & 8 deletions
@@ -7,6 +7,7 @@
 
 #include <linux/atomic.h>
 #include <linux/seqlock.h>
+#include <linux/win_minmax.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -311,11 +312,14 @@ struct rxrpc_peer {
 #define RXRPC_RTT_CACHE_SIZE 32
 	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
 	ktime_t			rtt_last_req;	/* Time of last RTT request */
-	u64			rtt;		/* Current RTT estimate (in nS) */
-	u64			rtt_sum;	/* Sum of cache contents */
-	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
-	u8			rtt_cursor;	/* next entry at which to insert */
-	u8			rtt_usage;	/* amount of cache actually used */
+	unsigned int		rtt_count;	/* Number of samples we've got */
+
+	u32			srtt_us;	/* smoothed round trip time << 3 in usecs */
+	u32			mdev_us;	/* medium deviation */
+	u32			mdev_max_us;	/* maximal mdev for the last rtt period */
+	u32			rttvar_us;	/* smoothed mdev_max */
+	u32			rto_j;		/* Retransmission timeout in jiffies */
+	u8			backoff;	/* Backoff timeout */
 
 	u8			cong_cwnd;	/* Congestion window size */
 };
@@ -1041,7 +1045,6 @@ extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned long rxrpc_resend_timeout;
 
 extern const s8 rxrpc_ack_priority[];
 
@@ -1069,8 +1072,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
-			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
 
 /*
@@ -1102,6 +1103,14 @@ extern const struct seq_operations rxrpc_peer_seq_ops;
 void rxrpc_notify_socket(struct rxrpc_call *);
 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 
+/*
+ * rtt.c
+ */
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
+unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
+void rxrpc_peer_init_rtt(struct rxrpc_peer *);
+
 /*
  * rxkad.c
 */
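
ar-internal.h above declares rxrpc_peer_init_rtt() alongside the new per-peer fields. A hedged sketch of what such an initialiser might do, assuming the conventional 1-second initial RTO from RFC 6298; the exact constants the commit uses are not shown in this diff and are an assumption here.

/* Hedged sketch only: start with no samples and a conservative initial RTO. */
void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
{
	peer->rto_j	= 1 * HZ;	/* assumed 1s initial RTO */
	peer->backoff	= 0;
	peer->srtt_us	= 0;
	peer->mdev_us	= jiffies_to_usecs(peer->rto_j);
	peer->mdev_max_us = peer->mdev_us;
	peer->rttvar_us	= peer->mdev_us;
	peer->rtt_count	= 0;
}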

net/rxrpc/call_accept.c

Lines changed: 1 addition & 1 deletion
@@ -248,7 +248,7 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	ktime_t now = skb->tstamp;
 
-	if (call->peer->rtt_usage < 3 ||
+	if (call->peer->rtt_count < 3 ||
 	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
 				  true, true,

net/rxrpc/call_event.c

Lines changed: 8 additions & 14 deletions
@@ -111,8 +111,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 	} else {
 		unsigned long now = jiffies, ack_at;
 
-		if (call->peer->rtt_usage > 0)
-			ack_at = nsecs_to_jiffies(call->peer->rtt);
+		if (call->peer->srtt_us != 0)
+			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
 		else
 			ack_at = expiry;
 
@@ -157,24 +157,18 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 {
 	struct sk_buff *skb;
-	unsigned long resend_at;
+	unsigned long resend_at, rto_j;
 	rxrpc_seq_t cursor, seq, top;
-	ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
+	ktime_t now, max_age, oldest, ack_ts;
 	int ix;
 	u8 annotation, anno_type, retrans = 0, unacked = 0;
 
 	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
 
-	if (call->peer->rtt_usage > 1)
-		timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
-	else
-		timeout = ms_to_ktime(rxrpc_resend_timeout);
-	min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
-	if (ktime_before(timeout, min_timeo))
-		timeout = min_timeo;
+	rto_j = call->peer->rto_j;
 
 	now = ktime_get_real();
-	max_age = ktime_sub(now, timeout);
+	max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
 
 	spin_lock_bh(&call->lock);
 
@@ -219,7 +213,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	}
 
 	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
-	resend_at += jiffies + rxrpc_resend_timeout;
+	resend_at += jiffies + rto_j;
 	WRITE_ONCE(call->resend_at, resend_at);
 
 	if (unacked)
@@ -234,7 +228,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 					rxrpc_timer_set_for_resend);
 	spin_unlock_bh(&call->lock);
 	ack_ts = ktime_sub(now, call->acks_latest_ts);
-	if (ktime_to_ns(ack_ts) < call->peer->rtt)
+	if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
 		goto out;
 	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 			  rxrpc_propose_ack_ping_for_lost_ack);
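
One convention worth noting in the hunks above: srtt_us holds the smoothed RTT left-shifted by 3 (1/8-microsecond units), so the code shifts right by 3 to recover plain microseconds before converting to jiffies. A trivial illustration of that convention (the helper name is hypothetical):

/* srtt_us stores SRTT << 3 (units of 1/8 us); >> 3 recovers microseconds. */
static inline unsigned long example_srtt_to_jiffies(const struct rxrpc_peer *peer)
{
	return usecs_to_jiffies(peer->srtt_us >> 3);
}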

net/rxrpc/input.c

Lines changed: 3 additions & 3 deletions
@@ -91,11 +91,11 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 		/* We analyse the number of packets that get ACK'd per RTT
 		 * period and increase the window if we managed to fill it.
 		 */
-		if (call->peer->rtt_usage == 0)
+		if (call->peer->rtt_count == 0)
 			goto out;
 		if (ktime_before(skb->tstamp,
-				 ktime_add_ns(call->cong_tstamp,
-					      call->peer->rtt)))
+				 ktime_add_us(call->cong_tstamp,
+					      call->peer->srtt_us >> 3)))
 			goto out_no_clear_ca;
 		change = rxrpc_cong_rtt_window_end;
 		call->cong_tstamp = skb->tstamp;

net/rxrpc/misc.c

Lines changed: 0 additions & 5 deletions
@@ -63,11 +63,6 @@ unsigned int rxrpc_rx_mtu = 5692;
  */
 unsigned int rxrpc_rx_jumbo_max = 4;
 
-/*
- * Time till packet resend (in milliseconds).
- */
-unsigned long rxrpc_resend_timeout = 4 * HZ;
-
 const s8 rxrpc_ack_priority[] = {
 	[0]			= 0,
 	[RXRPC_ACK_DELAY]	= 1,

net/rxrpc/output.c

Lines changed: 3 additions & 6 deletions
@@ -369,7 +369,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	     (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
 	      retrans ||
 	      call->cong_mode == RXRPC_CALL_SLOW_START ||
-	      (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
+	      (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
 	      ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
 			   ktime_get_real())))
 		whdr.flags |= RXRPC_REQUEST_ACK;
@@ -423,13 +423,10 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	if (whdr.flags & RXRPC_REQUEST_ACK) {
 		call->peer->rtt_last_req = skb->tstamp;
 		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
-		if (call->peer->rtt_usage > 1) {
+		if (call->peer->rtt_count > 1) {
 			unsigned long nowj = jiffies, ack_lost_at;
 
-			ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
-			if (ack_lost_at < 1)
-				ack_lost_at = 1;
-
+			ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
 			ack_lost_at += nowj;
 			WRITE_ONCE(call->ack_lost_at, ack_lost_at);
 			rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
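
The hunk above replaces the open-coded "2 * RTT" lost-ACK timeout with rxrpc_get_rto_backoff(), declared earlier in ar-internal.h. The body below is a hedged sketch of the idea (binary-exponential backoff applied on top of the computed RTO, never less than one jiffy), not the commit's actual implementation; the increment and clamping policy is assumed.

/* Sketch only: apply the peer's current backoff shift to its RTO and,
 * when called for a retransmission, advance the backoff (with an assumed
 * cap so the shift cannot overflow).
 */
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
{
	unsigned long j = peer->rto_j << peer->backoff;

	if (retrans && peer->backoff < 6)	/* cap is an assumption */
		peer->backoff++;

	if (j < 1)
		j = 1;
	return j;
}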

net/rxrpc/peer_event.c

Lines changed: 0 additions & 46 deletions
@@ -295,52 +295,6 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
 	}
 }
 
-/*
- * Add RTT information to cache.  This is called in softirq mode and has
- * exclusive access to the peer RTT data.
- */
-void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
-			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
-			ktime_t send_time, ktime_t resp_time)
-{
-	struct rxrpc_peer *peer = call->peer;
-	s64 rtt;
-	u64 sum = peer->rtt_sum, avg;
-	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;
-
-	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
-	if (rtt < 0)
-		return;
-
-	spin_lock(&peer->rtt_input_lock);
-
-	/* Replace the oldest datum in the RTT buffer */
-	sum -= peer->rtt_cache[cursor];
-	sum += rtt;
-	peer->rtt_cache[cursor] = rtt;
-	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
-	peer->rtt_sum = sum;
-	if (usage < RXRPC_RTT_CACHE_SIZE) {
-		usage++;
-		peer->rtt_usage = usage;
-	}
-
-	spin_unlock(&peer->rtt_input_lock);
-
-	/* Now recalculate the average */
-	if (usage == RXRPC_RTT_CACHE_SIZE) {
-		avg = sum / RXRPC_RTT_CACHE_SIZE;
-	} else {
-		avg = sum;
-		do_div(avg, usage);
-	}
-
-	/* Don't need to update this under lock */
-	peer->rtt = avg;
-	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
-			   usage, avg);
-}
-
 /*
  * Perform keep-alive pings.
 */
