
Commit 4915a40

Merge branch 'net-iucv-updates-2021-01-28'

Julian Wiedmann says:

====================
net/iucv: updates 2021-01-28

This reworks & simplifies the TX notification path in af_iucv, so that
we can send out SG skbs over TRANS_HIPER sockets. Also remove a noisy
WARN_ONCE() in the RX path.
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>

2 parents 14a6daf + 2c3b445
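
The common thread in all three files is that a per-socket counter, skbs_in_xmit, takes over the job previously done by queueing an skb clone and scanning the socket's send queue on every TX notification. As an illustration of the pattern only -- a minimal C11 userspace sketch with hypothetical names (tx_state, tx_try_send, tx_done), not kernel code -- the accounting works like this:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the iucv_sock accounting fields. */
struct tx_state {
	atomic_int skbs_in_xmit;	/* packets handed to the device, not yet confirmed */
	int msglim;			/* transport's message limit */
};

/* Send path: count the packet before handing it to the device. */
static int tx_try_send(struct tx_state *s)
{
	if (atomic_load(&s->skbs_in_xmit) >= s->msglim)
		return -1;		/* over the limit; caller must wait */
	atomic_fetch_add(&s->skbs_in_xmit, 1);
	return 0;
}

/* Completion path: the TX notification only adjusts the counter. */
static void tx_done(struct tx_state *s)
{
	atomic_fetch_sub(&s->skbs_in_xmit, 1);
}

int main(void)
{
	struct tx_state s = { .msglim = 2 };

	atomic_init(&s.skbs_in_xmit, 0);
	tx_try_send(&s);
	tx_try_send(&s);
	printf("third send: %s\n", tx_try_send(&s) ? "blocked" : "ok");
	tx_done(&s);
	printf("after one completion: %s\n", tx_try_send(&s) ? "blocked" : "ok");
	return 0;
}

Because both the send and the completion path touch a single atomic, no queue lock is needed and nothing has to survive until the completion fires -- which is what makes SG skbs viable on this path.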

File tree (3 files changed: +53 -78 lines)

  drivers/s390/net/qeth_core_main.c
  include/net/iucv/af_iucv.h
  net/iucv/af_iucv.c

drivers/s390/net/qeth_core_main.c (4 additions, 2 deletions)

@@ -1409,10 +1409,12 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 	struct sk_buff *skb;
 
 	skb_queue_walk(&buf->skb_list, skb) {
+		struct sock *sk = skb->sk;
+
 		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
 		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
-		if (skb->sk && skb->sk->sk_family == PF_IUCV)
-			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
+		if (sk && sk->sk_family == PF_IUCV)
+			iucv_sk(sk)->sk_txnotify(sk, notification);
 	}
 }
 
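The substantive change is the callback's argument: qeth now hands sk_txnotify() the socket instead of the skb. As the af_iucv.c hunks below show, the old skb-based callback had to find the socket in iucv_sk_list and then match a queued clone by its skb_shinfo() pointer; passing the socket makes both lookups unnecessary, and stops the notification path from depending on the skb's head surviving unchanged on its way down the stack.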

include/net/iucv/af_iucv.h (2 additions, 1 deletion)

@@ -128,11 +128,12 @@ struct iucv_sock {
 	u8			flags;
 	u16			msglimit;
 	u16			msglimit_peer;
+	atomic_t		skbs_in_xmit;
 	atomic_t		msg_sent;
 	atomic_t		msg_recv;
 	atomic_t		pendings;
 	int			transport;
-	void			(*sk_txnotify)(struct sk_buff *skb,
+	void			(*sk_txnotify)(struct sock *sk,
 					       enum iucv_tx_notify n);
 };
 

net/iucv/af_iucv.c (47 additions, 75 deletions)
@@ -89,7 +89,7 @@ static struct sock *iucv_accept_dequeue(struct sock *parent,
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
 
-static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
 
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
@@ -182,7 +182,7 @@ static inline int iucv_below_msglim(struct sock *sk)
 	if (sk->sk_state != IUCV_CONNECTED)
 		return 1;
 	if (iucv->transport == AF_IUCV_TRANS_IUCV)
-		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
 	else
 		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
 			(atomic_read(&iucv->pendings) <= 0));
@@ -211,7 +211,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 {
 	struct iucv_sock *iucv = iucv_sk(sock);
 	struct af_iucv_trans_hdr *phs_hdr;
-	struct sk_buff *nskb;
 	int err, confirm_recv = 0;
 
 	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
@@ -257,22 +256,16 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 			err = -EMSGSIZE;
 			goto err_free;
 		}
-		skb_trim(skb, skb->dev->mtu);
+		err = pskb_trim(skb, skb->dev->mtu);
+		if (err)
+			goto err_free;
 	}
 	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
 
-	__skb_header_release(skb);
-	nskb = skb_clone(skb, GFP_ATOMIC);
-	if (!nskb) {
-		err = -ENOMEM;
-		goto err_free;
-	}
-
-	skb_queue_tail(&iucv->send_skb_q, nskb);
+	atomic_inc(&iucv->skbs_in_xmit);
 	err = dev_queue_xmit(skb);
 	if (net_xmit_eval(err)) {
-		skb_unlink(nskb, &iucv->send_skb_q);
-		kfree_skb(nskb);
+		atomic_dec(&iucv->skbs_in_xmit);
 	} else {
 		atomic_sub(confirm_recv, &iucv->msg_recv);
 		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
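
Two independent fixes land in afiucv_hs_send(). First, the clone bookkeeping (__skb_header_release() + skb_clone() + skb_queue_tail()) collapses into a single atomic_inc(), with a matching atomic_dec() if dev_queue_xmit() rejects the packet. Second, skb_trim() becomes pskb_trim(): skb_trim() can only shrink the linear data area, whereas pskb_trim() also copes with paged fragments -- necessary once SG skbs reach this path -- and can fail, hence the new error check.
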
@@ -424,7 +417,7 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
-		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
 			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 				timeo = sk->sk_lingertime;
 			else
@@ -491,6 +484,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
 	atomic_set(&iucv->pendings, 0);
 	iucv->flags = 0;
 	iucv->msglimit = 0;
+	atomic_set(&iucv->skbs_in_xmit, 0);
 	atomic_set(&iucv->msg_sent, 0);
 	atomic_set(&iucv->msg_recv, 0);
 	iucv->path = NULL;
@@ -1004,7 +998,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 		headroom = sizeof(struct af_iucv_trans_hdr) +
 			   LL_RESERVED_SPACE(iucv->hs_dev);
-		linear = len;
+		linear = min(len, PAGE_SIZE - headroom);
 	} else {
 		if (len < PAGE_SIZE) {
 			linear = len;
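
This one-liner is what actually enables SG transmission over TRANS_HIPER: instead of forcing the whole message into the skb's linear area, at most a page (minus the transport headroom) stays linear, and the remainder of the message lands in paged fragments via the socket's paged-skb allocation (not visible in this hunk).
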
@@ -1055,6 +1049,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 		}
 	} else { /* Classic VM IUCV transport */
 		skb_queue_tail(&iucv->send_skb_q, skb);
+		atomic_inc(&iucv->skbs_in_xmit);
 
 		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
 		    skb->len <= 7) {
@@ -1063,6 +1058,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* on success: there is no message_complete callback */
 			/* for an IPRMDATA msg; remove skb from send queue */
 			if (err == 0) {
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				kfree_skb(skb);
 			}
@@ -1071,6 +1067,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* IUCV_IPRMDATA path flag is set... sever path */
 			if (err == 0x15) {
 				pr_iucv->path_sever(iucv->path, NULL);
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				err = -EPIPE;
 				goto fail;
@@ -1109,6 +1106,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			} else {
 				err = -EPIPE;
 			}
+
+			atomic_dec(&iucv->skbs_in_xmit);
 			skb_unlink(skb, &iucv->send_skb_q);
 			goto fail;
 		}
@@ -1748,10 +1747,14 @@ static void iucv_callback_txdone(struct iucv_path *path,
 {
 	struct sock *sk = path->private;
 	struct sk_buff *this = NULL;
-	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+	struct sk_buff_head *list;
 	struct sk_buff *list_skb;
+	struct iucv_sock *iucv;
 	unsigned long flags;
 
+	iucv = iucv_sk(sk);
+	list = &iucv->send_skb_q;
+
 	bh_lock_sock(sk);
 
 	spin_lock_irqsave(&list->lock, flags);
@@ -1761,8 +1764,11 @@
 			break;
 		}
 	}
-	if (this)
+	if (this) {
+		atomic_dec(&iucv->skbs_in_xmit);
 		__skb_unlink(this, list);
+	}
+
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (this) {
@@ -1772,7 +1778,7 @@
 	}
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
 			sk->sk_state_change(sk);
 		}
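
The classic VM IUCV transport keeps its send_skb_q -- iucv_callback_txdone() still has to look up the skb belonging to the completed message -- but the counter is now decremented alongside the unlink, so the close logic here and iucv_below_msglim() gate on skbs_in_xmit for both transports.
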
@@ -2036,7 +2042,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 	char nullstring[8];
 
 	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
-		WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
 		kfree_skb(skb);
 		return NET_RX_SUCCESS;
 	}
@@ -2132,73 +2137,40 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
  * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
  * transport
  **/
-static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
-					enum iucv_tx_notify n)
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
 {
-	struct sock *isk = skb->sk;
-	struct sock *sk = NULL;
-	struct iucv_sock *iucv = NULL;
-	struct sk_buff_head *list;
-	struct sk_buff *list_skb;
-	struct sk_buff *nskb;
-	unsigned long flags;
-
-	read_lock_irqsave(&iucv_sk_list.lock, flags);
-	sk_for_each(sk, &iucv_sk_list.head)
-		if (sk == isk) {
-			iucv = iucv_sk(sk);
-			break;
-		}
-	read_unlock_irqrestore(&iucv_sk_list.lock, flags);
+	struct iucv_sock *iucv = iucv_sk(sk);
 
-	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
+	if (sock_flag(sk, SOCK_ZAPPED))
 		return;
 
-	list = &iucv->send_skb_q;
-	spin_lock_irqsave(&list->lock, flags);
-	skb_queue_walk_safe(list, list_skb, nskb) {
-		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
-			switch (n) {
-			case TX_NOTIFY_OK:
-				__skb_unlink(list_skb, list);
-				kfree_skb(list_skb);
-				iucv_sock_wake_msglim(sk);
-				break;
-			case TX_NOTIFY_PENDING:
-				atomic_inc(&iucv->pendings);
-				break;
-			case TX_NOTIFY_DELAYED_OK:
-				__skb_unlink(list_skb, list);
-				atomic_dec(&iucv->pendings);
-				if (atomic_read(&iucv->pendings) <= 0)
-					iucv_sock_wake_msglim(sk);
-				kfree_skb(list_skb);
-				break;
-			case TX_NOTIFY_UNREACHABLE:
-			case TX_NOTIFY_DELAYED_UNREACHABLE:
-			case TX_NOTIFY_TPQFULL: /* not yet used */
-			case TX_NOTIFY_GENERALERROR:
-			case TX_NOTIFY_DELAYED_GENERALERROR:
-				__skb_unlink(list_skb, list);
-				kfree_skb(list_skb);
-				if (sk->sk_state == IUCV_CONNECTED) {
-					sk->sk_state = IUCV_DISCONN;
-					sk->sk_state_change(sk);
-				}
-				break;
-			}
-			break;
+	switch (n) {
+	case TX_NOTIFY_OK:
+		atomic_dec(&iucv->skbs_in_xmit);
+		iucv_sock_wake_msglim(sk);
+		break;
+	case TX_NOTIFY_PENDING:
+		atomic_inc(&iucv->pendings);
+		break;
+	case TX_NOTIFY_DELAYED_OK:
+		atomic_dec(&iucv->skbs_in_xmit);
+		if (atomic_dec_return(&iucv->pendings) <= 0)
+			iucv_sock_wake_msglim(sk);
+		break;
+	default:
+		atomic_dec(&iucv->skbs_in_xmit);
+		if (sk->sk_state == IUCV_CONNECTED) {
+			sk->sk_state = IUCV_DISCONN;
+			sk->sk_state_change(sk);
 		}
 	}
-	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
 		}
 	}
-
 }
 
 /*
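
After the rewrite, a HiperSockets TX notification is O(1): no walk of iucv_sk_list (qeth already has the socket), no send-queue scan under its lock, and no clone to free -- the counter updates and the usual socket-state transitions are all that remain. A small bonus fix hides in TX_NOTIFY_DELAYED_OK: the old atomic_dec() followed by atomic_read() left a window between the two operations, while atomic_dec_return() folds the decrement and the test into one atomic step.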
