@@ -89,7 +89,7 @@ static struct sock *iucv_accept_dequeue(struct sock *parent,
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
 
-static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
 
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
@@ -182,7 +182,7 @@ static inline int iucv_below_msglim(struct sock *sk)
         if (sk->sk_state != IUCV_CONNECTED)
                 return 1;
         if (iucv->transport == AF_IUCV_TRANS_IUCV)
-                return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+                return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
         else
                 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                         (atomic_read(&iucv->pendings) <= 0));
@@ -211,7 +211,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 {
         struct iucv_sock *iucv = iucv_sk(sock);
         struct af_iucv_trans_hdr *phs_hdr;
-        struct sk_buff *nskb;
         int err, confirm_recv = 0;
 
         phs_hdr = skb_push(skb, sizeof(*phs_hdr));
@@ -257,22 +256,16 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                         err = -EMSGSIZE;
                         goto err_free;
                 }
-                skb_trim(skb, skb->dev->mtu);
+                err = pskb_trim(skb, skb->dev->mtu);
+                if (err)
+                        goto err_free;
         }
         skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
 
-        __skb_header_release(skb);
-        nskb = skb_clone(skb, GFP_ATOMIC);
-        if (!nskb) {
-                err = -ENOMEM;
-                goto err_free;
-        }
-
-        skb_queue_tail(&iucv->send_skb_q, nskb);
+        atomic_inc(&iucv->skbs_in_xmit);
         err = dev_queue_xmit(skb);
         if (net_xmit_eval(err)) {
-                skb_unlink(nskb, &iucv->send_skb_q);
-                kfree_skb(nskb);
+                atomic_dec(&iucv->skbs_in_xmit);
         } else {
                 atomic_sub(confirm_recv, &iucv->msg_recv);
                 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
@@ -424,7 +417,7 @@ static void iucv_sock_close(struct sock *sk)
                 sk->sk_state = IUCV_CLOSING;
                 sk->sk_state_change(sk);
 
-                if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+                if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
                         if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                 timeo = sk->sk_lingertime;
                         else
@@ -491,6 +484,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
         atomic_set(&iucv->pendings, 0);
         iucv->flags = 0;
         iucv->msglimit = 0;
+        atomic_set(&iucv->skbs_in_xmit, 0);
         atomic_set(&iucv->msg_sent, 0);
         atomic_set(&iucv->msg_recv, 0);
         iucv->path = NULL;
@@ -1004,7 +998,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
         if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                 headroom = sizeof(struct af_iucv_trans_hdr) +
                            LL_RESERVED_SPACE(iucv->hs_dev);
-                linear = len;
+                linear = min(len, PAGE_SIZE - headroom);
         } else {
                 if (len < PAGE_SIZE) {
                         linear = len;
@@ -1055,6 +1049,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                 }
         } else { /* Classic VM IUCV transport */
                 skb_queue_tail(&iucv->send_skb_q, skb);
+                atomic_inc(&iucv->skbs_in_xmit);
 
                 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
                     skb->len <= 7) {
@@ -1063,6 +1058,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                         /* on success: there is no message_complete callback */
                         /* for an IPRMDATA msg; remove skb from send queue   */
                         if (err == 0) {
+                                atomic_dec(&iucv->skbs_in_xmit);
                                 skb_unlink(skb, &iucv->send_skb_q);
                                 kfree_skb(skb);
                         }
@@ -1071,6 +1067,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                         /* IUCV_IPRMDATA path flag is set... sever path */
                         if (err == 0x15) {
                                 pr_iucv->path_sever(iucv->path, NULL);
+                                atomic_dec(&iucv->skbs_in_xmit);
                                 skb_unlink(skb, &iucv->send_skb_q);
                                 err = -EPIPE;
                                 goto fail;
@@ -1109,6 +1106,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                         } else {
                                 err = -EPIPE;
                         }
+
+                        atomic_dec(&iucv->skbs_in_xmit);
                         skb_unlink(skb, &iucv->send_skb_q);
                         goto fail;
                 }
@@ -1748,10 +1747,14 @@ static void iucv_callback_txdone(struct iucv_path *path,
 {
         struct sock *sk = path->private;
         struct sk_buff *this = NULL;
-        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+        struct sk_buff_head *list;
         struct sk_buff *list_skb;
+        struct iucv_sock *iucv;
         unsigned long flags;
 
+        iucv = iucv_sk(sk);
+        list = &iucv->send_skb_q;
+
         bh_lock_sock(sk);
 
         spin_lock_irqsave(&list->lock, flags);
@@ -1761,8 +1764,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
                         break;
                 }
         }
-        if (this)
+        if (this) {
+                atomic_dec(&iucv->skbs_in_xmit);
                 __skb_unlink(this, list);
+        }
+
         spin_unlock_irqrestore(&list->lock, flags);
 
         if (this) {
@@ -1772,7 +1778,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
         }
 
         if (sk->sk_state == IUCV_CLOSING) {
-                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+                if (atomic_read(&iucv->skbs_in_xmit) == 0) {
                         sk->sk_state = IUCV_CLOSED;
                         sk->sk_state_change(sk);
                 }
@@ -2036,7 +2042,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
         char nullstring[8];
 
         if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
-                WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
                 kfree_skb(skb);
                 return NET_RX_SUCCESS;
         }
@@ -2132,73 +2137,40 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
  * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
  * transport
  **/
-static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
-                                        enum iucv_tx_notify n)
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
 {
-        struct sock *isk = skb->sk;
-        struct sock *sk = NULL;
-        struct iucv_sock *iucv = NULL;
-        struct sk_buff_head *list;
-        struct sk_buff *list_skb;
-        struct sk_buff *nskb;
-        unsigned long flags;
-
-        read_lock_irqsave(&iucv_sk_list.lock, flags);
-        sk_for_each(sk, &iucv_sk_list.head)
-                if (sk == isk) {
-                        iucv = iucv_sk(sk);
-                        break;
-                }
-        read_unlock_irqrestore(&iucv_sk_list.lock, flags);
+        struct iucv_sock *iucv = iucv_sk(sk);
 
-        if (!iucv || sock_flag(sk, SOCK_ZAPPED))
+        if (sock_flag(sk, SOCK_ZAPPED))
                 return;
 
-        list = &iucv->send_skb_q;
-        spin_lock_irqsave(&list->lock, flags);
-        skb_queue_walk_safe(list, list_skb, nskb) {
-                if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
-                        switch (n) {
-                        case TX_NOTIFY_OK:
-                                __skb_unlink(list_skb, list);
-                                kfree_skb(list_skb);
-                                iucv_sock_wake_msglim(sk);
-                                break;
-                        case TX_NOTIFY_PENDING:
-                                atomic_inc(&iucv->pendings);
-                                break;
-                        case TX_NOTIFY_DELAYED_OK:
-                                __skb_unlink(list_skb, list);
-                                atomic_dec(&iucv->pendings);
-                                if (atomic_read(&iucv->pendings) <= 0)
-                                        iucv_sock_wake_msglim(sk);
-                                kfree_skb(list_skb);
-                                break;
-                        case TX_NOTIFY_UNREACHABLE:
-                        case TX_NOTIFY_DELAYED_UNREACHABLE:
-                        case TX_NOTIFY_TPQFULL: /* not yet used */
-                        case TX_NOTIFY_GENERALERROR:
-                        case TX_NOTIFY_DELAYED_GENERALERROR:
-                                __skb_unlink(list_skb, list);
-                                kfree_skb(list_skb);
-                                if (sk->sk_state == IUCV_CONNECTED) {
-                                        sk->sk_state = IUCV_DISCONN;
-                                        sk->sk_state_change(sk);
-                                }
-                                break;
-                        }
-                        break;
+        switch (n) {
+        case TX_NOTIFY_OK:
+                atomic_dec(&iucv->skbs_in_xmit);
+                iucv_sock_wake_msglim(sk);
+                break;
+        case TX_NOTIFY_PENDING:
+                atomic_inc(&iucv->pendings);
+                break;
+        case TX_NOTIFY_DELAYED_OK:
+                atomic_dec(&iucv->skbs_in_xmit);
+                if (atomic_dec_return(&iucv->pendings) <= 0)
+                        iucv_sock_wake_msglim(sk);
+                break;
+        default:
+                atomic_dec(&iucv->skbs_in_xmit);
+                if (sk->sk_state == IUCV_CONNECTED) {
+                        sk->sk_state = IUCV_DISCONN;
+                        sk->sk_state_change(sk);
                 }
         }
-        spin_unlock_irqrestore(&list->lock, flags);
 
         if (sk->sk_state == IUCV_CLOSING) {
-                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+                if (atomic_read(&iucv->skbs_in_xmit) == 0) {
                         sk->sk_state = IUCV_CLOSED;
                         sk->sk_state_change(sk);
                 }
         }
-
 }
 
 /*
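Note: as a reading aid for the accounting scheme this patch introduces, below is a minimal, hypothetical userspace sketch (plain C11, not kernel code) of the same idea: an atomic in-flight counter gates new transmissions against the peer's message limit and is decremented from the TX-completion path, so no per-skb clone or send-queue walk is needed for the HiperSockets transport. All names here (toy_sock, below_msglim, xmit_one, tx_complete) are illustrative only and do not exist in af_iucv.c.

/* Simplified analogue of the skbs_in_xmit counter added by this patch. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
        atomic_int in_xmit;     /* packets handed to the device, not yet confirmed */
        int msglim;             /* peer-advertised window, cf. path->msglim */
};

/* cf. iucv_below_msglim(): may another packet be handed to the device? */
static bool below_msglim(struct toy_sock *s)
{
        return atomic_load(&s->in_xmit) < s->msglim;
}

/* cf. afiucv_hs_send(): count the packet before queueing it for xmit */
static int xmit_one(struct toy_sock *s)
{
        if (!below_msglim(s))
                return -1;      /* a real sender would wait for a wakeup here */
        atomic_fetch_add(&s->in_xmit, 1);
        /* ... hand the packet to the device; on a queueing error the
         * counter would be decremented again, as afiucv_hs_send() does. */
        return 0;
}

/* cf. the TX_NOTIFY_OK branch of afiucv_hs_callback_txnotify() */
static void tx_complete(struct toy_sock *s)
{
        atomic_fetch_sub(&s->in_xmit, 1);
        /* wake senders blocked on the message limit here */
}

int main(void)
{
        struct toy_sock s = { .msglim = 2 };    /* .in_xmit starts at 0 */

        printf("%d\n", xmit_one(&s));   /* 0: first send fits the window   */
        printf("%d\n", xmit_one(&s));   /* 0: second send fits as well     */
        printf("%d\n", xmit_one(&s));   /* -1: window full, sender waits   */
        tx_complete(&s);
        printf("%d\n", xmit_one(&s));   /* 0: a completion reopened a slot */
        return 0;
}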