@@ -237,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	struct rxe_mc_elem *mce;
 	struct rxe_qp *qp;
 	union ib_gid dgid;
-	struct sk_buff *per_qp_skb;
-	struct rxe_pkt_info *per_qp_pkt;
 	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -250,10 +248,15 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	/* lookup mcast group corresponding to mgid, takes a ref */
 	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
 	if (!mcg)
-		goto err1;	/* mcast group not registered */
+		goto drop;	/* mcast group not registered */
 
 	spin_lock_bh(&mcg->mcg_lock);
 
+	/* this is unreliable datagram service so we let
+	 * failures to deliver a multicast packet to a
+	 * single QP happen and just move on and try
+	 * the rest of them on the list
+	 */
 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
 		qp = mce->qp;
 
@@ -266,39 +269,47 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 		if (err)
 			continue;
 
-		/* for all but the last qp create a new clone of the
-		 * skb and pass to the qp. If an error occurs in the
-		 * checks for the last qp in the list we need to
-		 * free the skb since it hasn't been passed on to
-		 * rxe_rcv_pkt() which would free it later.
+		/* for all but the last QP create a new clone of the
+		 * skb and pass to the QP. Pass the original skb to
+		 * the last QP in the list.
 		 */
 		if (mce->qp_list.next != &mcg->qp_list) {
-			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
-				kfree_skb(per_qp_skb);
+			struct sk_buff *cskb;
+			struct rxe_pkt_info *cpkt;
+
+			cskb = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!cskb))
 				continue;
+
+			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+				kfree_skb(cskb);
+				break;
 			}
+
+			cpkt = SKB_TO_PKT(cskb);
+			cpkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(cpkt, cskb);
 		} else {
-			per_qp_skb = skb;
-			/* show we have consumed the skb */
-			skb = NULL;
+			pkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(pkt, skb);
+			skb = NULL;	/* mark consumed */
 		}
-
-		if (unlikely(!per_qp_skb))
-			continue;
-
-		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
-		per_qp_pkt->qp = qp;
-		rxe_add_ref(qp);
-		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
 	}
 
 	spin_unlock_bh(&mcg->mcg_lock);
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
-err1:
-	/* free skb if not consumed */
+	if (likely(!skb))
+		return;
+
+	/* This only occurs if one of the checks fails on the last
+	 * QP in the list above
+	 */
+
+drop:
 	kfree_skb(skb);
 	ib_device_put(&rxe->ib_dev);
 }
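
For reference, here is a minimal user-space sketch (not rxe code) of the buffer-ownership pattern the reworked loop follows: every consumer except the last one gets a private clone of the buffer, the last consumer takes the original, and the caller frees the original only if it was never handed off. The struct buf, deliver() and fan_out() names below are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of "clone for all but the last consumer, hand the original
 * to the last one" ownership, as in rxe_rcv_mcast_pkt() above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	size_t len;
	char data[64];
};

/* deliver() takes ownership of b and frees it, like rxe_rcv_pkt() does */
static void deliver(int consumer, struct buf *b)
{
	printf("consumer %d got %zu bytes\n", consumer, b->len);
	free(b);
}

static void fan_out(struct buf *b, int nconsumers)
{
	for (int i = 0; i < nconsumers; i++) {
		if (i != nconsumers - 1) {
			/* all but the last consumer: hand off a clone */
			struct buf *copy = malloc(sizeof(*copy));

			if (!copy)
				continue;	/* best effort, keep going */
			memcpy(copy, b, sizeof(*copy));
			deliver(i, copy);
		} else {
			/* last consumer: hand off the original */
			deliver(i, b);
			b = NULL;	/* mark consumed */
		}
	}

	/* only non-NULL here if the final hand-off never happened */
	free(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->len = snprintf(b->data, sizeof(b->data), "multicast payload");
	fan_out(b, 3);
	return 0;
}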