@@ -88,6 +88,31 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
 		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
+static void ipoib_cm_dma_unmap_tx(struct ipoib_dev_priv *priv,
+				  struct ipoib_tx_buf *tx_req)
+{
+	struct sk_buff *skb;
+	int i, offs;
+
+	skb = tx_req->skb;
+	if (skb_shinfo(skb)->nr_frags) {
+		offs = 0;
+		if (skb_headlen(skb)) {
+			ib_dma_unmap_single(priv->ca, tx_req->mapping[0],
+					    skb_headlen(skb), DMA_TO_DEVICE);
+			offs = 1;
+		}
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			ib_dma_unmap_page(priv->ca, tx_req->mapping[i + offs],
+					  skb_frag_size(frag), DMA_TO_DEVICE);
+		}
+	} else
+		ib_dma_unmap_single(priv->ca, tx_req->mapping[0], skb->len,
+				    DMA_TO_DEVICE);
+}
+
 static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
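
This new helper tears down the DMA mappings of one TX ring slot. For a scattered skb it unmaps the linear head (if any) with ib_dma_unmap_single() and each page fragment with ib_dma_unmap_page(); for a linear-only skb it falls back to a single unmap of skb->len bytes. Indexing tx_req->mapping[] works because the patch moves the connected-mode path from struct ipoib_cm_tx_buf (which held a single u64 mapping) to the datagram path's struct ipoib_tx_buf, which looks roughly like this in ipoib.h around this kernel version:

	struct ipoib_tx_buf {
		struct sk_buff *skb;
		/* one slot for the linear head plus one per page fragment */
		u64		mapping[MAX_SKB_FRAGS + 1];
	};
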
@@ -707,11 +732,39 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
 }
 
+static inline int post_send_sg(struct ipoib_dev_priv *priv,
+			       struct ipoib_cm_tx *tx,
+			       unsigned int wr_id,
+			       struct sk_buff *skb,
+			       u64 mapping[MAX_SKB_FRAGS + 1])
+{
+	struct ib_send_wr *bad_wr;
+	int i, off;
+	skb_frag_t *frags = skb_shinfo(skb)->frags;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+
+	if (skb_headlen(skb)) {
+		priv->tx_sge[0].addr = mapping[0];
+		priv->tx_sge[0].length = skb_headlen(skb);
+		off = 1;
+	} else
+		off = 0;
+
+	for (i = 0; i < nr_frags; ++i) {
+		priv->tx_sge[i + off].addr = mapping[i + off];
+		priv->tx_sge[i + off].length = frags[i].size;
+	}
+	priv->tx_wr.num_sge = nr_frags + off;
+	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
+
+	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
+}
+
 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ipoib_cm_tx_buf *tx_req;
-	u64 addr;
+	struct ipoib_tx_buf *tx_req;
+	u64 addr = 0;
 	int rc;
 
 	if (unlikely(skb->len > tx->mtu)) {
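
post_send_sg() is the scatter-gather counterpart of post_send(): it fills one ib_sge per skb piece (the linear head at index 0 when skb_headlen() is non-zero, then one entry per page fragment), sets num_sge accordingly, and tags the work-request ID with IPOIB_OP_CM so the completion handler can recognize connected-mode sends. Note it reads fragment lengths as frags[i].size where ipoib_cm_dma_unmap_tx() above uses the skb_frag_size() accessor; both name the same field. In ipoib_cm_send(), addr is now initialized to 0, presumably to quiet an uninitialized-variable warning, since it is only assigned on the linear-only branch below.
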
@@ -735,24 +788,37 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	 */
 	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
 	tx_req->skb = skb;
-	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
-	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++dev->stats.tx_errors;
-		dev_kfree_skb_any(skb);
-		return;
-	}
 
-	tx_req->mapping = addr;
+	if (skb_shinfo(skb)->nr_frags) {
+		if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
+			++dev->stats.tx_errors;
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		rc = post_send_sg(priv, tx, tx->tx_head &
+				  (ipoib_sendq_size - 1),
+				  skb, tx_req->mapping);
+	} else {
+		addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
+					 DMA_TO_DEVICE);
+		if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+			++dev->stats.tx_errors;
+			dev_kfree_skb_any(skb);
+			return;
+		}
+
+		tx_req->mapping[0] = addr;
 
-	skb_orphan(skb);
-	skb_dst_drop(skb);
+		skb_orphan(skb);
+		skb_dst_drop(skb);
 
-	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
-		       addr, skb->len);
+		rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
+			       addr, skb->len);
+	}
 	if (unlikely(rc)) {
 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
 		++dev->stats.tx_errors;
-		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+		ipoib_cm_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 	} else {
 		dev->trans_start = jiffies;
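
The transmit path now branches: skbs with page fragments are DMA-mapped with ipoib_dma_map_tx() (the helper the datagram path already uses, defined in ipoib_ib.c rather than in this file) and posted via post_send_sg(), while linear skbs keep the old single-mapping path; a failed post is cleaned up uniformly by ipoib_cm_dma_unmap_tx(). ipoib_dma_map_tx() itself is not shown in this diff. As a sketch only, assuming it matches the datagram-path helper of this era, it performs the inverse of ipoib_cm_dma_unmap_tx(), unwinding on partial failure:

	int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
	{
		struct sk_buff *skb = tx_req->skb;
		u64 *mapping = tx_req->mapping;
		int i, off;

		if (skb_headlen(skb)) {
			mapping[0] = ib_dma_map_single(ca, skb->data,
						       skb_headlen(skb),
						       DMA_TO_DEVICE);
			if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
				return -EIO;
			off = 1;
		} else
			off = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			mapping[i + off] = ib_dma_map_page(ca,
							   skb_frag_page(frag),
							   frag->page_offset,
							   skb_frag_size(frag),
							   DMA_TO_DEVICE);
			if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
				goto partial_error;
		}
		return 0;

	partial_error:
		/* unwind the fragment and head mappings made so far */
		for (; i > 0; --i) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

			ib_dma_unmap_page(ca, mapping[i - !off],
					  skb_frag_size(frag), DMA_TO_DEVICE);
		}
		if (off)
			ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb),
					    DMA_TO_DEVICE);
		return -EIO;
	}
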
@@ -777,7 +843,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_cm_tx *tx = wc->qp->qp_context;
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
-	struct ipoib_cm_tx_buf *tx_req;
+	struct ipoib_tx_buf *tx_req;
 	unsigned long flags;
 
 	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +857,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	tx_req = &tx->tx_ring[wr_id];
 
-	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+	ipoib_cm_dma_unmap_tx(priv, tx_req);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
 	++dev->stats.tx_packets;
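
The send-completion handler switches to the shared buffer type and the new helper: ipoib_cm_handle_tx_wc() can no longer assume a single linear mapping of tx_req->skb->len bytes, so it lets ipoib_cm_dma_unmap_tx() work out how many segments to release.
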
@@ -1046,6 +1112,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 
 	struct ib_qp *tx_qp;
 
+	if (dev->features & NETIF_F_SG)
+		attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
 	tx_qp = ib_create_qp(priv->pd, &attr);
 	if (PTR_ERR(tx_qp) == -EINVAL) {
 		ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
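
A scattered send needs one scatter/gather entry per skb piece, so the CM TX QP is created with max_send_sge raised to MAX_SKB_FRAGS + 1 (all page fragments plus the linear head) whenever the device has NETIF_F_SG set; presumably a companion hunk enables NETIF_F_SG for connected mode elsewhere in the patch. The priv->tx_sge array is already sized MAX_SKB_FRAGS + 1 for the datagram path, so post_send_sg() can reuse it as-is.
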
@@ -1180,7 +1249,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
-	struct ipoib_cm_tx_buf *tx_req;
+	struct ipoib_tx_buf *tx_req;
 	unsigned long begin;
 
 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1207,8 +1276,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 
 	while ((int) p->tx_tail - (int) p->tx_head < 0) {
 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
-				    DMA_TO_DEVICE);
+		ipoib_cm_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
 		netif_tx_lock_bh(p->dev);
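
Connection teardown gets the same treatment: draining the TX ring in ipoib_cm_tx_destroy() now releases whatever mapping layout each slot holds via ipoib_cm_dma_unmap_tx() instead of assuming one contiguous mapping per skb.
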
@@ -1498,7 +1566,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 	spin_unlock_irq(&priv->lock);
 }
 
-
 static ssize_t show_mode(struct device *d, struct device_attribute *attr,
 			 char *buf)
 {