 #define PREFIX_MULTI_ADDR 0x33
 /* ethernet header length */
 #define ETH_HDR_LEN 14
+#define XVE_EOIB_MAGIC 0x8919
+#define ETH_P_XVE_CTRL 0x8919
+#define XVE_EOIB_LEN 4
+
+#define XVE_VNET_MODE_RC 1
+#define XVE_VNET_MODE_UD 2
+
+#define XVE_MAX_RX_QUEUES 16
+#define XVE_MAX_TX_QUEUES 16

 /* constants */
 enum xve_flush_level {
@@ -142,8 +151,9 @@ enum xve_flush_level {
 };

 enum {
-	XVE_UD_HEAD_SIZE = IB_GRH_BYTES + VLAN_ETH_HLEN,
-	XVE_UD_RX_SG = 2,	/* max buffer needed for 4K mtu */
+	XVE_UD_HEAD_SIZE = IB_GRH_BYTES + VLAN_ETH_HLEN + XVE_EOIB_LEN + 2048,
+	XVE_UD_RX_OVN_SG = 2,	/* max buffer needed for 4K mtu */
+	XVE_UD_RX_EDR_SG = 3,	/* max buffer needed for 10K mtu */
 	XVE_CM_MTU = 0x10000 - 0x20,	/* padding to align header to 16 */
 	XVE_CM_BUF_SIZE = XVE_CM_MTU + VLAN_ETH_HLEN,
 	XVE_CM_HEAD_SIZE = XVE_CM_BUF_SIZE % PAGE_SIZE,
@@ -300,6 +310,11 @@ enum {
 	XVE_EVENT_PKEY_CHANGE_COUNTER,
 	XVE_INVALID_EVENT_COUNTER,

+	XVE_GW_MCAST_TX,
+	XVE_HBEAT_COUNTER,
+	XVE_LINK_STATUS_COUNTER,
+	XVE_RX_NOGRH,
+
 	XVE_MAX_COUNTERS
 };

@@ -400,7 +415,8 @@ enum {
 	DEBUG_CONTINUE_UNLOAD = 0x00002000,
 	DEBUG_MISC_INFO = 0x00004000,
 	DEBUG_IBDEV_INFO = 0x00008000,
-	DEBUG_CM_INFO = 0x00010000
+	DEBUG_CM_INFO = 0x00010000,
+	DEBUG_CTRL_INFO = 0x00020000
 };

 #define XVE_OP_RECV (1ul << 31)
@@ -433,20 +449,39 @@ enum {
 #define XVE_OVER_QUOTA 23
 #define XVE_TSO_CHANGE 24
 #define XVE_RXBATCH_CHANGE 25
+#define XVE_VNIC_READY_PENDING 26
+#define XVE_HBEAT_LOST 27
+#define XVE_GW_STATE_UP 28
+
 #define MODULE_NAME "XVE"
 #define ALIGN_TO_FF(a) (a & 0xff)
 #define XVE_FWT_ENTRY_VALID 1
 #define XVE_FWT_ENTRY_REFRESH 2
-#define XVE_UD_MTU(ib_mtu) (ib_mtu - VLAN_ETH_HLEN)
-#define XVE_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES + VLAN_ETH_HLEN)
-#define XVE_MIN_PACKET_LEN 60
+#define XVE_UD_MTU(ib_mtu) (ib_mtu - (VLAN_ETH_HLEN + XVE_EOIB_LEN))
+#define XVE_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES + \
+				 (VLAN_ETH_HLEN + XVE_EOIB_LEN))
+#define XVE_MIN_PACKET_LEN 64
+
+enum xcm_type {
+	XSMP_XCM_OVN,
+	XSMP_XCM_NOUPLINK,
+	XSMP_XCM_UPLINK
+};
+
+#define xve_is_uplink(priv) ((priv)->vnic_type == XSMP_XCM_UPLINK)
+#define xve_is_ovn(priv) ((priv)->vnic_type == XSMP_XCM_OVN)
+#define xve_is_edr(priv) (!xve_is_ovn(priv))
+#define xve_gw_linkup(priv) test_bit(XVE_GW_STATE_UP, &(priv)->state)
+#define xve_ud_rx_sg(priv) (xve_is_edr(priv) ? XVE_UD_RX_EDR_SG : \
+			    XVE_UD_RX_OVN_SG)
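/*
 * Sizing note (illustrative; 4K pages and a 10240-byte EDR MTU are assumed,
 * neither is stated in this header): XVE_UD_HEAD_SIZE holds the 40-byte GRH,
 * the 18-byte VLAN Ethernet header, the 4-byte EoIB header and the first
 * 2048 bytes of payload, i.e. 2110 bytes. The remainder of a UD datagram
 * lands in page-sized fragments:
 *
 *	XVE_UD_BUF_SIZE(4096)  = 4158  -> 2048 bytes left -> 1 page -> 2 SGEs
 *	XVE_UD_BUF_SIZE(10240) = 10302 -> 8192 bytes left -> 2 pages -> 3 SGEs
 *
 * which matches XVE_UD_RX_OVN_SG (2) and XVE_UD_RX_EDR_SG (3) as selected by
 * xve_ud_rx_sg() above.
 */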

 /*Extern declarations */
 extern int xve_debug_level;
 extern int xve_cm_single_qp;
 extern u32 xve_hash_salt;
 extern int xve_sendq_size;
 extern int xve_recvq_size;
+extern int xve_max_send_cqe;
 extern struct ib_sa_client xve_sa_client;
 extern u32 xve_counters[];
 extern struct workqueue_struct *xve_taskqueue;
@@ -481,11 +516,12 @@ struct xve_mcast {

 struct xve_rx_buf {
 	struct sk_buff *skb;
-	u64 mapping[XVE_UD_RX_SG];
+	u64 mapping[XVE_UD_RX_EDR_SG];
 };

 struct xve_tx_buf {
 	struct sk_buff *skb;
+	struct xve_ah *ah;
 	u64 mapping[MAX_SKB_FRAGS + 1];
 };

@@ -591,6 +627,46 @@ struct xve_fwt_s {
 	unsigned num;
 };

+#define XVE_VNIC_HBEAT 1
+#define XVE_VNIC_LINK_STATE 2
+
+#define XVE_HBEAT_LOSS_THRES 3
+struct xve_keep_alive {
+	uint32_t pvi_id;
+	uint32_t type;
+	uint64_t tca_hbeat_cnt;
+	uint32_t uplink_status;
+} __packed;
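/*
 * Likely semantics (an inference, not spelled out in this header):
 * xve_keep_alive is the payload of the XVE_VNIC_HBEAT and XVE_VNIC_LINK_STATE
 * control messages, and XVE_HBEAT_LOSS_THRES suggests a vNIC is flagged
 * XVE_HBEAT_LOST once roughly three heartbeat intervals (hb_interval /
 * last_hbeat in struct xve_dev_priv below) pass without tca_hbeat_cnt moving.
 */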
+
+struct xve_gw_info {
+	union ib_gid t_gid;
+	u32 t_ctrl_qp;
+	u32 t_data_qp;
+	u32 t_qkey;
+	u16 t_pkey;
+};
+
+struct xve_eoib_hdr {
+	union {
+		struct { /* CX */
+			__u8 encap_data;
+			__u8 seg_off;
+			__be16 seg_id;
+		};
+		struct { /* PSIF */
+			__be16 magic;
+			__be16 tss_mask_sz;
+		};
+	};
+} __packed;
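/*
 * Illustrative sketch only (not taken from this commit): a receive path that
 * wants to recognise the PSIF layout could key off the magic field and strip
 * the 4-byte encapsulation before handing the frame up, e.g.
 *
 *	struct xve_eoib_hdr *eoib = (struct xve_eoib_hdr *)skb->data;
 *
 *	if (eoib->magic == cpu_to_be16(XVE_EOIB_MAGIC))
 *		skb_pull(skb, XVE_EOIB_LEN);
 */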
+
+
+struct xve_rx_cm_info {
+	struct ib_sge rx_sge[XVE_CM_RX_SG];
+	struct ib_recv_wr rx_wr;
+};
+
+
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else. lock nests inside
@@ -608,9 +684,13 @@ struct xve_dev_priv {
 	struct ib_qp *qp;
 	union ib_gid local_gid;
 	union ib_gid bcast_mgid;
+	__be16 bcast_mlid;
 	u16 local_lid;
 	u32 qkey;

+	/* Device attributes */
+	struct ib_device_attr dev_attr;
+
 	/* Netdev related attributes */
 	struct net_device *netdev;
 	struct net_device_stats stats;
@@ -636,6 +716,9 @@ struct xve_dev_priv {
 	unsigned long jiffies;
 	struct xve_fwt_s xve_fwt;
 	int aging_delay;
+	void *pci;
+	uint32_t hb_interval;
+	uint64_t last_hbeat;

 	struct xve_cm_dev_priv cm;
 	unsigned int cm_supported;
@@ -650,8 +733,10 @@ struct xve_dev_priv {
 	unsigned int mcast_mtu;
 	unsigned int max_ib_mtu;
 	char mode[64];
-
 	/* TX and RX Ring attributes */
+	int xve_recvq_size;
+	int xve_sendq_size;
+	int xve_max_send_cqe;
 	struct xve_rx_buf *rx_ring;
 	struct xve_tx_buf *tx_ring;
 	unsigned tx_head;
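/*
 * Presumably (not confirmed by this header) xve_recvq_size, xve_sendq_size
 * and xve_max_send_cqe snapshot the identically named module parameters at
 * vNIC creation time, so each vNIC sizes its own rings rather than reading
 * the globals on the fast path.
 */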
@@ -661,7 +746,8 @@ struct xve_dev_priv {
 	struct ib_send_wr tx_wr;
 	struct ib_wc send_wc[MAX_SEND_CQE];
 	struct ib_recv_wr rx_wr;
-	struct ib_sge rx_sge[XVE_UD_RX_SG];
+	/* Allocate EDR SG for now */
+	struct ib_sge rx_sge[XVE_UD_RX_EDR_SG];
 	struct ib_wc ibwc[XVE_NUM_WC];
 	struct ib_cq *recv_cq;
 	struct ib_cq *send_cq;
@@ -674,9 +760,12 @@ struct xve_dev_priv {
 	u64 resource_id;
 	u64 mac;
 	u32 net_id;
+	u32 install_flag;
 	u16 mp_flag;
-	char vnet_mode;
+	u8 vnet_mode;
+	u8 vnic_type;
 	char xve_name[XVE_MAX_NAME_SIZE];
+	struct xve_gw_info gw;

 	/* Proc related attributes */
 	struct proc_dir_entry *nic_dir;
@@ -696,7 +785,7 @@ struct xve_ah {
 	struct ib_ah *ah;
 	struct list_head list;
 	struct kref ref;
-	unsigned last_send;
+	atomic_t refcnt;
 };

 struct ib_packed_grh {
@@ -724,7 +813,10 @@ struct xve_path {
 	struct rb_node rb_node;
 	struct list_head list;
 	int valid;
+	int index;
 	struct sk_buff_head queue;
+	struct sk_buff_head uplink_queue;
+	atomic_t users;
 };

 struct xve_work {
@@ -790,14 +882,6 @@ struct icmp6_ndp {
 		dev->stats.rx_bytes += len; \
 	} while (0)

-#define SET_FLUSH_BIT(priv, bit) \
-	do { \
-		unsigned long flags; \
-		spin_lock_irqsave(&priv->lock, flags); \
-		set_bit(bit, &priv->state); \
-		spin_unlock_irqrestore(&priv->lock, flags); \
-	} while (0)
-
 #define PRINT(level, x, fmt, arg...) \
 	printk(level "%s: " fmt, MODULE_NAME, ##arg)
 #define XSMP_ERROR(fmt, arg...) \
@@ -807,18 +891,18 @@ struct icmp6_ndp {
 		   ((struct xve_dev_priv *) priv)->netdev->name, \
 		   ## arg)
 #define xve_warn(priv, format, arg...) \
-	xve_printk(KERN_WARNING, priv, format , ## arg)
+	xve_printk(KERN_WARNING, priv, format, ## arg)

 #define XSMP_INFO(fmt, arg...) \
 	do { \
 		if (xve_debug_level & DEBUG_XSMP_INFO) \
-			PRINT(KERN_DEBUG, "XSMP", fmt , ## arg);\
+			PRINT(KERN_DEBUG, "XSMP", fmt, ## arg);\
 	} while (0)

 #define xve_test(fmt, arg...) \
 	do { \
 		if (xve_debug_level & DEBUG_TEST_INFO) \
-			PRINT(KERN_DEBUG, "DEBUG", fmt , ## arg); \
+			PRINT(KERN_DEBUG, "DEBUG", fmt, ## arg); \
 	} while (0)

 #define xve_dbg_data(priv, format, arg...) \
@@ -827,10 +911,16 @@ struct icmp6_ndp {
 			xve_printk(KERN_DEBUG, priv, format, \
 				   ## arg); \
 	} while (0)
+#define xve_dbg_ctrl(priv, format, arg...) \
+	do { \
+		if (xve_debug_level & DEBUG_CTRL_INFO) \
+			xve_printk(KERN_DEBUG, priv, format, \
+				   ## arg); \
+	} while (0)
 #define xve_dbg_mcast(priv, format, arg...) \
 	do { \
 		if (xve_debug_level & DEBUG_MCAST_INFO) \
-			xve_printk(KERN_ERR, priv, format , ## arg); \
+			xve_printk(KERN_ERR, priv, format, ## arg); \
 	} while (0)
 #define xve_debug(level, priv, format, arg...) \
 	do { \
@@ -899,6 +989,8 @@ static inline void xve_send_skb(struct xve_dev_priv *priv, struct sk_buff *skb)

 	if (netdev->features & NETIF_F_LRO)
 		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
+	else if (netdev->features & NETIF_F_GRO)
+		napi_gro_receive(&priv->napi, skb);
 	else
 		netif_receive_skb(skb);

@@ -1018,8 +1110,11 @@ static inline void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,

 		if (length == 0) {
 			/* don't need this page */
-			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
-					   0, PAGE_SIZE);
+			if (toskb)
+				skb_fill_page_desc(toskb, i, skb_frag_page(frag)
+						   , 0, PAGE_SIZE);
+			else
+				__free_page(skb_shinfo(skb)->frags[i].page.p);
 			--skb_shinfo(skb)->nr_frags;
 		} else {
 			size = min_t(unsigned, length, (unsigned)PAGE_SIZE);
@@ -1046,11 +1141,20 @@ static inline void xve_put_ah(struct xve_ah *ah)
 	kref_put(&ah->ref, xve_free_ah);
 }

+static inline void xve_put_ah_refcnt(struct xve_ah *address)
+{
+	atomic_dec(&address->refcnt);
+}
+static inline void xve_get_ah_refcnt(struct xve_ah *address)
+{
+	atomic_inc(&address->refcnt);
+}
+
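/*
 * A minimal usage sketch, assuming the obvious pairing (the TX path itself is
 * not part of this header): take a reference when an AH is parked in a TX
 * ring slot, and drop it when that slot's send completion is reaped.
 *
 *	xve_get_ah_refcnt(ah);
 *	tx_buf->ah = ah;		(tx_buf = &priv->tx_ring[slot])
 *	... xve_send(dev, skb, ah, qpn, type) ...
 *	xve_put_ah_refcnt(tx_buf->ah);	(from poll_tx or the send CQ handler)
 */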
 int xve_open(struct net_device *dev);
 int xve_add_pkey_attr(struct net_device *dev);

-void xve_send(struct net_device *dev, struct sk_buff *skb,
-	      struct xve_ah *address, u32 qpn);
+int xve_send(struct net_device *dev, struct sk_buff *skb,
+	     struct xve_ah *address, u32 qpn, int type);
 int poll_tx(struct xve_dev_priv *priv);
 int xve_xsmp_send_oper_state(struct xve_dev_priv *priv, u64 vid, int state);
 void handle_carrier_state(struct xve_dev_priv *priv, char state);
@@ -1096,7 +1200,7 @@ void xve_remove_fwt_entry(struct xve_dev_priv *priv,
 void xve_fwt_entry_free(struct xve_dev_priv *priv,
 			struct xve_fwt_entry *fwt_entry);

-void xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
+int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 void xve_advert_mcast_join(struct xve_dev_priv *priv);
 int xve_mcast_start_thread(struct net_device *dev);
 int xve_mcast_stop_thread(struct net_device *dev, int flush);
@@ -1129,7 +1233,7 @@ int xve_send_hbeat(struct xve_dev_priv *xvep);
 void xve_xsmp_handle_oper_req(xsmp_cookie_t xsmp_hndl, u64 resource_id);

 /*CM */
-void xve_cm_send(struct net_device *dev, struct sk_buff *skb,
+int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
 		 struct xve_cm_ctx *tx);
 int xve_cm_dev_open(struct net_device *dev);
 void xve_cm_dev_stop(struct net_device *dev);
@@ -1163,9 +1267,11 @@ void xve_prepare_skb(struct xve_dev_priv *priv, struct sk_buff *skb);
 void xve_tables_exit(void);
 void xve_remove_one(struct xve_dev_priv *priv);
 struct xve_path *__path_find(struct net_device *netdev, void *gid);
-extern int xve_add_proc_entry(struct xve_dev_priv *vp);
+int xve_add_proc_entry(struct xve_dev_priv *vp);
 void xve_remove_proc_entry(struct xve_dev_priv *vp);
-extern int xve_change_rxbatch(struct xve_dev_priv *xvep, int flag);
+int xve_gw_send(struct net_device *priv, struct sk_buff *skb);
+struct xve_path *xve_get_gw_path(struct net_device *dev);
+void xve_set_oper_up_state(struct xve_dev_priv *priv);

 static inline int xve_continue_unload(void)
 {
@@ -1179,7 +1285,7 @@ static inline int xve_get_misc_info(void)

 static inline int xg_vlan_tx_tag_present(struct sk_buff *skb)
 {
-	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);

 	return veth->h_vlan_proto == htons(ETH_P_8021Q);
 }