Commit 38093fb

Pradeep Gopanapalli (GoKu-Gear6) authored and committed
1) Support vnic for EDR based platform (uVnic).
2) Supported types are now:
   Type 0 - XSMP_XCM_OVN      - Xsigo VP780/OSDN standalone chassis (add pvi)
   Type 1 - XSMP_XCM_NOUPLINK - EDR without uplink (add public-network)
   Type 2 - XSMP_XCM_UPLINK   - EDR with uplink (add public-network <with -if>)
3) Intelligence in the driver to support all the modes.
4) Added code for printing the multicast LID. [Revision 8008]
5) Removed style errors.
Reviewed-by: Sajid Zia <[email protected]>
Signed-off-by: Pradeep Gopanapalli <[email protected]>
Signed-off-by: Qing Huang <[email protected]>
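The three chassis types listed in the message map directly onto the new enum xcm_type and the xve_is_*() helper macros this commit adds to xve.h. The sketch below simply collects those definitions (plus the new UD receive scatter/gather constants from the same header) in one place to show how the driver branches on the mode; the CLI commands in the comments come from the commit message, the grouping is mine and is not a separate header in the tree, and priv stands for the struct xve_dev_priv pointer whose new vnic_type field is added further down in the diff.

/* Sketch only: definitions collected from the xve.h hunks below. */
enum xcm_type {
        XSMP_XCM_OVN,           /* Type 0: VP780/OSDN standalone chassis (add pvi) */
        XSMP_XCM_NOUPLINK,      /* Type 1: EDR without uplink (add public-network) */
        XSMP_XCM_UPLINK         /* Type 2: EDR with uplink (add public-network with -if) */
};

enum {
        XVE_UD_RX_OVN_SG = 2,   /* max buffer needed for 4K mtu */
        XVE_UD_RX_EDR_SG = 3,   /* max buffer needed for 10K mtu */
};

/* priv is a struct xve_dev_priv *; vnic_type is the field added below. */
#define xve_is_uplink(priv)     ((priv)->vnic_type == XSMP_XCM_UPLINK)
#define xve_is_ovn(priv)        ((priv)->vnic_type == XSMP_XCM_OVN)
#define xve_is_edr(priv)        (!xve_is_ovn(priv))

/* Any EDR vnic (with or without uplink) uses the deeper UD receive SG list. */
#define xve_ud_rx_sg(priv)      (xve_is_edr(priv) ? XVE_UD_RX_EDR_SG : \
                                 XVE_UD_RX_OVN_SG)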
1 parent 6ff369d commit 38093fb

File tree

11 files changed: +1155 −413 lines changed

drivers/infiniband/ulp/xsigo/xve/Makefile

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XVE) := xve.o
 xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
         xve_ethtool.o xve_cm.o xve_stats.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8004\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8008\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT

drivers/infiniband/ulp/xsigo/xve/xve.h

Lines changed: 138 additions & 32 deletions
@@ -133,6 +133,15 @@
 #define PREFIX_MULTI_ADDR 0x33
 /* ethernet header length */
 #define ETH_HDR_LEN 14
+#define XVE_EOIB_MAGIC 0x8919
+#define ETH_P_XVE_CTRL 0x8919
+#define XVE_EOIB_LEN 4
+
+#define XVE_VNET_MODE_RC 1
+#define XVE_VNET_MODE_UD 2
+
+#define XVE_MAX_RX_QUEUES 16
+#define XVE_MAX_TX_QUEUES 16
 
 /* constants */
 enum xve_flush_level {
@@ -142,8 +151,9 @@ enum xve_flush_level {
 };
 
 enum {
-        XVE_UD_HEAD_SIZE = IB_GRH_BYTES + VLAN_ETH_HLEN,
-        XVE_UD_RX_SG = 2,       /* max buffer needed for 4K mtu */
+        XVE_UD_HEAD_SIZE = IB_GRH_BYTES + VLAN_ETH_HLEN + XVE_EOIB_LEN + 2048,
+        XVE_UD_RX_OVN_SG = 2,   /* max buffer needed for 4K mtu */
+        XVE_UD_RX_EDR_SG = 3,   /* max buffer needed for 10K mtu */
         XVE_CM_MTU = 0x10000 - 0x20,    /* padding to align header to 16 */
         XVE_CM_BUF_SIZE = XVE_CM_MTU + VLAN_ETH_HLEN,
         XVE_CM_HEAD_SIZE = XVE_CM_BUF_SIZE % PAGE_SIZE,
@@ -300,6 +310,11 @@ enum {
         XVE_EVENT_PKEY_CHANGE_COUNTER,
         XVE_INVALID_EVENT_COUNTER,
 
+        XVE_GW_MCAST_TX,
+        XVE_HBEAT_COUNTER,
+        XVE_LINK_STATUS_COUNTER,
+        XVE_RX_NOGRH,
+
         XVE_MAX_COUNTERS
 };
 
@@ -400,7 +415,8 @@ enum {
         DEBUG_CONTINUE_UNLOAD = 0x00002000,
         DEBUG_MISC_INFO = 0x00004000,
         DEBUG_IBDEV_INFO = 0x00008000,
-        DEBUG_CM_INFO = 0x00010000
+        DEBUG_CM_INFO = 0x00010000,
+        DEBUG_CTRL_INFO = 0x00020000
 };
 
 #define XVE_OP_RECV (1ul << 31)
@@ -433,20 +449,39 @@ enum {
 #define XVE_OVER_QUOTA 23
 #define XVE_TSO_CHANGE 24
 #define XVE_RXBATCH_CHANGE 25
+#define XVE_VNIC_READY_PENDING 26
+#define XVE_HBEAT_LOST 27
+#define XVE_GW_STATE_UP 28
+
 #define MODULE_NAME "XVE"
 #define ALIGN_TO_FF(a) (a & 0xff)
 #define XVE_FWT_ENTRY_VALID 1
 #define XVE_FWT_ENTRY_REFRESH 2
-#define XVE_UD_MTU(ib_mtu) (ib_mtu - VLAN_ETH_HLEN)
-#define XVE_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES + VLAN_ETH_HLEN)
-#define XVE_MIN_PACKET_LEN 60
+#define XVE_UD_MTU(ib_mtu) (ib_mtu - (VLAN_ETH_HLEN + XVE_EOIB_LEN))
+#define XVE_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES + \
+                                 (VLAN_ETH_HLEN + XVE_EOIB_LEN))
+#define XVE_MIN_PACKET_LEN 64
+
+enum xcm_type {
+        XSMP_XCM_OVN,
+        XSMP_XCM_NOUPLINK,
+        XSMP_XCM_UPLINK
+};
+
+#define xve_is_uplink(priv) ((priv)->vnic_type == XSMP_XCM_UPLINK)
+#define xve_is_ovn(priv) ((priv)->vnic_type == XSMP_XCM_OVN)
+#define xve_is_edr(priv) (!xve_is_ovn(priv))
+#define xve_gw_linkup(priv) test_bit(XVE_GW_STATE_UP, &(priv)->state)
+#define xve_ud_rx_sg(priv) (xve_is_edr(priv) ? XVE_UD_RX_EDR_SG : \
+                            XVE_UD_RX_OVN_SG)
 
 /*Extern declarations */
 extern int xve_debug_level;
 extern int xve_cm_single_qp;
 extern u32 xve_hash_salt;
 extern int xve_sendq_size;
 extern int xve_recvq_size;
+extern int xve_max_send_cqe;
 extern struct ib_sa_client xve_sa_client;
 extern u32 xve_counters[];
 extern struct workqueue_struct *xve_taskqueue;
@@ -481,11 +516,12 @@ struct xve_mcast {
 
 struct xve_rx_buf {
         struct sk_buff *skb;
-        u64 mapping[XVE_UD_RX_SG];
+        u64 mapping[XVE_UD_RX_EDR_SG];
 };
 
 struct xve_tx_buf {
         struct sk_buff *skb;
+        struct xve_ah *ah;
         u64 mapping[MAX_SKB_FRAGS + 1];
 };
 
@@ -591,6 +627,46 @@ struct xve_fwt_s {
         unsigned num;
 };
 
+#define XVE_VNIC_HBEAT 1
+#define XVE_VNIC_LINK_STATE 2
+
+#define XVE_HBEAT_LOSS_THRES 3
+struct xve_keep_alive {
+        uint32_t pvi_id;
+        uint32_t type;
+        uint64_t tca_hbeat_cnt;
+        uint32_t uplink_status;
+} __packed;
+
+struct xve_gw_info {
+        union ib_gid t_gid;
+        u32 t_ctrl_qp;
+        u32 t_data_qp;
+        u32 t_qkey;
+        u16 t_pkey;
+};
+
+struct xve_eoib_hdr {
+        union {
+                struct { /* CX */
+                        __u8 encap_data;
+                        __u8 seg_off;
+                        __be16 seg_id;
+                };
+                struct { /* PSIF */
+                        __be16 magic;
+                        __be16 tss_mask_sz;
+                };
+        };
+} __packed;
+
+
+struct xve_rx_cm_info {
+        struct ib_sge rx_sge[XVE_CM_RX_SG];
+        struct ib_recv_wr rx_wr;
+};
+
+
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else. lock nests inside
@@ -608,9 +684,13 @@ struct xve_dev_priv {
         struct ib_qp *qp;
         union ib_gid local_gid;
         union ib_gid bcast_mgid;
+        __be16 bcast_mlid;
         u16 local_lid;
         u32 qkey;
 
+        /* Device attributes */
+        struct ib_device_attr dev_attr;
+
         /* Netdev related attributes */
         struct net_device *netdev;
         struct net_device_stats stats;
@@ -636,6 +716,9 @@ struct xve_dev_priv {
         unsigned long jiffies;
         struct xve_fwt_s xve_fwt;
         int aging_delay;
+        void *pci;
+        uint32_t hb_interval;
+        uint64_t last_hbeat;
 
         struct xve_cm_dev_priv cm;
         unsigned int cm_supported;
@@ -650,8 +733,10 @@ struct xve_dev_priv {
         unsigned int mcast_mtu;
         unsigned int max_ib_mtu;
         char mode[64];
-
         /* TX and RX Ring attributes */
+        int xve_recvq_size;
+        int xve_sendq_size;
+        int xve_max_send_cqe;
         struct xve_rx_buf *rx_ring;
         struct xve_tx_buf *tx_ring;
         unsigned tx_head;
@@ -661,7 +746,8 @@ struct xve_dev_priv {
         struct ib_send_wr tx_wr;
         struct ib_wc send_wc[MAX_SEND_CQE];
         struct ib_recv_wr rx_wr;
-        struct ib_sge rx_sge[XVE_UD_RX_SG];
+        /* Allocate EDR SG for now */
+        struct ib_sge rx_sge[XVE_UD_RX_EDR_SG];
         struct ib_wc ibwc[XVE_NUM_WC];
         struct ib_cq *recv_cq;
         struct ib_cq *send_cq;
@@ -674,9 +760,12 @@ struct xve_dev_priv {
         u64 resource_id;
         u64 mac;
         u32 net_id;
+        u32 install_flag;
         u16 mp_flag;
-        char vnet_mode;
+        u8 vnet_mode;
+        u8 vnic_type;
         char xve_name[XVE_MAX_NAME_SIZE];
+        struct xve_gw_info gw;
 
         /* Proc related attributes */
         struct proc_dir_entry *nic_dir;
@@ -696,7 +785,7 @@ struct xve_ah {
         struct ib_ah *ah;
         struct list_head list;
         struct kref ref;
-        unsigned last_send;
+        atomic_t refcnt;
 };
 
 struct ib_packed_grh {
@@ -724,7 +813,10 @@ struct xve_path {
         struct rb_node rb_node;
         struct list_head list;
         int valid;
+        int index;
         struct sk_buff_head queue;
+        struct sk_buff_head uplink_queue;
+        atomic_t users;
 };
 
 struct xve_work {
@@ -790,14 +882,6 @@ struct icmp6_ndp {
                 dev->stats.rx_bytes += len; \
         } while (0)
 
-#define SET_FLUSH_BIT(priv, bit) \
-        do { \
-                unsigned long flags; \
-                spin_lock_irqsave(&priv->lock, flags); \
-                set_bit(bit, &priv->state); \
-                spin_unlock_irqrestore(&priv->lock, flags); \
-        } while (0)
-
 #define PRINT(level, x, fmt, arg...) \
         printk(level "%s: " fmt, MODULE_NAME, ##arg)
 #define XSMP_ERROR(fmt, arg...) \
@@ -807,18 +891,18 @@ struct icmp6_ndp {
                    ((struct xve_dev_priv *) priv)->netdev->name, \
                    ## arg)
 #define xve_warn(priv, format, arg...) \
-        xve_printk(KERN_WARNING, priv, format , ## arg)
+        xve_printk(KERN_WARNING, priv, format, ## arg)
 
 #define XSMP_INFO(fmt, arg...) \
         do { \
                 if (xve_debug_level & DEBUG_XSMP_INFO) \
-                        PRINT(KERN_DEBUG, "XSMP", fmt , ## arg);\
+                        PRINT(KERN_DEBUG, "XSMP", fmt, ## arg);\
         } while (0)
 
 #define xve_test(fmt, arg...) \
         do { \
                 if (xve_debug_level & DEBUG_TEST_INFO) \
-                        PRINT(KERN_DEBUG, "DEBUG", fmt , ## arg); \
+                        PRINT(KERN_DEBUG, "DEBUG", fmt, ## arg); \
         } while (0)
 
 #define xve_dbg_data(priv, format, arg...) \
@@ -827,10 +911,16 @@ struct icmp6_ndp {
                         xve_printk(KERN_DEBUG, priv, format, \
                                    ## arg); \
         } while (0)
+#define xve_dbg_ctrl(priv, format, arg...) \
+        do { \
+                if (xve_debug_level & DEBUG_CTRL_INFO) \
+                        xve_printk(KERN_DEBUG, priv, format, \
+                                   ## arg); \
+        } while (0)
 #define xve_dbg_mcast(priv, format, arg...) \
         do { \
                 if (xve_debug_level & DEBUG_MCAST_INFO) \
-                        xve_printk(KERN_ERR, priv, format , ## arg); \
+                        xve_printk(KERN_ERR, priv, format, ## arg); \
         } while (0)
 #define xve_debug(level, priv, format, arg...) \
         do { \
@@ -899,6 +989,8 @@ static inline void xve_send_skb(struct xve_dev_priv *priv, struct sk_buff *skb)
 
         if (netdev->features & NETIF_F_LRO)
                 lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
+        else if (netdev->features & NETIF_F_GRO)
+                napi_gro_receive(&priv->napi, skb);
         else
                 netif_receive_skb(skb);
 
@@ -1018,8 +1110,11 @@ static inline void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
 
                 if (length == 0) {
                         /* don't need this page */
-                        skb_fill_page_desc(toskb, i, skb_frag_page(frag),
-                                           0, PAGE_SIZE);
+                        if (toskb)
+                                skb_fill_page_desc(toskb, i, skb_frag_page(frag)
+                                        , 0, PAGE_SIZE);
+                        else
+                                __free_page(skb_shinfo(skb)->frags[i].page.p);
                         --skb_shinfo(skb)->nr_frags;
                 } else {
                         size = min_t(unsigned, length, (unsigned)PAGE_SIZE);
@@ -1046,11 +1141,20 @@ static inline void xve_put_ah(struct xve_ah *ah)
         kref_put(&ah->ref, xve_free_ah);
 }
 
+static inline void xve_put_ah_refcnt(struct xve_ah *address)
+{
+        atomic_dec(&address->refcnt);
+}
+static inline void xve_get_ah_refcnt(struct xve_ah *address)
+{
+        atomic_inc(&address->refcnt);
+}
+
 int xve_open(struct net_device *dev);
 int xve_add_pkey_attr(struct net_device *dev);
 
-void xve_send(struct net_device *dev, struct sk_buff *skb,
-              struct xve_ah *address, u32 qpn);
+int xve_send(struct net_device *dev, struct sk_buff *skb,
+             struct xve_ah *address, u32 qpn, int type);
 int poll_tx(struct xve_dev_priv *priv);
 int xve_xsmp_send_oper_state(struct xve_dev_priv *priv, u64 vid, int state);
 void handle_carrier_state(struct xve_dev_priv *priv, char state);
@@ -1096,7 +1200,7 @@ void xve_remove_fwt_entry(struct xve_dev_priv *priv,
 void xve_fwt_entry_free(struct xve_dev_priv *priv,
                         struct xve_fwt_entry *fwt_entry);
 
-void xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
+int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 void xve_advert_mcast_join(struct xve_dev_priv *priv);
 int xve_mcast_start_thread(struct net_device *dev);
 int xve_mcast_stop_thread(struct net_device *dev, int flush);
@@ -1129,7 +1233,7 @@ int xve_send_hbeat(struct xve_dev_priv *xvep);
 void xve_xsmp_handle_oper_req(xsmp_cookie_t xsmp_hndl, u64 resource_id);
 
 /*CM */
-void xve_cm_send(struct net_device *dev, struct sk_buff *skb,
+int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
                 struct xve_cm_ctx *tx);
 int xve_cm_dev_open(struct net_device *dev);
 void xve_cm_dev_stop(struct net_device *dev);
@@ -1163,9 +1267,11 @@ void xve_prepare_skb(struct xve_dev_priv *priv, struct sk_buff *skb);
 void xve_tables_exit(void);
 void xve_remove_one(struct xve_dev_priv *priv);
 struct xve_path *__path_find(struct net_device *netdev, void *gid);
-extern int xve_add_proc_entry(struct xve_dev_priv *vp);
+int xve_add_proc_entry(struct xve_dev_priv *vp);
 void xve_remove_proc_entry(struct xve_dev_priv *vp);
-extern int xve_change_rxbatch(struct xve_dev_priv *xvep, int flag);
+int xve_gw_send(struct net_device *priv, struct sk_buff *skb);
+struct xve_path *xve_get_gw_path(struct net_device *dev);
+void xve_set_oper_up_state(struct xve_dev_priv *priv);
 
 static inline int xve_continue_unload(void)
 {
@@ -1179,7 +1285,7 @@ static inline int xve_get_misc_info(void)
 
 static inline int xg_vlan_tx_tag_present(struct sk_buff *skb)
 {
-        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+        struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
 
         return veth->h_vlan_proto == htons(ETH_P_8021Q);
 }
