@@ -131,7 +131,7 @@ static int get_port_state(struct ib_device *ibdev,
 	int ret;
 
 	memset(&attr, 0, sizeof(attr));
-	ret = mlx5_ib_query_port(ibdev, port_num, &attr);
+	ret = ibdev->query_port(ibdev, port_num, &attr);
 	if (!ret)
 		*state = attr.state;
 	return ret;
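
With this hunk, get_port_state() dispatches through the device's installed query_port callback instead of hard-coding mlx5_ib_query_port(), so a representor device transparently gets the restricted variant that the callback stages below install. A minimal sketch of why the indirection matters; the helper name is hypothetical, everything else is taken from this patch:

/* Hypothetical helper: the same call site serves PF and representor.
 * mlx5_ib_stage_non_default_cb() installs mlx5_ib_query_port here,
 * mlx5_ib_stage_rep_non_default_cb() installs mlx5_ib_rep_query_port. */
static bool example_port_is_down(struct ib_device *ibdev, u8 port_num)
{
	struct ib_port_attr attr;

	memset(&attr, 0, sizeof(attr));
	if (ibdev->query_port(ibdev, port_num, &attr))
		return false;
	return attr.state == IB_PORT_DOWN;
}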
@@ -1278,6 +1278,22 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 	return ret;
 }
 
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
+				  struct ib_port_attr *props)
+{
+	int ret;
+
+	/* Only link layer == ethernet is valid for representors */
+	ret = mlx5_query_port_roce(ibdev, port, props);
+	if (ret || !props)
+		return ret;
+
+	/* We don't support GIDs */
+	props->gid_tbl_len = 0;
+
+	return ret;
+}
+
 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			     union ib_gid *gid)
 {
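
Representors are valid only with an Ethernet link layer, so the new helper reuses mlx5_query_port_roce() and then zeroes gid_tbl_len. A hedged sketch of what a verbs consumer observes on a representor port after this change (port number and printout are illustrative):

/* Illustrative only: query a representor port through the core API. */
struct ib_port_attr attr;
int err;

err = ib_query_port(ibdev, 1, &attr);
if (!err)
	/* RoCE attributes such as state and speed are filled in, but
	 * gid_tbl_len is 0, so nothing will try to read or set GIDs. */
	pr_debug("state %d gid_tbl_len %d\n", attr.state, attr.gid_tbl_len);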
@@ -3794,6 +3810,25 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 	return 0;
 }
 
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
+				   struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+	err = ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+	return 0;
+}
+
 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
 {
 	struct mlx5_ib_dev *dev =
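
Setting RDMA_CORE_PORT_RAW_PACKET as the only capability bit tells the RDMA core that a representor port supports raw packet QPs and nothing else: no IB management, no RoCE connection management. The flag is also written before the nested ib_query_port() call, mirroring the pre-assignment in mlx5_port_immutable(), presumably so protocol checks that run during the query already see it. A hedged illustration using the core's standard helpers over core_cap_flags (the wrapper function is hypothetical):

/* Hypothetical wrapper: how the core classifies a representor port. */
static bool rep_port_is_raw_packet_only(struct ib_device *ibdev, u8 port_num)
{
	return rdma_protocol_raw_packet(ibdev, port_num) &&	/* true */
	       !rdma_protocol_roce(ibdev, port_num) &&		/* no RoCE CM/GIDs */
	       !rdma_protocol_ib(ibdev, port_num);		/* no MADs, no SM */
}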
@@ -3870,14 +3905,10 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
 {
 	int err;
 
-	err = mlx5_add_netdev_notifier(dev, port_num);
-	if (err)
-		return err;
-
 	if (MLX5_CAP_GEN(dev->mdev, roce)) {
 		err = mlx5_nic_vport_enable_roce(dev->mdev);
 		if (err)
-			goto err_unregister_netdevice_notifier;
+			return err;
 	}
 
 	err = mlx5_eth_lag_init(dev);
@@ -3890,8 +3921,6 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
 	if (MLX5_CAP_GEN(dev->mdev, roce))
 		mlx5_nic_vport_disable_roce(dev->mdev);
 
-err_unregister_netdevice_notifier:
-	mlx5_remove_netdev_notifier(dev, port_num);
 	return err;
 }
 
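These two hunks strip the netdev notifier handling out of mlx5_enable_eth(): registration moves to mlx5_ib_stage_common_roce_init() below, which both the PF and representor RoCE stages call, and the unwind label goes away because mlx5_enable_eth() no longer owns the notifier. The resulting pairing, condensed from mlx5_ib_stage_roce_init() as reworked later in this patch:

err = mlx5_ib_stage_common_roce_init(dev, port_num);	/* registers notifier */
if (err)
	return err;

err = mlx5_enable_eth(dev, port_num);	/* now only RoCE enable + LAG init */
if (err)
	goto cleanup;

return 0;

cleanup:
	mlx5_ib_stage_common_roce_cleanup(dev);	/* unregisters notifier */
	return err;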
@@ -4664,7 +4693,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
-	dev->ib_dev.query_port		= mlx5_ib_query_port;
 	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
 	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
 	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
@@ -4707,7 +4735,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
 	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
-	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
 	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
 	dev->ib_dev.get_vector_affinity	= mlx5_ib_get_vector_affinity;
 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
@@ -4758,44 +4785,107 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	return 0;
 }
 
+static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
+{
+	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
+	dev->ib_dev.query_port		= mlx5_ib_query_port;
+
+	return 0;
+}
+
+static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+{
+	dev->ib_dev.get_port_immutable	= mlx5_port_rep_immutable;
+	dev->ib_dev.query_port		= mlx5_ib_rep_query_port;
+
+	return 0;
+}
+
+static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
+					  u8 port_num)
+{
+	int i;
+
+	for (i = 0; i < dev->num_ports; i++) {
+		dev->roce[i].dev = dev;
+		dev->roce[i].native_port_num = i + 1;
+		dev->roce[i].last_port_state = IB_PORT_DOWN;
+	}
+
+	dev->ib_dev.get_netdev			= mlx5_ib_get_netdev;
+	dev->ib_dev.create_wq			= mlx5_ib_create_wq;
+	dev->ib_dev.modify_wq			= mlx5_ib_modify_wq;
+	dev->ib_dev.destroy_wq			= mlx5_ib_destroy_wq;
+	dev->ib_dev.create_rwq_ind_table	= mlx5_ib_create_rwq_ind_table;
+	dev->ib_dev.destroy_rwq_ind_table	= mlx5_ib_destroy_rwq_ind_table;
+
+	dev->ib_dev.uverbs_ex_cmd_mask |=
+			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+
+	return mlx5_add_netdev_notifier(dev, port_num);
+}
+
+static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
+	mlx5_remove_netdev_notifier(dev, port_num);
+}
+
+int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	enum rdma_link_layer ll;
+	int port_type_cap;
+	int err = 0;
+	u8 port_num;
+
+	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+	if (ll == IB_LINK_LAYER_ETHERNET)
+		err = mlx5_ib_stage_common_roce_init(dev, port_num);
+
+	return err;
+}
+
+void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+	mlx5_ib_stage_common_roce_cleanup(dev);
+}
+
 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 	enum rdma_link_layer ll;
 	int port_type_cap;
 	u8 port_num;
 	int err;
-	int i;
 
 	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		for (i = 0; i < dev->num_ports; i++) {
-			dev->roce[i].dev = dev;
-			dev->roce[i].native_port_num = i + 1;
-			dev->roce[i].last_port_state = IB_PORT_DOWN;
-		}
+		err = mlx5_ib_stage_common_roce_init(dev, port_num);
+		if (err)
+			return err;
 
-		dev->ib_dev.get_netdev			= mlx5_ib_get_netdev;
-		dev->ib_dev.create_wq			= mlx5_ib_create_wq;
-		dev->ib_dev.modify_wq			= mlx5_ib_modify_wq;
-		dev->ib_dev.destroy_wq			= mlx5_ib_destroy_wq;
-		dev->ib_dev.create_rwq_ind_table	= mlx5_ib_create_rwq_ind_table;
-		dev->ib_dev.destroy_rwq_ind_table	= mlx5_ib_destroy_rwq_ind_table;
-		dev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
 		err = mlx5_enable_eth(dev, port_num);
 		if (err)
-			return err;
+			goto cleanup;
 	}
 
 	return 0;
+cleanup:
+	mlx5_ib_stage_common_roce_cleanup(dev);
+
+	return err;
 }
 
 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
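
mlx5_ib_stage_rep_roce_init() and mlx5_ib_stage_rep_roce_cleanup() are deliberately non-static: together with the rep callback stage they are building blocks for a representor profile, which is not part of this diff. A hedged sketch of what such a profile could look like; the profile name and exact stage list are assumptions:

/* Hypothetical representor profile (name and stage selection assumed): */
static const struct mlx5_ib_profile rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
};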
@@ -4811,7 +4901,7 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
 		mlx5_disable_eth(dev);
-		mlx5_remove_netdev_notifier(dev, port_num);
+		mlx5_ib_stage_common_roce_cleanup(dev);
 	}
 }
 
@@ -5017,6 +5107,9 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 		     mlx5_ib_stage_caps_init,
 		     NULL),
+	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+		     mlx5_ib_stage_non_default_cb,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 		     mlx5_ib_stage_roce_init,
 		     mlx5_ib_stage_roce_cleanup),