@@ -165,6 +165,93 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
165
165
return 0 ;
166
166
}
167
167
168
+ static void ib_gid_to_mlx5_roce_addr (const union ib_gid * gid ,
169
+ const struct ib_gid_attr * attr ,
170
+ void * mlx5_addr )
171
+ {
172
+ #define MLX5_SET_RA (p , f , v ) MLX5_SET(roce_addr_layout, p, f, v)
173
+ char * mlx5_addr_l3_addr = MLX5_ADDR_OF (roce_addr_layout , mlx5_addr ,
174
+ source_l3_address );
175
+ void * mlx5_addr_mac = MLX5_ADDR_OF (roce_addr_layout , mlx5_addr ,
176
+ source_mac_47_32 );
177
+
178
+ if (!gid )
179
+ return ;
180
+
181
+ ether_addr_copy (mlx5_addr_mac , attr -> ndev -> dev_addr );
182
+
183
+ if (is_vlan_dev (attr -> ndev )) {
184
+ MLX5_SET_RA (mlx5_addr , vlan_valid , 1 );
185
+ MLX5_SET_RA (mlx5_addr , vlan_id , vlan_dev_vlan_id (attr -> ndev ));
186
+ }
187
+
188
+ switch (attr -> gid_type ) {
189
+ case IB_GID_TYPE_IB :
190
+ MLX5_SET_RA (mlx5_addr , roce_version , MLX5_ROCE_VERSION_1 );
191
+ break ;
192
+ case IB_GID_TYPE_ROCE_UDP_ENCAP :
193
+ MLX5_SET_RA (mlx5_addr , roce_version , MLX5_ROCE_VERSION_2 );
194
+ break ;
195
+
196
+ default :
197
+ WARN_ON (true);
198
+ }
199
+
200
+ if (attr -> gid_type != IB_GID_TYPE_IB ) {
201
+ if (ipv6_addr_v4mapped ((void * )gid ))
202
+ MLX5_SET_RA (mlx5_addr , roce_l3_type ,
203
+ MLX5_ROCE_L3_TYPE_IPV4 );
204
+ else
205
+ MLX5_SET_RA (mlx5_addr , roce_l3_type ,
206
+ MLX5_ROCE_L3_TYPE_IPV6 );
207
+ }
208
+
209
+ if ((attr -> gid_type == IB_GID_TYPE_IB ) ||
210
+ !ipv6_addr_v4mapped ((void * )gid ))
211
+ memcpy (mlx5_addr_l3_addr , gid , sizeof (* gid ));
212
+ else
213
+ memcpy (& mlx5_addr_l3_addr [12 ], & gid -> raw [12 ], 4 );
214
+ }
215
+
216
+ static int set_roce_addr (struct ib_device * device , u8 port_num ,
217
+ unsigned int index ,
218
+ const union ib_gid * gid ,
219
+ const struct ib_gid_attr * attr )
220
+ {
221
+ struct mlx5_ib_dev * dev = to_mdev (device );
222
+ u32 in [MLX5_ST_SZ_DW (set_roce_address_in )];
223
+ u32 out [MLX5_ST_SZ_DW (set_roce_address_out )];
224
+ void * in_addr = MLX5_ADDR_OF (set_roce_address_in , in , roce_address );
225
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer (device , port_num );
226
+
227
+ if (ll != IB_LINK_LAYER_ETHERNET )
228
+ return - EINVAL ;
229
+
230
+ memset (in , 0 , sizeof (in ));
231
+
232
+ ib_gid_to_mlx5_roce_addr (gid , attr , in_addr );
233
+
234
+ MLX5_SET (set_roce_address_in , in , roce_address_index , index );
235
+ MLX5_SET (set_roce_address_in , in , opcode , MLX5_CMD_OP_SET_ROCE_ADDRESS );
236
+
237
+ memset (out , 0 , sizeof (out ));
238
+ return mlx5_cmd_exec (dev -> mdev , in , sizeof (in ), out , sizeof (out ));
239
+ }
240
+
241
/* ib_device ->add_gid callback: push the new GID table entry at @index on
 * @port_num down to the device's RoCE address table.
 */
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}
248
+
249
/* ib_device ->del_gid callback: clear the GID table entry at @index on
 * @port_num by programming a NULL GID (zeroed address) into the device.
 */
static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, NULL, NULL);
}
254
+
168
255
static int mlx5_use_mad_ifc (struct mlx5_ib_dev * dev )
169
256
{
170
257
return !dev -> mdev -> issi ;
@@ -1515,6 +1602,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1515
1602
if (ll == IB_LINK_LAYER_ETHERNET )
1516
1603
dev -> ib_dev .get_netdev = mlx5_ib_get_netdev ;
1517
1604
dev -> ib_dev .query_gid = mlx5_ib_query_gid ;
1605
+ dev -> ib_dev .add_gid = mlx5_ib_add_gid ;
1606
+ dev -> ib_dev .del_gid = mlx5_ib_del_gid ;
1518
1607
dev -> ib_dev .query_pkey = mlx5_ib_query_pkey ;
1519
1608
dev -> ib_dev .modify_device = mlx5_ib_modify_device ;
1520
1609
dev -> ib_dev .modify_port = mlx5_ib_modify_port ;
0 commit comments