 */

 #include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
 #include <net/netlink.h>
 #include <rdma/rdma_netlink.h>
 
@@ -52,6 +54,11 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
 	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
+	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
+	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
+	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
+						     .len = 16 },
+	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
 };
 
 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
@@ -134,6 +141,65 @@ static int fill_port_info(struct sk_buff *msg,
 	return 0;
 }
 
+static int fill_res_info_entry(struct sk_buff *msg,
+			       const char *name, u64 curr)
+{
+	struct nlattr *entry_attr;
+
+	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+	if (!entry_attr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
+		goto err;
+	if (nla_put_u64_64bit(msg,
+			      RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+		goto err;
+
+	nla_nest_end(msg, entry_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, entry_attr);
+	return -EMSGSIZE;
+}
+
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+{
+	static const char * const names[RDMA_RESTRACK_MAX] = {
+		[RDMA_RESTRACK_PD] = "pd",
+		[RDMA_RESTRACK_CQ] = "cq",
+		[RDMA_RESTRACK_QP] = "qp",
+	};
+
+	struct rdma_restrack_root *res = &device->res;
+	struct nlattr *table_attr;
+	int ret, i, curr;
+
+	if (fill_nldev_handle(msg, device))
+		return -EMSGSIZE;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+	if (!table_attr)
+		return -EMSGSIZE;
+
+	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+		if (!names[i])
+			continue;
+		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
+		ret = fill_res_info_entry(msg, names[i], curr);
+		if (ret)
+			goto err;
+	}
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, table_attr);
+	return ret;
+}
+
 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct netlink_ext_ack *extack)
 {
@@ -329,6 +395,83 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 	return skb->len;
 }
 
+static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+			      struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+	struct ib_device *device;
+	struct sk_buff *msg;
+	u32 index;
+	int ret;
+
+	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+			  nldev_policy, extack);
+	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+		return -EINVAL;
+
+	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+	device = ib_device_get_by_index(index);
+	if (!device)
+		return -EINVAL;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		goto err;
+
+	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+			0, 0);
+
+	ret = fill_res_info(msg, device);
+	if (ret)
+		goto err_free;
+
+	nlmsg_end(msg, nlh);
+	put_device(&device->dev);
+	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+	nlmsg_free(msg);
+err:
+	put_device(&device->dev);
+	return ret;
+}
+
+static int _nldev_res_get_dumpit(struct ib_device *device,
+				 struct sk_buff *skb,
+				 struct netlink_callback *cb,
+				 unsigned int idx)
+{
+	int start = cb->args[0];
+	struct nlmsghdr *nlh;
+
+	if (idx < start)
+		return 0;
+
+	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+			0, NLM_F_MULTI);
+
+	if (fill_res_info(skb, device)) {
+		nlmsg_cancel(skb, nlh);
+		goto out;
+	}
+
+	nlmsg_end(skb, nlh);
+
+	idx++;
+
+out:
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nldev_res_get_dumpit(struct sk_buff *skb,
+				struct netlink_callback *cb)
+{
+	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
+}
+
 static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
 	[RDMA_NLDEV_CMD_GET] = {
 		.doit = nldev_get_doit,
@@ -338,6 +481,10 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
 		.doit = nldev_port_get_doit,
 		.dump = nldev_port_get_dumpit,
 	},
+	[RDMA_NLDEV_CMD_RES_GET] = {
+		.doit = nldev_res_get_doit,
+		.dump = nldev_res_get_dumpit,
+	},
 };
 
 void __init nldev_init(void)
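
For context, the sketch below shows one way userspace could exercise the new RDMA_NLDEV_CMD_RES_GET dump over a raw NETLINK_RDMA socket. It is not part of this patch: the main() program, the 8 KiB receive buffer, and the decision to only print payload sizes instead of walking the nested RDMA_NLDEV_ATTR_RES_SUMMARY attributes are illustrative assumptions. In practice the iproute2 rdma tool is the usual consumer of this interface.

/*
 * Illustrative only, not part of this commit: request the per-device
 * resource summary exposed by RDMA_NLDEV_CMD_RES_GET and report how
 * much nldev payload each reply carries.  Parsing of the nested
 * RDMA_NLDEV_ATTR_RES_SUMMARY table is omitted to keep the sketch short.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct nlmsghdr req = {
		.nlmsg_len   = NLMSG_LENGTH(0),
		.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						RDMA_NLDEV_CMD_RES_GET),
		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.nlmsg_seq   = 1,
	};
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0)
		return 1;

	/* Destination portid 0 addresses the kernel. */
	if (sendto(fd, &req, req.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE)
				goto done;
			/*
			 * Each reply carries RDMA_NLDEV_ATTR_DEV_INDEX,
			 * RDMA_NLDEV_ATTR_DEV_NAME and the nested
			 * RDMA_NLDEV_ATTR_RES_SUMMARY table built by
			 * fill_res_info() above.
			 */
			printf("nldev reply, %u bytes of payload\n",
			       nlh->nlmsg_len - NLMSG_HDRLEN);
		}
	}
done:
	close(fd);
	return 0;
}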