@@ -38,6 +38,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/dma-mapping.h>
+#include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/security.h>
@@ -58,8 +59,13 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
 
+/*
+ * The mlx4 driver uses the top byte to distinguish which virtual function
+ * generated the MAD, so we must avoid using it.
+ */
+#define AGENT_ID_LIMIT		(1 << 24)
+static DEFINE_IDR(ib_mad_clients);
 static struct list_head ib_mad_port_list;
-static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
 
 /* Port list lock */
 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
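
Why (1 << 24): an agent ID becomes the high 32 bits of the MAD transaction ID, and on mlx4 the top byte of those 32 bits is overwritten with the virtual function number. A minimal userspace sketch of that layout follows; the agent ID and VF number are illustrative values, not code from this patch:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t agent_id = 0x00abcdef;            /* must stay below 1 << 24 */
        uint64_t tid = (uint64_t)agent_id << 32;   /* agent ID placed as hi_tid */

        tid |= (uint64_t)0x42 << 56;               /* mlx4 stamps a hypothetical VF 0x42
                                                      into the top byte */

        uint32_t hi_tid = tid >> 32;
        assert((hi_tid & 0x00ffffff) == agent_id); /* agent ID still intact */
        printf("hi_tid=%#010x vf=%#04x agent=%#08x\n",
               hi_tid, hi_tid >> 24, hi_tid & 0x00ffffff);
        return 0;
    }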
@@ -377,13 +383,24 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		goto error4;
 	}
 
-	spin_lock_irq(&port_priv->reg_lock);
-	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
+	idr_preload(GFP_KERNEL);
+	idr_lock(&ib_mad_clients);
+	ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
+			AGENT_ID_LIMIT, GFP_ATOMIC);
+	idr_unlock(&ib_mad_clients);
+	idr_preload_end();
+
+	if (ret2 < 0) {
+		ret = ERR_PTR(ret2);
+		goto error5;
+	}
+	mad_agent_priv->agent.hi_tid = ret2;
 
 	/*
 	 * Make sure MAD registration (if supplied)
 	 * is non overlapping with any existing ones
 	 */
+	spin_lock_irq(&port_priv->reg_lock);
 	if (mad_reg_req) {
 		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
 		if (!is_vendor_class(mgmt_class)) {
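
idr_alloc_cyclic() differs from idr_alloc() in that the search for a free ID starts just past the most recently allocated one and wraps at the limit, so a freed agent ID is not handed straight back out (which could let a late response be routed to the wrong new agent). A rough userspace model of the cyclic behaviour — toy code, not the kernel's IDR implementation:

    #include <stdbool.h>
    #include <stdio.h>

    #define LIMIT 8                 /* stands in for AGENT_ID_LIMIT */

    static bool used[LIMIT];
    static int next;                /* cursor: one past the last ID handed out */

    /* Toy model of idr_alloc_cyclic(): scan from the cursor, wrap once. */
    static int alloc_cyclic(void)
    {
        for (int i = 0; i < LIMIT; i++) {
            int id = (next + i) % LIMIT;
            if (!used[id]) {
                used[id] = true;
                next = id + 1;
                return id;
            }
        }
        return -1;                  /* table full */
    }

    int main(void)
    {
        int a = alloc_cyclic();     /* 0 */
        int b = alloc_cyclic();     /* 1 */
        used[a] = false;            /* free ID 0 ... */
        printf("%d %d %d\n", a, b, alloc_cyclic());  /* ... but next ID is 2 */
        return 0;
    }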
@@ -394,7 +411,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 				if (method) {
 					if (method_in_use(&method,
 							   mad_reg_req))
-						goto error5;
+						goto error6;
 				}
 			}
 			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -410,24 +427,25 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 					if (is_vendor_method_in_use(
 							vendor_class,
 							mad_reg_req))
-						goto error5;
+						goto error6;
 				}
 			}
 			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 		}
 		if (ret2) {
 			ret = ERR_PTR(ret2);
-			goto error5;
+			goto error6;
 		}
 	}
-
-	/* Add mad agent into port's agent list */
-	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 	spin_unlock_irq(&port_priv->reg_lock);
 
 	return &mad_agent_priv->agent;
-error5:
+error6:
 	spin_unlock_irq(&port_priv->reg_lock);
+	idr_lock(&ib_mad_clients);
+	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+	idr_unlock(&ib_mad_clients);
+error5:
 	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 error4:
 	kfree(reg_req);
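
The new error6 label sits above error5 so the unwind releases resources in reverse order of acquisition: the IDR entry is dropped first, then control falls through into the security cleanup and the rest of the existing chain. A generic, self-contained sketch of this goto-unwind idiom, with made-up resources A/B/C:

    #include <stdbool.h>
    #include <stdio.h>

    static bool acquire(const char *what, bool ok)
    {
        if (ok)
            printf("acquired %s\n", what);
        return ok;
    }

    /* Failure while holding A and B unwinds B first, then A: err_b falls
     * through into err_a, just as error6 falls through into error5 above. */
    static int do_setup(void)
    {
        if (!acquire("A", true))
            goto err;
        if (!acquire("B", true))
            goto err_a;
        if (!acquire("C", false))   /* simulate the late failure */
            goto err_b;
        return 0;

    err_b:
        printf("release B\n");
    err_a:
        printf("release A\n");
    err:
        return -1;
    }

    int main(void)
    {
        return do_setup() ? 1 : 0;
    }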
@@ -589,8 +607,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 
 	spin_lock_irq(&port_priv->reg_lock);
 	remove_mad_reg_req(mad_agent_priv);
-	list_del(&mad_agent_priv->agent_list);
 	spin_unlock_irq(&port_priv->reg_lock);
+	idr_lock(&ib_mad_clients);
+	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+	idr_unlock(&ib_mad_clients);
 
 	flush_workqueue(port_priv->wq);
 	ib_cancel_rmpp_recvs(mad_agent_priv);
@@ -601,7 +621,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 
 	kfree(mad_agent_priv->reg_req);
-	kfree(mad_agent_priv);
+	kfree_rcu(mad_agent_priv, rcu);
 }
 
 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
@@ -1722,22 +1742,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	struct ib_mad_agent_private *mad_agent = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	if (ib_response_mad(mad_hdr)) {
 		u32 hi_tid;
-		struct ib_mad_agent_private *entry;
 
 		/*
 		 * Routing is based on high 32 bits of transaction ID
 		 * of MAD.
 		 */
 		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
-		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
-			if (entry->agent.hi_tid == hi_tid) {
-				mad_agent = entry;
-				break;
-			}
-		}
+		rcu_read_lock();
+		mad_agent = idr_find(&ib_mad_clients, hi_tid);
+		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
+			mad_agent = NULL;
+		rcu_read_unlock();
 	} else {
 		struct ib_mad_mgmt_class_table *class;
 		struct ib_mad_mgmt_method_table *method;
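
The response path now runs lockless: under rcu_read_lock() the agent found in the IDR may already be unregistering, so a reference is taken only if the count has not yet hit zero. The increment-if-not-zero step can be modelled in userspace with a compare-and-swap loop — a sketch of the idea, not the kernel's atomic_inc_not_zero():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Succeed only if the count has not already dropped to zero,
     * i.e. the object is not being torn down. */
    static bool inc_not_zero(atomic_int *ref)
    {
        int old = atomic_load(ref);

        while (old != 0) {
            if (atomic_compare_exchange_weak(ref, &old, old + 1))
                return true;    /* took a reference */
            /* old was reloaded by the failed CAS; retry */
        }
        return false;           /* object already dying; caller must not use it */
    }

    int main(void)
    {
        atomic_int live = 1, dying = 0;

        printf("live:  %s\n", inc_not_zero(&live) ? "got ref" : "missed");
        printf("dying: %s\n", inc_not_zero(&dying) ? "got ref" : "missed");
        return 0;
    }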
@@ -1746,6 +1763,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 		const struct ib_vendor_mad *vendor_mad;
 		int index;
 
+		spin_lock_irqsave(&port_priv->reg_lock, flags);
 		/*
 		 * Routing is based on version, class, and method
 		 * For "newer" vendor MADs, also based on OUI
@@ -1785,20 +1803,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 							~IB_MGMT_METHOD_RESP];
 			}
 		}
+		if (mad_agent)
+			atomic_inc(&mad_agent->refcount);
+out:
+		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 	}
 
-	if (mad_agent) {
-		if (mad_agent->agent.recv_handler)
-			atomic_inc(&mad_agent->refcount);
-		else {
-			dev_notice(&port_priv->device->dev,
-				   "No receive handler for client %p on port %d\n",
-				   &mad_agent->agent, port_priv->port_num);
-			mad_agent = NULL;
-		}
+	if (mad_agent && !mad_agent->agent.recv_handler) {
+		dev_notice(&port_priv->device->dev,
+			   "No receive handler for client %p on port %d\n",
+			   &mad_agent->agent, port_priv->port_num);
+		deref_mad_agent(mad_agent);
+		mad_agent = NULL;
 	}
-out:
-	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
 	return mad_agent;
 }
@@ -3161,7 +3178,6 @@ static int ib_mad_port_open(struct ib_device *device,
 	port_priv->device = device;
 	port_priv->port_num = port_num;
 	spin_lock_init(&port_priv->reg_lock);
-	INIT_LIST_HEAD(&port_priv->agent_list);
 	init_mad_qp(port_priv, &port_priv->qp_info[0]);
 	init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
@@ -3340,6 +3356,9 @@ int ib_mad_init(void)
 
 	INIT_LIST_HEAD(&ib_mad_port_list);
 
+	/* Client ID 0 is used for snoop-only clients */
+	idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
+
 	if (ib_register_client(&mad_client)) {
 		pr_err("Couldn't register ib_mad client\n");
 		return -EINVAL;
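
Burning ID 0 with a NULL pointer at init time means idr_alloc_cyclic() in ib_register_mad_agent() can never return 0, so hi_tid 0 stays reserved for snoop-only clients, and an idr_find() on it yields NULL and is ignored by find_mad_agent(). In the toy allocator sketched earlier, the same trick would be:

    used[0] = true;     /* reserve ID 0 up front: NULL payload, no agent */
    /* alloc_cyclic() now returns 1 for the first real registration */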