15
15
#include <linux/err.h>
16
16
#include <linux/ctype.h>
17
17
#include <linux/processor.h>
18
- #include <net/smc.h>
19
-
20
- #include <asm/debug.h>
21
18
22
19
#include "ism.h"
23
20
@@ -34,6 +31,84 @@ static const struct pci_device_id ism_device_table[] = {
34
31
MODULE_DEVICE_TABLE (pci , ism_device_table );
35
32
36
33
static debug_info_t * ism_debug_info ;
34
+ static const struct smcd_ops ism_ops ;
35
+
36
+ #define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
37
+ static struct ism_client * clients [MAX_CLIENTS ]; /* use an array rather than */
38
+ /* a list for fast mapping */
39
+ static u8 max_client ;
40
+ static DEFINE_SPINLOCK (clients_lock );
41
+ struct ism_dev_list {
42
+ struct list_head list ;
43
+ struct mutex mutex ; /* protects ism device list */
44
+ };
45
+
46
+ static struct ism_dev_list ism_dev_list = {
47
+ .list = LIST_HEAD_INIT (ism_dev_list .list ),
48
+ .mutex = __MUTEX_INITIALIZER (ism_dev_list .mutex ),
49
+ };
50
+
51
+ int ism_register_client (struct ism_client * client )
52
+ {
53
+ struct ism_dev * ism ;
54
+ unsigned long flags ;
55
+ int i , rc = - ENOSPC ;
56
+
57
+ mutex_lock (& ism_dev_list .mutex );
58
+ spin_lock_irqsave (& clients_lock , flags );
59
+ for (i = 0 ; i < MAX_CLIENTS ; ++ i ) {
60
+ if (!clients [i ]) {
61
+ clients [i ] = client ;
62
+ client -> id = i ;
63
+ if (i == max_client )
64
+ max_client ++ ;
65
+ rc = 0 ;
66
+ break ;
67
+ }
68
+ }
69
+ spin_unlock_irqrestore (& clients_lock , flags );
70
+ if (i < MAX_CLIENTS ) {
71
+ /* initialize with all devices that we got so far */
72
+ list_for_each_entry (ism , & ism_dev_list .list , list ) {
73
+ ism -> priv [i ] = NULL ;
74
+ client -> add (ism );
75
+ }
76
+ }
77
+ mutex_unlock (& ism_dev_list .mutex );
78
+
79
+ return rc ;
80
+ }
81
+ EXPORT_SYMBOL_GPL (ism_register_client );
82
+
83
+ int ism_unregister_client (struct ism_client * client )
84
+ {
85
+ struct ism_dev * ism ;
86
+ unsigned long flags ;
87
+ int rc = 0 ;
88
+
89
+ mutex_lock (& ism_dev_list .mutex );
90
+ spin_lock_irqsave (& clients_lock , flags );
91
+ clients [client -> id ] = NULL ;
92
+ if (client -> id + 1 == max_client )
93
+ max_client -- ;
94
+ spin_unlock_irqrestore (& clients_lock , flags );
95
+ list_for_each_entry (ism , & ism_dev_list .list , list ) {
96
+ for (int i = 0 ; i < ISM_NR_DMBS ; ++ i ) {
97
+ if (ism -> sba_client_arr [i ] == client -> id ) {
98
+ pr_err ("%s: attempt to unregister client '%s'"
99
+ "with registered dmb(s)\n" , __func__ ,
100
+ client -> name );
101
+ rc = - EBUSY ;
102
+ goto out ;
103
+ }
104
+ }
105
+ }
106
+ out :
107
+ mutex_unlock (& ism_dev_list .mutex );
108
+
109
+ return rc ;
110
+ }
111
+ EXPORT_SYMBOL_GPL (ism_unregister_client );
37
112
38
113
static int ism_cmd (struct ism_dev * ism , void * cmd )
39
114
{
@@ -193,7 +268,7 @@ static int ism_read_local_gid(struct ism_dev *ism)
193
268
if (ret )
194
269
goto out ;
195
270
196
- ism -> smcd -> local_gid = cmd .response .gid ;
271
+ ism -> local_gid = cmd .response .gid ;
197
272
out :
198
273
return ret ;
199
274
}
@@ -437,21 +512,27 @@ static u16 ism_get_chid(struct smcd_dev *smcd)
437
512
438
513
static void ism_handle_event (struct ism_dev * ism )
439
514
{
440
- struct smcd_event * entry ;
515
+ struct ism_event * entry ;
516
+ int i ;
441
517
442
518
while ((ism -> ieq_idx + 1 ) != READ_ONCE (ism -> ieq -> header .idx )) {
443
519
if (++ (ism -> ieq_idx ) == ARRAY_SIZE (ism -> ieq -> entry ))
444
520
ism -> ieq_idx = 0 ;
445
521
446
522
entry = & ism -> ieq -> entry [ism -> ieq_idx ];
447
523
debug_event (ism_debug_info , 2 , entry , sizeof (* entry ));
448
- smcd_handle_event (ism -> smcd , entry );
524
+ spin_lock (& clients_lock );
525
+ for (i = 0 ; i < max_client ; ++ i )
526
+ if (clients [i ])
527
+ clients [i ]-> handle_event (ism , entry );
528
+ spin_unlock (& clients_lock );
449
529
}
450
530
}
451
531
452
532
static irqreturn_t ism_handle_irq (int irq , void * data )
453
533
{
454
534
struct ism_dev * ism = data ;
535
+ struct ism_client * clt ;
455
536
unsigned long bit , end ;
456
537
unsigned long * bv ;
457
538
u16 dmbemask ;
@@ -471,7 +552,8 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
471
552
dmbemask = ism -> sba -> dmbe_mask [bit + ISM_DMB_BIT_OFFSET ];
472
553
ism -> sba -> dmbe_mask [bit + ISM_DMB_BIT_OFFSET ] = 0 ;
473
554
barrier ();
474
- smcd_handle_irq (ism -> smcd , bit + ISM_DMB_BIT_OFFSET , dmbemask );
555
+ clt = clients [ism -> sba_client_arr [bit ]];
556
+ clt -> handle_irq (ism , bit + ISM_DMB_BIT_OFFSET , dmbemask );
475
557
}
476
558
477
559
if (ism -> sba -> e ) {
@@ -497,10 +579,21 @@ static const struct smcd_ops ism_ops = {
497
579
.get_chid = ism_get_chid ,
498
580
};
499
581
582
+ static void ism_dev_add_work_func (struct work_struct * work )
583
+ {
584
+ struct ism_client * client = container_of (work , struct ism_client ,
585
+ add_work );
586
+
587
+ client -> add (client -> tgt_ism );
588
+ atomic_dec (& client -> tgt_ism -> add_dev_cnt );
589
+ wake_up (& client -> tgt_ism -> waitq );
590
+ }
591
+
500
592
static int ism_dev_init (struct ism_dev * ism )
501
593
{
502
594
struct pci_dev * pdev = ism -> pdev ;
503
- int ret ;
595
+ unsigned long flags ;
596
+ int i , ret ;
504
597
505
598
ret = pci_alloc_irq_vectors (pdev , 1 , 1 , PCI_IRQ_MSI );
506
599
if (ret <= 0 )
@@ -527,6 +620,28 @@ static int ism_dev_init(struct ism_dev *ism)
527
620
/* hardware is V2 capable */
528
621
ism_create_system_eid ();
529
622
623
+ init_waitqueue_head (& ism -> waitq );
624
+ atomic_set (& ism -> free_clients_cnt , 0 );
625
+ atomic_set (& ism -> add_dev_cnt , 0 );
626
+
627
+ wait_event (ism -> waitq , !atomic_read (& ism -> add_dev_cnt ));
628
+ spin_lock_irqsave (& clients_lock , flags );
629
+ for (i = 0 ; i < max_client ; ++ i )
630
+ if (clients [i ]) {
631
+ INIT_WORK (& clients [i ]-> add_work ,
632
+ ism_dev_add_work_func );
633
+ clients [i ]-> tgt_ism = ism ;
634
+ atomic_inc (& ism -> add_dev_cnt );
635
+ schedule_work (& clients [i ]-> add_work );
636
+ }
637
+ spin_unlock_irqrestore (& clients_lock , flags );
638
+
639
+ wait_event (ism -> waitq , !atomic_read (& ism -> add_dev_cnt ));
640
+
641
+ mutex_lock (& ism_dev_list .mutex );
642
+ list_add (& ism -> list , & ism_dev_list .list );
643
+ mutex_unlock (& ism_dev_list .mutex );
644
+
530
645
ret = smcd_register_dev (ism -> smcd );
531
646
if (ret )
532
647
goto unreg_ieq ;
@@ -602,9 +717,36 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
602
717
return ret ;
603
718
}
604
719
720
+ static void ism_dev_remove_work_func (struct work_struct * work )
721
+ {
722
+ struct ism_client * client = container_of (work , struct ism_client ,
723
+ remove_work );
724
+
725
+ client -> remove (client -> tgt_ism );
726
+ atomic_dec (& client -> tgt_ism -> free_clients_cnt );
727
+ wake_up (& client -> tgt_ism -> waitq );
728
+ }
729
+
730
+ /* Callers must hold ism_dev_list.mutex */
605
731
static void ism_dev_exit (struct ism_dev * ism )
606
732
{
607
733
struct pci_dev * pdev = ism -> pdev ;
734
+ unsigned long flags ;
735
+ int i ;
736
+
737
+ wait_event (ism -> waitq , !atomic_read (& ism -> free_clients_cnt ));
738
+ spin_lock_irqsave (& clients_lock , flags );
739
+ for (i = 0 ; i < max_client ; ++ i )
740
+ if (clients [i ]) {
741
+ INIT_WORK (& clients [i ]-> remove_work ,
742
+ ism_dev_remove_work_func );
743
+ clients [i ]-> tgt_ism = ism ;
744
+ atomic_inc (& ism -> free_clients_cnt );
745
+ schedule_work (& clients [i ]-> remove_work );
746
+ }
747
+ spin_unlock_irqrestore (& clients_lock , flags );
748
+
749
+ wait_event (ism -> waitq , !atomic_read (& ism -> free_clients_cnt ));
608
750
609
751
smcd_unregister_dev (ism -> smcd );
610
752
if (SYSTEM_EID .serial_number [0 ] != '0' ||
@@ -614,18 +756,22 @@ static void ism_dev_exit(struct ism_dev *ism)
614
756
unregister_sba (ism );
615
757
free_irq (pci_irq_vector (pdev , 0 ), ism );
616
758
pci_free_irq_vectors (pdev );
759
+ list_del_init (& ism -> list );
617
760
}
618
761
619
762
static void ism_remove (struct pci_dev * pdev )
620
763
{
621
764
struct ism_dev * ism = dev_get_drvdata (& pdev -> dev );
622
765
766
+ mutex_lock (& ism_dev_list .mutex );
623
767
ism_dev_exit (ism );
768
+ mutex_unlock (& ism_dev_list .mutex );
624
769
625
770
smcd_free_dev (ism -> smcd );
626
771
pci_clear_master (pdev );
627
772
pci_release_mem_regions (pdev );
628
773
pci_disable_device (pdev );
774
+ device_del (& ism -> dev );
629
775
dev_set_drvdata (& pdev -> dev , NULL );
630
776
kfree (ism );
631
777
}
@@ -645,6 +791,8 @@ static int __init ism_init(void)
645
791
if (!ism_debug_info )
646
792
return - ENODEV ;
647
793
794
+ memset (clients , 0 , sizeof (clients ));
795
+ max_client = 0 ;
648
796
debug_register_view (ism_debug_info , & debug_hex_ascii_view );
649
797
ret = pci_register_driver (& ism_driver );
650
798
if (ret )
@@ -655,6 +803,14 @@ static int __init ism_init(void)
655
803
656
804
/* Module exit: tear down all remaining devices, then unregister. */
static void __exit ism_exit(void)
{
	struct ism_dev *ism, *next;

	/*
	 * ism_dev_exit() removes the current entry from the list via
	 * list_del_init(), which re-points the node's ->next at itself;
	 * a plain list_for_each_entry() would therefore never advance
	 * past the deleted node.  Use the _safe variant, which caches
	 * the next element before the body runs.
	 */
	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry_safe(ism, next, &ism_dev_list.list, list) {
		ism_dev_exit(ism);
	}
	mutex_unlock(&ism_dev_list.mutex);

	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}
0 commit comments