@@ -491,6 +491,7 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
                 return NULL;
         cxl_rr->port = port;
         cxl_rr->region = cxlr;
+        cxl_rr->nr_targets = 1;
         xa_init(&cxl_rr->endpoints);
 
         rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
@@ -531,10 +532,12 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
         struct cxl_decoder *cxld = cxl_rr->decoder;
         struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
 
-        rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
-                       GFP_KERNEL);
-        if (rc)
-                return rc;
+        if (ep) {
+                rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
+                               GFP_KERNEL);
+                if (rc)
+                        return rc;
+        }
         cxl_rr->nr_eps++;
 
         if (!cxld->region) {
@@ -655,6 +658,16 @@ static int cxl_port_attach_region(struct cxl_port *port,
                 goto out_erase;
         }
 
+        dev_dbg(&cxlr->dev,
+                "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
+                dev_name(port->uport), dev_name(&port->dev),
+                dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+                dev_name(&cxled->cxld.dev), pos,
+                ep ? ep->next ? dev_name(ep->next->uport) :
+                                dev_name(&cxlmd->dev) :
+                        "none",
+                cxl_rr->nr_eps, cxl_rr->nr_targets);
+
         return 0;
 out_erase:
         if (cxl_rr->nr_eps == 0)
@@ -673,15 +686,22 @@ static void cxl_port_detach_region(struct cxl_port *port,
                                     struct cxl_endpoint_decoder *cxled)
 {
         struct cxl_region_ref *cxl_rr;
-        struct cxl_ep *ep;
+        struct cxl_ep *ep = NULL;
 
         lockdep_assert_held_write(&cxl_region_rwsem);
 
         cxl_rr = cxl_rr_load(port, cxlr);
         if (!cxl_rr)
                 return;
 
-        ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
+        /*
+         * Endpoint ports do not carry cxl_ep references, and they
+         * never target more than one endpoint by definition
+         */
+        if (cxl_rr->decoder == &cxled->cxld)
+                cxl_rr->nr_eps--;
+        else
+                ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
         if (ep) {
                 struct cxl_ep *ep_iter;
                 unsigned long index;
@@ -702,6 +722,224 @@ static void cxl_port_detach_region(struct cxl_port *port,
                 free_region_ref(cxl_rr);
 }
 
+static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+                           struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
+                           int distance)
+{
+        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+        struct cxl_region *cxlr = cxl_rr->region;
+        struct cxl_region_params *p = &cxlr->params;
+        struct cxl_endpoint_decoder *cxled_peer;
+        struct cxl_port *port = cxl_rr->port;
+        struct cxl_memdev *cxlmd_peer;
+        struct cxl_ep *ep_peer;
+        int pos = cxled->pos;
+
+        /*
+         * If this position wants to share a dport with the last endpoint mapped
+         * then that endpoint, at index 'position - distance', must also be
+         * mapped by this dport.
+         */
+        if (pos < distance) {
+                dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
+                        dev_name(port->uport), dev_name(&port->dev),
+                        dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+                return -ENXIO;
+        }
+        cxled_peer = p->targets[pos - distance];
+        cxlmd_peer = cxled_to_memdev(cxled_peer);
+        ep_peer = cxl_ep_load(port, cxlmd_peer);
+        if (ep->dport != ep_peer->dport) {
+                dev_dbg(&cxlr->dev,
+                        "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
+                        dev_name(port->uport), dev_name(&port->dev),
+                        dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
+                        dev_name(&cxlmd_peer->dev),
+                        dev_name(&cxled_peer->cxld.dev));
+                return -ENXIO;
+        }
+
+        return 0;
+}
+
+static int cxl_port_setup_targets(struct cxl_port *port,
+                                  struct cxl_region *cxlr,
+                                  struct cxl_endpoint_decoder *cxled)
+{
+        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+        int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+        struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+        struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+        struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+        struct cxl_region_params *p = &cxlr->params;
+        struct cxl_decoder *cxld = cxl_rr->decoder;
+        struct cxl_switch_decoder *cxlsd;
+        u16 eig, peig;
+        u8 eiw, peiw;
+
+        /*
+         * While root level decoders support x3, x6, x12, switch level
+         * decoders only support powers of 2 up to x16.
+         */
+        if (!is_power_of_2(cxl_rr->nr_targets)) {
+                dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
+                        dev_name(port->uport), dev_name(&port->dev),
+                        cxl_rr->nr_targets);
+                return -EINVAL;
+        }
+
+        cxlsd = to_cxl_switch_decoder(&cxld->dev);
+        if (cxl_rr->nr_targets_set) {
+                int i, distance;
+
+                distance = p->nr_targets / cxl_rr->nr_targets;
+                for (i = 0; i < cxl_rr->nr_targets_set; i++)
+                        if (ep->dport == cxlsd->target[i]) {
+                                rc = check_last_peer(cxled, ep, cxl_rr,
+                                                     distance);
+                                if (rc)
+                                        return rc;
+                                goto out_target_set;
+                        }
+                goto add_target;
+        }
+
+        if (is_cxl_root(parent_port)) {
+                parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+                parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+                /*
+                 * For purposes of address bit routing, use power-of-2 math for
+                 * switch ports.
+                 */
+                if (!is_power_of_2(parent_iw))
+                        parent_iw /= 3;
+        } else {
+                struct cxl_region_ref *parent_rr;
+                struct cxl_decoder *parent_cxld;
+
+                parent_rr = cxl_rr_load(parent_port, cxlr);
+                parent_cxld = parent_rr->decoder;
+                parent_ig = parent_cxld->interleave_granularity;
+                parent_iw = parent_cxld->interleave_ways;
+        }
+
+        granularity_to_cxl(parent_ig, &peig);
+        ways_to_cxl(parent_iw, &peiw);
+
+        iw = cxl_rr->nr_targets;
+        ways_to_cxl(iw, &eiw);
+        if (cxl_rr->nr_targets > 1) {
+                u32 address_bit = max(peig + peiw, eiw + peig);
+
+                eig = address_bit - eiw;
+        } else {
+                eiw = peiw;
+                eig = peig;
+        }
+
+        rc = cxl_to_granularity(eig, &ig);
+        if (rc) {
+                dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
+                        dev_name(port->uport), dev_name(&port->dev),
+                        256 << eig);
+                return rc;
+        }
+
+        cxld->interleave_ways = iw;
+        cxld->interleave_granularity = ig;
+        dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+                dev_name(&port->dev), iw, ig);
+add_target:
+        if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
+                dev_dbg(&cxlr->dev,
+                        "%s:%s: targets full trying to add %s:%s at %d\n",
+                        dev_name(port->uport), dev_name(&port->dev),
+                        dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+                return -ENXIO;
+        }
+        cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+        inc = 1;
+out_target_set:
+        cxl_rr->nr_targets_set += inc;
+        dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
+                dev_name(port->uport), dev_name(&port->dev),
+                cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+                dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+
+        return 0;
+}
+
+static void cxl_port_reset_targets(struct cxl_port *port,
+                                   struct cxl_region *cxlr)
+{
+        struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+
+        /*
+         * After the last endpoint has been detached the entire cxl_rr may now
+         * be gone.
+         */
+        if (cxl_rr)
+                cxl_rr->nr_targets_set = 0;
+}
+
+static void cxl_region_teardown_targets(struct cxl_region *cxlr)
+{
+        struct cxl_region_params *p = &cxlr->params;
+        struct cxl_endpoint_decoder *cxled;
+        struct cxl_memdev *cxlmd;
+        struct cxl_port *iter;
+        struct cxl_ep *ep;
+        int i;
+
+        for (i = 0; i < p->nr_targets; i++) {
+                cxled = p->targets[i];
+                cxlmd = cxled_to_memdev(cxled);
+
+                iter = cxled_to_port(cxled);
+                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+                        iter = to_cxl_port(iter->dev.parent);
+
+                for (ep = cxl_ep_load(iter, cxlmd); iter;
+                     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
+                        cxl_port_reset_targets(iter, cxlr);
+        }
+}
+
+static int cxl_region_setup_targets(struct cxl_region *cxlr)
+{
+        struct cxl_region_params *p = &cxlr->params;
+        struct cxl_endpoint_decoder *cxled;
+        struct cxl_memdev *cxlmd;
+        struct cxl_port *iter;
+        struct cxl_ep *ep;
+        int i, rc;
+
+        for (i = 0; i < p->nr_targets; i++) {
+                cxled = p->targets[i];
+                cxlmd = cxled_to_memdev(cxled);
+
+                iter = cxled_to_port(cxled);
+                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+                        iter = to_cxl_port(iter->dev.parent);
+
+                /*
+                 * Descend the topology tree programming targets while
+                 * looking for conflicts.
+                 */
+                for (ep = cxl_ep_load(iter, cxlmd); iter;
+                     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+                        rc = cxl_port_setup_targets(iter, cxlr, cxled);
+                        if (rc) {
+                                cxl_region_teardown_targets(cxlr);
+                                return rc;
+                        }
+                }
+        }
+
+        return 0;
+}
+
 static int cxl_region_attach(struct cxl_region *cxlr,
                              struct cxl_endpoint_decoder *cxled, int pos)
 {
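For reference, switch decoders here encode interleave granularity as 256 << eig bytes and power-of-2 ways as 1 << eiw. Below is a hypothetical stand-alone C harness, not part of the patch and with assumed topology values, that mirrors the address_bit arithmetic from cxl_port_setup_targets() for a parent port interleaved x2 at 256 bytes feeding a port with two targets:

#include <stdio.h>

int main(void)
{
        /* assumed example topology: parent x2 at 256B, this port x2 */
        unsigned int parent_iw = 2, parent_ig = 256, nr_targets = 2;
        /* ways_to_cxl()/granularity_to_cxl() equivalents for powers of 2 */
        unsigned int peiw = __builtin_ctz(parent_iw);
        unsigned int peig = __builtin_ctz(parent_ig / 256);
        unsigned int eiw = __builtin_ctz(nr_targets);
        unsigned int eig, address_bit;

        if (nr_targets > 1) {
                /* same max(peig + peiw, eiw + peig) selection as the patch */
                address_bit = peig + peiw > eiw + peig ?
                              peig + peiw : eiw + peig;
                eig = address_bit - eiw;
        } else {
                eiw = peiw;
                eig = peig;
        }

        /* decode back: ways = 1 << eiw, granularity = 256 << eig */
        printf("iw: %u ig: %u\n", 1u << eiw, 256u << eig);
        return 0;
}

With these inputs the harness prints "iw: 2 ig: 256", the same values the iw/ig dev_dbg() above would report; __builtin_ctz() stands in for the kernel's encoding helpers and assumes a GCC/Clang toolchain.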
@@ -814,8 +1052,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
         cxled->pos = pos;
         p->nr_targets++;
 
-        if (p->nr_targets == p->interleave_ways)
+        if (p->nr_targets == p->interleave_ways) {
+                rc = cxl_region_setup_targets(cxlr);
+                if (rc)
+                        goto err;
                 p->state = CXL_CONFIG_ACTIVE;
+        }
 
         return 0;
 
@@ -854,8 +1096,10 @@ static void cxl_region_detach(struct cxl_endpoint_decoder *cxled)
                 goto out;
         }
 
-        if (p->state == CXL_CONFIG_ACTIVE)
+        if (p->state == CXL_CONFIG_ACTIVE) {
                 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+                cxl_region_teardown_targets(cxlr);
+        }
         p->targets[cxled->pos] = NULL;
         p->nr_targets--;
 
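Taken together, the last two hunks gate target programming on region completion. The sketch below is a minimal userspace model of that state machine, with hypothetical names (region_model, attach(), detach()) rather than the driver's API: programming happens only when the final position is filled, and an active region tears its targets down before losing an endpoint:

#include <stdbool.h>
#include <stdio.h>

enum config_state { CONFIG_INTERLEAVE_ACTIVE, CONFIG_ACTIVE };

struct region_model {
        int nr_targets;
        int interleave_ways;
        enum config_state state;
};

/* models cxl_region_attach(): setup runs only on the last attach */
static bool attach(struct region_model *r)
{
        r->nr_targets++;
        if (r->nr_targets == r->interleave_ways) {
                /* cxl_region_setup_targets() would run here (assume success) */
                r->state = CONFIG_ACTIVE;
        }
        return r->state == CONFIG_ACTIVE;
}

/* models cxl_region_detach(): teardown precedes dropping the target */
static void detach(struct region_model *r)
{
        if (r->state == CONFIG_ACTIVE) {
                r->state = CONFIG_INTERLEAVE_ACTIVE;
                /* cxl_region_teardown_targets() would run here */
        }
        r->nr_targets--;
}

int main(void)
{
        struct region_model r = { 0, 2, CONFIG_INTERLEAVE_ACTIVE };

        printf("after 1st attach active: %d\n", attach(&r)); /* 0 */
        printf("after 2nd attach active: %d\n", attach(&r)); /* 1 */
        detach(&r); /* back to CONFIG_INTERLEAVE_ACTIVE */
        return 0;
}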