@@ -507,7 +507,6 @@ static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
 static bool intel_cqm_rmid_stabilize(unsigned int *available)
 {
 	struct cqm_rmid_entry *entry, *tmp;
-	struct perf_event *event;
 
 	lockdep_assert_held(&cache_mutex);
 
@@ -577,19 +576,9 @@ static bool intel_cqm_rmid_stabilize(unsigned int *available)
 
 		/*
 		 * If we have groups waiting for RMIDs, hand
-		 * them one now.
+		 * them one now provided they don't conflict.
 		 */
-		list_for_each_entry(event, &cache_groups,
-				    hw.cqm_groups_entry) {
-			if (__rmid_valid(event->hw.cqm_rmid))
-				continue;
-
-			intel_cqm_xchg_rmid(event, entry->rmid);
-			entry = NULL;
-			break;
-		}
-
-		if (!entry)
+		if (intel_cqm_sched_in_event(entry->rmid))
 			continue;
 
 		/*
@@ -604,25 +593,73 @@ static bool intel_cqm_rmid_stabilize(unsigned int *available)
 
 /*
  * Pick a victim group and move it to the tail of the group list.
+ * @next: The first group without an RMID
  */
-static struct perf_event *
-__intel_cqm_pick_and_rotate(void)
+static void __intel_cqm_pick_and_rotate(struct perf_event *next)
 {
 	struct perf_event *rotor;
+	unsigned int rmid;
 
 	lockdep_assert_held(&cache_mutex);
-	lockdep_assert_held(&cache_lock);
 
 	rotor = list_first_entry(&cache_groups, struct perf_event,
 				 hw.cqm_groups_entry);
+
+	/*
+	 * The group at the front of the list should always have a valid
+	 * RMID. If it doesn't then no groups have RMIDs assigned and we
+	 * don't need to rotate the list.
+	 */
+	if (next == rotor)
+		return;
+
+	rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
+	__put_rmid(rmid);
+
 	list_rotate_left(&cache_groups);
+}
+
+/*
+ * Deallocate the RMIDs from any events that conflict with @event, and
+ * place them on the back of the group list.
+ */
+static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
+{
+	struct perf_event *group, *g;
+	unsigned int rmid;
+
+	lockdep_assert_held(&cache_mutex);
+
+	list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
+		if (group == event)
+			continue;
+
+		rmid = group->hw.cqm_rmid;
+
+		/*
+		 * Skip events that don't have a valid RMID.
+		 */
+		if (!__rmid_valid(rmid))
+			continue;
+
+		/*
+		 * No conflict? No problem! Leave the event alone.
+		 */
+		if (!__conflict_event(group, event))
+			continue;
 
-	return rotor;
+		intel_cqm_xchg_rmid(group, INVALID_RMID);
+		__put_rmid(rmid);
+	}
 }
 
 /*
  * Attempt to rotate the groups and assign new RMIDs.
  *
+ * We rotate for two reasons,
+ *   1. To handle the scheduling of conflicting events
+ *   2. To recycle RMIDs
+ *
  * Rotating RMIDs is complicated because the hardware doesn't give us
  * any clues.
  *
@@ -642,11 +679,10 @@ __intel_cqm_pick_and_rotate(void)
  */
 static bool __intel_cqm_rmid_rotate(void)
 {
-	struct perf_event *group, *rotor, *start = NULL;
+	struct perf_event *group, *start = NULL;
 	unsigned int threshold_limit;
 	unsigned int nr_needed = 0;
 	unsigned int nr_available;
-	unsigned int rmid;
 	bool rotated = false;
 
 	mutex_lock(&cache_mutex);
@@ -678,7 +714,9 @@ static bool __intel_cqm_rmid_rotate(void)
 		goto stabilize;
 
 	/*
-	 * We have more event groups without RMIDs than available RMIDs.
+	 * We have more event groups without RMIDs than available RMIDs,
+	 * or we have event groups that conflict with the ones currently
+	 * scheduled.
 	 *
 	 * We force deallocate the rmid of the group at the head of
 	 * cache_groups. The first event group without an RMID then gets
@@ -688,30 +726,22 @@ static bool __intel_cqm_rmid_rotate(void)
 	 * Rotate the cache_groups list so the previous head is now the
 	 * tail.
 	 */
-	rotor = __intel_cqm_pick_and_rotate();
-	rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
-
-	/*
-	 * The group at the front of the list should always have a valid
-	 * RMID. If it doesn't then no groups have RMIDs assigned.
-	 */
-	if (!__rmid_valid(rmid))
-		goto stabilize;
+	__intel_cqm_pick_and_rotate(start);
 
 	/*
 	 * If the rotation is going to succeed, reduce the threshold so
 	 * that we don't needlessly reuse dirty RMIDs.
 	 */
 	if (__rmid_valid(intel_cqm_rotation_rmid)) {
 		intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
-		intel_cqm_rotation_rmid = INVALID_RMID;
+		intel_cqm_rotation_rmid = __get_rmid();
+
+		intel_cqm_sched_out_conflicting_events(start);
 
 		if (__intel_cqm_threshold)
 			__intel_cqm_threshold--;
 	}
 
-	__put_rmid(rmid);
-
 	rotated = true;
 
 stabilize:
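
For reference, a minimal userspace sketch of the victim selection done by __intel_cqm_pick_and_rotate() in the hunks above: if the first RMID-less group ("next") is already at the head of cache_groups, no group holds an RMID and nothing is rotated; otherwise the head group gives up its RMID for recycling and is moved to the tail. The array-based list and the put_rmid() helper below are simplified stand-ins for illustration, not the kernel's list_rotate_left()/__put_rmid() implementation.

#include <stdio.h>

#define INVALID_RMID	((unsigned int)-1)
#define NR_GROUPS	3

struct toy_group {
	const char *name;
	unsigned int rmid;
};

/* Stand-in for __put_rmid(): pretend to park the RMID on the limbo list. */
static void put_rmid(unsigned int rmid)
{
	printf("recycling rmid %u onto the limbo list\n", rmid);
}

/* Head is groups[0]; rotating moves it to the tail of the array. */
static void pick_and_rotate(struct toy_group *groups, int nr,
			    struct toy_group *next)
{
	struct toy_group victim = groups[0];
	int i;

	/* Head has no RMID either: nothing to steal, nothing to rotate. */
	if (next == &groups[0])
		return;

	put_rmid(victim.rmid);
	victim.rmid = INVALID_RMID;

	/* Equivalent of list_rotate_left(): old head becomes the tail. */
	for (i = 0; i < nr - 1; i++)
		groups[i] = groups[i + 1];
	groups[nr - 1] = victim;
}

int main(void)
{
	struct toy_group groups[NR_GROUPS] = {
		{ "A", 1 }, { "B", 2 }, { "C", INVALID_RMID },
	};
	int i;

	/* groups[2] ("C") is the first group without an RMID. */
	pick_and_rotate(groups, NR_GROUPS, &groups[2]);

	for (i = 0; i < NR_GROUPS; i++)
		printf("%s: rmid %d\n", groups[i].name, (int)groups[i].rmid);
	return 0;
}
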
@@ -794,25 +824,37 @@ static void intel_cqm_rmid_rotate(struct work_struct *work)
  *
  * If we're part of a group, we use the group's RMID.
  */
-static int intel_cqm_setup_event(struct perf_event *event,
-				 struct perf_event **group)
+static void intel_cqm_setup_event(struct perf_event *event,
+				  struct perf_event **group)
 {
 	struct perf_event *iter;
+	unsigned int rmid;
+	bool conflict = false;
 
 	list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
+		rmid = iter->hw.cqm_rmid;
+
 		if (__match_event(iter, event)) {
 			/* All tasks in a group share an RMID */
-			event->hw.cqm_rmid = iter->hw.cqm_rmid;
+			event->hw.cqm_rmid = rmid;
 			*group = iter;
-			return 0;
+			return;
 		}
 
-		if (__conflict_event(iter, event))
-			return -EBUSY;
+		/*
+		 * We only care about conflicts for events that are
+		 * actually scheduled in (and hence have a valid RMID).
+		 */
+		if (__conflict_event(iter, event) && __rmid_valid(rmid))
+			conflict = true;
 	}
 
-	event->hw.cqm_rmid = __get_rmid();
-	return 0;
+	if (conflict)
+		rmid = INVALID_RMID;
+	else
+		rmid = __get_rmid();
+
+	event->hw.cqm_rmid = rmid;
 }
 
 static void intel_cqm_event_read(struct perf_event *event)
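
To illustrate the new assignment policy in intel_cqm_setup_event() above, here is a minimal userspace sketch: share the group's RMID on a match, defer allocation with INVALID_RMID when a scheduled-in group conflicts, and otherwise take a fresh RMID. The helpers match_event(), conflict_event() and get_rmid() are simplified stand-ins for the kernel's __match_event(), __conflict_event() and __get_rmid(), not their real implementations.

#include <stdbool.h>
#include <stdio.h>

#define INVALID_RMID	((unsigned int)-1)

struct toy_event {
	int cpu;		/* -1 for a task event */
	int pid;		/* -1 for a CPU-wide event */
	unsigned int rmid;
};

static bool rmid_valid(unsigned int rmid)
{
	return rmid != INVALID_RMID;
}

/* Same target: the events share an RMID. */
static bool match_event(const struct toy_event *a, const struct toy_event *b)
{
	return a->cpu == b->cpu && a->pid == b->pid;
}

/* In this toy model, CPU-wide and task events conflict with each other. */
static bool conflict_event(const struct toy_event *a, const struct toy_event *b)
{
	return (a->pid == -1) != (b->pid == -1);
}

static unsigned int next_rmid = 2;	/* rmid 1 is already held by the group in main() */
static unsigned int get_rmid(void) { return next_rmid++; }

static void setup_event(struct toy_event *event,
			struct toy_event *groups, int nr_groups,
			struct toy_event **group)
{
	bool conflict = false;
	unsigned int rmid;
	int i;

	for (i = 0; i < nr_groups; i++) {
		rmid = groups[i].rmid;

		if (match_event(&groups[i], event)) {
			/* All tasks in a group share an RMID. */
			event->rmid = rmid;
			*group = &groups[i];
			return;
		}

		/* Only scheduled-in groups (valid RMID) count as conflicts. */
		if (conflict_event(&groups[i], event) && rmid_valid(rmid))
			conflict = true;
	}

	if (conflict)
		rmid = INVALID_RMID;
	else
		rmid = get_rmid();

	event->rmid = rmid;
}

int main(void)
{
	struct toy_event groups[] = {
		{ .cpu = 0, .pid = -1, .rmid = 1 },	/* CPU-wide, scheduled in */
	};
	struct toy_event task = { .cpu = -1, .pid = 42, .rmid = INVALID_RMID };
	struct toy_event *group = NULL;

	setup_event(&task, groups, 1, &group);
	printf("task event rmid: %d (INVALID_RMID means: wait for rotation)\n",
	       (int)task.rmid);
	return 0;
}
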
@@ -1030,7 +1072,6 @@ static int intel_cqm_event_init(struct perf_event *event)
 {
 	struct perf_event *group = NULL;
 	bool rotate = false;
-	int err;
 
 	if (event->attr.type != intel_cqm_pmu.type)
 		return -ENOENT;
@@ -1056,9 +1097,7 @@ static int intel_cqm_event_init(struct perf_event *event)
 	mutex_lock(&cache_mutex);
 
 	/* Will also set rmid */
-	err = intel_cqm_setup_event(event, &group);
-	if (err)
-		goto out;
+	intel_cqm_setup_event(event, &group);
 
 	if (group) {
 		list_add_tail(&event->hw.cqm_group_entry,
@@ -1078,13 +1117,12 @@ static int intel_cqm_event_init(struct perf_event *event)
 		rotate = true;
 	}
 
-out:
 	mutex_unlock(&cache_mutex);
 
 	if (rotate)
 		schedule_delayed_work(&intel_cqm_rmid_work, 0);
 
-	return err;
+	return 0;
 }
 
 EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");