@@ -438,6 +438,11 @@ static unsigned long _read_level(struct dev_pm_opp *opp, int index)
 	return opp->level;
 }
 
+static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
+{
+	return opp->bandwidth[index].peak;
+}
+
 /* Generic comparison helpers */
 static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
 			   unsigned long opp_key, unsigned long key)
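For context: `_read_bw()` is only a key accessor. The generic `_find_key_*()` helpers introduced earlier in this series take such a `read` callback together with a comparison helper and do the table lookup, locking, and list walk themselves. A rough sketch of the ceil-side walk, reconstructed from the open-coded loop this patch removes below (the actual upstream helper is factored differently, delegating to a shared `_find_key()`):

```c
/*
 * Illustrative sketch only -- mirrors the open-coded loop removed by this
 * patch; the real _find_key_ceil() delegates to a shared _find_key().
 */
static struct dev_pm_opp *_find_key_ceil_sketch(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index))
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    read(temp_opp, index) >= *key) {
			opp = temp_opp;
			*key = read(opp, index);

			/* Reference dropped by the caller via dev_pm_opp_put() */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
```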
@@ -711,42 +716,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
  * The callers are required to call dev_pm_opp_put() for the returned OPP after
  * use.
  */
-struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
-					   unsigned int *bw, int index)
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+					   int index)
 {
-	struct opp_table *opp_table;
-	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
-	if (!dev || !bw) {
-		dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
-		return ERR_PTR(-EINVAL);
-	}
-
-	opp_table = _find_opp_table(dev);
-	if (IS_ERR(opp_table))
-		return ERR_CAST(opp_table);
-
-	if (index >= opp_table->path_count)
-		return ERR_PTR(-EINVAL);
-
-	mutex_lock(&opp_table->lock);
-
-	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available && temp_opp->bandwidth) {
-			if (temp_opp->bandwidth[index].peak >= *bw) {
-				opp = temp_opp;
-				*bw = opp->bandwidth[index].peak;
-
-				/* Increment the reference count of OPP */
-				dev_pm_opp_get(opp);
-				break;
-			}
-		}
-	}
-
-	mutex_unlock(&opp_table->lock);
-	dev_pm_opp_put_opp_table(opp_table);
+	unsigned long temp = *bw;
+	struct dev_pm_opp *opp;
 
+	opp = _find_key_ceil(dev, &temp, index, true, _read_bw);
+	*bw = temp;
 	return opp;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
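The refactor keeps the calling convention intact: on success `*bw` is overwritten with the matched OPP's peak bandwidth, and the returned OPP holds a reference that must be released with `dev_pm_opp_put()`, per the kerneldoc above. A hypothetical caller (the device pointer, path index, and bandwidth value are placeholders):

```c
/*
 * Hypothetical caller: pick the lowest OPP whose peak bandwidth on
 * interconnect path 0 is at least the requested value.
 */
unsigned int bw = 800000;	/* placeholder request, in the table's units */
struct dev_pm_opp *opp;

opp = dev_pm_opp_find_bw_ceil(dev, &bw, 0);
if (IS_ERR(opp))
	return PTR_ERR(opp);

/* bw now holds the matched OPP's actual peak bandwidth; use the OPP ... */

dev_pm_opp_put(opp);	/* drop the reference taken by the finder */
```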
@@ -773,41 +750,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
 struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
 					    unsigned int *bw, int index)
 {
-	struct opp_table *opp_table;
-	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
-	if (!dev || !bw) {
-		dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
-		return ERR_PTR(-EINVAL);
-	}
-
-	opp_table = _find_opp_table(dev);
-	if (IS_ERR(opp_table))
-		return ERR_CAST(opp_table);
-
-	if (index >= opp_table->path_count)
-		return ERR_PTR(-EINVAL);
-
-	mutex_lock(&opp_table->lock);
-
-	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available && temp_opp->bandwidth) {
-			/* go to the next node, before choosing prev */
-			if (temp_opp->bandwidth[index].peak > *bw)
-				break;
-			opp = temp_opp;
-		}
-	}
-
-	/* Increment the reference count of OPP */
-	if (!IS_ERR(opp))
-		dev_pm_opp_get(opp);
-	mutex_unlock(&opp_table->lock);
-	dev_pm_opp_put_opp_table(opp_table);
-
-	if (!IS_ERR(opp))
-		*bw = opp->bandwidth[index].peak;
+	unsigned long temp = *bw;
+	struct dev_pm_opp *opp;
 
+	opp = _find_key_floor(dev, &temp, index, true, _read_bw);
+	*bw = temp;
 	return opp;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
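With both finders converted, the ceil/floor difference is carried entirely by the comparison helper passed into the generic walk. Modeled on the `_compare_exact()` signature visible in the first hunk (names suffixed `_sketch` because the upstream bodies are not shown in this diff and may differ):

```c
/* Sketches modeled on _compare_exact() above; upstream bodies may differ. */
static bool _compare_ceil_sketch(struct dev_pm_opp **opp,
				 struct dev_pm_opp *temp_opp,
				 unsigned long opp_key, unsigned long key)
{
	if (opp_key >= key) {
		*opp = temp_opp;	/* first OPP at or above the key: done */
		return true;
	}

	return false;	/* keep walking upwards */
}

static bool _compare_floor_sketch(struct dev_pm_opp **opp,
				  struct dev_pm_opp *temp_opp,
				  unsigned long opp_key, unsigned long key)
{
	/*
	 * "go to the next node, before choosing prev": remember each OPP
	 * at or below the key, stop once we overshoot.
	 */
	if (opp_key > key)
		return true;

	*opp = temp_opp;
	return false;
}
```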