@@ -722,19 +722,24 @@ struct flush_icache_range_args {
 	unsigned long start;
 	unsigned long end;
 	unsigned int type;
+	bool user;
 };
 
 static inline void __local_r4k_flush_icache_range(unsigned long start,
 						   unsigned long end,
-						   unsigned int type)
+						   unsigned int type,
+						   bool user)
 {
 	if (!cpu_has_ic_fills_f_dc) {
 		if (type == R4K_INDEX ||
 		    (type & R4K_INDEX && end - start >= dcache_size)) {
 			r4k_blast_dcache();
 		} else {
 			R4600_HIT_CACHEOP_WAR_IMPL;
-			protected_blast_dcache_range(start, end);
+			if (user)
+				protected_blast_dcache_range(start, end);
+			else
+				blast_dcache_range(start, end);
 		}
 	}
 
@@ -748,7 +753,10 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
 		break;
 
 	default:
-		protected_blast_icache_range(start, end);
+		if (user)
+			protected_blast_icache_range(start, end);
+		else
+			blast_icache_range(start, end);
 		break;
 	}
 }
@@ -757,7 +765,13 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
 static inline void local_r4k_flush_icache_range(unsigned long start,
 						 unsigned long end)
 {
-	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
+}
+
+static inline void local_r4k_flush_icache_user_range(unsigned long start,
+						      unsigned long end)
+{
+	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -766,18 +780,21 @@ static inline void local_r4k_flush_icache_range_ipi(void *args)
 	unsigned long start = fir_args->start;
 	unsigned long end = fir_args->end;
 	unsigned int type = fir_args->type;
+	bool user = fir_args->user;
 
-	__local_r4k_flush_icache_range(start, end, type);
+	__local_r4k_flush_icache_range(start, end, type, user);
 }
 
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
+				     bool user)
 {
 	struct flush_icache_range_args args;
 	unsigned long size, cache_size;
 
 	args.start = start;
 	args.end = end;
 	args.type = R4K_HIT | R4K_INDEX;
+	args.user = user;
 
 	/*
 	 * Indexed cache ops require an SMP call.
@@ -803,6 +820,16 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	instruction_hazard();
 }
 
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+	return __r4k_flush_icache_range(start, end, false);
+}
+
+static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
+{
+	return __r4k_flush_icache_range(start, end, true);
+}
+
 #if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -1904,8 +1931,8 @@ void r4k_cache_init(void)
 	flush_data_cache_page = r4k_flush_data_cache_page;
 	flush_icache_range = r4k_flush_icache_range;
 	local_flush_icache_range = local_r4k_flush_icache_range;
-	__flush_icache_user_range = r4k_flush_icache_range;
-	__local_flush_icache_user_range = local_r4k_flush_icache_range;
+	__flush_icache_user_range = r4k_flush_icache_user_range;
+	__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
 
 #if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 	if (coherentio) {
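
The diff above follows a common refactoring pattern: the existing entry point becomes a thin wrapper around a shared helper that takes an extra bool user flag, and a new *_user_range wrapper passes true, so user-range flushes keep the protected_blast_*_range loops while kernel-range flushes take the plain blast_*_range loops. A minimal standalone sketch of that pattern is shown below; it is illustrative only, and every name in it is hypothetical rather than taken from the kernel sources.

/*
 * Hypothetical, self-contained sketch of the "shared helper + bool flag
 * + thin wrappers" split used in the patch above. Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a fault-tolerant flush over a user mapping. */
static void protected_flush(unsigned long start, unsigned long end)
{
	printf("protected flush %#lx-%#lx\n", start, end);
}

/* Stand-in for an ordinary flush over an always-mapped kernel range. */
static void plain_flush(unsigned long start, unsigned long end)
{
	printf("plain flush %#lx-%#lx\n", start, end);
}

/* Shared helper: the flag selects the user or kernel flavour. */
static void __flush_range(unsigned long start, unsigned long end, bool user)
{
	if (user)
		protected_flush(start, end);
	else
		plain_flush(start, end);
}

/* Thin wrappers preserve the old entry point and add a user variant. */
static void flush_range(unsigned long start, unsigned long end)
{
	__flush_range(start, end, false);
}

static void flush_user_range(unsigned long start, unsigned long end)
{
	__flush_range(start, end, true);
}

int main(void)
{
	flush_range(0x1000, 0x2000);	/* kernel caller */
	flush_user_range(0x3000, 0x4000);	/* user caller */
	return 0;
}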