@@ -182,32 +182,11 @@ static int force_on = 0;
  * 64-127: Reserved
  */
 struct root_entry {
-        u64     val;
-        u64     rsvd1;
+        u64     lo;
+        u64     hi;
 };
 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
-        return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
-        root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
-        root->val &= ~VTD_PAGE_MASK;
-        root->val |= value & VTD_PAGE_MASK;
-}
-
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
-        return (struct context_entry *)
-                (root_present(root) ? phys_to_virt(
-                root->val & VTD_PAGE_MASK) :
-                NULL);
-}
-
 /*
  * low 64 bits:
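For orientation: the rename from val/rsvd1 to lo/hi reflects what the extended root-table format does with the formerly reserved upper 64 bits. Each half of the 128-bit root entry can now carry its own present bit and context-table pointer, with lo covering devfn 0x00-0x7f and hi covering devfn 0x80-0xff. A minimal sketch of the layout both halves share, assuming the bit-0 present flag and VTD_PAGE_MASK address mask used throughout this file (the helper names are hypothetical, not from the patch):

        /* Sketch only: decoding either half of an extended root entry. */
        static inline bool root_half_present(u64 half)
        {
                return half & 1;                /* bit 0: context table present */
        }

        static inline u64 root_half_ctp(u64 half)
        {
                return half & VTD_PAGE_MASK;    /* 4KiB-aligned context-table address */
        }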
@@ -681,6 +660,40 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
 }
 
+static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
+                                                       u8 bus, u8 devfn, int alloc)
+{
+        struct root_entry *root = &iommu->root_entry[bus];
+        struct context_entry *context;
+        u64 *entry;
+
+        entry = &root->lo;
+        if (ecap_ecs(iommu->ecap)) {
+                if (devfn >= 0x80) {
+                        devfn -= 0x80;
+                        entry = &root->hi;
+                }
+                devfn *= 2;
+        }
+        if (*entry & 1)
+                context = phys_to_virt(*entry & VTD_PAGE_MASK);
+        else {
+                unsigned long phy_addr;
+                if (!alloc)
+                        return NULL;
+
+                context = alloc_pgtable_page(iommu->node);
+                if (!context)
+                        return NULL;
+
+                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
+                phy_addr = virt_to_phys((void *)context);
+                *entry = phy_addr | 1;
+                __iommu_flush_cache(iommu, entry, sizeof(*entry));
+        }
+        return &context[devfn];
+}
+
 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
         struct dmar_drhd_unit *drhd = NULL;
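Note that entry must default to &root->lo before the capability check; an upper-half devfn then switches it to &root->hi. With the extended capability, each half of the root entry points at a table of 128 extended context entries, each twice the size of a legacy entry, which is why a devfn in the upper half is rebased by 0x80 and why the index is then doubled to step over the wider entries; without the capability, devfn indexes the legacy 256-entry table directly. A hypothetical helper (not part of the patch) isolating just the index arithmetic:

        /* Hypothetical: slot of a devfn within its extended context table. */
        static inline int ecs_context_index(u8 devfn)
        {
                /* Rebase into the 128-entry half-table, then double to
                 * step over the 256-bit extended entries. */
                return (devfn & 0x7f) * 2;
        }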
@@ -740,75 +753,36 @@ static void domain_flush_cache(struct dmar_domain *domain,
                 clflush_cache_range(addr, size);
 }
 
-/* Gets context entry for a given bus and devfn */
-static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
-                u8 bus, u8 devfn)
-{
-        struct root_entry *root;
-        struct context_entry *context;
-        unsigned long phy_addr;
-        unsigned long flags;
-
-        spin_lock_irqsave(&iommu->lock, flags);
-        root = &iommu->root_entry[bus];
-        context = get_context_addr_from_root(root);
-        if (!context) {
-                context = (struct context_entry *)
-                                alloc_pgtable_page(iommu->node);
-                if (!context) {
-                        spin_unlock_irqrestore(&iommu->lock, flags);
-                        return NULL;
-                }
-                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
-                phy_addr = virt_to_phys((void *)context);
-                set_root_value(root, phy_addr);
-                set_root_present(root);
-                __iommu_flush_cache(iommu, root, sizeof(*root));
-        }
-        spin_unlock_irqrestore(&iommu->lock, flags);
-        return &context[devfn];
-}
-
 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-        struct root_entry *root;
         struct context_entry *context;
-        int ret;
+        int ret = 0;
         unsigned long flags;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        root = &iommu->root_entry[bus];
-        context = get_context_addr_from_root(root);
-        if (!context) {
-                ret = 0;
-                goto out;
-        }
-        ret = context_present(&context[devfn]);
-out:
+        context = iommu_context_addr(iommu, bus, devfn, 0);
+        if (context)
+                ret = context_present(context);
         spin_unlock_irqrestore(&iommu->lock, flags);
         return ret;
 }
 
 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-        struct root_entry *root;
         struct context_entry *context;
         unsigned long flags;
 
         spin_lock_irqsave(&iommu->lock, flags);
-        root = &iommu->root_entry[bus];
-        context = get_context_addr_from_root(root);
+        context = iommu_context_addr(iommu, bus, devfn, 0);
         if (context) {
-                context_clear_entry(&context[devfn]);
-                __iommu_flush_cache(iommu, &context[devfn], \
-                        sizeof(*context));
+                context_clear_entry(context);
+                __iommu_flush_cache(iommu, context, sizeof(*context));
         }
         spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void free_context_table(struct intel_iommu *iommu)
 {
-        struct root_entry *root;
         int i;
         unsigned long flags;
         struct context_entry *context;
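The alloc parameter is what lets one function replace both the old allocating lookup (device_to_context_entry) and the old read-only lookup (get_context_addr_from_root): readers pass 0 and treat NULL as "no context table for this bus", while the mapping path passes 1 to allocate and flush the table on first use. The two calling patterns, exactly as used in this patch:

        /* Read-only lookup: never allocates; NULL means no table yet. */
        context = iommu_context_addr(iommu, bus, devfn, 0);

        /* Mapping path: allocates the context table on demand. */
        context = iommu_context_addr(iommu, bus, devfn, 1);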
@@ -818,10 +792,17 @@ static void free_context_table(struct intel_iommu *iommu)
                 goto out;
         }
         for (i = 0; i < ROOT_ENTRY_NR; i++) {
-                root = &iommu->root_entry[i];
-                context = get_context_addr_from_root(root);
+                context = iommu_context_addr(iommu, i, 0, 0);
+                if (context)
+                        free_pgtable_page(context);
+
+                if (!ecap_ecs(iommu->ecap))
+                        continue;
+
+                context = iommu_context_addr(iommu, i, 0x80, 0);
                 if (context)
                         free_pgtable_page(context);
+
         }
         free_pgtable_page(iommu->root_entry);
         iommu->root_entry = NULL;
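With the extended format, each bus can own two half-size context tables, so the teardown loop frees the lower half (devfn 0) unconditionally and the upper half (devfn 0x80) only when the extended-context capability is present; on legacy hardware root->hi is still the reserved field and there is nothing behind it to free.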
@@ -1145,14 +1126,16 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 
 static void iommu_set_root_entry(struct intel_iommu *iommu)
 {
-        void *addr;
+        u64 addr;
         u32 sts;
         unsigned long flag;
 
-        addr = iommu->root_entry;
+        addr = virt_to_phys(iommu->root_entry);
+        if (ecap_ecs(iommu->ecap))
+                addr |= DMA_RTADDR_RTT;
 
         raw_spin_lock_irqsave(&iommu->register_lock, flag);
-        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
 
         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
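Programming the root pointer now also advertises the table format to the hardware: when the extended capability is present, DMA_RTADDR_RTT (the Root Table Type bit of the Root Table Address Register) is OR-ed into the physical address before it is written, telling the IOMMU to interpret the table as extended root entries. The flag itself is defined outside this hunk; a plausible definition, assuming the VT-d spec's placement of RTT at bit 11, would be:

        /* Assumed definition, in the style of include/linux/intel-iommu.h:
         * RTT is bit 11 of the Root Table Address Register. */
        #define DMA_RTADDR_RTT  (((u64)1) << 11)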
@@ -1798,7 +1781,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
                translation != CONTEXT_TT_MULTI_LEVEL);
 
-        context = device_to_context_entry(iommu, bus, devfn);
+        spin_lock_irqsave(&iommu->lock, flags);
+        context = iommu_context_addr(iommu, bus, devfn, 1);
+        spin_unlock_irqrestore(&iommu->lock, flags);
         if (!context)
                 return -ENOMEM;
         spin_lock_irqsave(&iommu->lock, flags);
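Unlike the old device_to_context_entry(), which took iommu->lock internally, iommu_context_addr() leaves locking to the caller, hence the explicit lock/unlock pair around the allocating lookup here. The context line just below re-acquires the same lock for the actual context programming, so lookup and programming are not one critical section; the pointer presumably stays valid across the gap because context tables, once allocated, are only freed by free_context_table() at IOMMU teardown.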