@@ -350,6 +350,7 @@ static void domain_context_clear(struct intel_iommu *iommu,
 				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
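For context, the helper gaining a forward declaration here already exists later in intel-iommu.c; the hunk only makes it visible to si_domain_init(). Recalled roughly from that file (a reference sketch, not part of this diff), it looks like:

/* Approximate sketch of the existing helper: a device with an RMRR is
 * "locked" to a DMA domain unless it is a USB or graphics device, which
 * are allowed to fall back to identity mapping.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}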
@@ -2808,7 +2809,9 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-	int nid, ret;
+	struct dmar_rmrr_unit *rmrr;
+	struct device *dev;
+	int i, nid, ret;
 
 	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
 	if (!si_domain)
@@ -2819,8 +2822,6 @@ static int __init si_domain_init(int hw)
 		return -EFAULT;
 	}
 
-	pr_debug("Identity mapping domain allocated\n");
-
 	if (hw)
 		return 0;
 
@@ -2836,16 +2837,38 @@ static int __init si_domain_init(int hw)
 		}
 	}
 
+	/*
+	 * Normally we use DMA domains for devices which have RMRRs. But we
+	 * lose this requirement for graphics and USB devices. Identity map
+	 * the RMRRs for graphics and USB devices so that they could use the
+	 * si_domain.
+	 */
+	for_each_rmrr_units(rmrr) {
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, dev) {
+			unsigned long long start = rmrr->base_address;
+			unsigned long long end = rmrr->end_address;
+
+			if (device_is_rmrr_locked(dev))
+				continue;
+
+			if (WARN_ON(end < start ||
+				    end >> agaw_to_width(si_domain->agaw)))
+				continue;
+
+			ret = iommu_domain_identity_map(si_domain, start, end);
+			if (ret)
+				return ret;
+		}
+	}
+
 	return 0;
 }
 
 static int identity_mapping(struct device *dev)
 {
 	struct device_domain_info *info;
 
-	if (likely(!iommu_identity_mapping))
-		return 0;
-
 	info = dev->archdata.iommu;
 	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
 		return (info->domain == si_domain);
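The WARN_ON in the new loop bounds each RMRR against what the identity domain's address width can actually map. A minimal, self-contained user-space sketch of that check follows; it assumes the usual VT-d rule that agaw_to_width() returns 30 + 9 * agaw (AGAW 2 = 48 bits), and all names in it are local to the sketch:

#include <stdio.h>

/* Assumed width rule: 30 + 9 * agaw, capped at 64 bits. */
static unsigned int agaw_to_width(int agaw)
{
	unsigned int w = 30 + 9 * agaw;

	return w > 64 ? 64 : w;
}

int main(void)
{
	int agaw = 2;					/* 48-bit domain        */
	unsigned long long start = 0xbf000000ULL;	/* plausible RMRR base  */
	unsigned long long end = 0xbf7fffffULL;		/* plausible RMRR end   */
	unsigned long long too_big = 1ULL << 50;	/* beyond 48 bits       */

	/* Mirrors: WARN_ON(end < start || end >> agaw_to_width(agaw)) */
	printf("sane RMRR rejected?  %d\n",
	       end < start || (end >> agaw_to_width(agaw)) != 0);
	printf("huge RMRR rejected?  %d\n",
	       too_big < start || (too_big >> agaw_to_width(agaw)) != 0);
	return 0;
}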
@@ -3431,11 +3454,9 @@ static int __init init_dmars(void)
 
 	check_tylersburg_isoch();
 
-	if (iommu_identity_mapping) {
-		ret = si_domain_init(hw_pass_through);
-		if (ret)
-			goto free_iommu;
-	}
+	ret = si_domain_init(hw_pass_through);
+	if (ret)
+		goto free_iommu;
 
 
 	/*
@@ -3628,9 +3649,6 @@ static bool iommu_need_mapping(struct device *dev)
 	if (iommu_dummy(dev))
 		return false;
 
-	if (!iommu_identity_mapping)
-		return true;
-
 	found = identity_mapping(dev);
 	if (found) {
 		if (iommu_should_identity_map(dev, 0))
@@ -5051,32 +5069,40 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 	struct dmar_domain *dmar_domain;
 	struct iommu_domain *domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
+	switch (type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+		if (!dmar_domain) {
+			pr_err("Can't allocate dmar_domain\n");
+			return NULL;
+		}
+		if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+			pr_err("Domain initialization failed\n");
+			domain_exit(dmar_domain);
+			return NULL;
+		}
+		domain_update_iommu_cap(dmar_domain);
 
-	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-	if (!dmar_domain) {
-		pr_err("Can't allocate dmar_domain\n");
-		return NULL;
-	}
-	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-		pr_err("Domain initialization failed\n");
-		domain_exit(dmar_domain);
+		domain = &dmar_domain->domain;
+		domain->geometry.aperture_start = 0;
+		domain->geometry.aperture_end =
+				__DOMAIN_MAX_ADDR(dmar_domain->gaw);
+		domain->geometry.force_aperture = true;
+
+		return domain;
+	case IOMMU_DOMAIN_IDENTITY:
+		return &si_domain->domain;
+	default:
 		return NULL;
 	}
-	domain_update_iommu_cap(dmar_domain);
-
-	domain = &dmar_domain->domain;
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-	domain->geometry.force_aperture = true;
 
-	return domain;
+	return NULL;
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-	domain_exit(to_dmar_domain(domain));
+	if (domain != &si_domain->domain)
+		domain_exit(to_dmar_domain(domain));
 }
 
 /*
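With the new IOMMU_DOMAIN_IDENTITY case, every requester gets the same statically allocated si_domain, which is why intel_iommu_domain_free() above must skip it rather than tear it down. A self-contained sketch of that ownership rule (plain user-space C, all names hypothetical, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct domain { int shared; };

/* Plays the role of si_domain: a single, statically allocated object. */
static struct domain identity_domain = { .shared = 1 };

static struct domain *domain_alloc(int want_identity)
{
	if (want_identity)
		return &identity_domain;	/* hand out the singleton */
	return calloc(1, sizeof(struct domain));
}

static void domain_free(struct domain *d)
{
	if (d != &identity_domain)		/* mirror the guard in the hunk */
		free(d);
}

int main(void)
{
	struct domain *a = domain_alloc(1);
	struct domain *b = domain_alloc(0);

	domain_free(a);				/* no-op, singleton survives */
	domain_free(b);
	printf("identity domain still usable: %d\n", identity_domain.shared);
	return 0;
}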