@@ -167,14 +167,7 @@ static void device_rbtree_remove(struct device_domain_info *info)
 	spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
 }
 
-/*
- * This domain is a statically identity mapping domain.
- *	1. This domain creats a static 1:1 mapping to all usable memory.
- *	2. It maps to each iommu if successful.
- *	3. Each iommu mapps to this domain if successful.
- */
 static struct dmar_domain *si_domain;
-static int hw_pass_through = 1;
 
 struct dmar_rmrr_unit {
 	struct list_head list;		/* list of rmrr units */
@@ -1647,7 +1640,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	struct context_entry *context;
 	int agaw, ret;
 
-	if (hw_pass_through && domain_type_is_si(domain))
+	if (domain_type_is_si(domain))
 		translation = CONTEXT_TT_PASS_THROUGH;
 
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
@@ -1998,29 +1991,10 @@ static bool dev_is_real_dma_subdevice(struct device *dev)
 	       pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
 }
 
-static int iommu_domain_identity_map(struct dmar_domain *domain,
-				     unsigned long first_vpfn,
-				     unsigned long last_vpfn)
-{
-	/*
-	 * RMRR range might have overlap with physical memory range,
-	 * clear it first
-	 */
-	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
-
-	return __domain_mapping(domain, first_vpfn,
-				first_vpfn, last_vpfn - first_vpfn + 1,
-				DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL);
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
-static int __init si_domain_init(int hw)
+static int __init si_domain_init(void)
 {
-	struct dmar_rmrr_unit *rmrr;
-	struct device *dev;
-	int i, nid, ret;
-
 	si_domain = alloc_domain(IOMMU_DOMAIN_IDENTITY);
 	if (!si_domain)
 		return -EFAULT;
@@ -2031,44 +2005,6 @@ static int __init si_domain_init(int hw)
 		return -EFAULT;
 	}
 
-	if (hw)
-		return 0;
-
-	for_each_online_node(nid) {
-		unsigned long start_pfn, end_pfn;
-		int i;
-
-		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
-			ret = iommu_domain_identity_map(si_domain,
-					mm_to_dma_pfn_start(start_pfn),
-					mm_to_dma_pfn_end(end_pfn - 1));
-			if (ret)
-				return ret;
-		}
-	}
-
-	/*
-	 * Identity map the RMRRs so that devices with RMRRs could also use
-	 * the si_domain.
-	 */
-	for_each_rmrr_units(rmrr) {
-		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
-					  i, dev) {
-			unsigned long long start = rmrr->base_address;
-			unsigned long long end = rmrr->end_address;
-
-			if (WARN_ON(end < start ||
-				    end >> agaw_to_width(si_domain->agaw)))
-				continue;
-
-			ret = iommu_domain_identity_map(si_domain,
-					mm_to_dma_pfn_start(start >> PAGE_SHIFT),
-					mm_to_dma_pfn_end(end >> PAGE_SHIFT));
-			if (ret)
-				return ret;
-		}
-	}
-
 	return 0;
 }
 
@@ -2094,7 +2030,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
 
 	if (!sm_supported(iommu))
 		ret = domain_context_mapping(domain, dev);
-	else if (hw_pass_through && domain_type_is_si(domain))
+	else if (domain_type_is_si(domain))
 		ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
 	else if (domain->use_first_level)
 		ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
@@ -2449,8 +2385,6 @@ static int __init init_dmars(void)
 			}
 		}
 
-		if (!ecap_pass_through(iommu->ecap))
-			hw_pass_through = 0;
 		intel_svm_check(iommu);
 	}
 
@@ -2466,7 +2400,7 @@ static int __init init_dmars(void)
 
 	check_tylersburg_isoch();
 
-	ret = si_domain_init(hw_pass_through);
+	ret = si_domain_init();
 	if (ret)
 		goto free_iommu;
 
@@ -2893,12 +2827,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 	if (ret)
 		goto out;
 
-	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
-		pr_warn("%s: Doesn't support hardware pass through.\n",
-			iommu->name);
-		return -ENXIO;
-	}
-
 	sp = domain_update_iommu_superpage(NULL, iommu) - 1;
 	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
 		pr_warn("%s: Doesn't support large page.\n",
@@ -3149,43 +3077,6 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 	return 0;
 }
 
-static int intel_iommu_memory_notifier(struct notifier_block *nb,
-				       unsigned long val, void *v)
-{
-	struct memory_notify *mhp = v;
-	unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
-	unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
-			mhp->nr_pages - 1);
-
-	switch (val) {
-	case MEM_GOING_ONLINE:
-		if (iommu_domain_identity_map(si_domain,
-					      start_vpfn, last_vpfn)) {
-			pr_warn("Failed to build identity map for [%lx-%lx]\n",
-				start_vpfn, last_vpfn);
-			return NOTIFY_BAD;
-		}
-		break;
-
-	case MEM_OFFLINE:
-	case MEM_CANCEL_ONLINE:
-		{
-			LIST_HEAD(freelist);
-
-			domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
-			iommu_put_pages_list(&freelist);
-		}
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block intel_iommu_memory_nb = {
-	.notifier_call = intel_iommu_memory_notifier,
-	.priority = 0
-};
-
 static void intel_disable_iommus(void)
 {
 	struct intel_iommu *iommu = NULL;
@@ -3482,12 +3373,7 @@ int __init intel_iommu_init(void)
 
 		iommu_pmu_register(iommu);
 	}
-	up_read(&dmar_global_lock);
-
-	if (si_domain && !hw_pass_through)
-		register_memory_notifier(&intel_iommu_memory_nb);
 
-	down_read(&dmar_global_lock);
 	if (probe_acpi_namespace_devices())
 		pr_warn("ACPI name space devices didn't probe correctly\n");
 