@@ -302,14 +302,8 @@ static inline void context_clear_entry(struct context_entry *context)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/*
- * Domain represents a virtual machine, more than one devices
- * across iommus may be owned in one domain, e.g. kvm guest.
- */
-#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)
-
 /* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
+#define DOMAIN_FLAG_STATIC_IDENTITY	BIT(0)
 
 #define for_each_domain_iommu(idx, domain)			\
 	for (idx = 0; idx < g_num_of_iommus; idx++)		\
@@ -540,22 +534,11 @@ static inline void free_devinfo_mem(void *vaddr)
 	kmem_cache_free(iommu_devinfo_cache, vaddr);
 }
 
-static inline int domain_type_is_vm(struct dmar_domain *domain)
-{
-	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
-}
-
 static inline int domain_type_is_si(struct dmar_domain *domain)
 {
 	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
 }
 
-static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
-{
-	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
-				DOMAIN_FLAG_STATIC_IDENTITY);
-}
-
 static inline int domain_pfn_supported(struct dmar_domain *domain,
 				       unsigned long pfn)
 {
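
With domain_type_is_vm() and domain_type_is_vm_or_si() removed, call sites that used to ask the flag-based question now look at the generic IOMMU domain type instead, as the domain_get_iommu() hunk below shows. A minimal sketch of that replacement pattern; the helper name here is hypothetical and not part of the patch, which open-codes the comparison:

/* Hypothetical helper, for illustration only: a dmar_domain is now
 * classified by the type stored in its embedded struct iommu_domain
 * rather than by DOMAIN_FLAG_* bits. */
static inline bool domain_type_is_dma(struct dmar_domain *domain)
{
	return domain->domain.type == IOMMU_DOMAIN_DMA;
}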
@@ -603,7 +586,9 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	int iommu_id;
 
 	/* si_domain and vm domain should not get here. */
-	BUG_ON(domain_type_is_vm_or_si(domain));
+	if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+		return NULL;
+
 	for_each_domain_iommu(iommu_id, domain)
 		break;
 
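
Because the BUG_ON() becomes a WARN_ON() that bails out, domain_get_iommu() can now return NULL when handed a non-DMA domain. A hedged caller-side sketch (the error value and surrounding function are illustrative, not taken from this patch):

/* Illustrative caller: a NULL return now means "no usable IOMMU for this
 * domain" and must be handled instead of relying on a kernel panic. */
struct intel_iommu *iommu = domain_get_iommu(dmar_domain);
if (!iommu)
	return -ENODEV;	/* hypothetical error path */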
@@ -1651,7 +1636,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 	if (!iommu->domains || !iommu->domain_ids)
 		return;
 
-again:
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
 		struct dmar_domain *domain;
@@ -1665,18 +1649,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 		domain = info->domain;
 
 		__dmar_remove_one_dev_info(info);
-
-		if (!domain_type_is_vm_or_si(domain)) {
-			/*
-			 * The domain_exit() function can't be called under
-			 * device_domain_lock, as it takes this lock itself.
-			 * So release the lock here and re-run the loop
-			 * afterwards.
-			 */
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-			domain_exit(domain);
-			goto again;
-		}
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
@@ -2339,26 +2311,16 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			  struct scatterlist *sg, unsigned long phys_pfn,
 			  unsigned long nr_pages, int prot)
 {
-	int ret;
+	int iommu_id, ret;
 	struct intel_iommu *iommu;
 
 	/* Do the real mapping first */
 	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
 	if (ret)
 		return ret;
 
-	/* Notify about the new mapping */
-	if (domain_type_is_vm(domain)) {
-		/* VM typed domains can have more than one IOMMUs */
-		int iommu_id;
-
-		for_each_domain_iommu(iommu_id, domain) {
-			iommu = g_iommus[iommu_id];
-			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-		}
-	} else {
-		/* General domains only have one IOMMU */
-		iommu = domain_get_iommu(domain);
+	for_each_domain_iommu(iommu_id, domain) {
+		iommu = g_iommus[iommu_id];
 		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
 	}
 
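
For readability, here is domain_mapping() as it reads after this hunk, stitched together from the context lines and the added lines; the trailing return is assumed from the surrounding code rather than shown in the hunk:

static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			  struct scatterlist *sg, unsigned long phys_pfn,
			  unsigned long nr_pages, int prot)
{
	int iommu_id, ret;
	struct intel_iommu *iommu;

	/* Do the real mapping first */
	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
	if (ret)
		return ret;

	/* Any domain may span several IOMMUs now, so notify each of them. */
	for_each_domain_iommu(iommu_id, domain) {
		iommu = g_iommus[iommu_id];
		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
	}

	return 0;	/* assumed: the hunk ends before the function's return */
}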
@@ -4599,9 +4561,6 @@ static int device_notifier(struct notifier_block *nb,
 			return 0;
 
 		dmar_remove_one_dev_info(dev);
-		if (!domain_type_is_vm_or_si(domain) &&
-		    list_empty(&domain->devices))
-			domain_exit(domain);
 	} else if (action == BUS_NOTIFY_ADD_DEVICE) {
 		if (iommu_should_identity_map(dev, 1))
 			domain_add_dev_info(si_domain, dev);
@@ -5070,8 +5029,10 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 	struct iommu_domain *domain;
 
 	switch (type) {
+	case IOMMU_DOMAIN_DMA:
+		/* fallthrough */
 	case IOMMU_DOMAIN_UNMANAGED:
-		dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+		dmar_domain = alloc_domain(0);
 		if (!dmar_domain) {
 			pr_err("Can't allocate dmar_domain\n");
 			return NULL;
@@ -5081,6 +5042,14 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			domain_exit(dmar_domain);
 			return NULL;
 		}
+
+		if (type == IOMMU_DOMAIN_DMA &&
+		    init_iova_flush_queue(&dmar_domain->iovad,
+					  iommu_flush_iova, iova_entry_free)) {
+			pr_warn("iova flush queue initialization failed\n");
+			intel_iommu_strict = 1;
+		}
+
 		domain_update_iommu_cap(dmar_domain);
 
 		domain = &dmar_domain->domain;
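
Reading the two intel_iommu_domain_alloc() hunks together, DMA and unmanaged domains now share one allocation path, with the deferred-flush IOVA queue set up only for the DMA case. A condensed view, with the unchanged lines that fall between the two hunks elided rather than guessed:

	switch (type) {
	case IOMMU_DOMAIN_DMA:
		/* fallthrough */
	case IOMMU_DOMAIN_UNMANAGED:
		dmar_domain = alloc_domain(0);
		if (!dmar_domain) {
			pr_err("Can't allocate dmar_domain\n");
			return NULL;
		}

		/* ... domain initialization between the two hunks elided ... */

		/* New for DMA domains: deferred IOVA flushing; fall back to
		 * strict invalidation if the queue cannot be set up. */
		if (type == IOMMU_DOMAIN_DMA &&
		    init_iova_flush_queue(&dmar_domain->iovad,
					  iommu_flush_iova, iova_entry_free)) {
			pr_warn("iova flush queue initialization failed\n");
			intel_iommu_strict = 1;
		}

		domain_update_iommu_cap(dmar_domain);

		domain = &dmar_domain->domain;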
@@ -5291,13 +5260,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		struct dmar_domain *old_domain;
 
 		old_domain = find_domain(dev);
-		if (old_domain) {
+		if (old_domain)
 			dmar_remove_one_dev_info(dev);
-
-			if (!domain_type_is_vm_or_si(old_domain) &&
-			    list_empty(&old_domain->devices))
-				domain_exit(old_domain);
-		}
 	}
 
 	ret = prepare_domain_attach_device(domain, dev);