@@ -1359,6 +1359,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
 
 static void domain_update_iotlb(struct dmar_domain *domain)
 {
+	struct dev_pasid_info *dev_pasid;
 	struct device_domain_info *info;
 	bool has_iotlb_device = false;
 	unsigned long flags;
@@ -1370,6 +1371,14 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 			break;
 		}
 	}
+
+	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+		info = dev_iommu_priv_get(dev_pasid->dev);
+		if (info->ats_enabled) {
+			has_iotlb_device = true;
+			break;
+		}
+	}
 	domain->has_iotlb_device = has_iotlb_device;
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
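
The loop added above walks a new per-domain list, domain->dev_pasids, whose entries are struct dev_pasid_info records. The structure definition itself is not part of these hunks; the sketch below is inferred from the three members the patch actually touches (link_domain, dev, pasid), so the real layout in the driver header may differ.

/* Sketch only: inferred from usage in this patch, not copied from it. */
#include <linux/list.h>
#include <linux/device.h>
#include <linux/iommu.h>	/* ioasid_t; its header location varies by kernel version */

struct dev_pasid_info {
	struct list_head link_domain;	/* entry on dmar_domain->dev_pasids */
	struct device *dev;		/* device owning this PASID attachment */
	ioasid_t pasid;			/* PASID attached to the domain */
};
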
@@ -1455,6 +1464,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 				  u64 addr, unsigned mask)
 {
+	struct dev_pasid_info *dev_pasid;
 	struct device_domain_info *info;
 	unsigned long flags;
 
@@ -1464,6 +1474,19 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(info, &domain->devices, link)
 		__iommu_flush_dev_iotlb(info, addr, mask);
+
+	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+		info = dev_iommu_priv_get(dev_pasid->dev);
+
+		if (!info->ats_enabled)
+			continue;
+
+		qi_flush_dev_iotlb_pasid(info->iommu,
+					 PCI_DEVID(info->bus, info->devfn),
+					 info->pfsid, dev_pasid->pasid,
+					 info->ats_qdep, addr,
+					 mask);
+	}
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
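
With PASIDs attached, ATS-capable devices also need PASID-qualified device-TLB invalidations, which the qi_flush_dev_iotlb_pasid() calls above issue. The source-id handed to hardware is the bus/devfn pair packed by PCI_DEVID(). A standalone, runnable illustration of that packing (the kernel macro in <linux/pci.h> packs the bits the same way):

#include <stdint.h>
#include <stdio.h>

/* Same packing as the kernel's PCI_DEVID(bus, devfn):
 * bus number in bits 15:8, device/function in bits 7:0. */
static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	/* e.g. PCI address 0000:6a:00.1: bus 0x6a, slot 0, function 1 */
	uint8_t devfn = (0 << 3) | 1;	/* same layout as PCI_DEVFN(slot, func) */

	printf("source-id = 0x%04x\n", pci_devid(0x6a, devfn));
	return 0;
}
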
@@ -1472,9 +1495,13 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
 				     unsigned long npages, bool ih)
 {
 	u16 did = domain_id_iommu(domain, iommu);
+	struct dev_pasid_info *dev_pasid;
 	unsigned long flags;
 
 	spin_lock_irqsave(&domain->lock, flags);
+	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
+		qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
+
 	if (!list_empty(&domain->devices))
 		qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
 	spin_unlock_irqrestore(&domain->lock, flags);
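
domain_flush_pasid_iotlb() now fans out one PASID-tagged IOTLB invalidation per attached PASID and keeps the IOMMU_NO_PASID flush for devices attached without a PASID. A small user-space mock of that fan-out (IOMMU_NO_PASID is 0 in current kernels; everything else here is illustrative):

#include <stddef.h>
#include <stdio.h>

#define IOMMU_NO_PASID 0u	/* matches the kernel constant's value */

/* Mock stand-in for qi_flush_piotlb() */
static void flush_piotlb(unsigned int pasid)
{
	printf("invalidate IOTLB for PASID %u\n", pasid);
}

int main(void)
{
	unsigned int dev_pasids[] = { 5, 9 };	/* PASIDs on the domain's list */
	size_t i;

	for (i = 0; i < sizeof(dev_pasids) / sizeof(dev_pasids[0]); i++)
		flush_piotlb(dev_pasids[i]);	/* one flush per attachment */
	flush_piotlb(IOMMU_NO_PASID);		/* plus RID-based (no-PASID) DMA */
	return 0;
}
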
@@ -1739,6 +1766,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 		domain->use_first_level = true;
 	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
+	INIT_LIST_HEAD(&domain->dev_pasids);
 	spin_lock_init(&domain->lock);
 	xa_init(&domain->iommu_array);
 
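
The INIT_LIST_HEAD() added to alloc_domain() implies a matching list head in struct dmar_domain, declared in the driver header outside these hunks. A sketch of the presumed addition, with unrelated members omitted:

#include <linux/list.h>

/* Sketch only: the real struct dmar_domain has many more members. */
struct dmar_domain {
	/* ... */
	struct list_head devices;	/* all devices attached to the domain */
	struct list_head dev_pasids;	/* all PASIDs attached to the domain */
	/* ... */
};
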
@@ -4726,7 +4754,10 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 {
 	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+	struct dev_pasid_info *curr, *dev_pasid = NULL;
+	struct dmar_domain *dmar_domain;
 	struct iommu_domain *domain;
+	unsigned long flags;
 
 	domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
 	if (WARN_ON_ONCE(!domain))
@@ -4742,17 +4773,79 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 		goto out_tear_down;
 	}
 
-	/*
-	 * Should never reach here until we add support for attaching
-	 * non-SVA domain to a pasid.
-	 */
-	WARN_ON(1);
+	dmar_domain = to_dmar_domain(domain);
+	spin_lock_irqsave(&dmar_domain->lock, flags);
+	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
+		if (curr->dev == dev && curr->pasid == pasid) {
+			list_del(&curr->link_domain);
+			dev_pasid = curr;
+			break;
+		}
+	}
+	WARN_ON_ONCE(!dev_pasid);
+	spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
+	domain_detach_iommu(dmar_domain, iommu);
+	kfree(dev_pasid);
 out_tear_down:
 	intel_pasid_tear_down_entry(iommu, dev, pasid, false);
 	intel_drain_pasid_prq(dev, pasid);
 }
 
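The teardown above follows a common kernel pattern: find and unlink the record while holding the domain lock, warn if it is unexpectedly absent, then detach and free after dropping the lock. A self-contained user-space model of that pattern; every name in it is a mock, only the locking shape mirrors the code above:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rec { struct rec *next; int pasid; };

static struct rec *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_pasid(int pasid)
{
	struct rec **pp, *found = NULL;

	pthread_mutex_lock(&lock);
	for (pp = &head; *pp; pp = &(*pp)->next) {
		if ((*pp)->pasid == pasid) {
			found = *pp;
			*pp = found->next;	/* unlink, like list_del() */
			break;
		}
	}
	if (!found)
		fprintf(stderr, "pasid %d not on list\n", pasid);	/* cf. WARN_ON_ONCE() */
	pthread_mutex_unlock(&lock);
	free(found);	/* free after dropping the lock, like the kfree() above */
}

int main(void)
{
	struct rec *r = malloc(sizeof(*r));

	r->pasid = 5;
	r->next = NULL;
	head = r;
	remove_pasid(5);
	return 0;
}
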
+static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+				     struct device *dev, ioasid_t pasid)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	struct intel_iommu *iommu = info->iommu;
+	struct dev_pasid_info *dev_pasid;
+	unsigned long flags;
+	int ret;
+
+	if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+		return -EOPNOTSUPP;
+
+	if (context_copied(iommu, info->bus, info->devfn))
+		return -EBUSY;
+
+	ret = prepare_domain_attach_device(domain, dev);
+	if (ret)
+		return ret;
+
+	dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+	if (!dev_pasid)
+		return -ENOMEM;
+
+	ret = domain_attach_iommu(dmar_domain, iommu);
+	if (ret)
+		goto out_free;
+
+	if (domain_type_is_si(dmar_domain))
+		ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
+						     dev, pasid);
+	else if (dmar_domain->use_first_level)
+		ret = domain_setup_first_level(iommu, dmar_domain,
+					       dev, pasid);
+	else
+		ret = intel_pasid_setup_second_level(iommu, dmar_domain,
+						     dev, pasid);
+	if (ret)
+		goto out_detach_iommu;
+
+	dev_pasid->dev = dev;
+	dev_pasid->pasid = pasid;
+	spin_lock_irqsave(&dmar_domain->lock, flags);
+	list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
+	spin_unlock_irqrestore(&dmar_domain->lock, flags);
+
+	return 0;
+out_detach_iommu:
+	domain_detach_iommu(dmar_domain, iommu);
+out_free:
+	kfree(dev_pasid);
+	return ret;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
 	.domain_alloc		= intel_iommu_domain_alloc,
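
intel_iommu_set_dev_pasid() is not called directly; the IOMMU core dispatches to it when a caller attaches a domain to a PASID. A hypothetical caller sketch: the iommu_* core functions are real kernel API, while example_attach_pasid() and its error handling are illustrative only:

#include <linux/iommu.h>

/* Hypothetical: how a client driver would attach a DMA domain to a PASID.
 * iommu_attach_device_pasid() ends up in the domain ops' set_dev_pasid,
 * i.e. the intel_iommu_set_dev_pasid() added by this patch on VT-d. */
static int example_attach_pasid(struct device *dev, ioasid_t pasid)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);	/* an unmanaged paging domain */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		iommu_domain_free(domain);
	return ret;
}
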
@@ -4772,6 +4865,7 @@ const struct iommu_ops intel_iommu_ops = {
 #endif
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev		= intel_iommu_attach_device,
+		.set_dev_pasid		= intel_iommu_set_dev_pasid,
 		.map_pages		= intel_iommu_map_pages,
 		.unmap_pages		= intel_iommu_unmap_pages,
 		.iotlb_sync_map		= intel_iommu_iotlb_sync_map,