@@ -1243,17 +1243,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
    pages can only be freed after the IOTLB flush has been done. */
 static struct page *domain_unmap(struct dmar_domain *domain,
 				 unsigned long start_pfn,
-				 unsigned long last_pfn)
+				 unsigned long last_pfn,
+				 struct page *freelist)
 {
-	struct page *freelist;
-
 	BUG_ON(!domain_pfn_supported(domain, start_pfn));
 	BUG_ON(!domain_pfn_supported(domain, last_pfn));
 	BUG_ON(start_pfn > last_pfn);
 
 	/* we don't need lock here; nobody else touches the iova range */
 	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
-				       domain->pgd, 0, start_pfn, last_pfn, NULL);
+				       domain->pgd, 0, start_pfn, last_pfn,
+				       freelist);
 
 	/* free pgd */
 	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
@@ -2011,7 +2011,8 @@ static void domain_exit(struct dmar_domain *domain)
 	if (domain->pgd) {
 		struct page *freelist;
 
-		freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+		freelist = domain_unmap(domain, 0,
+					DOMAIN_MAX_PFN(domain->gaw), NULL);
 		dma_free_pagelist(freelist);
 	}
 
@@ -3570,7 +3571,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	if (dev_is_pci(dev))
 		pdev = to_pci_dev(dev);
 
-	freelist = domain_unmap(domain, start_pfn, last_pfn);
+	freelist = domain_unmap(domain, start_pfn, last_pfn, NULL);
 	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
 	    !has_iova_flush_queue(&domain->iovad)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -4636,7 +4637,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 			struct page *freelist;
 
 			freelist = domain_unmap(si_domain,
-						start_vpfn, last_vpfn);
+						start_vpfn, last_vpfn,
+						NULL);
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
@@ -5608,10 +5610,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 				struct iommu_iotlb_gather *gather)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	struct page *freelist = NULL;
 	unsigned long start_pfn, last_pfn;
-	unsigned int npages;
-	int iommu_id, level = 0;
+	int level = 0;
 
 	/* Cope with horrid API which requires us to unmap more than the
 	   size argument if it happens to be a large-page mapping. */
@@ -5623,22 +5623,38 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 	start_pfn = iova >> VTD_PAGE_SHIFT;
 	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
 
-	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
-
-	npages = last_pfn - start_pfn + 1;
-
-	for_each_domain_iommu(iommu_id, dmar_domain)
-		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
-				      start_pfn, npages, !freelist, 0);
-
-	dma_free_pagelist(freelist);
+	gather->freelist = domain_unmap(dmar_domain, start_pfn,
+					last_pfn, gather->freelist);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
+	iommu_iotlb_gather_add_page(domain, gather, iova, size);
+
 	return size;
 }
 
+static void intel_iommu_tlb_sync(struct iommu_domain *domain,
+				 struct iommu_iotlb_gather *gather)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	unsigned long iova_pfn = IOVA_PFN(gather->start);
+	size_t size = gather->end - gather->start;
+	unsigned long start_pfn, last_pfn;
+	unsigned long nrpages;
+	int iommu_id;
+
+	nrpages = aligned_nrpages(gather->start, size);
+	start_pfn = mm_to_dma_pfn(iova_pfn);
+	last_pfn = start_pfn + nrpages - 1;
+
+	for_each_domain_iommu(iommu_id, dmar_domain)
+		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+				      start_pfn, nrpages, !gather->freelist, 0);
+
+	dma_free_pagelist(gather->freelist);
+}
+
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 					    dma_addr_t iova)
 {
@@ -6098,6 +6114,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.aux_get_pasid		= intel_iommu_aux_get_pasid,
 	.map			= intel_iommu_map,
 	.unmap			= intel_iommu_unmap,
+	.iotlb_sync		= intel_iommu_tlb_sync,
 	.iova_to_phys		= intel_iommu_iova_to_phys,
 	.probe_device		= intel_iommu_probe_device,
 	.probe_finalize		= intel_iommu_probe_finalize,