@@ -378,6 +378,11 @@ struct dmar_domain {
 	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
 					/* bitmap of iommus this domain uses*/
 
+	u16		iommu_did[DMAR_UNITS_SUPPORTED];
+					/* Domain ids per IOMMU. Use u16 since
+					 * domain ids are 16 bit wide according
+					 * to VT-d spec, section 9.3 */
+
 	struct list_head devices;	/* all devices' list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
 
@@ -1543,11 +1548,13 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	}
 
 	/*
-	 * if Caching mode is set, then invalid translations are tagged
-	 * with domainid 0. Hence we need to pre-allocate it.
+	 * If Caching mode is set, then invalid translations are tagged
+	 * with domain-id 0, hence we need to pre-allocate it. We also
+	 * use domain-id 0 as a marker for non-allocated domain-id, so
+	 * make sure it is not used for a real domain.
 	 */
-	if (cap_caching_mode(iommu->cap))
-		set_bit(0, iommu->domain_ids);
+	set_bit(0, iommu->domain_ids);
+
 	return 0;
 }
 
@@ -1560,9 +1567,10 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 	for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
 		/*
 		 * Domain id 0 is reserved for invalid translation
-		 * if hardware supports caching mode.
+		 * if hardware supports caching mode and used as
+		 * a non-allocated marker.
 		 */
-		if (cap_caching_mode(iommu->cap) && i == 0)
+		if (i == 0)
 			continue;
 
 		domain = iommu->domains[i];
@@ -1624,6 +1632,7 @@ static int __iommu_attach_domain(struct dmar_domain *domain,
 	if (num < ndomains) {
 		set_bit(num, iommu->domain_ids);
 		iommu->domains[num] = domain;
+		domain->iommu_did[iommu->seq_id] = num;
 	} else {
 		num = -ENOSPC;
 	}
@@ -1650,12 +1659,10 @@ static int iommu_attach_vm_domain(struct dmar_domain *domain,
 				  struct intel_iommu *iommu)
 {
 	int num;
-	unsigned long ndomains;
 
-	ndomains = cap_ndoms(iommu->cap);
-	for_each_set_bit(num, iommu->domain_ids, ndomains)
-		if (iommu->domains[num] == domain)
-			return num;
+	num = domain->iommu_did[iommu->seq_id];
+	if (num)
+		return num;
 
 	return __iommu_attach_domain(domain, iommu);
 }
@@ -1664,22 +1671,18 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 				struct intel_iommu *iommu)
 {
 	unsigned long flags;
-	int num, ndomains;
+	int num;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	if (domain_type_is_vm_or_si(domain)) {
-		ndomains = cap_ndoms(iommu->cap);
-		for_each_set_bit(num, iommu->domain_ids, ndomains) {
-			if (iommu->domains[num] == domain) {
-				clear_bit(num, iommu->domain_ids);
-				iommu->domains[num] = NULL;
-				break;
-			}
-		}
-	} else {
-		clear_bit(domain->id, iommu->domain_ids);
-		iommu->domains[domain->id] = NULL;
-	}
+
+	num = domain->iommu_did[iommu->seq_id];
+
+	if (num == 0)
+		return;
+
+	clear_bit(num, iommu->domain_ids);
+	iommu->domains[num] = NULL;
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -1708,6 +1711,7 @@ static int domain_detach_iommu(struct dmar_domain *domain,
 	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
 		count = --domain->iommu_count;
 		domain_update_iommu_cap(domain);
+		domain->iommu_did[iommu->seq_id] = 0;
 	}
 	spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
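
The sketch below is a minimal, standalone illustration (not kernel code) of the bookkeeping idea in this diff: each domain caches the domain-id it holds on every IOMMU in a small per-unit array, with id 0 reserved as the "not allocated" marker, so attach becomes a cheap cached lookup and detach no longer scans the IOMMU's domain-id bitmap. All names, sizes, and the toy bitmap here are assumptions chosen for a compilable example, and attach/detach are collapsed into single helpers rather than the separate functions the patch touches.

/*
 * Toy model of per-IOMMU domain-id caching. UNITS_SUPPORTED and NDOMAINS
 * stand in for DMAR_UNITS_SUPPORTED and cap_ndoms(); locking is omitted.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UNITS_SUPPORTED 4
#define NDOMAINS        16

struct toy_iommu {
	int      seq_id;          /* index of this IOMMU unit */
	uint32_t domain_ids;      /* bitmap of allocated ids, bit 0 pre-set */
};

struct toy_domain {
	uint16_t iommu_did[UNITS_SUPPORTED]; /* per-IOMMU domain id, 0 = none */
};

/* Allocate (or return the cached) domain id on @iommu; -1 if none free. */
static int toy_attach(struct toy_domain *domain, struct toy_iommu *iommu)
{
	int num = domain->iommu_did[iommu->seq_id];

	if (num)                  /* fast path: id already cached */
		return num;

	for (num = 1; num < NDOMAINS; num++) {    /* id 0 stays reserved */
		if (!(iommu->domain_ids & (1u << num))) {
			iommu->domain_ids |= 1u << num;
			domain->iommu_did[iommu->seq_id] = num;
			return num;
		}
	}
	return -1;                /* no free domain id, like -ENOSPC */
}

/* Release the id @domain holds on @iommu, if any. */
static void toy_detach(struct toy_domain *domain, struct toy_iommu *iommu)
{
	int num = domain->iommu_did[iommu->seq_id];

	if (num == 0)
		return;           /* never attached to this unit */

	iommu->domain_ids &= ~(1u << num);
	domain->iommu_did[iommu->seq_id] = 0;
}

int main(void)
{
	struct toy_iommu iommu = { .seq_id = 0, .domain_ids = 1u };
	struct toy_domain dom;

	memset(&dom, 0, sizeof(dom));
	printf("attach -> id %d\n", toy_attach(&dom, &iommu)); /* allocates 1 */
	printf("attach -> id %d\n", toy_attach(&dom, &iommu)); /* cached, 1  */
	toy_detach(&dom, &iommu);
	printf("after detach, cached id = %u\n", dom.iommu_did[0]); /* 0 */
	return 0;
}

Reserving id 0 unconditionally (rather than only when caching mode is set) is what lets 0 double as the "no id allocated" marker, which is why iommu_init_domains() above now sets bit 0 for every IOMMU.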