Commit e6f48be

Merge tag 'arm-smmu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into iommu/fixes
Arm SMMU fixes for 5.18

- Fix off-by-one in SMMUv3 SVA TLB invalidation
- Disable large mappings to work around NVIDIA erratum

2 parents da8669f + 4a25f2e

File tree: 2 files changed (+38, -1 lines)

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

Lines changed: 8 additions & 1 deletion
```diff
@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-	size_t size = end - start + 1;
+	size_t size;
+
+	/*
+	 * The mm_types defines vm_end as the first byte after the end address,
+	 * different from IOMMU subsystem using the last address of an address
+	 * range. So do a simple translation here by calculating size correctly.
+	 */
+	size = end - start;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
 		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
```
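The bug fixed here is a convention mismatch: mmu_notifier ranges follow the mm convention in which `end` is the first byte *after* the range, so the byte count is simply `end - start`. The old end-inclusive formula `end - start + 1` produced a size one byte too large and therefore no longer page-aligned. A minimal user-space sketch of the arithmetic (not kernel code; `PAGE_SIZE` is hard-coded here purely for illustration):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumption for this sketch: 4KiB pages */

int main(void)
{
	/* An end-exclusive range covering exactly two pages: [0x1000, 0x3000) */
	unsigned long start = 0x1000, end = 0x3000;

	unsigned long old_size = end - start + 1; /* 0x2001: one byte too big */
	unsigned long new_size = end - start;     /* 0x2000: page-aligned     */

	/* Rounding the oversized count up to pages covers a spurious third page. */
	printf("old: %#lx (%lu pages)\n", old_size,
	       (old_size + PAGE_SIZE - 1) / PAGE_SIZE);
	printf("new: %#lx (%lu pages)\n", new_size,
	       new_size / PAGE_SIZE);
	return 0;
}
```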

drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c

Lines changed: 30 additions & 0 deletions
```diff
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
 		dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+				    struct io_pgtable_cfg *pgtbl_cfg,
+				    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct device_node *np = smmu->dev->of_node;
+
+	/*
+	 * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
+	 * entries to not be invalidated correctly. The problem is that the walk
+	 * cache index generated for IOVA is not same across translation and
+	 * invalidation requests. This is leading to page faults when PMD entry
+	 * is released during unmap and populated with new PTE table during
+	 * subsequent map request. Disabling large page mappings avoids the
+	 * release of PMD entry and avoid translations seeing stale PMD entry in
+	 * walk cache.
+	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+	 * Tegra234.
+	 */
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+		smmu->pgsize_bitmap = PAGE_SIZE;
+		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+	}
+
+	return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.global_fault = nvidia_smmu_global_fault,
 	.context_fault = nvidia_smmu_context_fault,
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
```
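Clearing every large-page bit from `pgsize_bitmap` means the io-pgtable code can only ever install PTE-level (PAGE_SIZE) entries, so no PMD-level block mapping is ever torn down on unmap and the stale-walk-cache window never opens. A rough user-space sketch of how a page-size bitmap constrains mapping granularity; `pick_pgsize()` is a hypothetical stand-in modeled loosely on the kernel's page-size selection, not the actual implementation:

```c
#include <stdio.h>

#define SZ_4K 0x00001000UL
#define SZ_2M 0x00200000UL
#define SZ_1G 0x40000000UL

/*
 * Pick the largest supported page size that fits both the IOVA alignment
 * and the remaining length (simplified stand-in for the kernel's logic).
 */
static unsigned long pick_pgsize(unsigned long bitmap,
				 unsigned long iova, unsigned long len)
{
	for (int bit = 63; bit >= 0; bit--) {
		unsigned long size = 1UL << bit;

		if ((bitmap & size) && !(iova & (size - 1)) && len >= size)
			return size;
	}
	return 0;
}

int main(void)
{
	unsigned long iova = 0x40000000UL, len = SZ_2M; /* 2MiB-aligned, 2MiB */

	/* Full bitmap: a single 2MiB block (PMD-level) entry is chosen. */
	printf("4K|2M|1G -> %#lx\n", pick_pgsize(SZ_4K | SZ_2M | SZ_1G, iova, len));

	/* Workaround bitmap (PAGE_SIZE only): 512 PTE-level entries instead. */
	printf("4K only  -> %#lx\n", pick_pgsize(SZ_4K, iova, len));
	return 0;
}
```

The trade-off is deeper page-table walks and more TLB entries for the same IOVA range, which is why the restriction is applied only on the affected Tegra194 and Tegra234 SoCs.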
