
Commit d68beb2

nicolinc authored and jgunthorpe committed
iommu/arm-smmu-v3: Support IOMMU_HWPT_INVALIDATE using a VIOMMU object
Implement the vIOMMU's cache_invalidate op for user space to invalidate the
IOTLB entries, Device ATS and CD entries that are cached by hardware.

Add struct iommu_viommu_arm_smmuv3_invalidate defining invalidation entries
that are simply in the native format of a 128-bit TLBI command. Scan those
commands against the permitted command list and fix their VMID/SID fields to
match what is stored in the vIOMMU.

Link: https://patch.msgid.link/r/[email protected]
Co-developed-by: Eric Auger <[email protected]>
Signed-off-by: Eric Auger <[email protected]>
Co-developed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Nicolin Chen <[email protected]>
Tested-by: Nicolin Chen <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent f27298a commit d68beb2
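For orientation, here is a minimal user-space sketch of feeding one native command into this path via ioctl(IOMMU_HWPT_INVALIDATE). It assumes a kernel containing this commit and an already-allocated vIOMMU object; the iommufd file descriptor, viommu_id, and ASID below are placeholder values, and submit_tlbi_nh_asid() is a hypothetical helper, not part of the patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <endian.h>
#include <linux/iommufd.h>

/* Hypothetical helper: invalidate all stage-1 TLB entries for one ASID. */
static int submit_tlbi_nh_asid(int iommufd, uint32_t viommu_id, uint16_t asid)
{
	struct iommu_viommu_arm_smmuv3_invalidate inv = {};
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,	/* a vIOMMU ID is accepted here */
		.data_uptr = (uintptr_t)&inv,
		.data_type = IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
		.entry_len = sizeof(inv),
		.entry_num = 1,
	};

	/*
	 * CMDQ_OP_TLBI_NH_ASID (opcode 0x11) with the ASID in bits [63:48]
	 * of CMD[0], per the SMMUv3 spec. The VMID field can be left zero:
	 * the kernel overwrites it with the vIOMMU's VMID anyway.
	 */
	inv.cmd[0] = htole64(0x11ULL | ((uint64_t)asid << 48));
	inv.cmd[1] = htole64(0);

	return ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);
}

As the uapi text added below notes, each entry must be a little-endian 128-bit command, and only the listed opcodes are accepted; anything else fails with -EIO.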

File tree

4 files changed: +166 −3 lines changed

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c

Lines changed: 134 additions & 0 deletions
@@ -215,8 +215,134 @@ arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
 	return &nested_domain->domain;
 }
 
+static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)
+{
+	struct arm_smmu_master *master;
+	struct device *dev;
+	int ret = 0;
+
+	xa_lock(&vsmmu->core.vdevs);
+	dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
+	if (!dev) {
+		ret = -EIO;
+		goto unlock;
+	}
+	master = dev_iommu_priv_get(dev);
+
+	/* At this moment, iommufd only supports PCI device that has one SID */
+	if (sid)
+		*sid = master->streams[0].id;
+unlock:
+	xa_unlock(&vsmmu->core.vdevs);
+	return ret;
+}
+
+/* This is basically iommu_viommu_arm_smmuv3_invalidate in u64 for conversion */
+struct arm_vsmmu_invalidation_cmd {
+	union {
+		u64 cmd[2];
+		struct iommu_viommu_arm_smmuv3_invalidate ucmd;
+	};
+};
+
+/*
+ * Convert, in place, the raw invalidation command into an internal format that
+ * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
+ * stored in CPU endian.
+ *
+ * Enforce the VMID or SID on the command.
+ */
+static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu,
+				      struct arm_vsmmu_invalidation_cmd *cmd)
+{
+	/* Commands are le64 stored in u64 */
+	cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
+	cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);
+
+	switch (cmd->cmd[0] & CMDQ_0_OP) {
+	case CMDQ_OP_TLBI_NSNH_ALL:
+		/* Convert to NH_ALL */
+		cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
+			      FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
+		cmd->cmd[1] = 0;
+		break;
+	case CMDQ_OP_TLBI_NH_VA:
+	case CMDQ_OP_TLBI_NH_VAA:
+	case CMDQ_OP_TLBI_NH_ALL:
+	case CMDQ_OP_TLBI_NH_ASID:
+		cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
+		cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
+		break;
+	case CMDQ_OP_ATC_INV:
+	case CMDQ_OP_CFGI_CD:
+	case CMDQ_OP_CFGI_CD_ALL: {
+		u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);
+
+		if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))
+			return -EIO;
+		cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
+		cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
+		break;
+	}
+	default:
+		return -EIO;
+	}
+	return 0;
+}
+
+static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
+				      struct iommu_user_data_array *array)
+{
+	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
+	struct arm_smmu_device *smmu = vsmmu->smmu;
+	struct arm_vsmmu_invalidation_cmd *last;
+	struct arm_vsmmu_invalidation_cmd *cmds;
+	struct arm_vsmmu_invalidation_cmd *cur;
+	struct arm_vsmmu_invalidation_cmd *end;
+	int ret;
+
+	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
+	if (!cmds)
+		return -ENOMEM;
+	cur = cmds;
+	end = cmds + array->entry_num;
+
+	static_assert(sizeof(*cmds) == 2 * sizeof(u64));
+	ret = iommu_copy_struct_from_full_user_array(
+		cmds, sizeof(*cmds), array,
+		IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3);
+	if (ret)
+		goto out;
+
+	last = cmds;
+	while (cur != end) {
+		ret = arm_vsmmu_convert_user_cmd(vsmmu, cur);
+		if (ret)
+			goto out;
+
+		/* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
+		cur++;
+		if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
+			continue;
+
+		/* FIXME always uses the main cmdq rather than trying to group by type */
+		ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
+						  cur - last, true);
+		if (ret) {
+			cur--;
+			goto out;
+		}
+		last = cur;
+	}
+out:
+	array->entry_num = cur - cmds;
+	kfree(cmds);
+	return ret;
+}
+
 static const struct iommufd_viommu_ops arm_vsmmu_ops = {
 	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
+	.cache_invalidate = arm_vsmmu_cache_invalidate,
 };
 
 struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
@@ -239,6 +365,14 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
 	if (s2_parent->smmu != master->smmu)
 		return ERR_PTR(-EINVAL);
 
+	/*
+	 * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
+	 * defect is needed to determine if arm_vsmmu_cache_invalidate() needs
+	 * any change to remove this.
+	 */
+	if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	/*
 	 * Must support some way to prevent the VM from bypassing the cache
 	 * because VFIO currently does not do any cache maintenance. canwbs
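To make the VMID enforcement in arm_vsmmu_convert_user_cmd() above concrete, here is a small standalone sketch of the same bit manipulation on CMD[0] of a TLBI command. The masks are restated locally so the program compiles on its own (opcode in bits [7:0], VMID in bits [47:32], matching the driver's GENMASK_ULL() definitions); the guest and host VMID values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Restated for illustration; the driver defines these with GENMASK_ULL(). */
#define CMDQ_0_OP		0xffULL			/* CMD[0] bits [7:0]   */
#define CMDQ_TLBI_0_VMID	(0xffffULL << 32)	/* CMD[0] bits [47:32] */
#define CMDQ_OP_TLBI_NH_ASID	0x11ULL

int main(void)
{
	/* Guest-built command: TLBI_NH_ASID carrying the guest's own VMID (9). */
	uint64_t cmd0 = CMDQ_OP_TLBI_NH_ASID | (9ULL << 32);
	uint16_t host_vmid = 5;	/* the VMID the vIOMMU object really owns */

	if ((cmd0 & CMDQ_0_OP) == CMDQ_OP_TLBI_NH_ASID) {
		cmd0 &= ~CMDQ_TLBI_0_VMID;		/* strip the guest VMID */
		cmd0 |= (uint64_t)host_vmid << 32;	/* enforce the host VMID */
	}
	printf("fixed CMD[0] = %#018llx\n", (unsigned long long)cmd0);
	return 0;
}

The ATC_INV/CFGI cases follow the same pattern for the SID field, except the guest's virtual SID is first translated by arm_vsmmu_vsid_to_sid() before being written back into CMDQ_CFGI_0_SID.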

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

Lines changed: 3 additions & 3 deletions
@@ -766,9 +766,9 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
  * insert their own list of commands then all of the commands from one
  * CPU will appear before any of the commands from the other CPU.
  */
-static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
-				       struct arm_smmu_cmdq *cmdq,
-				       u64 *cmds, int n, bool sync)
+int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+				struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
+				bool sync)
 {
 	u64 cmd_sync[CMDQ_ENT_DWORDS];
 	u32 prod;

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

Lines changed: 5 additions & 0 deletions
@@ -529,6 +529,7 @@ struct arm_smmu_cmdq_ent {
 		#define CMDQ_OP_TLBI_NH_ALL	0x10
 		#define CMDQ_OP_TLBI_NH_ASID	0x11
 		#define CMDQ_OP_TLBI_NH_VA	0x12
+		#define CMDQ_OP_TLBI_NH_VAA	0x13
 		#define CMDQ_OP_TLBI_EL2_ALL	0x20
 		#define CMDQ_OP_TLBI_EL2_ASID	0x21
 		#define CMDQ_OP_TLBI_EL2_VA	0x22
@@ -951,6 +952,10 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state);
 void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
 				  const struct arm_smmu_ste *target);
 
+int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+				struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
+				bool sync);
+
 #ifdef CONFIG_ARM_SMMU_V3_SVA
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
 bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);

include/uapi/linux/iommufd.h

Lines changed: 24 additions & 0 deletions
@@ -713,9 +713,11 @@ struct iommu_hwpt_get_dirty_bitmap {
  * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
  *                                        Data Type
  * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
  */
 enum iommu_hwpt_invalidate_data_type {
 	IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
+	IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1,
 };
 
 /**
@@ -754,6 +756,28 @@ struct iommu_hwpt_vtd_s1_invalidate {
 	__u32 __reserved;
 };
 
+/**
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ *                                             (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
+ *       Must be little-endian.
+ *
+ * Supported command list only when passing in a vIOMMU via @hwpt_id:
+ *	CMDQ_OP_TLBI_NSNH_ALL
+ *	CMDQ_OP_TLBI_NH_VA
+ *	CMDQ_OP_TLBI_NH_VAA
+ *	CMDQ_OP_TLBI_NH_ALL
+ *	CMDQ_OP_TLBI_NH_ASID
+ *	CMDQ_OP_ATC_INV
+ *	CMDQ_OP_CFGI_CD
+ *	CMDQ_OP_CFGI_CD_ALL
+ *
+ * -EIO will be returned if the command is not supported.
+ */
+struct iommu_viommu_arm_smmuv3_invalidate {
+	__aligned_le64 cmd[2];
+};
+
 /**
  * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
  * @size: sizeof(struct iommu_hwpt_invalidate)
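Because arm_vsmmu_cache_invalidate() stores the number of commands it consumed back into the array (array->entry_num = cur - cmds, copied out through the ioctl), user space can tell where a batch stopped on failure. A sketch of that check, again with a hypothetical helper and placeholder IDs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* 'invs' holds 'n' pre-built little-endian commands (see the first sketch). */
static int submit_batch(int iommufd, uint32_t viommu_id,
			struct iommu_viommu_arm_smmuv3_invalidate *invs,
			uint32_t n)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_uptr = (uintptr_t)invs,
		.data_type = IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
		.entry_len = sizeof(*invs),
		.entry_num = n,
	};

	if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd)) {
		/*
		 * entry_num was updated to the count actually handled; an
		 * unsupported opcode or unknown vSID shows up as -EIO here.
		 */
		fprintf(stderr, "invalidate stopped at entry %u: %d\n",
			cmd.entry_num, errno);
		return -1;
	}
	return 0;
}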
