@@ -984,7 +984,7 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
  * The difference between val and sync_idx is bounded by the maximum size of
  * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
  */
-static int arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
 {
 	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_SYNC_TIMEOUT_US);
 	u32 val = smp_cond_load_acquire(&smmu->sync_count,
@@ -994,30 +994,53 @@ static int arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
 	return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
 }
 
-static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+{
+	u64 cmd[CMDQ_ENT_DWORDS];
+	unsigned long flags;
+	struct arm_smmu_cmdq_ent ent = {
+		.opcode = CMDQ_OP_CMD_SYNC,
+		.sync	= {
+			.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
+			.msiaddr = virt_to_phys(&smmu->sync_count),
+		},
+	};
+
+	arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+	spin_lock_irqsave(&smmu->cmdq.lock, flags);
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+	return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+}
+
+static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 {
 	u64 cmd[CMDQ_ENT_DWORDS];
 	unsigned long flags;
 	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
-	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
-		   (smmu->features & ARM_SMMU_FEAT_COHERENCY);
 	struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
 	int ret;
 
-	if (msi) {
-		ent.sync.msidata = atomic_inc_return_relaxed(&smmu->sync_nr);
-		ent.sync.msiaddr = virt_to_phys(&smmu->sync_count);
-	}
 	arm_smmu_cmdq_build_cmd(cmd, &ent);
 
 	spin_lock_irqsave(&smmu->cmdq.lock, flags);
 	arm_smmu_cmdq_insert_cmd(smmu, cmd);
-	if (!msi)
-		ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
+	ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
 	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
 
-	if (msi)
-		ret = arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+	return ret;
+}
+
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+	int ret;
+	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
+		   (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+
+	ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
+		  : __arm_smmu_cmdq_issue_sync(smmu);
 	if (ret)
 		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
 }
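
The comment above the poll routine notes that the difference between `val` and `sync_idx` is bounded by the maximum queue size of 2^20 entries, so a 32-bit subtract-and-sign-test is wrap-safe. As a stand-alone illustration of that idiom (a minimal user-space sketch, not part of the patch; `seq_reached` is a hypothetical helper name used only here), the same comparison can be exercised across a counter wrap:

```c
/*
 * Sketch of the wrap-safe comparison used by __arm_smmu_sync_poll_msi():
 * with 32-bit sequence numbers, subtracting and testing the sign works
 * across wrap-around as long as the two values stay within 2^31 of each
 * other; here they differ by at most 2^20, the maximum queue size.
 */
#include <stdint.h>
#include <stdio.h>

/* True once @val has reached (or passed) @sync_idx, wrap included. */
static int seq_reached(uint32_t val, uint32_t sync_idx)
{
	return (int32_t)(val - sync_idx) >= 0;
}

int main(void)
{
	printf("%d\n", seq_reached(0xfffffff0u, 0xfffffff0u)); /* 1: equal */
	printf("%d\n", seq_reached(0x00000005u, 0xfffffff0u)); /* 1: passed, counter wrapped */
	printf("%d\n", seq_reached(0xfffffff0u, 0x00000005u)); /* 0: still behind */
	return 0;
}
```

This is the same reasoning behind the timeout path in the patch: after `smp_cond_load_acquire()` gives up, `(int)(val - sync_idx) < 0` means the MSI write for this sync has still not landed, so the function returns -ETIMEDOUT.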