@@ -103,15 +103,15 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
-		if (cp->a2 == 0 && cp->a3 == 0)
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
 			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
 		else
 			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
 						  cp->a2, cp->a3, PAGE_SHIFT);
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
-		if (cp->a2 == 0 && cp->a3 == 0)
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
 			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
 						       hbase, hmask, cp->a4);
 		else
@@ -127,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
 		/*
 		 * Until nested virtualization is implemented, the
-		 * SBI HFENCE calls should be treated as NOPs
+		 * SBI HFENCE calls should return not supported
+		 * hence fallthrough.
 		 */
-		break;
 	default:
 		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 	}
0 commit comments