@@ -133,14 +133,14 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 	if (!p->is_aarch32 || !p->is_32bit) {
 		val = p->regval;
 	} else {
-		val = vcpu_sys_reg(vcpu, reg);
+		val = vcpu_read_sys_reg(vcpu, reg);
 		if (r->reg % 2)
 			val = (p->regval << 32) | (u64)lower_32_bits(val);
 		else
 			val = ((u64)upper_32_bits(val) << 32) |
 				lower_32_bits(p->regval);
 	}
-	vcpu_sys_reg(vcpu, reg) = val;
+	vcpu_write_sys_reg(vcpu, val, reg);
 
 	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
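Note: the hunks in this commit split the old vcpu_sys_reg() lvalue macro three ways. Trap handlers such as access_vm_reg() above now go through vcpu_read_sys_reg()/vcpu_write_sys_reg(), which can defer to the live hardware register when the guest's sysregs are loaded on the CPU, while reset code and PMU state that only ever lives in memory keeps direct array access via __vcpu_sys_reg(). A minimal sketch of the assumed declarations (the macro body is an assumption; the exact in-tree definitions may differ):

    /* Direct access to the in-memory copy; assumed backing layout. */
    #define __vcpu_sys_reg(v, r)	((v)->arch.ctxt.sys_regs[(r)])

    /* Accessor functions that may redirect to the hardware register. */
    u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg);
    void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);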
@@ -249,10 +249,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 			    const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = p->regval;
+		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		p->regval = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
 	}
 
 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
@@ -465,7 +465,8 @@ static void reset_wcr(struct kvm_vcpu *vcpu,
 
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
+	u64 amair = read_sysreg(amair_el1);
+	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
 }
 
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -482,7 +483,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
-	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
+	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }
 
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
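The reset_mpidr() hunk above packs vcpu_id into the MPIDR affinity fields (4 bits of Aff0, 8 of Aff1, 8 of Aff2) and sets the RES1 bit 31. A worked example, assuming MPIDR_LEVEL_SHIFT(n) expands to 0, 8 and 16:

    /* vcpu_id = 0x1234:
     *   Aff0 = 0x1234 & 0x0f         = 0x04
     *   Aff1 = (0x1234 >> 4) & 0xff  = 0x23
     *   Aff2 = (0x1234 >> 12) & 0xff = 0x01
     * MPIDR = (1ULL << 31) | (0x01 << 16) | (0x23 << 8) | 0x04
     *       = 0x80012304
     */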
@@ -496,12 +497,12 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
-	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
 	if (!enabled)
@@ -543,14 +544,14 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	if (p->is_write) {
 		/* Only update writeable bits of PMCR */
-		val = vcpu_sys_reg(vcpu, PMCR_EL0);
+		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
-		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
-		val = vcpu_sys_reg(vcpu, PMCR_EL0)
+		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 		p->regval = val;
 	}
@@ -568,10 +569,10 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		return false;
 
 	if (p->is_write)
-		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
 	else
 		/* return PMSELR.SEL field */
-		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
+		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
 			    & ARMV8_PMU_COUNTER_MASK;
 
 	return true;
@@ -604,7 +605,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 {
 	u64 pmcr, val;
 
-	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 		kvm_inject_undefined(vcpu);
@@ -629,7 +630,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 		if (pmu_access_event_counter_el0_disabled(vcpu))
 			return false;
 
-		idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
 		      & ARMV8_PMU_COUNTER_MASK;
 	} else if (r->Op2 == 0) {
 		/* PMCCNTR_EL0 */
@@ -684,7 +685,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 		/* PMXEVTYPER_EL0 */
-		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
 		reg = PMEVTYPER0_EL0 + idx;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
@@ -702,9 +703,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	if (p->is_write) {
 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
 	} else {
-		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
 	}
 
 	return true;
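For the per-counter PMEVTYPERn_EL0 encodings (the CRn == 14 branch two hunks up), the counter index is recovered straight from the encoding bits. A worked example under that formula:

    /* PMEVTYPER13_EL0 is encoded with CRm = 13 (0b1101), Op2 = 5:
     *   idx = ((13 & 3) << 3) | (5 & 7) = (1 << 3) | 5 = 13
     * so the handler operates on the shadow state for event counter 13.
     */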
@@ -726,15 +727,15 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = p->regval & mask;
 		if (r->Op2 & 0x1) {
 			/* accessing PMCNTENSET_EL0 */
-			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
 			kvm_pmu_enable_counter(vcpu, val);
 		} else {
 			/* accessing PMCNTENCLR_EL0 */
-			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
 			kvm_pmu_disable_counter(vcpu, val);
 		}
 	} else {
-		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
 	}
 
 	return true;
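The PMCNTENSET/PMCNTENCLR pair above (and likewise PMINTENSET/CLR and PMOVSSET/CLR in the next two hunks) share a single backing value: the SET encoding ORs bits in, the CLR encoding masks them out, and a read of either returns the stored value. An illustrative sequence with invented values:

    /*
     *   write PMCNTENSET_EL0 = 0x5  ->  stored value |=  0x5   (stored: 0x5)
     *   write PMCNTENCLR_EL0 = 0x1  ->  stored value &= ~0x1   (stored: 0x4)
     *   read  either encoding       ->  returns stored value & mask
     */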
@@ -758,12 +759,12 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 		if (r->Op2 & 0x1)
 			/* accessing PMINTENSET_EL1 */
-			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
 		else
 			/* accessing PMINTENCLR_EL1 */
-			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
 	} else {
-		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
 	}
 
 	return true;
@@ -783,12 +784,12 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
 		else
 			/* accessing PMOVSCLR_EL0 */
-			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
 	} else {
-		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
 	}
 
 	return true;
@@ -825,10 +826,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			return false;
 		}
 
-		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
-					& ARMV8_PMU_USERENR_MASK;
+		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
+			p->regval & ARMV8_PMU_USERENR_MASK;
 	} else {
-		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
 			    & ARMV8_PMU_USERENR_MASK;
 	}
 
@@ -2230,7 +2231,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 	if (r->get_user)
 		return (r->get_user)(vcpu, r, reg, uaddr);
 
-	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
+	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
 }
 
 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -2251,7 +2252,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 	if (r->set_user)
 		return (r->set_user)(vcpu, r, reg, uaddr);
 
-	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
+	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
 }
 
 static unsigned int num_demux_regs(void)
@@ -2457,6 +2458,6 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 	reset_sys_reg_descs(vcpu, table, num);
 
 	for (num = 1; num < NR_SYS_REGS; num++)
-		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
-			panic("Didn't reset vcpu_sys_reg (%zi)", num);
+		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
+			panic("Didn't reset __vcpu_sys_reg (%zi)", num);
 }
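The 0x4242424242424242 check in the final hunk is the verify half of a poison-then-reset pattern: kvm_reset_sys_regs() is assumed to fill the whole sys_regs array with 0x42 bytes before running the table-driven resets, so any register whose descriptor lacks a reset entry still holds the canary and triggers the panic. A sketch of the assumed poison step:

    /* Assumed fill earlier in kvm_reset_sys_regs(); each u64 filled
     * with 0x42 bytes reads back as 0x4242424242424242. */
    memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));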