Merge branch kvm-arm64/vgic-fixes-7.1 into kvmarm-master/next

* kvm-arm64/vgic-fixes-7.1:
  : .
  : First pass at fixing a number of vgic-v5 bugs that were found
  : after the merge of the initial series.
  : .
  KVM: arm64: Advertise ID_AA64PFR2_EL1.GCIE
  KVM: arm64: vgic-v5: Fold PPI state for all exposed PPIs
  KVM: arm64: set_id_regs: Allow GICv3 support to be set at runtime
  KVM: arm64: Don't advertise GICv3 in ID_PFR1_EL1 if AArch32 isn't supported
  KVM: arm64: Correctly plumb ID_AA64PFR2_EL1 into pkvm idreg handling
  KVM: arm64: Move GICv5 timer PPI validation into timer_irqs_are_valid()
  KVM: arm64: Remove evaluation of timer state in kvm_cpu_has_pending_timer()
  KVM: arm64: Kill arch_timer_context::direct field
  KVM: arm64: vgic-v5: Correctly set dist->ready once initialised
  KVM: arm64: vgic-v5: Make the effective priority mask a strict limit
  KVM: arm64: vgic-v5: Cast vgic_apr to u32 to avoid undefined behaviours
  KVM: arm64: vgic-v5: Transfer edge pending state to ICH_PPI_PENDRx_EL2
  KVM: arm64: vgic-v5: Hold config_lock while finalizing GICv5 PPIs
  KVM: arm64: Account for RESx bits in __compute_fgt()
  KVM: arm64: Fix writeable mask for ID_AA64PFR2_EL1
  arm64: Fix field references for ICH_PPI_DVIR[01]_EL2
  KVM: arm64: Don't skip per-vcpu NV initialisation
  KVM: arm64: vgic: Don't reset cpuif/redist addresses at finalize time

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Marc Zyngier
2026-04-08 12:26:00 +01:00
12 changed files with 125 additions and 92 deletions

View File

@@ -821,14 +821,7 @@ struct kvm_host_data {
/* PPI state tracking for GICv5-based guests */
struct {
/*
* For tracking the PPI pending state, we need both the entry
* state and exit state to correctly detect edges as it is
* possible that an interrupt has been injected in software in
* the interim.
*/
DECLARE_BITMAP(pendr_entry, VGIC_V5_NR_PRIVATE_IRQS);
DECLARE_BITMAP(pendr_exit, VGIC_V5_NR_PRIVATE_IRQS);
DECLARE_BITMAP(pendr, VGIC_V5_NR_PRIVATE_IRQS);
/* The saved state of the regs when leaving the guest */
DECLARE_BITMAP(activer_exit, VGIC_V5_NR_PRIVATE_IRQS);

View File

@@ -325,6 +325,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_GCIE_SHIFT, 4, ID_AA64PFR2_EL1_GCIE_NI),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTEFAR_SHIFT, 4, ID_AA64PFR2_EL1_MTEFAR_NI),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTESTOREONLY_SHIFT, 4, ID_AA64PFR2_EL1_MTESTOREONLY_NI),
ARM64_FTR_END,

View File

@@ -183,10 +183,6 @@ void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
map->emul_ptimer = vcpu_ptimer(vcpu);
}
map->direct_vtimer->direct = true;
if (map->direct_ptimer)
map->direct_ptimer->direct = true;
trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}
@@ -406,11 +402,7 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
return kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer) ||
(vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0);
return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}
/*
@@ -462,8 +454,15 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
return;
/* Skip injecting on GICv5 for directly injected (DVI'd) timers */
if (vgic_is_v5(vcpu->kvm) && timer_ctx->direct)
return;
if (vgic_is_v5(vcpu->kvm)) {
struct timer_map map;
get_timer_map(vcpu, &map);
if (map.direct_ptimer == timer_ctx ||
map.direct_vtimer == timer_ctx)
return;
}
kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
@@ -1544,6 +1543,10 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
if (kvm_vgic_set_owner(vcpu, irq, ctx))
break;
/* With GICv5, the default PPI is what you get -- nothing else */
if (vgic_is_v5(vcpu->kvm) && irq != get_vgic_ppi(vcpu->kvm, default_ppi[i]))
break;
/*
* We know by construction that we only have PPIs, so all values
* are less than 32 for non-GICv5 VGICs. On GICv5, they are
@@ -1679,13 +1682,6 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -ENXIO;
}
/*
* The PPIs for the Arch Timers are architecturally defined for
* GICv5. Reject anything that changes them from the specified value.
*/
if (vgic_is_v5(vcpu->kvm) && vcpu->kvm->arch.timer_data.ppi[idx] != irq)
return -EINVAL;
/*
* We cannot validate the IRQ unicity before we run, so take it at
* face value. The verdict will be given on first vcpu run, for each

View File

@@ -1663,8 +1663,8 @@ static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysre
clear |= ~nested & m->nmask;
}
val |= set;
val &= ~clear;
val |= set | m->res1;
val &= ~(clear | m->res0);
*vcpu_fgt(vcpu, reg) = val;
}

View File

@@ -447,7 +447,7 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
/* CRm=4 */
AARCH64(SYS_ID_AA64PFR0_EL1),
AARCH64(SYS_ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
AARCH64(SYS_ID_AA64PFR2_EL1),
ID_UNALLOCATED(4,3),
AARCH64(SYS_ID_AA64ZFR0_EL1),
ID_UNALLOCATED(4,5),

View File

@@ -37,7 +37,7 @@ void __vgic_v5_save_ppi_state(struct vgic_v5_cpu_if *cpu_if)
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->activer_exit,
read_sysreg_s(SYS_ICH_PPI_ACTIVER0_EL2), 0, 64);
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->pendr_exit,
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->pendr,
read_sysreg_s(SYS_ICH_PPI_PENDR0_EL2), 0, 64);
cpu_if->vgic_ppi_priorityr[0] = read_sysreg_s(SYS_ICH_PPI_PRIORITYR0_EL2);
@@ -52,7 +52,7 @@ void __vgic_v5_save_ppi_state(struct vgic_v5_cpu_if *cpu_if)
if (VGIC_V5_NR_PRIVATE_IRQS == 128) {
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->activer_exit,
read_sysreg_s(SYS_ICH_PPI_ACTIVER1_EL2), 64, 64);
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->pendr_exit,
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->pendr,
read_sysreg_s(SYS_ICH_PPI_PENDR1_EL2), 64, 64);
cpu_if->vgic_ppi_priorityr[8] = read_sysreg_s(SYS_ICH_PPI_PRIORITYR8_EL2);
@@ -87,7 +87,7 @@ void __vgic_v5_restore_ppi_state(struct vgic_v5_cpu_if *cpu_if)
SYS_ICH_PPI_ENABLER0_EL2);
/* Update the pending state of the NON-DVI'd PPIs, only */
bitmap_andnot(pendr, host_data_ptr(vgic_v5_ppi_state)->pendr_entry,
bitmap_andnot(pendr, host_data_ptr(vgic_v5_ppi_state)->pendr,
cpu_if->vgic_ppi_dvir, VGIC_V5_NR_PRIVATE_IRQS);
write_sysreg_s(bitmap_read(pendr, 0, 64), SYS_ICH_PPI_PENDR0_EL2);

View File

@@ -3304,10 +3304,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR1_EL1_MPAM_frac |
ID_AA64PFR1_EL1_MTE)),
ID_FILTERED(ID_AA64PFR2_EL1, id_aa64pfr2_el1,
~(ID_AA64PFR2_EL1_FPMR |
ID_AA64PFR2_EL1_MTEFAR |
ID_AA64PFR2_EL1_MTESTOREONLY |
ID_AA64PFR2_EL1_GCIE)),
(ID_AA64PFR2_EL1_FPMR |
ID_AA64PFR2_EL1_MTEFAR |
ID_AA64PFR2_EL1_MTESTOREONLY |
ID_AA64PFR2_EL1_GCIE)),
ID_UNALLOCATED(4,3),
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
ID_HIDDEN(ID_AA64SMFR0_EL1),
@@ -5772,6 +5772,12 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
guard(mutex)(&kvm->arch.config_lock);
if (vcpu_has_nv(vcpu)) {
int ret = kvm_init_nv_sysregs(vcpu);
if (ret)
return ret;
}
if (kvm_vm_has_ran_once(kvm))
return 0;
@@ -5820,12 +5826,6 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
kvm_vgic_finalize_idregs(kvm);
}
if (vcpu_has_nv(vcpu)) {
int ret = kvm_init_nv_sysregs(vcpu);
if (ret)
return ret;
}
return 0;
}

View File

@@ -147,6 +147,15 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
break;
case KVM_DEV_TYPE_ARM_VGIC_V3:
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
break;
}
/*
* We've now created the GIC. Update the system register state
* to accurately reflect what we've created.
@@ -648,16 +657,20 @@ int kvm_vgic_map_resources(struct kvm *kvm)
needs_dist = false;
}
if (ret || !needs_dist)
if (ret)
goto out;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
if (needs_dist) {
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
kvm_err("Unable to register VGIC dist MMIO regions\n");
goto out_slots;
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
kvm_err("Unable to register VGIC dist MMIO regions\n");
goto out_slots;
}
} else {
mutex_unlock(&kvm->arch.config_lock);
}
smp_store_release(&dist->ready, true);
@@ -684,12 +697,11 @@ void kvm_vgic_finalize_idregs(struct kvm *kvm)
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
break;
case KVM_DEV_TYPE_ARM_VGIC_V3:
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
if (kvm_supports_32bit_el0())
pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
break;
case KVM_DEV_TYPE_ARM_VGIC_V5:
aa64pfr2 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR2_EL1, GCIE, IMP);

View File

@@ -172,6 +172,16 @@ int vgic_v5_finalize_ppi_state(struct kvm *kvm)
if (!vgic_is_v5(kvm))
return 0;
guard(mutex)(&kvm->arch.config_lock);
/*
* If SW_PPI has been advertised, then we know we already
* initialised the whole thing, and we can return early. Yes,
* this is pretty hackish as far as state tracking goes...
*/
if (test_bit(GICV5_ARCH_PPI_SW_PPI, kvm->arch.vgic.gicv5_vm.vgic_ppi_mask))
return 0;
/* The PPI state for all VCPUs should be the same. Pick the first. */
vcpu0 = kvm_get_vcpu(kvm, 0);
@@ -202,7 +212,7 @@ int vgic_v5_finalize_ppi_state(struct kvm *kvm)
static u32 vgic_v5_get_effective_priority_mask(struct kvm_vcpu *vcpu)
{
struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
u32 highest_ap, priority_mask;
u32 highest_ap, priority_mask, apr;
/*
* If the guest's CPU has not opted to receive interrupts, then the
@@ -217,7 +227,8 @@ static u32 vgic_v5_get_effective_priority_mask(struct kvm_vcpu *vcpu)
* priority. Explicitly use the 32-bit version here as we have 32
* priorities. 32 then means that there are no active priorities.
*/
highest_ap = cpu_if->vgic_apr ? __builtin_ctz(cpu_if->vgic_apr) : 32;
apr = cpu_if->vgic_apr;
highest_ap = apr ? __builtin_ctz(apr) : 32;
/*
* An interrupt is of sufficient priority if it is equal to or
@@ -355,8 +366,8 @@ bool vgic_v5_has_pending_ppi(struct kvm_vcpu *vcpu)
irq = vgic_get_vcpu_irq(vcpu, intid);
scoped_guard(raw_spinlock_irqsave, &irq->irq_lock)
has_pending = (irq->enabled && irq_is_pending(irq) &&
irq->priority <= priority_mask);
if (irq->enabled && irq->priority < priority_mask)
has_pending = irq->hw ? vgic_get_phys_line_level(irq) : irq_is_pending(irq);
vgic_put_irq(vcpu->kvm, irq);
@@ -374,24 +385,14 @@ bool vgic_v5_has_pending_ppi(struct kvm_vcpu *vcpu)
void vgic_v5_fold_ppi_state(struct kvm_vcpu *vcpu)
{
struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
DECLARE_BITMAP(changed_active, VGIC_V5_NR_PRIVATE_IRQS);
DECLARE_BITMAP(changed_pending, VGIC_V5_NR_PRIVATE_IRQS);
DECLARE_BITMAP(changed_bits, VGIC_V5_NR_PRIVATE_IRQS);
unsigned long *activer, *pendr_entry, *pendr;
unsigned long *activer, *pendr;
int i;
activer = host_data_ptr(vgic_v5_ppi_state)->activer_exit;
pendr_entry = host_data_ptr(vgic_v5_ppi_state)->pendr_entry;
pendr = host_data_ptr(vgic_v5_ppi_state)->pendr_exit;
pendr = host_data_ptr(vgic_v5_ppi_state)->pendr;
bitmap_xor(changed_active, cpu_if->vgic_ppi_activer, activer,
VGIC_V5_NR_PRIVATE_IRQS);
bitmap_xor(changed_pending, pendr_entry, pendr,
VGIC_V5_NR_PRIVATE_IRQS);
bitmap_or(changed_bits, changed_active, changed_pending,
VGIC_V5_NR_PRIVATE_IRQS);
for_each_set_bit(i, changed_bits, VGIC_V5_NR_PRIVATE_IRQS) {
for_each_set_bit(i, vcpu->kvm->arch.vgic.gicv5_vm.vgic_ppi_mask,
VGIC_V5_NR_PRIVATE_IRQS) {
u32 intid = vgic_v5_make_ppi(i);
struct vgic_irq *irq;
@@ -435,8 +436,11 @@ void vgic_v5_flush_ppi_state(struct kvm_vcpu *vcpu)
irq = vgic_get_vcpu_irq(vcpu, intid);
scoped_guard(raw_spinlock_irqsave, &irq->irq_lock)
scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
__assign_bit(i, pendr, irq_is_pending(irq));
if (irq->config == VGIC_CONFIG_EDGE)
irq->pending_latch = false;
}
vgic_put_irq(vcpu->kvm, irq);
}
@@ -448,15 +452,7 @@ void vgic_v5_flush_ppi_state(struct kvm_vcpu *vcpu)
* incoming changes are merged with the outgoing changes on the return
* path.
*/
bitmap_copy(host_data_ptr(vgic_v5_ppi_state)->pendr_entry, pendr,
VGIC_V5_NR_PRIVATE_IRQS);
/*
* Make sure that we can correctly detect "edges" in the PPI
* state. There's a path where we never actually enter the guest, and
* failure to do this risks losing pending state
*/
bitmap_copy(host_data_ptr(vgic_v5_ppi_state)->pendr_exit, pendr,
bitmap_copy(host_data_ptr(vgic_v5_ppi_state)->pendr, pendr,
VGIC_V5_NR_PRIVATE_IRQS);
}

View File

@@ -4888,11 +4888,11 @@ Field 0 DVI0
EndSysregFields
Sysreg ICH_PPI_DVIR0_EL2 3 4 12 10 0
Fields ICH_PPI_DVIx_EL2
Fields ICH_PPI_DVIRx_EL2
EndSysreg
Sysreg ICH_PPI_DVIR1_EL2 3 4 12 10 1
Fields ICH_PPI_DVIx_EL2
Fields ICH_PPI_DVIRx_EL2
EndSysreg
SysregFields ICH_PPI_ENABLERx_EL2

View File

@@ -76,9 +76,6 @@ struct arch_timer_context {
/* Duplicated state from arch_timer.c for convenience */
u32 host_timer_irq;
/* Is this a direct timer? */
bool direct;
};
struct timer_map {

View File

@@ -37,6 +37,9 @@ struct reg_ftr_bits {
* For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
*/
int64_t safe_val;
/* Allowed to be changed by the host after run */
bool mutable;
};
struct test_feature_reg {
@@ -44,7 +47,7 @@ struct test_feature_reg {
const struct reg_ftr_bits *ftr_bits;
};
#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL) \
#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL, MUT) \
{ \
.name = #NAME, \
.sign = SIGNED, \
@@ -52,15 +55,20 @@ struct test_feature_reg {
.shift = SHIFT, \
.mask = MASK, \
.safe_val = SAFE_VAL, \
.mutable = MUT, \
}
#define REG_FTR_BITS(type, reg, field, safe_val) \
__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
reg##_##field##_MASK, safe_val)
reg##_##field##_MASK, safe_val, false)
#define REG_FTR_BITS_MUTABLE(type, reg, field, safe_val) \
__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
reg##_##field##_MASK, safe_val, true)
#define S_REG_FTR_BITS(type, reg, field, safe_val) \
__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
reg##_##field##_MASK, safe_val)
reg##_##field##_MASK, safe_val, false)
#define REG_FTR_END \
{ \
@@ -134,7 +142,8 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
/* GICv3 support will be forced at run time if available */
REG_FTR_BITS_MUTABLE(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1),
@@ -634,12 +643,38 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
}
static uint64_t reset_mutable_bits(uint32_t id, uint64_t val)
{
struct test_feature_reg *reg = NULL;
for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
if (test_regs[i].reg == id) {
reg = &test_regs[i];
break;
}
}
if (!reg)
return val;
for (const struct reg_ftr_bits *bits = reg->ftr_bits; bits->type != FTR_END; bits++) {
if (bits->mutable) {
val &= ~bits->mask;
val |= bits->safe_val << bits->shift;
}
}
return val;
}
static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
bool done = false;
struct ucall uc;
while (!done) {
uint64_t val;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
@@ -647,9 +682,11 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_SYNC:
val = test_reg_vals[encoding_to_range_idx(uc.args[2])];
val = reset_mutable_bits(uc.args[2], val);
/* Make sure the written values are seen by guest */
TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
uc.args[3]);
TEST_ASSERT_EQ(val, reset_mutable_bits(uc.args[2], uc.args[3]));
break;
case UCALL_DONE:
done = true;
@@ -740,7 +777,8 @@ static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encodin
uint64_t observed;
observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
TEST_ASSERT_EQ(test_reg_vals[idx], observed);
TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]),
reset_mutable_bits(encoding, observed));
}
static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)