KVM: arm64: Route SEAs to the SError vector when EASE is set

One of the finest additions of FEAT_DoubleFault2 is the ability for
software to request *synchronous* external aborts be taken to the
SError vector, which of course are *asynchronous* in nature.

Opinions be damned, implement the architecture and send SEAs to the
SError vector if EASE is set for the target context.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-18-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
Oliver Upton
2025-07-08 10:25:22 -07:00
parent 178ec0ae35
commit fff97df2a0
3 changed files with 46 additions and 2 deletions

View File

@@ -2834,6 +2834,10 @@ int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
if (__vcpu_sys_reg(vcpu, SCTLR2_EL2) & SCTLR2_EL1_EASE)
return kvm_inject_nested(vcpu, esr, except_type_serror);
return kvm_inject_nested_sync(vcpu, esr);
}

View File

@@ -339,6 +339,10 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
break;
case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR):
enter_exception64(vcpu, PSR_MODE_EL1h, except_type_serror);
break;
case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync);
break;
@@ -353,7 +357,7 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
default:
/*
* Only EL1_SYNC and EL2_{SYNC,IRQ,SERR} makes
* Only EL1_{SYNC,SERR} and EL2_{SYNC,IRQ,SERR} makes
* sense so far. Everything else gets silently
* ignored.
*/

View File

@@ -65,13 +65,49 @@ static void pend_sync_exception(struct kvm_vcpu *vcpu)
kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
}
/*
 * Pend a virtual SError for the vCPU, routed to whichever exception
 * level the abort targets (EL1 or EL2).
 */
static void pend_serror_exception(struct kvm_vcpu *vcpu)
{
if (exception_target_el(vcpu) == PSR_MODE_EL1h)
kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR);
else
kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
}
/*
 * Compute the effective value of a single SCTLR2_ELx bit for the
 * exception level that will take the abort.  Returns false when the
 * bit has no effect, i.e. SCTLR2 isn't exposed to the guest or, for a
 * nested context, the L1 hypervisor hasn't set HCRX_EL2.SCTLR2En.
 */
static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx)
{
u64 sctlr2;
/* No FEAT_SCTLR2 for this VM: the register behaves as RES0. */
if (!kvm_has_sctlr2(vcpu->kvm))
return false;
/* Nested guest: SCTLR2 only takes effect if L1 enabled it. */
if (is_nested_ctxt(vcpu) &&
!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En))
return false;
/* Read the SCTLR2 of the EL that the exception targets. */
if (exception_target_el(vcpu) == PSR_MODE_EL1h)
sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1);
else
sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2);
return sctlr2 & BIT(idx);
}
/*
 * SCTLR2_ELx.EASE (FEAT_DoubleFault2): when set, synchronous external
 * aborts are taken to the SError vector of the target EL.
 */
static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu)
{
return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT);
}
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
u64 esr = 0;
pend_sync_exception(vcpu);
/* This delight is brought to you by FEAT_DoubleFault2. */
if (effective_sctlr2_ease(vcpu))
pend_serror_exception(vcpu);
else
pend_sync_exception(vcpu);
/*
* Build an {i,d}abort, depending on the level and the