KVM: arm64: Reserve pKVM handle during pkvm_init_host_vm()

When a pKVM guest is active, TLB invalidations triggered by host MMU
notifiers require a valid hypervisor handle. Currently, this handle is
only allocated when the first vCPU is run.

However, the guest's memory is associated with the host MMU much
earlier, during kvm_arch_init_vm(). This creates a window where an MMU
invalidation could occur after the kvm_pgtable pointer checked by the
notifiers is set but before the pKVM handle has been created.

Fix this by reserving the pKVM handle when the host VM is first set up.
Move the call to the __pkvm_reserve_vm hypercall from the first-vCPU-run
path into pkvm_init_host_vm(), which is called during initial VM setup.
This ensures the handle is available before any subsystem can trigger an
MMU notification for the VM.

The VM destruction path is updated to call __pkvm_unreserve_vm for cases
where a VM was reserved but never fully created at the hypervisor,
ensuring the handle is properly released.

This fix leverages the two-stage reservation/initialization hypercall
interface introduced in preceding patches.

Signed-off-by: Fuad Tabba <tabba@google.com>
Tested-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Fuad Tabba
2025-09-09 08:24:36 +01:00
committed by Marc Zyngier
parent 256b4668cd
commit 07aeb70707
2 changed files with 33 additions and 14 deletions

View File

@@ -90,6 +90,12 @@ static void __pkvm_destroy_hyp_vm(struct kvm *kvm)
if (pkvm_hyp_vm_is_created(kvm)) {
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
kvm->arch.pkvm.handle));
} else if (kvm->arch.pkvm.handle) {
/*
* The VM could have been reserved but hyp initialization has
* failed. Make sure to unreserve it.
*/
kvm_call_hyp_nvhe(__pkvm_unreserve_vm, kvm->arch.pkvm.handle);
}
kvm->arch.pkvm.handle = 0;
@@ -160,25 +166,16 @@ static int __pkvm_create_hyp_vm(struct kvm *kvm)
goto free_pgd;
}
/* Reserve the VM in hyp and obtain a hyp handle for the VM. */
ret = kvm_call_hyp_nvhe(__pkvm_reserve_vm);
if (ret < 0)
goto free_vm;
kvm->arch.pkvm.handle = ret;
/* Donate the VM memory to hyp and let hyp initialize it. */
ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, hyp_vm, pgd);
if (ret)
goto unreserve_vm;
goto free_vm;
kvm->arch.pkvm.is_created = true;
kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
return 0;
unreserve_vm:
kvm_call_hyp_nvhe(__pkvm_unreserve_vm, kvm->arch.pkvm.handle);
free_vm:
free_pages_exact(hyp_vm, hyp_vm_sz);
free_pgd:
@@ -224,6 +221,22 @@ void pkvm_destroy_hyp_vm(struct kvm *kvm)
/*
 * pkvm_init_host_vm() - Reserve a pKVM hypervisor handle for @kvm as part
 * of initial (host-side) VM setup.
 *
 * Reserving the handle this early — instead of waiting for the first vCPU
 * run — guarantees a valid handle exists before any MMU notifier can fire
 * for this VM's stage-2 page tables.
 *
 * Return: 0 on success or if a handle was already reserved; -EINVAL if the
 * hyp VM has already been fully created; otherwise the negative error code
 * returned by the __pkvm_reserve_vm hypercall.
 */
int pkvm_init_host_vm(struct kvm *kvm)
{
int ret;
/* Full hyp-side creation must not have happened before host init. */
if (pkvm_hyp_vm_is_created(kvm))
return -EINVAL;
/* VM is already reserved, no need to proceed. */
if (kvm->arch.pkvm.handle)
return 0;
/* Reserve the VM in hyp and obtain a hyp handle for the VM. */
ret = kvm_call_hyp_nvhe(__pkvm_reserve_vm);
if (ret < 0)
return ret;
/* Non-negative return is the hyp handle; stash it for later hypercalls. */
kvm->arch.pkvm.handle = ret;
return 0;
}