* kvm-arm64/pkvm-protected-guest: (41 commits)
  : .
  : pKVM support for protected guests, implementing the very
  : long-awaited support for anonymous memory, as the elusive guestmem
  : has failed to deliver on its promises despite a multi-year
  : effort. Patches courtesy of Will Deacon. From the initial cover
  : letter:
  :
  : "[...] this patch series implements support for protected guest
  : memory with pKVM, where pages are unmapped from the host as they are
  : faulted into the guest and can be shared back from the guest using pKVM
  : hypercalls. Protected guests are created using a new machine type
  : identifier and can be booted to a shell using the kvmtool patches
  : available at [2], which finally means that we are able to test the pVM
  : logic in pKVM. Since this is an incremental step towards full isolation
  : from the host (for example, the CPU register state and DMA accesses are
  : not yet isolated), creating a pVM requires a developer Kconfig option to
  : be enabled in addition to booting with 'kvm-arm.mode=protected' and
  : results in a kernel taint."
  : .
  KVM: arm64: Don't hold 'vm_table_lock' across guest page reclaim
  KVM: arm64: Allow get_pkvm_hyp_vm() to take a reference to a dying VM
  KVM: arm64: Prevent teardown finalisation of referenced 'hyp_vm'
  drivers/virt: pkvm: Add Kconfig dependency on DMA_RESTRICTED_POOL
  KVM: arm64: Rename PKVM_PAGE_STATE_MASK
  KVM: arm64: Extend pKVM page ownership selftests to cover guest hvcs
  KVM: arm64: Extend pKVM page ownership selftests to cover forced reclaim
  KVM: arm64: Register 'selftest_vm' in the VM table
  KVM: arm64: Extend pKVM page ownership selftests to cover guest donation
  KVM: arm64: Add some initial documentation for pKVM
  KVM: arm64: Allow userspace to create protected VMs when pKVM is enabled
  KVM: arm64: Implement the MEM_UNSHARE hypercall for protected VMs
  KVM: arm64: Implement the MEM_SHARE hypercall for protected VMs
  KVM: arm64: Add hvc handler at EL2 for hypercalls from protected VMs
  KVM: arm64: Return -EFAULT from VCPU_RUN on access to a poisoned pte
  KVM: arm64: Reclaim faulting page from pKVM in spurious fault handler
  KVM: arm64: Introduce hypercall to force reclaim of a protected page
  KVM: arm64: Annotate guest donations with handle and gfn in host stage-2
  KVM: arm64: Change 'pkvm_handle_t' to u16
  KVM: arm64: Introduce host_stage2_set_owner_metadata_locked()
  ...

Signed-off-by: Marc Zyngier <maz@kernel.org>
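As a rough illustration of the guest side of the flow in the cover letter, here is a minimal sketch of a protected guest sharing a page back with the host over SMCCC. The ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE/MEM_UNSHARE function IDs and the single-IPA calling convention are assumptions about the guest-facing pKVM interface, not taken from this file:

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>

/*
 * Minimal sketch: share/unshare one page of guest memory with the host.
 * Assumes the pKVM vendor-hyp MEM_SHARE/MEM_UNSHARE calls take the
 * page's IPA in the first argument register; the function-ID macros
 * used below are assumptions about the guest-facing interface.
 */
static int pkvm_guest_share_page(void *page)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID,
			     virt_to_phys(page), &res);
	return res.a0 == SMCCC_RET_SUCCESS ? 0 : -EPERM;
}

static int pkvm_guest_unshare_page(void *page)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID,
			     virt_to_phys(page), &res);
	return res.a0 == SMCCC_RET_SUCCESS ? 0 : -EPERM;
}

A guest driver would share a buffer before posting it to a host-side device and unshare it once the transfer completes, matching the "shared back from the guest using pKVM hypercalls" step described above.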
83 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

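/*
 * The host's stage-2 MMU state, owned by the hypervisor at EL2.
 * 'lock' serialises updates to the host's stage-2 page-table.
 */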
struct host_mmu {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_mmu host_mmu;

/* This corresponds to page-table locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
	PKVM_ID_GUEST,
};

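/*
 * EL2 entry points for finalising host protection and for transitioning
 * page ownership and sharing state between the components above.
 * 'pfn' is a host page frame number, 'gfn' a guest frame number.
 */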
int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_vcpu_in_poison_fault(struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_host_force_reclaim_page_guest(phys_addr_t phys);
int __pkvm_host_reclaim_page_guest(u64 gfn, struct pkvm_hyp_vm *vm);
int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot);
int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm);
int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);

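/* Host and guest stage-2 construction, maintenance and fault handling. */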
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int kvm_host_prepare_stage2(void *pgt_pool_base);
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc);

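/*
 * Re-install the host's stage-2 context on this CPU. Before protected
 * mode is fully initialised there is no host stage-2 to load, so
 * VTTBR_EL2 is simply zeroed.
 */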
static __always_inline void __load_host_stage2(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
	else
		write_sysreg(0, vttbr_el2);
}

#ifdef CONFIG_NVHE_EL2_DEBUG
void pkvm_ownership_selftest(void *base);
struct pkvm_hyp_vcpu *init_selftest_vm(void *virt);
void teardown_selftest_vm(void);
#else
static inline void pkvm_ownership_selftest(void *base) { }
#endif

#endif /* __KVM_NVHE_MEM_PROTECT__ */
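For context, the EL2 entry points declared above are reached from the host via the nVHE hypercall dispatcher, which unpacks arguments from the trapped host CPU context and writes the return value back into it. A minimal sketch of such a handler for __pkvm_host_share_hyp(), assuming DECLARE_REG()/cpu_reg() register-access helpers in the style of the nVHE hyp code:

/*
 * Hedged sketch of an EL2 hypercall handler: pull the PFN out of the
 * host's x1, perform the share, and return the result in x1.
 * DECLARE_REG() and cpu_reg() are assumed to read and write the
 * trapped host's general-purpose registers.
 */
static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}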