mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
RISC-V: KVM: Factor-out MMU related declarations into separate headers
The MMU, TLB, and VMID management for KVM RISC-V already exists as separate sources so create separate headers along these lines. This further simplifies asm/kvm_host.h header. Signed-off-by: Anup Patel <apatel@ventanamicro.com> Reviewed-by: Atish Patra <atishp@rivosinc.com> Tested-by: Atish Patra <atishp@rivosinc.com> Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com> Link: https://lore.kernel.org/r/20250618113532.471448-9-apatel@ventanamicro.com Signed-off-by: Anup Patel <anup@brainfault.org>
This commit is contained in:
@@ -16,6 +16,8 @@
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/kvm_aia.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/kvm_tlb.h>
|
||||
#include <asm/kvm_vmid.h>
|
||||
#include <asm/kvm_vcpu_fp.h>
|
||||
#include <asm/kvm_vcpu_insn.h>
|
||||
#include <asm/kvm_vcpu_sbi.h>
|
||||
@@ -55,24 +57,6 @@
|
||||
BIT(IRQ_VS_TIMER) | \
|
||||
BIT(IRQ_VS_EXT))
|
||||
|
||||
/*
 * NOTE(review): the bare "|" / "||||" lines below are artifacts of the
 * web diff renderer, not part of the original header.
 *
 * Discriminator for a queued remote HFENCE request; selects which of the
 * kvm_riscv_local_hfence_*() flavors the receiving vCPU should execute
 * (presumably matched in the hfence processing path — confirm in vcpu code).
 */
enum kvm_riscv_hfence_type {
|
||||
/* Slot empty / no request pending. */
KVM_RISCV_HFENCE_UNKNOWN = 0,
|
||||
/* HFENCE.GVMA for a guest-physical address range of one VMID. */
KVM_RISCV_HFENCE_GVMA_VMID_GPA,
|
||||
/* HFENCE.VVMA for a guest-virtual range, restricted to one ASID. */
KVM_RISCV_HFENCE_VVMA_ASID_GVA,
|
||||
/* HFENCE.VVMA for all guest-virtual mappings of one ASID. */
KVM_RISCV_HFENCE_VVMA_ASID_ALL,
|
||||
/* HFENCE.VVMA for a guest-virtual range, all ASIDs. */
KVM_RISCV_HFENCE_VVMA_GVA,
|
||||
};
|
||||
|
||||
/*
 * NOTE(review): the bare "|" / "||||" lines below are artifacts of the
 * web diff renderer, not part of the original header.
 *
 * One queued remote HFENCE request (up to KVM_RISCV_VCPU_MAX_HFENCE of
 * these are buffered per vCPU). Which fields are meaningful depends on
 * @type; e.g. @asid is unused for the GVMA_VMID_GPA flavor.
 */
struct kvm_riscv_hfence {
|
||||
/* Which HFENCE flavor to perform (see enum kvm_riscv_hfence_type). */
enum kvm_riscv_hfence_type type;
|
||||
/* Guest ASID for the VVMA_ASID_* flavors. */
unsigned long asid;
|
||||
/* log2 of the flush granule; floor is KVM_RISCV_GSTAGE_TLB_MIN_ORDER (12). */
unsigned long order;
|
||||
/* Start address of the range (guest-physical or guest-virtual per @type). */
gpa_t addr;
|
||||
/* Length of the range in bytes. */
gpa_t size;
|
||||
};
|
||||
|
||||
#define KVM_RISCV_VCPU_MAX_HFENCE 64
|
||||
|
||||
/*
 * Per-VM statistics: RISC-V adds nothing beyond the generic KVM
 * VM counters. ("|" / "||||" lines are web-diff rendering artifacts.)
 */
struct kvm_vm_stat {
|
||||
struct kvm_vm_stat_generic generic;
|
||||
};
|
||||
@@ -98,15 +82,6 @@ struct kvm_vcpu_stat {
|
||||
struct kvm_arch_memory_slot {
|
||||
};
|
||||
|
||||
/*
 * G-stage VMID state embedded in struct kvm_arch. The version number
 * lets readers detect stale VMIDs without taking vmid_lock (see
 * kvm_riscv_gstage_vmid_ver_changed()). ("|" / "||||" lines are
 * web-diff rendering artifacts.)
 */
struct kvm_vmid {
|
||||
/*
|
||||
* Writes to vmid_version and vmid happen with vmid_lock held
|
||||
* whereas reads happen without any lock held.
|
||||
*/
|
||||
/* Generation counter bumped when the VMID space is recycled. */
unsigned long vmid_version;
|
||||
/* Hardware VMID currently assigned to this guest. */
unsigned long vmid;
|
||||
};
|
||||
|
||||
struct kvm_arch {
|
||||
/* G-stage vmid */
|
||||
struct kvm_vmid vmid;
|
||||
@@ -310,77 +285,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
|
||||
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
|
||||
|
||||
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
|
||||
gpa_t gpa, gpa_t gpsz,
|
||||
unsigned long order);
|
||||
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
|
||||
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
|
||||
unsigned long order);
|
||||
void kvm_riscv_local_hfence_gvma_all(void);
|
||||
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
|
||||
unsigned long asid,
|
||||
unsigned long gva,
|
||||
unsigned long gvsz,
|
||||
unsigned long order);
|
||||
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
|
||||
unsigned long asid);
|
||||
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
|
||||
unsigned long gva, unsigned long gvsz,
|
||||
unsigned long order);
|
||||
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
|
||||
|
||||
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);
|
||||
|
||||
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
|
||||
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
|
||||
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
|
||||
|
||||
void kvm_riscv_fence_i(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask);
|
||||
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask,
|
||||
gpa_t gpa, gpa_t gpsz,
|
||||
unsigned long order);
|
||||
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask);
|
||||
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask,
|
||||
unsigned long gva, unsigned long gvsz,
|
||||
unsigned long order, unsigned long asid);
|
||||
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask,
|
||||
unsigned long asid);
|
||||
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask,
|
||||
unsigned long gva, unsigned long gvsz,
|
||||
unsigned long order);
|
||||
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
|
||||
unsigned long hbase, unsigned long hmask);
|
||||
|
||||
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
|
||||
phys_addr_t hpa, unsigned long size,
|
||||
bool writable, bool in_atomic);
|
||||
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
|
||||
unsigned long size);
|
||||
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
|
||||
struct kvm_memory_slot *memslot,
|
||||
gpa_t gpa, unsigned long hva, bool is_write);
|
||||
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
|
||||
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
|
||||
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
|
||||
void __init kvm_riscv_gstage_mode_detect(void);
|
||||
unsigned long __init kvm_riscv_gstage_mode(void);
|
||||
int kvm_riscv_gstage_gpa_bits(void);
|
||||
|
||||
void __init kvm_riscv_gstage_vmid_detect(void);
|
||||
unsigned long kvm_riscv_gstage_vmid_bits(void);
|
||||
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
|
||||
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
|
||||
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
|
||||
void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu);
|
||||
|
||||
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
|
||||
|
||||
void __kvm_riscv_unpriv_trap(void);
|
||||
|
||||
Reference in New Issue
Block a user