KVM: arm64: Convert translation level parameter to s8
With the introduction of FEAT_LPA2, the Arm ARM adds a new level of translation, level -1, so levels can now be in the range [-1;3]. 3 is always the last level and the first level is determined based on the number of VA bits in use.

Convert level variables to use a signed type in preparation for supporting this new level -1.

Since the last level is always anchored at 3, and the first level varies to suit the number of VA/IPA bits, take the opportunity to replace KVM_PGTABLE_MAX_LEVELS with the 2 macros KVM_PGTABLE_FIRST_LEVEL and KVM_PGTABLE_LAST_LEVEL. This removes the assumption from the code that levels run from 0 to KVM_PGTABLE_MAX_LEVELS - 1, which will soon no longer be true.

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231127111737.1897081-9-ryan.roberts@arm.com
commit 419edf48d7
parent bd412e2a31
committed by Marc Zyngier
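The level arithmetic described above is easy to see in isolation. The sketch below is illustrative only, not the kernel header (the real macros live in the arch/arm64 headers): it assumes a 4KiB granule with the last level fixed at 3, and shows why a 52-bit VA/IPA space (FEAT_LPA2) pushes the start level to -1, which an unsigned level type cannot represent. It also explains why __kvm_pgtable_walk() now has to check both bounds: with a signed level, a single "level >= max" comparison no longer rejects negative out-of-range values.

	/* Illustrative sketch, not kernel code: start level vs. VA size,
	 * assuming a 4KiB granule (PAGE_SHIFT = 12) and last level = 3. */
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define LAST_LEVEL		3
	/* Same arithmetic as ARM64_HW_PGTABLE_LEVELS() for a 4KiB granule. */
	#define HW_PGTABLE_LEVELS(va)	(((va) - 4) / (PAGE_SHIFT - 3))

	int main(void)
	{
		static const int va_bits[] = { 39, 48, 52 };

		for (unsigned int i = 0; i < sizeof(va_bits) / sizeof(va_bits[0]); i++) {
			int levels = HW_PGTABLE_LEVELS(va_bits[i]);
			int start  = LAST_LEVEL + 1 - levels;

			/* 39 bits -> 3 levels, start 1; 48 -> 4 levels, start 0;
			 * 52 (LPA2) -> 5 levels, start -1: hence the move to s8. */
			printf("%2d VA bits: %d levels, start level %d\n",
			       va_bits[i], levels, start);
		}

		return 0;
	}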
@@ -101,7 +101,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx,
 	return IS_ALIGNED(ctx->addr, granule);
 }
 
-static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
+static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
 {
 	u64 shift = kvm_granule_shift(level);
 	u64 mask = BIT(PAGE_SHIFT - 3) - 1;
@@ -117,7 +117,7 @@ static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
 	return (addr & mask) >> shift;
 }
 
-static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
+static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
 {
 	struct kvm_pgtable pgt = {
 		.ia_bits	= ia_bits,
@@ -127,9 +127,9 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
 	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
 }
 
-static bool kvm_pte_table(kvm_pte_t pte, u32 level)
+static bool kvm_pte_table(kvm_pte_t pte, s8 level)
 {
-	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
+	if (level == KVM_PGTABLE_LAST_LEVEL)
 		return false;
 
 	if (!kvm_pte_valid(pte))
@@ -157,11 +157,11 @@ static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops
 	return pte;
 }
 
-static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
+static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
 {
 	kvm_pte_t pte = kvm_phys_to_pte(pa);
-	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
-						       KVM_PTE_TYPE_BLOCK;
+	u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
+						       KVM_PTE_TYPE_BLOCK;
 
 	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
 	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
@@ -206,11 +206,11 @@ static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
 }
 
 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
-			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);
+			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);
 
 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 				      struct kvm_pgtable_mm_ops *mm_ops,
-				      kvm_pteref_t pteref, u32 level)
+				      kvm_pteref_t pteref, s8 level)
 {
 	enum kvm_pgtable_walk_flags flags = data->walker->flags;
 	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
@@ -275,12 +275,13 @@ out:
 }
 
 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
-			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
+			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
 {
 	u32 idx;
 	int ret = 0;
 
-	if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
+	if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
+			 level > KVM_PGTABLE_LAST_LEVEL))
 		return -EINVAL;
 
 	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
@@ -343,7 +344,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 
 struct leaf_walk_data {
 	kvm_pte_t	pte;
-	u32		level;
+	s8		level;
 };
 
 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
@@ -358,7 +359,7 @@ static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
 }
 
 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
-			 kvm_pte_t *ptep, u32 *level)
+			 kvm_pte_t *ptep, s8 *level)
 {
 	struct leaf_walk_data data;
 	struct kvm_pgtable_walker walker = {
@@ -471,7 +472,7 @@ static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	if (hyp_map_walker_try_leaf(ctx, data))
 		return 0;
 
-	if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
+	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
 		return -EINVAL;
 
 	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
@@ -567,14 +568,19 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
 			 struct kvm_pgtable_mm_ops *mm_ops)
 {
-	u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);
+	s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
+			 ARM64_HW_PGTABLE_LEVELS(va_bits);
+
+	if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
+	    start_level > KVM_PGTABLE_LAST_LEVEL)
+		return -EINVAL;
 
 	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
 	if (!pgt->pgd)
 		return -ENOMEM;
 
 	pgt->ia_bits		= va_bits;
-	pgt->start_level	= KVM_PGTABLE_MAX_LEVELS - levels;
+	pgt->start_level	= start_level;
 	pgt->mm_ops		= mm_ops;
 	pgt->mmu		= NULL;
 	pgt->force_pte_cb	= NULL;
@@ -628,7 +634,7 @@ struct stage2_map_data {
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
 {
 	u64 vtcr = VTCR_EL2_FLAGS;
-	u8 lvls;
+	s8 lvls;
 
 	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
 	vtcr |= VTCR_EL2_T0SZ(phys_shift);
@@ -911,7 +917,7 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 {
 	u64 phys = stage2_map_walker_phys_addr(ctx, data);
 
-	if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
+	if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
 		return false;
 
 	return kvm_block_mapping_supported(ctx, phys);
@@ -990,7 +996,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 	if (ret != -E2BIG)
 		return ret;
 
-	if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
+	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
 		return -EINVAL;
 
 	if (!data->memcache)
@@ -1160,7 +1166,7 @@ struct stage2_attr_data {
 	kvm_pte_t			attr_set;
 	kvm_pte_t			attr_clr;
 	kvm_pte_t			pte;
-	u32				level;
+	s8				level;
 };
 
 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
@@ -1203,7 +1209,7 @@ static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
 				    u64 size, kvm_pte_t attr_set,
 				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
-				    u32 *level, enum kvm_pgtable_walk_flags flags)
+				    s8 *level, enum kvm_pgtable_walk_flags flags)
 {
 	int ret;
 	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
@@ -1305,7 +1311,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 				   enum kvm_pgtable_prot prot)
 {
 	int ret;
-	u32 level;
+	s8 level;
 	kvm_pte_t set = 0, clr = 0;
 
 	if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
@@ -1358,7 +1364,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 }
 
 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
-					      u64 phys, u32 level,
+					      u64 phys, s8 level,
 					      enum kvm_pgtable_prot prot,
 					      void *mc, bool force_pte)
 {
@@ -1416,7 +1422,7 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
  * fully populated tree up to the PTE entries. Note that @level is
  * interpreted as in "level @level entry".
  */
-static int stage2_block_get_nr_page_tables(u32 level)
+static int stage2_block_get_nr_page_tables(s8 level)
 {
 	switch (level) {
 	case 1:
@@ -1427,7 +1433,7 @@ static int stage2_block_get_nr_page_tables(u32 level)
 		return 0;
 	default:
 		WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
-			     level >= KVM_PGTABLE_MAX_LEVELS);
+			     level > KVM_PGTABLE_LAST_LEVEL);
 		return -EINVAL;
 	};
 }
@@ -1440,13 +1446,13 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	struct kvm_s2_mmu *mmu;
 	kvm_pte_t pte = ctx->old, new, *childp;
 	enum kvm_pgtable_prot prot;
-	u32 level = ctx->level;
+	s8 level = ctx->level;
 	bool force_pte;
 	int nr_pages;
 	u64 phys;
 
 	/* No huge-pages exist at the last level */
-	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
+	if (level == KVM_PGTABLE_LAST_LEVEL)
 		return 0;
 
 	/* We only split valid block mappings */
@@ -1523,7 +1529,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 	u64 vtcr = mmu->vtcr;
 	u32 ia_bits = VTCR_EL2_IPA(vtcr);
 	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
-	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
+	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
 
 	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
 	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
@@ -1546,7 +1552,7 @@ size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
 {
 	u32 ia_bits = VTCR_EL2_IPA(vtcr);
 	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
-	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
+	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
 
 	return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
 }
@@ -1582,7 +1588,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 	pgt->pgd = NULL;
 }
 
-void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
 {
 	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
 	struct kvm_pgtable_walker walker = {