mirror of
https://github.com/torvalds/linux.git
synced 2026-04-24 17:42:27 -04:00
In arch_tlbbatch_should_defer() we use cpus_have_const_cap() to check for ARM64_WORKAROUND_REPEAT_TLBI, but this is not necessary and alternative_has_cap_*() would be preferable. For historical reasons, cpus_have_const_cap() is more complicated than it needs to be. Before cpucaps are finalized, it will perform a bitmap test of the system_cpucaps bitmap, and once cpucaps are finalized it will use an alternative branch. This used to be necessary to handle some race conditions in the window between cpucap detection and the subsequent patching of alternatives and static branches, where different branches could be out-of-sync with one another (or w.r.t. alternative sequences). Now that we use alternative branches instead of static branches, these are all patched atomically w.r.t. one another, and there are only a handful of cases that need special care in the window between cpucap detection and alternative patching. Due to the above, it would be nice to remove cpus_have_const_cap(), and migrate callers over to alternative_has_cap_*(), cpus_have_final_cap(), or cpus_have_cap() depending on their requirements. This will remove redundant instructions and improve code generation, and will make it easier to determine how each callsite will behave before, during, and after alternative patching. The cpus_have_const_cap() check in arch_tlbbatch_should_defer() is an optimization to avoid some redundant work when the ARM64_WORKAROUND_REPEAT_TLBI cpucap is detected and forces the immediate use of TLBI + DSB ISH. In the window between detecting the ARM64_WORKAROUND_REPEAT_TLBI cpucap and patching alternatives this is not a big concern and there's no need to optimize this window at the expense of subsequent usage at runtime. This patch replaces the use of cpus_have_const_cap() with alternative_has_cap_unlikely(), which will avoid generating code to test the system_cpucaps bitmap and should be better for all subsequent calls at runtime.
The ARM64_WORKAROUND_REPEAT_TLBI cpucap is added to cpucap_is_possible() so that code can be elided entirely when this is not possible without requiring ifdeffery or IS_ENABLED() checks at each usage. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Suzuki K Poulose <suzuki.poulose@arm.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
68 lines
1.9 KiB
C
68 lines
1.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CPUCAPS_H
#define __ASM_CPUCAPS_H

#include <asm/cpucap-defs.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
 * Check whether a cpucap is possible at compiletime.
 *
 * Returns false when the cpucap is gated behind a Kconfig option that is
 * disabled, allowing callers (and dead-code elimination) to elide code for
 * impossible cpucaps without ifdeffery or per-callsite IS_ENABLED() checks.
 * Any cpucap not listed below is always considered possible.
 *
 * @cap must be a compile-time constant below ARM64_NCAPS; both conditions
 * are enforced with compiletime_assert().
 */
static __always_inline bool
cpucap_is_possible(const unsigned int cap)
{
	compiletime_assert(__builtin_constant_p(cap),
			   "cap must be a constant");
	compiletime_assert(cap < ARM64_NCAPS,
			   "cap must be < ARM64_NCAPS");

	switch (cap) {
	case ARM64_HAS_PAN:
		return IS_ENABLED(CONFIG_ARM64_PAN);
	case ARM64_HAS_EPAN:
		return IS_ENABLED(CONFIG_ARM64_EPAN);
	case ARM64_SVE:
		return IS_ENABLED(CONFIG_ARM64_SVE);
	case ARM64_SME:
	case ARM64_SME2:
	case ARM64_SME_FA64:
		return IS_ENABLED(CONFIG_ARM64_SME);
	case ARM64_HAS_CNP:
		return IS_ENABLED(CONFIG_ARM64_CNP);
	case ARM64_HAS_ADDRESS_AUTH:
	case ARM64_HAS_GENERIC_AUTH:
		return IS_ENABLED(CONFIG_ARM64_PTR_AUTH);
	case ARM64_HAS_GIC_PRIO_MASKING:
		return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI);
	case ARM64_MTE:
		return IS_ENABLED(CONFIG_ARM64_MTE);
	case ARM64_BTI:
		return IS_ENABLED(CONFIG_ARM64_BTI);
	case ARM64_HAS_TLB_RANGE:
		return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
	case ARM64_UNMAP_KERNEL_AT_EL0:
		return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
	case ARM64_WORKAROUND_843419:
		return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419);
	case ARM64_WORKAROUND_1742098:
		return IS_ENABLED(CONFIG_ARM64_ERRATUM_1742098);
	case ARM64_WORKAROUND_2645198:
		return IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198);
	case ARM64_WORKAROUND_2658417:
		return IS_ENABLED(CONFIG_ARM64_ERRATUM_2658417);
	case ARM64_WORKAROUND_CAVIUM_23154:
		return IS_ENABLED(CONFIG_CAVIUM_ERRATUM_23154);
	case ARM64_WORKAROUND_NVIDIA_CARMEL_CNP:
		return IS_ENABLED(CONFIG_NVIDIA_CARMEL_CNP_ERRATUM);
	case ARM64_WORKAROUND_REPEAT_TLBI:
		return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
	}

	/* Not gated by any Kconfig option: always possible. */
	return true;
}
#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUCAPS_H */
|