mirror of
https://github.com/torvalds/linux.git
synced 2026-05-04 06:22:40 -04:00
* arm64/for-next/perf: perf: arm_spe: Print the version of SPE detected perf: arm_spe: Add support for SPEv1.2 inverted event filtering perf: Add perf_event_attr::config3 drivers/perf: fsl_imx8_ddr_perf: Remove set-but-not-used variable perf: arm_spe: Support new SPEv1.2/v8.7 'not taken' event perf: arm_spe: Use new PMSIDR_EL1 register enums perf: arm_spe: Drop BIT() and use FIELD_GET/PREP accessors arm64/sysreg: Convert SPE registers to automatic generation arm64: Drop SYS_ from SPE register defines perf: arm_spe: Use feature numbering for PMSEVFR_EL1 defines perf/marvell: Add ACPI support to TAD uncore driver perf/marvell: Add ACPI support to DDR uncore driver perf/arm-cmn: Reset DTM_PMU_CONFIG at probe drivers/perf: hisi: Extract initialization of "cpa_pmu->pmu" drivers/perf: hisi: Simplify the parameters of hisi_pmu_init() drivers/perf: hisi: Advertise the PERF_PMU_CAP_NO_EXCLUDE capability * for-next/sysreg: : arm64 sysreg and cpufeature fixes/updates KVM: arm64: Use symbolic definition for ISR_EL1.A arm64/sysreg: Add definition of ISR_EL1 arm64/sysreg: Add definition for ICC_NMIAR1_EL1 arm64/cpufeature: Remove 4 bit assumption in ARM64_FEATURE_MASK() arm64/sysreg: Fix errors in 32 bit enumeration values arm64/cpufeature: Fix field sign for DIT hwcap detection * for-next/sme: : SME-related updates arm64/sme: Optimise SME exit on syscall entry arm64/sme: Don't use streaming mode to probe the maximum SME VL arm64/ptrace: Use system_supports_tpidr2() to check for TPIDR2 support * for-next/kselftest: (23 commits) : arm64 kselftest fixes and improvements kselftest/arm64: Don't require FA64 for streaming SVE+ZA tests kselftest/arm64: Copy whole EXTRA context kselftest/arm64: Fix enumeration of systems without 128 bit SME for SSVE+ZA kselftest/arm64: Fix enumeration of systems without 128 bit SME kselftest/arm64: Don't require FA64 for streaming SVE tests kselftest/arm64: Limit the maximum VL we try to set via ptrace kselftest/arm64: Correct buffer size for SME ZA 
storage kselftest/arm64: Remove the local NUM_VL definition kselftest/arm64: Verify simultaneous SSVE and ZA context generation kselftest/arm64: Verify that SSVE signal context has SVE_SIG_FLAG_SM set kselftest/arm64: Remove spurious comment from MTE test Makefile kselftest/arm64: Support build of MTE tests with clang kselftest/arm64: Initialise current at build time in signal tests kselftest/arm64: Don't pass headers to the compiler as source kselftest/arm64: Remove redundant _start labels from FP tests kselftest/arm64: Fix .pushsection for strings in FP tests kselftest/arm64: Run BTI selftests on systems without BTI kselftest/arm64: Fix test numbering when skipping tests kselftest/arm64: Skip non-power of 2 SVE vector lengths in fp-stress kselftest/arm64: Only enumerate power of two VLs in syscall-abi ... * for-next/misc: : Miscellaneous arm64 updates arm64/mm: Intercept pfn changes in set_pte_at() Documentation: arm64: correct spelling arm64: traps: attempt to dump all instructions arm64: Apply dynamic shadow call stack patching in two passes arm64: el2_setup.h: fix spelling typo in comments arm64: Kconfig: fix spelling arm64: cpufeature: Use kstrtobool() instead of strtobool() arm64: Avoid repeated AA64MMFR1_EL1 register read on pagefault path arm64: make ARCH_FORCE_MAX_ORDER selectable * for-next/sme2: (23 commits) : Support for arm64 SME 2 and 2.1 arm64/sme: Fix __finalise_el2 SMEver check kselftest/arm64: Remove redundant _start labels from zt-test kselftest/arm64: Add coverage of SME 2 and 2.1 hwcaps kselftest/arm64: Add coverage of the ZT ptrace regset kselftest/arm64: Add SME2 coverage to syscall-abi kselftest/arm64: Add test coverage for ZT register signal frames kselftest/arm64: Teach the generic signal context validation about ZT kselftest/arm64: Enumerate SME2 in the signal test utility code kselftest/arm64: Cover ZT in the FP stress test kselftest/arm64: Add a stress test program for ZT0 arm64/sme: Add hwcaps for SME 2 and 2.1 features arm64/sme: 
Implement ZT0 ptrace support arm64/sme: Implement signal handling for ZT arm64/sme: Implement context switching for ZT0 arm64/sme: Provide storage for ZT0 arm64/sme: Add basic enumeration for SME2 arm64/sme: Enable host kernel to access ZT0 arm64/sme: Manually encode ZT0 load and store instructions arm64/esr: Document ISS for ZT0 being disabled arm64/sme: Document SME 2 and SME 2.1 ABI ... * for-next/tpidr2: : Include TPIDR2 in the signal context kselftest/arm64: Add test case for TPIDR2 signal frame records kselftest/arm64: Add TPIDR2 to the set of known signal context records arm64/signal: Include TPIDR2 in the signal context arm64/sme: Document ABI for TPIDR2 signal information * for-next/scs: : arm64: harden shadow call stack pointer handling arm64: Stash shadow stack pointer in the task struct on interrupt arm64: Always load shadow stack pointer directly from the task struct * for-next/compat-hwcap: : arm64: Expose compat ARMv8 AArch32 features (HWCAPs) arm64: Add compat hwcap SSBS arm64: Add compat hwcap SB arm64: Add compat hwcap I8MM arm64: Add compat hwcap ASIMDBF16 arm64: Add compat hwcap ASIMDFHM arm64: Add compat hwcap ASIMDDP arm64: Add compat hwcap FPHP and ASIMDHP * for-next/ftrace: : Add arm64 support for DYNAMICE_FTRACE_WITH_CALL_OPS arm64: avoid executing padding bytes during kexec / hibernation arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS arm64: ftrace: Update stale comment arm64: patching: Add aarch64_insn_write_literal_u64() arm64: insn: Add helpers for BTI arm64: Extend support for CONFIG_FUNCTION_ALIGNMENT ACPI: Don't build ACPICA with '-Os' Compiler attributes: GCC cold function alignment workarounds ftrace: Add DYNAMIC_FTRACE_WITH_CALL_OPS * for-next/efi-boot-mmu-on: : Permit arm64 EFI boot with MMU and caches on arm64: kprobes: Drop ID map text from kprobes blacklist arm64: head: Switch endianness before populating the ID map efi: arm64: enter with MMU and caches enabled arm64: head: Clean the ID map and the HYP text to the PoC if 
needed arm64: head: avoid cache invalidation when entering with the MMU on arm64: head: record the MMU state at primary entry arm64: kernel: move identity map out of .text mapping arm64: head: Move all finalise_el2 calls to after __enable_mmu * for-next/ptrauth: : arm64 pointer authentication cleanup arm64: pauth: don't sign leaf functions arm64: unify asm-arch manipulation * for-next/pseudo-nmi: : Pseudo-NMI code generation optimisations arm64: irqflags: use alternative branches for pseudo-NMI logic arm64: add ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap arm64: make ARM64_HAS_GIC_PRIO_MASKING depend on ARM64_HAS_GIC_CPUIF_SYSREGS arm64: rename ARM64_HAS_IRQ_PRIO_MASKING to ARM64_HAS_GIC_PRIO_MASKING arm64: rename ARM64_HAS_SYSREG_GIC_CPUIF to ARM64_HAS_GIC_CPUIF_SYSREGS
409 lines
10 KiB
C
409 lines
10 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright (C) 2019 ARM Limited */
|
|
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <signal.h>
|
|
#include <string.h>
|
|
#include <unistd.h>
|
|
#include <assert.h>
|
|
#include <sys/auxv.h>
|
|
#include <linux/auxvec.h>
|
|
#include <ucontext.h>
|
|
|
|
#include <asm/unistd.h>
|
|
|
|
#include <kselftest.h>
|
|
|
|
#include "test_signals.h"
|
|
#include "test_signals_utils.h"
|
|
#include "testcases/testcases.h"
|
|
|
|
|
|
extern struct tdescr *current;
|
|
|
|
static int sig_copyctx = SIGTRAP;
|
|
|
|
/*
 * Printable names for the optional CPU features tracked by the FEAT_*
 * bitmasks; indexed by the same bit positions used in td->feats_required /
 * td->feats_supported (see feats_to_string()'s 1UL << i lookup).
 */
static char const *const feats_names[FMAX_END] = {
	" SSBS ",
	" SVE ",
	" SME ",
	" FA64 ",
	" SME2 ",
};
|
|
|
|
/* Upper bound (including NUL) for the string built by feats_to_string() */
#define MAX_FEATS_SZ 128

/* Scratch buffer reused by every feats_to_string() call — not reentrant */
static char feats_string[MAX_FEATS_SZ];
|
|
|
|
static inline char *feats_to_string(unsigned long feats)
|
|
{
|
|
size_t flen = MAX_FEATS_SZ - 1;
|
|
|
|
feats_string[0] = '\0';
|
|
|
|
for (int i = 0; i < FMAX_END; i++) {
|
|
if (feats & (1UL << i)) {
|
|
size_t tlen = strlen(feats_names[i]);
|
|
|
|
assert(flen > tlen);
|
|
flen -= tlen;
|
|
strncat(feats_string, feats_names[i], flen);
|
|
}
|
|
}
|
|
|
|
return feats_string;
|
|
}
|
|
|
|
static void unblock_signal(int signum)
|
|
{
|
|
sigset_t sset;
|
|
|
|
sigemptyset(&sset);
|
|
sigaddset(&sset, signum);
|
|
sigprocmask(SIG_UNBLOCK, &sset, NULL);
|
|
}
|
|
|
|
static void default_result(struct tdescr *td, bool force_exit)
|
|
{
|
|
if (td->result == KSFT_SKIP) {
|
|
fprintf(stderr, "==>> completed. SKIP.\n");
|
|
} else if (td->pass) {
|
|
fprintf(stderr, "==>> completed. PASS(1)\n");
|
|
td->result = KSFT_PASS;
|
|
} else {
|
|
fprintf(stdout, "==>> completed. FAIL(0)\n");
|
|
td->result = KSFT_FAIL;
|
|
}
|
|
|
|
if (force_exit)
|
|
exit(td->result);
|
|
}
|
|
|
|
/*
|
|
* The following handle_signal_* helpers are used by main default_handler
|
|
* and are meant to return true when signal is handled successfully:
|
|
* when false is returned instead, it means that the signal was somehow
|
|
* unexpected in that context and it was NOT handled; default_handler will
|
|
* take care of such unexpected situations.
|
|
*/
|
|
|
|
static bool handle_signal_unsupported(struct tdescr *td,
|
|
siginfo_t *si, void *uc)
|
|
{
|
|
if (feats_ok(td))
|
|
return false;
|
|
|
|
/* Mangling PC to avoid loops on original SIGILL */
|
|
((ucontext_t *)uc)->uc_mcontext.pc += 4;
|
|
|
|
if (!td->initialized) {
|
|
fprintf(stderr,
|
|
"Got SIG_UNSUPP @test_init. Ignore.\n");
|
|
} else {
|
|
fprintf(stderr,
|
|
"-- RX SIG_UNSUPP on unsupported feat...OK\n");
|
|
td->pass = 1;
|
|
default_result(current, 1);
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
static bool handle_signal_trigger(struct tdescr *td,
|
|
siginfo_t *si, void *uc)
|
|
{
|
|
td->triggered = 1;
|
|
/* ->run was asserted NON-NULL in test_setup() already */
|
|
td->run(td, si, uc);
|
|
|
|
return true;
|
|
}
|
|
|
|
static bool handle_signal_ok(struct tdescr *td,
|
|
siginfo_t *si, void *uc)
|
|
{
|
|
/*
|
|
* it's a bug in the test code when this assert fail:
|
|
* if sig_trig was defined, it must have been used before getting here.
|
|
*/
|
|
assert(!td->sig_trig || td->triggered);
|
|
fprintf(stderr,
|
|
"SIG_OK -- SP:0x%llX si_addr@:%p si_code:%d token@:%p offset:%ld\n",
|
|
((ucontext_t *)uc)->uc_mcontext.sp,
|
|
si->si_addr, si->si_code, td->token, td->token - si->si_addr);
|
|
/*
|
|
* fake_sigreturn tests, which have sanity_enabled=1, set, at the very
|
|
* last time, the token field to the SP address used to place the fake
|
|
* sigframe: so token==0 means we never made it to the end,
|
|
* segfaulting well-before, and the test is possibly broken.
|
|
*/
|
|
if (!td->sanity_disabled && !td->token) {
|
|
fprintf(stdout,
|
|
"current->token ZEROED...test is probably broken!\n");
|
|
abort();
|
|
}
|
|
/*
|
|
* Trying to narrow down the SEGV to the ones generated by Kernel itself
|
|
* via arm64_notify_segfault(). This is a best-effort check anyway, and
|
|
* the si_code check may need to change if this aspect of the kernel
|
|
* ABI changes.
|
|
*/
|
|
if (td->sig_ok == SIGSEGV && si->si_code != SEGV_ACCERR) {
|
|
fprintf(stdout,
|
|
"si_code != SEGV_ACCERR...test is probably broken!\n");
|
|
abort();
|
|
}
|
|
td->pass = 1;
|
|
/*
|
|
* Some tests can lead to SEGV loops: in such a case we want to
|
|
* terminate immediately exiting straight away; some others are not
|
|
* supposed to outlive the signal handler code, due to the content of
|
|
* the fake sigframe which caused the signal itself.
|
|
*/
|
|
default_result(current, 1);
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Grab a copy of the full live signal context — including any
 * EXTRA_CONTEXT payload — into td->live_uc so the test can inspect it
 * after the handler returns.  Returns true on success, false when the
 * caller-provided buffer (td->live_sz bytes) is too small.
 */
static bool handle_signal_copyctx(struct tdescr *td,
				  siginfo_t *si, void *uc_in)
{
	ucontext_t *uc = uc_in;
	struct _aarch64_ctx *head;
	struct extra_context *extra, *copied_extra;
	size_t offset = 0;
	size_t to_copy;

	ASSERT_GOOD_CONTEXT(uc);

	/* Mangling PC to avoid loops on original BRK instr */
	uc->uc_mcontext.pc += 4;

	/*
	 * Check for and preserve any EXTRA_CONTEXT data too, with fixups.
	 */
	head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
	head = get_header(head, EXTRA_MAGIC, td->live_sz, &offset);
	if (head) {
		extra = (struct extra_context *)head;

		/*
		 * The extra buffer must be immediately after the
		 * extra_context and a 16 byte terminator. Include it
		 * in the copy, this was previously validated in
		 * ASSERT_GOOD_CONTEXT().
		 */
		to_copy = __builtin_offsetof(ucontext_t,
					     uc_mcontext.__reserved);
		to_copy += offset + sizeof(struct extra_context) + 16;
		to_copy += extra->size;
		/* Where the extra_context record lands inside the copy */
		copied_extra = (struct extra_context *)&(td->live_uc->uc_mcontext.__reserved[offset]);
	} else {
		copied_extra = NULL;
		to_copy = sizeof(ucontext_t);
	}

	if (to_copy > td->live_sz) {
		fprintf(stderr,
			"Not enough space to grab context, %lu/%lu bytes\n",
			td->live_sz, to_copy);
		return false;
	}

	memcpy(td->live_uc, uc, to_copy);

	/*
	 * If there was any EXTRA_CONTEXT fix up the size to be the
	 * struct extra_context and the following terminator record,
	 * this means that the rest of the code does not need to have
	 * special handling for the record and we don't need to fix up
	 * datap for the new location.
	 */
	if (copied_extra)
		copied_extra->head.size = sizeof(*copied_extra) + 16;

	td->live_uc_valid = 1;
	fprintf(stderr,
		"%lu byte GOOD CONTEXT grabbed from sig_copyctx handler\n",
		to_copy);

	return true;
}
|
|
|
|
/*
 * Master signal handler installed by default_setup(): dispatch the
 * incoming signal to the matching handle_signal_* helper, in priority
 * order.  Any signal nobody claims (including the SIGALRM timeout) is
 * reported and terminates the test via default_result().
 */
static void default_handler(int signum, siginfo_t *si, void *uc)
{
	if (current->sig_unsupp && signum == current->sig_unsupp &&
	    handle_signal_unsupported(current, si, uc)) {
		fprintf(stderr, "Handled SIG_UNSUPP\n");
		return;
	}

	if (current->sig_trig && signum == current->sig_trig &&
	    handle_signal_trigger(current, si, uc)) {
		fprintf(stderr, "Handled SIG_TRIG\n");
		return;
	}

	if (current->sig_ok && signum == current->sig_ok &&
	    handle_signal_ok(current, si, uc)) {
		fprintf(stderr, "Handled SIG_OK\n");
		return;
	}

	if (signum == sig_copyctx && current->live_uc &&
	    handle_signal_copyctx(current, si, uc)) {
		fprintf(stderr, "Handled SIG_COPYCTX\n");
		return;
	}

	if (signum == SIGALRM && current->timeout)
		fprintf(stderr, "-- Timeout !\n");
	else
		fprintf(stderr,
			"-- RX UNEXPECTED SIGNAL: %d\n", signum);
	default_result(current, 1);
}
|
|
|
|
static int default_setup(struct tdescr *td)
|
|
{
|
|
struct sigaction sa;
|
|
|
|
sa.sa_sigaction = default_handler;
|
|
sa.sa_flags = SA_SIGINFO | SA_RESTART;
|
|
sa.sa_flags |= td->sa_flags;
|
|
sigemptyset(&sa.sa_mask);
|
|
/* uncatchable signals naturally skipped ... */
|
|
for (int sig = 1; sig < 32; sig++)
|
|
sigaction(sig, &sa, NULL);
|
|
/*
|
|
* RT Signals default disposition is Term but they cannot be
|
|
* generated by the Kernel in response to our tests; so just catch
|
|
* them all and report them as UNEXPECTED signals.
|
|
*/
|
|
for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++)
|
|
sigaction(sig, &sa, NULL);
|
|
|
|
/* just in case...unblock explicitly all we need */
|
|
if (td->sig_trig)
|
|
unblock_signal(td->sig_trig);
|
|
if (td->sig_ok)
|
|
unblock_signal(td->sig_ok);
|
|
if (td->sig_unsupp)
|
|
unblock_signal(td->sig_unsupp);
|
|
|
|
if (td->timeout) {
|
|
unblock_signal(SIGALRM);
|
|
alarm(td->timeout);
|
|
}
|
|
fprintf(stderr, "Registered handlers for all signals.\n");
|
|
|
|
return 1;
|
|
}
|
|
|
|
static inline int default_trigger(struct tdescr *td)
|
|
{
|
|
return !raise(td->sig_trig);
|
|
}
|
|
|
|
int test_init(struct tdescr *td)
|
|
{
|
|
if (td->sig_trig == sig_copyctx) {
|
|
fprintf(stdout,
|
|
"Signal %d is RESERVED, cannot be used as a trigger. Aborting\n",
|
|
sig_copyctx);
|
|
return 0;
|
|
}
|
|
/* just in case */
|
|
unblock_signal(sig_copyctx);
|
|
|
|
td->minsigstksz = getauxval(AT_MINSIGSTKSZ);
|
|
if (!td->minsigstksz)
|
|
td->minsigstksz = MINSIGSTKSZ;
|
|
fprintf(stderr, "Detected MINSTKSIGSZ:%d\n", td->minsigstksz);
|
|
|
|
if (td->feats_required || td->feats_incompatible) {
|
|
td->feats_supported = 0;
|
|
/*
|
|
* Checking for CPU required features using both the
|
|
* auxval and the arm64 MRS Emulation to read sysregs.
|
|
*/
|
|
if (getauxval(AT_HWCAP) & HWCAP_SSBS)
|
|
td->feats_supported |= FEAT_SSBS;
|
|
if (getauxval(AT_HWCAP) & HWCAP_SVE)
|
|
td->feats_supported |= FEAT_SVE;
|
|
if (getauxval(AT_HWCAP2) & HWCAP2_SME)
|
|
td->feats_supported |= FEAT_SME;
|
|
if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
|
|
td->feats_supported |= FEAT_SME_FA64;
|
|
if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
|
|
td->feats_supported |= FEAT_SME2;
|
|
if (feats_ok(td)) {
|
|
if (td->feats_required & td->feats_supported)
|
|
fprintf(stderr,
|
|
"Required Features: [%s] supported\n",
|
|
feats_to_string(td->feats_required &
|
|
td->feats_supported));
|
|
if (!(td->feats_incompatible & td->feats_supported))
|
|
fprintf(stderr,
|
|
"Incompatible Features: [%s] absent\n",
|
|
feats_to_string(td->feats_incompatible));
|
|
} else {
|
|
if ((td->feats_required & td->feats_supported) !=
|
|
td->feats_supported)
|
|
fprintf(stderr,
|
|
"Required Features: [%s] NOT supported\n",
|
|
feats_to_string(td->feats_required &
|
|
~td->feats_supported));
|
|
if (td->feats_incompatible & td->feats_supported)
|
|
fprintf(stderr,
|
|
"Incompatible Features: [%s] supported\n",
|
|
feats_to_string(td->feats_incompatible &
|
|
~td->feats_supported));
|
|
|
|
|
|
td->result = KSFT_SKIP;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
/* Perform test specific additional initialization */
|
|
if (td->init && !td->init(td)) {
|
|
fprintf(stderr, "FAILED Testcase initialization.\n");
|
|
return 0;
|
|
}
|
|
td->initialized = 1;
|
|
fprintf(stderr, "Testcase initialized.\n");
|
|
|
|
return 1;
|
|
}
|
|
|
|
int test_setup(struct tdescr *td)
|
|
{
|
|
/* assert core invariants symptom of a rotten testcase */
|
|
assert(current);
|
|
assert(td);
|
|
assert(td->name);
|
|
assert(td->run);
|
|
|
|
/* Default result is FAIL if test setup fails */
|
|
td->result = KSFT_FAIL;
|
|
if (td->setup)
|
|
return td->setup(td);
|
|
else
|
|
return default_setup(td);
|
|
}
|
|
|
|
int test_run(struct tdescr *td)
|
|
{
|
|
if (td->trigger)
|
|
return td->trigger(td);
|
|
else if (td->sig_trig)
|
|
return default_trigger(td);
|
|
else
|
|
return td->run(td, NULL, NULL);
|
|
}
|
|
|
|
void test_result(struct tdescr *td)
|
|
{
|
|
if (td->initialized && td->result != KSFT_SKIP && td->check_result)
|
|
td->check_result(td);
|
|
default_result(td, 0);
|
|
}
|
|
|
|
void test_cleanup(struct tdescr *td)
|
|
{
|
|
if (td->cleanup)
|
|
td->cleanup(td);
|
|
}
|