mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
The APIs that allow backtracing across CPUs have always had a way to exclude the current CPU. This convenience means callers didn't need to find a place to allocate a CPU mask just to handle the common case. Let's extend the API to take a CPU ID to exclude instead of just a boolean. This isn't any more complex for the API to handle and allows the hardlockup detector to exclude a different CPU (the one it already did a trace for) without needing to find space for a CPU mask. Arguably, this new API also encourages safer behavior. Specifically if the caller wants to avoid tracing the current CPU (maybe because they already traced the current CPU) this makes it more obvious to the caller that they need to make sure that the current CPU ID can't change. [akpm@linux-foundation.org: fix trigger_allbutcpu_cpu_backtrace() stub] Link: https://lkml.kernel.org/r/20230804065935.v4.1.Ia35521b91fc781368945161d7b28538f9996c182@changeid Signed-off-by: Douglas Anderson <dianders@chromium.org> Acked-by: Michal Hocko <mhocko@suse.com> Cc: kernel test robot <lkp@intel.com> Cc: Lecopzer Chen <lecopzer.chen@mediatek.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Pingfan Liu <kernelfans@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
60 lines
1.3 KiB
C
60 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
#include <linux/thread_info.h>
|
|
#include <asm/apic.h>
|
|
#include <asm/nmi.h>
|
|
|
|
#include <linux/cpumask.h>
|
|
#include <linux/kdebug.h>
|
|
#include <linux/notifier.h>
|
|
#include <linux/kprobes.h>
|
|
#include <linux/nmi.h>
|
|
#include <linux/init.h>
|
|
#include <linux/delay.h>
|
|
|
|
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
|
|
u64 hw_nmi_get_sample_period(int watchdog_thresh)
|
|
{
|
|
return (u64)(cpu_khz) * 1000 * watchdog_thresh;
|
|
}
|
|
#endif
|
|
|
|
#ifdef arch_trigger_cpumask_backtrace
|
|
/* Send an NMI IPI to every CPU in @mask, asking each to dump a backtrace. */
static void nmi_raise_cpu_backtrace(cpumask_t *mask)
{
	apic->send_IPI_mask(mask, NMI_VECTOR);
}
|
|
|
|
/*
 * Trigger a backtrace on each CPU in @mask, skipping @exclude_cpu
 * (e.g. a CPU whose trace was already produced by the caller).
 * The cross-CPU signalling is performed via NMI IPIs.
 */
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu,
				      nmi_raise_cpu_backtrace);
}
|
|
|
|
static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
|
|
{
|
|
if (nmi_cpu_backtrace(regs))
|
|
return NMI_HANDLED;
|
|
|
|
return NMI_DONE;
|
|
}
|
|
NOKPROBE_SYMBOL(nmi_cpu_backtrace_handler);
|
|
|
|
/* Hook the backtrace NMI handler into the local-NMI chain at early boot. */
static int __init register_nmi_cpu_backtrace_handler(void)
{
	register_nmi_handler(NMI_LOCAL, nmi_cpu_backtrace_handler, 0,
			     "arch_bt");

	return 0;
}
early_initcall(register_nmi_cpu_backtrace_handler);
|
|
#endif
|