mirror of
https://github.com/torvalds/linux.git
synced 2026-05-05 23:05:25 -04:00
x86/xen: Move Xen upcall handler
Move the upcall handler to Xen-specific files. No functional changes. Signed-off-by: Brian Gerst <brgerst@gmail.com> Signed-off-by: Ingo Molnar <mingo@kernel.org> Reviewed-by: Juergen Gross <jgross@suse.com> Reviewed-by: Sohil Mehta <sohil.mehta@intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Link: https://lore.kernel.org/r/20250314151220.862768-2-brgerst@gmail.com
This commit is contained in:
@@ -21,11 +21,6 @@
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#ifdef CONFIG_XEN_PV
|
||||
#include <xen/xen-ops.h>
|
||||
#include <xen/events.h>
|
||||
#endif
|
||||
|
||||
#include <asm/apic.h>
|
||||
#include <asm/desc.h>
|
||||
#include <asm/traps.h>
|
||||
@@ -455,70 +450,3 @@ SYSCALL_DEFINE0(ni_syscall)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XEN_PV
|
||||
#ifndef CONFIG_PREEMPTION
|
||||
/*
|
||||
* Some hypercalls issued by the toolstack can take many 10s of
|
||||
* seconds. Allow tasks running hypercalls via the privcmd driver to
|
||||
* be voluntarily preempted even if full kernel preemption is
|
||||
* disabled.
|
||||
*
|
||||
* Such preemptible hypercalls are bracketed by
|
||||
* xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
|
||||
* calls.
|
||||
*/
|
||||
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
|
||||
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
|
||||
|
||||
/*
|
||||
* In case of scheduling the flag must be cleared and restored after
|
||||
* returning from schedule as the task might move to a different CPU.
|
||||
*/
|
||||
/*
 * Snapshot this CPU's "inside a preemptible hypercall" flag and clear it.
 * Returns the previous value.  Per the comment block above: across a
 * schedule the flag must be cleared and restored afterwards, because the
 * task may resume on a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
|
||||
{
|
||||
/* Read the per-CPU flag before clearing it, so the caller can restore it. */
bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
|
||||
|
||||
__this_cpu_write(xen_in_preemptible_hcall, false);
|
||||
return inhcall;
|
||||
}
|
||||
|
||||
/*
 * Re-arm the flag saved by get_and_clear_inhcall() on the current CPU
 * (which may differ from the CPU the flag was read on).
 */
static __always_inline void restore_inhcall(bool inhcall)
|
||||
{
|
||||
__this_cpu_write(xen_in_preemptible_hcall, inhcall);
|
||||
}
|
||||
#else
|
||||
/*
 * CONFIG_PREEMPTION case (#else of the #ifndef above): the kernel is
 * already fully preemptible, so the voluntary-preemption bookkeeping is
 * unnecessary -- provide no-op stubs with the same interface.
 */
static __always_inline bool get_and_clear_inhcall(void) { return false; }
|
||||
static __always_inline void restore_inhcall(bool inhcall) { }
|
||||
#endif
|
||||
|
||||
/*
 * Body of the Xen PV event-channel upcall, run after entry state has been
 * established by the caller: publish @regs as the current irq regs,
 * account the callback, and dispatch pending event channels.
 */
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
|
||||
/* Bump this CPU's hypervisor-callback interrupt statistic. */
inc_irq_stat(irq_hv_callback_count);
|
||||
|
||||
xen_evtchn_do_upcall();
|
||||
|
||||
/* Restore the previously published irq regs pointer. */
set_irq_regs(old_regs);
|
||||
}
|
||||
|
||||
/*
 * Xen PV event-channel upcall entry point.  Marked noinstr: nothing
 * instrumentable may run before irqentry_enter()/instrumentation_begin().
 *
 * Runs the handler proper (on the irq stack when needed), then: if the
 * interrupted context was inside a preemptible privcmd hypercall (see
 * xen_in_preemptible_hcall above) and RCU does not require the full exit
 * path (!state.exit_rcu), offer a voluntary reschedule and restore the
 * flag; otherwise take the normal irqentry exit path.
 */
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
|
||||
{
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
bool inhcall;
|
||||
|
||||
instrumentation_begin();
|
||||
/* Dispatch the upcall, switching to the irq stack if not already on it. */
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
|
||||
|
||||
inhcall = get_and_clear_inhcall();
|
||||
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
|
||||
/* Interrupted a preemptible hypercall: allow rescheduling here. */
irqentry_exit_cond_resched();
|
||||
instrumentation_end();
|
||||
restore_inhcall(inhcall);
|
||||
} else {
|
||||
instrumentation_end();
|
||||
irqentry_exit(regs, state);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_XEN_PV */
|
||||
|
||||
@@ -73,6 +73,7 @@
|
||||
#include <asm/mwait.h>
|
||||
#include <asm/pci_x86.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/irq_stack.h>
|
||||
#ifdef CONFIG_X86_IOPL_IOPERM
|
||||
#include <asm/io_bitmap.h>
|
||||
#endif
|
||||
@@ -94,6 +95,44 @@ void *xen_initial_gdt;
|
||||
static int xen_cpu_up_prepare_pv(unsigned int cpu);
|
||||
static int xen_cpu_dead_pv(unsigned int cpu);
|
||||
|
||||
#ifndef CONFIG_PREEMPTION
|
||||
/*
|
||||
* Some hypercalls issued by the toolstack can take many 10s of
|
||||
* seconds. Allow tasks running hypercalls via the privcmd driver to
|
||||
* be voluntarily preempted even if full kernel preemption is
|
||||
* disabled.
|
||||
*
|
||||
* Such preemptible hypercalls are bracketed by
|
||||
* xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
|
||||
* calls.
|
||||
*/
|
||||
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
|
||||
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
|
||||
|
||||
/*
|
||||
* In case of scheduling the flag must be cleared and restored after
|
||||
* returning from schedule as the task might move to a different CPU.
|
||||
*/
|
||||
/*
 * Snapshot this CPU's "inside a preemptible hypercall" flag and clear it.
 * Returns the previous value.  Per the comment block above: across a
 * schedule the flag must be cleared and restored afterwards, because the
 * task may resume on a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
|
||||
{
|
||||
/* Read the per-CPU flag before clearing it, so the caller can restore it. */
bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
|
||||
|
||||
__this_cpu_write(xen_in_preemptible_hcall, false);
|
||||
return inhcall;
|
||||
}
|
||||
|
||||
/*
 * Re-arm the flag saved by get_and_clear_inhcall() on the current CPU
 * (which may differ from the CPU the flag was read on).
 */
static __always_inline void restore_inhcall(bool inhcall)
|
||||
{
|
||||
__this_cpu_write(xen_in_preemptible_hcall, inhcall);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
/*
 * CONFIG_PREEMPTION case (#else of the #ifndef above): the kernel is
 * already fully preemptible, so the voluntary-preemption bookkeeping is
 * unnecessary -- provide no-op stubs with the same interface.
 */
static __always_inline bool get_and_clear_inhcall(void) { return false; }
|
||||
static __always_inline void restore_inhcall(bool inhcall) { }
|
||||
|
||||
#endif
|
||||
|
||||
/*
 * Container for three TLS segment descriptors.
 * NOTE(review): presumably a per-CPU shadow of the GDT's TLS slots --
 * the DEFINE_PER_CPU/use site is not visible here; confirm at the caller.
 */
struct tls_descs {
|
||||
struct desc_struct desc[3];
|
||||
};
|
||||
@@ -687,6 +726,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * Body of the Xen PV event-channel upcall, run after entry state has been
 * established by the caller: publish @regs as the current irq regs,
 * account the callback, and dispatch pending event channels.
 */
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
|
||||
/* Bump this CPU's hypervisor-callback interrupt statistic. */
inc_irq_stat(irq_hv_callback_count);
|
||||
|
||||
xen_evtchn_do_upcall();
|
||||
|
||||
/* Restore the previously published irq regs pointer. */
set_irq_regs(old_regs);
|
||||
}
|
||||
|
||||
/*
 * Xen PV event-channel upcall entry point.  Marked noinstr: nothing
 * instrumentable may run before irqentry_enter()/instrumentation_begin().
 *
 * Runs the handler proper (on the irq stack when needed), then: if the
 * interrupted context was inside a preemptible privcmd hypercall (see
 * xen_in_preemptible_hcall above) and RCU does not require the full exit
 * path (!state.exit_rcu), offer a voluntary reschedule and restore the
 * flag; otherwise take the normal irqentry exit path.
 */
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
|
||||
{
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
bool inhcall;
|
||||
|
||||
instrumentation_begin();
|
||||
/* Dispatch the upcall, switching to the irq stack if not already on it. */
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
|
||||
|
||||
inhcall = get_and_clear_inhcall();
|
||||
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
|
||||
/* Interrupted a preemptible hypercall: allow rescheduling here. */
irqentry_exit_cond_resched();
|
||||
instrumentation_end();
|
||||
restore_inhcall(inhcall);
|
||||
} else {
|
||||
instrumentation_end();
|
||||
irqentry_exit(regs, state);
|
||||
}
|
||||
}
|
||||
|
||||
struct trap_array_entry {
|
||||
void (*orig)(void);
|
||||
void (*xen)(void);
|
||||
|
||||
Reference in New Issue
Block a user