Merge tag 'x86_cpu_for_7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Dave Hansen:

 - Complete LASS enabling: deal with vsyscall and EFI

   The existing Linear Address Space Separation (LASS) support punted
   on support for common EFI and vsyscall configs. Complete the
   implementation by supporting EFI and vsyscall=xonly.

 - Clean up CPUID usage in newer Intel "avs" audio driver and update the
   x86-cpuid-db file

* tag 'x86_cpu_for_7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/x86/kcpuid: Update bitfields to x86-cpuid-db v3.0
  ASoC: Intel: avs: Include CPUID header at file scope
  ASoC: Intel: avs: Check maximum valid CPUID leaf
  x86/cpu: Remove LASS restriction on vsyscall emulation
  x86/vsyscall: Disable LASS if vsyscall mode is set to EMULATE
  x86/vsyscall: Restore vsyscall=xonly mode under LASS
  x86/traps: Consolidate user fixups in the #GP handler
  x86/vsyscall: Reorganize the page fault emulation code
  x86/cpu: Remove LASS restriction on EFI
  x86/efi: Disable LASS while executing runtime services
  x86/cpu: Defer LASS enabling until userspace comes up
This commit is contained in:
Linus Torvalds
2026-04-14 14:24:45 -07:00
11 changed files with 501 additions and 395 deletions

View File

@@ -8398,7 +8398,9 @@ Kernel parameters
emulate Vsyscalls turn into traps and are emulated
reasonably safely. The vsyscall page is
readable.
readable. This disables the Linear
Address Space Separation (LASS) security
feature and makes the system less secure.
xonly [default] Vsyscalls turn into traps and are
emulated reasonably safely. The vsyscall

View File

@@ -23,7 +23,7 @@
* soon be no new userspace code that will ever use a vsyscall.
*
* The code in this file emulates vsyscalls when notified of a page
* fault to a vsyscall address.
* fault or a general protection fault to a vsyscall address.
*/
#include <linux/kernel.h>
@@ -62,6 +62,11 @@ static int __init vsyscall_setup(char *str)
else
return -EINVAL;
if (cpu_feature_enabled(X86_FEATURE_LASS) && vsyscall_mode == EMULATE) {
setup_clear_cpu_cap(X86_FEATURE_LASS);
pr_warn_once("x86/cpu: Disabling LASS due to vsyscall=emulate\n");
}
return 0;
}
@@ -111,48 +116,17 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
}
}
bool emulate_vsyscall(unsigned long error_code,
struct pt_regs *regs, unsigned long address)
static bool __emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
long ret;
unsigned long orig_dx;
/* Write faults or kernel-privilege faults never get fixed up. */
if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
/* Confirm that the fault happened in 64-bit user mode */
if (!user_64bit_mode(regs))
return false;
/*
* Assume that faults at regs->ip are because of an
* instruction fetch. Return early and avoid
* emulation for faults during data accesses:
*/
if (address != regs->ip) {
/* Failed vsyscall read */
if (vsyscall_mode == EMULATE)
return false;
/*
* User code tried and failed to read the vsyscall page.
*/
warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
return false;
}
/*
* X86_PF_INSTR is only set when NX is supported. When
* available, use it to double-check that the emulation code
* is only being used for instruction fetches:
*/
if (cpu_feature_enabled(X86_FEATURE_NX))
WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
/*
* No point in checking CS -- the only way to get here is a user mode
* trap to a high address, which means that we're in 64-bit user code.
*/
if (vsyscall_mode == NONE) {
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall attempted with vsyscall=none");
@@ -280,6 +254,53 @@ sigsegv:
return true;
}
/*
 * Page-fault entry point for vsyscall emulation.
 *
 * Called from the #PF handler when the faulting address lies in the
 * vsyscall page.  Validates that the fault looks like a user-mode
 * instruction fetch before handing off to the common emulation path.
 *
 * Returns true if the fault was handled (emulated or a signal was
 * queued); false if the caller should continue normal fault handling.
 */
bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs,
			 unsigned long address)
{
	/* Write faults or kernel-privilege faults never get fixed up. */
	if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
		return false;

	/*
	 * Assume that faults at regs->ip are because of an instruction
	 * fetch. Return early and avoid emulation for faults during
	 * data accesses:
	 */
	if (address != regs->ip) {
		/* Failed vsyscall read */
		if (vsyscall_mode == EMULATE)
			return false;

		/* User code tried and failed to read the vsyscall page. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
		return false;
	}

	/*
	 * X86_PF_INSTR is only set when NX is supported.  When
	 * available, use it to double-check that the emulation code
	 * is only being used for instruction fetches:
	 */
	if (cpu_feature_enabled(X86_FEATURE_NX))
		WARN_ON_ONCE(!(error_code & X86_PF_INSTR));

	return __emulate_vsyscall(regs, address);
}
/*
 * General-protection-fault entry point for vsyscall emulation.
 *
 * Without LASS a legacy vsyscall access raises #PF, not #GP, so there
 * is nothing to do here.  With LASS active, emulate only when the
 * faulting instruction pointer is inside the vsyscall page.
 *
 * Returns true if the fault was handled via emulation.
 */
bool emulate_vsyscall_gp(struct pt_regs *regs)
{
	unsigned long ip = regs->ip;

	if (!cpu_feature_enabled(X86_FEATURE_LASS) || !is_vsyscall_vaddr(ip))
		return false;

	return __emulate_vsyscall(regs, ip);
}
/*
* A pseudo VMA to allow ptrace access for the vsyscall page. This only
* covers the 64bit vsyscall page now. 32bit has a real VMA now and does

View File

@@ -14,12 +14,17 @@ extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
* Called on instruction fetch fault in vsyscall page.
* Returns true if handled.
*/
extern bool emulate_vsyscall(unsigned long error_code,
struct pt_regs *regs, unsigned long address);
bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs, unsigned long address);
bool emulate_vsyscall_gp(struct pt_regs *regs);
#else
static inline void map_vsyscall(void) {}
static inline bool emulate_vsyscall(unsigned long error_code,
struct pt_regs *regs, unsigned long address)
static inline bool emulate_vsyscall_pf(unsigned long error_code,
struct pt_regs *regs, unsigned long address)
{
return false;
}
static inline bool emulate_vsyscall_gp(struct pt_regs *regs)
{
return false;
}

View File

@@ -409,27 +409,29 @@ out:
cr4_clear_bits(X86_CR4_UMIP);
}
static __always_inline void setup_lass(struct cpuinfo_x86 *c)
/*
 * CPU hotplug callback: set CR4.LASS on the CPU coming online.
 * Registered via cpuhp_setup_state() so it also runs on CPUs that
 * come online later.  Always returns 0 (success).
 */
static int enable_lass(unsigned int cpu)
{
	cr4_set_bits(X86_CR4_LASS);
	return 0;
}
/*
* Finalize features that need to be enabled just before entering
* userspace. Note that this only runs on a single CPU. Use appropriate
* callbacks if all the CPUs need to reflect the same change.
*/
static int cpu_finalize_pre_userspace(void)
{
if (!cpu_feature_enabled(X86_FEATURE_LASS))
return;
return 0;
/*
* Legacy vsyscall page access causes a #GP when LASS is active.
* Disable LASS because the #GP handler doesn't support vsyscall
* emulation.
*
* Also disable LASS when running under EFI, as some runtime and
* boot services rely on 1:1 mappings in the lower half.
*/
if (IS_ENABLED(CONFIG_X86_VSYSCALL_EMULATION) ||
IS_ENABLED(CONFIG_EFI)) {
setup_clear_cpu_cap(X86_FEATURE_LASS);
return;
}
/* Runs on all online CPUs and future CPUs that come online. */
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/lass:enable", enable_lass, NULL);
cr4_set_bits(X86_CR4_LASS);
return 0;
}
late_initcall(cpu_finalize_pre_userspace);
/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
@@ -2061,7 +2063,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
setup_umip(c);
setup_lass(c);
/*
* The vendor-specific functions might have changed features.

View File

@@ -70,6 +70,7 @@
#include <asm/tdx.h>
#include <asm/cfi.h>
#include <asm/msr.h>
#include <asm/vsyscall.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -921,11 +922,6 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
cond_local_irq_enable(regs);
if (static_cpu_has(X86_FEATURE_UMIP)) {
if (user_mode(regs) && fixup_umip_exception(regs))
goto exit;
}
if (v8086_mode(regs)) {
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
@@ -940,6 +936,12 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
goto exit;
if (fixup_umip_exception(regs))
goto exit;
if (emulate_vsyscall_gp(regs))
goto exit;
gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
goto exit;
}

View File

@@ -354,6 +354,9 @@ bool fixup_umip_exception(struct pt_regs *regs)
void __user *uaddr;
struct insn insn;
if (!cpu_feature_enabled(X86_FEATURE_UMIP))
return false;
if (!regs)
return false;

View File

@@ -1314,7 +1314,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* to consider the PF_PK bit.
*/
if (is_vsyscall_vaddr(address)) {
if (emulate_vsyscall(error_code, regs, address))
if (emulate_vsyscall_pf(error_code, regs, address))
return;
}
#endif

View File

@@ -55,6 +55,7 @@
*/
static u64 efi_va = EFI_VA_START;
static struct mm_struct *efi_prev_mm;
static unsigned long efi_cr4_lass;
/*
* We need our own copy of the higher levels of the page tables
@@ -443,16 +444,50 @@ static void efi_leave_mm(void)
unuse_temporary_mm(efi_prev_mm);
}
/*
* Toggle LASS to allow EFI to access any 1:1 mapped region in the lower
* half.
*
* Disable LASS only after switching to EFI-mm, as userspace is not
* mapped in it. Similar to EFI-mm, these rely on preemption being
* disabled and the calls being serialized.
*/
/*
 * Temporarily clear CR4.LASS so EFI runtime code may touch the 1:1
 * mapped lower half.  The prior state is remembered in efi_cr4_lass
 * for efi_enable_lass() to restore.
 */
static void efi_disable_lass(void)
{
	/* Nothing to do if LASS is not enumerated/enabled. */
	if (!cpu_feature_enabled(X86_FEATURE_LASS))
		return;

	/* Serialization of these calls relies on preemption being off. */
	lockdep_assert_preemption_disabled();

	/* Save current CR4.LASS state */
	efi_cr4_lass = cr4_read_shadow() & X86_CR4_LASS;
	/* Clearing zero bits is a no-op if LASS was already off. */
	cr4_clear_bits(efi_cr4_lass);
}
/*
 * Restore CR4.LASS to the state saved by efi_disable_lass() after the
 * EFI call completes.
 */
static void efi_enable_lass(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_LASS))
		return;

	/* Same serialization requirement as efi_disable_lass(). */
	lockdep_assert_preemption_disabled();

	/* Reprogram CR4.LASS only if it was set earlier */
	cr4_set_bits(efi_cr4_lass);
}
/*
 * Prepare the CPU for an EFI runtime service call: sync page tables,
 * set up FPU state, restrict branch speculation, switch to the EFI mm
 * and finally drop LASS.
 *
 * LASS is disabled last, after the switch to efi-mm, because userspace
 * is not mapped in efi-mm and the saved-state toggling relies on
 * preemption being disabled.
 */
void arch_efi_call_virt_setup(void)
{
	efi_sync_low_kernel_mappings();
	efi_fpu_begin();
	firmware_restrict_branch_speculation_start();
	efi_enter_mm();
	efi_disable_lass();
}
void arch_efi_call_virt_teardown(void)
{
efi_enable_lass();
efi_leave_mm();
firmware_restrict_branch_speculation_end();
efi_fpu_end();

View File

@@ -95,7 +95,7 @@ config SND_SOC_INTEL_KEEMBAY
config SND_SOC_INTEL_AVS
tristate "Intel AVS driver"
depends on X86 || COMPILE_TEST
depends on X86
depends on PCI
depends on COMMON_CLK
select ACPI_NHLT if ACPI

View File

@@ -7,12 +7,11 @@
//
#include <linux/pci.h>
#include <asm/cpuid/api.h>
#include "avs.h"
#include "debug.h"
#include "messages.h"
#define CPUID_TSC_LEAF 0x15
static int avs_tgl_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
core_mask &= AVS_MAIN_CORE_MASK;
@@ -40,22 +39,37 @@ static int avs_tgl_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stal
return avs_dsp_core_stall(adev, core_mask, stall);
}
/*
* Succeed if CPUID(0x15) is not available, or if the nominal core crystal clock
* frequency cannot be enumerated from it. There is nothing to do in both cases.
*/
/*
 * Report the crystal clock frequency from CPUID's TSC leaf to firmware.
 * Quietly succeeds when the leaf is unavailable or the frequency is not
 * enumerated, since there is nothing to configure in those cases.
 */
static int avs_tgl_set_xtal_freq(struct avs_dev *adev)
{
	unsigned int freq;
	int ret;

	/* CPUID leaf out of range on this CPU: nothing to report. */
	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
		return 0;

	freq = cpuid_ecx(CPUID_LEAF_TSC);
	/* Frequency not enumerated: also nothing to report. */
	if (!freq)
		return 0;

	ret = avs_ipc_set_fw_config(adev, 1, AVS_FW_CFG_XTAL_FREQ_HZ, sizeof(freq), &freq);
	return ret ? AVS_IPC_RET(ret) : 0;
}
static int avs_tgl_config_basefw(struct avs_dev *adev)
{
struct pci_dev *pci = adev->base.pci;
struct avs_bus_hwid hwid;
int ret;
#ifdef CONFIG_X86
unsigned int ecx;
#include <asm/cpuid/api.h>
ecx = cpuid_ecx(CPUID_TSC_LEAF);
if (ecx) {
ret = avs_ipc_set_fw_config(adev, 1, AVS_FW_CFG_XTAL_FREQ_HZ, sizeof(ecx), &ecx);
if (ret)
return AVS_IPC_RET(ret);
}
#endif
ret = avs_tgl_set_xtal_freq(adev);
if (ret)
return ret;
hwid.device = pci->device;
hwid.subsystem = pci->subsystem_vendor | (pci->subsystem_device << 16);

File diff suppressed because it is too large Load Diff