mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
xor: make xor.ko self-contained in lib/raid/
Move the asm/xor.h headers to lib/raid/xor/$(SRCARCH)/xor_arch.h and include/linux/raid/xor_impl.h to lib/raid/xor/xor_impl.h so that the xor.ko module implementation is self-contained in lib/raid/. As this removes the asm-generic mechanism, a new kconfig symbol is added to indicate that an architecture-specific implementation exists, and xor_arch.h should be included. Link: https://lkml.kernel.org/r/20260327061704.3707577-22-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Eric Biggers <ebiggers@kernel.org> Tested-by: Eric Biggers <ebiggers@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Andreas Larsson <andreas@gaisler.com> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Mason <clm@fb.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: David Sterba <dsterba@suse.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jason A. 
Donenfeld <jason@zx2c4.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Li Nan <linan122@huawei.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Magnus Lindholm <linmag7@gmail.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Song Liu <song@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Ted Ts'o <tytso@mit.edu> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
352ebd066b
commit
e20043b476
@@ -1,8 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _ASM_UM_XOR_H
|
||||
#define _ASM_UM_XOR_H
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <../../x86/include/asm/xor.h>
|
||||
|
||||
#endif
|
||||
@@ -65,4 +65,3 @@ mandatory-y += vermagic.h
|
||||
mandatory-y += vga.h
|
||||
mandatory-y += video.h
|
||||
mandatory-y += word-at-a-time.h
|
||||
mandatory-y += xor.h
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* include/asm-generic/xor.h
|
||||
*
|
||||
* Generic optimized RAID-5 checksumming functions.
|
||||
*/
|
||||
|
||||
extern struct xor_block_template xor_block_8regs;
|
||||
extern struct xor_block_template xor_block_32regs;
|
||||
extern struct xor_block_template xor_block_8regs_p;
|
||||
extern struct xor_block_template xor_block_32regs_p;
|
||||
@@ -2,3 +2,18 @@
|
||||
|
||||
config XOR_BLOCKS
|
||||
tristate
|
||||
|
||||
# selected by architectures that provide an optimized XOR implementation
|
||||
config XOR_BLOCKS_ARCH
|
||||
depends on XOR_BLOCKS
|
||||
default y if ALPHA
|
||||
default y if ARM
|
||||
default y if ARM64
|
||||
default y if CPU_HAS_LSX # loongarch
|
||||
default y if ALTIVEC # powerpc
|
||||
default y if RISCV_ISA_V
|
||||
default y if SPARC
|
||||
default y if S390
|
||||
default y if X86_32
|
||||
default y if X86_64
|
||||
bool
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
ccflags-y += -I $(src)
|
||||
|
||||
obj-$(CONFIG_XOR_BLOCKS) += xor.o
|
||||
|
||||
xor-y += xor-core.o
|
||||
@@ -8,6 +10,10 @@ xor-y += xor-32regs.o
|
||||
xor-y += xor-8regs-prefetch.o
|
||||
xor-y += xor-32regs-prefetch.o
|
||||
|
||||
ifeq ($(CONFIG_XOR_BLOCKS_ARCH),y)
|
||||
CFLAGS_xor-core.o += -I$(src)/$(SRCARCH)
|
||||
endif
|
||||
|
||||
xor-$(CONFIG_ALPHA) += alpha/xor.o
|
||||
xor-$(CONFIG_ARM) += arm/xor.o
|
||||
ifeq ($(CONFIG_ARM),y)
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
/*
|
||||
* Optimized XOR parity functions for alpha EV5 and EV6
|
||||
*/
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
extern void
|
||||
xor_alpha_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
|
||||
#include <asm/special_insns.h>
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
extern struct xor_block_template xor_block_alpha;
|
||||
extern struct xor_block_template xor_block_alpha_prefetch;
|
||||
@@ -10,7 +9,6 @@ extern struct xor_block_template xor_block_alpha_prefetch;
|
||||
* Force the use of alpha_prefetch if EV6, as it is significantly faster in the
|
||||
* cold cache case.
|
||||
*/
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
if (implver() == IMPLVER_EV6) {
|
||||
@@ -2,8 +2,8 @@
|
||||
/*
|
||||
* Copyright (C) 2001 Russell King
|
||||
*/
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
extern struct xor_block_template const xor_block_neon_inner;
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include "xor_impl.h"
|
||||
|
||||
#ifndef __ARM_NEON__
|
||||
#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
/*
|
||||
* Copyright (C) 2001 Russell King
|
||||
*/
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
#define __XOR(a1, a2) a1 ^= a2
|
||||
|
||||
|
||||
@@ -2,13 +2,11 @@
|
||||
/*
|
||||
* Copyright (C) 2001 Russell King
|
||||
*/
|
||||
#include <asm-generic/xor.h>
|
||||
#include <asm/neon.h>
|
||||
|
||||
extern struct xor_block_template xor_block_arm4regs;
|
||||
extern struct xor_block_template xor_block_neon;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_arm4regs);
|
||||
@@ -4,9 +4,9 @@
|
||||
* Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
|
||||
*/
|
||||
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/simd.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
#include "xor-neon.h"
|
||||
|
||||
#define XOR_TEMPLATE(_name) \
|
||||
|
||||
@@ -4,10 +4,10 @@
|
||||
* Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
|
||||
*/
|
||||
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <linux/cache.h>
|
||||
#include <asm/neon-intrinsics.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
#include "xor-neon.h"
|
||||
|
||||
void __xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -3,14 +3,11 @@
|
||||
* Authors: Jackie Liu <liuyun01@kylinos.cn>
|
||||
* Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
|
||||
*/
|
||||
|
||||
#include <asm-generic/xor.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
extern struct xor_block_template xor_block_neon;
|
||||
extern struct xor_block_template xor_block_eor3;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_8regs);
|
||||
@@ -2,9 +2,6 @@
|
||||
/*
|
||||
* Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
|
||||
*/
|
||||
#ifndef _ASM_LOONGARCH_XOR_H
|
||||
#define _ASM_LOONGARCH_XOR_H
|
||||
|
||||
#include <asm/cpu-features.h>
|
||||
|
||||
/*
|
||||
@@ -15,12 +12,10 @@
|
||||
* the scalar ones, maybe for errata or micro-op reasons. It may be
|
||||
* appropriate to revisit this after one or two more uarch generations.
|
||||
*/
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
extern struct xor_block_template xor_block_lsx;
|
||||
extern struct xor_block_template xor_block_lasx;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_8regs);
|
||||
@@ -36,5 +31,3 @@ static __always_inline void __init arch_xor_init(void)
|
||||
xor_register(&xor_block_lasx);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* _ASM_LOONGARCH_XOR_H */
|
||||
@@ -6,9 +6,9 @@
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
#include "xor_simd.h"
|
||||
|
||||
#define MAKE_XOR_GLUE_2(flavor) \
|
||||
|
||||
@@ -5,15 +5,10 @@
|
||||
*
|
||||
* Author: Anton Blanchard <anton@au.ibm.com>
|
||||
*/
|
||||
#ifndef _ASM_POWERPC_XOR_H
|
||||
#define _ASM_POWERPC_XOR_H
|
||||
|
||||
#include <asm/cpu_has_feature.h>
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
extern struct xor_block_template xor_block_altivec;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_8regs);
|
||||
@@ -25,5 +20,3 @@ static __always_inline void __init arch_xor_init(void)
|
||||
xor_register(&xor_block_altivec);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* _ASM_POWERPC_XOR_H */
|
||||
@@ -7,9 +7,9 @@
|
||||
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
#include "xor_vmx.h"
|
||||
|
||||
static void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
* Copyright (C) 2021 SiFive
|
||||
*/
|
||||
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/vector.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1,
|
||||
const unsigned long *__restrict p2)
|
||||
|
||||
@@ -3,11 +3,9 @@
|
||||
* Copyright (C) 2021 SiFive
|
||||
*/
|
||||
#include <asm/vector.h>
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
extern struct xor_block_template xor_block_rvv;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_8regs);
|
||||
@@ -7,8 +7,8 @@
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
const unsigned long * __restrict p2)
|
||||
|
||||
@@ -5,15 +5,9 @@
|
||||
* Copyright IBM Corp. 2016
|
||||
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*/
|
||||
#ifndef _ASM_S390_XOR_H
|
||||
#define _ASM_S390_XOR_H
|
||||
|
||||
extern struct xor_block_template xor_block_xc;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_force(&xor_block_xc);
|
||||
}
|
||||
|
||||
#endif /* _ASM_S390_XOR_H */
|
||||
@@ -5,8 +5,8 @@
|
||||
*
|
||||
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
|
||||
*/
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
static void
|
||||
sparc_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -8,8 +8,8 @@
|
||||
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
|
||||
*/
|
||||
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
void xor_vis_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
const unsigned long * __restrict p2);
|
||||
|
||||
@@ -3,16 +3,12 @@
|
||||
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
|
||||
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
|
||||
*/
|
||||
#ifndef ___ASM_SPARC_XOR_H
|
||||
#define ___ASM_SPARC_XOR_H
|
||||
|
||||
#if defined(__sparc__) && defined(__arch64__)
|
||||
#include <asm/spitfire.h>
|
||||
|
||||
extern struct xor_block_template xor_block_VIS;
|
||||
extern struct xor_block_template xor_block_niagara;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
/* Force VIS for everything except Niagara. */
|
||||
@@ -28,12 +24,8 @@ static __always_inline void __init arch_xor_init(void)
|
||||
}
|
||||
#else /* sparc64 */
|
||||
|
||||
/* For grins, also test the generic routines. */
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
extern struct xor_block_template xor_block_SPARC;
|
||||
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_8regs);
|
||||
@@ -41,4 +33,3 @@ static __always_inline void __init arch_xor_init(void)
|
||||
xor_register(&xor_block_SPARC);
|
||||
}
|
||||
#endif /* !sparc64 */
|
||||
#endif /* ___ASM_SPARC_XOR_H */
|
||||
2
lib/raid/xor/um/xor_arch.h
Normal file
2
lib/raid/xor/um/xor_arch.h
Normal file
@@ -0,0 +1,2 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#include <../x86/xor_arch.h>
|
||||
@@ -8,9 +8,9 @@
|
||||
* Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
|
||||
*/
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
#define BLOCK4(i) \
|
||||
BLOCK(32 * i, 0) \
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
*
|
||||
* Copyright (C) 1998 Ingo Molnar.
|
||||
*/
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
|
||||
#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
|
||||
|
||||
@@ -12,9 +12,9 @@
|
||||
* x86-64 changes / gcc fixes from Andi Kleen.
|
||||
* Copyright 2002 Andi Kleen, SuSE Labs.
|
||||
*/
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
#include "xor_arch.h"
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* reduce register pressure */
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
#ifndef _ASM_X86_XOR_H
|
||||
#define _ASM_X86_XOR_H
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm-generic/xor.h>
|
||||
|
||||
extern struct xor_block_template xor_block_pII_mmx;
|
||||
extern struct xor_block_template xor_block_p5_mmx;
|
||||
@@ -20,7 +16,6 @@ extern struct xor_block_template xor_block_avx;
|
||||
*
|
||||
* 32-bit without MMX can fall back to the generic routines.
|
||||
*/
|
||||
#define arch_xor_init arch_xor_init
|
||||
static __always_inline void __init arch_xor_init(void)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
@@ -39,5 +34,3 @@ static __always_inline void __init arch_xor_init(void)
|
||||
xor_register(&xor_block_32regs_p);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_XOR_H */
|
||||
@@ -1,7 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm-generic/xor.h>
|
||||
#include "xor_impl.h"
|
||||
|
||||
static void
|
||||
xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm-generic/xor.h>
|
||||
#include "xor_impl.h"
|
||||
|
||||
static void
|
||||
xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm-generic/xor.h>
|
||||
#include "xor_impl.h"
|
||||
|
||||
static void
|
||||
xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <asm-generic/xor.h>
|
||||
#include "xor_impl.h"
|
||||
|
||||
static void
|
||||
xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
|
||||
|
||||
@@ -9,10 +9,9 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/raid/xor.h>
|
||||
#include <linux/raid/xor_impl.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <asm/xor.h>
|
||||
#include "xor_impl.h"
|
||||
|
||||
/* The xor routines to use. */
|
||||
static struct xor_block_template *active_template;
|
||||
@@ -141,16 +140,21 @@ static int __init calibrate_xor_blocks(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init xor_init(void)
|
||||
{
|
||||
#ifdef arch_xor_init
|
||||
arch_xor_init();
|
||||
#ifdef CONFIG_XOR_BLOCKS_ARCH
|
||||
#include "xor_arch.h" /* $SRCARCH/xor_arch.h */
|
||||
#else
|
||||
static void __init arch_xor_init(void)
|
||||
{
|
||||
xor_register(&xor_block_8regs);
|
||||
xor_register(&xor_block_8regs_p);
|
||||
xor_register(&xor_block_32regs);
|
||||
xor_register(&xor_block_32regs_p);
|
||||
#endif
|
||||
}
|
||||
#endif /* CONFIG_XOR_BLOCKS_ARCH */
|
||||
|
||||
static int __init xor_init(void)
|
||||
{
|
||||
arch_xor_init();
|
||||
|
||||
/*
|
||||
* If this arch/cpu has a short-circuited selection, don't loop through
|
||||
|
||||
@@ -24,6 +24,12 @@ struct xor_block_template {
|
||||
const unsigned long * __restrict);
|
||||
};
|
||||
|
||||
/* generic implementations */
|
||||
extern struct xor_block_template xor_block_8regs;
|
||||
extern struct xor_block_template xor_block_32regs;
|
||||
extern struct xor_block_template xor_block_8regs_p;
|
||||
extern struct xor_block_template xor_block_32regs_p;
|
||||
|
||||
void __init xor_register(struct xor_block_template *tmpl);
|
||||
void __init xor_force(struct xor_block_template *tmpl);
|
||||
|
||||
Reference in New Issue
Block a user