mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
Move the asm/xor.h headers to lib/raid/xor/$(SRCARCH)/xor_arch.h and include/linux/raid/xor_impl.h to lib/raid/xor/xor_impl.h so that the xor.ko module implementation is self-contained in lib/raid/. As this removes the asm-generic mechanism, a new kconfig symbol is added to indicate that an architecture-specific implementation exists and that xor_arch.h should be included. Link: https://lkml.kernel.org/r/20260327061704.3707577-22-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Eric Biggers <ebiggers@kernel.org> Tested-by: Eric Biggers <ebiggers@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Andreas Larsson <andreas@gaisler.com> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Mason <clm@fb.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: David Sterba <dsterba@suse.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jason A. 
Donenfeld <jason@zx2c4.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Li Nan <linan122@huawei.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Magnus Lindholm <linmag7@gmail.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Song Liu <song@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Ted Ts'o <tytso@mit.edu> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
268 lines
4.8 KiB
C
268 lines
4.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
#include <linux/prefetch.h>
|
|
#include "xor_impl.h"
|
|
|
|
static void
xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
	       const unsigned long * __restrict p2)
{
	/*
	 * XOR the contents of p2 into p1, 8 longs at a time, using
	 * software prefetch of the next group.  One group is held back
	 * from the loop count so the final pass (entered via the goto
	 * below) issues no prefetches past the end of the buffers.
	 */
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);	/* p1 is read and written back: prefetch for write */
	prefetch(p2);	/* p2 is read-only */

	do {
		register long d0, d1, d2, d3, d4, d5, d6, d7;

		/* Prefetch the next group while this one is processed. */
		prefetchw(p1+8);
		prefetch(p2+8);
 once_more:
		d0 = p1[0];	/* Pull the stuff into registers	*/
		d1 = p1[1];	/*  ... in bursts, if possible.		*/
		d2 = p1[2];
		d3 = p1[3];
		d4 = p1[4];
		d5 = p1[5];
		d6 = p1[6];
		d7 = p1[7];
		d0 ^= p2[0];
		d1 ^= p2[1];
		d2 ^= p2[2];
		d3 ^= p2[3];
		d4 ^= p2[4];
		d5 ^= p2[5];
		d6 ^= p2[6];
		d7 ^= p2[7];
		p1[0] = d0;	/* Store the result (in bursts) */
		p1[1] = d1;
		p1[2] = d2;
		p1[3] = d3;
		p1[4] = d4;
		p1[5] = d5;
		p1[6] = d6;
		p1[7] = d7;
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);
	/*
	 * Process the final group by re-entering the loop body past the
	 * prefetches.  The --lines in the while condition then leaves
	 * lines at -1, so this goto fires exactly once.
	 */
	if (lines == 0)
		goto once_more;
}
|
|
|
|
static void
xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
	       const unsigned long * __restrict p2,
	       const unsigned long * __restrict p3)
{
	/*
	 * XOR the contents of p2 and p3 into p1, 8 longs at a time,
	 * with software prefetch of the next group.  One group is held
	 * back from the loop count so the final pass (via the goto
	 * below) issues no prefetches past the end of the buffers.
	 */
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);	/* destination: prefetch for write */
	prefetch(p2);
	prefetch(p3);

	do {
		register long d0, d1, d2, d3, d4, d5, d6, d7;

		/* Prefetch the next group while this one is processed. */
		prefetchw(p1+8);
		prefetch(p2+8);
		prefetch(p3+8);
 once_more:
		d0 = p1[0];	/* Pull the stuff into registers	*/
		d1 = p1[1];	/*  ... in bursts, if possible.		*/
		d2 = p1[2];
		d3 = p1[3];
		d4 = p1[4];
		d5 = p1[5];
		d6 = p1[6];
		d7 = p1[7];
		d0 ^= p2[0];
		d1 ^= p2[1];
		d2 ^= p2[2];
		d3 ^= p2[3];
		d4 ^= p2[4];
		d5 ^= p2[5];
		d6 ^= p2[6];
		d7 ^= p2[7];
		d0 ^= p3[0];
		d1 ^= p3[1];
		d2 ^= p3[2];
		d3 ^= p3[3];
		d4 ^= p3[4];
		d5 ^= p3[5];
		d6 ^= p3[6];
		d7 ^= p3[7];
		p1[0] = d0;	/* Store the result (in bursts) */
		p1[1] = d1;
		p1[2] = d2;
		p1[3] = d3;
		p1[4] = d4;
		p1[5] = d5;
		p1[6] = d6;
		p1[7] = d7;
		p1 += 8;
		p2 += 8;
		p3 += 8;
	} while (--lines > 0);
	/*
	 * Final group: re-enter the loop body past the prefetches.
	 * --lines then leaves lines at -1, so this fires exactly once.
	 */
	if (lines == 0)
		goto once_more;
}
|
|
|
|
static void
xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
	       const unsigned long * __restrict p2,
	       const unsigned long * __restrict p3,
	       const unsigned long * __restrict p4)
{
	/*
	 * XOR the contents of p2, p3 and p4 into p1, 8 longs at a time,
	 * with software prefetch of the next group.  One group is held
	 * back from the loop count so the final pass (via the goto
	 * below) issues no prefetches past the end of the buffers.
	 */
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);	/* destination: prefetch for write */
	prefetch(p2);
	prefetch(p3);
	prefetch(p4);

	do {
		register long d0, d1, d2, d3, d4, d5, d6, d7;

		/* Prefetch the next group while this one is processed. */
		prefetchw(p1+8);
		prefetch(p2+8);
		prefetch(p3+8);
		prefetch(p4+8);
 once_more:
		d0 = p1[0];	/* Pull the stuff into registers	*/
		d1 = p1[1];	/*  ... in bursts, if possible.		*/
		d2 = p1[2];
		d3 = p1[3];
		d4 = p1[4];
		d5 = p1[5];
		d6 = p1[6];
		d7 = p1[7];
		d0 ^= p2[0];
		d1 ^= p2[1];
		d2 ^= p2[2];
		d3 ^= p2[3];
		d4 ^= p2[4];
		d5 ^= p2[5];
		d6 ^= p2[6];
		d7 ^= p2[7];
		d0 ^= p3[0];
		d1 ^= p3[1];
		d2 ^= p3[2];
		d3 ^= p3[3];
		d4 ^= p3[4];
		d5 ^= p3[5];
		d6 ^= p3[6];
		d7 ^= p3[7];
		d0 ^= p4[0];
		d1 ^= p4[1];
		d2 ^= p4[2];
		d3 ^= p4[3];
		d4 ^= p4[4];
		d5 ^= p4[5];
		d6 ^= p4[6];
		d7 ^= p4[7];
		p1[0] = d0;	/* Store the result (in bursts) */
		p1[1] = d1;
		p1[2] = d2;
		p1[3] = d3;
		p1[4] = d4;
		p1[5] = d5;
		p1[6] = d6;
		p1[7] = d7;
		p1 += 8;
		p2 += 8;
		p3 += 8;
		p4 += 8;
	} while (--lines > 0);
	/*
	 * Final group: re-enter the loop body past the prefetches.
	 * --lines then leaves lines at -1, so this fires exactly once.
	 */
	if (lines == 0)
		goto once_more;
}
|
|
|
|
static void
xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
	       const unsigned long * __restrict p2,
	       const unsigned long * __restrict p3,
	       const unsigned long * __restrict p4,
	       const unsigned long * __restrict p5)
{
	/*
	 * XOR the contents of p2..p5 into p1, 8 longs at a time, with
	 * software prefetch of the next group.  One group is held back
	 * from the loop count so the final pass (via the goto below)
	 * issues no prefetches past the end of the buffers.
	 */
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);	/* destination: prefetch for write */
	prefetch(p2);
	prefetch(p3);
	prefetch(p4);
	prefetch(p5);

	do {
		register long d0, d1, d2, d3, d4, d5, d6, d7;

		/* Prefetch the next group while this one is processed. */
		prefetchw(p1+8);
		prefetch(p2+8);
		prefetch(p3+8);
		prefetch(p4+8);
		prefetch(p5+8);
 once_more:
		d0 = p1[0];	/* Pull the stuff into registers	*/
		d1 = p1[1];	/*  ... in bursts, if possible.		*/
		d2 = p1[2];
		d3 = p1[3];
		d4 = p1[4];
		d5 = p1[5];
		d6 = p1[6];
		d7 = p1[7];
		d0 ^= p2[0];
		d1 ^= p2[1];
		d2 ^= p2[2];
		d3 ^= p2[3];
		d4 ^= p2[4];
		d5 ^= p2[5];
		d6 ^= p2[6];
		d7 ^= p2[7];
		d0 ^= p3[0];
		d1 ^= p3[1];
		d2 ^= p3[2];
		d3 ^= p3[3];
		d4 ^= p3[4];
		d5 ^= p3[5];
		d6 ^= p3[6];
		d7 ^= p3[7];
		d0 ^= p4[0];
		d1 ^= p4[1];
		d2 ^= p4[2];
		d3 ^= p4[3];
		d4 ^= p4[4];
		d5 ^= p4[5];
		d6 ^= p4[6];
		d7 ^= p4[7];
		d0 ^= p5[0];
		d1 ^= p5[1];
		d2 ^= p5[2];
		d3 ^= p5[3];
		d4 ^= p5[4];
		d5 ^= p5[5];
		d6 ^= p5[6];
		d7 ^= p5[7];
		p1[0] = d0;	/* Store the result (in bursts) */
		p1[1] = d1;
		p1[2] = d2;
		p1[3] = d3;
		p1[4] = d4;
		p1[5] = d5;
		p1[6] = d6;
		p1[7] = d7;
		p1 += 8;
		p2 += 8;
		p3 += 8;
		p4 += 8;
		p5 += 8;
	} while (--lines > 0);
	/*
	 * Final group: re-enter the loop body past the prefetches.
	 * --lines then leaves lines at -1, so this fires exactly once.
	 */
	if (lines == 0)
		goto once_more;
}
|
|
|
|
/*
 * xor template operating on groups of 8 longs held in local registers,
 * with explicit software prefetching of the next group of source and
 * destination words.
 */
struct xor_block_template xor_block_32regs_p = {
	.name = "32regs_prefetch",
	.do_2 = xor_32regs_p_2,
	.do_3 = xor_32regs_p_3,
	.do_4 = xor_32regs_p_4,
	.do_5 = xor_32regs_p_5,
};
|