Files
linux/lib/raid/xor/xor-8regs-prefetch.c
Christoph Hellwig 80dcf0a783 xor: pass the entire operation to the low-level ops
Currently the high-level xor code chunks up all operations into small
units of at most 1 + 4 vectors, and passes them to four different
methods.  This means the FPU/vector context is entered and left a lot
for wide stripes, and many expensive indirect calls are performed.
Switch to passing the entire gen_xor request to the low-level ops, and
provide a macro to dispatch it to the existing helpers.

This reduces the number of indirect calls and FPU/vector context
switches by a factor approaching nr_stripes / 4, and also reduces source
and binary code size.
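
The dispatch macro itself is not visible in this file, but the idea can
be sketched.  Below is a minimal, hypothetical illustration of a
DO_XOR_BLOCKS-style macro; the xor_gen signature (bytes, destination,
source count, source array) is an assumption, not the interface from the
patch.  Because the whole request crosses the indirect-call (and, on
architectures that need it, FPU/vector) boundary once, a stripe with N
sources costs one xor_gen call instead of roughly N / 4 calls:

/*
 * Sketch only: fold four sources at a time into the destination, then
 * dispatch the 1-4 source remainder to the matching helper.  The real
 * macro is presumably provided by xor_impl.h.
 */
#define DO_XOR_BLOCKS(name, do_2, do_3, do_4, do_5)			\
static void xor_gen_##name(unsigned long bytes,				\
		unsigned long * __restrict dest,			\
		unsigned int src_cnt,					\
		const unsigned long * const *srcs)			\
{									\
	for (; src_cnt > 4; src_cnt -= 4, srcs += 4)			\
		do_5(bytes, dest, srcs[0], srcs[1], srcs[2], srcs[3]);	\
	switch (src_cnt) {						\
	case 4:								\
		do_5(bytes, dest, srcs[0], srcs[1], srcs[2], srcs[3]);	\
		break;							\
	case 3:								\
		do_4(bytes, dest, srcs[0], srcs[1], srcs[2]);		\
		break;							\
	case 2:								\
		do_3(bytes, dest, srcs[0], srcs[1]);			\
		break;							\
	case 1:								\
		do_2(bytes, dest, srcs[0]);				\
		break;							\
	}								\
}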

Link: https://lkml.kernel.org/r/20260327061704.3707577-27-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Sterba <dsterba@suse.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason A. Donenfeld <jason@zx2c4.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Li Nan <linan122@huawei.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Song Liu <song@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2026-04-02 23:36:21 -07:00

147 lines
3.1 KiB
C

// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/prefetch.h>
#include "xor_impl.h"

static void
xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2)
{
	/*
	 * "lines" counts 8-long blocks, minus one: the loop prefetches
	 * one block ahead, so the final block is handled by jumping
	 * back to once_more without issuing a prefetch past the end of
	 * the buffers.
	 */
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);
	prefetch(p2);

	do {
		prefetchw(p1+8);
		prefetch(p2+8);
 once_more:
		p1[0] ^= p2[0];
		p1[1] ^= p2[1];
		p1[2] ^= p2[2];
		p1[3] ^= p2[3];
		p1[4] ^= p2[4];
		p1[5] ^= p2[5];
		p1[6] ^= p2[6];
		p1[7] ^= p2[7];
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);
	if (lines == 0)
		goto once_more;
}

static void
xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2,
	      const unsigned long * __restrict p3)
{
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);
	prefetch(p2);
	prefetch(p3);

	do {
		prefetchw(p1+8);
		prefetch(p2+8);
		prefetch(p3+8);
 once_more:
		p1[0] ^= p2[0] ^ p3[0];
		p1[1] ^= p2[1] ^ p3[1];
		p1[2] ^= p2[2] ^ p3[2];
		p1[3] ^= p2[3] ^ p3[3];
		p1[4] ^= p2[4] ^ p3[4];
		p1[5] ^= p2[5] ^ p3[5];
		p1[6] ^= p2[6] ^ p3[6];
		p1[7] ^= p2[7] ^ p3[7];
		p1 += 8;
		p2 += 8;
		p3 += 8;
	} while (--lines > 0);
	if (lines == 0)
		goto once_more;
}

static void
xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2,
	      const unsigned long * __restrict p3,
	      const unsigned long * __restrict p4)
{
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);
	prefetch(p2);
	prefetch(p3);
	prefetch(p4);

	do {
		prefetchw(p1+8);
		prefetch(p2+8);
		prefetch(p3+8);
		prefetch(p4+8);
 once_more:
		p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
		p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
		p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
		p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
		p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
		p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
		p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
		p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
		p1 += 8;
		p2 += 8;
		p3 += 8;
		p4 += 8;
	} while (--lines > 0);
	if (lines == 0)
		goto once_more;
}

static void
xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2,
	      const unsigned long * __restrict p3,
	      const unsigned long * __restrict p4,
	      const unsigned long * __restrict p5)
{
	long lines = bytes / (sizeof (long)) / 8 - 1;

	prefetchw(p1);
	prefetch(p2);
	prefetch(p3);
	prefetch(p4);
	prefetch(p5);

	do {
		prefetchw(p1+8);
		prefetch(p2+8);
		prefetch(p3+8);
		prefetch(p4+8);
		prefetch(p5+8);
 once_more:
		p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
		p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
		p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
		p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
		p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
		p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
		p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
		p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
		p1 += 8;
		p2 += 8;
		p3 += 8;
		p4 += 8;
		p5 += 8;
	} while (--lines > 0);
	if (lines == 0)
		goto once_more;
}

DO_XOR_BLOCKS(8regs_p, xor_8regs_p_2, xor_8regs_p_3, xor_8regs_p_4,
	      xor_8regs_p_5);

struct xor_block_template xor_block_8regs_p = {
	.name		= "8regs_prefetch",
	.xor_gen	= xor_gen_8regs_p,
};
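
For context, a hypothetical caller under the new interface, using the
same assumed xor_gen signature as the sketch above; the whole stripe is
handed over in a single indirect call instead of one call per 4-source
chunk:

/* Illustrative only: names and signature are not from the patch. */
static void xor_stripe(const struct xor_block_template *tmpl,
		unsigned long bytes, unsigned long *dest,
		unsigned int src_cnt, const unsigned long * const *srcs)
{
	/* one indirect call for the entire operation */
	tmpl->xor_gen(bytes, dest, src_cnt, srcs);
}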