Currently the high-level xor code chunks up all operations into small units
of only up to 1 + 4 vectors, and passes them to one of four different
methods.  This means the FPU/vector context is entered and left a lot for
wide stripes, and a lot of expensive indirect calls are performed.

Switch to passing the entire gen_xor request to the low-level ops, and
provide a macro to dispatch it to the existing helpers.  This reduces the
number of indirect calls and FPU/vector context switches by a factor
approaching nr_stripes / 4, and also reduces source and binary code size.

Link: https://lkml.kernel.org/r/20260327061704.3707577-27-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Sterba <dsterba@suse.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason A. Donenfeld <jason@zx2c4.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Li Nan <linan122@huawei.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Song Liu <song@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
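The DO_XOR_BLOCKS() dispatch macro referenced in the file below lives in
xor_impl.h, which is not shown here.  As an illustration of the shape the
commit message describes (a single xor_gen_<name>() helper that walks the
whole source list in one call, folding at most four sources per step), it
might look roughly like the following sketch; this is an assumption, and the
actual definition in xor_impl.h may differ:

/*
 * Hypothetical sketch only; the real DO_XOR_BLOCKS() in xor_impl.h may
 * differ.  It builds xor_gen_##name(), which XORs all source buffers into
 * dest by calling the low-level helpers on up to four sources at a time.
 */
#define DO_XOR_BLOCKS(name, xor2, xor3, xor4, xor5)			\
static void xor_gen_##name(void *dest, void **srcs,			\
		unsigned int src_cnt, unsigned int bytes)		\
{									\
	unsigned int i = 0;						\
									\
	/* Fold four sources at a time into dest. */			\
	while (src_cnt - i >= 4) {					\
		xor5(bytes, dest, srcs[i], srcs[i + 1],			\
				srcs[i + 2], srcs[i + 3]);		\
		i += 4;							\
	}								\
	/* Handle the remaining one to three sources. */		\
	switch (src_cnt - i) {						\
	case 3:								\
		xor4(bytes, dest, srcs[i], srcs[i + 1], srcs[i + 2]);	\
		break;							\
	case 2:								\
		xor3(bytes, dest, srcs[i], srcs[i + 1]);		\
		break;							\
	case 1:								\
		xor2(bytes, dest, srcs[i]);				\
		break;							\
	}								\
}

With such a macro, the DO_XOR_BLOCKS(avx_inner, xor_avx_2, xor_avx_3,
xor_avx_4, xor_avx_5) line in the file generates xor_gen_avx_inner(), which
xor_gen_avx() then wraps in a single kernel_fpu_begin()/kernel_fpu_end()
pair.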
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Optimized XOR parity functions for AVX
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
 *
 * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
 */
#include <linux/compiler.h>
#include <asm/fpu/api.h>
#include "xor_impl.h"
#include "xor_arch.h"

#define BLOCK4(i) \
		BLOCK(32 * i, 0) \
		BLOCK(32 * (i + 1), 1) \
		BLOCK(32 * (i + 2), 2) \
		BLOCK(32 * (i + 3), 3)

#define BLOCK16() \
		BLOCK4(0) \
		BLOCK4(4) \
		BLOCK4(8) \
		BLOCK4(12)

/*
 * Each helper below folds its source buffers into p0, processing 512 bytes
 * (16 32-byte ymm blocks) per loop iteration.  The caller is expected to
 * have entered kernel FPU context already.
 */
static void xor_avx_2(unsigned long bytes, unsigned long * __restrict p0,
	const unsigned long * __restrict p1)
{
	unsigned long lines = bytes >> 9;

	while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
	asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p0[i / sizeof(*p0)])); \
	asm volatile("vmovdqa %%ymm" #reg ", %0" : \
		"=m" (p0[i / sizeof(*p0)])); \
} while (0);

		BLOCK16()

		p0 = (unsigned long *)((uintptr_t)p0 + 512);
		p1 = (unsigned long *)((uintptr_t)p1 + 512);
	}
}

static void xor_avx_3(unsigned long bytes, unsigned long * __restrict p0,
	const unsigned long * __restrict p1,
	const unsigned long * __restrict p2)
{
	unsigned long lines = bytes >> 9;

	while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
	asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p1[i / sizeof(*p1)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p0[i / sizeof(*p0)])); \
	asm volatile("vmovdqa %%ymm" #reg ", %0" : \
		"=m" (p0[i / sizeof(*p0)])); \
} while (0);

		BLOCK16()

		p0 = (unsigned long *)((uintptr_t)p0 + 512);
		p1 = (unsigned long *)((uintptr_t)p1 + 512);
		p2 = (unsigned long *)((uintptr_t)p2 + 512);
	}
}

static void xor_avx_4(unsigned long bytes, unsigned long * __restrict p0,
	const unsigned long * __restrict p1,
	const unsigned long * __restrict p2,
	const unsigned long * __restrict p3)
{
	unsigned long lines = bytes >> 9;

	while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
	asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p2[i / sizeof(*p2)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p1[i / sizeof(*p1)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p0[i / sizeof(*p0)])); \
	asm volatile("vmovdqa %%ymm" #reg ", %0" : \
		"=m" (p0[i / sizeof(*p0)])); \
} while (0);

		BLOCK16();

		p0 = (unsigned long *)((uintptr_t)p0 + 512);
		p1 = (unsigned long *)((uintptr_t)p1 + 512);
		p2 = (unsigned long *)((uintptr_t)p2 + 512);
		p3 = (unsigned long *)((uintptr_t)p3 + 512);
	}
}

static void xor_avx_5(unsigned long bytes, unsigned long * __restrict p0,
	const unsigned long * __restrict p1,
	const unsigned long * __restrict p2,
	const unsigned long * __restrict p3,
	const unsigned long * __restrict p4)
{
	unsigned long lines = bytes >> 9;

	while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
	asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p4[i / sizeof(*p4)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p3[i / sizeof(*p3)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p2[i / sizeof(*p2)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p1[i / sizeof(*p1)])); \
	asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
		"m" (p0[i / sizeof(*p0)])); \
	asm volatile("vmovdqa %%ymm" #reg ", %0" : \
		"=m" (p0[i / sizeof(*p0)])); \
} while (0);

		BLOCK16()

		p0 = (unsigned long *)((uintptr_t)p0 + 512);
		p1 = (unsigned long *)((uintptr_t)p1 + 512);
		p2 = (unsigned long *)((uintptr_t)p2 + 512);
		p3 = (unsigned long *)((uintptr_t)p3 + 512);
		p4 = (unsigned long *)((uintptr_t)p4 + 512);
	}
}

/* Generate xor_gen_avx_inner(), dispatching to the helpers above. */
DO_XOR_BLOCKS(avx_inner, xor_avx_2, xor_avx_3, xor_avx_4, xor_avx_5);

/* Enter the FPU/vector context once for the whole gen_xor request. */
static void xor_gen_avx(void *dest, void **srcs, unsigned int src_cnt,
		unsigned int bytes)
{
	kernel_fpu_begin();
	xor_gen_avx_inner(dest, srcs, src_cnt, bytes);
	kernel_fpu_end();
}

struct xor_block_template xor_block_avx = {
	.name = "avx",
	.xor_gen = xor_gen_avx,
};
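For context (not part of the file): a caller reaches this code through the
template's xor_gen hook.  Note from the helpers above that only whole
512-byte chunks are processed (bytes >> 9) and that vmovdqa requires the
buffers to be 32-byte aligned.  A hypothetical invocation with two source
buffers, assuming dest, src_a and src_b are suitably aligned and sized:

	/* Hypothetical usage; the buffer names are illustrative. */
	void *srcs[2] = { src_a, src_b };

	/* dest ^= src_a ^ src_b over one page. */
	xor_block_avx.xor_gen(dest, srcs, 2, PAGE_SIZE);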