Files
linux/lib/raid/xor/arm64/xor-neon.c
Christoph Hellwig 80dcf0a783 xor: pass the entire operation to the low-level ops
Currently the high-level xor code chunks up each operation into small
units covering only up to 1 + 4 vectors, and passes them to one of four
different methods.  This means the FPU/vector context is entered and
left a lot for wide stripes, and a lot of expensive indirect calls are
performed.  Switch to passing the entire gen_xor request to the
low-level ops, and provide a macro to dispatch it to the existing
helpers.

This reduces the number of indirect calls and FPU/vector context
switches by a factor approaching nr_stripes / 4, and also reduces
source and binary code size.
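
For illustration, the dispatch could look roughly like the sketch below.
This is an assumption for clarity only: the real __DO_XOR_BLOCKS lives
in xor_impl.h, which is not shown here, and the actual interface passes
the request as a structure rather than as these loose parameters.

#define __DO_XOR_BLOCKS(name, xor2, xor3, xor4, xor5)			\
static void xor_##name(unsigned long bytes,				\
		unsigned long * __restrict dest,			\
		const unsigned long **srcs, unsigned int src_cnt)	\
{									\
	/* fold up to four sources into dest per helper call */	\
	while (src_cnt >= 4) {						\
		xor5(bytes, dest, srcs[0], srcs[1], srcs[2], srcs[3]);	\
		srcs += 4;						\
		src_cnt -= 4;						\
	}								\
	switch (src_cnt) {						\
	case 3: xor4(bytes, dest, srcs[0], srcs[1], srcs[2]); break;	\
	case 2: xor3(bytes, dest, srcs[0], srcs[1]); break;		\
	case 1: xor2(bytes, dest, srcs[0]); break;			\
	}								\
}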

Link: https://lkml.kernel.org/r/20260327061704.3707577-27-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Sterba <dsterba@suse.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason A. Donenfeld <jason@zx2c4.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Li Nan <linan122@huawei.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Song Liu <song@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2026-04-02 23:36:21 -07:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Authors: Jackie Liu <liuyun01@kylinos.cn>
 * Copyright (C) 2018, Tianjin KYLIN Information Technology Co., Ltd.
 */
#include <linux/cache.h>
#include <asm/neon-intrinsics.h>

#include "xor_impl.h"
#include "xor_arch.h"
#include "xor-neon.h"
static void __xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
	} while (--lines > 0);
}

static void __xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
	} while (--lines > 0);
}

static void __xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3,
		const unsigned long * __restrict p4)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
	} while (--lines > 0);
}

static void __xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3,
		const unsigned long * __restrict p4,
		const unsigned long * __restrict p5)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;
	uint64_t *dp5 = (uint64_t *)p5;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* p1 ^= p5 */
		v0 = veorq_u64(v0, vld1q_u64(dp5 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp5 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp5 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp5 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
		dp5 += 8;
	} while (--lines > 0);
}

__DO_XOR_BLOCKS(neon_inner, __xor_neon_2, __xor_neon_3, __xor_neon_4,
		__xor_neon_5);
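
/*
 * EOR3 (from the ARMv8.2 SHA3 extension) XORs three vectors in a single
 * instruction.  It is emitted via inline asm with a local
 * ".arch_extension sha3" directive, so only this snippet, not the whole
 * file, needs the assembler's SHA3 support.
 */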
static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r)
{
	uint64x2_t res;

	asm(ARM64_ASM_PREAMBLE ".arch_extension sha3\n"
	    "eor3 %0.16b, %1.16b, %2.16b, %3.16b"
	    : "=w"(res) : "w"(p), "w"(q), "w"(r));

	return res;
}
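
/*
 * EOR3-based variants: folding three inputs per instruction cuts the
 * number of XOR instructions on the paths with three or more sources.
 * The two-source case gains nothing from eor3, so the eor3_inner
 * dispatch at the end of the file reuses __xor_neon_2.
 */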
static void __xor_eor3_3(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 ^ p3 */
		v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0),
			  vld1q_u64(dp3 + 0));
		v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2),
			  vld1q_u64(dp3 + 2));
		v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4),
			  vld1q_u64(dp3 + 4));
		v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6),
			  vld1q_u64(dp3 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
	} while (--lines > 0);
}

static void __xor_eor3_4(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3,
		const unsigned long * __restrict p4)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 ^ p3 */
		v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0),
			  vld1q_u64(dp3 + 0));
		v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2),
			  vld1q_u64(dp3 + 2));
		v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4),
			  vld1q_u64(dp3 + 4));
		v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6),
			  vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
	} while (--lines > 0);
}

static void __xor_eor3_5(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3,
		const unsigned long * __restrict p4,
		const unsigned long * __restrict p5)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;
	uint64_t *dp5 = (uint64_t *)p5;
	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 ^ p3 */
		v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0),
			  vld1q_u64(dp3 + 0));
		v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2),
			  vld1q_u64(dp3 + 2));
		v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4),
			  vld1q_u64(dp3 + 4));
		v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6),
			  vld1q_u64(dp3 + 6));

		/* p1 ^= p4 ^ p5 */
		v0 = eor3(v0, vld1q_u64(dp4 + 0), vld1q_u64(dp5 + 0));
		v1 = eor3(v1, vld1q_u64(dp4 + 2), vld1q_u64(dp5 + 2));
		v2 = eor3(v2, vld1q_u64(dp4 + 4), vld1q_u64(dp5 + 4));
		v3 = eor3(v3, vld1q_u64(dp4 + 6), vld1q_u64(dp5 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
		dp5 += 8;
	} while (--lines > 0);
}

__DO_XOR_BLOCKS(eor3_inner, __xor_neon_2, __xor_eor3_3, __xor_eor3_4,
		__xor_eor3_5);
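
For context, a plausible outer wrapper is sketched below. This is an
assumption, not part of this file: the real glue would live in
xor_arch.h, and the xor_neon_inner/xor_eor3_inner names are inferred
from the __DO_XOR_BLOCKS arguments above. The point is that the NEON
context is entered once per request, and the EOR3 path is taken only on
CPUs that implement the SHA3 extension:

#include <asm/cpufeature.h>
#include <asm/neon.h>

static void xor_neon(unsigned long bytes, unsigned long * __restrict dest,
		const unsigned long **srcs, unsigned int src_cnt)
{
	kernel_neon_begin();
	/* a real implementation would likely cache this choice at init */
	if (cpu_have_named_feature(SHA3))
		xor_eor3_inner(bytes, dest, srcs, src_cnt);
	else
		xor_neon_inner(bytes, dest, srcs, src_cnt);
	kernel_neon_end();
}

Wrapping kernel_neon_begin()/kernel_neon_end() once around the whole
request, rather than once per 4-source chunk, is exactly the saving the
commit message above describes.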