mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
Currently the high-level xor code chunks up all operations into small units for only up to 1 + 4 vectors, and passes it to four different methods. This means the FPU/vector context is entered and left a lot for wide stripes, and a lot of expensive indirect calls are performed. Switch to passing the entire gen_xor request to the low-level ops, and provide a macro to dispatch it to the existing helper. This reduces the number of indirect calls and FPU/vector context switches by a factor approaching nr_stripes / 4, and also reduces source and binary code size. Link: https://lkml.kernel.org/r/20260327061704.3707577-27-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Eric Biggers <ebiggers@kernel.org> Tested-by: Eric Biggers <ebiggers@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Andreas Larsson <andreas@gaisler.com> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Mason <clm@fb.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: David Sterba <dsterba@suse.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jason A. 
Donenfeld <jason@zx2c4.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Li Nan <linan122@huawei.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Magnus Lindholm <linmag7@gmail.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Song Liu <song@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Ted Ts'o <tytso@mit.edu> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
161 lines
2.9 KiB
C
161 lines
2.9 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/*
|
|
*
|
|
* Copyright (C) IBM Corporation, 2012
|
|
*
|
|
* Author: Anton Blanchard <anton@au.ibm.com>
|
|
*/
|
|
|
|
/*
|
|
* Sparse (as at v0.5.0) gets very, very confused by this file.
|
|
* Make it a bit simpler for it.
|
|
*/
|
|
#include "xor_impl.h"
|
|
#if !defined(__CHECKER__)
|
|
#include <altivec.h>
|
|
#else
|
|
#define vec_xor(a, b) a ^ b
|
|
#define vector __attribute__((vector_size(16)))
|
|
#endif
|
|
|
|
#include "xor_vmx.h"
|
|
|
|
typedef vector signed char unative_t;
|
|
|
|
/*
 * Declare the per-source working set for source V: a vector-typed
 * alias "V" of the scalar buffer parameter V##_in (the cast also
 * drops const for the read-only sources), plus four vector
 * temporaries V_0..V_3 that hold one 64-byte group at a time.
 */
#define DEFINE(V) \
	unative_t *V = (unative_t *)V##_in; \
	unative_t V##_0, V##_1, V##_2, V##_3
|
|
|
|
/* Load four consecutive 16-byte vectors from buffer V into V_0..V_3. */
#define LOAD(V) \
	do { \
		V##_0 = V[0]; \
		V##_1 = V[1]; \
		V##_2 = V[2]; \
		V##_3 = V[3]; \
	} while (0)
|
|
|
|
/* Store the four vector temporaries V_0..V_3 back to buffer V. */
#define STORE(V) \
	do { \
		V[0] = V##_0; \
		V[1] = V##_1; \
		V[2] = V##_2; \
		V[3] = V##_3; \
	} while (0)
|
|
|
|
/*
 * XOR each of V2's four vector temporaries into the corresponding
 * temporary of V1 (V1 ^= V2, 64 bytes at a time, in registers).
 */
#define XOR(V1, V2) \
	do { \
		V1##_0 = vec_xor(V1##_0, V2##_0); \
		V1##_1 = vec_xor(V1##_1, V2##_1); \
		V1##_2 = vec_xor(V1##_2, V2##_2); \
		V1##_3 = vec_xor(V1##_3, V2##_3); \
	} while (0)
|
|
|
|
/*
 * Two-source xor: v1_in ^= v2_in over "bytes" bytes, using AltiVec
 * vector registers, processing four 16-byte vectors (64 bytes) per
 * loop iteration.
 *
 * The do/while loop always runs at least once and "lines" is
 * bytes / sizeof(unative_t) / 4, so the caller must pass a non-zero
 * multiple of 64 bytes.  NOTE(review): callers presumably also enter
 * the vector/FPU context around this -- confirm at the call sites.
 */
static void __xor_altivec_2(unsigned long bytes,
			    unsigned long * __restrict v1_in,
			    const unsigned long * __restrict v2_in)
{
	DEFINE(v1);
	DEFINE(v2);
	/* Number of 64-byte groups to process. */
	unsigned long lines = bytes / (sizeof(unative_t)) / 4;

	do {
		LOAD(v1);
		LOAD(v2);
		XOR(v1, v2);
		STORE(v1);

		/* Advance both buffers by one 4-vector group. */
		v1 += 4;
		v2 += 4;
	} while (--lines > 0);
}
|
|
|
|
/*
 * Three-source xor: v1_in ^= v2_in ^ v3_in over "bytes" bytes,
 * 64 bytes (four 16-byte vectors) per loop iteration.
 *
 * As in __xor_altivec_2, the do/while loop requires "bytes" to be a
 * non-zero multiple of 64.
 */
static void __xor_altivec_3(unsigned long bytes,
			    unsigned long * __restrict v1_in,
			    const unsigned long * __restrict v2_in,
			    const unsigned long * __restrict v3_in)
{
	DEFINE(v1);
	DEFINE(v2);
	DEFINE(v3);
	/* Number of 64-byte groups to process. */
	unsigned long lines = bytes / (sizeof(unative_t)) / 4;

	do {
		LOAD(v1);
		LOAD(v2);
		LOAD(v3);
		XOR(v1, v2);
		XOR(v1, v3);
		STORE(v1);

		/* Advance all buffers by one 4-vector group. */
		v1 += 4;
		v2 += 4;
		v3 += 4;
	} while (--lines > 0);
}
|
|
|
|
/*
 * Four-source xor: v1_in ^= v2_in ^ v3_in ^ v4_in over "bytes" bytes,
 * 64 bytes (four 16-byte vectors) per loop iteration.
 *
 * The xor tree is paired as (v1^v2) and (v3^v4) first: the two XOR
 * groups touch disjoint registers, so they are independent and the
 * vector unit can overlap them before the final combining XOR.
 *
 * As in __xor_altivec_2, the do/while loop requires "bytes" to be a
 * non-zero multiple of 64.
 */
static void __xor_altivec_4(unsigned long bytes,
			    unsigned long * __restrict v1_in,
			    const unsigned long * __restrict v2_in,
			    const unsigned long * __restrict v3_in,
			    const unsigned long * __restrict v4_in)
{
	DEFINE(v1);
	DEFINE(v2);
	DEFINE(v3);
	DEFINE(v4);
	/* Number of 64-byte groups to process. */
	unsigned long lines = bytes / (sizeof(unative_t)) / 4;

	do {
		LOAD(v1);
		LOAD(v2);
		LOAD(v3);
		LOAD(v4);
		XOR(v1, v2);
		XOR(v3, v4);
		XOR(v1, v3);
		STORE(v1);

		/* Advance all buffers by one 4-vector group. */
		v1 += 4;
		v2 += 4;
		v3 += 4;
		v4 += 4;
	} while (--lines > 0);
}
|
|
|
|
/*
 * Five-source xor: v1_in ^= v2_in ^ v3_in ^ v4_in ^ v5_in over
 * "bytes" bytes, 64 bytes (four 16-byte vectors) per loop iteration.
 *
 * The xor tree groups (v1^v2) and (v3^v4) as independent register
 * sets before folding in v5 and the combined v3 result, keeping
 * independent XOR work available to the vector unit.
 *
 * As in __xor_altivec_2, the do/while loop requires "bytes" to be a
 * non-zero multiple of 64.
 */
static void __xor_altivec_5(unsigned long bytes,
			    unsigned long * __restrict v1_in,
			    const unsigned long * __restrict v2_in,
			    const unsigned long * __restrict v3_in,
			    const unsigned long * __restrict v4_in,
			    const unsigned long * __restrict v5_in)
{
	DEFINE(v1);
	DEFINE(v2);
	DEFINE(v3);
	DEFINE(v4);
	DEFINE(v5);
	/* Number of 64-byte groups to process. */
	unsigned long lines = bytes / (sizeof(unative_t)) / 4;

	do {
		LOAD(v1);
		LOAD(v2);
		LOAD(v3);
		LOAD(v4);
		LOAD(v5);
		XOR(v1, v2);
		XOR(v3, v4);
		XOR(v1, v5);
		XOR(v1, v3);
		STORE(v1);

		/* Advance all buffers by one 4-vector group. */
		v1 += 4;
		v2 += 4;
		v3 += 4;
		v4 += 4;
		v5 += 4;
	} while (--lines > 0);
}
|
|
|
|
/*
 * Instantiate the "altivec_inner" xor-block entry point from the
 * 2/3/4/5-source helpers above.  __DO_XOR_BLOCKS is defined in
 * xor_impl.h, which is not visible here; NOTE(review): presumably it
 * expands to a dispatcher that walks the whole gen_xor request and
 * calls the helper matching the source count -- confirm against
 * xor_impl.h.
 */
__DO_XOR_BLOCKS(altivec_inner, __xor_altivec_2, __xor_altivec_3,
		__xor_altivec_4, __xor_altivec_5);