riscv, bpf: Introduce shift add helper with Zba optimization

The Zba extension is very useful for generating addresses that index into
arrays of basic data types. This patch introduces sh2add and sh3add helpers
for RV32 and RV64, respectively, to accelerate addressing for arrays of
unsigned long data.

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/bpf/20240524075543.4050464-3-xiao.w.wang@intel.com
This commit is contained in:
Xiao Wang
2024-05-24 15:55:43 +08:00
committed by Daniel Borkmann
parent 531876c800
commit 96a27ee76f
3 changed files with 37 additions and 8 deletions

View File

@@ -742,6 +742,17 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
return rv_css_insn(0x6, imm, rs2, 0x2);
}
/* RVZBA instructions. */
/* Encode Zba sh2add: rd = (rs1 << 2) + rs2 (R-type). */
static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2)
{
	/* funct7=0x10, funct3=0x4, opcode=0x33 (OP) per the Zba spec. */
	u8 funct7 = 0x10, funct3 = 0x4, opcode = 0x33;

	return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
}
/* Encode Zba sh3add: rd = (rs1 << 3) + rs2 (R-type). */
static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2)
{
	/* funct7=0x10, funct3=0x6, opcode=0x33 (OP) per the Zba spec. */
	u8 funct7 = 0x10, funct3 = 0x6, opcode = 0x33;

	return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
}
/* RVZBB instructions. */
static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
{
@@ -1095,6 +1106,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
emit(rv_sw(rs1, off, rs2), ctx);
}
/*
 * Emit code computing rd = rs2 + (rs1 << 2).
 *
 * Uses a single Zba sh2add when the extension is available; otherwise
 * falls back to a shift-then-add pair (clobbers rd as a scratch register).
 */
static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvzba_enabled()) {
		emit(rvzba_sh2add(rd, rs1, rs2), ctx);
	} else {
		emit_slli(rd, rs1, 2, ctx);
		emit_add(rd, rd, rs2, ctx);
	}
}
/*
 * Emit code computing rd = rs2 + (rs1 << 3).
 *
 * Uses a single Zba sh3add when the extension is available; otherwise
 * falls back to a shift-then-add pair (clobbers rd as a scratch register).
 */
static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvzba_enabled()) {
		emit(rvzba_sh3add(rd, rs1, rs2), ctx);
	} else {
		emit_slli(rd, rs1, 3, ctx);
		emit_add(rd, rd, rs2, ctx);
	}
}
/* RV64-only helper functions. */
#if __riscv_xlen == 64