Files
linux/tools/perf/util/kvm-stat-arch/kvm-stat-loongarch.c
Ian Rogers 43af548436 perf kvm: Wire up e_machine
Pass the e_machine to the kvm functions so that they aren't just wired
to EM_HOST.

In the case of a session, move some setup to after the session
is created.

As the session isn't fully running, the default EM_HOST is returned as no
e_machine can be found in a running machine.

This is, however, some marginal progress toward cross-platform support.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Aditya Bodkhe <aditya.b1@linux.ibm.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: Anubhav Shelat <ashelat@redhat.com>
Cc: Anup Patel <anup@brainfault.org>
Cc: Athira Rajeev <atrajeev@linux.ibm.com>
Cc: Blake Jones <blakejones@google.com>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <pjw@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quan Zhou <zhouquan@iscas.ac.cn>
Cc: Shimin Guo <shimin.guo@skydio.com>
Cc: Swapnil Sapkal <swapnil.sapkal@amd.com>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yunseong Kim <ysk@kzalloc.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2026-02-03 18:01:27 -03:00

153 lines
4.0 KiB
C

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <memory.h>
#include <dwarf-regs.h>
#include "../kvm-stat.h"
#include "../parse-events.h"
#include "../debug.h"
#include "../evsel.h"
#include "../evlist.h"
#include "../pmus.h"
#define LOONGARCH_EXCEPTION_INT 0
#define LOONGARCH_EXCEPTION_PIL 1
#define LOONGARCH_EXCEPTION_PIS 2
#define LOONGARCH_EXCEPTION_PIF 3
#define LOONGARCH_EXCEPTION_PME 4
#define LOONGARCH_EXCEPTION_FPD 15
#define LOONGARCH_EXCEPTION_SXD 16
#define LOONGARCH_EXCEPTION_ASXD 17
#define LOONGARCH_EXCEPTION_GSPR 22
#define LOONGARCH_EXCEPTION_CPUCFG 100
#define LOONGARCH_EXCEPTION_CSR 101
#define LOONGARCH_EXCEPTION_IOCSR 102
#define LOONGARCH_EXCEPTION_IDLE 103
#define LOONGARCH_EXCEPTION_OTHERS 104
#define LOONGARCH_EXCEPTION_HVC 23
#define loongarch_exception_type \
{LOONGARCH_EXCEPTION_INT, "Interrupt" }, \
{LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \
{LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \
{LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \
{LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \
{LOONGARCH_EXCEPTION_FPD, "FPU" }, \
{LOONGARCH_EXCEPTION_SXD, "LSX" }, \
{LOONGARCH_EXCEPTION_ASXD, "LASX" }, \
{LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \
{LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \
{LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \
{LOONGARCH_EXCEPTION_CSR, "CSR" }, \
{LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \
{LOONGARCH_EXCEPTION_IDLE, "Idle" }, \
{LOONGARCH_EXCEPTION_OTHERS, "Others" }
define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type);
static const char *kvm_reenter_trace = "kvm:kvm_reenter";
static const char * const __kvm_events_tp[] = {
"kvm:kvm_enter",
"kvm:kvm_reenter",
"kvm:kvm_exit",
"kvm:kvm_exit_gspr",
NULL,
};
static bool event_begin(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
return exit_event_begin(evsel, sample, key);
}
static bool event_end(struct evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
/*
* LoongArch kvm is different with other architectures
*
* There is kvm:kvm_reenter or kvm:kvm_enter event adjacent with
* kvm:kvm_exit event.
* kvm:kvm_enter means returning to vmm and then to guest
* kvm:kvm_reenter means returning to guest immediately
*/
return evsel__name_is(evsel, kvm_entry_trace(EM_LOONGARCH)) ||
evsel__name_is(evsel, kvm_reenter_trace);
}
static void event_gspr_get_key(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
unsigned int insn;
key->key = LOONGARCH_EXCEPTION_OTHERS;
insn = evsel__intval(evsel, sample, "inst_word");
switch (insn >> 24) {
case 0:
/* CPUCFG inst trap */
if ((insn >> 10) == 0x1b)
key->key = LOONGARCH_EXCEPTION_CPUCFG;
break;
case 4:
/* CSR inst trap */
key->key = LOONGARCH_EXCEPTION_CSR;
break;
case 6:
/* IOCSR inst trap */
if ((insn >> 15) == 0xc90)
key->key = LOONGARCH_EXCEPTION_IOCSR;
else if ((insn >> 15) == 0xc91)
/* Idle inst trap */
key->key = LOONGARCH_EXCEPTION_IDLE;
break;
default:
key->key = LOONGARCH_EXCEPTION_OTHERS;
break;
}
}
static const struct child_event_ops child_events[] = {
{ .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key },
{ NULL, NULL },
};
static const struct kvm_events_ops exit_events = {
.is_begin_event = event_begin,
.is_end_event = event_end,
.child_ops = child_events,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events, },
{ NULL, NULL },
};
static const char * const __kvm_skip_events[] = {
NULL,
};
int __cpu_isa_init_loongarch(struct perf_kvm_stat *kvm)
{
kvm->exit_reasons_isa = "loongarch64";
kvm->exit_reasons = loongarch_exit_reasons;
return 0;
}
const char * const *__kvm_events_tp_loongarch(void)
{
return __kvm_events_tp;
}
const struct kvm_reg_events_ops *__kvm_reg_events_ops_loongarch(void)
{
return __kvm_reg_events_ops;
}
const char * const *__kvm_skip_events_loongarch(void)
{
return __kvm_skip_events;
}