drm/amdgpu: Add soc v1_0 ih client id table

To accommodate the specific IH client IDs for soc v1_0.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Hawking Zhang
2025-07-02 16:21:26 +08:00
committed by Alex Deucher
parent 0c9ad47286
commit db9ca58e16
6 changed files with 96 additions and 8 deletions

View File

@@ -99,6 +99,41 @@ const char *soc15_ih_clientid_name[] = {
"MP1"
};
/*
 * Human-readable names for SOC v1.0 IH client IDs.
 *
 * Indexed directly by the client id field of an IH ring entry; designated
 * initializers make the id -> name mapping explicit for each slot.
 */
const char *soc_v1_0_ih_clientid_name[] = {
	[0]  = "IH",
	[1]  = "Reserved",
	[2]  = "ATHUB",
	[3]  = "BIF",
	[4]  = "Reserved",
	[5]  = "Reserved",
	[6]  = "Reserved",
	[7]  = "RLC",
	[8]  = "Reserved",
	[9]  = "Reserved",
	[10] = "GFX",
	[11] = "IMU",
	[12] = "Reserved",
	[13] = "Reserved",
	[14] = "VCN1 or UVD1",
	[15] = "THM",
	[16] = "VCN or UVD",
	[17] = "Reserved",
	[18] = "VMC",
	[19] = "Reserved",
	[20] = "GRBM_CP",
	[21] = "GC_AID",
	[22] = "ROM_SMUIO",
	[23] = "DF",
	[24] = "Reserved",
	[25] = "PWR",
	[26] = "LSDMA",
	[27] = "GC_UTCL2",
	[28] = "nHT",
	[29] = "Reserved",
	[30] = "MP0",
	[31] = "MP1",
};
const int node_id_to_phys_map[NODEID_MAX] = {
[AID0_NODEID] = 0,
[XCD0_NODEID] = 0,

View File

@@ -26,6 +26,7 @@
#include <linux/irqdomain.h>
#include "soc15_ih_clientid.h"
#include "soc_v1_0_ih_clientid.h"
#include "amdgpu_ih.h"
#define AMDGPU_MAX_IRQ_SRC_ID 0x100

View File

@@ -1140,21 +1140,21 @@ static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
&adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)

View File

@@ -124,7 +124,7 @@ static int gmc_v12_1_process_interrupt(struct amdgpu_device *adev,
write_fault = !!(entry->src_data[1] & 0x200000);
}
if (entry->client_id == SOC21_IH_CLIENTID_VMC) {
if (entry->client_id == SOC_V1_0_IH_CLIENTID_VMC) {
hub_name = "mmhub0";
vmhub = AMDGPU_MMHUB0(node_id / 4);
} else {
@@ -198,8 +198,8 @@ static int gmc_v12_1_process_interrupt(struct amdgpu_device *adev,
amdgpu_vm_put_task_info(task_info);
}
dev_err(adev->dev, " in page starting at address 0x%016llx from IH client %d\n",
addr, entry->client_id);
dev_err(adev->dev, " in page starting at address 0x%016llx from IH client %d (%s)\n",
addr, entry->client_id, soc_v1_0_ih_clientid_name[entry->client_id]);
if (amdgpu_sriov_vf(adev))
return 0;

View File

@@ -1277,7 +1277,7 @@ static int sdma_v7_1_sw_init(struct amdgpu_ip_block *ip_block)
u32 xcc_id;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
@@ -1526,7 +1526,7 @@ static int sdma_v7_1_process_trap_irq(struct amdgpu_device *adev,
}
switch (entry->client_id) {
case SOC21_IH_CLIENTID_GFX:
case SOC_V1_0_IH_CLIENTID_GFX:
switch (queue) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instances].ring);