mirror of
https://github.com/torvalds/linux.git
synced 2026-04-22 08:44:02 -04:00
Pull drm updates from Dave Airlie:
"This adds a couple of patches outside the drm core, all should be
acked appropriately, the string and pstore ones are the main ones that
come to mind.
Otherwise it's the usual drivers, xe is getting enabled by default on
some new hardware, we've changed the device number handling to allow
more devices, and we added some optional rust code to create QR codes
in the panic handler, an idea first suggested I think 10 years ago :-)
string:
- add mem_is_zero()
core:
- support more device numbers
- use XArray for minor ids
- add backlight constants
- Split dma fence array creation into alloc and arm
fbdev:
- remove usage of old fbdev hooks
kms:
- Add might_fault() to drm_modeset_lock priming
- Add dynamic per-crtc vblank configuration support
dma-buf:
- docs cleanup
buddy:
- Add start address support for trim function
printk:
- pass description to kmsg_dump
scheduler:
- Remove full_recover from drm_sched_start
ttm:
- Make LRU walk restartable after dropping locks
- Allow direct reclaim to allocate local memory
panic:
- add display QR code (in rust)
displayport:
- mst: GUID improvements
bridge:
- Silence error message on -EPROBE_DEFER
- analogix: Clean up
- bridge-connector: Fix double free
- lt6505: Disable interrupt when powered off
- tc358767: Make default DP port preemphasis configurable
- lt9611uxc: require DRM_BRIDGE_ATTACH_NO_CONNECTOR
- anx7625: simplify OF array handling
- dw-hdmi: simplify clock handling
- lontium-lt8912b: fix mode validation
- nwl-dsi: fix mode vsync/hsync polarity
xe:
- Enable LunarLake and Battlemage support
- Introducing Xe2 ccs modifiers for integrated and discrete graphics
- rename xe perf to xe observation
- use wb caching on DGFX for system memory
- add fence timeouts
- Lunar Lake graphics/media/display workarounds
- Battlemage workarounds
- Battlemage GSC support
- GSC and HuC fw updates for LL/BM
- use dma_fence_chain_free
- refactor hw engine lookup and mmio access
- enable priority mem read for Xe2
- Add first GuC BMG fw
- fix dma-resv lock
- Fix DGFX display suspend/resume
- Use xe_managed for kernel BOs
- Use reserved copy engine for user binds on faulting devices
- Allow mixing dma-fence jobs and long-running faulting jobs
- fix media TLB invalidation
- fix rpm in TTM swapout path
- track resources and VF state by PF
i915:
- Type-C programming fix for MTL+
- FBC cleanup
- Calc vblank delay more accurately
- On DP MST, Enable LT fallback for UHBR<->non-UHBR rates
- Fix DP LTTPR detection
- limit relocations to INT_MAX
- fix long hangs in buddy allocator on DG2/A380
amdgpu:
- Per-queue reset support
- SDMA devcoredump support
- DCN 4.0.1 updates
- GFX12/VCN4/JPEG4 updates
- Convert vbios embedded EDID to drm_edid
- GFX9.3/9.4 devcoredump support
- process isolation framework for GFX 9.4.3/4
- take IOMMU mappings into account for P2P DMA
amdkfd:
- CRIU fixes
- HMM fix
- Enable process isolation support for GFX 9.4.3/4
- Allow users to target recommended SDMA engines
- KFD support for targeting queues on recommended SDMA engines
radeon:
- remove .load and drm_dev_alloc
- Fix vbios embedded EDID size handling
- Convert vbios embedded EDID to drm_edid
- Use GEM references instead of TTM
- r100 cp init cleanup
- Fix potential overflows in evergreen CS offset tracking
msm:
- DPU:
- implement DP/PHY mapping on SC8180X
- Enable writeback on SM8150, SC8180X, SM6125, SM6350
- DP:
- Enable widebus on all relevant chipsets
- MSM8998 HDMI support
- GPU:
- A642L speedbin support
- A615/A306/A621 support
- A7xx devcoredump support
ast:
- astdp: Support AST2600 with VGA
- Clean up HPD
- Fix timeout loop for DP link training
- reorganize output code by type (VGA, DP, etc)
- convert to struct drm_edid
- fix BMC handling for all outputs
exynos:
- drop stale MAINTAINERS pattern
- constify struct
loongson:
- use GEM refcount over TTM
mgag200:
- Improve BMC handling
- Support VBLANK interrupts
- transparently support BMC outputs
nouveau:
- Refactor and clean up internals
- Use GEM refcount over TTM's
gm12u320:
- convert to struct drm_edid
gma500:
- update i2c terms
lcdif:
- pixel clock fix
host1x:
- fix syncpoint IRQ during resume
- use iommu_paging_domain_alloc()
imx:
- ipuv3: convert to struct drm_edid
omapdrm:
- improve error handling
- use common helper for_each_endpoint_of_node()
panel:
- add support for BOE TV101WUM-LL2 plus DT bindings
- novatek-nt35950: improve error handling
- nv3051d: improve error handling
- panel-edp:
- add support for BOE NE140WUM-N6G
- revert support for SDC ATNA45AF01
- visionox-vtdr6130:
- improve error handling
- use devm_regulator_bulk_get_const()
- boe-th101mb31ig002:
- Support for starry-er88577 MIPI-DSI panel plus DT
- Fix porch parameter
- edp: Support AUO B116XTN02.3, AUO B116XAN06.1, AUO B116XAT04.1, BOE
NV140WUM-N41, BOE NV133WUM-N63, BOE NV116WHM-A4D, CMN N116BCA-EA2,
CMN N116BCP-EA2, CSW MNB601LS1-4
- himax-hx8394: Support Microchip AC40T08A MIPI Display panel plus DT
- ilitek-ili9806e: Support Densitron DMT028VGHMCMI-1D TFT plus DT
- jd9365da:
- Support Melfas lmfbx101117480 MIPI-DSI panel plus DT
- Refactor for code sharing
- panel-edp: fix name for HKC MB116AN01
- jd9365da: fix "exit sleep" commands
- jdi-fhd-r63452: simplify error handling with DSI multi-style
helpers
- mantix-mlaf057we51: simplify error handling with DSI multi-style
helpers
- simple:
- support Innolux G070ACE-LH3 plus DT bindings
- support On Tat Industrial Company KD50G21-40NT-A1 plus DT
bindings
- st7701:
- decouple DSI and DRM code
- add SPI support
- support Anbernic RG28XX plus DT bindings
mediatek:
- support alpha blending
- remove cl in struct cmdq_pkt
- ovl adaptor fix
- add power domain binding for mediatek DPI controller
renesas:
- rz-du: add support for RZ/G2UL plus DT bindings
rockchip:
- Improve DP sink-capability reporting
- dw_hdmi: Support 4k@60Hz
- vop:
- Support RGB display on Rockchip RK3066
- Support 4096px width
sti:
- convert to struct drm_edid
stm:
- Avoid UAF with managed plane and CRTC helpers
- Fix module owner
- Fix error handling in probe
- Depend on COMMON_CLK
- ltdc:
- Fix transparency after disabling plane
- Remove unused interrupt
tegra:
- gr3d: improve PM domain handling
- convert to struct drm_edid
- Call drm_atomic_helper_shutdown()
vc4:
- fix PM during detect
- replace DRM_ERROR() with drm_error()
- v3d: simplify clock retrieval
v3d:
- Clean up perfmon
virtio:
- add DRM capset"
* tag 'drm-next-2024-09-19' of https://gitlab.freedesktop.org/drm/kernel: (1326 commits)
drm/xe: Fix missing conversion to xe_display_pm_runtime_resume
drm/xe/xe2hpg: Add Wa_15016589081
drm/xe: Don't keep stale pointer to bo->ggtt_node
drm/xe: fix missing 'xe_vm_put'
drm/xe: fix build warning with CONFIG_PM=n
drm/xe: Suppress missing outer rpm protection warning
drm/xe: prevent potential UAF in pf_provision_vf_ggtt()
drm/amd/display: Add all planes on CRTC to state for overlay cursor
drm/i915/bios: fix printk format width
drm/i915/display: Fix BMG CCS modifiers
drm/amdgpu: get rid of bogus includes of fdtable.h
drm/amdkfd: CRIU fixes
drm/amdgpu: fix a race in kfd_mem_export_dmabuf()
drm: new helper: drm_gem_prime_handle_to_dmabuf()
drm/amdgpu/atomfirmware: Silence UBSAN warning
drm/amdgpu: Fix kdoc entry in 'amdgpu_vm_cpu_prepare'
drm/amd/amdgpu: apply command submission parser for JPEG v1
drm/amd/amdgpu: apply command submission parser for JPEG v2+
drm/amd/pm: fix the pp_dpm_pcie issue on smu v14.0.2/3
drm/amd/pm: update the features set on smu v14.0.2/3
...
975 lines
20 KiB
C
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	destroy_workqueue(gt->ordered_wq);
}

struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;
	int err;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return ERR_PTR(err);

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, on TGL driver will not
	 * reload
	 */
	gt->uc.guc.submission_state.enabled = false;
}

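/*
 * Enable the host L2/VRAM caching setup required by workaround 16023588340.
 * This is a no-op on platforms where that workaround does not apply.
 */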
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;
	int err;

	if (!XE_WA(gt, 16023588340))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (WARN_ON(err))
		return;

	if (!xe_gt_is_media_type(gt)) {
		xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;
	int err;

	if (!XE_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (WARN_ON(err))
		return;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

/**
 * xe_gt_remove() - Clean up the GT structures before driver removal
 * @gt: the GT object
 *
 * This function should only act on objects/structures that must be cleaned
 * before the driver removal callback is complete and therefore can't be
 * deferred to a drmm action.
 */
void xe_gt_remove(struct xe_gt *gt)
{
	int i;

	xe_uc_remove(&gt->uc);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

static void gt_reset_worker(struct work_struct *w);

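/*
 * Submit a trivial batch buffer on @q and wait up to one second for it to
 * complete. Used while recording default LRCs to force context switches.
 */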
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from encoded value to type-safe, only to be used when reg.mcr
 * is true
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

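/*
 * Emit the engine's LRC save/restore list (workarounds and tuning) as
 * MI_LOAD_REGISTER_IMM writes, plus any extra engine state instructions,
 * so that the values end up in the recorded default context image.
 */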
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register if it's not really needed
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
			else
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

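/*
 * Record a default ("golden") LRC image for each engine class: prime a
 * kernel exec queue with the workaround job, switch contexts with nop jobs,
 * then copy the resulting context image into gt->default_lrc[].
 */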
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

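/* Initialization steps that only need the GT forcewake domain held. */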
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version. Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

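/*
 * Initialization steps that need all forcewake domains held, including
 * firmware load and, on the primary GT, migration setup.
 */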
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its only SA pool to non-block behind user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

/*
 * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
 * enable CTB communication.
 */
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto out;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);

	err = xe_uc_init(&gt->uc);
	if (err)
		goto out_fw;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto out_fw;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_gt_enable_host_l2_vram(gt);

out_fw:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
out:
	return err;
}

int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	return 0;
}

void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

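/* Issue a full GT reset via GDRST and wait for the hardware to clear it. */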
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

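/*
 * Bring the GT back up after a reset or resume: reprogram PAT, MCR and
 * save/restore registers, reload firmware, re-enable the rings, and restore
 * CCS mode and frequencies. VFs take the reduced vf_gt_restart() path.
 */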
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

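/*
 * Perform a full GT reset: stop GuC submission, reset the hardware and
 * restart it. On failure the whole device is declared wedged.
 */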
static int gt_reset(struct xe_gt *gt)
{
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		return -ECANCELED;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	xe_pm_runtime_get(gt_to_xe(gt));

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	xe_uc_stop(&gt->uc);

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	xe_device_declare_wedged(gt_to_xe(gt));
	xe_pm_runtime_put(gt_to_xe(gt));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

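/*
 * Request an asynchronous GT reset; the actual reset runs from
 * gt_reset_worker() on the GT's ordered workqueue.
 */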
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "resuming\n");
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_gt_idle_enable_pg(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

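/* Look up a hardware engine on @gt by class and physical or logical instance. */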
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_gt_tlb_invalidation_reset(gt);
}
|