mirror of
https://github.com/torvalds/linux.git
synced 2026-04-30 04:22:32 -04:00
GuC loading can take longer than it is supposed to for various reasons. So add in the code to cope with that and to report it when it happens. There are also many different reasons why GuC loading can fail, so add in the code for checking for those and for reporting issues in a meaningful manner rather than just hitting a timeout and saying 'fail: status = %x'. Also, remove the 'FIXME' comment about an i915 bug that has never been applicable to Xe! v2: Actually report the requested and granted frequencies rather than showing granted twice (review feedback from Badal). v3: Locally code all the timeout and end condition handling because a helper function is not allowed (review feedback from Lucas/Rodrigo). v4: Add more documentation comments and rename a define to add units (review feedback from Lucas). v5: Fix copy/paste error in xe_mmio_wait32_not (review feedback from Lucas) and rebase (no more return value from guc_wait_ucode). Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20240518043700.3264362-3-John.C.Harrison@Intel.com
42 lines · 1.3 KiB · C
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021-2023 Intel Corporation
 */

/*
 * xe_mmio.h - MMIO register access interface for the Xe GPU driver.
 *
 * Declares the per-GT MMIO read/write/wait helpers implemented in
 * xe_mmio.c, plus VRAM/tile probing entry points.
 */

#ifndef _XE_MMIO_H_
#define _XE_MMIO_H_

#include "xe_gt_types.h"

struct xe_device;
struct xe_reg;

/* PCI BAR index used to map local memory (VRAM). */
#define LMEM_BAR 2

int xe_mmio_init(struct xe_device *xe);
int xe_mmio_probe_tiles(struct xe_device *xe);

u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
/* Read-modify-write: clears @clr bits, sets @set bits; returns old value. */
u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
/* Writes @val, reads back, and verifies (readback & @mask) == @eval. */
int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);

int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
/*
 * Poll @reg until (value & @mask) == @val (wait32) or != @val (wait32_not),
 * up to @timeout_us microseconds. The last observed value is stored in
 * *@out_val when non-NULL; @atomic selects busy-wait-safe delays.
 */
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic);
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic);

/**
 * xe_mmio_adjusted_addr - adjust a register offset for this GT
 * @gt: the GT whose MMIO adjustment window applies
 * @addr: raw register offset
 *
 * Offsets below gt->mmio.adj_limit are shifted by gt->mmio.adj_offset;
 * offsets at or above the limit pass through unchanged. (Presumably used
 * to remap media-GT registers onto their relocated range — confirm against
 * xe_gt_types.h.)
 *
 * Return: the possibly-adjusted register offset.
 */
static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
{
	if (addr < gt->mmio.adj_limit)
		addr += gt->mmio.adj_offset;

	return addr;
}

#endif