mirror of
https://github.com/torvalds/linux.git
synced 2026-04-23 00:55:48 -04:00
All recent platforms (including all the ones officially supported by the Xe driver) do not allow concurrent execution of RCS and CCS workloads from different address spaces, with the HW blocking the context switch when it detects such a scenario. The DUAL_QUEUE flag helps with this, by causing the GuC to not submit a context it knows will not be able to execute. This, however, causes a new problem: if RCS and CCS queues have pending workloads from different address spaces, the GuC needs to choose from which of the 2 queues to pick the next workload to execute. By default, the GuC prioritizes RCS submissions over CCS ones, which can lead to CCS workloads being significantly (or completely) starved of execution time. The driver can tune this by setting a dedicated scheduling policy KLV; this KLV allows the driver to specify a quantum (in ms) and a ratio (percentage value between 0 and 100), and the GuC will prioritize the CCS for that percentage of each quantum. Given that we want to guarantee enough RCS throughput to avoid missing frames, we set the yield policy to 20% of each 80ms interval. v2: updated quantum and ratio, improved comment, use xe_guc_submit_disable in gt_sanitize Fixes: d9a1ae0d17 ("drm/xe/guc: Enable WA_DUAL_QUEUE for newer platforms") Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Cc: Matthew Brost <matthew.brost@intel.com> Cc: John Harrison <John.C.Harrison@Intel.com> Cc: Vinay Belgaumkar <vinay.belgaumkar@intel.com> Reviewed-by: John Harrison <John.C.Harrison@Intel.com> Tested-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com> Link: https://lore.kernel.org/r/20250905235632.3333247-2-daniele.ceraolospurio@intel.com (cherry picked from commit 8843444843) Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com> [Rodrigo added #include "xe_guc_submit.h" while backporting]
46 lines
1.6 KiB
C
46 lines
1.6 KiB
C
/* SPDX-License-Identifier: MIT */
|
|
/*
|
|
* Copyright © 2022 Intel Corporation
|
|
*/
|
|
|
|
#ifndef _XE_GUC_SUBMIT_H_
|
|
#define _XE_GUC_SUBMIT_H_
|
|
|
|
#include <linux/types.h>
|
|
|
|
struct drm_printer;
|
|
struct xe_exec_queue;
|
|
struct xe_guc;
|
|
|
|
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
|
|
int xe_guc_submit_enable(struct xe_guc *guc);
|
|
void xe_guc_submit_disable(struct xe_guc *guc);
|
|
|
|
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
|
|
void xe_guc_submit_reset_wait(struct xe_guc *guc);
|
|
void xe_guc_submit_stop(struct xe_guc *guc);
|
|
int xe_guc_submit_start(struct xe_guc *guc);
|
|
void xe_guc_submit_wedge(struct xe_guc *guc);
|
|
|
|
int xe_guc_read_stopped(struct xe_guc *guc);
|
|
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
|
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
|
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
|
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
|
|
u32 len);
|
|
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
|
int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
|
|
|
struct xe_guc_submit_exec_queue_snapshot *
|
|
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
|
|
void
|
|
xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot);
|
|
void
|
|
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
|
|
struct drm_printer *p);
|
|
void
|
|
xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
|
|
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
|
|
|
|
#endif
|