mirror of
https://github.com/torvalds/linux.git
synced 2026-04-24 17:42:27 -04:00
Scheduler groups (a.k.a. Engine Groups Scheduling, or EGS) is a GuC feature that allows the driver to define groups of engines that are independently scheduled across VFs, which allows different VFs to be active on the HW at the same time on different groups. The feature is available for BMG and newer HW starting on GuC 70.53.0, but some required fixes have been added to GuC 70.55.1. This is intended for specific scenarios where the admin knows that the VFs are not going to fully utilize the HW and therefore assigning all of it to a single VF would lead to part of it being permanently idle. We do not allow the admin to decide how to divide the engines across groups, but we instead support specific configurations that are designed for specific use-cases. During PF initialization we detect which configurations are possible on a given GT and create the relevant groups. Since the GuC expects a mask for each class for each group, that is what we save when we init the configs. Right now we only have one use-case on the media GT. If the VFs are running a frame render + encoding at a not-too-high resolution (e.g. 1080@30fps) the render can produce frames faster than the video engine can encode them, which means that the maximum number of parallel VFs is limited by the VCS bandwidth. Since our products can have multiple VCS engines, allowing multiple VFs to be active on the different VCS engines at the same time allows us to run more parallel VFs on the same HW. Given that engines in the same media slice share some resources (e.g. SFC), we assign each media slice to a different scheduling group. We refer to this configuration as "media_slices", given that each slice gets its own group. Since upcoming products have a different number of video engines per-slice, for now we limit the media_slices mode to BMG, but we expect to add support for newer HW soon.
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com> Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> Link: https://patch.msgid.link/20251218223846.1146344-17-daniele.ceraolospurio@intel.com
159 lines
4.4 KiB
C
159 lines
4.4 KiB
C
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_GT_H_
#define _XE_GT_H_

#include <linux/fault-inject.h>

#include <drm/drm_util.h>

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine.h"
/* Iterate over all populated (valid) HW engines on @gt__. */
#define for_each_hw_engine(hwe__, gt__, id__) \
	for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
			    xe_hw_engine_is_valid((hwe__)))
/*
 * Extract the per-class instance bitmask for engine class NAME from the GT's
 * overall engine mask (bit 0 of the result is instance 0 of that class).
 */
#define XE_ENGINE_INSTANCES_FROM_MASK(gt, NAME) \
	(((gt)->info.engine_mask & XE_HW_ENGINE_##NAME##_MASK) >> XE_HW_ENGINE_##NAME##0)
/* Per-class convenience wrappers around XE_ENGINE_INSTANCES_FROM_MASK(). */
#define RCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, RCS)
#define VCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, VCS)
#define VECS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, VECS)
#define CCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, CCS)
#define GSCCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, GSCCS)

/* Our devices have up to 4 media slices */
#define MAX_MEDIA_SLICES 4
/* IP version of @gt: media version for a media GT, graphics version otherwise. */
#define GT_VER(gt) ({ \
	typeof(gt) gt_ = (gt); \
	struct xe_device *xe = gt_to_xe(gt_); \
	xe_gt_is_media_type(gt_) ? MEDIA_VER(xe) : GRAPHICS_VER(xe); \
})
extern struct fault_attr gt_reset_failure;
|
|
static inline bool xe_fault_inject_gt_reset(void)
|
|
{
|
|
return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(>_reset_failure, 1);
|
|
}
|
|
|
|
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
/**
 * xe_gt_record_user_engines - save data related to engines available to
 * userspace
 * @gt: GT structure
 *
 * Walk the available HW engines from gt->info.engine_mask and calculate data
 * related to those engines that may be used by userspace. To be used whenever
 * available engines change in runtime (e.g. with ccs_mode) or during
 * initialization
 */
void xe_gt_record_user_engines(struct xe_gt *gt);
void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
int xe_gt_runtime_resume(struct xe_gt *gt);
int xe_gt_runtime_suspend(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
/**
|
|
* xe_gt_wait_for_reset - wait for gt's async reset to finalize.
|
|
* @gt: GT structure
|
|
* Return:
|
|
* %true if it waited for the work to finish execution,
|
|
* %false if there was no scheduled reset or it was done.
|
|
*/
|
|
static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
|
|
{
|
|
return flush_work(>->reset.worker);
|
|
}
|
|
|
|
/**
|
|
* xe_gt_reset - perform synchronous reset
|
|
* @gt: GT structure
|
|
* Return:
|
|
* %true if it waited for the reset to finish,
|
|
* %false if there was no scheduled reset.
|
|
*/
|
|
static inline bool xe_gt_reset(struct xe_gt *gt)
|
|
{
|
|
xe_gt_reset_async(gt);
|
|
return xe_gt_wait_for_reset(gt);
|
|
}
|
|
|
|
/**
 * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
 * first that matches the same reset domain as @class
 * @gt: GT structure
 * @class: hw engine class to lookup
 */
struct xe_hw_engine *
xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt, enum xe_engine_class class);
/**
 * xe_gt_any_hw_engine - scan the list of engines and return the
 * first available
 * @gt: GT structure
 */
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt);
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
|
|
enum xe_engine_class class,
|
|
u16 instance,
|
|
bool logical);
|
|
|
|
static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
|
|
{
|
|
return gt->info.has_indirect_ring_state &&
|
|
xe_device_uc_enabled(gt_to_xe(gt));
|
|
}
|
|
|
|
static inline bool xe_gt_is_main_type(struct xe_gt *gt)
|
|
{
|
|
return gt->info.type == XE_GT_TYPE_MAIN;
|
|
}
|
|
|
|
static inline bool xe_gt_is_media_type(struct xe_gt *gt)
|
|
{
|
|
return gt->info.type == XE_GT_TYPE_MEDIA;
|
|
}
|
|
|
|
static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
|
|
{
|
|
struct xe_device *xe = gt_to_xe(gt);
|
|
|
|
return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
|
|
hwe->instance == gt->usm.reserved_bcs_instance;
|
|
}
|
|
|
|
/**
|
|
* xe_gt_recovery_pending() - GT recovery pending
|
|
* @gt: the &xe_gt
|
|
*
|
|
* Return: True if GT recovery in pending, False otherwise
|
|
*/
|
|
static inline bool xe_gt_recovery_pending(struct xe_gt *gt)
|
|
{
|
|
return IS_SRIOV_VF(gt_to_xe(gt)) &&
|
|
xe_gt_sriov_vf_recovery_pending(gt);
|
|
}
|
|
|
|
#endif
|