Pull drm updates from Dave Airlie:
 "cross-subsystem:
   - i2c-hid: Make elan touch controllers power on after panel is
     enabled
   - dt bindings for STM32MP25 SoC
   - pci vgaarb: use screen_info helpers
   - rust pin-init updates
   - add MEI driver for late binding firmware update/load

  uapi:
   - add ioctl for reassigning GEM handles
   - provide boot_display attribute on boot-up devices

  core:
   - document DRM_MODE_PAGE_FLIP_EVENT
   - add vendor specific recovery method to drm device wedged uevent

  gem:
   - Simplify gpuvm locking

  ttm:
   - add interface to populate buffers

  sched:
   - Fix race condition in trace code

  atomic:
   - Reallow no-op async page flips

  display:
   - dp: Fix command length

  video:
   - Improve pixel-format handling for struct screen_info

  rust:
   - drop Opaque<> from ioctl args
   - Alloc:
       - BorrowedPage type and AsPageIter traits
       - Implement Vmalloc::to_page() and VmallocPageIter
   - DMA/Scatterlist:
       - Add dma::DataDirection and type alias for dma_addr_t
       - Abstraction for struct scatterlist and sg_table
   - DRM:
       - simplify use of generics
       - add DriverFile type alias
       - drop Object::SIZE
   - Rust:
       - pin-init tree merge
       - Various methods for AsBytes and FromBytes traits

  gpuvm:
   - Support madvise in Xe driver

  gpusvm:
   - fix hmm_pfn_to_map_order usage in gpusvm

  bridge:
   - Improve and fix ref counting on bridge management
   - cdns-dsi: Various improvements to mode setting
   - Support Solomon SSD2825 plus DT bindings
   - Support Waveshare DSI2DPI plus DT bindings
   - Support Content Protection property
   - display-connector: Improve DP display detection
   - Add support for Radxa Ra620 plus DT bindings
   - adv7511: Provide SPD and HDMI infoframes
   - it6505: Replace crypto_shash with sha()
   - synopsys: Add support for DW DPTX Controller plus DT bindings
   - adv7511: Write full Audio infoframe
   - ite6263: Support vendor-specific infoframes
   - simple: Add support for Realtek RTD2171 DP-to-HDMI plus DT bindings

  panel:
   - panel-edp: Support mt8189 Chromebooks; Support BOE NV140WUM-N64;
     Support SHP LQ134Z1; Fixes
   - panel-simple: Support Olimex LCD-OLinuXino-5CTS plus DT bindings
   - Support Samsung AMS561RA01
   - Support Hydis HV101HD1 plus DT bindings
   - ilitek-ili9881c: Refactor mode setting; Add support for Bestar
     BSD1218-A101KL68 LCD plus DT bindings
   - lvds: Add support for Ampire AMP19201200B5TZQW-T03 to DT bindings
   - edp: Add support for additional mt8189 Chromebook panels
   - lvds: Add DT bindings for EDT ETML0700Z8DHA

  amdgpu:
   - add CRIU support for gem objects
   - RAS updates
   - VCN SRAM load fixes
   - EDID read fixes
   - eDP ALPM support
   - Documentation updates
   - Rework PTE flag generation
   - DCE6 fixes
   - VCN devcoredump cleanup
   - MMHUB client id fixes
   - VCN 5.0.1 RAS support
   - SMU 13.0.x updates
   - Expanded PCIe DPC support
   - Expanded VCN reset support
   - VPE per queue reset support
   - give kernel jobs unique id for tracing
   - pre-populate exported buffers
   - cyan skillfish updates
   - make vbios build number available in sysfs
   - userq updates
   - HDCP updates
   - support MMIO remap page as ttm pool
   - JPEG parser updates
   - DCE6 DC updates
   - use devm for i2c buses
   - GPUVM locking updates
   - Drop non-DC DCE11 code
   - improve fallback handling for pixel encoding

  amdkfd:
   - SVM/page migration fixes
   - debugfs fixes
   - add CRIU support for gem objects
   - SVM updates

  radeon:
   - use dev_warn_once in CS parsers

  xe:
   - add madvise interface
   - add DRM_IOCTL_XE_VM_QUERY_MEMORY_RANGE_ATTRS to query VMA count
     and memory attributes
   - drop L# bank mask reporting from media GT3 on Xe3+.
   - add SLPC power_profile sysfs interface
   - add configs attribs to add post/mid context-switch commands
   - handle firmware reported hardware errors notifying userspace with
     device wedged uevent
   - use same dir structure across sysfs/debugfs
   - cleanup and future proof vram region init
   - add G-states and PCI link states to debugfs
   - Add SRIOV support for CCS surfaces on Xe2+
   - Enable SRIOV PF mode by default on supported platforms
   - move flush to common code
   - extended core workarounds for Xe2/3
   - use DRM scheduler for delayed GT TLB invalidations
   - configs improvements and allow VF device enablement
   - prep work to expose mmio regions to userspace
   - VF migration support added
   - prepare GPU SVM for THP migration
   - start fixing XE_PAGE_SIZE vs PAGE_SIZE
   - add PSMI support for hw validation
   - resize VF bars to max possible size according to number of VFs
   - Ensure GT is in C0 during resume
   - pre-populate exported buffers
   - replace xe_hmm with gpusvm
   - add more SVM GT stats to debugfs
   - improve fake pci and WA kunit handle for new platform testing
   - Test GuC to GuC comms to add debugging
   - use attribute groups to simplify sysfs registration
   - add Late Binding firmware code to interact with MEI

  i915:
   - apply multiple JSL/EHL/Gen7/Gen6 workarounds properly
   - protect against overflow in active_engine()
   - Use try_cmpxchg64() in __active_lookup()
   - include GuC registers in error state
   - get rid of dev->struct_mutex
   - iopoll: generalize read_poll_timeout
   - lots more display refactoring
   - Reject HBR3 in any eDP Panel
   - Prune modes for YUV420
   - Display Wa fix, additions, and updates
   - DP: Fix 2.7 Gbps link training on g4x
   - DP: Adjust the idle pattern handling
   - DP: Shuffle the link training code a bit
   - Don't set/read the DSI C clock divider on GLK
   - Enable_psr kernel parameter changes
   - Type-C enabled/disconnected dp-alt sink
   - Wildcat Lake enabling
   - DP HDR updates
   - DRAM detection
   - wait PSR idle on dsb commit
   - Remove FBC modulo 4 restriction for ADL-P+
   - panic: refactor framebuffer allocation

  habanalabs:
   - debug/visibility improvements
   - vmalloc-backed coherent mmap support
   - HLDIO infrastructure

  nova-core:
   - various register!() macro improvements
   - minor vbios/firmware fixes/refactoring
   - advance firmware boot stages; process Booter and patch signatures
   - process GSP and GSP bootloader
   - Add r570.144 firmware bindings and update to it
   - Move GSP boot code to own module
   - Use new pin-init features to store driver's private data in a
     single allocation
   - Update ARef import from sync::aref

  nova-drm:
   - Update ARef import from sync::aref

  tyr:
   - initial driver skeleton for a rust driver for ARM Mali GPUs
   - capable of powering up, querying metadata and providing it to userspace.

  msm:
   - GPU and Core:
      - in DT bindings describe clocks per GPU type
      - GMU bandwidth voting for x1-85
      - a623/a663 speedbins
      - cleanup some remaining no-iommu leftovers after VM_BIND conversion
      - fix GEM obj 32b size truncation
      - add missing VM_BIND param validation
      - IFPC for x1-85 and a750
      - register xml and gen_header.py sync from mesa
   - Display:
      - add missing bindings for display on SC8180X
      - added DisplayPort MST bindings
      - conversion from round_rate() to determine_rate()

  amdxdna:
   - add IOCTL_AMDXDNA_GET_ARRAY
   - support user space allocated buffers
   - streamline PM interfaces
   - Refactoring wrt. hardware contexts
   - improve error reporting

  nouveau:
   - use GSP firmware by default
   - improve error reporting
   - Pre-populate exported buffers

  ast:
   - Clean up detection of DRAM config

  exynos:
   - add DSIM bridge driver support for Exynos7870
   - Document Exynos7870 DSIM compatible in dt-binding

  panthor:
   - Print task/pid on errors
   - Add support for Mali G710, G510, G310, Gx15, Gx20, Gx25
   - Improve cache flushing
   - Fail VM bind if BO has offset

  renesas:
   - convert to RUNTIME_PM_OPS

  rcar-du:
   - Make number of lanes configurable
   - Use RUNTIME_PM_OPS
   - Add support for DSI commands

  rocket:
   - Add driver for Rockchip NPU plus DT bindings
   - Use kfree() and sizeof() correctly
   - Test DMA status

  rockchip:
   - dsi2: Add support for RK3576 plus DT bindings
   - Add support for RK3588 DPTX output

  tidss:
   - Use crtc_ fields for programming display mode
   - Remove other drivers from aperture

  pixpaper:
   - Add support for Mayqueen Pixpaper plus DT bindings

  v3d:
   - Support querying number of GPU resets for KHR_robustness

  stm:
   - Clean up logging
   - ltdc: Add support for STM32MP257F-EV1 plus DT bindings

  sitronix:
   - st7571-i2c: Add support for inverted displays and 2-bit grayscale

  tidss:
   - Convert to kernel's FIELD_ macros

  vesadrm:
   - Support 8-bit palette mode

  imagination:
   - Improve power management
   - Add support for TH1520 GPU
   - Support Risc-V architectures

  v3d:
   - Improve job management and locking

  vkms:
   - Support variants of ARGB8888, ARGB16161616, RGB565, RGB888 and P01x
   - Support YUV with 16-bit components"

* tag 'drm-next-2025-10-01' of https://gitlab.freedesktop.org/drm/kernel: (1455 commits)
  drm/amd: Add name to modes from amdgpu_connector_add_common_modes()
  drm/amd: Drop some common modes from amdgpu_connector_add_common_modes()
  drm/amdgpu: update MODULE_PARM_DESC for freesync_video
  drm/amd: Use dynamic array size declaration for amdgpu_connector_add_common_modes()
  drm/amd/display: Share dce100_validate_global with DCE6-8
  drm/amd/display: Share dce100_validate_bandwidth with DCE6-8
  drm/amdgpu: Fix fence signaling race condition in userqueue
  amd/amdkfd: enhance kfd process check in switch partition
  amd/amdkfd: resolve a race in amdgpu_amdkfd_device_fini_sw
  drm/amd/display: Reject modes with too high pixel clock on DCE6-10
  drm/amd: Drop unnecessary check in amdgpu_connector_add_common_modes()
  drm/amd/display: Only enable common modes for eDP and LVDS
  drm/amdgpu: remove the redeclaration of variable i
  drm/amdgpu/userq: assign an error code for invalid userq va
  drm/amdgpu: revert "rework reserved VMID handling" v2
  drm/amdgpu: remove leftover from enforcing isolation by VMID
  drm/amdgpu: Add fallback to pipe reset if KCQ ring reset fails
  accel/habanalabs: add Infineon version check
  accel/habanalabs/gaudi2: read preboot status after recovering from dirty state
  accel/habanalabs: add HL_GET_P_STATE passthrough type
  ...
This commit is contained in:
Linus Torvalds
2025-10-02 12:47:25 -07:00
1254 changed files with 52110 additions and 19221 deletions

View File

@@ -23,7 +23,7 @@
static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
bool clear, u64 get_val, u64 assign_val,
struct kunit *test)
struct kunit *test, struct drm_exec *exec)
{
struct dma_fence *fence;
struct ttm_tt *ttm;
@@ -35,7 +35,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
u32 offset;
/* Move bo to VRAM if not already there. */
ret = xe_bo_validate(bo, NULL, false);
ret = xe_bo_validate(bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate bo.\n");
return ret;
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
ret = xe_bo_evict(bo);
ret = xe_bo_evict(bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -132,14 +132,15 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
/* TODO: Sanity check */
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
else
kunit_info(test, "Testing system memory\n");
bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
bo_flags);
bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
bo_flags, exec);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -149,18 +150,18 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
test);
test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data survives migration.\n");
ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
0xdeadbeefdeadbeefULL, test);
0xdeadbeefdeadbeefULL, test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test);
ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test, exec);
out_unlock:
xe_bo_unlock(bo);
@@ -210,6 +211,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
struct xe_gt *__gt;
int err, i, id;
@@ -218,25 +220,25 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for (i = 0; i < 2; ++i) {
xe_vm_lock(vm, false);
bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
bo = xe_bo_create_user(xe, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags);
bo_flags, exec);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "bo create err=%pe\n", bo);
break;
}
external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
external = xe_bo_create_user(xe, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags);
bo_flags, NULL);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
}
xe_bo_lock(external, false);
err = xe_bo_pin_external(external, false);
err = xe_bo_pin_external(external, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo pin err=%pe\n",
@@ -294,7 +296,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
if (i) {
down_read(&vm->lock);
xe_vm_lock(vm, false);
err = xe_bo_validate(bo, bo->vm, false);
err = xe_bo_validate(bo, bo->vm, false, exec);
xe_vm_unlock(vm);
up_read(&vm->lock);
if (err) {
@@ -303,7 +305,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
xe_bo_lock(external, false);
err = xe_bo_validate(external, NULL, false);
err = xe_bo_validate(external, NULL, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo valid err=%pe\n",
@@ -495,9 +497,9 @@ static int shrink_test_run_device(struct xe_device *xe)
INIT_LIST_HEAD(&link->link);
/* We can create bos using WC caching here. But it is slower. */
bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
bo = xe_bo_create_user(xe, NULL, XE_BO_SHRINK_SIZE,
DRM_XE_GEM_CPU_CACHING_WB,
XE_BO_FLAG_SYSTEM);
XE_BO_FLAG_SYSTEM, NULL);
if (IS_ERR(bo)) {
if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))

View File

@@ -27,7 +27,8 @@ static bool is_dynamic(struct dma_buf_test_params *params)
}
static void check_residency(struct kunit *test, struct xe_bo *exported,
struct xe_bo *imported, struct dma_buf *dmabuf)
struct xe_bo *imported, struct dma_buf *dmabuf,
struct drm_exec *exec)
{
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
u32 mem_type;
@@ -57,16 +58,12 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
return;
/*
* Evict exporter. Note that the gem object dma_buf member isn't
* set from xe_gem_prime_export(), and it's needed for the move_notify()
* functionality, so hack that up here. Evicting the exported bo will
* Evict exporter. Evicting the exported bo will
* evict also the imported bo through the move_notify() functionality if
* importer is on a different device. If they're on the same device,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
ret = xe_bo_evict(exported, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
@@ -81,7 +78,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
}
/* Re-validate the importer. This should move also exporter in. */
ret = xe_bo_validate(imported, NULL, false);
ret = xe_bo_validate(imported, NULL, false, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
@@ -117,8 +114,8 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
params->mem_mask);
bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
params->mem_mask, NULL);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -131,6 +128,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(dmabuf));
goto out;
}
bo->ttm.base.dma_buf = dmabuf;
import = xe_gem_prime_import(&xe->drm, dmabuf);
if (!IS_ERR(import)) {
@@ -145,11 +143,12 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
int err;
/* Is everything where we expect it to be? */
xe_bo_lock(import_bo, false);
err = xe_bo_validate(import_bo, NULL, false);
err = xe_bo_validate(import_bo, NULL, false, exec);
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
@@ -162,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
err == -ERESTARTSYS);
if (!err)
check_residency(test, bo, import_bo, dmabuf);
check_residency(test, bo, import_bo, dmabuf, exec);
xe_bo_unlock(import_bo);
}
drm_gem_object_put(import);
@@ -178,6 +177,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
}
bo->ttm.base.dma_buf = NULL;
dma_buf_put(dmabuf);
out:
drm_gem_object_put(&bo->ttm.base);
@@ -198,7 +198,7 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
{.mem_mask = XE_BO_FLAG_VRAM0,
{.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
@@ -230,7 +230,8 @@ static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},

View File

@@ -0,0 +1,776 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2025 Intel Corporation
*/
#include <linux/delay.h>
#include <kunit/test.h>
#include <kunit/visibility.h>
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_pm.h"
/*
* There are different ways to allocate the G2G buffers. The plan for this test
* is to make sure that all the possible options work. The particular option
* chosen by the driver may vary from one platform to another, it may also change
* with time. So to ensure consistency of testing, the relevant driver code is
* replicated here to guarantee it won't change without the test being updated
* to keep testing the other options.
*
* In order to test the actual code being used by the driver, there is also the
* 'default' scheme. That will use the official driver routines to test whatever
* method the driver is using on the current platform at the current time.
*/
/* Buffer allocation schemes exercised by the test cases below */
enum {
	/* Driver defined allocation scheme */
	G2G_CTB_TYPE_DEFAULT,
	/* Single buffer in host memory */
	G2G_CTB_TYPE_HOST,
	/* Single buffer in a specific tile, loops across all tiles */
	G2G_CTB_TYPE_TILE,
};
/*
* Payload is opaque to GuC. So KMD can define any structure or size it wants.
*/
struct g2g_test_payload {
	u32 tx_dev;	/* GuC device id of the sender */
	u32 tx_tile;	/* tile id of the sender */
	u32 rx_dev;	/* GuC device id of the intended receiver */
	u32 rx_tile;	/* tile id of the intended receiver */
	u32 seqno;	/* per-route sequence number, checked on receipt */
};
/*
 * Ask the local GuC to send one G2G test message to the GuC identified
 * by @far_tile/@far_dev. The H2G action is four header dwords (action
 * id, destination tile, destination device, payload length in dwords)
 * followed by a verbatim copy of @payload. xe->g2g_test_count is
 * incremented here and decremented when the far side's notification is
 * processed in xe_guc_g2g_test_notification().
 */
static void g2g_test_send(struct kunit *test, struct xe_guc *guc,
			  u32 far_tile, u32 far_dev,
			  struct g2g_test_payload *payload)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 *action, total;
	size_t payload_len;
	int ret;

	/* Payload must be an exact number of dwords for the CT message */
	static_assert(IS_ALIGNED(sizeof(*payload), sizeof(u32)));
	payload_len = sizeof(*payload) / sizeof(u32);

	total = 4 + payload_len;
	action = kunit_kmalloc_array(test, total, sizeof(*action), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, action);

	action[0] = XE_GUC_ACTION_TEST_G2G_SEND;
	action[1] = far_tile;
	action[2] = far_dev;
	action[3] = payload_len;
	memcpy(action + 4, payload, payload_len * sizeof(u32));

	atomic_inc(&xe->g2g_test_count);

	/*
	 * Should specify the expected response notification here. Problem is that
	 * the response will be coming from a different GuC. By the end, it should
	 * all add up as long as an equal number of messages are sent from each GuC
	 * and to each GuC. However, in the middle negative reservation space errors
	 * and such like can occur. Rather than add intrusive changes to the CT layer
	 * it is simpler to just not bother counting it at all. The system should be
	 * idle when running the selftest, and the selftest's notification total size
	 * is well within the G2H allocation size. So there should be no issues with
	 * needing to block for space, which is all the tracking code is really for.
	 */
	ret = xe_guc_ct_send(&guc->ct, action, total, 0, 0);
	kunit_kfree(test, action);
	KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G send failed: %d [%d:%d -> %d:%d]\n", ret,
			    gt_to_tile(gt)->id, G2G_DEV(gt), far_tile, far_dev);
}
/*
 * Handler for a G2G test notification received from a remote GuC.
 * Validates the message length and the payload against the route the
 * message actually arrived on, then records payload->seqno in the
 * [tx_gt][rx_gt] slot of xe->g2g_test_array, checking that seqnos for
 * a given route arrive in strict +1 order. Always decrements
 * xe->g2g_test_count (paired with the increment in g2g_test_send())
 * and returns 0 on success or a negative error code on any mismatch.
 *
 * NB: Can't use KUNIT_ASSERT and friends in here as this is called asynchronously
 * from the G2H notification handler. Need that to actually complete rather than
 * thread-abort in order to keep the rest of the driver alive!
 */
int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *rx_gt = guc_to_gt(guc), *test_gt, *tx_gt = NULL;
	u32 tx_tile, tx_dev, rx_tile, rx_dev, idx, got_len;
	struct g2g_test_payload *payload;
	size_t payload_len;
	int ret = 0, i;

	payload_len = sizeof(*payload) / sizeof(u32);

	if (unlikely(len != (G2H_LEN_DW_G2G_NOTIFY_MIN + payload_len))) {
		xe_gt_err(rx_gt, "G2G test notification invalid length %u", len);
		ret = -EPROTO;
		goto done;
	}

	/* Fixed header: sender's tile/dev and payload dword count */
	tx_tile = msg[0];
	tx_dev = msg[1];
	got_len = msg[2];
	payload = (struct g2g_test_payload *)(msg + 3);

	rx_tile = gt_to_tile(rx_gt)->id;
	rx_dev = G2G_DEV(rx_gt);

	if (got_len != payload_len) {
		xe_gt_err(rx_gt, "G2G: Invalid payload length: %u vs %zu\n", got_len, payload_len);
		ret = -EPROTO;
		goto done;
	}

	/* The payload's claimed route must match how the message actually travelled */
	if (payload->tx_dev != tx_dev || payload->tx_tile != tx_tile ||
	    payload->rx_dev != rx_dev || payload->rx_tile != rx_tile) {
		xe_gt_err(rx_gt, "G2G: Invalid payload: %d:%d -> %d:%d vs %d:%d -> %d:%d! [%d]\n",
			  payload->tx_tile, payload->tx_dev, payload->rx_tile, payload->rx_dev,
			  tx_tile, tx_dev, rx_tile, rx_dev, payload->seqno);
		ret = -EPROTO;
		goto done;
	}

	if (!xe->g2g_test_array) {
		xe_gt_err(rx_gt, "G2G: Missing test array!\n");
		ret = -ENOMEM;
		goto done;
	}

	/* Map the sender's tile/dev pair back to a unique GT */
	for_each_gt(test_gt, xe, i) {
		if (gt_to_tile(test_gt)->id != tx_tile)
			continue;

		if (G2G_DEV(test_gt) != tx_dev)
			continue;

		if (tx_gt) {
			xe_gt_err(rx_gt, "G2G: Got duplicate TX GTs: %d vs %d for %d:%d!\n",
				  tx_gt->info.id, test_gt->info.id, tx_tile, tx_dev);
			ret = -EINVAL;
			goto done;
		}

		tx_gt = test_gt;
	}
	if (!tx_gt) {
		xe_gt_err(rx_gt, "G2G: Failed to find a TX GT for %d:%d!\n", tx_tile, tx_dev);
		ret = -EINVAL;
		goto done;
	}

	/* One slot per (tx, rx) GT pair, row-major by TX GT id */
	idx = (tx_gt->info.id * xe->info.gt_count) + rx_gt->info.id;

	/* Seqnos on a given route must arrive in strict +1 order */
	if (xe->g2g_test_array[idx] != payload->seqno - 1) {
		xe_gt_err(rx_gt, "G2G: Seqno mismatch %d vs %d for %d:%d -> %d:%d!\n",
			  xe->g2g_test_array[idx], payload->seqno - 1,
			  tx_tile, tx_dev, rx_tile, rx_dev);
		ret = -EINVAL;
		goto done;
	}

	xe->g2g_test_array[idx] = payload->seqno;

done:
	atomic_dec(&xe->g2g_test_count);
	return ret;
}
/*
* Send the given seqno from all GuCs to all other GuCs in tile/GT order
*/
/*
 * Send one message carrying @seqno from every GuC to every other GuC,
 * walking both ends in tile/GT enumeration order.
 */
static void g2g_test_in_order(struct kunit *test, struct xe_device *xe, u32 seqno)
{
	struct xe_gt *src_gt, *dst_gt;
	int i, j;

	for_each_gt(src_gt, xe, i) {
		u32 src_tile = gt_to_tile(src_gt)->id;
		u32 src_dev = G2G_DEV(src_gt);

		for_each_gt(dst_gt, xe, j) {
			u32 dst_tile = gt_to_tile(dst_gt)->id;
			u32 dst_dev = G2G_DEV(dst_gt);
			struct g2g_test_payload payload = {
				.tx_dev = src_dev,
				.tx_tile = src_tile,
				.rx_dev = dst_dev,
				.rx_tile = dst_tile,
				.seqno = seqno,
			};

			/* A GuC never messages itself */
			if (dst_gt->info.id == src_gt->info.id)
				continue;

			g2g_test_send(test, &src_gt->uc.guc, dst_tile, dst_dev, &payload);
		}
	}
}
#define WAIT_TIME_MS 100
#define WAIT_COUNT (1000 / WAIT_TIME_MS)

/*
 * kunit deferred action: poll until every G2G message sent by the test
 * has had its notification processed (xe->g2g_test_count back to zero),
 * giving up and failing the test after WAIT_COUNT * WAIT_TIME_MS (1s).
 */
static void g2g_wait_for_complete(void *_xe)
{
	struct xe_device *xe = (struct xe_device *)_xe;
	struct kunit *test = kunit_get_current_test();
	int wait = 0;

	/* Wait for all G2H messages to be received */
	while (atomic_read(&xe->g2g_test_count)) {
		if (++wait > WAIT_COUNT)
			break;

		msleep(WAIT_TIME_MS);
	}

	KUNIT_ASSERT_EQ_MSG(test, 0, atomic_read(&xe->g2g_test_count),
			    "Timed out waiting for notifications\n");
	kunit_info(test, "Got all notifications back\n");
}

#undef WAIT_TIME_MS
#undef WAIT_COUNT
/* kunit deferred action: drop the device's pointer to the seqno tracking array */
static void g2g_clean_array(void *_xe)
{
	struct xe_device *xe = _xe;

	xe->g2g_test_array = NULL;
}
#define NUM_LOOPS 16

/*
 * Core of the test: allocate a gt_count x gt_count seqno tracking array,
 * send NUM_LOOPS-1 rounds of all-to-all messages with incrementing
 * seqnos, wait for every notification to come back (via the
 * g2g_wait_for_complete action), then verify each route's slot holds
 * the final seqno and each identity slot was never written.
 */
static void g2g_run_test(struct kunit *test, struct xe_device *xe)
{
	u32 seqno, max_array;
	int ret, i, j;

	max_array = xe->info.gt_count * xe->info.gt_count;
	xe->g2g_test_array = kunit_kcalloc(test, max_array, sizeof(u32), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe->g2g_test_array);

	/* Ensure the array pointer is cleared even if an assert aborts the test */
	ret = kunit_add_action_or_reset(test, g2g_clean_array, xe);
	KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");

	/*
	 * Send incrementing seqnos from all GuCs to all other GuCs in tile/GT order.
	 * Tile/GT order doesn't really mean anything to the hardware but it is going
	 * to be a fixed sequence every time.
	 *
	 * Verify that each one comes back having taken the correct route.
	 */
	ret = kunit_add_action(test, g2g_wait_for_complete, xe);
	KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");

	for (seqno = 1; seqno < NUM_LOOPS; seqno++)
		g2g_test_in_order(test, xe, seqno);
	seqno--;

	/* Runs (and waits for) the completion poll now rather than at test exit */
	kunit_release_action(test, &g2g_wait_for_complete, xe);

	/* Check for the final seqno in each slot */
	for (i = 0; i < xe->info.gt_count; i++) {
		for (j = 0; j < xe->info.gt_count; j++) {
			u32 idx = (j * xe->info.gt_count) + i;

			if (i == j)
				KUNIT_ASSERT_EQ_MSG(test, 0, xe->g2g_test_array[idx],
						    "identity seqno modified: %d for %dx%d!\n",
						    xe->g2g_test_array[idx], i, j);
			else
				KUNIT_ASSERT_EQ_MSG(test, seqno, xe->g2g_test_array[idx],
						    "invalid seqno: %d vs %d for %dx%d!\n",
						    xe->g2g_test_array[idx], seqno, i, j);
		}
	}

	kunit_kfree(test, xe->g2g_test_array);
	kunit_release_action(test, &g2g_clean_array, xe);

	kunit_info(test, "Test passed\n");
}

#undef NUM_LOOPS
/*
 * Deregister every G2G channel owned by @guc: one per channel type
 * towards each remote GT in the device (self excluded).
 */
static void g2g_ct_stop(struct xe_guc *guc)
{
	struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int i, t;

	for_each_gt(remote_gt, xe, i) {
		u32 tile, dev;

		if (remote_gt->info.id == gt->info.id)
			continue;

		tile = gt_to_tile(remote_gt)->id;
		dev = G2G_DEV(remote_gt);

		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
			guc_g2g_deregister(guc, tile, dev, t);
	}
}
/*
 * Size of a single allocation that contains all G2G CTBs across all GTs:
 * one buffer per channel type for each unordered GT pair, plus the
 * shared descriptor area.
 */
static u32 g2g_ctb_size(struct kunit *test, struct xe_device *xe)
{
	unsigned int count = xe->info.gt_count;
	u32 num_channels = (count * (count - 1)) / 2;
	u32 total = num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;

	kunit_info(test, "Size: (%d * %d / 2) * %d * 0x%08X + 0x%08X => 0x%08X [%d]\n",
		   count, count - 1, XE_G2G_TYPE_LIMIT, G2G_BUFFER_SIZE, G2G_DESC_AREA_SIZE,
		   total, num_channels * XE_G2G_TYPE_LIMIT);

	return total;
}
/*
 * Use the driver's regular CTB allocation scheme: each GuC allocates
 * its own G2G buffers via guc_g2g_alloc(), exactly as the driver would
 * during normal init. This exercises whatever allocation method the
 * driver currently uses on this platform.
 */
static void g2g_alloc_default(struct kunit *test, struct xe_device *xe)
{
	struct xe_gt *gt;
	int i;

	kunit_info(test, "Default [tiles = %d, GTs = %d]\n",
		   xe->info.tile_count, xe->info.gt_count);

	for_each_gt(gt, xe, i) {
		struct xe_guc *guc = &gt->uc.guc;
		int ret;

		ret = guc_g2g_alloc(guc);
		KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G alloc failed: %pe", ERR_PTR(ret));
		/* Stray no-op 'continue' at end of loop body removed */
	}
}
/*
 * Hand a single shared G2G buffer out to every GT: GT0 becomes the
 * owner of @bo, every other GT takes an extra reference to it. The
 * 'owned' flag controls whether g2g_free() unmaps the bo or merely
 * drops the reference.
 */
static void g2g_distribute(struct kunit *test, struct xe_device *xe, struct xe_bo *bo)
{
	struct xe_gt *root_gt, *gt;
	int i;

	root_gt = xe_device_get_gt(xe, 0);
	root_gt->uc.guc.g2g.bo = bo;
	root_gt->uc.guc.g2g.owned = true;
	kunit_info(test, "[%d.%d] Assigned 0x%p\n", gt_to_tile(root_gt)->id, root_gt->info.id, bo);

	for_each_gt(gt, xe, i) {
		if (gt->info.id != 0) {
			gt->uc.guc.g2g.owned = false;
			gt->uc.guc.g2g.bo = xe_bo_get(bo);
			kunit_info(test, "[%d.%d] Pinned 0x%p\n",
				   gt_to_tile(gt)->id, gt->info.id, gt->uc.guc.g2g.bo);
		}
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt->uc.guc.g2g.bo);
	}
}
/*
 * Allocate a single blob on the host and split between all G2G CTBs.
 * The buffer is GGTT mapped on all GTs, zeroed, then shared out via
 * g2g_distribute().
 */
static void g2g_alloc_host(struct kunit *test, struct xe_device *xe)
{
	struct xe_bo *bo;
	u32 g2g_size;

	kunit_info(test, "Host [tiles = %d, GTs = %d]\n", xe->info.tile_count, xe->info.gt_count);

	g2g_size = g2g_ctb_size(test, xe);
	bo = xe_managed_bo_create_pin_map(xe, xe_device_get_root_tile(xe), g2g_size,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_ALL |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
	kunit_info(test, "[HST] G2G buffer create: 0x%p\n", bo);

	/* Start from a clean slate so stale descriptors can't leak through */
	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);

	g2g_distribute(test, xe, bo);
}
/*
 * Allocate a single blob on the given tile and split between all G2G CTBs.
 * VRAM placement only makes sense on discrete parts, hence the IS_DGFX
 * assert. The buffer is zeroed and shared out via g2g_distribute().
 */
static void g2g_alloc_tile(struct kunit *test, struct xe_device *xe, struct xe_tile *tile)
{
	struct xe_bo *bo;
	u32 g2g_size;

	KUNIT_ASSERT_TRUE(test, IS_DGFX(xe));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tile);

	kunit_info(test, "Tile %d [tiles = %d, GTs = %d]\n",
		   tile->id, xe->info.tile_count, xe->info.gt_count);

	g2g_size = g2g_ctb_size(test, xe);
	bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_ALL |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
	kunit_info(test, "[%d.*] G2G buffer create: 0x%p\n", tile->id, bo);

	/* Start from a clean slate so stale descriptors can't leak through */
	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);

	g2g_distribute(test, xe, bo);
}
/*
 * Release each GT's G2G buffer: the owning GT (set in g2g_distribute())
 * unpins and unmaps the managed bo, all others just drop the reference
 * they took with xe_bo_get().
 */
static void g2g_free(struct kunit *test, struct xe_device *xe)
{
	struct xe_gt *gt;
	struct xe_bo *bo;
	int i;

	for_each_gt(gt, xe, i) {
		bo = gt->uc.guc.g2g.bo;
		if (!bo)
			continue;

		if (gt->uc.guc.g2g.owned) {
			xe_managed_bo_unpin_map_no_vm(bo);
			kunit_info(test, "[%d.%d] Unmapped 0x%p\n",
				   gt_to_tile(gt)->id, gt->info.id, bo);
		} else {
			xe_bo_put(bo);
			kunit_info(test, "[%d.%d] Unpinned 0x%p\n",
				   gt_to_tile(gt)->id, gt->info.id, bo);
		}

		gt->uc.guc.g2g.bo = NULL;
	}
}
/*
 * Tear down G2G communication: deregister every GuC's channels (only
 * for GuCs that actually have a buffer assigned), then free/release
 * the buffers themselves.
 */
static void g2g_stop(struct kunit *test, struct xe_device *xe)
{
	struct xe_gt *gt;
	int i;

	for_each_gt(gt, xe, i) {
		struct xe_guc *guc = &gt->uc.guc;

		if (!guc->g2g.bo)
			continue;

		g2g_ct_stop(guc);
	}

	g2g_free(test, xe);
}
/*
 * Generate a unique id for each bi-directional CTB for each pair of
 * near and far tiles/devices. The id can then be used as an index into
 * a single allocation that is sub-divided into multiple CTBs.
 *
 * For example, with two devices per tile and two tiles, the table should
 * look like:
 *           Far <tile>.<dev>
 *         0.0   0.1   1.0   1.1
 * N 0.0  --/-- 00/01 02/03 04/05
 * e 0.1  01/00 --/-- 06/07 08/09
 * a 1.0  03/02 07/06 --/-- 10/11
 * r 1.1  05/04 09/08 11/10 --/--
 *
 * Where each entry is Rx/Tx channel id.
 *
 * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
 * be reading from channel #11 and writing to channel #10. Whereas,
 * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
 */
static int g2g_slot_flat(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
			 u32 type, u32 max_inst, bool have_dev)
{
	u32 src = near_tile, dst = far_tile;
	u32 row, col, channel;
	u32 slot = 0;
	int i;

	/* Fold the per-tile device bit into the GuC index when relevant */
	if (have_dev) {
		src = (src << 1) | near_dev;
		dst = (dst << 1) | far_dev;
	}

	/* No need to send to one's self */
	if (src == dst)
		return -1;

	if (dst > src) {
		/* Top right table half: T/R is the 'forwards' direction */
		col = dst;
		row = src;
		channel = type;
	} else {
		/* Bottom left table half: B/L is the 'backwards' direction */
		col = src;
		row = dst;
		channel = 1 - type;
	}

	/* Count the rows prior to the target */
	for (i = row; i > 0; i--)
		slot += max_inst - i;

	/* Count this row up to the target */
	slot += col - 1 - row;

	/* Slots come in Rx/Tx pairs; pick the direction within the pair */
	return (int)(slot * 2 + channel);
}
/*
 * Register one direction (@type) of a G2G channel from this GuC to the
 * remote GuC identified by @far_tile/@far_dev, using the flat slot
 * numbering scheme to locate the descriptor and buffer within the single
 * shared allocation.
 *
 * Return: 0 on success, -ENODEV if no CTB buffer is assigned, or a
 * negative error code from the GuC registration action.
 */
static int g2g_register_flat(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type, bool have_dev)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 near_tile = gt_to_tile(gt)->id;
	u32 near_dev = G2G_DEV(gt);
	u32 max = xe->info.gt_count;
	int idx;
	u32 base, desc, buf;

	if (!guc->g2g.bo)
		return -ENODEV;

	idx = g2g_slot_flat(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
	/* A self-channel (idx < 0) should never be requested here */
	xe_assert(xe, idx >= 0);

	/* Descriptors are packed at the start; buffers follow the descriptor area */
	base = guc_bo_ggtt_addr(guc, guc->g2g.bo);
	desc = base + idx * G2G_DESC_SIZE;
	buf = base + idx * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;

	/* Sanity check the slot lies entirely within the allocation */
	xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
	xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(guc->g2g.bo));

	return guc_action_register_g2g_buffer(guc, type, far_tile, far_dev,
					      desc, buf, G2G_BUFFER_SIZE);
}
/*
 * Register the test-allocated CTBs on this GuC: one channel of each G2G
 * type to every other GT in the system.
 */
static void g2g_start(struct kunit *test, struct xe_guc *guc)
{
	struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i;
	int t, ret;
	bool have_dev;

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, guc->g2g.bo);

	/* GuC interface will need extending if more GT device types are ever created. */
	KUNIT_ASSERT_TRUE(test,
			  (gt->info.type == XE_GT_TYPE_MAIN) ||
			  (gt->info.type == XE_GT_TYPE_MEDIA));

	/* Channel numbering depends on whether there are multiple GTs per tile */
	have_dev = xe->info.gt_count > xe->info.tile_count;

	for_each_gt(remote_gt, xe, i) {
		u32 tile, dev;

		/* No channel to one's self */
		if (remote_gt->info.id == gt->info.id)
			continue;

		tile = gt_to_tile(remote_gt)->id;
		dev = G2G_DEV(remote_gt);

		/* One registration per channel type (e.g. Rx and Tx) */
		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
			ret = g2g_register_flat(guc, tile, dev, t, have_dev);
			KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G register failed: %pe", ERR_PTR(ret));
		}
	}
}
/*
 * Tear down the current G2G configuration and rebuild it with CTBs placed
 * according to @ctb_type. @tile is only used for G2G_CTB_TYPE_TILE and
 * selects which tile's memory backs the buffers.
 */
static void g2g_reinit(struct kunit *test, struct xe_device *xe, int ctb_type, struct xe_tile *tile)
{
	struct xe_gt *gt;
	int i, found = 0;

	g2g_stop(test, xe);

	/* Verify the teardown really cleared every GuC's CTB pointer */
	for_each_gt(gt, xe, i) {
		struct xe_guc *guc = &gt->uc.guc;

		KUNIT_ASSERT_NULL(test, guc->g2g.bo);
	}

	switch (ctb_type) {
	case G2G_CTB_TYPE_DEFAULT:
		g2g_alloc_default(test, xe);
		break;
	case G2G_CTB_TYPE_HOST:
		g2g_alloc_host(test, xe);
		break;
	case G2G_CTB_TYPE_TILE:
		g2g_alloc_tile(test, xe, tile);
		break;
	default:
		/* Unknown CTB type - a bug in the test itself */
		KUNIT_ASSERT_TRUE(test, false);
	}

	for_each_gt(gt, xe, i) {
		struct xe_guc *guc = &gt->uc.guc;

		if (!guc->g2g.bo)
			continue;

		/* The default placement uses the driver's own registration path */
		if (ctb_type == G2G_CTB_TYPE_DEFAULT)
			guc_g2g_start(guc);
		else
			g2g_start(test, guc);
		found++;
	}

	/* G2G is meaningless with fewer than two participating GTs */
	KUNIT_ASSERT_GT_MSG(test, found, 1, "insufficient G2G channels running: %d", found);

	kunit_info(test, "Testing across %d GTs\n", found);
}
/*
 * Deferred cleanup action: tear down the test CTBs and, if the platform
 * normally runs with G2G enabled, restore the driver's default setup.
 */
static void g2g_recreate_ctb(void *_xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_device *xe = _xe;

	g2g_stop(test, xe);

	if (xe_guc_g2g_wanted(xe))
		g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
}
/* Deferred cleanup action: drop the runtime PM reference taken for the test. */
static void g2g_pm_runtime_put(void *_xe)
{
	xe_pm_runtime_put((struct xe_device *)_xe);
}
/*
 * Take a runtime PM reference for the duration of the test and register a
 * cleanup action so it is dropped even if the test aborts early.
 */
static void g2g_pm_runtime_get(struct kunit *test)
{
	struct xe_device *xe = test->priv;
	int err;

	xe_pm_runtime_get(xe);

	err = kunit_add_action_or_reset(test, g2g_pm_runtime_put, xe);
	KUNIT_ASSERT_EQ_MSG(test, 0, err, "Failed to register runtime PM action\n");
}
/*
 * Skip the test on configurations where G2G testing is impossible:
 * VFs, single-GT devices (no one to talk to), or production firmware
 * builds (the test-only GuC interface is unavailable there).
 */
static void g2g_check_skip(struct kunit *test)
{
	struct xe_device *xe = test->priv;
	struct xe_gt *gt;
	int i;

	if (IS_SRIOV_VF(xe))
		kunit_skip(test, "not supported from a VF");

	if (xe->info.gt_count <= 1)
		kunit_skip(test, "not enough GTs");

	for_each_gt(gt, xe, i) {
		struct xe_guc *guc = &gt->uc.guc;

		if (guc->fw.build_type == CSS_UKERNEL_INFO_BUILDTYPE_PROD)
			kunit_skip(test,
				   "G2G test interface not available in production firmware builds\n");
	}
}
/*
 * Simple test that does not try to recreate the CTBs.
 * Requires that the platform already enables G2G comms
 * but has no risk of leaving the system in a broken state
 * afterwards.
 */
static void xe_live_guc_g2g_kunit_default(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	if (!xe_guc_g2g_wanted(xe))
		kunit_skip(test, "G2G not enabled");

	g2g_check_skip(test);
	g2g_pm_runtime_get(test);

	kunit_info(test, "Testing default CTBs\n");
	g2g_run_test(test, xe);

	/*
	 * Release (and hence run) the PM action now rather than leaving it
	 * to deferred cleanup. Pass the function directly (no '&') for
	 * consistency with the allmem test's release calls.
	 */
	kunit_release_action(test, g2g_pm_runtime_put, xe);
}
/*
 * More complex test that re-creates the CTBs in various location to
 * test access to each location from each GuC. Can be run even on
 * systems that do not enable G2G by default. On the other hand,
 * because it recreates the CTBs, if something goes wrong it could
 * leave the system with broken G2G comms.
 */
static void xe_live_guc_g2g_kunit_allmem(struct kunit *test)
{
	struct xe_device *xe = test->priv;
	int ret;

	g2g_check_skip(test);
	g2g_pm_runtime_get(test);

	/* Make sure to leave the system as we found it */
	ret = kunit_add_action_or_reset(test, g2g_recreate_ctb, xe);
	KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register CTB re-creation action\n");

	kunit_info(test, "Testing CTB type 'default'...\n");
	g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
	g2g_run_test(test, xe);

	kunit_info(test, "Testing CTB type 'host'...\n");
	g2g_reinit(test, xe, G2G_CTB_TYPE_HOST, NULL);
	g2g_run_test(test, xe);

	if (IS_DGFX(xe)) {
		struct xe_tile *tile;
		int id;

		/* Exercise CTB placement in each tile's local VRAM in turn */
		for_each_tile(tile, xe, id) {
			kunit_info(test, "Testing CTB type 'tile: #%d'...\n", id);
			g2g_reinit(test, xe, G2G_CTB_TYPE_TILE, tile);
			g2g_run_test(test, xe);
		}
	} else {
		kunit_info(test, "Skipping local memory on integrated platform\n");
	}

	/* Run the cleanup actions now, in a controlled order */
	kunit_release_action(test, g2g_recreate_ctb, xe);
	kunit_release_action(test, g2g_pm_runtime_put, xe);
}
/* Both tests are parameterised over all live devices present on the system */
static struct kunit_case xe_guc_g2g_tests[] = {
	KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_default, xe_pci_live_device_gen_param),
	KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_allmem, xe_pci_live_device_gen_param),
	{}
};
/* Suite definition; exported only when KUnit is built in */
VISIBLE_IF_KUNIT
struct kunit_suite xe_guc_g2g_test_suite = {
	.name = "xe_guc_g2g",
	.test_cases = xe_guc_g2g_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_guc_g2g_test_suite);

View File

@@ -10,12 +10,14 @@ extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
extern struct kunit_suite xe_guc_g2g_test_suite;
kunit_test_suite(xe_bo_test_suite);
kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
kunit_test_suite(xe_guc_g2g_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");

View File

@@ -70,7 +70,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
} } while (0)
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test, u32 region)
struct kunit *test, u32 region, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
@@ -84,14 +84,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
XE_BO_FLAG_PINNED,
exec);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
return;
}
err = xe_bo_validate(remote, NULL, false);
err = xe_bo_validate(remote, NULL, false, exec);
if (err) {
KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
str, err);
@@ -161,13 +162,13 @@ out_unlock:
}
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test)
struct drm_exec *exec, struct kunit *test)
{
test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
test_copy(m, bo, test, XE_BO_FLAG_SYSTEM, exec);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test)
struct drm_exec *exec, struct kunit *test)
{
u32 region;
@@ -178,10 +179,11 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
region = XE_BO_FLAG_VRAM1;
else
region = XE_BO_FLAG_VRAM0;
test_copy(m, bo, test, region);
test_copy(m, bo, test, region, exec);
}
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
struct drm_exec *exec)
{
struct xe_tile *tile = m->tile;
struct xe_device *xe = tile_to_xe(tile);
@@ -202,7 +204,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile));
XE_BO_FLAG_VRAM_IF_DGFX(tile),
exec);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -210,7 +213,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile));
XE_BO_FLAG_VRAM_IF_DGFX(tile),
exec);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -220,7 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile));
XE_BO_FLAG_VRAM_IF_DGFX(tile),
exec);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -290,10 +295,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
test_copy_sysmem(m, tiny, test);
test_copy_sysmem(m, tiny, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying small buffer object to other vram\n");
test_copy_vram(m, tiny, test);
test_copy_vram(m, tiny, exec, test);
}
/* Clear a big bo */
@@ -312,10 +317,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
test_copy_sysmem(m, big, test);
test_copy_sysmem(m, big, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying big buffer object to other vram\n");
test_copy_vram(m, big, test);
test_copy_vram(m, big, exec, test);
}
out:
@@ -343,10 +348,11 @@ static int migrate_test_run_device(struct xe_device *xe)
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->q->vm, false);
xe_migrate_sanity_test(m, test);
xe_migrate_sanity_test(m, test, exec);
xe_vm_unlock(m->q->vm);
}
@@ -490,7 +496,7 @@ err_sync:
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
struct kunit *test)
struct drm_exec *exec, struct kunit *test)
{
struct dma_fence *fence;
u64 expected, retval;
@@ -509,7 +515,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
ret = xe_bo_evict(vram_bo);
ret = xe_bo_evict(vram_bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
@@ -538,7 +544,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Restore vram buffer object\n");
ret = xe_bo_validate(vram_bo, NULL, false);
ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
return;
@@ -636,13 +642,14 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
{
struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct drm_exec *exec;
long ret;
sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
sys_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -650,8 +657,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
return;
}
exec = XE_VALIDATION_OPT_OUT;
xe_bo_lock(sys_bo, false);
ret = xe_bo_validate(sys_bo, NULL, false);
ret = xe_bo_validate(sys_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_sysbo;
@@ -664,10 +672,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(sys_bo);
ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
ccs_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -676,7 +684,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(ccs_bo, false);
ret = xe_bo_validate(ccs_bo, NULL, false);
ret = xe_bo_validate(ccs_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_ccsbo;
@@ -689,10 +697,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(ccs_bo);
vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
vram_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
@@ -700,7 +708,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(vram_bo, false);
ret = xe_bo_validate(vram_bo, NULL, false);
ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
goto free_vrambo;
@@ -713,7 +721,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
test_clear(xe, tile, sys_bo, vram_bo, test);
test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, exec, test);
xe_bo_unlock(vram_bo);
xe_bo_lock(vram_bo, false);

View File

@@ -12,12 +12,219 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
#define PLATFORM_CASE(platform__, graphics_step__) \
{ \
.platform = XE_ ## platform__, \
.subplatform = XE_SUBPLATFORM_NONE, \
.step = { .graphics = STEP_ ## graphics_step__ } \
}
#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
{ \
.platform = XE_ ## platform__, \
.subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
.step = { .graphics = STEP_ ## graphics_step__ } \
}
#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
media_verx100__, media_step__) \
{ \
.platform = XE_ ## platform__, \
.subplatform = XE_SUBPLATFORM_NONE, \
.graphics_verx100 = graphics_verx100__, \
.media_verx100 = media_verx100__, \
.step = { .graphics = STEP_ ## graphics_step__, \
.media = STEP_ ## media_step__ } \
}
static const struct xe_pci_fake_data cases[] = {
PLATFORM_CASE(TIGERLAKE, B0),
PLATFORM_CASE(DG1, A0),
PLATFORM_CASE(DG1, B0),
PLATFORM_CASE(ALDERLAKE_S, A0),
PLATFORM_CASE(ALDERLAKE_S, B0),
PLATFORM_CASE(ALDERLAKE_S, C0),
PLATFORM_CASE(ALDERLAKE_S, D0),
PLATFORM_CASE(ALDERLAKE_P, A0),
PLATFORM_CASE(ALDERLAKE_P, B0),
PLATFORM_CASE(ALDERLAKE_P, C0),
SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
SUBPLATFORM_CASE(DG2, G10, C0),
SUBPLATFORM_CASE(DG2, G11, B1),
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
GMDID_CASE(PANTHERLAKE, 3000, A0, 3000, A0),
};
KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc);
/**
* xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
* This function prepares struct xe_pci_fake_data parameter.
*
* To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc)
{
return platform_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_gen_params);
/*
 * Find the device descriptor for a given platform by scanning the
 * driver's PCI ID table. Returns NULL if the platform is not listed.
 */
static const struct xe_device_desc *lookup_desc(enum xe_platform p)
{
	const struct xe_device_desc *desc;
	const struct pci_device_id *ids;

	/* Table is terminated by an entry with zero driver_data */
	for (ids = pciidlist; ids->driver_data; ids++) {
		desc = (const void *)ids->driver_data;
		if (desc->platform == p)
			return desc;
	}
	return NULL;
}
/*
 * Find the sub-platform descriptor @s within platform @p's descriptor.
 * Returns NULL if the platform is unknown, has no sub-platforms, or
 * does not contain the requested sub-platform.
 */
static const struct xe_subplatform_desc *lookup_sub_desc(enum xe_platform p, enum xe_subplatform s)
{
	const struct xe_device_desc *desc = lookup_desc(p);
	const struct xe_subplatform_desc *spd;

	/* Sub-platform list is terminated by a zero subplatform entry */
	if (desc && desc->subplatforms)
		for (spd = desc->subplatforms; spd->subplatform; spd++)
			if (spd->subplatform == s)
				return spd;

	return NULL;
}
static const char *lookup_platform_name(enum xe_platform p)
{
const struct xe_device_desc *desc = lookup_desc(p);
return desc ? desc->platform_name : "INVALID";
}
static const char *__lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
{
const struct xe_subplatform_desc *desc = lookup_sub_desc(p, s);
return desc ? desc->name : "INVALID";
}
static const char *lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
{
return s == XE_SUBPLATFORM_NONE ? "" : __lookup_subplatform_name(p, s);
}
static const char *subplatform_prefix(enum xe_subplatform s)
{
return s == XE_SUBPLATFORM_NONE ? "" : " ";
}
static const char *step_prefix(enum xe_step step)
{
return step == STEP_NONE ? "" : " ";
}
static const char *step_name(enum xe_step step)
{
return step == STEP_NONE ? "" : xe_step_name(step);
}
static const char *sriov_prefix(enum xe_sriov_mode mode)
{
return mode <= XE_SRIOV_MODE_NONE ? "" : " ";
}
static const char *sriov_name(enum xe_sriov_mode mode)
{
return mode <= XE_SRIOV_MODE_NONE ? "" : xe_sriov_mode_to_string(mode);
}
static const char *lookup_graphics_name(unsigned int verx100)
{
const struct xe_ip *ip = find_graphics_ip(verx100);
return ip ? ip->name : "";
}
static const char *lookup_media_name(unsigned int verx100)
{
const struct xe_ip *ip = find_media_ip(verx100);
return ip ? ip->name : "";
}
/**
* xe_pci_fake_data_desc - Describe struct xe_pci_fake_data parameter
* @param: the &struct xe_pci_fake_data parameter to describe
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
* This function prepares description of the struct xe_pci_fake_data parameter.
*
* It is tailored for use in parameterized KUnit tests where parameter generator
* is based on the struct xe_pci_fake_data arrays.
*/
void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc)
{
if (param->graphics_verx100 || param->media_verx100)
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s %u.%02u(%s)%s%s %u.%02u(%s)%s%s%s%s",
lookup_platform_name(param->platform),
subplatform_prefix(param->subplatform),
lookup_subplatform_name(param->platform, param->subplatform),
param->graphics_verx100 / 100, param->graphics_verx100 % 100,
lookup_graphics_name(param->graphics_verx100),
step_prefix(param->step.graphics), step_name(param->step.graphics),
param->media_verx100 / 100, param->media_verx100 % 100,
lookup_media_name(param->media_verx100),
step_prefix(param->step.media), step_name(param->step.media),
sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
else
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s%s%s%s%s",
lookup_platform_name(param->platform),
subplatform_prefix(param->subplatform),
lookup_subplatform_name(param->platform, param->subplatform),
step_prefix(param->step.graphics), step_name(param->step.graphics),
sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_desc);
static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
param->verx100 / 100, param->verx100 % 100, param->name);
}
/*
* Pre-GMDID Graphics and Media IPs definitions.
*
* Mimic the way GMDID IPs are declared so the same
* param generator can be used for both
*/
static const struct xe_ip pre_gmdid_graphics_ips[] = {
graphics_ip_xelp,
graphics_ip_xelpp,
graphics_ip_xehpg,
graphics_ip_xehpc,
};
static const struct xe_ip pre_gmdid_media_ips[] = {
media_ip_xem,
media_ip_xehpm,
};
KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc);
KUNIT_ARRAY_PARAM(pre_gmdid_media_ip, pre_gmdid_media_ips, xe_ip_kunit_desc);
KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
@@ -46,6 +253,13 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
*/
const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
const void *next = pre_gmdid_graphics_ip_gen_params(test, prev, desc);
if (next)
return next;
if (is_insidevar(prev, pre_gmdid_graphics_ips))
prev = NULL;
return graphics_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
@@ -63,6 +277,13 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
*/
const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
const void *next = pre_gmdid_media_ip_gen_params(test, prev, desc);
if (next)
return next;
if (is_insidevar(prev, pre_gmdid_media_ips))
prev = NULL;
return media_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
@@ -94,13 +315,18 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
if (type == GMDID_MEDIA) {
*ver = data->media_verx100;
*revid = xe_step_to_gmdid(data->media_step);
*revid = xe_step_to_gmdid(data->step.media);
} else {
*ver = data->graphics_verx100;
*revid = xe_step_to_gmdid(data->graphics_step);
*revid = xe_step_to_gmdid(data->step.graphics);
}
}
static void fake_xe_info_probe_tile_count(struct xe_device *xe)
{
/* Nothing to do, just use the statically defined value. */
}
int xe_pci_fake_device_init(struct xe_device *xe)
{
struct kunit *test = kunit_get_current_test();
@@ -138,6 +364,8 @@ done:
data->sriov_mode : XE_SRIOV_MODE_NONE;
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
kunit_activate_static_stub(test, xe_info_probe_tile_count,
fake_xe_info_probe_tile_count);
xe_info_init_early(xe, desc, subplatform_desc);
xe_info_init(xe, desc);

View File

@@ -11,6 +11,7 @@
#include "xe_platform_types.h"
#include "xe_sriov_types.h"
#include "xe_step_types.h"
struct xe_device;
@@ -18,13 +19,14 @@ struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
struct xe_step_info step;
u32 graphics_verx100;
u32 media_verx100;
u32 graphics_step;
u32 media_step;
};
int xe_pci_fake_device_init(struct xe_device *xe);
const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc);
void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc);
const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc);
const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc);

View File

@@ -15,86 +15,10 @@
#include "xe_tuning.h"
#include "xe_wa.h"
struct platform_test_case {
const char *name;
enum xe_platform platform;
enum xe_subplatform subplatform;
u32 graphics_verx100;
u32 media_verx100;
struct xe_step_info step;
};
#define PLATFORM_CASE(platform__, graphics_step__) \
{ \
.name = #platform__ " (" #graphics_step__ ")", \
.platform = XE_ ## platform__, \
.subplatform = XE_SUBPLATFORM_NONE, \
.step = { .graphics = STEP_ ## graphics_step__ } \
}
#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
{ \
.name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")", \
.platform = XE_ ## platform__, \
.subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
.step = { .graphics = STEP_ ## graphics_step__ } \
}
#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
media_verx100__, media_step__) \
{ \
.name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\
.platform = XE_ ## platform__, \
.subplatform = XE_SUBPLATFORM_NONE, \
.graphics_verx100 = graphics_verx100__, \
.media_verx100 = media_verx100__, \
.step = { .graphics = STEP_ ## graphics_step__, \
.media = STEP_ ## media_step__ } \
}
static const struct platform_test_case cases[] = {
PLATFORM_CASE(TIGERLAKE, B0),
PLATFORM_CASE(DG1, A0),
PLATFORM_CASE(DG1, B0),
PLATFORM_CASE(ALDERLAKE_S, A0),
PLATFORM_CASE(ALDERLAKE_S, B0),
PLATFORM_CASE(ALDERLAKE_S, C0),
PLATFORM_CASE(ALDERLAKE_S, D0),
PLATFORM_CASE(ALDERLAKE_P, A0),
PLATFORM_CASE(ALDERLAKE_P, B0),
PLATFORM_CASE(ALDERLAKE_P, C0),
SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
SUBPLATFORM_CASE(DG2, G10, C0),
SUBPLATFORM_CASE(DG2, G11, B1),
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
};
static void platform_desc(const struct platform_test_case *t, char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(platform, cases, platform_desc);
static int xe_wa_test_init(struct kunit *test)
{
const struct platform_test_case *param = test->param_value;
struct xe_pci_fake_data data = {
.platform = param->platform,
.subplatform = param->subplatform,
.graphics_verx100 = param->graphics_verx100,
.media_verx100 = param->media_verx100,
.graphics_step = param->step.graphics,
.media_step = param->step.media,
};
const struct xe_pci_fake_data *param = test->param_value;
struct xe_pci_fake_data data = *param;
struct xe_device *xe;
struct device *dev;
int ret;
@@ -119,13 +43,6 @@ static int xe_wa_test_init(struct kunit *test)
return 0;
}
static void xe_wa_test_exit(struct kunit *test)
{
struct xe_device *xe = test->priv;
drm_kunit_helper_free_device(test, xe->drm.dev);
}
static void xe_wa_gt(struct kunit *test)
{
struct xe_device *xe = test->priv;
@@ -143,14 +60,13 @@ static void xe_wa_gt(struct kunit *test)
}
static struct kunit_case xe_wa_tests[] = {
KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params),
KUNIT_CASE_PARAM(xe_wa_gt, xe_pci_fake_data_gen_params),
{}
};
static struct kunit_suite xe_rtp_test_suite = {
.name = "xe_wa",
.init = xe_wa_test_init,
.exit = xe_wa_test_exit,
.test_cases = xe_wa_tests,
};