Pull drm updates from Dave Airlie:
 "Highlights are usual, more AMD IP blocks for future hw, i915/xe
  changes, DisplayPort tunneling support for i915, msm YUV over DP
  changes, new tests for ttm, but it's mostly a lot of stuff all over the
  place from lots of people.

  core:
   - EDID cleanups
   - scheduler error handling fixes
   - managed: add drmm_release_action() with tests
   - add ratelimited drm debug print
   - DPCD PSR early transport macro
   - DP tunneling and bandwidth allocation helpers
   - remove built-in edids
   - dp: Avoid AUX transfers on powered-down displays
   - dp: Add VSC SDP helpers

  cross drivers:
   - use new drm print helpers
   - switch to ->read_edid callback
   - gem: add stats for shared buffers plus updates to amdgpu, i915, xe

  syncobj:
   - fixes to waiting and sleeping

  ttm:
   - add tests
   - fix errno codes
   - simplify busy-placement handling
   - fix page decryption

  media:
   - tc358743: fix v4l device registration

  video:
   - move all kernel parameters for video behind CONFIG_VIDEO

  sound:
   - remove <drm/drm_edid.h> include from header

  ci:
   - add tests for msm
   - fix apq8016 runner

  efifb:
   - use copy of global screen_info state

  vesafb:
   - use copy of global screen_info state

  simplefb:
   - fix logging

  bridge:
   - ite-6505: fix DP link-training bug
   - samsung-dsim: fix error checking in probe
   - samsung-dsim: add bsh-smm-s2/pro boards
   - tc358767: fix regmap usage
   - imx: add i.MX8MP HDMI PVI plus DT bindings
   - imx: add i.MX8MP HDMI TX plus DT bindings
   - sii902x: fix probing and unregistration
   - tc358767: limit pixel PLL input range
   - switch to new drm_bridge_read_edid() interface

  panel:
   - ltk050h3146w: error-handling fixes
   - panel-edp: support delay between power-on and enable; use put_sync
     in unprepare; support Mediatek MT8173 Chromebooks, BOE NV116WHM-N49
     V8.0, BOE NV122WUM-N41, CSO MNC207QS1-1 plus DT bindings
   - panel-lvds: support EDT ETML0700Z9NDHA plus DT bindings
   - panel-novatek: FRIDA FRD400B25025-A-CTK plus DT bindings
   - add BOE TH101MB31IG002-28A plus DT bindings
   - add EDT ETML1010G3DRA plus DT bindings
   - add Novatek NT36672E LCD DSI plus DT bindings
   - nt36523: support 120Hz timings, fix includes
   - simple: fix display timings on RK32FN48H
   - visionox-vtdr6130: fix initialization
   - add Powkiddy RGB10MAX3 plus DT bindings
   - st7703: support panel rotation plus DT bindings
   - add Himax HX83112A plus DT bindings
   - ltk500hd1829: add support for ltk101b4029w and admatec 9904370
   - simple: add BOE BP082WX1-100 8.2" panel plus DT bindings

  panel-orientation-quirks:
   - GPD Win Mini

  amdgpu:
   - Validate DMABuf imports in compute VMs
   - Add RAS ACA framework
   - PSP 13 fixes
   - Misc code cleanups
   - Replay fixes
   - Atom interpreter PS, WS bounds checking
   - DML2 fixes
   - Audio fixes
   - DCN 3.5 Z state fixes
   - Remove deprecated ida_simple usage
   - UBSAN fixes
   - RAS fixes
   - Enable seq64 infrastructure
   - DC color block enablement
   - Documentation updates
   - DC documentation updates
   - DMCUB updates
   - ATHUB 4.1 support
   - LSDMA 7.0 support
   - JPEG DPG support
   - IH 7.0 support
   - HDP 7.0 support
   - VCN 5.0 support
   - SMU 13.0.6 updates
   - NBIO 7.11 updates
   - SDMA 6.1 updates
   - MMHUB 3.3 updates
   - DCN 3.5.1 support
   - NBIF 6.3.1 support
   - VPE 6.1.1 support

  amdkfd:
   - Validate DMABuf imports in compute VMs
   - SVM fixes
   - Trap handler updates and enhancements
   - Fix cache size reporting
   - Relocate the trap handler

  radeon:
   - Atom interpreter PS, WS bounds checking
   - Misc code cleanups

  xe:
   - new query for GuC submission version
   - Remove unused persistent exec_queues
   - Add vram frequency sysfs attributes
   - Add the flag XE_VM_BIND_FLAG_DUMPABLE
   - Drop pre-production workarounds
   - Drop kunit tests for unsupported platforms
   - Start plumbing SR-IOV support with memory based interrupts for VF
   - Allow to map BO in GGTT with PAT index corresponding to XE_CACHE_UC
     to work with memory based interrupts
   - Add GuC Doorbells Manager as prep work SR-IOV
   - Implement additional workarounds for xe2 and MTL
   - Program a few registers according to performance guide spec for Xe2
   - Fix remaining 32b build issues and enable it back
   - Fix build with CONFIG_DEBUG_FS=n
   - Fix warnings from GuC ABI headers
   - Introduce Relay Communication for SR-IOV for VF <-> GuC <-> PF
   - Release mmap mappings on rpm suspend
   - Disable mid-thread preemption when not properly supported by
     hardware
   - Fix xe_exec by reserving extra fence slot for CPU bind
   - Fix xe_exec with full long running exec queue
   - Canonicalize addresses where needed for Xe2 and add to devcoredump
   - Toggle USM support for Xe2
   - Only allow 1 ufence per exec / bind IOCTL
   - Add GuC firmware loading for Lunar Lake
   - Add XE_VMA_PTE_64K VMA flag

  i915:
   - Add more ADL-N PCI IDs
   - Enable fastboot also on older platforms
   - Early transport for panel replay and PSR
   - New ARL PCI IDs
   - DP TPS4 PHY test pattern support
   - Unify and improve VSC SDP for PSR and non-PSR cases
   - Refactor memory regions and improve debug logging
   - Rework global state serialization
   - Remove unused CDCLK divider fields
   - Unify HDCP connector logging format
   - Use display instead of graphics version in display code
   - Move VBT and opregion debugfs next to the implementation
   - Abstract opregion interface, use opaque type
   - MTL fixes
   - HPD handling fixes
   - Add GuC submission interface version query
   - Atomically invalidate userptr on mmu-notifier
   - Update handling of MMIO triggered reports
   - Don't make assumptions about intel_wakeref_t type
   - Extend driver code of Xe_LPG to Xe_LPG+
   - Add flex arrays to struct i915_syncmap
   - Allow for very slow HuC loading
   - DP tunneling and bandwidth allocation support

  msm:
   - Correct bindings for MSM8976 and SM8650 platforms
   - Start migration of MDP5 platforms to DPU driver
   - X1E80100 MDSS support
   - DPU:
      - Improve DSC allocation, fixing several important corner cases
      - Add support for SDM630/SDM660 platforms
      - Simplify dpu_encoder_phys_ops
      - Apply fixes targeting DSC support with a single DSC encoder
      - Apply fixes for HCTL_EN timing configuration
      - X1E80100 support
      - Add support for YUV420 over DP
   - GPU:
      - fix sc7180 UBWC config
      - fix a7xx LLC config
      - new gpu support: a305B, a750, a702
      - machine support: SM7150 (different power levels than other a618)
      - a7xx devcoredump support

  habanalabs:
   - configure IRQ affinity according to NUMA node
   - move HBM MMU page tables inside the HBM
   - improve device reset
   - check extended PCIe errors

  ivpu:
   - updates to firmware API
   - refactor BO allocation

  imx:
   - use devm_ functions during init

  hisilicon:
   - fix EDID includes

  mgag200:
   - improve ioremap usage
   - convert to struct drm_edid
   - Work around PCI write bursts

  nouveau:
   - disp: use kmemdup()
   - fix EDID includes
   - documentation fixes

  qaic:
   - fixes to BO handling
   - make use of DRM managed release
   - fix order of remove operations

  rockchip:
   - analogix_dp: get encoder port from DT
   - inno_hdmi: support HDMI for RK3128
   - lvds: error-handling fixes

  ssd130x:
   - support SSD133x plus DT bindings

  tegra:
   - fix error handling

  tilcdc:
   - make use of DRM managed release

  v3d:
   - show memory stats in debugfs
   - Support display MMU page size

  vc4:
   - fix error handling in plane prepare_fb
   - fix framebuffer test in plane helpers

  virtio:
   - add venus capset defines

  vkms:
   - fix OOB access when programming the LUT
   - Kconfig improvements

  vmwgfx:
   - unmap surface before changing plane state
   - fix memory leak in error handling
   - documentation fixes
   - list command SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 as invalid
   - fix null-pointer deref in execbuf
   - refactor display-mode probing
   - fix fencing for creating cursor MOBs
   - fix cursor-memory lifetime

  xlnx:
   - fix live video input for ZynqMP DPSUB

  lima:
   - fix memory leak

  loongson:
   - fail if no VRAM present

  meson:
   - switch to new drm_bridge_read_edid() interface

  renesas:
   - add RZ/G2L DU support plus DT bindings

  mxsfb:
   - Use managed mode config

  sun4i:
   - HDMI: updates to atomic mode setting

  mediatek:
   - Add display driver for MT8188 VDOSYS1
   - DSI driver cleanups
   - Filter modes according to hardware capability
   - Fix a null pointer crash in mtk_drm_crtc_finish_page_flip

  etnaviv:
   - enhancements for NPU and MRT support"

* tag 'drm-next-2024-03-13' of https://gitlab.freedesktop.org/drm/kernel: (1420 commits)
  drm/amd/display: Removed redundant @ symbol to fix kernel-doc warnings in -next repo
  drm/amd/pm: wait for completion of the EnableGfxImu message
  drm/amdgpu/soc21: add mode2 asic reset for SMU IP v14.0.1
  drm/amdgpu: add smu 14.0.1 support
  drm/amdgpu: add VPE 6.1.1 discovery support
  drm/amdgpu/vpe: add VPE 6.1.1 support
  drm/amdgpu/vpe: don't emit cond exec command under collaborate mode
  drm/amdgpu/vpe: add collaborate mode support for VPE
  drm/amdgpu/vpe: add PRED_EXE and COLLAB_SYNC OPCODE
  drm/amdgpu/vpe: add multi instance VPE support
  drm/amdgpu/discovery: add nbif v6_3_1 ip block
  drm/amdgpu: Add nbif v6_3_1 ip block support
  drm/amdgpu: Add pcie v6_1_0 ip headers (v5)
  drm/amdgpu: Add nbif v6_3_1 ip headers (v5)
  arch/powerpc: Remove <linux/fb.h> from backlight code
  macintosh/via-pmu-backlight: Include <linux/backlight.h>
  fbdev/chipsfb: Include <linux/backlight.h>
  drm/etnaviv: Restore some id values
  drm/amdkfd: make kfd_class constant
  drm/amdgpu: add ring timeout information in devcoredump
  ...
This commit is contained in:
Linus Torvalds
2024-03-13 18:34:05 -07:00
1189 changed files with 189380 additions and 16411 deletions

View File

@@ -1,10 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# "live" kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
	xe_bo_test.o \
	xe_dma_buf_test.o \
	xe_migrate_test.o \
	xe_mocs_test.o
# Normal kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
xe_test-y = xe_test_mod.o \
	xe_pci_test.o \
	xe_rtp_test.o \
	xe_wa_test.o

View File

@@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <kunit/test.h>
#include "xe_device.h"
#include "xe_kunit_helpers.h"
/* Per-test setup: build a fake device and hand its doorbell manager to the test. */
static int guc_dbm_test_init(struct kunit *test)
{
	struct xe_guc_db_mgr *dbm;

	/* allocates a fake xe_device and stores it in test->priv */
	xe_kunit_helper_xe_device_test_init(test);
	dbm = &xe_device_get_gt(test->priv, 0)->uc.guc.dbm;
	mutex_init(dbm_mutex(dbm));
	/* test cases operate on the doorbell manager, not the device */
	test->priv = dbm;
	return 0;
}
/* A manager initialized with zero doorbells must reject every reservation. */
static void test_empty(struct kunit *test)
{
	struct xe_guc_db_mgr *dbm = test->priv;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, 0), 0);
	KUNIT_ASSERT_EQ(test, dbm->count, 0);
	/* _locked variants require the caller to hold the manager's mutex */
	mutex_lock(dbm_mutex(dbm));
	KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
	mutex_unlock(dbm_mutex(dbm));
	KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
}
/* Initializing with ~0 must clamp to the full GuC doorbell range. */
static void test_default(struct kunit *test)
{
	struct xe_guc_db_mgr *mgr = test->priv;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(mgr, ~0), 0);
	KUNIT_ASSERT_EQ(test, mgr->count, GUC_NUM_DOORBELLS);
}
/* Manager sizes to exercise: small fractions of, up to the full, doorbell range. */
static const unsigned int guc_dbm_params[] = {
	GUC_NUM_DOORBELLS / 64,
	GUC_NUM_DOORBELLS / 32,
	GUC_NUM_DOORBELLS / 8,
	GUC_NUM_DOORBELLS,
};
/* Label each parameterized-test run with the plain decimal value. */
static void uint_param_get_desc(const unsigned int *value, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u", *value);
}
KUNIT_ARRAY_PARAM(guc_dbm, guc_dbm_params, uint_param_get_desc);
/* Exactly *p ids can be reserved on a manager of size *p; one more must fail. */
static void test_size(struct kunit *test)
{
	const unsigned int *p = test->param_value;
	struct xe_guc_db_mgr *dbm = test->priv;
	unsigned int n;
	int id;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0);
	KUNIT_ASSERT_EQ(test, dbm->count, *p);
	mutex_lock(dbm_mutex(dbm));
	for (n = 0; n < *p; n++) {
		/* every returned id must be valid and within the managed range */
		KUNIT_EXPECT_GE(test, id = xe_guc_db_mgr_reserve_id_locked(dbm), 0);
		KUNIT_EXPECT_LT(test, id, dbm->count);
	}
	/* manager exhausted - the next reservation must report an error */
	KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
	mutex_unlock(dbm_mutex(dbm));
	/* release everything so the manager is clean at the end of the test */
	mutex_lock(dbm_mutex(dbm));
	for (n = 0; n < *p; n++)
		xe_guc_db_mgr_release_id_locked(dbm, n);
	mutex_unlock(dbm_mutex(dbm));
}
/* A released id must be handed out again by the very next reservation. */
static void test_reuse(struct kunit *test)
{
	const unsigned int *p = test->param_value;
	struct xe_guc_db_mgr *dbm = test->priv;
	unsigned int n;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0);
	/* fill the manager completely */
	mutex_lock(dbm_mutex(dbm));
	for (n = 0; n < *p; n++)
		KUNIT_EXPECT_GE(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
	KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
	mutex_unlock(dbm_mutex(dbm));
	/* releasing id n must make exactly id n available again */
	mutex_lock(dbm_mutex(dbm));
	for (n = 0; n < *p; n++) {
		xe_guc_db_mgr_release_id_locked(dbm, n);
		KUNIT_EXPECT_EQ(test, xe_guc_db_mgr_reserve_id_locked(dbm), n);
	}
	KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
	mutex_unlock(dbm_mutex(dbm));
	/* clean up all reservations */
	mutex_lock(dbm_mutex(dbm));
	for (n = 0; n < *p; n++)
		xe_guc_db_mgr_release_id_locked(dbm, n);
	mutex_unlock(dbm_mutex(dbm));
}
/* Range reservations must never overlap previously reserved ranges or ids. */
static void test_range_overlap(struct kunit *test)
{
	const unsigned int *p = test->param_value;
	struct xe_guc_db_mgr *dbm = test->priv;
	int id1, id2, id3;
	unsigned int n;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
	KUNIT_ASSERT_LE(test, *p, dbm->count);
	/* reserve a *p-wide range, then single ids until the manager is full */
	KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
	for (n = 0; n < dbm->count - *p; n++) {
		KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
		KUNIT_ASSERT_NE(test, id2, id1);
		/*
		 * An id outside [id1, id1 + *p - 1] makes exactly one of the
		 * two comparisons true; if both are false (equal), id2 landed
		 * inside the already-reserved range.
		 */
		KUNIT_ASSERT_NE_MSG(test, id2 < id1, id2 > id1 + *p - 1,
				    "id1=%d id2=%d", id1, id2);
	}
	KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
	xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
	if (*p >= 1) {
		/* repeat with a 1-wide plus a (*p - 1)-wide range */
		KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
		KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, *p - 1, 0), 0);
		KUNIT_ASSERT_NE(test, id2, id1);
		KUNIT_ASSERT_NE_MSG(test, id1 < id2, id1 > id2 + *p - 2,
				    "id1=%d id2=%d", id1, id2);
		for (n = 0; n < dbm->count - *p; n++) {
			KUNIT_ASSERT_GE(test, id3 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
			KUNIT_ASSERT_NE(test, id3, id1);
			KUNIT_ASSERT_NE(test, id3, id2);
			KUNIT_ASSERT_NE_MSG(test, id3 < id2, id3 > id2 + *p - 2,
					    "id3=%d id2=%d", id3, id2);
		}
		KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
		xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
	}
}
/* count/(*p) back-to-back ranges of size *p must exactly fill the manager. */
static void test_range_compact(struct kunit *test)
{
	const unsigned int *p = test->param_value;
	struct xe_guc_db_mgr *dbm = test->priv;
	unsigned int n;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
	KUNIT_ASSERT_NE(test, *p, 0);
	KUNIT_ASSERT_LE(test, *p, dbm->count);
	/* this packing check only makes sense when *p divides count evenly */
	if (dbm->count % *p)
		kunit_skip(test, "must be divisible");
	KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
	for (n = 1; n < dbm->count / *p; n++)
		KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
	/* nothing left - even a single id must be refused */
	KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
	xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
}
/*
 * Exercise the "spare" argument of xe_guc_db_mgr_reserve_range(): it excludes
 * the given number of doorbells at the top of the range from the allocation.
 */
static void test_range_spare(struct kunit *test)
{
	const unsigned int *p = test->param_value;
	struct xe_guc_db_mgr *dbm = test->priv;
	int id;

	KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
	KUNIT_ASSERT_LE(test, *p, dbm->count);
	/* spare == count leaves no usable doorbells at all */
	KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count), 0);
	/* one doorbell short of fitting a *p-wide range */
	KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p + 1), 0);
	/* exact fit: only [0, *p) is usable, so the range starts at id 0 */
	KUNIT_ASSERT_EQ(test, id = xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p), 0);
	KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, dbm->count - *p), 0);
	xe_guc_db_mgr_release_range(dbm, id, *p);
}
/* Doorbell-manager test cases; sized cases run once per guc_dbm_params entry. */
static struct kunit_case guc_dbm_test_cases[] = {
	KUNIT_CASE(test_empty),
	KUNIT_CASE(test_default),
	KUNIT_CASE_PARAM(test_size, guc_dbm_gen_params),
	KUNIT_CASE_PARAM(test_reuse, guc_dbm_gen_params),
	KUNIT_CASE_PARAM(test_range_overlap, guc_dbm_gen_params),
	KUNIT_CASE_PARAM(test_range_compact, guc_dbm_gen_params),
	KUNIT_CASE_PARAM(test_range_spare, guc_dbm_gen_params),
	{}
};
/* Suite registration; guc_dbm_test_init() runs before every test case. */
static struct kunit_suite guc_dbm_suite = {
	.name = "guc_dbm",
	.test_cases = guc_dbm_test_cases,
	.init = guc_dbm_test_init,
};
kunit_test_suites(&guc_dbm_suite);

View File

@@ -0,0 +1,522 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include "xe_device.h"
#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
/* Arbitrary, recognizable constants used to build and verify relay messages. */
#define TEST_RID 1234		/* relay transaction id forced via relay->last_rid */
#define TEST_VFID 5		/* VF number; also returned by the totalvfs stub */
#define TEST_LEN 6		/* length of TEST_MSG in u32 dwords */
#define TEST_ACTION 0xa		/* fake relay action code */
#define TEST_DATA(n) (0xd0 + (n))	/* distinctive payload values */
/* Stub for relay_get_totalvfs(): pretend the device exposes TEST_VFID VFs. */
static int replacement_relay_get_totalvfs(struct xe_guc_relay *relay)
{
	return TEST_VFID;
}
/* Per-test setup: fake PF device, initialized relay, next relay id = TEST_RID. */
static int relay_test_init(struct kunit *test)
{
	struct xe_pci_fake_data fake = {
		.sriov_mode = XE_SRIOV_MODE_PF,
		.platform = XE_TIGERLAKE, /* some random platform */
		.subplatform = XE_SUBPLATFORM_NONE,
	};
	struct xe_guc_relay *relay;
	struct xe_device *xe;

	/* the device helper consumes the fake-pci description via test->priv */
	test->priv = &fake;
	xe_kunit_helper_xe_device_test_init(test);
	xe = test->priv;
	KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
	relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
	/* report a fixed VF count instead of querying (nonexistent) hardware */
	kunit_activate_static_stub(test, relay_get_totalvfs,
				   replacement_relay_get_totalvfs);
	KUNIT_ASSERT_EQ(test, xe_guc_relay_init(relay), 0);
	KUNIT_EXPECT_TRUE(test, relay_is_ready(relay));
	/* make the next allocated relay id exactly TEST_RID */
	relay->last_rid = TEST_RID - 1;
	/* test cases operate on the relay, not the device */
	test->priv = relay;
	return 0;
}
/* Canonical HXG event message (TEST_ACTION + payload) relayed by the tests. */
static const u32 TEST_MSG[TEST_LEN] = {
	FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
	FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
	FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, TEST_ACTION) |
	FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_DATA0, TEST_DATA(0)),
	TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4),
};
/* CT stub: sanity-check the arguments, then report a communication failure. */
static int replacement_xe_guc_ct_send_recv_always_fails(struct xe_guc_ct *ct,
							const u32 *msg, u32 len,
							u32 *response_buffer)
{
	struct kunit *test = kunit_get_current_test();

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
	KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN);
	return -ECOMM;
}
/*
 * CT stub: verify the outgoing request is a well-formed PF2GUC_RELAY_TO_VF
 * wrapper addressed to TEST_VFID/TEST_RID and carrying TEST_MSG verbatim.
 */
static int replacement_xe_guc_ct_send_recv_expects_pf2guc_relay(struct xe_guc_ct *ct,
								const u32 *msg, u32 len,
								u32 *response_buffer)
{
	struct kunit *test = kunit_get_current_test();

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
	KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN);
	KUNIT_ASSERT_EQ(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + TEST_LEN);
	/* header: host-originated request with the PF2GUC relay action */
	KUNIT_EXPECT_EQ(test, GUC_HXG_ORIGIN_HOST, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]));
	KUNIT_EXPECT_EQ(test, GUC_HXG_TYPE_REQUEST, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]));
	KUNIT_EXPECT_EQ(test, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF,
			FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]));
	KUNIT_EXPECT_EQ(test, TEST_VFID,
			FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, msg[1]));
	KUNIT_EXPECT_EQ(test, TEST_RID,
			FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, msg[2]));
	/* the relayed payload must be an untouched copy of TEST_MSG */
	KUNIT_EXPECT_MEMEQ(test, TEST_MSG, msg + PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN,
			   sizeof(u32) * TEST_LEN);
	return 0;
}
/* Maximal GUC2PF event: relay transport header from VF TEST_VFID + payload. */
static const u32 test_guc2pf[GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN] = {
	/* transport */
	FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
	FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
	FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF),
	FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, TEST_VFID),
	FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, TEST_RID),
	/* payload */
	FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
	FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
};
/* Maximal GUC2VF event: relay transport header (no VFID on VF side) + payload. */
static const u32 test_guc2vf[GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN] = {
	/* transport */
	FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
	FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
	FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF),
	FIELD_PREP_CONST(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, TEST_RID),
	/* payload */
	FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
	FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
};
/* An event one dword shorter than the minimum is a protocol error. */
static void pf_rejects_guc2pf_too_short(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	const u32 short_len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN - 1;

	KUNIT_ASSERT_EQ(test, -EPROTO,
			xe_guc_relay_process_guc2pf(relay, test_guc2pf, short_len));
}
/* An event one dword past the maximum must be rejected as oversized. */
static void pf_rejects_guc2pf_too_long(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	const u32 long_len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN + 1;

	KUNIT_ASSERT_EQ(test, -EMSGSIZE,
			xe_guc_relay_process_guc2pf(relay, test_guc2pf, long_len));
}
/* A transport header with no relayed payload at all is a protocol error. */
static void pf_rejects_guc2pf_no_payload(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	const u32 hdr_only = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN;

	KUNIT_ASSERT_EQ(test, -EPROTO,
			xe_guc_relay_process_guc2pf(relay, test_guc2pf, hdr_only));
}
/* A zero-length relayed message must be rejected as a protocol error. */
static void pf_fails_no_payload(struct kunit *test)
{
	const u32 empty = 0;
	struct xe_guc_relay *relay = test->priv;

	KUNIT_ASSERT_EQ(test, -EPROTO,
			relay_process_msg(relay, TEST_VFID, TEST_RID, &empty, 0));
}
/* A relayed message claiming GUC origin (instead of host) must be refused. */
static void pf_fails_bad_origin(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	static const u32 msg[] = {
		FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
		FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
	};
	u32 len = ARRAY_SIZE(msg);

	KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len));
}
static void pf_fails_bad_type(struct kunit *test)
{
struct xe_guc_relay *relay = test->priv;
const u32 msg[] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, 4), /* only 4 is undefined */
};
u32 len = ARRAY_SIZE(msg);
KUNIT_ASSERT_EQ(test, -EBADRQC, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len));
}
/* A CT send failure (-ECOMM from the stub) must propagate out of the transaction. */
static void pf_txn_reports_error(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	struct relay_transaction *txn;

	txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID,
				      TEST_MSG, TEST_LEN, NULL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn);
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_always_fails);
	KUNIT_EXPECT_EQ(test, -ECOMM, relay_send_transaction(relay, txn));
	relay_release_transaction(relay, txn);
}
/* A transaction send must emit a correctly formed PF2GUC_RELAY_TO_VF request. */
static void pf_txn_sends_pf2guc(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	struct relay_transaction *txn;

	txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID,
				      TEST_MSG, TEST_LEN, NULL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn);
	/* the stub performs the actual message-format verification */
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_expects_pf2guc_relay);
	KUNIT_ASSERT_EQ(test, 0, relay_send_transaction(relay, txn));
	relay_release_transaction(relay, txn);
}
/* Same as pf_txn_sends_pf2guc() but through the public send-to-VF entry point. */
static void pf_sends_pf2guc(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;

	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_expects_pf2guc_relay);
	KUNIT_ASSERT_EQ(test, 0,
			xe_guc_relay_send_to_vf(relay, TEST_VFID,
						TEST_MSG, TEST_LEN, NULL, 0));
}
/*
 * CT stub that short-circuits the GuC: a PF2GUC/VF2GUC relay request is
 * rewritten into the matching GUC2PF/GUC2VF event and fed straight back
 * into the local relay, so the relay effectively talks to itself.
 */
static int replacement_xe_guc_ct_send_recv_loopback_relay(struct xe_guc_ct *ct,
							  const u32 *msg, u32 len,
							  u32 *response_buffer)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_guc_relay *relay = test->priv;
	u32 *reply = kunit_kzalloc(test, len * sizeof(u32), GFP_KERNEL);
	int (*guc2relay)(struct xe_guc_relay *, const u32 *, u32);
	u32 action;
	int err;

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
	KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN);
	KUNIT_ASSERT_EQ(test, GUC_HXG_TYPE_REQUEST,
			FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]));
	KUNIT_ASSERT_GE(test, len, GUC_HXG_REQUEST_MSG_MIN_LEN);
	KUNIT_ASSERT_NOT_NULL(test, reply);
	/* pick the opposite-direction event and handler for the loopback */
	switch (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0])) {
	case XE_GUC_ACTION_PF2GUC_RELAY_TO_VF:
		KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN);
		action = XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF;
		guc2relay = xe_guc_relay_process_guc2pf;
		break;
	case XE_GUC_ACTION_VF2GUC_RELAY_TO_PF:
		KUNIT_ASSERT_GE(test, len, VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN);
		action = XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF;
		guc2relay = xe_guc_relay_process_guc2vf;
		break;
	default:
		/* NOTE(review): prints the raw header dword, not just the action field */
		KUNIT_FAIL(test, "bad RELAY action %#x", msg[0]);
		return -EINVAL;
	}
	/* rebuild the header as a GuC-originated event, keep the body as-is */
	reply[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
		FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION, action);
	memcpy(reply + 1, msg + 1, sizeof(u32) * (len - 1));
	err = guc2relay(relay, reply, len);
	KUNIT_EXPECT_EQ(test, err, 0);
	return err;
}
/* Skip the current test unless the debug TESTLOOP relay action is built in. */
static void test_requires_relay_testloop(struct kunit *test)
{
	/*
	 * The debug relay action GUC_RELAY_ACTION_VFXPF_TESTLOOP is available
	 * only on builds with CONFIG_DRM_XE_DEBUG_SRIOV enabled.
	 * See "kunit.py --kconfig_add" option if it's missing.
	 */
	if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
		kunit_skip(test, "requires %s\n", __stringify(CONFIG_DRM_XE_DEBUG_SRIOV));
}
/* TESTLOOP NOP over the CT loopback must yield a minimal success response. */
static void pf_loopback_nop(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	u32 request[] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_NOP),
	};
	u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
	int ret;

	test_requires_relay_testloop(test);
	/* process incoming actions inline instead of via the worker */
	kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_loopback_relay);
	ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
	KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]),
			GUC_HXG_ORIGIN_HOST);
	KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]),
			GUC_HXG_TYPE_RESPONSE_SUCCESS);
	KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]), 0);
}
/* TESTLOOP ECHO must return the request payload verbatim after the header. */
static void pf_loopback_echo(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	u32 request[] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_ECHO),
		TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4),
	};
	u32 response[ARRAY_SIZE(request)];
	unsigned int n;
	int ret;

	test_requires_relay_testloop(test);
	/* process incoming actions inline instead of via the worker */
	kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_loopback_relay);
	ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(response));
	KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]),
			GUC_HXG_ORIGIN_HOST);
	KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]),
			GUC_HXG_TYPE_RESPONSE_SUCCESS);
	KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]),
			ARRAY_SIZE(response));
	/* every dword past the response header must match the request */
	for (n = GUC_HXG_RESPONSE_MSG_MIN_LEN; n < ret; n++)
		KUNIT_EXPECT_EQ(test, request[n], response[n]);
}
/* TESTLOOP FAIL must surface as -EREMOTEIO from the send path. */
static void pf_loopback_fail(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	u32 request[] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_FAIL),
	};
	u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
	int ret;

	test_requires_relay_testloop(test);
	/* process incoming actions inline instead of via the worker */
	kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_loopback_relay);
	ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	KUNIT_ASSERT_EQ(test, ret, -EREMOTEIO);
}
/* TESTLOOP BUSY must still complete successfully after the busy replies. */
static void pf_loopback_busy(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	u32 request[] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_BUSY),
		TEST_DATA(0xb),
	};
	u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
	int ret;

	test_requires_relay_testloop(test);
	/*
	 * Both stubs route into inline processing; relay_testonly_nop is
	 * presumably invoked on the BUSY path — confirm against xe_guc_relay.c.
	 */
	kunit_activate_static_stub(test, relay_testonly_nop, relay_process_incoming_action);
	kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_loopback_relay);
	ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
}
/* TESTLOOP RETRY must eventually complete with a minimal success response. */
static void pf_loopback_retry(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	u32 request[] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_RETRY),
		TEST_DATA(0xd), TEST_DATA(0xd),
	};
	u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
	int ret;

	test_requires_relay_testloop(test);
	/* process incoming actions inline instead of via the worker */
	kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
	kunit_activate_static_stub(test, xe_guc_ct_send_recv,
				   replacement_xe_guc_ct_send_recv_loopback_relay);
	ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
}
/* PF-side relay tests; BUSY/RETRY loopbacks are marked slow. */
static struct kunit_case pf_relay_test_cases[] = {
	KUNIT_CASE(pf_rejects_guc2pf_too_short),
	KUNIT_CASE(pf_rejects_guc2pf_too_long),
	KUNIT_CASE(pf_rejects_guc2pf_no_payload),
	KUNIT_CASE(pf_fails_no_payload),
	KUNIT_CASE(pf_fails_bad_origin),
	KUNIT_CASE(pf_fails_bad_type),
	KUNIT_CASE(pf_txn_reports_error),
	KUNIT_CASE(pf_txn_sends_pf2guc),
	KUNIT_CASE(pf_sends_pf2guc),
	KUNIT_CASE(pf_loopback_nop),
	KUNIT_CASE(pf_loopback_echo),
	KUNIT_CASE(pf_loopback_fail),
	KUNIT_CASE_SLOW(pf_loopback_busy),
	KUNIT_CASE_SLOW(pf_loopback_retry),
	{}
};
/* PF relay suite; relay_test_init() prepares a ready relay for each case. */
static struct kunit_suite pf_relay_suite = {
	.name = "pf_relay",
	.test_cases = pf_relay_test_cases,
	.init = relay_test_init,
};
/* An event one dword shorter than the minimum is a protocol error. */
static void vf_rejects_guc2vf_too_short(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	const u32 short_len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN - 1;

	KUNIT_ASSERT_EQ(test, -EPROTO,
			xe_guc_relay_process_guc2vf(relay, test_guc2vf, short_len));
}
/* An event one dword past the maximum must be rejected as oversized. */
static void vf_rejects_guc2vf_too_long(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	const u32 long_len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN + 1;

	KUNIT_ASSERT_EQ(test, -EMSGSIZE,
			xe_guc_relay_process_guc2vf(relay, test_guc2vf, long_len));
}
/* A transport header with no relayed payload at all is a protocol error. */
static void vf_rejects_guc2vf_no_payload(struct kunit *test)
{
	struct xe_guc_relay *relay = test->priv;
	const u32 hdr_only = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN;

	KUNIT_ASSERT_EQ(test, -EPROTO,
			xe_guc_relay_process_guc2vf(relay, test_guc2vf, hdr_only));
}
/* VF-side relay tests: only the GUC2VF validation checks. */
static struct kunit_case vf_relay_test_cases[] = {
	KUNIT_CASE(vf_rejects_guc2vf_too_short),
	KUNIT_CASE(vf_rejects_guc2vf_too_long),
	KUNIT_CASE(vf_rejects_guc2vf_no_payload),
	{}
};
/* VF relay suite; shares relay_test_init() with the PF suite. */
static struct kunit_suite vf_relay_suite = {
	.name = "vf_relay",
	.test_cases = vf_relay_test_cases,
	.init = relay_test_init,
};
/* Without relay initialization, incoming GUC2PF events must be dropped. */
static void xe_drops_guc2pf_if_not_ready(struct kunit *test)
{
	struct xe_device *xe = test->priv;
	struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
	const u32 *msg = test_guc2pf;
	u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN;

	KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2pf(relay, msg, len));
}
/* An otherwise-valid GuC2VF event must be dropped if the relay is not set up. */
static void xe_drops_guc2vf_if_not_ready(struct kunit *test)
{
	struct xe_device *xe = test->priv;
	struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
	const u32 n = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN;

	KUNIT_ASSERT_EQ(test, -ENODEV,
			xe_guc_relay_process_guc2vf(relay, test_guc2vf, n));
}
/* Sending in either direction must fail before the relay is initialized. */
static void xe_rejects_send_if_not_ready(struct kunit *test)
{
	struct xe_device *xe = test->priv;
	struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
	u32 buf[GUC_RELAY_MSG_MIN_LEN];

	KUNIT_ASSERT_EQ(test, -ENODEV,
			xe_guc_relay_send_to_pf(relay, buf, ARRAY_SIZE(buf), NULL, 0));
	KUNIT_ASSERT_EQ(test, -ENODEV,
			relay_send_to(relay, TEST_VFID, buf, ARRAY_SIZE(buf), NULL, 0));
}
/* Cases covering relay entry points called before any relay initialization. */
static struct kunit_case no_relay_test_cases[] = {
	KUNIT_CASE(xe_drops_guc2pf_if_not_ready),
	KUNIT_CASE(xe_drops_guc2vf_if_not_ready),
	KUNIT_CASE(xe_rejects_send_if_not_ready),
	{}
};

/* Only the generic fake device is prepared; the relay is deliberately not. */
static struct kunit_suite no_relay_suite = {
	.name = "no_relay",
	.test_cases = no_relay_test_cases,
	.init = xe_kunit_helper_xe_device_test_init,
};

/* Register all three relay suites with KUnit. */
kunit_test_suites(&no_relay_suite,
		  &pf_relay_suite,
		  &vf_relay_suite);

View File

@@ -0,0 +1,90 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <kunit/visibility.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_device_types.h"
/**
 * xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
 * @test: the &kunit where this &xe_device will be used
 * @dev: The parent device object
 *
 * Allocates a &xe_device embedded in a DRM device through
 * drm_kunit_helper_alloc_drm_device(); the allocation is owned and
 * released by the test framework.
 *
 * @dev should be allocated using drm_kunit_helper_alloc_device().
 *
 * Any allocation failure is reported through KUNIT_ASSERT.
 *
 * Return: A pointer to the new &xe_device.
 */
struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
						  struct device *dev)
{
	struct xe_device *xe = drm_kunit_helper_alloc_drm_device(test, dev,
								 struct xe_device,
								 drm, DRIVER_GEM);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
	return xe;
}
EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_alloc_xe_device);
/*
 * Deferred cleanup action: put a previously saved pointer back into the
 * current test's ->priv. Registered via kunit_add_action_or_reset() so
 * that a priv value replaced during suite init is restored on test exit.
 */
static void kunit_action_restore_priv(void *priv)
{
	struct kunit *test = kunit_get_current_test();
	test->priv = priv;
}
/**
 * xe_kunit_helper_xe_device_test_init - Prepare a &xe_device for a KUnit test.
 * @test: the &kunit where this fake &xe_device will be used
 *
 * Allocates and initializes a fake &xe_device and stores its pointer as
 * &kunit.priv so the test code can reach it.
 *
 * This function can be used directly as a custom implementation of
 * &kunit_suite.init.
 *
 * A specific variant of the fake &xe_device can be requested by pointing
 * &kunit.priv at a struct xe_pci_fake_data filled with the desired
 * parameters before this function runs.
 *
 * Any failure is reported through KUNIT_ASSERT.
 *
 * Return: Always 0.
 */
int xe_kunit_helper_xe_device_test_init(struct kunit *test)
{
	struct device *dev;
	struct xe_device *xe;

	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	xe = xe_kunit_helper_alloc_xe_device(test, dev);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);

	KUNIT_ASSERT_EQ(test, 0, xe_pci_fake_device_init(xe));

	/*
	 * Save the caller-provided priv (it may hold fake-device parameters)
	 * so it can be restored when the test finishes, then hand the new
	 * device to the test.
	 */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_action_or_reset(test, kunit_action_restore_priv,
						  test->priv));

	test->priv = xe;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);

View File

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 AND MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_KUNIT_HELPERS_H_
#define _XE_KUNIT_HELPERS_H_

/* Forward declarations only; keeps this header free of heavy includes. */
struct device;
struct kunit;
struct xe_device;

/* Allocate a test-managed fake &xe_device; asserts on failure. */
struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
						  struct device *dev);

/* &kunit_suite.init helper: stores a fake &xe_device in test->priv. */
int xe_kunit_helper_xe_device_test_init(struct kunit *test);

#endif

View File

@@ -128,3 +128,39 @@ void xe_live_mocs_kernel_kunit(struct kunit *test)
xe_call_for_each_device(mocs_kernel_test_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
/*
 * Verify that MOCS programming is retained across a GT reset: dump the
 * tables, trigger a reset, wait for it, then dump them again. The actual
 * verification presumably happens inside read_mocs_table()/read_l3cc_table()
 * via KUnit expectations — TODO confirm against their definitions earlier
 * in this file.
 */
static int mocs_reset_test_run_device(struct xe_device *xe)
{
	/* Check the mocs setup is retained over GT reset */
	struct live_mocs mocs;
	struct xe_gt *gt;
	unsigned int flags;
	int id;
	struct kunit *test = xe_cur_kunit();

	for_each_gt(gt, xe, id) {
		/* flags reports which MOCS register sets this GT exposes */
		flags = live_mocs_init(&mocs, gt);
		kunit_info(test, "mocs_reset_test before reset\n");
		if (flags & HAS_GLOBAL_MOCS)
			read_mocs_table(gt, &mocs.table);
		if (flags & HAS_LNCF_MOCS)
			read_l3cc_table(gt, &mocs.table);

		xe_gt_reset_async(gt);
		/* wait for the asynchronous reset worker before re-reading */
		flush_work(&gt->reset.worker);

		kunit_info(test, "mocs_reset_test after reset\n");
		if (flags & HAS_GLOBAL_MOCS)
			read_mocs_table(gt, &mocs.table);
		if (flags & HAS_LNCF_MOCS)
			read_l3cc_table(gt, &mocs.table);
	}
	/* always 0: failures are reported through the KUnit test context */
	return 0;
}
/*
 * KUnit entry point: run the MOCS-over-reset check on every device
 * enumerated by xe_call_for_each_device().
 */
void xe_live_mocs_reset_kunit(struct kunit *test)
{
	xe_call_for_each_device(mocs_reset_test_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);

View File

@@ -9,6 +9,7 @@
/* Live MOCS test cases; each entry runs its check across all devices. */
static struct kunit_case xe_mocs_tests[] = {
	KUNIT_CASE(xe_live_mocs_kernel_kunit),
	KUNIT_CASE(xe_live_mocs_reset_kunit),
	{}
};

View File

@@ -9,5 +9,6 @@
struct kunit;
void xe_live_mocs_kernel_kunit(struct kunit *test);
void xe_live_mocs_reset_kunit(struct kunit *test);
#endif

View File

@@ -156,6 +156,9 @@ int xe_pci_fake_device_init(struct xe_device *xe)
return -ENODEV;
done:
xe->sriov.__mode = data && data->sriov_mode ?
data->sriov_mode : XE_SRIOV_MODE_NONE;
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);

View File

@@ -64,8 +64,3 @@ static struct kunit_suite xe_pci_test_suite = {
};
kunit_test_suite(xe_pci_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe_pci kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);

View File

@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_platform_types.h"
#include "xe_sriov_types.h"
struct xe_device;
struct xe_graphics_desc;
@@ -23,6 +24,7 @@ void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
u32 graphics_verx100;

View File

@@ -15,6 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
#include "xe_rtp.h"
@@ -276,9 +277,7 @@ static int xe_rtp_test_init(struct kunit *test)
dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
xe = drm_kunit_helper_alloc_drm_device(test, dev,
struct xe_device,
drm, DRIVER_GEM);
xe = xe_kunit_helper_alloc_xe_device(test, dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
/* Initialize an empty device */
@@ -312,8 +311,3 @@ static struct kunit_suite xe_rtp_test_suite = {
};
kunit_test_suite(xe_rtp_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe_rtp kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);

View File

@@ -0,0 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2023 Intel Corporation
 */

/*
 * Module boilerplate for the xe KUnit tests: this translation unit carries
 * only the MODULE_* metadata. The test code itself lives in the sibling
 * test files — presumably linked into the same module by the Makefile
 * (not visible here; confirm against the build rules).
 */
#include <linux/module.h>

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe kunit tests");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);

View File

@@ -9,6 +9,7 @@
#include <kunit/test.h>
#include "xe_device.h"
#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
#include "xe_tuning.h"
@@ -65,14 +66,8 @@ static const struct platform_test_case cases[] = {
PLATFORM_CASE(ALDERLAKE_P, C0),
SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
SUBPLATFORM_CASE(DG2, G10, A0),
SUBPLATFORM_CASE(DG2, G10, A1),
SUBPLATFORM_CASE(DG2, G10, B0),
SUBPLATFORM_CASE(DG2, G10, C0),
SUBPLATFORM_CASE(DG2, G11, A0),
SUBPLATFORM_CASE(DG2, G11, B0),
SUBPLATFORM_CASE(DG2, G11, B1),
SUBPLATFORM_CASE(DG2, G12, A0),
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
@@ -105,9 +100,7 @@ static int xe_wa_test_init(struct kunit *test)
dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
xe = drm_kunit_helper_alloc_drm_device(test, dev,
struct xe_device,
drm, DRIVER_GEM);
xe = xe_kunit_helper_alloc_xe_device(test, dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
test->priv = &data;
@@ -160,8 +153,3 @@ static struct kunit_suite xe_rtp_test_suite = {
};
kunit_test_suite(xe_rtp_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe_wa kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);