Merge tag 'drm-misc-next-2024-01-11' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v6.9:

UAPI Changes:

virtio:
- add Venus capset defines

Cross-subsystem Changes:

Core Changes:

- fix drm_fixp2int_ceil()
- documentation fixes
- clean ups
- allow DRM_MM_DEBUG with DRM=m
- build fixes for debugfs support
- EDID cleanups
- sched: error-handling fixes
- ttm: add tests

Driver Changes:

bridge:
- ite-6505: fix DP link-training bug
- samsung-dsim: fix error checking in probe
- tc358767: fix regmap usage

efifb:
- use copy of global screen_info state

hisilicon:
- fix EDID includes

mgag200:
- improve ioremap usage
- convert to struct drm_edid

nouveau:
- disp: use kmemdup()
- fix EDID includes
- documentation fixes

panel:
- ltk050h3146w: error-handling fixes
- panel-edp: support delay between power-on and enable; use put_sync in
  unprepare; support Mediatek MT8173 Chromebooks, BOE NV116WHM-N49 V8.0,
  BOE NV122WUM-N41, CSO MNC207QS1-1 plus DT bindings
- panel-lvds: support EDT ETML0700Z9NDHA plus DT bindings
- panel-novatek: FRIDA FRD400B25025-A-CTK plus DT bindings

qaic:
- fixes to BO handling
- make use of DRM managed release
- fix order of remove operations

rockchip:
- analogix_dp: get encoder port from DT
- inno_hdmi: support HDMI for RK3128
- lvds: error-handling fixes

simplefb:
- fix logging

ssd130x:
- support SSD133x plus DT bindings

tegra:
- fix error handling

tilcdc:
- make use of DRM managed release

v3d:
- show memory stats in debugfs

vc4:
- fix error handling in plane prepare_fb
- fix framebuffer test in plane helpers

vesafb:
- use copy of global screen_info state

virtio:
- cleanups

vkms:
- fix OOB access when programming the LUT
- Kconfig improvements

vmwgfx:
- unmap surface before changing plane state
- fix memory leak in error handling
- documentation fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20240111154902.GA8448@linux-uq9g
This commit is contained in:
Dave Airlie
2024-02-05 13:49:47 +10:00
84 changed files with 3411 additions and 1111 deletions

View File

@@ -3,4 +3,7 @@
# KUnit test suites for TTM; helpers are linked into the same module.
obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
	ttm_device_test.o \
	ttm_pool_test.o \
	ttm_resource_test.o \
	ttm_tt_test.o \
	ttm_bo_test.o \
	ttm_kunit_helpers.o

View File

@@ -0,0 +1,622 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/dma-resv.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
/* Size of every buffer object created by these tests. */
#define BO_SIZE SZ_8K

/* Parameter set for the ttm_bo_reserve() cases below. */
struct ttm_bo_test_case {
	const char *description;
	bool interruptible;	/* forwarded to ttm_bo_reserve() */
	bool no_wait;		/* forwarded to ttm_bo_reserve() */
};

static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
	{
		.description = "Cannot be interrupted and sleeps",
		.interruptible = false,
		.no_wait = false,
	},
	{
		.description = "Cannot be interrupted, locks straight away",
		.interruptible = false,
		.no_wait = true,
	},
	{
		.description = "Can be interrupted, sleeps",
		.interruptible = true,
		.no_wait = false,
	},
};

/* Copy a case's description into KUnit's parameter description buffer. */
static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
				  char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);
/* Reserving an uncontended BO without a ww ticket must succeed. */
static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
{
	const struct ttm_bo_test_case *params = test->param_value;
	struct ttm_buffer_object *bo;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
	KUNIT_ASSERT_EQ(test, err, 0);

	/* Drop the reservation taken above */
	dma_resv_unlock(bo->base.resv);
}
/* Reserving an already-locked BO with no_wait must fail with -EBUSY. */
static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	bool interruptible = false;
	bool no_wait = true;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	/* Let's lock it beforehand */
	dma_resv_lock(bo->base.resv, NULL);

	err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_EQ(test, err, -EBUSY);
}
/*
 * Combining a ww_acquire ticket with no_wait is rejected with -EBUSY
 * (asserted below) even though the BO itself is uncontended.
 */
static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ww_acquire_ctx ctx;
	bool interruptible = false;
	bool no_wait = true;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
	KUNIT_ASSERT_EQ(test, err, -EBUSY);

	ww_acquire_fini(&ctx);
}
/* Reserving the same BO twice with one ticket must return -EALREADY. */
static void ttm_bo_reserve_double_resv(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ww_acquire_ctx ctx;
	bool interruptible = false;
	bool no_wait = false;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	/* Same BO, same ticket: must be flagged as already held */
	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);

	dma_resv_unlock(bo->base.resv);
	ww_acquire_fini(&ctx);

	KUNIT_ASSERT_EQ(test, err, -EALREADY);
}
/*
 * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
 * a deadlock by manipulating the sequence number of the context that holds
 * dma_resv lock of bo2 so the other context is "wounded" and has to back off
 * (indicated by -EDEADLK). The subtest checks if ttm_bo_reserve() properly
 * propagates that error.
 */
static void ttm_bo_reserve_deadlock(struct kunit *test)
{
	struct ttm_buffer_object *bo1, *bo2;
	struct ww_acquire_ctx ctx1, ctx2;
	bool interruptible = false;
	bool no_wait = false;
	int err;

	bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	ww_acquire_init(&ctx1, &reservation_ww_class);
	mutex_lock(&bo2->base.resv->lock.base);

	/* The deadlock will be caught by WW mutex, don't warn about it */
	lock_release(&bo2->base.resv->lock.base.dep_map, 1);

	/* Pretend ctx2 holds bo2's lock; its contents are filled just below */
	bo2->base.resv->lock.ctx = &ctx2;
	ctx2 = ctx1;
	ctx2.stamp--; /* Make the context holding the lock younger */

	err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
	KUNIT_ASSERT_EQ(test, err, 0);

	/* Contended against the "younger" ctx2: ctx1 must back off */
	err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
	KUNIT_ASSERT_EQ(test, err, -EDEADLK);

	dma_resv_unlock(bo1->base.resv);
	ww_acquire_fini(&ctx1);
}
#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)

/* On-stack timer that signals the task owning @ctx when it fires. */
struct signal_timer {
	struct timer_list timer;
	struct ww_acquire_ctx *ctx;
};

/* Timer callback: SIGTERM the task that owns the acquire context. */
static void signal_for_ttm_bo_reserve(struct timer_list *t)
{
	struct signal_timer *s_timer = from_timer(s_timer, t, timer);
	struct task_struct *task = s_timer->ctx->task;

	do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
}
static int threaded_ttm_bo_reserve(void *arg)
{
struct ttm_buffer_object *bo = arg;
struct signal_timer s_timer;
struct ww_acquire_ctx ctx;
bool interruptible = true;
bool no_wait = false;
int err;
ww_acquire_init(&ctx, &reservation_ww_class);
/* Prepare a signal that will interrupt the reservation attempt */
timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
s_timer.ctx = &ctx;
mod_timer(&s_timer.timer, msecs_to_jiffies(100));
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
timer_delete_sync(&s_timer.timer);
destroy_timer_on_stack(&s_timer.timer);
ww_acquire_fini(&ctx);
return err;
}
/*
 * An interruptible reservation blocking on a held lock must return
 * -ERESTARTSYS once the reserving thread is signalled.
 */
static void ttm_bo_reserve_interrupted(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct task_struct *task;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");

	if (IS_ERR(task))
		KUNIT_FAIL(test, "Couldn't create ttm bo reserve task\n");

	/* Take a lock so the threaded reserve has to wait */
	mutex_lock(&bo->base.resv->lock.base);

	wake_up_process(task);
	msleep(20);
	err = kthread_stop(task);

	mutex_unlock(&bo->base.resv->lock.base);

	KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
}
#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */
/*
 * Unreserving a BO must move its resource to the tail (most recently
 * used position) of the manager's LRU list for the BO's priority.
 */
static void ttm_bo_unreserve_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_device *ttm_dev;
	struct ttm_resource *res1, *res2;
	struct ttm_place *place;
	struct ttm_resource_manager *man;
	unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
	uint32_t mem_type = TTM_PL_SYSTEM;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	bo->priority = bo_prio;

	err = ttm_resource_alloc(bo, place, &res1);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res1;

	/*
	 * Add a dummy resource to populate LRU. The allocation result was
	 * previously ignored; check it like the sibling tests do so a
	 * failure can't silently invalidate the LRU assertion below.
	 */
	err = ttm_resource_alloc(bo, place, &res2);
	KUNIT_ASSERT_EQ(test, err, 0);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unreserve(bo);

	man = ttm_manager_type(priv->ttm_dev, mem_type);
	KUNIT_ASSERT_EQ(test,
			list_is_last(&res1->lru, &man->lru[bo->priority]), 1);

	ttm_resource_free(bo, &res2);
	ttm_resource_free(bo, &res1);
}
/*
 * Unreserving a pinned BO must keep its resources on the device's
 * pinned list instead of returning them to a manager LRU.
 */
static void ttm_bo_unreserve_pinned(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_device *ttm_dev;
	struct ttm_resource *res1, *res2;
	struct ttm_place *place;
	uint32_t mem_type = TTM_PL_SYSTEM;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	place = ttm_place_kunit_init(test, mem_type, 0);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_pin(bo);

	err = ttm_resource_alloc(bo, place, &res1);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res1;

	/* Add a dummy resource to the pinned list */
	err = ttm_resource_alloc(bo, place, &res2);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test,
			list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);

	ttm_bo_unreserve(bo);
	/* Still pinned: res1 must have been appended to the pinned list */
	KUNIT_ASSERT_EQ(test,
			list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);

	ttm_resource_free(bo, &res1);
	ttm_resource_free(bo, &res2);
}
/*
 * Unreserving a BO that takes part in a bulk move must update the
 * bulk-move position so the BO's resource becomes the last entry.
 */
static void ttm_bo_unreserve_bulk(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_lru_bulk_move lru_bulk_move;
	struct ttm_lru_bulk_move_pos *pos;
	struct ttm_buffer_object *bo1, *bo2;
	struct ttm_resource *res1, *res2;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	uint32_t mem_type = TTM_PL_SYSTEM;
	unsigned int bo_priority = 0;
	int err;

	ttm_lru_bulk_move_init(&lru_bulk_move);

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	dma_resv_lock(bo1->base.resv, NULL);
	ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
	dma_resv_unlock(bo1->base.resv);

	err = ttm_resource_alloc(bo1, place, &res1);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo1->resource = res1;

	dma_resv_lock(bo2->base.resv, NULL);
	ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
	dma_resv_unlock(bo2->base.resv);

	err = ttm_resource_alloc(bo2, place, &res2);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo2->resource = res2;

	/* Was unchecked; a failed reserve would invalidate the assertion */
	err = ttm_bo_reserve(bo1, false, false, NULL);
	KUNIT_ASSERT_EQ(test, err, 0);
	ttm_bo_unreserve(bo1);

	pos = &lru_bulk_move.pos[mem_type][bo_priority];
	KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);

	ttm_resource_free(bo1, &res1);
	ttm_resource_free(bo2, &res2);
}
/* Exercise ttm_bo_put() on a device BO that has a TT created for it. */
static void ttm_bo_put_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	uint32_t mem_type = TTM_PL_SYSTEM;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	bo->type = ttm_bo_type_device;

	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res;

	/* TT creation must run under the reservation lock */
	dma_resv_lock(bo->base.resv, NULL);
	err = ttm_tt_create(bo, false);
	dma_resv_unlock(bo->base.resv);
	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_bo_put(bo);
}
/* Minimal dma_fence backend; one name serves driver and timeline. */
static const char *mock_name(struct dma_fence *f)
{
	return "kunit-ttm-bo-put";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};
static void ttm_bo_put_shared_resv(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
struct dma_resv *external_resv;
struct dma_fence *fence;
/* A dummy DMA fence lock */
spinlock_t fence_lock;
struct ttm_device *ttm_dev;
int err;
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
external_resv = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, external_resv);
dma_resv_init(external_resv);
fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, fence);
spin_lock_init(&fence_lock);
dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);
dma_resv_lock(external_resv, NULL);
dma_resv_reserve_fences(external_resv, 1);
dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_unlock(external_resv);
dma_fence_signal(fence);
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
bo->type = ttm_bo_type_device;
bo->base.resv = external_resv;
ttm_bo_put(bo);
}
/* Pinning the same BO repeatedly must increment pin_count each time. */
static void ttm_bo_pin_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_device *ttm_dev;
	unsigned int no_pins = 3;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	/* Each pin must happen under the reservation lock */
	for (int i = 0; i < no_pins; i++) {
		dma_resv_lock(bo->base.resv, NULL);
		ttm_bo_pin(bo);
		dma_resv_unlock(bo->base.resv);
	}

	KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
}
/*
 * Pinning a BO must take its resource out of the bulk-move position;
 * unpinning must put it back as the last entry.
 */
static void ttm_bo_pin_unpin_resource(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_lru_bulk_move lru_bulk_move;
	struct ttm_lru_bulk_move_pos *pos;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	uint32_t mem_type = TTM_PL_SYSTEM;
	unsigned int bo_priority = 0;
	int err;

	ttm_lru_bulk_move_init(&lru_bulk_move);

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res;

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_set_bulk_move(bo, &lru_bulk_move);
	ttm_bo_pin(bo);
	dma_resv_unlock(bo->base.resv);

	pos = &lru_bulk_move.pos[mem_type][bo_priority];

	/* Pinned: the bulk-move position must be empty */
	KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
	KUNIT_ASSERT_NULL(test, pos->first);
	KUNIT_ASSERT_NULL(test, pos->last);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	/* Unpinned: the resource is back at the bulk-move tail */
	KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
	KUNIT_ASSERT_EQ(test, bo->pin_count, 0);

	ttm_resource_free(bo, &res);
}
/*
 * With two pins outstanding, a single unpin must leave pin_count at 1
 * and keep the resource off the bulk-move position.
 */
static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_lru_bulk_move lru_bulk_move;
	struct ttm_lru_bulk_move_pos *pos;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	uint32_t mem_type = TTM_PL_SYSTEM;
	unsigned int bo_priority = 0;
	int err;

	ttm_lru_bulk_move_init(&lru_bulk_move);

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res;

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_set_bulk_move(bo, &lru_bulk_move);

	/* Multiple pins */
	ttm_bo_pin(bo);
	ttm_bo_pin(bo);

	dma_resv_unlock(bo->base.resv);

	pos = &lru_bulk_move.pos[mem_type][bo_priority];

	KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
	KUNIT_ASSERT_NULL(test, pos->first);
	KUNIT_ASSERT_NULL(test, pos->last);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	/* One pin remains, so the resource must stay off the bulk move */
	KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
	KUNIT_ASSERT_NULL(test, pos->first);
	KUNIT_ASSERT_NULL(test, pos->last);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	ttm_resource_free(bo, &res);
}
/* All cases of the "ttm_bo" suite; the signal-based test needs builtin. */
static struct kunit_case ttm_bo_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
			 ttm_bo_reserve_gen_params),
	KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
	KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
	KUNIT_CASE(ttm_bo_reserve_double_resv),
#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
	KUNIT_CASE(ttm_bo_reserve_interrupted),
#endif
	KUNIT_CASE(ttm_bo_reserve_deadlock),
	KUNIT_CASE(ttm_bo_unreserve_basic),
	KUNIT_CASE(ttm_bo_unreserve_pinned),
	KUNIT_CASE(ttm_bo_unreserve_bulk),
	KUNIT_CASE(ttm_bo_put_basic),
	KUNIT_CASE(ttm_bo_put_shared_resv),
	KUNIT_CASE(ttm_bo_pin_basic),
	KUNIT_CASE(ttm_bo_pin_unpin_resource),
	KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
	{}
};

static struct kunit_suite ttm_bo_test_suite = {
	.name = "ttm_bo",
	.init = ttm_test_devices_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_bo_test_cases,
};

kunit_test_suites(&ttm_bo_test_suite);

MODULE_LICENSE("GPL");

View File

@@ -2,9 +2,33 @@
/*
* Copyright © 2023 Intel Corporation
*/
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
/*
 * Minimal .ttm_tt_create backend: plain kzalloc() + ttm_tt_init().
 * Return NULL on allocation failure instead of letting ttm_tt_init()
 * dereference a NULL pointer.
 */
static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);

	return tt;
}
/* Counterpart of ttm_tt_simple_create(): plain kfree(). */
static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	kfree(ttm);
}

/* Test BOs are KUnit-managed, so destruction is a no-op here. */
static void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
}

/* Minimal device callbacks shared by all TTM KUnit suites. */
struct ttm_device_funcs ttm_dev_funcs = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
@@ -29,19 +53,41 @@ struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size)
{
struct drm_gem_object gem_obj = { .size = size };
struct drm_gem_object gem_obj = { };
struct ttm_buffer_object *bo;
int err;
bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, bo);
bo->base = gem_obj;
err = drm_gem_object_init(devs->drm, &bo->base, size);
KUNIT_ASSERT_EQ(test, err, 0);
bo->bdev = devs->ttm_dev;
bo->destroy = dummy_ttm_bo_destroy;
kref_init(&bo->kref);
return bo;
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
/* Build a KUnit-managed ttm_place describing @mem_type with @flags. */
struct ttm_place *ttm_place_kunit_init(struct kunit *test,
				       uint32_t mem_type, uint32_t flags)
{
	struct ttm_place *p = kunit_kzalloc(test, sizeof(*p), GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, p);

	p->flags = flags;
	p->mem_type = mem_type;

	return p;
}
EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
struct ttm_test_devices *devs;

View File

@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>
@@ -28,6 +29,8 @@ int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size);
struct ttm_place *ttm_place_kunit_init(struct kunit *test,
uint32_t mem_type, uint32_t flags);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);

View File

@@ -78,10 +78,9 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
struct ttm_test_devices *devs = priv->devs;
struct ttm_pool *pool;
struct ttm_tt *tt;
unsigned long order = __fls(size / PAGE_SIZE);
int err;
tt = ttm_tt_kunit_init(test, order, caching, size);
tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);

View File

@@ -0,0 +1,335 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <drm/ttm/ttm_resource.h>
#include "ttm_kunit_helpers.h"
/* Size of every resource created by these tests. */
#define RES_SIZE SZ_4K
/* Highest memory-type index, used as a stand-in private placement. */
#define TTM_PRIV_DUMMY_REG (TTM_NUM_MEM_TYPES - 1)

struct ttm_resource_test_case {
	const char *description;
	uint32_t mem_type;
	uint32_t flags;
};

/* Per-test state: the mock devices plus BO/place mocks built on demand. */
struct ttm_resource_test_priv {
	struct ttm_test_devices *devs;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
};

/* Manager func table with no callbacks; enough for init/fini paths. */
static const struct ttm_resource_manager_func ttm_resource_manager_mock_funcs = { };
/* Suite init: allocate per-test state and bring up the mock devices. */
static int ttm_resource_test_init(struct kunit *test)
{
	struct ttm_resource_test_priv *state =
		kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, state);

	state->devs = ttm_test_devices_all(test);
	KUNIT_ASSERT_NOT_NULL(test, state->devs);

	test->priv = state;

	return 0;
}
/* Suite exit: release the devices created in ttm_resource_test_init(). */
static void ttm_resource_test_fini(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;

	ttm_test_devices_put(test, priv->devs);
}
/* Create the BO and placement mocks that the individual cases rely on. */
static void ttm_init_test_mocks(struct kunit *test,
				struct ttm_resource_test_priv *priv,
				uint32_t mem_type, uint32_t flags)
{
	/* A device must already exist for the BO mock to attach to */
	KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev);

	priv->bo = ttm_bo_kunit_init(test, priv->devs, RES_SIZE);
	priv->place = ttm_place_kunit_init(test, mem_type, flags);
}
/* Register a mock resource manager for @mem_type on the test device. */
static void ttm_init_test_manager(struct kunit *test,
				  struct ttm_resource_test_priv *priv,
				  uint32_t mem_type)
{
	struct ttm_device *ttm_dev = priv->devs->ttm_dev;
	struct ttm_resource_manager *man;
	size_t size = SZ_16K;

	man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, man);

	man->use_tt = false;
	man->func = &ttm_resource_manager_mock_funcs;

	ttm_resource_manager_init(man, ttm_dev, size);
	ttm_set_driver_manager(ttm_dev, mem_type, man);
	ttm_resource_manager_set_used(man, true);
}
/* Placements exercised by the parameterized ttm_resource_init test. */
static const struct ttm_resource_test_case ttm_resource_cases[] = {
	{
		.description = "Init resource in TTM_PL_SYSTEM",
		.mem_type = TTM_PL_SYSTEM,
	},
	{
		.description = "Init resource in TTM_PL_VRAM",
		.mem_type = TTM_PL_VRAM,
	},
	{
		.description = "Init resource in a private placement",
		.mem_type = TTM_PRIV_DUMMY_REG,
	},
	{
		.description = "Init resource in TTM_PL_SYSTEM, set placement flags",
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_TOPDOWN,
	},
};

/* Copy a case's description into KUnit's parameter description buffer. */
static void ttm_resource_case_desc(const struct ttm_resource_test_case *t, char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_resource, ttm_resource_cases, ttm_resource_case_desc);
/*
 * ttm_resource_init() must record BO/placement data, zero the bus
 * fields, add RES_SIZE to the manager's usage and insert the resource
 * into the LRU list for the BO's priority.
 */
static void ttm_resource_init_basic(struct kunit *test)
{
	const struct ttm_resource_test_case *params = test->param_value;
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource *res;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource_manager *man;
	uint64_t expected_usage;

	ttm_init_test_mocks(test, priv, params->mem_type, params->flags);
	bo = priv->bo;
	place = priv->place;

	/* Non-system placements need a (mock) manager registered first */
	if (params->mem_type > TTM_PL_SYSTEM)
		ttm_init_test_manager(test, priv, params->mem_type);

	res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, res);

	man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
	expected_usage = man->usage + RES_SIZE;

	KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));

	ttm_resource_init(bo, place, res);

	KUNIT_ASSERT_EQ(test, res->start, 0);
	KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
	KUNIT_ASSERT_EQ(test, res->mem_type, place->mem_type);
	KUNIT_ASSERT_EQ(test, res->placement, place->flags);
	KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);

	KUNIT_ASSERT_NULL(test, res->bus.addr);
	KUNIT_ASSERT_EQ(test, res->bus.offset, 0);
	KUNIT_ASSERT_FALSE(test, res->bus.is_iomem);
	KUNIT_ASSERT_EQ(test, res->bus.caching, ttm_cached);

	KUNIT_ASSERT_EQ(test, man->usage, expected_usage);
	KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));

	ttm_resource_fini(man, res);
}
/*
 * Initializing a resource for a pinned BO must place it on the
 * device's pinned list; fini must take it back off.
 */
static void ttm_resource_init_pinned(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource *res;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource_manager *man;

	ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
	bo = priv->bo;
	place = priv->place;

	man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);

	res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, res);
	KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));

	/* Pin and init under the reservation lock */
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_pin(bo);
	ttm_resource_init(bo, place, res);
	KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned));

	ttm_bo_unpin(bo);
	ttm_resource_fini(man, res);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
}
/* ttm_resource_fini() must unlink from the LRU and undo the accounting. */
static void ttm_resource_fini_basic(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource *res;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource_manager *man;

	ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
	bo = priv->bo;
	place = priv->place;

	man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);

	res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, res);

	ttm_resource_init(bo, place, res);
	ttm_resource_fini(man, res);

	KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
	KUNIT_ASSERT_EQ(test, man->usage, 0);
}
/* A freshly initialized manager must start with empty LRUs and no usage. */
static void ttm_resource_manager_init_basic(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource_manager *man;
	size_t size = SZ_16K;

	man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, man);

	ttm_resource_manager_init(man, priv->devs->ttm_dev, size);

	KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev);
	KUNIT_ASSERT_EQ(test, man->size, size);
	KUNIT_ASSERT_EQ(test, man->usage, 0);
	KUNIT_ASSERT_NULL(test, man->move);
	KUNIT_ASSERT_NOT_NULL(test, &man->move_lock);

	for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
}
/* Manager usage must reflect the size of an initialized resource. */
static void ttm_resource_manager_usage_basic(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource *res;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource_manager *man;
	uint64_t actual_usage;

	ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, TTM_PL_FLAG_TOPDOWN);
	bo = priv->bo;
	place = priv->place;

	res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, res);

	man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);

	ttm_resource_init(bo, place, res);
	actual_usage = ttm_resource_manager_usage(man);

	KUNIT_ASSERT_EQ(test, actual_usage, RES_SIZE);

	ttm_resource_fini(man, res);
}
/* ttm_resource_manager_set_used() must toggle the use_type flag. */
static void ttm_resource_manager_set_used_basic(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource_manager *man;

	/* The system manager starts out enabled by device init */
	man = ttm_manager_type(priv->devs->ttm_dev, TTM_PL_SYSTEM);
	KUNIT_ASSERT_TRUE(test, man->use_type);

	ttm_resource_manager_set_used(man, false);
	KUNIT_ASSERT_FALSE(test, man->use_type);
}
/* The system manager alloc() callback must produce a valid resource. */
static void ttm_sys_man_alloc_basic(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource *res;
	uint32_t mem_type = TTM_PL_SYSTEM;
	int ret;

	ttm_init_test_mocks(test, priv, mem_type, 0);
	bo = priv->bo;
	place = priv->place;

	man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
	ret = man->func->alloc(man, bo, place, &res);

	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
	KUNIT_ASSERT_EQ(test, res->mem_type, mem_type);
	KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);

	ttm_resource_fini(man, res);
}
/*
 * The system manager free() callback must leave the LRU empty and the
 * usage accounting at zero.
 */
static void ttm_sys_man_free_basic(struct kunit *test)
{
	struct ttm_resource_test_priv *priv = test->priv;
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource *res;
	uint32_t mem_type = TTM_PL_SYSTEM;
	int err;

	ttm_init_test_mocks(test, priv, mem_type, 0);
	bo = priv->bo;
	place = priv->place;

	/*
	 * The original kunit_kzalloc()'d a resource here only to have the
	 * pointer overwritten: ttm_resource_alloc() allocates the resource
	 * itself. Drop the redundant allocation and check the result.
	 */
	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);

	man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
	man->func->free(man, res);

	KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
	KUNIT_ASSERT_EQ(test, man->usage, 0);
}
/* All cases of the "ttm_resource" suite. */
static struct kunit_case ttm_resource_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_resource_init_basic, ttm_resource_gen_params),
	KUNIT_CASE(ttm_resource_init_pinned),
	KUNIT_CASE(ttm_resource_fini_basic),
	KUNIT_CASE(ttm_resource_manager_init_basic),
	KUNIT_CASE(ttm_resource_manager_usage_basic),
	KUNIT_CASE(ttm_resource_manager_set_used_basic),
	KUNIT_CASE(ttm_sys_man_alloc_basic),
	KUNIT_CASE(ttm_sys_man_free_basic),
	{}
};

static struct kunit_suite ttm_resource_test_suite = {
	.name = "ttm_resource",
	.init = ttm_resource_test_init,
	.exit = ttm_resource_test_fini,
	.test_cases = ttm_resource_test_cases,
};

kunit_test_suites(&ttm_resource_test_suite);

MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,295 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/shmem_fs.h>
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
/* Default BO size for cases that don't parameterize it. */
#define BO_SIZE SZ_4K

struct ttm_tt_test_case {
	const char *description;
	uint32_t size;			/* backing object size in bytes */
	uint32_t extra_pages_num;	/* extra pages passed to ttm_tt_init() */
};
/* Suite init: bring up the full set of mock devices for the tests. */
static int ttm_tt_test_init(struct kunit *test)
{
	struct ttm_test_devices *priv;

	/*
	 * The original kunit_kzalloc()'d priv and then immediately
	 * overwrote the pointer with ttm_test_devices_all(); drop the
	 * redundant allocation and check the result instead.
	 */
	priv = ttm_test_devices_all(test);
	KUNIT_ASSERT_NOT_NULL(test, priv);

	test->priv = priv;

	return 0;
}
/* Sizes exercised by the parameterized ttm_tt_init test. */
static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
	{
		.description = "Page-aligned size",
		.size = SZ_4K,
	},
	{
		.description = "Extra pages requested",
		.size = SZ_4K,
		.extra_pages_num = 1,
	},
};

/* Copy a case's description into KUnit's parameter description buffer. */
static void ttm_tt_init_case_desc(const struct ttm_tt_test_case *t,
				  char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_cases,
		  ttm_tt_init_case_desc);
/*
 * ttm_tt_init() must size num_pages from the BO plus the requested
 * extra pages, carry over page flags and caching, and leave the
 * dma_address and swap_storage fields unset.
 */
static void ttm_tt_init_basic(struct kunit *test)
{
	const struct ttm_tt_test_case *params = test->param_value;
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	uint32_t page_flags = TTM_TT_FLAG_ZERO_ALLOC;
	enum ttm_caching caching = ttm_cached;
	uint32_t extra_pages = params->extra_pages_num;
	int num_pages = params->size >> PAGE_SHIFT;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, test->priv, params->size);

	err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);
	KUNIT_ASSERT_EQ(test, tt->page_flags, page_flags);
	KUNIT_ASSERT_EQ(test, tt->caching, caching);
	KUNIT_ASSERT_NULL(test, tt->dma_address);
	KUNIT_ASSERT_NULL(test, tt->swap_storage);
}
/*
 * A BO size that is not page aligned must round the page count up.
 * NOTE(review): the expected count assumes PAGE_SIZE == SZ_4K — worth
 * confirming for architectures with larger pages.
 */
static void ttm_tt_init_misaligned(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	enum ttm_caching caching = ttm_cached;
	uint32_t size = SZ_8K;
	int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, test->priv, size);

	/* Make the object size misaligned */
	bo->base.size += 1;

	err = ttm_tt_init(tt, bo, 0, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);
}
/* ttm_tt_fini() must release the page array created by ttm_tt_init(). */
static void ttm_tt_fini_basic(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	enum ttm_caching caching = ttm_cached;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_tt_init(tt, bo, 0, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_NOT_NULL(test, tt->pages);

	ttm_tt_fini(tt);
	KUNIT_ASSERT_NULL(test, tt->pages);
}
static void ttm_tt_fini_sg(struct kunit *test)
{
struct ttm_buffer_object *bo;
struct ttm_tt *tt;
enum ttm_caching caching = ttm_cached;
int err;
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
err = ttm_sg_tt_init(tt, bo, 0, caching);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_NOT_NULL(test, tt->dma_address);
ttm_tt_fini(tt);
KUNIT_ASSERT_NULL(test, tt->dma_address);
}
/*
 * ttm_tt_fini() must drop any swap-storage file still attached to the
 * TT and clear the pointer.
 *
 * Fix: the shmem_file_setup() return value was previously unchecked.
 * It reports failure via ERR_PTR(), not NULL, so on failure the test
 * would have stored an error pointer in tt->swap_storage and handed it
 * to ttm_tt_fini(). Assert on it before use.
 */
static void ttm_tt_fini_shmem(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	struct file *shmem;
	enum ttm_caching caching = ttm_cached;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	err = ttm_tt_init(tt, bo, 0, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	shmem = shmem_file_setup("ttm swap", BO_SIZE, 0);
	/* shmem_file_setup() signals failure via ERR_PTR(). */
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	tt->swap_storage = shmem;

	ttm_tt_fini(tt);
	KUNIT_ASSERT_NULL(test, tt->swap_storage);
}
/* ttm_tt_create() must allocate and attach a TT for a device-type BO. */
static void ttm_tt_create_basic(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	bo->type = ttm_bo_type_device;

	dma_resv_lock(bo->base.resv, NULL);
	ret = ttm_tt_create(bo, false);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_NOT_NULL(test, bo->ttm);

	/* Free manually, as it was allocated outside of KUnit */
	kfree(bo->ttm);
}
/*
 * An out-of-range BO type must make ttm_tt_create() fail with -EINVAL
 * and leave no TT attached.
 */
static void ttm_tt_create_invalid_bo_type(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
	/* One past the last valid enum ttm_bo_type value. */
	bo->type = ttm_bo_type_sg + 1;

	dma_resv_lock(bo->base.resv, NULL);
	ret = ttm_tt_create(bo, false);
	dma_resv_unlock(bo->base.resv);

	KUNIT_EXPECT_EQ(test, ret, -EINVAL);
	KUNIT_EXPECT_NULL(test, bo->ttm);
}
static void ttm_tt_create_ttm_exists(struct kunit *test)
{
struct ttm_buffer_object *bo;
struct ttm_tt *tt;
enum ttm_caching caching = ttm_cached;
int err;
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
err = ttm_tt_init(tt, bo, 0, caching, 0);
KUNIT_ASSERT_EQ(test, err, 0);
bo->ttm = tt;
dma_resv_lock(bo->base.resv, NULL);
err = ttm_tt_create(bo, false);
dma_resv_unlock(bo->base.resv);
/* Expect to keep the previous TTM */
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_PTR_EQ(test, tt, bo->ttm);
}
/* Stub ->ttm_tt_create() hook that always fails to allocate a TT. */
static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *tbo,
					 uint32_t flags)
{
	return NULL;
}
/* Device hooks whose TT allocation always fails (returns NULL). */
static struct ttm_device_funcs ttm_dev_empty_funcs = {
.ttm_tt_create = ttm_tt_null_create,
};
/*
 * When the device's ->ttm_tt_create() hook returns NULL, ttm_tt_create()
 * must report -ENOMEM.
 */
static void ttm_tt_create_failed(struct kunit *test)
{
	const struct ttm_test_devices *devs = test->priv;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	/* Update ttm_device_funcs so we don't alloc ttm_tt */
	devs->ttm_dev->funcs = &ttm_dev_empty_funcs;

	dma_resv_lock(bo->base.resv, NULL);
	ret = ttm_tt_create(bo, false);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_EQ(test, ret, -ENOMEM);
}
/* Smoke-test ttm_tt_destroy() on a TT freshly made by ttm_tt_create(). */
static void ttm_tt_destroy_basic(struct kunit *test)
{
	const struct ttm_test_devices *devs = test->priv;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);

	dma_resv_lock(bo->base.resv, NULL);
	ret = ttm_tt_create(bo, false);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, bo->ttm);

	ttm_tt_destroy(devs->ttm_dev, bo->ttm);
}
/* All ttm_tt test cases registered with the suite below. */
static struct kunit_case ttm_tt_test_cases[] = {
KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
KUNIT_CASE(ttm_tt_init_misaligned),
KUNIT_CASE(ttm_tt_fini_basic),
KUNIT_CASE(ttm_tt_fini_sg),
KUNIT_CASE(ttm_tt_fini_shmem),
KUNIT_CASE(ttm_tt_create_basic),
KUNIT_CASE(ttm_tt_create_invalid_bo_type),
KUNIT_CASE(ttm_tt_create_ttm_exists),
KUNIT_CASE(ttm_tt_create_failed),
KUNIT_CASE(ttm_tt_destroy_basic),
{}
};
/*
 * The ttm_tt KUnit suite. Per-test TTM test devices are created by
 * ttm_tt_test_init() and torn down by ttm_test_devices_fini().
 */
static struct kunit_suite ttm_tt_test_suite = {
.name = "ttm_tt",
.init = ttm_tt_test_init,
.exit = ttm_test_devices_fini,
.test_cases = ttm_tt_test_cases,
};
kunit_test_suites(&ttm_tt_test_suite);

/* Fix: add MODULE_DESCRIPTION(); modpost warns (W=1) on modules without one. */
MODULE_DESCRIPTION("KUnit tests for ttm_tt");
MODULE_LICENSE("GPL");