linux/drivers/gpu/drm/xe/xe_exec.c
Francois Dugast c73acc1eeb drm/xe: Use Xe assert macros instead of XE_WARN_ON macro
The XE_WARN_ON macro maps to WARN_ON, which is not justified
in many cases where only a simple debug check is needed.
Replace the use of the XE_WARN_ON macro with the new xe_assert
macros, which rely on drm_*. These take a struct drm_device
argument, which is one of the main changes in this commit. The
other main change is that the condition is reversed: with
XE_WARN_ON a message is displayed if the condition is true,
whereas with xe_assert it is displayed if the condition is false.

v2:
- Rebase
- Keep WARN splats in xe_wopcm.c (Matt Roper)

v3:
- Rebase

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
2023-12-21 11:41:08 -05:00

// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "xe_exec.h"
#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_vm.h"
/**
* DOC: Execbuf (User GPU command submission)
*
* Execs have historically been rather complicated in DRM drivers (at least in
* the i915) because of a few things:
*
* - Passing in a list of BOs which are read / written to, creating implicit syncs
* - Binding at exec time
* - Flow controlling the ring at exec time
*
* In XE we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, having binds as
* separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
*
* We can get away from a BO list by forcing the user to use in / out fences on
* every exec rather than the kernel tracking dependencies of BOs (e.g. if the
* user knows an exec writes to a BO and reads from the BO in the next exec, it
* is the user's responsibility to pass an in / out fence between the two execs).
*
* Implicit dependencies for external BOs are handled by using the dma-buf
* implicit dependency uAPI (TODO: add link). To make this work, each exec must
* install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
* BO mapped in the VM.
*
* We do not allow a user to trigger a bind at exec time; rather, we have a VM
* bind IOCTL which uses the same in / out fence interface as exec. In that
* sense, a VM bind is basically the same operation as an exec from the user's
* perspective, e.g. if an exec depends on a VM bind, use the in / out fence
* interface (struct drm_xe_sync) to synchronize, just like syncing between two
* dependent execs.
*
* Although a user cannot trigger a bind, we still have to rebind userptrs in
* the VM that have been invalidated since the last exec; likewise, we also have
* to rebind BOs that have been evicted by the kernel. We schedule these rebinds
* behind any pending kernel operations on any external BOs in the VM or any BOs
* private to the VM. This is accomplished by the rebinds waiting on the BOs'
* DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BO
* slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP slot for private BOs
* and in DMA_RESV_USAGE_WRITE for external BOs).
*
* Rebinds / dma-resv usage apply to non-compute mode VMs only; for compute mode
* VMs we use preempt fences and a rebind worker (TODO: add link).
*
* There is no need to flow control the ring in the exec as we write the ring at
* submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
* MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
* ring is available.
*
* All of this results in a rather simple exec implementation.
*
* Flow
* ~~~~
*
* .. code-block::
*
* Parse input arguments
* Wait for any async VM bind passed as in-fences to start
* <----------------------------------------------------------------------|
* Lock global VM lock in read mode |
* Pin userptrs (also finds userptr invalidated since last exec) |
* Lock exec (VM dma-resv lock, external BOs dma-resv locks) |
* Validate BOs that have been evicted |
* Create job |
* Rebind invalidated userptrs + evicted BOs (non-compute-mode) |
* Add rebind fence dependency to job |
* Add job VM dma-resv bookkeeping slot (non-compute mode) |
* Add job to external BOs dma-resv write slots (non-compute mode) |
* Check if any userptrs invalidated since pin ------ Drop locks ---------|
* Install in / out fences for job
* Submit job
* Unlock all
*/
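/*
* Lock the VM dma-resv and external BO dma-resvs via drm_exec, reserving
* fence slots for the job plus per-tile rebind fences, then validate any
* BOs evicted since the last exec. VMs that don't use dma fences skip this.
*/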
static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
{
struct xe_vma *vma;
LIST_HEAD(dups);
int err = 0;
if (xe_vm_no_dma_fences(vm))
return 0;
/*
* 1 fence slot for the job from this exec plus a fence slot per tile for a
* possible rebind
*/
err = xe_vm_lock_dma_resv(vm, exec, 1 + vm->xe->info.tile_count, true);
if (err)
return err;
/*
* Validate BOs that have been evicted (i.e. make sure the
* BOs have valid placements, possibly moving an evicted BO back
* to a location where the GPU can access it).
*/
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
xe_assert(vm->xe, !xe_vma_is_null(vma));
if (xe_vma_is_userptr(vma))
continue;
err = xe_bo_validate(xe_vma_bo(vma), vm, false);
if (err)
break;
}
return err;
}
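/*
* Exec IOCTL entry point: validate user arguments, look up the exec queue and
* its VM, take the required locks, rebind as needed, then create and submit
* the job along with its in / out fences.
*/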
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_exec *args = data;
struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
u64 __user *addresses_user = u64_to_user_ptr(args->address);
struct xe_exec_queue *q;
struct xe_sync_entry *syncs = NULL;
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_exec exec;
u32 i, num_syncs = 0;
struct xe_sched_job *job;
struct dma_fence *rebind_fence;
struct xe_vm *vm;
bool write_locked;
ktime_t end = 0;
int err = 0;
if (XE_IOCTL_DBG(xe, args->extensions) ||
XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
return -EINVAL;
if (XE_IOCTL_DBG(xe, q->width != args->num_batch_buffer))
return -EINVAL;
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
err = -ECANCELED;
goto err_exec_queue;
}
if (args->num_syncs) {
syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
if (!syncs) {
err = -ENOMEM;
goto err_exec_queue;
}
}
vm = q->vm;
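/* Parse the user-provided sync objects into kernel sync entries */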
for (i = 0; i < args->num_syncs; i++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
&syncs_user[i], true,
xe_vm_no_dma_fences(vm));
if (err)
goto err_syncs;
}
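/* Parallel exec queues pass one batch buffer address per engine in the queue */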
if (xe_exec_queue_is_parallel(q)) {
err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
}
}
/*
* We can't install a job into the VM dma-resv shared slot before an
* async VM bind passed in as a fence without the risk of deadlocking as
* the bind can trigger an eviction which in turn depends on anything in
* the VM dma-resv shared slots. Not an ideal solution, but we wait for
* all dependent async VM binds to start (install correct fences into
* dma-resv slots) before moving forward.
*/
if (!xe_vm_no_dma_fences(vm) &&
vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
for (i = 0; i < args->num_syncs; i++) {
struct dma_fence *fence = syncs[i].fence;
if (fence) {
err = xe_vm_async_fence_wait_start(fence);
if (err)
goto err_syncs;
}
}
}
retry:
if (!xe_vm_no_dma_fences(vm) && xe_vm_userptr_check_repin(vm)) {
err = down_write_killable(&vm->lock);
write_locked = true;
} else {
/* No userptr repin is needed on this path, a read lock is sufficient */
err = down_read_interruptible(&vm->lock);
write_locked = false;
}
if (err)
goto err_syncs;
/* We don't allow execs while the VM is in error state */
if (vm->async_ops.error) {
err = vm->async_ops.error;
goto err_unlock_list;
}
/*
* Extreme corner where we exit a VM error state with a munmap style VM
* unbind inflight which requires a rebind. In this case the rebind
* needs to install some fences into the dma-resv slots. The worker to
* do this is already queued; let that worker make progress by dropping
* vm->lock, flushing the worker, and retrying the exec.
*/
if (vm->async_ops.munmap_rebind_inflight) {
if (write_locked)
up_write(&vm->lock);
else
up_read(&vm->lock);
flush_work(&vm->async_ops.work);
goto retry;
}
if (write_locked) {
err = xe_vm_userptr_pin(vm);
downgrade_write(&vm->lock);
write_locked = false;
if (err)
goto err_unlock_list;
}
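/* Lock the VM dma-resv and external BO dma-resvs, retrying on contention */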
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec) {
err = xe_exec_begin(&exec, vm);
drm_exec_retry_on_contention(&exec);
if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
err = -EAGAIN;
goto err_unlock_list;
}
if (err)
goto err_exec;
}
if (xe_vm_is_closed_or_banned(q->vm)) {
drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
err = -ECANCELED;
goto err_exec;
}
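/*
* Long-running queues are not flow controlled by the DRM scheduler, so
* back-pressure here if there is no space left in the ring.
*/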
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
err = -EWOULDBLOCK;
goto err_exec;
}
job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
addresses : &args->address);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto err_exec;
}
/*
* Rebind any invalidated userptrs or evicted BOs in the VM, non-compute
* mode VMs only.
*/
rebind_fence = xe_vm_rebind(vm, false);
if (IS_ERR(rebind_fence)) {
err = PTR_ERR(rebind_fence);
goto err_put_job;
}
/*
* We store the rebind_fence in the VM so subsequent execs don't get
* scheduled before the rebinds of userptrs / evicted BOs are complete.
*/
if (rebind_fence) {
dma_fence_put(vm->rebind_fence);
vm->rebind_fence = rebind_fence;
}
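/* Make the job wait for any rebind fence that has not signaled yet */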
if (vm->rebind_fence) {
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&vm->rebind_fence->flags)) {
dma_fence_put(vm->rebind_fence);
vm->rebind_fence = NULL;
} else {
dma_fence_get(vm->rebind_fence);
err = drm_sched_job_add_dependency(&job->drm,
vm->rebind_fence);
if (err)
goto err_put_job;
}
}
/* Wait behind munmap style rebinds */
if (!xe_vm_no_dma_fences(vm)) {
err = drm_sched_job_add_resv_dependencies(&job->drm,
xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL);
if (err)
goto err_put_job;
}
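/* Add any in-fences from the user syncs as job dependencies */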
for (i = 0; i < num_syncs && !err; i++)
err = xe_sync_entry_add_deps(&syncs[i], job);
if (err)
goto err_put_job;
if (!xe_vm_no_dma_fences(vm)) {
err = down_read_interruptible(&vm->userptr.notifier_lock);
if (err)
goto err_put_job;
err = __xe_vm_userptr_needs_repin(vm);
if (err)
goto err_repin;
}
/*
* Point of no return: if we error after this point, just set an error on
* the job and let the DRM scheduler / backend clean up the job.
*/
xe_sched_job_arm(job);
if (!xe_vm_no_dma_fences(vm)) {
/* Block userptr invalidations / BO eviction */
dma_resv_add_fence(xe_vm_resv(vm),
&job->drm.s_fence->finished,
DMA_RESV_USAGE_BOOKKEEP);
/*
* Make implicit sync work across drivers, assuming all external
* BOs are written, as we don't pass in a read / write list.
*/
xe_vm_fence_all_extobjs(vm, &job->drm.s_fence->finished,
DMA_RESV_USAGE_WRITE);
}
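/* Signal the user out-fences with the job's finished fence */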
for (i = 0; i < num_syncs; i++)
xe_sync_entry_signal(&syncs[i], job,
&job->drm.s_fence->finished);
if (xe_exec_queue_is_lr(q))
q->ring_ops->emit_job(job);
xe_sched_job_push(job);
xe_vm_reactivate_rebind(vm);
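/* Bump the VM's BOs to the tail of the TTM LRU now that they are in use */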
if (!err && !xe_vm_no_dma_fences(vm)) {
spin_lock(&xe->ttm.lru_lock);
ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
spin_unlock(&xe->ttm.lru_lock);
}
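/* Error unwind: release locks and references in reverse order of acquisition */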
err_repin:
if (!xe_vm_no_dma_fences(vm))
up_read(&vm->userptr.notifier_lock);
err_put_job:
if (err)
xe_sched_job_put(job);
err_exec:
drm_exec_fini(&exec);
err_unlock_list:
if (write_locked)
up_write(&vm->lock);
else
up_read(&vm->lock);
if (err == -EAGAIN)
goto retry;
err_syncs:
for (i = 0; i < num_syncs; i++)
xe_sync_entry_cleanup(&syncs[i]);
kfree(syncs);
err_exec_queue:
xe_exec_queue_put(q);
return err;
}