mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 14:53:58 -04:00
drm/xe/uapi: Hide the madvise autoreset behind a VM_BIND flag
The madvise implementation currently resets the SVM madvise if the
underlying CPU map is unmapped. This is in an attempt to mimic the
CPU madvise behaviour. However, it's not clear that this is a desired
behaviour since if the end app user relies on it for malloc()ed
objects or stack objects, it may not work as intended.
Instead of having the autoreset functionality being a direct
application-facing implicit UAPI, make the UMD explicitly choose
this behaviour if it wants to expose it by introducing
DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET, and add a semantics
description.
v2:
- Kerneldoc fixes. Fix a commit log message.
Fixes: a2eb8aec3e ("drm/xe: Reset VMA attributes to default in SVM garbage collector")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: "Falkowski, John" <john.falkowski@intel.com>
Cc: "Mrozek, Michal" <michal.mrozek@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://lore.kernel.org/r/20251015170726.178685-2-thomas.hellstrom@linux.intel.com
This commit is contained in:
@@ -646,7 +646,8 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
 	 XE_VMA_READ_ONLY | \
 	 XE_VMA_DUMPABLE | \
 	 XE_VMA_SYSTEM_ALLOCATOR | \
-	 DRM_GPUVA_SPARSE)
+	 DRM_GPUVA_SPARSE | \
+	 XE_VMA_MADV_AUTORESET)
 
 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 				  u8 tile_mask)
@@ -2297,6 +2298,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
 			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
 				op->map.vma_flags |= XE_VMA_DUMPABLE;
+			if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+				op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
 			op->map.pat_index = pat_index;
 			op->map.invalidate_on_bind =
 				__xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -3280,7 +3283,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
 	 DRM_XE_VM_BIND_FLAG_NULL | \
 	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
 	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
-	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+	 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
 
 #ifdef TEST_VM_OPS_ERROR
 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3395,7 +3399,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
 		    XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
 				      !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
 		    XE_IOCTL_DBG(xe, obj &&
-				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
+				 op == DRM_XE_VM_BIND_OP_UNMAP) ||
+		    XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+				 (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
 			err = -EINVAL;
 			goto free_bind_ops;
 		}
Reference in New Issue
Block a user