drm/xe: Use the vma attribute drm_pagemap to select where to migrate

Honor the drm_pagemap vma attribute when migrating SVM pages.
When validating a desired placement in device memory, also check
that the requested drm_pagemap is consistent with the one the
range currently resides in.
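
For illustration, the extended check can be pictured as follows
(a hedged sketch only, not this patch's implementation;
range_in_devmem() and range_pagemap() are hypothetical helpers
standing in for however the range tracks its current placement):

	/*
	 * Sketch of the placement part of the xe_svm_range_validate()
	 * change seen in the diff below: a NULL dpagemap means system
	 * memory is the desired placement, while a non-NULL dpagemap
	 * additionally has to match the pagemap the range currently
	 * resides in.
	 */
	static bool range_placement_valid(struct xe_svm_range *range,
					  struct drm_pagemap *dpagemap)
	{
		if (!dpagemap)
			return !range_in_devmem(range);	/* hypothetical helper */

		return range_in_devmem(range) &&
			range_pagemap(range) == dpagemap; /* hypothetical helper */
	}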

v2:
- Initialize a struct drm_pagemap pointer to NULL that could
  otherwise be dereferenced uninitialized. (CI)
- Remove a redundant assignment (Matt Brost)
- Slightly improved commit message (Matt Brost)
- Extended drm_pagemap validation.

v3:
- Fix a compilation error if CONFIG_DRM_GPUSVM is not enabled.
  (kernel test robot <lkp@intel.com>)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://patch.msgid.link/20251219113320.183860-14-thomas.hellstrom@linux.intel.com
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c

@@ -2342,7 +2342,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 	struct xe_tile *tile;
 	struct xe_svm_range *svm_range;
 	struct drm_gpusvm_ctx ctx = {};
-	struct drm_pagemap *dpagemap;
+	struct drm_pagemap *dpagemap = NULL;
 	u8 id, tile_mask = 0;
 	u32 i;
@@ -2360,23 +2360,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 	xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
 	op->prefetch_range.ranges_count = 0;
 	tile = NULL;
 	if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
 		dpagemap = xe_vma_resolve_pagemap(vma,
 						  xe_device_get_root_tile(vm->xe));
-		/*
-		 * TODO: Once multigpu support is enabled will need
-		 * something to dereference tile from dpagemap.
-		 */
-		if (dpagemap)
-			tile = xe_device_get_root_tile(vm->xe);
 	} else if (prefetch_region) {
 		tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
 				      XE_PL_VRAM0];
 		dpagemap = xe_tile_local_pagemap(tile);
 	}
-	op->prefetch_range.tile = tile;
+	op->prefetch_range.dpagemap = dpagemap;
 alloc_next_range:
 	svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
@@ -2395,7 +2389,7 @@ alloc_next_range:
 		goto unwind_prefetch_ops;
 	}
-	if (xe_svm_range_validate(vm, svm_range, tile_mask, !!tile)) {
+	if (xe_svm_range_validate(vm, svm_range, tile_mask, dpagemap)) {
 		xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
 		goto check_next_range;
 	}
@@ -2917,7 +2911,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
 {
 	bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
 	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
-	struct xe_tile *tile = op->prefetch_range.tile;
+	struct drm_pagemap *dpagemap = op->prefetch_range.dpagemap;
 	int err = 0;
 	struct xe_svm_range *svm_range;
@@ -2930,15 +2924,15 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
 	ctx.read_only = xe_vma_read_only(vma);
 	ctx.devmem_possible = devmem_possible;
 	ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
-	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !tile);
+	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
 	/* TODO: Threading the migration */
 	xa_for_each(&op->prefetch_range.range, i, svm_range) {
-		if (!tile)
+		if (!dpagemap)
 			xe_svm_range_migrate_to_smem(vm, svm_range);
-		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
-			err = xe_svm_alloc_vram(tile, svm_range, &ctx);
+		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, dpagemap)) {
+			err = xe_svm_alloc_vram(svm_range, &ctx, dpagemap);
 			if (err) {
 				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
 					vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
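
As a usage sketch (not part of this patch): a prefetch that asks the
kernel to consult the vma's advised preferred location could look
roughly like this from userspace, assuming the sentinel
DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC from the hunks above is passed via
drm_xe_vm_bind_op.prefetch_mem_region_instance:

	#include <xf86drm.h>	/* drmIoctl() from libdrm */
	#include <drm/xe_drm.h>	/* xe uAPI; include path may vary */

	/*
	 * Hedged sketch: prefetch an SVM (CPU-mirror) range, letting the
	 * kernel resolve the placement from the vma's drm_pagemap
	 * attribute instead of an explicit memory region instance.
	 */
	static int prefetch_consult_madvise(int fd, __u32 vm_id,
					    __u64 addr, __u64 range)
	{
		struct drm_xe_vm_bind bind = {
			.vm_id = vm_id,
			.num_binds = 1,
			.bind = {
				.op = DRM_XE_VM_BIND_OP_PREFETCH,
				.addr = addr,
				.range = range,
				.prefetch_mem_region_instance =
					DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC,
			},
		};

		return drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
	}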