Merge drm/drm-next into drm-misc-next

Backmerging drm/drm-next to pick up the bug fixes from v6.19-rc7.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
This commit is contained in:
Thomas Zimmermann
2026-02-05 10:33:06 +01:00
2677 changed files with 159442 additions and 26890 deletions

View File

@@ -153,14 +153,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -1050,7 +1042,8 @@ static const char * const amdgpu_vram_names[] = {
"DDR5",
"LPDDR4",
"LPDDR5",
"HBM3E"
"HBM3E",
"HBM4"
};
/**
@@ -1080,10 +1073,10 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
adev->gmc.aper_size);
}
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
drm_info(adev_to_drm(adev), "Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n",
drm_info(adev_to_drm(adev), "RAM width %dbits %s\n",
adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev);
}
@@ -1125,6 +1118,10 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_bo_user *ubo;
/* MMIO_REMAP is BAR I/O space; tiling should never be used here. */
WARN_ON_ONCE(bo->tbo.resource &&
bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP);
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
if (adev->family <= AMDGPU_FAMILY_CZ &&
AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
@@ -1147,6 +1144,13 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
struct amdgpu_bo_user *ubo;
/*
* MMIO_REMAP BOs are not real VRAM/GTT memory but a fixed BAR I/O window.
* They should never go through GEM tiling helpers.
*/
WARN_ON_ONCE(bo->tbo.resource &&
bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP);
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
dma_resv_assert_held(bo->tbo.base.resv);
ubo = to_amdgpu_bo_user(bo);
@@ -1321,8 +1325,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (r)
goto out;
r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
&fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
@@ -1534,8 +1538,17 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
*/
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
{
uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
u32 domain;
/*
* MMIO_REMAP is internal now, so it no longer maps from a userspace
* domain bit. Keep fdinfo/mem-stats visibility by checking the actual
* TTM placement.
*/
if (bo->tbo.resource && bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP)
return AMDGPU_PL_MMIO_REMAP;
domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
if (!domain)
return TTM_PL_SYSTEM;
@@ -1554,8 +1567,6 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}