drm/amdgpu: pass the entity to use to ttm public functions
This way the caller can select the one it wants to use.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
committed by Alex Deucher
parent e95b41846d
commit 35bb80e683
@@ -356,7 +356,7 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 							     write_compress_disable));
 		}
 
-		r = amdgpu_copy_buffer(adev, from, to, cur_size, resv,
+		r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
 				       &next, true, copy_flags);
 		if (r)
 			goto error;
@@ -411,8 +411,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
 		struct dma_fence *wipe_fence = NULL;
 
-		r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
-				       false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
+		r = amdgpu_fill_buffer(&adev->mman.move_entity,
+				       abo, 0, NULL, &wipe_fence,
+				       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
 		if (r) {
 			goto error;
 		} else if (wipe_fence) {
@@ -2370,7 +2371,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 					DMA_RESV_USAGE_BOOKKEEP);
 }
 
-int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev,
+		       struct amdgpu_ttm_buffer_entity *entity,
+		       uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
 		       struct dma_resv *resv,
 		       struct dma_fence **fence,
@@ -2394,7 +2397,7 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-	r = amdgpu_ttm_prepare_job(adev, &adev->mman.move_entity, num_dw,
+	r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
 				   resv, vm_needs_flush, &job,
 				   AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
 	if (r)
@@ -2523,22 +2526,18 @@ err:
 	return r;
 }
 
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-		       uint32_t src_data,
-		       struct dma_resv *resv,
-		       struct dma_fence **f,
-		       bool delayed,
-		       u64 k_job_id)
+int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
+		       struct amdgpu_bo *bo,
+		       uint32_t src_data,
+		       struct dma_resv *resv,
+		       struct dma_fence **f,
+		       u64 k_job_id)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_ttm_buffer_entity *entity;
 	struct dma_fence *fence = NULL;
 	struct amdgpu_res_cursor dst;
 	int r;
 
-	entity = delayed ? &adev->mman.clear_entity :
-			   &adev->mman.move_entity;
-
 	if (!adev->mman.buffer_funcs_enabled) {
 		dev_err(adev->dev,
 			"Trying to clear memory with ring turned off.\n");
@@ -2555,13 +2554,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		/* Never fill more than 256MiB at once to avoid timeouts */
 		cur_size = min(dst.size, 256ULL << 20);
 
-		r = amdgpu_ttm_map_buffer(&adev->mman.default_entity,
-					  &bo->tbo, bo->tbo.resource, &dst,
+		r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
 					  1, false, &cur_size, &to);
 		if (r)
 			goto error;
 
-		r = amdgpu_ttm_fill_mem(adev, entity, src_data, to, cur_size, resv,
+		r = amdgpu_ttm_fill_mem(adev, entity,
+					src_data, to, cur_size, resv,
 					&next, true, k_job_id);
 		if (r)
 			goto error;
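
For context, a hedged sketch of the calling convention this commit establishes, based only on the hunks above. Previously amdgpu_fill_buffer() chose between the clear and move entities internally from a bool delayed flag; after this change the caller names the entity explicitly (the surrounding wipe-on-release code is paraphrased from the second hunk, not a verbatim quote):

	/* Before: a bool selected the entity inside the helper. */
	r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
			       false /* delayed */,
			       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);

	/* After: the caller picks the entity itself; a delayed clear
	 * would pass &adev->mman.clear_entity here instead.
	 */
	r = amdgpu_fill_buffer(&adev->mman.move_entity,
			       abo, 0, NULL, &wipe_fence,
			       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);

amdgpu_copy_buffer() gets the same treatment: an explicit struct amdgpu_ttm_buffer_entity * parameter replaces the &adev->mman.move_entity that was previously hardcoded in its call to amdgpu_ttm_prepare_job().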