mirror of
https://github.com/torvalds/linux.git
synced 2026-04-24 17:42:27 -04:00
Merge tag 'drm-misc-next-2024-12-05' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
[airlied: handle module ns conflict]
drm-misc-next for 6.14:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
- Remove driver date from drm_driver
Driver Changes:
- amdxdna: New driver!
- ivpu: Fix qemu crash when using passthrough
- nouveau: expose GSP-RM logging buffers via debugfs
- panfrost: Add MT8188 Mali-G57 MC3 support
- panthor: Miscellaneous improvements
- rockchip: Gamma LUT support
- tidss: Misc improvements
- virtio: convert to helpers, add prime support for scanout buffers
- v3d: Add DRM_IOCTL_V3D_PERFMON_SET_GLOBAL
- vc4: Add support for BCM2712
- vkms: Improvements all across the board
- panels:
- Introduce backlight quirks infrastructure
- New panels: KDB KD116N2130B12
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241205-agile-straight-pegasus-aca7f4@houat
This commit is contained in:
@@ -27,6 +27,8 @@
|
||||
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
MODULE_IMPORT_NS("DMA_BUF");
|
||||
|
||||
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
|
||||
uuid_t *uuid)
|
||||
{
|
||||
@@ -142,10 +144,159 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
|
||||
return buf;
|
||||
}
|
||||
|
||||
int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
|
||||
unsigned int *nents,
|
||||
struct virtio_gpu_object *bo,
|
||||
struct dma_buf_attachment *attach)
|
||||
{
|
||||
struct scatterlist *sl;
|
||||
struct sg_table *sgt;
|
||||
long i, ret;
|
||||
|
||||
dma_resv_assert_held(attach->dmabuf->resv);
|
||||
|
||||
ret = dma_resv_wait_timeout(attach->dmabuf->resv,
|
||||
DMA_RESV_USAGE_KERNEL,
|
||||
false, MAX_SCHEDULE_TIMEOUT);
|
||||
if (ret <= 0)
|
||||
return ret < 0 ? ret : -ETIMEDOUT;
|
||||
|
||||
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
|
||||
if (IS_ERR(sgt))
|
||||
return PTR_ERR(sgt);
|
||||
|
||||
*ents = kvmalloc_array(sgt->nents,
|
||||
sizeof(struct virtio_gpu_mem_entry),
|
||||
GFP_KERNEL);
|
||||
if (!(*ents)) {
|
||||
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
*nents = sgt->nents;
|
||||
for_each_sgtable_dma_sg(sgt, sl, i) {
|
||||
(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
|
||||
(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
|
||||
(*ents)[i].padding = 0;
|
||||
}
|
||||
|
||||
bo->sgt = sgt;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
|
||||
{
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
|
||||
struct dma_buf_attachment *attach = obj->import_attach;
|
||||
struct dma_resv *resv = attach->dmabuf->resv;
|
||||
|
||||
if (attach) {
|
||||
dma_resv_lock(resv, NULL);
|
||||
|
||||
virtio_gpu_detach_object_fenced(bo);
|
||||
|
||||
if (bo->sgt)
|
||||
dma_buf_unmap_attachment(attach, bo->sgt,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
dma_resv_unlock(resv);
|
||||
|
||||
dma_buf_detach(attach->dmabuf, attach);
|
||||
dma_buf_put(attach->dmabuf);
|
||||
}
|
||||
|
||||
if (bo->created) {
|
||||
virtio_gpu_cmd_unref_resource(vgdev, bo);
|
||||
virtio_gpu_notify(vgdev);
|
||||
return;
|
||||
}
|
||||
virtio_gpu_cleanup_object(bo);
|
||||
}
|
||||
|
||||
static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
|
||||
struct virtio_gpu_object *bo,
|
||||
struct dma_buf_attachment *attach)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct virtio_gpu_object_params params = { 0 };
|
||||
struct dma_resv *resv = attach->dmabuf->resv;
|
||||
struct virtio_gpu_mem_entry *ents = NULL;
|
||||
unsigned int nents;
|
||||
int ret;
|
||||
|
||||
ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
|
||||
if (ret) {
|
||||
virtgpu_dma_buf_free_obj(&bo->base.base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dma_resv_lock(resv, NULL);
|
||||
|
||||
ret = dma_buf_pin(attach);
|
||||
if (ret)
|
||||
goto err_pin;
|
||||
|
||||
ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
|
||||
if (ret)
|
||||
goto err_import;
|
||||
|
||||
params.blob = true;
|
||||
params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
|
||||
params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
|
||||
params.size = attach->dmabuf->size;
|
||||
|
||||
virtio_gpu_cmd_resource_create_blob(vgdev, bo, ¶ms,
|
||||
ents, nents);
|
||||
bo->guest_blob = true;
|
||||
bo->attached = true;
|
||||
|
||||
dma_buf_unpin(attach);
|
||||
dma_resv_unlock(resv);
|
||||
|
||||
return 0;
|
||||
|
||||
err_import:
|
||||
dma_buf_unpin(attach);
|
||||
err_pin:
|
||||
dma_resv_unlock(resv);
|
||||
virtgpu_dma_buf_free_obj(&bo->base.base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
|
||||
.free = virtgpu_dma_buf_free_obj,
|
||||
};
|
||||
|
||||
static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
|
||||
{
|
||||
struct drm_gem_object *obj = attach->importer_priv;
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
|
||||
if (bo->created && kref_read(&obj->refcount)) {
|
||||
virtio_gpu_detach_object_fenced(bo);
|
||||
|
||||
if (bo->sgt)
|
||||
dma_buf_unmap_attachment(attach, bo->sgt,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
bo->sgt = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
|
||||
.allow_peer2peer = true,
|
||||
.move_notify = virtgpu_dma_buf_move_notify
|
||||
};
|
||||
|
||||
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
|
||||
struct dma_buf *buf)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct dma_buf_attachment *attach;
|
||||
struct virtio_gpu_object *bo;
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
if (buf->ops == &virtgpu_dmabuf_ops.ops) {
|
||||
obj = buf->priv;
|
||||
@@ -159,7 +310,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
|
||||
}
|
||||
}
|
||||
|
||||
return drm_gem_prime_import(dev, buf);
|
||||
if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
|
||||
return drm_gem_prime_import(dev, buf);
|
||||
|
||||
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
|
||||
if (!bo)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
obj = &bo->base.base;
|
||||
obj->funcs = &virtgpu_gem_dma_buf_funcs;
|
||||
drm_gem_private_object_init(dev, obj, buf->size);
|
||||
|
||||
attach = dma_buf_dynamic_attach(buf, dev->dev,
|
||||
&virtgpu_dma_buf_attach_ops, obj);
|
||||
if (IS_ERR(attach)) {
|
||||
kfree(bo);
|
||||
return ERR_CAST(attach);
|
||||
}
|
||||
|
||||
obj->import_attach = attach;
|
||||
get_dma_buf(buf);
|
||||
|
||||
ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
|
||||
|
||||
Reference in New Issue
Block a user