mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 23:03:57 -04:00
drm/virtio: Add drm_panic support
Virtio gpu supports the drm_panic module, which displays a message on the screen when a kernel panic occurs. It is supported only when the framebuffer is backed by a vmapped shmem BO. Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com> Signed-off-by: Ryosuke Yasuoka <ryasuoka@redhat.com> Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Link: https://patchwork.freedesktop.org/patch/msgid/20250206104300.416014-1-ryasuoka@redhat.com
This commit is contained in:
committed by
Dmitry Osipenko
parent
05345cea4f
commit
6f3d9d0dd3
@@ -86,6 +86,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
|
||||
vgdev->vbufs = NULL;
|
||||
}
|
||||
|
||||
/* For drm_panic */
|
||||
static struct virtio_gpu_vbuffer*
|
||||
virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
|
||||
{
|
||||
struct virtio_gpu_vbuffer *vbuf;
|
||||
|
||||
vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
|
||||
|
||||
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
|
||||
vbuf->size = size;
|
||||
vbuf->resp_cb = NULL;
|
||||
vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
|
||||
vbuf->resp_buf = (void *)vbuf->buf + size;
|
||||
return vbuf;
|
||||
}
|
||||
|
||||
static struct virtio_gpu_vbuffer*
|
||||
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
|
||||
int size, int resp_size, void *resp_buf,
|
||||
@@ -137,6 +153,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
|
||||
return (struct virtio_gpu_update_cursor *)vbuf->buf;
|
||||
}
|
||||
|
||||
/* For drm_panic */
|
||||
static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_vbuffer **vbuffer_p,
|
||||
int cmd_size)
|
||||
{
|
||||
struct virtio_gpu_vbuffer *vbuf;
|
||||
|
||||
vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
|
||||
*vbuffer_p = vbuf;
|
||||
return (struct virtio_gpu_command *)vbuf->buf;
|
||||
}
|
||||
|
||||
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
|
||||
virtio_gpu_resp_cb cb,
|
||||
struct virtio_gpu_vbuffer **vbuffer_p,
|
||||
@@ -311,6 +339,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
|
||||
return sgt;
|
||||
}
|
||||
|
||||
/* For drm_panic */
/*
 * Add a command's scatterlists to the control virtqueue from the panic
 * handler. Unlike the regular queueing path this takes no locks and never
 * waits: if the ring lacks free descriptors it simply fails with -ENOMEM,
 * since we cannot block or retry during a panic.
 *
 * The command is only queued here, not kicked; the caller must follow up
 * with virtio_gpu_panic_notify() to make the host process it.
 *
 * Returns 0 on success, -ENOMEM if the virtqueue is too full.
 */
static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf,
					   int elemcnt,
					   struct scatterlist **sgs,
					   int outcnt,
					   int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret;

	/* With indirect descriptors the whole request consumes one slot. */
	if (vgdev->has_indirect)
		elemcnt = 1;

	/* Can't sleep waiting for ring space in panic context: bail out. */
	if (vq->num_free < elemcnt)
		return -ENOMEM;

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	/* Should not fail after the num_free check above; warn if it does. */
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	/* Record that a kick is pending for virtio_gpu_panic_notify(). */
	atomic_inc(&vgdev->pending_commands);

	return 0;
}
|
||||
|
||||
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_vbuffer *vbuf,
|
||||
struct virtio_gpu_fence *fence,
|
||||
@@ -368,6 +424,32 @@ again:
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* For drm_panic */
|
||||
static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_vbuffer *vbuf)
|
||||
{
|
||||
struct scatterlist *sgs[3], vcmd, vresp;
|
||||
int elemcnt = 0, outcnt = 0, incnt = 0;
|
||||
|
||||
/* set up vcmd */
|
||||
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
|
||||
elemcnt++;
|
||||
sgs[outcnt] = &vcmd;
|
||||
outcnt++;
|
||||
|
||||
/* set up vresp */
|
||||
if (vbuf->resp_size) {
|
||||
sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
|
||||
elemcnt++;
|
||||
sgs[outcnt + incnt] = &vresp;
|
||||
incnt++;
|
||||
}
|
||||
|
||||
return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
|
||||
elemcnt, sgs,
|
||||
outcnt, incnt);
|
||||
}
|
||||
|
||||
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_vbuffer *vbuf,
|
||||
struct virtio_gpu_fence *fence)
|
||||
@@ -422,6 +504,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* For drm_panic */
|
||||
void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
|
||||
{
|
||||
bool notify;
|
||||
|
||||
if (!atomic_read(&vgdev->pending_commands))
|
||||
return;
|
||||
|
||||
atomic_set(&vgdev->pending_commands, 0);
|
||||
notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
|
||||
|
||||
if (notify)
|
||||
virtqueue_notify(vgdev->ctrlq.vq);
|
||||
}
|
||||
|
||||
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
|
||||
{
|
||||
bool notify;
|
||||
@@ -567,6 +664,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
|
||||
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
|
||||
}
|
||||
|
||||
/* For drm_panic */
|
||||
void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
|
||||
uint32_t resource_id,
|
||||
uint32_t x, uint32_t y,
|
||||
uint32_t width, uint32_t height)
|
||||
{
|
||||
struct virtio_gpu_resource_flush *cmd_p;
|
||||
struct virtio_gpu_vbuffer *vbuf;
|
||||
|
||||
cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
|
||||
memset(cmd_p, 0, sizeof(*cmd_p));
|
||||
vbuf->objs = NULL;
|
||||
|
||||
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
|
||||
cmd_p->resource_id = cpu_to_le32(resource_id);
|
||||
cmd_p->r.width = cpu_to_le32(width);
|
||||
cmd_p->r.height = cpu_to_le32(height);
|
||||
cmd_p->r.x = cpu_to_le32(x);
|
||||
cmd_p->r.y = cpu_to_le32(y);
|
||||
|
||||
virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
|
||||
}
|
||||
|
||||
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
|
||||
uint32_t resource_id,
|
||||
uint32_t x, uint32_t y,
|
||||
@@ -591,6 +711,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
|
||||
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
|
||||
}
|
||||
|
||||
/* For drm_panic */
|
||||
int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
|
||||
uint64_t offset,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t x, uint32_t y,
|
||||
struct virtio_gpu_object_array *objs)
|
||||
{
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
|
||||
struct virtio_gpu_transfer_to_host_2d *cmd_p;
|
||||
struct virtio_gpu_vbuffer *vbuf;
|
||||
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
|
||||
|
||||
if (virtio_gpu_is_shmem(bo) && use_dma_api)
|
||||
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
|
||||
bo->base.sgt, DMA_TO_DEVICE);
|
||||
|
||||
cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
|
||||
memset(cmd_p, 0, sizeof(*cmd_p));
|
||||
vbuf->objs = objs;
|
||||
|
||||
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
|
||||
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
|
||||
cmd_p->offset = cpu_to_le64(offset);
|
||||
cmd_p->r.width = cpu_to_le32(width);
|
||||
cmd_p->r.height = cpu_to_le32(height);
|
||||
cmd_p->r.x = cpu_to_le32(x);
|
||||
cmd_p->r.y = cpu_to_le32(y);
|
||||
|
||||
return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
|
||||
}
|
||||
|
||||
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
|
||||
uint64_t offset,
|
||||
uint32_t width, uint32_t height,
|
||||
|
||||
Reference in New Issue
Block a user