diff options
author | Linus Torvalds &lt;torvalds@linux-foundation.org&gt; | 2018-12-25 11:48:26 -0800
committer | Linus Torvalds &lt;torvalds@linux-foundation.org&gt; | 2018-12-25 11:48:26 -0800
commit | 4971f090aa7f6ce5daa094ce4334f6618f93a7eb (patch)
tree | 45d75782b7dedbec76a3ab82d2769f7707668071 /drivers/gpu/drm/virtio/virtgpu_vq.c
parent | c76cd634eb5bfd497617ea224a54a03b545c8c4d (diff)
parent | 2a3c83f5fe0770d13bbb71b23674886ff4111f44 (diff)
Merge tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"Core:
- shared fencing staging removal
- drop transactional atomic helpers and move helpers to new location
- DP/MST atomic cleanup
- Leasing cleanups and drop EXPORT_SYMBOL
- Convert drivers to atomic helpers and generic fbdev.
- removed deprecated obj_ref/unref in favour of get/put
- Improve dumb callback documentation
- MODESET_LOCK_BEGIN/END helpers
panels:
- CDTech panels, Banana Pi Panel, DLC1010GIG,
- Olimex LCD-O-LinuXino, Samsung S6D16D0, Truly NT35597 WQXGA,
- Himax HX8357D, simulated RTSM AEMv8.
- GPD Win2 panel
- AUO G101EVN010
vgem:
- render node support
ttm:
- move global init out of drivers
- fix LRU handling for ghost objects
- Support for simultaneous submissions to multiple engines
scheduler:
- timeout/fault handling changes to help GPU recovery
- helpers for hw with preemption support
i915:
- Scaler/Watermark fixes
- DP MST + powerwell fixes
- PSR fixes
- Break long get/put shmemfs pages
- Icelake fixes
- Icelake DSI video mode enablement
- Engine workaround improvements
amdgpu:
- freesync support
- GPU reset enabled on CI, VI, SOC15 dGPUs
- ABM support in DC
- KFD support for vega12/polaris12
- SDMA paging queue on vega
- More amdkfd code sharing
- DCC scanout on GFX9
- DC kerneldoc
- Updated SMU firmware for GFX8 chips
- XGMI PSP + hive reset support
- GPU reset
- DC trace support
- Powerplay updates for newer Polaris
- Cursor plane update fast path
- kfd dma-buf support
virtio-gpu:
- add EDID support
vmwgfx:
- pageflip with damage support
nouveau:
- Initial Turing TU104/TU106 modesetting support
msm:
- a2xx gpu support for apq8060 and imx5
- a2xx gpummu support
- mdp4 display support for apq8060
- DPU fixes and cleanups
- enhanced profiling support
- debug object naming interface
- get_iova/page pinning decoupling
tegra:
- Tegra194 host1x, VIC and display support enabled
- Audio over HDMI for Tegra186 and Tegra194
exynos:
- DMA/IOMMU refactoring
- plane alpha + blend mode support
- Color format fixes for mixer driver
rcar-du:
- R8A7744 and R8A77470 support
- R8A77965 LVDS support
imx:
- fbdev emulation fix
- multi-tiled scaling fixes
- SPDX identifiers
rockchip:
- dw_hdmi support
- dw-mipi-dsi + dual dsi support
- mailbox read size fix
qxl:
- fix cursor pinning
vc4:
- YUV support (scaling + cursor)
v3d:
- enable TFU (Texture Formatting Unit)
mali-dp:
- add support for linear tiled formats
sun4i:
- Display Engine 3 support
- H6 DE3 mixer 0 support
- H6 display engine support
- dw-hdmi support
- H6 HDMI phy support
- implicit fence waiting
- BGRX8888 support
meson:
- Overlay plane support
- implicit fence waiting
- HDMI 1.4 4k modes
bridge:
- i2c fixes for sii902x"
* tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm: (1403 commits)
drm/amd/display: Add fast path for cursor plane updates
drm/amdgpu: Enable GPU recovery by default for CI
drm/amd/display: Fix duplicating scaling/underscan connector state
drm/amd/display: Fix uninitialized max_bpc state values
Revert "drm/amd/display: Set RMX_ASPECT as default"
drm/amdgpu: Fix stub function name
drm/msm/dpu: Fix clock issue after bind failure
drm/msm/dpu: Clean up dpu_media_info.h static inline functions
drm/msm/dpu: Further cleanups for static inline functions
drm/msm/dpu: Cleanup the debugfs functions
drm/msm/dpu: Remove dpu_irq and unused functions
drm/msm: Make irq_postinstall optional
drm/msm/dpu: Cleanup callers of dpu_hw_blk_init
drm/msm/dpu: Remove unused functions
drm/msm/dpu: Remove dpu_crtc_is_enabled()
drm/msm/dpu: Remove dpu_crtc_get_mixer_height
drm/msm/dpu: Remove dpu_dbg
drm/msm: dpu: Remove crtc_lock
drm/msm: dpu: Remove vblank_requested flag from dpu_crtc
drm/msm: dpu: Separate crtc assignment from vblank enable
...
Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r-- | drivers/gpu/drm/virtio/virtgpu_vq.c | 129
1 file changed, 89 insertions(+), 40 deletions(-)
```diff
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 4e2e037aed34..e27c4aedb809 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -38,26 +38,6 @@
                        + MAX_INLINE_CMD_SIZE \
                        + MAX_INLINE_RESP_SIZE)
 
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-                                uint32_t *resid)
-{
-        int handle;
-
-        idr_preload(GFP_KERNEL);
-        spin_lock(&vgdev->resource_idr_lock);
-        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
-        spin_unlock(&vgdev->resource_idr_lock);
-        idr_preload_end();
-        *resid = handle;
-}
-
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
-        spin_lock(&vgdev->resource_idr_lock);
-        idr_remove(&vgdev->resource_idr, id);
-        spin_unlock(&vgdev->resource_idr_lock);
-}
-
 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 {
         struct drm_device *dev = vq->vdev->priv;
@@ -98,10 +78,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
         struct virtio_gpu_vbuffer *vbuf;
 
-        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
         if (!vbuf)
                 return ERR_PTR(-ENOMEM);
-        memset(vbuf, 0, VBUFFER_SIZE);
 
         BUG_ON(size > MAX_INLINE_CMD_SIZE);
         vbuf->buf = (void *)vbuf + sizeof(*vbuf);
@@ -319,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct virtio_gpu_ctrl_hdr *hdr,
-                                               struct virtio_gpu_fence **fence)
+                                               struct virtio_gpu_fence *fence)
 {
         struct virtqueue *vq = vgdev->ctrlq.vq;
         int rc;
@@ -388,7 +367,7 @@ retry:
 
 /* create a basic resource */
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
-                                    uint32_t resource_id,
+                                    struct virtio_gpu_object *bo,
                                     uint32_t format,
                                     uint32_t width,
                                     uint32_t height)
@@ -400,12 +379,13 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
         memset(cmd_p, 0, sizeof(*cmd_p));
 
         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
-        cmd_p->resource_id = cpu_to_le32(resource_id);
+        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
         cmd_p->format = cpu_to_le32(format);
         cmd_p->width = cpu_to_le32(width);
         cmd_p->height = cpu_to_le32(height);
 
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        bo->created = true;
 }
 
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
@@ -425,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 static void
 virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
-                                      struct virtio_gpu_fence **fence)
+                                      struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_resource_detach_backing *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -487,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                         uint64_t offset,
                                         __le32 width, __le32 height,
                                         __le32 x, __le32 y,
-                                        struct virtio_gpu_fence **fence)
+                                        struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_transfer_to_host_2d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -517,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id,
                                        struct virtio_gpu_mem_entry *ents,
                                        uint32_t nents,
-                                       struct virtio_gpu_fence **fence)
+                                       struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_resource_attach_backing *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -604,6 +584,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
         wake_up(&vgdev->resp_wq);
 }
 
+static int virtio_get_edid_block(void *data, u8 *buf,
+                                 unsigned int block, size_t len)
+{
+        struct virtio_gpu_resp_edid *resp = data;
+        size_t start = block * EDID_LENGTH;
+
+        if (start + len > le32_to_cpu(resp->size))
+                return -1;
+        memcpy(buf, resp->edid + start, len);
+        return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+                                       struct virtio_gpu_vbuffer *vbuf)
+{
+        struct virtio_gpu_cmd_get_edid *cmd =
+                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+        struct virtio_gpu_resp_edid *resp =
+                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+        uint32_t scanout = le32_to_cpu(cmd->scanout);
+        struct virtio_gpu_output *output;
+        struct edid *new_edid, *old_edid;
+
+        if (scanout >= vgdev->num_scanouts)
+                return;
+        output = vgdev->outputs + scanout;
+
+        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+
+        spin_lock(&vgdev->display_info_lock);
+        old_edid = output->edid;
+        output->edid = new_edid;
+        drm_connector_update_edid_property(&output->conn, output->edid);
+        spin_unlock(&vgdev->display_info_lock);
+
+        kfree(old_edid);
+        wake_up(&vgdev->resp_wq);
+}
+
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 {
         struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -706,6 +725,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
         return 0;
 }
 
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+        struct virtio_gpu_cmd_get_edid *cmd_p;
+        struct virtio_gpu_vbuffer *vbuf;
+        void *resp_buf;
+        int scanout;
+
+        if (WARN_ON(!vgdev->has_edid))
+                return -EINVAL;
+
+        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+                                   GFP_KERNEL);
+                if (!resp_buf)
+                        return -ENOMEM;
+
+                cmd_p = virtio_gpu_alloc_cmd_resp
+                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+                         resp_buf);
+                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+                cmd_p->scanout = cpu_to_le32(scanout);
+                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        }
+
+        return 0;
+}
+
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                    uint32_t nlen, const char *name)
 {
@@ -772,8 +819,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
-                                  struct virtio_gpu_resource_create_3d *rc_3d,
-                                  struct virtio_gpu_fence **fence)
+                                  struct virtio_gpu_object *bo,
+                                  struct virtio_gpu_resource_create_3d *rc_3d)
 {
         struct virtio_gpu_resource_create_3d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -785,7 +832,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
         cmd_p->hdr.flags = 0;
 
-        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        bo->created = true;
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
@@ -793,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                         uint32_t ctx_id,
                                         uint64_t offset, uint32_t level,
                                         struct virtio_gpu_box *box,
-                                        struct virtio_gpu_fence **fence)
+                                        struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_transfer_host_3d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -821,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id, uint32_t ctx_id,
                                           uint64_t offset, uint32_t level,
                                           struct virtio_gpu_box *box,
-                                          struct virtio_gpu_fence **fence)
+                                          struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_transfer_host_3d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -841,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                            void *data, uint32_t data_size,
-                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
+                           uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_cmd_submit *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
@@ -861,14 +909,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
-                             uint32_t resource_id,
-                             struct virtio_gpu_fence **fence)
+                             struct virtio_gpu_fence *fence)
 {
         bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
         struct virtio_gpu_mem_entry *ents;
         struct scatterlist *sg;
         int si, nents;
 
+        if (!obj->created)
+                return 0;
+
         if (!obj->pages) {
                 int ret;
 
@@ -902,10 +952,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                 ents[si].padding = 0;
         }
 
-        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                                ents, nents,
                                                fence);
-        obj->hw_res_handle = resource_id;
         return 0;
 }
 
@@ -913,11 +962,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                               struct virtio_gpu_object *obj)
 {
         bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-        struct virtio_gpu_fence *fence;
 
         if (use_dma_api && obj->mapped) {
+                struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
                 /* detach backing and wait for the host process it ... */
-                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
                 dma_fence_wait(&fence->f, true);
                 dma_fence_put(&fence->f);
```
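A note on the new EDID path above: `drm_do_get_edid()` pulls the EDID from the device one `EDID_LENGTH` (128-byte) block at a time through the callback it is handed, which is why `virtio_get_edid_block()` only has to bounds-check against the response size and copy the requested block out of the response buffer. Below is a minimal, self-contained userspace sketch of that callback contract; the `fake_resp` type, `get_edid_block()` helper, and `main()` harness are illustrative stand-ins, not driver code.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128  /* one EDID block, as in the kernel's drm_edid.h */

/* Stand-in for struct virtio_gpu_resp_edid: a size plus raw EDID bytes. */
struct fake_resp {
	uint32_t size;
	uint8_t edid[EDID_LENGTH * 2];
};

/* Same contract as the driver's callback: copy `len` bytes of EDID
 * block `block` into `buf`; return 0 on success, nonzero on failure. */
static int get_edid_block(void *data, uint8_t *buf,
			  unsigned int block, size_t len)
{
	struct fake_resp *resp = data;
	size_t start = (size_t)block * EDID_LENGTH;

	if (start + len > resp->size)
		return -1;	/* requested range lies past the response */
	memcpy(buf, resp->edid + start, len);
	return 0;
}

int main(void)
{
	struct fake_resp resp = { .size = EDID_LENGTH };
	uint8_t block0[EDID_LENGTH];

	/* drm_do_get_edid() drives the callback block by block and
	 * validates each one; here we just fetch block 0 by hand. */
	if (get_edid_block(&resp, block0, 0, sizeof(block0)) == 0)
		printf("read a %d-byte EDID block\n", EDID_LENGTH);
	return 0;
}
```

Separately, the recurring `struct virtio_gpu_fence **fence` to `struct virtio_gpu_fence *fence` change in this diff moves fence allocation out of the command helpers and into the callers, as seen in `virtio_gpu_object_detach()` above, which now calls `virtio_gpu_fence_alloc()` itself before queuing the detach command.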