author     Dave Airlie <airlied@redhat.com>  2020-11-04 10:55:11 +1000
committer  Dave Airlie <airlied@redhat.com>  2020-11-04 11:49:10 +1000
commit     1cd260a7905e3ba2e5dfa39b110ad6cf8f466f49 (patch)
tree       bfb701fdb0fcb32f8e6e53fb1692361c8fa33a6a /drivers/gpu/drm/virtio/virtgpu_ioctl.c
parent     3cea11cd5e3b00d91caf0b4730194039b45c5891 (diff)
parent     4dfec0d1d7b9970f36931de714b379dbeaed83f8 (diff)
Merge tag 'drm-misc-next-2020-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.11:

UAPI Changes:
- doc: Rules for EBUSY on non-blocking commits; requirements for fourcc modifiers; on parsing EDID
- fbdev/sbuslib: Remove unused FBIOSCURSOR32
- fourcc: Deprecate DRM_FORMAT_MOD_NONE
- virtio: Support blob resources for memory allocations; Expose host-visible and cross-device features

Cross-subsystem Changes:
- devicetree: Add vendor prefix for Yes Optoelectronics, Shanghai Top Display Optoelectronics
- dma-buf: Add struct dma_buf_map that stores DMA pointer and I/O-memory flag; dma_buf_vmap()/vunmap() return address in dma_buf_map; Use struct_size() macro

Core Changes:
- atomic: Pass full state to CRTC atomic enable/disable; warn for EBUSY during non-blocking commits
- dp: Prepare for DP 2.0 DPCD
- dp_mst: Receive extended DPCD caps
- dma-buf: Documentation
- doc: Format modifiers; dma-buf-map; Cleanups
- fbdev: Don't use compat_alloc_user_space(); mark as orphaned
- fb-helper: Take lock in drm_fb_helper_restore_work_fb()
- gem: Convert implementation and drivers to GEM object functions, remove GEM callbacks from struct drm_driver (except gem_prime_mmap)
- panel: Cleanups
- pci: Add legacy infix to drm_irq_by_busid()
- sched: Avoid infinite waits in drm_sched_entity_destroy()
- switcheroo: Cleanups
- ttm: Remove AGP support; Don't modify caching during swapout; Major refactoring of the implementation and API that affects all depending drivers; Add ttm_bo_wait_ctx(); Add ttm_bo_pin()/unpin() in favor of TTM_PL_FLAG_NO_EVICT; Remove ttm_bo_create(); Remove fault_reserve_notify() callback; Push move() implementation into drivers; Remove TTM_PAGE_FLAG_WRITE; Replace caching flags with init-time cache setting; Push ttm_tt_bind() into drivers; Replace move_notify() with delete_mem_notify(); No overlapping memcpy(); no more ttm_set_populated()
- vram-helper: Fix BO top-down placement; TTM-related changes; Init GEM object functions with defaults; Default placement in system memory; Cleanups

Driver Changes:
- amdgpu: Use GEM object functions
- armada: Use GEM object functions
- aspeed: Configure output via sysfs; Init struct drm_driver with DRM_GEM_CMA_DRIVER_OPS
- ast: Reload LUT after FB format changes
- bridge: Add driver and DT bindings for anx7625; Cleanups
- bridge/dw-hdmi: Constify ops
- bridge/ti-sn65dsi86: Add retries for link training
- bridge/lvds-codec: Add support for regulator
- bridge/tc358768: Restore connector support; Cleanups
- display/ti,j721e-dss: Add DT properties assigned-clocks, assigned-clocks-parent and dma-coherent
- display/ti,am65x-dss: Add DT properties assigned-clocks, assigned-clocks-parent and dma-coherent
- etnaviv: Use GEM object functions
- exynos: Use GEM object functions
- fbdev: Cleanups and compiler fixes throughout framebuffer drivers
- fbdev/cirrusfb: Avoid division by 0
- gma500: Use GEM object functions; Fix double-free of connector; Cleanups
- hisilicon/hibmc: I2C-based DDC support; Use to_hibmc_drm_device(); Cleanups
- i915: Use GEM object functions
- imx/dcss: Init driver with DRM_GEM_CMA_DRIVER_OPS; Cleanups
- ingenic: Reset pixel clock when parent clock changes; support reserved memory; Alloc F0 and F1 DMA channels at once; Support different pixel formats; Revert support for cached mmap buffers on F0/F1; support 30-bit/24-bit/8-bit-palette modes
- komeda: Use DEFINE_SHOW_ATTRIBUTE
- mcde: Detect platform_get_irq() errors
- mediatek: Use GEM object functions
- msm: Use GEM object functions
- nouveau: Cleanups; TTM-related changes; Use GEM object functions
- omapdrm: Use GEM object functions
- panel: Add driver and DT bindings for Novatek nt36672a; Add driver and DT bindings for YTC700TLAG-05-201C; Add driver and DT bindings for TDO TL070WSH30; Cleanups
- panel/mantix: Fix reset; Fix deref of NULL pointer in mantix_get_modes()
- panel/otm8009a: Allow non-continuous dsi clock; Cleanups
- panel/rm68200: Allow non-continuous dsi clock; Fix mode to 50 FPS
- panfrost: Fix job timeout handling; Cleanups
- pl111: Use GEM object functions
- qxl: Cleanups; TTM-related changes; Pin new BOs with ttm_bo_init_reserved()
- radeon: Cleanups; TTM-related changes; Use GEM object functions
- rockchip: Use GEM object functions
- shmobile: Cleanups
- tegra: Use GEM object functions
- tidss: Set drm_plane_helper_funcs.prepare_fb
- tilcdc: Don't keep vblank interrupt enabled all the time
- tve200: Detect platform_get_irq() errors
- vc4: Use GEM object functions; Only register components once DSI is attached; Add Maxime as maintainer
- vgem: Use GEM object functions
- via: Simplify critical section in via_mem_alloc()
- virtgpu: Use GEM object functions
- virtio: Implement blob resources, host-visible and cross-device features; Support mapping of host-allocated resources; Use UUID API; Cleanups
- vkms: Use GEM object functions; Switch to SHMEM
- vmwgfx: TTM-related changes; Inline ttm_bo_swapout_all()
- xen: Use GEM object functions
- xlnx: Use GEM object functions

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201027100936.GA4858@linux-uq9g
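The virtio blob-resource feature set advertised by this pull can be probed from userspace through the existing getparam ioctl before the new blob ioctl is used. Below is a minimal sketch, assuming the UAPI definitions from include/uapi/drm/virtgpu_drm.h (typically installed by libdrm as <libdrm/virtgpu_drm.h>), an already opened render node at an assumed path, and an illustrative helper name virtgpu_getparam that is not part of any library:

/*
 * Hedged sketch: probe the VIRTGPU_PARAM_* values handled in the
 * getparam hunk of this patch. The header path, render-node path and
 * helper name are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>

static int virtgpu_getparam(int fd, uint64_t param, int *value)
{
	struct drm_virtgpu_getparam gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = param;
	/* The kernel copies an int back through this user pointer. */
	gp.value = (uint64_t)(uintptr_t)value;

	/* drmIoctl() from libdrm would additionally restart on EINTR. */
	return ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node path */
	int blob = 0, host_visible = 0, cross_device = 0;

	if (fd < 0)
		return 1;

	virtgpu_getparam(fd, VIRTGPU_PARAM_RESOURCE_BLOB, &blob);
	virtgpu_getparam(fd, VIRTGPU_PARAM_HOST_VISIBLE, &host_visible);
	virtgpu_getparam(fd, VIRTGPU_PARAM_CROSS_DEVICE, &cross_device);

	printf("blob=%d host_visible=%d cross_device=%d\n",
	       blob, host_visible, cross_device);
	close(fd);
	return 0;
}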
Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_ioctl.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c  185
1 file changed, 180 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c8da7adc6b30..5417f365d1a3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -34,6 +34,10 @@
#include "virtgpu_drv.h"
+#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
+ VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
+ VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -208,11 +212,20 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
switch (param->param) {
case VIRTGPU_PARAM_3D_FEATURES:
- value = vgdev->has_virgl_3d == true ? 1 : 0;
+ value = vgdev->has_virgl_3d ? 1 : 0;
break;
case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
value = 1;
break;
+ case VIRTGPU_PARAM_RESOURCE_BLOB:
+ value = vgdev->has_resource_blob ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_HOST_VISIBLE:
+ value = vgdev->has_host_visible ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_CROSS_DEVICE:
+ value = vgdev->has_resource_assign_uuid ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -301,6 +314,9 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
+ if (qobj->host3d_blob || qobj->guest_blob)
+ ri->blob_mem = qobj->blob_mem;
+
drm_gem_object_put(gobj);
return 0;
}
@@ -312,6 +328,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
@@ -325,6 +342,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -334,9 +362,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
ret = -ENOMEM;
goto err_unlock;
}
+
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, vfpriv->ctx_id, offset, args->level,
- &args->box, objs, fence);
+ (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+ args->layer_stride, &args->box, objs, fence);
dma_fence_put(&fence->f);
virtio_gpu_notify(vgdev);
return 0;
@@ -354,6 +383,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
@@ -363,6 +393,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
@@ -370,6 +406,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
objs, NULL);
} else {
virtio_gpu_create_context(dev, file);
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -381,8 +423,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
- vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &args->box, objs, fence);
+ vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+ args->stride, args->layer_stride, &args->box, objs,
+ fence);
dma_fence_put(&fence->f);
}
virtio_gpu_notify(vgdev);
@@ -491,6 +534,134 @@ copy_exit:
return 0;
}
+static int verify_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv,
+ struct virtio_gpu_object_params *params,
+ struct drm_virtgpu_resource_create_blob *rc_blob,
+ bool *guest_blob, bool *host3d_blob)
+{
+ if (!vgdev->has_resource_blob)
+ return -EINVAL;
+
+ if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
+ !rc_blob->blob_flags)
+ return -EINVAL;
+
+ if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+ if (!vgdev->has_resource_assign_uuid)
+ return -EINVAL;
+ }
+
+ switch (rc_blob->blob_mem) {
+ case VIRTGPU_BLOB_MEM_GUEST:
+ *guest_blob = true;
+ break;
+ case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
+ *guest_blob = true;
+ fallthrough;
+ case VIRTGPU_BLOB_MEM_HOST3D:
+ *host3d_blob = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*host3d_blob) {
+ if (!vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Must be dword aligned. */
+ if (rc_blob->cmd_size % 4 != 0)
+ return -EINVAL;
+
+ params->ctx_id = vfpriv->ctx_id;
+ params->blob_id = rc_blob->blob_id;
+ } else {
+ if (rc_blob->blob_id != 0)
+ return -EINVAL;
+
+ if (rc_blob->cmd_size != 0)
+ return -EINVAL;
+ }
+
+ params->blob_mem = rc_blob->blob_mem;
+ params->size = rc_blob->size;
+ params->blob = true;
+ params->blob_flags = rc_blob->blob_flags;
+ return 0;
+}
+
+static int virtio_gpu_resource_create_blob(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t handle = 0;
+ bool guest_blob = false;
+ bool host3d_blob = false;
+ struct drm_gem_object *obj;
+ struct virtio_gpu_object *bo;
+ struct virtio_gpu_object_params params = { 0 };
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_resource_create_blob *rc_blob = data;
+
+ if (verify_blob(vgdev, vfpriv, &params, rc_blob,
+ &guest_blob, &host3d_blob))
+ return -EINVAL;
+
+ if (vgdev->has_virgl_3d)
+ virtio_gpu_create_context(dev, file);
+
+ if (rc_blob->cmd_size) {
+ void *buf;
+
+ buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
+ rc_blob->cmd_size);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
+ vfpriv->ctx_id, NULL, NULL);
+ }
+
+ if (guest_blob)
+ ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
+ else if (!guest_blob && host3d_blob)
+ ret = virtio_gpu_vram_create(vgdev, &params, &bo);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+
+ bo->guest_blob = guest_blob;
+ bo->host3d_blob = host3d_blob;
+ bo->blob_mem = rc_blob->blob_mem;
+ bo->blob_flags = rc_blob->blob_flags;
+
+ obj = &bo->base.base;
+ if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+ ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+ if (ret) {
+ drm_gem_object_release(obj);
+ return ret;
+ }
+ }
+
+ ret = drm_gem_handle_create(file, obj, &handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ return ret;
+ }
+ drm_gem_object_put(obj);
+
+ rc_blob->res_handle = bo->hw_res_handle;
+ rc_blob->bo_handle = handle;
+
+ return 0;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -523,4 +694,8 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
+ virtio_gpu_resource_create_blob,
+ DRM_RENDER_ALLOW),
};
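For reference, here is a userspace sketch of the VIRTGPU_RESOURCE_CREATE_BLOB path registered above, restricted to the VIRTGPU_BLOB_MEM_GUEST case so that blob_id, cmd and cmd_size stay zero as verify_blob() requires. It assumes struct drm_virtgpu_resource_create_blob and the VIRTGPU_BLOB_* definitions from the matching UAPI header; the helper name is illustrative, not an established API:

/*
 * Hedged sketch: create a mappable guest-memory blob through the ioctl
 * added by this patch. Assumes DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB and
 * the VIRTGPU_BLOB_* flags from <libdrm/virtgpu_drm.h>; helper name is
 * illustrative only.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>

static int virtgpu_create_guest_blob(int fd, uint64_t size,
				     uint32_t *bo_handle, uint32_t *res_handle)
{
	struct drm_virtgpu_resource_create_blob rc;
	int ret;

	memset(&rc, 0, sizeof(rc));
	rc.blob_mem   = VIRTGPU_BLOB_MEM_GUEST;
	rc.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
	rc.size       = size;
	/* blob_id, cmd and cmd_size are left zero: verify_blob() rejects
	 * nonzero values for guest-memory blobs. */

	ret = ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &rc);
	if (ret)
		return ret;

	*bo_handle = rc.bo_handle;   /* GEM handle returned to the caller */
	*res_handle = rc.res_handle; /* virtio resource id on the host side */
	return 0;
}

A VIRTGPU_BLOB_MEM_HOST3D blob would instead go through the context and blob_id handling in verify_blob() and is host/driver specific, so it is omitted from this sketch.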