path: root/drivers/gpu/drm/virtio/virtgpu_vq.c
author    Dave Airlie <airlied@redhat.com>  2020-11-04 10:55:11 +1000
committer Dave Airlie <airlied@redhat.com>  2020-11-04 11:49:10 +1000
commit    1cd260a7905e3ba2e5dfa39b110ad6cf8f466f49 (patch)
tree      bfb701fdb0fcb32f8e6e53fb1692361c8fa33a6a /drivers/gpu/drm/virtio/virtgpu_vq.c
parent    3cea11cd5e3b00d91caf0b4730194039b45c5891 (diff)
parent    4dfec0d1d7b9970f36931de714b379dbeaed83f8 (diff)
Merge tag 'drm-misc-next-2020-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.11:

UAPI Changes:

- doc: rules for EBUSY on non-blocking commits; requirements for fourcc modifiers; on parsing EDID
- fbdev/sbuslib: Remove unused FBIOSCURSOR32
- fourcc: deprecate DRM_FORMAT_MOD_NONE
- virtio: Support blob resources for memory allocations; Expose host-visible and cross-device features (see the usage sketch after this message)

Cross-subsystem Changes:

- devicetree: Add vendor prefix for Yes Optoelectronics, Shanghai Top Display Optoelectronics
- dma-buf: Add struct dma_buf_map that stores DMA pointer and I/O-memory flag; dma_buf_vmap()/vunmap() return address in dma_buf_map; Use struct_size() macro

Core Changes:

- atomic: Pass full state to CRTC atomic enable/disable; warn for EBUSY during non-blocking commits
- dp: Prepare for DP 2.0 DPCD
- dp_mst: Receive extended DPCD caps
- dma-buf: Documentation
- doc: Format modifiers; dma-buf-map; Cleanups
- fbdev: Don't use compat_alloc_user_space(); mark as orphaned
- fb-helper: Take lock in drm_fb_helper_restore_work_fb()
- gem: Convert implementation and drivers to GEM object functions, remove GEM callbacks from struct drm_driver (except gem_prime_mmap)
- panel: Cleanups
- pci: Add legacy infix to drm_irq_by_busid()
- sched: Avoid infinite waits in drm_sched_entity_destroy()
- switcheroo: Cleanups
- ttm: Remove AGP support; Don't modify caching during swapout; Major refactoring of the implementation and API that affects all depending drivers; Add ttm_bo_wait_ctx(); Add ttm_bo_pin()/unpin() in favor of TTM_PL_FLAG_NO_EVICT; Remove ttm_bo_create(); Remove fault_reserve_notify() callback; Push move() implementation into drivers; Remove TTM_PAGE_FLAG_WRITE; Replace caching flags with init-time cache setting; Push ttm_tt_bind() into drivers; Replace move_notify() with delete_mem_notify(); No overlapping memcpy(); no more ttm_set_populated()
- vram-helper: Fix BO top-down placement; TTM-related changes; Init GEM object functions with defaults; Default placement in system memory; Cleanups

Driver Changes:

- amdgpu: Use GEM object functions
- armada: Use GEM object functions
- aspeed: Configure output via sysfs; Init struct drm_driver with DRM_GEM_CMA_DRIVER_OPS; Cleanups
- ast: Reload LUT after FB format changes
- bridge: Add driver and DT bindings for anx7625; Cleanups
- bridge/dw-hdmi: Constify ops
- bridge/ti-sn65dsi86: Add retries for link training
- bridge/lvds-codec: Add support for regulator
- bridge/tc358768: Restore connector support
- display/ti,j721e-dss: Add DT properties assigned-clocks, assigned-clock-parents and dma-coherent
- display/ti,am65x-dss: Add DT properties assigned-clocks, assigned-clock-parents and dma-coherent
- etnaviv: Use GEM object functions
- exynos: Use GEM object functions
- fbdev: Cleanups and compiler fixes throughout framebuffer drivers
- fbdev/cirrusfb: Avoid division by 0
- gma500: Use GEM object functions; Fix double-free of connector; Cleanups
- hisilicon/hibmc: I2C-based DDC support; Use to_hibmc_drm_device(); Cleanups
- i915: Use GEM object functions
- imx/dcss: Init driver with DRM_GEM_CMA_DRIVER_OPS; Cleanups
- ingenic: Reset pixel clock when parent clock changes; support reserved memory; Alloc F0 and F1 DMA channels at once; Support different pixel formats; Revert support for cached mmap buffers on F0/F1; support 30-bit/24-bit/8-bit-palette modes
- komeda: Use DEFINE_SHOW_ATTRIBUTE
- mcde: Detect platform_get_irq() errors
- mediatek: Use GEM object functions
- msm: Use GEM object functions
- nouveau: Cleanups; TTM-related changes; Use GEM object functions
- omapdrm: Use GEM object functions
- panel: Add driver and DT bindings for Novatek nt36672a; Add driver and DT bindings for YTC700TLAG-05-201C; Add driver and DT bindings for TDO TL070WSH30; Cleanups
- panel/mantix: Fix reset; Fix deref of NULL pointer in mantix_get_modes()
- panel/otm8009a: Allow non-continuous dsi clock; Cleanups
- panel/rm68200: Allow non-continuous dsi clock; Fix mode to 50 FPS
- panfrost: Fix job timeout handling; Cleanups
- pl111: Use GEM object functions
- qxl: Cleanups; TTM-related changes; Pin new BOs with ttm_bo_init_reserved()
- radeon: Cleanups; TTM-related changes; Use GEM object functions
- rockchip: Use GEM object functions
- shmobile: Cleanups
- tegra: Use GEM object functions
- tidss: Set drm_plane_helper_funcs.prepare_fb
- tilcdc: Don't keep vblank interrupt enabled all the time
- tve200: Detect platform_get_irq() errors
- vc4: Use GEM object functions; Only register components once DSI is attached; Add Maxime as maintainer
- vgem: Use GEM object functions
- via: Simplify critical section in via_mem_alloc()
- virtgpu: Use GEM object functions
- virtio: Implement blob resources, host-visible and cross-device features; Support mapping of host-allocated resources; Use UUID API; Cleanups
- vkms: Use GEM object functions; Switch to SHMEM
- vmwgfx: TTM-related changes; Inline ttm_bo_swapout_all()
- xen: Use GEM object functions
- xlnx: Use GEM object functions

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201027100936.GA4858@linux-uq9g
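For orientation before the diff: the blob-resource entry points added by this patch combine roughly as sketched below. Only the virtio_gpu_cmd_*() calls come from the patch itself; the wrapper function, its parameters, and the error handling are hypothetical.

    /* Hypothetical usage sketch, not part of this patch. */
    #include "virtgpu_drv.h"

    static int example_blob_lifecycle(struct virtio_gpu_device *vgdev,
                                      struct virtio_gpu_object *bo,
                                      struct virtio_gpu_object_params *params,
                                      struct virtio_gpu_object_array *objs,
                                      uint64_t map_offset)
    {
            int ret;

            /* Queue RESOURCE_CREATE_BLOB; assuming a host-allocated blob,
             * no guest pages are attached (ents == NULL, nents == 0).
             */
            virtio_gpu_cmd_resource_create_blob(vgdev, bo, params, NULL, 0);

            /* Queue RESOURCE_MAP_BLOB; completion is signalled on resp_wq. */
            ret = virtio_gpu_cmd_map(vgdev, objs, map_offset);
            if (ret)
                    return ret;

            /* ... wait for the response, then use the mapping ... */

            virtio_gpu_cmd_unmap(vgdev, bo);
            return 0;
    }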
Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c  156
1 file changed, 148 insertions, 8 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 07945ca238e2..857f730747b6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1016,6 +1016,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                         uint32_t ctx_id,
                                         uint64_t offset, uint32_t level,
+                                        uint32_t stride,
+                                        uint32_t layer_stride,
                                         struct drm_virtgpu_3d_box *box,
                                         struct virtio_gpu_object_array *objs,
                                         struct virtio_gpu_fence *fence)
@@ -1024,11 +1026,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
         struct virtio_gpu_transfer_host_3d *cmd_p;
         struct virtio_gpu_vbuffer *vbuf;
         bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-        if (use_dma_api)
+        if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
                 dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                             shmem->pages, DMA_TO_DEVICE);
+        }
 
         cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
         memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1041,6 +1044,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
         convert_to_hw_box(&cmd_p->box, box);
         cmd_p->offset = cpu_to_le64(offset);
         cmd_p->level = cpu_to_le32(level);
+        cmd_p->stride = cpu_to_le32(stride);
+        cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
         virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
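/*
 * Note on the new parameters: stride and layer_stride describe the row and
 * slice layout of the data being transferred; the from-host path in the
 * next hunk gains the same pair. For a tightly packed 32-bit-per-pixel
 * level they would be derived as below; this helper is illustrative only
 * and not part of the patch:
 *
 *	static void example_tight_strides(uint32_t width, uint32_t height,
 *					  uint32_t *stride,
 *					  uint32_t *layer_stride)
 *	{
 *		*stride = width * 4;              // bytes per row
 *		*layer_stride = *stride * height; // bytes per 2D slice
 *	}
 */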
@@ -1048,6 +1053,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                           uint32_t ctx_id,
                                           uint64_t offset, uint32_t level,
+                                          uint32_t stride,
+                                          uint32_t layer_stride,
                                           struct drm_virtgpu_3d_box *box,
                                           struct virtio_gpu_object_array *objs,
                                           struct virtio_gpu_fence *fence)
@@ -1067,6 +1074,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
         convert_to_hw_box(&cmd_p->box, box);
         cmd_p->offset = cpu_to_le64(offset);
         cmd_p->level = cpu_to_le32(level);
+        cmd_p->stride = cpu_to_le32(stride);
+        cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
         virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1125,14 +1134,14 @@ static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
         uint32_t resp_type = le32_to_cpu(resp->hdr.type);
 
         spin_lock(&vgdev->resource_export_lock);
-        WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+        WARN_ON(obj->uuid_state != STATE_INITIALIZING);
         if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
-            obj->uuid_state == UUID_INITIALIZING) {
-                memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
-                obj->uuid_state = UUID_INITIALIZED;
+            obj->uuid_state == STATE_INITIALIZING) {
+                import_uuid(&obj->uuid, resp->uuid);
+                obj->uuid_state = STATE_OK;
         } else {
-                obj->uuid_state = UUID_INITIALIZATION_FAILED;
+                obj->uuid_state = STATE_ERR;
         }
         spin_unlock(&vgdev->resource_export_lock);
@@ -1151,7 +1160,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
         resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
         if (!resp_buf) {
                 spin_lock(&vgdev->resource_export_lock);
-                bo->uuid_state = UUID_INITIALIZATION_FAILED;
+                bo->uuid_state = STATE_ERR;
                 spin_unlock(&vgdev->resource_export_lock);
                 virtio_gpu_array_put_free(objs);
                 return -ENOMEM;
@@ -1169,3 +1178,134 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
         return 0;
 }
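/*
 * Note on the three hunks above: the UUID_* states become generic STATE_*
 * values, so the same enum can also track the blob-mapping state introduced
 * below, and the open-coded memcpy() is replaced by import_uuid() from
 * <linux/uuid.h>, which copies the 16 raw UUID bytes with identical
 * semantics:
 *
 *	import_uuid(&obj->uuid, resp->uuid);
 *	// equivalent to: memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
 */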
+
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+                                           struct virtio_gpu_vbuffer *vbuf)
+{
+        struct virtio_gpu_object *bo =
+                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+        struct virtio_gpu_resp_map_info *resp =
+                (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+        uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+        spin_lock(&vgdev->host_visible_lock);
+
+        if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+                vram->map_info = resp->map_info;
+                vram->map_state = STATE_OK;
+        } else {
+                vram->map_state = STATE_ERR;
+        }
+
+        spin_unlock(&vgdev->host_visible_lock);
+        wake_up_all(&vgdev->resp_wq);
+}
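/*
 * Note: virtio_gpu_cmd_resource_map_cb() only records the host's answer
 * under host_visible_lock and wakes resp_wq; the code that issued the map
 * must sleep until map_state leaves STATE_INITIALIZING. A simplified,
 * hypothetical waiter (the driver's real waiter lives outside this file
 * and is more careful about timeouts):
 *
 *	static int example_wait_for_map(struct virtio_gpu_device *vgdev,
 *					struct virtio_gpu_object_vram *vram)
 *	{
 *		wait_event(vgdev->resp_wq,
 *			   vram->map_state != STATE_INITIALIZING);
 *		return vram->map_state == STATE_OK ? 0 : -EIO;
 *	}
 */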
+
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+                       struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+        struct virtio_gpu_resource_map_blob *cmd_p;
+        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+        struct virtio_gpu_vbuffer *vbuf;
+        struct virtio_gpu_resp_map_info *resp_buf;
+
+        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+        if (!resp_buf) {
+                virtio_gpu_array_put_free(objs);
+                return -ENOMEM;
+        }
+
+        cmd_p = virtio_gpu_alloc_cmd_resp
+                (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+                 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+        memset(cmd_p, 0, sizeof(*cmd_p));
+
+        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+        cmd_p->offset = cpu_to_le64(offset);
+        vbuf->objs = objs;
+
+        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        return 0;
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+                          struct virtio_gpu_object *bo)
+{
+        struct virtio_gpu_resource_unmap_blob *cmd_p;
+        struct virtio_gpu_vbuffer *vbuf;
+
+        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+        memset(cmd_p, 0, sizeof(*cmd_p));
+
+        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+                                    struct virtio_gpu_object *bo,
+                                    struct virtio_gpu_object_params *params,
+                                    struct virtio_gpu_mem_entry *ents,
+                                    uint32_t nents)
+{
+        struct virtio_gpu_resource_create_blob *cmd_p;
+        struct virtio_gpu_vbuffer *vbuf;
+
+        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+        memset(cmd_p, 0, sizeof(*cmd_p));
+
+        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+        cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+        cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+        cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+        cmd_p->blob_id = cpu_to_le64(params->blob_id);
+        cmd_p->size = cpu_to_le64(params->size);
+        cmd_p->nr_entries = cpu_to_le32(nents);
+
+        vbuf->data_buf = ents;
+        vbuf->data_size = sizeof(*ents) * nents;
+
+        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        bo->created = true;
+}
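/*
 * Note: a sketch of the params structure consumed above, filled for a
 * host-allocated blob the guest may map. The VIRTGPU_BLOB_* constants come
 * from the virtio-gpu UAPI of the same series; ctx_id, blob_id and size
 * are placeholders:
 *
 *	struct virtio_gpu_object_params params = {
 *		.ctx_id     = ctx_id,                         // owning context
 *		.blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,        // host-allocated
 *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE, // guest may map
 *		.blob_id    = blob_id,                        // context-local id
 *		.size       = size,                           // size in bytes
 *	};
 */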
+
+void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+                                     uint32_t scanout_id,
+                                     struct virtio_gpu_object *bo,
+                                     struct drm_framebuffer *fb,
+                                     uint32_t width, uint32_t height,
+                                     uint32_t x, uint32_t y)
+{
+        uint32_t i;
+        struct virtio_gpu_set_scanout_blob *cmd_p;
+        struct virtio_gpu_vbuffer *vbuf;
+        uint32_t format = virtio_gpu_translate_format(fb->format->format);
+
+        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+        memset(cmd_p, 0, sizeof(*cmd_p));
+
+        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+        cmd_p->scanout_id = cpu_to_le32(scanout_id);
+
+        cmd_p->format = cpu_to_le32(format);
+        cmd_p->width = cpu_to_le32(fb->width);
+        cmd_p->height = cpu_to_le32(fb->height);
+
+        for (i = 0; i < 4; i++) {
+                cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+                cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+        }
+
+        cmd_p->r.width = cpu_to_le32(width);
+        cmd_p->r.height = cpu_to_le32(height);
+        cmd_p->r.x = cpu_to_le32(x);
+        cmd_p->r.y = cpu_to_le32(y);
+
+        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
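For a single-plane format the per-plane loop above has one meaningful entry: with XRGB8888 at 1024x768, fb->pitches[0] is 1024 * 4 = 4096 bytes and fb->offsets[0] is 0, while entries 1 through 3 simply stay zero, since DRM leaves unused planes zeroed in the framebuffer.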