author     Ashutosh Dixit <ashutosh.dixit@intel.com>   2024-06-17 18:46:05 -0700
committer  Ashutosh Dixit <ashutosh.dixit@intel.com>   2024-06-18 12:40:41 -0700
commit     392bf22238ff88506f410c464ba0c7a84e9de471
tree       0ebc1a36b4f364e1934ebfcb7c7693be8f22f385   /drivers/gpu/drm/xe/xe_oa.c
parent     dd6b4718c3bab611588922ae8a7736c58eafcc93
drm/xe/oa/uapi: OA buffer mmap
Allow the OA buffer to be mmap'd to userspace. This is needed for the MMIO
trigger use case. Even otherwise, with whitelisted OA head/tail pointer
registers, userspace can receive and interpret OA data from the mmap'd buffer
without issuing read()s on the OA stream fd.
v2: Remove unmap_mapping_range from xe_oa_release (Thomas H)
Use vm_flags_mod (Umesh)
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Suggested-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240618014609.3233427-14-ashutosh.dixit@intel.com
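
As a rough illustration of the use case in the commit message (not part of the patch), userspace could map the OA buffer along the following lines. The stream_fd argument is assumed to be an already-open xe OA stream fd, and the 16 MiB OA_BUF_SIZE is an assumed placeholder that must match the driver's actual OA buffer size.

/*
 * Illustrative sketch only, not from the patch: map the OA buffer of an
 * already-open xe OA stream fd. OA_BUF_SIZE and map_oa_buffer() are
 * placeholders; the 16 MiB size is an assumption.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define OA_BUF_SIZE	(16UL << 20)	/* assumed OA buffer size */

static const uint8_t *map_oa_buffer(int stream_fd)
{
	/* Must be read-only, MAP_PRIVATE and exactly the OA buffer size */
	void *p = mmap(NULL, OA_BUF_SIZE, PROT_READ, MAP_PRIVATE, stream_fd, 0);

	if (p == MAP_FAILED) {
		perror("mmap OA buffer");
		return NULL;
	}

	return p;	/* OA reports can now be parsed directly from the mapping */
}

With such a read-only mapping in place, and with the OA head/tail pointer registers whitelisted, a profiler can poll the tail pointer and parse OA reports straight out of the mapped buffer rather than read()ing them from the stream fd.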
Diffstat (limited to 'drivers/gpu/drm/xe/xe_oa.c')
-rw-r--r--   drivers/gpu/drm/xe/xe_oa.c   46
1 file changed, 46 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 038caeb7c9e7..00654213cd93 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -824,6 +824,8 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
 		return PTR_ERR(bo);
 
 	stream->oa_buffer.bo = bo;
+	/* mmap implementation requires OA buffer to be in system memory */
+	xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
 	stream->oa_buffer.vaddr = bo->vmap.vaddr;
 	return 0;
 }
@@ -1125,6 +1127,49 @@ static int xe_oa_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct xe_oa_stream *stream = file->private_data;
+	struct xe_bo *bo = stream->oa_buffer.bo;
+	unsigned long start = vma->vm_start;
+	int i, ret;
+
+	if (xe_perf_stream_paranoid && !perfmon_capable()) {
+		drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n");
+		return -EACCES;
+	}
+
+	/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
+	if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+		drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Only support VM_READ, enforce MAP_PRIVATE by checking for
+	 * VM_MAYSHARE, don't copy the vma on fork
+	 */
+	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) {
+		drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n");
+		return -EINVAL;
+	}
+	vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
+		     VM_MAYWRITE | VM_MAYEXEC);
+
+	xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
+		  (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+	for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
+		ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
+				      PAGE_SIZE, vma->vm_page_prot);
+		if (ret)
+			break;
+
+		start += PAGE_SIZE;
+	}
+
+	return ret;
+}
+
 static const struct file_operations xe_oa_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
@@ -1132,6 +1177,7 @@ static const struct file_operations xe_oa_fops = {
 	.poll		= xe_oa_poll,
 	.read		= xe_oa_read,
 	.unlocked_ioctl	= xe_oa_ioctl,
+	.mmap		= xe_oa_mmap,
 };
 
 static bool engine_supports_mi_query(struct xe_hw_engine *hwe)