author    | Francois Dugast <francois.dugast@intel.com> | 2023-11-22 14:38:24 +0000
committer | Rodrigo Vivi <rodrigo.vivi@intel.com>       | 2023-12-21 11:45:16 -0500
commit    | 4bc9dd98e0a7e8a14386fc8341379ee09e594987 (patch)
tree      | 000c2146edb15ac5915976798d695a6ea55455bd /drivers/gpu/drm/xe/xe_query.c
parent    | 4e03b584143e18eabd091061a1716515da928dcb (diff)
drm/xe/uapi: Align on a common way to return arrays (memory regions)
The uAPI provides queries which return arrays of elements. As of now,
the struct format differs depending on which element is queried.
Fix this for memory regions by applying the pattern below:
struct drm_xe_query_Xs {
        __u32 num_Xs;
        struct drm_xe_X Xs[];
        ...
}
This also removes "query" from the name of struct drm_xe_query_mem_region
(now struct drm_xe_mem_region), as that struct is not itself returned from
the query IOCTL. There is no functional change.
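
Applied to memory regions, the pattern maps onto the structs touched by this
patch roughly as follows. This is a sketch for illustration only: the field
names come from the diff below, but the field widths, padding and omitted
reserved fields are approximations; see include/uapi/drm/xe_drm.h for the
authoritative layout.

struct drm_xe_mem_region {                      /* was: struct drm_xe_query_mem_region */
        __u16 mem_class;                        /* DRM_XE_MEM_REGION_CLASS_SYSMEM / _VRAM */
        __u16 instance;
        __u32 min_page_size;
        __u64 total_size;
        __u64 used;                             /* only filled if perfmon_capable() */
        __u64 cpu_visible_size;
        __u64 cpu_visible_used;
        /* reserved fields omitted in this sketch */
};

struct drm_xe_query_mem_regions {
        __u32 num_mem_regions;                  /* number of valid entries in mem_regions[] */
        __u32 pad;
        struct drm_xe_mem_region mem_regions[]; /* flexible array, num_mem_regions elements */
};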
v2: Only rename drm_xe_query_mem_region to drm_xe_mem_region
(José Roberto de Souza)
v3: Rename usage to mem_regions in xe_query.c (José Roberto de Souza)
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_query.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_query.c | 46
1 file changed, 24 insertions, 22 deletions
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 0cbfeaeb1330..34474f8b97f6 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -240,14 +240,14 @@ static size_t calc_mem_regions_size(struct xe_device *xe)
                 if (ttm_manager_type(&xe->ttm, i))
                         num_managers++;

-        return offsetof(struct drm_xe_query_mem_regions, regions[num_managers]);
+        return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
 }

 static int query_mem_regions(struct xe_device *xe,
-                            struct drm_xe_device_query *query)
+                             struct drm_xe_device_query *query)
 {
         size_t size = calc_mem_regions_size(xe);
-        struct drm_xe_query_mem_regions *usage;
+        struct drm_xe_query_mem_regions *mem_regions;
         struct drm_xe_query_mem_regions __user *query_ptr =
                 u64_to_user_ptr(query->data);
         struct ttm_resource_manager *man;
@@ -260,50 +260,52 @@ static int query_mem_regions(struct xe_device *xe,
                 return -EINVAL;
         }

-        usage = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_DBG(xe, !usage))
+        mem_regions = kzalloc(size, GFP_KERNEL);
+        if (XE_IOCTL_DBG(xe, !mem_regions))
                 return -ENOMEM;

         man = ttm_manager_type(&xe->ttm, XE_PL_TT);
-        usage->regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
-        usage->regions[0].instance = 0;
-        usage->regions[0].min_page_size = PAGE_SIZE;
-        usage->regions[0].total_size = man->size << PAGE_SHIFT;
+        mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
+        mem_regions->mem_regions[0].instance = 0;
+        mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
+        mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
         if (perfmon_capable())
-                usage->regions[0].used = ttm_resource_manager_usage(man);
-        usage->num_regions = 1;
+                mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+        mem_regions->num_mem_regions = 1;

         for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
                 man = ttm_manager_type(&xe->ttm, i);
                 if (man) {
-                        usage->regions[usage->num_regions].mem_class =
+                        mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
                                 DRM_XE_MEM_REGION_CLASS_VRAM;
-                        usage->regions[usage->num_regions].instance =
-                                usage->num_regions;
-                        usage->regions[usage->num_regions].min_page_size =
+                        mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
+                                mem_regions->num_mem_regions;
+                        mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
                                 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
                                 SZ_64K : PAGE_SIZE;
-                        usage->regions[usage->num_regions].total_size =
+                        mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
                                 man->size;

                         if (perfmon_capable()) {
                                 xe_ttm_vram_get_used(man,
-                                                     &usage->regions[usage->num_regions].used,
-                                                     &usage->regions[usage->num_regions].cpu_visible_used);
+                                                     &mem_regions->mem_regions
+                                                     [mem_regions->num_mem_regions].used,
+                                                     &mem_regions->mem_regions
+                                                     [mem_regions->num_mem_regions].cpu_visible_used);
                         }

-                        usage->regions[usage->num_regions].cpu_visible_size =
+                        mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
                                 xe_ttm_vram_get_cpu_visible_size(man);
-                        usage->num_regions++;
+                        mem_regions->num_mem_regions++;
                 }
         }

-        if (!copy_to_user(query_ptr, usage, size))
+        if (!copy_to_user(query_ptr, mem_regions, size))
                 ret = 0;
         else
                 ret = -ENOSPC;

-        kfree(usage);
+        kfree(mem_regions);
         return ret;
 }
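
For context, a minimal userspace sketch of how a query following this pattern
is typically consumed. It is illustrative only and not part of this patch: it
assumes the standard two-call DRM_IOCTL_XE_DEVICE_QUERY flow served by
query_mem_regions() above, the xe_drm.h uAPI header, and an already-opened Xe
DRM file descriptor; error handling is trimmed.

/* Illustrative userspace sketch (not from this patch): consume the
 * memory-regions query via the two-call size negotiation shown above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>         /* assumed install location of the uAPI header */

static struct drm_xe_query_mem_regions *get_mem_regions(int fd)
{
        struct drm_xe_device_query query = {
                .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
        };
        struct drm_xe_query_mem_regions *regions;

        /* First call: size == 0, the kernel reports the required buffer size. */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                return NULL;

        regions = calloc(1, query.size);
        if (!regions)
                return NULL;

        /* Second call: the kernel copies num_mem_regions plus mem_regions[]. */
        query.data = (uintptr_t)regions;
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
                free(regions);
                return NULL;
        }
        return regions;
}

static void print_mem_regions(int fd)
{
        struct drm_xe_query_mem_regions *regions = get_mem_regions(fd);

        if (!regions)
                return;

        /* The common "num_Xs + Xs[]" pattern: iterate the flexible array. */
        for (unsigned int i = 0; i < regions->num_mem_regions; i++)
                printf("region %u: class %u, total %llu bytes\n",
                       regions->mem_regions[i].instance,
                       regions->mem_regions[i].mem_class,
                       (unsigned long long)regions->mem_regions[i].total_size);

        free(regions);
}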