| author    | Maxime Ripard <maxime@cerno.tech> | 2021-04-26 14:03:09 +0200 |
| committer | Maxime Ripard <maxime@cerno.tech> | 2021-04-26 14:03:09 +0200 |
| commit    | 355b60296143a090039211c5f0e1463f84aab65a (patch) |
| tree      | b74d4ef2aea66252ea9cf77c847de6c6e72a02b7 /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
| parent    | 91185d55b32e7e377f15fb46a62b216f8d3038d4 (diff) |
| parent    | a1a1ca70deb3ec600eeabb21de7f3f48aaae5695 (diff) |
Merge drm/drm-next into drm-misc-next
Christian needs some patches from drm/next
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 135 |
1 file changed, 85 insertions, 50 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 485f249d063a..fe4e5880509d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -56,6 +56,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+	struct amdgpu_bo_user *ubo;
 
 	amdgpu_bo_kunmap(bo);
 
@@ -70,7 +71,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 	}
 	amdgpu_bo_unref(&bo->parent);
 
-	kfree(bo->metadata);
+	if (bo->tbo.type == ttm_bo_type_device) {
+		ubo = to_amdgpu_bo_user(bo);
+		kfree(ubo->metadata);
+	}
+
 	kfree(bo);
 }
 
@@ -224,6 +229,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = NULL;
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
 	if (!*bo_ptr) {
 		r = amdgpu_bo_create(adev, &bp, bo_ptr);
@@ -519,9 +525,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 		return -ENOMEM;
 
-	*bo_ptr = NULL;
+	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
-	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+	*bo_ptr = NULL;
+	bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
 	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -611,6 +618,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 		AMDGPU_GEM_CREATE_SHADOW;
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = bo->tbo.base.resv;
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
 	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
@@ -645,6 +653,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	int r;
 
 	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+
 	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
 	if (r)
 		return r;
@@ -667,6 +676,34 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 }
 
 /**
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @ubo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by user application;
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+			  struct amdgpu_bo_param *bp,
+			  struct amdgpu_bo_user **ubo_ptr)
+{
+	struct amdgpu_bo *bo_ptr;
+	int r;
+
+	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+	r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
+	if (r)
+		return r;
+
+	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
+	return r;
+}
+/**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
  *
@@ -1008,13 +1045,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
 	struct ttm_resource_manager *man;
 
-	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
-	if (adev->flags & AMD_IS_APU) {
-		/* Useless to evict on IGP chips */
+	if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+		/* No need to evict vram on APUs for suspend to ram */
 		return 0;
 	}
-#endif
 
 	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
@@ -1045,13 +1079,17 @@ static const char *amdgpu_vram_names[] = {
  */
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
-	/* reserve PAT memory space to WC for VRAM */
-	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
-				   adev->gmc.aper_size);
+	/* On A+A platform, VRAM can be mapped as WB */
+	if (!adev->gmc.xgmi.connected_to_cpu) {
+		/* reserve PAT memory space to WC for VRAM */
+		arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+				adev->gmc.aper_size);
+
+		/* Add an MTRR for the VRAM */
+		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+				adev->gmc.aper_size);
+	}
 
-	/* Add an MTRR for the VRAM */
-	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
-					       adev->gmc.aper_size);
 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
 		 adev->gmc.mc_vram_size >> 20,
 		 (unsigned long long)adev->gmc.aper_size >> 20);
@@ -1069,27 +1107,10 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
 	amdgpu_ttm_fini(adev);
-	arch_phys_wc_del(adev->gmc.vram_mtrr);
-	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
-}
-
-/**
- * amdgpu_bo_fbdev_mmap - mmap fbdev memory
- * @bo: &amdgpu_bo buffer object
- * @vma: vma as input from the fbdev mmap method
- *
- * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
-			 struct vm_area_struct *vma)
-{
-	if (vma->vm_pgoff != 0)
-		return -EACCES;
-
-	return ttm_bo_mmap_obj(vma, &bo->tbo);
+	if (!adev->gmc.xgmi.connected_to_cpu) {
+		arch_phys_wc_del(adev->gmc.vram_mtrr);
+		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+	}
 }
 
 /**
@@ -1106,12 +1127,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct amdgpu_bo_user *ubo;
 
+	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
 	if (adev->family <= AMDGPU_FAMILY_CZ &&
 	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
 		return -EINVAL;
 
-	bo->tiling_flags = tiling_flags;
+	ubo = to_amdgpu_bo_user(bo);
+	ubo->tiling_flags = tiling_flags;
 	return 0;
 }
 
@@ -1125,10 +1149,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
+	struct amdgpu_bo_user *ubo;
+
+	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
 	dma_resv_assert_held(bo->tbo.base.resv);
 
+	ubo = to_amdgpu_bo_user(bo);
 	if (tiling_flags)
-		*tiling_flags = bo->tiling_flags;
+		*tiling_flags = ubo->tiling_flags;
 }
 
 /**
@@ -1147,13 +1175,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 			    uint32_t metadata_size, uint64_t flags)
 {
+	struct amdgpu_bo_user *ubo;
 	void *buffer;
 
+	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+	ubo = to_amdgpu_bo_user(bo);
 	if (!metadata_size) {
-		if (bo->metadata_size) {
-			kfree(bo->metadata);
-			bo->metadata = NULL;
-			bo->metadata_size = 0;
+		if (ubo->metadata_size) {
+			kfree(ubo->metadata);
+			ubo->metadata = NULL;
+			ubo->metadata_size = 0;
 		}
 		return 0;
 	}
@@ -1165,10 +1196,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 	if (buffer == NULL)
 		return -ENOMEM;
 
-	kfree(bo->metadata);
-	bo->metadata_flags = flags;
-	bo->metadata = buffer;
-	bo->metadata_size = metadata_size;
+	kfree(ubo->metadata);
+	ubo->metadata_flags = flags;
+	ubo->metadata = buffer;
+	ubo->metadata_size = metadata_size;
 
 	return 0;
 }
@@ -1192,21 +1223,25 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 			   size_t buffer_size, uint32_t *metadata_size,
 			   uint64_t *flags)
 {
+	struct amdgpu_bo_user *ubo;
+
 	if (!buffer && !metadata_size)
 		return -EINVAL;
 
+	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+	ubo = to_amdgpu_bo_user(bo);
 	if (buffer) {
-		if (buffer_size < bo->metadata_size)
+		if (buffer_size < ubo->metadata_size)
 			return -EINVAL;
 
-		if (bo->metadata_size)
-			memcpy(buffer, bo->metadata, bo->metadata_size);
+		if (ubo->metadata_size)
+			memcpy(buffer, ubo->metadata, ubo->metadata_size);
 	}
 
 	if (metadata_size)
-		*metadata_size = bo->metadata_size;
+		*metadata_size = ubo->metadata_size;
 	if (flags)
-		*flags = bo->metadata_flags;
+		*flags = ubo->metadata_flags;
 
 	return 0;
 }
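For context on how the new interface is meant to be used, the sketch below is illustrative only and is not part of this patch: the helper name example_create_user_bo() and the specific size/domain/alignment values are made up, while amdgpu_bo_create_user(), struct amdgpu_bo_param and the bo_ptr_size field come from the diff above and the remaining bp fields follow the existing amdgpu_bo_create_reserved() setup. It assumes struct amdgpu_bo_user embeds struct amdgpu_bo so that to_amdgpu_bo_user() can convert between the two.

/* Hypothetical caller, for illustration only -- not code from this patch. */
#include "amdgpu_object.h"

static int example_create_user_bo(struct amdgpu_device *adev,
				  struct amdgpu_bo_user **ubo)
{
	struct amdgpu_bo_param bp;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.type = ttm_bo_type_device;	/* user BOs carry tiling/metadata state */
	bp.resv = NULL;

	/*
	 * bo_ptr_size is overwritten inside amdgpu_bo_create_user() with
	 * sizeof(struct amdgpu_bo_user), so amdgpu_bo_do_create() kzalloc()s
	 * enough room for the user-only fields that extend the base BO.
	 */
	return amdgpu_bo_create_user(adev, &bp, ubo);
}

The bo_ptr_size parameter is what lets kernel-internal BOs keep allocating a bare struct amdgpu_bo while user-visible BOs get the larger struct amdgpu_bo_user; this is also why amdgpu_bo_destroy() checks bo->tbo.type before freeing the metadata, and why the tiling-flag and metadata helpers above BUG_ON() a kernel BO before converting with to_amdgpu_bo_user().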