Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c | 345
1 file changed, 88 insertions(+), 257 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cc6a4e7551e3..97ac662a47cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -77,26 +77,12 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
 	return 0;
 }
 
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
-			       int mem_type)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-
-	drm_printf(p, "    has_type: %d\n", man->has_type);
-	drm_printf(p, "    use_type: %d\n", man->use_type);
-	drm_printf(p, "    flags: 0x%08X\n", man->flags);
-	drm_printf(p, "    size: %llu\n", man->size);
-	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
-	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM)
-		(*man->func->debug)(man, p);
-}
-
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 					struct ttm_placement *placement)
 {
 	struct drm_printer p = drm_debug_printer(TTM_PFX);
 	int i, ret, mem_type;
+	struct ttm_resource_manager *man;
 
 	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
 		   bo, bo->mem.num_pages, bo->mem.size >> 10,
@@ -108,7 +94,8 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 			return;
 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
 			   i, placement->placement[i].flags, mem_type);
-		ttm_mem_type_debug(bo->bdev, &p, mem_type);
+		man = ttm_manager_type(bo->bdev, mem_type);
+		ttm_resource_manager_debug(man, &p);
 	}
 }
 
@@ -145,10 +132,10 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
 }
 
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
-				  struct ttm_mem_reg *mem)
+				  struct ttm_resource *mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man;
+	struct ttm_resource_manager *man;
 
 	if (!list_empty(&bo->lru))
 		return;
@@ -156,10 +143,10 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 	if (mem->placement & TTM_PL_FLAG_NO_EVICT)
 		return;
 
-	man = &bdev->man[mem->mem_type];
+	man = ttm_manager_type(bdev, mem->mem_type);
 	list_add_tail(&bo->lru, &man->lru[bo->priority]);
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+	if (man->use_tt && bo->ttm &&
 	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
				     TTM_PAGE_FLAG_SWAPPED))) {
 		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
@@ -223,7 +210,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
-		struct ttm_mem_type_manager *man;
+		struct ttm_resource_manager *man;
 
 		if (!pos->first)
 			continue;
@@ -231,14 +218,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		dma_resv_assert_held(pos->first->base.resv);
 		dma_resv_assert_held(pos->last->base.resv);
 
-		man = &pos->first->bdev->man[TTM_PL_TT];
+		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 				    &pos->last->lru);
 	}
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
-		struct ttm_mem_type_manager *man;
+		struct ttm_resource_manager *man;
 
 		if (!pos->first)
 			continue;
@@ -246,7 +233,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		dma_resv_assert_held(pos->first->base.resv);
 		dma_resv_assert_held(pos->last->base.resv);
 
-		man = &pos->first->bdev->man[TTM_PL_VRAM];
+		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 				    &pos->last->lru);
 	}
@@ -268,12 +255,12 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
-				  struct ttm_mem_reg *mem, bool evict,
+				  struct ttm_resource *mem, bool evict,
 				  struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
-	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
 	ret = ttm_mem_io_lock(old_man, true);
@@ -286,13 +273,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	 * Create and bind a ttm if required.
 	 */
 
-	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
-		if (bo->ttm == NULL) {
-			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
-			ret = ttm_tt_create(bo, zero);
-			if (ret)
-				goto out_err;
-		}
+	if (new_man->use_tt) {
+		/* Zero init the new TTM structure if the old location should
+		 * have used one as well.
+		 */
+		ret = ttm_tt_create(bo, old_man->use_tt);
+		if (ret)
+			goto out_err;
 
 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 		if (ret)
@@ -315,8 +302,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	if (bdev->driver->move_notify)
 		bdev->driver->move_notify(bo, evict, mem);
 
-	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+	if (old_man->use_tt && new_man->use_tt)
 		ret = ttm_bo_move_ttm(bo, ctx, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, ctx, mem);
@@ -340,8 +326,8 @@ moved:
 	return 0;
 
 out_err:
-	new_man = &bdev->man[bo->mem.mem_type];
-	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
+	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+	if (!new_man->use_tt) {
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
@@ -364,7 +350,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 
 	ttm_tt_destroy(bo->ttm);
 	bo->ttm = NULL;
-	ttm_bo_mem_put(bo, &bo->mem);
+	ttm_resource_free(bo, &bo->mem);
 }
 
 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -552,7 +538,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 	size_t acc_size = bo->acc_size;
 	int ret;
 
@@ -643,7 +629,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_reg evict_mem;
+	struct ttm_resource evict_mem;
 	struct ttm_placement placement;
 	int ret = 0;
 
@@ -654,10 +640,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	bdev->driver->evict_flags(bo, &placement);
 
 	if (!placement.num_placement && !placement.num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
+		ttm_bo_wait(bo, false, false);
+		ttm_bo_cleanup_memtype_use(bo);
 
 		return ttm_tt_create(bo, false);
 	}
@@ -680,7 +665,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
 			pr_err("Buffer eviction failed\n");
-		ttm_bo_mem_put(bo, &evict_mem);
+		ttm_resource_free(bo, &evict_mem);
 		goto out;
 	}
 	bo->evicted = true;
@@ -769,14 +754,13 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 	return r == -EDEADLK ? -EBUSY : r;
 }
 
-static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
-			       uint32_t mem_type,
-			       const struct ttm_place *place,
-			       struct ttm_operation_ctx *ctx,
-			       struct ww_acquire_ctx *ticket)
+int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+			struct ttm_resource_manager *man,
+			const struct ttm_place *place,
+			struct ttm_operation_ctx *ctx,
+			struct ww_acquire_ctx *ticket)
 {
 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	bool locked = false;
 	unsigned i;
 	int ret;
@@ -842,38 +826,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	return ret;
 }
 
-static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
-			  const struct ttm_place *place,
-			  struct ttm_mem_reg *mem)
-{
-	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
-
-	mem->mm_node = NULL;
-	if (!man->func || !man->func->get_node)
-		return 0;
-
-	return man->func->get_node(man, bo, place, mem);
-}
-
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
-{
-	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
-
-	if (!man->func || !man->func->put_node)
-		return;
-
-	man->func->put_node(man, mem);
-	mem->mm_node = NULL;
-	mem->mem_type = TTM_PL_SYSTEM;
-}
-EXPORT_SYMBOL(ttm_bo_mem_put);
-
 /**
  * Add the last move fence to the BO and reserve a new shared slot.
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
-				 struct ttm_mem_type_manager *man,
-				 struct ttm_mem_reg *mem,
+				 struct ttm_resource_manager *man,
+				 struct ttm_resource *mem,
 				 bool no_wait_gpu)
 {
 	struct dma_fence *fence;
@@ -910,22 +868,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_mem_reg *mem,
+				  struct ttm_resource *mem,
 				  struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
 	struct ww_acquire_ctx *ticket;
 	int ret;
 
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
-		ret = ttm_bo_mem_get(bo, place, mem);
+		ret = ttm_resource_alloc(bo, place, mem);
 		if (likely(!ret))
 			break;
 		if (unlikely(ret != -ENOSPC))
 			return ret;
-		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
+		ret = ttm_mem_evict_first(bdev, man, place, ctx,
 					  ticket);
 		if (unlikely(ret != 0))
 			return ret;
@@ -934,7 +892,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
-static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
 				      uint32_t cur_placement,
 				      uint32_t proposed_placement)
 {
@@ -959,7 +917,7 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 	return result;
 }
 
-static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man,
 				 uint32_t mem_type,
 				 const struct ttm_place *place,
 				 uint32_t *masked_placement)
@@ -991,12 +949,12 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  */
 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 				const struct ttm_place *place,
-				struct ttm_mem_reg *mem,
+				struct ttm_resource *mem,
 				struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	uint32_t mem_type = TTM_PL_SYSTEM;
-	struct ttm_mem_type_manager *man;
+	struct ttm_resource_manager *man;
 	uint32_t cur_flags = 0;
 	int ret;
 
@@ -1004,8 +962,8 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	man = &bdev->man[mem_type];
-	if (!man->has_type || !man->use_type)
+	man = ttm_manager_type(bdev, mem_type);
+	if (!man || !ttm_resource_manager_used(man))
 		return -EBUSY;
 
 	if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
@@ -1039,7 +997,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			struct ttm_mem_reg *mem,
+			struct ttm_resource *mem,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1052,7 +1010,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < placement->num_placement; ++i) {
 		const struct ttm_place *place = &placement->placement[i];
-		struct ttm_mem_type_manager *man;
+		struct ttm_resource_manager *man;
 
 		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
 		if (ret == -EBUSY)
@@ -1061,16 +1019,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			goto error;
 
 		type_found = true;
-		ret = ttm_bo_mem_get(bo, place, mem);
+		ret = ttm_resource_alloc(bo, place, mem);
 		if (ret == -ENOSPC)
 			continue;
 		if (unlikely(ret))
 			goto error;
 
-		man = &bdev->man[mem->mem_type];
+		man = ttm_manager_type(bdev, mem->mem_type);
 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
-			ttm_bo_mem_put(bo, mem);
+			ttm_resource_free(bo, mem);
 			if (ret == -EBUSY)
 				continue;
 
@@ -1105,9 +1063,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 error:
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
-		spin_lock(&ttm_bo_glob.lru_lock);
-		ttm_bo_move_to_lru_tail(bo, NULL);
-		spin_unlock(&ttm_bo_glob.lru_lock);
+		ttm_bo_move_to_lru_tail_unlocked(bo);
 	}
 
 	return ret;
@@ -1119,7 +1075,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			      struct ttm_operation_ctx *ctx)
 {
 	int ret = 0;
-	struct ttm_mem_reg mem;
+	struct ttm_resource mem;
 
 	dma_resv_assert_held(bo->base.resv);
 
@@ -1139,13 +1095,13 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
 out_unlock:
 	if (ret)
-		ttm_bo_mem_put(bo, &mem);
+		ttm_resource_free(bo, &mem);
 	return ret;
 }
 
 static bool ttm_bo_places_compat(const struct ttm_place *places,
 				 unsigned num_placement,
-				 struct ttm_mem_reg *mem,
+				 struct ttm_resource *mem,
 				 uint32_t *new_flags)
 {
 	unsigned i;
@@ -1168,7 +1124,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places,
 }
 
 bool ttm_bo_mem_compat(struct ttm_placement *placement,
-		       struct ttm_mem_reg *mem,
+		       struct ttm_resource *mem,
 		       uint32_t *new_flags)
 {
 	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
@@ -1335,9 +1291,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	spin_lock(&ttm_bo_glob.lru_lock);
-	ttm_bo_move_to_lru_tail(bo, NULL);
-	spin_unlock(&ttm_bo_glob.lru_lock);
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 
 	return ret;
 }
@@ -1426,144 +1380,24 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_bo_create);
 
-static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   unsigned mem_type)
-{
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false,
-		.flags = TTM_OPT_FLAG_FORCE_ALLOC
-	};
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct ttm_bo_global *glob = &ttm_bo_glob;
-	struct dma_fence *fence;
-	int ret;
-	unsigned i;
-
-	/*
-	 * Can't use standard list traversal since we're unlocking.
-	 */
-
-	spin_lock(&glob->lru_lock);
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		while (!list_empty(&man->lru[i])) {
-			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
-						  NULL);
-			if (ret)
-				return ret;
-			spin_lock(&glob->lru_lock);
-		}
-	}
-	spin_unlock(&glob->lru_lock);
-
-	spin_lock(&man->move_lock);
-	fence = dma_fence_get(man->move);
-	spin_unlock(&man->move_lock);
-
-	if (fence) {
-		ret = dma_fence_wait(fence, false);
-		dma_fence_put(fence);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
-	struct ttm_mem_type_manager *man;
-	int ret = -EINVAL;
-
-	if (mem_type >= TTM_NUM_MEM_TYPES) {
-		pr_err("Illegal memory type %d\n", mem_type);
-		return ret;
-	}
-	man = &bdev->man[mem_type];
-
-	if (!man->has_type) {
-		pr_err("Trying to take down uninitialized memory manager type %u\n",
-		       mem_type);
-		return ret;
-	}
-
-	man->use_type = false;
-	man->has_type = false;
-
-	ret = 0;
-	if (mem_type > 0) {
-		ret = ttm_bo_force_list_clean(bdev, mem_type);
-		if (ret) {
-			pr_err("Cleanup eviction failed\n");
-			return ret;
-		}
-
-		ret = (*man->func->takedown)(man);
-	}
-
-	dma_fence_put(man->move);
-	man->move = NULL;
-
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_clean_mm);
-
 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
 
 	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
 		pr_err("Illegal memory manager memory type %u\n", mem_type);
 		return -EINVAL;
 	}
 
-	if (!man->has_type) {
+	if (!man) {
 		pr_err("Memory type %u has not been initialized\n", mem_type);
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, mem_type);
+	return ttm_resource_manager_force_list_clean(bdev, man);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
-int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_size)
-{
-	int ret;
-	struct ttm_mem_type_manager *man;
-	unsigned i;
-
-	BUG_ON(type >= TTM_NUM_MEM_TYPES);
-	man = &bdev->man[type];
-	BUG_ON(man->has_type);
-	man->use_io_reserve_lru = false;
-	mutex_init(&man->io_reserve_mutex);
-	spin_lock_init(&man->move_lock);
-	INIT_LIST_HEAD(&man->io_reserve_lru);
-
-	ret = bdev->driver->init_mem_type(bdev, type, man);
-	if (ret)
-		return ret;
-	man->bdev = bdev;
-
-	if (type != TTM_PL_SYSTEM) {
-		ret = (*man->func->init)(man, p_size);
-		if (ret)
-			return ret;
-	}
-	man->has_type = true;
-	man->use_type = true;
-	man->size = p_size;
-
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
-		INIT_LIST_HEAD(&man->lru[i]);
-	man->move = NULL;
-
-	return 0;
-}
-EXPORT_SYMBOL(ttm_bo_init_mm);
-
 static void ttm_bo_global_kobj_release(struct kobject *kobj)
 {
 	struct ttm_bo_global *glob =
@@ -1628,21 +1462,12 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 	int ret = 0;
-	unsigned i = TTM_NUM_MEM_TYPES;
-	struct ttm_mem_type_manager *man;
-
-	while (i--) {
-		man = &bdev->man[i];
-		if (man->has_type) {
-			man->use_type = false;
-			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
-				ret = -EBUSY;
-				pr_err("DRM memory manager type %d is not clean\n",
-				       i);
-			}
-			man->has_type = false;
-		}
-	}
+	unsigned i;
+	struct ttm_resource_manager *man;
+
+	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+	ttm_resource_manager_set_used(man, false);
+	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
 
 	mutex_lock(&ttm_global_mutex);
 	list_del(&bdev->device_list);
@@ -1655,7 +1480,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
-		if (list_empty(&bdev->man[0].lru[0]))
+		if (list_empty(&man->lru[0]))
 			pr_debug("Swap list %d was clean\n", i);
 	spin_unlock(&glob->lru_lock);
 
@@ -1666,6 +1491,23 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
+static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
+{
+	struct ttm_resource_manager *man = &bdev->sysman;
+
+	/*
+	 * Initialize the system memory buffer type.
+	 * Other types need to be driver / IOCTL initialized.
+	 */
+	man->use_tt = true;
+	man->available_caching = TTM_PL_MASK_CACHING;
+	man->default_caching = TTM_PL_FLAG_CACHED;
+
+	ttm_resource_manager_init(man, 0);
+	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
+	ttm_resource_manager_set_used(man, true);
+}
+
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
@@ -1684,15 +1526,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 
 	bdev->driver = driver;
 
-	memset(bdev->man, 0, sizeof(bdev->man));
-
-	/*
-	 * Initialize the system memory buffer type.
-	 * Other types need to be driver / IOCTL initialized.
-	 */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
-	if (unlikely(ret != 0))
-		goto out_no_sys;
+	ttm_bo_init_sysman(bdev);
 
 	bdev->vma_manager = vma_manager;
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
@@ -1704,9 +1538,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	mutex_unlock(&ttm_global_mutex);
 
 	return 0;
-out_no_sys:
-	ttm_bo_global_release();
-	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
 
@@ -1725,7 +1556,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 
 	ttm_mem_io_lock(man, false);
 	ttm_bo_unmap_virtual_locked(bo);
@@ -1812,7 +1643,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
 	    bo->ttm->caching_state != tt_cached) {
 		struct ttm_operation_ctx ctx = { false, false };
-		struct ttm_mem_reg evict_mem;
+		struct ttm_resource evict_mem;
 
 		evict_mem = bo->mem;
 		evict_mem.mm_node = NULL;
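Note: the recurring change in the hunks above is replacing direct bdev->man[mem_type] indexing with a ttm_manager_type() lookup plus explicit NULL / ttm_resource_manager_used() checks, since a memory type may now have no manager attached at all. A minimal sketch of that lookup pattern, using only the helpers visible in this diff (the wrapper name my_driver_show_mem_type is hypothetical, not part of the patch):

	/*
	 * Illustrative sketch only: look up the per-type resource manager and
	 * bail out when the type has no manager or the manager is unused,
	 * mirroring the checks added in ttm_bo_mem_placement() above.
	 */
	static void my_driver_show_mem_type(struct ttm_bo_device *bdev,
					    struct drm_printer *p, int mem_type)
	{
		struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);

		if (!man || !ttm_resource_manager_used(man))
			return;

		ttm_resource_manager_debug(man, p);
	}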