static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else
		kfree(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
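/*
 * Hedged usage sketch (not from the source above): ttm_bo_release_list()
 * is the release callback for bo->list_kref, so callers drop their list
 * reference through kref_put() rather than calling it directly. The
 * wrapper name below is hypothetical.
 */
static void my_bo_list_ref_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->list_kref, ttm_bo_release_list);
}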
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
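/*
 * Hedged usage sketch, assuming only the ttm_tt_init() signature shown
 * above: a driver typically embeds struct ttm_tt in its own TT object
 * and calls ttm_tt_init() from its ttm_tt_create() hook. The names
 * my_ttm_tt and my_ttm_tt_create are hypothetical, not from the source.
 */
struct my_ttm_tt {
	struct ttm_tt ttm;
	/* driver-private state would follow here */
};

static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct my_ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	if (ttm_tt_init(&tt->ttm, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}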
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
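/*
 * Hedged sketch, mirroring the plain ttm_tt case above: a driver that
 * needs per-page DMA addresses embeds struct ttm_dma_tt instead of
 * struct ttm_tt, initializes it with ttm_dma_tt_init(), and still hands
 * the embedded ttm_tt back to TTM. Names are hypothetical.
 */
struct my_dma_tt {
	struct ttm_dma_tt dma_tt;
	/* driver-private state would follow here */
};

static struct ttm_tt *my_dma_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct my_dma_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	if (ttm_dma_tt_init(&tt->dma_tt, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}
	return &tt->dma_tt.ttm;
}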
/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0)) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);

	/*
	 * Make processes trying to reserve really pick it up.
	 */
	smp_mb__after_atomic_dec();
	wake_up_all(&bo->event_queue);
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    vm_page_t dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve,
				      no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					 no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
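/*
 * Hedged sketch: ttm_bo_handle_move_mem() above invokes the optional
 * driver move_notify() hook as bdev->driver->move_notify(bo, mem)
 * before moving the data. Assuming the two-argument signature implied
 * by that call, a minimal no-op implementation might look like this;
 * the function name is hypothetical.
 */
static void my_bo_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *new_mem)
{
	/* e.g. update GPU page tables or driver bookkeeping here */
}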