void ttm_tt_destroy(struct ttm_tt *ttm) { struct ttm_backend *be; if (unlikely(ttm == NULL)) return; if (likely(ttm->pages != NULL)) { if (ttm->page_flags & TTM_PAGE_FLAG_USER) ttm_tt_free_user_pages(ttm); else ttm_tt_free_alloced_pages(ttm); ttm_tt_free_page_directory(ttm); } be = ttm->be; if (likely(be != NULL)) { be->func->destroy(be); ttm->be = NULL; } if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) && ttm->swap_storage) fput(ttm->swap_storage); kfree(ttm); }
/* This is used for sg_table which is derived from user-pointer */ static void ttm_ub_bo_user_destroy(struct ttm_buffer_object *bo) { struct ttm_bo_user_object *user_bo = container_of(bo, struct ttm_bo_user_object, bo); #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)) if (bo->sg) { ttm_tt_free_user_pages(bo); sg_free_table(bo->sg); kfree(bo->sg); bo->sg = NULL; } #endif ttm_mem_global_free(bo->glob->mem_glob, bo->acc_size); kfree(user_bo); }
/**
 * ttm_tt_set_user - pin a user-space page range and attach it to a ttm_tt.
 * @ttm: the ttm_tt; must carry TTM_PAGE_FLAG_USER and be sized to
 *       @num_pages (both enforced by BUG_ON below).
 * @tsk: task whose address space contains the range.
 * @start: user-space start address of the range.
 * @num_pages: number of pages to pin; must equal ttm->num_pages.
 *
 * Charges the pages to the global memory accounting, then pins them with
 * get_user_pages() under the task's mmap_sem.  On success the ttm is left
 * in the tt_unbound state.  Returns 0 or a negative errno.
 */
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	/* Pin for write only when the ttm itself is writable. */
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */
	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	/*
	 * NOTE(review): `ret` (int) is compared with `num_pages`
	 * (unsigned long); a negative error return converts to a huge
	 * unsigned value, so a failure is still caught here for the
	 * write case.  For !write, a partial or failed pin falls
	 * through and returns 0 with NULL entries left in ttm->pages —
	 * confirm that read-only callers really tolerate that.
	 */
	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		/* Undo the accounting charged above before failing. */
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
/**
 * ttm_tt_swapout - copy a ttm_tt's backing pages out to shmem storage.
 * @ttm: the ttm_tt to swap out; must be tt_unbound or tt_unpopulated and
 *       in the tt_cached caching state (both enforced by BUG_ON).
 * @persistant_swap_storage: optional pre-existing shmem file to copy the
 *       pages into; when NULL a throw-away shmem file is created here.
 *
 * User-backed ttms are simply unpinned (their pages stay referenced by
 * the originating vmas).  Otherwise each allocated page is copied into
 * the shmem mapping and the originals are freed.  Returns 0 on success
 * or a negative errno; on failure a locally created shmem file is
 * released again.
 */
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */
	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		/* Never-populated slots have nothing to copy out. */
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		/*
		 * kmap_atomic disables preemption implicitly on newer
		 * kernels; the explicit preempt_disable() here keeps the
		 * two-slot (KM_USER0/KM_USER1) variant safe on older ones.
		 */
		preempt_disable();
#ifdef VMW_HAS_STACK_KMAP_ATOMIC
		from_virtual = kmap_atomic(from_page);
		to_virtual = kmap_atomic(to_page);
#else
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
#endif
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		/* Unmap in reverse order of mapping. */
#ifdef VMW_HAS_STACK_KMAP_ATOMIC
		kunmap_atomic(to_virtual);
		kunmap_atomic(from_virtual);
#else
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
#endif
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		/* Drop the reference taken by shmem_read_mapping_page(). */
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	/* Only release swap storage we created ourselves. */
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}