int ttm_tt_populate(struct ttm_tt *ttm) { struct page *page; unsigned long i; struct ttm_backend *be; int ret; if (ttm->state != tt_unpopulated) return 0; if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { ret = ttm_tt_swapin(ttm); if (unlikely(ret != 0)) return ret; } be = ttm->be; for (i = 0; i < ttm->num_pages; ++i) { page = __ttm_tt_get_page(ttm, i); if (!page) return -ENOMEM; } be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page); ttm->state = tt_unbound; return 0; }
static int ttm_tt_swapin(struct ttm_tt *ttm) { struct address_space *swap_space; struct file *swap_storage; struct page *from_page; struct page *to_page; void *from_virtual; void *to_virtual; int i; int ret = -ENOMEM; if (ttm->page_flags & TTM_PAGE_FLAG_USER) { ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, ttm->num_pages); if (unlikely(ret != 0)) return ret; ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; return 0; } swap_storage = ttm->swap_storage; BUG_ON(swap_storage == NULL); swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = read_mapping_page(swap_space, i, NULL); if (IS_ERR(from_page)) { ret = PTR_ERR(from_page); goto out_err; } to_page = __ttm_tt_get_page(ttm, i); if (unlikely(to_page == NULL)) goto out_err; preempt_disable(); from_virtual = kmap_atomic(from_page, KM_USER0); to_virtual = kmap_atomic(to_page, KM_USER1); memcpy(to_virtual, from_virtual, PAGE_SIZE); kunmap_atomic(to_virtual, KM_USER1); kunmap_atomic(from_virtual, KM_USER0); preempt_enable(); page_cache_release(from_page); } if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP)) fput(swap_storage); ttm->swap_storage = NULL; ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; return 0; out_err: ttm_tt_free_alloced_pages(ttm); return ret; }
/**
 * ttm_tt_get_page - return page @index of @ttm, allocating if needed.
 * @ttm: the ttm_tt object to query.
 * @index: page index within the object.
 *
 * Swaps the object back in first if it is currently swapped out, then
 * delegates to __ttm_tt_get_page(). Returns NULL if the swap-in fails
 * (or if the underlying page lookup/allocation fails).
 */
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	/* A swapped-out object must be brought back before page lookup. */
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		if (unlikely(ttm_tt_swapin(ttm) != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}