Example #1
/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}
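The kernel-doc above fixes the calling contract: the caller hands in a list of pages whose ->index is already set and a filler callback that starts the actual read, with @data passed through untouched. A minimal sketch of such a caller, assuming a hypothetical my_fs_readpage() helper (my_fs_filler and my_fs_readpages are illustrative names, not real kernel symbols):

/* Illustrative filler: forwards each page to the filesystem's own
 * readpage-style routine; my_fs_readpage() is an assumed helper. */
static int my_fs_filler(void *data, struct page *page)
{
	struct file *file = data;

	return my_fs_readpage(file, page);
}

/* readpages()-style entry point: the pages on the list already carry
 * their ->index, as read_cache_pages() expects */
static int my_fs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, my_fs_filler, file);
}

On success every page has been added to the page cache and handed to the filler; on the first filler error the remaining pages are invalidated and released, exactly as the loop above shows.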
Example #2
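/*
 * Allocate one page for a new PDE table: reuse a page cached in
 * pgt_buffer when one is free, otherwise get a fresh, suitably
 * aligned allocation from the system.
 */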
static struct page *alloc_new_pde(void)
{
	struct page *page;
	size_t size = PDE_TABLE_SIZE;
	u32 align = PDE_TABLE_ALIGN_SIZE;

	spin_lock_irqsave(&pgt_buffer.pgt_lock);

	if (pgt_buffer.nr_free == 0) {
		/*
		 * alloc new pgt from system
		 */
		page = get_free_pages_align(page_nr(size), align, GFP_PGT);
		if (!page) {
			spin_unlock_irqstore(&pgt_buffer.pgt_lock);
			return NULL;
		}

		pgt_buffer.alloc_nr++;
	} else {
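		/* reuse a page-table page cached on pgt_buffer's free list */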
		page = list_to_page(list_next(&pgt_buffer.list));
		list_del(list_next(&pgt_buffer.list));
		pgt_buffer.nr_free--;
	}

	spin_unlock_irqstore(&pgt_buffer.pgt_lock);
	return page;
}
Example #3
static int pgt_map_normal_memory(struct task_page_table *table,
		struct list_head *mem_list, unsigned long map_base)
{
	struct page *page;
	unsigned long base = map_base;
	unsigned long pte_end = 0, pte = 0;
	struct list_head *list = list_next(mem_list);

	if (!map_base)
		return -EINVAL;

	while (list != mem_list) {
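		/*
		 * first pass, or the current pte table is used up:
		 * look up the pte address that maps the current base
		 */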
		if (pte == pte_end) {
			pte = pgt_get_mapped_pte_addr(table, base);
			if (!pte)
				return -ENOMEM;

			pte_end = min_align(pte + PTES_PER_PDE, PTES_PER_PDE);
		}

		page = list_to_page(list);
		mmu_create_pte_entry(pte, page_to_pa(page), base);
		page_set_map_address(page, base);
		base += PAGE_SIZE;

		pte += sizeof(unsigned long);
		list = list_next(list);
	}

	return 0;
}
Example #4
static struct page *
alloc_new_pte_page(struct pte_cache_list *clist)
{
	struct page *page;
	unsigned long pte_free_base = 0;

	if (!clist->pte_free_size) {
		/* 
		 * alloc new page for pte pgt
		 */
		page = request_pages(1, GFP_PGT);
		if (!page)
			return NULL;

		add_page_to_list_tail(page, &clist->pte_list);
		clist->pte_alloc_size += PAGE_SIZE;
	} else {
		/*
		 * fetch a new page from the pte_list
		 */
		clist->pte_current_page =
			list_next(clist->pte_current_page);
		page = list_to_page(clist->pte_current_page);
		clist->pte_free_size -= PAGE_SIZE;
	}

	pte_free_base = page_to_va(page);
	memset((char *)pte_free_base, 0, PAGE_SIZE);

	return page;
}
Example #5
/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}
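Examples 1 and 5 both come from the kernel's readahead code, where list_to_page is nothing more than a convenience wrapper around list_entry(). A sketch of the usual definition in older mm/readahead.c (treat the exact form as an assumption from memory):

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

That is, it returns the page linked at the tail of the list through its ->lru member, which is why both loops pair it with list_del(&page->lru) or list_del(&victim->lru).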
Example #6
unsigned long
pgt_get_mmap_base(struct task_page_table *table, int page_nr)
{
	struct list_head *list;
	unsigned long ua = 0;
	struct list_head *head = &table->mmap_list.pte_list;
	struct page *page;

	list_for_each(head, list) {
		page = list_to_page(list);
		ua = pgt_get_page_mmap_base(page, page_nr);
		if (ua > 0)
			goto out;
	}