Example #1
void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page))
		return;

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
Example #2
void show_mem(unsigned int filter)
{
	pg_data_t *pgdat;
	unsigned long total = 0, reserved = 0, shared = 0,
		nonshared = 0, highmem = 0;

	printk("Mem-Info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_online_pgdat(pgdat) {
		unsigned long i, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			unsigned long pfn = pgdat->node_start_pfn + i;

			if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
				touch_nmi_watchdog();

			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (PageHighMem(page))
				highmem++;

			if (PageReserved(page))
				reserved++;
			else if (page_count(page) == 1)
				nonshared++;
			else if (page_count(page) > 1)
				shared += page_count(page) - 1;

			total++;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("%lu pages RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%lu pages HighMem\n", highmem);
#endif
	printk("%lu pages reserved\n", reserved);
	printk("%lu pages shared\n", shared);
	printk("%lu pages non-shared\n", nonshared);
#ifdef CONFIG_QUICKLIST
	printk("%lu pages in pagetable cache\n",
		quicklist_total_size());
#endif
}
Example #3
static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
	struct page *page = virt_to_page(pt);
	unsigned long pfn = page_to_pfn(page);

	if (PageHighMem(page))
		return;
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)__va(pfn << PAGE_SHIFT),
		pfn_pte(pfn, flags), 0));
}
Example #4
/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
		balloon_high++;
	} else {
		list_add(PAGE_TO_LIST(page), &ballooned_pages);
		balloon_low++;
	}
}
Example #5
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */

	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				pte_t saved_pte;
				vaddr = kmap_high_l1_vipt(page, &saved_pte);
				if (vaddr) { //TI
					op(vaddr + offset, len, dir);
					kunmap_high_l1_vipt(page, saved_pte);
				} else //TI
					printk(KERN_ERR "SSN: 1 vaddr is NULL\n"); //TI
			} else //TI
				printk(KERN_ERR "SSN: 2 vaddr is NULL\n"); //TI
		} else {
			vaddr = page_address(page) + offset;
			if (vaddr) //TI
				op(vaddr, len, dir);
			else //TI
				printk(KERN_ERR "SSN: 3 vaddr is NULL\n"); //TI
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
Example #6
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}
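The list ordering noted in the comment matters for the reverse operation: because lowmem pages are added at the head of ballooned_pages, popping from the head hands lowmem back first. A minimal sketch of such a retrieve counterpart, reusing the field names from this example (a hedged illustration, not code from the listing):

static struct page *__balloon_retrieve(void)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	/* Lowmem pages were added at the head, so they come back first. */
	page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	return page;
}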
Example #7
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}
Example #8
void __flush_dcache_page(struct page *page)
{
#ifdef CONFIG_RALINK_SOC
	void *addr;

	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}
#else
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}
#endif

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
#ifdef CONFIG_RALINK_SOC
	if (PageHighMem(page)) {
		addr = kmap_atomic(page, KM_PTE1);
		flush_data_cache_page((unsigned long)addr);
		kunmap_atomic(addr, KM_PTE1);
	} else {
		addr = (void *) page_address(page);
		flush_data_cache_page((unsigned long)addr);
	}
	ClearPageDcacheDirty(page);
#else
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
#endif
}
Example #9
struct scatterlist*
videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
{
	struct scatterlist *sglist;
	int i = 0;

	if (NULL == pages[0])
		return NULL;
	sglist = kmalloc(sizeof(*sglist) * nr_pages, GFP_KERNEL);
	if (NULL == sglist)
		return NULL;
	memset(sglist, 0, sizeof(*sglist) * nr_pages);

	if (NULL == pages[0])
		goto nopage;
	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sglist[0].page   = pages[0];
	sglist[0].offset = offset;
	sglist[0].length = PAGE_SIZE - offset;
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sglist[i].page   = pages[i];
		sglist[i].length = PAGE_SIZE;
	}
	return sglist;

 nopage:
	dprintk(2,"sgl: oops - no page\n");
	kfree(sglist);
	return NULL;

 highmem:
	dprintk(2,"sgl: oops - highmem page\n");
	kfree(sglist);
	return NULL;
}
Example #10
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
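The unmap side is usually the mirror image: lowmem addresses came straight from the linear map and need no teardown, so only highmem pages reach kunmap_high(). A hedged sketch of what the matching __kunmap() typically looks like (assumed for illustration, not taken from the listing):

void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;		/* linear-map address, nothing to undo */
	kunmap_high(page);
}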
Example #11
File: iomem.c  Project: avagin/linux
static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}
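Callers normally reach this helper through memremap(), which tries the linear mapping first and only falls back to an arch-specific remap when try_ram_remap() returns NULL. A hedged usage sketch, assuming res is a RAM-backed struct resource obtained elsewhere:

	/* Map the region write-back; the kernel picks the linear map if it can. */
	void *base = memremap(res->start, resource_size(res), MEMREMAP_WB);

	if (!base)
		return -ENOMEM;
	/* ... read or write the region through base ... */
	memunmap(base);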
Example #12
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 int enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *dst = req->dst;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *dstp;
	u8 *vdst;
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		memset(rctx->auth_tag, 0, authsize);
	else
		scatterwalk_map_and_copy(rctx->auth_tag, dst,
					 req->cryptlen - authsize,
					 authsize, 0);

	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	sg_init_table(assoc, 2);
	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
		    req->assoc->offset);
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
Example #13
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
/*	size_t left = size; */
	do {
		size_t len = left;
		void *vaddr;
		
		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
/*			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				} */
			if (len + offset > PAGE_SIZE)				
				len = PAGE_SIZE - offset;
/*			} */
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
/*		page++; */
		pfn++;
		left -= len;
	} while (left);
}
Example #14
void *os_kmap_sgptr(PSG psg)
{
	struct page * page = (struct page *)(HPT_UPTR)(psg->addr.bus>>32);

	if (page)
		return (PageHighMem(page)?
				(char *)kmap_atomic(page) :
				(char *)page_address(page))
			+ (psg->addr.bus & 0xffffffff);
	else
		return psg->addr._logical;
}
Example #15
static void poison_page(struct page *page)
{
	void *addr;

	if (PageHighMem(page)) {
		poison_highpage(page);
		return;
	}
	set_page_poison(page);
	addr = page_address(page);
	memset(addr, PAGE_POISON, PAGE_SIZE);
}
Example #16
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	if (PageHighMem(page))
		return;
	apply_to_page_range(&init_mm, start,
		size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}
Example #17
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
#ifdef CONFIG_RALINK_SOC
	if (!PageHighMem(page)) {
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				flush_data_cache_page(addr);
				ClearPageDcacheDirty(page);
			}
		}
	} else {
		void *laddr = lowmem_page_address(page);

		if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				void *kaddr;

				kaddr = kmap_atomic(page, KM_PTE1);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_atomic(kaddr, KM_PTE1);
				ClearPageDcacheDirty(page);
			}
		}
	}
#else
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
#endif
}
Example #18
static int
__change_page_attr(struct page *page, pgprot_t prot)
{ 
	pte_t *kpte; 
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
			set_pte_atomic(kpte, mk_pte(page, prot)); 
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
} 
Example #19
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
	mutex_lock(&pool->mutex);
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &pool->high_items);
		pool->high_count++;
	} else {
		list_add_tail(&page->lru, &pool->low_items);
		pool->low_count++;
	}
	mutex_unlock(&pool->mutex);
	return 0;
}
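Keeping high_items and low_items separate lets the pool later hand back a page of the requested kind. A hedged sketch of the matching remove path, written against the fields used in this example rather than copied from it (the caller is assumed to hold pool->mutex):

static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
	struct page *page;

	if (high) {
		if (!pool->high_count)
			return NULL;
		page = list_first_entry(&pool->high_items, struct page, lru);
		pool->high_count--;
	} else {
		if (!pool->low_count)
			return NULL;
		page = list_first_entry(&pool->low_items, struct page, lru);
		pool->low_count--;
	}

	list_del(&page->lru);
	return page;
}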
Example #20
void cma_unmap_kernel(u32 phys_addr, size_t size, void *cpu_addr)
{
	struct page *page = phys_to_page(phys_addr);

	BUG_ON(unlikely(!pfn_valid(__phys_to_pfn(phys_addr))));

	size = PAGE_ALIGN(size);

	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, pgprot_kernel);
}
Example #21
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
	{
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
	}
}
Example #22
static void unpoison_page(struct page *page)
{
	if (PageHighMem(page)) {
		unpoison_highpage(page);
		return;
	}
	if (page_poison(page)) {
		void *addr = page_address(page);

		check_poison_mem(addr, PAGE_SIZE);
		clear_page_poison(page);
	}
}
Example #23
/*
 * Return a scatterlist for an array of userpages (NULL on errors).
 * Memory for the scatterlist is allocated using kmalloc.  The caller
 * must free the memory.
 */
static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
					int nr_pages, int offset, size_t size)
{
	struct scatterlist *sglist;
	int i;

	if (NULL == pages[0])
		return NULL;
	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0],
			min_t(size_t, PAGE_SIZE - offset, size), offset);
	size -= min_t(size_t, PAGE_SIZE - offset, size);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return sglist;

nopage:
	dprintk(2, "sgl: oops - no page\n");
	vfree(sglist);
	return NULL;

highmem:
	dprintk(2, "sgl: oops - highmem page\n");
	vfree(sglist);
	return NULL;
}
Example #24
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	/* the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_all();
}
Example #25
struct scatterlist*
videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
{
	struct scatterlist *sglist;
	int i = 0;

	if (NULL == pages[0])
		return NULL;
	sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (NULL == pages[0])
		goto nopage;
	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
	}
	return sglist;

 nopage:
	dprintk(2,"sgl: oops - no page\n");
	kfree(sglist);
	return NULL;

 highmem:
	dprintk(2,"sgl: oops - highmem page\n");
	kfree(sglist);
	return NULL;
}
Example #26
File: dma.c  Project: 1314cc/linux
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(dma_handle);
	int is_non_coh = 1;

	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
			(is_isa_arcv2() && ioc_exists);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
Example #27
/* if pagedir_p != NULL it also copies the counted pages */
static int count_and_copy_data_pages(struct pbe *pagedir_p)
{
	int chunk_size;
	int nr_copy_pages = 0;
	int pfn;
	struct page *page;
	
#ifdef CONFIG_DISCONTIGMEM
	panic("Discontingmem not supported");
#else
	BUG_ON (max_pfn != num_physpages);
#endif
	for (pfn = 0; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (PageHighMem(page))
			panic("Swsusp not supported on highmem boxes. Send 1GB of RAM to <*****@*****.**> and try again ;-).");

		if (!PageReserved(page)) {
			if (PageNosave(page))
				continue;

			if ((chunk_size=is_head_of_free_region(page))!=0) {
				pfn += chunk_size - 1;
				continue;
			}
		} else if (PageReserved(page)) {
			BUG_ON (PageNosave(page));

			/*
			 * Just copy whole code segment. Hopefully it is not that big.
			 */
			if ((ADDRESS(pfn) >= (unsigned long) ADDRESS2(&__nosave_begin)) && 
			    (ADDRESS(pfn) <  (unsigned long) ADDRESS2(&__nosave_end))) {
				PRINTK("[nosave %lx]", ADDRESS(pfn));
				continue;
			}
			/* Hmm, perhaps copying all reserved pages is not too healthy as they may contain 
			   critical bios data? */
		} else	BUG();

		nr_copy_pages++;
		if (pagedir_p) {
			pagedir_p->orig_address = ADDRESS(pfn);
			copy_page((void *) pagedir_p->address, (void *) pagedir_p->orig_address);
			pagedir_p++;
		}
	}
	return nr_copy_pages;
}
Example #28
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
Example #29
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
	    return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
Example #30
static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
	mutex_lock(&pool->mutex);
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &pool->high_items);
		pool->high_count++;
	} else {
		list_add_tail(&page->lru, &pool->low_items);
		pool->low_count++;
	}

	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
							1 << pool->order);
	mutex_unlock(&pool->mutex);
}