Code example #1
File: copypage-v6.c  Project: QiuLihua83/linux-2.6.10
/*
 * Copy the page, taking account of the cache colour.
 */
void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	unsigned int offset = DCACHE_COLOUR(vaddr);
	unsigned long from, to;

	/*
	 * Discard data in the kernel mapping for the new page.
	 * FIXME: this needs the MCRR instruction to be supported.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));

	from = from_address + (offset << PAGE_SHIFT);
	to   = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(from);
	flush_tlb_kernel_page(to);

	copy_page((void *)to, (void *)from);

	spin_unlock(&v6_lock);
}
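
For context, the function above leans on file-scope definitions earlier in copypage-v6.c. A minimal sketch of that context, assuming the 2.6.10-era layout (the window addresses and the colour macro are version-specific, so treat the values as illustrative):

/* Sketch of the file-scope context assumed by v6_copy_user_page_aliasing();
 * from_pte/to_pte are set up at boot to the PTEs backing the two windows. */
#define DCACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

#define from_address	(0xffff8000)	/* colour-indexed source window */
#define from_pgprot	PAGE_KERNEL
#define to_address	(0xffffc000)	/* colour-indexed destination window */
#define to_pgprot	PAGE_KERNEL

static pte_t *from_pte;
static pte_t *to_pte;
static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;	/* serialises use of the windows */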
Code example #2
void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	unsigned long kaddr;
	pgprot_t prot;
	pte_t **pte;

	BUG_ON(!ref);
	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	/* the requested page must lie within the handle's allocation */
	BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
	prot = nvmap_pgprot(h, pgprot_kernel);
	/* reserve a kernel virtual address slot (and its PTE) for the mapping */
	pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
	if (!pte)
		goto out;

	/* resolve the physical address: a discrete page for page-allocator
	 * heaps, or an offset into the contiguous carveout region */
	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	/* install the mapping, then invalidate any stale TLB entry */
	set_pte_at(&init_mm, kaddr, *pte,
				pfn_pte(__phys_to_pfn(paddr), prot));
	flush_tlb_kernel_page(kaddr);
	return (void *)kaddr;
out:
	nvmap_handle_put(ref->handle);
	return NULL;
}
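
A caller pairs nvmap_kmap() with the driver's nvmap_kunmap(), which releases the PTE slot and drops the handle reference taken above. A minimal usage sketch; touch_first_page() is a hypothetical helper, and the nvmap_kunmap() signature is assumed from the same driver:

/* Hypothetical caller: map page 0 of a handle, zero it, unmap. */
static void touch_first_page(struct nvmap_handle_ref *ref)
{
	void *va = nvmap_kmap(ref, 0);

	if (!va)
		return;
	memset(va, 0, PAGE_SIZE);	/* access through the kernel mapping */
	nvmap_kunmap(ref, 0, va);	/* frees the PTE, drops the handle ref */
}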
Code example #3
File: copypage-v7.c  Project: andreiw/xen3-arm-tegra
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
#if 0
   unsigned int offset = CACHE_COLOUR(vaddr);
   unsigned long to = to_address + (offset << PAGE_SHIFT);

   /*
    * Discard data in the kernel mapping for the new page.
    * FIXME: this needs the MCRR instruction to be supported.
    */
   __asm__("mcrr  p15, 0, %1, %0, c6   @ 0xec401f06"
           :
           : "r" (kaddr),
             "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
           : "cc");

#endif
   /*
    * Now clear the page using the same cache colour as
    * the page's ultimate destination.
    */
   spin_lock(&v6_lock);

#if 0
   set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
   flush_tlb_kernel_page(to);
#endif

   cpu_flush_tlb_all();   /* targeted remap disabled above; flush everything */
   clear_page(kaddr);

   spin_unlock(&v6_lock);
}
Code example #4
File: copypage-v7.c  Project: andreiw/xen3-arm-tegra
static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
#if 0
   unsigned int offset = CACHE_COLOUR(vaddr);
   unsigned long from, to;
   struct page *page = virt_to_page(kfrom);

   if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
      __flush_dcache_page(page_mapping(page), page);

   /*
    * Discard data in the kernel mapping for the new page.
    * FIXME: this needs the MCRR instruction to be supported.
    */
   __asm__("mcrr  p15, 0, %1, %0, c6   @ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
           : "cc");
#endif

   /*
    * Now copy the page using the same cache colour as the
    * page's ultimate destination.
    */
   spin_lock(&v6_lock);

#if 0
   set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
   set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);

   from = from_address + (offset << PAGE_SHIFT);
   to   = to_address + (offset << PAGE_SHIFT);

   flush_tlb_kernel_page(from);
   flush_tlb_kernel_page(to);
#endif

   cpu_flush_tlb_all();   /* targeted remap disabled above; flush everything */

   copy_page(kto, kfrom);

   spin_unlock(&v6_lock);
}
Code example #5
File: motorola.c  Project: 3sOx/asuswrt-merlin
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);	/* push the zeroed lines out to RAM */
	flush_tlb_kernel_page(ptablep);	/* drop any stale translation */
	nocache_page(ptablep);		/* hardware-walked tables must be uncached */

	return ptablep;
}
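
The tables minted here are consumed by map_node() in the same file, which points a pointer-table (pmd) slot at each one. A hedged sketch of that step, modelled on map_node() but with an illustrative wrapper name:

/* Illustrative helper: make sure a pmd slot has a page table,
 * allocating one with kernel_page_table() on first touch. */
static void __init ensure_page_table(pmd_t *pmd_dir)
{
	if (!pmd_present(*pmd_dir))
		pmd_set(pmd_dir, kernel_page_table());
}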
Code example #6
File: motorola.c  Project: 3sOx/asuswrt-merlin
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);	/* push zeroed lines out to RAM */
		flush_tlb_kernel_page(last_pgtable);	/* drop any stale translation */
		nocache_page(last_pgtable);		/* keep hardware-walked tables uncached */
	}

	return last_pgtable;
}
Code example #7
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */

	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that the handle
	 * will not be freed while the kernel mapping exists;
	 * nvmap_handle_put() is called when this address is unmapped */
	return p;
}
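
nvmap_mmap() returns with the handle reference (and, for carveout heaps, the usecount) still raised; the driver's nvmap_munmap() is the counterpart that tears the mapping down and drops them. A minimal usage sketch; with_mapped_buffer() is a hypothetical helper and the nvmap_munmap() signature is assumed from the same driver:

/* Hypothetical caller: map a whole handle, use it, release it. */
static int with_mapped_buffer(struct nvmap_handle_ref *ref)
{
	void *va = nvmap_mmap(ref);

	if (!va)
		return -ENOMEM;
	/* ... access the buffer through va ... */
	nvmap_munmap(ref, va);	/* unmaps and drops the references taken above */
	return 0;
}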