Example no. 1
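/*
 * km_type-based kmap_atomic(): map a highmem page at the per-CPU fixmap
 * slot selected by @type.  Lowmem pages are returned directly through
 * page_address(), and an existing permanent kmap is reused via
 * kmap_high_get().
 */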
void *kmap_atomic(struct page *page, enum km_type type)
{
    unsigned int idx;
    unsigned long vaddr;
    void *kmap;

    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

    debug_kmap_atomic(type);

    kmap = kmap_high_get(page);
    if (kmap)
        return kmap;

    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    /*
     * With debugging enabled, kunmap_atomic forces that entry to 0.
     * Make sure it was indeed properly unmapped.
     */
    BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
    set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
    /*
     * When debugging is off, kunmap_atomic leaves the previous mapping
     * in place, so this TLB flush ensures the TLB is updated with the
     * new mapping.
     */
    local_flush_tlb_kernel_page(vaddr);

    return (void *)vaddr;
}
Example no. 2
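/*
 * Counterpart of kmap_high_l1_vipt(): drop one level of nesting and,
 * while still nested, restore the caller's @saved_pte if it differs
 * from the current mapping.  The outermost unmap leaves the mapping in
 * place to be replaced lazily by the next kmap_high_l1_vipt().
 */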
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
    unsigned int idx, cpu = smp_processor_id();
    int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
    unsigned long vaddr, flags;
    pte_t pte, *ptep;

    idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    ptep = TOP_PTE(vaddr);
    pte = mk_pte(page, kmap_prot);

    BUG_ON(pte_val(*ptep) != pte_val(pte));
    BUG_ON(*depth <= 0);

    raw_local_irq_save(flags);
    (*depth)--;
    if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
        set_pte_ext(ptep, saved_pte, 0);
        local_flush_tlb_kernel_page(vaddr);
    }
    raw_local_irq_restore(flags);

    if (!in_interrupt())
        preempt_enable();
}
Example no. 3
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
#if 0
   unsigned int offset = CACHE_COLOUR(vaddr);
   unsigned long to = to_address + (offset << PAGE_SHIFT);

   /*
    * Discard data in the kernel mapping for the new page.
    * FIXME: this needs the MCRR instruction to be supported.
    */
   __asm__("mcrr  p15, 0, %1, %0, c6   @ 0xec401f06"
           :
           : "r" (kaddr),
             "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
           : "cc");

#endif
   /*
    * Now clear the page using the same cache colour as
    * the page's ultimate destination.
    */
   spin_lock(&v6_lock);

#if 0
   set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
   flush_tlb_kernel_page(to);
#endif

   cpu_flush_tlb_all();
   clear_page(kaddr);

   spin_unlock(&v6_lock);
}
Example no. 4
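/*
 * Map @page at this CPU's KM_L1_CACHE fixmap slot for L1 VIPT cache
 * maintenance.  The PTE previously installed there is handed back
 * through @saved_pte so kunmap_high_l1_vipt() can restore it, and a
 * per-CPU depth counter lets the mapping nest.
 */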
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
    unsigned int idx, cpu = smp_processor_id();
    int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
    unsigned long vaddr, flags;
    pte_t pte, *ptep;

    idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    ptep = TOP_PTE(vaddr);
    pte = mk_pte(page, kmap_prot);

    if (!in_interrupt())
        preempt_disable();

    raw_local_irq_save(flags);
    (*depth)++;
    if (pte_val(*ptep) == pte_val(pte)) {
        *saved_pte = pte;
    } else {
        *saved_pte = *ptep;
        set_pte_ext(ptep, pte, 0);
        local_flush_tlb_kernel_page(vaddr);
    }
    raw_local_irq_restore(flags);

    return (void *)vaddr;
}
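/*
 * Undo a mapping set up by __kmap_atomic(): fixmap addresses have their
 * D-cache flushed on VIVT and, with CONFIG_DEBUG_HIGHMEM, their PTE
 * cleared; addresses in the PKMAP area were obtained through
 * kmap_high_get() and are released with kunmap_high().
 */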
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
Example no. 6
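/*
 * Like kmap_atomic() above, but maps a raw PFN, which need not be
 * backed by a struct page, at the fixmap slot selected by @type.
 */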
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
    unsigned int idx;
    unsigned long vaddr;

    pagefault_disable();

    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
    set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
    local_flush_tlb_kernel_page(vaddr);

    return (void *)vaddr;
}
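/*
 * Atomically map a (possibly highmem) page at a stack-indexed per-CPU
 * fixmap slot.  Lowmem pages are returned directly through
 * page_address(); an existing permanent kmap is reused via
 * kmap_high_get(), except on non-VIVT caches with CONFIG_DEBUG_HIGHMEM,
 * where the dedicated fixmap path is forced for debugging.
 */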
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
Example no. 8
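/*
 * Copy a user page.  As in the clear routine above, the per-colour
 * kernel remapping is compiled out here (#if 0); the copy is done
 * directly under v6_lock after a full TLB flush.
 */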
static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
#if 0
   unsigned int offset = CACHE_COLOUR(vaddr);
   unsigned long from, to;
   struct page *page = virt_to_page(kfrom);

   if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
      __flush_dcache_page(page_mapping(page), page);

   /*
    * Discard data in the kernel mapping for the new page.
    * FIXME: this needs the MCRR instruction to be supported.
    */
   __asm__("mcrr  p15, 0, %1, %0, c6   @ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
           : "cc");
#endif

   /*
    * Now copy the page using the same cache colour as the
    * page's ultimate destination.
    */
   spin_lock(&v6_lock);

#if 0
   set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
   set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);

   from = from_address + (offset << PAGE_SHIFT);
   to   = to_address + (offset << PAGE_SHIFT);

   flush_tlb_kernel_page(from);
   flush_tlb_kernel_page(to);
#endif

   cpu_flush_tlb_all();

   copy_page(kto, kfrom);

   spin_unlock(&v6_lock);
}
Example no. 9
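/*
 * Translate a pointer returned by kmap_atomic() back to its struct
 * page; pointers below FIXADDR_START are resolved with virt_to_page().
 */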
struct page *kmap_atomic_to_page(const void *ptr)
{
    unsigned long vaddr = (unsigned long)ptr;
    pte_t *pte;

    if (vaddr < FIXADDR_START)
        return virt_to_page(ptr);

    pte = TOP_PTE(vaddr);
    return pte_page(*pte);
}
Example no. 10
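/*
 * As above, but addresses in the PKMAP area (obtained through
 * kmap_high_get()) are resolved via pkmap_page_table.
 */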
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START) {
		if (vaddr >= PKMAP_ADDR(0) &&
			vaddr < PKMAP_ADDR(LAST_PKMAP))
			return pte_page(pkmap_page_table[PKMAP_NR(vaddr)]);
		else
			return virt_to_page(ptr);
	}

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}
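/*
 * PFN-based counterpart of __kmap_atomic(): map a raw PFN, which need
 * not have a struct page, at the next per-CPU fixmap slot.
 */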
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));

	return (void *)vaddr;
}
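/*
 * Copy a user highmem page on XScale: the source page is read through
 * the mini data cache mapping at COPYPAGE_MINICACHE so the copy does
 * not pollute the main data cache.  The source's kernel-mapping cache
 * lines are flushed first unless the page is already marked clean.
 */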
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER1);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	spin_lock(&minicache_lock);

	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{