Example #1
0
/*
 * Map a vmalloc'ed virtual range [address, end) to its backing memory,
 * page by page: look up each page's PTE in the kernel page table
 * (swapper_pg_dir), register the mapping, and map the page into the
 * L4 address space.  'address' must be page aligned; 'end' is exclusive.
 * Pages without a present PTE are silently skipped.
 */
void l4x_vmalloc_map_vm_area(unsigned long address, unsigned long end)
{
	if (address & ~PAGE_MASK)
		enter_kdebug("map_vm_area: Unaligned address!");

	for (; address < end; address += PAGE_SIZE) {
		pte_t *ptep;

#ifdef ARCH_arm
		unsigned long o;
		/* Skip the ARM self-mapped region; 'o' is its size in bytes
		 * (the loop increment adds the final PAGE_SIZE back). */
		if ((o = l4x_arm_is_selfmapped_addr(address))) {
			address += o - PAGE_SIZE;
			continue;
		}
#endif

		ptep = lookup_pte(swapper_pg_dir, address);

		if (!ptep || !pte_present(*ptep)) {
			/* Debug aid, compiled out; note it would dereference
			 * a possibly NULL ptep if ever enabled.
			 * Fixed: the format string opened "(ptep: ..." but
			 * never closed the parenthesis. */
			if (0)
				printk("%s: No (valid) PTE for %08lx?!"
			               " (ptep: %p, pte: %08"
#ifndef ARCH_arm
				       "l"
#endif
				       "x)\n",
			               __func__, address,
			               ptep, pte_val(*ptep));
			continue;
		}
		l4x_virtual_mem_register(address, pte_val(*ptep));
		l4lx_memory_map_virtual_page(address, pte_val(*ptep),
		                             pte_write(*ptep));
	}
}
Example #2
0
/*
 * A semi-optimized "get next pte": when the caller already holds the PTE
 * of the previous page ('last') and the next PTE lies within the same pmd,
 * step to it with plain pointer arithmetic; otherwise fall back to a full
 * page-table walk via lookup_pte().
 *
 * Fixed: the original computed 'last + 1' before checking 'last' for NULL;
 * pointer arithmetic on a null pointer is undefined behavior.
 */
static inline pte_t *
pte_next(pgd_t *page_dir, unsigned long pf_address, pte_t *last)
{
	pte_t *pte;

	if (!last)
		return lookup_pte(page_dir, pf_address);

	pte = last + 1;
	if (!ptes_same_pmd(pte, last))
		pte = lookup_pte(page_dir, pf_address);
#ifdef DEBUG_LOOKUP_PTABS
	else
		printk("%s pte = %p\n", __func__, pte);
#endif

	return pte;
}
Example #3
0
static void log_efault(const char *str, const void *user_addr,
                       const void *kernel_addr, unsigned long size)
{
    pte_t *ptep = lookup_pte((pgd_t *)current->mm->pgd,
                             (unsigned long)user_addr);

    printk("%s returning efault, \n"
           "  user_addr: %p, kernel_addr: %p, size: %08lx\n"
           "  task: %s (%p) " PRINTF_L4TASK_FORM
           ", pdir: %p, ptep: %p, pte: %lx\n",
           str, user_addr, kernel_addr, size,
           current->comm, current,
           PRINTF_L4TASK_ARG(current->thread.user_thread_id),
           current->mm->pgd, ptep, ptep ? pte_val(*ptep) : 0);
#ifdef DEBUG_KDEBUG_EFAULT
    enter_kdebug("log_efault");
#endif
}
Example #4
0
/*
 * Map a virtual range [address, end) of the vmalloc or modules area to
 * its backing memory, page by page: look up each page's PTE in the kernel
 * address space (init_mm), register the mapping, and map the page.
 * 'address' must be page aligned; 'end' is exclusive.  Ranges outside
 * both the vmalloc and modules windows are rejected with a diagnostic.
 */
void l4x_vmalloc_map_vm_area(unsigned long address, unsigned long end)
{
	if (address & ~PAGE_MASK)
		enter_kdebug("map_vm_area: Unaligned address!");

	/* Refuse ranges outside the vmalloc and module areas. */
	if (!(   (VMALLOC_START <= address && end <= VMALLOC_END)
	      || (MODULES_VADDR <= address && end <= MODULES_END))) {
		pr_err("%s: %lx-%lx outside areas: %lx-%lx, %lx-%lx\n",
		       __func__, address, end,
		       VMALLOC_START, VMALLOC_END, MODULES_VADDR, MODULES_END);
		pr_err("%s: %p\n", __func__, __builtin_return_address(0));
		enter_kdebug("KK");
		return;
	}

	for (; address < end; address += PAGE_SIZE) {
		pte_t *ptep;

#ifdef CONFIG_ARM
		unsigned long o;
		/* Skip the ARM self-mapped region; 'o' is its size in bytes
		 * (the loop increment adds the final PAGE_SIZE back). */
		if ((o = l4x_arm_is_selfmapped_addr(address))) {
			address += o - PAGE_SIZE;
			continue;
		}
#endif

		ptep = lookup_pte(&init_mm, address);

		if (!ptep || !pte_present(*ptep)) {
			/* Debug aid, compiled out; note it would dereference
			 * a possibly NULL ptep if ever enabled.
			 * Fixed: the format string opened "(ptep: ..." but
			 * never closed the parenthesis. */
			if (0)
				printk("%s: No (valid) PTE for %08lx?!"
			               " (ptep: %p, pte: %08"
#ifndef CONFIG_ARM
				       "l"
#endif
				       "x)\n",
			               __func__, address,
			               ptep, pte_val(*ptep));
			continue;
		}
		l4x_virtual_mem_register(address, *ptep);
		l4lx_memory_map_virtual_page(address, *ptep);
	}
}
Example #5
0
/*
 * Clean the data cache for every present, mapped page in [start, end),
 * resolving each page's backing frame through the current task's page
 * directory (falling back to active_mm when there is no mm).
 */
void __glue(_CACHE, _coherent_user_range)(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->mm ? current->mm : current->active_mm;
	pgd_t *pgd;
	unsigned long addr;

	if (!mm) {
		printk("active_mm: No mm... %lx-%lx\n", start, end);
		return;
	}
	pgd = (pgd_t *)mm->pgd;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		pte_t *ptep = lookup_pte(pgd, addr);
		unsigned long phys;

		if (!ptep || !pte_present(*ptep) || !pte_mapped(*ptep))
			continue;

		phys = pte_pfn(*ptep) << PAGE_SHIFT;
		l4_cache_clean_data(phys, phys + PAGE_SIZE);
	}
}
Example #6
0
/*
 * Flush the data cache for every present page in [start, end) of the
 * current address space (falling back to active_mm when there is no mm).
 * 'flags' is part of the cache-ops interface and is not consulted here.
 */
void __glue(_CACHE, _flush_user_cache_range)(unsigned long start, unsigned long end,
                                            unsigned int flags)
{
	struct mm_struct *mm = current->mm ? current->mm : current->active_mm;
	unsigned long addr;

	if (!mm) {
		printk("active_mm: No mm... %lx-%lx\n", start, end);
		return;
	}

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		pte_t *ptep = lookup_pte(mm, addr);
		unsigned long phys;

		if (!ptep || !pte_present(*ptep))
			continue;

		phys = pte_pfn(*ptep) << PAGE_SHIFT;
		l4_cache_flush_data(phys, phys + PAGE_SIZE);
	}
}