Example #1
static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1);
	}
}
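In mainline powerpc this helper is not called directly; it is handed to the generic NMI-backtrace machinery as the "raise" callback. A minimal sketch of that wiring, assuming the older nmi_trigger_cpumask_backtrace() prototype that takes (mask, exclude_self, raise_fn); recent kernels pass an exclude_cpu instead of a bool.

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	/*
	 * nmi_trigger_cpumask_backtrace() copies the mask, clears the
	 * excluded CPU if requested, and then invokes raise_backtrace_ipi()
	 * with the CPUs that still need a backtrace.
	 */
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}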
Example #2
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	inner_op(start, start + size);
	outer_op(__pa(start), __pa(start) + size);
}
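As the comment above says, drivers are meant to reach this path only through the dma-mapping API. A short sketch of the documented driver-side calls for a streaming buffer; the device, buffer and length are placeholders, not part of the original code.

#include <linux/dma-mapping.h>

static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* map once for DMA_TO_DEVICE; this does the initial cache maintenance */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device reads the buffer ... */

	/* give the buffer back to the CPU, refill it, then hand it to the
	 * device again for another transfer */
	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
	/* ... CPU updates buf here ... */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}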
Example #3
static int ipanic_current_task_info(void *data, unsigned char *buffer, size_t sz_buf)
{
	struct stack_trace trace;
	int i, plen;
	struct task_struct *tsk;
	struct aee_process_info *cur_proc;
	struct pt_regs *regs = (struct pt_regs *)data;

	if (!virt_addr_valid(current_thread_info()))
		return -1;
	tsk = current_thread_info()->task;
	if (!virt_addr_valid(tsk))
		return -1;
	cur_proc = (struct aee_process_info *)ipanic_task_info;
	memset(cur_proc, 0, sizeof(struct aee_process_info));

	/* Grab kernel task stack trace */
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_TRACE_DEPTH;
	trace.entries = ipanic_stack_entries;
	trace.skip = 8;
	save_stack_trace_tsk(tsk, &trace);
	/* Skip the entries -  ipanic_save_current_tsk_info/save_stack_trace_tsk */
	for (i = 0; i < trace.nr_entries; i++) {
		int off = strlen(cur_proc->backtrace);
		int rem = AEE_BACKTRACE_LENGTH - off;	/* avoid shadowing the outer plen */

		if (rem > 16) {
			snprintf(cur_proc->backtrace + off, rem, "[<%p>] %pS\n",
				 (void *)ipanic_stack_entries[i], (void *)ipanic_stack_entries[i]);
		}
	}
	if (regs) {
		cur_proc->ke_frame.pc = (__u64) regs->reg_pc;
		cur_proc->ke_frame.lr = (__u64) regs->reg_lr;
	} else {
		/* in case panic() is called without die */
		/* Todo: a UT for this */
		cur_proc->ke_frame.pc = ipanic_stack_entries[0];
		cur_proc->ke_frame.lr = ipanic_stack_entries[1];
	}
	snprintf(cur_proc->ke_frame.pc_symbol, AEE_SZ_SYMBOL_S, "[<%p>] %pS",
		 (void *)(unsigned long) cur_proc->ke_frame.pc, (void *)(unsigned long) cur_proc->ke_frame.pc);
	snprintf(cur_proc->ke_frame.lr_symbol, AEE_SZ_SYMBOL_L, "[<%p>] %pS",
		 (void *)(unsigned long) cur_proc->ke_frame.lr, (void *)(unsigned long) cur_proc->ke_frame.lr);
	/* Current panic user tasks */
	plen = 0;
	while (tsk && (tsk->pid != 0) && (tsk->pid != 1)) {
		/* stop before the buffer is exhausted; snprintf() may report
		 * more than what actually fitted */
		if (plen >= AEE_PROCESS_NAME_LENGTH)
			break;
		plen += snprintf(cur_proc->process_path + plen,
				 AEE_PROCESS_NAME_LENGTH - plen,
				 "[%s, %d]", tsk->comm, tsk->pid);
		tsk = tsk->real_parent;
	}
	mrdump_mini_add_misc((unsigned long)cur_proc, sizeof(struct aee_process_info), 0, "PROC_CUR_TSK");
	memcpy(buffer, cur_proc, sizeof(struct aee_process_info));
	return sizeof(struct aee_process_info);
}
Example #4
int nvmap_get_handle_param(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref, u32 param, u64 *result)
{
	if (WARN_ON(!virt_addr_valid(ref)) ||
	    WARN_ON(!virt_addr_valid(client)) ||
	    WARN_ON(!result))
		return -EINVAL;

	return __nvmap_get_handle_param(client, ref->handle, param, result);
}
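A hedged caller sketch for the wrapper above, querying a handle's size via the NVMAP_HANDLE_PARAM_SIZE parameter that appears in Example #18 below; client and ref stand for objects the caller already owns.

static int example_query_handle_size(struct nvmap_client *client,
				     struct nvmap_handle_ref *ref, u64 *size)
{
	/* validity of client/ref is re-checked inside the wrapper */
	return nvmap_get_handle_param(client, ref, NVMAP_HANDLE_PARAM_SIZE, size);
}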
Example #5
/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}
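A usage sketch: KVM/ARM maps kernel objects into Hyp mode by passing the object's start and one-past-the-end address. The struct here is only an illustrative stand-in for whatever lowmem object the Hyp code needs to access.

static int example_map_into_hyp(struct kvm *kvm)
{
	int err;

	/* the end address is exclusive, so "kvm + 1" covers the whole struct */
	err = create_hyp_mappings(kvm, kvm + 1);
	if (err)
		pr_err("Cannot map the kvm structure into Hyp mode: %d\n", err);

	return err;
}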
Example #6
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
Example #7
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}
#endif
	dmac_unmap_area(kaddr, size, dir);
}
Example #8
static void aed_get_bt(struct task_struct *tsk, struct aee_process_bt *bt)
{
	struct stackframe frame;
	unsigned long stack_address;

	bt->nr_entries = 0;

	memset(&frame, 0, sizeof(struct stackframe));
	if (tsk != current) {
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = thread_saved_pc(tsk);
		frame.pc = 0xffffffff;
	} else {
		register unsigned long current_sp asm("sp");

		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)aed_get_bt;
	}
	stack_address = ALIGN(frame.sp, THREAD_SIZE);
	if ((stack_address >= (PAGE_OFFSET + THREAD_SIZE)) && virt_addr_valid(stack_address)) {
		aed_walk_stackframe(&frame, bt, stack_address);
	} else {
		LOGD("%s: Invalid sp value %lx\n", __func__, frame.sp);
	}
}
Example #9
/*
 * map a kernel virtual address or kernel logical address to a phys address
 */
static inline u32 physical_address(u32 virt, int write)
{
	struct page *page;
	struct vm_area_struct *vma;

	/* kernel static-mapped (lowmem) address */
	DPRINTK(" get physical address: virt %x , write %d\n", virt, write);
	if (virt_addr_valid(virt))
		return __pa((u32) virt);

	if (virt >= (u32)high_memory)
		return 0;

	/* otherwise look up the page backing the address */
	vma = find_extend_vma((virt >= TASK_SIZE) ? &init_mm : current->mm, virt);
	if (!vma)
		return 0;

	page = follow_page(vma, (u32) virt, write);
	if (page && pfn_valid(page_to_pfn(page)))
		return (page_to_pfn(page) << PAGE_SHIFT) |
		       ((u32) virt & (PAGE_SIZE - 1));

	return 0;
}
Example #10
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
}
Example #11
static void return_dump_slab(char *str)
{
	unsigned long address;
	char *end;

	address = simple_strtoul(str, &end, 0);
	if (*end != '\0') {
		printk("Bad address [%s]\n", str);
		return;
	}

	dump_slab_ptr = (struct kmem_cache *)address;
	if (!virt_addr_valid(dump_slab_ptr) ||
	    !PageSlab(virt_to_page(dump_slab_ptr))) {
		printk("Non-slab address [%s]\n", str);
		dump_slab_ptr = NULL;
		return;
	}

	printk(KERN_DEBUG "SLAB %p %s size %d objuse %d\n",
			dump_slab_ptr, dump_slab_ptr->name,
			dump_slab_ptr->buffer_size, dump_slab_ptr->objuse);

	dump_address = NULL;
	dump_offset = 0;
	dump_slab();
}
Example #12
/*!
******************************************************************************

 @Function				SECDEV_CpuVirtAddrToCpuPAddr

******************************************************************************/
IMG_PHYSADDR SECDEV_CpuVirtAddrToCpuPAddr(
	IMG_VOID *pvCpuKmAddr
)
{
	IMG_PHYSADDR ret = 0;
	if(virt_addr_valid(pvCpuKmAddr))
	{
		//direct mapping of kernel addresses.
		//this works for kmalloc.
		ret = virt_to_phys(pvCpuKmAddr);
	}
	else
	{
		//walk the page table. 
		//Works for ioremap, vmalloc, and kmalloc(GFP_DMA),
		//but not, for some reason, kmalloc(GFP_KERNEL)
		struct page * pg = vmalloc_to_page(pvCpuKmAddr);
		if(pg) 
		{
			ret = page_to_phys(pg);
		}
		else 
		{
			IMG_ASSERT(!"vmalloc_to_page failure");
		}
	}
	return ret;
}
Example #13
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
Example #14
static IMG_PHYSADDR CpuKmAddrToCpuPAddr(
    SYSMEM_Heap *  heap,
    IMG_VOID *     pvCpuKmAddr
)
{
    IMG_PHYSADDR ret = 0;

    if(virt_addr_valid(pvCpuKmAddr))
    {
        /* direct mapping of kernel addresses.
         * this works for kmalloc.
         */
        ret = virt_to_phys(pvCpuKmAddr);
    }
    else
    {
        /* walk the page table.
         * Works for ioremap, vmalloc, and kmalloc(GFP_DMA),
         * but not, for some reason, kmalloc(GFP_KERNEL)
         */
        struct page * pg = vmalloc_to_page(pvCpuKmAddr);
        if(pg) {
            ret = page_to_phys(pg);
        }
        else {
            IMG_ASSERT(!"vmalloc_to_page failure");
        }
    }

    IMG_ASSERT(ret != 0);

    return ret;
}
Example #15
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}
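The helper above captures the general rule: linear-map (lowmem) addresses translate with __pa(), while vmalloc addresses need vmalloc_to_page() plus the in-page offset. A small sketch exercising both paths; the buffer sizes and names are arbitrary.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void example_resolve_phys(void)
{
	void *lin = kmalloc(64, GFP_KERNEL);	/* linear-map address */
	void *vml = vmalloc(PAGE_SIZE);		/* vmalloc address */
	phys_addr_t pa;

	if (lin && virt_addr_valid(lin)) {
		pa = __pa(lin);
		pr_info("kmalloc buffer at %pa\n", &pa);
	}

	if (vml && is_vmalloc_addr(vml)) {
		pa = page_to_phys(vmalloc_to_page(vml)) + offset_in_page(vml);
		pr_info("vmalloc buffer at %pa\n", &pa);
	}

	kfree(lin);
	vfree(vml);
}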
Example #16
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
#endif

	dmac_map_area(kaddr, size, dir);

#ifdef CONFIG_OUTER_CACHE
	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
#endif
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
Example #17
inline void aee_print_bt(struct pt_regs *regs)
{
    int i;
    unsigned long high, bottom, fp;
    struct stackframe cur_frame;
    struct pt_regs *exp_regs;
    bottom = regs->ARM_sp;
    if (!virt_addr_valid(bottom)) {
        aee_nested_printf("invalid sp[%x]\n", regs);
        return;
    }
    high = ALIGN(bottom, THREAD_SIZE);
    cur_frame.lr = regs->ARM_lr;
    cur_frame.fp = regs->ARM_fp;
    cur_frame.pc = regs->ARM_pc;
    for (i = 0; i < AEE_MAX_EXCP_FRAME; i++) {
        fp = cur_frame.fp;
        if ((fp < (bottom + 12)) || ((fp + 4) >= (high + 8192))) {
            if (fp != 0)
                aee_nested_printf("fp(%x)", fp);
            break;
        }
        cur_frame.fp = *(unsigned long *)(fp - 12);
        cur_frame.lr = *(unsigned long *)(fp - 4);
        cur_frame.pc = *(unsigned long *)fp;
        if (!((cur_frame.lr >= (PAGE_OFFSET + THREAD_SIZE))
              && virt_addr_valid(cur_frame.lr)))
            break;
        if (in_exception_text(cur_frame.pc)) {
            exp_regs = (struct pt_regs *)(fp + 4);
            cur_frame.lr = exp_regs->ARM_pc;
        }
        aee_nested_printf("%08lx, ", cur_frame.lr);
    }
    aee_nested_printf("\n");
    return;
}
Example #18
int __nvmap_get_handle_param(struct nvmap_client *client,
			     struct nvmap_handle *h, u32 param, u64 *result)
{
	int err = 0;

	if (WARN_ON(!virt_addr_valid(h)))
		return -EINVAL;

	switch (param) {
	case NVMAP_HANDLE_PARAM_SIZE:
		*result = h->orig_size;
		break;
	case NVMAP_HANDLE_PARAM_ALIGNMENT:
		*result = h->align;
		break;
	case NVMAP_HANDLE_PARAM_BASE:
		if (!h->alloc || !atomic_read(&h->pin))
			*result = -EINVAL;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			*result = h->carveout->base;
			mutex_unlock(&h->lock);
		} else if (h->attachment->priv)
			*result = sg_dma_address(
				((struct sg_table *)h->attachment->priv)->sgl);
		else
			*result = -EINVAL;
		break;
	case NVMAP_HANDLE_PARAM_HEAP:
		if (!h->alloc)
			*result = 0;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			*result = nvmap_carveout_usage(client, h->carveout);
			mutex_unlock(&h->lock);
		} else
			*result = NVMAP_HEAP_IOVMM;
		break;
	case NVMAP_HANDLE_PARAM_KIND:
		*result = h->kind;
		break;
	case NVMAP_HANDLE_PARAM_COMPR:
		/* ignored, to be removed */
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
Example #19
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
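A hedged sketch of the allocation-side counterpart (an assumption modeled on snapshot.c, not a copy of it): a page destined for free_image_page() is marked forbidden when it is handed out, which is what the swsusp_unset_page_forbidden() above balances.

static void *example_get_image_page(gfp_t gfp_mask)
{
	void *res = (void *)get_zeroed_page(gfp_mask);

	/* mark the page so the hibernation core will not treat it as
	 * saveable data; free_image_page() clears this again */
	if (res)
		swsusp_set_page_forbidden(virt_to_page(res));

	return res;
}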
Example #20
static int sgl_fill_kernel_pages(struct page **pages, unsigned long kaddr,
			const unsigned int nr_pages, int rw)
{
	int i;

	/* Note: this supports lowmem pages only */
	if (!virt_addr_valid(kaddr))
		return -EINVAL;

	for (i = 0; i < nr_pages; i++)
		pages[i] = virt_to_page(kaddr + PAGE_SIZE * i);

	return nr_pages;
}
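A caller sketch (illustrative names, not from the original driver): the page count for a lowmem buffer follows from its offset within the first page, after which the page array can be filled in one call.

static int example_kernel_buf_to_pages(void *buf, size_t len, int rw,
				       struct page **pages)
{
	unsigned long kaddr = (unsigned long)buf;
	unsigned int nr_pages;

	/* number of pages spanned by [buf, buf + len) */
	nr_pages = (offset_in_page(kaddr) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return sgl_fill_kernel_pages(pages, kaddr, nr_pages, rw);
}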
Example #21
inline int aee_nested_save_stack(struct pt_regs *regs)
{
    int len = 0;
    if (!virt_addr_valid(regs->ARM_sp))
        return -1;
    aee_nested_printf("[%08lx %08lx]\n", regs->ARM_sp, regs->ARM_sp + 256);

    len = aee_dump_stack_top_binary(nested_panic_buf, sizeof(nested_panic_buf),
                                    regs->ARM_sp, regs->ARM_sp + 256);
    if (len > 0)
        aee_sram_fiq_save_bin(nested_panic_buf, len);
    else
        print_error_msg(len);
    return len;
}
Example #22
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	struct page *page;

	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	page = virt_to_page((void *)addr);
	if (put_page_testzero(page)) {
		int pages = (1 << order);
		homecache_change_page_home(page, order, initial_page_home());
		while (pages--)
			__free_page(page++);
	}
}
Example #23
/*!
******************************************************************************

 @Function                SECDEV_CpuVirtAddrToCpuPAddr

******************************************************************************/
IMG_PHYSADDR SECDEV_CpuVirtAddrToCpuPAddr(
	IMG_VOID *pvCpuKmAddr
)
{
#ifdef FPGA_BUS_MASTERING
    IMG_PHYSADDR ret = 0;

    if(virt_addr_valid(pvCpuKmAddr))
    {
        /* direct mapping of kernel addresses.
         * this works for kmalloc.
         */
        ret = virt_to_phys(pvCpuKmAddr);
    }
    else
    {
        /* walk the page table.
         * Works for ioremap, vmalloc, and kmalloc(GFP_DMA),
         * but not, for some reason, kmalloc(GFP_KERNEL)
         */
        struct page * pg = vmalloc_to_page(pvCpuKmAddr);
        if(pg) {
            ret = page_to_phys(pg);
        }
        else {
            IMG_ASSERT(!"vmalloc_to_page failure");
        }
    }

    return ret;
#else
	int i;
	IMG_UINTPTR uipOffset = 0;

	for(i = 0; i < PCI_MAX_REGIONS; i++)
	{
		if (((IMG_UINTPTR)pvCpuKmAddr >= (IMG_UINTPTR)gsPCIMem[i].pvKmAddr) &&
				((IMG_UINTPTR)pvCpuKmAddr < (IMG_UINTPTR)gsPCIMem[i].pvKmAddr + gsPCIMem[i].size))
		{
			uipOffset = (IMG_UINTPTR)pvCpuKmAddr - (IMG_UINTPTR)gsPCIMem[i].pvKmAddr;
			return gsPCIMem[i].addr + (IMG_PHYSADDR)uipOffset;
		}
	}

	return 0;
#endif
}
Example #24
/*
 * Return the shadow address for the given address. Returns NULL if the
 * address is not tracked.
 *
 * We need to be extremely careful not to follow any invalid pointers,
 * because this function can be called for *any* possible address.
 */
void *kmemcheck_shadow_lookup(unsigned long address)
{
	pte_t *pte;
	struct page *page;

	if (!virt_addr_valid(address))
		return NULL;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return NULL;

	page = virt_to_page(address);
	if (!page->shadow)
		return NULL;
	return page->shadow + (address & (PAGE_SIZE - 1));
}
Example #25
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
Example #26
int omapvout_mem_alloc(u32 size, u32 *phy_addr, u32 *virt_addr)
{
	int	order;
	u32	dss_page_addr;
	u32	dss_page_phy;
	u32	dss_page_virt;
	u32	used, alloc_end;
	struct page	*tmp_page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	dss_page_addr = __get_free_pages(GFP_KERNEL, order);
	if (!dss_page_addr) {
		printk(KERN_ERR "Failed to allocate pages !!!! \n");
		return -ENOMEM;
	}

	/*
	 *'alloc_pages' allocates pages in power of 2,
	 *so free the not needed pages
	 */
	split_page(virt_to_page(dss_page_addr), order);
	alloc_end = dss_page_addr + (PAGE_SIZE<<order);
	used = dss_page_addr + size;

	DBG("mem_alloc: dss_page_addr=0x%x, alloc_end=0x%x, used=0x%x\n"
		, dss_page_addr, alloc_end, used);
	DBG("mem_alloc: physical_start=0x%lx, order=0x%x, size=0x%x\n"
		, virt_to_phys((void *)dss_page_addr), order, size);

	while (used < alloc_end) {
		BUG_ON(!virt_addr_valid((void *)used));
		tmp_page = virt_to_page((void *)used);
		__free_page(tmp_page);
		used += PAGE_SIZE;
	}

	dss_page_phy = virt_to_phys((void *)dss_page_addr);
	dss_page_virt = (u32) ioremap_cached(dss_page_phy, size);

	*phy_addr = dss_page_phy;
	*virt_addr = dss_page_virt;

	return 0;
}
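The split_page() trick above (grab a power-of-two block, then give back the unused tail pages) is worth isolating; a minimal sketch of just that pattern follows. After split_page() every page is an independent order-0 page, so a buffer allocated this way must also be freed page by page. Mainline wraps the same idea as alloc_pages_exact()/free_pages_exact().

static void *example_alloc_exact(size_t size)
{
	unsigned int order = get_order(size);
	unsigned long addr = __get_free_pages(GFP_KERNEL, order);
	unsigned long used, end;

	if (!addr)
		return NULL;

	size = PAGE_ALIGN(size);
	split_page(virt_to_page((void *)addr), order);

	/* hand back the pages beyond the requested size */
	end = addr + (PAGE_SIZE << order);
	for (used = addr + size; used < end; used += PAGE_SIZE)
		__free_page(virt_to_page((void *)used));

	return (void *)addr;
}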
Example #27
void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
	unsigned long *fl_table;
	int i;

	fl_table = pt->fl_table;
	for (i = 0; i < NUM_FL_PTE; i++)
		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
			unsigned long addr = (unsigned long) __va(((fl_table[i]) &
						FL_BASE_MASK));
			dec_meminfo_total_pages_on(NR_IOMMU_PAGETABLES_PAGES,
					addr && virt_addr_valid((void *)addr));
			free_page(addr);
		}

	sub_meminfo_total_pages(NR_IOMMU_PAGETABLES_PAGES, 1 << get_order(SZ_16K));
	free_pages((unsigned long)fl_table, get_order(SZ_16K));
	pt->fl_table = 0;
}
Example #28
/*
 * map a kernel virtual address or kernel logical address to a phys address
 */
static inline u32 physical_address(u32 virt, int write)
{
    struct page *page;
    struct vm_area_struct *vm;
    struct mm_struct * mm = (virt >= TASK_SIZE)? &init_mm : current->mm;
    unsigned int vm_flags;
    unsigned int flags;

    /* kernel static-mapped address */
    DPRINTK(" get physical address: virt %x , write %d\n", virt, write);
    if (virt_addr_valid(virt)) 
    {
        return __pa((u32) virt);
    }
    if (virt >= (u32)high_memory)
	    return 0;
    
    /* 
    * Require read or write permissions.
    */
    vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);

    vm = find_extend_vma(mm, virt);
    if (!vm || (vm->vm_flags & (VM_IO | VM_PFNMAP))
            || !(vm_flags & vm->vm_flags))
    {
        return 0;
    }

    flags = FOLL_PTE_EXIST | FOLL_TOUCH;
    flags |= (write) ? FOLL_WRITE : 0;

    page = follow_page(vm, (u32) virt, flags);

    if (page && pfn_valid(page_to_pfn(page)))
    {
        return ((page_to_pfn(page) << PAGE_SHIFT) |
                       ((u32) virt & (PAGE_SIZE - 1)));
    }
    else /* page == NULL; anything else is rejected by follow_page()->vm_normal_page() */
    {
        return 0;
    }
}
Example #29
/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		free_pages((unsigned long)hyp_pgd, pgd_order);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}
Example #30
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}