Example #1
0
/*
 * Walk one frame-pointer-linked region of a kernel stack.
 *
 * Scans every word in the range accepted by valid_stack_ptr() and
 * reports each word that looks like a kernel text address through
 * ops->address().  A word sitting exactly one long above the current
 * frame pointer is the saved return address of a real frame: it is
 * reported as reliable (1) and bp advances to the next frame; any
 * other text-looking word is reported as a guess (0).
 *
 * Returns the final frame-pointer value reached.
 */
static unsigned long
walk_stack(struct thread_info *tinfo,
           unsigned long *stack, unsigned long bp,
           const struct stacktrace_ops *ops, void *data,
           unsigned long *end, int *graph)
{
    struct stack_frame *frame = (struct stack_frame *)bp;

    for (; valid_stack_ptr(tinfo, stack, sizeof(*stack), end); stack++) {
        unsigned long word = *stack;

        if (!is_kernel_text(word))
            continue;

        if ((unsigned long) stack != bp + sizeof(long)) {
            /* Text-looking word not at a frame boundary: guess. */
            ops->address(data, word, 0);
            continue;
        }

        /* Saved return address of a real frame: reliable entry. */
        ops->address(data, word, 1);
        frame = frame->next_frame;
        bp = (unsigned long) frame;
    }
    return bp;
}
Example #2
0
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;	/* next physical page frame to map */
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			/* Virtual address of the first page in this pmd range. */
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				/* Last byte covered by the large page. */
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				/*
				 * Fix: advance 'address' together with pfn so
				 * each pte's protection is chosen from the page
				 * it actually maps.  Previously the first
				 * address of the pmd range was tested for every
				 * pte, so text pages later in the range could
				 * lose (or non-text pages gain) EXEC rights.
				 */
				for (pte_ofs = 0;
				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}
Example #3
0
/*ARGSUSED*/
/*
 * Capture a kernel PC stack into pcstack[] (at most pcstack_limit
 * entries) and NULL-pad the remainder.  aframes/ignored are unused
 * here (ARGSUSED).
 *
 * Two compile-time strategies:
 *  - without HAVE_STACKTRACE_OPS: scan raw stack words and keep
 *    anything that looks like a kernel text address (false positives
 *    possible, per the comment below);
 *  - with HAVE_STACKTRACE_OPS: delegate to the kernel's dump_trace()
 *    walker, collecting entries via print_trace_ops into the g_*
 *    globals, serialized by dtrace_stack_mutex.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *ignored)
{
	int	depth;
#if !defined(HAVE_STACKTRACE_OPS)
	/***********************************************/
	/*   This  is  a basic stack walker - we dont  */
	/*   care  about  omit-frame-pointer,  and we  */
	/*   can  have  false positives. We also dont  */
	/*   handle  exception  stacks properly - but  */
	/*   this  is  for  older  kernels, where the  */
	/*   kernel  wont  help  us,  so they may not  */
	/*   have exception stacks anyhow.	       */
	/***********************************************/

	cpu_core_t	*this_cpu = cpu_get_this();
	struct pt_regs *regs = this_cpu->cpuc_regs;
	/*
	 * NOTE(review): &regs->r_rsp is computed before the NULL check
	 * below.  It is only an address calculation (no dereference),
	 * but confirm regs == NULL is really only reached via the
	 * fallback assignment that follows.
	 */
	uintptr_t *sp = (uintptr_t *) &regs->r_rsp;
	uintptr_t *spend;
	
	if (regs == NULL)
		sp = (uintptr_t *) &depth;

	/*
	 * assumes sp sits near the base of a THREAD_SIZE-byte stack —
	 * TODO confirm: no alignment mask is applied, so this bound may
	 * overshoot the actual top of the stack.
	 */
	spend = sp + THREAD_SIZE / sizeof(uintptr_t);

	/* Keep every stack word that looks like a kernel text address. */
	for (depth = 0; depth < pcstack_limit && sp < spend; ) {
		if (sp && is_kernel_text((unsigned long) *sp)) {
			pcstack[depth++] = *sp;
		}
		sp++;
	}
#else

	/* Serialize use of the g_* globals filled by the dump_trace callback. */
	dmutex_enter(&dtrace_stack_mutex);
	g_depth = 0;
	g_pcstack = pcstack;
	g_pcstack_limit = pcstack_limit;

	/* dump_trace() takes 5 or 6 arguments depending on kernel version. */
#if FUNC_DUMP_TRACE_ARGS == 6
	dump_trace(NULL, NULL, NULL, 0, &print_trace_ops, NULL);
#else
	dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL);
#endif
	depth = g_depth;
	dmutex_exit(&dtrace_stack_mutex);
#endif

	/* Pad the unused tail with NULL entries. */
	while (depth < pcstack_limit)
		pcstack[depth++] = (pc_t) NULL;
}
Example #4
0
/*
 * Point gdb's crash scope block at the block containing vaddr.
 * 'arg' is the user-supplied string form of the address, used only
 * in error messages.  Returns TRUE on success, FALSE if vaddr is
 * not kernel text or gdb cannot resolve a text block for it.
 */
int
gdb_set_crash_scope(ulong vaddr, char *arg)
{
        struct gnu_request request, *req = &request;
	char name[BUFSIZE];
	struct load_module *lm;

	if (!is_kernel_text(vaddr)) {
		error(INFO, "invalid text address: %s\n", arg);
		return FALSE;
	}

	/* For module text, make sure the module's debuginfo is loaded first. */
	if (module_symbol(vaddr, NULL, &lm, name, 0) &&
	    !(lm->mod_flags & MOD_LOAD_SYMS)) {
		error(INFO, "attempting to find/load \"%s\" module debuginfo\n", 
			lm->mod_name);
		if (!load_module_symbols_helper(lm->mod_name)) {
			error(INFO, "cannot find/load \"%s\" module debuginfo\n", 
				lm->mod_name);
			return FALSE;
		}
	}

	/* Hand the request to gdb. */
	req->command = GNU_SET_CRASH_BLOCK;
	req->addr = vaddr;
	req->addr2 = 0;
	req->flags = 0;
	gdb_command_funnel(req);    

	if (CRASHDEBUG(1))
		fprintf(fp, 
		    "gdb_set_crash_scope: %s  addr: %lx  block: %lx\n",
			req->flags & GNU_COMMAND_FAILED ? "FAILED" : "OK",  
			req->addr, req->addr2);

	if (!(req->flags & GNU_COMMAND_FAILED))
		return TRUE;

	error(INFO, 
		"gdb cannot find text block for address: %s\n", arg);
	return FALSE;
}
Example #5
0
/*
 * Resolve a kernel address to a symbol name.
 * - *modname is set to NULL when the address is in the kernel proper.
 * - The returned name (written into namebuf) is guaranteed valid until
 *   reschedule, even if it resides in a module; the same guarantee
 *   applies to modname.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	unsigned long pos;

	namebuf[0] = 0;
	namebuf[KSYM_NAME_LEN - 1] = 0;

	if (!is_kernel_text(addr)) {
		/* modules not yet supported in kallsyms */
		return NULL;
	}

	pos = get_symbol_pos(addr, symbolsize, offset);
	/* Expand the compressed symbol name into the caller's buffer. */
	kallsyms_expand_symbol(get_symbol_offset(pos), namebuf);
	if (modname)
		*modname = NULL;
	return namebuf;
}
Example #6
0
/*ARGSUSED*/
/*
 * Build a kernel PC stack in pcstack[] (at most pcstack_limit entries),
 * NULL-padding whatever is left.  aframes/ignored are unused (ARGSUSED).
 *
 * Without HAVE_STACKTRACE_OPS we walk the raw stack words ourselves,
 * daisy-chaining up to three stacks; with it, we delegate to the
 * kernel's dump_trace() and collect entries via print_trace_ops under
 * dtrace_stack_mutex.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
                  uint32_t *ignored)
{
    int	depth;
#if !defined(HAVE_STACKTRACE_OPS)
    int	lim;
    /***********************************************/
    /*   This  is  a basic stack walker - we dont  */
    /*   care  about  omit-frame-pointer,  and we  */
    /*   can  have  false positives. We also dont  */
    /*   handle  exception  stacks properly - but  */
    /*   this  is  for  older  kernels, where the  */
    /*   kernel  wont  help  us,  so they may not  */
    /*   have exception stacks anyhow.	       */
    /***********************************************/

    /***********************************************/
    /*   20121125 Lets use this always - it avoid  */
    /*   kernel  specific  issues in the official  */
    /*   stack  walker and will give us a vehicle  */
    /*   later  for adding reliable vs guess-work  */
    /*   stack entries.			       */
    /***********************************************/
    cpu_core_t	*this_cpu = cpu_get_this();
    struct pt_regs *regs = this_cpu->cpuc_regs;
    struct thread_info *context;
    uintptr_t *sp;
    uintptr_t *spend;

    /***********************************************/
    /*   For   syscalls,  we  will  have  a  null  */
    /*   cpuc_regs,  since  we dont intercept the  */
    /*   trap,   but   instead  intercept  the  C  */
    /*   syscall function.			       */
    /***********************************************/
    if (regs == NULL)
        sp = (uintptr_t *) &depth;
    else
        sp = (uintptr_t *) regs->r_rsp;

    /***********************************************/
    /*   Daisy  chain the interrupt and any other  */
    /*   stacks.  Limit  ourselves in case of bad  */
    /*   corruptions.			       */
    /***********************************************/
    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    depth = 0;
    /* Walk at most 3 chained stacks, bounded in case of corruption. */
    for (lim = 0; lim < 3 && depth < pcstack_limit; lim++) {
        int	ndepth = depth;
        uintptr_t *prev_esp;

        /* Round sp down/up to the THREAD_SIZE-aligned stack bounds. */
        context = (struct thread_info *) ((unsigned long) sp & (~(THREAD_SIZE - 1)));
        spend = (uintptr_t *) ((unsigned long) sp | (THREAD_SIZE - 1));
        for ( ; depth < pcstack_limit && sp < spend; sp++) {
            /* NOFAULT is set above, so a bad read raises this flag
               instead of oopsing; bail out of the walk entirely. */
            if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
                goto end_stack;
            /* Keep any word that looks like a kernel text address. */
            if (*sp && is_kernel_text((unsigned long) *sp)) {
                pcstack[depth++] = *sp;
            }
        }
        /* Stop when full, or when this stack contributed nothing new. */
        if (depth >= pcstack_limit || ndepth == depth)
            break;

        /*
         * NOTE(review): assumes the previous stack pointer is stored
         * immediately past the thread_info at the stack base — confirm
         * this layout for the kernels supported; also note prev_esp can
         * never actually be NULL here, so the check below is vestigial.
         */
        prev_esp = (uintptr_t *) ((char *) context + sizeof(struct thread_info));
        if ((sp = prev_esp) == NULL)
            break;
        /***********************************************/
        /*   Special signal to mark the IRQ stack.     */
        /***********************************************/
        if (depth < pcstack_limit) {
            pcstack[depth++] = 1;
        }
    }
end_stack:
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_FAULT);
#else

    /***********************************************/
    /*   I'm  a  little tired of the kernel dying  */
    /*   in  the  callback, so lets avoid relying  */
    /*   on the kernel stack walker.	       */
    /***********************************************/
    /* Serialize use of the g_* globals filled by the callback. */
    dmutex_enter(&dtrace_stack_mutex);
    g_depth = 0;
    g_pcstack = pcstack;
    g_pcstack_limit = pcstack_limit;

    /* dump_trace() takes 5 or 6 arguments depending on kernel version. */
#if FUNC_DUMP_TRACE_ARGS == 6
    dump_trace(NULL, NULL, NULL, 0, &print_trace_ops, NULL);
#else
    dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL);
#endif
    depth = g_depth;
    dmutex_exit(&dtrace_stack_mutex);
#endif

    /* Pad the unused tail with NULL entries. */
    while (depth < pcstack_limit)
        pcstack[depth++] = (pc_t) NULL;
}
Example #7
0
File: symbols.c Project: CPFL/xen
/*
 * An address counts as active kernel text if it lies in .text, or in
 * init text while the system has not yet reached SYS_STATE_active
 * (init text is only valid before that point).
 */
bool_t is_active_kernel_text(unsigned long addr)
{
    if ( is_kernel_text(addr) )
        return 1;
    return system_state < SYS_STATE_active && is_kernel_inittext(addr);
}
Example #8
0
File: mm.c Project: abligh/xen
/* Boot-time pagetable setup.
 * Changes here may need matching changes in head.S
 *
 * Relocates the running Xen image to xen_paddr, fixes up the copied
 * pagetables to point at the new physical location, switches TTBR0_EL2
 * to the relocated tables, then remaps Xen as 4k pages with per-section
 * protections and finally enables WXN.
 */
void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
{
    unsigned long dest_va;
    lpae_t pte, *p;
    int i;

    /* Map the destination in the boot misc area. */
    dest_va = BOOT_MISC_VIRT_START;
    pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT);
    write_pte(xen_second + second_table_offset(dest_va), pte);
    flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);

    /* Calculate virt-to-phys offset for the new location */
    phys_offset = xen_paddr - (unsigned long) _start;

    /* Copy the whole Xen image to its destination via the temporary map. */
    memcpy((void *) dest_va, _start, _end - _start);

    /* Beware!  Any state we modify between now and the PT switch may be
     * discarded when we switch over to the copy. */

    /* Update the copy of xen_pgtable to use the new paddrs */
    p = (void *) xen_pgtable + dest_va - (unsigned long) _start;
#ifdef CONFIG_ARM_64
    p[0].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
    p = (void *) xen_first + dest_va - (unsigned long) _start;
#endif
    for ( i = 0; i < 4; i++)
        p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;

    /* Same fixup for the copy of the second-level table. */
    p = (void *) xen_second + dest_va - (unsigned long) _start;
    if ( boot_phys_offset != 0 )
    {
        /* Remove the old identity mapping of the boot paddr */
        vaddr_t va = (vaddr_t)_start + boot_phys_offset;
        p[second_linear_offset(va)].bits = 0;
    }
    for ( i = 0; i < 4 * LPAE_ENTRIES; i++)
        if ( p[i].pt.valid )
            p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;

    /* Change pagetables to the copy in the relocated Xen */
    boot_ttbr = (uintptr_t) xen_pgtable + phys_offset;
    flush_xen_dcache(boot_ttbr);
    flush_xen_dcache_va_range((void*)dest_va, _end - _start);
    flush_xen_text_tlb();

    /* Install the relocated pagetables. */
    WRITE_SYSREG64(boot_ttbr, TTBR0_EL2);
    dsb();                         /* Ensure visibility of HTTBR update */
    flush_xen_text_tlb();

    /* Undo the temporary map */
    pte.bits = 0;
    write_pte(xen_second + second_table_offset(dest_va), pte);
    flush_xen_text_tlb();

    /* Link in the fixmap pagetable */
    pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset)
                           >> PAGE_SHIFT);
    pte.pt.table = 1;
    write_pte(xen_second + second_table_offset(FIXMAP_ADDR(0)), pte);
    /*
     * No flush required here. Individual flushes are done in
     * set_fixmap as entries are used.
     */

    /* Break up the Xen mapping into 4k pages and protect them separately. */
    for ( i = 0; i < LPAE_ENTRIES; i++ )
    {
        unsigned long mfn = paddr_to_pfn(xen_paddr) + i;
        unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
        if ( !is_kernel(va) )
            break;
        pte = mfn_to_xen_entry(mfn);
        pte.pt.table = 1; /* 4k mappings always have this bit set */
        if ( is_kernel_text(va) || is_kernel_inittext(va) )
        {
            /* Text: executable (xn clear) but read-only. */
            pte.pt.xn = 0;
            pte.pt.ro = 1;
        }
        if ( is_kernel_rodata(va) )
            pte.pt.ro = 1; /* rodata: read-only */
        write_pte(xen_xenmap + i, pte);
        /* No flush required here as page table is not hooked in yet. */
    }
    /* Hook the per-page Xen map into the second-level table. */
    pte = mfn_to_xen_entry((((unsigned long) xen_xenmap) + phys_offset)
                           >> PAGE_SHIFT);
    pte.pt.table = 1;
    write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
    /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */

    /* From now on, no mapping may be both writable and executable. */
    WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
    /* Flush everything after setting WXN bit. */
    flush_xen_text_tlb();
}
Example #9
0
/*
 * Debug aid: dump every field of a gnu_request to the crash console,
 * annotating known file pointers and decoding the flag bits.
 * 'in_gdb' selects the "GDB IN:" vs "GDB OUT:" prefix.  Suppressed
 * entirely while a KERNEL_DEBUG_QUERY is in flight.
 */
void
dump_gnu_request(struct gnu_request *req, int in_gdb)
{
	int others;
	char buf[BUFSIZE];

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	console("%scommand: %d (%s)\n", in_gdb ? "GDB IN: " : "GDB OUT: ", 
		req->command, gdb_command_string(req->command, buf, TRUE));
        console("buf: %lx ", req->buf);
        if (req->buf && ascii_string(req->buf))
                console(" \"%s\"", req->buf);
        console("\n");
        console("fp: %lx ", req->fp);

	/* Annotate fp when it matches one of the well-known pc streams. */
	if (req->fp == pc->nullfp)
		console("(pc->nullfp) ");
	if (req->fp == pc->stdpipe)
		console("(pc->stdpipe) ");
	if (req->fp == pc->pipe)
		console("(pc->pipe) ");
	if (req->fp == pc->ofile)
		console("(pc->ofile) ");
	if (req->fp == pc->ifile)
		console("(pc->ifile) ");
	if (req->fp == pc->ifile_pipe)
		console("(pc->ifile_pipe) ");
	if (req->fp == pc->ifile_ofile)
		console("(pc->ifile_ofile) ");
	if (req->fp == pc->tmpfile)
		console("(pc->tmpfile) ");
	if (req->fp == pc->saved_fp)
		console("(pc->saved_fp) ");
	if (req->fp == pc->tmp_fp)
		console("(pc->tmp_fp) ");

	/* Decode flag bits, '|'-separated. */
	console("flags: %lx  (", req->flags);
	others = 0;
	if (req->flags & GNU_PRINT_LINE_NUMBERS)
		console("%sGNU_PRINT_LINE_NUMBERS", others++ ? "|" : "");
	if (req->flags & GNU_FUNCTION_ONLY)
                console("%sGNU_FUNCTION_ONLY", others++ ? "|" : "");
        if (req->flags & GNU_PRINT_ENUMERATORS)
                console("%sGNU_PRINT_ENUMERATORS", others++ ? "|" : "");
        if (req->flags & GNU_RETURN_ON_ERROR)
                console("%sGNU_RETURN_ON_ERROR", others++ ? "|" : "");
        if (req->flags & GNU_FROM_TTY_OFF)
                console("%sGNU_FROM_TTY_OFF", others++ ? "|" : "");
        if (req->flags & GNU_NO_READMEM)
                console("%sGNU_NO_READMEM", others++ ? "|" : "");
        if (req->flags & GNU_VAR_LENGTH_TYPECODE)
                console("%sGNU_VAR_LENGTH_TYPECODE", others++ ? "|" : "");
	console(")\n");

        console("addr: %lx ", req->addr);
        console("addr2: %lx ", req->addr2);
        console("count: %ld\n", req->count);

	/* Only print name as a string when it is above the patch region
	   boundary, i.e. presumably a real pointer rather than a token. */
	if ((ulong)req->name > (ulong)PATCH_KERNEL_SYMBOLS_STOP) 
		console("name: \"%s\" ", req->name);
	else
		console("name: %lx ", (ulong)req->name);
	console("length: %ld ", req->length);
        console("typecode: %d\n", req->typecode);
	/* The field name changed across gdb versions. */
#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0)
	console("typename: %s\n", req->typename);
#else
	console("type_name: %s\n", req->type_name);
#endif
	console("target_typename: %s\n", req->target_typename);
	console("target_length: %ld ", req->target_length);
	console("target_typecode: %d ", req->target_typecode);
	console("is_typedef: %d ", req->is_typedef);
	console("member: \"%s\" ", req->member);
	console("member_offset: %ld\n", req->member_offset);
	console("member_length: %ld\n", req->member_length);
        console("member_typecode: %d\n", req->member_typecode);
	console("value: %lx ", req->value);
	console("tagname: \"%s\" ", req->tagname);
	console("pc: %lx  ", req->pc);
	if (is_kernel_text(req->pc))
		console("(%s)", value_to_symstr(req->pc, buf, 0));
	console("\n");
	console("sp: %lx ", req->sp);
	console("ra: %lx ", req->ra);
        console("frame: %ld ", req->frame);
	console("prevsp: %lx\n", req->prevsp);
	console("prevpc: %lx ", req->prevpc);
	console("lastsp: %lx ", req->lastsp);
        console("task: %lx ", req->task);
	console("debug: %lx\n", req->debug);
	console("\n");
}
Example #10
0
/**
 * Lookup the symbol name corresponding to a kernel address.
 *
 * On success the name is expanded into namebuf (which must be at least
 * KSYM_NAME_LEN bytes) and namebuf is returned; *symbolsize and *offset
 * are filled in when non-NULL.  Returns NULL for addresses outside the
 * recognized kernel text/init/extra-text ranges.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char *namebuf)
{
	unsigned long i, low, high, mid;

	/* This kernel should never have been booted. */
	BUG_ON(!kallsyms_addresses);

	/*
	 * Fix: terminate at the last valid index.  namebuf holds
	 * KSYM_NAME_LEN bytes, so namebuf[KSYM_NAME_LEN] = 0 was a
	 * one-byte out-of-bounds write.
	 */
	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if ((all_var && is_kernel(addr)) ||
	    (!all_var && (is_kernel_text(addr) || is_kernel_inittext(addr) ||
				is_kernel_extratext(addr)))) {
		unsigned long symbol_end = 0;

		/* do a binary search on the sorted kallsyms_addresses array */
		low = 0;
		high = kallsyms_num_syms;

		while (high - low > 1) {
			mid = (low + high) / 2;
			if (kallsyms_addresses[mid] <= addr)
				low = mid;
			else
				high = mid;
		}

		/* search for the first aliased symbol. Aliased symbols are
		   symbols with the same address */
		while (low && kallsyms_addresses[low - 1] ==
				kallsyms_addresses[low])
			--low;

		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(low), namebuf);

		/* Search for next non-aliased symbol */
		for (i = low + 1; i < kallsyms_num_syms; i++) {
			if (kallsyms_addresses[i] > kallsyms_addresses[low]) {
				symbol_end = kallsyms_addresses[i];
				break;
			}
		}

		/* if we found no next symbol, we use the end of the section */
		if (!symbol_end) {
			if (is_kernel_inittext(addr))
				symbol_end = (unsigned long)_einittext;
			else
				symbol_end = (all_var) ? (unsigned long)_end
				                       : (unsigned long)_etext;
		}

		if (symbolsize)
			*symbolsize = symbol_end - kallsyms_addresses[low];
		if (offset)
			*offset = addr - kallsyms_addresses[low];
		return namebuf;
	}

	return NULL;
}