Example no. 1
unsigned int test_builtin_frame_address() // CHECK: test_builtin_frame_address:
{
	return __builtin_frame_address(0);
	// CHECK: add_i sp, sp, -
	// CHECK: store_32 fp, 
	// CHECK: move fp, sp
	// CHECK: move s0, fp
}
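The test above only checks that level 0 lowers to the expected frame-pointer setup in the prologue. As a quick way to observe the builtin outside a compiler test, here is a minimal, hedged sketch (not taken from any of the listed projects) that prints the current and the caller's frame address; only level 0 is reliable in general, and non-zero levels require intact frame pointers:

#include <stdio.h>

static void __attribute__((noinline)) show_frames(void) {
	/* level 0: this function's frame; level 1: the caller's (main's) frame */
	printf("frame(0) = %p\n", __builtin_frame_address(0));
	printf("frame(1) = %p\n", __builtin_frame_address(1));
}

int main(void) {
	show_frames();
	return 0;
}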
Example no. 2
void
db_trace_self(void)
{
	db_addr_t addr;

	addr = (db_addr_t)__builtin_frame_address(1);
	db_backtrace(curthread, addr, -1);
}
Example no. 3
void
stack_save(struct stack *st)
{
	register_t frame;

	frame = (register_t)__builtin_frame_address(1);
	stack_capture(st, frame);
}
Example no. 4
void GetBacktrace( const void **buf, size_t size, const BacktraceContext *ctx )
{
	InitializeBacktrace();
	
	BacktraceContext CurrentCtx;
	if( ctx == NULL )
	{
		ctx = &CurrentCtx;

		CurrentCtx.ip = NULL;
		CurrentCtx.bp = __builtin_frame_address(0);
		CurrentCtx.sp = __builtin_frame_address(0);
		CurrentCtx.pid = GetCurrentThreadId();
	}


	do_backtrace( buf, size, ctx );
}
Example no. 5
void
stack_save(struct stack *st)
{
	u_int32_t *frame;

	frame = (u_int32_t *)__builtin_frame_address(0);
	stack_zero(st);
	stack_capture(st, frame);
}
Example no. 6
/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}
Example no. 7
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP

/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_START] */
#if 1 
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else // old
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_END] */

		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		BUG();

/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_START] */
#endif
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_END] */

#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example no. 8
void save_backtrace (void) {
	struct frame *frame = (struct frame *)__builtin_frame_address(0);
	unsigned int depth_index = 0;

	for (struct frame *fp = frame; (!(fp < frame)) && depth_index < BUFFER_DEPTH;
		fp = (struct frame *)((long) fp->fr_savfp)) {
		allocation_points[allocation_index][depth_index++] = fp->fr_savpc;
	}
	/* advance the ring-buffer slot once per captured backtrace */
	allocation_index = (allocation_index % (BUFFER_SIZE - 1)) + 1;
}
Example no. 9
word nested_sp(void)
{
# if defined(__GNUC__) && (__GNUC__ >= 4)
    return (word)__builtin_frame_address(0);
# else
    volatile word sp;
    sp = (word)(&sp);
    return sp;
# endif
}
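Both branches of nested_sp() only approximate the stack pointer: the builtin returns the frame base, while the fallback takes the address of a local, which sits a little below it on a downward-growing stack. A small, hedged sketch (purely illustrative, not from the original project) that makes the difference visible:

#include <stdio.h>
#include <stdint.h>

int main(void) {
	volatile int local;                        /* its address is close to the real SP */
	void *frame = __builtin_frame_address(0);  /* frame base, not the SP itself */

	printf("frame base : %p\n", frame);
	printf("local var  : %p\n", (void *)&local);
	printf("difference : %ld bytes\n",
	       (long)((intptr_t)frame - (intptr_t)&local));
	return 0;
}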
Example no. 10
  static void printStack(vector<const void *> *vek) {
    if (!__builtin_frame_address(N))
      return;
 
    if (const void * const p = __builtin_return_address(N)) {
      vek->push_back(p);
      // Because this is recursive(ish), we may have to go down the stack by 2.
      StackTracer<S, N + S>::printStack(vek);
    }
  }
Example no. 11
/*
 * This is only a toy implementation to generate a seemingly random 128-bit key
 * from sp and x30 values. A production system must re-implement this function
 * to generate keys from a reliable randomness source.
 */
uint64_t *plat_init_apiakey(void)
{
	uintptr_t return_addr = (uintptr_t)__builtin_return_address(0U);
	uintptr_t frame_addr = (uintptr_t)__builtin_frame_address(0U);

	plat_apiakey[0] = (return_addr << 13) ^ frame_addr;
	plat_apiakey[1] = (frame_addr << 15) ^ return_addr;

	return plat_apiakey;
}
Example no. 12
int middle(const char *outer_local) {
  const char *frame = __builtin_frame_address (0);
  int retval;

  fprintf(stderr, "middle: outer_local = %p, frame = %p\n", outer_local, frame);
  retval = inner(outer_local, frame);
  /* fprintf also disables tail call optimization. */
  fprintf(stderr, "middle: inner returned %d\n", retval);
  return retval != 0;
}
Example no. 13
void InitializeBacktrace()
{
	static bool bInitialized = false;
	if( bInitialized )
		return;
	bInitialized = true;

	/* We might have a different stack in the signal handler.  Record a pointer
	 * that lies in the real stack, so we can look it up later. */
	SavedStackPointer = __builtin_frame_address(0);
}
Example no. 14
void _start() {
  structors_array_t array;
  void *elfdata;

  array.preinit_array = &__PREINIT_ARRAY__;
  array.init_array =    &__INIT_ARRAY__;
  array.fini_array =    &__FINI_ARRAY__;

  elfdata = __builtin_frame_address(0) + sizeof(void *);
  __libc_init(elfdata, (void *) 0, &main, &array);
}
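Here `__builtin_frame_address(0) + sizeof(void *)` steps over the single word pushed by the compiler-generated prologue, recovering a pointer to the block the kernel placed on the initial stack (argc, argv, envp, auxv). A hedged sketch of how such a raw block is typically interpreted on Linux; the helper name and signature are illustrative, not Bionic's actual API:

#include <stdint.h>

/* Assumed initial-stack layout on Linux:
 *   [ argc ][ argv[0] .. argv[argc-1] ][ NULL ][ envp .. ][ NULL ][ auxv .. ]
 */
void parse_raw_args(void *raw_args, long *argc_out,
                    char ***argv_out, char ***envp_out) {
	uintptr_t *p = (uintptr_t *)raw_args;
	long argc = (long)*p++;
	char **argv = (char **)p;
	char **envp = argv + argc + 1;   /* skip argv[] and its NULL terminator */

	*argc_out = argc;
	*argv_out = argv;
	*envp_out = envp;
}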
Example no. 15
File: log.c Project: zhirsch/tacos
void panic(const char* w, const char* format, ...) {
  va_list ap;
  outputf("%-20s *** PANIC: ", w);
  va_start(ap, format);
  outputv(format, ap);
  va_end(ap);
  print_call_stack(0, (uint32_t)__builtin_frame_address(0));
  screen_panic();
  __asm__ __volatile__ ("cli; hlt");
  while (1) { }
}
Example no. 16
void bt ()
{
  void **fp = (void **) __builtin_frame_address (0);
  void *saved_pc = __builtin_return_address (0);
  void *saved_fp = __builtin_frame_address (1);
  int depth = 0;


  printf ("[%d] pc == %p fp == %p\n", depth++, saved_pc, saved_fp);
  fp = (void**)saved_fp;
  while (fp != NULL)
    {
      saved_fp = *fp;
      fp = (void**)saved_fp;
      if (fp == NULL || *fp == NULL)
        break;
      saved_pc = *(fp + 2);
      printf ("[%d] pc == %p fp == %p\n", depth++, saved_pc, saved_fp);
    }
}
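bt() hard-codes a particular frame record: the saved frame pointer is the first word at *fp and the return address sits two words above it. That layout is target-specific; as a purely illustrative view of the assumption (the struct and field names are not from the original source):

/* Illustrative only: the frame layout bt() assumes. */
struct assumed_frame {
	void *saved_fp;   /* *fp       : caller's frame pointer       */
	void *skipped;    /* *(fp + 1) : slot bt() does not read      */
	void *saved_pc;   /* *(fp + 2) : return address read by bt()  */
};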
Example no. 17
MOZ_NEVER_INLINE
static bool
GetStackAfterCurrentFrame(uint8_t** aOutTop, uint8_t** aOutBottom)
{
  mach_vm_address_t stackFrame =
    reinterpret_cast<mach_vm_address_t>(__builtin_frame_address(0));
  *aOutTop = reinterpret_cast<uint8_t*>(stackFrame);
  // Kernel code shows that stack is always a single region.
  *aOutBottom = reinterpret_cast<uint8_t*>(RegionContainingAddress(stackFrame));
  return *aOutBottom && (*aOutBottom < *aOutTop);
}
Example no. 18
/* Return the CPU struct which is at the high memory address of the stack.
 */
struct cpu_info *cpu_info(void)
{
#error "This is BROKEN! ARM stacks are currently not guaranteed to be " \
	"STACK_SIZE-aligned in any way. If you ever plan to revive this " \
	"feature, make sure you add the proper assertions " \
	"(and maybe consider revising the whole thing to work closer to what " \
	"arm64 is doing now)."
	uintptr_t addr = ALIGN((uintptr_t)__builtin_frame_address(0),
		CONFIG_STACK_SIZE);
	addr -= sizeof(struct cpu_info);
	return (void *)addr;
}
Example no. 19
void
db_trace_self(void)
{
	db_addr_t addr;

	addr = (db_addr_t)__builtin_frame_address(0);
	if (addr == 0) {
		db_printf("Null frame address\n");
		return;
	}
	db_backtrace(curthread, *(db_addr_t *)addr, -1);
}
Example no. 20
static struct pt_regs * unwind_get_regs(struct task_struct *tsk)
{
	struct stackframe frame;
	register unsigned long current_sp asm ("sp");
	int found = 0;
	//unsigned long  sc;

	if (!tsk)
		tsk = current;

	printk("tsk = %p, comm=%s, pid=%d, pgd=0x%p\n", tsk, tsk->comm, tsk->pid, tsk->mm->pgd);
	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)unwind_get_regs;
	} else {
		/* task blocked in __switch_to */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		/*
		 * The function calling __switch_to cannot be a leaf function
		 * so LR is recovered from the stack.
		 */
		frame.lr = 0;
		frame.pc = thread_saved_pc(tsk);
	}

	while (1) {
		int urc;
		//unsigned long where = frame.pc;

		urc = unwind_frame(&frame);
		if (urc < 0)
			break;
		//dump_backtrace_entry(where, frame.pc, frame.sp - 4);
		if (frame.pc == (unsigned long)ret_fast_syscall) {
			found = 1;
			break;
		}
	}
	if (!found)
		return NULL;
	#if 0
	//printk("FRAME:sp=0x%lx,pc=0x%lx,lr=0x%lx,fp=0x%lx\n" , frame.sp,frame.pc,frame.lr,frame.fp );
	//rk28_printk_mem((unsigned int*)(frame.sp-sizeof(struct pt_regs)),2*sizeof(struct pt_regs)/4+8, NULL );
	sc =*( (unsigned long*)(frame.sp-4));
	if( sc >= (unsigned long)&_text && sc < (unsigned long)&_end ){
	    print_symbol("sys call=%s\n",sc );
	}
	#endif
	return (struct pt_regs *)(frame.sp+8); // 8 for reg r4,r5 as fifth and sixth args.
}
Example no. 21
void _start() {
  structors_array_t array;
  array.preinit_array = &__PREINIT_ARRAY__;
  array.init_array = &__INIT_ARRAY__;
  array.fini_array = &__FINI_ARRAY__;

  void* raw_args = (void*) ((uintptr_t) __builtin_frame_address(0) + sizeof(void*));
#ifdef __x86_64__
  // 16-byte stack alignment is required by x86_64 ABI
  asm("andq  $~15, %rsp");
#endif
  __libc_init(raw_args, NULL, &main, &array);
}
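The inline asm realigns %rsp because the x86_64 ABI requires 16-byte stack alignment at call boundaries. A trivial, hedged helper (illustrative only, not part of the original source) showing the alignment condition that `andq $~15, %rsp` establishes:

#include <stdint.h>

/* Illustrative: true if a pointer satisfies x86_64's 16-byte alignment rule. */
int is_16_byte_aligned(const void *p) {
	return ((uintptr_t)p & 15u) == 0;
}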
Example no. 22
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {

/* CORE-HC-ANR_Kernel_Stack-00*[ */
/*#ifdef CONFIG_SMP*/
#if defined(CONFIG_SMP) && !defined(CONFIG_FIH_DUMP_KERNEL_STACK)
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		
		pr_info("save_stack_trace_tsk: %s[%d] %s[%d]\r\n",
			current->comm, 
			smp_processor_id(), 
			tsk->comm, 
			task_thread_info(tsk)->cpu);
		
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
/* CORE-HC-ANR_Kernel_Stack-00*] */
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example no. 23
void StackWalk(native_t * callstack, Context * context)
{
	void **frame = (void**) __builtin_frame_address(0);
	void **bp = (void**) (*frame);
	void *ip = frame[1];
	
	int i;
	for (i = 0; bp && ip && i < MaxCallStack; ++i)
	{
		callstack[i] = (native_t)ip;
		ip = bp[1];
		bp = (void**)(bp[0]);
	}
}
Example no. 24
GC_INNER ptr_t GC_approx_sp(void)
{
    volatile word sp;
#   if defined(CPPCHECK) || (__GNUC__ >= 4) /* GC_GNUC_PREREQ(4, 0) */
        /* TODO: Use GC_GNUC_PREREQ after fixing a bug in cppcheck. */
        sp = (word)__builtin_frame_address(0);
#   else
        sp = (word)&sp;
#   endif
                /* Also force stack to grow if necessary. Otherwise the */
                /* later accesses might cause the kernel to think we're */
                /* doing something wrong.                               */
    return((ptr_t)sp);
}
Example no. 25
int
dump_callstack(char* buf)
{
    int pos = 0;
    struct layout_t* lo = __builtin_frame_address(0);
    while (lo) {
	void* caller = lo->ret;
	buf[pos++] = ' ';
	pos += hexdump(&buf[pos], (char*) &caller, sizeof(caller));
	
	lo = (struct layout_t*) lo->next;
    }

    return pos;
}
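struct layout_t is not shown in this snippet; from the way dump_callstack() walks it, it must pair a link to the caller's frame with a saved return address. A hedged guess at its shape, matching the common "saved frame pointer, then return address" record (the real definition lives in the original project):

struct layout_t {
	struct layout_t *next;   /* saved frame pointer of the caller */
	void            *ret;    /* return address into the caller    */
};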
Example no. 26
void InitializeBacktrace()
{
	static bool bInitialized = false;
	
	if( bInitialized )
		return;
	vm_prot_t protection;
	if( !GetRegionInfo(mach_task_self(), __builtin_frame_address(0), g_StackPointer, protection) ||
	    protection != PROT_RW )
	{
		g_StackPointer = 0;
	}
	bInitialized = true;
}
Example no. 27
/*
 *  This method returns the base address and size of the area which
 *  is to be allocated between the RTEMS Workspace and the C Program
 *  Heap.
 */
void bsp_get_work_area(
  void      **work_area_start,
  uintptr_t  *work_area_size,
  void      **heap_start,
  uintptr_t  *heap_size
)
{
  uintptr_t work_size;
  uintptr_t spared;
  uintptr_t work_area;

  work_area = (uintptr_t)&__rtems_end +
              rtems_configuration_get_interrupt_stack_size();
  work_size = (uintptr_t)BSP_mem_size - work_area;

  spared = _bsp_sbrk_init( work_area, &work_size );

  *work_area_start = (void *)work_area;
  *work_area_size  = work_size;
  *heap_start      = BSP_BOOTCARD_HEAP_USES_WORK_AREA;
  *heap_size       = BSP_BOOTCARD_HEAP_SIZE_DEFAULT;

  /*
   *  The following may be helpful in debugging what goes wrong when
   *  you are allocating the Work Area in a new BSP.
   */
  #ifdef BSP_GET_WORK_AREA_DEBUG
    {
      void *sp = __builtin_frame_address(0);
      void *end = *work_area_start + *work_area_size;
      printk(
        "work_area_start = 0x%p\n"
        "work_area_size = %d 0x%08x\n"
        "end = 0x%p\n"
        "heap_start = 0x%p\n"
        "heap_size = %d\n"
        "current stack pointer = 0x%p%s\n",
        *work_area_start,
        *work_area_size,  /* decimal */
        *work_area_size,  /* hexadecimal */
        end,
        *heap_start,
        *heap_size,
        sp,
        ((sp >= *work_area_start && sp <= end) ? " OVERLAPS!" : "")
      );
    }
  #endif
}
Example no. 28
size_t
backtrace(void **trace, size_t len)
{
	const struct frameinfo *frame = __builtin_frame_address(0);
	void *stack = &stack;

	for (size_t i = 0; i < len; i++) {
		if ((const void *)frame BELOW stack)
			return i;
		trace[i] = frame->return_address;
		frame = frame->next;
	}

	return len;
}
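BELOW is a project-specific macro; given that it terminates the walk once a frame pointer falls past a local variable of backtrace() itself, it presumably abstracts the stack growth direction. A hedged sketch of one plausible definition, assuming a conventional downward-growing stack (this is a guess, not the project's actual macro):

/* Assumption: on a downward-growing stack, caller frames sit at higher
 * addresses, so a frame address lower than a fresh local is no longer valid. */
#define BELOW <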
Example no. 29
std::vector<frame>
backtrace() {
    std::vector<frame> frames;

    // Ideally we would use the current value of EIP here, but there's no
    // portable way to get that and there are never any GC roots in our C++
    // frames anyhow.
    frame f(__builtin_frame_address(0), (void (*)())NULL);

    while (f.ra != END_OF_STACK_RA) {
        frames.push_back(f);
        f.next();
    }
    return frames;
}
Example no. 30
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = 0;
#endif

	skip = !!regs;
	printk("Call trace:\n");
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}