Example No. 1
void msm7k_fiq_handler(void)
{
	struct pt_regs ctx_regs;
	static cpumask_t fiq_cpu_mask;
	int this_cpu;
	unsigned long msm_fiq_flags;

	spin_lock_irqsave(&msm_fiq_lock, msm_fiq_flags);
	this_cpu = smp_processor_id();

	pr_info("%s: Fiq is received on CPU%d\n", __func__, this_cpu);
	fiq_counter += 1;

	/*
	 * Reconstruct the interrupted context from the banked registers
	 * saved by the FIQ entry code: the FIQ-mode LR holds the preempted
	 * PC, while SVC-mode r13/r14 give the kernel SP/LR.
	 */
	ctx_regs.ARM_pc = msm_dump_cpu_ctx[this_cpu].fiq_r14;
	ctx_regs.ARM_lr = msm_dump_cpu_ctx[this_cpu].svc_r14;
	ctx_regs.ARM_sp = msm_dump_cpu_ctx[this_cpu].svc_r13;
	ctx_regs.ARM_fp = msm_dump_cpu_ctx[this_cpu].usr_r11; /* r11 is shared between usr and svc modes */
	unwind_backtrace(&ctx_regs, current);

	/*
	 * On the first FIQ, fan it out to every other online CPU so they
	 * dump their contexts as well.
	 */
	if (fiq_counter == 1 && (cpu_is_msm8625() || cpu_is_msm8625q())) {
		cpumask_copy(&fiq_cpu_mask, cpu_online_mask);
		cpu_clear(this_cpu, fiq_cpu_mask);
		gic_raise_secure_softirq(&fiq_cpu_mask, GIC_SECURE_SOFT_IRQ);
	}

	flush_cache_all();
	outer_flush_all();
	spin_unlock_irqrestore(&msm_fiq_lock, msm_fiq_flags);
}
Example No. 2
void
cf (int i)
{
  struct trace_arg arg = { .size = 100, .cnt = -1 };
  void *handle;
  _Unwind_Reason_Code (*unwind_backtrace) (_Unwind_Trace_Fn, void *);

  if (i != othervar || thr != 94)
    {
      printf ("i %d thr %d\n", i, thr);
      exit (1);
    }

  /* Test that the callback passed to _Unwind_Backtrace is not called
     infinitely many times.  See glibc Bug 18508 and GCC Bug 66303
     ("runtime.Caller() returns infinitely deep stack frames on s390x").
     The Go runtime calls backtrace_full() in
     <gcc-src>/libbacktrace/backtrace.c, which uses _Unwind_Backtrace().  */
  handle = dlopen (LIBGCC_S_SO, RTLD_LAZY);
  if (handle != NULL)
    {
      unwind_backtrace = dlsym (handle, "_Unwind_Backtrace");
      if (unwind_backtrace != NULL)
	{
	  unwind_backtrace (backtrace_helper, &arg);
	  assert (arg.cnt != -1 && arg.cnt < 100);
	}
      dlclose (handle);
    }

  /* Since uc_link below has been set to NULL, setcontext is supposed to
     terminate the process normally after this function returns.  */
}

int
do_test (void)
{
  if (getcontext (&ucp) != 0)
    {
      if (errno == ENOSYS)
	{
	  puts ("context handling not supported");
	  return 0;
	}

      puts ("getcontext failed");
      return 1;
    }
  thr = 94;
  ucp.uc_link = NULL;
  ucp.uc_stack.ss_sp = st1;
  ucp.uc_stack.ss_size = sizeof st1;
  makecontext (&ucp, (void (*) (void)) cf, 1, somevar - 2);
  if (setcontext (&ucp) != 0)
    {
      puts ("setcontext failed");
      return 1;
    }
  return 2;
}
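
The test above uses backtrace_helper and struct trace_arg without showing them in this excerpt. A minimal sketch consistent with how cf uses them (cnt starts at -1 and the assert requires it to stay below size) could look like the following; the field set is an assumption based on the initializer in cf:

#include <unwind.h>

struct trace_arg
{
  int size;	/* Frame budget.  */
  int cnt;	/* Frames seen so far; starts at -1.  */
};

static _Unwind_Reason_Code
backtrace_helper (struct _Unwind_Context *ctx, void *a)
{
  struct trace_arg *arg = a;

  /* Stop once the budget is exhausted; an unwinder with the bug under
     test would otherwise keep calling this callback forever.  */
  if (++arg->cnt == arg->size)
    return _URC_END_OF_STACK;
  return _URC_NO_REASON;
}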
Example No. 3
void CallStack::update(int32_t ignoreDepth, int32_t maxDepth) {
    if (maxDepth > MAX_DEPTH) {
        maxDepth = MAX_DEPTH;
    }
    ssize_t count = unwind_backtrace(mStack, ignoreDepth + 1, maxDepth);
    mCount = count > 0 ? count : 0;
}
Example No. 4
/* Called from the FIQ asm handler */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs context_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	flush_cache_all();
	outer_flush_all();
 pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x", __func__, msm_dump_cpu_ctx.usr_r0);
	pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x usr_r1:0x%x usr_r2:0x%x usr_r3:0x%x usr_r4:0x%x usr_r5:0x%x usr_r6:0x%x usr_r7:0x%x usr_r8:0x%x usr_r9:0x%x usr_r10:0x%x usr_r11:0x%x usr_r12:0x%x usr_r13:0x%x usr_r14:0x%x irq_spsr:0x%x irq_r13:0x%x irq_r14:0x%x svc_spsr:0x%x svc_r13:0x%x svc_r14:0x%x abt_spsr:0x%x abt_r13:0x%x abt_r14:0x%x und_spsr:0x%x und_r13:0x%x und_r14:0x%x fiq_spsr:0x%x fiq_r8:0x%x fiq_r9:0x%x fiq_r10:0x%x fiq_r11:0x%x fiq_r12:0x%x fiq_r13:0x%x fiq_r14:0x%x\n",__func__, msm_dump_cpu_ctx.usr_r0,msm_dump_cpu_ctx.usr_r1,msm_dump_cpu_ctx.usr_r2,msm_dump_cpu_ctx.usr_r3, msm_dump_cpu_ctx.usr_r4, msm_dump_cpu_ctx.usr_r5, msm_dump_cpu_ctx.usr_r6, msm_dump_cpu_ctx.usr_r7, msm_dump_cpu_ctx.usr_r8, msm_dump_cpu_ctx.usr_r9, msm_dump_cpu_ctx.usr_r10, msm_dump_cpu_ctx.usr_r11, msm_dump_cpu_ctx.usr_r12, msm_dump_cpu_ctx.usr_r13, msm_dump_cpu_ctx.usr_r14, msm_dump_cpu_ctx.irq_spsr, msm_dump_cpu_ctx.irq_r13, msm_dump_cpu_ctx.irq_r14, msm_dump_cpu_ctx.svc_spsr, msm_dump_cpu_ctx.svc_r13, msm_dump_cpu_ctx.svc_r14, msm_dump_cpu_ctx.abt_spsr,msm_dump_cpu_ctx.abt_r13, msm_dump_cpu_ctx.abt_r14, msm_dump_cpu_ctx.und_spsr,msm_dump_cpu_ctx.und_r13, msm_dump_cpu_ctx.und_r14, msm_dump_cpu_ctx.fiq_spsr,msm_dump_cpu_ctx.fiq_r8, msm_dump_cpu_ctx.fiq_r9, msm_dump_cpu_ctx.fiq_r10, msm_dump_cpu_ctx.fiq_r11, msm_dump_cpu_ctx.fiq_r12, msm_dump_cpu_ctx.fiq_r13, msm_dump_cpu_ctx.fiq_r14);
	context_regs.ARM_sp = msm_dump_cpu_ctx.svc_r13;
	context_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	context_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11; /* r11 is shared between usr and svc modes */
	/*
	 * The FIQ-mode LR holds the preempted PC; using svc_r14 here would
	 * merely repeat ARM_lr.
	 */
	context_regs.ARM_pc = msm_dump_cpu_ctx.fiq_r14;
	unwind_backtrace(&context_regs, current);
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

}
Example No. 5
/*
 * Perform stack unwinding using _Unwind_Backtrace.
 *
 * A user application that wants backtraces needs to be compiled with
 * -fexceptions so that unwind tables are generated, and linked with
 * -rdynamic so that full symbol names can be printed.
 */
int backtrace (void **array, int size)
{
	struct trace_arg arg = { .array = array, .size = size, .cnt = -1 };

	if (unwind_backtrace == NULL)
		backtrace_init();

	if (size >= 1)
		unwind_backtrace (backtrace_helper, &arg);

	return arg.cnt != -1 ? arg.cnt : 0;
}
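
A short usage sketch for the function above (a hypothetical caller; backtrace_symbols is the usual companion for mapping the returned addresses to names, assuming it is available alongside):

/* Hypothetical caller: compile with -fexceptions and link with
   -rdynamic so unwind tables exist and symbol names resolve.  */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void dump_current_stack (void)
{
	void *frames[64];
	int n = backtrace (frames, 64);
	char **names = backtrace_symbols (frames, n);

	if (names != NULL) {
		for (int i = 0; i < n; i++)
			printf ("#%d %s\n", i, names[i]);
		free (names);
	}
}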
Example No. 6
/* Called from the FIQ asm handler */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs ctx_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	ctx_regs.ARM_pc = msm_dump_cpu_ctx.fiq_r14;
	ctx_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	ctx_regs.ARM_sp = msm_dump_cpu_ctx.svc_r13;
	ctx_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11;

#ifdef CONFIG_SEC_DEBUG
	do {
		extern void sec_save_final_context(void);
		sec_save_final_context();
	} while (0);
#endif

	unwind_backtrace(&ctx_regs, current);
#ifdef CONFIG_SMP
	smp_send_all_cpu_backtrace();
#endif

	flush_cache_all();
	outer_flush_all();
}
Example No. 7
ssize_t unwind_backtrace_thread(pid_t tid, backtrace_frame_t* backtrace,
        size_t ignore_depth, size_t max_depth) {
    if (tid == gettid()) {
        return unwind_backtrace(backtrace, ignore_depth + 1, max_depth);
    }

    ALOGV("Unwinding thread %d from thread %d.", tid, gettid());

    // TODO: there's no tgkill(2) on Mac OS, so we'd either need the
    // mach_port_t or the pthread_t rather than the tid.
#if defined(CORKSCREW_HAVE_ARCH) && !defined(__APPLE__)
    struct sigaction act;
    struct sigaction oact;
    memset(&act, 0, sizeof(act));
    act.sa_sigaction = unwind_backtrace_thread_signal_handler;
    act.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
    sigemptyset(&act.sa_mask);

    pthread_mutex_lock(&g_unwind_signal_mutex);
    map_info_t* milist = acquire_my_map_info_list();

    ssize_t frames = -1;
    if (!sigaction(SIGURG, &act, &oact)) {
        g_unwind_signal_state.map_info_list = milist;
        g_unwind_signal_state.backtrace = backtrace;
        g_unwind_signal_state.ignore_depth = ignore_depth;
        g_unwind_signal_state.max_depth = max_depth;
        g_unwind_signal_state.returned_frames = 0;
        android_atomic_release_store(tid, &g_unwind_signal_state.tid_state);

        // Signal the specific thread that we want to dump.
        int32_t tid_state = tid;
        if (tgkill(getpid(), tid, SIGURG)) {
            ALOGV("Failed to send SIGURG to thread %d.", tid);
        } else {
            // Wait for the other thread to start dumping the stack, or time out.
            int wait_millis = 250;
            for (;;) {
                tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
                if (tid_state != tid) {
                    break;
                }
                if (wait_millis--) {
                    ALOGV("Waiting for thread %d to start dumping the stack...", tid);
                    usleep(1000);
                } else {
                    ALOGV("Timed out waiting for thread %d to start dumping the stack.", tid);
                    break;
                }
            }
        }

        // Try to cancel the dump if it has not started yet.
        if (tid_state == tid) {
            if (!android_atomic_acquire_cas(tid, STATE_CANCEL, &g_unwind_signal_state.tid_state)) {
                ALOGV("Canceled thread %d stack dump.", tid);
                tid_state = STATE_CANCEL;
            } else {
                tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
            }
        }

        // Wait indefinitely for the dump to finish or be canceled.
        // We cannot apply a timeout here because the other thread is accessing state that
        // is owned by this thread, such as milist.  It should not take very
        // long to take the dump once started.
        while (tid_state == STATE_DUMPING) {
            ALOGV("Waiting for thread %d to finish dumping the stack...", tid);
            usleep(1000);
            tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
        }

        if (tid_state == STATE_DONE) {
            frames = g_unwind_signal_state.returned_frames;
        }

        sigaction(SIGURG, &oact, NULL);
    }

    release_my_map_info_list(milist);
    pthread_mutex_unlock(&g_unwind_signal_mutex);
    return frames;
#else
    return -1;
#endif
}
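
The target thread's side of this handshake is the SIGURG handler installed above. A sketch of what it plausibly does, assuming the same g_unwind_signal_state, the STATE_* constants, and an arch-specific unwind helper as in libcorkscrew (the real handler may differ in detail):

/* Sketch of unwind_backtrace_thread_signal_handler; the helper
 * unwind_backtrace_signal_arch() and its exact signature are assumed. */
static void unwind_backtrace_thread_signal_handler(int signum, siginfo_t* siginfo, void* sigcontext) {
    // Claim the pending request by moving tid_state from our tid to
    // STATE_DUMPING; a failed CAS (non-zero return) means the request
    // was canceled or the signal was not meant for this thread.
    if (!android_atomic_acquire_cas(gettid(), STATE_DUMPING,
            &g_unwind_signal_state.tid_state)) {
        g_unwind_signal_state.returned_frames = unwind_backtrace_signal_arch(
                siginfo, sigcontext,
                g_unwind_signal_state.map_info_list,
                g_unwind_signal_state.backtrace,
                g_unwind_signal_state.ignore_depth,
                g_unwind_signal_state.max_depth);
        // Publish completion so the requesting thread stops polling.
        android_atomic_release_store(STATE_DONE, &g_unwind_signal_state.tid_state);
    }
}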