Exemplo n.º 1
0
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (!oldval) {
		/*
		 * Nesting count was zero, so this is a real exit from idle:
		 * trace it and verify we are running in the idle task.
		 */
		RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval,
					    rcu_dynticks_nesting));
		if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
			struct task_struct *idle __maybe_unused =
				idle_task(smp_processor_id());

			RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
				  oldval, rcu_dynticks_nesting));
			ftrace_dump(DUMP_ALL);
			WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
				  current->pid, current->comm,
				  idle->pid, idle->comm); /* must be idle task! */
		}
		return;
	}
	/* Still nested inside idle: record the adjustment only. */
	RCU_TRACE(trace_rcu_dyntick(TPS("++="), oldval, rcu_dynticks_nesting));
}
Exemplo n.º 2
0
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	struct task_struct *idle;

	if (oldval != 0) {
		/* Nesting count still nonzero: note the increment and leave. */
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}

	/* Genuine exit from idle. */
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (is_idle_task(current))
		return;

	/* Running in a non-idle task: dump trace state and complain. */
	idle = idle_task(smp_processor_id());
	RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
		  oldval, rcu_dynticks_nesting));
	ftrace_dump(DUMP_ALL);
	WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
		  current->pid, current->comm,
		  idle->pid, idle->comm); /* must be idle task! */
}
Exemplo n.º 3
0
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	struct task_struct *idle;

	if (rcu_dynticks_nesting != 0) {
		/* Nesting remains: just log the decrement and return. */
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}

	/* Nesting hit zero: actually entering idle from RCU's viewpoint. */
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		/* Only the idle task may enter idle; report the violator. */
		idle = idle_task(smp_processor_id());
		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}
Exemplo n.º 4
0
/*
 * Common exit-from-idle work for tree RCU: increment ->dynticks so that it
 * becomes odd (odd is verified by the WARN_ON_ONCE below), bracketed by
 * full memory barriers so other CPUs see this CPU leave dyntick-idle mode
 * before any of its subsequent accesses.
 */
static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
{
	smp_mb__before_atomic_inc();  /* Order prior idle-period accesses before the inc. */
	atomic_inc(&rdtp->dynticks);
	/* ->dynticks must now be odd, i.e. CPU marked non-idle to RCU. */
	smp_mb__after_atomic_inc();  /* Order the inc before later RCU read-side accesses. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle(smp_processor_id());
	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		/* Exiting idle from a non-idle task is a caller bug: dump and warn. */
		trace_rcu_dyntick("Error on exit: not idle task",
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
Exemplo n.º 5
0
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		/* Still nested inside idle: trace and store the new depth. */
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	/* Nesting reaches zero: actually entering idle from RCU's viewpoint. */
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		/* NOTE(review): __maybe_unused presumably silences unused-var
		 * warnings in some config combinations — confirm. */
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(); /* implies rcu_bh_inc() */
	/* Compiler barrier: keep the quiescent-state report ordered before
	 * the nesting-count update below. */
	barrier();
	rcu_dynticks_nesting = newval;
}
Exemplo n.º 6
0
/* SysRq handler: dump the entire ftrace ring buffer to the console. */
static void sysrq_ftrace_dump(int key)
{
	ftrace_dump(DUMP_ALL);
}
/*
 * SysRq handler variant that also receives the originating tty (older
 * handler signature); the tty is unused — same action as above: dump the
 * entire ftrace ring buffer to the console.
 */
static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
	ftrace_dump(DUMP_ALL);
}
Exemplo n.º 8
0
/*
 * Write a panic dump to the eMMC crash partition: timestamp banner, the
 * console log, application threads, kernel threads, and finally a header
 * page describing the offsets/lengths of each section.  Best-effort: bails
 * out early if the eMMC is absent or the partition is already in use.
 */
static void apanic_mmc_logbuf_dump(void)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = (struct panic_header *) ctx->bounce;
	int console_offset = 0;
	int console_len = 0;
	int threads_offset = 0;
	int threads_len = 0;
	int app_threads_offset = 0;
	int app_threads_len = 0;
	int rc = 0;
	struct timespec now;
	struct timespec uptime;
	struct rtc_time rtc_timestamp;
	struct console *con;

	/* Need a valid device handle and panic ops before touching eMMC. */
	if (!ctx->hd || !ctx->mmc_panic_ops ||
	    !ctx->mmc_panic_ops->panic_probe)
		return;
	if (ctx->mmc_panic_ops->panic_probe(ctx->hd,
					    ctx->mmc_panic_ops->type)) {
		pr_err("apanic: choose to use mmc, "
		       "but eMMC card not detected\n");
		return;
	}
	/* Console section starts after the header region. */
	console_offset = 1024;

	/* Nonzero magic means a previous crash dump is still stored. */
	if (ctx->curr.magic) {
		pr_emerg("Crash partition in use!\n");
		return;
	}

	/*
	 * Add timestamp to displays current UTC time and uptime (in seconds).
	 */
	now = current_kernel_time();
	rtc_time_to_tm((unsigned long) now.tv_sec, &rtc_timestamp);
	do_posix_clock_monotonic_gettime(&uptime);
	bust_spinlocks(1);
	pr_emerg("Timestamp = %lu.%03lu\n",
	       (unsigned long) now.tv_sec,
	       (unsigned long) (now.tv_nsec / 1000000));
	pr_emerg("Current Time = "
	       "%02d-%02d %02d:%02d:%lu.%03lu, "
	       "Uptime = %lu.%03lu seconds\n",
	       rtc_timestamp.tm_mon + 1, rtc_timestamp.tm_mday,
	       rtc_timestamp.tm_hour, rtc_timestamp.tm_min,
	       (unsigned long) rtc_timestamp.tm_sec,
	       (unsigned long) (now.tv_nsec / 1000000),
	       (unsigned long) uptime.tv_sec,
	       (unsigned long) (uptime.tv_nsec / USEC_PER_SEC));
	bust_spinlocks(0);

	if (ctx->annotation)
		printk(KERN_EMERG "%s\n", ctx->annotation);

	/* Dumping can be slow; keep the hardware watchdog from firing. */
	touch_hw_watchdog();
	/*
	 * Write out the console
	 */
	console_len = apanic_write_console_mmc(console_offset);
	if (console_len < 0) {
		pr_emerg("Error writing console to panic log! (%d)\n",
				console_len);
		console_len = 0;
	}

	/*
	 * Write out all threads
	 */
	/* Align the next section to 1 KiB, never letting it land on 0. */
	app_threads_offset = (ALIGN(console_offset + console_len,
				1024) == 0) ? 1024 :
	    ALIGN(console_offset + console_len, 1024);

	log_buf_clear();

	/* Silence all consoles so thread dumps go only to the log buffer. */
	for (con = console_drivers; con; con = con->next)
		con->flags &= ~CON_ENABLED;

	ctx->buf_offset = app_threads_offset;
	ctx->written = app_threads_offset;
	start_apanic_threads = 1;
	/* Small trace buffers are cheap to include; dump ftrace too. */
	if (tracing_get_trace_buf_size() < (SZ_512K + 1))
		ftrace_dump(1);
	show_state_thread_filter(0, SHOW_APP_THREADS);
	ctx->buf_offset = ALIGN(ctx->written, 512);
	start_apanic_threads = 0;
	ctx->written += apanic_write_console_mmc(ctx->buf_offset);
	app_threads_len = ctx->written - app_threads_offset;

	touch_hw_watchdog();

	/* Repeat for kernel threads in a fresh log buffer. */
	log_buf_clear();
	threads_offset = ALIGN(ctx->written, 512);
	ctx->buf_offset = threads_offset;
	ctx->written = threads_offset;
	start_apanic_threads = 1;
	show_state_thread_filter(0, SHOW_KTHREADS);
	start_apanic_threads = 0;
	ctx->buf_offset = ALIGN(ctx->written, 512);
	ctx->written += apanic_write_console_mmc(ctx->buf_offset);
	threads_len = ctx->written - threads_offset + 512;

	touch_hw_watchdog();

	/* Re-enable the consoles disabled above. */
	for (con = console_drivers; con; con = con->next)
		con->flags |= CON_ENABLED;

	/*
	 * Finally write the panic header
	 */
	memset(ctx->bounce, 0, PAGE_SIZE);
	hdr->magic = PANIC_MAGIC;
	hdr->version = PHDR_VERSION;

	hdr->console_offset = console_offset;
	hdr->console_length = console_len;

	hdr->app_threads_offset = app_threads_offset;
	hdr->app_threads_length = app_threads_len;

	hdr->threads_offset = threads_offset;
	hdr->threads_length = threads_len;

	/* Header written last so a partial dump is never seen as valid. */
	rc = ctx->mmc_panic_ops->panic_write(ctx->hd, ctx->bounce, 0,
					     console_offset);
	if (rc <= 0) {
		pr_emerg("apanic: Header write failed (%d)\n", rc);
		return;
	}

	pr_emerg("apanic: Panic dump successfully written\n");
}