Example #1
/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing for the
 * first time, then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}
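The comment above notes that the printing CPU pauses once here and once in oops_exit(). For reference, a minimal sketch of that matching exit path, based on mainline kernels of the same era (the kmsg_dump() call is an assumption about the exact version):

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.  The second do_oops_enter_exit() call is what makes
 * non-printing CPUs pause twice as long.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	kmsg_dump(KMSG_DUMP_OOPS);	/* assumption: present in this kernel version */
}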
static irqreturn_t mdm_errfatal(int irq, void *dev_id)
{
	extern unsigned int HTC_HSIC_PHY_FOOTPRINT;	//HTC

	pr_debug("%s: mdm got errfatal interrupt\n", __func__);

	#ifdef HTC_DEBUG_USB_PHY_POWER_ON_STUCK
	pr_info("HTC_HSIC_PHY_FOOTPRINT(%d)\n", HTC_HSIC_PHY_FOOTPRINT);	//HTC
	#endif //HTC_DEBUG_USB_PHY_POWER_ON_STUCK

	if (get_radio_flag() & 0x0001) {
		trace_printk("%s: mdm got errfatal interrupt\n", __func__);
		tracing_off();
	}

	if (mdm_drv->mdm_ready &&
		(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1) && !device_ehci_shutdown) {

		//++SSD_RIL: clear mdm_drv->mdm_ready before restarting the modem
		mdm_drv->mdm_ready = 0;
		//--SSD_RIL

		//HTC_Kris+++
		mdm_in_fatal_handler = true;
		mdm_is_alive = false;
		//HTC_Kris---

		pr_debug("%s: scheduling work now\n", __func__);
		queue_work(mdm_queue, &mdm_fatal_work);
	}
	return IRQ_HANDLED;
}
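For context, a handler like mdm_errfatal() is normally wired up at probe time with request_irq() against the modem's errfatal GPIO. A hypothetical hookup sketch (the struct type, the mdm2ap_errfatal_gpio field, and the trigger flag are assumptions, not taken from the driver above):

/* Hypothetical probe-time registration of mdm_errfatal(); names are illustrative. */
static int mdm_request_errfatal_irq(struct mdm_modem_drv *drv)
{
	int irq = gpio_to_irq(drv->mdm2ap_errfatal_gpio);	/* assumed field name */

	if (irq < 0)
		return irq;

	/* Fire when the modem asserts the errfatal line (edge type is an assumption). */
	return request_irq(irq, mdm_errfatal, IRQF_TRIGGER_RISING,
			   "mdm errfatal", NULL);
}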
Example #3
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
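As a usage sketch, the tracer self-tests typically start a tracer, stop tracing, check the buffer with trace_test_buffer(), and then restart. A condensed example modeled on the function-tracer self-test (the function name is hypothetical and field names such as tr->trace_buffer vary by kernel version):

/* Condensed, hypothetical self-test built around trace_test_buffer(). */
static int __init example_selftest(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	ret = tracer_init(trace, tr);	/* start the tracer under test */
	if (ret)
		return ret;

	msleep(100);			/* give it time to record entries */

	tracing_stop();			/* freeze the buffers for inspection */
	ret = trace_test_buffer(&tr->trace_buffer, &count);	/* field name varies */
	trace->reset(tr);
	tracing_start();

	if (!ret && !count)		/* buffer is sane but nothing was traced */
		ret = -1;

	return ret;
}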
Example #4
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * Disable the ring buffer while consuming entries so a broken tracer
	 * cannot keep refilling it and hard lock the box (see Example #3).
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}
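ftrace_traceoff_count() above defers the countdown to update_count(). A sketch of what that helper plausibly does, consistent with the open-coded count handling shown in Example #9 below (treat it as an assumption about this particular tree):

/* Sketch: decrement the per-probe count and report whether to still act. */
static int update_count(void **data)
{
	long *count = (long *)data;

	if (!*count)
		return 0;	/* count exhausted: do nothing */

	if (*count != -1)	/* -1 means "no limit" */
		(*count)--;

	return 1;
}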
Example #7
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
		__machine_kdump(data);
#endif
	__do_machine_kexec(data);
}
static void __machine_kexec(void *data)
{
	struct kimage *image = data;

	pfault_fini();
	tracing_off();
	debug_locks_off();
	if (image->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(__do_machine_kdump, data);
	} else {
		s390_reset_system(__do_machine_kexec, data);
	}
	disabled_wait((unsigned long) __builtin_return_address(0));
}
Example #9
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
Example #10
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(setup_regs, __do_machine_kdump, data);
	} else
#endif
		s390_reset_system(NULL, __do_machine_kexec, data);
	disabled_wait((unsigned long) __builtin_return_address(0));
}
Example #11
/*
 * Reset system and call either kdump or normal kexec
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;

	smp_send_stop();
	pfault_fini();
	tracing_off();
	debug_locks_off();
	if (image->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(__do_machine_kdump, image);
	} else {
		s390_reset_system(__do_machine_kexec, image);
	}
	disabled_wait((unsigned long) __builtin_return_address(0));
}
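For orientation, the generic kdump path that ends up calling an architecture's machine_kexec() looks roughly like this in mainline (heavily condensed, with locking and error handling omitted; treat it as a sketch rather than the exact caller of the s390 code above):

/* Condensed, hypothetical sketch of the generic crash entry into machine_kexec(). */
static void example_crash_kexec(struct pt_regs *regs)
{
	struct pt_regs fixed_regs;

	if (!kexec_crash_image)			/* no crash kernel loaded */
		return;

	crash_setup_regs(&fixed_regs, regs);	/* capture register state for the dump */
	crash_save_vmcoreinfo();
	machine_crash_shutdown(&fixed_regs);	/* quiesce other CPUs and devices */
	machine_kexec(kexec_crash_image);	/* does not return on success */
}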
Example #12
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int i, cpu;
	int trace_on = 0;
acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
	/*
	 * Make sure the above info reaches the primary CPU before
	 * our cpu_in_kgdb[] flag setting does:
	 */
	atomic_inc(&cpu_in_kgdb[cpu]);

	if (exception_level == 1)
		goto cpu_master_loop;

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
				break;
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!atomic_read(&passive_cpu_wait[cpu]))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			atomic_dec(&cpu_in_kgdb[cpu]);
			touch_softlockup_watchdog_sync();
			clocksource_touch_watchdog();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		touch_softlockup_watchdog_sync();
		clocksource_touch_watchdog();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	kgdb_disable_hw_debug(ks->linux_regs);

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step) {
		for (i = 0; i < NR_CPUS; i++)
			atomic_inc(&passive_cpu_wait[i]);
	}

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	for_each_online_cpu(i) {
		while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
			cpu_relax();
	}

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			dbg_cpu_switch(cpu, dbg_switch_cpu);
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	atomic_dec(&cpu_in_kgdb[ks->cpu]);

	if (!kgdb_single_step) {
		for (i = NR_CPUS-1; i >= 0; i--)
			atomic_dec(&passive_cpu_wait[i]);
		/*
		 * Wait till all the CPUs have quit from the debugger,
		 * but allow a CPU that hit an exception and is
		 * waiting to become the master to remain in the debug
		 * core.
		 */
		for_each_online_cpu(i) {
			while (kgdb_do_roundup &&
			       atomic_read(&cpu_in_kgdb[i]) &&
			       !(kgdb_info[i].exception_state &
				 DCPU_WANT_MASTER))
				cpu_relax();
		}
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (trace_on)
		tracing_on();
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}
Example #13
static long met_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* mtag_cmd_t cblk; */
	mtag_cmd_t *pUserCmd;
	int ret = 0;
	DEBF("mtag_ioctl cmd=%X, arg=%lX\n", cmd, arg);

	if (_IOC_TYPE(cmd) != MTAG_IOC_MAGIC)
		return -ENOTTY;

	/* Handle commands without copying data from user space */
	if (cmd == MTAG_CMD_ENABLE)
		return met_tag_enable_real((unsigned int)arg);
	else if (cmd == MTAG_CMD_DISABLE)
		return met_tag_disable_real((unsigned int)arg);
	else if (cmd == MTAG_CMD_REC_SET) {
		if (arg)
			tracing_on();
		else
			tracing_off();
		return 0;
	} else if (cmd == MTAG_CMD_DUMP_SIZE) {
		ret = met_set_dump_buffer_real((int)arg);
		return (long)ret;
	}
	/* Handle commands with user space data */
	if (_IOC_DIR(cmd) & _IOC_WRITE)
		ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
	if (ret)
		return -EFAULT;

	pUserCmd = (mtag_cmd_t *) arg;
#if 0
	__get_user(cblk.class_id, (unsigned int __user *)(&(pUserCmd->class_id)));
	__get_user(cblk.value, (unsigned int __user *)(&(pUserCmd->value)));
	__get_user(cblk.slen, (unsigned int __user *)(&(pUserCmd->slen)));
	ret = __copy_from_user(cblk.tname, (char __user *)(&(pUserCmd->tname)), cblk.slen);
	if (unlikely(ret)) {
		ERRF("Failed to __copy_from_user: ret=%d\n", ret);
		return -EFAULT;
	}

	switch (cmd) {
	case MTAG_CMD_START:
		ret = met_tag_start_real(cblk.class_id, (char *)cblk.tname);
		break;
	case MTAG_CMD_END:
		ret = met_tag_end_real(cblk.class_id, (char *)cblk.tname);
		break;
	case MTAG_CMD_ONESHOT:
		ret = met_tag_oneshot_real(cblk.class_id, (char *)cblk.tname, cblk.value);
		break;
	default:
		return -EINVAL;
	}
#else
	switch (cmd) {
	case MTAG_CMD_START:
		ret = met_tag_start_real(pUserCmd->class_id, pUserCmd->tname);
		break;
	case MTAG_CMD_END:
		ret = met_tag_end_real(pUserCmd->class_id, pUserCmd->tname);
		break;
	case MTAG_CMD_ONESHOT:
		ret = met_tag_oneshot_real(pUserCmd->class_id, pUserCmd->tname, pUserCmd->value);
		break;
	case MTAG_CMD_DUMP:
		ret = !access_ok(VERIFY_READ, (void __user *)pUserCmd->data, pUserCmd->size);
		if (ret)
			return -EFAULT;
		ret =
		    met_tag_dump_real(pUserCmd->class_id, pUserCmd->tname, pUserCmd->data,
				      pUserCmd->size);
		break;
	case MTAG_CMD_DUMP_SAVE:
		ret = met_save_dump_buffer_real(pUserCmd->tname);
		break;
	default:
		return -EINVAL;
	}
#endif
	return ret;
}
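From user space, the MTAG_CMD_REC_SET branch above can be exercised with a plain ioctl(). A hypothetical snippet (the /dev/mtag device path and the availability of the MTAG_* macros in an exported header are assumptions):

/* Hypothetical user-space toggle for ftrace recording via MTAG_CMD_REC_SET. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

int met_set_recording(int enable)
{
	int fd = open("/dev/mtag", O_RDWR);	/* device path is an assumption */
	int ret;

	if (fd < 0)
		return -1;

	/* arg != 0 maps to tracing_on(), arg == 0 to tracing_off() in met_ioctl(). */
	ret = ioctl(fd, MTAG_CMD_REC_SET, (unsigned long)enable);
	close(fd);
	return ret;
}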
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	#ifdef CONFIG_KGDB_KDB
	/* A panic was already forced in a previous KDB session, so skip this time. */
	if (force_panic)
		return NOTIFY_DONE;
	#endif

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	#ifdef CONFIG_KGDB_KDB
	/* If there was no user input, force a kernel panic here. */
	if (force_panic) {
		printk("KDB: Force Kernel Panic!\n");
		do { *(volatile int *)0 = 0; } while (1);	/* deliberate NULL write to force a panic */
	}
	#endif

	return kgdb_info[cpu].ret_state;
}
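Both kgdb_cpu_enter() variants are reached from the architecture trap hooks via kgdb_handle_exception(). A condensed sketch of that caller for the newer three-argument signature, with error paths and most field setup trimmed (the function name here is hypothetical; the real entry point lives in kernel/debug/debug_core.c):

/* Condensed sketch of the exception entry that feeds kgdb_cpu_enter(). */
static int example_kgdb_handle_exception(int evector, int signo, int ecode,
					 struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	ks->cpu		= raw_smp_processor_id();
	ks->ex_vector	= evector;
	ks->signo	= signo;
	ks->err_code	= ecode;
	ks->linux_regs	= regs;

	if (kgdb_reenter_check(ks))	/* nested exception inside the debugger */
		return 0;

	/* This CPU wants to become the master debug CPU. */
	return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
}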