static int hotplug_rtb_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the logged value hold the online mask; the lower
	 * 4 bits hold the number of the cpu being changed. The change made
	 * by the current hotplug operation is applied to the mask here,
	 * even though it may not be reflected in cpu_online_mask yet.
	 *
	 * XXX: This design supports at most 16 cpus.
	 */
	unsigned long this_cpumask = CPUSET_OF(1 << (unsigned long)hcpu);
	unsigned long cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	unsigned long cpudata = CPU_OF((unsigned long)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
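For reference, the bit layout described in the comment could be produced by macros along the following lines. This is a hypothetical sketch; the real CPU_OF/CPUSET_OF definitions come from the MSM RTB headers and may differ:

/*
 * Hypothetical packing macros implied by the comment above
 * (assumption; not the actual header definitions).
 */
#define CPU_OF(n)	((n) & 0xF)		/* cpu number in bits [3:0] */
#define CPUSET_OF(m)	(((m) & 0xFFFF) << 4)	/* online mask in bits [19:4] */

With that layout, CPU 2 coming online while CPUs 0 and 1 are already up would log (CPU_OF(2) | CPUSET_OF(0x3)) | CPUSET_OF(1 << 2) = 0x32 | 0x40 = 0x72.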
Example #2
/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
#if defined(CONFIG_HTC_DEBUG_RTB)
	static int enable_logk_die = 1;
#endif
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

#if defined(CONFIG_HTC_DEBUG_RTB)
	if (enable_logk_die) {
		uncached_logk(LOGK_DIE, (void *)regs->pc);
		uncached_logk(LOGK_DIE, (void *)regs->regs[30]);
		uncached_logk(LOGK_DIE, (void *)addr);
		/* Disable RTB here to avoid weird recursive spinlock/printk behaviors */
		msm_rtb_disable();
		enable_logk_die = 0;
	}
#endif
	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
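For completeness, a notifier callback like hotplug_rtb_callback in Example #1 would typically be wired up through the pre-4.10 CPU notifier API, roughly as follows. This is a minimal sketch under that assumption; the registration site in the original kernel tree may differ:

static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};

static int __init init_hotplug_rtb(void)
{
	/* receive CPU_STARTING/CPU_DYING events (pre-4.10 notifier API) */
	return register_cpu_notifier(&hotplug_rtb_notifier);
}
early_initcall(init_hotplug_rtb);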