Example #1
0
void __cpuinit per_cpu_init(void)
{
	int cpu = smp_processor_id();
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	cnodeid_t cnode = get_compact_nodeid();
	struct hub_data *hub = hub_data(cnode);
	struct slice_data *si = hub->slice + slice;
	int i;

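	/*
	 * slice_map is shared per hub; if this slice's bit was already
	 * set, setup has been done before and we can return early.
	 */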
	if (test_and_set_bit(slice, &hub->slice_map))
		return;

	clear_c0_status(ST0_IM);

	per_hub_init(cnode);

	for (i = 0; i < LEVELS_PER_SLICE; i++)
		si->level_to_irq[i] = -1;

	/*
	 * We use this so we can find the local hub's data as fast as
	 * possible.
	 */
	cpu_data[cpu].data = si;

	cpu_time_init();
	install_ipi();

	/* Install our NMI handler if symmon hasn't installed one. */
	install_cpu_nmi_handler(cputoslice(cpu));

	set_c0_status(SRB_DEV0 | SRB_DEV1);
}
Example #2
0
void ip27_rt_timer_interrupt(void)
{
    int cpu = smp_processor_id();
    int cpuA = cputoslice(cpu) == 0;
    unsigned int irq = rt_timer_irq;

    irq_enter();
    write_seqlock(&xtime_lock);

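    /*
     * The RT counter keeps running while we program the compare
     * register, so retry until the new deadline is still ahead of it.
     */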
again:
    LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
    ct_cur[cpu] += CYCLES_PER_JIFFY;
    LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

    if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
        goto again;

    kstat_this_cpu.irqs[irq]++;		/* kstat only for bootcpu? */

    if (cpu == 0)
        do_timer(1);

    update_process_times(user_mode(get_irq_regs()));

    write_sequnlock(&xtime_lock);
    irq_exit();
}
Example #3
0
void __cpuinit per_cpu_init(void)
{
	int cpu = smp_processor_id();
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	cnodeid_t cnode = get_compact_nodeid();
	struct hub_data *hub = hub_data(cnode);
	struct slice_data *si = hub->slice + slice;
	int i;

	if (test_and_set_bit(slice, &hub->slice_map))
		return;

	clear_c0_status(ST0_IM);

	per_hub_init(cnode);

	for (i = 0; i < LEVELS_PER_SLICE; i++)
		si->level_to_irq[i] = -1;

	/*
	 * We use this so we can find the local hub's data as fast as
	 * possible.
	 */
	cpu_data[cpu].data = si;

	cpu_time_init();
	install_ipi();

	/* Install our NMI handler if symmon hasn't installed one. */
	install_cpu_nmi_handler(cputoslice(cpu));

	set_c0_status(SRB_DEV0 | SRB_DEV1);
}
Example #4
0
/*
 * Takes the PROM-assigned cpu id as its first argument and the
 * kernel-assigned cpu id as its second.
 */
static void alloc_cpupda(cpuid_t cpu, int cpunum)
{
	cnodeid_t node = get_cpu_cnode(cpu);
	nasid_t nasid = COMPACT_TO_NASID_NODEID(node);

	cputonasid(cpunum) = nasid;
	cpu_data[cpunum].p_nodeid = node;
	cputoslice(cpunum) = get_cpu_slice(cpu);
}
Example #5
0
/*
 * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
 * set sp to the kernel stack of the newly created idle process and gp to its
 * thread_info so that current_thread_info() will work.
 */
void __init prom_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long gp = (unsigned long) idle->thread_info;
	unsigned long sp = gp + THREAD_SIZE - 32;

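	/* The entry point is converted to an unmapped K0 address before being handed to the PROM launch interface. */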
	LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu),
		(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
		0, (void *) sp, (void *) gp);
}
Example #6
0
/*
 * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
 * set sp to the kernel stack of the newly created idle process and gp to its
 * thread_info so that current_thread_info() will work.
 */
static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long gp = (unsigned long)task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);

	LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
		(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
		0, (void *) sp, (void *) gp);
}
Example #7
0
static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	int slice = cputoslice(cpu);
	unsigned long cnt;

	cnt = LOCAL_HUB_L(PI_RT_COUNT);
	cnt += delta;
	LOCAL_HUB_S(PI_RT_COMPARE_A + PI_COUNT_OFFSET * slice, cnt);

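	/* If the counter has already passed the compare value, the event was missed. */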
	return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
}
Example #8
0
void __init per_cpu_init(void)
{
	int cpu = smp_processor_id();
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	cnodeid_t cnode = get_compact_nodeid();
	struct hub_data *hub = hub_data(cnode);
	struct slice_data *si = hub->slice + slice;
	int i;

	if (test_and_set_bit(slice, &hub->slice_map))
		return;

	clear_c0_status(ST0_IM);

	for (i = 0; i < LEVELS_PER_SLICE; i++)
		si->level_to_irq[i] = -1;

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= BASE_PCI_IRQ; i++) {
		__set_bit(i, si->irq_alloc_mask);
		LOCAL_HUB_S(PI_INT_PEND_MOD, i);
	}

	__set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
		__set_bit(i, si->irq_alloc_mask + 1);
		LOCAL_HUB_S(PI_INT_PEND_MOD, i);
	}

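	/* Reading back flushes the preceding hub writes (posted-write idiom). */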
	LOCAL_HUB_L(PI_INT_PEND0);

	/*
	 * We use this so we can find the local hub's data as fast as
	 * possible.
	 */
	cpu_data[cpu].data = si;

	cpu_time_init();
	install_ipi();

	/* Install our NMI handler if symmon hasn't installed one. */
	install_cpu_nmi_handler(cputoslice(cpu));

	set_c0_status(SRB_DEV0 | SRB_DEV1);

	per_hub_init(cnode);
}
Example #9
0
void rt_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int cpuA = ((cputoslice(cpu)) == 0);
	int irq = IP27_TIMER_IRQ;

	irq_enter(cpu, irq);
	write_lock(&xtime_lock);

again:
	LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
	ct_cur[cpu] += CYCLES_PER_JIFFY;
	LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

	if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
		goto again;

	kstat.irqs[cpu][irq]++;		/* kstat only for bootcpu? */

	if (cpu == 0)
		do_timer(regs);

#ifdef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif /* CONFIG_SMP */

	/*
	 * If we have an externally synchronized Linux clock, then update the
	 * RTC clock accordingly every ~11 minutes.  set_rtc_mmss() has to be
	 * called as close as possible to when a second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660) {
		if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		} else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		}
	}

	write_unlock(&xtime_lock);
	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #10
0
static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
{
	struct clock_event_device *cd = dev_id;
	unsigned int cpu = smp_processor_id();
	int slice = cputoslice(cpu);

	/* Ack the RT interrupt on this CPU's slice before running the event handler. */
	LOCAL_HUB_S(PI_RT_PEND_A + PI_COUNT_OFFSET * slice, 0);
	cd->event_handler(cd);

	return IRQ_HANDLED;
}
Example #11
0
void rt_timer_interrupt(struct pt_regs *regs)
{
    int cpu = smp_processor_id();
    int cpuA = ((cputoslice(cpu)) == 0);
    int irq = 9;				/* XXX Assign number */

    irq_enter();
    write_seqlock(&xtime_lock);

again:
    LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
    ct_cur[cpu] += CYCLES_PER_JIFFY;
    LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

    if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
        goto again;

    kstat_this_cpu.irqs[irq]++;		/* kstat only for bootcpu? */

    if (cpu == 0)
        do_timer(regs);

#ifdef CONFIG_SMP
    update_process_times(user_mode(regs));
#endif /* CONFIG_SMP */

    /*
     * If we have an externally synchronized Linux clock, then update the
     * RTC clock accordingly every ~11 minutes.  rtc_set_time() has to be
     * called as close as possible to when a second starts.
     */
    if ((time_status & STA_UNSYNC) == 0 &&
            xtime.tv_sec > last_rtc_update + 660 &&
            (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
            (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
        if (rtc_set_time(xtime.tv_sec) == 0) {
            last_rtc_update = xtime.tv_sec;
        } else {
            last_rtc_update = xtime.tv_sec - 600;
            /* do it again in 60 s */
        }
    }

    write_sequnlock(&xtime_lock);
    irq_exit();
}
Example #12
0
static int intr_connect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;

	set_bit(bit, si->irq_enable_mask);

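	/* Slice 0 of a node is wired to the A interrupt-mask registers, slice 1 to the B set. */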
	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}

	return 0;
}
Example #13
0
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

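	/* Route the interrupt to the first online CPU in the requested mask. */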
	cpu = cpumask_first_and(mask, cpu_online_mask);
	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(nasid, hd->bit);
}
Example #14
0
void core_send_ipi(int destid, unsigned int action)
{
	int irq;

	switch (action) {
	case SMP_RESCHEDULE_YOURSELF:
		irq = CPU_RESCHED_A_IRQ;
		break;
	case SMP_CALL_FUNCTION:
		irq = CPU_CALL_A_IRQ;
		break;
	default:
		panic("sendintr");
	}

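	/* The per-slice (A/B) variants occupy consecutive IRQ numbers, so the slice index selects the right one. */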
	irq += cputoslice(destid);

	/*
	 * Convert the compact hub number to the NASID to get the correct
	 * part of the address space.  Then set the interrupt bit associated
	 * with the CPU we want to send the interrupt to.
	 */
	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}
Example #15
0
void rt_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int cpuA = ((cputoslice(cpu)) == 0);
	int irq = 7;				/* XXX Assign number */

	write_lock(&xtime_lock);

again:
	LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
	ct_cur[cpu] += CYCLES_PER_JIFFY;
	LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

	if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
		goto again;

	kstat.irqs[cpu][irq]++;		/* kstat only for bootcpu? */

	if (cpu == 0)
		do_timer(regs);

#ifdef CONFIG_SMP
	{
		int user = user_mode(regs);

		/*
		 * update_process_times() expects us to have done irq_enter().
		 * Besides, if we don't, timer interrupts ignore the global
		 * interrupt lock, which is the WrongThing (tm) to do.
		 * Picked from i386 code.
		 */
		irq_enter(cpu, 0);
		update_process_times(user);
		irq_exit(cpu, 0);
	}
#endif /* CONFIG_SMP */
	
	/*
	 * If we have an externally synchronized Linux clock, then update the
	 * RTC clock accordingly every ~11 minutes.  set_rtc_mmss() has to be
	 * called as close as possible to when a second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660) {
		if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
				last_rtc_update = xtime.tv_sec;
			else    
				last_rtc_update = xtime.tv_sec - 600;
		} else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else    
				last_rtc_update = xtime.tv_sec - 600;
		}
	}

	write_unlock(&xtime_lock);

	if (softirq_pending(cpu))
		do_softirq();
}