Example #1
int platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	printk(KERN_INFO "Booting Core B.\n");

	spin_lock(&boot_lock);

	if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
		/* CoreB is already running; send an IPI to wake it up. */
		smp_send_reschedule(cpu);
	} else {
		/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
		bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
		SSYNC();
	}

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;
		udelay(100);
		barrier();
	}

	if (cpu_online(cpu)) {
		/* release the lock and let coreb run */
		spin_unlock(&boot_lock);
		return 0;
	} else
		panic("CPU%u: processor failed to boot\n", cpu);
}
Example #2
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back
	 */
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
Example #3
static void request_migration(edf_wm_task_t *et, int cpu_dst)
{
	unsigned long flags;
	resch_task_t *hp;

	INIT_LIST_HEAD(&et->migration_list);

	/* insert the task into the waiting list for the migration thread. */
	spin_lock_irqsave(&kthread[cpu_dst].lock, flags);
	list_add_tail(&et->migration_list, &kthread[cpu_dst].list);
	spin_unlock(&kthread[cpu_dst].lock);

	/* wake up the migration thread running on the destination CPU. */
	wake_up_process(kthread[cpu_dst].task);
	et->rt->task->state = TASK_UNINTERRUPTIBLE;
	set_tsk_need_resched(et->rt->task);
	local_irq_restore(flags);

	active_queue_lock(cpu_dst, &flags);
	hp = active_highest_prio_task(cpu_dst);
	if (hp) {
		set_tsk_need_resched(hp->task);
	}
	active_queue_unlock(cpu_dst, &flags);
	smp_send_reschedule(cpu_dst);
}
Example #4
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
Example #5
static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	write_pen_release(cpu_logical_map(cpu));

	smp_send_reschedule(cpu);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (pen_release == -1)
			break;
	}

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example #6
static int primary_cpu_enter_wfi(void *data)
{
#if 0
    int i = 0;
#endif
    unsigned int irqstat = 0;
    int cpu = *(int*)data;

    dcm_info("[%s]: thread idle-%d start\n", __func__, cpu);

#ifdef CONFIG_LOCAL_WDT
    mpcore_wk_wdt_stop();
#endif

    mt6577_irq_mask_all(&cpu_irq_mask[cpu]);
    mt6577_irq_unmask_for_sleep(MT6577_KP_IRQ_ID);

    dcm_info("[%s]: cpu%d waiting all threads done\n", __func__, cpu);
    complete(&each_thread_done[cpu]);
    wait_for_completion(&all_threads_done);
    local_irq_disable();
    dcm_info("[%s]: cpu%d before wfi\n", __func__, cpu);

#if 0
    do {
        go_to_idle();
        irqstat = DRV_Reg32(GIC_CPU_BASE + GIC_CPU_INTACK) & 0x3FF; 
        dcm_info("[%s]: cpu%d after wfi(irqstat:0x%x/%u)\n", __func__, cpu, irqstat, irqstat);
        if (irqstat == MT6577_KP_IRQ_ID) {
            break;
        }
    } while (1);
#else
    go_to_idle();
    irqstat = DRV_Reg32(GIC_CPU_BASE + GIC_CPU_INTACK) & 0x3FF; 
    dcm_info("[%s]: cpu%d after wfi(irqstat:0x%x/%u)\n", __func__, cpu, irqstat, irqstat);
#endif

    local_irq_enable();

    spin_lock(&factory_lock);
    mt6577_irq_mask_restore(&cpu_irq_mask[cpu]);
    spin_unlock(&factory_lock);

#if 0
    for (i = 1; i < nr_cpu_ids; i++) {
        wake_flag[i] = 1;
        smp_send_reschedule(i);
    }
#endif
    
#if defined(CONFIG_MTK_LEDS)
    mt65xx_leds_brightness_set(MT65XX_LED_TYPE_LCD, LED_FULL);
#endif

    dcm_info("[%s]: thread idle-%d end\n", __func__, cpu);
    return 0;
}
Example #7
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long boot_entry;
	void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
	void *boot_iram_base;
	unsigned int val;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* boot entry is at the last 4K iRAM, from 0x93f000 */
	boot_entry = MX6Q_IRAM_BASE_ADDR + MX6Q_IRAM_SIZE;
	boot_iram_base = (void *)ioremap(boot_entry, SZ_4K);
	memcpy((void *)boot_iram_base, mx6_secondary_startup, SZ_1K);

	/* set entry point for cpu1-cpu3 */
	writel(boot_entry, src_base + SRC_GPR1_OFFSET + 4 * 2 * cpu);
	writel(virt_to_phys(mx6_secondary_startup),
			src_base + SRC_GPR1_OFFSET + 4 * 2 * cpu + 4);

	smp_wmb();
	dsb();
	flush_cache_all();

	/* reset cpu<n> */
	val = readl(src_base + SRC_SCR_OFFSET);
	val |= 1 << (BP_SRC_SCR_CORE0_RST + cpu);
	val |= 1 << (BP_SRC_SCR_CORES_DBG_RST + cpu);
	writel(val, src_base + SRC_SCR_OFFSET);

	val = jiffies;
	/* wait for cpu<n> to boot up and clear boot_entry; timeout is 500ms */
	while (__raw_readl(src_base + SRC_GPR1_OFFSET + 4 * 2 * cpu) != 0) {
		if (time_after(jiffies, (unsigned long)(val + HZ / 2))) {
			printk(KERN_WARNING "cpu %d: boot up failed!\n", cpu);
			break;
		}
	}

	/* let cpu<n> out of the loop so it calls the secondary_startup function */
	writel(0, src_base + SRC_GPR1_OFFSET + 4 * 2 * cpu + 4);
	smp_send_reschedule(cpu);

	/* unmap iram base */
	iounmap(boot_iram_base);
	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example #8
static inline void resched_task(task_t *p)
{
#ifdef CONFIG_SMP
	preempt_disable();
	if (/* conditions elided */ && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
	preempt_enable();
#else
	set_tsk_need_resched(p);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	me = get_cpu();
	if (waitqueue_active(vcpu->arch.wqp)) {
		wake_up_interruptible(vcpu->arch.wqp);
		vcpu->stat.halt_wakeup++;
	} else if (cpu != me && cpu != -1) {
		smp_send_reschedule(vcpu->cpu);
	}
	put_cpu();
}
Example #10
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	} else if (vcpu->cpu != -1) {
		smp_send_reschedule(vcpu->cpu);
	}

	return 0;
}
Example #11
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;

	set_need_resched();
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
Example #12
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
Example #13
/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of offlining.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline, it is in a quiescent state.  We can
	 * trust its state not to change because interrupts are disabled.
	 */
	if (cpu_is_offline(rdp->cpu)) {
		rdp->offline_fqs++;
		return 1;
	}

	/* The CPU is online, so send it a reschedule IPI. */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}
Example #14
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/* If the CPU is offline, it is in a quiescent state. */
	if (cpu_is_offline(rdp->cpu)) {
		rdp->offline_fqs++;
		return 1;
	}

	/* If preemptable RCU, no point in sending a reschedule IPI. */
	if (rdp->preemptable)
		return 0;

	/* The CPU is online, so send it a reschedule IPI. */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}
Example #15
void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
Example #16
int __cpuinit meson_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	printk("write pen_release: %d\n", cpu_logical_map(cpu));
	write_pen_release(cpu_logical_map(cpu));

#ifndef CONFIG_MESON_TRUSTZONE
	check_and_rewrite_cpu_entry();
	meson_set_cpu_power_ctrl(cpu, 1);
#endif
	meson_secondary_set(cpu);
	dsb_sev();

	smp_send_reschedule(cpu);
	timeout = jiffies + (10 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;
		udelay(10);
	}

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
	return pen_release != -1 ? -ENOSYS : 0;
}
Example #17
static void check_preempt(struct task_struct* t)
{
	int cpu = NO_CPU;
	if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on &&
	    tsk_rt(t)->present) {
		/* the task can be scheduled and
		 * is not scheduled where it ought to be scheduled
		 */
		cpu = tsk_rt(t)->linked_on != NO_CPU ?
			tsk_rt(t)->linked_on         :
			tsk_rt(t)->scheduled_on;
		PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n",
			   tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on);
		/* preempt */
		if (cpu == smp_processor_id())
			set_tsk_need_resched(current);
		else {
			smp_send_reschedule(cpu);
		}
	}
}

static int set_enable_mask(const char *val, const struct kernel_param *kp)
{
	int rv = param_set_uint(val, kp);
	unsigned long flags;

	pr_info("%s: enable_mask=0x%x\n", __func__, enable_mask);

	if (rv)
		return rv;

	spin_lock_irqsave(&enable_mask_lock, flags);

	if (!(enable_mask & ENABLE_C2)) {
		unsigned int cpuid = smp_processor_id();
		int i;
		for_each_online_cpu(i) {
			if (i == cpuid)
				continue;
			smp_send_reschedule(i);
		}
	}

	/* assumed completion: the original example is truncated here */
	spin_unlock_irqrestore(&enable_mask_lock, flags);

	return rv;
}
Example #19
/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must
 * hold the lock that is used to serialize scheduling decisions. */
void litmus_reschedule(int cpu)
{
	int picked_transition_ok = 0;
	int scheduled_transition_ok = 0;

	/* The (remote) CPU could be in any state. */

	/* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU
	 * is not aware of the need to reschedule at this point. */

	/* is a context switch in progress? */
	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
		picked_transition_ok = sched_state_transition_on(
			cpu, TASK_PICKED, PICKED_WRONG_TASK);

	if (!picked_transition_ok &&
	    cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
		/* We either raced with the end of the context switch, or the
		 * CPU was in TASK_SCHEDULED anyway. */
		scheduled_transition_ok = sched_state_transition_on(
			cpu, TASK_SCHEDULED, SHOULD_SCHEDULE);
	}

	/* If the CPU was in state TASK_SCHEDULED, then we need to cause the
	 * scheduler to be invoked. */
	if (scheduled_transition_ok) {
		if (smp_processor_id() == cpu)
			set_tsk_need_resched(current);
		else
			smp_send_reschedule(cpu);
	}

	TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
		    __FUNCTION__,
		    picked_transition_ok,
		    scheduled_transition_ok);
}
Example #20
static inline void __save_stack_trace_user_task(struct task_struct *task,
		struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(task);
	const void __user *fp;
	unsigned long addr;
#ifdef CONFIG_SMP
	if (task != current && task->state == TASK_RUNNING && task->on_cpu) {
		/* To trap into kernel at least once */
		smp_send_reschedule(task_cpu(task));
	}
#endif
	fp = (const void __user *)regs->bp;
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;

		addr = (unsigned long)fp;
		if (!access_process_vm(task, addr, (void *)&frame,
				sizeof(frame), 0))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}
Example #21
static void fastcall reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
#if 0
  // not yet?
	int this_cpu = smp_processor_id();
	struct task_struct *tsk, *target_tsk;
	int cpu, best_cpu, i, max_prio;
	cycles_t oldest_idle;

	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->pcb$l_cpu_id;
	if (can_schedule(p, best_cpu)) {
		tsk = idle_task(best_cpu);
		if (cpu_curr(best_cpu) == tsk) {
			int need_resched;
send_now_idle:
			/*
			 * If need_resched == -1 then we can skip sending
			 * the IPI altogether, tsk->need_resched is
			 * actively watched by the idle thread.
			 */
			need_resched = tsk->need_resched;
			tsk->need_resched = 1;
			if ((best_cpu != this_cpu) && !need_resched)
				smp_send_reschedule(best_cpu);
			return;
		}
	}

	/*
	 * We know that the preferred CPU has a cache-affine current
	 * process, lets try to find a new idle CPU for the woken-up
	 * process. Select the least recently active idle CPU. (that
	 * one will have the least active cache context.) Also find
	 * the executing process which has the least priority.
	 */
	oldest_idle = (cycles_t) -1;
	target_tsk = NULL;
	max_prio = 0;

	for (i = 0; i < smp_num_cpus; i++) {
		cpu = cpu_logical_map(i);
		if (!can_schedule(p, cpu))
			continue;
		tsk = cpu_curr(cpu);
		/*
		 * We use the first available idle CPU. This creates
		 * a priority list between idle CPUs, but this is not
		 * a problem.
		 */
		if (tsk == idle_task(cpu)) {
			if (last_schedule(cpu) < oldest_idle) {
				oldest_idle = last_schedule(cpu);
				target_tsk = tsk;
			}
		} else {
			if (oldest_idle == -1ULL) {
				int prio = preemption_goodness(tsk, p, cpu);

				if (prio > max_prio) {
					max_prio = prio;
					target_tsk = tsk;
				}
			}
		}
	}
	tsk = target_tsk;
	if (tsk) {
		if (oldest_idle != -1ULL) {
			best_cpu = tsk->pcb$l_cpu_id;
			goto send_now_idle;
		}
		tsk->need_resched = 1;
		if (tsk->pcb$l_cpu_id != this_cpu)
			smp_send_reschedule(tsk->pcb$l_cpu_id);
	}
	return;
		
#endif
#else /* UP */
	int this_cpu = smp_processor_id();
	struct task_struct *tsk;

	tsk = ctl$gl_pcb;
	if (p->pcb$b_pri >= tsk->pcb$b_pri) /* previous was meaningless */
		tsk->need_resched = 1;
#endif
}
Example #22
void _interrupt_sp(void)
{
	smp_send_reschedule(aprp_cpu_index());
}