Example #1
static void ipi_handler(evtchn_port_t port, void *unused)
{
//    tprintk("IPI received on CPU=%d\n", smp_processor_id());
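    /* If this CPU is being suspended, force the current thread back through the scheduler. */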
    if (per_cpu(smp_processor_id(), cpu_state) == CPU_SUSPENDING) {
        set_need_resched(current);
    }
}
Example #2
void smp_message_recv(int msg)
{
	switch(msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(get_irq_regs());
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(get_irq_regs());
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
Example #3
/*
 * Common functions
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);

	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		set_need_resched();
		break;
	case PPC_MSG_INVALIDATE_TLB:
		_tlbia();
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		xmon(regs);
		break;
#endif /* CONFIG_XMON */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
Example #4
static int default_idle(void)
{
	long oldval;
	unsigned int cpu = smp_processor_id();

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched() && !cpu_is_offline(cpu)) {
				barrier();
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
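			/*
			 * NEED_RESCHED was set: test_and_clear cleared it,
			 * so restore it before calling schedule().
			 */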
			set_need_resched();
		}

		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
Example #5
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
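		/* fall through: report NOPAGE so the fault is retried */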
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
Example #6
void emxx_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();
	if (need_resched()) {
		local_fiq_enable();
		local_irq_enable();
		return;
	}

	pm_idle_count++;

	if (sleep_while_idle_enable && !suspend_disable) {
#ifdef CONFIG_SMP
		if ((0 == hard_smp_processor_id()) &&
			!(inl(SMU_CLKSTOPSIG_ST) & PWSTATE_PD_NE1)) {
			/* cpu1 wfi */
			emxx_close_clockgate(EMXX_CLK_TI1);
#endif
			if (emxx_can_sleep()) {
				emxx_sleep_while_idle = 1;
				if (0 == emxx_pm_suspend(PM_SUSPEND_STANDBY)) {
					emxx_add_neigh_timer();
					emxx_add_timer_writeback();
					emxx_add_workqueue_timer();

					emxx_sleep_while_idle = 0;
					sleep_while_idle_count++;
#ifdef CONFIG_SMP
					emxx_open_clockgate(EMXX_CLK_TI1);
#endif
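					/* Woke from standby: request a reschedule before returning. */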
					set_need_resched();

					local_fiq_enable();
					local_irq_enable();

					return;
				}
				emxx_add_neigh_timer();
				emxx_add_timer_writeback();
				emxx_add_workqueue_timer();

				emxx_sleep_while_idle = 0;
			}
#ifdef CONFIG_SMP
			emxx_open_clockgate(EMXX_CLK_TI1);
		}
#endif
	}

	arch_idle();

	local_fiq_enable();
	local_irq_enable();
}
Example #7
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}

			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
Example #8
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (--rdtp->dynticks_nesting)
		return;
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	rdtp->dynticks++;
	WARN_ON_ONCE(rdtp->dynticks & 0x1);

	/* If the interrupt queued a callback, get out of dyntick mode. */
	if (__get_cpu_var(rcu_sched_data).nxtlist ||
	    __get_cpu_var(rcu_bh_data).nxtlist)
		set_need_resched();
}
Example #9
/**
 * rcu_irq_exit - inform RCU of exit from hard irq context
 *
 * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
 * to let the RCU handling be aware that the CPU is going back to idle
 * with no ticks.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (--rdtp->dynticks_nesting)
		return;
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	rdtp->dynticks++;
	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);

	/* If the interrupt queued a callback, get out of dyntick mode. */
	if (__get_cpu_var(rcu_data).nxtlist ||
	    __get_cpu_var(rcu_bh_data).nxtlist)
		set_need_resched();
}
Example #10
static void print_cpu_stall(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
			smp_processor_id(), jiffies - rsp->gp_start);
	dump_stack();
	spin_lock_irqsave(&rnp->lock, flags);
	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
		rsp->jiffies_stall =
			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rnp->lock, flags);
	set_need_resched();  /* kick ourselves to get things going. */
}
Example #11
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
Example #12
static int iSeries_idle(void)
{
	struct paca_struct *lpaca;
	long oldval;
	unsigned long CTRL;

	/* ensure iSeries run light will be out when idle */
	clear_thread_flag(TIF_RUN_LIGHT);
	CTRL = mfspr(CTRLF);
	CTRL &= ~RUNLATCH;
	mtspr(CTRLT, CTRL);

	lpaca = get_paca();

	while (1) {
		if (lpaca->lppaca.xSharedProc) {
			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
				process_iSeries_events();
			if (!need_resched())
				yield_shared_processor();
		} else {
			oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

			if (!oldval) {
				set_thread_flag(TIF_POLLING_NRFLAG);

				while (!need_resched()) {
					HMT_medium();
					if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
						process_iSeries_events();
					HMT_low();
				}

				HMT_medium();
				clear_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				set_need_resched();
			}
		}

		schedule();
	}

	return 0;
}
Example #13
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;
	set_need_resched();
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
Example #14
/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of going offline.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline, it is in a quiescent state.  We can
	 * trust its state not to change because interrupts are disabled.
	 */
	if (cpu_is_offline(rdp->cpu)) {
		rdp->offline_fqs++;
		return 1;
	}

	/* The CPU is online, so send it a reschedule IPI. */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}
Example #15
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline, it is in a quiescent state.  We can
	 * trust its state not to change because interrupts are disabled.
	 */
	if (cpu_is_offline(rdp->cpu)) {
		rdp->offline_fqs++;
		return 1;
	}

	/* If preemptable RCU, no point in sending reschedule IPI. */
	if (rdp->preemptable)
		return 0;

	/* The CPU is online, so send it a reschedule IPI. */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}
Example #16
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
Example #17
void
update_timeslice(struct sch_process *p)
{
	// TODO:
	// Round-Robin
	// RR seems simple, so I implement RR first.
	if (p->add->policy == SCHED_RR &&
			!--p->add->timeslice) {
		p->add->timeslice = get_base_timeslice(p);
		p->add->first_timeslice = 0;
		set_need_resched(p);		
	}

	//conventional process
	/*
	-- (p->add->timeslice);
	if (p->add->timeslice <=0 ) {
		// quantum is exhausted
		dequeue_task();
		set_need_resched();
		update_prio();
	}
	*/
}
Example #18
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
int do_fork(unsigned long clone_flags,
	    unsigned long stack_start,
	    struct pt_regs *regs,
	    unsigned long stack_size,
	    int *parent_tidptr,
	    int *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	pid_t pid;

	if (unlikely(current->ptrace)) {
		trace = fork_traceflag (clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, 
			 parent_tidptr, child_tidptr);

	if (unlikely(IS_ERR(p))) 
		return (int) PTR_ERR(p);
	else {
		struct completion vfork;

		/*
		 * Do this prior waking up the new thread - the thread pointer
		 * might get invalid after that point, if the thread exits
		 * quickly.
		 */
		pid = p->pid;
		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			p->sigpending = 1;
		}

		if (isaudit(current))
			audit_fork(current, p);

		/*
		 * The task is in TASK_UNINTERRUPTIBLE right now, no-one
		 * can wake it up. Either wake it up as a child, which
		 * makes it TASK_RUNNING - or make it TASK_STOPPED, after
		 * which signals can wake the child up.
		 */
		if (!(clone_flags & CLONE_STOPPED))
			wake_up_forked_process(p);	/* do this last */
		else
			p->state = TASK_STOPPED;
		++total_forks;

		if (unlikely (trace)) {
			current->ptrace_message = (unsigned long) pid;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE))
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
		} else
			/*
			 * Let the child process run first, to avoid most of the
			 * COW overhead when the child exec()s afterwards.
			 */
			set_need_resched();
	}
	return pid;
}
Example #19
void smp_reschedule_irq(void)
{
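	/*
	 * Reschedule IPI handler: mark the current task so the
	 * interrupt-return path invokes the scheduler.
	 */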
	set_need_resched();
}
Example #20
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long pid;

	if (unlikely(current->ptrace)) {
		trace = fork_traceflag (clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	pid = IS_ERR(p) ? PTR_ERR(p) : p->pid;

	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED)) {
			/*
			 * Do the wakeup last. On SMP we treat fork() and
			 * CLONE_VM separately, because fork() has already
			 * created cache footprint on this CPU (due to
			 * copying the pagetables), hence migration would
			 * probably be costly. Threads on the other hand
			 * have less traction to the current CPU, and if
			 * there's an imbalance then the scheduler can
			 * migrate this fresh thread now, before it
			 * accumulates a larger cache footprint:
			 */
			if (clone_flags & CLONE_VM)
				wake_up_forked_thread(p);
			else
				wake_up_forked_process(p);
		} else {
			int cpu = get_cpu();

			p->state = TASK_STOPPED;
			if (cpu_is_offline(task_cpu(p)))
				set_task_cpu(p, cpu);

			put_cpu();
		}
		++total_forks;

		if (unlikely (trace)) {
			current->ptrace_message = pid;
			ptrace_notify ((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
		} else
			/*
			 * Let the child process run first, to avoid most of the
			 * COW overhead when the child exec()s afterwards.
			 */
			set_need_resched();
	}
	return pid;
}
Example #21
int dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca(), *ppaca;
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned int cpu = smp_processor_id();

	ppaca = &paca[cpu ^ 1];

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.xIdle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;
			while (!need_resched() && !cpu_is_offline(cpu)) {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay == 0 ||
				    __get_tb() < start_snooze)
					continue;

				HMT_medium();

				if (!(ppaca->lppaca.xIdle)) {
					local_irq_disable();

					/*
					 * We are about to sleep the thread
					 * and so won't be polling any
					 * more.
					 */
					clear_thread_flag(TIF_POLLING_NRFLAG);

					/*
					 * SMT dynamic mode. Cede will result
					 * in this thread going dormant, if the
					 * partner thread is still doing work.
					 * Thread wakes up if partner goes idle,
					 * an interrupt is presented, or a prod
					 * occurs.  Returning from the cede
					 * enables external interrupts.
					 */
					if (!need_resched())
						cede_processor();
					else
						local_irq_enable();
				} else {
					/*
					 * Give the HV an opportunity at the
					 * processor, since we are not doing
					 * any work.
					 */
					poll_pending();
				}
			}

			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		HMT_medium();
		lpaca->lppaca.xIdle = 0;
		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
	return 0;
}
Example #22
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
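	/* Nudge the local CPU through a quiescent state by requesting a reschedule. */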
	set_need_resched();
}