Example #1
0
/*
 * Queue @fxn(@arg) to run on @cpu's stop task without waiting for it to
 * complete. The work is delivered by arming the stop task's hrtimer for
 * 1 ns while in @cpu's context, then switching back to the caller's CPU.
 *
 * NOTE(review): @work_buf is accepted for API compatibility but is never
 * referenced in this simulated implementation.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fxn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	struct task_struct *stop = stop_tasks[cpu];
	struct stop_task *stop_task = task_thread_info(stop)->td->data;
	int this_cpu = smp_processor_id();

	/*
	 * catch a data race on simultaneous calls to stop_one_cpu_nowait, if
	 * this ends up happening we'd need to support a proper queue here:
	 * a still-pending fxn means a previous request has not run yet.
	 */
	BUG_ON(stop_task->fxn);
	stop_task->fxn = fxn;
	stop_task->arg = arg;

	/*
	 * in the nowait case we actually go through a schedule to make sure
	 * that our cpu has a chance to drop its rq->locks since they may be
	 * needed.
	 */
	linsched_change_cpu(cpu);
	hrtimer_set_expires(&stop_task->timer, ns_to_ktime(1));
	hrtimer_start_expires(&stop_task->timer, HRTIMER_MODE_REL);
	linsched_change_cpu(this_cpu);
}
Example #2
0
/*
 * Forcibly migrate @task to @dest_cpu.
 *
 * Migrations are disabled for the duration so that the forced migration
 * takes place correctly; if @migr is nonzero they are re-enabled once
 * the move has completed, otherwise they stay disabled.
 *
 * Returns the CPU the task was on before the migration.
 */
int linsched_force_migration(struct task_struct *task, int dest_cpu, int migr)
{
	int src_cpu = task_cpu(task);

	linsched_disable_migrations();

	/* Pin the task's affinity to the destination CPU only. */
	set_cpus_allowed(task, cpumask_of_cpu(dest_cpu));

	/* Run the scheduler on the source CPU, then on the destination
	 * CPU, so the migration actually takes effect. */
	linsched_change_cpu(src_cpu);
	schedule();
	linsched_change_cpu(dest_cpu);
	schedule();

	if (migr)
		linsched_enable_migrations();

	return src_cpu;
}
Example #3
0
/* kernel/smp.c */
/*
 * Run @data->func(@data->info) on @cpu with interrupts disabled,
 * temporarily switching the simulated CPU context when @cpu differs
 * from the current CPU. @wait is unused here: the call is always
 * executed synchronously in this simulation.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	int this_cpu = smp_processor_id();
	int cross_cpu = (cpu != this_cpu);
	unsigned long flags;

	if (cross_cpu)
		linsched_change_cpu(cpu);

	local_irq_save(flags);
	data->func(data->info);
	local_irq_restore(flags);

	if (cross_cpu)
		linsched_change_cpu(this_cpu);
}
Example #4
0
/*
 * Synchronously run @fxn(@arg) in the context of @cpu's stop task.
 *
 * Switches to @cpu, wakes its stop task and schedules into it, runs the
 * callback directly, then puts the stop task back to sleep and schedules
 * away from it before returning to the caller's CPU.
 *
 * Returns the value returned by @fxn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fxn, void *arg)
{
	struct task_struct *stop = stop_tasks[cpu];
	int this_cpu = smp_processor_id(), ret;

	linsched_change_cpu(cpu);
	wake_up_process(stop);
	/* switch to stop */
	schedule();
	/* the stop task must preempt everything; anything else running
	 * here is a scheduler bug */
	BUG_ON(current != stop);
	ret = fxn(arg);
	/* switch back: put the stop task to sleep and reschedule */
	stop->state = TASK_INTERRUPTIBLE;
	schedule();

	/* let whoever followed the stop task re-program (if needed) */
	linsched_current_handler();
	BUG_ON(current == stop);
	linsched_change_cpu(this_cpu);

	return ret;
}
Example #5
0
/*
 * Run the simulation for @sim_ticks ticks.
 *
 * Each tick, every CPU receives exactly one scheduler_tick() call; the
 * order in which the CPUs are ticked is randomized via linsched_random().
 * Tasks could be created, reprioritized, removed (by having them exit),
 * etc., at chosen ticks for more elaborate scenarios than this simple
 * loop provides.
 *
 * NOTE: the per-CPU tick is never disabled, as it might be in a real
 * system when a CPU goes idle. Even current Linux keeps a periodic tick
 * when there is actual work to do, and disabling it when idle only
 * conserves energy without changing scheduler behavior, so it is not
 * modeled here.
 */
void linsched_run_sim(int sim_ticks)
{
	int initial_jiffies = jiffies;

	for (jiffies = initial_jiffies;
	     jiffies < initial_jiffies + sim_ticks;
	     jiffies++) {
		cpumask_t cpu_processed_map = CPU_MASK_NONE;

		while (!cpus_full(cpu_processed_map)) {
			int active_cpu;

			/* Draw a random CPU that has not yet been ticked
			 * this jiffy, and mark it processed. (The rejection
			 * loop is kept as-is so the RNG is consumed in the
			 * same pattern for a given seed.) */
			do {
				active_cpu = linsched_random() % NR_CPUS;
			} while (cpu_isset(active_cpu, cpu_processed_map));
			cpu_set(active_cpu, cpu_processed_map);

			/* Deliver the tick in that CPU's context; this may
			 * trigger a schedule() call. */
			linsched_change_cpu(active_cpu);
			scheduler_tick();

			/* First time executing a task? Do not need to
			 * call schedule_tail, since we are not actually
			 * performing a "real" context switch.
			 */
		}
	}
}
Example #6
0
/*
 * One-time simulator initialization: set up the simulated CPUs, seed
 * the random number generator, and boot the kernel on CPU 0.
 */
void linsched_init(void)
{
	/* Platform/topology setup. For now this does nothing beyond the
	 * known CPU count; MC/SMT/NUMA are not yet supported, and
	 * scheduling decisions do not depend on asymmetries yet (though
	 * that could change). */
	linsched_init_cpus();

	/* Seed the deterministic RNG used by the simulation. */
	linsched_random_init(LINSCHED_RAND_SEED);

	/* Boot the kernel in the context of the "boot" CPU. */
	linsched_change_cpu(0);
	start_kernel();
}