Example #1
static void simple_thread_func(int cnt)
{
	int array[6];
	int len = cnt % 5;
	int i;

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ);

	for (i = 0; i < len; i++)
		array[i] = i + 1;
	array[i] = 0;

	/* Silly tracepoints */
	trace_foo_bar("hello", cnt, array, random_strings[len],
		      tsk_cpus_allowed(current));

	trace_foo_with_template_simple("HELLO", cnt);

	trace_foo_bar_with_cond("Some times print", cnt);

	trace_foo_with_template_cond("prints other times", cnt);

	trace_foo_with_template_print("I have to be different", cnt);
}
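Every snippet on this page goes through the tsk_cpus_allowed() helper. In the kernels these examples were taken from (before v4.11, when the helper was removed), it is nothing more than an accessor for the task's affinity mask; a minimal sketch of its definition, from include/linux/sched.h:

/* Accessor for a task's allowed-CPU mask (pre-4.11 kernels). */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)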
Example #2
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin ourselves to CPU 0 for a short while
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
	
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);

}
Example #3
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable), or -1 if none fits
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
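For context, cpudl_maximum() used above simply reads the root of the deadline max-heap; its definition in kernel/sched/cpudeadline.c is, approximately:

/* The heap root holds the CPU with the latest (largest) deadline. */
static inline int cpudl_maximum(struct cpudl *cp)
{
	return cp->elements[0].cpu;
}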
Example #4
/*
 * Return (with a reference held) the first thread in the current thread
 * group whose allowed-CPU mask differs from @mask, or NULL if none does.
 */
static struct task_struct *tzdev_get_next_thread(cpumask_t *mask)
{
	struct task_struct *child = NULL, *tmp = current;

	read_lock(&tasklist_lock);
	while_each_thread(current, tmp) {
		if (cpumask_equal(tsk_cpus_allowed(tmp), mask))
			continue;
		child = tmp;
		get_task_struct(child);
		break;
	}
	read_unlock(&tasklist_lock);
	return child;
}
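Any caller of tzdev_get_next_thread() owns the reference taken with get_task_struct() and must drop it. A hypothetical usage sketch (tzdev_wake_other() is an invented name, not part of the driver):

/* Hypothetical: wake a sibling thread whose affinity differs from @mask. */
static void tzdev_wake_other(cpumask_t *mask)
{
	struct task_struct *t = tzdev_get_next_thread(mask);

	if (t) {
		wake_up_process(t);
		put_task_struct(t);	/* drop the reference taken above */
	}
}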
Example #5
notrace unsigned int debug_smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count();
	int this_cpu = raw_smp_processor_id();

	if (likely(preempt_count))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
			"code: %s/%d\n",
			preempt_count() - 1, current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

#ifdef CONFIG_PANIC_ON_SMP_PROCESS_ID_BUG
	panic("smp_processor_id() used in preemptible code; fix the caller, then disable CONFIG_PANIC_ON_SMP_PROCESS_ID_BUG\n");
#endif

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
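This checked variant is only compiled in on CONFIG_DEBUG_PREEMPT kernels, where smp_processor_id() is redirected to it; the selection logic in include/linux/smp.h looks roughly like:

/* Rough sketch of how include/linux/smp.h picks the implementation. */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif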
Example #6
// ARM10C 20130824
// FIXME: regarding notrace, what exactly is the profiling function?
notrace unsigned int debug_smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count(); /* e.g. 0x4000_0001 */
	int this_cpu = raw_smp_processor_id();

	// likely tells the compiler that this condition is most probably true.
	if (likely(preempt_count))
		goto out;

// 2013/08/24: session ended here

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
			"code: %s/%d\n",
			preempt_count() - 1, current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
Example #7
File: sysfs.c  Project: 020gzh/linux
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
			        unsigned long (*func)(unsigned long),
				unsigned long arg)
{
	cpumask_t old_affinity;
	unsigned long ret;

	cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
	/* should return -EINVAL to userspace */
	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed_ptr(current, &old_affinity);

	return ret;
}
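A hypothetical caller of run_on_cpu() might sample a CPU-local quantity on one specific CPU; read_local_counter() below is a placeholder, not a real kernel API:

/* Hypothetical callback, executed while pinned to the target CPU. */
static unsigned long read_local_counter(unsigned long arg)
{
	return 0;	/* placeholder for a per-CPU register read */
}

/* ... elsewhere: val = run_on_cpu(2, read_local_counter, 0); */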
Example #8
notrace static unsigned int check_preemption_disabled(const char *what1,
							const char *what2)
{
	int this_cpu = raw_smp_processor_id();

	if (likely(preempt_count()))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
		what1, what2, preempt_count() - 1, current->comm, current->pid);

	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
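check_preemption_disabled() is the generalized core of the debug_smp_processor_id() shown in Examples #5 and #6; in lib/smp_processor_id.c the public entry points become thin wrappers, roughly:

/* Approximate wrappers around the common checking core. */
notrace unsigned int debug_smp_processor_id(void)
{
	return check_preemption_disabled("smp_processor_id", "");
}

notrace void __this_cpu_preempt_check(const char *op)
{
	check_preemption_disabled("__this_cpu_", op);
}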
Example #9
int caam_qi_shutdown(struct device *qidev)
{
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	int i, ret;

	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = *tsk_cpus_allowed(current);

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;

		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, &per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	/*
	 * QMAN driver requires CGRs to be deleted from same CPU from where
	 * they were instantiated. Hence we get the module removal execute
	 * from the same CPU from where it was originally inserted.
	 */
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	ret = qman_delete_cgr(&priv->rsp_cgr);
	if (ret)
		dev_err(qidev, "Delete response CGR failed: %d\n", ret);
	else
		qman_release_cgrid(priv->rsp_cgr.cgrid);

	if (qi_cache)
		kmem_cache_destroy(qi_cache);

	/* Now that we're done with the CGRs, restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);

	platform_device_unregister(priv->qi_pdev);
	return ret;
}
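Note the contrast with Example #2: here the old affinity is saved by value on the stack (struct cpumask old_cpumask), while Example #2 uses alloc_cpumask_var(). On CONFIG_CPUMASK_OFFSTACK=y kernels with a large NR_CPUS, a struct cpumask on the stack can be sizable, which is why the allocation-based idiom is generally preferred.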
Example #10
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;
	cpumask_var_t cpumask_org;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL))  {
		free_cpumask_var(cpumask);
		return -ENOMEM;
	}

	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);
	migrate_disable();
	mycpu = smp_processor_id();
	if (mycpu == cpu) {
		printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
		migrate_enable();
		err = -EBUSY;
		goto restore_cpus;
	}

	cpu_hotplug_begin();
	err = cpu_unplug_begin(cpu);
	if (err) {
		printk("cpu_unplug_begin(%d) failed\n", cpu);
		goto out_cancel;
	}

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	__cpu_unplug_wait(cpu);
	smpboot_park_threads(cpu);

	/* Notifiers are done. Don't let any more tasks pin this CPU. */
	cpu_unplug_sync(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_unplug_done(cpu);
out_cancel:
	migrate_enable();
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
restore_cpus:
	set_cpus_allowed_ptr(current, cpumask_org);
	free_cpumask_var(cpumask_org);
	return err;
}
Example #11
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
<<<<<<< HEAD
	cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
=======
	ctx->cpus_allowed = current->cpus_allowed;
>>>>>>> 296c66da8a02d52243f45b80521febece5ed498a

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;