Code Example #1
File: process.c Project: b3rnik/dsl-n55u-bender
void cpu_idle_wait(void)
{
    unsigned int cpu, this_cpu = get_cpu();
    cpumask_t map, tmp = current->cpus_allowed;

    set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
    put_cpu();

    cpus_clear(map);
    for_each_online_cpu(cpu) {
        per_cpu(cpu_idle_state, cpu) = 1;
        cpu_set(cpu, map);
    }

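    /* This CPU is running cpu_idle_wait() itself, so it is not idle;
     * clear its own flag so the polling loop below does not wait on it. */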
    __get_cpu_var(cpu_idle_state) = 0;

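    /* Ensure the flag writes above are visible to all CPUs before we
     * start polling for the idle loops to clear them. */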
    wmb();
    do {
        ssleep(1);
        for_each_online_cpu(cpu) {
            if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                cpu_clear(cpu, map);
        }
        cpus_and(map, map, cpu_online_map);
    } while (!cpus_empty(map));

    set_cpus_allowed(current, tmp);
}
Code Example #2
File: salinfo.c Project: 10x-Amin/nAa-kernel
static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpumask_t save_cpus_allowed = current->cpus_allowed;
	cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
	set_cpus_allowed(current, new_cpus_allowed);
	(*fn)(arg);
	set_cpus_allowed(current, save_cpus_allowed);
}
Code Example #3
File: salinfo.c Project: camelguo/linux-2.6-trimedia
static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpumask_t save_cpus_allowed, new_cpus_allowed;
	memcpy(&save_cpus_allowed, &current->cpus_allowed, sizeof(save_cpus_allowed));
	memset(&new_cpus_allowed, 0, sizeof(new_cpus_allowed));
	set_bit(cpu, &new_cpus_allowed);
	set_cpus_allowed(current, new_cpus_allowed);
	(*fn)(arg);
	set_cpus_allowed(current, save_cpus_allowed);
}
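Examples #2 and #3 are the same helper from two kernel generations: #2 builds the one-CPU mask with cpumask_of_cpu(), while #3 open-codes it with memset() and set_bit(). On kernels that replaced set_cpus_allowed() with set_cpus_allowed_ptr() (roughly 2.6.26 onward, where current->cpus_allowed is still directly accessible), the same save/pin/restore idiom looks like the sketch below. This is an illustrative reconstruction, not code from either project, and call_on_cpu_ptr is a made-up name.

/* Illustrative sketch only: assumes set_cpus_allowed_ptr() and the
 * cpumask_var_t API are available; not taken from the projects above. */
static void call_on_cpu_ptr(int cpu, void (*fn)(void *), void *arg)
{
	cpumask_var_t saved;

	if (!alloc_cpumask_var(&saved, GFP_KERNEL))
		return;

	/* Save the caller's affinity, pin to the target CPU, run, restore. */
	cpumask_copy(saved, &current->cpus_allowed);
	if (!set_cpus_allowed_ptr(current, cpumask_of(cpu))) {
		(*fn)(arg);
		set_cpus_allowed_ptr(current, saved);
	}
	free_cpumask_var(saved);
}

Later kernels also provide work_on_cpu() for this exact job; the XXX comment in example #13 points the same way.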
Code Example #4
File: linux_linsched.c Project: varrunr/ml-cfs
void linsched_enable_migrations(void)
{
	int i;

	for (i = 0; i < curr_task_id; i++)
		set_cpus_allowed(__linsched_tasks[i], CPU_MASK_ALL);
}
Code Example #5
File: cpu-boost.c Project: itsmerajit/kernel_otus
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 *
 * The sync kthread needs to run on the CPU in question to avoid deadlocks in
 * the wake up code. Achieve this by binding the thread to the respective
 * CPU. But a CPU going offline unbinds threads from that CPU. So, set it up
 * again each time the CPU comes back up. We can use CPUFREQ_START to figure
 * out a CPU is coming online instead of registering for hotplug notifiers.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	switch (val) {
	case CPUFREQ_ADJUST:
		if (!b_min && !ib_min)
			break;

		min = max(b_min, ib_min);

		pr_debug("CPU%u policy min before boost: %u kHz\n",
			 cpu, policy->min);
		pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

		cpufreq_verify_within_limits(policy, min, UINT_MAX);

		pr_debug("CPU%u policy min after boost: %u kHz\n",
			 cpu, policy->min);
		break;

	case CPUFREQ_START:
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
		break;
	}

	return NOTIFY_OK;
}
Code Example #6
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct key *new_session, *old_session;
	int retval;

	/* Unblock all signals and set the session keyring. */
	new_session = key_get(sub_info->ring);
	flush_signals(current);
	spin_lock_irq(&current->sighand->siglock);
	old_session = __install_session_keyring(current, new_session);
	flush_signal_handlers(current, 1);
	sigemptyset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	key_put(old_session);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed(current, CPU_MASK_ALL);

	retval = -EPERM;
	if (current->fs->root)
		retval = execve(sub_info->path, sub_info->argv, sub_info->envp);

	/* Exec failed? */
	sub_info->retval = retval;
	do_exit(0);
}
Code Example #7
File: smp.c Project: 1703011/asuswrt-merlin
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
	int timeout = 2000;
	int i;

	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ( (kexec_down_cpus != (num_online_cpus() - 1)) &&
		( timeout > 0 ) )
	{
		timeout--;
	}

	if ( !timeout )
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for (i = 0; i < num_present_cpus(); i++)
	{
		if ( i == smp_processor_id() ) continue;
		mpic_reset_core(i);
	}

	default_machine_kexec(image);
}
Code Example #8
/*
 * Boost hierarchy: there are three kinds of boosts, and some
 * boosts will take precedence over others. Below is the current
 * hierarchy, from most precedence to least precedence:
 *
 * 1. Framebuffer blank/unblank boost
 * 2. Thread-migration boost (only if the mig boost freq > policy->min)
 * 3. Input boost
 */
static int cpu_do_boost(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	struct boost_policy *b = &per_cpu(boost_info, policy->cpu);

	if (val == CPUFREQ_START) {
		set_cpus_allowed(b->thread, *cpumask_of(b->cpu));
		return NOTIFY_OK;
	}

	if (val != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	switch (b->boost_state) {
	case UNBOOST:
		policy->min = policy->cpuinfo.min_freq;
		break;
	case BOOST:
		policy->min = min(policy->max, ib_freq[policy->cpu]);
		break;
	}

	if (b->migration_freq > policy->min)
		policy->min = min(policy->max, b->migration_freq);

	if (fb_boost)
		policy->min = policy->max;

	return NOTIFY_OK;
}
Code Example #9
File: cpu.c Project: JBTech/ralink_rt5350
static int integrator_set_target(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
{
	cpumask_t cpus_allowed;
	int cpu = policy->cpu;
	struct icst525_vco vco;
	struct cpufreq_freqs freqs;
	u_int cm_osc;

	/*
	 * Save this threads cpus_allowed mask.
	 */
	cpus_allowed = current->cpus_allowed;

	/*
	 * Bind to the specified CPU.  When this call returns,
	 * we should be running on the right CPU.
	 */
	set_cpus_allowed(current, cpumask_of_cpu(cpu));
	BUG_ON(cpu != smp_processor_id());

	/* get current setting */
	cm_osc = __raw_readl(CM_OSC);

	if (machine_is_integrator()) {
		vco.s = (cm_osc >> 8) & 7;
	} else if (machine_is_cintegrator()) {
Code Example #10
File: cpu-boost.c Project: itsmerajit/kernel_otus
static int cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -EFAULT;

	INIT_WORK(&input_boost_work, do_input_boost);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		atomic_set(&s->being_woken, 0);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
	}
	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
	atomic_notifier_chain_register(&migration_notifier_head,
					&boost_migration_nb);
	ret = input_register_handler(&cpuboost_input_handler);

	return ret;
}
Code Example #11
static void tzdev_migrate_threads(int cpu)
{
    struct task_struct *thread;
    cpumask_t next_cpumask;

    cpumask_clear(&next_cpumask);
    cpumask_set_cpu(cpu, &next_cpumask);

    while ((thread = tzdev_get_next_thread(&next_cpumask))) {
        pr_notice("Migrate thread pid = %d to cpu = %d\n", thread->pid, cpu);
        /* We shouldn't fail here because we wrap this code in
         * get_online_cpus() / put_online_cpus() */
        BUG_ON(set_cpus_allowed(thread, next_cpumask));
        put_task_struct(thread);
    }
    BUG_ON(set_cpus_allowed(current, next_cpumask));
}
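The BUG_ON() calls above lean on the caller holding the CPU-hotplug lock, as the in-code comment notes. A sketch of that calling convention follows; tzdev_migrate_threads_locked is a made-up wrapper name, shown only to make the locking explicit.

static void tzdev_migrate_threads_locked(int cpu)
{
    /* get_online_cpus() pins the hotplug state, so the target CPU cannot
     * go offline between the check and the affinity changes; this is why
     * set_cpus_allowed() is not expected to fail inside. */
    get_online_cpus();
    if (cpu_online(cpu))
        tzdev_migrate_threads(cpu);
    put_online_cpus();
}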
Code Example #12
File: main.c Project: naroahlee/2016SpringCSE521S
static int hello_init(void)
{
	printk(KERN_ALERT "Ready to start kthread.\n");
	gpkthmykth[0] = kthread_create(mykthread_t, NULL, gacMyktname);
	gpkthmykth[1] = kthread_create(mykthread,   NULL, gacMyktname);
	gpkthmykth[2] = kthread_create(mykthread,   NULL, gacMyktname);
	gpkthmykth[3] = kthread_create(mykthread,   NULL, gacMyktname);
	set_cpus_allowed(gpkthmykth[0], *cpumask_of(0));
	set_cpus_allowed(gpkthmykth[1], *cpumask_of(1));
	set_cpus_allowed(gpkthmykth[2], *cpumask_of(2));
	set_cpus_allowed(gpkthmykth[3], *cpumask_of(3));
	wake_up_process(gpkthmykth[0]);
	wake_up_process(gpkthmykth[1]);
	wake_up_process(gpkthmykth[2]);
	wake_up_process(gpkthmykth[3]);
	return 0;
}
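Examples #5, #10, and #12 all pin kthreads by rewriting cpus_allowed. For a thread that has not yet been woken, the kernel also offers kthread_bind(), which expresses the same intent directly; a minimal sketch (the helper name is made up, and unlike the example above it checks kthread_create() for failure):

static struct task_struct *start_pinned_kthread(int (*fn)(void *data),
						unsigned int cpu,
						const char *name)
{
	struct task_struct *t = kthread_create(fn, NULL, "%s/%u", name, cpu);

	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* valid only before the first wake-up */
		wake_up_process(t);
	}
	return t;
}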
Code Example #13
File: sysfs.c Project: 1x23/unifi-gpl
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
			        unsigned long (*func)(unsigned long),
				unsigned long arg)
{
	cpumask_t old_affinity = current->cpus_allowed;
	unsigned long ret;

	/* should return -EINVAL to userspace */
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed(current, old_affinity);

	return ret;
}
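The XXX comment refers to the helper that eventually landed in later kernels (around 2.6.29) as work_on_cpu(): it runs the function from a workqueue already bound to the target CPU, so the caller's affinity mask is never touched. A hedged sketch of the replacement; note that work_on_cpu()'s callback takes void * and returns long, so the function type changes from the example above.

static long run_on_cpu_wq(int cpu, long (*func)(void *), void *arg)
{
	/* Blocks until func has finished running on the target CPU. */
	return work_on_cpu(cpu, func, arg);
}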
Code Example #14
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin ourselves down to CPU 0 for a short while
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
	
	smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);

	dump_numa_cpu_topology();
}
Code Example #15
File: main.c Project: varrunr/ml-cfs
static int __init kernel_init(void * unused)
{
#ifndef __LINSCHED__
	lock_kernel();
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed(current, CPU_MASK_ALL);
	/*
	 * Tell the world that we're going to be the grim
	 * reaper of innocent orphaned children.
	 *
	 * We don't want people to have to make incorrect
	 * assumptions about where in the task array this
	 * can be found.
	 */
	init_pid_ns.child_reaper = current;

	__set_special_pids(1, 1);
	cad_pid = task_pid(current);

	smp_prepare_cpus(max_cpus);

	do_pre_smp_initcalls();

	smp_init();
#endif /* __LINSCHED__ */

	sched_init_smp();

#ifndef __LINSCHED__
	cpuset_init_smp();

	do_basic_setup();

	/*
	 * check if there is an early userspace init.  If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */
	init_post();
#endif /* __LINSCHED__ */

	return 0;
}
Code Example #16
File: linux_linsched.c Project: varrunr/ml-cfs
void linsched_disable_migrations(void)
{
	int i;

	for (i = 0; i < curr_task_id; i++)
		set_cpus_allowed(__linsched_tasks[i],
				 cpumask_of_cpu(
					 task_cpu(__linsched_tasks[i])));
}
Code Example #17
File: linux_linsched.c Project: varrunr/ml-cfs
struct task_struct *__linsched_create_task_binary(void (*callback)(void), unsigned long time_slice)
{
        struct task_struct *newtask =
                (struct task_struct *)do_fork_binary(0, 0, 0, 0, 0, 0, callback, time_slice);

        set_cpus_allowed(newtask, CPU_MASK_ALL);

        return newtask;
}
Code Example #18
static void
acpi_power_off (void)
{
	printk("%s called\n", __FUNCTION__);
	/* Some SMP machines can only power off from the boot CPU */
	set_cpus_allowed(current, cpumask_of_cpu(0));
	acpi_enter_sleep_state_prep(ACPI_STATE_S5);
	ACPI_DISABLE_IRQS();
	acpi_enter_sleep_state(ACPI_STATE_S5);
}
Code Example #19
static int __init init(void * unused)
{
	lock_kernel();
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed(current, CPU_MASK_ALL);
	/*
	 * Tell the world that we're going to be the grim
	 * reaper of innocent orphaned children.
	 *
	 * We don't want people to have to make incorrect
	 * assumptions about where in the task array this
	 * can be found.
	 */
	child_reaper = current;

	smp_prepare_cpus(max_cpus);

	do_pre_smp_initcalls();

	smp_init();
	sched_init_smp();

	cpuset_init_smp();

	/*
	 * Do this before initcalls, because some drivers want to access
	 * firmware files.
	 */
	populate_rootfs();

	do_basic_setup();

	/*
	 * check if there is an early userspace init.  If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */
	init_post();
	return 0;
}
Code Example #20
static int __init cpu_iboost_init(void)
{
	struct boost_policy *b;
	int cpu, ret;

	boost_wq = alloc_workqueue("cpu_iboost_wq", WQ_HIGHPRI, 0);
	if (!boost_wq) {
		pr_err("Failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err;
	}

	cpufreq_register_notifier(&cpu_do_boost_nb, CPUFREQ_POLICY_NOTIFIER);

	INIT_DELAYED_WORK(&fb_boost_work, fb_boost_fn);

	fb_register_client(&fb_boost_nb);

	for_each_possible_cpu(cpu) {
		b = &per_cpu(boost_info, cpu);
		b->cpu = cpu;
		INIT_DELAYED_WORK(&b->ib_restore_work, ib_restore_main);
		init_waitqueue_head(&b->sync_wq);
		atomic_set(&b->being_woken, 0);
		spin_lock_init(&b->lock);
		INIT_DELAYED_WORK(&b->mig_boost_rem, do_mig_boost_rem);
		b->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(b->thread, *cpumask_of(cpu));
	}

	atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb);

	INIT_WORK(&boost_work, ib_boost_main);

	ret = input_register_handler(&cpu_iboost_input_handler);
	if (ret) {
		pr_err("Failed to register input handler, err: %d\n", ret);
		goto err;
	}

	cpu_iboost_kobject = kobject_create_and_add("cpu_input_boost", kernel_kobj);
	if (!cpu_iboost_kobject) {
		pr_err("Failed to create kobject\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sysfs_create_group(cpu_iboost_kobject, &cpu_iboost_attr_group);
	if (ret) {
		pr_err("Failed to create sysfs interface\n");
		kobject_put(cpu_iboost_kobject);
	}
err:
	return ret;
}
Code Example #21
File: linux_linsched.c Project: varrunr/ml-cfs
struct task_struct *__linsched_create_task(void (*callback)(void))
{
	struct task_struct *newtask =
		(struct task_struct *)do_fork(0, 0, 0, 0, 0, 0, callback);

	/* Allow task to run on any CPU. */

	set_cpus_allowed(newtask, CPU_MASK_ALL);

	return newtask;
}
Code Example #22
File: sn_hwperf.c Project: OpenHMR/Open-HMR600
static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
{
	u32 cpu;
	u32 use_ipi;
	int r = 0;
	cpumask_t save_allowed;
	
	cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
	use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
	op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;

	if (cpu != SN_HWPERF_ARG_ANY_CPU) {
		if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
			r = -EINVAL;
			goto out;
		}
	}

	if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
		/* don't care, or already on correct cpu */
		sn_hwperf_call_sal(op_info);
	}
	else {
		if (use_ipi) {
			/* use an interprocessor interrupt to call SAL */
			smp_call_function_single(cpu, sn_hwperf_call_sal,
				op_info, 1, 1);
		}
		else {
			/* migrate the task before calling SAL */ 
			save_allowed = current->cpus_allowed;
			set_cpus_allowed(current, cpumask_of_cpu(cpu));
			sn_hwperf_call_sal(op_info);
			set_cpus_allowed(current, save_allowed);
		}
	}
	r = op_info->ret;

out:
	return r;
}
Code Example #23
File: system.c Project: NieHao/Tomato-RAF
static void
acpi_power_off (void)
{
	if (unlikely(in_interrupt())) 
		BUG();
	/* Some SMP machines can only power off from the boot CPU */
	set_cpus_allowed(current, 1UL << cpu_logical_map(0));
	acpi_enter_sleep_state_prep(ACPI_STATE_S5);
	ACPI_DISABLE_IRQS();
	acpi_enter_sleep_state(ACPI_STATE_S5);

	printk(KERN_EMERG "ACPI: can not power off machine\n");
}
Code Example #24
File: rtasd.c Project: 420GrayFox/dsl-n55u-bender
static void do_event_scan_all_cpus(long delay)
{
	int cpu;

	lock_cpu_hotplug();
	cpu = first_cpu(cpu_online_map);
	for (;;) {
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		do_event_scan(rtas_token("event-scan"));
		set_cpus_allowed(current, CPU_MASK_ALL);

		/* Drop hotplug lock, and sleep for the specified delay */
		unlock_cpu_hotplug();
		msleep_interruptible(delay);
		lock_cpu_hotplug();

		cpu = next_cpu(cpu, cpu_online_map);
		if (cpu == NR_CPUS)
			break;
	}
	unlock_cpu_hotplug();
}
Code Example #25
static int rtswitch_create_ktask(rtswitch_context_t *ctx,
                                 struct rttst_swtest_task *ptask)
{
	rtswitch_task_t *task;
	xnflags_t init_flags;
	struct taskarg arg;
	char name[30];
	int err;

	ptask->flags |= RTSWITCH_KERNEL;
	err = rtswitch_register_task(ctx, ptask);

	if (err)
		return err;

	snprintf(name, sizeof(name), "rtk%d/%u", ptask->index, ctx->cpu);

	task = &ctx->tasks[ptask->index];

	arg.ctx = ctx;
	arg.task = task;

	init_flags = (ptask->flags & RTTST_SWTEST_FPU) ? XNFPU : 0;

	/* Migrate the calling thread to the same CPU as the created task, in
	   order to be sure that the created task is suspended when this function
	   returns. This also allows us to use the stack to pass the parameters to
	   the created task. */
	set_cpus_allowed(current, cpumask_of_cpu(ctx->cpu));

	err = xnpod_init_thread(&task->ktask, rtdm_tbase, name, 1, init_flags, 0, NULL);

	if (!err)
		err = xnpod_start_thread(&task->ktask,
					 0,
					 0,
					 xnarch_cpumask_of_cpu(ctx->cpu),
					 rtswitch_ktask,
					 &arg);
	else
		/* In order to avoid calling xnpod_delete_thread with invalid
		   thread. */
		task->base.flags = 0;

	/* Putting the argument on stack is safe, because the new thread will
	   preempt the current thread immediately, and will suspend only once the
	   arguments on stack are used. */

	return err;
}
Code Example #26
File: linux_linsched.c Project: varrunr/ml-cfs
/* Force a migration of task to the dest_cpu.
 * If migr is set, allow migrations after the forced migration... otherwise,
 * do not allow them. (We need to disable migrations so that the forced
 * migration takes place correctly.)
 * Returns old cpu of task.
 */
int linsched_force_migration(struct task_struct *task, int dest_cpu, int migr)
{
	int old_cpu = task_cpu(task);
	
	linsched_disable_migrations();
	set_cpus_allowed(task, cpumask_of_cpu(dest_cpu));
	linsched_change_cpu(old_cpu);
	schedule();
	linsched_change_cpu(dest_cpu);
	schedule();
	if (migr)
		linsched_enable_migrations();

	return old_cpu;
}
Code Example #27
File: kmod.c Project: BackupTheBerlios/arp2-svn
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed(current, CPU_MASK_ALL);

	retval = __exec_usermodehelper(sub_info->path,
			sub_info->argv, sub_info->envp, sub_info->ring);

	/* Exec failed? */
	sub_info->retval = retval;
	do_exit(0);
}
Code Example #28
File: virtual_bak3.c Project: kandycs/kitten-perf
static int vperfctr_enable_control(struct vperfctr *perfctr, struct task_struct *tsk)
{
	int err;
	unsigned int next_cstatus;
	unsigned int nrctrs, i;

	if (perfctr->cpu_state.control.header.nractrs ||
	    perfctr->cpu_state.control.header.nrictrs) {
		cpumask_t old_mask, new_mask;

		//old_mask = tsk->cpus_allowed;
		old_mask = tsk->cpu_mask;
		cpus_andnot(new_mask, old_mask, perfctr_cpus_forbidden_mask);

		if (cpus_empty(new_mask))
			return -EINVAL;
		if (!cpus_equal(new_mask, old_mask))
			set_cpus_allowed(tsk, new_mask);
	}

	perfctr->cpu_state.user.cstatus = 0;
	perfctr->resume_cstatus = 0;

	/* remote access note: perfctr_cpu_update_control() is ok */
	err = perfctr_cpu_update_control(&perfctr->cpu_state, 0);
	if (err < 0)
		return err;
	next_cstatus = perfctr->cpu_state.user.cstatus;
	if (!perfctr_cstatus_enabled(next_cstatus))
		return 0;

	if (!perfctr_cstatus_has_tsc(next_cstatus))
		perfctr->cpu_state.user.tsc_sum = 0;

	nrctrs = perfctr_cstatus_nrctrs(next_cstatus);
	for(i = 0; i < nrctrs; ++i)
		if (!(perfctr->preserve & (1<<i)))
			perfctr->cpu_state.user.pmc[i].sum = 0;

	spin_lock(&perfctr->children_lock);
	perfctr->inheritance_id = new_inheritance_id();
	memset(&perfctr->children, 0, sizeof perfctr->children);
	spin_unlock(&perfctr->children_lock);

	return 0;
}
Code Example #29
File: bte_regr_test.c Project: dduval/kernel-rhel3
/*
 * Allocate the needed buffers and then initiate each xfer specified
 * by brt_xfer_tests.
 */
static int
brt_tst_std_xfer(void)
{
	char *block_1;
	char *block_2;
	int iteration = 0;
	brt_xfer_entry_t *cur_test;
	int cpu;
	int err_cnt;

	block_1 = kmalloc(BRT_TEST_BLOCK_SIZE, GFP_KERNEL);
	ASSERT(!((u64) block_1 & L1_CACHE_MASK));
	block_2 = kmalloc(BRT_TEST_BLOCK_SIZE, GFP_KERNEL);
	ASSERT(!((u64) block_2 & L1_CACHE_MASK));

	cur_test = brt_xfer_tests;

	err_cnt = 0;
	while (cur_test->length) {
		for (cpu = 0; cpu < smp_num_cpus; cpu++) {
			set_cpus_allowed(current, (1UL << cpu));

			if (verbose > 1) {
				printk("Cpu %d Transferring %d from "
				       "%d to %d.\n",
				       smp_processor_id(),
				       cur_test->length,
				       cur_test->source_offset,
				       cur_test->dest_offset);
			}

			err_cnt += brt_std_xfer(block_1, block_2,
						cur_test->source_offset,
						cur_test->dest_offset,
						cur_test->length,
						++iteration);

		}
		cur_test++;
	}

	kfree(block_2);
	kfree(block_1);

	return ((err_cnt ? 1 : 0));
}
Code Example #30
File: reboot.c Project: Jinjian0609/UVP-Tools
static int xen_suspend(void *__unused)
{
	int err, old_state;

	daemonize("suspend");
	err = set_cpus_allowed(current, cpumask_of_cpu(0));
	if (err) {
		printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err);
		goto fail;
	}

	do {
		err = __xen_suspend(fast_suspend, xen_resume_notifier);
		if (err) {
			printk(KERN_ERR "Xen suspend failed (%d)\n", err);
			goto fail;
		}
		if (!suspend_cancelled)
			setup_suspend_evtchn();
		old_state = cmpxchg(
			&shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID);
	} while (old_state == SHUTDOWN_SUSPEND);

	switch (old_state) {
	case SHUTDOWN_INVALID:
	case SHUTDOWN_SUSPEND:
		BUG();
	case SHUTDOWN_RESUMING:
		break;
	default:
		schedule_work(&shutdown_work);
		break;
	}

	return 0;

 fail:
	old_state = xchg(&shutting_down, SHUTDOWN_INVALID);
	BUG_ON(old_state != SHUTDOWN_SUSPEND);
	return 0;
}