Code Example #1
File: pcrypt.c Project: mikuhatsune001/linux2.6.32
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}
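
The same idiom reappears in Examples #5 and #23: reduce a running counter modulo cpumask_weight(), then walk from cpumask_first() with cpumask_next() until the index-th set bit is reached. Below is a minimal userspace sketch of that walk, assuming a plain unsigned long as a stand-in for struct cpumask; mask_first(), mask_next() and mask_weight() are hypothetical simplifications of the kernel helpers, and the mask is assumed non-empty, as the kernel callers guarantee via their weight checks.

#include <stdio.h>

#define NR 8	/* stand-in for nr_cpu_ids */

static int mask_weight(unsigned long m) { return __builtin_popcountl(m); }
static int mask_first(unsigned long m) { return m ? __builtin_ctzl(m) : NR; }

static int mask_next(int prev, unsigned long m)
{
	unsigned long rest = m & ~((2UL << prev) - 1); /* drop bits 0..prev */
	return rest ? __builtin_ctzl(rest) : NR;
}

/* The pcrypt idiom: land on the (index mod weight)-th set bit. */
static int index_to_cpu(unsigned long mask, unsigned int index)
{
	int cpu = mask_first(mask);
	unsigned int i;

	index %= mask_weight(mask);	/* assumes mask is non-empty */
	for (i = 0; i < index; i++)
		cpu = mask_next(cpu, mask);
	return cpu;
}

int main(void)
{
	unsigned long online = 0xB4;	/* CPUs 2, 4, 5 and 7 "online" */
	unsigned int tfm_count;

	for (tfm_count = 1; tfm_count <= 6; tfm_count++)
		printf("tfm %u -> callback CPU %d\n",
		       tfm_count, index_to_cpu(online, tfm_count));
	return 0;
}

The walk is O(weight) per lookup, which is why pcrypt_do_parallel() short-circuits via goto out when *cb_cpu is already in the callback mask.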
Code Example #2
File: smpboot.c Project: 0-T-0/ps4-linux
	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_die(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 *  Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_die(c, o) && !topology_same_node(c, o))
			primarily_use_numa_for_topology();
	}
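
The booted_cores accounting above hinges on one trick: each core is counted exactly once because only the CPU that is cpumask_first() of its own sibling mask does the increment. A hedged sketch of that counting rule, where sibling_mask_of() is a hypothetical topology with two hardware threads per core and unsigned long bitmaps stand in for struct cpumask:

#include <stdio.h>

#define NR 8

static int mask_first(unsigned long m)
{
	return m ? __builtin_ctzl(m) : NR;
}

/* Hypothetical topology: two hardware threads per core. */
static unsigned long sibling_mask_of(int cpu)
{
	return 3UL << (cpu & ~1);	/* e.g. CPU 5 -> mask of CPUs 4 and 5 */
}

int main(void)
{
	unsigned long online = 0x3D;	/* CPUs 0, 2, 3, 4, 5 online */
	int cpu, booted_cores = 0;

	for (cpu = 0; cpu < NR; cpu++) {
		if (!(online & (1UL << cpu)))
			continue;
		/* Count the core only from its first online sibling. */
		if (mask_first(sibling_mask_of(cpu) & online) == cpu)
			booted_cores++;
	}
	printf("booted_cores = %d\n", booted_cores);	/* prints 3 */
	return 0;
}

Any per-core setup can use the same guard; the sketch merely distils the "first sibling wins" test out of the smpboot logic.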
Code Example #3
File: rtasd.c Project: Claruarius/stblinux-2.6.37
static void rtas_event_scan(struct work_struct *w)
{
    unsigned int cpu;

    do_event_scan();

    get_online_cpus();

    cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
    if (cpu >= nr_cpu_ids) {
        cpu = cpumask_first(cpu_online_mask);

        if (first_pass) {
            first_pass = 0;
            event_scan_delay = 30*HZ/rtas_event_scan_rate;

            if (surveillance_timeout != -1) {
                pr_debug("rtasd: enabling surveillance\n");
                enable_surveillance(surveillance_timeout);
                pr_debug("rtasd: surveillance enabled\n");
            }
        }
    }

    schedule_delayed_work_on(cpu, &event_scan_work,
                             __round_jiffies_relative(event_scan_delay, cpu));

    put_online_cpus();
}
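
Each pass deliberately hops to the next online CPU so the RTAS scan is spread across the machine, wrapping back to cpumask_first() once cpumask_next() runs past nr_cpu_ids. A sketch of just that wrap-around, again with hypothetical mask_first()/mask_next() stand-ins over an unsigned long:

#include <stdio.h>

#define NR 8	/* stand-in for nr_cpu_ids */

static int mask_first(unsigned long m) { return m ? __builtin_ctzl(m) : NR; }

static int mask_next(int prev, unsigned long m)
{
	unsigned long rest = m & ~((2UL << prev) - 1);
	return rest ? __builtin_ctzl(rest) : NR;
}

/* rtas_event_scan()'s hop: next online CPU, wrapping past the end. */
static int next_cpu_round_robin(int cur, unsigned long online)
{
	int cpu = mask_next(cur, online);
	return (cpu >= NR) ? mask_first(online) : cpu;
}

int main(void)
{
	unsigned long online = 0x2C;	/* CPUs 2, 3 and 5 "online" */
	int cpu = mask_first(online);
	int i;

	for (i = 0; i < 7; i++) {
		printf("pass %d runs on CPU %d\n", i, cpu);
		cpu = next_cpu_round_robin(cpu, online);
	}
	return 0;
}

In the kernel version the get_online_cpus()/put_online_cpus() pair pins the online mask, so the chosen CPU cannot be unplugged between the wrap test and schedule_delayed_work_on().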
Code Example #4
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
Code Example #5
File: pcrypt.c Project: mikuhatsune001/linux2.6.32
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
	int cpu, cpu_index;
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	ictx->tfm_count++;

	cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
		+ sizeof(struct aead_givcrypt_request)
		+ crypto_aead_reqsize(cipher);

	return 0;
}
Code Example #6
File: cpu.c Project: duki994/SM-G850_Kernel_LP
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	int lated_cpu;
#endif

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	if (exynos_boot_cluster == CA7)
		lated_cpu = NR_CA7;
	else
		lated_cpu = NR_CA15;
#endif

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
		if (cpu == first_cpu || cpu == lated_cpu)
#else
		if (cpu == first_cpu)
#endif
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	if (num_online_cpus() > 1) {
		error = _cpu_down(lated_cpu, 1);
		if (!error)
			cpumask_set_cpu(lated_cpu, frozen_cpus);
		else
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				lated_cpu, error);
	}
#endif
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
Code Example #7
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(data->affinity, mask);

	return 0;
}
Code Example #8
File: x2apic.c Project: HPSI/xen-v4v
static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector)
{
    unsigned int cpu = smp_processor_id();
    cpumask_t *ipimask = per_cpu(scratch_mask, cpu);
    const cpumask_t *cluster_cpus;
    unsigned long flags;

    mb(); /* See above for an explanation. */

    local_irq_save(flags);

    cpumask_andnot(ipimask, &cpu_online_map, cpumask_of(cpu));

    for ( cpumask_and(ipimask, cpumask, ipimask); !cpumask_empty(ipimask);
          cpumask_andnot(ipimask, ipimask, cluster_cpus) )
    {
        uint64_t msr_content = 0;

        cluster_cpus = per_cpu(cluster_cpus, cpumask_first(ipimask));
        for_each_cpu ( cpu, cluster_cpus )
        {
            if ( !cpumask_test_cpu(cpu, ipimask) )
                continue;
            msr_content |= per_cpu(cpu_2_logical_apicid, cpu);
        }

        BUG_ON(!msr_content);
        msr_content = (msr_content << 32) | APIC_DM_FIXED |
                      APIC_DEST_LOGICAL | vector;
        apic_wrmsr(APIC_ICR, msr_content);
    }

    local_irq_restore(flags);
}
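
The IPI path above peels the target mask one cluster at a time: cpumask_first() picks a representative, that CPU's whole cluster is folded into a single ICR write, then cpumask_andnot() removes the cluster and the loop repeats until the mask is empty. A small sketch of the peeling loop, assuming a hypothetical cluster_of() in place of the per-CPU cluster_cpus data and unsigned long bitmaps in place of cpumask_t:

#include <stdio.h>

#define NR 8

static int mask_first(unsigned long m)
{
	return m ? __builtin_ctzl(m) : NR;
}

/* Hypothetical topology: CPUs 0-3 form cluster 0, CPUs 4-7 cluster 1. */
static unsigned long cluster_of(int cpu)
{
	return (cpu < 4) ? 0x0FUL : 0xF0UL;
}

int main(void)
{
	unsigned long ipimask = 0x96;	/* target CPUs 1, 2, 4 and 7 */

	while (ipimask) {
		unsigned long cluster = cluster_of(mask_first(ipimask));
		unsigned long in_cluster = ipimask & cluster;

		printf("one ICR write covers mask %#lx\n", in_cluster);
		ipimask &= ~cluster;	/* cpumask_andnot(): drop the cluster */
	}
	return 0;
}

The loop issues one write per cluster rather than one per CPU, which is the point of x2APIC cluster mode.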
Code Example #9
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
Code Example #10
File: smpboot.c Project: CobraJet93/kernel-3.10.54
	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * cpu_sibling_mask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_mc(c, o))) {
			link_mask(core, cpu, i);

			/*
			 *  Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
Code Example #11
File: octeon-irq.c Project: CSCLOG/beaglebone
static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}
Code Example #12
File: topology.c Project: guribe94/linux
static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}
Code Example #13
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu = cpumask_first(cpumask);

	if (cpu < nr_cpu_ids)
		return cpu_physical_id(cpu);
	return BAD_APICID;
}
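
cpumask_first() on an empty mask returns nr_cpu_ids (or more), so the result must be validated before use; the same guard appears in Examples #18, #19 and #21. A tiny demonstration of the sentinel, with the usual hypothetical unsigned-long stand-in:

#include <stdio.h>

#define NR 8		/* stand-in for nr_cpu_ids */
#define BAD_ID -1	/* plays the role of BAD_APICID */

static int mask_first(unsigned long m)
{
	return m ? __builtin_ctzl(m) : NR;	/* empty mask -> sentinel */
}

/* bigsmp_cpu_mask_to_apicid()'s shape: validate, then use. */
static int first_or_bad(unsigned long mask)
{
	int cpu = mask_first(mask);

	return (cpu < NR) ? cpu : BAD_ID;
}

int main(void)
{
	printf("first of 0x30 -> %d\n", first_or_bad(0x30));	/* CPU 4 */
	printf("first of 0x00 -> %d\n", first_or_bad(0x00));	/* BAD_ID */
	return 0;
}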
Code Example #14
File: misc.c Project: millken/zhuxianB30
void conplat_misc_init(void)
{
    g_product_type = drv_get_product_type();
    g_device_id = drv_get_device_id();
    g_if_port_sum = drv_get_slot_max_port();
    drv_get_max_slot(&g_if_slot_sum);
    
    g_control_cpu_first = cpumask_first(cpu_control_mask);
    g_control_cpu_num = num_control_cpus();
    g_data_cpu_first = cpumask_first(cpu_data_mask);
    g_data_cpu_num = num_data_cpus();
    g_cpu_num = g_control_cpu_num + g_data_cpu_num;
        
    printk("conplat_misc_init:control_cpu_first = %d, control_cpu_num = %d\n",g_control_cpu_first, g_control_cpu_num);
    printk("conplat_misc_init:data_cpu_first = %d, data_cpu_num = %d\n",g_data_cpu_first, g_data_cpu_num);
 
}
Code Example #15
File: irq.c Project: LittleForker/linux-2.6
static int sn_set_affinity_irq(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	unsigned int irq = data->irq;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(cpumask_first(mask));
	slice = cpuid_to_slice(cpumask_first(mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);

	return 0;
}
Code Example #16
void lge_pm_handle_poweroff(void)
{
#if 1
	lge_pm_low_vbatt_notify();
#else
	schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
#endif
}
Code Example #17
File: op_model_p4.c Project: 3null/fastsocket
/* this assigns a "stagger" to the current CPU, which is used throughout
   the code in this module as an extra array offset, to select the "even"
   or "odd" part of all the divided resources. */
static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
#endif
	return 0;
}
Code Example #18
File: tick-common.c Project: longqzh/chronnOS
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
static void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
Code Example #19
File: affinity.c Project: AK101111/linux
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;
	return cpu;
}
Code Example #20
/**
 * Save current fixed-range MTRR state of the first cpu in cpu_online_mask.
 */
void mtrr_save_state(void)
{
	int first_cpu;

	get_online_cpus();
	first_cpu = cpumask_first(cpu_online_mask);
	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
	put_online_cpus();
}
Code Example #21
File: tick-common.c Project: lovejavaee/linux-2
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. Not locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
Code Example #22
File: msi_sn.c Project: 10x-Amin/nAa-kernel
static void sn_set_msi_irq_affinity(unsigned int irq,
				    const struct cpumask *cpu_mask)
{
	struct msi_msg msg;
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;
	unsigned int cpu;

	cpu = cpumask_first(cpu_mask);
	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */

	read_msi_msg(irq, &msg);
	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[irq].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);

	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[irq].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return;

	/*
	 * Map the xio address into bus space
	 */

	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);

	sn_msi_info[irq].pci_addr = bus_addr;
	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = *cpu_mask;
}
Code Example #23
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
Code Example #24
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);
	else
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return &cpu_data(*pos);
	return NULL;
}
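
c_start() treats *pos as a resumable cursor: position 0 maps to cpumask_first(), and any later position resumes the walk with cpumask_next(*pos - 1, ...), so each read of the seq_file picks up exactly where the previous record left off. A sketch of the cursor logic under the same hypothetical unsigned-long stand-ins:

#include <stdio.h>

#define NR 8

static int mask_first(unsigned long m) { return m ? __builtin_ctzl(m) : NR; }

static int mask_next(int prev, unsigned long m)
{
	unsigned long rest = m & ~((2UL << prev) - 1);
	return rest ? __builtin_ctzl(rest) : NR;
}

/* c_start()'s cursor: 0 means "begin", otherwise resume after pos - 1. */
static long cursor_advance(long pos, unsigned long online)
{
	return (pos == 0) ? mask_first(online) : mask_next(pos - 1, online);
}

int main(void)
{
	unsigned long online = 0x8D;	/* CPUs 0, 2, 3 and 7 "online" */
	long pos = 0;

	while ((pos = cursor_advance(pos, online)) < NR) {
		printf("show cpuinfo for CPU %ld\n", pos);
		pos++;	/* the seq_file core bumps *pos between records */
	}
	return 0;
}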
Code Example #25
File: dw_apb_timer.c Project: 513855417/linux
static int apbt_resume(struct clock_event_device *evt)
{
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);

	pr_debug("%s CPU %d state=resume\n", __func__,
		 cpumask_first(evt->cpumask));

	apbt_enable_int(&dw_ced->timer);
	return 0;
}
Code Example #26
/* Checks if a mask spans multiple nodes
 *
 * @mask	: cpumask to check for multiple node span
 */
int xlp_span_multiple_nodes(const struct cpumask *mask)
{
    int l, f;
    f = cpumask_first(mask);
    l = find_last_bit(cpumask_bits(mask), NR_CPUS);
    if ((f/NLM_MAX_CPU_PER_NODE) != (l/NLM_MAX_CPU_PER_NODE)) {
        printk(KERN_DEBUG "Mask spans from cpu %#x to %#x. Spans across nodes are not supported\n", f, l);
        return -EINVAL;
    }
    return 0;
}
Code Example #27
File: mips-gic-timer.c Project: hackbras/linux
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
	u64 cnt;
	int res;

	cnt = gic_read_count();
	cnt += (u64)delta;
	gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
	res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}
Code Example #28
File: rtasd.c Project: 0-T-0/ps4-linux
static void start_event_scan(void)
{
	printk(KERN_DEBUG "RTAS daemon started\n");
	pr_debug("rtasd: will sleep for %d milliseconds\n",
		 (30000 / rtas_event_scan_rate));

	/* Retrieve errors from nvram if any */
	retreive_nvram_error_log();

	schedule_delayed_work_on(cpumask_first(cpu_online_mask),
				 &event_scan_work, event_scan_delay);
}
Code Example #29
File: irq.c Project: hothanhbinh/AndromadusMod-New
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in local APIC.
	 * This is to account for cases that device interrupted during the time the
	 * rte was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let processor die. We do irq disable and max_xtp() to
	 * ensure there is no more interrupts routed to this processor.
	 * But the local timer interrupt can have 1 pending which we
	 * take care in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
Code Example #30
static void cpuhotplug_early_suspend(struct early_suspend *p)
{
	int first_cpu, cpu;
	/* skip the first cpu, cpu0 always online */
	first_cpu = cpumask_first(cpu_online_mask);
	for_each_possible_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		if (cpu_online(cpu))
			earlysuspend_cpu_op(cpu, false);
	}
}