/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask,
	       cpumask_pr_args(&cache_cpumask_copy),
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
	       cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
	panic("Unsafe to continue.");
}
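For orientation, here is a minimal caller sketch. The helper name, the choice of HV_FLUSH_EVICT_L2 as the cache_control value, and the use of cpu_online_mask/mm_cpumask() are assumptions for illustration, not taken from the listing above: it evicts one page from the remote L2 caches and shoots down the matching TLB entry on the CPUs that have the mm active.

static void example_evict_remote_page(struct mm_struct *mm,
				      unsigned long pfn, unsigned long va)
{
	flush_remote(pfn, HV_FLUSH_EVICT_L2, cpu_online_mask,
		     (HV_VirtAddr)va, PAGE_SIZE, PAGE_SIZE,
		     mm_cpumask(mm), NULL, 0);
}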
Example #2
static void timer_list_show_tickdevices_header(struct seq_file *m)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	print_tickdevice(m, tick_get_broadcast_device(), -1);
	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
		   cpumask_bits(tick_get_broadcast_mask())[0]);
#ifdef CONFIG_TICK_ONESHOT
	SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
		   cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
#endif
	SEQ_printf(m, "\n");
#endif
}
Example #3
File: cpu.c Project: avagin/vzctl
int set_cpumask(envid_t veid, cpumask_t *mask)
{
	static char maskstr[CPUMASK_NBITS * 2];

	bitmap_snprintf(maskstr, CPUMASK_NBITS * 2,
			cpumask_bits(mask), CPUMASK_NBITS);
	logger(0, 0, "Setting CPU mask: %s", maskstr);
	if (fairsched_cpumask(veid, sizeof(cpumask_t), cpumask_bits(mask))) {
		logger(-1, errno, "fairsched_cpumask");
		return VZ_SETFSHD_ERROR;
	}
	return 0;
}
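A hypothetical call sketch; building the mask through cpumask_bits() directly is an assumption about vzctl's cpumask helpers, and veid stands for whatever container id the caller holds:

	cpumask_t mask;

	memset(&mask, 0, sizeof(mask));
	cpumask_bits(&mask)[0] = 0xf;	/* CPUs 0-3 */
	if (set_cpumask(veid, &mask))
		return VZ_SETFSHD_ERROR;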
Example #4
void gicv2_ipi_send_mask(int irq, const cpumask_t *dest)
{
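	/*
	 * GICv2 SGIs can target at most eight CPUs: the low byte of the
	 * destination cpumask becomes the SGIR CPU target list, which is
	 * shifted into bits [23:16] of the register value below.
	 */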
	u8 tlist = (u8)cpumask_bits(dest)[0];

	assert(irq < 16);
	writel(tlist << 16 | irq, gicv2_dist_base() + GICD_SGIR);
}
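As a quick check of that encoding: sending SGI 7 to CPUs 0 and 2 means the low byte of the mask is 0x05, so the value written to GICD_SGIR is (0x05 << 16) | 7 = 0x00050007. The call below is only a sketch and two_cpu_mask is a placeholder name:

	gicv2_ipi_send_mask(7, &two_cpu_mask);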
Example #5
static int hotplug_rtb_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the data are the online mask, lower 4 bits are the
	 * cpu number that is being changed. Additionally, changes to the
	 * online_mask that will be done by the current hotplug will be made
	 * even though they aren't necessarily in the online mask yet.
	 *
	 * XXX: This design is limited to supporting at most 16 cpus
	 */
	int this_cpumask = CPUSET_OF(1 << (int)hcpu);
	int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	int cpudata = CPU_OF((int)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
Example #6
static int set_max_cpus(const char *buf, const struct kernel_param *kp)
{
	unsigned int i, ntokens = 0;
	const char *cp = buf;
	int val;

	if (!clusters_inited)
		return -EINVAL;

	while ((cp = strpbrk(cp + 1, ":")))
		ntokens++;

	if (!ntokens)
		return -EINVAL;

	cp = buf;
	for (i = 0; i < num_clusters; i++) {

		if (sscanf(cp, "%d\n", &val) != 1)
			return -EINVAL;
		if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
			return -EINVAL;

		managed_clusters[i]->max_cpu_request = val;

		cp = strchr(cp, ':');
		cp++;
		trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
								val);
	}

	schedule_delayed_work(&evaluate_hotplug_work, 0);

	return 0;
}
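A sketch of the input this parser accepts, for a hypothetical two-cluster system: one value per cluster, separated by colons, each value no larger than the weight of that cluster's cpumask.

	/* "2:4" -> cluster 0 capped at 2 CPUs, cluster 1 capped at 4 */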
Example #7
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
Example #8
int cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    int err = 0;
    uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);

    if ( !bytemap )
        return -ENOMEM;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);

    bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);

    if ( copy_bytes != 0 )
        if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
            err = -EFAULT;

    for ( i = copy_bytes; !err && i < guest_bytes; i++ )
        if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
            err = -EFAULT;

    xfree(bytemap);

    return err;
}
Example #9
int xenctl_cpumap_to_cpumask(
    cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    int err = 0;
    uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);

    if ( !bytemap )
        return -ENOMEM;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);

    if ( copy_bytes != 0 )
    {
        if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
            err = -EFAULT;
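        /*
         * If nr_cpus is not a multiple of 8, clear the bits of the last
         * copied byte that lie beyond nr_cpus.
         */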
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes == copy_bytes) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    if ( err )
        /* nothing */;
    else if ( alloc_cpumask_var(cpumask) )
        bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
    else
        err = -ENOMEM;

    xfree(bytemap);

    return err;
}
Example #10
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;
	unsigned long *k;
	unsigned int min_length = cpumask_size();

	if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
		min_length = sizeof(compat_ulong_t);

	if (len < min_length)
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret < 0)
		goto out;

	k = cpumask_bits(mask);
	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
	if (ret == 0)
		ret = min_length;

out:
	free_cpumask_var(mask);
	return ret;
}
Example #11
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* errout if no online cpu per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

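	/*
	 * A single destination bit (ffs == fls) means exactly one target
	 * CPU, so deliver the IRQ directly to it; otherwise distribute it
	 * round-robin across the CPUs in the mask.
	 */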
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}
Example #12
static int hotplug_rtb_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the data are the online mask, lower 4 bits are the
	 * cpu number that is being changed. Additionally, changes to the
	 * online_mask that will be done by the current hotplug will be made
	 * even though they aren't necessarily in the online mask yet.
	 *
	 * XXX: This design is limited to supporting at most 16 cpus
	 */
	unsigned long this_cpumask = CPUSET_OF(1 << (unsigned long)hcpu);
	unsigned long cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	unsigned long cpudata = CPU_OF((unsigned long)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
Example #13
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPU to execute flush TLB.
 *               1.Setup parameters.
 *               2.Send 'INVALIDATE_TLB_IPI' to other CPU.
 *                 Request other CPU to execute 'smp_invalidate_interrupt()'.
 *               3.Wait for other CPUs operation finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm -  a pointer to the mm struct for flush TLB
 *               *vma -  a pointer to the vma struct include va
 *               va - virtual address for flush TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #14
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
Example #15
static void es7000_setup_apic_routing(void)
{
    int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());

    pr_info("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
            (apic_version[apic] == 0x14) ?
            "Physical Cluster" : "Logical Cluster",
            nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
}
Example #16
/* Checks if a mask spans multiple nodes
 *
 * @mask	: cpumask to check for multiple node span
 */
int xlp_span_multiple_nodes(const struct cpumask *mask)
{
    int l, f;
    f = cpumask_first(mask);
    l = find_last_bit(cpumask_bits(mask), NR_CPUS);
    if ((f/NLM_MAX_CPU_PER_NODE) != (l/NLM_MAX_CPU_PER_NODE)) {
        printk(KERN_DEBUG "Mask spans from cpu %#x to %#x. Spans across nodes are not supported\n", f, l);
        return -EINVAL;
    }
    return 0;
}
Example #17
static void
flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
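	/*
	 * Flat APIC mode only addresses CPUs in the first word of the
	 * cpumask; drop the sender's own bit before firing the IPI.
	 */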
	unsigned long mask = cpumask_bits(cpumask)[0];
	int cpu = smp_processor_id();

	if (cpu < BITS_PER_LONG)
		clear_bit(cpu, &mask);

	_flat_send_IPI_mask(mask, vector);
}
Example #18
/* Similar to default_send_IPI_mask_logical() */
void l4x_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	l4x_send_IPI_mask_bitmask(mask, vector);
	local_irq_restore(flags);
}
Example #19
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

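	/*
	 * If the user buffer is smaller than the kernel cpumask, zero the
	 * whole mask first so the uncopied tail stays clear; if it is
	 * larger, only cpumask_size() bytes are read.
	 */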
	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
Example #20
static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
    /* Careful. Some cpus do not strictly honor the set of cpus
     * specified in the interrupt destination when using lowest
     * priority interrupt delivery mode.
     *
     * In particular there was a hyperthreading cpu observed to
     * deliver interrupts to the wrong hyperthread when only one
     * hyperthread was specified in the interrupt destination.
     */
    cpumask_clear(retmask);
    cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
Example #21
static void raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

#ifdef CONFIG_X86_LOCAL_APIC
	if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
		unsigned long start;
		int cpu;

		get_online_cpus();
		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpumask_clear_cpu(cpu, mce_inject_cpumask);
		}
		if (!cpumask_empty(mce_inject_cpumask)) {
			if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
				/*
				 * don't wait: mce_irq_ipi needs to be in sync
				 * with the following raise_local()
				 */
				preempt_disable();
				smp_call_function_many(mce_inject_cpumask,
					mce_irq_ipi, NULL, 0);
				preempt_enable();
			} else if (m->inject_flags & MCJ_NMI_BROADCAST)
				apic->send_IPI_mask(mce_inject_cpumask,
						NMI_VECTOR);
		}
		start = jiffies;
		while (!cpumask_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				printk(KERN_ERR
				"Timeout waiting for mce inject %lx\n",
					*cpumask_bits(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else
Example #22
void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
    unsigned long mask = cpumask_bits(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    mask &= cpumask_bits(&cpu_online_map)[0];
    mask &= ~(1UL << smp_processor_id());
    if ( mask == 0 )
        return;

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);
    
    local_irq_restore(flags);
}
Example #23
int xenctl_bitmap_to_cpumask(cpumask_var_t *cpumask,
                             const struct xenctl_bitmap *xenctl_cpumap)
{
    int err = 0;

    if ( alloc_cpumask_var(cpumask) ) {
        err = xenctl_bitmap_to_bitmap(cpumask_bits(*cpumask), xenctl_cpumap,
                                      nr_cpu_ids);
        /* In case of error, cleanup is up to us, as the caller won't care! */
        if ( err )
            free_cpumask_var(*cpumask);
    }
    else
        err = -ENOMEM;

    return err;
}
Example #24
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends a 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
Example #25
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif
	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
	if (*mask) {
		unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
		unsigned int tail;
		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
		memset(ptr + cpumask_size() - tail, 0, tail);
	}

	return *mask != NULL;
}
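A minimal usage sketch for the allocator above; the GFP flag, the node choice via cpu_to_node(), and the surrounding error handling are the caller's business and are shown here only as assumptions:

	cpumask_var_t mask;

	if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu)))
		return -ENOMEM;
	cpumask_copy(mask, cpu_online_mask);
	/* ... use mask ... */
	free_cpumask_var(mask);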
Example #26
File: mcip.c Project: causten/linux
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
    unsigned long flags;
    cpumask_t online;

    /* errout if no online cpu per @cpumask */
    if (!cpumask_and(&online, cpumask, cpu_online_mask))
        return -EINVAL;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
    idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);

    return IRQ_SET_MASK_OK;
}
Example #27
static void flat_send_IPI_allbutself(int vector)
{
	int cpu = smp_processor_id();
#ifdef	CONFIG_HOTPLUG_CPU
	int hotplug = 1;
#else
	int hotplug = 0;
#endif
	if (hotplug || vector == NMI_VECTOR) {
		if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
			unsigned long mask = cpumask_bits(cpu_online_mask)[0];

			if (cpu < BITS_PER_LONG)
				clear_bit(cpu, &mask);

			_flat_send_IPI_mask(mask, vector);
		}
	} else if (num_online_cpus() > 1) {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT,
					    vector, apic->dest_logical);
	}
}
Example #28
static void raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

#ifdef CONFIG_X86_LOCAL_APIC
	if (m->inject_flags & MCJ_NMI_BROADCAST) {
		unsigned long start;
		int cpu;
		get_online_cpus();
		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpumask_clear_cpu(cpu, mce_inject_cpumask);
		}
		if (!cpumask_empty(mce_inject_cpumask))
			apic->send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
		start = jiffies;
		while (!cpumask_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				printk(KERN_ERR
				"Timeout waiting for mce inject NMI %lx\n",
					*cpumask_bits(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else
Example #29
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
static void __init setup_nr_cpu_ids(void)
{
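	/* e.g. cpu_possible_mask == 0b1011: last set bit is 3, so nr_cpu_ids = 4 */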
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
}
Example #30
/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0;  /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}