Example 1
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}
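Like most of the snippets collected here, the function above narrows a candidate mask down to online CPUs with cpus_and() before acting on it. As a minimal stand-alone sketch of that idiom (the mask name and the do_work_on() helper are hypothetical, not taken from any of the quoted projects):

static void run_on_requested_online_cpus(cpumask_t requested)
{
	cpumask_t online_subset;

	/* Keep only CPUs that are both requested and currently online. */
	cpus_and(online_subset, requested, cpu_online_map);

	/* Nothing to do if no requested CPU is online. */
	if (cpus_empty(online_subset))
		return;

	do_work_on(online_subset);	/* hypothetical helper */
}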
Example 2
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
                              void (*func)(void *), void *info,
                              int wait)
{
    struct call_data_struct data;
    cpumask_t allbutself;
    int cpus;

    /* Can deadlock when called with interrupts disabled */
    WARN_ON(irqs_disabled());

    /* Holding any lock stops cpus from going down. */
    spin_lock(&call_lock);

    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);

    cpus_and(mask, mask, allbutself);
    cpus = cpus_weight(mask);

    if (!cpus) {
        spin_unlock(&call_lock);
        return 0;
    }

    data.func = func;
    data.info = info;
    atomic_set(&data.started, 0);
    data.wait = wait;
    if (wait)
        atomic_set(&data.finished, 0);

    call_data = &data;
    mb();

    /* Send a message to other CPUs */
    if (cpus_equal(mask, allbutself))
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);
    else
        send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

    /* Wait for response */
    while (atomic_read(&data.started) != cpus)
        cpu_relax();

    if (wait)
        while (atomic_read(&data.finished) != cpus)
            cpu_relax();
    spin_unlock(&call_lock);

    return 0;
}
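Going by the kerneldoc above, a caller builds a mask that excludes the current CPU and passes a fast, non-blocking callback. A hedged sketch of such a caller follows; bump_counter() and poke_other_cpus() are invented for illustration, and since the function is static the call would in practice sit in the same file (it is normally reached through the smp_call_function_mask() wrapper):

/* Hypothetical callback: must be fast and non-blocking per the kerneldoc. */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);
}

/* Hypothetical in-file caller. */
static int poke_other_cpus(atomic_t *counter)
{
	cpumask_t others = cpu_online_map;

	/* The mask must not include the current CPU. */
	cpu_clear(smp_processor_id(), others);

	/* wait = 1: return only after bump_counter() has run everywhere. */
	return native_smp_call_function_mask(others, bump_counter, counter, 1);
}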
Example 3
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
    /*
     * A couple of (to be removed) sanity checks:
     *
     * - current CPU must not be in mask
     * - mask must exist :)
     */
    BUG_ON(cpus_empty(cpumask));
    BUG_ON(cpu_isset(smp_processor_id(), cpumask));
    BUG_ON(!mm);

    /* If a CPU which we ran on has gone down, OK. */
    cpus_and(cpumask, cpumask, cpu_online_map);
    if (cpus_empty(cpumask))
        return;

    /*
     * i'm not happy about this global shared spinlock in the
     * MM hot path, but we'll see how contended it is.
     * Temporarily this turns IRQs off, so that lockups are
     * detected by the NMI watchdog.
     */
    spin_lock(&tlbstate_lock);

    flush_mm = mm;
    flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
    atomic_set_mask(cpumask, &flush_cpumask);
#else
    {
        int k;
        unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
        unsigned long *cpu_mask = (unsigned long *)&cpumask;
        for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
            atomic_set_mask(cpu_mask[k], &flush_mask[k]);
    }
#endif
    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

    while (!cpus_empty(flush_cpumask))
        /* nothing. lockup detection does not belong here */
        mb();

    flush_mm = NULL;
    flush_va = 0;
    spin_unlock(&tlbstate_lock);
}
Example 4
void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
{
    cpumask_t online_affinity;

    /* Do not fail if no CPU in old affinity mask is online. */
    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        *affinity = cpu_online_map;

    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
        BUG();
}
Example 5
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
    cpumask_t cpumask = *cpumaskp;

    /*
     * A couple of (to be removed) sanity checks:
     *
     * - current CPU must not be in mask
     * - mask must exist :)
     */
    BUG_ON(cpus_empty(cpumask));
    BUG_ON(cpu_isset(smp_processor_id(), cpumask));
    BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
    /* If a CPU which we ran on has gone down, OK. */
    cpus_and(cpumask, cpumask, cpu_online_map);
    if (unlikely(cpus_empty(cpumask)))
        return;
#endif

    /*
     * i'm not happy about this global shared spinlock in the
     * MM hot path, but we'll see how contended it is.
     * AK: x86-64 has a faster method that could be ported.
     */
    spin_lock(&tlbstate_lock);

    flush_mm = mm;
    flush_va = va;
    cpus_or(flush_cpumask, cpumask, flush_cpumask);

    /*
     * Make the above memory operations globally visible before
     * sending the IPI.
     */
    smp_mb();
    /*
     * We have to send the IPI only to
     * CPUs affected.
     */
    send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);

    while (!cpus_empty(flush_cpumask))
        /* nothing. lockup detection does not belong here */
        cpu_relax();

    flush_mm = NULL;
    flush_va = 0;
    spin_unlock(&tlbstate_lock);
}
Example 6
File: irq.c Project: Einheri/wl500g
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;

		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		spin_lock(&irq_desc[irq].lock);

		if (!irq_has_action(irq) ||
		    cpus_equal(irq_desc[irq].affinity, map)) {
			spin_unlock(&irq_desc[irq].lock);
			continue;
		}

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (cpus_empty(mask)) {
			break_affinity = 1;
			mask = map;
		}

		if (irq_desc[irq].chip->mask)
			irq_desc[irq].chip->mask(irq);

		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (!(warned++))
			set_affinity = 0;

		if (irq_desc[irq].chip->unmask)
			irq_desc[irq].chip->unmask(irq);

		spin_unlock(&irq_desc[irq].lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/* That doesn't seem sufficient.  Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
Example 7
void cpuidle_wakeup_mwait(cpumask_t *mask)
{
    cpumask_t target;
    unsigned int cpu;

    cpus_and(target, *mask, cpuidle_mwait_flags);

    /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
    for_each_cpu_mask(cpu, target)
        mwait_wakeup(cpu) = 0;

    cpus_andnot(*mask, *mask, target);
}
Example 8
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);

	/* Send the IPI. */
	for_each_cpu_mask(i, cpu_mask)
	{
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}
Example 9
static void find_best_object(struct topo_obj *d, void *data)
{
	struct obj_placement *best = (struct obj_placement *)data;
	uint64_t newload;
	cpumask_t subset;

	/*
 	 * Don't consider the unspecified numa node here
 	 */
	if (numa_avail && (d->obj_type == OBJ_TYPE_NODE) && (d->number == -1))
		return;

	/*
	 * also don't consider any node that doesn't have at least one cpu in
	 * the unbanned list
	 */
	if ((d->obj_type == OBJ_TYPE_NODE) &&
	    (!cpus_intersects(d->mask, unbanned_cpus)))
		return;

	/*
 	 * If the hint policy is subset, then we only want 
 	 * to consider objects that are within the irqs hint, but
 	 * only if that irq in fact has published a hint
 	 */
	if (best->info->hint_policy == HINT_POLICY_SUBSET) {
		if (!cpus_empty(best->info->affinity_hint)) {
			cpus_and(subset, best->info->affinity_hint, d->mask);
			if (cpus_empty(subset))
				return;
		}
	}

	if (d->powersave_mode)
		return;

	newload = d->load;
	if (newload < best->best_cost) {
		best->best = d;
		best->best_cost = newload;
		best->least_irqs = NULL;
	}

	if (newload == best->best_cost) {
		if (g_list_length(d->interrupts) < g_list_length(best->best->interrupts))
			best->least_irqs = d;
	}
}
Example 10
File: irq_64.c Project: E-LLP/n900
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;

		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		spin_lock(&desc->lock);

		if (!irq_has_action(irq) ||
		    cpus_equal(desc->affinity, map)) {
			spin_unlock(&desc->lock);
			continue;
		}

		cpus_and(mask, desc->affinity, map);
		if (cpus_empty(mask)) {
			break_affinity = 1;
			mask = map;
		}

		if (desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (!(warned++))
			set_affinity = 0;

		if (desc->chip->unmask)
			desc->chip->unmask(irq);

		spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/* That doesn't seem sufficient.  Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
Example 11
void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
{
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_bcast_ipi);
	if (!cpus_empty(mask)) {
#ifdef CONFIG_SMP
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#else
		/*
		 * We can directly call the apic timer interrupt handler
		 * in UP case. Minus all irq related functions
		 */
		up_apic_timer_interrupt_call(regs);
#endif
	}
}
Example 12
void smp_send_timer_broadcast_ipi(void)
{
	int cpu = smp_processor_id();
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);

	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		add_pda(apic_timer_irqs, 1);
		smp_local_timer_interrupt();
	}

	if (!cpus_empty(mask)) {
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
	}
}
Example 13
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(!cpus_empty(tmp))) {
		desc->chip->set_affinity(irq,tmp);
	}
	cpus_clear(irq_desc[irq].pending_mask);
}
Example 14
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
Example 15
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
Example 16
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	cpumask_t tmp;
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	cpus_and(tmp, cpumask, cpu_online_map);
	BUG_ON(!cpus_equal(tmp, cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	if (!mm)
		BUG();

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);
	
	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		mb();	/* nothing. lockup detection does not belong here */;

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example 17
cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask)
{
	cpumask_t oldmask;

	if (irq_to_desc(irq)->chip->set_affinity == NULL)
		return CPU_MASK_NONE;

	if (cpus_empty(cpumask))
		return CPU_MASK_NONE; /* Return mask value -- no change. */

	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return CPU_MASK_NONE;	/* Error -- bad mask value or non-routable IRQ. */

	cpumask_copy(&oldmask, irq_to_desc(irq)->affinity);
	irq_to_desc(irq)->chip->set_affinity(irq, &cpumask);

	return oldmask;
}
Example 18
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}
Example 19
static int __vcpu_set_affinity(
    struct vcpu *v, cpumask_t *affinity,
    bool_t old_lock_status, bool_t new_lock_status)
{
    cpumask_t online_affinity, old_affinity;

    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        return -EINVAL;

    vcpu_schedule_lock_irq(v);

    if ( v->affinity_locked != old_lock_status )
    {
        BUG_ON(!v->affinity_locked);
        vcpu_schedule_unlock_irq(v);
        return -EBUSY;
    }

    v->affinity_locked = new_lock_status;

    old_affinity = v->cpu_affinity;
    v->cpu_affinity = *affinity;
    *affinity = old_affinity;
    if ( !cpu_isset(v->processor, v->cpu_affinity) )
        set_bit(_VPF_migrating, &v->pause_flags);

    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }

    return 0;
}
Example 20
static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
    cpumask_t    tmp = CPU_MASK_NONE;
    unsigned long    flags;
    int        i;

    pr_debug(KERN_DEBUG "%s called\n", __func__);
    irq -= _irqbase;

    cpus_and(tmp, cpumask, cpu_online_map);
    if (cpus_empty(tmp))
        return;

    /* Assumption : cpumask refers to a single CPU */
    spin_lock_irqsave(&gic_lock, flags);
    for (;;) {
        /* Re-route this IRQ */
        GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

        /*
         * FIXME: assumption that _intrmap is ordered and has no holes
         */

        /* Update the intr_map */
        _intrmap[irq].cpunum = first_cpu(tmp);

        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
            clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

    }
    irq_desc[irq].affinity = cpumask;
    spin_unlock_irqrestore(&gic_lock, flags);

}
Example 21
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}
Example 22
static int
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;
	cpumask_t mask;

	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest = cpu_physical_id(first_cpu(mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
	return 0;
}
Example 23
/*
 * These functions send a 'generic call function' IPI to other online
 * CPUS in the system.
 *
 * [SUMMARY] Run a function on other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 * <map> is a cpu map of the cpus to send IPI to.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int __smp_call_function_map(void (*func) (void *info), void *info,
				   int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int ret = -1, num_cpus;
	int cpu;
	u64 timeout;

	if (unlikely(smp_ops == NULL))
		return ret;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	/* remove 'self' from the map */
	if (cpu_isset(smp_processor_id(), map))
		cpu_clear(smp_processor_id(), map);

	/* sanity check the map, remove any non-online processors. */
	cpus_and(map, map, cpu_online_map);

	num_cpus = cpus_weight(map);
	if (!num_cpus)
		goto done;

	call_data = &data;
	smp_wmb();
	/* Send a message to all CPUs in the map */
	for_each_cpu_mask(cpu, map)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for indication that they have received the message */
	while (atomic_read(&data.started) != num_cpus) {
		HMT_low();
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
				"responding (%d)\n", smp_processor_id(),
				atomic_read(&data.started));
			if (!ipi_fail_ok)
				debugger(NULL);
			goto out;
		}
	}

	/* optionally wait for the CPUs to complete */
	if (wait) {
		while (atomic_read(&data.finished) != num_cpus) {
			HMT_low();
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
					"cpus not finishing (%d/%d)\n",
					smp_processor_id(),
					atomic_read(&data.finished),
					atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

 done:
	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	return ret;
}
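Per the comment block at the top of the example, callers supply the target map together with a fast, non-blocking callback; the function itself strips the current CPU and any offline CPUs from the map. A hypothetical in-file wrapper (call_on_other_cpus() is invented for illustration) might look like:

/* Hypothetical wrapper: run 'func' on every other online CPU and wait
 * for completion ('nonatomic' is unused by the helper). */
static int call_on_other_cpus(void (*func)(void *), void *info)
{
	cpumask_t map = cpu_online_map;

	/* Redundant but harmless: the helper also removes 'self'. */
	cpu_clear(smp_processor_id(), map);

	return __smp_call_function_map(func, info, 0, 1, map);
}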
Example 24
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, &effective_mask);
	} else {
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, &new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
Example 25
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
Example 26
static int sys_vperfctr_control(struct vperfctr *perfctr,
				struct perfctr_struct_buf *argp,
				struct task_struct *tsk)
{
	struct vperfctr_control control;
	int err;
	unsigned int next_cstatus;
	unsigned int nrctrs, i;
	cpumask_t cpumask;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */

	err = perfctr_copy_from_user(&control, argp, &vperfctr_control_sdesc);
	if (err)
		return err;

	/* Step 1: Update the control but keep the counters disabled.
	   PREEMPT note: Preemption is disabled since we're updating
	   an active perfctr. */
	preempt_disable();
	if (IS_RUNNING(perfctr)) {
		if (tsk == current)
			vperfctr_suspend(perfctr);
		perfctr->cpu_state.cstatus = 0;
		vperfctr_clear_iresume_cstatus(perfctr);
	}
	perfctr->cpu_state.control = control.cpu_control;
	/* remote access note: perfctr_cpu_update_control() is ok */
	cpus_setall(cpumask);
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
	/* make a stopped vperfctr have an unconstrained cpumask */
	perfctr->cpumask = cpumask;
#endif
	err = perfctr_cpu_update_control(&perfctr->cpu_state, &cpumask);
	if (err < 0) {
		next_cstatus = 0;
	} else {
		next_cstatus = perfctr->cpu_state.cstatus;
		perfctr->cpu_state.cstatus = 0;
		perfctr->updater_tgid = current->tgid;
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
		perfctr->cpumask = cpumask;
#endif
	}
	preempt_enable_no_resched();

	if (!perfctr_cstatus_enabled(next_cstatus))
		return err;

#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
	/* Step 2: Update the task's CPU affinity mask.
	   PREEMPT note: Preemption must be enabled for set_cpus_allowed(). */
	if (control.cpu_control.nractrs || control.cpu_control.nrictrs) {
		cpumask_t old_mask, new_mask;

		old_mask = tsk->cpus_allowed;
		cpus_and(new_mask, old_mask, cpumask);

		if (cpus_empty(new_mask))
			return -EINVAL;
		if (!cpus_equal(new_mask, old_mask))
			set_cpus_allowed(tsk, new_mask);
	}
#endif

	/* Step 3: Enable the counters with the new control and affinity.
	   PREEMPT note: Preemption is disabled since we're updating
	   an active perfctr. */
	preempt_disable();

	/* We had to enable preemption above for set_cpus_allowed() so we may
	   have lost a race with a concurrent update via the remote control
	   interface. If so then we must abort our update of this perfctr. */
	if (perfctr->updater_tgid != current->tgid) {
		printk(KERN_WARNING "perfctr: control update by task %d"
		       " was lost due to race with update by task %d\n",
		       current->tgid, perfctr->updater_tgid);
		err = -EBUSY;
	} else {
		/* XXX: validate si_signo? */
		perfctr->si_signo = control.si_signo;

		perfctr->cpu_state.cstatus = next_cstatus;

		if (!perfctr_cstatus_has_tsc(next_cstatus))
			perfctr->cpu_state.tsc_sum = 0;

		nrctrs = perfctr_cstatus_nrctrs(next_cstatus);
		for(i = 0; i < nrctrs; ++i)
			if (!(control.preserve & (1<<i)))
				perfctr->cpu_state.pmc[i].sum = 0;

		perfctr->flags = control.flags;

		if (tsk == current)
			vperfctr_resume(perfctr);
	}

	preempt_enable();
	return err;
}
Example 27
static void do_one_cpu(char *path)
{
	struct topo_obj *cpu;
	FILE *file;
	char new_path[PATH_MAX];
	cpumask_t cache_mask, package_mask;
	struct topo_obj *cache;
	struct topo_obj *package;
	DIR *dir;
	struct dirent *entry;
	int nodeid;
	int packageid = 0;
	unsigned int max_cache_index, cache_index, cache_stat;

	/* skip offline cpus */
	snprintf(new_path, PATH_MAX, "%s/online", path);
	file = fopen(new_path, "r");
	if (file) {
		char *line = NULL;
		size_t size = 0;
		if (getline(&line, &size, file)==0)
			return;
		fclose(file);
		if (line && line[0]=='0') {
			free(line);
			return;
		}
		free(line);
	}

	cpu = calloc(sizeof(struct topo_obj), 1);
	if (!cpu)
		return;

	cpu->obj_type = OBJ_TYPE_CPU;

	cpu->number = strtoul(&path[27], NULL, 10);

	cpu_set(cpu->number, cpu_possible_map);
	
	cpu_set(cpu->number, cpu->mask);

	/*
 	 * Default the cache_domain mask to be equal to the cpu
 	 */
	cpus_clear(cache_mask);
	cpu_set(cpu->number, cache_mask);

	/* if the cpu is on the banned list, just don't add it */
	if (cpus_intersects(cpu->mask, banned_cpus)) {
		free(cpu);
		/* even though we don't use the cpu we do need to count it */
		core_count++;
		return;
	}


	/* try to read the package mask; if it doesn't exist assume solitary */
	snprintf(new_path, PATH_MAX, "%s/topology/core_siblings", path);
	file = fopen(new_path, "r");
	cpu_set(cpu->number, package_mask);
	if (file) {
		char *line = NULL;
		size_t size = 0;
		if (getline(&line, &size, file)) 
			cpumask_parse_user(line, strlen(line), package_mask);
		fclose(file);
		free(line);
	}
	/* try to read the package id */
	snprintf(new_path, PATH_MAX, "%s/topology/physical_package_id", path);
	file = fopen(new_path, "r");
	if (file) {
		char *line = NULL;
		size_t size = 0;
		if (getline(&line, &size, file))
			packageid = strtoul(line, NULL, 10);
		fclose(file);
		free(line);
	}

	/* try to read the cache mask; if it doesn't exist assume solitary */
	/* We want the deepest cache level available */
	cpu_set(cpu->number, cache_mask);
	max_cache_index = 0;
	cache_index = 1;
	cache_stat = 0;
	do {
		struct stat sb;
		snprintf(new_path, PATH_MAX, "%s/cache/index%d/shared_cpu_map", path, cache_index);
		cache_stat = stat(new_path, &sb);
		if (!cache_stat) {
			max_cache_index = cache_index;
			if (max_cache_index == deepest_cache)
				break;
			cache_index ++;
		}
	} while(!cache_stat);

	if (max_cache_index > 0) {
		snprintf(new_path, PATH_MAX, "%s/cache/index%d/shared_cpu_map", path, max_cache_index);
		file = fopen(new_path, "r");
		if (file) {
			char *line = NULL;
			size_t size = 0;
			if (getline(&line, &size, file))
				cpumask_parse_user(line, strlen(line), cache_mask);
			fclose(file);
			free(line);
		}
	}

	nodeid=-1;
	if (numa_avail) {
		dir = opendir(path);
		do {
			entry = readdir(dir);
			if (!entry)
				break;
			if (strstr(entry->d_name, "node")) {
				nodeid = strtoul(&entry->d_name[4], NULL, 10);
				break;
			}
		} while (entry);
		closedir(dir);
	}

	/*
	   blank out the banned cpus from the various masks so that interrupts
	   will never be told to go there
	 */
	cpus_and(cache_mask, cache_mask, unbanned_cpus);
	cpus_and(package_mask, package_mask, unbanned_cpus);

	cache = add_cpu_to_cache_domain(cpu, cache_mask);
	package = add_cache_domain_to_package(cache, packageid, package_mask);
	add_package_to_node(package, nodeid);

	cpu->obj_type_list = &cpus;
	cpus = g_list_append(cpus, cpu);
	core_count++;
}