Example #1
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	raw_spin_unlock(&tick_broadcast_lock);
}
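tmpmask here is a shared scratch cpumask, so the intersection is computed only while tick_broadcast_lock is held; the cpu_online_mask term keeps CPUs that have gone offline out of the broadcast.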
Example #2
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}
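The error path is the usual kernel goto unwind: each label releases exactly what was acquired before the failing step, in reverse order, and both the success and failure paths balance get_online_cpus() with put_online_cpus().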
Example #3
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
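		/* fall through - also update the irq thread's affinity */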
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
Example #4
void cpuidle_wakeup_mwait(cpumask_t *mask)
{
    cpumask_t target;
    unsigned int cpu;

    cpumask_and(&target, mask, &cpuidle_mwait_flags);

    /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
    for_each_cpu(cpu, &target)
        mwait_wakeup(cpu) = 0;

    cpumask_andnot(mask, mask, &target);
}
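Writing 0 to the monitored mwait_wakeup() location is what wakes each MWAITing CPU. On return, cpumask_andnot() has reduced @mask to the CPUs that could not be woken this way, which the caller still has to signal by other means (presumably a real IPI).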
Example #5
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask && cpumask_and(later_mask, cp->free_cpus,
			&p->cpus_allowed) && cpumask_and(later_mask,
			later_mask, cpu_active_mask)) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
Example #6
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
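		/* fall through - also update the irq thread's affinity */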
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
Example #7
void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if(!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = (struct hmp_domain *)
			kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = (struct hmp_domain *)
		kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
Example #8
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		cpumask_and(desc->affinity,
			    desc->pending_mask, cpu_online_mask);
		desc->chip->set_affinity(irq, desc->affinity);
	}
	cpumask_clear(desc->pending_mask);
}
Example #9
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
Example #10
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
Example #11
/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Temporarily this turns IRQs off, so that lockups are detected by the
	 * NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
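Only the initiating side is shown above; the wait loop exits because each targeted CPU clears its own bit. A plausible sketch of the receiving side follows (the handler name comes from the smp_call_function() call above; the local flush helpers are assumptions, not taken from the source):

/* Sketch of the receiver (assumed, not from the source): invalidate the
 * local TLB as requested, then acknowledge by clearing this CPU from
 * flush_cpumask, which is what releases the sender's wait loop. */
static void smp_flush_tlb(void *unused)
{
	unsigned long cpu = smp_processor_id();

	if (!cpumask_test_cpu(cpu, &flush_cpumask))
		return;	/* the request was not aimed at this CPU */

	if (flush_va == FLUSH_ALL)
		local_flush_tlb();
	else
		local_flush_tlb_page(flush_mm, flush_va);

	smp_mb();	/* the flush completes before the ack is visible */
	cpumask_clear_cpu(cpu, &flush_cpumask);
}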
Example #12
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}
Example #13
/*
 * Debug related code, dump vcpu/cpu information
 */
static void
rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
{
    cpumask_t *cpupool_mask, *mask;

    ASSERT(svc != NULL);
    /* idle vcpu */
    if ( svc->sdom == NULL )
    {
        printk("\n");
        return;
    }

    /*
     * We can't just use 'cpumask_scratch' because the dumping can
     * happen from a pCPU outside of this scheduler's cpupool, and
     * hence it's not right to use the pCPU's scratch mask (which
     * may even not exist!). On the other hand, it is safe to use
     * svc->vcpu->processor's own scratch space, since we hold the
     * runqueue lock.
     */
    mask = _cpumask_scratch[svc->vcpu->processor];

    cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
    cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
    printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
           " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
            svc->vcpu->domain->domain_id,
            svc->vcpu->vcpu_id,
            svc->vcpu->processor,
            svc->period,
            svc->budget,
            svc->cur_budget,
            svc->cur_deadline,
            svc->last_start,
            __vcpu_on_q(svc),
            vcpu_runnable(svc->vcpu),
            svc->flags,
            keyhandler_scratch);
}
Example #14
SYSCALL_DEFINE3(fairsched_cpumask, unsigned int, id, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	struct cgroup *cgrp;
	int retval;
	cpumask_var_t new_mask, in_mask;

	if (!capable_setveid())
		return -EPERM;

	if (id == 0)
		return -EINVAL;

	cgrp = fairsched_open(id);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if (!alloc_cpumask_var(&in_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_in_mask;
	}

	retval = get_user_cpu_mask(user_mask_ptr, len, in_mask);
	if (retval == 0) {
		cpumask_and(new_mask, in_mask, cpu_active_mask);
		cgroup_lock();
		retval = cgroup_set_cpumask(cgrp, new_mask);
		cgroup_unlock();
	}

	free_cpumask_var(new_mask);

out_free_in_mask:
	free_cpumask_var(in_mask);
out:
	cgroup_kernel_close(cgrp);
	return retval;
}
Example #15
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
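As in Example #11, this only terminates because each recipient of INVALIDATE_TLB_VECTOR performs the flush described by flush_va/flush_flags and then clears itself from flush_cpumask; the cpu_relax() loop exits once every targeted CPU has acknowledged. Note that the sender removes itself from the mask and handles its own flush with flush_area_local() up front.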
Example #16
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
    unsigned long flags;
    cpumask_t online;

    /* error out if there is no online CPU in @cpumask */
    if (!cpumask_and(&online, cpumask, cpu_online_mask))
        return -EINVAL;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
    idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);

    return IRQ_SET_MASK_OK;
}
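Unlike most examples here, this one uses the return value of cpumask_and(), which reports whether the destination mask came out non-empty; that folds the intersection and the separate emptiness test of Examples #10 and #20 into a single call.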
Example #17
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);

	return 0;
}
Example #18
/* Program ITE values on the node given by the cpumask
 * @m		: cpumask to program on ITE
 * @node	: node on which ITE should be programmed
 * @ite		: ITE to program
 * @scope	: program ITE only on the given node (0) or all nodes (1)
 */
void xlp_cpumask_to_node_ite(const struct cpumask *m, u8 node, u8 ite, u8 scope)
{
    struct cpumask t;
    int cpu = (node * NLM_MAX_CPU_PER_NODE), last;

    if (scope != 0)
        goto prog_all;

    /* When the scope is 0, program node ITEs with target as
     * local cpus only */
    last = cpu + NLM_MAX_CPU_PER_NODE - 1;
    if (last >= NR_CPUS)
        return;
    cpumask_and(&t, m, &phys_cpu_present_map);
    for (; cpu <= last; cpu++) {
        if (cpumask_test_cpu(cpu, &t))
            xlp_ite_cpu_set(node, cpu, ite);
        else
            xlp_ite_cpu_clear(node, cpu, ite);
    }
    return;
    return;
prog_all:
    /* Here we program the specified ITE in all nodes with the cpumask
     * passed. */
    /* TBD TODO */
    return;
}
Example #19
static int
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;
	cpumask_t mask;

	cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
	dest = cpu_physical_id(first_cpu(mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
	return 0;
}
Example #20
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}
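Examples #10 and #20 are two halves of one allocation scheme: the search and the binding. A hedged sketch of how a caller might combine them (the wrapper name is hypothetical; locking around irq_cfg[]/vector_table[] is assumed to be the caller's job):

/* Hypothetical wrapper (a sketch): find a free vector within @domain via
 * find_unassigned_vector() (Example #10) and bind it to @irq via
 * __bind_irq_vector() (Example #20). */
static int assign_irq_vector_in_domain(int irq, cpumask_t domain)
{
	int vector = find_unassigned_vector(domain);

	if (vector < 0)
		return vector;	/* -EINVAL or -ENOSPC from the search */
	return __bind_irq_vector(irq, vector, domain);
}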
Example #21
static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	pr_debug("%s called\n", __func__);
	irq -= _irqbase;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);
	/* Re-route this IRQ */
	GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

	/*
	 * FIXME: assumption that _intrmap is ordered and has no holes
	 */

	/* Update the intr_map */
	_intrmap[irq].cpunum = first_cpu(tmp);

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
	irq_desc[irq].affinity = *cpumask;
	spin_unlock_irqrestore(&gic_lock, flags);
}
Example #22
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int                  idx      = 0;
	int                  task_pri = convert_prio(p->prio);

	if (task_pri >= MAX_RT_PRIO)
		return 0;

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
Example #23
/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() unconditionally
		 * reports CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
Example #24
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}
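In this later variant the function returns what tick_do_broadcast() reports: whether the current CPU itself was among the targets. A simplified sketch of the intended caller (the real tick code also handles shutdown and oneshot reprogramming):

/* Simplified caller (a sketch, not the full source): when the broadcast
 * included the local CPU, run the local device's handler directly. */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	bc_local = tick_do_periodic_broadcast();
	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}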
Example #25
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
Example #26
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);
}
Example #27
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int                  idx      = 0;
	int                  task_pri = convert_prio(p->prio);

	if (task_pri >= MAX_RT_PRIO)
		return 0;

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racey, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
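The smp_rmb() above pairs with a write barrier on the update side: the updater must make the mask bit visible before the count increment, and the count decrement visible before the mask bit is cleared. A sketch of that counterpart, with hypothetical helper names, modeled on the kernel's cpupri_set():

/* Hypothetical helpers (a sketch, not the source): publish a CPU into a
 * cpupri vector so that the reader's count -> smp_rmb() -> mask sequence
 * in cpupri_find() is safe. */
static void cpupri_vec_add(struct cpupri_vec *vec, int cpu)
{
	cpumask_set_cpu(cpu, vec->mask);
	smp_wmb();			/* mask bit visible before the count */
	atomic_inc(&vec->count);
}

static void cpupri_vec_del(struct cpupri_vec *vec, int cpu)
{
	atomic_dec(&vec->count);
	smp_wmb();			/* count visible before the bit clears */
	cpumask_clear_cpu(cpu, vec->mask);
}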
Example #28
int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = current_cpu_data.core;
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    this_cpu_ptr(&cpu_sibling_map));
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_clear_bit();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
Example #29
/*
 * hps algo - hmp
 */
void hps_algo_hmp(void)
{
    unsigned int cpu;
    unsigned int val;
    struct cpumask little_online_cpumask;
    struct cpumask big_online_cpumask;
    unsigned int little_num_base, little_num_limit, little_num_online;
    unsigned int big_num_base, big_num_limit, big_num_online;
    //for logging purposes
    char str1[64];
    char str2[64];
    int i, j;
    char * str1_ptr = str1;
    char * str2_ptr = str2;

    /*
     * run algo or not by hps_ctxt.enabled
     */
    if (!hps_ctxt.enabled)
    {
        atomic_set(&hps_ctxt.is_ondemand, 0);
        return;
    }

    /*
     * calculate cpu loading
     */
    hps_ctxt.cur_loads = 0;
    str1_ptr = str1;
    str2_ptr = str2;

    for_each_possible_cpu(cpu)
    {
        per_cpu(hps_percpu_ctxt, cpu).load = hps_cpu_get_percpu_load(cpu);
        hps_ctxt.cur_loads += per_cpu(hps_percpu_ctxt, cpu).load;

        if (hps_ctxt.cur_dump_enabled)
        {
            if (cpu_online(cpu))
                i = sprintf(str1_ptr, "%4u", 1);
            else
                i = sprintf(str1_ptr, "%4u", 0);
            str1_ptr += i;
            j = sprintf(str2_ptr, "%4u", per_cpu(hps_percpu_ctxt, cpu).load);
            str2_ptr += j;
        }
    }
    hps_ctxt.cur_nr_heavy_task = hps_cpu_get_nr_heavy_task();
    hps_cpu_get_tlp(&hps_ctxt.cur_tlp, &hps_ctxt.cur_iowait);

    /*
     * algo - begin
     */
    mutex_lock(&hps_ctxt.lock);
    hps_ctxt.action = ACTION_NONE;
    atomic_set(&hps_ctxt.is_ondemand, 0);

    /*
     * algo - get boundary
     */
    little_num_limit = min(hps_ctxt.little_num_limit_thermal, hps_ctxt.little_num_limit_low_battery);
    little_num_base = hps_ctxt.little_num_base_perf_serv;
    cpumask_and(&little_online_cpumask, &hps_ctxt.little_cpumask, cpu_online_mask);
    little_num_online = cpumask_weight(&little_online_cpumask);
    //TODO: no need if is_hmp
    big_num_limit = min(hps_ctxt.big_num_limit_thermal, hps_ctxt.big_num_limit_low_battery);
    big_num_base = max(hps_ctxt.cur_nr_heavy_task, hps_ctxt.big_num_base_perf_serv);
    cpumask_and(&big_online_cpumask, &hps_ctxt.big_cpumask, cpu_online_mask);
    big_num_online = cpumask_weight(&big_online_cpumask);
    if (hps_ctxt.cur_dump_enabled)
    {
        hps_debug(" CPU:%s\n", str1);
        hps_debug("LOAD:%s\n", str2);
        hps_debug("loads(%u), hvy_tsk(%u), tlp(%u), iowait(%u), limit_t(%u)(%u), limit_lb(%u)(%u), base_ps(%u)(%u)\n", 
            hps_ctxt.cur_loads, hps_ctxt.cur_nr_heavy_task, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait,
            hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
            hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
            hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv);
    }

//ALGO_LIMIT:
    /*
     * algo - thermal, low battery
     */
    if (big_num_online > big_num_limit)
    {
        val =  big_num_online - big_num_limit;
        for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &big_online_cpumask);
                --big_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_LIMIT_BIG, (unsigned long *)&hps_ctxt.action);
    }
    if (little_num_online > little_num_limit)
    {
        val =  little_num_online - little_num_limit;
        for (cpu = hps_ctxt.little_cpu_id_max; cpu > hps_ctxt.little_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &little_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &little_online_cpumask);
                --little_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_LIMIT_LITTLE, (unsigned long *)&hps_ctxt.action);
    }
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_BASE:
    /*
     * algo - PerfService, heavy task detect
     */
    BUG_ON(big_num_online > big_num_limit);
    BUG_ON(little_num_online > little_num_limit);
    if ((big_num_online < big_num_base) && (big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
    {
        val =  min(big_num_base, big_num_limit) - big_num_online;
        for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
        {
            if (!cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_up(cpu);
                cpumask_set_cpu(cpu, &big_online_cpumask);
                ++big_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_BASE_BIG, (unsigned long *)&hps_ctxt.action);
    }
    if ((little_num_online < little_num_base) && (little_num_online < little_num_limit) &&
        (little_num_online + big_num_online < hps_ctxt.little_num_base_perf_serv + hps_ctxt.big_num_base_perf_serv))
    {
        val =  min(little_num_base, little_num_limit) - little_num_online;
        if (big_num_online > hps_ctxt.big_num_base_perf_serv)
            val -= big_num_online - hps_ctxt.big_num_base_perf_serv;
        for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
        {
            if (!cpumask_test_cpu(cpu, &little_online_cpumask))
            {
                cpu_up(cpu);
                cpumask_set_cpu(cpu, &little_online_cpumask);
                ++little_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_BASE_LITTLE, (unsigned long *)&hps_ctxt.action);
    }
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

    /*
     * update history - tlp
     */
    val = hps_ctxt.tlp_history[hps_ctxt.tlp_history_index];
    hps_ctxt.tlp_history[hps_ctxt.tlp_history_index] = hps_ctxt.cur_tlp;
    hps_ctxt.tlp_sum += hps_ctxt.cur_tlp;
    hps_ctxt.tlp_history_index = (hps_ctxt.tlp_history_index + 1 == hps_ctxt.tlp_times) ? 0 : hps_ctxt.tlp_history_index + 1;
    ++hps_ctxt.tlp_count;
    if (hps_ctxt.tlp_count > hps_ctxt.tlp_times)
    {
        BUG_ON(hps_ctxt.tlp_sum < val);
        hps_ctxt.tlp_sum -= val;
        hps_ctxt.tlp_avg = hps_ctxt.tlp_sum / hps_ctxt.tlp_times;
    }
    else
    {
        hps_ctxt.tlp_avg = hps_ctxt.tlp_sum / hps_ctxt.tlp_count;
    }
    if (hps_ctxt.stats_dump_enabled)
        hps_ctxt_print_algo_stats_tlp(0);

//ALGO_RUSH_BOOST:
    /*
     * algo - rush boost
     */
    if (hps_ctxt.rush_boost_enabled)
    {
        if (hps_ctxt.cur_loads > hps_ctxt.rush_boost_threshold * (little_num_online + big_num_online))
            ++hps_ctxt.rush_count;
        else
            hps_ctxt.rush_count = 0;

        if ((hps_ctxt.rush_count >= hps_ctxt.rush_boost_times) &&
            ((little_num_online + big_num_online) * 100 < hps_ctxt.tlp_avg))
        {
            val = hps_ctxt.tlp_avg / 100 + (hps_ctxt.tlp_avg % 100 ? 1 : 0);
            BUG_ON(!(val > little_num_online + big_num_online));
            if (val > num_possible_cpus())
                val = num_possible_cpus();

            val -= little_num_online + big_num_online;
            if ((val) && (little_num_online < little_num_limit))
            {
                for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &little_online_cpumask);
                        ++little_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_RUSH_BOOST_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
            else if ((val) && (big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
            {
                for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &big_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &big_online_cpumask);
                        ++big_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_RUSH_BOOST_BIG, (unsigned long *)&hps_ctxt.action);
            }
        }
    } //if (hps_ctxt.rush_boost_enabled)
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_UP:
    /*
     * algo - cpu up
     */
    if ((little_num_online + big_num_online) < num_possible_cpus())
    {
        /*
         * update history - up
         */
        val = hps_ctxt.up_loads_history[hps_ctxt.up_loads_history_index];
        hps_ctxt.up_loads_history[hps_ctxt.up_loads_history_index] = hps_ctxt.cur_loads;
        hps_ctxt.up_loads_sum += hps_ctxt.cur_loads;
        hps_ctxt.up_loads_history_index = (hps_ctxt.up_loads_history_index + 1 == hps_ctxt.up_times) ? 0 : hps_ctxt.up_loads_history_index + 1;
        ++hps_ctxt.up_loads_count;
        //XXX: use >= or >, which is more beneficial? use >
        if (hps_ctxt.up_loads_count > hps_ctxt.up_times)
        {
            BUG_ON(hps_ctxt.up_loads_sum < val);
            hps_ctxt.up_loads_sum -= val;
        }
        if (hps_ctxt.stats_dump_enabled)
            hps_ctxt_print_algo_stats_up(0);

        if (hps_ctxt.up_loads_count >= hps_ctxt.up_times)
        {
            if (hps_ctxt.up_loads_sum > hps_ctxt.up_threshold * hps_ctxt.up_times * (little_num_online + big_num_online))
            {
                if (little_num_online < little_num_limit)
                {
                    for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                    {
                        if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                        {
                            cpu_up(cpu);
                            cpumask_set_cpu(cpu, &little_online_cpumask);
                            ++little_num_online;
                            break;
                        }
                    }
                    set_bit(ACTION_UP_LITTLE, (unsigned long *)&hps_ctxt.action);
                }
                else if ((big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
                {
                    for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
                    {
                        if (!cpumask_test_cpu(cpu, &big_online_cpumask))
                        {
                            cpu_up(cpu);
                            cpumask_set_cpu(cpu, &big_online_cpumask);
                            ++big_num_online;
                            break;
                        }
                    }
                    set_bit(ACTION_UP_BIG, (unsigned long *)&hps_ctxt.action);
                }
            }
        } //if (hps_ctxt.up_loads_count >= hps_ctxt.up_times)
    } //if ((little_num_online + big_num_online) < num_possible_cpus())
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_DOWN:
    /*
     * algo - cpu down (inc. quick landing)
     */
    if (little_num_online + big_num_online > 1)
    {
        /*
         * update history - down
         */
        val = hps_ctxt.down_loads_history[hps_ctxt.down_loads_history_index];
        hps_ctxt.down_loads_history[hps_ctxt.down_loads_history_index] = hps_ctxt.cur_loads;
        hps_ctxt.down_loads_sum += hps_ctxt.cur_loads;
        hps_ctxt.down_loads_history_index = (hps_ctxt.down_loads_history_index + 1 == hps_ctxt.down_times) ? 0 : hps_ctxt.down_loads_history_index + 1;
        ++hps_ctxt.down_loads_count;
        //XXX: use >= or >, which is more beneficial? use >
        if (hps_ctxt.down_loads_count > hps_ctxt.down_times)
        {
            BUG_ON(hps_ctxt.down_loads_sum < val);
            hps_ctxt.down_loads_sum -= val;
        }
        if (hps_ctxt.stats_dump_enabled)
            hps_ctxt_print_algo_stats_down(0);

        if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
        {
            unsigned int down_threshold = hps_ctxt.down_threshold * hps_ctxt.down_times;

            val = little_num_online + big_num_online;
            while (hps_ctxt.down_loads_sum < down_threshold * (val - 1))
                --val;
            val = little_num_online + big_num_online - val;

            if ((val) && (big_num_online > big_num_base))
            {
                for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
                {
                    if (cpumask_test_cpu(cpu, &big_online_cpumask))
                    {
                        cpu_down(cpu);
                        cpumask_clear_cpu(cpu, &big_online_cpumask);
                        --big_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_DOWN_BIG, (unsigned long *)&hps_ctxt.action);
            }
            else if ((val) && (little_num_online > little_num_base))
            {
                for (cpu = hps_ctxt.little_cpu_id_max; cpu > hps_ctxt.little_cpu_id_min; --cpu)
                {
                    if (cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_down(cpu);
                        cpumask_clear_cpu(cpu, &little_online_cpumask);
                        --little_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_DOWN_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
        } //if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    } //if (little_num_online + big_num_online > 1)
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_BIG_TO_LITTLE:
    /*
     * algo - b2L
     */
    if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    {
        if ((little_num_online < little_num_limit) && (big_num_online > big_num_base))
        {
            //find last online big
            for (val = hps_ctxt.big_cpu_id_max; val >= hps_ctxt.big_cpu_id_min; --val)
            {
                if (cpumask_test_cpu(val, &big_online_cpumask))
                    break;
            }
            BUG_ON(val < hps_ctxt.big_cpu_id_min);

            //verify whether b2L will open 1 little
            if (per_cpu(hps_percpu_ctxt, val).load * CPU_DMIPS_BIG_LITTLE_DIFF / 100 + 
                hps_ctxt.up_loads_sum / hps_ctxt.up_times <= hps_ctxt.up_threshold  * (little_num_online + big_num_online))
            {
                //up 1 little
                for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &little_online_cpumask);
                        ++little_num_online;
                        break;
                    }
                }

                //down 1 big
                cpu_down(val);
                cpumask_clear_cpu(val, &big_online_cpumask);
                --big_num_online;
                set_bit(ACTION_BIG_TO_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
        } //if ((little_num_online < little_num_limit) && (big_num_online > big_num_base))
    } //if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    if (!hps_ctxt.action)
        goto ALGO_END_WO_ACTION;

    /*
     * algo - end
     */
ALGO_END_WITH_ACTION:
    hps_warn("(%04x)(%u)(%u)action end(%u)(%u)(%u)(%u) (%u)(%u)(%u)(%u)(%u)(%u) (%u)(%u)(%u) (%u)(%u)(%u) (%u)(%u)(%u)(%u)(%u)\n",
        hps_ctxt.action, little_num_online, big_num_online,
        hps_ctxt.cur_loads, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait, hps_ctxt.cur_nr_heavy_task, 
        hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
        hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
        hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv,
        hps_ctxt.up_loads_sum, hps_ctxt.up_loads_count, hps_ctxt.up_loads_history_index, 
        hps_ctxt.down_loads_sum, hps_ctxt.down_loads_count, hps_ctxt.down_loads_history_index, 
        hps_ctxt.rush_count, hps_ctxt.tlp_sum, hps_ctxt.tlp_count, hps_ctxt.tlp_history_index, hps_ctxt.tlp_avg);
    hps_ctxt_reset_stas_nolock();
ALGO_END_WO_ACTION:
    mutex_unlock(&hps_ctxt.lock);

    return;
}
Example #30
/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}