Code example #1
File: irq_ia64.c Project: 0x7f454c46/linux
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
Code example #2
File: tegra3_dvfs.c Project: Hundsbuah/tf700t_kernel
int tegra_cpu_dvfs_alter(int edp_thermal_index, const cpumask_t *cpus,
			  bool before_clk_update, int cpu_event)
{
	bool cpu_warm = !!edp_thermal_index;
	unsigned int n = cpumask_weight(cpus);
	unsigned long *alt_freqs = cpu_warm ?
		(n > 1 ? NULL : cpu_0_freqs) : cpu_cold_freqs;

	if (cpu_event || (cpu_warm == before_clk_update)) {
		int ret = tegra_dvfs_alt_freqs_set(cpu_dvfs, alt_freqs);
		if (ret) {
			pr_err("tegra dvfs: failed to set alternative dvfs on "
			       "%u %s CPUs\n", n, cpu_warm ? "warm" : "cold");
			return ret;
		}
	}
	return 0;
}
Code example #3
File: irq.c Project: duanyujun/androidgoldfish
static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	unsigned long flags;
	unsigned int irq_dirty;

	if (cpumask_weight(mask) != 1) {
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}
	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&bcm1480_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
Code example #4
static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
{
    int i = 0, old_cpu, cpu, int_on;
    u64 cur_ints;
    struct irq_desc *desc = irq_desc + irq;
    unsigned long flags;

    i = cpumask_first(mask);

    if (cpumask_weight(mask) > 1) {
        printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
        return;
    }

    /* Convert logical CPU to physical CPU */
    cpu = cpu_logical_map(i);

    /* Protect against other affinity changers and IMR manipulation */
    spin_lock_irqsave(&desc->lock, flags);
    spin_lock(&sb1250_imr_lock);

    /* Swizzle each CPU's IMR (but leave the IP selection alone) */
    old_cpu = sb1250_irq_owner[irq];
    cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
                                    R_IMR_INTERRUPT_MASK));
    int_on = !(cur_ints & (((u64) 1) << irq));
    if (int_on) {
        /* If it was on, mask it */
        cur_ints |= (((u64) 1) << irq);
        ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
                                        R_IMR_INTERRUPT_MASK));
    }
    sb1250_irq_owner[irq] = cpu;
    if (int_on) {
        /* unmask for the new CPU */
        cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
        cur_ints &= ~(((u64) 1) << irq);
        ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
    }
    spin_unlock(&sb1250_imr_lock);
    spin_unlock_irqrestore(&desc->lock, flags);
}
Code example #5
static int __ref bcl_cpu_ctrl_callback(struct notifier_block *nfb,
	unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;

	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
		if (!cpumask_test_and_set_cpu(cpu, bcl_cpu_online_mask))
			pr_debug("BCL online Mask: %u\n",
				cpumask_weight(bcl_cpu_online_mask));
		if (bcl_hotplug_request & BIT(cpu)) {
			pr_info("preventing CPU%d from coming online\n", cpu);
			return NOTIFY_BAD;
		} else {
			pr_debug("voting for CPU%d to be online\n", cpu);
		}
	}

	return NOTIFY_OK;
}
Code example #6
/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags - we don't steal tags
 * unless enough percpu freelists have tags on them that it's possible more than
 * half the total tags could be stuck on remote percpu freelists.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
			      struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
	     cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				BUG();
		}

		pool->cpu_last_stolen = cpu;
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		if (remote == tags)
			continue;

		spin_lock(&remote->lock);

		if (remote->nr_free) {
			memcpy(tags->freelist,
			       remote->freelist,
			       sizeof(unsigned) * remote->nr_free);

			tags->nr_free = remote->nr_free;
			remote->nr_free = 0;
		}

		spin_unlock(&remote->lock);

		if (tags->nr_free)
			break;
	}
}
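
The threshold in the comment above is easy to check numerically: stealing only happens while the CPUs still holding tags could collectively hide more than half of the pool on their percpu freelists. A minimal user-space sketch of that predicate, with IDA_PCPU_SIZE and both counts as assumed stand-in values:

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for the kernel constant: per-cpu freelist capacity. */
#define IDA_PCPU_SIZE 64

/* Steal only while the cpus that hold tags could collectively hide
 * more than half of the pool on their percpu freelists. */
static bool worth_stealing(unsigned int cpus_have_tags, unsigned int nr_tags)
{
	return cpus_have_tags * IDA_PCPU_SIZE > nr_tags / 2;
}

int main(void)
{
	/* 4 cpus with tags, 1024 tags total: 4 * 64 = 256 <= 512 -> 0 */
	printf("%d\n", worth_stealing(4, 1024));
	/* 12 cpus with tags: 12 * 64 = 768 > 512 -> 1 */
	printf("%d\n", worth_stealing(12, 1024));
	return 0;
}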
Code example #7
File: proc.c Project: 285452612/ali_kernel
/*
 *	Get CPU information for use by the procfs.
 */
static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
			unsigned int cpu, unsigned int index, bool instance, unsigned int total)
{
#ifdef CONFIG_SMP
	if (c->x86_max_cores * smp_num_siblings > 1) {
		if (instance) {
			seq_printf(m, "physical id\t: 0\n");
			seq_printf(m, "siblings\t: %d\n", total);
			seq_printf(m, "core id\t\t: %d\n", index);
			seq_printf(m, "cpu cores\t: %d\n", total);
		} else {
			seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
			seq_printf(m, "siblings\t: %d\n",
				cpumask_weight(cpu_core_mask(cpu)));
			seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
			seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
		}
		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
	}
#endif
}
Code example #8
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;

	return max(max, min_pages);
}
Code example #9
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}
Code example #10
File: ipi.c Project: 513855417/linux
/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq:	linux irq number to be destroyed
 * @dest:	cpumask of cpus which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system,
 * destroying all virqs associated with them.
 *
 * Return: 0 on success, or an error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
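
In the per-cpu branch above, the range of virqs to free follows from plain arithmetic on the destination mask: the first virq is the base irq offset by cpumask_first(dest) relative to the IPI's ipi_offset, and the count is cpumask_weight(dest). A small stand-alone sketch of that arithmetic; every value here is an illustrative assumption rather than output from a real system:

#include <stdio.h>

int main(void)
{
	/* Toy model of a per-cpu IPI: one virq per CPU, allocated as a
	 * contiguous range starting at base_irq for CPUs from ipi_offset up. */
	unsigned int base_irq = 40;   /* virq returned by irq_reserve_ipi() */
	unsigned int ipi_offset = 2;  /* first CPU the IPI was reserved for */
	unsigned int dest_first = 4;  /* cpumask_first(dest) */
	unsigned int dest_weight = 3; /* cpumask_weight(dest): CPUs 4,5,6 */

	/* Same computation as the irq_domain_is_ipi_per_cpu() branch. */
	unsigned int first_virq = base_irq + dest_first - ipi_offset;

	printf("freeing %u virqs starting at virq %u\n", dest_weight, first_virq);
	return 0;
}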
Code example #11
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}
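
The comment before the selection logic captures the core idea: parallel objects are spread round-robin over the pcpu cpumask, so the sequence number of the next object to reorder selects a percpu queue by taking it modulo the mask's weight. A tiny user-space rendering of that mapping, with an assumed four-CPU mask standing in for padata_index_to_cpu():

#include <stdio.h>

int main(void)
{
	/* Assumed stand-ins: cpumask_weight(pd->cpumask.pcpu) and the
	 * index-to-cpu mapping normally provided by padata_index_to_cpu(). */
	unsigned int num_cpus = 4;
	unsigned int cpu_by_index[4] = { 0, 2, 5, 7 };
	unsigned int processed;

	for (processed = 0; processed < 8; processed++) {
		unsigned int next_index = processed % num_cpus;
		printf("seq %u -> reorder queue of cpu %u\n",
		       processed, cpu_by_index[next_index]);
	}
	return 0;
}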
Code example #12
int get_cluster_size(enum cache_level level)
{
	cpumask_var_t mask;
	int ok;
	int num_cpus;

	if (level == GLOBAL_CLUSTER)
		return num_online_cpus();
	else {
		if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
			return -ENOMEM;
		/* assumes CPU 0 is representative of all CPUs */
		ok = get_shared_cpu_map(mask, 0, level);
		/* ok == 0 means we got the map; otherwise it's an invalid cache level */
		if (ok == 0)
			num_cpus = cpumask_weight(mask);
		free_cpumask_var(mask);

		if (ok == 0)
			return num_cpus;
		else
			return -EINVAL;
	}
}
Code example #13
File: init.c Project: AllenWeb/linux
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START)
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
Code example #14
File: process.c Project: 0x000000FF/Linux4Edison
static void
common_shutdown_1(void *generic_ptr)
{
	struct halt_info *how = (struct halt_info *)generic_ptr;
	struct percpu_struct *cpup;
	unsigned long *pflags, flags;
	int cpuid = smp_processor_id();

	/* No point in taking interrupts anymore. */
	local_irq_disable();

	cpup = (struct percpu_struct *)
			((unsigned long)hwrpb + hwrpb->processor_offset
			 + hwrpb->processor_size * cpuid);
	pflags = &cpup->flags;
	flags = *pflags;

	/* Clear reason to "default"; clear "bootstrap in progress". */
	flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
	/* Secondaries halt here. */
	if (cpuid != boot_cpuid) {
		flags |= 0x00040000UL; /* "remain halted" */
		*pflags = flags;
		set_cpu_present(cpuid, false);
		set_cpu_possible(cpuid, false);
		halt();
	}
#endif

	if (how->mode == LINUX_REBOOT_CMD_RESTART) {
		if (!how->restart_cmd) {
			flags |= 0x00020000UL; /* "cold bootstrap" */
		} else {
			/* For SRM, we could probably set environment
			   variables to get this to work.  We'd have to
			   delay this until after srm_paging_stop unless
			   we ever got srm_fixup working.

			   At the moment, SRM will use the last boot device,
			   but the file and flags will be the defaults, when
			   doing a "warm" bootstrap.  */
			flags |= 0x00030000UL; /* "warm bootstrap" */
		}
	} else {
		flags |= 0x00040000UL; /* "remain halted" */
	}
	*pflags = flags;

#ifdef CONFIG_SMP
	/* Wait for the secondaries to halt. */
	set_cpu_present(boot_cpuid, false);
	set_cpu_possible(boot_cpuid, false);
	while (cpumask_weight(cpu_present_mask))
		barrier();
#endif

	/* If booted from SRM, reset some of the original environment. */
	if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
		/* If we've gotten here after SysRq-b, leave interrupt
		   context before taking over the console. */
		if (in_interrupt())
			irq_exit();
		/* This has the effect of resetting the VGA video origin.  */
		take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif
		pci_restore_srm_config();
		set_hae(srm_hae);
	}

	if (alpha_mv.kill_arch)
		alpha_mv.kill_arch(how->mode);

	if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
		/* Unfortunately, since MILO doesn't currently understand
		   the hwrpb bits above, we can't reliably halt the 
		   processor and keep it halted.  So just loop.  */
		return;
	}

	if (alpha_using_srm)
		srm_paging_stop();

	halt();
}
Code example #15
File: pm-cps.c Project: stas2z/linux-3.10-witi
int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = current_cpu_data.core;
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    this_cpu_ptr(&cpu_sibling_map));
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_clear_bit();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
Code example #16
File: init.c Project: rslotte/OGS-Tile
static int __init setup_ktext(char *str)
{
    if (str == NULL)
        return -EINVAL;

    /* If you have a leading "nocache", turn off ktext caching */
    if (strncmp(str, "nocache", 7) == 0) {
        ktext_nocache = 1;
        pr_info("ktext: disabling local caching of kernel text\n");
        str += 7;
        if (*str == ',')
            ++str;
        if (*str == '\0')
            return 0;
    }

    ktext_arg_seen = 1;

    /* Default setting on Tile64: use a huge page */
    if (strcmp(str, "huge") == 0)
        pr_info("ktext: using one huge locally cached page\n");

    /* Pay TLB cost but get no cache benefit: cache small pages locally */
    else if (strcmp(str, "local") == 0) {
        ktext_small = 1;
        ktext_local = 1;
        pr_info("ktext: using small pages with local caching\n");
    }

    /* Neighborhood cache ktext pages on all cpus. */
    else if (strcmp(str, "all") == 0) {
        ktext_small = 1;
        ktext_all = 1;
        pr_info("ktext: using maximal caching neighborhood\n");
    }

#ifdef CONFIG_DATAPLANE
    /* Neighborhood cache ktext pages on all non-dataplane cpus. */
    else if (strcmp(str, "nondataplane") == 0) {
        ktext_small = 1;
        ktext_nondataplane = 1;
        pr_info("ktext: caching on all non-dataplane tiles\n");
    }
#endif

    /* Neighborhood ktext pages on specified mask */
    else if (cpulist_parse(str, &ktext_mask) == 0) {
        char buf[NR_CPUS * 5];
        cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
        if (cpumask_weight(&ktext_mask) > 1) {
            ktext_small = 1;
            pr_info("ktext: using caching neighborhood %s "
                    "with small pages\n", buf);
        } else {
            pr_info("ktext: caching on cpu %s with one huge page\n",
                    buf);
        }
    }

    else if (*str)
        return -EINVAL;

    return 0;
}
Code example #17
File: powernow.c Project: HackLinux/xen
static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int i;
    unsigned int valid_states = 0;
    unsigned int cpu = policy->cpu;
    struct acpi_cpufreq_data *data;
    unsigned int result = 0;
    struct processor_performance *perf;
    u32 max_hw_pstate;
    uint64_t msr_content;
    struct cpuinfo_x86 *c = &cpu_data[policy->cpu];

    data = xzalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;

    cpufreq_drv_data[cpu] = data;

    data->acpi_data = &processor_pminfo[cpu]->perf;

    perf = data->acpi_data;
    policy->shared_type = perf->shared_type;

    if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
        policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
        cpumask_set_cpu(cpu, policy->cpus);
        if (cpumask_weight(policy->cpus) != 1) {
            printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n",
                   policy->shared_type, cpumask_weight(policy->cpus));
            result = -ENODEV;
            goto err_unreg;
        }
    } else {
        cpumask_copy(policy->cpus, cpumask_of(cpu));
    }

    /* capability check */
    if (perf->state_count <= 1) {
        printk("No P-States\n");
        result = -ENODEV;
        goto err_unreg;
    }
    rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content);
    max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

    if (perf->control_register.space_id != perf->status_register.space_id) {
        result = -ENODEV;
        goto err_unreg;
    }

    data->freq_table = xmalloc_array(struct cpufreq_frequency_table, 
                                    (perf->state_count+1));
    if (!data->freq_table) {
        result = -ENOMEM;
        goto err_unreg;
    }

    /* detect transition latency */
    policy->cpuinfo.transition_latency = 0;
    for (i=0; i<perf->state_count; i++) {
        if ((perf->states[i].transition_latency * 1000) >
            policy->cpuinfo.transition_latency)
            policy->cpuinfo.transition_latency =
                perf->states[i].transition_latency * 1000;
    }

    policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR;

    /* table init */
    for (i = 0; i < perf->state_count && i <= max_hw_pstate; i++) {
        if (i > 0 && perf->states[i].core_frequency >=
            data->freq_table[valid_states-1].frequency / 1000)
            continue;

        data->freq_table[valid_states].index = perf->states[i].control & HW_PSTATE_MASK;
        data->freq_table[valid_states].frequency =
            perf->states[i].core_frequency * 1000;
        valid_states++;
    }
    data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
    perf->state = 0;

    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
    if (result)
        goto err_freqfree;

    if (c->cpuid_level >= 6)
        on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
      
    /*
     * the first call to ->target() should result in us actually
     * writing something to the appropriate registers.
     */
    data->arch_cpu_flags |= ARCH_CPU_FLAG_RESUME;

    policy->cur = data->freq_table[i].frequency;
    return result;

err_freqfree:
    xfree(data->freq_table);
err_unreg:
    xfree(data);
    cpufreq_drv_data[cpu] = NULL;

    return result;
}
Code example #18
File: smp.c Project: 1800alex/linux
static int __init smp_iic_probe(void)
{
	iic_request_IPIs();

	return cpumask_weight(cpu_possible_mask);
}
Code example #19
File: smpboot.c Project: fhgwright/bb-linux
/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message.  We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu)	{
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb();  /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}
Code example #20
File: tsc_sync.c Project: EMFPGA/linux_media
/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But since the TSC is per-logical-CPU and can potentially be modified
 * wrongly by the BIOS, a TSC sync test of shorter duration should still be
 * able to catch such errors. This also catches the case where not all the
 * cores in the socket are reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}
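
A stand-alone rendering of the same decision makes the two timeouts explicit; here the count of online core-siblings is an assumed input rather than a real topology_core_cpumask() query:

#include <stdio.h>

/* 2 msec once a core-sibling is already online (the socket has been
 * measured before), 20 msec for the first CPU brought up in a socket. */
static unsigned int loop_timeout_msec(unsigned int core_siblings_online)
{
	return core_siblings_online > 1 ? 2 : 20;
}

int main(void)
{
	printf("first cpu in socket: %u msec\n", loop_timeout_msec(1));
	printf("sibling already up:  %u msec\n", loop_timeout_msec(4));
	return 0;
}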
Code example #21
File: smp.c Project: nelsont/kernel-N8000
static int smp_iSeries_probe(void)
{
	return cpumask_weight(cpu_possible_mask);
}
Code example #22
/*
 * hps algo - hmp
 */
void hps_algo_hmp(void)
{
    unsigned int cpu;
    unsigned int val;
    struct cpumask little_online_cpumask;
    struct cpumask big_online_cpumask;
    unsigned int little_num_base, little_num_limit, little_num_online;
    unsigned int big_num_base, big_num_limit, big_num_online;
    //log purpose
    char str1[64];
    char str2[64];
    int i, j;
    char * str1_ptr = str1;
    char * str2_ptr = str2;

    /*
     * run algo or not by hps_ctxt.enabled
     */
    if (!hps_ctxt.enabled)
    {
        atomic_set(&hps_ctxt.is_ondemand, 0);
        return;
    }

    /*
     * calculate cpu loading
     */
    hps_ctxt.cur_loads = 0;
    str1_ptr = str1;
    str2_ptr = str2;

    for_each_possible_cpu(cpu)
    {
        per_cpu(hps_percpu_ctxt, cpu).load = hps_cpu_get_percpu_load(cpu);
        hps_ctxt.cur_loads += per_cpu(hps_percpu_ctxt, cpu).load;

        if (hps_ctxt.cur_dump_enabled)
        {
            if (cpu_online(cpu))
                i = sprintf(str1_ptr, "%4u", 1);
            else
                i = sprintf(str1_ptr, "%4u", 0);
            str1_ptr += i;
            j = sprintf(str2_ptr, "%4u", per_cpu(hps_percpu_ctxt, cpu).load);
            str2_ptr += j;
        }
    }
    hps_ctxt.cur_nr_heavy_task = hps_cpu_get_nr_heavy_task();
    hps_cpu_get_tlp(&hps_ctxt.cur_tlp, &hps_ctxt.cur_iowait);

    /*
     * algo - begin
     */
    mutex_lock(&hps_ctxt.lock);
    hps_ctxt.action = ACTION_NONE;
    atomic_set(&hps_ctxt.is_ondemand, 0);

    /*
     * algo - get boundary
     */
    little_num_limit = min(hps_ctxt.little_num_limit_thermal, hps_ctxt.little_num_limit_low_battery);
    little_num_base = hps_ctxt.little_num_base_perf_serv;
    cpumask_and(&little_online_cpumask, &hps_ctxt.little_cpumask, cpu_online_mask);
    little_num_online = cpumask_weight(&little_online_cpumask);
    //TODO: no need if is_hmp
    big_num_limit = min(hps_ctxt.big_num_limit_thermal, hps_ctxt.big_num_limit_low_battery);
    big_num_base = max(hps_ctxt.cur_nr_heavy_task, hps_ctxt.big_num_base_perf_serv);
    cpumask_and(&big_online_cpumask, &hps_ctxt.big_cpumask, cpu_online_mask);
    big_num_online = cpumask_weight(&big_online_cpumask);
    if (hps_ctxt.cur_dump_enabled)
    {
        hps_debug(" CPU:%s\n", str1);
        hps_debug("LOAD:%s\n", str2);
        hps_debug("loads(%u), hvy_tsk(%u), tlp(%u), iowait(%u), limit_t(%u)(%u), limit_lb(%u)(%u), base_ps(%u)(%u)\n", 
            hps_ctxt.cur_loads, hps_ctxt.cur_nr_heavy_task, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait,
            hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
            hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
            hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv);
    }

//ALGO_LIMIT:
    /*
     * algo - thermal, low battery
     */
    if (big_num_online > big_num_limit)
    {
        val =  big_num_online - big_num_limit;
        for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &big_online_cpumask);
                --big_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_LIMIT_BIG, (unsigned long *)&hps_ctxt.action);
    }
    if (little_num_online > little_num_limit)
    {
        val =  little_num_online - little_num_limit;
        for (cpu = hps_ctxt.little_cpu_id_max; cpu > hps_ctxt.little_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &little_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &little_online_cpumask);
                --little_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_LIMIT_LITTLE, (unsigned long *)&hps_ctxt.action);
    }
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_BASE:
    /*
     * algo - PerfService, heavy task detect
     */
    BUG_ON(big_num_online > big_num_limit);
    BUG_ON(little_num_online > little_num_limit);
    if ((big_num_online < big_num_base) && (big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
    {
        val =  min(big_num_base, big_num_limit) - big_num_online;
        for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
        {
            if (!cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_up(cpu);
                cpumask_set_cpu(cpu, &big_online_cpumask);
                ++big_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_BASE_BIG, (unsigned long *)&hps_ctxt.action);
    }
    if ((little_num_online < little_num_base) && (little_num_online < little_num_limit) &&
        (little_num_online + big_num_online < hps_ctxt.little_num_base_perf_serv + hps_ctxt.big_num_base_perf_serv))
    {
        val =  min(little_num_base, little_num_limit) - little_num_online;
        if (big_num_online > hps_ctxt.big_num_base_perf_serv)
            val -= big_num_online - hps_ctxt.big_num_base_perf_serv;
        for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
        {
            if (!cpumask_test_cpu(cpu, &little_online_cpumask))
            {
                cpu_up(cpu);
                cpumask_set_cpu(cpu, &little_online_cpumask);
                ++little_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_BASE_LITTLE, (unsigned long *)&hps_ctxt.action);
    }
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

    /*
     * update history - tlp
     */
    val = hps_ctxt.tlp_history[hps_ctxt.tlp_history_index];
    hps_ctxt.tlp_history[hps_ctxt.tlp_history_index] = hps_ctxt.cur_tlp;
    hps_ctxt.tlp_sum += hps_ctxt.cur_tlp;
    hps_ctxt.tlp_history_index = (hps_ctxt.tlp_history_index + 1 == hps_ctxt.tlp_times) ? 0 : hps_ctxt.tlp_history_index + 1;
    ++hps_ctxt.tlp_count;
    if (hps_ctxt.tlp_count > hps_ctxt.tlp_times)
    {
        BUG_ON(hps_ctxt.tlp_sum < val);
        hps_ctxt.tlp_sum -= val;
        hps_ctxt.tlp_avg = hps_ctxt.tlp_sum / hps_ctxt.tlp_times;
    }
    else
    {
        hps_ctxt.tlp_avg = hps_ctxt.tlp_sum / hps_ctxt.tlp_count;
    }
    if (hps_ctxt.stats_dump_enabled)
        hps_ctxt_print_algo_stats_tlp(0);

//ALGO_RUSH_BOOST:
    /*
     * algo - rush boost
     */
    if (hps_ctxt.rush_boost_enabled)
    {
        if (hps_ctxt.cur_loads > hps_ctxt.rush_boost_threshold * (little_num_online + big_num_online))
            ++hps_ctxt.rush_count;
        else
            hps_ctxt.rush_count = 0;

        if ((hps_ctxt.rush_count >= hps_ctxt.rush_boost_times) &&
            ((little_num_online + big_num_online) * 100 < hps_ctxt.tlp_avg))
        {
            val = hps_ctxt.tlp_avg / 100 + (hps_ctxt.tlp_avg % 100 ? 1 : 0);
            BUG_ON(!(val > little_num_online + big_num_online));
            if (val > num_possible_cpus())
                val = num_possible_cpus();

            val -= little_num_online + big_num_online;
            if ((val) && (little_num_online < little_num_limit))
            {
                for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &little_online_cpumask);
                        ++little_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_RUSH_BOOST_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
            else if ((val) && (big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
            {
                for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &big_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &big_online_cpumask);
                        ++big_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_RUSH_BOOST_BIG, (unsigned long *)&hps_ctxt.action);
            }
        }
    } //if (hps_ctxt.rush_boost_enabled)
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_UP:
    /*
     * algo - cpu up
     */
    if ((little_num_online + big_num_online) < num_possible_cpus())
    {
        /*
         * update history - up
         */
        val = hps_ctxt.up_loads_history[hps_ctxt.up_loads_history_index];
        hps_ctxt.up_loads_history[hps_ctxt.up_loads_history_index] = hps_ctxt.cur_loads;
        hps_ctxt.up_loads_sum += hps_ctxt.cur_loads;
        hps_ctxt.up_loads_history_index = (hps_ctxt.up_loads_history_index + 1 == hps_ctxt.up_times) ? 0 : hps_ctxt.up_loads_history_index + 1;
        ++hps_ctxt.up_loads_count;
        //XXX: use >= or >, which is more beneficial? use >
        if (hps_ctxt.up_loads_count > hps_ctxt.up_times)
        {
            BUG_ON(hps_ctxt.up_loads_sum < val);
            hps_ctxt.up_loads_sum -= val;
        }
        if (hps_ctxt.stats_dump_enabled)
            hps_ctxt_print_algo_stats_up(0);

        if (hps_ctxt.up_loads_count >= hps_ctxt.up_times)
        {
            if (hps_ctxt.up_loads_sum > hps_ctxt.up_threshold * hps_ctxt.up_times * (little_num_online + big_num_online))
            {
                if (little_num_online < little_num_limit)
                {
                    for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                    {
                        if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                        {
                            cpu_up(cpu);
                            cpumask_set_cpu(cpu, &little_online_cpumask);
                            ++little_num_online;
                            break;
                        }
                    }
                    set_bit(ACTION_UP_LITTLE, (unsigned long *)&hps_ctxt.action);
                }
                else if ((big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
                {
                    for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
                    {
                        if (!cpumask_test_cpu(cpu, &big_online_cpumask))
                        {
                            cpu_up(cpu);
                            cpumask_set_cpu(cpu, &big_online_cpumask);
                            ++big_num_online;
                            break;
                        }
                    }
                    set_bit(ACTION_UP_BIG, (unsigned long *)&hps_ctxt.action);
                }
            }
        } //if (hps_ctxt.up_loads_count >= hps_ctxt.up_times)
    } //if ((little_num_online + big_num_online) < num_possible_cpus())
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_DOWN:
    /*
     * algo - cpu down (inc. quick landing)
     */
    if (little_num_online + big_num_online > 1)
    {
        /*
         * update history - down
         */
        val = hps_ctxt.down_loads_history[hps_ctxt.down_loads_history_index];
        hps_ctxt.down_loads_history[hps_ctxt.down_loads_history_index] = hps_ctxt.cur_loads;
        hps_ctxt.down_loads_sum += hps_ctxt.cur_loads;
        hps_ctxt.down_loads_history_index = (hps_ctxt.down_loads_history_index + 1 == hps_ctxt.down_times) ? 0 : hps_ctxt.down_loads_history_index + 1;
        ++hps_ctxt.down_loads_count;
        //XXX: use >= or >, which is more beneficial? use >
        if (hps_ctxt.down_loads_count > hps_ctxt.down_times)
        {
            BUG_ON(hps_ctxt.down_loads_sum < val);
            hps_ctxt.down_loads_sum -= val;
        }
        if (hps_ctxt.stats_dump_enabled)
            hps_ctxt_print_algo_stats_down(0);

        if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
        {
            unsigned int down_threshold = hps_ctxt.down_threshold * hps_ctxt.down_times;

            val = little_num_online + big_num_online;
            while (hps_ctxt.down_loads_sum < down_threshold * (val - 1))
                --val;
            val = little_num_online + big_num_online - val;

            if ((val) && (big_num_online > big_num_base))
            {
                for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
                {
                    if (cpumask_test_cpu(cpu, &big_online_cpumask))
                    {
                        cpu_down(cpu);
                        cpumask_clear_cpu(cpu, &big_online_cpumask);
                        --big_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_DOWN_BIG, (unsigned long *)&hps_ctxt.action);
            }
            else if ((val) && (little_num_online > little_num_base))
            {
                for (cpu = hps_ctxt.little_cpu_id_max; cpu > hps_ctxt.little_cpu_id_min; --cpu)
                {
                    if (cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_down(cpu);
                        cpumask_clear_cpu(cpu, &little_online_cpumask);
                        --little_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_DOWN_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
        } //if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    } //if (little_num_online + big_num_online > 1)
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_BIG_TO_LITTLE:
    /*
     * algo - b2L
     */
    if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    {
        if ((little_num_online < little_num_limit) && (big_num_online > big_num_base))
        {
            //find last online big
            for (val = hps_ctxt.big_cpu_id_max; val >= hps_ctxt.big_cpu_id_min; --val)
            {
                if (cpumask_test_cpu(val, &big_online_cpumask))
                    break;
            }
            BUG_ON(val < hps_ctxt.big_cpu_id_min);

            //verify whether b2L will open 1 little
            if (per_cpu(hps_percpu_ctxt, val).load * CPU_DMIPS_BIG_LITTLE_DIFF / 100 + 
                hps_ctxt.up_loads_sum / hps_ctxt.up_times <= hps_ctxt.up_threshold  * (little_num_online + big_num_online))
            {
                //up 1 little
                for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &little_online_cpumask);
                        ++little_num_online;
                        break;
                    }
                }

                //down 1 big
                cpu_down(val);
                cpumask_clear_cpu(val, &big_online_cpumask); /* val is the big cpu just downed */
                --big_num_online;
                set_bit(ACTION_BIG_TO_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
        } //if ((little_num_online < little_num_limit) && (big_num_online > big_num_base))
    } //if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    if (!hps_ctxt.action)
        goto ALGO_END_WO_ACTION;

    /*
     * algo - end
     */
ALGO_END_WITH_ACTION:
    hps_warn("(%04x)(%u)(%u)action end(%u)(%u)(%u)(%u) (%u)(%u)(%u)(%u)(%u)(%u) (%u)(%u)(%u) (%u)(%u)(%u) (%u)(%u)(%u)(%u)(%u)\n",
        hps_ctxt.action, little_num_online, big_num_online,
        hps_ctxt.cur_loads, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait, hps_ctxt.cur_nr_heavy_task, 
        hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
        hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
        hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv,
        hps_ctxt.up_loads_sum, hps_ctxt.up_loads_count, hps_ctxt.up_loads_history_index, 
        hps_ctxt.down_loads_sum, hps_ctxt.down_loads_count, hps_ctxt.down_loads_history_index, 
        hps_ctxt.rush_count, hps_ctxt.tlp_sum, hps_ctxt.tlp_count, hps_ctxt.tlp_history_index, hps_ctxt.tlp_avg);
    hps_ctxt_reset_stas_nolock();
ALGO_END_WO_ACTION:
    mutex_unlock(&hps_ctxt.lock);

    return;
}
Code example #23
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
	int cpu, cpu_index;
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	ictx->tfm_count++;

	cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
Code example #24
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
				group->sgc->id,
				cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
				cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}
Code example #25
File: ipi.c Project: 513855417/linux
/**
 * irq_reserve_ipi() - Set up an IPI to a destination cpumask
 * @domain:	IPI domain
 * @dest:	cpumask of cpus which can receive the IPI
 *
 * Allocate a virq that can be used to send an IPI to any CPU in dest mask.
 *
 * Return: the Linux irq number on success, or an error code on failure.
 */
int irq_reserve_ipi(struct irq_domain *domain,
			     const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
	 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
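
The consecutiveness requirement enforced by the cpumask_next_zero()/cpumask_next() walk above can also be expressed as plain bit arithmetic. A user-space analogue on a 64-bit mask, assuming at most 64 CPUs; the helper name is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* A mask is a single consecutive run iff, shifted down to its first set
 * bit, it has the form 2^n - 1 (no zero followed by another one). */
static bool mask_is_consecutive(unsigned long long mask)
{
	unsigned int first;

	if (!mask)
		return false;
	first = (unsigned int)__builtin_ctzll(mask); /* cpumask_first() */
	mask >>= first;
	return (mask & (mask + 1)) == 0;
}

int main(void)
{
	printf("%d\n", mask_is_consecutive(0x3cULL)); /* CPUs 2-5: ok */
	printf("%d\n", mask_is_consecutive(0x29ULL)); /* CPUs 0,3,5: holes */
	return 0;
}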