Example #1
/* Read-only node to display all the online managed CPUs */
static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
{
	int i, cnt = 0;
	struct cpumask tmp_mask;
	struct cpu_hp *i_cpu_hp;

	if (!clusters_inited)
		return cnt;

	for (i = 0; i < num_clusters; i++) {
		i_cpu_hp = managed_clusters[i];

		cpumask_clear(&tmp_mask);
		cpumask_complement(&tmp_mask, i_cpu_hp->offlined_cpus);
		cpumask_and(&tmp_mask, i_cpu_hp->cpus, &tmp_mask);

		cnt += cpulist_scnprintf(buf + cnt, PAGE_SIZE - cnt,
								&tmp_mask);

		if ((i + 1) >= num_clusters)
			break;
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, ":");
	}

	return cnt;
}
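
A getter with this signature plugs into the module_param machinery. A minimal sketch of the wiring, assuming the parameter is meant to be read-only (the ops-struct name is illustrative, not from the original driver):

/* Hypothetical registration: expose the getter as a read-only parameter. */
static const struct kernel_param_ops param_ops_managed_online_cpus = {
	.get = get_managed_online_cpus,
	/* no .set: the node is read-only */
};
module_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
		NULL, 0444);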
Example #2
static int __init setup_kdata(char *str)
{
	char buf[NR_CPUS * 5];

	if (str == NULL)
		return -EINVAL;

	if (strcmp(str, "huge") == 0) {
#if CHIP_HAS_CBOX_HOME_MAP()
		kdata_huge = 1;
#else
		pr_err("kdata=huge: only supported on TILEPro and later.\n");
#endif
		return 0;
	}

	/* Match only the prefix, so "small,<cpulist>" is also accepted. */
	if (strncmp(str, "small", 5) == 0) {
		kdata_huge = 0;
		str += strlen("small");
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	if (cpulist_parse(str, &kdata_mask) != 0)
		return -EINVAL;

	kdata_arg_seen = 1;
	cpulist_scnprintf(buf, sizeof(buf), &kdata_mask);
	pr_info("kdata: using caching neighborhood %s\n", buf);
	return 0;
}
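
Only the parser body is shown; a boot-argument parser like this is normally registered as an early parameter. A hedged sketch of the registration and the forms it accepts:

/* Presumed registration as an early boot parameter. */
early_param("kdata", setup_kdata);

/* Accepted forms: "kdata=huge", "kdata=small[,<cpulist>]", "kdata=<cpulist>". */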
Example #3
File: proc.c Project: 08opt/linux
static int show_cpuinfo(struct seq_file *m, void *v)
{
	int n = ptr_to_cpu(v);

	if (n == 0) {
		char buf[NR_CPUS*5];
		cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
		seq_printf(m, "cpu count\t: %d\n", num_online_cpus());
		seq_printf(m, "cpu list\t: %s\n", buf);
		seq_printf(m, "model name\t: %s\n", chip_model);
		seq_printf(m, "flags\t\t:\n");  /* nothing for now */
		seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n",
			   get_clock_rate() / 1000000,
			   (get_clock_rate() % 1000000));
		seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
			   loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ)) % 100);
	}

#ifdef CONFIG_SMP
	if (!cpu_online(n))
		return 0;
#endif

	seq_printf(m, "processor\t: %d\n", n);

	/* Print only num_online_cpus() blank lines total. */
	if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids)
		seq_printf(m, "\n");

	return 0;
}
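
show_cpuinfo() is only one quarter of a seq_file iterator. A plausible sketch of the rest of the wiring, with minimal placeholder implementations; the n+1 pointer encoding is an assumption about what ptr_to_cpu() decodes, not code from the original proc.c:

/* Placeholder iterator: cpu n is encoded as the non-NULL pointer n + 1. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < nr_cpu_ids ? (void *)(long)(*pos + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};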
Example #4
/*
 * Print cpu online, possible, present, and system maps
 */
static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
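
A helper like this is typically called from one show function per map. A minimal sketch, with the attribute names assumed rather than taken from the source:

/* Hypothetical sysfs wrappers built on print_cpus_map(). */
static ssize_t print_cpus_online(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return print_cpus_map(buf, cpu_online_mask);
}
static DEVICE_ATTR(online, 0444, print_cpus_online, NULL);

static ssize_t print_cpus_possible(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return print_cpus_map(buf, cpu_possible_mask);
}
static DEVICE_ATTR(possible, 0444, print_cpus_possible, NULL);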
Example #5
static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting on Tile64: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}

	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
			       "with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
			       buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}
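
For quick reference, the forms the parser above accepts, plus the presumed registration (an assumption, as with "kdata"):

/* Presumed registration as an early boot parameter. */
early_param("ktext", setup_ktext);

/*
 * ktext=huge           one huge locally cached page (the default)
 * ktext=local          small pages, cached only on the local tile
 * ktext=all            small pages, neighborhood-cached on all cpus
 * ktext=<cpulist>      pages homed on the given cpus (huge page if just one)
 * ktext=nocache[,...]  prefix: disable local caching, then parse the rest
 */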
Example #6
static int __init init_tick_nohz_full(void)
{
	if (have_nohz_full_mask)
		cpu_notifier(tick_nohz_cpu_down_callback, 0);

	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);

	return 0;
}
Example #7
/* Print non-responding cpus */
static void uv_nmi_nr_cpus_pr(char *fmt)
{
	static char cpu_list[1024];
	int len = sizeof(cpu_list);
	int c = cpumask_weight(uv_nmi_cpu_mask);
	int n = cpulist_scnprintf(cpu_list, len, uv_nmi_cpu_mask);

	if (n >= len-1)
		strcpy(&cpu_list[len - 6], "...\n");

	printk(fmt, c, cpu_list);
}
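
Since the caller supplies the format string, the helper hard-codes a two-argument contract: the count (%d) followed by the list (%s). The static buffer also means a list that does not fit gets a trailing "..." marker. A hypothetical call site (the message text is illustrative):

uv_nmi_nr_cpus_pr(KERN_DEFAULT
	"UV: %d CPUs not in NMI loop: %s\n");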
Example #8
static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		n = type?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}
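
The PTR_ALIGN expression computes the space left up to the next page boundary, which is exactly PAGE_SIZE for the page-aligned buffers sysfs hands to show methods. A hedged sketch of a caller, with the wrapper name assumed:

/* Hypothetical wrapper: print a PCI device's local cpus as a list. */
static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return show_cpumap(1, cpumask_of_pcibus(pdev->bus), buf);
}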
Example #9
static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
{
	struct node *node_dev = to_node(dev);
	const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
	int len;

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	len = type?
		cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
		cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
	buf[len++] = '\n';
	buf[len] = '\0';
	return len;
}
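
The type flag selects hex-mask (0) or list (1) output, so the function is normally fronted by two thin wrappers bound to separate attributes. A sketch, with the attribute names assumed:

/* Hypothetical wrappers: one attribute per output format. */
static ssize_t node_read_cpumask(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 0, buf);
}

static ssize_t node_read_cpulist(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 1, buf);
}

static DEVICE_ATTR(cpumap, 0444, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, 0444, node_read_cpulist, NULL);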
Example #10
static char *
kdb_cpus_allowed_string(struct task_struct *tp)
{
	static char maskbuf[NR_CPUS * 8];
	if (cpus_equal(tp->cpus_allowed, cpu_online_map))
		strcpy(maskbuf, "ALL");
	else if (cpus_full(tp->cpus_allowed))
		strcpy(maskbuf, "ALL(NR_CPUS)");
	else if (cpus_empty(tp->cpus_allowed))
		strcpy(maskbuf, "NONE");
	else if (cpus_weight(tp->cpus_allowed) == 1)
		snprintf(maskbuf, sizeof(maskbuf), "ONLY(%d)", first_cpu(tp->cpus_allowed));
	else
		cpulist_scnprintf(maskbuf, sizeof(maskbuf), tp->cpus_allowed);
	return maskbuf;
}
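
The result lives in a static buffer, so each call overwrites the previous string and the helper is not re-entrant; callers must print it immediately. A hypothetical caller:

/* Hypothetical kdb output line: pid, command, and allowed-cpu summary. */
static void kdb_print_task_cpus(struct task_struct *tp)
{
	kdb_printf("%-5d %-15s %s\n", tp->pid, tp->comm,
		   kdb_cpus_allowed_string(tp));
}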
Example #11
/*
 * PCI Bus Class Devices
 */
static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
					int type,
					struct device_attribute *attr,
					char *buf)
{
	int ret;
	const struct cpumask *cpumask;

	cpumask = cpumask_of_pcibus(to_pci_bus(dev));
	ret = type?
		cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
		cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
	buf[ret++] = '\n';
	buf[ret] = '\0';
	return ret;
}
Example #12
static int get_managed_cpus(char *buf, const struct kernel_param *kp)
{
	int i, cnt = 0;

	if (!clusters_inited)
		return cnt;

	for (i = 0; i < num_clusters; i++) {
		cnt += cpulist_scnprintf(buf + cnt, PAGE_SIZE - cnt,
						managed_clusters[i]->cpus);
		if ((i + 1) >= num_clusters)
			break;
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, ":");
	}

	return cnt;
}
Example #13
/*
 * Debug related code, dump vcpu/cpu information
 */
static void
rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
{
    cpumask_t *cpupool_mask, *mask;

    ASSERT(svc != NULL);
    /* idle vcpu */
    if ( svc->sdom == NULL )
    {
        printk("\n");
        return;
    }

    /*
     * We can't just use 'cpumask_scratch' because the dumping can
     * happen from a pCPU outside of this scheduler's cpupool, and
     * hence it's not right to use the pCPU's scratch mask (which
     * may even not exist!). On the other hand, it is safe to use
     * svc->vcpu->processor's own scratch space, since we hold the
     * runqueue lock.
     */
    mask = _cpumask_scratch[svc->vcpu->processor];

    cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
    cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
    printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
           " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
            svc->vcpu->domain->domain_id,
            svc->vcpu->vcpu_id,
            svc->vcpu->processor,
            svc->period,
            svc->budget,
            svc->cur_budget,
            svc->cur_deadline,
            svc->last_start,
            __vcpu_on_q(svc),
            vcpu_runnable(svc->vcpu),
            svc->flags,
            keyhandler_scratch);
}
Example #14
/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
}
Example #15
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	char buf[64];

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return NULL;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return NULL;
	}

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
	return mask;
}
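
Returning the mask (or NULL) lets the actual bit updates live in thin wrappers that share the lookup and error handling. A sketch of the presumed pair of callers:

/* Presumed callers: update the node mask only if the lookup succeeded. */
void __cpuinit numa_add_cpu(int cpu)
{
	struct cpumask *mask = debug_cpumask_set_cpu(cpu, 1);

	if (mask)
		cpumask_set_cpu(cpu, mask);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	struct cpumask *mask = debug_cpumask_set_cpu(cpu, 0);

	if (mask)
		cpumask_clear_cpu(cpu, mask);
}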
Example #16
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START)
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}