Example #1
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_INTRPT),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);
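		/*
		 * Illustrative numbers (assuming the tile defaults of
		 * 64 KB pages and 16 MB huge pages): this backs
		 * end_huge_pfn off by 16 MB >> 16 = 256 small pages, so
		 * the last huge page of lowmem is mapped with ordinary
		 * PTEs and can later be re-homed per-cpu.
		 */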

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
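
/*
 * A minimal sketch (not part of the kernel source above) of the
 * round-robin idiom the ktext_small path uses to stripe kernel-text
 * pages across the tiles in ktext_mask: walk the mask with
 * cpumask_next() and wrap back to cpumask_first() when it runs out.
 * The helper name pick_next_home_cpu() is hypothetical.
 */
static int pick_next_home_cpu(int cpu, const struct cpumask *mask)
{
	cpu = cpumask_next(cpu, mask);	/* next cpu set in the mask */
	if (cpu >= NR_CPUS)		/* walked off the end */
		cpu = cpumask_first(mask);
	return cpu;
}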

/*
 * Flush and invalidate a VA range that is homed remotely, waiting
 * until the memory controller holds the flushed values.  If "hfh" is
 * true, we do a more expensive flush involving additional loads to
 * make sure we have touched all the possible home cpus of a buffer
 * that is homed with "hash for home".
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh)
{
	char *p, *base;
	size_t step_size, load_count;

	/*
	 * On TILEPro the striping granularity is a fixed 8KB; on
	 * TILE-Gx it is configurable, but we rely on the fact that
	 * the hypervisor always configures maximum striping, so that
	 * bits 9 and 10 of the PA are part of the stripe function and
	 * we therefore hit a striping boundary every 512 bytes.
	 */
#ifdef __tilegx__
	const unsigned long STRIPE_WIDTH = 512;
#else
	const unsigned long STRIPE_WIDTH = 8192;
#endif

#ifdef __tilegx__
	/*
	 * On TILE-Gx, we must disable the dstream prefetcher before doing
	 * a cache flush; otherwise, we could end up with data in the cache
	 * that we don't want there.  Note that normally we'd do an mf
	 * after the SPR write disabling the prefetcher, but we do one
	 * below, before any further loads, so there's no need to do it
	 * here.
	 */
	uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
	__insn_mtspr(SPR_DSTREAM_PF, 0);
#endif

	/*
	 * Flush and invalidate the buffer out of the local L1/L2
	 * and request the home cache to flush and invalidate as well.
	 */
	__finv_buffer(buffer, size);

	/*
	 * Wait for the home cache to acknowledge that it has processed
	 * all the flush-and-invalidate requests.  This does not mean
	 * that the flushed data has reached the memory controller yet,
	 * but it does mean the home cache is processing the flushes.
	 */
	__insn_mf();

	/*
	 * Issue a load to the last cache line, which can't complete
	 * until all the previously-issued flushes to the same memory
	 * controller have also completed.  If we weren't striping
	 * memory, that one load would be sufficient, but since we may
	 * be, we also need to back up to the last load issued to
	 * another memory controller, which would be the point where
	 * we crossed a "striping" boundary (the granularity of striping
	 * across memory controllers).  Keep backing up and doing this
	 * until we are before the beginning of the buffer, or have
	 * hit all the controllers.
	 *
	 * If we are flushing a hash-for-home buffer, it's even worse.
	 * Each line may be homed on a different tile, and each tile
	 * may have up to four lines that are on different
	 * controllers.  So as we walk backwards, we have to touch
	 * enough cache lines to satisfy these constraints.  In
	 * practice this ends up being close enough to "load from
	 * every cache line on a full memory stripe on each
	 * controller" that we simply do that, to simplify the logic.
	 *
	 * On TILE-Gx the hash-for-home function is much more complex,
	 * with the upshot being we can't readily guarantee we have
	 * hit both entries in the 128-entry AMT that were hit by any
	 * load in the entire range, so we just re-load them all.
	 * With larger buffers, we may want to consider using a hypervisor
	 * trap to issue loads directly to each hash-for-home tile for
	 * each controller (doing it from Linux would trash the TLB).
	 */
	if (hfh) {
		step_size = L2_CACHE_BYTES;
#ifdef __tilegx__
		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
#else
		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
			      (1 << CHIP_LOG_NUM_MSHIMS());
#endif
	} else {
		step_size = STRIPE_WIDTH;
		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
	}
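	/*
	 * Illustrative numbers (assuming 64-byte L2 lines and
	 * CHIP_LOG_NUM_MSHIMS() == 2, i.e. four memory controllers):
	 * a hash-for-home flush on TILEPro walks back over
	 * (8192 / 64) * 4 = 512 cache lines regardless of buffer size,
	 * while a non-hfh flush needs only 1 << 2 = 4 loads, one per
	 * controller stripe.
	 */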

	/* Load the last byte of the buffer. */
	p = (char *)buffer + size - 1;
	force_load(p);

	/* Bump down to the end of the previous stripe or cache line. */
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));
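	/*
	 * Worked example with hypothetical addresses: if step_size is
	 * 512 and the last byte of the buffer sits at 0x10233, stepping
	 * back 512 gives 0x10033, and OR-ing in (512 - 1) rounds that up
	 * to 0x101ff, the final byte of the preceding stripe.
	 */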

	/* Figure out how far back we need to go. */
	base = p - (step_size * (load_count - 2));
	if ((unsigned long)base < (unsigned long)buffer)
		base = buffer;

	/* Fire all the loads we need. */
	for (; p >= base; p -= step_size)
		force_load(p);

	/*
	 * Repeat, but with finv's instead of loads, to get rid of the
	 * data we just loaded into our own cache and the old home L3.
	 * The finv's are guaranteed not to actually flush the data in
	 * the buffer back to their home, since we just read it, so the
	 * lines are clean in cache; we will only invalidate those lines.
	 */
	p = (char *)buffer + size - 1;
	__insn_finv(p);
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));
	for (; p >= base; p -= step_size)
		__insn_finv(p);

	/* Wait for these finvs (and thus the earlier finvs) to be done. */
	__insn_mf();

#ifdef __tilegx__
	/* Reenable the prefetcher. */
	__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
#endif
}
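
/*
 * Usage sketch (the caller and its arguments are hypothetical; only
 * finv_buffer_remote() itself comes from the code above): before
 * changing the cache home of a remotely-homed or hash-for-home buffer,
 * or returning its pages, flush it all the way out to the memory
 * controllers.
 */
static void example_retire_buffer(void *buf, size_t len, int homed_hfh)
{
	finv_buffer_remote(buf, len, homed_hfh);
}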