Example #1
File: init.c Project: AllenWeb/linux
/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	/*
	 * The hypervisor refers to page tables by "page table frame
	 * number" (ptfn): the physical address shifted down by the
	 * required page-table alignment, which is why the table must
	 * be exactly HV_PAGE_TABLE_ALIGN-aligned.
	 */
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);

	/* Home the page table itself with the initial heap home. */
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;

	/* Sanity check: the new pmd must round-trip back to the table. */
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
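
A minimal usage sketch, assuming the usual arch/tile early-boot pattern (the allocator helper and the L2_KERNEL_PGTABLE_SIZE constant are assumptions, not part of the snippet above): bootmem-allocate a suitably aligned L2 table and install it with assign_pte().

/* Hypothetical helper: allocate one aligned L2 kernel page table. */
static pte_t * __init alloc_l2_table(void)
{
	/* The alignment matches the BUG_ON in assign_pte(). */
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

static void __init example_populate_pmd(pmd_t *pmd)
{
	assign_pte(pmd, alloc_l2_table());
}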
Example #2
File: init.c Project: rslotte/OGS-Tile
/*
 * Free the pages in [begin, end) that previously held init code or
 * data: reset their homecache, hand them back to the page allocator,
 * and poison them to catch stale references.
 */
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_HOMECACHE
	int home = initial_heap_home();
#endif
	unsigned long addr;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses, do not free
			 * this memory but mark it not present - any
			 * buggy init-section access will then create
			 * a kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
#ifdef CONFIG_HOMECACHE
		set_page_home(page, home);
		__clear_bit(PG_homecache_nomigrate, &page->flags);
#endif
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
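
A sketch of the typical caller, assuming the standard kernel free_initmem() pattern (this caller is not part of the snippet; __init_begin/__init_end are the usual linker symbols): once boot completes, the init sections are handed back page by page.

/* Assumed caller, following the common free_initmem() convention. */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)__init_begin,
			(unsigned long)__init_end);
}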
Example #3
File: init.c Project: rslotte/OGS-Tile
/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
#ifdef CONFIG_HOMECACHE
	struct zone *zone;
	int home = initial_heap_home();
	unsigned long address;

	/*
	 * First walk the zones and set the pages to all have
	 * the default heap caching.
	 */
	for_each_zone(zone) {
		unsigned long pfn = zone->zone_start_pfn;
		unsigned long end_pfn = pfn + zone->spanned_pages;
		struct page *page = pfn_to_page(pfn);
		for (; pfn < end_pfn; ++pfn, ++page)
			set_page_home(page, home);
	}

	/*
	 * Now walk through the loaded pages, update the page homecache,
	 * and mark all pages as non-migratable.  (Init pages that
	 * are freed back to the heap are unmarked when we free them.)
	 */
	for (address = PAGE_OFFSET; address < (unsigned long) _end;
	     address += PAGE_SIZE) {
		struct page *pg = virt_to_page((void *)address);
		pte_t pte = *virt_to_pte(NULL, address);

		/* Adjust page.home on all loaded pages. */
		BUG_ON(!pte_present(pte));
		set_page_home(pg, get_page_home(pte));
		__SetPageHomecacheNomigrate(pg);
	}
#endif
}
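
To make the stated purpose concrete, here is a hypothetical sketch (an assumption, not code from this project) of the kind of check set_pte() can perform once homes are recorded: the home encoded in an incoming PTE should agree with the home recorded on the struct page. The page_home() accessor name is assumed.

/* Hypothetical validation sketch; page_home() naming is assumed. */
static void validate_pte_home(pte_t pte)
{
	if (pte_present(pte) && pfn_valid(pte_pfn(pte))) {
		struct page *page = pfn_to_page(pte_pfn(pte));
		BUG_ON(get_page_home(pte) != page_home(page));
	}
}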
Example #4
File: init.c Project: AllenWeb/linux
/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if CHIP_HAS_CBOX_HOME_MAP()
	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/*
	 * We map read-only data non-coherent for performance.  We could
	 * use neighborhood caching on TILE64, but it's not clear it's a win.
	 */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address before
	 * _einitdata, since we already captured the case of text before
	 * _sinittext, and __pa(_einittext) is approximately __pa(_sinitdata).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) _einitdata)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Make the w1data homed like heap to start with, to avoid
	 * making it part of the page-striped data area when we're just
	 * going to convert it to read-only soon anyway.
	 */
	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
	 * the requested address, while walking cpu home around kdata_mask.
	 * This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
		if (page == (ulong)atomic_locks)
			continue;
#endif
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}
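
A sketch of how a function like this is consumed, assuming a simplified page-at-a-time mapping loop (the real early mapping code handles more cases, e.g. huge pages, which are elided here): walk the kernel image and install a PTE built from init_pgprot()'s answer.

/* Simplified, assumed consumer of init_pgprot(). */
static void __init example_map_kernel_pages(void)
{
	unsigned long address;

	for (address = PAGE_OFFSET; address < (unsigned long)_end;
	     address += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, address); /* as in Example #2 */
		pgprot_t prot = init_pgprot(address);
		set_pte(ptep, pfn_pte(kaddr_to_pfn((void *)address), prot));
	}
}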
Example #5
File: init.c Project: Gaffey/linux
/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/*
	 * We map the aliased pages of permanent text so we can
	 * update them if necessary, for ftrace, etc.
	 */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/* We map read-only data non-coherent for performance. */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

#ifndef __tilegx__
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address before
	 * __init_end, since we already captured the case of text before
	 * _sinittext, and __pa(_einittext) is approximately __pa(__init_begin).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) __init_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
	 * reach the requested address, while walking cpu home around
	 * kdata_mask. This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
		if (page == (ulong)atomic_locks)
			continue;
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}
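
The round-robin walk in the final loop is the part worth isolating; a minimal sketch of the wrap-around idiom it relies on: cpumask_next() returns NR_CPUS once it runs past the last set bit, at which point the walk restarts at cpumask_first().

/* Advance to the next cpu in kdata_mask, wrapping at the end. */
static int next_kdata_cpu(int cpu)
{
	cpu = cpumask_next(cpu, &kdata_mask);
	if (cpu == NR_CPUS)
		cpu = cpumask_first(&kdata_mask);
	return cpu;
}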