Example #1
/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
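		/* No pte table here yet: take a zeroed, page-aligned page from
		 * the boot-time allocator and install it as the pmd entry. */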
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}
Example #2
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	empty_zero_page      = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
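	/* One bootmem page backs empty_zero_page, the shared all-zeroes page;
	 * it is cleared explicitly below. */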
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up user data space
	 */
	set_fs(KERNEL_DS);

	/*
	 * Define zones
	 */
	zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
	pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
		__pa(PAGE_OFFSET) >> PAGE_SHIFT;

	free_area_init(zones_size);
}
Example #3
File: main.c  Project: varrunr/ml-cfs
static void __init setup_per_cpu_areas(void)
{
#ifndef __LINSCHED__
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
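	/* The block just allocated holds one page-aligned copy of the static
	 * per-cpu data for every possible CPU; the loop below points each
	 * CPU's offset at its copy. */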

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
#else
	/* Manually declare all necessary per-cpu areas. As noted in
	 * percpu.h, this is ugly and does not scale at all, and probably
	 * needs rethinking eventually. Perhaps the Linux version of
	 * per-cpu areas could be used again if someone who understands
	 * them works out how to port them to user space.
	 */
	int cpu_id;

	ALLOC_PER_CPU_MEM(runqueues);
	ALLOC_PER_CPU_MEM(phys_domains);
	ALLOC_PER_CPU_MEM(sched_group_phys);
	ALLOC_PER_CPU_MEM(current_task);
	/* more per_cpu variables would be added here... */
	for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) {
		INIT_PER_CPU_MEM(runqueues, cpu_id);
		INIT_PER_CPU_MEM(phys_domains, cpu_id);
		INIT_PER_CPU_MEM(sched_group_phys, cpu_id);
		INIT_PER_CPU_MEM(current_task, cpu_id);
		/* more per_cpu variables would be added here... */
	}
#endif /* __LINSCHED__ */
}
Example #4
/**
 * init_apic_mappings - initialize APIC mappings
 */
void __init init_apic_mappings(void)
{
    /*
     * If no local APIC can be found then set up a fake all
     * zeroes page to simulate the local APIC and another
     * one for the IO-APIC.
     */
    if (!smp_found_config && detect_init_APIC()) {
        apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
        apic_phys = __pa(apic_phys);
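        /* The dummy page came back as a kernel virtual address; the fixmap
         * below needs a physical address, hence the __pa() conversion. */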
    } else
        apic_phys = mp_lapic_addr;

    set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
    apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
                APIC_BASE, apic_phys);

    /*
     * Fetch the APIC ID of the BSP in case we have a
     * default configuration (or the MP table is broken).
     */
    boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
}
Example #5
File: init.c  Project: BinVul/linux2.6.32
void __init
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* Clear out the init_mm.pgd that will contain the kernel's mappings. */
	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	cris_mmu_init();

	/*
	 * Initialize the bad page table and bad page to point to a couple of
	 * allocated pages.
	 */
	empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
	zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/*
	 * Use free_area_init_node instead of free_area_init, because it is
	 * designed for systems where the DRAM starts at an address
	 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
	 * saves space in the mem_map page array.
	 */
	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);

	mem_map = contig_page_data.node_mem_map;
}
Example #6
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
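		/* No pmd table installed yet: allocate one from bootmem once
		 * it is available, or from the early low-memory pool before
		 * that. */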
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
Example #7
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);    // 32-bit nopud: returns the pud_t directly, no extra dereference step
	pmd_table = pmd_offset(pud, 0); // pmd_index gets the pmd number and adds it to the pud base

	return pmd_table;         // after this whole chain, pmd_table == the pgd's address...
}
Example #8
void __init 
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");
	
	

	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm. */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

	/* initialise the TLB (tlb.c) */
	tlb_init();

	/* See README.mm for details on the KSEG setup. */
#ifdef CONFIG_CRIS_LOW_MAP

#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) | 
			IO_STATE(R_MMU_KSEG, seg_c, page ) |   
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  
#ifdef CONFIG_JULIETTE
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  
#else
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
			IO_STATE(R_MMU_KSEG, seg_9, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_8, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_7, page ) |  
			IO_STATE(R_MMU_KSEG, seg_6, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_5, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_4, page ) |  
			IO_STATE(R_MMU_KSEG, seg_3, page ) |  
			IO_STATE(R_MMU_KSEG, seg_2, page ) |  
			IO_STATE(R_MMU_KSEG, seg_1, page ) |  
			IO_STATE(R_MMU_KSEG, seg_0, page ) ); 

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
	
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* This code is for the corrected Etrax-100 LX version 2... */

#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_e, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_d, page ) | 
			IO_STATE(R_MMU_KSEG, seg_c, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_9, page ) | 
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
	
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );

	/* The MMU has been enabled ever since head.S, but just to make it
	 * totally obvious we do it here as well. */
	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp,  enable ) );
	
	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/* Initialize the bad page table and bad page to point to a couple of
	 * allocated pages. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/* Use free_area_init_node instead of free_area_init: it is designed for
	 * systems where DRAM starts well above address 0, as ours does (we
	 * start at PAGE_OFFSET), which saves space in the mem_map page array. */
	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
}
Example #9
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk ("start of paging_init (%p, %lx)\n",
			kernel_pg_dir, availmem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
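		/* init_bootmem_node() places the bootmem bitmap at availmem and
		 * returns its size in bytes, so availmem is advanced past it. */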
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, pg_data_map + i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
	}
}
Example #10
void __init 
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");
	
	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);
	
	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next 
	 *  switch_mm)
	 */

	current_pgd = init_mm.pgd;

	/* initialise the TLB (tlb.c) */

	tlb_init();

	/* see README.mm for details on the KSEG setup */

#ifdef CONFIG_CRIS_LOW_MAP
	/* Etrax-100 LX version 1 has a bug so that we cannot map anything
	 * across the 0x80000000 boundary, so we need to shrink the user-virtual
	 * area to 0x50000000 instead of 0xb0000000 and map things slightly
	 * different. The unused areas are marked as paged so that we can catch
	 * freak kernel accesses there.
	 *
	 * The ARTPEC chip is mapped at 0xa so we pass that segment straight
	 * through. We cannot vremap it because the vmalloc area is below 0x8
	 * and Juliette needs an uncached area above 0x8.
	 *
	 * Same thing with 0xc and 0x9, which is memory-mapped I/O on some boards.
	 * We map them straight over in LOW_MAP, but use vremap in LX version 2.
	 */

#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) | 
			IO_STATE(R_MMU_KSEG, seg_c, page ) |   
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  /* kernel reg area */
#ifdef CONFIG_JULIETTE
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  /* ARTPEC etc. */
#else
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
			IO_STATE(R_MMU_KSEG, seg_9, seg  ) |  /* LED's on some boards */
			IO_STATE(R_MMU_KSEG, seg_8, seg  ) |  /* CSE0/1, flash and I/O */
			IO_STATE(R_MMU_KSEG, seg_7, page ) |  /* kernel vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_6, seg  ) |  /* kernel DRAM area */
			IO_STATE(R_MMU_KSEG, seg_5, seg  ) |  /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_4, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_3, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_2, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_1, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
	
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* This code is for the corrected Etrax-100 LX version 2... */

#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_e, seg  ) | /* uncached flash */
			IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_c, seg  ) | /* kernel area */
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | /* kernel reg area */
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_9, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
	
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );
	
	/* The MMU has been enabled ever since head.S but just to make
	 * it totally obvious we do it here as well.
	 */

	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp,  enable ) );
	
	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */

	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone */

	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/* Use free_area_init_node instead of free_area_init, because the former
	 * is designed for systems where the DRAM starts at an address substantially
	 * higher than 0, like us (we start at PAGE_OFFSET). This saves space in the
	 * mem_map page array.
	 */

	free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
	mem_map = contig_page_data.node_mem_map;
}
Example #11
/*
 * Allocates/reserves the Platform memory resources early in the boot process.
 * This ignores any resources that are designated IORESOURCE_IO
 */
void __init platform_alloc_bootmem(void)
{
	int i;
	int total = 0;

	/* Get persistent memory data from command line before allocating
	 * resources. This need to happen before normal command line parsing
	 * has been done */
	pmem_setup_resource();

	/* Loop through looking for resources that want a particular address */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = gp_resources[i].end - gp_resources[i].start + 1;
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			reserve_bootmem(dma_to_phys(gp_resources[i].start),
				size, 0);
			total += gp_resources[i].end -
				gp_resources[i].start + 1;
			pr_info("reserve resource %s at %08x (%u bytes)\n",
				gp_resources[i].name, gp_resources[i].start,
				gp_resources[i].end -
					gp_resources[i].start + 1);
		}
	}

	/* Loop through assigning addresses for those that are left */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = gp_resources[i].end - gp_resources[i].start + 1;
		if ((gp_resources[i].start == 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			void *mem = alloc_bootmem_pages(size);

			if (mem == NULL)
				pr_err("Unable to allocate bootmem pages "
					"for %s\n", gp_resources[i].name);

			else {
				gp_resources[i].start =
					phys_to_dma(virt_to_phys(mem));
				gp_resources[i].end =
					gp_resources[i].start + size - 1;
				total += size;
				pr_info("allocate resource %s at %08x "
						"(%u bytes)\n",
					gp_resources[i].name,
					gp_resources[i].start, size);
			}
		}
	}

	pr_info("Total Platform driver memory allocation: 0x%08x\n", total);

	/* indicate resources that are platform I/O related */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
			pr_info("reserved platform resource %s at %08x\n",
				gp_resources[i].name, gp_resources[i].start);
		}
	}
}
Example #12
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[3] = {0, 0, 0};
	unsigned long size;


#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir,  0, sizeof (kernel_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	
	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
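	/* All kernel pte tables live in this single bootmem chunk; bootmem_end
	 * marks the first page boundary past it and is later handed to
	 * mmu_emu_init(). */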

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; 

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = __mk_pte(address, PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[1] = 0;
	
	free_area_init(zones_size);

}
Example #13
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;

#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir,  0, sizeof (kernel_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	/* I really wish I knew why the following change made things better...  -- Sam */
/*	free_area_init(zones_size); */
	free_area_init_node(0, zones_size,
			    (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);
}
Example #14
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}