Example #1
void __init wii_memory_fixups(void)
{
	struct lmb_property *p = lmb.memory.region;

	/*
	 * This is part of a workaround to allow the use of two
	 * discontinuous RAM ranges on the Wii, even if this is
	 * currently unsupported on 32-bit PowerPC Linux.
	 *
	 * We coalesce the two memory ranges of the Wii into a
	 * single range, then create a reservation for the "hole"
	 * between both ranges.
	 */

	BUG_ON(lmb.memory.cnt != 2);
	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));

	p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
	p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);

	wii_hole_start = p[0].base + p[0].size;
	wii_hole_size = p[1].base - wii_hole_start;

	pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
	pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
	pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);

	p[0].size += wii_hole_size + p[1].size;

	lmb.memory.cnt = 1;
	lmb_analyze();

	/* reserve the hole */
	lmb_reserve(wii_hole_start, wii_hole_size);

	/* allow ioremapping the address space in the hole */
	__allow_ioremap_reserved = 1;
}
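
The hole arithmetic above is easier to follow with concrete numbers. Below is a small, self-contained userspace sketch (not kernel code) that reproduces the computation with assumed Wii-like values of 24 MiB for MEM1 at address 0 and 64 MiB for MEM2 at 0x10000000; the real sizes come from the firmware/device tree at boot.

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone illustration of the wii_memory_fixups() arithmetic.
 * The base/size values are assumptions for the sake of the example,
 * not values read from real hardware.
 */
int main(void)
{
	uint64_t mem1_base = 0x00000000, mem1_size = 24ULL << 20; /* assumed 24 MiB */
	uint64_t mem2_base = 0x10000000, mem2_size = 64ULL << 20; /* assumed 64 MiB */

	uint64_t hole_start = mem1_base + mem1_size;  /* first byte after MEM1 */
	uint64_t hole_size  = mem2_base - hole_start; /* gap up to MEM2 */

	/* After coalescing, the single region spans MEM1 + hole + MEM2. */
	uint64_t merged_size = mem1_size + hole_size + mem2_size;

	printf("MEM1: <%08llx %08llx>\n", (unsigned long long)mem1_base,
	       (unsigned long long)mem1_size);
	printf("HOLE: <%08llx %08llx>\n", (unsigned long long)hole_start,
	       (unsigned long long)hole_size);
	printf("MEM2: <%08llx %08llx>\n", (unsigned long long)mem2_base,
	       (unsigned long long)mem2_size);
	printf("merged region: <%08llx %08llx>\n",
	       (unsigned long long)mem1_base, (unsigned long long)merged_size);
	return 0;
}

With these assumed values the hole starts at 0x01800000 and spans 0x0e800000 bytes, so the coalesced region runs from 0 up to the end of MEM2 and the hole is handed to lmb_reserve() so nothing allocates from it.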
Example #2
/*
 * On SH machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks in node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_memory(void)
{
    unsigned long free_pfn = PFN_UP(__pa(_end));
    u64 base = min_low_pfn << PAGE_SHIFT;
    u64 size = (max_low_pfn << PAGE_SHIFT) - base;

    lmb_add(base, size);

    /* Reserve the LMB regions used by the kernel, initrd, etc.. */
    lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
                (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

    /*
     * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
     */
    if (CONFIG_ZERO_PAGE_OFFSET != 0)
        lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

    lmb_analyze();
    lmb_dump_all();

    /*
     * Node 0 sets up its pgdat at the first available pfn,
     * and bumps it up before setting up the bootmem allocator.
     */
    NODE_DATA(0) = pfn_to_kaddr(free_pfn);
    memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
    free_pfn += PFN_UP(sizeof(struct pglist_data));
    NODE_DATA(0)->bdata = &bootmem_node_data[0];

    /* Set up node 0 */
    setup_bootmem_allocator(free_pfn);

    /* Give the platforms a chance to hook up their nodes */
    plat_mem_setup();
}
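
The reservation above hinges on the usual PFN helpers. Here is a minimal userspace sketch of the same arithmetic, assuming 4 KiB pages and illustrative stand-in values for __MEMORY_START, CONFIG_ZERO_PAGE_OFFSET and __pa(_end); the PFN_UP()/PFN_PHYS() definitions mirror the kernel's pfn helpers.

#include <stdio.h>
#include <stdint.h>

/* Assumed 4 KiB pages; macros mirror include/linux/pfn.h. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((uint64_t)(x) << PAGE_SHIFT)

int main(void)
{
	/* Stand-in values, purely for illustration. */
	uint64_t memory_start  = 0x08000000;	/* plays the role of __MEMORY_START */
	uint64_t zero_page_off = 0x1000;	/* plays the role of CONFIG_ZERO_PAGE_OFFSET */
	uint64_t pa_end        = 0x08431234;	/* plays the role of __pa(_end) */

	uint64_t free_pfn     = PFN_UP(pa_end);	/* first whole free page after the kernel */
	uint64_t reserve_base = memory_start + zero_page_off;
	uint64_t reserve_size = (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) - reserve_base;

	printf("free_pfn      = %#llx\n", (unsigned long long)free_pfn);
	printf("reserve range = [%#llx, %#llx)\n",
	       (unsigned long long)reserve_base,
	       (unsigned long long)(reserve_base + reserve_size));
	return 0;
}

The reserved range covers everything from the zero-page offset up past the end of the kernel image, which is exactly the span setup_memory() hands to lmb_reserve() before setting up node 0's pgdat at free_pfn.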
Example #3
/*static*/ void __init iSeries_init_early(void)
{
	extern unsigned long memory_limit;

	DBG(" -> iSeries_init_early()\n");

	ppcdbg_initialize();

#if defined(CONFIG_BLK_DEV_INITRD)
	/*
	 * If the init RAM disk has been configured and there is
	 * a non-zero starting address for it, set it up
	 */
	if (naca->xRamDisk) {
		initrd_start = (unsigned long)__va(naca->xRamDisk);
		initrd_end = initrd_start + naca->xRamDiskSize * PAGE_SIZE;
		initrd_below_start_ok = 1;	/* ramdisk in kernel space */
		ROOT_DEV = Root_RAM0;
		if (((rd_size * 1024) / PAGE_SIZE) < naca->xRamDiskSize)
			rd_size = (naca->xRamDiskSize * PAGE_SIZE) / 1024;
	} else
#endif /* CONFIG_BLK_DEV_INITRD */
	{
	    /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
	}

	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();

	/*
	 * Cache sizes must be initialized before hpte_init_iSeries is called,
	 * as the latter needs them for flush_icache_range()
	 */
	setup_iSeries_cache_sizes();

	/*
	 * Initialize the hash table management pointers
	 */
	hpte_init_iSeries();

	/*
	 * Initialize the DMA/TCE management
	 */
	tce_init_iSeries();

	/*
	 * Initialize the table which translates Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	build_iSeries_Memory_Map();

	iSeries_get_cmdline();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	/* Parse early parameters, in particular mem=x */
	parse_early_param();

	if (memory_limit) {
		if (memory_limit < systemcfg->physicalMemorySize)
			systemcfg->physicalMemorySize = memory_limit;
		else {
			printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
			memory_limit = 0;
		}
	}

	/* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
	iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);

	lmb_init();
	lmb_add(0, systemcfg->physicalMemorySize);
	lmb_analyze();
	lmb_reserve(0, __pa(klimit));

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif
	if (itLpNaca.xPirEnvironMode == 0) 
		piranha_simulator = 1;

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	mf_init();
	mf_initialized = 1;
	mb();

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;
#endif /* CONFIG_BLK_DEV_INITRD */

	DBG(" <- iSeries_init_early()\n");
}
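
The rd_size adjustment in the initrd branch is a unit conversion: rd_size is kept in kilobytes while xRamDiskSize is a page count. A standalone sketch of the same check follows, assuming 4 KiB pages and example values; the real page count comes from the hypervisor NACA.

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed 4 KiB pages */

int main(void)
{
	unsigned long rd_size = 4096;		/* default ramdisk size, in KiB (assumed) */
	unsigned long xRamDiskSize = 2048;	/* ramdisk size in pages (assumed) */

	/* 2048 pages * 4 KiB = 8192 KiB, which exceeds the 4096 KiB default... */
	if ((rd_size * 1024) / PAGE_SIZE < xRamDiskSize)
		rd_size = (xRamDiskSize * PAGE_SIZE) / 1024;	/* ...so grow rd_size to 8192 KiB */

	printf("rd_size = %lu KiB\n", rd_size);
	return 0;
}

In other words, the kernel only enlarges the default ramdisk size when the hypervisor-provided image would not fit in it; it never shrinks rd_size.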
Example #4
static void __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 num_ptegs;
	u32 totalChunks, moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	klimit = msChunks_alloc(klimit, totalChunks, 1UL<<18);

	/* Get the absolute address of our load area
	 * and map it to physical address 0.
	 * This guarantees that the load area ends up at physical 0;
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks.
	 */
	
	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/* Only add the pages already mapped here.
	 * Otherwise we might add the HPT pages.
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address.
	 */
	if ( (loadAreaSize * 64) > HvPagesToMap )
		loadAreaSize = HvPagesToMap / 64;

	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/* TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes 
	 * the HPT size
	 */

	printk( "Mapping load area - physical addr = 0000000000000000\n"
                "                    absolute addr = %016lx\n", 
			chunk_to_addr(loadAreaFirstChunk) );
	printk( "Load area size %dK\n", loadAreaSize*256 );
	
	for (	nextPhysChunk = 0; 
		nextPhysChunk < loadAreaSize; 
		++nextPhysChunk ) {
		msChunks.abs[nextPhysChunk] = loadAreaFirstChunk+nextPhysChunk;
	}
	
	/* Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */

	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages =  (u32)(HvCallHpt_getHptPages());
	hptSizeChunks = hptSizePages >> (msChunks.chunk_shift-PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
	
	printk( "HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks*256 );

	/* Fill in the htab_data structure */
	
	/* Fill in size of hashed page table */
	num_ptegs = hptSizePages * (PAGE_SIZE/(sizeof(HPTE)*HPTES_PER_GROUP));
	htab_data.htab_num_ptegs = num_ptegs;
	htab_data.htab_hash_mask = num_ptegs - 1;
	
	/* The actual hashed page table is in the hypervisor, we have no direct access */
	htab_data.htab = NULL;

	/* Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd( mb, 32 );

	/* Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	while ( moreChunks ) {
		map = HvCallSm_get64BitsOfAccessMap( itLpNaca.xLpIndex,
						     currDword );
		thisChunk = currChunk;
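		/*
		 * Each 64-bit word of the access map covers 64 chunks and is
		 * scanned MSB-first below; a set bit means the chunk at index
		 * "thisChunk" is present in main store.
		 */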
		while ( map ) {
			chunkBit = map >> 63;
			map <<= 1;
			if ( chunkBit ) {
				--moreChunks;

				while ( thisChunk >= mb[curBlock].logicalEnd ) {
					++curBlock;
					if ( curBlock >= numMemoryBlocks )
						panic("out of memory blocks");
				}
				if ( thisChunk < mb[curBlock].logicalStart )
					panic("memory block error");

				absChunk = mb[curBlock].absStart + ( thisChunk - mb[curBlock].logicalStart );

				if ( ( ( absChunk < hptFirstChunk ) ||
				       ( absChunk > hptLastChunk ) ) &&
				     ( ( absChunk < loadAreaFirstChunk ) ||
				       ( absChunk > loadAreaLastChunk ) ) ) {
					msChunks.abs[nextPhysChunk] = absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}
					
	/* main store size (in chunks) is 
	 *   totalChunks - hptSizeChunks
	 * which should be equal to 
	 *   nextPhysChunk
	 */
	naca->physicalMemorySize = chunk_to_addr(nextPhysChunk);

	/* Bolt kernel mappings for all of memory */
	iSeries_bolt_kernel( 0, naca->physicalMemorySize );

	lmb_init();
	lmb_add( 0, naca->physicalMemorySize );
	lmb_analyze();	/* ?? */
	lmb_reserve( 0, __pa(klimit));

	/* 
	 * Hardcode to GP size.  I am not sure where to get this info. DRENG
	 */
	naca->slb_size = 64;
}
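
The chunk, page and PTEG factors used above can be sanity-checked with a short standalone sketch. It assumes 4 KiB pages, 256 KiB chunks (chunk_shift = 18), 16-byte hashed page table entries and 8 entries per group; these are assumptions for the example, not values taken from this file. The 64 pages-per-chunk result is also why the load-area clamp multiplies and divides by 64.

#include <stdio.h>

int main(void)
{
	/* Assumed constants: 4 KiB pages, 256 KiB chunks, 16-byte HPTEs, 8 per group. */
	const unsigned int PAGE_SHIFT = 12;
	const unsigned int chunk_shift = 18;
	const unsigned int sizeof_hpte = 16;
	const unsigned int HPTES_PER_GROUP = 8;

	unsigned int pages_per_chunk = 1u << (chunk_shift - PAGE_SHIFT);

	/* Example HPT of 1024 pages (4 MiB), purely illustrative. */
	unsigned int hptSizePages  = 1024;
	unsigned int hptSizeChunks = hptSizePages >> (chunk_shift - PAGE_SHIFT);
	unsigned int ptegs_per_page = (1u << PAGE_SHIFT) / (sizeof_hpte * HPTES_PER_GROUP);
	unsigned int num_ptegs = hptSizePages * ptegs_per_page;

	printf("pages per 256 KiB chunk : %u\n", pages_per_chunk);	/* 64 */
	printf("HPT size in chunks      : %u\n", hptSizeChunks);	/* 16 */
	printf("PTEGs per page          : %u\n", ptegs_per_page);	/* 32 */
	printf("num_ptegs               : %u\n", num_ptegs);		/* 32768 */
	printf("htab_hash_mask          : %#x\n", num_ptegs - 1);
	return 0;
}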