/*
 * iSeries_init_early - earliest platform-specific setup for iSeries.
 *
 * Runs once at boot, before the generic setup code.  The call order
 * below is load-bearing (see the per-step comments); do not reorder
 * without checking each dependency.
 */
static void __init iSeries_init_early(void)
{
	extern unsigned long memory_limit;

	DBG(" -> iSeries_init_early()\n");

	ppcdbg_initialize();

#if defined(CONFIG_BLK_DEV_INITRD)
	/*
	 * If the init RAM disk has been configured and there is
	 * a non-zero starting address for it, set it up.  The ramdisk
	 * image was placed in kernel space by the loader, hence the
	 * __va() translation and initrd_below_start_ok.
	 */
	if (naca.xRamDisk) {
		initrd_start = (unsigned long)__va(naca.xRamDisk);
		initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
		initrd_below_start_ok = 1;	// ramdisk in kernel space
		ROOT_DEV = Root_RAM0;
		/* Grow the default ramdisk size if the image won't fit */
		if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
			rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
	} else
#endif /* CONFIG_BLK_DEV_INITRD */
	{
	    /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
	}

	/* Snapshot timebase and hypervisor TOD together so they can be
	 * correlated later for timebase recalibration. */
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();

	/*
	 * Cache sizes must be initialized before hpte_init_iSeries is
	 * called, as the latter needs them for flush_icache_range()
	 */
	setup_iSeries_cache_sizes();

	/*
	 * Initialize the hash table management pointers
	 */
	hpte_init_iSeries();

	/*
	 * Initialize the DMA/TCE management
	 */
	iommu_init_early_iSeries();

	/*
	 * Initialize the table which translates Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	build_iSeries_Memory_Map();

	/* Fetch the command line from the hypervisor before parsing it */
	iSeries_get_cmdline();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	/* Parse early parameters, in particular mem=x */
	parse_early_param();

	/* Clamp the detected memory size to a requested mem= limit;
	 * a limit at or above the real top of RAM is ignored. */
	if (memory_limit) {
		if (memory_limit < systemcfg->physicalMemorySize)
			systemcfg->physicalMemorySize = memory_limit;
		else {
			printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
			memory_limit = 0;
		}
	}

	/* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
	iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);

	/* Register the (possibly clamped) memory with the boot-time
	 * memory allocator and reserve everything below klimit (the
	 * kernel image plus early allocations). */
	lmb_init();
	lmb_add(0, systemcfg->physicalMemorySize);
	lmb_analyze();
	lmb_reserve(0, __pa(klimit));

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif
	/* xPirEnvironMode == 0 appears to identify the Piranha simulator
	 * environment -- NOTE(review): semantics inferred from the flag
	 * name, confirm against the LPAR documentation. */
	if (itLpNaca.xPirEnvironMode == 0)
		piranha_simulator = 1;

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	/* Machine facilities layer is ready; publish the flag with a
	 * memory barrier so other CPUs see mf_init()'s effects first. */
	mf_init();
	mf_initialized = 1;
	mb();

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;
#endif /* CONFIG_BLK_DEV_INITRD */

	DBG(" <- iSeries_init_early()\n");
}
/*
 * build_iSeries_Memory_Map - build the Linux-physical to AS/400-absolute
 * address translation table (msChunks.abs[]).
 *
 * Layout produced:
 *   - the hypervisor load area is mapped to physical 0 so it is always
 *     the first range of "physical" memory;
 *   - the hypervisor-owned HPT chunks are skipped, so Linux never hands
 *     them out as normal memory;
 *   - every remaining chunk reported usable by the hypervisor access map
 *     is appended in order.
 *
 * On return, systemcfg->physicalMemorySize (and, for older consumers,
 * naca->physicalMemorySize) holds the usable main-store size.  The caller
 * (iSeries_init_early) is responsible for applying any mem= limit and for
 * bolting the kernel mappings / registering the memory with LMB -- this
 * function deliberately does NOT do that itself:
 *   1. the caller repeats that work immediately after we return, and
 *   2. doing it here would run before parse_early_param(), so a mem=
 *      limit would be ignored by the first (redundant) pass.
 */
static void __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 num_ptegs;
	u32 totalChunks, moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18);

	/* Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks
	 */
	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/* Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ((loadAreaSize * 64) > HvPagesToMap)
		loadAreaSize = HvPagesToMap / 64;
	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/* TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size
	 */
	printk("Mapping load area - physical addr = 0000000000000000\n"
	       "                   absolute addr = %016lx\n",
	       chunk_to_addr(loadAreaFirstChunk));
	printk("Load area size %dK\n", loadAreaSize * 256);

	/* Identity-map the load area chunks to the start of physical memory */
	for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
		msChunks.abs[nextPhysChunk] = loadAreaFirstChunk + nextPhysChunk;

	/* Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)HvCallHpt_getHptPages();
	hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk("HPT absolute addr = %016lx, size = %dK\n",
	       chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);

	/* Fill in the htab_data structure */
	/* Fill in size of hashed page table */
	num_ptegs = hptSizePages * (PAGE_SIZE / (sizeof(HPTE) * HPTES_PER_GROUP));
	htab_data.htab_num_ptegs = num_ptegs;
	htab_data.htab_hash_mask = num_ptegs - 1;

	/* The actual hashed page table is in the hypervisor,
	 * we have no direct access
	 */
	htab_data.htab = NULL;

	/* Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);

	/* Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	while (moreChunks) {
		map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
						    currDword);
		thisChunk = currChunk;
		/* Walk the 64 chunk bits in this dword, MSB first */
		while (map) {
			chunkBit = map >> 63;
			map <<= 1;
			if (chunkBit) {
				--moreChunks;
				/* Advance to the memory block containing
				 * this chunk (blocks are sorted by the VPD
				 * processing above). */
				while (thisChunk >= mb[curBlock].logicalEnd) {
					++curBlock;
					if (curBlock >= numMemoryBlocks)
						panic("out of memory blocks");
				}
				if (thisChunk < mb[curBlock].logicalStart)
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					(thisChunk - mb[curBlock].logicalStart);
				/* Skip chunks belonging to the HPT or to the
				 * already-mapped load area */
				if (((absChunk < hptFirstChunk) ||
				     (absChunk > hptLastChunk)) &&
				    ((absChunk < loadAreaFirstChunk) ||
				     (absChunk > loadAreaLastChunk))) {
					msChunks.abs[nextPhysChunk] = absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/* main store size (in chunks) is
	 *   totalChunks - hptSizeChunks
	 * which should be equal to
	 *   nextPhysChunk
	 *
	 * Publish it in systemcfg as well: the caller clamps and then
	 * consumes systemcfg->physicalMemorySize immediately after we
	 * return, so leaving it stale here would break the mem= handling.
	 */
	systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
	naca->physicalMemorySize = systemcfg->physicalMemorySize;

	/* NOTE: bolting the kernel mappings and the lmb_init/lmb_add/
	 * lmb_analyze/lmb_reserve sequence are performed by the caller
	 * after the mem= limit has been applied; doing them here as well
	 * was redundant and ignored the limit.
	 */

	/*
	 * Hardcode to GP size. I am not sure where to get this info. DRENG
	 */
	naca->slb_size = 64;
}