/*
 * We're called here very early in the boot. We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <*****@*****.**>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 */
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
	unsigned long offset = reloc_offset();
	struct cpu_spec *spec;

	/* First zero the BSS -- use memset_io, some platforms don't have
	 * caches on yet */
	memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
			__bss_stop - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	spec = identify_cpu(offset, mfspr(SPRN_PVR));

	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));
	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

	return KERNELBASE + offset;
}
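/*
 * Note: the RELOC/PTRRELOC helpers used throughout these routines are
 * defined elsewhere (arch-specific headers and assembly).  As a rough,
 * illustrative sketch only -- not the exact kernel definition --
 * PTRRELOC() simply biases a pointer to static data by the current
 * run-vs-link offset, which is also why these functions keep a local
 * "offset" variable around even when it looks unused:
 *
 *	unsigned long offset = reloc_offset();	(run address - link address)
 *	#define PTRRELOC(x)	((typeof(x))((unsigned long)(x) + offset))
 *
 * With the kernel running "offset" bytes away from where it was linked,
 * a link-time address such as &__bss_start must be relocated before
 * memset_io() can safely touch it.
 */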
void phys_call_rtas_display_status(char c)
{
	unsigned long offset = reloc_offset();
	struct rtas_args *rtas = PTRRELOC(&(get_paca()->xRtas));

	rtas->token = 10;
	rtas->nargs = 1;
	rtas->nret = 1;
	rtas->rets = (rtas_arg_t *)PTRRELOC(&(rtas->args[1]));
	rtas->args[0] = (int)c;

	enter_rtas(rtas);
}
/* This routine called with relocation disabled. */
void __init lmb_analyze(void)
{
	unsigned long i;
	unsigned long mem_size = 0;
	unsigned long size_mask = 0;
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
#ifdef CONFIG_MSCHUNKS
	unsigned long physbase = 0;
#endif

	for (i = 0; i < _lmb->memory.cnt; i++) {
		unsigned long lmb_size;

		lmb_size = _lmb->memory.region[i].size;

#ifdef CONFIG_MSCHUNKS
		_lmb->memory.region[i].physbase = physbase;
		physbase += lmb_size;
#else
		_lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
#endif

		mem_size += lmb_size;
		size_mask |= lmb_size;
	}

	_lmb->memory.size = mem_size;
}
/* This routine called with relocation disabled. */
void lmb_analyze(void)
{
	u64 i;
	u64 mem_size = 0;
	u64 size_mask = 0;
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
#ifdef CONFIG_MSCHUNKS
	u64 physbase = 0;
#endif

	for (i = 0; i < _lmb->memory.cnt; i++) {
		u64 lmb_size;

		lmb_size = _lmb->memory.region[i].size;

#ifdef CONFIG_MSCHUNKS
		_lmb->memory.region[i].physbase = physbase;
		physbase += lmb_size;
#else
		_lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
#endif

		mem_size += lmb_size;
		size_mask |= lmb_size;
	}

	_lmb->memory.size = mem_size;
}
struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;
	struct cpu_spec *t = &the_cpu_spec;
	int i;

	s = PTRRELOC(s);
	t = PTRRELOC(t);

	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++, s++)
		if ((pvr & s->pvr_mask) == s->pvr_value) {
			/*
			 * If we are overriding a previous value derived
			 * from the real PVR with a new value obtained
			 * using a logical PVR value, don't modify the
			 * performance monitor fields.
			 */
			if (t->num_pmcs && !s->num_pmcs) {
				t->cpu_name = s->cpu_name;
				t->cpu_features = s->cpu_features;
				t->cpu_user_features = s->cpu_user_features;
				t->icache_bsize = s->icache_bsize;
				t->dcache_bsize = s->dcache_bsize;
				t->cpu_setup = s->cpu_setup;
				t->cpu_restore = s->cpu_restore;
				t->platform = s->platform;
			} else
				*t = *s;
			*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
			/* ppc64 and booke expect identify_cpu to also call
			 * setup_cpu for that processor. I will consolidate
			 * that at a later time, for now, just use #ifdef.
			 * We also don't need to PTRRELOC the function pointer
			 * on ppc64 and booke as we are running at 0 in real
			 * mode on ppc64 and reloc_offset is always 0 on booke.
			 */
			if (s->cpu_setup) {
				s->cpu_setup(offset, s);
			}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
			return s;
		}
	BUG();
	return NULL;
}
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
	unsigned long offset = reloc_offset();
	struct cpu_spec *spec;

	/* First zero the BSS */
	memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
			__bss_stop - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	spec = identify_cpu(offset, mfspr(SPRN_PVR));

	/* Apply CPU-, MMU- and lwsync-feature fixups to the kernel text. */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));
	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));
	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));
	do_final_fixups();

	return KERNELBASE + offset;
}
void apply_feature_fixups(void)
{
	struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));
	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));
	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));
#ifdef CONFIG_PPC64
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}
long lmb_reserve(u64 base, u64 size)
{
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_rgn = &(_lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}
long __init lmb_reserve(unsigned long base, unsigned long size)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_rgn = &(_lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}
void phys_call_rtas(int token, int nargs, int nret, ...)
{
	va_list list;
	unsigned long offset = reloc_offset();
	struct rtas_args *rtas = PTRRELOC(&(get_paca()->xRtas));
	int i;

	rtas->token = token;
	rtas->nargs = nargs;
	rtas->nret = nret;
	rtas->rets = (rtas_arg_t *)PTRRELOC(&(rtas->args[nargs]));

	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		rtas->args[i] = (rtas_arg_t)LONG_LSW(va_arg(list, ulong));
	va_end(list);

	enter_rtas(rtas);
}
/* This routine called with relocation disabled. */
long lmb_add(u64 base, u64 size)
{
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_rgn = &(_lmb->memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		_lmb->rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}
unsigned long __init lmb_end_of_DRAM(void)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);
	int idx = _mem->cnt - 1;

#ifdef CONFIG_MSCHUNKS
	return (_mem->region[idx].physbase + _mem->region[idx].size);
#else
	return (_mem->region[idx].base + _mem->region[idx].size);
#endif /* CONFIG_MSCHUNKS */
}
u64 lmb_phys_mem_size(void)
{
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
#ifdef CONFIG_MSCHUNKS
	return _lmb->memory.size;
#else
	struct lmb_region *_mem = &(_lmb->memory);
	u64 idx = _mem->cnt - 1;
	u64 lastbase = _mem->region[idx].physbase;
	u64 lastsize = _mem->region[idx].size;

	return (lastbase + lastsize);
#endif /* CONFIG_MSCHUNKS */
}
/*
 * We're called here very early in the boot. We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <*****@*****.**>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 */
unsigned long __init early_init(unsigned long dt_ptr)
{
	unsigned long offset = reloc_offset();

	/* First zero the BSS -- use memset_io, some platforms don't have
	 * caches on yet */
	memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	identify_cpu(offset, 0);
	do_cpu_ftr_fixups(offset);

	return KERNELBASE + offset;
}
/* Depending on whether this is called from iSeries or pSeries setup
 * code, the location of the msChunks struct may or may not have
 * to be reloc'd, so we force the caller to do that for us by passing
 * in a pointer to the structure.
 */
unsigned long
msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
{
	unsigned long offset = reloc_offset();
	struct msChunks *_msChunks = PTRRELOC(&msChunks);

	_msChunks->num_chunks  = num_chunks;
	_msChunks->chunk_size  = chunk_size;
	_msChunks->chunk_shift = __ilog2(chunk_size);
	_msChunks->chunk_mask  = (1UL << _msChunks->chunk_shift) - 1;

	mem = _ALIGN(mem, sizeof(msChunks_entry));
	_msChunks->abs = (msChunks_entry *)(mem + offset);
	mem += num_chunks * sizeof(msChunks_entry);

	return mem;
}
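/*
 * Hypothetical usage sketch (not taken verbatim from the kernel): the
 * caller treats "mem" as an early-memory cursor, lets msChunks_alloc()
 * carve the chunk-translation table out of it, and continues allocating
 * from the value it returns:
 *
 *	mem = msChunks_alloc(mem, num_chunks, chunk_size);
 *	(mem now points just past the msChunks_entry array)
 */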
unsigned long __init lmb_phys_mem_size(void)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
#ifdef CONFIG_MSCHUNKS
	return _lmb->memory.size;
#else
	struct lmb_region *_mem = &(_lmb->memory);
	unsigned long total = 0;
	int i;

	/* add all physical memory to the bootmem map */
	for (i = 0; i < _mem->cnt; i++)
		total += _mem->region[i].size;
	return total;
#endif /* CONFIG_MSCHUNKS */
}
/* This routine called with relocation disabled. */
void __init lmb_init(void)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);

	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	_lmb->memory.region[0].base = 0;
	_lmb->memory.region[0].size = 0;
	_lmb->memory.cnt = 1;

	/* Ditto. */
	_lmb->reserved.region[0].base = 0;
	_lmb->reserved.region[0].size = 0;
	_lmb->reserved.cnt = 1;
}
u64 lmb_end_of_DRAM(void)
{
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);
	long idx;	/* must be signed: with u64, "idx >= 0" is always true */

	for (idx = _mem->cnt - 1; idx >= 0; idx--) {
#ifdef CONFIG_MSCHUNKS
		return (_mem->region[idx].physbase + _mem->region[idx].size);
#else
		return (_mem->region[idx].base + _mem->region[idx].size);
#endif /* CONFIG_MSCHUNKS */
	}

	return 0;
}
u64 lmb_abs_to_phys(u64 aa)
{
	u64 i, pa = aa;
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);

	for (i = 0; i < _mem->cnt; i++) {
		u64 lmbbase = _mem->region[i].base;
		u64 lmbsize = _mem->region[i].size;

		if (lmb_addrs_overlap(aa, 1, lmbbase, lmbsize)) {
			pa = _mem->region[i].physbase + (aa - lmbbase);
			break;
		}
	}

	return pa;
}
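/*
 * Illustrative example (numbers invented): with CONFIG_MSCHUNKS the
 * physbase values are packed, so a region with base = 1GB and
 * size = 256MB may have physbase = 512MB.  An absolute address
 * aa = 1GB + 4KB that falls inside it then translates to
 * pa = physbase + (aa - base) = 512MB + 4KB.
 */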
u64 lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);
	struct lmb_region *_rsv = &(_lmb->reserved);

	for (i = _mem->cnt - 1; i >= 0; i--) {
		u64 lmbbase = _mem->region[i].base;
		u64 lmbsize = _mem->region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr)
			base = _ALIGN_DOWN(min(lmbbase + lmbsize, max_addr) - size,
					   align);
		else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(_rsv, base, size)) >= 0)) {
			base = _ALIGN_DOWN(_rsv->region[j].base - size, align);
		}

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(_rsv, base, size);

	return base;
}
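/*
 * Worked example of the search above (illustrative numbers only):
 * with a single memory region [0, 256MB), align = 1MB, size = 16MB and
 * an existing reservation at [240MB, 256MB), the first candidate is
 * _ALIGN_DOWN(256MB - 16MB, 1MB) = 240MB, which overlaps the
 * reservation; the while loop retries at
 * _ALIGN_DOWN(240MB - 16MB, 1MB) = 224MB, which is clear, so
 * [224MB, 240MB) is added to the reserved list and 224MB is returned.
 */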
struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;
	struct cpu_spec *t = &the_cpu_spec;
	int i;

	s = PTRRELOC(s);
	t = PTRRELOC(t);

	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++, s++)
		if ((pvr & s->pvr_mask) == s->pvr_value) {
			/*
			 * If we are overriding a previous value derived
			 * from the real PVR with a new value obtained
			 * using a logical PVR value, don't modify the
			 * performance monitor fields.
			 */
			if (t->num_pmcs && !s->num_pmcs) {
				t->cpu_name = s->cpu_name;
				t->cpu_features = s->cpu_features;
				t->cpu_user_features = s->cpu_user_features;
				t->icache_bsize = s->icache_bsize;
				t->dcache_bsize = s->dcache_bsize;
				t->cpu_setup = s->cpu_setup;
				t->cpu_restore = s->cpu_restore;
				t->platform = s->platform;
				/*
				 * If we have passed through this logic once
				 * before and have pulled the default case
				 * because the real PVR was not found inside
				 * cpu_specs[], then we are possibly running in
				 * compatibility mode. In that case, let the
				 * oprofiler know which set of compatibility
				 * counters to pull from by making sure the
				 * oprofile_cpu_type string is set to that of
				 * compatibility mode. If the oprofile_cpu_type
				 * already has a value, then we are possibly
				 * overriding a real PVR with a logical one,
				 * and, in that case, keep the current value
				 * for oprofile_cpu_type.
				 */
				if (t->oprofile_cpu_type == NULL)
					t->oprofile_cpu_type = s->oprofile_cpu_type;
			} else
				*t = *s;
			*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;

			/*
			 * Set the base platform string once; assumes
			 * we're called with real pvr first.
			 */
			if (*PTRRELOC(&powerpc_base_platform) == NULL)
				*PTRRELOC(&powerpc_base_platform) = t->platform;

#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
			/* ppc64 and booke expect identify_cpu to also call
			 * setup_cpu for that processor. I will consolidate
			 * that at a later time, for now, just use #ifdef.
			 * We also don't need to PTRRELOC the function pointer
			 * on ppc64 and booke as we are running at 0 in real
			 * mode on ppc64 and reloc_offset is always 0 on booke.
			 */
			if (s->cpu_setup) {
				s->cpu_setup(offset, s);
			}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
			return s;
		}
	BUG();
	return NULL;
}