/*
 * cpu_startup: machine-dependent startup for this (usermode-style) port.
 *
 * Allocates and installs the kernel message buffer, carves a 1MB physio
 * submap out of the kernel map, prints the boot banner, and initializes
 * lwp0's PCB from the current host ucontext.  Called once, early, before
 * any process runs.
 */
void
cpu_startup(void)
{
        vaddr_t minaddr, maxaddr;
        size_t msgbufsize = 32 * 1024;

        /* get ourself a message buffer */
        um_msgbuf = kmem_zalloc(msgbufsize, KM_SLEEP);
        if (um_msgbuf == NULL)
                panic("couldn't allocate msgbuf");
        initmsgbuf(um_msgbuf, msgbufsize);

        /* allocate a submap for physio, 1Mb enough? */
        minaddr = 0;
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            1024 * 1024, 0, false, NULL);

        /* say hi! */
        banner();

        /* init lwp0 */
        memset(&lwp0pcb, 0, sizeof(lwp0pcb));
        /*
         * Capture the current host context as lwp0's machine state and
         * start with an empty signal mask.  The flags mark which parts
         * of the ucontext are valid (stack, CPU regs, sigmask).
         */
        thunk_getcontext(&lwp0pcb.pcb_ucp);
        thunk_sigemptyset(&lwp0pcb.pcb_ucp.uc_sigmask);
        lwp0pcb.pcb_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;
        uvm_lwp_setuarea(&lwp0, (vaddr_t) &lwp0pcb);
        /* userret context starts as a copy of the initial context */
        memcpy(&lwp0pcb.pcb_userret_ucp, &lwp0pcb.pcb_ucp,
            sizeof(ucontext_t));

        /* set stack top */
        lwp0pcb.sys_stack_top = lwp0pcb.sys_stack + TRAPSTACKSIZE;
}
/*
 * Early initialization, right before main is called.
 *
 * Loads all usable physical memory segments into UVM, initializes the
 * interrupt dispatch tables, runs the board-specific init routine for
 * the detected MVME model, and maps + initializes the kernel message
 * buffer at the end of core.
 */
void
mvme68k_init()
{
        int i;

        /*
         * Tell the VM system about available physical memory.
         */
        for (i = 0; i < mem_cluster_cnt; i++) {
                if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
                        /*
                         * Segment has been completely gobbled up.
                         */
                        continue;
                }
                /*
                 * Note the index of the mem cluster is the free
                 * list we want to put the memory on (0 == default,
                 * 1 == VME).  There can only be two.
                 */
                uvm_page_physload(atop(phys_seg_list[i].ps_start),
                    atop(phys_seg_list[i].ps_end),
                    atop(phys_seg_list[i].ps_start),
                    atop(phys_seg_list[i].ps_end), i);
        }

        /* Initialize interrupt handlers. */
        isrinit();

        switch (machineid) {
#ifdef MVME147
        case MVME_147:
                mvme147_init();
                break;
#endif
#ifdef MVME162
        case MVME_162:
                mvme162_init();
                break;
#endif
#ifdef MVME167
        case MVME_167:
                mvme167_init();
                break;
#endif
        default:
                panic("mvme68k_init: impossible machineid");
        }

        /*
         * Initialize error message buffer (at end of core).
         *
         * NOTE(review): this uses the old-style 6-argument pmap_enter()
         * where the 5th argument (TRUE) appears to be the "wired" flag
         * and the 6th the access type -- confirm against this tree's
         * pmap API before modernizing.
         */
        for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
                pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
                    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
                    TRUE, VM_PROT_READ|VM_PROT_WRITE);
        initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
}
/* * Early initialization, before main() is called. */ void luna68k_init() { volatile unsigned char *pio0 = (void *)0x49000000; int sw1, i; char *cp; extern char bootarg[64]; extern paddr_t avail_start, avail_end; /* * Tell the VM system about available physical memory. The * luna68k only has one segment. */ uvm_page_physload(atop(avail_start), atop(avail_end), atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT); /* * Initialize error message buffer (at end of core). * avail_end was pre-decremented in pmap_bootstrap to compensate. */ for (i = 0; i < btoc(MSGBUFSIZE); i++) pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE, avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED); pmap_update(pmap_kernel()); initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE)); pio0[3] = 0xb6; pio0[2] = 1 << 6; /* enable parity check */ pio0[3] = 0xb6; sw1 = pio0[0]; /* dipssw1 value */ sw1 ^= 0xff; sysconsole = !(sw1 & 0x2); /* console selection */ boothowto = 0; i = 0; /* * 'bootarg' has; * "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>" * where <addr> is MAC address of which network loader used (not * necessarily same as one at 0x4101.FFE0), <host> and <name> * are the values of HOST and SERVER environment variables, * * NetBSD/luna68k cares only the first argment; any of "sda". */ for (cp = bootarg; *cp != ' '; cp++) { BOOT_FLAG(*cp, boothowto); if (i++ >= sizeof(bootarg)) break; } #if 0 /* overload 1:sw1, which now means 'go ROM monitor' after poweron */ if (boothowto == 0) boothowto = (sw1 & 0x1) ? RB_SINGLE : 0; #endif }
/*
 * cpu_startup: VAX machine-dependent startup.
 *
 * Installs the message buffer, prints the banner and total memory,
 * runs the model-specific CPU configuration hook, and -- on the models
 * listed in the #if -- allocates the physio submap.
 */
void
cpu_startup(void)
{
#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
        vaddr_t minaddr, maxaddr;
#endif
        char pbuf[9];

        /*
         * Initialize error message buffer.
         */
        initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

        /*
         * Good {morning,afternoon,evening,night}.
         * Also call CPU init on systems that need that.
         */
        printf("%s%s", copyright, version);
        printf("%s\n", cpu_getmodel());
        if (dep_call->cpu_conf)
                (*dep_call->cpu_conf)();

        format_bytes(pbuf, sizeof(pbuf), avail_end);
        printf("total memory = %s\n", pbuf);
        panicstr = NULL;                /* forget any boot-time panic */
        mtpr(AST_NO, PR_ASTLVL);        /* reset the AST level register */
        spl0();                         /* lower IPL: interrupts on */

#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
        minaddr = 0;
        /*
         * Allocate a submap for physio.  This map effectively limits the
         * number of processes doing physio at any one time.
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);
#endif

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
        printf("avail memory = %s\n", pbuf);

#ifdef DDB
        if (boothowto & RB_KDB)
                Debugger();
#endif
        /* malloc() is safe for I/O extent map allocations from now on */
        iomap_ex_malloc_safe = 1;
}
/*
 * fic_init: early board initialization.
 *
 * Hardwires boot flags and the delay calibration (all marked XXX),
 * registers the single physical memory segment with UVM, maps and
 * initializes the interrupt controller, and sets up the kernel message
 * buffer at the end of core.
 */
void
fic_init(void)
{
        int i;
        extern paddr_t avail_start, avail_end;

        boothowto = RB_SINGLE;  /* XXX for now */
        boothowto |= RB_KDB;    /* XXX for now */

        delay_divisor = 30;     /* XXX */

        /*
         * Tell the VM system about available physical memory.  The
         * fic uses one segment.
         */
        uvm_page_physload(atop(avail_start), atop(avail_end),
            atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

        /*
         * map and init interrupt controller
         * (PG_CI presumably requests a cache-inhibited mapping for the
         * device registers at 0x44000000 -- confirm against pmap flags.)
         */
        physaccess((void*)virtual_avail, (void*)0x44000000,
            PAGE_SIZE, PG_RW|PG_CI);
        sicinit((void*)virtual_avail);
        virtual_avail += PAGE_SIZE;

        /*
         * Initialize error message buffer (at end of core).
         * avail_end was pre-decremented in pmap_bootstrap to compensate.
         */
        for (i = 0; i < btoc(MSGBUFSIZE); i++)
                pmap_enter(pmap_kernel(),
                    (vaddr_t)msgbufaddr + i * PAGE_SIZE,
                    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
                    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
        pmap_update(pmap_kernel());
        initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
}
/*
 * Machine-dependent startup code
 *
 * Maps the message buffer via the direct map, sizes and allocates the
 * variable-sized kernel tables and the buffer cache, and carves the
 * exec-argument and physio submaps out of the kernel map.
 */
void
cpu_startup(void)
{
        vaddr_t v;
        vsize_t sz;
        vaddr_t minaddr, maxaddr;

        /* msgbuf is reachable through the physical direct map */
        msgbuf_vaddr = PMAP_DIRECT_MAP(msgbuf_paddr);
        initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

        printf("%s", version);

        printf("real mem = %u (%uK)\n", ctob(physmem),
            ctob(physmem)/1024);

        /*
         * RAM above 4GB: turn on AMD GART support (presumably so DMA
         * to 32-bit devices can be remapped -- see amdgart driver).
         */
        if (physmem >= btoc(1ULL << 32)) {
                extern int amdgart_enable;
                amdgart_enable = 1;
        }

        /*
         * Find out how much space we need, allocate it,
         * and then give everything true virtual addresses.
         */
        sz = allocsys(0);
        if ((v = uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
                panic("startup: no room for tables");
        /* second allocsys() pass must consume exactly sz bytes */
        if (allocsys(v) - v != sz)
                panic("startup: table size inconsistency");

        /*
         * Now allocate buffers proper.  They are different than the above
         * in that they usually occupy more virtual memory than physical.
         */
        setup_buffers(&maxaddr);

        /*
         * Allocate a submap for exec arguments.  This map effectively
         * limits the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

        /*
         * Allocate a submap for physio
         */
        minaddr = vm_map_min(kernel_map);
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, FALSE, NULL);

        printf("avail mem = %lu (%luK)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free)/1024);
        printf("using %u buffers containing %u bytes (%uK) of memory\n",
            nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);

        bufinit();

        if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
                user_config();
#else
                printf("kernel does not support - c; continuing..\n");
#endif
        }

        /* Safe for i/o port / memory space allocation to use malloc now. */
        x86_bus_space_mallocok();
}
/*
 * cpu_startup: VAX machine-dependent startup (OpenBSD flavor).
 *
 * Sets up the message buffer, prints the banner, runs the per-model
 * CPU configuration hook, allocates the variable-sized kernel tables,
 * sizes the buffer cache, creates the exec-argument submap and, on
 * boards whose I/O controllers cannot translate addresses themselves,
 * the physio submap.
 */
void
cpu_startup()
{
        caddr_t v;
        int sz;
        vaddr_t minaddr, maxaddr;
        extern unsigned int avail_end;
        extern char cpu_model[];

        /*
         * Initialize error message buffer.
         */
        initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

        /*
         * Good {morning,afternoon,evening,night}.
         * Also call CPU init on systems that need that.
         */
        printf("%s%s [%08X %08X]\n", version, cpu_model,
            vax_cpudata, vax_siedata);
        if (dep_call->cpu_conf)
                (*dep_call->cpu_conf)();

        printf("real mem = %u (%uMB)\n", avail_end,
            avail_end/1024/1024);
        physmem = btoc(avail_end);
        mtpr(AST_NO, PR_ASTLVL);        /* reset the AST level register */
        spl0();                         /* lower IPL: interrupts on */

        /*
         * Find out how much space we need, allocate it, and then give
         * everything true virtual addresses.
         */
        sz = (int) allocsys((caddr_t)0);
        if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
                panic("startup: no room for tables");
        /* second allocsys() pass must consume exactly sz bytes */
        if (((unsigned long)allocsys(v) - (unsigned long)v) != sz)
                panic("startup: table size inconsistency");

        /*
         * Determine how many buffers to allocate.
         * We allocate bufcachepercent% of memory for buffer space.
         */
        if (bufpages == 0)
                bufpages = physmem * bufcachepercent / 100;

        /* Restrict to at most 25% filled kvm */
        if (bufpages >
            (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4)
                bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
                    PAGE_SIZE / 4;

        /*
         * Allocate a submap for exec arguments.  This map effectively limits
         * the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#if VAX46 || VAX48 || VAX49 || VAX53
        /*
         * Allocate a submap for physio.  This map effectively limits the
         * number of processes doing physio at any one time.
         *
         * Note that machines on which all mass storage I/O controllers
         * can perform address translation, do not need this.
         */
        if (vax_boardtype == VAX_BTYP_46 || vax_boardtype == VAX_BTYP_48 ||
            vax_boardtype == VAX_BTYP_49 || vax_boardtype == VAX_BTYP_1303)
                phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
                    VM_PHYS_SIZE, 0, FALSE, NULL);
#endif

        printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free)/1024/1024);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();

#ifdef DDB
        if (boothowto & RB_KDB)
                Debugger();
#endif

        /*
         * Configure the system.
         */
        if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
                user_config();
#else
                printf("kernel does not support -c; continuing..\n");
#endif
        }
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup(void)
{
        char *v;
        vaddr_t minaddr, maxaddr;
        char pbuf[9];

        /*
         * Initialize message buffer (for kernel printf).
         * This is put in physical page zero so it will
         * always be in the same place after a reboot.
         * Its mapping was prepared in pmap_bootstrap().
         * Also, offset some to avoid PROM scribbles.
         */
        v = (char *)KERNBASE;
        msgbufaddr = v + MSGBUFOFF;
        initmsgbuf(msgbufaddr, MSGBUFSIZE);

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf("%s%s", copyright, version);
        identifycpu();
        initfpu();      /* also prints FPU type */

        format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
        printf("total memory = %s\n", pbuf);

        /*
         * Get scratch page for dumpsys().
         */
        dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
        if (dumppage == 0)
                panic("startup: alloc dumppage");

        minaddr = 0;
        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

        /*
         * Finally, allocate mbuf cluster submap.
         */
        mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            nmbclusters * mclbytes, VM_MAP_INTRSAFE, false, NULL);

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
        printf("avail memory = %s\n", pbuf);

        /*
         * Allocate a virtual page (for use by /dev/mem)
         * This page is handed to pmap_enter() therefore
         * it has to be in the normal kernel VA range.
         */
        vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
            UVM_KMF_VAONLY | UVM_KMF_WAITVA);

        /*
         * Create the DVMA maps.
         */
        dvma_init();

        /*
         * Set up CPU-specific registers, cache, etc.
         */
        initcpu();
}
/*
 * void cpu_startup(void)
 *
 * Machine dependant startup code.
 *
 * Binds proc0 to its kernel stack, locks down the vector page, lets
 * pmap and the board finish their setup, maps and initializes the
 * message buffer, allocates the exec and physio submaps, and finally
 * wires up proc0's PCB stack pointers and trapframe.
 */
void
cpu_startup()
{
        u_int loop;
        paddr_t minaddr;
        paddr_t maxaddr;

        /* proc0's u-area lives on the kernel stack set up by locore */
        proc0paddr = (struct user *)kernelstack.pv_va;
        proc0.p_addr = proc0paddr;

        /* Set the cpu control register */
        cpu_setup();

        /* Lock down zero page */
        vector_page_setprot(VM_PROT_READ|VM_PROT_EXECUTE);

        /*
         * Give pmap a chance to set up a few more things now the vm
         * is initialised
         */
        pmap_postinit();

        /*
         * Allow per-board specific initialization
         */
        board_startup();

        /*
         * Initialize error message buffer (at end of core).
         */
        /* msgbufphys was setup during the secondary boot strap */
        for (loop = 0; loop < atop(MSGBUFSIZE); ++loop)
                pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
                    msgbufphys + loop * PAGE_SIZE,
                    VM_PROT_READ|VM_PROT_WRITE);
        pmap_update(pmap_kernel());
        initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

        /*
         * Identify ourselves for the msgbuf (everything printed earlier will
         * not be buffered).
         */
        printf(version);

        printf("real mem = %u (%uMB)\n", ptoa(physmem),
            ptoa(physmem)/1024/1024);

        /*
         * Allocate a submap for exec arguments.  This map effectively
         * limits the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, FALSE, NULL);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();

        printf("avail mem = %lu (%uMB)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free)/1024/1024);

        /*
         * Point curpcb at proc0's PCB and set the undefined-mode and
         * SVC-mode stack pointers inside its u-area; the trapframe sits
         * just below the SVC stack top.
         */
        curpcb = &proc0.p_addr->u_pcb;
        curpcb->pcb_flags = 0;
        curpcb->pcb_un.un_32.pcb32_und_sp = (u_int)proc0.p_addr +
            USPACE_UNDEF_STACK_TOP;
        curpcb->pcb_un.un_32.pcb32_sp = (u_int)proc0.p_addr +
            USPACE_SVC_STACK_TOP;
        pmap_set_pcb_pagedir(pmap_kernel(), curpcb);

        curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_un.un_32.pcb32_sp - 1;
}
/*
 * Machine-dependent startup code
 *
 * Fixes the message buffer mapping, warns about early KAP processor
 * masks, allocates the exec-argument submap, builds a fully separate
 * DVMA map (plus an extent for double mappings usable from interrupt
 * context), initializes buffers and interrupt handling.
 */
void
cpu_startup()
{
#ifdef DEBUG
        extern int pmapdebug;
        int opmapdebug = pmapdebug;
#endif
        vaddr_t minaddr, maxaddr;
        extern struct user *proc0paddr;

#ifdef DEBUG
        /* silence pmap tracing during startup; restored below */
        pmapdebug = 0;
#endif

        /*
         * fix message buffer mapping
         * (sun4 offsets by 4096 -- presumably to skip a reserved page;
         * see the matching offset in the sibling sparc cpu_startup().)
         */
        pmap_map(MSGBUF_VA, MSGBUF_PA, MSGBUF_PA + MSGBUFSIZE, UVM_PROT_RW);
        initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)),
            MSGBUFSIZE);

        proc0.p_addr = proc0paddr;

        /* I would print this earlier, but I want it in the message buffer */
        if (kap_maskcheck() == 0) {
                printf("WARNING: KAP M2C3 or earlier mask detected.\n"
                    "THE PROCESSOR IN THIS MACHINE SUFFERS FROM SEVERE HARDWARE ISSUES.\n"
                    "M2C3 PROCESSORS MAY RUN RELIABLY ENOUGH, OLDER WILL DEFINITELY NOT.\n\n");
        }

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf(version);
        /*identifycpu();*/
        printf("real mem = %d (%dMB)\n", ptoa(physmem),
            ptoa(physmem) / 1024 / 1024);

        /*
         * Allocate a submap for exec arguments.  This map effectively
         * limits the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

        /*
         * Allocate a map for physio.  Others use a submap of the kernel
         * map, but we want one completely separate, even though it uses
         * the same pmap.
         */
        dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
        dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
        phys_map = uvm_map_create(pmap_kernel(), dvma_base, dvma_end,
            VM_MAP_INTRSAFE);
        if (phys_map == NULL)
                panic("unable to create DVMA map");

        /*
         * Allocate DVMA space and dump into a privately managed
         * resource map for double mappings which is usable from
         * interrupt contexts.
         */
        if (uvm_km_valloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
                panic("unable to allocate from DVMA map");

        dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
            M_DEVBUF, NULL, 0, EX_NOWAIT);
        if (dvmamap_extent == 0)
                panic("unable to allocate extent for dvma");

#ifdef DEBUG
        /* restore pmap tracing */
        pmapdebug = opmapdebug;
#endif
        printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free) / 1024 / 1024);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();

        /* Early interrupt handlers initialization */
        intr_init();
}
/*
 * Machine-dependent startup code
 *
 * Moves the message buffer from its bootstrap mapping at KERNBASE to
 * MSGBUF_VA, reclaims unused kernel VA, creates the exec-argument
 * submap, sets PIE limits where the address space allows, initializes
 * DVMA and the buffer cache.
 */
void
cpu_startup()
{
#ifdef DEBUG
        extern int pmapdebug;
        int opmapdebug = pmapdebug;
#endif
        vaddr_t minaddr, maxaddr;
        paddr_t msgbufpa;
        extern struct user *proc0paddr;

#ifdef DEBUG
        /* silence pmap tracing during startup; restored below */
        pmapdebug = 0;
#endif

        if (CPU_ISSUN4M)
                stackgap_random = STACKGAP_RANDOM_SUN4M;

        /*
         * Re-map the message buffer from its temporary address
         * at KERNBASE to MSGBUF_VA.
         */

        /* Get physical address of the message buffer */
        pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &msgbufpa);

        /* Invalidate the current mapping at KERNBASE. */
        pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);
        pmap_update(pmap_kernel());

        /* Enter the new mapping */
        pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
            PROT_READ | PROT_WRITE);

        /*
         * Re-initialize the message buffer.
         * (sun4 offsets by 4096, presumably skipping a reserved page.)
         */
        initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)),
            MSGBUFSIZE);

        proc0.p_addr = proc0paddr;

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf(version);
        /*identifycpu();*/
        printf("real mem = %lu (%luMB)\n", ptoa(physmem),
            ptoa(physmem)/1024/1024);

        /*
         * uvm_km_init() has allocated all the virtual memory below the
         * end of the kernel image.  If VM_MIN_KERNEL_ADDRESS is below
         * KERNBASE, we need to reclaim that range.
         */
        if (vm_min_kernel_address < (vaddr_t)KERNBASE) {
                uvm_unmap(kernel_map, vm_min_kernel_address,
                    (vaddr_t)KERNBASE);
        }

        /*
         * Allocate a submap for exec arguments.  This map effectively
         * limits the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

        /*
         * Set up userland PIE limits.  PIE is disabled on sun4/4c/4e due
         * to the limited address space.
         */
        if (CPU_ISSUN4M) {
                vm_pie_max_addr = VM_MAXUSER_ADDRESS / 4;
        }

        dvma_init();

#ifdef DEBUG
        /* restore pmap tracing */
        pmapdebug = opmapdebug;
#endif
        printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free)/1024/1024);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();
}
/*
 * locore.s code calls bootstrap() just before calling main(), after double
 * mapping the kernel to high memory and setting up the trap base register.
 * We must finish mapping the kernel properly and glean any bootstrap info.
 *
 * Sequence: console + PROM setup, CPU discovery, lwp0 u-area, pmap
 * bootstrap, early message buffer, model-specific (sun4m) bootstrap,
 * interrupt-enable register mapping (sun4/4c), symbol table and
 * boothowto pickup from bootinfo.
 */
void
bootstrap(void)
{
        extern uint8_t u0[];
        extern struct consdev consdev_prom;
#if NKSYMS || defined(DDB) || defined(MODULAR)
        struct btinfo_symtab *bi_sym;
#else
        extern int end[];
#endif
        struct btinfo_boothowto *bi_howto;

        cn_tab = &consdev_prom;
        prom_init();

        /* Find the number of CPUs as early as possible */
        sparc_ncpus = find_cpus();

        uvm_lwp_setuarea(&lwp0, (vaddr_t)u0);

        cpuinfo.master = 1;
        getcpuinfo(&cpuinfo, 0);
        curlwp = &lwp0;

#if defined(SUN4M) || defined(SUN4D)
        /* Switch to sparc v8 multiply/divide functions on v8 machines */
        if (cpu_arch == 8) {
                extern void sparc_v8_muldiv(void);
                sparc_v8_muldiv();
        }
#endif /* SUN4M || SUN4D */

#if !NKSYMS && !defined(DDB) && !defined(MODULAR)
        /*
         * We want to reuse the memory where the symbols were stored
         * by the loader.  Relocate the bootinfo array which is loaded
         * above the symbols (we assume) to the start of BSS.  Then
         * adjust kernel_top accordingly.
         */
        bootinfo_relocate((void *)ALIGN((u_int)end));
#endif

        pmap_bootstrap(cpuinfo.mmu_ncontext,
            cpuinfo.mmu_nregion,
            cpuinfo.mmu_nsegment);

#if !defined(MSGBUFSIZE) || MSGBUFSIZE == 8192
        /*
         * Now that the kernel map has been set up, we can enable
         * the message buffer at the first physical page in the
         * memory bank where we were loaded.  There are 8192
         * bytes available for the buffer at this location (see the
         * comment in locore.s at the top of the .text segment).
         */
        initmsgbuf((void *)KERNBASE, 8192);
#endif

#if defined(SUN4M)
        /*
         * sun4m bootstrap is complex and is totally different for "normal" 4m
         * and for microSPARC-IIep - so it's split into separate functions.
         */
        if (CPU_ISSUN4M) {
#if !defined(MSIIEP)
                bootstrap4m();
#else
                bootstrapIIep();
#endif
        }
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4C)
        if (CPU_ISSUN4 || CPU_ISSUN4C) {
                /* Map Interrupt Enable Register */
                pmap_kenter_pa(INTRREG_VA,
                    INT_ENABLE_REG_PHYSADR | PMAP_NC | PMAP_OBIO,
                    VM_PROT_READ | VM_PROT_WRITE, 0);
                pmap_update(pmap_kernel());
                /* Disable all interrupts */
                *((unsigned char *)INTRREG_VA) = 0;
        }
#endif /* SUN4 || SUN4C */

#if NKSYMS || defined(DDB) || defined(MODULAR)
        if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL) {
                if (bi_sym->ssym < KERNBASE) {
                        /* Assume low-loading boot loader */
                        bi_sym->ssym += KERNBASE;
                        bi_sym->esym += KERNBASE;
                }
                ksyms_addsyms_elf(bi_sym->nsym, (void*)bi_sym->ssym,
                    (void*)bi_sym->esym);
        }
#endif

        /* honor boothowto handed over by the boot loader, if any */
        if ((bi_howto = lookup_bootinfo(BTINFO_BOOTHOWTO)) != NULL) {
                boothowto = bi_howto->boothowto;
        }
}
/*
 * Early initialization, before main() is called.
 *
 * Picks the early console, registers physical memory with UVM, maps
 * and initializes the kernel message buffer, reads the console DIP
 * switch, and determines boothowto/bootdev either from values passed
 * by the boot loader or by parsing the ROM monitor's argument string.
 */
void
luna68k_init(void)
{
        volatile uint8_t *pio0 = (void *)0x49000000;
        int sw1, i;
        char *cp;
        extern char bootarg[64];
        extern paddr_t avail_start, avail_end;

        /* initialize cn_tab for early console */
#if 1
        cn_tab = &syscons;
#else
        cn_tab = &romcons;
#endif

        /*
         * Tell the VM system about available physical memory.  The
         * luna68k only has one segment.
         */
        uvm_page_physload(atop(avail_start), atop(avail_end),
            atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

        /*
         * Initialize error message buffer (at end of core).
         * avail_end was pre-decremented in pmap_bootstrap to compensate.
         */
        for (i = 0; i < btoc(MSGBUFSIZE); i++)
                pmap_kenter_pa((vaddr_t)msgbufaddr + i * PAGE_SIZE,
                    avail_end + i * PAGE_SIZE,
                    VM_PROT_READ|VM_PROT_WRITE, 0);
        pmap_update(pmap_kernel());
        initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));

        /*
         * Program the parallel I/O chip (presumably i8255-style; the
         * magic values are inherited) and read DIP switch 1.
         */
        pio0[3] = 0xb6;
        pio0[2] = 1 << 6;               /* enable parity check */
        pio0[3] = 0xb6;
        sw1 = pio0[0];                  /* dip sw1 value */
        sw1 ^= 0xff;
        sysconsole = !(sw1 & 0x2);      /* console selection */

        /*
         * Check if boothowto and bootdev values are passed by our
         * bootloader.
         */
        if ((bootdev & B_MAGICMASK) == B_DEVMAGIC) {
                /* Valid value is set; no need to parse bootarg. */
                return;
        }

        /*
         * No valid bootdev value is set.
         * Assume we are booted by ROM monitor directly using a.out kernel
         * and we have to parse bootarg passed from the monitor to set
         * proper boothowto and check netboot.
         */

        /* set default to "sd0a" with no howto flags */
        bootdev = MAKEBOOTDEV(0, LUNA68K_BOOTADPT_SPC, 0, 0, 0);
        boothowto = 0;

        /*
         * 'bootarg' on LUNA has:
         * "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>"
         * where <addr> is MAC address of which network loader used (not
         * necessarily same as one at 0x4101.FFE0), <host> and <name>
         * are the values of HOST and SERVER environment variables.
         *
         * 'bootarg' on LUNA-II has "<args of x command>" only.
         *
         * NetBSD/luna68k cares only the first argument; any of "sda".
         */
        /* force-terminate so the scan below cannot run off the array */
        bootarg[63] = '\0';
        for (cp = bootarg; *cp != '\0'; cp++) {
                if (*cp == '-') {
                        /* a "-xyz" group: fold each letter into boothowto */
                        char c;
                        while ((c = *cp) != '\0' && c != ' ') {
                                BOOT_FLAG(c, boothowto);
                                cp++;
                        }
                } else if (*cp == 'E' && memcmp("ENADDR=", cp, 7) == 0) {
                        /* network loader was used: boot device is LANCE */
                        bootdev =
                            MAKEBOOTDEV(0, LUNA68K_BOOTADPT_LANCE, 0, 0, 0);
                }
        }
}
/*
 * cpu_startup: machine-dependent startup.
 *
 * Records the CPU clock, installs the message buffer, allocates the
 * physio submap, and publishes memory size and processor frequency in
 * the board properties dictionary for drivers to consume.
 */
void
cpu_startup(void)
{
        /* For use by propdb. */
        static u_int memsize = PHYSMEM * 1024 * 1024;
        static u_int cpuspeed = CPUFREQ * 1000 * 1000;
        prop_number_t pn;
        vaddr_t minaddr, maxaddr;
        char pbuf[9];

        curcpu()->ci_khz = cpuspeed / 1000;

        /* Initialize error message buffer. */
        initmsgbuf((void *)msgbuf, round_page(MSGBUFSIZE));

        printf("%s%s", copyright, version);

        format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
        printf("total memory = %s\n", pbuf);

        minaddr = 0;

        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

        /*
         * No need to allocate an mbuf cluster submap.  Mbuf clusters
         * are allocated via the pool allocator, and we use direct-mapped
         * pool pages.
         */

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
        printf("avail memory = %s\n", pbuf);

        /*
         * Set up the board properties database.
         */
        board_info_init();

        pn = prop_number_create_integer(memsize);
        KASSERT(pn != NULL);
        if (prop_dictionary_set(board_properties, "mem-size", pn) == false)
                panic("setting mem-size");
        prop_object_release(pn);

        pn = prop_number_create_integer(cpuspeed);
        KASSERT(pn != NULL);
        if (prop_dictionary_set(board_properties, "processor-frequency",
            pn) == false)
                panic("setting processor-frequency");
        prop_object_release(pn);

        /*
         * Now that we have VM, malloc()s are OK in bus_space.
         */
        bus_space_mallocok();
        fake_mapiodev = 0;
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup(void)
{
        void *v;
        vaddr_t minaddr, maxaddr;
        char pbuf[9];

        /*
         * Initialize message buffer (for kernel printf).
         * This is put in physical pages four through seven
         * so it will always be in the same place after a
         * reboot. (physical pages 0-3 are reserved by the PROM
         * for its vector table and other stuff.)
         * Its mapping was prepared in pmap_bootstrap().
         * Also, offset some to avoid PROM scribbles.
         */
        v = (void *) (PAGE_SIZE * 4);
        msgbufaddr = (void *)((char *)v + MSGBUFOFF);
        initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(LKM)
        {
                extern int nsym;
                extern char *ssym, *esym;

                ksyms_init(nsym, ssym, esym);
        }
#endif /* DDB */

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf("%s%s", copyright, version);
        identifycpu();
        fputype = FPU_NONE;
#ifdef  FPU_EMULATE
        printf("fpu: emulator\n");
#else
        printf("fpu: no math support\n");
#endif

        format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
        printf("total memory = %s\n", pbuf);

        /*
         * XXX fredette - we force a small number of buffers
         * to help me debug this on my low-memory machine.
         * this should go away at some point, allowing the
         * normal automatic buffer-sizing to happen.
         */
        bufpages = 37;

        /*
         * Get scratch page for dumpsys().
         */
        if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE,0,
            UVM_KMF_WIRED)) == 0)
                panic("startup: alloc dumppage");

        minaddr = 0;
        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

        /*
         * Finally, allocate mbuf cluster submap.
         */
        mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            nmbclusters * mclbytes, VM_MAP_INTRSAFE, false, NULL);

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
        printf("avail memory = %s\n", pbuf);

        /*
         * Allocate a virtual page (for use by /dev/mem)
         * This page is handed to pmap_enter() therefore
         * it has to be in the normal kernel VA range.
         */
        vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
            UVM_KMF_VAONLY | UVM_KMF_WAITVA);

        /*
         * Allocate DMA map for devices on the bus.
         */
        dvmamap = extent_create("dvmamap",
            DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
            M_DEVBUF, 0, 0, EX_NOWAIT);
        if (dvmamap == NULL)
                panic("unable to allocate DVMA map");

        /*
         * Set up CPU-specific registers, cache, etc.
         */
        initcpu();
}
/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 * Finishes CPU/pmap setup, maps (if needed) and initializes the
 * message buffer, allocates the physio submap, prints the banner and
 * memory totals, and wires up lwp0's kernel stack pointer/trapframe.
 */
void
cpu_startup(void)
{
        vaddr_t minaddr;
        vaddr_t maxaddr;
        char pbuf[9];

        /*
         * Until we better locking, we have to live under the kernel lock.
         */
        //KERNEL_LOCK(1, NULL);

        /* Set the CPU control register */
        cpu_setup(boot_args);

#ifndef ARM_HAS_VBAR
        /* Lock down zero page */
        vector_page_setprot(VM_PROT_READ);
#endif

        /*
         * Give pmap a chance to set up a few more things now the vm
         * is initialised
         */
        pmap_postinit();

        /*
         * Initialize error message buffer (at end of core).
         */
        /* msgbufphys was setup during the secondary boot strap */
        /* only map it here if pmap_bootstrap did not already do so */
        if (!pmap_extract(pmap_kernel(), (vaddr_t)msgbufaddr, NULL)) {
                for (u_int loop = 0; loop < btoc(MSGBUFSIZE); ++loop) {
                        pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
                            msgbufphys + loop * PAGE_SIZE,
                            VM_PROT_READ|VM_PROT_WRITE, 0);
                }
        }
        pmap_update(pmap_kernel());
        initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

        /*
         * Identify ourselves for the msgbuf (everything printed earlier will
         * not be buffered).
         */
        printf("%s%s", copyright, version);

        format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
        printf("total memory = %s\n", pbuf);

        minaddr = 0;

        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
        printf("avail memory = %s\n", pbuf);

        /* lwp0's trapframe sits just below its SVC stack top */
        struct lwp * const l = &lwp0;
        struct pcb * const pcb = lwp_getpcb(l);
        pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
        lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * Maps and initializes the message buffer at the end of core,
 * allocates the variable-sized kernel tables, sizes the buffer cache,
 * and creates the exec-argument and physio submaps.
 */
void
cpu_startup()
{
        unsigned i;
        caddr_t v;
        vaddr_t minaddr, maxaddr;
        vsize_t size;
#ifdef DEBUG
        extern int pmapdebug;
        int opmapdebug = pmapdebug;

        /* silence pmap tracing during startup; restored below */
        pmapdebug = 0;
#endif

        /*
         * Initialize error message buffer (at end of core).
         * avail_end was pre-decremented in pmap_bootstrap to compensate.
         */
        for (i = 0; i < atop(MSGBUFSIZE); i++)
                pmap_kenter_pa((vaddr_t)msgbufp + i * PAGE_SIZE,
                    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
        pmap_update(pmap_kernel());
        initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf("%s", version);
        identifycpu();
        printf("real mem = %u (%uMB)\n", ptoa(physmem),
            ptoa(physmem) / 1024 / 1024);

        /*
         * Find out how much space we need, allocate it,
         * and then give everything true virtual addresses.
         */
        size = (vsize_t)allocsys((caddr_t)0);
        if ((v = (caddr_t) uvm_km_zalloc(kernel_map, round_page(size))) == 0)
                panic("startup: no room for tables");
        /* second allocsys() pass must consume exactly size bytes */
        if (allocsys(v) - v != size)
                panic("startup: table size inconsistency");

        /*
         * Determine how many buffers to allocate.
         * We allocate bufcachepercent% of memory for buffer space.
         */
        if (bufpages == 0)
                bufpages = physmem * bufcachepercent / 100;

        /* Restrict to at most 25% filled kvm */
        if (bufpages >
            (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4)
                bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
                    PAGE_SIZE / 4;

        /*
         * Allocate a submap for exec arguments.  This map effectively
         * limits the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

        /*
         * Allocate a submap for physio.
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, FALSE, NULL);

#ifdef DEBUG
        /* restore pmap tracing */
        pmapdebug = opmapdebug;
#endif

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();

        printf("avail mem = %u (%uMB)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free) / 1024 / 1024);

        /*
         * Configure the system.
         */
        if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
                user_config();
#else
                printf("kernel does not support -c; continuing..\n");
#endif
        }
}
/*
 * mips_init: machine-dependent bootstrap for SGI (sgi port) systems.
 *
 * Called once on the boot processor before main(), while still running
 * on firmware-provided state.  In strict order: enables 64-bit kernel
 * address spaces, clears BSS, locates the symbol table, identifies the
 * platform through ARCBios, loads physical memory into UVM, configures
 * caches, wipes and re-wires the TLB, brings up the console, steals
 * memory for the message buffer and proc0's U-area, bootstraps pmap and
 * installs the exception/TLB-refill vectors.
 *
 * argc/argv: ARCBios argument vector (forwarded to dobootopts()).
 * boot_esym: end-of-symbol-table pointer handed over by the boot loader.
 * Returns the initial kernel stack pointer for proc0.
 */
caddr_t
mips_init(int argc, void *argv, caddr_t boot_esym)
{
    char *cp;
    int i;
    u_int cputype;
    vaddr_t xtlb_handler;
    extern char start[], edata[], end[];
    extern char exception[], e_exception[];
    extern char *hw_vendor;

#ifdef MULTIPROCESSOR
    /*
     * Set curcpu address on primary processor.
     */
    setcurcpu(&cpu_info_primary);
#endif

    /*
     * Make sure we can access the extended address space.
     * Note that r10k and later do not allow XUSEG accesses
     * from kernel mode unless SR_UX is set.
     */
    setsr(getsr() | SR_KX | SR_UX);

    /*
     * Clear the compiled BSS segment in OpenBSD code.
     * Must happen before any global is written; the loader did not
     * zero it for us.
     */
    bzero(edata, end - edata);

    /*
     * Reserve space for the symbol table, if it exists.
     */

    /* Get the 64-bit pointer the boot loader may have stored at `end'. */
    ssym = (char *)*(u_int64_t *)end;

    /* Attempt to locate ELF header and symbol table after kernel. */
    if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
        end[2] == ELFMAG2 && end[3] == ELFMAG3) {
        /* ELF header exists directly after kernel. */
        ssym = end;
        esym = boot_esym;
        ekern = esym;
    } else if (((long)ssym - (long)end) >= 0 &&
        ((long)ssym - (long)end) <= 0x1000 &&
        ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
        ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3) {
        /* Pointers exist directly after kernel. */
        esym = (char *)*((u_int64_t *)end + 1);
        ekern = esym;
    } else {
        /* Pointers aren't setup either... */
        ssym = NULL;
        esym = NULL;
        ekern = end;
    }

    /*
     * Initialize the system type and set up memory layout.
     * Note that some systems have a more complex memory setup.
     */
    bios_ident();

    /*
     * Read and store ARCBios variables for future reference.
     */
    cp = Bios_GetEnvironmentVariable("ConsoleOut");
    if (cp != NULL && *cp != '\0')
        strlcpy(bios_console, cp, sizeof(bios_console));
    cp = Bios_GetEnvironmentVariable("gfx");
    if (cp != NULL && *cp != '\0')
        strlcpy(bios_graphics, cp, sizeof(bios_graphics));
    cp = Bios_GetEnvironmentVariable("keybd");
    if (cp != NULL && *cp != '\0')
        strlcpy(bios_keyboard, cp, sizeof(bios_keyboard));

    /*
     * Determine system type and set up configuration record data.
     */
    hw_vendor = "SGI";
    switch (sys_config.system_type) {
#ifdef TGT_O2
    case SGI_O2:
        bios_printf("Found SGI-IP32, setting up.\n");
        strlcpy(cpu_model, "IP32", sizeof(cpu_model));
        ip32_setup();
        break;
#endif
#ifdef TGT_ORIGIN
    case SGI_IP27:
        bios_printf("Found SGI-IP27, setting up.\n");
        strlcpy(cpu_model, "IP27", sizeof(cpu_model));
        ip27_setup();
        break;
    case SGI_IP35:
        bios_printf("Found SGI-IP35, setting up.\n");
        /* IP27 is intentional, we use the same kernel */
        strlcpy(cpu_model, "IP27", sizeof(cpu_model));
        ip27_setup();
        break;
#endif
#ifdef TGT_OCTANE
    case SGI_OCTANE:
        bios_printf("Found SGI-IP30, setting up.\n");
        strlcpy(cpu_model, "IP30", sizeof(cpu_model));
        ip30_setup();
        break;
#endif
    default:
        bios_printf("Kernel doesn't support this system type!\n");
        bios_printf("Halting system.\n");
        Bios_Halt();
        while(1);   /* Bios_Halt() should not return; spin forever if it does. */
    }

    /*
     * Look at arguments passed to us and compute boothowto.
     */
    boothowto = RB_AUTOBOOT;

    dobootopts(argc, argv);

    /*
     * Figure out where we supposedly booted from.
     */
    cp = Bios_GetEnvironmentVariable("OSLoadPartition");
    if (cp == NULL)
        cp = "unknown";
    if (strlcpy(osloadpartition, cp, sizeof osloadpartition) >=
        sizeof osloadpartition)
        bios_printf("Value of `OSLoadPartition' is too large.\n"
            "The kernel might not be able to find out its root device.\n");

    /*
     * Read platform-specific environment variables.
     */
    switch (sys_config.system_type) {
#ifdef TGT_O2
    case SGI_O2:
        /* Get Ethernet address from ARCBIOS. */
        cp = Bios_GetEnvironmentVariable("eaddr");
        if (cp != NULL && strlen(cp) > 0)
            strlcpy(bios_enaddr, cp, sizeof bios_enaddr);
        break;
#endif
    default:
        break;
    }

    /*
     * Set pagesize to enable use of page macros and functions.
     * Commit available memory to UVM system.
     */
    uvmexp.pagesize = PAGE_SIZE;
    uvm_setpagesize();

    for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_last_page != 0; i++) {
        uint64_t fp, lp;
        uint64_t firstkernpage, lastkernpage;
        unsigned int freelist;
        paddr_t firstkernpa, lastkernpa;

        /* The kernel may run from XKPHYS or CKSEG0; convert either way. */
        if (IS_XKPHYS((vaddr_t)start))
            firstkernpa = XKPHYS_TO_PHYS((vaddr_t)start);
        else
            firstkernpa = CKSEG0_TO_PHYS((vaddr_t)start);
        if (IS_XKPHYS((vaddr_t)ekern))
            lastkernpa = XKPHYS_TO_PHYS((vaddr_t)ekern);
        else
            lastkernpa = CKSEG0_TO_PHYS((vaddr_t)ekern);

        firstkernpage = atop(trunc_page(firstkernpa));
        lastkernpage = atop(round_page(lastkernpa));

        fp = mem_layout[i].mem_first_page;
        lp = mem_layout[i].mem_last_page;
        freelist = mem_layout[i].mem_freelist;

        /* Account for kernel and kernel symbol table. */
        if (fp >= firstkernpage && lp < lastkernpage)
            continue;   /* In kernel. */

        if (lp < firstkernpage || fp > lastkernpage) {
            uvm_page_physload(fp, lp, fp, lp, freelist);
            continue;   /* Outside kernel. */
        }

        /* Segment overlaps the kernel image: trim or split around it. */
        if (fp >= firstkernpage)
            fp = lastkernpage;
        else if (lp < lastkernpage)
            lp = firstkernpage;
        else { /* Need to split! */
            uint64_t xp = firstkernpage;
            uvm_page_physload(fp, xp, fp, xp, freelist);
            fp = lastkernpage;
        }
        if (lp > fp) {
            uvm_page_physload(fp, lp, fp, lp, freelist);
        }
    }

    /*
     * Configure cache.
     *
     * Collapse the detected processor revision into one of the two
     * cache-handling families (r5k-class or r10k-class).
     */
    switch (bootcpu_hwinfo.type) {
#ifdef CPU_R10000
    case MIPS_R10000:
    case MIPS_R12000:
    case MIPS_R14000:
        cputype = MIPS_R10000;
        break;
#endif
#ifdef CPU_R5000
    case MIPS_R5000:
    case MIPS_RM52X0:
        cputype = MIPS_R5000;
        break;
#endif
#ifdef CPU_RM7000
    case MIPS_RM7000:
    case MIPS_RM9000:
        cputype = MIPS_R5000;
        break;
#endif
    default:
        /*
         * If we can't identify the cpu type, it must be
         * r10k-compatible on Octane and Origin families, and
         * it is likely to be r5k-compatible on O2.
         */
        switch (sys_config.system_type) {
        case SGI_O2:
            cputype = MIPS_R5000;
            break;
        default:
        case SGI_OCTANE:
        case SGI_IP27:
        case SGI_IP35:
            cputype = MIPS_R10000;
            break;
        }
        break;
    }

    /* Install the cache-maintenance function pointers for the family. */
    switch (cputype) {
    default:
#if defined(CPU_R5000) || defined(CPU_RM7000)
    case MIPS_R5000:
        Mips5k_ConfigCache(curcpu());
        sys_config._SyncCache = Mips5k_SyncCache;
        sys_config._InvalidateICache = Mips5k_InvalidateICache;
        sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
        sys_config._HitSyncDCache = Mips5k_HitSyncDCache;
        sys_config._IOSyncDCache = Mips5k_IOSyncDCache;
        sys_config._HitInvalidateDCache = Mips5k_HitInvalidateDCache;
        break;
#endif
#ifdef CPU_R10000
    case MIPS_R10000:
        Mips10k_ConfigCache(curcpu());
        sys_config._SyncCache = Mips10k_SyncCache;
        sys_config._InvalidateICache = Mips10k_InvalidateICache;
        sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
        sys_config._HitSyncDCache = Mips10k_HitSyncDCache;
        sys_config._IOSyncDCache = Mips10k_IOSyncDCache;
        sys_config._HitInvalidateDCache = Mips10k_HitInvalidateDCache;
        break;
#endif
    }

    /*
     * Last chance to call the BIOS. Wiping the TLB means the BIOS' data
     * areas are demapped on most systems.
     */
    delay(20*1000);     /* Let any UART FIFO drain... */

    tlb_set_page_mask(TLB_PAGE_MASK);
    tlb_set_wired(0);
    tlb_flush(bootcpu_hwinfo.tlbsize);
    /* UPAGES / 2 — presumably one wired entry per U-area page pair; confirm. */
    tlb_set_wired(UPAGES / 2);

    /*
     * Get a console, very early but after initial mapping setup.
     */
    consinit();
    printf("Initial setup done, switching console.\n");

    /*
     * Init message buffer.
     */
    msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
    initmsgbuf(msgbufbase, MSGBUFSIZE);

    /*
     * Allocate U page(s) for proc[0], pm_tlbpid 1.
     */
    proc0.p_addr = proc0paddr = curcpu()->ci_curprocpaddr =
        (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
    proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
    tlb_set_pid(1);

    /*
     * Bootstrap VM system.
     */
    pmap_bootstrap();

    /*
     * Copy down exception vector code.
     */
    bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
    bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

    /*
     * Build proper TLB refill handler trampolines.
     */
    switch (cputype) {
#if defined(CPU_R5000) || defined(CPU_RM7000)
    case MIPS_R5000:
        {
        /*
         * R5000 processors need a specific chip bug workaround
         * in their tlb handlers.  Theoretically only revision 1
         * of the processor need it, but there is evidence
         * later versions also need it.
         *
         * This is also necessary on RM52x0 and most RM7k/RM9k,
         * and is a documented errata for these chips.
         */
        extern void xtlb_miss_err_r5k;
        xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
        }
        break;
#endif
    default:
        {
        extern void xtlb_miss;
        xtlb_handler = (vaddr_t)&xtlb_miss;
        }
        break;
    }

    /* Same 64-bit handler serves both 32-bit and 64-bit refill vectors. */
    build_trampoline(TLB_MISS_EXC_VEC, xtlb_handler);
    build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

    /*
     * Turn off bootstrap exception vectors.
     */
    setsr(getsr() & ~SR_BOOT_EXC_VEC);
    proc0.p_md.md_regs->sr = getsr();

    /*
     * Clear out the I and D caches.
     */
    Mips_SyncCache(curcpu());

#ifdef DDB
    db_machine_init();
    if (boothowto & RB_KDB)
        Debugger();
#endif

    /*
     * Return new stack pointer.
     * NOTE(review): the 64-byte gap below the top of the U-area looks
     * like an ABI argument-save area — confirm against locore.
     */
    return ((caddr_t)proc0paddr + USPACE - 64);
}
/*
 * Machine-dependent startup code
 *
 * cpu_startup: sparc port.  Finishes VM bring-up after the kernel
 * allocator is usable: maps the message buffer (physical address 0),
 * sizes and allocates the variable-sized kernel tables via the two-pass
 * allocsys() protocol, creates the exec-argument submap and a fully
 * separate interrupt-safe DVMA map for physio, then initializes buffers
 * and early interrupt handlers.  Call order is significant throughout.
 */
void
cpu_startup()
{
    caddr_t v;
    int sz;
#ifdef DEBUG
    extern int pmapdebug;
    int opmapdebug = pmapdebug;
#endif
    vaddr_t minaddr, maxaddr;
    extern struct user *proc0paddr;

#ifdef DEBUG
    /* Silence pmap tracing during startup; restored below. */
    pmapdebug = 0;
#endif

    if (CPU_ISSUN4M) {
        extern int stackgap_random;

        stackgap_random = STACKGAP_RANDOM_SUN4M;
    }

    /*
     * fix message buffer mapping, note phys addr of msgbuf is 0
     */
    pmap_map(MSGBUF_VA, 0, MSGBUFSIZE, VM_PROT_READ|VM_PROT_WRITE);
    /*
     * NOTE(review): the 4096-byte offset on sun4 presumably skips an area
     * of the first page reserved by the PROM — confirm.
     */
    initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

    proc0.p_addr = proc0paddr;

    /*
     * Good {morning,afternoon,evening,night}.
     */
    printf(version);
    /*identifycpu();*/
    printf("real mem = %u (%uMB)\n", ptoa(physmem),
        ptoa(physmem)/1024/1024);

    /*
     * Find out how much space we need, allocate it,
     * and then give everything true virtual addresses.
     * allocsys(NULL) only computes the size; the second call carves
     * the region up, so the two results must agree.
     */
    sz = (int)allocsys((caddr_t)0);
    if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
        panic("startup: no room for tables");
    if (allocsys(v) - v != sz)
        panic("startup: table size inconsistency");

    /*
     * Determine how many buffers to allocate.
     * We allocate bufcachepercent% of memory for buffer space.
     */
    if (bufpages == 0)
        bufpages = physmem * bufcachepercent / 100;

    /* Restrict to at most 25% filled kvm */
    if (bufpages >
        (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4)
        bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
            PAGE_SIZE / 4;

    /*
     * Allocate a submap for exec arguments.  This map effectively
     * limits the number of processes exec'ing at any time.
     */
    minaddr = vm_map_min(kernel_map);
    exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

    /*
     * Allocate a map for physio.  Others use a submap of the kernel
     * map, but we want one completely separate, even though it uses
     * the same pmap.
     */
    dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
    dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
#if defined(SUN4M)
    if (CPU_ISSUN4M) {
        /*
         * The DVMA space we want partially overrides kernel_map.
         * Allocate it in kernel_map as well to prevent it from being
         * used for other things.
         */
        if (uvm_map(kernel_map, &dvma_base,
            vm_map_max(kernel_map) - dvma_base,
            NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
            UVM_ADV_NORMAL, 0)))
            panic("startup: can not steal dvma map");
    }
#endif
    phys_map = uvm_map_create(pmap_kernel(), dvma_base, dvma_end,
        VM_MAP_INTRSAFE);
    if (phys_map == NULL)
        panic("unable to create DVMA map");

    /*
     * Allocate DVMA space and dump into a privately managed
     * resource map for double mappings which is usable from
     * interrupt contexts.
     */
    if (uvm_km_valloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
        panic("unable to allocate from DVMA map");

    dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
        M_DEVBUF, NULL, 0, EX_NOWAIT);
    if (dvmamap_extent == 0)
        panic("unable to allocate extent for dvma");

#ifdef DEBUG
    /* Restore pmap tracing to its pre-startup setting. */
    pmapdebug = opmapdebug;
#endif
    printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
        ptoa(uvmexp.free)/1024/1024);

    /*
     * Set up buffers, so they can be used to read disk labels.
     */
    bufinit();

    /* Early interrupt handlers initialization */
    intr_init();
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 *
 * (sun2/sun3 port.)  Sets up the message buffer in PROM-safe physical
 * pages, allocates kernel tables, physically backs the buffer cache
 * page by page, creates the exec and mbuf submaps, the /dev/mem scratch
 * page and the DVMA extent, then runs CPU-specific init.
 */
void
cpu_startup()
{
    caddr_t v;
    int sz, i;
    vsize_t size;
    int base, residual;
    vaddr_t minaddr, maxaddr;
    char pbuf[9];

    /*
     * Initialize message buffer (for kernel printf).
     * This is put in physical pages four through seven
     * so it will always be in the same place after a
     * reboot. (physical pages 0-3 are reserved by the PROM
     * for its vector table and other stuff.)
     * Its mapping was prepared in pmap_bootstrap().
     * Also, offset some to avoid PROM scribbles.
     */
    v = (caddr_t) (NBPG * 4);
    msgbufaddr = (caddr_t)(v + MSGBUFOFF);
    initmsgbuf(msgbufaddr, MSGBUFSIZE);

#ifdef DDB
    {
        /* Symbol table bounds were stashed at `end' by the loader. */
        extern int end[];
        extern char *esym;

        ddb_init(end[0], end + 1, (int*)esym);
    }
#endif /* DDB */

    /*
     * Good {morning,afternoon,evening,night}.
     */
    printf(version);
    identifycpu();
    fputype = FPU_NONE;
#ifdef FPU_EMULATE
    printf("fpu: emulator\n");
#else
    printf("fpu: no math support\n");
#endif

    format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
    printf("total memory = %s\n", pbuf);

    /*
     * XXX fredette - we force a small number of buffers
     * to help me debug this on my low-memory machine.
     * this should go away at some point, allowing the
     * normal automatic buffer-sizing to happen.
     */
    bufpages = 37;

    /*
     * Get scratch page for dumpsys().
     */
    if ((dumppage = uvm_km_alloc(kernel_map, NBPG)) == 0)
        panic("startup: alloc dumppage");

    /*
     * Find out how much space we need, allocate it,
     * and then give everything true virtual addresses.
     * (Two-pass allocsys(): sizing pass, then carving pass —
     * the results must match.)
     */
    sz = (int)allocsys(NULL, NULL);
    if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
        panic("startup: no room for tables");
    if (allocsys(v, NULL) - v != sz)
        panic("startup: table size inconsistency");

    /*
     * Now allocate buffers proper.  They are different than the above
     * in that they usually occupy more virtual memory than physical.
     */
    size = MAXBSIZE * nbuf;
    if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
        NULL, UVM_UNKNOWN_OFFSET, 0,
        UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
        UVM_ADV_NORMAL, 0)) != 0)
        panic("startup: cannot allocate VM for buffers");
    minaddr = (vaddr_t)buffers;
    if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
        /* don't want to alloc more physical mem than needed */
        bufpages = btoc(MAXBSIZE) * nbuf;
    }
    base = bufpages / nbuf;
    residual = bufpages % nbuf;
    for (i = 0; i < nbuf; i++) {
        vsize_t curbufsize;
        vaddr_t curbuf;
        struct vm_page *pg;

        /*
         * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
         * that MAXBSIZE space, we allocate and map (base+1) pages
         * for the first "residual" buffers, and then we allocate
         * "base" pages for the rest.
         */
        curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
        curbufsize = NBPG * ((i < residual) ? (base+1) : base);

        while (curbufsize) {
            pg = uvm_pagealloc(NULL, 0, NULL, 0);
            if (pg == NULL)
                panic("cpu_startup: not enough memory for "
                    "buffer cache");
            pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
                VM_PROT_READ|VM_PROT_WRITE);
            curbuf += PAGE_SIZE;
            curbufsize -= PAGE_SIZE;
        }
    }
    /* Flush the batched kenter mappings before anyone touches them. */
    pmap_update(pmap_kernel());

    /*
     * Allocate a submap for exec arguments.  This map effectively
     * limits the number of processes exec'ing at any time.
     */
    exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

    /*
     * We don't use a submap for physio, and use a separate map
     * for DVMA allocations.  Our vmapbuf just maps pages into
     * the kernel map (any kernel mapping is OK) and then the
     * device drivers clone the kernel mappings into DVMA space.
     */

    /*
     * Finally, allocate mbuf cluster submap.
     */
    mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

    format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
    printf("avail memory = %s\n", pbuf);
    format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
    printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

    /*
     * Allocate a virtual page (for use by /dev/mem)
     * This page is handed to pmap_enter() therefore
     * it has to be in the normal kernel VA range.
     */
    vmmap = uvm_km_valloc_wait(kernel_map, NBPG);

    /*
     * Allocate dma map for devices on the bus.
     */
    dvmamap = extent_create("dvmamap",
        DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
        M_DEVBUF, 0, 0, EX_NOWAIT);
    if (dvmamap == NULL)
        panic("unable to allocate DVMA map");

    /*
     * Set up CPU-specific registers, cache, etc.
     */
    initcpu();

    /*
     * Set up buffers, so they can be used to read disk labels.
     */
    bufinit();
}
/*
 * cpu_startup: vax port machine-dependent startup.
 *
 * Initializes the message buffer, prints the greeting and performs
 * model-specific CPU configuration through dep_call, drops to spl0,
 * creates the exec-argument submap (and a physio submap only on board
 * types whose I/O controllers cannot translate addresses themselves),
 * then initializes buffers and handles -c/RB_CONFIG.
 */
void
cpu_startup()
{
    vaddr_t minaddr, maxaddr;
    extern char cpu_model[];

    /*
     * Initialize error message buffer.
     */
    initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

    /*
     * Good {morning,afternoon,evening,night}.
     * Also call CPU init on systems that need that.
     */
    printf("%s%s [%08X %08X]\n", version, cpu_model, vax_cpudata, vax_siedata);
    if (dep_call->cpu_conf)
        (*dep_call->cpu_conf)();

    printf("real mem = %u (%uMB)\n", ptoa(physmem),
        ptoa(physmem)/1024/1024);
    /* No AST pending; then open up interrupts for the rest of startup. */
    mtpr(AST_NO, PR_ASTLVL);
    spl0();

    /*
     * Allocate a submap for exec arguments.  This map effectively limits
     * the number of processes exec'ing at any time.
     */
    minaddr = vm_map_min(kernel_map);
    exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#if VAX46 || VAX48 || VAX49 || VAX53 || VAX60
    /*
     * Allocate a submap for physio.  This map effectively limits the
     * number of processes doing physio at any one time.
     *
     * Note that machines on which all mass storage I/O controllers
     * can perform address translation, do not need this.
     */
    if (vax_boardtype == VAX_BTYP_46 || vax_boardtype == VAX_BTYP_48 ||
        vax_boardtype == VAX_BTYP_49 || vax_boardtype == VAX_BTYP_1303 ||
        vax_boardtype == VAX_BTYP_60)
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, FALSE, NULL);
#endif

    printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
        ptoa(uvmexp.free)/1024/1024);

    /*
     * Set up buffers, so they can be used to read disk labels.
     */
    bufinit();
#ifdef DDB
    if (boothowto & RB_KDB)
        Debugger();
#endif

    /*
     * Configure the system.
     */
    if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
        user_config();
#else
        printf("kernel does not support -c; continuing..\n");
#endif
    }
}
/*
 * Machine-dependent startup code
 *
 * cpu_startup: x86_64 (amd64) port.  Maps and initializes the message
 * buffer, prints CPU identification and feature flags, allocates the
 * variable-sized kernel tables (two-pass allocsys()), reserves VA for
 * the buffer cache (physical pages deferred — see the XXX note below),
 * creates the exec/physio/mbuf submaps, and finally allows bus-space
 * code to use malloc.
 */
void
cpu_startup()
{
    caddr_t v, v2;
    unsigned long sz;
    int x;
    vaddr_t minaddr, maxaddr;
    vsize_t size;
    char buf[160];  /* about 2 line */
    char pbuf[9];

    /*
     * Initialize error message buffer (et end of core).
     */
    msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_64_round_page(MSGBUFSIZE));
    if (msgbuf_vaddr == 0)
        panic("failed to valloc msgbuf_vaddr");

    /* msgbuf_paddr was init'd in pmap */
    for (x = 0; x < btoc(MSGBUFSIZE); x++)
        pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
            msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);

    initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

    printf("%s", version);

    printf("cpu0: %s", cpu_model);
    /* +4999 rounds the TSC frequency to the nearest 0.01 MHz for display. */
    if (cpu_tsc_freq != 0)
        printf(", %ld.%02ld MHz", (cpu_tsc_freq + 4999) / 1000000,
            ((cpu_tsc_freq + 4999) / 10000) % 100);
    printf("\n");

    /* Decode and print the two CPUID feature-flag words. */
    if ((cpu_feature & CPUID_MASK1) != 0) {
        bitmask_snprintf(cpu_feature, CPUID_FLAGS1,
            buf, sizeof(buf));
        printf("cpu0: features %s\n", buf);
    }
    if ((cpu_feature & CPUID_MASK2) != 0) {
        bitmask_snprintf(cpu_feature, CPUID_FLAGS2,
            buf, sizeof(buf));
        printf("cpu0: features %s\n", buf);
    }

    if (cpuid_level >= 3 && ((cpu_feature & CPUID_PN) != 0)) {
        printf("cpu0: serial number %04X-%04X-%04X-%04X-%04X-%04X\n",
            cpu_serial[0] / 65536, cpu_serial[0] % 65536,
            cpu_serial[1] / 65536, cpu_serial[1] % 65536,
            cpu_serial[2] / 65536, cpu_serial[2] % 65536);
    }

    format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
    printf("total memory = %s\n", pbuf);

    /*
     * Find out how much space we need, allocate it,
     * and then give everything true virtual addresses.
     * (Sizing pass, then carving pass — the results must agree.)
     */
    sz = (unsigned long)allocsys(NULL, NULL);
    if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
        panic("startup: no room for tables");
    v2 = allocsys(v, NULL);
    if ((v2 - v) != sz)
        panic("startup: table size inconsistency");

    /*
     * Allocate virtual address space for the buffers.  The area
     * is not managed by the VM system.
     */
    size = MAXBSIZE * nbuf;
    if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
        NULL, UVM_UNKNOWN_OFFSET, 0,
        UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
        UVM_ADV_NORMAL, 0)) != 0)
        panic("cpu_startup: cannot allocate VM for buffers");
    minaddr = (vaddr_t)buffers;
    if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
        /* don't want to alloc more physical mem than needed */
        bufpages = btoc(MAXBSIZE) * nbuf;
    }

    /*
     * XXX We defer allocation of physical pages for buffers until
     * XXX after autoconfiguration has run.  We must do this because
     * XXX on system with large amounts of memory or with large
     * XXX user-configured buffer caches, the buffer cache will eat
     * XXX up all of the lower 16M of RAM.  This prevents ISA DMA
     * XXX maps from allocating bounce pages.
     *
     * XXX Note that nothing can use buffer cache buffers until after
     * XXX autoconfiguration completes!!
     *
     * XXX This is a hack, and needs to be replaced with a better
     * XXX solution!  [email protected], December 6, 1997
     */

    /*
     * Allocate a submap for exec arguments.  This map effectively
     * limits the number of processes exec'ing at any time.
     */
    exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

    /*
     * Allocate a submap for physio
     */
    phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        VM_PHYS_SIZE, 0, FALSE, NULL);

    /*
     * Finally, allocate mbuf cluster submap.
     */
    mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
        nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

    /*
     * XXX Buffer cache pages haven't yet been allocated, so
     * XXX we need to account for those pages when printing
     * XXX the amount of free memory.
     */
    format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
    printf("avail memory = %s\n", pbuf);
    format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
    printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

    /* Safe for i/o port / memory space allocation to use malloc now. */
    x86_64_bus_space_mallocok();
}
/*
 * mips_init: machine-dependent bootstrap for SGI systems
 * (older, uniprocessor variant of the sgi port).
 *
 * Called once before main(), while still on firmware state.  In strict
 * order: enables the 64-bit kernel address spaces, clears BSS, locates
 * the symbol table, identifies the platform via ARCBios, commits
 * physical memory to UVM, reads the CPU/FPU revision registers, sizes
 * the TLB, configures caches, wipes and re-wires the TLB, brings up the
 * console, steals memory for the message buffer, proc0's U-area and the
 * allocsys() tables, bootstraps pmap and installs the exception and
 * TLB-refill vectors.
 *
 * argc/argv: ARCBios argument vector (forwarded to dobootopts()).
 * boot_esym: end-of-symbol-table pointer handed over by the boot loader.
 * Returns the initial kernel stack pointer for proc0.
 */
caddr_t
mips_init(int argc, void *argv, caddr_t boot_esym)
{
    char *cp;
    int i;
    caddr_t sd;
    u_int cputype;
    vaddr_t tlb_handler, xtlb_handler;
    extern char start[], edata[], end[];
    extern char exception[], e_exception[];
    extern char *hw_vendor, *hw_prod;
    extern void tlb_miss;
    extern void tlb_miss_err_r5k;
    extern void xtlb_miss;
    extern void xtlb_miss_err_r5k;

    /*
     * Make sure we can access the extended address space.
     * Note that r10k and later do not allow XUSEG accesses
     * from kernel mode unless SR_UX is set.
     */
    setsr(getsr() | SR_KX | SR_UX);

#ifdef notyet
    /*
     * Make sure KSEG0 cacheability match what we intend to use.
     *
     * XXX This does not work as expected on IP30. Does ARCBios
     * XXX depend on this?
     */
    cp0_setcfg((cp0_getcfg() & ~0x07) | CCA_CACHED);
#endif

    /*
     * Clear the compiled BSS segment in OpenBSD code.
     * Must happen before any global is written.
     */
    bzero(edata, end - edata);

    /*
     * Reserve space for the symbol table, if it exists.
     */

    /* Get the 64-bit pointer the boot loader may have stored at `end'. */
    ssym = (char *)*(u_int64_t *)end;

    /* Attempt to locate ELF header and symbol table after kernel. */
    if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
        end[2] == ELFMAG2 && end[3] == ELFMAG3) {
        /* ELF header exists directly after kernel. */
        ssym = end;
        esym = boot_esym;
        ekern = esym;
    } else if (((long)ssym - (long)end) >= 0 &&
        ((long)ssym - (long)end) <= 0x1000 &&
        ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
        ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3) {
        /* Pointers exist directly after kernel. */
        esym = (char *)*((u_int64_t *)end + 1);
        ekern = esym;
    } else {
        /* Pointers aren't setup either... */
        ssym = NULL;
        esym = NULL;
        ekern = end;
    }

    /*
     * Initialize the system type and set up memory layout.
     * Note that some systems have a more complex memory setup.
     */
    bios_ident();

    /*
     * Determine system type and set up configuration record data.
     */
    hw_vendor = "SGI";
    switch (sys_config.system_type) {
#if defined(TGT_O2)
    case SGI_O2:
        bios_printf("Found SGI-IP32, setting up.\n");
        hw_prod = "O2";
        strlcpy(cpu_model, "IP32", sizeof(cpu_model));
        ip32_setup();
        sys_config.cpu[0].clock = 180000000;  /* Reasonable default */
        /* Override the default clock if ARCBios reports a sane value (MHz). */
        cp = Bios_GetEnvironmentVariable("cpufreq");
        if (cp && atoi(cp, 10, NULL) > 100)
            sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;
        break;
#endif
#if defined(TGT_ORIGIN200) || defined(TGT_ORIGIN2000)
    case SGI_O200:
        bios_printf("Found SGI-IP27, setting up.\n");
        hw_prod = "Origin 200";
        strlcpy(cpu_model, "IP27", sizeof(cpu_model));
        ip27_setup();
        break;
    case SGI_O300:
        bios_printf("Found SGI-IP35, setting up.\n");
        hw_prod = "Origin 300";
        /* IP27 is intentional, we use the same kernel */
        strlcpy(cpu_model, "IP27", sizeof(cpu_model));
        ip27_setup();
        break;
#endif
#if defined(TGT_OCTANE)
    case SGI_OCTANE:
        bios_printf("Found SGI-IP30, setting up.\n");
        hw_prod = "Octane";
        strlcpy(cpu_model, "IP30", sizeof(cpu_model));
        ip30_setup();
        sys_config.cpu[0].clock = 175000000;  /* Reasonable default */
        cp = Bios_GetEnvironmentVariable("cpufreq");
        if (cp && atoi(cp, 10, NULL) > 100)
            sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;
        break;
#endif
    default:
        bios_printf("Kernel doesn't support this system type!\n");
        bios_printf("Halting system.\n");
        Bios_Halt();
        while(1);   /* Bios_Halt() should not return; spin forever if it does. */
    }

    /*
     * Read and store console type.
     */
    cp = Bios_GetEnvironmentVariable("ConsoleOut");
    if (cp != NULL && *cp != '\0')
        strlcpy(bios_console, cp, sizeof bios_console);

    /* Disable serial console if ARCS is telling us to use video. */
    if (strncmp(bios_console, "video", 5) == 0)
        comconsaddr = 0;

    /*
     * Look at arguments passed to us and compute boothowto.
     */
    boothowto = RB_AUTOBOOT;

    dobootopts(argc, argv);

    /*
     * Figure out where we supposedly booted from.
     */
    cp = Bios_GetEnvironmentVariable("OSLoadPartition");
    if (cp == NULL)
        cp = "unknown";
    if (strlcpy(osloadpartition, cp, sizeof osloadpartition) >=
        sizeof osloadpartition)
        bios_printf("Value of `OSLoadPartition' is too large.\n"
            "The kernel might not be able to find out its root device.\n");

    /*
     * Read platform-specific environment variables.
     */
    switch (sys_config.system_type) {
#if defined(TGT_O2)
    case SGI_O2:
        /* Get Ethernet address from ARCBIOS. */
        cp = Bios_GetEnvironmentVariable("eaddr");
        if (cp != NULL && strlen(cp) > 0)
            strlcpy(bios_enaddr, cp, sizeof bios_enaddr);
        break;
#endif
    default:
        break;
    }

    /*
     * Set pagesize to enable use of page macros and functions.
     * Commit available memory to UVM system.
     */
    uvmexp.pagesize = PAGE_SIZE;
    uvm_setpagesize();

    for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_first_page != 0; i++) {
        u_int32_t fp, lp;
        u_int32_t firstkernpage, lastkernpage;
        unsigned int freelist;
        paddr_t firstkernpa, lastkernpa;

        /* The kernel may run from XKPHYS or KSEG0; convert either way. */
        if (IS_XKPHYS((vaddr_t)start))
            firstkernpa = XKPHYS_TO_PHYS((vaddr_t)start);
        else
            firstkernpa = KSEG0_TO_PHYS((vaddr_t)start);
        if (IS_XKPHYS((vaddr_t)ekern))
            lastkernpa = XKPHYS_TO_PHYS((vaddr_t)ekern);
        else
            lastkernpa = KSEG0_TO_PHYS((vaddr_t)ekern);

        firstkernpage = atop(trunc_page(firstkernpa));
        lastkernpage = atop(round_page(lastkernpa));

        fp = mem_layout[i].mem_first_page;
        lp = mem_layout[i].mem_last_page;
        freelist = mem_layout[i].mem_freelist;

        /* Account for kernel and kernel symbol table. */
        if (fp >= firstkernpage && lp < lastkernpage)
            continue;   /* In kernel. */

        if (lp < firstkernpage || fp > lastkernpage) {
            uvm_page_physload(fp, lp, fp, lp, freelist);
            continue;   /* Outside kernel. */
        }

        /* Segment overlaps the kernel image: trim or split around it. */
        if (fp >= firstkernpage)
            fp = lastkernpage;
        else if (lp < lastkernpage)
            lp = firstkernpage;
        else { /* Need to split! */
            u_int32_t xp = firstkernpage;
            uvm_page_physload(fp, xp, fp, xp, freelist);
            fp = lastkernpage;
        }
        if (lp > fp)
            uvm_page_physload(fp, lp, fp, lp, freelist);
    }

    /* Read CPU/FPU revision registers and size the TLB accordingly. */
    switch (sys_config.system_type) {
#if defined(TGT_O2) || defined(TGT_OCTANE)
    case SGI_O2:
    case SGI_OCTANE:
        sys_config.cpu[0].type = (cp0_get_prid() >> 8) & 0xff;
        sys_config.cpu[0].vers_maj = (cp0_get_prid() >> 4) & 0x0f;
        sys_config.cpu[0].vers_min = cp0_get_prid() & 0x0f;
        sys_config.cpu[0].fptype = (cp1_get_prid() >> 8) & 0xff;
        sys_config.cpu[0].fpvers_maj = (cp1_get_prid() >> 4) & 0x0f;
        sys_config.cpu[0].fpvers_min = cp1_get_prid() & 0x0f;

        /*
         * Configure TLB.
         */
        switch(sys_config.cpu[0].type) {
        case MIPS_RM7000:
            /* Rev A (version >= 2) CPU's have 64 TLB entries. */
            if (sys_config.cpu[0].vers_maj < 2) {
                sys_config.cpu[0].tlbsize = 48;
            } else {
                sys_config.cpu[0].tlbsize = 64;
            }
            break;

        case MIPS_R10000:
        case MIPS_R12000:
        case MIPS_R14000:
            sys_config.cpu[0].tlbsize = 64;
            break;

        default:
            sys_config.cpu[0].tlbsize = 48;
            break;
        }
        break;
#endif
    default:
        break;
    }

    /*
     * Configure cache.
     *
     * Collapse the processor revision into one of the two
     * cache-handling families (r5k-class or r10k-class).
     */
    switch(sys_config.cpu[0].type) {
    case MIPS_R10000:
    case MIPS_R12000:
    case MIPS_R14000:
        cputype = MIPS_R10000;
        break;
    case MIPS_R5000:
    case MIPS_RM7000:
    case MIPS_RM52X0:
    case MIPS_RM9000:
        cputype = MIPS_R5000;
        break;
    default:
        /*
         * If we can't identify the cpu type, it must be
         * r10k-compatible on Octane and Origin families, and
         * it is likely to be r5k-compatible on O2.
         */
        switch (sys_config.system_type) {
        case SGI_O2:
            cputype = MIPS_R5000;
            break;
        default:
        case SGI_OCTANE:
        case SGI_O200:
        case SGI_O300:
            cputype = MIPS_R10000;
            break;
        }
        break;
    }

    /* Install the cache-maintenance function pointers for the family. */
    switch (cputype) {
    case MIPS_R10000:
        Mips10k_ConfigCache();
        sys_config._SyncCache = Mips10k_SyncCache;
        sys_config._InvalidateICache = Mips10k_InvalidateICache;
        sys_config._InvalidateICachePage = Mips10k_InvalidateICachePage;
        sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
        sys_config._HitSyncDCache = Mips10k_HitSyncDCache;
        sys_config._IOSyncDCache = Mips10k_IOSyncDCache;
        sys_config._HitInvalidateDCache = Mips10k_HitInvalidateDCache;
        break;
    default:
    case MIPS_R5000:
        Mips5k_ConfigCache();
        sys_config._SyncCache = Mips5k_SyncCache;
        sys_config._InvalidateICache = Mips5k_InvalidateICache;
        sys_config._InvalidateICachePage = Mips5k_InvalidateICachePage;
        sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
        sys_config._HitSyncDCache = Mips5k_HitSyncDCache;
        sys_config._IOSyncDCache = Mips5k_IOSyncDCache;
        sys_config._HitInvalidateDCache = Mips5k_HitInvalidateDCache;
        break;
    }

    /*
     * Last chance to call the BIOS. Wiping the TLB means the BIOS' data
     * areas are demapped on most systems.
     */
    delay(20*1000);     /* Let any UART FIFO drain... */

    /* UPAGES / 2 — presumably one wired entry per U-area page pair; confirm. */
    sys_config.cpu[0].tlbwired = UPAGES / 2;
    tlb_set_wired(0);
    tlb_flush(sys_config.cpu[0].tlbsize);
    tlb_set_wired(sys_config.cpu[0].tlbwired);

    /*
     * Get a console, very early but after initial mapping setup.
     */
    consinit();
    printf("Initial setup done, switching console.\n");

    /*
     * Init message buffer.
     */
    msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
    initmsgbuf(msgbufbase, MSGBUFSIZE);

    /*
     * Allocate U page(s) for proc[0], pm_tlbpid 1.
     */
    proc0.p_addr = proc0paddr = curprocpaddr =
        (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
    proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
    tlb_set_pid(1);

    /*
     * Allocate system data structures.
     * (Two-pass allocsys(): sizing pass, then carving pass.)
     */
    i = (vsize_t)allocsys(NULL);
    sd = (caddr_t)pmap_steal_memory(i, NULL, NULL);
    allocsys(sd);

    /*
     * Bootstrap VM system.
     */
    pmap_bootstrap();

    /*
     * Copy down exception vector code.
     */
    bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
    bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

    /*
     * Build proper TLB refill handler trampolines.
     */
    switch (cputype) {
    case MIPS_R5000:
        /*
         * R5000 processors need a specific chip bug workaround
         * in their tlb handlers.  Theoretically only revision 1
         * of the processor need it, but there is evidence
         * later versions also need it.
         *
         * This is also necessary on RM52x0; we test on the `rounded'
         * cputype value instead of sys_config.cpu[0].type; this
         * causes RM7k and RM9k to be included, just to be on the
         * safe side.
         */
        tlb_handler = (vaddr_t)&tlb_miss_err_r5k;
        xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
        break;
    default:
        tlb_handler = (vaddr_t)&tlb_miss;
        xtlb_handler = (vaddr_t)&xtlb_miss;
        break;
    }

    build_trampoline(TLB_MISS_EXC_VEC, tlb_handler);
    build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

    /*
     * Turn off bootstrap exception vectors.
     */
    setsr(getsr() & ~SR_BOOT_EXC_VEC);
    proc0.p_md.md_regs->sr = getsr();

    /*
     * Clear out the I and D caches.
     */
    Mips_SyncCache();

#ifdef DDB
    db_machine_init();
    if (boothowto & RB_KDB)
        Debugger();
#endif

    /*
     * Return new stack pointer.
     * NOTE(review): the 64-byte gap below the top of the U-area looks
     * like an ABI argument-save area — confirm against locore.
     */
    return ((caddr_t)proc0paddr + USPACE - 64);
}
/*
 * cpu_startup - machine-dependent startup for LUNA-88K.
 *
 * Runs once, early in autoconfiguration, with the kernel pmap already
 * bootstrapped.  It wires up the kernel message buffer, probes the
 * machine model, allocates the kernel's dynamic tables and the buffer
 * cache, and carves out the exec/physio submaps.  Statement order
 * matters throughout: the VM reservations below rely on addresses
 * hardwired earlier in pmap_bootstrap()/luna88k_bootstrap().
 */
void
cpu_startup()
{
	caddr_t v;			/* base of allocsys()-managed tables */
	int sz, i;
	vsize_t size;
	int base, residual;		/* buffer-cache page distribution */
	vaddr_t minaddr, maxaddr, uarea_pages;

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in luna88k_bootstrap() to compensate,
	 * so the physical pages starting at avail_end are ours to claim.
	 * NOTE(review): msgbufp is cast to paddr_t yet passed as the VA
	 * argument of pmap_kenter_pa() -- presumably msgbufp holds a fixed
	 * virtual address here; confirm against the port's pmap headers.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_kenter_pa((paddr_t)msgbufp + i * NBPG,
		    avail_end + i * NBPG, VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/* Determine the machine type from FUSE ROM data */
	get_fuse_rom_data();
	if (strncmp(fuse_rom_data, "MNAME=LUNA88K+", 14) == 0) {
		machtype = LUNA_88K2;
	}

	/* Determine the 'auto-boot' device from NVRAM data */
	get_nvram_data();
	get_autoboot_device();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Check front DIP switch setting; each switch position maps to
	 * a boothowto flag below.
	 */
	printf("dipsw = 0x%x\n", dipswitch);

	/* Check DIP switch 1 - 1 */
	if ((0x8000 & dipswitch) == 0) {
		boothowto |= RB_SINGLE;
	}

	/* Check DIP switch 1 - 3 */
	if ((0x2000 & dipswitch) == 0) {
		boothowto |= RB_ASKNAME;
	}

	/* Check DIP switch 1 - 4 */
	if ((0x1000 & dipswitch) == 0) {
		boothowto |= RB_CONFIG;
	}

	/*
	 * Check frame buffer depth.  Only 0 (none), 1, 4 and 8 planes are
	 * supported; anything else is treated as "no frame buffer".
	 */
	switch (hwplanebits) {
	case 0:				/* No frame buffer */
	case 1:
	case 4:
	case 8:
		break;
	default:
		printf("unexpected frame buffer depth = %d\n", hwplanebits);
		hwplanebits = 0;
		break;
	}

#if 0 /* just for test */
	/*
	 * Get boot arguments
	 */
	{
		char buf[256];
		char **p = (volatile char **)0x00001120;

		strncpy(buf, *p, 256);
		if (buf[255] != '\0')
			buf[255] = '\0';
		printf("boot arg: (0x%x) %s\n", *p, buf);
	}
#endif

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(NULL) sizes the tables; the second call lays them out
	 * inside the freshly allocated region, and the result is sanity
	 * checked against the predicted size.
	 */
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Grab UADDR virtual address.  The mapping is reserved PROT_NONE
	 * so nothing else lands on the fixed u-area address; if uvm_map
	 * moved it, the address was already taken and we cannot continue.
	 */
	uarea_pages = UADDR;
	uvm_map(kernel_map, (vaddr_t *)&uarea_pages, USPACE, NULL,
	    UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0));
	if (uarea_pages != UADDR)
		panic("uarea_pages %lx: UADDR not free", uarea_pages);

	/*
	 * Grab the OBIO space that we hardwired in pmap_bootstrap;
	 * same fixed-address reservation trick as UADDR above.
	 */
	obiova = OBIO_START;
	uvm_map(kernel_map, (vaddr_t *)&obiova, OBIO_SIZE, NULL,
	    UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0));
	if (obiova != OBIO_START)
		panic("obiova %lx: OBIO not free", obiova);

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0)))
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t)buffers + (i * MAXBSIZE);
		curbufsize = PAGE_SIZE * ((i < residual) ?
		    (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate map for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	printf("avail mem = %ld (%d pages)\n", ptoa(uvmexp.free),
	    uvmexp.free);
	printf("using %d buffers containing %d bytes of memory\n", nbuf,
	    bufpages * PAGE_SIZE);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Initialize the autovectored interrupt list.
	 */
	isrinit();

	/*
	 * Configure the system (only honored when the kernel was built
	 * with BOOT_CONFIG; -c is reported as unsupported otherwise).
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}

	/*
	 * Say hello to the world on LCD.
	 */
	greeting();
}
/*
 * Early initialization, right before main is called.
 *
 * Registers physical memory with UVM, dispatches to the board-specific
 * init routine selected by machineid, and maps/initializes the kernel
 * message buffer.  Order matters: the msgbuf pages are entered only
 * after the board init has run.
 */
void
mvme68k_init(void)
{
	int i;

	/*
	 * Since mvme68k boards can have anything from 4MB of onboard RAM, we
	 * would rather set the pager_map_size at runtime based on the amount
	 * of onboard RAM.
	 *
	 * Set pager_map_size to half the size of onboard RAM, up to a
	 * maximum of 16MB.
	 * (Note: Just use ps_end here since onboard RAM starts at 0x0)
	 */
	pager_map_size = phys_seg_list[0].ps_end / 2;
	if (pager_map_size > (16 * 1024 * 1024))
		pager_map_size = 16 * 1024 * 1024;

	/*
	 * Tell the VM system about available physical memory.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
			/*
			 * Segment has been completely gobbled up.
			 */
			continue;
		}
		/*
		 * Note the index of the mem cluster is the free
		 * list we want to put the memory on (0 == default,
		 * 1 == VME).  There can only be two.
		 */
		uvm_page_physload(atop(phys_seg_list[i].ps_start),
		    atop(phys_seg_list[i].ps_end),
		    atop(phys_seg_list[i].ps_start),
		    atop(phys_seg_list[i].ps_end), i);
	}

	/*
	 * Board-specific setup.  The 162/167/172/177 variants share one
	 * init routine; the case labels deliberately fall through to the
	 * common mvme1xx_init() call when any of those boards is configured.
	 */
	switch (machineid) {
#ifdef MVME147
	case MVME_147:
		mvme147_init();
		break;
#endif
#ifdef MVME167
	case MVME_167:
#endif
#ifdef MVME162
	case MVME_162:
#endif
#ifdef MVME177
	case MVME_177:
#endif
#ifdef MVME172
	case MVME_172:
#endif
#if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
		/* FALLTHROUGH from any of the 1x2/1x7 labels above */
		mvme1xx_init();
		break;
#endif
	default:
		/* machineid set by the bootloader doesn't match this kernel */
		panic("%s: impossible machineid", __func__);
	}

	/*
	 * Initialize error message buffer (at end of core).
	 * Wired read/write mappings from msgbufaddr (VA) to msgbufpa (PA),
	 * one page at a time.
	 */
	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    msgbufpa + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
	pmap_update(pmap_kernel());
}
/*
 * Machine-dependent startup code (sparc).
 *
 * Moves the message buffer from its temporary boot mapping at KERNBASE
 * to its permanent home at MSGBUF_VA, creates the exec-argument submap,
 * sets up the DVMA resource extent used for physio, and initializes
 * buffers and early interrupt handlers.  The remap sequence (extract,
 * kremove, map) must happen in exactly this order.
 */
void
cpu_startup()
{
#ifdef DEBUG
	/* Silence pmap debugging output for the duration of startup. */
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	paddr_t msgbufpa;
	extern struct user *proc0paddr;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	if (CPU_ISSUN4M) {
		extern int stackgap_random;

		stackgap_random = STACKGAP_RANDOM_SUN4M;
	}

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &msgbufpa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Re-initialize the message buffer.
	 * NOTE(review): on sun4 the buffer apparently starts one 4KB page
	 * into the mapping (the CPU_ISSUN4 offset) -- presumably to skip a
	 * reserved page; confirm against the port's msgbuf layout.
	 */
	initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)),
	    MSGBUFSIZE);

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.  The DVMA window location depends on the CPU
	 * generation (sun4m vs. older).
	 */
	dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
	dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
	dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
	    M_DEVBUF, NULL, 0, EX_NOWAIT);
	if (dvmamap_extent == NULL)
		panic("unable to allocate extent for dvma");

#ifdef DEBUG
	/* Restore the caller's pmap debug level. */
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Early interrupt handlers initialization */
	intr_init();
}