/*
 * Primary entry point for the CD-ROM boot loader: bring up the PDC
 * firmware interface and console, build the "<device>:/bsd.rd" kernel
 * path, load the ramdisk kernel, and jump to it.  Does not return on
 * success.
 */
void
boot(dev_t dev)
{
	u_long marks[MARK_MAX];		/* load marks filled in by loadfile() */
	char path[128];

	pdc_init();			/* set up the PDC firmware call interface */
	cninit();			/* attach the console */

	/*
	 * Resolve the boot device name into `path', then append the
	 * ramdisk kernel filename (9 bytes copies ":/bsd.rd" plus NUL).
	 * NOTE(review): no bounds check against sizeof(path) — assumes
	 * devboot() emits a short device string; confirm against devboot().
	 */
	devboot(dev, path);
	strncpy(path + strlen(path), ":/bsd.rd", 9);
	printf(">> OpenBSD/" MACHINE " CDBOOT 0.1\n"
	    "booting %s: ", path);

	marks[MARK_START] = (u_long)DEFAULT_KERNEL_ADDRESS;
	if (!loadfile(path, marks, LOAD_KERNEL)) {
		/* Convert the end mark from an address into an aligned size. */
		marks[MARK_END] = ALIGN(marks[MARK_END] -
		    (u_long)DEFAULT_KERNEL_ADDRESS);
		fcacheall();	/* flush caches so the loaded image is coherent */
		/*
		 * Clear %cr17; the write is issued twice — presumably to
		 * cover both slots of the instruction-address space queue
		 * (same idiom as run_loadfile); confirm against the
		 * PA-RISC architecture manual.
		 */
		__asm("mtctl %r0, %cr17");
		__asm("mtctl %r0, %cr17");
		/* Jump to the kernel with the hppa boot-argument convention. */
		(*(startfuncp)(marks[MARK_ENTRY]))((int)(long)pdc, 0, bootdev,
		    marks[MARK_END], BOOTARG_APIVER, BOOTARG_LEN,
		    (caddr_t)BOOTARG_OFF);
		/* not reached */
	}
}
/* * Bootstraps the FPU. */ void hppa_fpu_bootstrap(u_int ccr_enable) { u_int32_t junk[2]; u_int32_t vers[2]; extern u_int hppa_fpu_nop0; extern u_int hppa_fpu_nop1; /* See if we have a present and functioning hardware FPU. */ fpu_present = (ccr_enable & HPPA_FPUS) == HPPA_FPUS; /* Initialize the FPU and get its version. */ if (fpu_present) { /* * To somewhat optimize the emulation * assist trap handling and context * switching (to save them from having * to always load and check fpu_present), * there are two instructions in locore.S * that are replaced with nops when * there is a hardware FPU. */ hppa_fpu_nop0 = OPCODE_NOP; hppa_fpu_nop1 = OPCODE_NOP; fcacheall(); /* * We track what process has the FPU, * and how many times we have to swap * in and out. */ /* * The PA-RISC 1.1 Architecture manual is * pretty clear that the copr,0,0 must be * wrapped in double word stores of fr0, * otherwise its operation is undefined. */ __asm volatile( " ldo %0, %%r22 \n" " fstds %%fr0, 0(%%r22) \n" " ldo %1, %%r22 \n" " copr,0,0 \n" " fstds %%fr0, 0(%%r22) \n" : "=m" (junk), "=m" (vers) : : "r22"); /* * Now mark that no process has the FPU, * and disable it, so the first time it * gets used the process' state gets * swapped in. */ fpu_csw = 0; fpu_cur_uspace = 0; mtctl(ccr_enable & (CCR_MASK ^ HPPA_FPUS), CR_CCR); } #ifdef FPEMUL else
/*
 * Transfer control to a kernel image already placed in memory by
 * loadfile(): `marks' holds the load marks (entry point, end address)
 * and `howto' carries the boot flags through to the kernel.  Does not
 * return.
 */
void
run_loadfile(u_long *marks, int howto)
{
	fcacheall();	/* flush I/D caches so the loaded image is coherent */
	/*
	 * Clear %cr17; issued twice — presumably once per slot of the
	 * instruction-address space queue (TODO confirm against the
	 * PA-RISC architecture manual).
	 */
	__asm("mtctl %r0, %cr17");
	__asm("mtctl %r0, %cr17");
	/* stack and the gung is ok at this point, so, no need for asm setup */
	(*(startfuncp)(marks[MARK_ENTRY]))((int)pdc, howto, bootdev,
	    marks[MARK_END], BOOTARG_APIVER, BOOTARG_LEN,
	    (caddr_t)BOOTARG_OFF);
	/* not reached */
}
/*
 * Machine-dependent final stage of exec'ing a loaded kernel image:
 * optionally hex-dump the image interactively (EXEC_DEBUG), flush the
 * caches, and jump to the entry point in `xp' with the hppa
 * boot-argument convention.  Does not return.
 */
void
machdep_exec(struct x_param *xp, int howto, void *loadaddr)
{
#ifdef EXEC_DEBUG
	extern int debug;
	int i;
#endif
	size_t ac = BOOTARG_LEN;	/* boot argument area length */
	void *av = (void *)BOOTARG_OFF;	/* boot argument area address */

#ifdef notyet
	makebootargs(av, &ac);
#endif

#ifdef EXEC_DEBUG
	if (debug) {
		/*
		 * Interactive hex dump of the words at the entry point;
		 * 8 words per line, any key other than space aborts.
		 */
		printf("ep=0x%x [", xp->xp_entry);
		for (i = 0; i < 10240; i++) {
			if (!(i % 8)) {
				printf("\b\n%p:", &((u_int *)xp->xp_entry)[i]);
				if (getchar() != ' ')
					break;
			}
			printf("%x,", ((int *)xp->xp_entry)[i]);
		}
		printf("\b\b ]\n");
	}
#endif

	fcacheall();	/* flush caches so the loaded image is coherent */
	/*
	 * Clear %cr17; issued twice — presumably once per slot of the
	 * instruction-address space queue (same idiom as run_loadfile).
	 */
	__asm("mtctl %r0, %cr17");
	__asm("mtctl %r0, %cr17");
	/* stack and the gung is ok at this point, so, no need for asm setup */
	(*(startfuncp)(xp->xp_entry))
	    ((int)pdc, howto, bootdev, xp->xp_end, BOOTARG_APIVER, ac, av);
	/* not reached */
}
/*
 * Dump memory into the swap partition of the primary boot device.  The
 * config code will compute the offset from the start of the disk, rather
 * than the start of the swap partition since this code does not know
 * anything about partitions.
 *
 * Never returns normally: on completion or on any failure it resets the
 * machine via a broadcast CMD_RESET.
 */
void
pdcdump(void)
{
	extern int dumpsize, dumpoffset;
	static int pdcbuf[33];
	/* PDC wants an 8-byte-aligned return buffer; align within pdcbuf. */
	int *pdcret = (int *) ((((u_int)pdcbuf) + 7) & ~7);
	int (*btiodc)(struct iomod*, ...);	/* boot device IODC entry point */

	if (dumpoffset == -1)	/* initialized? */
		goto bad;

	fcacheall();	/* flush caches before handing memory to firmware I/O */
	pdc = PAGE0->mem_pdc;
	if ((btiodc = BT_IODC) == 0)
		goto bad;
#ifdef USELEDS
	ledcontrol(0xAA, 0x55, 0);	/* show "dumping" pattern */
#endif
	/* Read the initialization code */
	if ((*pdc)(PDC_IODC, PDC_IODC_READ, pdcret, BT_HPA,
	    IODC_INIT, (int) btiodc, IODC_MAXSIZE) < 0)
		goto bad;
	/* Initialize the module and the device */
	if ((*btiodc)(BT_HPA, IODC_INIT_ALL, BT_SPA, BT_LAYER,
	    pdcret, 0,0,0,0) < 0)
		goto bad;
	/* Read the io code */
	if ((*pdc)(PDC_IODC, PDC_IODC_READ, pdcret, BT_HPA,
	    IODC_IO, (int) btiodc, IODC_MAXSIZE) < 0)
		goto bad;

	/*
	 * Write memory to disk; write it all at once if possible,
	 * o/w loop until done.
	 *
	 * PDC restrictions:
	 *	dumpsize   - multiple of 2K bytes (guaranteed)
	 *	dumpoffset - 2K byte aligned (not enforced!)
	 */
	{
		register u_int rdloc = 0;			/* memory read offset */
		register u_int wrloc = dumpoffset * DEV_BSIZE;	/* disk write offset */
		register u_int tcnt = ctob(dumpsize);		/* bytes remaining */
		register u_int n;

		do {
#ifdef USELEDS
			ledcontrol(0, 0, 0xFF);	/* toggle LEDs each chunk */
#endif
			if ((*btiodc)(BT_HPA,IODC_IO_BOOTOUT,BT_SPA,BT_LAYER,
			    pdcret,wrloc,rdloc,MIN(tcnt,MAXIOSIZ))<0)
				goto bad;
			n = *pdcret;	/* IODC returns bytes actually written */
			rdloc += n;
			wrloc += n;
			tcnt -= n;
		} while ((int)tcnt > 0);
	}
#ifdef USELEDS
	ledcontrol(0, 0xFF, 0);	/* show "done" pattern */
#endif
bad:
	/* reboot */
	(*(struct iomod *)LBCAST_ADDR).io_command = CMD_RESET;
}
/*
 * Load a boot program at `jefboot' (using the PDC) and execute it.
 * On failure, the hardware is reset causing the old, slow reboot.
 *
 * The grand assumption we make is that the boot device listed in
 * PAGE0 also houses a boot program.  We also assume that no one has
 * screwed with PAGE0 (which should be safe).
 *
 * This routine must be called in real mode, with interrupts disabled.
 * For a complete list of constraints, check the routine that calls it.
 */
void
pdcboot(
	int interactive,		/* flag passed through to the boot program */
	void (*jefboot)(int))		/* load address; overwritten by the image */
{
	/* Extra slop so both buffers can be aligned below. */
	char iplbuf[sizeof(struct ipl_image) + 64 + IOREQALN];
	struct ipl_image *iplptr;
	int *pdcret, pdcbuf[33];
	int (*btiodc)(struct iomod*, ...);	/* boot device IODC entry point */

	fcacheall();

	/*
	 * Get our PDC entry point and pointer to IODC boot code.
	 */
	pdc = PAGE0->mem_pdc;
	if ((btiodc = BT_IODC) == 0)
		goto bad;

	/*
	 * Buffer alignment: IPL image on a 64-byte boundary, PDC return
	 * area on an 8-byte boundary.
	 */
	iplptr = (struct ipl_image *) ((((u_int)iplbuf) + 63) & ~63);
	pdcret = (int *) ((((u_int)pdcbuf) + 7) & ~7);

	/*
	 * Read the boot device initialization code into memory,
	 * Initialize the boot module/device, and
	 * Load the boot device I/O code into memory.
	 */
	if ((*pdc)(PDC_IODC, PDC_IODC_READ, pdcret, BT_HPA,
	    IODC_INIT, btiodc, IODC_MAXSIZE) < 0)
		goto bad;
	if ((*btiodc)(BT_HPA, IODC_INIT_ALL, BT_SPA, BT_LAYER,
	    pdcret, 0,0,0,0) < 0)
		goto bad;
	if ((*pdc)(PDC_IODC, PDC_IODC_READ, pdcret, BT_HPA,
	    IODC_IO, btiodc, IODC_MAXSIZE) < 0)
		goto bad;

	/*
	 * Load the IPL header from the disk, followed by the boot program.
	 * Calculate the entry point into the boot code.
	 */
	READLOOP(0, (u_int)iplptr, roundup(sizeof(struct ipl_image),IOREQALN));
	READLOOP(iplptr->ipl_addr, (u_int)jefboot, iplptr->ipl_size);
	jefboot = (void (*)(int)) ((u_int)jefboot + iplptr->ipl_entry);

	fcacheall();	/* for split I & D cache's (e.g. hp720) */
#ifndef MACH_KERNEL	/* TLB config data not saved at boot time */
	pgtlbs();	/* emulate PDC behavior as best we can */
#endif

	/*
	 * Launch the boot code!
	 */
	(*jefboot)(interactive);
	/*NOTREACHED*/

bad:
	/*
	 * If anything went wrong above, we end up here.  Since we trashed
	 * kernel memory (starting at `jefboot'), all we can do here is a
	 * hard reboot... which is what we would have done anyway!
	 */
	BT_IODC = (int (*)(struct iomod*, ...)) 0;
	(*(struct iomod *)LBCAST_ADDR).io_command = CMD_RESET;
}
/*
 * Early machine-dependent kernel initialization for hppa: query PDC for
 * cache/TLB/coprocessor configuration, purge TLBs and flush caches,
 * carve kernel data structures out of the memory after `end', bootstrap
 * the pmap, and enable the HW TLB walker and coprocessors.
 */
void
hppa_init()
{
	extern int kernel_text, end;	/* linker-provided kernel bounds */
	struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
	struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
	vm_offset_t v, vstart, vend;
	register int pdcerr;
	int usehpt;			/* nonzero if HW TLB walker is usable */

	/* init PDC iface, so we can call em easy */
	pdc_init();

	/* calculate cpu speed */
	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
	delay_init();

	/*
	 * get cache parameters from the PDC
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
	    &pdc_cache)) < 0) {
#ifdef DIAGNOSTIC
		printf("Warning: PDC_CACHE call Ret'd %d\n", pdcerr);
#endif
	}
	/* cc_line is in units of 16 bytes */
	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_size = pdc_cache.dc_size;
	dcache_stride = pdc_cache.dc_stride;
	icache_stride = pdc_cache.ic_stride;

	/*
	 * purge TLBs and flush caches
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL) < 0)
		printf("WARNING: BTLB purge failed\n");
	ptlball();
	fcacheall();

	/* calculate HPT size: one hash entry per physical page */
	hpt_hashsize = PAGE0->imm_max_mem / NBPG;
	mtctl(hpt_hashsize - 1, CR_HPTMASK);

	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    !pdc_hwtlb.min_size && !pdc_hwtlb.max_size) {
		printf("WARNING: no HW tlb walker\n");
		usehpt = 0;
	} else {
		usehpt = 1;
#ifdef DEBUG
		printf("hwtlb: %u-%u, %u/", pdc_hwtlb.min_size,
		    pdc_hwtlb.max_size, hpt_hashsize);
#endif
		/* clamp the hash size into the range the walker supports */
		if (hpt_hashsize > pdc_hwtlb.max_size)
			hpt_hashsize = pdc_hwtlb.max_size;
		else if (hpt_hashsize < pdc_hwtlb.min_size)
			hpt_hashsize = pdc_hwtlb.min_size;
#ifdef DEBUG
		printf("%u (0x%x)\n", hpt_hashsize,
		    hpt_hashsize * sizeof(struct hpt_entry));
#endif
	}

	totalphysmem = PAGE0->imm_max_mem / NBPG;
	resvmem = ((vm_offset_t)&kernel_text) / NBPG;
	vstart = hppa_round_page(&end);		/* first free page after kernel */
	vend = VM_MAX_KERNEL_ADDRESS;

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vm_offset_t)PAGE0->imm_max_mem,
	    EX_NOWAIT))
		panic("cannot reserve main memory");

	/*
	 * Allocate space for system data structures.  We are given
	 * a starting virtual address and we return a final virtual
	 * address; along the way we set each data structure pointer.
	 *
	 * NOTE(review): the comment below about calling allocsys() twice
	 * describes a pattern not used here — this function allocates
	 * in a single pass with the valloc() macro.
	 */
	v = vstart;

/* Carve `num' objects of `type' for `name' out of the bump pointer v. */
#define valloc(name, type, num) \
	(name) = (type *)v; v = (vm_offset_t)((name)+(num))

#ifdef REAL_CLISTS
	valloc(cfree, struct cblock, nclist);
#endif
	valloc(callout, struct callout, ncallout);
	nswapmap = maxproc * 2;
	valloc(swapmap, struct map, nswapmap);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

#ifndef BUFCACHEPERCENT
#define BUFCACHEPERCENT 10
#endif /* BUFCACHEPERCENT */
	if (bufpages == 0)
		bufpages = totalphysmem / BUFCACHEPERCENT / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	/* Restrict to at most 70% filled kvm */
	if (nbuf * MAXBSIZE >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10)
		nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    MAXBSIZE * 7 / 10;

	/* More buffer pages than fits into the buffers is senseless. */
	if (bufpages > nbuf * MAXBSIZE / CLBYTES)
		bufpages = nbuf * MAXBSIZE / CLBYTES;

	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) & ~1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
#undef valloc

	/* Zero everything just carved out, then advance the free pointer. */
	bzero ((void *)vstart, (v - vstart));
	vstart = v;

	pmap_bootstrap(&vstart, &vend);
	physmem = totalphysmem - btoc(vstart);

	/* alloc msgbuf */
	if (!(msgbufp = (void *)pmap_steal_memory(sizeof(struct msgbuf),
	    NULL, NULL)))
		panic("cannot allocate msgbuf");
	msgbufmapped = 1;

#ifdef DEBUG
	printf("mem: %x+%x, %x\n", physmem, resvmem, totalphysmem);
#endif
	/* Turn on the HW TLB assist */
	if (usehpt) {
		if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_TLB,
		    PDC_TLB_CONFIG, &pdc_hwtlb, hpt_table,
		    sizeof(struct hpt_entry) * hpt_hashsize,
		    PDC_TLB_WORD3)) < 0) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			    pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB(%d entries at 0x%x) initialized (%d)\n",
			    hpt_hashsize, hpt_table, pdcerr);
	}

	/*
	 * Locate any coprocessors and enable them by setting up the CCR.
	 * SFU's are ignored (since we dont have any).  Also, initialize
	 * the floating point registers here.
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
	    &pdc_coproc)) < 0)
		printf("WARNING: PDC_COPROC call Ret'd %d\n", pdcerr);
	else {
#ifdef DEBUG
		printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable,
		    pdc_coproc.ccr_present);
#endif
	}
	/*
	 * NOTE(review): ccr_enable is used even when the PDC_COPROC call
	 * failed above — it may be uninitialized on that path; verify.
	 */
	copr_sfu_config = pdc_coproc.ccr_enable;
	mtctl(copr_sfu_config & CCR_MASK, CR_CCR);
/*
	fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
	mtctl(CR_CCR, 0);
*/

	/*
	 * Clear the FAULT light (so we know when we get a real one)
	 * PDC_COPROC apparently turns it on (for whatever reason).
	 */
	pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
	(void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef DDB
	ddb_init();
#endif
#ifdef DEBUG
	printf("hppa_init: leaving\n");
#endif
	kernelmapped++;
}
/*
 * Early machine-dependent initialization for the hp700 (Mach): capture
 * the boot command line, identify console and boot devices via PDC,
 * read cache/TLB/model configuration, purge TLBs and flush caches,
 * install the HPMC handler branch, configure devices, bootstrap the VM
 * system, and set up the kernel's block/page mappings.
 */
void
hp700_init(int argc, char *argv[], char *envp[])
{
	int hpmc_br_instr;		/* branch instruction for HPMC vector */
	int *p = (int *) i_hpmach_chk;	/* HPMC interrupt vector entry */
	register struct mapping *mp;
	int i;
	vm_offset_t addr;
	int pdcerr;
	vm_offset_t first_page;
	struct pdc_coproc pdc_coproc;
	struct pdc_cache pdc_cache;
	struct pdc_model pdc_model;
	struct pdc_iodc_read pdc_iodc;
	extern int crashdump(void);
#ifdef BTLB
	struct pdc_btlb pdc_btlb;
#endif
#ifdef HPT
	struct pdc_hwtlb pdc_hwtlb;
	extern struct hpt_entry *hpt_table;
	extern int usehpt;
#endif

	first_page = move_bootstrap();

	/*
	 * Copy the boot command line; its location depends on how many
	 * arguments the boot loader passed.
	 * NOTE(review): `i++ <= BOOT_LINE_LENGTH' permits BOOT_LINE_LENGTH+1
	 * copied bytes plus the terminating NUL — possible off-by-one if
	 * boot_string is exactly BOOT_LINE_LENGTH bytes; verify its size.
	 */
	if (argc >= 1 && argc <= 4) {
		char *btstring = boot_string;
		char *src = (argc == 1 ? envp[5] : argv[2]);

		i = 0;
		while (*src != '\0' && i++ <= BOOT_LINE_LENGTH)
			*btstring++ = *src++;
		*btstring = '\0';
	}

	pdc = PAGE0->mem_pdc;

	delay_init();
	pdc_console_init();

	printf("%s", version);

	/*
	 * Determine what the boot program is using as its console
	 * so that we can use the same device.
	 */
	pdcerr = (*pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
	    PAGE0->mem_cons.pz_hpa, PDC_IODC_INDEX_DATA,
	    &cons_iodc, sizeof(cons_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_cons.pz_dp, (char *)&cons_dp,
		    sizeof(struct device_path));
	else
		printf("Warning: can't id console boot device (PDC Ret'd %d)\n",
		    pdcerr);

	/*
	 * Read boot device from PROM
	 */
	pdcerr = (*PAGE0->mem_pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
	    PAGE0->mem_boot.pz_hpa, PDC_IODC_INDEX_DATA,
	    &boot_iodc, sizeof(boot_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_boot.pz_dp, (char *)&boot_dp,
		    sizeof(struct device_path));
	else
		printf("Warning: can't id boot device (PDC Ret'd %d)\n",
		    pdcerr);

	/*
	 * Setup the transfer of control addr to point to the crash dump
	 * initialization code.
	 */
	PAGE0->ivec_toc = crashdump;

	/*
	 * get cache parameters from the PDC
	 */
	(*PAGE0->mem_pdc)(PDC_CACHE, PDC_CACHE_DFLT, &pdc_cache);

	/* cc_line is in units of 16 bytes */
	dcache_line_size = pdc_cache.dc_conf.cc_line * 16;
	dcache_line_mask = dcache_line_size - 1;
	dcache_block_size = dcache_line_size * pdc_cache.dc_conf.cc_block;
	dcache_size = pdc_cache.dc_size;
	dcache_base = pdc_cache.dc_base;
	dcache_stride = pdc_cache.dc_stride;
	dcache_count = pdc_cache.dc_count;
	dcache_loop = pdc_cache.dc_loop;

	icache_line_size = pdc_cache.ic_conf.cc_line * 16;
	icache_line_mask = icache_line_size - 1;
	icache_block_size = icache_line_size * pdc_cache.ic_conf.cc_block;
	icache_base = pdc_cache.ic_base;
	icache_stride = pdc_cache.ic_stride;
	icache_count = pdc_cache.ic_count;
	icache_loop = pdc_cache.ic_loop;

	/*
	 * purge TLBs and flush caches
	 */
	ptlball(&pdc_cache);

#ifdef BTLB
	/*
	 * get block tlb information for clearing
	 */
	pdcerr = (*pdc)(PDC_BLOCK_TLB, PDC_BTLB_DEFAULT, &pdc_btlb);
	if (pdcerr != 0)
		printf("Warning: PDC_BTLB call Ret'd %d\n", pdcerr);

	/* The combined-BTLB entry count distinguishes the CPU chip set. */
	switch (pdc_btlb.finfo.num_c) {
	/* S-Chip specific */
	case 0:
		cputype = CPU_PCXS;
		for (i = 0; i < pdc_btlb.finfo.num_i; i++)
			purge_block_itlb(i);
		for (i = 0; i < pdc_btlb.finfo.num_d; i++)
			purge_block_dtlb(i);
		break;
	/* L-Chip specific */
	case 8:
		cputype = CPU_PCXL;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_L_block_ctlb(i);
		break;
	/* T-Chip specific */
	case 16:
		cputype = CPU_PCXT;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_block_ctlb(i);
		break;
	default:
		panic("unrecognized block-TLB, cannot purge block TLB(s)");
		/* NOTREACHED */
	}
#endif

	fcacheall();

	/*
	 * get the cpu type
	 */
	(*PAGE0->mem_pdc)(PDC_MODEL, PDC_MODEL_INFO, &pdc_model);
	machtype = pdc_model.hvers >> 4;
	cpuinfo(&pdc_cache);

	if (dcache_line_size != CACHE_LINE_SIZE)
		printf("WARNING: data cache line size = %d bytes, %s\n",
		    dcache_line_size, "THIS IS *VERY* BAD!");

	/*
	 * Get the instruction to do branch to PDC_HPMC from PDC.  If
	 * successful, then insert the instruction at the beginning
	 * of the HPMC handler.
	 */
	if ((*PAGE0->mem_pdc)(PDC_INSTR, PDC_INSTR_DFLT, &hpmc_br_instr) == 0)
		p[0] = hpmc_br_instr;
	else
		p[0] = 0;

	/*
	 * Now compute the checksum of the hpmc interrupt vector entry
	 */
	p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);

	/*
	 * setup page size for Mach
	 */
	page_size = HP700_PGBYTES;
	vm_set_page_size();

	/*
	 * configure the devices including memory. Passes back size of
	 * physical memory in mem_size.
	 */
	busconf();

	/*
	 * Zero out BSS of kernel before doing anything else. The location
	 * pointed to by &edata is included in the data section.
	 */
	bzero((char*)((vm_offset_t) &edata + 4), (vm_offset_t) &end -
	    (vm_offset_t) &edata - 4);

	/*
	 * Locate any coprocessors and enable them by setting up the CCR.
	 * SFU's are ignored (since we dont have any). Also, initialize
	 * the floating point registers here.
	 *
	 * NOTE(review): the mtctl() argument order here (register first,
	 * value second) is the reverse of hppa_init()'s usage elsewhere in
	 * this file — presumably the two bodies come from different trees
	 * with different mtctl definitions; confirm against the local
	 * mtctl prototype.
	 */
	if ((pdcerr = (*pdc)(PDC_COPROC, PDC_COPROC_DFLT, &pdc_coproc)) < 0)
		printf("Warning: PDC_COPROC call Ret'd %d\n", pdcerr);
	copr_sfu_config = pdc_coproc.ccr_enable;
	mtctl(CR_CCR, copr_sfu_config & CCR_MASK);

	fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
	mtctl(CR_CCR, 0);

	/*
	 * Clear the FAULT light (so we know when we get a real one)
	 * PDC_COPROC apparently turns it on (for whatever reason).
	 */
	pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
	(void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef TIMEX
	/*
	 * Enable the quad-store instruction.
	 */
	pdcerr = (*pdc)(PDC_MODEL, PDC_MODEL_ENSPEC, &pdc_model,
	    pdc_model.pot_key);
	if (pdcerr < 0)
		printf("Warning: PDC enable FP quad-store Ret'd %d\n",
		    pdcerr);
#endif

	/*
	 * Intialize the Event Trace Analysis Package
	 * Static Phase: 1 of 2
	 */
	etap_init_phase1();

	/*
	 * on the hp700 the value in &etext is a pointer to the last word
	 * in the text section. Similarly &edata and &end are pointers to
	 * the last words in the section. We want to change this so that
	 * these pointers point past the sections that they terminate.
	 */
	text_start = trunc_page((vm_offset_t) &start_text);
	text_end = round_page((vm_offset_t) &etext + 4);

	/*
	 * before we go to all the work to initialize the VM see if we really
	 * linked the image past the end of the PDC/IODC area.
	 */
	if (text_start < 0x10800)
		panic("kernel text mapped over PDC and IODC memory");

	/*
	 * find ranges of physical memory that isn't allocated to the kernel
	 */
	avail_start = round_page(first_page);
	first_avail = avail_start;
	avail_end = trunc_page(mem_size);

	/*
	 * bootstrap the rest of the virtual memory system
	 */
#ifdef MAXMEMBYTES
	if ((avail_end - avail_start) > MAXMEMBYTES) {
		mem_size = trunc_page(MAXMEMBYTES);
		avail_end = mem_size;
	}
#endif

#ifdef HPT
	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (usehpt &&
	    !((*pdc)(PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) == 0 &&
	      (pdc_hwtlb.min_size || pdc_hwtlb.max_size)))
		usehpt = 0;
#endif

	pmap_bootstrap(&avail_start, &avail_end);

	/*
	 * set limits on virtual memory and kernel equivalenced memory
	 */
	virtual_avail = avail_end;
	virtual_end = trunc_page(VM_MAX_KERNEL_ADDRESS);

	/*
	 * pmap_bootstrap allocated memory for data structures that must
	 * be equivalently mapped.
	 */
	equiv_end = (long) round_page((vm_offset_t) &end);
	io_end = 0xF0000000;	/* XXX */

	/*
	 * Do block mapping. We are mapping from 0, up through the first
	 * power of 2 address above the end of the equiv region. This
	 * means some memory gets block mapped that should not be, but
	 * so be it (we make the text writable also :-)). We do this to
	 * conserve block entries since we hope to use them for other
	 * purposes (someday).
	 */
	addr = avail_start;
	if (addr != 1 << log2(addr))
		addr = 1 << log2(addr);

#ifdef BTLB
	if(pdc_btlb.finfo.num_c)
		printf("%d BTLB entries found. Block mapping up to 0x%x (0x%x)\n",
		    pdc_btlb.finfo.num_c, addr, avail_start);

	/*
	 * XXX L-CHIP vs T-CHIP vs S-CHIP difference in Block TLB insertion.
	 */
	switch (pdc_btlb.finfo.num_c) {
	/* S-CHIP */
	case 0:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_ICACHE);
		pmap_block_map(0, addr, VM_PROT_READ|VM_PROT_WRITE,
		    0, BLK_DCACHE);
		break;
	/* L-CHIP */
	case 8:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_LCOMBINED);
		break;
	/* T-CHIP */
	case 16:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_COMBINED);
		break;
	default:
		panic("unrecognized block-TLB, cannot map kernel");
		/* NOTREACHED */
	}
#endif

#ifdef HPT
	/*
	 * Turn on the HW TLB assist.
	 */
	if (usehpt) {
		pdcerr = (*pdc)(PDC_TLB, PDC_TLB_CONFIG,
		    &pdc_hwtlb, hpt_table,
		    sizeof(struct hpt_entry) * HP700_HASHSIZE,
		    PDC_TLB_WORD3);
		if (pdcerr) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			    pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB initialized (%d entries at 0x%x)\n",
			    HP700_HASHSIZE, hpt_table);
	}
#endif

	/*
	 * map the PDC and IODC area for kernel read/write
	 * XXX - should this be read only?
	 */
	(void) pmap_map(0, 0, text_start, VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * map the kernel text area.
	 */
#if KGDB
	(void) pmap_map(text_start, text_start, text_end,
	    VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE);
#else
	(void) pmap_map(text_start, text_start, text_end,
	    VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the data section of the kernel
	 */
	(void) pmap_map(text_end, text_end, avail_start,
	    VM_PROT_READ | VM_PROT_WRITE);

#ifndef IO_HACK
	/*
	 * map the I/O pages
	 * NOTE(review): mapping from trunc_page(io_size) to 0 looks
	 * inverted (end below start) — confirm pmap_map's argument
	 * semantics before touching this.
	 */
	(void) pmap_map(trunc_page(io_size), trunc_page(io_size), 0,
	    VM_PROT_READ | VM_PROT_WRITE);
#endif

#if 0
	/*
	 * map the breakpoint page
	 */
	(void) pmap_map(break_page, break_page, break_page+HP700_PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the interrupt stack red zone.
	 */
	addr = trunc_page((vm_offset_t) &intstack_top);
	(void) pmap_map(addr, addr, addr + PAGE_SIZE, VM_PROT_READ);

	vm_on = 1;
}
/*
 * Attach an STI graphics device: locate its STI ROM, make the sticode
 * region executable (once), copy the seven STI entry-point routines out
 * of ROM into per-unit code space, and call the driver init routine.
 *
 * Returns 1 if the device is (or becomes) alive, 0 on init failure.
 */
int
grfattach(struct hp_device *hd)
{
	register char *rom;			/* STI ROM base */
	register struct sti_entry *ep;		/* per-unit STI entry points */
	register char *cp;			/* destination for loaded code */
	struct modtab *mptr = (struct modtab *)hd->hp_addr;
	struct grf_softc *gp = &grf_softc[hd->hp_unit];
	struct grfdev *gd = &grfdev[hd->hp_unit];
	int devtype;
	static int firstime = 1;		/* page-protect sticode only once */

	if (gp->g_flags & GF_ALIVE)
		return(1);

	/*
	 * Locate STI ROM.
	 * On some machines it may not be part of the HPA space.
	 * On these, busconf will stash the address in m_stirom.
	 */
	rom = (char *)mptr->m_stirom;
	if (rom == 0)
		rom = (char *)mptr->m_hpa;

	/*
	 * Change page protection on `sticode' to KERNEL:rwx USER:rx.
	 * At this time, I dont know if users will be executing these
	 * routines; for now we'll give them permission to do so.
	 */
	if (firstime) {
#ifdef MACH_KERNEL
		pmap_map(STICODE_ALGN, STICODE_ALGN,
		    STICODE_ALGN + (STI_CODESIZ * STI_CODECNT * NGRF),
		    VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE);
#else
		register u_int pg = btop(STICODE_ALGN);
		register u_int pgcnt =
		    (STI_CODESIZ * STI_CODECNT * NGRF + (NBPG-1)) / NBPG;

		while (pgcnt--)
			setaccess(pg++, PDE_AR_URXKW, 0, PDEAR);
#endif
		firstime = 0;
	}

	devtype = STI_DEVTYP(STI_TYPE_BWGRF, rom);

	/*
	 * Set addrs and type for stiload
	 */
	gd->romaddr = rom;
	gd->hpa = (char *)mptr->m_hpa;
	gd->type = devtype;

	/*
	 * Set `ep' to unit's STI routine entry points and `cp' to
	 * page-aligned code space.  Load STI routines and be sure
	 * to flush the (data) cache afterward; we actually flush
	 * both caches as we only call this routine a couple times.
	 *
	 * Each stiload() copies one routine — bounded by consecutive
	 * ROM offsets — and returns the next free code address.
	 */
	ep = &stientry[hd->hp_unit];
	cp = (char *) (STICODE_ALGN + STI_CODESIZ * STI_CODECNT * hd->hp_unit);
	cp = stiload(&ep->init_graph, gd,
	    STI_IGADDR(devtype, rom), STI_SMADDR(devtype, rom), cp);
	cp = stiload(&ep->state_mgmt, gd,
	    STI_SMADDR(devtype, rom), STI_FUADDR(devtype, rom), cp);
	cp = stiload(&ep->font_unpmv, gd,
	    STI_FUADDR(devtype, rom), STI_BMADDR(devtype, rom), cp);
	cp = stiload(&ep->block_move, gd,
	    STI_BMADDR(devtype, rom), STI_STADDR(devtype, rom), cp);
	cp = stiload(&ep->self_test, gd,
	    STI_STADDR(devtype, rom), STI_EHADDR(devtype, rom), cp);
	cp = stiload(&ep->excep_hdlr, gd,
	    STI_EHADDR(devtype, rom), STI_ICADDR(devtype, rom), cp);
	cp = stiload(&ep->inq_conf, gd,
	    STI_ICADDR(devtype, rom), STI_EADDR(devtype, rom), cp);
	fcacheall();	/* loaded code must reach the I-cache before use */

	gd->ep = &stientry[hd->hp_unit];
	gp->g_data = (caddr_t) gd;
	gp->g_sw = &grfsw[0];
	if ((*gp->g_sw->gd_init)(gp) == 0) {
		gp->g_data = (caddr_t) 0;
		return(0);
	}
	gp->g_flags = GF_ALIVE;
	return(1);
}