/*
 * consinit:
 *
 *	Initialize the console before we print anything out.
 *	Maps one cache-inhibited page of the Zilog SCC at physical
 *	0x58000000 into kernel virtual space, attaches it as the
 *	console, and optionally hooks up KGDB/DDB.
 *
 *	Must run before any printf(); consumes one page of
 *	virtual_avail.
 */
void
consinit(void)
{

	/*
	 * Initialize the console before we print anything out.
	 */
	/* 0x58000000: zs (serial) device physical address — mapped RW,
	 * cache-inhibited as required for device registers. */
	physaccess((void*)virtual_avail, (void*)0x58000000,
	    PAGE_SIZE, PG_RW|PG_CI);
	zs_cnattach((void*)virtual_avail);
	virtual_avail += PAGE_SIZE;

#ifdef KGDB
	/* NOTE(review): kgdb_dev is hard-wired to unit 1 here —
	 * presumably the second zs channel; confirm against zs_kgdb. */
	kgdb_dev = 1;
	kgdb_attach((void*)zscngetc, (void*)zscnputc, (void *)0);
	if (boothowto & RB_KDB) {
		/* Connect to the remote debugger now, and route console
		 * character I/O through the KGDB-aware handlers. */
		kgdb_connect(1);
		zscons.cn_putc = zs_kgdb_cnputc;
		zscons.cn_getc = zs_kgdb_cngetc;
	}
#endif
#ifdef DDB
	/* Drop into the in-kernel debugger if requested at boot. */
	if (boothowto & RB_KDB)
		Debugger();
#endif
	sic_enable_int(39, 2, 1, 7, 0);	/* NMI */
}
/*
 * bus_space_map:
 *
 *	Map a range of bus space into kernel virtual address space.
 *	Intio space needs no allocation (it was direct-mapped in
 *	pmap_bootstrap()); DIO and SGC space get fresh, cache-inhibited
 *	KVA from the extio extent map.
 *
 *	Returns 0 on success, or an extent_alloc() error code.
 */
/* ARGSUSED */
int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	vaddr_t va;
	vsize_t off;
	int rv;

	/*
	 * Intio space is already direct-mapped; just translate the
	 * bus address into its kernel virtual equivalent.
	 */
	if (t->bustype == HP300_BUS_SPACE_INTIO) {
		*bshp = (bus_space_handle_t)IIOV(INTIOBASE + bpa);
		return 0;
	}

	if (t->bustype != HP300_BUS_SPACE_DIO &&
	    t->bustype != HP300_BUS_SPACE_SGC)
		panic("%s: bad space tag", __func__);

	/*
	 * Carve a page-aligned chunk of KVA out of the extio extent
	 * map, large enough to cover the possibly unaligned request.
	 */
	off = m68k_page_offset(bpa);
	size = m68k_round_page(off + size);
	rv = extent_alloc(extio_ex, size, PAGE_SIZE, 0,
	    EX_FAST | EX_NOWAIT | (extio_ex_malloc_safe ? EX_MALLOCOK : 0),
	    &va);
	if (rv != 0)
		return rv;

	/*
	 * Enter the mappings; device space is always cache-inhibited
	 * on the hp300.
	 */
	physaccess((void *)va, (void *)bpa, size, PG_RW|PG_CI);

	/* Hand back the handle, restoring the sub-page offset. */
	*bshp = (bus_space_handle_t)(va + off);
	return 0;
}
/* * Allocate/deallocate a cache-inhibited range of kernel virtual address * space mapping the indicated physical address range [pa - pa+size) */ void * iomap(void *pa, int size) { u_long kva; int error; #ifdef DEBUG if (((int)pa & PGOFSET) || (size & PGOFSET)) panic("iomap: unaligned"); #endif error = extent_alloc(extio_ex, size, PAGE_SIZE, 0, EX_FAST | EX_NOWAIT | (extio_ex_malloc_safe ? EX_MALLOCOK : 0), &kva); if (error) return 0; physaccess((void *) kva, pa, size, PG_RW|PG_CI); return (void *)kva; }
/*
 * fic_init:
 *
 *	Board-specific initialization for the FIC: hand physical memory
 *	to UVM, map and initialize the SIC interrupt controller, and set
 *	up the kernel message buffer at the end of core.
 *
 *	Called early in boot; consumes one page of virtual_avail for the
 *	interrupt controller mapping.
 */
void
fic_init(void)
{
	int i;
	extern paddr_t avail_start, avail_end;

	/* XXX placeholders until real boot-flag / CPU-speed probing
	 * exists: force single-user + kernel debugger, and use a fixed
	 * DELAY() calibration value. */
	boothowto = RB_SINGLE;			/* XXX for now */
	boothowto |= RB_KDB;			/* XXX for now */
	delay_divisor = 30;			/* XXX */

	/*
	 * Tell the VM system about available physical memory.  The
	 * fic uses one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end),
	    VM_FREELIST_DEFAULT);

	/*
	 * map and init interrupt controller
	 */
	/* 0x44000000: SIC physical address — mapped RW, cache-inhibited
	 * as required for device registers. */
	physaccess((void*)virtual_avail, (void*)0x44000000,
	    PAGE_SIZE, PG_RW|PG_CI);
	sicinit((void*)virtual_avail);
	virtual_avail += PAGE_SIZE;

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	/* Wire each msgbuf page so its contents survive to be recovered
	 * after a crash. */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(),
		    (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
}