/*
 * sun68k_bus_unmap: undo a mapping made by sun68k_bus_map().
 *
 * The handle carries the original in-page offset, so we first widen
 * the request back out to whole pages before tearing it down.
 * Mappings borrowed from the PROM are permanent and are never freed.
 *
 * Returns 0 on success, EINVAL if the page-rounded size is zero.
 */
int
sun68k_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
{
	bus_size_t offset;
	vaddr_t va = (vaddr_t)bh;

	/*
	 * Adjust the user's request to be page-aligned.
	 */
	offset = va & PGOFSET;
	va -= offset;
	size += offset;
	size = m68k_round_page(size);
	if (size == 0) {
		printf("sun68k_bus_unmap: zero size\n");
		return (EINVAL);
	}

	/*
	 * If any part of the request is in the PROM's address space,
	 * don't unmap it.
	 */
#ifdef DIAGNOSTIC
	/*
	 * Sanity check: the start and the end of the range must agree on
	 * whether they lie inside the PROM window; a mapping straddling
	 * the boundary was never created by sun68k_bus_map().
	 */
	if ((va >= SUN_MONSTART && va < SUN_MONEND) !=
	    ((va + size) >= SUN_MONSTART && (va + size) < SUN_MONEND))
		panic("sun_bus_unmap: bad PROM mapping");
#endif
	if (va >= SUN_MONSTART && va < SUN_MONEND)
		return (0);	/* PROM mapping: leave it alone */

	/* Remove the translations, then release the VA range. */
	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
	return (0);
}
/*
 * Release DVMA pages previously obtained from dvma_malloc().
 * The length is rounded up to whole pages, mirroring the rounding
 * done at allocation time.
 */
void
dvma_free(void *addr, size_t size)
{
	vsize_t rounded;

	rounded = m68k_round_page(size);
	uvm_km_free(phys_map, (vaddr_t)addr, rounded, UVM_KMF_WIRED);
}
/* * Early initialization, before main() is called. */ void luna68k_init() { volatile unsigned char *pio0 = (void *)0x49000000; int sw1, i; char *cp; extern char bootarg[64]; extern paddr_t avail_start, avail_end; /* * Tell the VM system about available physical memory. The * luna68k only has one segment. */ uvm_page_physload(atop(avail_start), atop(avail_end), atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT); /* * Initialize error message buffer (at end of core). * avail_end was pre-decremented in pmap_bootstrap to compensate. */ for (i = 0; i < btoc(MSGBUFSIZE); i++) pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE, avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED); pmap_update(pmap_kernel()); initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE)); pio0[3] = 0xb6; pio0[2] = 1 << 6; /* enable parity check */ pio0[3] = 0xb6; sw1 = pio0[0]; /* dipssw1 value */ sw1 ^= 0xff; sysconsole = !(sw1 & 0x2); /* console selection */ boothowto = 0; i = 0; /* * 'bootarg' has; * "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>" * where <addr> is MAC address of which network loader used (not * necessarily same as one at 0x4101.FFE0), <host> and <name> * are the values of HOST and SERVER environment variables, * * NetBSD/luna68k cares only the first argment; any of "sda". */ for (cp = bootarg; *cp != ' '; cp++) { BOOT_FLAG(*cp, boothowto); if (i++ >= sizeof(bootarg)) break; } #if 0 /* overload 1:sw1, which now means 'go ROM monitor' after poweron */ if (boothowto == 0) boothowto = (sw1 & 0x1) ? RB_SINGLE : 0; #endif }
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	vaddr_t va = (vaddr_t)kva;

#ifdef DIAGNOSTIC
	/* The mapping was created page-aligned; demand the same here. */
	if (va & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	uvm_unmap(kernel_map, va, va + m68k_round_page(size));
}
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct pglist *pglist;
	int mallocflags, rv;
	extern paddr_t avail_start;
	extern paddr_t avail_end;

	/* Always round the size. */
	size = m68k_round_page(size);

	mallocflags = (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK;
	pglist = malloc(sizeof(*pglist), M_DEVBUF, mallocflags);
	if (pglist == NULL)
		return (ENOMEM);

	/*
	 * Allocate physical pages from the VM system, anywhere in
	 * managed RAM (avail_start .. avail_end).
	 */
	rv = uvm_pglistalloc(size, avail_start, avail_end, 0, 0,
	    pglist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (rv != 0) {
		free(pglist, M_DEVBUF);
		return (rv);
	}

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = pglist;

	/*
	 * We now have physical pages, but no DVMA addresses yet.  These
	 * will be allocated in bus_dmamap_load*() routines.  Hence we
	 * save any alignment and boundary requirements in this DMA
	 * segment.
	 */
	segs[0].ds_addr = 0;
	segs[0].ds_len = 0;
	segs[0]._ds_va = 0;
	*rsegs = 1;
	return (0);
}
/*
 * grfunmap: tear down a user mapping of the frame buffer that was
 * established by grfmap().
 *
 * 'addr' is the user address previously returned by grfmap(), which
 * includes the in-page framebuffer offset; step back over that offset
 * to recover the base of the mapped region before unmapping.
 *
 * Returns 0 on success, -1 if the adjusted address is obviously bogus.
 */
int
grfunmap(dev_t dev, struct macfb_softc *sc, void *addr, struct proc *p)
{
	vm_size_t size;

	addr = (char *)addr - sc->sc_dc->dc_offset;

	/*
	 * Reject a NULL or underflowed result.  The original code
	 * compared the pointer itself against 0 with <=, which is an
	 * invalid ordered pointer/integer comparison in C; do the
	 * same sanity check on an integer of pointer width instead.
	 */
	if ((long)addr <= 0)
		return (-1);

	size = m68k_round_page(sc->sc_dc->dc_offset + sc->sc_dc->dc_size);
	uvm_unmap(&p->p_vmspace->vm_map, (vaddr_t)addr,
	    (vaddr_t)addr + size);
	return 0;
}
/*
 * sun68k_bus_map: map a device region into kernel virtual space.
 *
 * If the caller allows it, an existing PROM mapping is reused as-is.
 * Otherwise the request is widened to whole pages, KVA is taken from
 * kernel_map (or supplied by the caller via 'vaddr'), and the region
 * is mapped non-cached with the bus type encoded into the PA.
 * The returned handle preserves the original in-page offset.
 *
 * Returns 0 on success, EINVAL if the page-rounded size is zero.
 */
int
sun68k_bus_map(bus_space_tag_t t, bus_type_t iospace, bus_addr_t addr,
    bus_size_t size, int flags, vaddr_t vaddr, bus_space_handle_t *hp)
{
	bus_size_t offset;
	vaddr_t v;

	/*
	 * If we suspect there might be one, try to find
	 * and use a PROM mapping.
	 */
	if ((flags & _SUN68K_BUS_MAP_USE_PROM) != 0 &&
	    find_prom_map(addr, iospace, size, &v) == 0) {
		*hp = (bus_space_handle_t)v;
		return (0);
	}

	/*
	 * Adjust the user's request to be page-aligned.
	 */
	offset = addr & PGOFSET;
	addr -= offset;
	size += offset;
	size = m68k_round_page(size);
	if (size == 0) {
		printf("sun68k_bus_map: zero size\n");
		return (EINVAL);
	}

	/* Get some kernel virtual address space. */
	if (vaddr)
		v = vaddr;	/* caller supplied the KVA */
	else
		v = uvm_km_alloc(kernel_map, size, 0,
		    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (v == 0)
		panic("sun68k_bus_map: no memory");

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | offset);

	/*
	 * Map the device.  The bus type bits and the non-cached flag
	 * are encoded into the physical address for pmap_map().
	 */
	addr |= iospace | PMAP_NC;
	pmap_map(v, addr, addr + size, VM_PROT_ALL);

	return (0);
}
/*
 * Allocate actual memory pages in DVMA space.
 * (idea for implementation borrowed from Chris Torek.)
 *
 * Returns NULL for a zero-byte request; panics if phys_map is
 * exhausted.  Free with dvma_free().
 */
void *
dvma_malloc(size_t bytes)
{
	vsize_t sz;
	vaddr_t va;

	if (bytes == 0)
		return NULL;

	sz = m68k_round_page(bytes);
	va = uvm_km_alloc(phys_map, sz, 0, UVM_KMF_WIRED);
	if (va == 0)
		panic("dvma_malloc: no space in phys_map");

	/* The pmap code always makes DVMA pages non-cached. */
	return (void *)va;
}
/* ARGSUSED */
/*
 * bus_space_map: map a bus physical address range for CPU access.
 *
 * Intio space is already direct-mapped at bootstrap, so it needs only
 * an address translation.  DIO and SGC space get KVA carved out of
 * the extio extent map and are mapped cache-inhibited.  The returned
 * handle keeps the in-page offset of 'bpa'.
 *
 * Returns 0 on success or an extent_alloc() error.
 */
int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	vaddr_t kva;
	vsize_t offset;
	int error;

	if (t->bustype == HP300_BUS_SPACE_INTIO) {
		/*
		 * Intio space is direct-mapped in pmap_bootstrap(); just
		 * do the translation.
		 */
		*bshp = (bus_space_handle_t)IIOV(INTIOBASE + bpa);
		return 0;
	}

	if (t->bustype != HP300_BUS_SPACE_DIO &&
	    t->bustype != HP300_BUS_SPACE_SGC)
		panic("%s: bad space tag", __func__);

	/*
	 * Allocate virtual address space from the extio extent map.
	 */
	offset = m68k_page_offset(bpa);
	size = m68k_round_page(offset + size);
	error = extent_alloc(extio_ex, size, PAGE_SIZE, 0,
	    EX_FAST | EX_NOWAIT |
	    (extio_ex_malloc_safe ? EX_MALLOCOK : 0), &kva);
	if (error)
		return error;

	/*
	 * Map the range.  The range is always cache-inhibited on the hp300.
	 */
	physaccess((void *)kva, (void *)bpa, size, PG_RW|PG_CI);

	/*
	 * All done.
	 */
	*bshp = (bus_space_handle_t)(kva + offset);
	return 0;
}
/*
 * This function is called from _bootstrap() to initialize
 * pre-vm-sytem virtual memory.  All this really does is to
 * set virtual_avail to the first page following preloaded
 * data (i.e. the kernel and its symbol table) and special
 * things that may be needed very early (lwp0 upages).
 * Once that is done, pmap_bootstrap() is called to do the
 * usual preparations for our use of the MMU.
 */
static void
_vm_init(void)
{
	vaddr_t nextva;

	/*
	 * First preserve our symbol table, which might have been
	 * loaded after our BSS area by the boot loader.  However,
	 * if DDB is not part of this kernel, ignore the symbols.
	 */
	esym = end + 4;	/* NOTE(review): +4 presumably skips a symbol-table
			 * size word placed after 'end' — confirm against
			 * the boot loader's layout. */
#if defined(DDB)
	/* This will advance esym past the symbols. */
	_save_symtab();
#endif

	/*
	 * Steal some special-purpose, already mapped pages.
	 * Note: msgbuf is setup in machdep.c:cpu_startup()
	 */
	nextva = m68k_round_page(esym);

	/*
	 * Setup the u-area pages (stack, etc.) for lwp0.
	 * This is done very early (here) to make sure the
	 * fault handler works in case we hit an early bug.
	 * (The fault handler may reference lwp0 stuff.)
	 */
	uvm_lwp_setuarea(&lwp0, nextva);
	memset((void *)nextva, 0, USPACE);
	nextva += USPACE;

	/*
	 * Now that lwp0 exists, make it the "current" one.
	 */
	curlwp = &lwp0;
	curpcb = lwp_getpcb(&lwp0);

	/* This does most of the real work. */
	pmap_bootstrap(nextva);
}
/*
 * bus_space_unmap: release a mapping made by bus_space_map().
 *
 * Intio handles were never allocated, so there is nothing to free.
 * For DIO/SGC, the handle is widened back out to whole pages, the
 * translations are removed, and the KVA is returned to the extio
 * extent map.
 */
void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t kva;
	vsize_t offset;

	if (t->bustype == HP300_BUS_SPACE_INTIO) {
		/*
		 * Intio space is direct-mapped in pmap_bootstrap(); nothing
		 * to do
		 */
		return;
	}

	if (t->bustype != HP300_BUS_SPACE_DIO &&
	    t->bustype != HP300_BUS_SPACE_SGC)
		panic("%s: bad space tag", __func__);

	/* Recover the page-aligned KVA and the in-page offset. */
	kva = m68k_trunc_page(bsh);
	offset = m68k_page_offset(bsh);
	size = m68k_round_page(offset + size);

#ifdef DIAGNOSTIC
	/* The handle must lie inside the external I/O map window. */
	if (bsh < (vaddr_t)extiobase ||
	    bsh >= ((vaddr_t)extiobase + ptoa(EIOMAPSIZE)))
		panic("%s: bad bus space handle", __func__);
#endif

	/*
	 * Unmap the range.
	 */
	physunaccess((void *)kva, size);

	/*
	 * Free it from the extio extent map.
	 */
	if (extent_free(extio_ex, kva, size,
	    EX_NOWAIT | (extio_ex_malloc_safe ? EX_MALLOCOK : 0)))
		printf("%s: kva 0x%lx size 0x%lx: "
		    "can't free region\n", __func__, (u_long)bsh, size);
}
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Walks the page list stashed in segs[0]._ds_mlist by
 * _bus_dmamem_alloc() and enters each page, non-cached, into freshly
 * allocated kernel VA.  Only single-segment allocations are supported.
 *
 * Returns 0 on success, ENOMEM if no KVA is available.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	size = m68k_round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL;
	     m = TAILQ_NEXT(m, pageq.queue)) {
		paddr_t pa;

		/* More pages on the list than 'size' accounts for. */
		if (size == 0)
			panic("_bus_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		/*
		 * PMAP_NC is OR'd into the physical address argument:
		 * this pmap interprets flag bits carried in the PA and
		 * makes the mapping non-cached.
		 */
		pmap_enter(pmap_kernel(), va, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
/*
 * grfmmap: d_mmap entry point for the grf device.
 *
 * Returns the machine-dependent frame number for the framebuffer
 * page at 'off', or (paddr_t)-1 on failure.
 *
 * Note: the d_mmap convention signals failure with -1.  The previous
 * code returned ENXIO (a small positive errno) from its sanity
 * checks, which a caller would interpret as a valid frame number.
 */
paddr_t
grfmmap(dev_t dev, off_t off, int prot)
{
	struct grf_softc *sc;
	struct macfb_devconfig *dc;
	int unit = GRFUNIT(dev);

	/* No such unit configured. */
	if (grf_softc == NULL || unit >= numgrf)
		return (-1);

	sc = &grf_softc[unit];
	if (sc->mfb_sc == NULL)
		return (-1);

	dc = sc->mfb_sc->sc_dc;
	/* Only offsets inside the (page-rounded) framebuffer map. */
	if ((u_int)off < m68k_round_page(dc->dc_offset + dc->dc_size))
		return m68k_btop(dc->dc_paddr + off);

	return (-1);
}
void bus_mapout(void *ptr, int sz) { vaddr_t va; int off; va = (vaddr_t)ptr; /* If it was a PROM mapping, do NOT free it! */ if ((va >= SUN3_MONSTART) && (va < SUN3_MONEND)) return; off = va & PGOFSET; va -= off; sz += off; sz = m68k_round_page(sz); pmap_remove(pmap_kernel(), va, va + sz); pmap_update(pmap_kernel()); uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY); }
/*
 * Unmap a previously-mapped user I/O request.
 *
 * Tears down the kernel alias created by vmapbuf() and restores the
 * buffer's original user data pointer from b_saveaddr.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/* Recover the page-aligned KVA and in-page offset. */
	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

#ifdef M68K_VAC
	/*
	 * With a virtually-addressed cache the entries were made with
	 * pmap_enter(); remove them through the pmap proper.
	 */
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	/* Entries were made with pmap_kenter_pa(); kremove them. */
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);

	/* Restore the user's view of the buffer. */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
/*
 * grfmap: map the framebuffer into the calling process.
 *
 * Builds a throw-away vnode on the stack just to give uvm_mmap() a
 * character device to map, places the mapping at the process's
 * default mmap address, and then advances the returned pointer by
 * the framebuffer's in-page offset (grfunmap() undoes this).
 *
 * Returns 0 on success or the uvm_mmap() error.
 */
int
grfmap(dev_t dev, struct macfb_softc *sc, void **addrp, struct proc *p)
{
	struct vnode vn;
	u_long len;
	int error, flags;

	len = m68k_round_page(sc->sc_dc->dc_offset + sc->sc_dc->dc_size);
	*addrp = (void *)VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, len);
	flags = MAP_SHARED | MAP_FIXED;

	vn.v_type = VCHR;		/* XXX fake vnode, never vrele'd */
	vn.v_rdev = dev;		/* XXX */

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
	    flags, (void *)&vn, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/* Offset into page: */
	*addrp = (char *)*addrp + sc->sc_dc->dc_offset;

	return (error);
}
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 *
 * Saves the user address in b_saveaddr (vunmapbuf() restores it),
 * allocates matching KVA from phys_map, and aliases each user page
 * into the kernel page by page.  Returns 0 (cannot fail short of
 * panicking on an unmapped user page).
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap __unused;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/* Remember the user address while truncating to a page base. */
	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		/* VAC: go through pmap_enter so aliases are handled. */
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);

	return 0;
}
/*
 * fic_init: early machine-dependent initialization.
 *
 * Registers physical memory with UVM, maps and initializes the
 * interrupt controller at 0x44000000, and wires up the kernel
 * message buffer at the end of core.
 */
void
fic_init(void)
{
	int i;
	extern paddr_t avail_start, avail_end;

	boothowto = RB_SINGLE;	/* XXX for now */
	boothowto |= RB_KDB;	/* XXX for now */

	delay_divisor = 30;	/* XXX */

	/*
	 * Tell the VM system about available physical memory.  The
	 * fic uses one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * map and init interrupt controller
	 */
	physaccess((void *)virtual_avail, (void *)0x44000000,
	    PAGE_SIZE, PG_RW|PG_CI);
	sicinit((void *)virtual_avail);
	virtual_avail += PAGE_SIZE;	/* consume the page just mapped */

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(),
		    (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
}
/*
 * Make a permanent mapping for a device.
 *
 * The request is widened to whole pages; if the device lives in OBIO
 * space and the PROM already maps it, that mapping is borrowed and no
 * KVA is allocated (the 'goto done' skips straight past allocation).
 * Otherwise the PA is rewritten with the bus's mask/base/type bits,
 * marked non-cached, and mapped into fresh kernel VA.
 *
 * The returned pointer carries the original in-page offset; pass it
 * to bus_mapout() to (conditionally) release the mapping.
 */
void *
bus_mapin(int bustype, int pa, int sz)
{
	vaddr_t va;
	int off;

	if ((bustype < 0) || (bustype >= BUS__NTYPES))
		panic("bus_mapin: bustype");

	off = pa & PGOFSET;
	pa -= off;
	sz += off;
	sz = m68k_round_page(sz);

	/* Borrow PROM mappings if we can. */
	if (bustype == BUS_OBIO) {
		if (find_prom_map(pa, PMAP_OBIO, sz, &va) == 0)
			goto done;
	}

	/* Rewrite the PA for this bus and make it non-cached. */
	pa &= bus_info[bustype].mask;
	pa |= bus_info[bustype].base;
	pa |= bus_info[bustype].type;
	pa |= PMAP_NC;	/* non-cached */

	/* Get some kernel virtual address space. */
	va = uvm_km_alloc(kernel_map, sz, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (va == 0)
		panic("bus_mapin");

	/* Map it to the specified bus. */
	pmap_map(va, pa, pa + sz, VM_PROT_ALL);

done:
	return ((void *)(va + off));
}
/*
 * Early initialization, before main() is called.
 *
 * Sets up the early console, registers physical memory with UVM,
 * wires up the kernel message buffer, enables parity checking on
 * PIO0, selects the console from DIP switch 1, and determines
 * boothowto/bootdev — either from values passed by the native
 * bootloader or by parsing 'bootarg' from the ROM monitor.
 */
void
luna68k_init(void)
{
	volatile uint8_t *pio0 = (void *)0x49000000;
	int sw1, i;
	char *cp;
	extern char bootarg[64];
	extern paddr_t avail_start, avail_end;

	/* initialize cn_tab for early console */
#if 1
	cn_tab = &syscons;
#else
	cn_tab = &romcons;
#endif

	/*
	 * Tell the VM system about available physical memory.  The
	 * luna68k only has one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_kenter_pa((vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));

	pio0[3] = 0xb6;
	pio0[2] = 1 << 6;		/* enable parity check */
	pio0[3] = 0xb6;
	sw1 = pio0[0];			/* dip sw1 value */
	sw1 ^= 0xff;
	sysconsole = !(sw1 & 0x2);	/* console selection */

	/*
	 * Check if boothowto and bootdev values are passed by our bootloader.
	 */
	if ((bootdev & B_MAGICMASK) == B_DEVMAGIC) {
		/* Valid value is set; no need to parse bootarg. */
		return;
	}

	/*
	 * No valid bootdev value is set.
	 * Assume we are booted by ROM monitor directly using a.out kernel
	 * and we have to parse bootarg passed from the monitor to set
	 * proper boothowto and check netboot.
	 */

	/* set default to "sd0a" with no howto flags */
	bootdev = MAKEBOOTDEV(0, LUNA68K_BOOTADPT_SPC, 0, 0, 0);
	boothowto = 0;

	/*
	 * 'bootarg' on LUNA has:
	 *   "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>"
	 * where <addr> is MAC address of which network loader used (not
	 * necessarily same as one at 0x4101.FFE0), <host> and <name>
	 * are the values of HOST and SERVER environment variables.
	 *
	 * 'bootarg' on LUNA-II has "<args of x command>" only.
	 *
	 * NetBSD/luna68k cares only the first argument; any of "sda".
	 */
	/* Force termination in case the buffer is completely full. */
	bootarg[63] = '\0';
	for (cp = bootarg; *cp != '\0'; cp++) {
		if (*cp == '-') {
			/* A flag word: consume characters up to a space. */
			char c;
			while ((c = *cp) != '\0' && c != ' ') {
				BOOT_FLAG(c, boothowto);
				cp++;
			}
		} else if (*cp == 'E' && memcmp("ENADDR=", cp, 7) == 0) {
			/* ENADDR= present means the network loader booted us. */
			bootdev =
			    MAKEBOOTDEV(0, LUNA68K_BOOTADPT_LANCE, 0, 0, 0);
		}
	}
}