int lrcscan(struct device *parent, void *self, void *aux) { struct cfdata *cf = self; struct lrcsoftc *sc = (struct lrcsoftc *)parent; struct confargs *ca = aux; struct confargs oca; bzero(&oca, sizeof oca); oca.ca_iot = ca->ca_iot; oca.ca_dmat = ca->ca_dmat; oca.ca_offset = cf->cf_loc[0]; oca.ca_ipl = cf->cf_loc[1]; if (oca.ca_offset != -1 && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) { oca.ca_vaddr = sc->sc_vaddr + oca.ca_offset; oca.ca_paddr = sc->sc_paddr + oca.ca_offset; } else { oca.ca_vaddr = (vaddr_t)-1; oca.ca_paddr = (paddr_t)-1; } oca.ca_bustype = BUS_LRC; oca.ca_name = cf->cf_driver->cd_name; if ((*cf->cf_attach->ca_match)(parent, cf, &oca) == 0) return (0); config_attach(parent, cf, &oca, lrcprint); return (1); }
/*
 * mmrw: read/write handler for the memory special devices
 * /dev/mem, /dev/kmem, /dev/null and /dev/zero (selected by the
 * device minor number).
 *
 * For DEV_MEM, access is serialized through the file-static
 * `physlock` flag because all transfers share the single `vmmap`
 * mapping window.  Returns 0 on success or an errno value.
 */
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t o, v;
	int c;
	struct iovec *iov;
	int error = 0;
	static int physlock;	/* 0 = free, 1 = held, >1 = held with waiters */
	vm_prot_t prot;

	if (minor(dev) == DEV_MEM) {
		/* lock against other uses of shared vmmap */
		while (physlock > 0) {
			/* bump past 1 so the holder knows to wakeup() us */
			physlock++;
			error = tsleep((void *)&physlock, PZERO | PCATCH,
			    "mmrw", 0);
			if (error)
				return error;
		}
		physlock = 1;
	}
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* exhausted this iovec; advance to the next one */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		case DEV_MEM:
			v = uio->uio_offset;
			/*
			 * Only allow access to physical RAM (the check
			 * gates both reads and writes).
			 * NOTE(review): 0xFFFFFFFC appears to be the top
			 * of addressable physical space on this platform
			 * minus one word — confirm against the memory map.
			 */
			if (v >= 0xFFFFFFFC || v < lowram) {
				error = EFAULT;
				/* jumps into the DEV_MEM unlock epilogue */
				goto unlock;
			}
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			/*
			 * Temporarily map the target physical page at the
			 * shared vmmap window, transfer up to one page,
			 * then tear the mapping down again.
			 */
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = m68k_page_offset(uio->uio_offset);
			/* clamp the transfer to the end of this page */
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove(vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			continue;

		case DEV_KMEM:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			/* reject ranges the kernel cannot legally touch */
			if (!uvm_kernacc((void *)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return EFAULT;
			/*
			 * Don't allow reading intio
			 * device space. This could lead to
			 * corruption of device registers.
			 */
			if (ISIIOVA(v))
				return EFAULT;
			error = uiomove((void *)v, c, uio);
			continue;

		case DEV_NULL:
			/* writes are discarded; reads return EOF (0) */
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return 0;

		case DEV_ZERO:
			/* writes to /dev/zero are simply consumed */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			/*
			 * On the first call, allocate and zero a page
			 * of memory for use with /dev/zero.
			 */
			if (devzeropage == NULL)
				devzeropage = (void *)
				    malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK|M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(devzeropage, c, uio);
			continue;

		default:
			return ENXIO;
		}
		if (error)
			break;
		/*
		 * Common post-transfer bookkeeping for the `break` paths
		 * (currently only DEV_ZERO writes): advance the iovec
		 * and uio counters by the amount consumed.
		 */
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (minor(dev) == DEV_MEM) {
		/* release the vmmap lock; wake sleepers if any queued */
unlock:
		if (physlock > 1)
			wakeup((void *)&physlock);
		physlock = 0;
	}
	return error;
}