/*
 * Validate a buf and translate its partition-relative block number into
 * an absolute, device-sized sector number stored in bp->b_rawblkno.
 *
 * Returns -1 when the caller should continue processing the buf, or an
 * errno (0 for the benign zero-length case) with b_resid set to the full
 * count when the transfer is already finished.
 */
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
    int part;
    int wlabel;
    daddr_t blkno;
    struct disklabel *lp;
    struct disk *dk;
    uint64_t numsecs;
    unsigned secsize;

    lp = dksc->sc_dkdev.dk_label;
    dk = &dksc->sc_dkdev;

    part = DISKPART(bp->b_dev);
    numsecs = dk->dk_geom.dg_secperunit;
    secsize = dk->dk_geom.dg_secsize;

    /*
     * The transfer must be a whole number of blocks and the offset must
     * not be negative.
     */
    if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
        bp->b_error = EINVAL;
        goto done;
    }

    /* If there is nothing to do, then we are done */
    if (bp->b_bcount == 0)
        goto done;

    wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
    if (part == RAW_PART) {
        /* Raw partition: bound against whole media, in DEV_BSIZE units. */
        uint64_t numblocks = btodb(numsecs * secsize);
        if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
            goto done;
    } else {
        /* Other partitions: bound against the label (wlabel allows
         * writes over the label area while (re)labelling). */
        if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
            goto done;
    }

    /*
     * Convert the block number to absolute and put it in terms
     * of the device's logical block size.
     */
    if (secsize >= DEV_BSIZE)
        blkno = bp->b_blkno / (secsize / DEV_BSIZE);
    else
        blkno = bp->b_blkno * (DEV_BSIZE / secsize);

    if (part != RAW_PART)
        blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

    bp->b_rawblkno = blkno;

    return -1;

done:
    bp->b_resid = bp->b_bcount;
    return bp->b_error;
}
/*
 * Close one partition of an OpenFirmware disk; on last close the
 * firmware instance handle is released.
 */
int
ofdisk_close(dev_t dev, int flags, int fmt, struct lwp *l)
{
    struct ofdisk_softc *sc =
        device_lookup_private(&ofdisk_cd, DISKUNIT(dev));
    int pmask = 1 << DISKPART(dev);

    mutex_enter(&sc->sc_dk.dk_openlock);

    /* Drop this partition from the per-mode open masks. */
    if (fmt == S_IFCHR)
        sc->sc_dk.dk_copenmask &= ~pmask;
    else if (fmt == S_IFBLK)
        sc->sc_dk.dk_bopenmask &= ~pmask;
    sc->sc_dk.dk_openmask =
        sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

#ifdef FIRMWORKSBUGS
    /*
     * This is a hack to get the firmware to flush its buffers.
     */
    OF_seek(sc->sc_ihandle, 0);
#endif
    if (sc->sc_dk.dk_openmask == 0) {
        /* Last close: give the handle back to the firmware. */
        OF_close(sc->sc_ihandle);
        sc->sc_ihandle = 0;
    }

    mutex_exit(&sc->sc_dk.dk_openlock);
    return 0;
}
/*
 * Driver strategy entry point: validate the buf, translate its block
 * number to an absolute sector, queue it on the softc's bufq, and kick
 * the driver's start routine.  Errors are reported through b_error and
 * biodone(); this function never returns a status.
 */
void
dk_strategy(struct dk_intf *di, struct dk_softc *dksc, struct buf *bp)
{
    int s;
    int wlabel;
    daddr_t blkno;

    DPRINTF_FOLLOW(("dk_strategy(%s, %p, %p)\n",
        di->di_dkname, dksc, bp));

    if (!(dksc->sc_flags & DKF_INITED)) {
        /* Device was never configured; fail the I/O outright. */
        DPRINTF_FOLLOW(("dk_strategy: not inited\n"));
        bp->b_error = ENXIO;
        biodone(bp);
        return;
    }

    /* XXX look for some more errors, c.f. ld.c */

    bp->b_resid = bp->b_bcount;

    /* If there is nothing to do, then we are done */
    if (bp->b_bcount == 0) {
        biodone(bp);
        return;
    }

    /* Label-area writes require DKF_WLABEL or DKF_LABELLING. */
    wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
    if (DISKPART(bp->b_dev) != RAW_PART &&
        bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0) {
        biodone(bp);
        return;
    }

    /* Bias a partition-relative block number by the partition offset. */
    blkno = bp->b_blkno;
    if (DISKPART(bp->b_dev) != RAW_PART) {
        struct partition *pp;

        pp = &dksc->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
        blkno += pp->p_offset;
    }
    bp->b_rawblkno = blkno;

    /*
     * Start the unit by calling the start routine
     * provided by the individual driver.
     */
    s = splbio();
    bufq_put(dksc->sc_bufq, bp);
    dk_start(di, dksc);
    splx(s);
    return;
}
/*
 * Perform the transfer described by bp using the OpenFirmware
 * read/write primitives.  Completion (success or failure) is always
 * signalled via biodone().
 */
void
ofdisk_strategy(struct buf *bp)
{
    struct ofdisk_softc *of =
        device_lookup_private(&ofdisk_cd, DISKUNIT(bp->b_dev));
    struct partition *p;
    u_quad_t off;
    int read;
    int (*OF_io)(int, void *, int);
    daddr_t blkno = bp->b_blkno;

    bp->b_resid = 0;
    if (bp->b_bcount == 0)
        goto done;

    /* Pick the firmware primitive matching the transfer direction. */
    OF_io = bp->b_flags & B_READ ? OF_read :
        (int(*)(int, void*, int))OF_write;

    if (DISKPART(bp->b_dev) != RAW_PART) {
        if (bounds_check_with_label(&of->sc_dk, bp, 0) <= 0) {
            bp->b_resid = bp->b_bcount;
            goto done;
        }
        /* Bias the block number by the partition offset. */
        p = &of->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
        blkno = bp->b_blkno + p->p_offset;
    }

    disk_busy(&of->sc_dk);

    off = (u_quad_t)blkno * DEV_BSIZE;
    read = -1;
    /* NOTE(review): -2 from OF_io is treated as "retry the whole
     * seek+transfer"; confirm against the firmware interface docs. */
    do {
        if (OF_seek(of->sc_ihandle, off) < 0)
            break;
        read = OF_io(of->sc_ihandle, bp->b_data, bp->b_bcount);
    } while (read == -2);

    if (read < 0) {
        bp->b_error = EIO;
        bp->b_resid = bp->b_bcount;
    } else
        bp->b_resid = bp->b_bcount - read;

    /* Account the bytes actually moved. */
    disk_unbusy(&of->sc_dk, bp->b_bcount - bp->b_resid,
        (bp->b_flags & B_READ));

done:
    biodone(bp);
}
/*
 * Open a ramdisk partition.  The first opener (no partitions currently
 * open) loads the in-core disklabel before the open is recorded.
 */
int
rdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
    struct rd_softc *sc;
    int error;

    sc = rdlookup(DISKUNIT(dev));
    if (sc == NULL)
        return (ENXIO);

    error = disk_lock(&sc->sc_dk);
    if (error == 0) {
        /*
         * Load the partition info if not already loaded; on
         * success (or when already loaded) record the open.
         */
        if (sc->sc_dk.dk_openmask != 0 ||
            (error = rdgetdisklabel(dev, sc, sc->sc_dk.dk_label,
             0)) == 0)
            error = disk_openpart(&sc->sc_dk, DISKPART(dev),
                fmt, 1);
        disk_unlock(&sc->sc_dk);
    }

    device_unref(&sc->sc_dev);
    return (error);
}
/*
 * I/O controls.
 *
 * Fix: the softc returned by device_lookup_private() was dereferenced
 * without a NULL check, so an ioctl on a nonexistent unit could panic.
 */
int
rxioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
    int unit = DISKUNIT(dev);
    struct disklabel *lp;
    struct rx_softc *rx = device_lookup_private(&rx_cd, unit);
    int error = 0;

    /* Guard against an ioctl on a unit that does not exist. */
    if (rx == NULL)
        return (ENXIO);

    lp = rx->ra_disk.dk_label;

    switch (cmd) {
    case DIOCGDINFO:
        /* Copy out the in-core disklabel. */
        memcpy(data, lp, sizeof (struct disklabel));
        break;

    case DIOCGPART:
        /* Hand back pointers to the label and the addressed partition. */
        ((struct partinfo *)data)->disklab = lp;
        ((struct partinfo *)data)->part =
            &lp->d_partitions[DISKPART(dev)];
        break;

    case DIOCWDINFO:
    case DIOCSDINFO:
    case DIOCWLABEL:
        /* Label writes are accepted but intentionally ignored here. */
        break;

    default:
        error = ENOTTY;
        break;
    }
    return (error);
}
/*
 * Return the size, in DEV_BSIZE blocks, of the swap partition on the
 * vnd unit, or -1 if it cannot be determined.
 */
static int
vndsize(dev_t dev)
{
    struct vnd_softc *sc;
    struct disklabel *lp;
    int unit, part;
    int was_open, size;

    unit = vndunit(dev);
    sc = device_lookup_private(&vnd_cd, unit);
    if (sc == NULL || (sc->sc_flags & VNF_INITED) == 0)
        return -1;

    part = DISKPART(dev);
    was_open = sc->sc_dkdev.dk_openmask & (1 << part);
    lp = sc->sc_dkdev.dk_label;

    /* Transiently open the block device if nobody else has it open. */
    if (was_open == 0 && vndopen(dev, 0, S_IFBLK, curlwp))	/* XXX */
        return -1;

    /* Only swap partitions have a meaningful size here. */
    if (lp->d_partitions[part].p_fstype != FS_SWAP)
        size = -1;
    else
        size = lp->d_partitions[part].p_size *
            (lp->d_secsize / DEV_BSIZE);

    if (was_open == 0 && vndclose(dev, 0, S_IFBLK, curlwp))	/* XXX */
        return -1;

    return size;
}
/*
 * Return the size, in DEV_BSIZE blocks, of the swap partition on the
 * given unit, or -1 if it cannot be determined.
 *
 * Fix: the two early failures returned positive ENODEV from a size
 * function whose every other failure path — and every sibling *size
 * routine (vndsize, dk_size, ccdsize) — reports -1; a caller testing
 * only for -1 would have treated ENODEV as a valid block count.
 */
int
ldsize(dev_t dev)
{
    struct ld_softc *sc;
    int part, unit, omask, size;

    unit = DISKUNIT(dev);
    if ((sc = device_lookup(&ld_cd, unit)) == NULL)
        return (-1);	/* was ENODEV */
    if ((sc->sc_flags & LDF_ENABLED) == 0)
        return (-1);	/* was ENODEV */

    part = DISKPART(dev);
    omask = sc->sc_dk.dk_openmask & (1 << part);

    /* Transiently open the partition if nobody already has it open. */
    if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
        return (-1);
    else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
        size = -1;	/* only swap partitions report a size */
    else
        size = sc->sc_dk.dk_label->d_partitions[part].p_size *
            (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
    if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
        return (-1);

    return (size);
}
/*
 * Return the size, in DEV_BSIZE blocks, of the swap partition on the
 * given unit, or -1 if it cannot be determined.
 *
 * Fix: the missing-softc case returned positive ENXIO while every other
 * failure path — and the sibling *size routines in this file — reports
 * -1; a caller testing only for -1 would have read ENXIO as a size.
 */
int
ofdisk_size(dev_t dev)
{
    struct ofdisk_softc *of;
    struct disklabel *lp;
    int size, part, omask;

    of = device_lookup_private(&ofdisk_cd, DISKUNIT(dev));
    if (of == NULL)
        return -1;	/* was ENXIO */

    part = DISKPART(dev);
    omask = of->sc_dk.dk_openmask & (1 << part);
    lp = of->sc_dk.dk_label;

    /* Transiently open the partition if nobody already has it open. */
    if (omask == 0 && ofdisk_open(dev, 0, S_IFBLK, curlwp) != 0)
        return -1;

    /* Only swap partitions have a meaningful size here. */
    if (lp->d_partitions[part].p_fstype != FS_SWAP)
        size = -1;
    else
        size = lp->d_partitions[part].p_size *
            (lp->d_secsize / DEV_BSIZE);

    if (omask == 0 && ofdisk_close(dev, 0, S_IFBLK, curlwp) != 0)
        return -1;

    return size;
}
/*
 * Return the size, in DEV_BSIZE blocks, of the swap partition on the
 * device, or -1 if it cannot be determined.
 */
int
dk_size(struct dk_softc *dksc, dev_t dev)
{
    const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    struct disklabel *lp;
    int part, size;
    int transient;

    if ((dksc->sc_flags & DKF_INITED) == 0)
        return -1;

    part = DISKPART(dev);
    /* Open transiently when no one else holds this partition open. */
    transient = (dksc->sc_dkdev.dk_openmask & (1 << part)) == 0;

    if (transient && dkd->d_open(dev, 0, S_IFBLK, curlwp))
        return -1;

    lp = dksc->sc_dkdev.dk_label;
    /* Only swap partitions have a meaningful size here. */
    if (lp->d_partitions[part].p_fstype == FS_SWAP)
        size = lp->d_partitions[part].p_size *
            (lp->d_secsize / DEV_BSIZE);
    else
        size = -1;

    if (transient && dkd->d_close(dev, 0, S_IFBLK, curlwp))
        return -1;

    return size;
}
/*
 * Open a memory-disk partition.  The raw partition always opens (it is
 * the configuration channel); other partitions require the device to
 * already be configured.
 */
static int
mdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
    struct md_softc *sc;
    int unit = MD_UNIT(dev);

    sc = device_lookup_private(&md_cd, unit);
    if (sc == NULL)
        return ENXIO;

    /*
     * The raw partition is used for ioctl to configure.
     */
    if (DISKPART(dev) == RAW_PART)
        return 0;

#ifdef MEMORY_DISK_HOOKS
    /* Call the open hook to allow loading the device. */
    md_open_hook(unit, &sc->sc_md);
#endif

    /*
     * This is a normal, "slave" device, so
     * enforce initialized.
     */
    return (sc->sc_type == MD_UNCONFIGURED) ? ENXIO : 0;
}
/*
 * Close a wd partition; the last close flushes the drive's write cache.
 */
int
wdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
    struct wd_softc *sc;

    sc = wdlookup(DISKUNIT(dev));
    if (sc == NULL)
        return ENXIO;

    WDCDEBUG_PRINT(("wdclose\n"), DEBUG_FUNCS);

    disk_lock_nointr(&sc->sc_dk);

    disk_closepart(&sc->sc_dk, DISKPART(dev), fmt);

    if (sc->sc_dk.dk_openmask == 0) {
        /* Last close: push cached writes out to the media. */
        wd_flushcache(sc, 0);
        /* XXXX Must wait for I/O to complete! */
    }

    disk_unlock(&sc->sc_dk);

    device_unref(&sc->sc_dev);
    return (0);
}
/*
 * Return the size of the addressed partition in DEV_BSIZE blocks, or
 * -1 on any failure.  The device reference taken by wdlookup() is
 * always released before returning.
 */
daddr_t
wdsize(dev_t dev)
{
    struct wd_softc *wd;
    struct disklabel *lp;
    daddr_t size;
    int part, was_open;

    WDCDEBUG_PRINT(("wdsize\n"), DEBUG_FUNCS);

    wd = wdlookup(DISKUNIT(dev));
    if (wd == NULL)
        return (-1);

    part = DISKPART(dev);
    was_open = wd->sc_dk.dk_openmask & (1 << part);

    /* Transiently open the partition if it is not already open. */
    if (was_open == 0 && wdopen(dev, 0, S_IFBLK, NULL) != 0) {
        size = -1;
        goto out;
    }

    lp = wd->sc_dk.dk_label;
    size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));

    if (was_open == 0 && wdclose(dev, 0, S_IFBLK, NULL) != 0)
        size = -1;

out:
    device_unref(&wd->sc_dev);
    return (size);
}
/*
 * Close a vnd partition: record the close in the disk open masks.
 */
int
vndclose(dev_t dev, int flags, int mode, struct proc *p)
{
    struct vnd_softc *sc;
    int unit = DISKUNIT(dev);

    DNPRINTF(VDB_FOLLOW, "vndclose(%x, %x, %x, %p)\n",
        dev, flags, mode, p);

    if (unit >= numvnd)
        return (ENXIO);
    sc = &vnd_softc[unit];

    disk_lock_nointr(&sc->sc_dk);

    disk_closepart(&sc->sc_dk, DISKPART(dev), mode);

#if 0
    if (sc->sc_dk.dk_openmask == 0)
        sc->sc_flags &= ~VNF_HAVELABEL;
#endif

    disk_unlock(&sc->sc_dk);
    return (0);
}
/* ARGSUSED */
/*
 * Generic disk open: refresh the in-core label on demand, validate the
 * requested partition against it, and record the open in the per-mode
 * open masks.  Serialized by dk_openlock.
 */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
    struct disklabel *lp = dksc->sc_dkdev.dk_label;
    int part = DISKPART(dev);
    int pmask = 1 << part;
    int ret = 0;
    struct disk *dk = &dksc->sc_dkdev;

    DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
        dksc->sc_xname, dksc, dev, flags));

    mutex_enter(&dk->dk_openlock);

    /*
     * If there are wedges, and this is not RAW_PART, then we
     * need to fail.
     */
    if (dk->dk_nwedges != 0 && part != RAW_PART) {
        ret = EBUSY;
        goto done;
    }

    /*
     * If we're init'ed and there are no other open partitions then
     * update the in-core disklabel.
     *
     * NOTE(review): the code below gates the reread only on DKF_VLABEL,
     * not on dk_openmask as the comment says — confirm which is intended.
     */
    if ((dksc->sc_flags & DKF_INITED)) {
        if ((dksc->sc_flags & DKF_VLABEL) == 0) {
            /* First use since invalidation: (re)read the label. */
            dksc->sc_flags |= DKF_VLABEL;
            dk_getdisklabel(dksc, dev);
        }
    }

    /* Fail if we can't find the partition. */
    if (part != RAW_PART &&
        ((dksc->sc_flags & DKF_VLABEL) == 0 ||
         part >= lp->d_npartitions ||
         lp->d_partitions[part].p_fstype == FS_UNUSED)) {
        ret = ENXIO;
        goto done;
    }

    /* Mark our unit as open. */
    switch (fmt) {
    case S_IFCHR:
        dk->dk_copenmask |= pmask;
        break;
    case S_IFBLK:
        dk->dk_bopenmask |= pmask;
        break;
    }

    dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
    mutex_exit(&dk->dk_openlock);
    return ret;
}
/* ARGSUSED */
/*
 * Close a ccd partition: clear its bit in the per-mode open masks
 * under the softc lock.
 */
int
ccdclose(dev_t dev, int flags, int fmt, struct proc *p)
{
    struct ccd_softc *cs;
    int unit = ccdunit(dev);
    int error, pmask;

    CCD_DPRINTF(CCDB_FOLLOW, ("ccdclose(%x, %x)\n", dev, flags));

    if (unit >= numccd)
        return (ENXIO);
    cs = &ccd_softc[unit];

    if ((error = ccdlock(cs)) != 0)
        return (error);

    pmask = 1 << DISKPART(dev);

    /* ...that much closer to allowing unconfiguration... */
    if (fmt == S_IFCHR)
        cs->sc_dkdev.dk_copenmask &= ~pmask;
    else if (fmt == S_IFBLK)
        cs->sc_dkdev.dk_bopenmask &= ~pmask;
    cs->sc_dkdev.dk_openmask =
        cs->sc_dkdev.dk_copenmask | cs->sc_dkdev.dk_bopenmask;

    ccdunlock(cs);
    return (0);
}
/* ARGSUSED */
/*
 * Generic disk close: drop the partition from the per-mode open masks;
 * on the last close invoke the driver's d_lastclose hook and, unless
 * the label is pinned (DKF_KLABEL), invalidate the virtual label.
 */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
    const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
    struct disk *dk = &dksc->sc_dkdev;
    int pmask = 1 << DISKPART(dev);

    DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
        dksc->sc_xname, dksc, dev, flags));

    mutex_enter(&dk->dk_openlock);

    if (fmt == S_IFCHR)
        dk->dk_copenmask &= ~pmask;
    else if (fmt == S_IFBLK)
        dk->dk_bopenmask &= ~pmask;
    dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

    if (dk->dk_openmask == 0) {
        /* Last close: let the driver clean up ... */
        if (dkd->d_lastclose != NULL)
            (*dkd->d_lastclose)(dksc->sc_dev);
        /* ... and forget the virtual label unless it is kept. */
        if ((dksc->sc_flags & DKF_KLABEL) == 0)
            dksc->sc_flags &= ~DKF_VLABEL;
    }

    mutex_exit(&dk->dk_openlock);
    return 0;
}
/*
 * Return the size, in sectors, of the swap partition on the ccd unit,
 * or -1 if it cannot be determined.
 */
int
ccdsize(dev_t dev)
{
    struct ccd_softc *cs;
    struct partition *pp;
    int unit = ccdunit(dev);
    int size;

    if (unit >= numccd)
        return (-1);
    cs = &ccd_softc[unit];
    if ((cs->sc_flags & CCDF_INITED) == 0)
        return (-1);

    /* Open transiently so the in-core label is valid while we read it. */
    if (ccdopen(dev, 0, S_IFBLK, curproc))
        return (-1);

    pp = &cs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
    size = (pp->p_fstype == FS_SWAP) ? pp->p_size : -1;

    if (ccdclose(dev, 0, S_IFBLK, curproc))
        return (-1);

    return (size);
}
/* ARGSUSED */
/*
 * Close an ld partition; the last close flushes the device cache.
 *
 * Fix: the softc returned by device_lookup() was handed to ldlock()
 * without a NULL check, so a close on a detached/nonexistent unit
 * could dereference NULL.
 */
int
ldclose(dev_t dev, int flags, int fmt, struct proc *p)
{
    struct ld_softc *sc;
    int part, unit;

    unit = DISKUNIT(dev);
    part = DISKPART(dev);
    sc = device_lookup(&ld_cd, unit);
    /* Guard against a close racing with unit detach. */
    if (sc == NULL)
        return (ENXIO);
    ldlock(sc);

    /* Clear this partition from the per-mode open masks. */
    switch (fmt) {
    case S_IFCHR:
        sc->sc_dk.dk_copenmask &= ~(1 << part);
        break;
    case S_IFBLK:
        sc->sc_dk.dk_bopenmask &= ~(1 << part);
        break;
    }
    sc->sc_dk.dk_openmask =
        sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

    /* Last close: flush the device's write cache if it has one. */
    if (sc->sc_dk.dk_openmask == 0 && sc->sc_flush != NULL)
        if ((*sc->sc_flush)(sc) != 0)
            printf("%s: unable to flush cache\n",
                sc->sc_dv.dv_xname);

    ldunlock(sc);
    return (0);
}
/*
 * Return the size, in DEV_BSIZE blocks, of the swap partition on the
 * given unit, or -1 if it cannot be determined.
 *
 * Fix: the debug trace printed "edsize" (the wd/ed IDE driver's name),
 * which misattributes the trace to a different function.
 */
int
edmcasize(dev_t dev)
{
    struct ed_softc *wd;
    int part, omask;
    int size;

    ATADEBUG_PRINT(("edmcasize\n"), DEBUG_FUNCS);

    wd = device_lookup_private(&ed_cd, DISKUNIT(dev));
    if (wd == NULL)
        return (-1);

    part = DISKPART(dev);
    omask = wd->sc_dk.dk_openmask & (1 << part);

    /* Transiently open the partition if nobody already has it open. */
    if (omask == 0 && edmcaopen(dev, 0, S_IFBLK, NULL) != 0)
        return (-1);
    /* Only swap partitions have a meaningful size here. */
    if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
        size = -1;
    else
        size = wd->sc_dk.dk_label->d_partitions[part].p_size *
            (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
    if (omask == 0 && edmcaclose(dev, 0, S_IFBLK, NULL) != 0)
        return (-1);
    return (size);
}
/*
 * Close an ed (MCA ESDI) partition; on last close the in-core label is
 * invalidated unless it is pinned with WDF_KLABEL.
 */
int
edmcaclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
    struct ed_softc *sc = device_lookup_private(&ed_cd, DISKUNIT(dev));
    int pmask = 1 << DISKPART(dev);

    ATADEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);

    mutex_enter(&sc->sc_dk.dk_openlock);

    if (fmt == S_IFCHR)
        sc->sc_dk.dk_copenmask &= ~pmask;
    else if (fmt == S_IFBLK)
        sc->sc_dk.dk_bopenmask &= ~pmask;
    sc->sc_dk.dk_openmask =
        sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

    if (sc->sc_dk.dk_openmask == 0) {
#if 0
        wd_flushcache(sc, AT_WAIT);
#endif
        /* XXXX Must wait for I/O to complete! */
        if (! (sc->sc_flags & WDF_KLABEL))
            sc->sc_flags &= ~WDF_LOADED;
    }

    mutex_exit(&sc->sc_dk.dk_openlock);
    return 0;
}
/* ARGSUSED */
/*
 * Generic disk close (dk_intf variant): drop the partition from the
 * per-mode open masks under dk_openlock.
 */
int
dk_close(struct dk_intf *di, struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
    struct disk *dk = &dksc->sc_dkdev;
    int pmask = 1 << DISKPART(dev);

    DPRINTF_FOLLOW(("dk_close(%s, %p, 0x%"PRIx64", 0x%x)\n",
        di->di_dkname, dksc, dev, flags));

    mutex_enter(&dk->dk_openlock);

    if (fmt == S_IFCHR)
        dk->dk_copenmask &= ~pmask;
    else if (fmt == S_IFBLK)
        dk->dk_bopenmask &= ~pmask;
    dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

    mutex_exit(&dk->dk_openlock);
    return 0;
}
/*
 * Open a presto partition: validate it against the label and record the
 * open in the per-mode open masks.
 */
int
prestoopen(dev_t dev, int flag, int fmt, struct proc *proc)
{
    struct presto_softc *sc;
    struct disklabel *lp;
    int part;

    sc = (struct presto_softc *)device_lookup(&presto_cd, DISKUNIT(dev));
    if (sc == NULL)
        return (ENXIO);

    /* only allow valid partitions */
    part = DISKPART(dev);
    lp = sc->sc_dk.dk_label;
    if (part != RAW_PART &&
        (part >= lp->d_npartitions ||
         lp->d_partitions[part].p_fstype == FS_UNUSED))
        return (ENXIO);

    /* update open masks */
    if (fmt == S_IFCHR)
        sc->sc_dk.dk_copenmask |= (1 << part);
    else if (fmt == S_IFBLK)
        sc->sc_dk.dk_bopenmask |= (1 << part);
    sc->sc_dk.dk_openmask =
        sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

    return (0);
}
/*
 * Ramdisk ioctl handler: disklabel get/set operations.  A device
 * reference is held for the duration and always released at done:.
 *
 * NOTE(review): an unrecognized cmd falls out of the switch and returns
 * 0 rather than ENOTTY — confirm whether that is intentional.
 */
int
rdioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
    struct rd_softc *sc;
    struct disklabel *lp;
    int error = 0;

    sc = rdlookup(DISKUNIT(dev));
    if (sc == NULL)
        return (ENXIO);

    switch (cmd) {
    case DIOCRLDINFO:
        /* Rebuild the default label and install it in-core. */
        lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
        rdgetdisklabel(dev, sc, lp, 0);
        bcopy(lp, sc->sc_dk.dk_label, sizeof(*lp));
        free(lp, M_TEMP);
        goto done;

    case DIOCGPDINFO:
        /* Compute the default label directly into the caller's buffer. */
        rdgetdisklabel(dev, sc, (struct disklabel *)data, 1);
        goto done;

    case DIOCGDINFO:
        /* Copy out the current in-core label. */
        *(struct disklabel *)data = *(sc->sc_dk.dk_label);
        goto done;

    case DIOCGPART:
        ((struct partinfo *)data)->disklab = sc->sc_dk.dk_label;
        ((struct partinfo *)data)->part =
            &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
        goto done;

    case DIOCWDINFO:
    case DIOCSDINFO:
        /* Setting the label requires a writable descriptor. */
        if ((fflag & FWRITE) == 0) {
            error = EBADF;
            goto done;
        }

        if ((error = disk_lock(&sc->sc_dk)) != 0)
            goto done;

        error = setdisklabel(sc->sc_dk.dk_label,
            (struct disklabel *)data, sc->sc_dk.dk_openmask);
        if (error == 0) {
            /* DIOCWDINFO additionally writes the label to media. */
            if (cmd == DIOCWDINFO)
                error = writedisklabel(DISKLABELDEV(dev),
                    rdstrategy, sc->sc_dk.dk_label);
        }

        disk_unlock(&sc->sc_dk);
        goto done;
    }

done:
    device_unref(&sc->sc_dev);
    return (error);
}
/*
 * cgd ioctl dispatch.  Two switches: the first resolves the softc
 * (CGDIOCGET is answered before GETCGD_SOFTC so it works on
 * unconfigured units); the second dispatches on the command.
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
    struct cgd_softc *cs;
    struct dk_softc *dksc;
    int part = DISKPART(dev);
    int pmask = 1 << part;

    DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
        dev, cmd, data, flag, l));

    switch (cmd) {
    case CGDIOCGET:
        /* Answered without a softc: valid on unconfigured units. */
        return cgd_ioctl_get(dev, data, l);
    case CGDIOCSET:
    case CGDIOCCLR:
        /* Configuration changes require a writable descriptor. */
        if ((flag & FWRITE) == 0)
            return EBADF;
        /* FALLTHROUGH */
    default:
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        break;
    }

    switch (cmd) {
    case CGDIOCSET:
        if (DK_ATTACHED(dksc))
            return EBUSY;
        return cgd_ioctl_set(cs, data, l);
    case CGDIOCCLR:
        if (DK_BUSY(&cs->sc_dksc, pmask))
            return EBUSY;
        return cgd_ioctl_clr(cs, l);
    case DIOCCACHESYNC:
        /*
         * XXX Do we really need to care about having a writable
         * file descriptor here?
         */
        if ((flag & FWRITE) == 0)
            return (EBADF);

        /*
         * We pass this call down to the underlying disk.
         */
        return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
    case DIOCGSTRATEGY:
    case DIOCSSTRATEGY:
        if (!DK_ATTACHED(dksc))
            return ENOENT;
        /*FALLTHROUGH*/
    default:
        /* Everything else is handled by the generic dk layer. */
        return dk_ioctl(dksc, dev, cmd, data, flag, l);
    case CGDIOCGET:
        /* Unreachable: CGDIOCGET already returned in the first switch. */
        KASSERT(0);
        return EINVAL;
    }
}
/*
 * Open an RL02/RL11 disk unit.
 *
 * NOTE(review): this function is truncated in the visible chunk — the
 * text ends inside rlopen after the first-open label read (the final
 * brace below closes the DK_CLOSED block, not the function); the
 * remainder, including the bad1: label targeted by the gotos, is not
 * shown here.
 */
int
rlopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
    struct rl_softc * const rc =
        device_lookup_private(&rl_cd, DISKUNIT(dev));
    struct rlc_softc *sc;
    int error, part, mask;
    struct disklabel *dl;
    const char *msg;

    /*
     * Make sure this is a reasonable open request.
     */
    if (rc == NULL)
        return ENXIO;

    sc = rc->rc_rlc;
    part = DISKPART(dev);
    mutex_enter(&rc->rc_disk.dk_openlock);

    /*
     * If there are wedges, and this is not RAW_PART, then we
     * need to fail.
     */
    if (rc->rc_disk.dk_nwedges != 0 && part != RAW_PART) {
        error = EBUSY;
        goto bad1;
    }

    /* Check that the disk actually is useable */
    msg = rlstate(sc, rc->rc_hwid);
    if (msg == NULL || msg == rlstates[RLMP_UNLOAD] ||
        msg == rlstates[RLMP_SPUNDOWN]) {
        error = ENXIO;
        goto bad1;
    }
    /*
     * If this is the first open; read in where on the disk we are.
     */
    dl = rc->rc_disk.dk_label;
    if (rc->rc_state == DK_CLOSED) {
        u_int16_t mp;
        int maj;
        /* Ask the drive for its header status. */
        RL_WREG(RL_CS, RLCS_RHDR|(rc->rc_hwid << RLCS_USHFT));
        waitcrdy(sc);
        mp = RL_RREG(RL_MP);
        /* Decode current head and cylinder from the header word. */
        rc->rc_head = ((mp & RLMP_HS) == RLMP_HS);
        rc->rc_cyl = (mp >> 7) & 0777;
        rc->rc_state = DK_OPEN;
        /* Get disk label */
        maj = cdevsw_lookup_major(&rl_cdevsw);
        if ((msg = readdisklabel(MAKEDISKDEV(maj,
            device_unit(rc->rc_dev), RAW_PART), rlstrategy, dl, NULL)))
            aprint_normal_dev(rc->rc_dev, "%s", msg);
        aprint_normal_dev(rc->rc_dev, "size %d sectors\n",
            dl->d_secperunit);
    }
/* ARGSUSED */
/*
 * Open a ccd partition: refresh the label on a first open, verify the
 * partition exists, and record the open so the unit cannot be
 * unconfigured while in use.
 */
int
ccdopen(dev_t dev, int flags, int fmt, struct proc *p)
{
    struct ccd_softc *cs;
    struct disklabel *lp;
    int unit = ccdunit(dev);
    int error, part, pmask;

    CCD_DPRINTF(CCDB_FOLLOW, ("ccdopen(%x, %x)\n", dev, flags));

    if (unit >= numccd)
        return (ENXIO);
    cs = &ccd_softc[unit];

    if ((error = ccdlock(cs)) != 0)
        return (error);

    lp = cs->sc_dkdev.dk_label;
    part = DISKPART(dev);
    pmask = (1 << part);

    /*
     * If we're initialized, check to see if there are any other
     * open partitions.  If not, then it's safe to update
     * the in-core disklabel.
     */
    if ((cs->sc_flags & CCDF_INITED) && (cs->sc_dkdev.dk_openmask == 0))
        ccdgetdisklabel(dev, cs, lp, cs->sc_dkdev.dk_cpulabel, 0);

    /* Check that the partition exists. */
    if (part != RAW_PART &&
        ((cs->sc_flags & CCDF_INITED) == 0 ||
         part >= lp->d_npartitions ||
         lp->d_partitions[part].p_fstype == FS_UNUSED)) {
        error = ENXIO;
        goto out;
    }

    /* Prevent our unit from being unconfigured while open. */
    if (fmt == S_IFCHR)
        cs->sc_dkdev.dk_copenmask |= pmask;
    else if (fmt == S_IFBLK)
        cs->sc_dkdev.dk_bopenmask |= pmask;
    cs->sc_dkdev.dk_openmask =
        cs->sc_dkdev.dk_copenmask | cs->sc_dkdev.dk_bopenmask;

out:
    ccdunlock(cs);
    return (error);
}
/*
 * Take a dump.
 *
 * Fix: the `dumping` recursion latch was set to 1 and never cleared on
 * the EFAULT (misaligned size), EINVAL (out of bounds) and driver-error
 * returns, so a single failed attempt permanently made every later dump
 * fail with EFAULT.  All exits after the latch is set now go through
 * `out:`, which clears it.
 */
int
lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
    struct ld_softc *sc;
    struct disklabel *lp;
    int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
    static int dumping;

    unit = DISKUNIT(dev);
    if ((sc = device_lookup(&ld_cd, unit)) == NULL)
        return (ENXIO);
    if ((sc->sc_flags & LDF_ENABLED) == 0)
        return (ENODEV);
    if (sc->sc_dump == NULL)
        return (ENXIO);

    /* Check if recursive dump; if so, punt. */
    if (dumping)
        return (EFAULT);
    dumping = 1;

    /* Convert to disk sectors.  Request must be a multiple of size. */
    part = DISKPART(dev);
    lp = sc->sc_dk.dk_label;
    if ((size % lp->d_secsize) != 0) {
        rv = EFAULT;
        goto out;
    }
    towrt = size / lp->d_secsize;
    blkno = dbtob(blkno) / lp->d_secsize; /* blkno in DEV_BSIZE units */

    nsects = lp->d_partitions[part].p_size;
    sectoff = lp->d_partitions[part].p_offset;

    /* Check transfer bounds against partition size. */
    if ((blkno < 0) || ((blkno + towrt) > nsects)) {
        rv = EINVAL;
        goto out;
    }

    /* Offset block number to start of partition. */
    blkno += sectoff;

    /* Start dumping and return when done. */
    maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
    rv = 0;
    while (towrt > 0) {
        nblk = min(maxblkcnt, towrt);

        if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0)
            goto out;

        towrt -= nblk;
        blkno += nblk;
        va += nblk * sc->sc_secsize;
    }

out:
    /* Always release the recursion latch, even on failure. */
    dumping = 0;
    return (rv);
}
/*
 * Build wd->sc_wdc_bio from the buf and hand the transfer to the WDC
 * layer; after repeated failures the transfer is retried one sector at
 * a time so the failing sector can be identified.
 */
void
__wdstart(struct wd_softc *wd, struct buf *bp)
{
    struct disklabel *lp;
    u_int64_t nsecs;

    lp = wd->sc_dk.dk_label;
    /* Absolute sector: b_blkno plus the partition offset in blocks. */
    wd->sc_wdc_bio.blkno = DL_BLKTOSEC(lp, bp->b_blkno +
        DL_SECTOBLK(lp, DL_GETPOFFSET(&lp->d_partitions[
        DISKPART(bp->b_dev)])));
    wd->sc_wdc_bio.blkdone =0;
    wd->sc_bp = bp;
    /*
     * If we're retrying, retry in single-sector mode. This will give us
     * the sector number of the problem, and will eventually allow the
     * transfer to succeed.
     */
    if (wd->retries >= WDIORETRIES_SINGLE)
        wd->sc_wdc_bio.flags = ATA_SINGLE;
    else
        wd->sc_wdc_bio.flags = 0;
    nsecs = howmany(bp->b_bcount, lp->d_secsize);
    if ((wd->sc_flags & WDF_LBA48) &&
        /* use LBA48 only if really need */
        ((wd->sc_wdc_bio.blkno + nsecs - 1 >= LBA48_THRESHOLD) ||
        (nsecs > 0xff)))
        wd->sc_wdc_bio.flags |= ATA_LBA48;
    if (wd->sc_flags & WDF_LBA)
        wd->sc_wdc_bio.flags |= ATA_LBA;
    if (bp->b_flags & B_READ)
        wd->sc_wdc_bio.flags |= ATA_READ;
    wd->sc_wdc_bio.bcount = bp->b_bcount;
    wd->sc_wdc_bio.databuf = bp->b_data;
    wd->sc_wdc_bio.wd = wd;
    /* Instrumentation. */
    disk_busy(&wd->sc_dk);
    switch (wdc_ata_bio(wd->drvp, &wd->sc_wdc_bio)) {
    case WDC_TRY_AGAIN:
        /* Controller busy: retry via the restart timeout. */
        timeout_add_sec(&wd->sc_restart_timeout, 1);
        break;
    case WDC_QUEUED:
        /* Transfer accepted; completion arrives asynchronously. */
        break;
    case WDC_COMPLETE:
        /*
         * This code is never executed because we never set
         * the ATA_POLL flag above
         */
#if 0
        if (wd->sc_wdc_bio.flags & ATA_POLL)
            wddone(wd);
#endif
        break;
    default:
        panic("__wdstart: bad return code from wdc_ata_bio()");
    }
}
/*
 * Close the device. Only called if we are the last occurrence of an open
 * device. Convenient now but usually a pain.
 *
 * On the last close of any partition: flush dirty data, re-allow media
 * removal, drop the OPEN/MEDIA_LOADED state, perform any deferred
 * eject, and stop the periodic timeout.
 */
int
sdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
    struct sd_softc *sc;
    int part = DISKPART(dev);
    int error;

    sc = sdlookup(DISKUNIT(dev));
    if (sc == NULL)
        return (ENXIO);
    if (sc->flags & SDF_DYING) {
        /* Device is detaching: drop our reference and bail. */
        device_unref(&sc->sc_dev);
        return (ENXIO);
    }

    if ((error = sdlock(sc)) != 0) {
        device_unref(&sc->sc_dev);
        return (error);
    }

    /* Clear this partition from the per-mode open masks. */
    switch (fmt) {
    case S_IFCHR:
        sc->sc_dk.dk_copenmask &= ~(1 << part);
        break;
    case S_IFBLK:
        sc->sc_dk.dk_bopenmask &= ~(1 << part);
        break;
    }
    sc->sc_dk.dk_openmask =
        sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

    if (sc->sc_dk.dk_openmask == 0) {
        /* Last close: write out any cached dirty data. */
        if ((sc->flags & SDF_DIRTY) != 0)
            sd_flush(sc, 0);

        /* Re-allow media removal on removable devices. */
        if ((sc->sc_link->flags & SDEV_REMOVABLE) != 0)
            scsi_prevent(sc->sc_link, PR_ALLOW,
                SCSI_IGNORE_ILLEGAL_REQUEST |
                SCSI_IGNORE_NOT_READY | SCSI_SILENT);
        sc->sc_link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);

        if (sc->sc_link->flags & SDEV_EJECTING) {
            /* A prior eject request was deferred until last close. */
            scsi_start(sc->sc_link, SSS_STOP|SSS_LOEJ, 0);
            sc->sc_link->flags &= ~SDEV_EJECTING;
        }

        timeout_del(&sc->sc_timeout);
    }

    sdunlock(sc);
    device_unref(&sc->sc_dev);
    return 0;
}