/*
 * Default bufq insertion policy: requests are steered into one of
 * three internal queues by the nice value of the issuing process
 * (kernel/favored, normal, niced-down), then disksorted within it.
 */
void
bufq_default_add(struct bufq *bq, struct buf *bp)
{
	struct bufq_default *bufq = (struct bufq_default *)bq;
	struct proc *p = bp->b_proc;
	int qi;

	if (p == NULL || p->p_nice < NZERO)
		qi = 0;		/* no process context, or favored (nice < 0) */
	else
		qi = (p->p_nice == NZERO) ? 1 : 2;

	disksort(&bufq->bufq_head[qi], bp);
}
/* * Actually translate the requested transfer into one the physical driver can * understand. The transfer is described by a buf and will include only one * physical transfer. */ void cdstrategy(struct buf *bp) { struct cd_softc *cd; int s; if ((cd = cdlookup(DISKUNIT(bp->b_dev))) == NULL) { bp->b_error = ENXIO; goto bad; } SC_DEBUG(cd->sc_link, SDEV_DB2, ("cdstrategy: %ld bytes @ blk %d\n", bp->b_bcount, bp->b_blkno)); /* * If the device has been made invalid, error out * maybe the media changed, or no media loaded */ if ((cd->sc_link->flags & SDEV_MEDIA_LOADED) == 0) { bp->b_error = EIO; goto bad; } /* * The transfer must be a whole number of blocks. */ if ((bp->b_bcount % cd->sc_dk.dk_label->d_secsize) != 0) { bp->b_error = EINVAL; goto bad; } /* * If it's a null transfer, return immediately */ if (bp->b_bcount == 0) goto done; /* * Do bounds checking, adjust transfer. if error, process. * If end of partition, just return. */ if (bounds_check_with_label(bp, cd->sc_dk.dk_label, (cd->flags & (CDF_WLABEL|CDF_LABELLING)) != 0) <= 0) goto done; s = splbio(); /* * Place it in the queue of disk activities for this disk */ disksort(&cd->buf_queue, bp); /* * Tell the device to get going on the transfer if it's * not doing anything, otherwise just wait for completion */ cdstart(cd); device_unref(&cd->sc_dev); splx(s); return; bad: bp->b_flags |= B_ERROR; done: /* * Correctly set the buf to indicate a completed xfer */ bp->b_resid = bp->b_bcount; s = splbio(); biodone(bp); splx(s); if (cd != NULL) device_unref(&cd->sc_dev); }
/* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ void sdstrategy(struct buf *bp) { struct sd_softc *sc; int s; sc = sdlookup(DISKUNIT(bp->b_dev)); if (sc == NULL) { bp->b_error = ENXIO; goto bad; } if (sc->flags & SDF_DYING) { bp->b_error = ENXIO; goto bad; } SC_DEBUG(sc->sc_link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %d\n", bp->b_bcount, bp->b_blkno)); /* * If the device has been made invalid, error out */ if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0) { if (sc->sc_link->flags & SDEV_OPEN) bp->b_error = EIO; else bp->b_error = ENODEV; goto bad; } /* * If it's a null transfer, return immediately */ if (bp->b_bcount == 0) goto done; /* * The transfer must be a whole number of sectors. */ if ((bp->b_bcount % sc->sc_dk.dk_label->d_secsize) != 0) { bp->b_error = EINVAL; goto bad; } /* * Do bounds checking, adjust transfer. if error, process. * If end of partition, just return. */ if (bounds_check_with_label(bp, sc->sc_dk.dk_label, (sc->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0) goto done; /* * Place it in the queue of disk activities for this disk */ mtx_enter(&sc->sc_buf_mtx); disksort(&sc->sc_buf_queue, bp); mtx_leave(&sc->sc_buf_mtx); /* * Tell the device to get going on the transfer if it's * not doing anything, otherwise just wait for completion */ sdstart(sc); device_unref(&sc->sc_dev); return; bad: bp->b_flags |= B_ERROR; done: /* * Correctly set the buf to indicate a completed xfer */ bp->b_resid = bp->b_bcount; s = splbio(); biodone(bp); splx(s); if (sc != NULL) device_unref(&sc->sc_dev); }
void scdisk_strategy( register io_req_t ior) { target_info_t *tgt; register scsi_softc_t *sc; register int i = ior->io_unit, part; register unsigned rec, sec, max; spl_t s; int recs_per_sec; /* * Don't do anything for a pass-through device, just acknowledge * the request. This gives us a null SCSI device. */ if (rzpassthru(i)) { ior->io_residual = 0; iodone(ior); return; } sc = scsi_softc[rzcontroller(i)]; tgt = sc->target[rzslave(i)]; part = rzpartition(i); /* * Validate request */ /* readonly ? */ if ((tgt->flags & TGT_READONLY) && (ior->io_op & (IO_READ|IO_INTERNAL) == 0)) { ior->io_error = D_READ_ONLY; ior->io_op |= IO_ERROR; ior->io_residual = ior->io_count; iodone(ior); return; } rec = ior->io_recnum; recs_per_sec = tgt->dev_info.disk.l.d_secsize / tgt->block_size; max = tgt->dev_info.disk.l.d_partitions[part].p_size; if ((int)max == -1) max = tgt->dev_info.disk.l.d_secperunit - tgt->dev_info.disk.l.d_partitions[part].p_offset; max *= recs_per_sec; i = btodb(ior->io_count + tgt->block_size - 1); if (rec >= max) { ior->io_error = D_INVALID_RECNUM; ior->io_op |= IO_ERROR; ior->io_residual = ior->io_count; iodone(ior); return; } if (((rec + i) > max) || (ior->io_count < 0) || #if later ((rec <= LABELSECTOR) && /* don't clobber the disk label */ (!(ior->io_op & IO_READ)) && ((tgt->flags & TGT_WRITE_LABEL) == 0)) #else FALSE #endif ) { ior->io_error = D_INVALID_SIZE; ior->io_op |= IO_ERROR; ior->io_residual = ior->io_count; iodone(ior); return; } /* * Find location on disk: secno and cyl (for disksort) */ rec += tgt->dev_info.disk.l.d_partitions[part].p_offset * recs_per_sec; /* * Enforce a 1-sector minimum cylinder size for disksort(). * Devices such as CD-ROMs do not define cylinder sizes. 
*/ if (tgt->dev_info.disk.l.d_secpercyl == 0) tgt->dev_info.disk.l.d_secpercyl = 1; ior->io_residual = (rec / recs_per_sec) / tgt->dev_info.disk.l.d_secpercyl; /* * Enqueue operation */ s = splbio(); simple_lock(&tgt->target_lock); if (tgt->ior) { disksort(tgt->ior, ior); #if CHAINED_IOS if ((tgt->flags & TGT_CHAINED_IO_SUPPORT) && !(ior->io_op & IO_SGLIST)) { ior->io_seg_count = atop(round_page(ior->io_count)); if (ior->io_prev && can_chain_io_reqs(ior->io_prev, ior)) chain_io_reqs(ior->io_prev, ior, tgt->ior); if (ior->io_next && can_chain_io_reqs(ior, ior->io_next)) chain_io_reqs(ior, ior->io_next, tgt->ior); } #endif /* CHAINED_IOS */ simple_unlock(&tgt->target_lock); splx(s); } else { ior->io_next = 0; ior->io_prev = 0; tgt->ior = ior; simple_unlock(&tgt->target_lock); splx(s); scdisk_start(tgt,FALSE); } return; }
/*
 * lowinit -- never actually executed.
 *
 * NOTE(review): this routine exists solely to generate references to
 * symbols that are otherwise reached only from assembly, interrupt
 * vectors, or external tools (savecore, netstat, arp, vmstat/iostat),
 * so that the loader and lint keep them.  The recursive lowinit()
 * call, the self-assignments (x = x) and the calls with dummy
 * arguments below are deliberate pseudo-uses -- do not remove or
 * "clean up" any of them.
 */
lowinit()
{
#if !defined(GPROF)
	caddr_t cp;
#endif
	extern int dumpmag;
	extern int rthashsize;
	extern int arptab_size;
	extern int dk_ndrive;
	extern struct domain unixdomain;
#ifdef INET
	extern struct domain inetdomain;
#endif
#include "imp.h"
#if NIMP > 0
	extern struct domain impdomain;
#endif
#ifdef NS
	extern struct domain nsdomain;
#endif

	/* cpp messes these up for lint so put them here */
	/* Link each configured protocol domain onto the global list. */
	unixdomain.dom_next = domains;
	domains = &unixdomain;
#ifdef INET
	inetdomain.dom_next = domains;
	domains = &inetdomain;
#endif
#if NIMP > 0
	impdomain.dom_next = domains;
	domains = &impdomain;
#endif
#ifdef NS
	nsdomain.dom_next = domains;
	domains = &nsdomain;
#endif

	dumpmag = 0;			/* used only by savecore */
	rthashsize = rthashsize;	/* used by netstat, etc. */
	arptab_size = arptab_size;	/* used by arp command */
	dk_ndrive = dk_ndrive;		/* used by vmstat, iostat, etc. */

	/*
	 * Pseudo-uses of globals.
	 */
	lowinit();
	intstack[0] = intstack[1];
	maxmem = physmem = freemem = 0;
	u = u;
	fixctlrmask();
	main(0);
	Xustray();

	/*
	 * Routines called from interrupt vectors.
	 */
	panic("Machine check");
	printf("Write timeout");
	consdin();
	consdout();
	hardclock((caddr_t)0, 0);
	softclock((caddr_t)0, 0);
	trap((unsigned)0, (unsigned)0, (unsigned)0, (unsigned)0);
	memerr();

	/*
	 * Miscellaneous routines called from configurable
	 * drivers.
	 */
	disksort((struct buf *)0, (struct buf *)0);
	(void) uwritec((struct uio *)0);
	(void) todr();
	if (vmemall((struct pte *)0, 0, (struct proc *)0, 0))
		return;		/* use value */
	boothowto = 0;
	dumpflag = 0;
	dumpflag = dumpflag;
#ifdef KADB
	bootesym = 0;
	bootesym = bootesym;
#endif
#if !defined(GPROF)
	cp = (caddr_t)&etext;
	cp = cp;
#endif
}
/* * Read/Write routine for a buffer. Finds the proper unit, * range checks arguments and schedules the transfer. Does not wait * for the transfer to complete. All I/O requests must be a multiple * of a sector in length. */ int hd_strategy(struct buf *bp) { u_int hdmajor = major(bp->b_dev); u_int unit = dkunit(bp->b_dev); u_int part = dkpart(bp->b_dev); struct disk *du = &(hd[hdmajor].disk[unit]); Semaphore *numbufs = &(hd[hdmajor].numbufs); struct buf *dp; struct partition *p; long maxsz, sz; #ifdef MONITOR printf("hd_strategy: Called with bp=0x%x, dev=%d, blck=%d by %s\n", bp,bp->b_dev,bp->b_blkno,myprocname(returnlink_(bp))); #endif /* Simple parameter check */ if ((unit >= MAXDISKS) || (bp->b_blkno < 0) || (part >= du->dk_lab.d_npartitions)) { printf("hd_strategy: major=%d, unit=%d, part=%d, blkno=%d, bcount=%d\n", hdmajor,unit,part,bp->b_blkno,bp->b_bcount); printf("hd: Error in hd_strategy"); bp->b_flags |= B_ERROR; goto bad; } /* Check for write protection */ if (du->dk_protected && ((bp->b_flags & B_READ) == 0)) { printf("hd_strategy: %d:%d: write protected\n",hdmajor,unit); goto bad; } if (DISKSTATE(du->dk_state) != OPEN) goto q; /* Determine size of xfer and make sure it fits. */ p = &(du->dk_lab.d_partitions[part]); maxsz = p->p_size; sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; /* XXX Check disk label writing at a later stage */ /* Check the parameters */ if ((bp->b_blkno < 0) || ((bp->b_blkno + sz) > maxsz)) { /* if exactly at end of disk, return an EOF. 
*/ if (bp->b_blkno == maxsz) { bp->b_resid = bp->b_bcount; biodone(bp); return(0); } /* or truncate if part of it fits */ sz = maxsz - bp->b_blkno; if (sz <= 0) { printf("hd%d: invalid size %d\n",unit,sz); goto bad; } bp->b_bcount = sz << DEV_BSHIFT; } bp->b_cylin = (bp->b_blkno + p->p_offset) / du->dk_lab.d_secpercyl; q: dp = &(du->dk_queue); /* Lock buffer head, add item, free buffer head and signal */ Wait(&du->dk_guard); /* Lock */ disksort(dp, bp); /* Add */ Signal(&du->dk_guard); /* Free */ Signal(&du->dk_numq); /* Signal another buffer in this q */ Signal(numbufs); /* Signal Device Driver */ #ifdef MONITOR printf("hd_strategy: Done OK\n"); #endif return(0); bad: #ifdef MONITOR printf("hd_strategy: Done Failed\n"); #endif printf("hd_strategy: bad error\n"); bp->b_error = EINVAL; biodone(bp); return(1); }