Example #1
/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that
 * cached blocks are of the correct size.
 */
struct buf *
getblk(register struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp, *bh;
	int x;

	for (;;) {
		if ((bp = incore(vp, blkno)) != NULL) {
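			/*
			 * Found in the cache.  Raise the priority level
			 * before inspecting b_flags, which the I/O-completion
			 * interrupt can change at any moment.
			 */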
			x = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep (bp, PRIBIO);
				splx(x);
				continue;
			}
			bp->b_flags |= B_BUSY | B_CACHE;
			bremfree(bp);
			if (size > bp->b_bufsize)
				panic("now what do we do?");
			/* if (bp->b_bufsize != size) allocbuf(bp, size); */
		} else {
			if ((bp = getnewbuf(size)) == NULL)
				continue;
			bp->b_blkno = bp->b_lblkno = blkno;
			bgetvp(vp, bp);
			x = splbio();
			bh = BUFHASH(vp, blkno);
			binshash(bp, bh);
			bp->b_flags = B_BUSY;
		}
		splx(x);
		return (bp);
	}
}
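Almost every example on this page repeats one idiom: splbio() raises the
processor interrupt priority so block-I/O interrupt handlers cannot run,
the shared structures are updated, and splx() restores the saved level.
Below is a minimal sketch of that pattern for a BSD kernel of the same
vintage; the foo_req/foo_softc types and the foo_enqueue function are
invented for illustration and come from none of the sources quoted here.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>

struct foo_req {
	TAILQ_ENTRY(foo_req) fr_link;
};

struct foo_softc {
	/* Also consumed by the driver's interrupt handler. */
	TAILQ_HEAD(, foo_req) sc_queue;
};

/*
 * Append a request while disk interrupts are blocked, so the interrupt
 * handler can never observe the list half-updated.
 */
static void
foo_enqueue(struct foo_softc *sc, struct foo_req *fr)
{
	int s;

	s = splbio();		/* block block-I/O interrupts */
	TAILQ_INSERT_TAIL(&sc->sc_queue, fr, fr_link);
	splx(s);		/* restore the previous priority level */
}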
Example #2
int
escgo(struct esc_softc *dev, struct esc_pending *pendp)
{
	int	 s;
	char	*buf;

	buf    = pendp->xs->data;

	if (escselect(dev, pendp, (char *)pendp->xs->cmd, pendp->xs->cmdlen,
		      buf, pendp->xs->datalen, ESC_SELECT_RS)) {
		/*
		 * We got the command going so the esc_pending struct is now
		 * free to reuse.
		 */

		s = splbio();
		TAILQ_INSERT_TAIL(&dev->sc_xs_free, pendp, link);
		splx(s);
	} else {
		/*
		 * We couldn't make the command fly so we have to wait. The
		 * struct MUST be inserted at the head to keep the order of
		 * the commands.
		 */

		s = splbio();
		TAILQ_INSERT_HEAD(&dev->sc_xs_pending, pendp, link);
		splx(s);
	}

	return(0);
}
Example #3
static void
stpc_setup_channel(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	int channel = chp->ch_channel;
	struct ata_drive_datas *drvp;
	u_int32_t idedma_ctl, idetim;
	int drive, bits[2], s;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;
	bits[0] = bits[1] = 0x7F60; /* assume PIO2/DMA0 */

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* add timing values, setup DMA if needed */
		if ((atac->atac_cap & ATAC_CAP_DMA) &&
		    (drvp->drive_flags & ATA_DRIVE_DMA)) {
			/* use Multiword DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
			splx(s);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
			bits[drive] = 0xe; /* IOCHRDY,wr/post,rd/prefetch */
		} else {
			/* PIO only */
			s = splbio();
			drvp->drive_flags &= ~(ATA_DRIVE_UDMA | ATA_DRIVE_DMA);
			splx(s);
			bits[drive] = 0x8; /* IOCHRDY */
		}
		bits[drive] |= dmatbl[drvp->DMA_mode] | piotbl[drvp->PIO_mode];
	}
#if 0
	idetim = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    (channel == 0) ? 0x40 : 0x44);
	aprint_normal("wdc%d: IDETIM %08x -> %08x\n",
	    channel, idetim, (bits[1] << 16) | bits[0]);
#endif
	idetim = (bits[1] << 16) | bits[0];
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    (channel == 0) ? 0x40 : 0x44, idetim);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
Example #4
/*
 * ata_reset_channel:
 *
 *	Reset an ATA channel.
 *
 *	MUST BE CALLED AT splbio()!
 */
void
ata_reset_channel(struct ata_channel *chp, int flags)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;

#ifdef ATA_DEBUG
	int spl1, spl2;

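	/*
	 * splbio() returns the priority level that was in effect before
	 * the call, so if two back-to-back calls return different values,
	 * the first call actually raised the level: the caller was not
	 * already at splbio().
	 */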
	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("ata_reset_channel: not at splbio()\n");
		panic("ata_reset_channel");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	chp->ch_queue->queue_freeze++;

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n",
	    flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* No need to schedule a reset more than one time. */
			chp->ch_queue->queue_freeze--;
			return;
		}
		chp->ch_flags |= ATACH_TH_RESET;
		chp->ch_reset_flags = flags & (AT_RST_EMERG | AT_RST_NOCMD);
		wakeup(&chp->ch_thread);
		return;
	}

	(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++)
		chp->ch_drive[drive].state = 0;

	chp->ch_flags &= ~ATACH_TH_RESET;
	if ((flags & AT_RST_EMERG) == 0)  {
		chp->ch_queue->queue_freeze--;
		atastart(chp);
	} else {
		/* make sure that we can use polled commands */
		TAILQ_INIT(&chp->ch_queue->queue_xfer);
		chp->ch_queue->queue_freeze = 0;
		chp->ch_queue->active_xfer = NULL;
	}
}
Example #5
/*
 * Buffer cleaning daemon.
 */
void
buf_daemon(struct proc *p)
{
	int s;
	struct buf *bp;
	struct timeval starttime, timediff;

	cleanerproc = curproc;

	for (;;) {
		if (numdirtypages < hidirtypages) {
			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
		}

		starttime = time;
		s = splbio();
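		/*
		 * The buffer queues are also manipulated at interrupt time
		 * (biodone() calls brelse() for async buffers), so the dirty
		 * queue may only be walked and emptied at splbio.
		 */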
		while ((bp = TAILQ_FIRST(&bufqueues[BQ_DIRTY]))) {
			bremfree(bp);
			SET(bp->b_flags, B_BUSY);
			splx(s);

			if (ISSET(bp->b_flags, B_INVAL)) {
				brelse(bp);
				s = splbio();
				continue;
			}
#ifdef DIAGNOSTIC
			if (!ISSET(bp->b_flags, B_DELWRI))
				panic("Clean buffer on BQ_DIRTY");
#endif
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    !ISSET(bp->b_flags, B_DEFERRED) &&
			    buf_countdeps(bp, 0, 1)) {
				SET(bp->b_flags, B_DEFERRED);
				s = splbio();
				numfreepages += btoc(bp->b_bufsize);
				numdirtypages += btoc(bp->b_bufsize);
				binstailfree(bp, &bufqueues[BQ_DIRTY]);
				CLR(bp->b_flags, B_BUSY);
				continue;
			}

			bawrite(bp);

			if (numdirtypages < lodirtypages)
				break;
			/* Never allow processing to run for more than 1 sec */
			timersub(&time, &starttime, &timediff);
			if (timediff.tv_sec)
				break;

			s = splbio();
		}
		splx(s);	/* leave splbio before (possibly) sleeping again */
	}
}
Example #6
/*
 * used by specific esc controller
 */
void
esc_scsi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct esc_softc	*dev = device_private(chan->chan_adapter->adapt_dev);
	struct scsipi_periph	*periph;
	struct esc_pending	*pendp;
	int			 flags, s, target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;
		target = periph->periph_target;

		if (flags & XS_CTL_DATA_UIO)
			panic("esc: scsi data uio requested");

		if ((flags & XS_CTL_POLL) && (dev->sc_flags & ESC_ACTIVE))
			panic("esc_scsicmd: busy");

		/* Get hold of an esc_pending block. */
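		/* The free list is refilled from the interrupt-driven
		 * completion path, so it may only be touched at splbio. */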
		s = splbio();
		pendp = dev->sc_xs_free.tqh_first;
		if (pendp == NULL) {
			splx(s);
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		TAILQ_REMOVE(&dev->sc_xs_free, pendp, link);
		pendp->xs = xs;
		splx(s);

		/* If the chip is busy OR the unit is busy, we must wait our turn. */
		if ((dev->sc_flags & ESC_ACTIVE) ||
		    (dev->sc_nexus[target].flags & ESC_NF_UNIT_BUSY)) {
			s = splbio();
			TAILQ_INSERT_TAIL(&dev->sc_xs_pending, pendp, link);
			splx(s);
		} else
			esc_donextcmd(dev, pendp);

		return;
	case ADAPTER_REQ_GROW_RESOURCES:
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}

}
Example #7
/*
 * Actually select the unit, whereby the whole scsi-process is started.
 */
void
esc_donextcmd(struct esc_softc *dev, struct esc_pending *pendp)
{
	int	s;

/*
 * Special case for scsi unit reset. I think this is waterproof. We first
 * select the unit during splbio. We then cycle through the generated
 * interrupts until the interrupt routine signals that the unit has
 * acknowledged the reset. After that we have to wait a reset-to-select
 * delay before anything else can happen.
 */
	if (pendp->xs->xs_control & XS_CTL_RESET) {
		struct nexus	*nexus;

		s = splbio();
		while(!escselect(dev, pendp, 0, 0, 0, 0, ESC_SELECT_K)) {
			splx(s);
			delay(10);
			s = splbio();
		}

		nexus = dev->sc_cur_nexus;
		while(nexus->flags & ESC_NF_UNIT_BUSY) {
			esciwait(dev);
			escintr(dev);
		}

		nexus->flags |= ESC_NF_UNIT_BUSY;
		splx(s);

		escreset(dev, 0);

		s = splbio();
		nexus->flags &= ~ESC_NF_UNIT_BUSY;
		splx(s);
	}

/*
 * If we are polling, go to splbio and perform the command, else we poke
 * the scsi-bus via escgo to get the interrupt machine going.
 */
	if (pendp->xs->xs_control & XS_CTL_POLL) {
		s = splbio();
		escicmd(dev, pendp);
		TAILQ_INSERT_TAIL(&dev->sc_xs_free, pendp, link);
		splx(s);
	} else {
		escgo(dev, pendp);
		return;
	}
}
Example #8
/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
wdstrategy(struct buf *bp)
{
	struct wd_softc *wd;
	int s;

	wd = wdlookup(DISKUNIT(bp->b_dev));
	if (wd == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}

	WDCDEBUG_PRINT(("wdstrategy (%s)\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	/* If device invalidated (e.g. media change, door open), error. */
	if ((wd->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, wd->sc_dk.dk_label) == -1)
		goto done;

	/* Check that the number of sectors can fit in a byte. */
	if ((bp->b_bcount / wd->sc_dk.dk_label->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* Queue transfer on drive, activate drive and controller if idle. */
	bufq_queue(&wd->sc_bufq, bp);
	s = splbio();
	wdstart(wd);
	splx(s);
	device_unref(&wd->sc_dev);
	return;

 bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
 done:
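	/* Complete the buf at splbio, just as an interrupt-time completion
	 * would, so biodone() always runs at a consistent priority. */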
	s = splbio();
	biodone(bp);
	splx(s);
	if (wd != NULL)
		device_unref(&wd->sc_dev);
}
Example #9
/*
 * Queue a transfer request, and if possible, hand it to the controller.
 */
void
rastrategy(struct buf *bp)
{
	struct ra_softc *ra = mscp_device_lookup(bp->b_dev);
	int b;

	/*
	 * Make sure this is a reasonable drive to use.
	 */
	if (ra == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}
	/*
	 * If drive is open `raw' or reading label, let it at it.
	 */
	if (ra->ra_state == DK_RDLABEL) {
		/* Make some statistics... /bqt */
		b = splbio();
		disk_busy(&ra->ra_disk);
		splx(b);
		mscp_strategy(bp, device_parent(ra->ra_dev));
		return;
	}

	/* If disk is not online, try to put it online */
	if (ra->ra_state == DK_CLOSED)
		if (ra_putonline(bp->b_dev, ra) == MSCP_FAILED) {
			bp->b_error = EIO;
			goto done;
		}

	/*
	 * Determine the size of the transfer, and make sure it is
	 * within the boundaries of the partition.
	 */
	if (bounds_check_with_label(&ra->ra_disk, bp, ra->ra_wlabel) <= 0)
		goto done;

	/* Make some statistics... /bqt */
	b = splbio();
	disk_busy(&ra->ra_disk);
	splx(b);
	mscp_strategy(bp, device_parent(ra->ra_dev));
	return;

done:
	biodone(bp);
}
Example #10
void
mcdstart(struct mcd_softc *sc)
{
	struct buf *bp;
	int s;

loop:
	s = splbio();

	if ((bp = bufq_get(sc->buf_queue)) == NULL) {
		/* Nothing to do. */
		sc->active = 0;
		splx(s);
		return;
	}

	/* Block found to process. */
	MCD_TRACE("start: found block bp=0x%p\n", bp);
	splx(s);

	/* Changed media? */
	if ((sc->flags & MCDF_LOADED) == 0) {
		MCD_TRACE("start: drive not valid%s", "\n");
		bp->b_error = EIO;
		biodone(bp);
		goto loop;
	}

	sc->active = 1;

	/* Instrumentation. */
	s = splbio();
	disk_busy(&sc->sc_dk);
	splx(s);

	sc->mbx.retry = MCD_RDRETRIES;
	sc->mbx.bp = bp;
	sc->mbx.blkno = bp->b_rawblkno;
	sc->mbx.nblk = bp->b_bcount / sc->blksize;
	sc->mbx.sz = sc->blksize;
	sc->mbx.skip = 0;
	sc->mbx.state = MCD_S_BEGIN;
	sc->mbx.mode = MCD_MD_COOKED;

	s = splbio();
	(void) mcdintr(sc);
	splx(s);
}
Example #11
void
ccdstrategy(struct buf *bp)
{
	int unit = ccdunit(bp->b_dev);
	struct ccd_softc *cs = &ccd_softc[unit];
	int s;
	int wlabel;
	struct disklabel *lp;

	CCD_DPRINTF(CCDB_FOLLOW, ("ccdstrategy(%p): unit %d\n", bp, unit));

	if ((cs->sc_flags & CCDF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR;
		goto done;
	}

	/* If it's a nil transfer, wake up the top half now. */
	if (bp->b_bcount == 0)
		goto done;

	lp = cs->sc_dkdev.dk_label;

	/*
	 * Do bounds checking and adjust transfer.  If there's an
	 * error, the bounds check will flag that for us.
	 */
	wlabel = cs->sc_flags & (CCDF_WLABEL|CCDF_LABELLING);
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(bp, lp, cs->sc_dkdev.dk_cpulabel,
	    wlabel) <= 0)
		goto done;

	bp->b_resid = bp->b_bcount;

	/*
	 * "Start" the unit.
	 */
	s = splbio();
	ccdstart(cs, bp);
	splx(s);
	return;
done:
	s = splbio();
	biodone(bp);
	splx(s);
}
Example #12
/*
 * Pass I/O requests to the memory filesystem process.
 */
int
mfs_strategy(void *v)
{
	struct vop_strategy_args *ap = v;
	struct buf *bp = ap->a_bp;
	struct mfsnode *mfsp;
	struct vnode *vp;
	struct proc *p = curproc;
	int s;

	if (!vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0)
		panic("mfs_strategy: bad dev");

	mfsp = VTOMFS(vp);
	if (p != NULL && mfsp->mfs_pid == p->p_pid) {
		mfs_doio(mfsp, bp);
	} else {
		s = splbio();
		bp->b_actf = mfsp->mfs_buflist;
		mfsp->mfs_buflist = bp;
		splx(s);
		wakeup((caddr_t)vp);
	}
	return (0);
}
Example #13
/*
 * Strategy function for the device.
 */
static void
icapstrategy(struct buf *bp)
{
	struct icap_softc *sc;
	int s;

	DEBUG_PRINT(("icapstrategy\n"), DEBUG_FUNCS);

	/* Assume nothing was transferred until we actually do the I/O. */
	bp->b_resid = bp->b_bcount;

	/* Do we know this device? */
	sc = device_lookup_private(&icap_cd, minor(bp->b_dev));
	if (sc == NULL) {
		DEBUG_PRINT(("icapstrategy: nodev %" PRIx64 "\n",bp->b_dev),
		    DEBUG_ERRORS);
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	/* Add to Q. If Q was empty get it started */
	s = splbio();
	bufq_put(sc->sc_buflist, bp);
	if (bufq_peek(sc->sc_buflist) == bp) {
		icapstart(sc);
	}
	splx(s);
}
Example #14
/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	int s;

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list,
	 *	(4) If a buffer is rewritten, move it to end of dirty list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		s = splbio();
		reassignbuf(bp);
		splx(s);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
	}

	/* If this is a tape block, write the block now. */
	if (major(bp->b_dev) < nblkdev &&
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
Example #15
File: ips.c Project: MarginC/kame
/* clean up so we can unload the driver. */
int ips_adapter_free(ips_softc_t *sc)
{
	int error = 0;
	intrmask_t mask;
	if(sc->state & IPS_DEV_OPEN)
		return EBUSY;
	if((error = ips_diskdev_free(sc)))
		return error;
	if(ips_cmdqueue_free(sc)){
		device_printf(sc->dev,
		     "trying to exit when command queue is not empty!\n");
		return EBUSY;
	}
	DEVICE_PRINTF(1, sc->dev, "free\n");
	mask = splbio();
	untimeout(ips_timeout, sc, sc->timer);
	splx(mask);
	if (mtx_initialized(&sc->cmd_mtx))
		mtx_destroy(&sc->cmd_mtx);

	if(sc->sg_dmatag)
		bus_dma_tag_destroy(sc->sg_dmatag);
	if(sc->command_dmatag)
		bus_dma_tag_destroy(sc->command_dmatag);
	if(sc->device_file)
		destroy_dev(sc->device_file);
	return 0;
}
Example #16
struct twe_ccb *
twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	KASSERT((flags & TWE_CCB_AEN) == 0);

	s = splbio();
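	/*
	 * tsleep() drops the priority level while sleeping and restores
	 * it on wakeup, so each iteration re-checks the free list with
	 * splbio held.
	 */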
	while (__predict_false((ccb =
				SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
		sc->sc_flags |= TWEF_WAIT_CCB;
		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
	}
	SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}
Example #17
/* XXX: should be rewritten using bus_space_read_region? */
static void
z3rambd_altmem_strategy(void *aux, struct buf *bp)
{
	struct z3rambd_softc *sc = aux;
	void *addr;
	size_t off, bpos;
	int s;

	bpos = 0;

	bp->b_resid = bp->b_bcount;
	off = bp->b_blkno << DEV_BSHIFT;

	s = splbio();

	addr = (char *)((char*)sc->sc_va + off);
#ifdef Z3RAMBD_DEBUG
	aprint_normal_dev(sc->sc_dev, "strategy at %x %x\n", (bus_addr_t) addr,
	    (bus_addr_t) kvtop(addr));
#endif /* Z3RAMBD_DEBUG */

	if (bp->b_flags & B_READ)
		memcpy((char *)bp->b_data, addr, bp->b_resid);
	else
		memcpy(addr, (char *)bp->b_data, bp->b_resid);

	splx(s);
}
Example #18
/********************************************************************************
 * Disconnect from the controller completely, in preparation for unload.
 */
static int
twe_detach(device_t dev)
{
    struct twe_softc	*sc = device_get_softc(dev);
    int			s, error;

    debug_called(4);

    error = EBUSY;
    s = splbio();
    if (sc->twe_state & TWE_STATE_OPEN)
	goto out;

    /*	
     * Shut the controller down.
     */
    if ((error = twe_shutdown(dev)))
	goto out;

    twe_free(sc);

    error = 0;
 out:
    splx(s);
    return(error);
}
Example #19
/*
 * Patiently await operations to complete on this buffer.
 * When they do, extract and return any error value associated
 * with the I/O.
 * If an invalid block, force it off the lookup hash chains.
 */
int
biowait(register struct buf *bp)
{
	int x;

	x = splbio();
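	/*
	 * B_DONE is set by the I/O-completion interrupt.  Testing the
	 * flag with interrupts blocked closes the window between the
	 * check and the sleep(), so the wakeup() cannot be missed.
	 */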
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	if((bp->b_flags & B_ERROR) || bp->b_error) {
		if ((bp->b_flags & B_INVAL) == 0) {
			bp->b_flags |= B_INVAL;
			bremhash(bp);
			binshash(bp, bfreelist + BQ_AGE);
		}
		if (!bp->b_error)
			bp->b_error = EIO;
		else
			bp->b_flags |= B_ERROR;
		splx(x);
		return (bp->b_error);
	} else {
		splx(x);
		return (0);
	}
}
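The other half of this handshake runs at interrupt time: the completion
path sets B_DONE and wakes any sleeper. A hedged sketch of that
counterpart, using the same 386BSD-era struct buf fields as the example
above (foo_iodone is an invented name, not the real biodone()):

/*
 * Invented completion hook: called from the controller's interrupt
 * handler, which already runs at (or above) splbio.
 */
static void
foo_iodone(struct buf *bp)
{
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);	/* releases biowait()'s sleep() */
	}
}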
Example #20
File: ips.c Project: MarginC/kame
static int ips_add_waiting_command(ips_softc_t *sc, int (*callback)(ips_command_t *), void *data, unsigned long flags)
{
	intrmask_t mask;
	ips_command_t *command;
	ips_wait_list_t *waiter;
	unsigned long memflags = 0;
	if(IPS_NOWAIT_FLAG & flags)
		memflags = M_NOWAIT;
	waiter = malloc(sizeof(ips_wait_list_t), M_DEVBUF, memflags);
	if(!waiter)
		return ENOMEM;
	mask = splbio();
	if(sc->state & IPS_OFFLINE){
		splx(mask);
		free(waiter, M_DEVBUF);	/* don't leak the waiter on this error path */
		return EIO;
	}
	command = SLIST_FIRST(&sc->free_cmd_list);
	if(command && !(sc->state & IPS_TIMEOUT)){
		SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
		(sc->used_commands)++;
		splx(mask);
		clear_ips_command(command);
		bzero(command->command_buffer, IPS_COMMAND_LEN);
		free(waiter, M_DEVBUF);
		command->arg = data;
		return callback(command);
	}
	DEVICE_PRINTF(1, sc->dev, "adding command to the wait queue\n"); 
	waiter->callback = callback; 
	waiter->data = data;
	STAILQ_INSERT_TAIL(&sc->cmd_wait_list, waiter, next);
	splx(mask);
	return 0;
}
Example #21
File: ips.c Project: MarginC/kame
static void ips_run_waiting_command(ips_softc_t *sc)
{
	ips_wait_list_t *waiter;
	ips_command_t	*command;
	int (*callback)(ips_command_t*);
	intrmask_t mask;

	mask = splbio();
	waiter = STAILQ_FIRST(&sc->cmd_wait_list);
	command = SLIST_FIRST(&sc->free_cmd_list);
	if(!waiter || !command){
		splx(mask);
		return;
	}
	DEVICE_PRINTF(1, sc->dev, "removing command from wait queue\n");
	SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
	STAILQ_REMOVE_HEAD(&sc->cmd_wait_list, next);
	(sc->used_commands)++;
	splx(mask);
	clear_ips_command(command);
	bzero(command->command_buffer, IPS_COMMAND_LEN);
	command->arg = waiter->data;
	callback = waiter->callback;
	free(waiter, M_DEVBUF);
	callback(command);
	return;	
}
Example #22
File: ips.c Project: MarginC/kame
/* returns a free command struct if one is available. 
 * It also blanks out anything that may be a wild pointer/value.
 * Also, command buffers are not freed.  They are
 * small so they are saved and kept dmamapped and loaded.
 */
int ips_get_free_cmd(ips_softc_t *sc, int (*callback)(ips_command_t *), void *data, unsigned long flags)
{
	intrmask_t mask;
	ips_command_t *command;
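	/* Both the free command list and sc->state are shared with the
	 * interrupt path, so the whole check-and-dequeue runs at splbio. */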
	mask = splbio();

	if(sc->state & IPS_OFFLINE){
		splx(mask);
		return EIO;
	}
	command = SLIST_FIRST(&sc->free_cmd_list);
	if(!command || (sc->state & IPS_TIMEOUT)){
		splx(mask);
		if(flags & IPS_NOWAIT_FLAG)
			return EAGAIN;
		return ips_add_waiting_command(sc, callback, data, flags);
	}
	SLIST_REMOVE_HEAD(&sc->free_cmd_list, next);
	(sc->used_commands)++;
	splx(mask);
	clear_ips_command(command);
	bzero(command->command_buffer, IPS_COMMAND_LEN);
	command->arg = data;
	return callback(command);
}
Example #23
static int
hcsc_pdma_in(struct ncr5380_softc *ncr_sc, int phase, int datalen,
    uint8_t *data)
{
	struct hcsc_softc *sc = (struct hcsc_softc *)ncr_sc;
	bus_space_tag_t pdmat = sc->sc_pdmat;
	bus_space_handle_t pdmah = sc->sc_pdmah;
	int s, resid, len;

	s = splbio();

	NCR5380_WRITE(ncr_sc, sci_mode,
	    NCR5380_READ(ncr_sc, sci_mode) | SCI_MODE_DMA);
	NCR5380_WRITE(ncr_sc, sci_irecv, 0);

	resid = datalen;
	while (resid > 0) {
		len = min(resid, HCSC_TSIZE_IN);
		if (hcsc_ready(ncr_sc) == 0)
			goto interrupt;
		bus_space_read_multi_1(pdmat, pdmah, 0, data, len);
		data += len;
		resid -= len;
	}

	hcsc_wait_not_req(ncr_sc);

interrupt:
	SCI_CLR_INTR(ncr_sc);
	NCR5380_WRITE(ncr_sc, sci_mode,
	    NCR5380_READ(ncr_sc, sci_mode) & ~SCI_MODE_DMA);
	splx(s);
	return datalen - resid;
}
Example #24
/********************************************************************************
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
static int
twe_shutdown(device_t dev)
{
    struct twe_softc	*sc = device_get_softc(dev);
    int			i, s, error;

    debug_called(4);

    s = splbio();
    error = 0;

    /* 
     * Delete all our child devices.
     */
    for (i = 0; i < TWE_MAX_UNITS; i++) {
	if (sc->twe_drive[i].td_disk != 0) {
	    if ((error = device_delete_child(sc->twe_dev, sc->twe_drive[i].td_disk)) != 0)
		goto out;
	    sc->twe_drive[i].td_disk = 0;
	}
    }

    /*
     * Bring the controller down.
     */
    twe_deinit(sc);

 out:
    splx(s);
    return(error);
}
Example #25
/*
 * The syncer vnode is no longer needed and is being decommissioned.
 */
int
sync_inactive(void *v)
{
	struct vop_inactive_args *ap = v;

	struct vnode *vp = ap->a_vp;
	int s;

	if (vp->v_usecount == 0) {
		VOP_UNLOCK(vp, 0, ap->a_p);
		return (0);
	}

	vp->v_mount->mnt_syncer = NULL;

	s = splbio();

	LIST_REMOVE(vp, v_synclist);
	vp->v_bioflag &= ~VBIOONSYNCLIST;

	splx(s);

	vp->v_writecount = 0;
	vput(vp);

	return (0);
}
Example #26
/*
 * Configure.  This should be the first thing that the SPI driver
 * should do, to configure which mode (e.g. SPI_MODE_0, which is the
 * same as Philips Microwire mode), and speed.  If the bus driver
 * cannot run fast enough, then it should just configure the fastest
 * mode that it can support.  If the bus driver cannot run slow
 * enough, then the device is incompatible and an error should be
 * returned.
 */
int
spi_configure(struct spi_handle *sh, int mode, int speed)
{
	int			s, rv;
	struct spi_softc	*sc = sh->sh_sc;
	struct spi_controller	*tag = sh->sh_controller;

	/* ensure that request is compatible with other devices on the bus */
	if ((sc->sc_mode >= 0) && (sc->sc_mode != mode))
		return EINVAL;

	s = splbio();
	/* pick lowest configured speed */
	if (speed == 0)
		speed = sc->sc_speed;
	if (sc->sc_speed)
		speed = min(sc->sc_speed, speed);

	rv = (*tag->sct_configure)(tag->sct_cookie, sh->sh_slave,
	    mode, speed);

	if (rv == 0) {
		sc->sc_mode = mode;
		sc->sc_speed = speed;
	}
	splx(s);
	return rv;
}
Example #27
struct twe_ccb *
twe_ccb_alloc(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	s = splbio();
	if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
		/* Use the reserved CCB. */
		ccb = sc->sc_ccbs;
	} else {
		/* Allocate a CCB and command block. */
		if (__predict_false((ccb =
				SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
			splx(s);
			return (NULL);
		}
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}
Example #28
static void
maru_shutdown_sc(struct maru_softc *sc)
{
	int n;
	int s;

	DB("maru_shutdown_sc(%p)\n", sc);
	s = splbio();
	if (sc->sc_flags & MUF_INITED) {
		disk_detach(&sc->sc_dkdev);
		sc->sc_flags &= ~MUF_INITED;
		if (sc->sc_kapi)
			sc->sc_kapi->ka_shutdown(sc->sc_kapi);
		if (maru_tokens) {
			for (n = 0; n < NUM_TOKENS; n++) {
				struct maru_token *tok = &maru_tokens[n];

				if (tok->qt_sc == sc) {
					if (tok->qt_bp)
						maru_berror(tok->qt_bp, ENXIO);
					maru_release_token(tok);
				}
			}
			if (maru_tokens_active == 0) {
				free(maru_tokens, M_DEVBUF);
				maru_tokens = NULL;
			}
		}
	}
	splx(s);
}
Example #29
File: mcd.c Project: MarginC/kame
static void
mcd_start(struct mcd_softc *sc)
{
	struct bio *bp;
	int s = splbio();

	if (sc->data.flags & MCDMBXBSY) {
		splx(s);
		return;
	}

	bp = bioq_first(&sc->data.head);
	if (bp != 0) {
		/* block found to process, dequeue */
		/*MCD_TRACE("mcd_start: found block bp=0x%x\n",bp,0,0,0);*/
		bioq_remove(&sc->data.head, bp);
		sc->data.flags |= MCDMBXBSY;
		splx(s);
	} else {
		/* nothing to do */
		splx(s);
		return;
	}

	sc->data.mbx.retry = MCD_RETRYS;
	sc->data.mbx.bp = bp;

	mcd_doread(sc, MCD_S_BEGIN, &(sc->data.mbx));
	return;
}
Example #30
File: ips.c Project: MarginC/kame
void ips_issue_copperhead_cmd(ips_command_t *command)
{
	int i;
	intrmask_t mask = splbio();
	/* hmmm, is there a cleaner way to do this? */
	if(command->sc->state & IPS_OFFLINE){
		splx(mask);
		command->status.value = IPS_ERROR_STATUS;
		command->callback(command);
		return;
	}
	command->timeout = 10;
	for(i = 0; ips_read_4(command->sc, COPPER_REG_CCCR) & COPPER_SEM_BIT;
	    i++){
		if(i == 20){
			printf("sem bit still set, can't send a command\n");
			splx(mask);
			return;
		}
		DELAY(500);	/* need to do a delay here */
	}
	ips_write_4(command->sc, COPPER_REG_CCSAR, command->command_phys_addr);
	ips_write_2(command->sc, COPPER_REG_CCCR, COPPER_CMD_START);
	splx(mask);
}