Example 1
void
ieee80211_free_node(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	if (ni == ic->ic_bss)
		panic("freeing bss node");

	splassert(IPL_NET);

	DPRINTF(("%s\n", ether_sprintf(ni->ni_macaddr)));
#ifndef IEEE80211_STA_ONLY
	timeout_del(&ni->ni_eapol_to);
	timeout_del(&ni->ni_sa_query_to);
	IEEE80211_AID_CLR(ni->ni_associd, ic->ic_aid_bitmap);
#endif
	RB_REMOVE(ieee80211_tree, &ic->ic_tree, ni);
	ic->ic_nnodes--;
#ifndef IEEE80211_STA_ONLY
	if (!IF_IS_EMPTY(&ni->ni_savedq)) {
		IF_PURGE(&ni->ni_savedq);
		if (ic->ic_set_tim != NULL)
			(*ic->ic_set_tim)(ic, ni->ni_associd, 0);
	}
#endif
	(*ic->ic_node_free)(ic, ni);
	/* TBD indicate to drivers that a new node can be allocated */
}
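The splassert(IPL_NET) above only checks the contract; the caller itself must block network interrupts first. A minimal caller sketch using the standard spl API (the surrounding context is hypothetical):

	int s;

	s = splnet();			/* satisfies splassert(IPL_NET) */
	ieee80211_free_node(ic, ni);	/* ni must not be ic->ic_bss */
	splx(s);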
Example 2
/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 *
 * Must be called at splbio().
 */
void
biodone(struct buf *bp)
{
	splassert(IPL_BIO);

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);

	if (!ISSET(bp->b_flags, B_READ)) {
		CLR(bp->b_flags, B_WRITEINPROG);
		vwakeup(bp->b_vp);
	}

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}
}
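biodone() hands a finished buffer to exactly one consumer: a B_CALL callback, brelse() for an async write, or a wakeup() for a process sleeping on the buffer. A hedged sketch of arming the callback path before starting I/O, where mydrv_iodone is a hypothetical driver routine:

	void	mydrv_iodone(struct buf *);	/* hypothetical completion handler */

	bp->b_iodone = mydrv_iodone;
	SET(bp->b_flags, B_CALL);	/* biodone() calls b_iodone instead */
	/* ... submit bp; mydrv_iodone() will run at splbio() */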
Example 3
void
fdfinish(struct fd_softc *fd, struct buf *bp)
{
	struct fdc_softc *fdc = (void *)fd->sc_dev.dv_parent;

	splassert(IPL_BIO);

	fd->sc_skip = 0;
	fd->sc_bp = bufq_dequeue(&fd->sc_bufq);

	/*
	 * Move this drive to the end of the queue to give others a `fair'
	 * chance.  We only force a switch if N operations are completed while
	 * another drive is waiting to be serviced, since there is a long motor
	 * startup delay whenever we switch.
	 */
	if (TAILQ_NEXT(fd, sc_drivechain) != NULL && ++fd->sc_ops >= 8) {
		fd->sc_ops = 0;
		TAILQ_REMOVE(&fdc->sc_link.fdlink.sc_drives, fd, sc_drivechain);
		if (fd->sc_bp != NULL) {
			TAILQ_INSERT_TAIL(&fdc->sc_link.fdlink.sc_drives, fd,
					  sc_drivechain);
		}
	}

	biodone(bp);
	/* turn off motor 5s from now */
	timeout_add_sec(&fd->fd_motor_off_to, 5);
	fdc->sc_state = DEVIDLE;
}
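The 5-second motor-off timer assumes fd_motor_off_to was bound to its handler when the softc was attached; a one-line sketch of that binding, where fd_motor_off is a hypothetical handler name:

	/* at attach time: bind the timeout fired by fdfinish() above */
	timeout_set(&fd->fd_motor_off_to, fd_motor_off, fd);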
Example 4
void
buf_put(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (bp->b_pobj != NULL)
		KASSERT(bp->b_bufsize > 0);
	if (ISSET(bp->b_flags, B_DELWRI))
		panic("buf_put: releasing dirty buffer");
	if (bp->b_freelist.tqe_next != NOLIST &&
	    bp->b_freelist.tqe_next != (void *)-1)
		panic("buf_put: still on the free list");
	if (bp->b_vnbufs.le_next != NOLIST &&
	    bp->b_vnbufs.le_next != (void *)-1)
		panic("buf_put: still on the vnode list");
	if (!LIST_EMPTY(&bp->b_dep))
		panic("buf_put: b_dep is not empty");
#endif

	LIST_REMOVE(bp, b_list);
	bcstats.numbufs--;

	if (buf_dealloc_mem(bp) != 0)
		return;
	pool_put(&bufpool, bp);
}
Example 5
/*
 * Must be called at splbio()
 */
void
buf_undirty(struct buf *bp)
{
	splassert(IPL_BIO);

	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
	}
}
Example 6
void
buf_map(struct buf *bp)
{
	vaddr_t va;

	splassert(IPL_BIO);

	if (bp->b_data == NULL) {
		unsigned long i;

		/*
		 * First, just use the pre-allocated space until we run out.
		 */
		if (buf_kva_start < buf_kva_end) {
			va = buf_kva_start;
			buf_kva_start += MAXPHYS;
			bcstats.kvaslots_avail--;
		} else {
			struct buf *vbp;

			/*
			 * Find some buffer we can steal the space from.
			 */
			while ((vbp = TAILQ_FIRST(&buf_valist)) == NULL) {
				buf_needva++;
				buf_nkvmsleep++;
				tsleep(&buf_needva, PRIBIO, "buf_needva", 0);
			}
			va = buf_unmap(vbp);
		}

		mtx_enter(&bp->b_pobj->vmobjlock);
		for (i = 0; i < atop(bp->b_bufsize); i++) {
			struct vm_page *pg = uvm_pagelookup(bp->b_pobj,
			    bp->b_poffs + ptoa(i));

			KASSERT(pg != NULL);

			pmap_kenter_pa(va + ptoa(i), VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		mtx_leave(&bp->b_pobj->vmobjlock);
		pmap_update(pmap_kernel());
		bp->b_data = (caddr_t)va;
	} else {
		TAILQ_REMOVE(&buf_valist, bp, b_valist);
		bcstats.kvaslots_avail--;
	}

	bcstats.busymapped++;

	CLR(bp->b_flags, B_NOTMAPPED);
}
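buf_map() may tsleep() waiting for another buffer's KVA to be stolen, and the caller is expected to hold splbio() to satisfy the assertion. A sketch of the intended call pattern (context hypothetical):

	int s;

	s = splbio();
	buf_map(bp);	/* may tsleep() until a KVA slot frees up */
	splx(s);
	/* bp->b_data now points at a kernel mapping of the buffer's pages */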
Example 7
void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;

	splassert(IPL_BIO);

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
	    bp->b_flags & B_READ);

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		bp->b_resid = xs->resid;
		break;

	case XS_NO_CCB:
		/* The adapter is busy, requeue the buf and try it later. */
		sd_buf_requeue(sc, bp);
		scsi_xs_put(xs);
		SET(sc->flags, SDF_WAITING);	/* break out of sdstart loop */
		timeout_add(&sc->sc_timeout, 1);
		return;

	case XS_SENSE:
	case XS_SHORTSENSE:
		if (scsi_interpret_sense(xs) != ERESTART)
			xs->retries = 0;
		/* FALLTHROUGH */

	case XS_BUSY:
	case XS_TIMEOUT:
		if (xs->retries--) {
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		break;
	}

	biodone(bp);
	scsi_xs_put(xs);
	sdstart(sc);	/* restart io */
}
Example 8
/*
 * Must be called at splbio()
 */
void
buf_undirty(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (!ISSET(bp->b_flags, B_BUSY))
		panic("Trying to undirty buffer on freelist!");
#endif
	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
	}
}
Example 9
/*
 * Get rid of a swap buffer structure which has been used in physical I/O.
 * Mostly taken from /sys/vm/swap_pager.c, except that it now uses
 * wakeup() rather than the VM-internal thread_wakeup(), and that the caller
 * must mask disk interrupts, rather than putphysbuf() itself.
 */
void
putphysbuf(struct buf *bp)
{
	splassert(IPL_BIO);

	/* XXXCDC: is this necessary? */
	if (bp->b_vp)
		brelvp(bp);

#ifdef DIAGNOSTIC
	if (bp->b_flags & B_WANTED)
		panic("putphysbuf: private buf B_WANTED");
#endif
	pool_put(&bufpool, bp);
}
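As the header comment states, the caller masks disk interrupts rather than putphysbuf() itself; a minimal caller sketch:

	int s;

	s = splbio();		/* caller masks disk interrupts */
	putphysbuf(bp);
	splx(s);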
Example 10
int
viocon_tx_drain(struct viocon_port *vp, struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	int ndone = 0, len, slot;

	splassert(IPL_TTY);
	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, BUFSIZE,
		    BUS_DMASYNC_POSTREAD);
		virtio_dequeue_commit(vq, slot);
		ndone++;
	}
	return ndone;
}
Example 11
void
ccdintr(struct ccd_softc *cs, struct buf *bp)
{

	splassert(IPL_BIO);

	CCD_DPRINTF(CCDB_FOLLOW, ("ccdintr(%p, %p)\n", cs, bp));

	/*
	 * Request is done for better or worse, wakeup the top half.
	 */
	if (bp->b_flags & B_ERROR)
		bp->b_resid = bp->b_bcount;
	disk_unbusy(&cs->sc_dkdev, (bp->b_bcount - bp->b_resid),
	    (bp->b_flags & B_READ));
	biodone(bp);
}
Example 12
int
viocon_tx_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viocon_softc *sc = (struct viocon_softc *)vsc->sc_child;
	int ndone = 0;
	int portidx = (vq->vq_index - 1) / 2;
	struct viocon_port *vp = sc->sc_ports[portidx];
	struct tty *tp = vp->vp_tty;

	splassert(IPL_TTY);
	ndone = viocon_tx_drain(vp, vq);
	if (ndone && ISSET(tp->t_state, TS_BUSY)) {
		CLR(tp->t_state, TS_BUSY);
		linesw[tp->t_line].l_start(tp);
	}

	return 1;
}
Example 13
int
rf_State_LastState(RF_RaidAccessDesc_t *desc)
{
	void (*callbackFunc) (RF_CBParam_t) = desc->callbackFunc;
	RF_CBParam_t callbackArg;

	callbackArg.p = desc->callbackArg;

	/*
	 * If this is not an async request, wake up the caller.
	 */
	if (desc->async_flag == 0)
		wakeup(desc->bp);

	/*
	 * That's all the IO for this one... Unbusy the 'disk'.
	 */

	rf_disk_unbusy(desc);

	/*
	 * Wakeup any requests waiting to go.
	 */

	RF_LOCK_MUTEX(((RF_Raid_t *) desc->raidPtr)->mutex);
	((RF_Raid_t *) desc->raidPtr)->openings++;
	RF_UNLOCK_MUTEX(((RF_Raid_t *) desc->raidPtr)->mutex);

	/* Wake up any pending I/O. */
	raidstart(((RF_Raid_t *) desc->raidPtr));

	/* printf("%s: Calling biodone on 0x%x.\n", __func__, desc->bp); */
	splassert(IPL_BIO);
	biodone(desc->bp);	/* Access came through ioctl. */

	if (callbackFunc)
		callbackFunc(callbackArg);
	rf_FreeRaidAccDesc(desc);

	return RF_FALSE;
}
Example 14
/*
 * cdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (cdstrategy).
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * Must be called at the correct (highish) spl level:
 * cdstart() is called at splbio() from cdstrategy, cdrestart and scsi_done.
 */
void
cdstart(void *v)
{
	struct cd_softc *cd = v;
	struct scsi_link *sc_link = cd->sc_link;
	struct buf *bp = 0;
	struct buf *dp;
	struct scsi_rw_big cmd_big;
	struct scsi_rw cmd_small;
	struct scsi_generic *cmdp;
	int blkno, nblks, cmdlen, error;
	struct partition *p;

	splassert(IPL_BIO);

	SC_DEBUG(sc_link, SDEV_DB2, ("cdstart\n"));
	/*
	 * Check if the device has room for another command
	 */
	while (sc_link->openings > 0) {
		/*
		 * There is excess capacity, but a special command is
		 * waiting.  It'll need the adapter as soon as we clear
		 * out of the way and let it run (user level wait).
		 */
		if (sc_link->flags & SDEV_WAITING) {
			sc_link->flags &= ~SDEV_WAITING;
			wakeup((caddr_t)sc_link);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		dp = &cd->buf_queue;
		if ((bp = dp->b_actf) == NULL)	/* yes, an assign */
			return;
		dp->b_actf = bp->b_actf;

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if ((sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command
		 *
		 * First, translate the block to absolute and put it in terms
		 * of the logical blocksize of the device.
		 */
		blkno =
		    bp->b_blkno / (cd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
		p = &cd->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno += DL_GETPOFFSET(p);
		nblks = howmany(bp->b_bcount, cd->sc_dk.dk_label->d_secsize);

		/*
		 *  Fill out the scsi command.  If the transfer will
		 *  fit in a "small" cdb, use it.
		 */
		if (!(sc_link->flags & SDEV_ATAPI) &&
		    !(sc_link->quirks & SDEV_ONLYBIG) && 
		    ((blkno & 0x1fffff) == blkno) &&
		    ((nblks & 0xff) == nblks)) {
			/*
			 * We can fit in a small cdb.
			 */
			bzero(&cmd_small, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    READ_COMMAND : WRITE_COMMAND;
			_lto3b(blkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsi_generic *)&cmd_small;
		} else {
			/*
			 * Need a large cdb.
			 */
			bzero(&cmd_big, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(blkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsi_generic *)&cmd_big;
		}

		/* Instrumentation. */
		disk_busy(&cd->sc_dk);

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsi_scsi_cmd(sc_link, cmdp, cmdlen,
		    (u_char *) bp->b_data, bp->b_bcount, SCSI_RETRIES, 30000,
		    bp, SCSI_NOSLEEP | ((bp->b_flags & B_READ) ? SCSI_DATA_IN :
		    SCSI_DATA_OUT));
		switch (error) {
		case 0:
			timeout_del(&cd->sc_timeout);
			break;
		case EAGAIN:
			/*
			 * The device can't start another i/o. Try again later.
			 */
			dp->b_actf = bp;
			disk_unbusy(&cd->sc_dk, 0, 0);
			timeout_add(&cd->sc_timeout, 1);
			return;
		default:
			disk_unbusy(&cd->sc_dk, 0, 0);
			printf("%s: not queued, error %d\n",
			    cd->sc_dev.dv_xname, error);
			break;
		}
	}
}
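The header comment describes the classic start-routine pattern: the strategy routine enqueues at splbio() and kicks cdstart(), and every completion re-enters cdstart() to keep the queue drained. A hedged sketch of the strategy-side hand-off (queue insertion elided):

	int s;

	s = splbio();
	/* ... insert bp into cd->buf_queue, e.g. in disksort() order ... */
	cdstart(cd);	/* drains the queue with block interrupts masked */
	splx(s);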
Example 15
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *netmask, int flags, struct sockaddr *src,
    struct rtentry **rtp)
{
	struct rtentry		*rt;
	int			 error = 0;
	u_int32_t		*stat = NULL;
	struct rt_addrinfo	 info;
	struct ifaddr		*ifa;
	struct ifnet		*ifp = NULL;

	splassert(IPL_SOFTNET);

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	ifp = ifa->ifa_ifp;
	rt = rtalloc1(dst, 0, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
	if (!(flags & RTF_DONE) && rt &&
	     (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway) != NULL)
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface. 
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
create:
			if (rt)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt, 0);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, rt_key(rt), gateway, 0);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, ifp, error, 0);
}
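rtredirect() is normally reached from the ICMP input path, which already runs at splsoftnet(); a hedged caller sketch, where dst, gateway and src are hypothetical sockaddrs recovered from the redirect message:

	int s;

	s = splsoftnet();	/* satisfies splassert(IPL_SOFTNET) */
	rtredirect(dst, gateway, NULL, RTF_GATEWAY | RTF_HOST, src, NULL);
	splx(s);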
Example 16
/*
 * Called at interrupt time.
 * Mark the component as done and if all components are done,
 * take a ccd interrupt.
 */
void
ccdiodone(struct buf *vbp)
{
	struct ccdbuf *cbp = (struct ccdbuf *)vbp;
	struct buf *bp = cbp->cb_obp;
	struct ccd_softc *cs = cbp->cb_sc;
	int old_io = cbp->cb_flags & CBF_OLD;
	int i;
	long count = bp->b_bcount, off;
	char *comptype;

	splassert(IPL_BIO);

	CCD_DPRINTF(CCDB_FOLLOW, ("ccdiodone(%p)\n", cbp));
	CCD_DPRINTF(CCDB_IO, (cbp->cb_flags & CBF_MIRROR?
	    "ccdiodone: mirror component\n" : 
	    "ccdiodone: bp %p bcount %ld resid %ld\n",
	    bp, bp->b_bcount, bp->b_resid));
	CCD_DPRINTF(CCDB_IO, (" dev %x(u%d), cbp %p bn %d addr %p bcnt %ld\n",
	    vbp->b_dev, cbp->cb_comp, cbp, vbp->b_blkno,
	    vbp->b_data, vbp->b_bcount));

	if (vbp->b_flags & B_ERROR) {
		cs->sc_cinfo[cbp->cb_comp].ci_flags |= CCIF_FAILED;
		if (cbp->cb_flags & CBF_MIRROR)
			comptype = " (mirror)";
		else {
			bp->b_flags |= B_ERROR;
			bp->b_error = vbp->b_error ?
			    vbp->b_error : EIO;
			comptype = "";
		}

		printf("%s: error %d on component %d%s\n",
		    cs->sc_xname, bp->b_error, cbp->cb_comp, comptype);
	}
	cbp->cb_flags |= CBF_DONE;

	if (cbp->cb_dep &&
	    (cbp->cb_dep->cb_flags & CBF_DONE) != (cbp->cb_flags & CBF_DONE))
		return;

	if (cbp->cb_flags & CBF_MIRROR &&
	    !(cbp->cb_dep->cb_flags & CBF_MIRROR)) {
		cbp = cbp->cb_dep;
		vbp = (struct buf *)cbp;
	}

	if (!old_io) {
		/*
		 * Gather all the pieces and put them where they should be.
		 */
		for (i = 0, off = 0; i < cbp->cb_sgcnt; i++) {
			CCD_DPRINTF(CCDB_IO,
			    ("ccdiodone: sg %d (%p/%x) off %x\n", i,
			    cbp->cb_sg[i].cs_sgaddr,
			    cbp->cb_sg[i].cs_sglen, off));
			pagemove(vbp->b_data + off, cbp->cb_sg[i].cs_sgaddr,
			    round_page(cbp->cb_sg[i].cs_sglen));
			off += cbp->cb_sg[i].cs_sglen;
		}

		uvm_km_free(ccdmap, (vaddr_t)vbp->b_data, count);
		if (ccd_need_kvm) {
			ccd_need_kvm = 0;
			wakeup(ccdmap);
		}
	}
	count = vbp->b_bcount;

	/* Release the dependent buf first; cbp must not be used once freed. */
	if (cbp->cb_dep)
		putccdbuf(cbp->cb_dep);
	putccdbuf(cbp);

	/*
	 * If all done, "interrupt".
	 *
	 * Note that mirror component buffers aren't counted against
	 * the original I/O buffer.
	 */
	if (count > bp->b_resid)
		panic("ccdiodone: count");
	bp->b_resid -= count;
	if (bp->b_resid == 0)
		ccdintr(cs, bp);
}