static int
pcscp_dma_intr(struct ncr53c9x_softc *sc)
{
	struct pcscp_softc *esc = (struct pcscp_softc *)sc;
	int trans, resid, i;
	bus_dmamap_t dmap = esc->sc_xfermap;
	int datain = esc->sc_datain;
	uint32_t dmastat;
	uint8_t *p = NULL;

	dmastat = READ_DMAREG(esc, DMA_STAT);

	if (dmastat & DMASTAT_ERR) {
		/* XXX not tested... */
		WRITE_DMAREG(esc, DMA_CMD,
		    DMACMD_ABORT | (datain ? DMACMD_DIR : 0));

		printf("%s: error: DMA error detected; Aborting.\n",
		    device_xname(sc->sc_dev));
		bus_dmamap_unload(esc->sc_dmat, dmap);
		return -1;
	}

	if (dmastat & DMASTAT_ABT) {
		/* XXX What should be done? */
		printf("%s: %s: DMA aborted.\n",
		    device_xname(sc->sc_dev), __func__);
		WRITE_DMAREG(esc, DMA_CMD,
		    DMACMD_IDLE | (datain ? DMACMD_DIR : 0));
		esc->sc_active = 0;
		return 0;
	}

#ifdef DIAGNOSTIC
	/* This is an "assertion" :) */
	if (esc->sc_active == 0)
		panic("%s: %s: DMA wasn't active",
		    device_xname(sc->sc_dev), __func__);
#endif

	/* DMA has stopped */

	esc->sc_active = 0;

	if (esc->sc_dmasize == 0) {
		/* A "Transfer Pad" operation completed */
		NCR_DMA(("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
		    __func__,
		    PCSCP_READ_REG(esc, NCR_TCL) |
		    (PCSCP_READ_REG(esc, NCR_TCM) << 8),
		    PCSCP_READ_REG(esc, NCR_TCL),
		    PCSCP_READ_REG(esc, NCR_TCM)));
		return 0;
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the ESP counter registers get decremented as
	 * bytes are clocked into the FIFO.
	 */
	if (!datain &&
	    (resid = (PCSCP_READ_REG(esc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		NCR_DMA(("%s: empty esp FIFO of %d ", __func__, resid));
	}

	if ((sc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * `Terminal count' is off, so read the residue
		 * out of the ESP counter registers.
		 */
		if (datain) {
			resid = PCSCP_READ_REG(esc, NCR_FFLAG) & NCRFIFO_FF;
			while (resid > 1)
				resid =
				    PCSCP_READ_REG(esc, NCR_FFLAG) & NCRFIFO_FF;
			WRITE_DMAREG(esc, DMA_CMD, DMACMD_BLAST | DMACMD_MDL |
			    (datain ? DMACMD_DIR : 0));

			for (i = 0; i < 1000; i++) { /* XXX */
				if (READ_DMAREG(esc, DMA_STAT) & DMASTAT_BCMP)
					break;
				DELAY(1);
			}

			/* See the comments below... */
			if (resid)
				p = *esc->sc_dmaaddr;
		}

		resid += PCSCP_READ_REG(esc, NCR_TCL) |
		    (PCSCP_READ_REG(esc, NCR_TCM) << 8) |
		    (PCSCP_READ_REG(esc, NCR_TCH) << 16);
	} else {
		while ((dmastat & DMASTAT_DONE) == 0)
			dmastat = READ_DMAREG(esc, DMA_STAT);
	}

	WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain ? DMACMD_DIR : 0));

	/* sync MDL */
	bus_dmamap_sync(esc->sc_dmat, esc->sc_mdldmap,
	    0, sizeof(uint32_t) * dmap->dm_nsegs, BUS_DMASYNC_POSTWRITE);
	/* sync transfer buffer */
	bus_dmamap_sync(esc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(esc->sc_dmat, dmap);

	trans = esc->sc_dmasize - resid;

	/*
	 * From the technical manual notes:
	 *
	 * `In some odd byte conditions, one residual byte will be left
	 *  in the SCSI FIFO, and the FIFO flags will never count to 0.
	 *  When this happens, the residual byte should be retrieved
	 *  via PIO following completion of the BLAST operation.'
	 */

	if (p) {
		p += trans;
		*p = PCSCP_READ_REG(esc, NCR_FIFO);
		trans++;
	}

	if (trans < 0) {			/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		printf("%s: xfer (%d) > req (%d)\n",
		    device_xname(sc->sc_dev), trans, esc->sc_dmasize);
#endif
		trans = esc->sc_dmasize;
	}

	NCR_DMA(("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    __func__,
	    PCSCP_READ_REG(esc, NCR_TCL),
	    PCSCP_READ_REG(esc, NCR_TCM),
	    PCSCP_READ_REG(esc, NCR_TCH),
	    trans, resid));

	*esc->sc_dmalen -= trans;
	*esc->sc_dmaaddr += trans;

	return 0;
}
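Note: the residue calculation above assembles the ESP's three 8-bit transfer
counters into a single 24-bit byte count. A minimal standalone sketch of that
arithmetic (function name and sample values are illustrative, not from the
driver):

#include <stdint.h>

/* Combine the low/mid/high transfer counters into a 24-bit residue. */
static uint32_t
esp_residue(uint8_t tcl, uint8_t tcm, uint8_t tch)
{

	return ((uint32_t)tcl | ((uint32_t)tcm << 8) | ((uint32_t)tch << 16));
}

/* e.g. tcl=0x34, tcm=0x12, tch=0x00 -> 0x001234 (4660) bytes remaining */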
Example #2
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
kr_encap(struct kr_softc *sc, struct mbuf **m_head)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*desc, *prev_desc;
	bus_dma_segment_t	txsegs[KR_MAXFRAGS];
	uint32_t		link_addr;
	int			error, i, nsegs, prod, si, prev_prod;

	KR_LOCK_ASSERT(sc);

	prod = sc->kr_cdata.kr_tx_prod;
	txd = &sc->kr_cdata.kr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Defragment the chain and retry once instead of panicking. */
		struct mbuf *m;

		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	si = prod;

	/*
	 * Make a list of descriptors for this packet.  The DMA controller
	 * walks the list while kr_link is nonzero.  The last descriptor
	 * must have the COF flag set so the controller picks up the next
	 * chain from NDPTR.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->kr_rdata.kr_tx_ring[prod];
		desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
		if (i == 0)
			desc->kr_devcs = KR_DMATX_DEVCS_FD;
		desc->kr_ca = txsegs[i].ds_addr;
		desc->kr_link = 0;
		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);

		sc->kr_cdata.kr_tx_cnt++;
		prev_desc = desc;
		KR_INC(prod, KR_TX_RING_CNT);
	}

	/*
	 * Set COF on the last descriptor and mark the last fragment
	 * with the LD flag.
	 */
	if (desc) {
		desc->kr_ctl |=  KR_CTL_COF;
		desc->kr_devcs |= KR_DMATX_DEVCS_LD;
	}

	/* Update producer index. */
	sc->kr_cdata.kr_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	/* Check if new list is queued in NDPTR */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
		/* NDPTR is not busy - start new list */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 
		    KR_TX_RING_ADDR(sc, si));
	} else {
		link_addr = KR_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
		desc = &sc->kr_rdata.kr_tx_ring[si];
		desc->kr_link = link_addr;
	}

	return (0);
}
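Note: the NDPTR-busy path above splices the new chain in by rewriting kr_link
in the descriptor just before the chain head. A small sketch of the modular
ring-index arithmetic it relies on (helper names are hypothetical; KR_INC is
assumed to advance an index modulo the ring size):

#define RING_CNT	128	/* illustrative ring size */

/* What KR_INC(idx, RING_CNT) is assumed to do: advance modulo the ring. */
static int
ring_next(int idx)
{

	return ((idx + 1) % RING_CNT);
}

/* The "previous descriptor" step used when NDPTR is busy. */
static int
ring_prev(int idx)
{

	return ((idx + RING_CNT - 1) % RING_CNT);
}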
Example #3
/*
 * Name: qla_lro_intr
 * Function: Handles normal ethernet frames received
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];
	
	ha->lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];
	
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->lro_bytes += pkt_length;

	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;
	
		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	mpl->m_len = rem_len;

	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
	
		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

		ip->ip_len = htons(iplen);

		ha->ipv4_lro++;
	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;
	} else {
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
Example #4
void
dbdma_sync_commands(dbdma_channel_t *chan, bus_dmasync_op_t op)
{

    bus_dmamap_sync(chan->sc_dmatag, chan->sc_dmamap, op);
}
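Note: a hypothetical caller sketch (the update step and function name are
assumptions, not from this source). CPU edits of the command area are
bracketed with POST ops before touching it and PRE ops before handing it
back to the engine:

static void
dbdma_update_example(dbdma_channel_t *chan)
{

	/* Make the engine's prior writes visible to the CPU. */
	dbdma_sync_commands(chan,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* ... CPU edits the command descriptors here ... */

	/* Flush CPU writes so the engine sees the updated commands. */
	dbdma_sync_commands(chan,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}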
Example #5
static void
kr_tx(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctl, devcs;
	int			cons, prod;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_tx_cons;
	prod = sc->kr_cdata.kr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->kr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
		cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
		ctl = cur_tx->kr_ctl;
		devcs = cur_tx->kr_devcs;
		/* Check if descriptor has "finished" flag */
		if ((ctl & KR_CTL_F) == 0)
			break;

		sc->kr_cdata.kr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->kr_cdata.kr_txdesc[cons];

		if (devcs & KR_DMATX_DEVCS_TOK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;
			/* collisions: medium busy, late collision */
			if ((devcs & KR_DMATX_DEVCS_EC) || 
			    (devcs & KR_DMATX_DEVCS_LC))
				ifp->if_collisions++;
		}

		bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);

		/* Free only if it's the first descriptor in the list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->kr_ctl = KR_CTL_IOF;
		cur_tx->kr_devcs = 0;
		cur_tx->kr_ca = 0;
		cur_tx->kr_link = 0; 
	}

	sc->kr_cdata.kr_tx_cons = cons;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
Example #6
struct qlw_ccb *
qlw_handle_resp(struct qlw_softc *sc, u_int16_t id)
{
	struct qlw_ccb *ccb;
	struct qlw_iocb_hdr *hdr;
	struct qlw_iocb_status *status;
	struct scsi_xfer *xs;
	u_int32_t handle;
	int entry_type;
	int flags;
	int bus;

	ccb = NULL;
	hdr = QLW_DMA_KVA(sc->sc_responses) + (id * QLW_QUEUE_ENTRY_SIZE);

	bus_dmamap_sync(sc->sc_dmat,
	    QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);

	qlw_get_header(sc, hdr, &entry_type, &flags);
	switch (entry_type) {
	case QLW_IOCB_STATUS:
		status = (struct qlw_iocb_status *)hdr;
		handle = qlw_swap32(sc, status->handle);
		if (handle > sc->sc_maxccbs) {
			panic("bad completed command handle: %d (> %d)",
			    handle, sc->sc_maxccbs);
		}

		ccb = &sc->sc_ccbs[handle];
		xs = ccb->ccb_xs;
		if (xs == NULL) {
			DPRINTF(QLW_D_INTR, "%s: got status for inactive"
			    " ccb %d\n", DEVNAME(sc), handle);
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			ccb = NULL;
			break;
		}
		if (xs->io != ccb) {
			panic("completed command handle doesn't match xs "
			    "(handle %d, ccb %p, xs->io %p)", handle, ccb,
			    xs->io);
		}

		if (xs->datalen > 0) {
			bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		}

		bus = qlw_xs_bus(sc, xs);
		xs->status = qlw_swap16(sc, status->scsi_status);
		switch (qlw_swap16(sc, status->completion)) {
		case QLW_IOCB_STATUS_COMPLETE:
			if (qlw_swap16(sc, status->scsi_status) &
			    QLW_SCSI_STATUS_SENSE_VALID) {
				memcpy(&xs->sense, status->sense_data,
				    sizeof(xs->sense));
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_NOERROR;
			}
			xs->resid = 0;
			break;

		case QLW_IOCB_STATUS_INCOMPLETE:
			if (flags & QLW_STATE_GOT_TARGET) {
				xs->error = XS_DRIVER_STUFFUP;
			} else {
				xs->error = XS_SELTIMEOUT;
			}
			break;

		case QLW_IOCB_STATUS_DMA_ERROR:
			DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc));
			/* set resid apparently? */
			break;

		case QLW_IOCB_STATUS_RESET:
			DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n",
			    DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_RESET;
			break;

		case QLW_IOCB_STATUS_ABORTED:
			DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QLW_IOCB_STATUS_TIMEOUT:
			DPRINTF(QLW_D_INTR, "%s: command timed out\n",
			    DEVNAME(sc));
			xs->error = XS_TIMEOUT;
			break;

		case QLW_IOCB_STATUS_DATA_OVERRUN:
		case QLW_IOCB_STATUS_DATA_UNDERRUN:
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_QUEUE_FULL:
			DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc));
			xs->error = XS_BUSY;
			break;

		case QLW_IOCB_STATUS_WIDE_FAILED:
			DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOWIDE;
			atomic_setbits_int(&sc->sc_update_required[bus],
			    1 << xs->sc_link->target);
			task_add(systq, &sc->sc_update_task);
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_SYNCXFER_FAILED:
			DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOSYNC;
			atomic_setbits_int(&sc->sc_update_required[bus],
			    1 << xs->sc_link->target);
			task_add(systq, &sc->sc_update_task);
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		default:
			DPRINTF(QLW_D_INTR, "%s: unexpected completion"
			    " status %x\n", DEVNAME(sc),
			    qlw_swap16(sc, status->completion));
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n",
		    DEVNAME(sc), entry_type);
		qlw_dump_iocb(sc, hdr, QLW_D_INTR);
		break;
	}

	return (ccb);
}
Example #7
/*
 * should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	bus_dmasync_op_t op;
	int active, error = 0;

	/*
	 * finish up command
	 */
	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	active = (qcb->state != QCB_FREE);
	if (qcb->flags & DMA_DATA_TRANSFER && active) {
		switch (qcb->flags & DMA_DATA_TRANSFER) {
		case DMA_DATA_TRANSFER:
			op = BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
			break;
		case DMA_DATA_IN:
			op = BUS_DMASYNC_POSTREAD;
			break;
		default:
			KASSERT((qcb->flags & DMA_DATA_TRANSFER) ==
			    DMA_DATA_OUT, ("bad DMA data flags"));
			op = BUS_DMASYNC_POSTWRITE;
			break;
		}
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}
	if (active)
		bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (qcb->hwqcb->req.error & SOFT_ERROR) {
		if (qcb->buf)
			device_printf(ida->dev, "soft %s error\n",
				qcb->buf->bio_cmd == BIO_READ ?
					"read" : "write");
		else
			device_printf(ida->dev, "soft error\n");
	}
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		if (qcb->buf)
			device_printf(ida->dev, "hard %s error\n",
				qcb->buf->bio_cmd == BIO_READ ?
					"read" : "write");
		else
			device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}
	if (qcb->error) {
		error = 1;
		device_printf(ida->dev, "request failed to map: %d\n", qcb->error);
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
		if (qcb->state == QCB_TIMEDOUT)
			ida_free_qcb(ida, qcb);
	} else {
		KASSERT(qcb->buf != NULL, ("ida_done(): qcb->buf is NULL!"));
		if (error)
			qcb->buf->bio_flags |= BIO_ERROR;
		idad_intr(qcb->buf);
		ida_free_qcb(ida, qcb);
	}

	if (!active)
		return;

	ida->qactive--;
	/* Reschedule or cancel timeout */
	if (ida->qactive)
		callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
	else
		callout_stop(&ida->ch);
}
Example #8
File: if_sq.c Project: MarginC/kame
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If this is a CPU-owned buffer, we're at the end of the list */
		if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETR_CTL);
			printf("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg);
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size -
		    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

#if 0
		printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
		    i, framelen);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
		    ENETR_CTL_ACTIVE);
	}

	return count;
}
Example #9
File: if_sq.c Project: MarginC/kame
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}
Example #10
/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * current running DMA transfer.  Called from ncr53c9x_intr()
 * for now.
 *
 * return 1 if it was a DMA continue.
 */
static int
lsi64854_scsi_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	struct ncr53c9x_softc *nsc = sc->sc_client;
	bus_dma_tag_t dmat;
	bus_dmamap_t dmam;
	size_t dmasize;
	int lxfer, resid, trans;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_SCSI, ("%s: addr 0x%x, csr %b\n", __func__,
	    bus_read_4(sc->sc_res, L64854_REG_ADDR), csr, DDMACSR_BITS));

	if (csr & (D_ERR_PEND | D_SLAVE_ERR)) {
		device_printf(sc->sc_dev, "error: csr=%b\n", csr,
		    DDMACSR_BITS);
		csr &= ~D_EN_DMA;	/* Stop DMA. */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= D_INVALIDATE | D_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return (-1);
	}

	/* This is an "assertion" :) */
	if (sc->sc_active == 0)
		panic("%s: DMA wasn't active", __func__);

	DMA_DRAIN(sc, 0);

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	dmasize = sc->sc_dmasize;
	if (dmasize == 0) {
		/* A "Transfer Pad" operation completed. */
		DPRINTF(LDB_SCSI, ("%s: discarded %d bytes (tcl=%d, "
		    "tcm=%d)\n", __func__, NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8),
		    NCR_READ_REG(nsc, NCR_TCL), NCR_READ_REG(nsc, NCR_TCM)));
		return (0);
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the NCR53C9X counter registers get decremented
	 * as bytes are clocked into the FIFO.
	 */
	if ((csr & D_WRITE) == 0 &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		DPRINTF(LDB_SCSI, ("%s: empty esp FIFO of %d ", __func__,
		    resid));
		if (nsc->sc_rev == NCR_VARIANT_FAS366 &&
		    (NCR_READ_REG(nsc, NCR_CFG3) & NCRFASCFG3_EWIDE))
			resid <<= 1;
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		lxfer = nsc->sc_features & NCR_F_LARGEXFER;
		/*
		 * "Terminal count" is off, so read the residue
		 * out of the NCR53C9X counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8) |
		    (lxfer != 0 ? (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));

		if (resid == 0 && dmasize == 65536 && lxfer == 0)
			/* A transfer of 64k is encoded as TCL=TCM=0. */
			resid = 65536;
	}

	trans = dmasize - resid;
	if (trans < 0) {			/* transferred < 0? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		device_printf(sc->sc_dev, "xfer (%d) > req (%d)\n", trans,
		    dmasize);
#endif
		trans = dmasize;
	}

	DPRINTF(LDB_SCSI, ("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    __func__, NCR_READ_REG(nsc, NCR_TCL), NCR_READ_REG(nsc, NCR_TCM),
	    (nsc->sc_features & NCR_F_LARGEXFER) != 0 ?
	    NCR_READ_REG(nsc, NCR_TCH) : 0, trans, resid));

	if (dmasize != 0) {
		dmat = sc->sc_buffer_dmat;
		dmam = sc->sc_dmamap;
		bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmam);
	}

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 || nsc->sc_phase != nsc->sc_prevphase)
		return (0);

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMACSR(sc) & D_WRITE);
	return (1);
#endif
	return (0);
}
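Note: the 64k special case above exists because the 16-bit TCL/TCM pair
cannot represent 65536, so a full transfer reads back as zero. A standalone
sketch of that decode (names are illustrative, not from the driver):

#include <stdint.h>

/* Decode a 16-bit residue where a full 64k transfer reads back as 0. */
static int
ncr_resid16(uint8_t tcl, uint8_t tcm, int dmasize)
{
	int resid = tcl | (tcm << 8);

	if (resid == 0 && dmasize == 65536)
		resid = 65536;	/* a 64k transfer is encoded as TCL=TCM=0 */
	return (resid);
}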
Example #11
File: if_sq.c Project: MarginC/kame
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
			sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
			totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to update either
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx),  1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
Example #12
static void
lsi64854_reset(struct lsi64854_softc *sc)
{
	bus_dma_tag_t dmat;
	bus_dmamap_t dmam;
	uint32_t csr;

	DMA_FLUSH(sc, 1);
	csr = L64854_GCSR(sc);

	DPRINTF(LDB_ANY, ("%s: csr 0x%x\n", __func__, csr));

	if (sc->sc_dmasize != 0) {
		dmat = sc->sc_buffer_dmat;
		dmam = sc->sc_dmamap;
		bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_dmamap_unload(dmat, dmam);
	}

	if (sc->sc_rev == DMAREV_HME)
		L64854_SCSR(sc, csr | D_HW_RESET_FAS366);

	csr |= L64854_RESET;		/* reset DMA */
	L64854_SCSR(sc, csr);
	DELAY(200);			/* > 10 Sbus clocks(?) */

	/*DMAWAIT1(sc); why was this here? */
	csr = L64854_GCSR(sc);
	csr &= ~L64854_RESET;		/* de-assert reset line */
	L64854_SCSR(sc, csr);
	DELAY(5);			/* allow a few ticks to settle */

	csr = L64854_GCSR(sc);
	csr |= L64854_INT_EN;		/* enable interrupts */
	if (sc->sc_rev > DMAREV_1 && sc->sc_channel == L64854_CHANNEL_SCSI) {
		if (sc->sc_rev == DMAREV_HME)
			csr |= D_TWO_CYCLE;
		else
			csr |= D_FASTER;
	}

	/* Set burst */
	switch (sc->sc_rev) {
	case DMAREV_HME:
	case DMAREV_2:
		csr &= ~L64854_BURST_SIZE;
		if (sc->sc_burst == 32)
			csr |= L64854_BURST_32;
		else if (sc->sc_burst == 16)
			csr |= L64854_BURST_16;
		else
			csr |= L64854_BURST_0;
		break;
	case DMAREV_ESC:
		csr |= D_ESC_AUTODRAIN;	/* Auto-drain */
		if (sc->sc_burst == 32)
			csr &= ~D_ESC_BURST;
		else
			csr |= D_ESC_BURST;
		break;
	default:
		break;
	}
	L64854_SCSR(sc, csr);

	if (sc->sc_rev == DMAREV_HME) {
		bus_write_4(sc->sc_res, L64854_REG_ADDR, 0);
		sc->sc_dmactl = csr;
	}
	sc->sc_active = 0;

	DPRINTF(LDB_ANY, ("%s: done, csr 0x%x\n", __func__, csr));
}
Example #13
static int
sata_channel_begin_transaction(struct ata_request *request)
{
	struct sata_softc *sc;
	struct ata_channel *ch;
	struct sata_crqb *crqb;
	uint32_t req_in;
	int error, slot;

	sc = device_get_softc(device_get_parent(request->parent));
	ch = device_get_softc(request->parent);

	mtx_assert(&ch->state_mtx, MA_OWNED);

	/* Only DMA R/W goes through the EDMA machine. */
	if (request->u.ata.command != ATA_READ_DMA &&
	    request->u.ata.command != ATA_WRITE_DMA &&
	    request->u.ata.command != ATA_READ_DMA48 &&
	    request->u.ata.command != ATA_WRITE_DMA48) {

		/* Disable EDMA before accessing legacy registers */
		if (sata_edma_is_running(request->parent)) {
			error = sata_edma_ctrl(request->parent, 0);
			if (error) {
				request->result = error;
				return (ATA_OP_FINISHED);
			}
		}

		return (ata_begin_transaction(request));
	}

	/* Prepare data for DMA */
	if ((error = ch->dma.load(request, NULL, NULL))) {
		device_printf(request->parent, "setting up DMA failed!\n");
		request->result = error;
		return (ATA_OP_FINISHED);
	}

	/* Get next free queue slot */
	req_in = SATA_INL(sc, SATA_EDMA_REQIPR(ch->unit));
	slot = (req_in & sc->sc_edma_reqis_mask) >> SATA_EDMA_REQIS_OFS;
	crqb = (struct sata_crqb *)(ch->dma.work +
	    (slot << SATA_EDMA_REQIS_OFS));

	/* Fill in request */
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	crqb->crqb_prdlo = htole32((uint64_t)request->dma->sg_bus & 0xFFFFFFFF);
	crqb->crqb_prdhi = htole32((uint64_t)request->dma->sg_bus >> 32);
	crqb->crqb_flags = htole32((request->flags & ATA_R_READ ? 0x01 : 0x00) |
	    (request->tag << 1));

	crqb->crqb_ata_command = request->u.ata.command;
	crqb->crqb_ata_feature = request->u.ata.feature;
	crqb->crqb_ata_lba_low = request->u.ata.lba;
	crqb->crqb_ata_lba_mid = request->u.ata.lba >> 8;
	crqb->crqb_ata_lba_high = request->u.ata.lba >> 16;
	crqb->crqb_ata_device = ((request->u.ata.lba >> 24) & 0x0F) | (1 << 6);
	crqb->crqb_ata_lba_low_p = request->u.ata.lba >> 24;
	crqb->crqb_ata_lba_mid_p = request->u.ata.lba >> 32;
	crqb->crqb_ata_lba_high_p = request->u.ata.lba >> 40;
	crqb->crqb_ata_feature_p = request->u.ata.feature >> 8;
	crqb->crqb_ata_count = request->u.ata.count;
	crqb->crqb_ata_count_p = request->u.ata.count >> 8;

	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Enable EDMA if disabled */
	if (!sata_edma_is_running(request->parent)) {
		error = sata_edma_ctrl(request->parent, 1);
		if (error) {
			ch->dma.unload(request);
			request->result = error;
			return (ATA_OP_FINISHED);
		}
	}

	/* Tell EDMA about new request */
	req_in = (req_in & ~sc->sc_edma_reqis_mask) | (((slot + 1) <<
	    SATA_EDMA_REQIS_OFS) & sc->sc_edma_reqis_mask);

	SATA_OUTL(sc, SATA_EDMA_REQIPR(ch->unit), req_in);

	return (ATA_OP_CONTINUES);
}
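Note: the request-queue handshake above pulls the slot index out of the EDMA
in-pointer register, fills the CRQB, and writes back an advanced in-pointer.
A minimal sketch of the mask/shift arithmetic (the shift and field width
below are illustrative, not the hardware's actual values):

#include <stdint.h>

#define REQIS_OFS	5			/* illustrative shift */
#define REQIS_MASK	(0x1fU << REQIS_OFS)	/* illustrative mask */

/* Extract the current slot and compute the advanced in-pointer value. */
static uint32_t
edma_advance(uint32_t req_in, int *slotp)
{

	*slotp = (req_in & REQIS_MASK) >> REQIS_OFS;
	return ((req_in & ~REQIS_MASK) |
	    (((*slotp + 1) << REQIS_OFS) & REQIS_MASK));
}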
Example #14
static int
sata_channel_attach(device_t dev)
{
	struct sata_softc *sc;
	struct ata_channel *ch;
	uint64_t work;
	int error, i;

	sc = device_get_softc(device_get_parent(dev));
	ch = device_get_softc(dev);

	if (ch->attached)
		return (0);

	ch->dev = dev;
	ch->unit = device_get_unit(dev);
	ch->flags |= ATA_USE_16BIT | ATA_NO_SLAVE | ATA_SATA;

	/* Set legacy ATA resources. */
	for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
		ch->r_io[i].res = sc->sc_mem_res;
		ch->r_io[i].offset = SATA_SHADOWR_BASE(ch->unit) + (i << 2);
	}

	ch->r_io[ATA_CONTROL].res = sc->sc_mem_res;
	ch->r_io[ATA_CONTROL].offset = SATA_SHADOWR_CONTROL(ch->unit);

	ch->r_io[ATA_IDX_ADDR].res = sc->sc_mem_res;
	ata_default_registers(dev);

	/* Set SATA resources. */
	ch->r_io[ATA_SSTATUS].res = sc->sc_mem_res;
	ch->r_io[ATA_SSTATUS].offset = SATA_SATA_SSTATUS(ch->unit);
	ch->r_io[ATA_SERROR].res = sc->sc_mem_res;
	ch->r_io[ATA_SERROR].offset = SATA_SATA_SERROR(ch->unit);
	ch->r_io[ATA_SCONTROL].res = sc->sc_mem_res;
	ch->r_io[ATA_SCONTROL].offset = SATA_SATA_SCONTROL(ch->unit);
	ata_generic_hw(dev);

	ch->hw.begin_transaction = sata_channel_begin_transaction;
	ch->hw.end_transaction = sata_channel_end_transaction;
	ch->hw.status = sata_channel_status;

	/* Set DMA resources */
	ata_dmainit(dev);
	ch->dma.setprd = sata_channel_dmasetprd;

	/* Clear work area */
	KASSERT(sc->sc_edma_qlen * (sizeof(struct sata_crqb) +
	    sizeof(struct sata_crpb)) <= ch->dma.max_iosize,
	    ("insufficient DMA memory for request/response queues.\n"));
	bzero(ch->dma.work, sc->sc_edma_qlen * (sizeof(struct sata_crqb) +
	    sizeof(struct sata_crpb)));
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Turn off EDMA engine */
	error = sata_edma_ctrl(dev, 0);
	if (error) {
		ata_dmafini(dev);
		return (error);
	}

	/*
	 * Initialize EDMA engine:
	 *	- Native Command Queuing off,
	 *	- Non-Queued operation,
	 *	- Host Queue Cache enabled.
	 */
	SATA_OUTL(sc, SATA_EDMA_CFG(ch->unit), SATA_EDMA_CFG_HQCACHE |
	    ((sc->sc_version == 1) ? SATA_EDMA_CFG_QL128 : 0));

	/* Set request queue pointers */
	work = ch->dma.work_bus;
	SATA_OUTL(sc, SATA_EDMA_REQBAHR(ch->unit), work >> 32);
	SATA_OUTL(sc, SATA_EDMA_REQIPR(ch->unit), work & 0xFFFFFFFF);
	SATA_OUTL(sc, SATA_EDMA_REQOPR(ch->unit), work & 0xFFFFFFFF);

	/* Set response queue pointers */
	work += sc->sc_edma_qlen * sizeof(struct sata_crqb);
	SATA_OUTL(sc, SATA_EDMA_RESBAHR(ch->unit), work >> 32);
	SATA_OUTL(sc, SATA_EDMA_RESIPR(ch->unit), work & 0xFFFFFFFF);
	SATA_OUTL(sc, SATA_EDMA_RESOPR(ch->unit), work & 0xFFFFFFFF);

	/* Clear any outstanding interrupts */
	ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
	SATA_OUTL(sc, SATA_SATA_FISICR(ch->unit), 0);
	SATA_OUTL(sc, SATA_EDMA_IECR(ch->unit), 0);
	SATA_OUTL(sc, SATA_ICR,
	    ~(SATA_ICR_DEV(ch->unit) | SATA_ICR_DMADONE(ch->unit)));

	/* Unmask channel interrupts */
	SATA_OUTL(sc, SATA_EDMA_IEMR(ch->unit), 0xFFFFFFFF);
	SATA_OUTL(sc, SATA_MIMR, SATA_INL(sc, SATA_MIMR) |
	    SATA_MICR_DONE(ch->unit) | SATA_MICR_DMADONE(ch->unit) |
	    SATA_MICR_ERR(ch->unit));

	ch->attached = 1;

	return (ata_attach(dev));
}
Example #15
static void
ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code)
{
	union  ccb	  *ccb;
	struct ccb_scsiio *csio;

	ccb = accb->ccb;
	csio = &accb->ccb->csio;

	if ((accb->flags & ACCB_ACTIVE) == 0) {
		device_printf(aha->dev, 
		    "ahadone - Attempt to free non-active ACCB %p\n",
		    (void *)accb);
		return;
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);
		bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
	}

	if (accb == aha->recovery_accb) {
		/*
		 * The recovery ACCB does not have a CCB associated
		 * with it, so short circuit the normal error handling.
		 * We now traverse our list of pending CCBs and process
		 * any that were terminated by the recovery CCBs action.
		 * We also reinstate timeouts for all remaining, pending,
		 * CCBs.
		 */
		struct cam_path *path;
		struct ccb_hdr *ccb_h;
		cam_status error;

		/* Notify all clients that a BDR occurred */
		error = xpt_create_path(&path, /*periph*/NULL,
		    cam_sim_path(aha->sim), accb->hccb.target,
		    CAM_LUN_WILDCARD);

		if (error == CAM_REQ_CMP) {
			xpt_async(AC_SENT_BDR, path, NULL);
			xpt_free_path(path);
		}

		ccb_h = LIST_FIRST(&aha->pending_ccbs);
		while (ccb_h != NULL) {
			struct aha_ccb *pending_accb;

			pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
			if (pending_accb->hccb.target == accb->hccb.target) {
				pending_accb->hccb.ahastat = AHASTAT_HA_BDR;
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
				ahadone(aha, pending_accb, AMBI_ERROR);
			} else {
				callout_reset_sbt(&pending_accb->timer,
				    SBT_1MS * ccb_h->timeout, 0, ahatimeout,
				    pending_accb, 0);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
		}
		device_printf(aha->dev, "No longer in timeout\n");
		return;
	}

	callout_stop(&accb->timer);

	switch (comp_code) {
	case AMBI_FREE:
		device_printf(aha->dev,
		    "ahadone - CCB completed with free status!\n");
		break;
	case AMBI_NOT_FOUND:
		device_printf(aha->dev,
		    "ahadone - CCB Abort failed to find CCB\n");
		break;
	case AMBI_ABORT:
	case AMBI_ERROR:
		/* An error occurred */
		if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
			csio->resid = 0;
		else
			csio->resid = aha_a24tou(accb->hccb.data_len);
		switch(accb->hccb.ahastat) {
		case AHASTAT_DATARUN_ERROR:
		{
			if (csio->resid <= 0) {
				csio->ccb_h.status = CAM_DATA_RUN_ERR;
				break;
			}
			/* FALLTHROUGH */
		}
		case AHASTAT_NOERROR:
			csio->scsi_status = accb->hccb.sdstat;
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			switch(csio->scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				csio->ccb_h.status |= CAM_AUTOSNS_VALID;
				/*
				 * The aha writes the sense data at different
				 * offsets based on the scsi cmd len
				 */
				bcopy((caddr_t) &accb->hccb.scsi_cdb +
				    accb->hccb.cmd_len,
				    (caddr_t) &csio->sense_data,
				    accb->hccb.sense_len);
				break;
			case SCSI_STATUS_OK:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
			default:
				break;
			}
			break;
		case AHASTAT_SELTIMEOUT:
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case AHASTAT_UNEXPECTED_BUSFREE:
			csio->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case AHASTAT_INVALID_PHASE:
			csio->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case AHASTAT_INVALID_ACTION_CODE:
			panic("%s: Inavlid Action code", aha_name(aha));
			break;
		case AHASTAT_INVALID_OPCODE:
			if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
				panic("%s: Invalid CCB Opcode %x hccb = %p",
				    aha_name(aha), accb->hccb.opcode,
				    &accb->hccb);
			device_printf(aha->dev,
			    "AHA-1540A compensation failed\n");
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			csio->ccb_h.status = CAM_REQUEUE_REQ;
			break;
		case AHASTAT_LINKED_CCB_LUN_MISMATCH:
			/* We don't even support linked commands... */
			panic("%s: Linked CCB Lun Mismatch", aha_name(aha));
			break;
		case AHASTAT_INVALID_CCB_OR_SG_PARAM:
			panic("%s: Invalid CCB or SG list", aha_name(aha));
			break;
		case AHASTAT_HA_SCSI_BUS_RESET:
			if ((csio->ccb_h.status & CAM_STATUS_MASK)
			    != CAM_CMD_TIMEOUT)
				csio->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case AHASTAT_HA_BDR:
			if ((accb->flags & ACCB_DEVICE_RESET) == 0)
				csio->ccb_h.status = CAM_BDR_SENT;
			else
				csio->ccb_h.status = CAM_CMD_TIMEOUT;
			break;
		}
		if (csio->ccb_h.status != CAM_REQ_CMP) {
			xpt_freeze_devq(csio->ccb_h.path, /*count*/1);
			csio->ccb_h.status |= CAM_DEV_QFRZN;
		}
		if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		break;
	case AMBI_OK:
		/* All completed without incident */
		/* XXX DO WE NEED TO COPY SENSE BYTES HERE???? XXX */
		/* I don't think so since it works???? */
		ccb->ccb_h.status |= CAM_REQ_CMP;
		if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		break;
	}
}
Example #16
static int
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int             	i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[IXL_MAX_TSO_SEGS];

	cmd = off = 0;
	m_head = *m_headp;

	/*
	 * Capture the first descriptor used; it will contain the
	 * index of the one we tell the hardware to report back.
	 */
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = IXL_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = IXL_MAX_TSO_SEGS;
		if (ixl_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag  << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i; /* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
	buf->eop_index = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	ixl_flush(hw);
	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}
Example #17
static void
ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 aha_ccb *accb;
	union	 ccb *ccb;
	struct	 aha_softc *aha;
	uint32_t paddr;

	accb = (struct aha_ccb *)arg;
	ccb = accb->ccb;
	aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;

	if (error != 0) {
		if (error != EFBIG)
			device_printf(aha->dev,
			    "Unexepected error 0x%x returned from "
			    "bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		aha_sg_t *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		sg = accb->sg_list;
		while (dm_segs < end_seg) {
			ahautoa24(dm_segs->ds_len, sg->len);
			ahautoa24(dm_segs->ds_addr, sg->addr);
			sg++;
			dm_segs++;
		}

		if (nseg > 1) {
			accb->hccb.opcode = aha->ccb_sg_opcode;
			ahautoa24((sizeof(aha_sg_t) * nseg),
			    accb->hccb.data_len);
			ahautoa24(accb->sg_list_phys, accb->hccb.data_addr);
		} else {
			bcopy(accb->sg_list->len, accb->hccb.data_len, 3);
			bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3);
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);

	} else {
		accb->hccb.opcode = INITIATOR_CCB;
		ahautoa24(0, accb->hccb.data_len);
		ahautoa24(0, accb->hccb.data_addr);
	}

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		return;
	}

	accb->flags = ACCB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le);

	callout_reset_sbt(&accb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
	    ahatimeout, accb, 0);

	/* Tell the adapter about this command */
	if (aha->cur_outbox->action_code != AMBO_FREE) {
		/*
		 * We should never encounter a busy mailbox.
		 * If we do, warn the user, and treat it as
		 * a resource shortage.  If the controller is
		 * hung, one of the pending transactions will
		 * timeout causing us to start recovery operations.
		 */
		device_printf(aha->dev,
		    "Encountered busy mailbox with %d out of %d "
		    "commands active!!!", aha->active_ccbs, aha->max_ccbs);
		callout_stop(&accb->timer);
		if (nseg != 0)
			bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
		ahafreeccb(aha, accb);
		aha->resource_shortage = TRUE;
		xpt_freeze_simq(aha->sim, /*count*/1);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}
	paddr = ahaccbvtop(aha, accb);
	ahautoa24(paddr, aha->cur_outbox->ccb_addr);
	aha->cur_outbox->action_code = AMBO_START;
	aha_outb(aha, COMMAND_REG, AOP_START_MBOX);

	ahanextoutbox(aha);
}
Example #18
/* Start packet transmission on the interface. */
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf    *m0;
	bus_dmamap_t	dmamap;
	int		txstart;
	int		txsfree;
	int		newpkts = 0;
	int		error;

	/*
	 * Do not start another transmission if one is already in progress
	 * and more descriptors (Tx slots) are needed for the next packet.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {
		int		seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. If the packet will not fit,
		 * it will be dropped. If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			aprint_error_dev(sc->bce_dev,
			    "Tx packet consumes too many DMA segments, "
			    "dropping...\n");
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			aprint_error_dev(sc->bce_dev,
			    "unable to load Tx buffer, error = %d\n",
			    error);
			break;
		}
		/* If not enough descriptors available, try again later */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}
		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		/* So take it off the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
				BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			uint32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr + 0x40000000);	/* MAGIC */
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}
		/* sync descriptors being used */
		if (sc->bce_txsnext > txstart) {
			bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			    PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart,
			    sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
			    PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart,
			    sizeof(struct bce_dma_slot) *
			    (BCE_NTXDESC - txstart),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (sc->bce_txsnext != 0) {
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_ring_map, PAGE_SIZE,
				    sc->bce_txsnext *
				    sizeof(struct bce_dma_slot),
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			}
		}

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
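
The free-descriptor computation at the top of bce_start() is the standard
one-reserved-slot ring formula.  As a minimal standalone sketch (the helper
name and signature are illustrative, not part of the driver):

/*
 * Free slots on a descriptor ring with producer index `next' and
 * consumer index `in'; one slot is always left unused so that a
 * completely full ring can be distinguished from an empty one.
 */
static inline int
ring_slots_free(int next, int in, int ndesc)
{

	if (next >= in)
		return ndesc - 1 + in - next;
	return in - next - 1;
}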
Example #19
void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct qlw_softc	*sc = link->adapter_softc;
	struct qlw_ccb		*ccb;
	struct qlw_iocb_req0	*iocb;
	struct qlw_ccb_list	list;
	u_int16_t		req, rspin;
	int			offset, error, done;
	bus_dmamap_t		dmap;
	int			bus;
	int			seg;

	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
		    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n", DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
		    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}
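
The request-ID arithmetic repeated throughout qlw_scsi_cmd() (increment,
then wrap to zero at sc_maxrequests) can be read as one small helper; this
is an illustrative sketch, not a function that exists in the driver:

/* Advance a request/response ring index, wrapping at the ring size. */
static inline u_int16_t
ring_next_id(u_int16_t id, u_int16_t ring_size)
{
	return (id + 1 == ring_size) ? 0 : id + 1;
}

Note also the sync bracketing around each IOCB above: the code syncs
POSTWRITE before rewriting an entry in host memory and PREWRITE afterwards,
so the entry is flushed out before the hardware is told about it.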
Example #20
/* Receive interrupt handler */
void
bce_rxintr(struct bce_softc *sc)
{
	struct ifnet   *ifp = &sc->ethercom.ec_if;
	struct rx_pph  *pph;
	struct mbuf    *m;
	int		curr;
	int		len;
	int		i;

	/* get pointer to active receive slot */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
	    & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NRXDESC)
		curr = BCE_NRXDESC - 1;

	/* process packets up to but not including the current packet */
	for (i = sc->bce_rxin; i != curr;
	    i + 1 > BCE_NRXDESC - 1 ? i = 0 : i++) {
		/* complete any post dma memory ops on packet */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[i], 0,
		    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * If the packet had an error, simply recycle the buffer,
		 * resetting the length and flags.
		 */
		pph = mtod(sc->bce_cdata.bce_rx_chain[i], struct rx_pph *);
		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
			ifp->if_ierrors++;
			pph->len = 0;
			pph->flags = 0;
			continue;
		}
		/* receive the packet */
		len = pph->len;
		if (len == 0)
			continue;	/* no packet if empty */
		pph->len = 0;
		pph->flags = 0;
		/* bump past the pre-header to the packet data */
		sc->bce_cdata.bce_rx_chain[i]->m_data += 30;	/* MAGIC */

		/*
		 * The chip includes the CRC with every packet.  Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when receiving lots
		 * of small packets.
		 *
		 * Otherwise, add a new buffer to the receive
		 * chain.  If this fails, drop the packet and
		 * recycle the old buffer.
		 */
		if (len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			memcpy(mtod(m, void *),
			    mtod(sc->bce_cdata.bce_rx_chain[i], void *), len);
			sc->bce_cdata.bce_rx_chain[i]->m_data -= 30;	/* MAGIC */
		} else {
Example #21
/*
 * Form an aggregate packet list.
 *
 * This function enforces the aggregate restrictions/requirements.
 *
 * These are:
 *
 * + The aggregate size maximum (64k for AR9160 and later, 8K for
 *   AR5416 when doing RTS frame protection.)
 * + Maximum number of sub-frames for an aggregate
 * + The aggregate delimiter size, giving MACs time to do whatever is
 *   needed before each frame
 * + Enforce the BAW limit
 *
 * Each descriptor queued should have the DMA setup.
 * The rate series, descriptor setup, linking, etc is all done
 * externally. This routine simply chains them together.
 * ath_tx_setds_11n() will take care of configuring the per-
 * descriptor setup, and ath_buf_set_rate() will configure the
 * rate control.
 *
 * Note that the TID lock is only grabbed when dequeuing packets from
 * the TID queue. If some code in another thread adds to the head of this
 * list, very strange behaviour will occur. Since retransmission is the
 * only reason this will occur, and this routine is designed to be called
 * from within the scheduler task, it won't ever clash with the completion
 * task.
 *
 * So if you want to call this from an upper layer context (eg, to direct-
 * dispatch aggregate frames to the hardware), please keep this in mind.
 */
ATH_AGGR_STATUS
ath_tx_form_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid,
    ath_bufhead *bf_q)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int nframes = 0;
	uint16_t aggr_limit = 0, al = 0, bpad = 0, al_delta, h_baw;
	struct ieee80211_tx_ampdu *tap;
	int status = ATH_AGGR_DONE;
	int prev_frames = 0;	/* XXX for AR5416 burst, not done here */
	int prev_al = 0;	/* XXX also for AR5416 burst */

	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);

	tap = ath_tx_get_tx_tid(an, tid->tid);
	if (tap == NULL) {
		status = ATH_AGGR_ERROR;
		goto finish;
	}

	h_baw = tap->txa_wnd / 2;

	for (;;) {
		bf = TAILQ_FIRST(&tid->axq_q);
		if (bf_first == NULL)
			bf_first = bf;
		if (bf == NULL) {
			status = ATH_AGGR_DONE;
			break;
		} else {
			/*
			 * Set the aggregation limit based on the
			 * rate control decision that has been made
			 * for the first frame.
			 */
			aggr_limit = ath_get_aggr_limit(sc, bf_first);
		}

		/* Set this early just so things don't get confused */
		bf->bf_next = NULL;

		/*
		 * Don't unlock the tid lock until we're sure we are going
		 * to queue this frame.
		 */

		/*
		 * If the frame doesn't have a sequence number that we're
		 * tracking in the BAW (e.g. a NULL QoS data frame), we can't
		 * aggregate it. Stop the aggregation process; the sender
		 * can then TX what's in the list thus far and then
		 * TX the frame individually.
		 */
		if (! bf->bf_state.bfs_dobaw) {
			status = ATH_AGGR_NONAGGR;
			break;
		}

		/*
		 * If any of the rates are non-HT, this packet
		 * can't be aggregated.
		 * XXX TODO: add a bf_state flag which gets marked
		 * if any active rate is non-HT.
		 */

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_state.bfs_pktlen;
		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * If RTS/CTS is set on the first frame, enforce
		 * the RTS aggregate limit.
		 */
		if (bf_first->bf_state.bfs_txflags &
		    (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
			if (nframes &&
			   (sc->sc_rts_aggr_limit <
			     (al + bpad + al_delta + prev_al))) {
				status = ATH_AGGR_8K_LIMITED;
				break;
			}
		}

		/*
		 * Do not exceed subframe limit.
		 */
		if ((nframes + prev_frames) >= MIN((h_baw),
		    IEEE80211_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * If the current frame has an RTS/CTS configuration
		 * that differs from the first frame, override the
		 * subsequent frame with this config.
		 */
		bf->bf_state.bfs_txflags &=
		    ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
		bf->bf_state.bfs_txflags |=
		    bf_first->bf_state.bfs_txflags &
		    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);

		/*
		 * TODO: If it's _before_ the BAW left edge, complain very
		 * loudly.
		 *
		 * This means something (else) has slid the left edge along
		 * before we got a chance to be TXed.
		 */

		/*
		 * Check if we have space in the BAW for this frame before
		 * we add it.
		 *
		 * see ath_tx_xmit_aggr() for more info.
		 */
		if (bf->bf_state.bfs_dobaw) {
			ieee80211_seq seqno;

			/*
			 * If the sequence number is allocated, use it.
			 * Otherwise, use the sequence number we WOULD
			 * allocate.
			 */
			if (bf->bf_state.bfs_seqno_assigned)
				seqno = SEQNO(bf->bf_state.bfs_seqno);
			else
				seqno = ni->ni_txseqs[bf->bf_state.bfs_tid];

			/*
			 * Check whether either the currently allocated
			 * sequence number _OR_ the to-be allocated
			 * sequence number is inside the BAW.
			 */
			if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
			    seqno)) {
				status = ATH_AGGR_BAW_CLOSED;
				break;
			}

			/* XXX check for bfs_need_seqno? */
			if (! bf->bf_state.bfs_seqno_assigned) {
				int seqno;
				seqno = ath_tx_tid_seqno_assign(sc, ni, bf, bf->bf_m);
				if (seqno < 0) {
					device_printf(sc->sc_dev,
					    "%s: bf=%p, huh, seqno=-1?\n",
					    __func__,
					    bf);
					/* XXX what can we even do here? */
				}
				/* Flush seqno update to RAM */
				/*
				 * XXX This is required because the dmasetup
				 * XXX is done early rather than at dispatch
				 * XXX time. Ew, we should fix this!
				 */
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_PREWRITE);
			}
		}

		/*
		 * If the packet has a sequence number, do not
		 * step outside of the block-ack window.
		 */
		if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
		    SEQNO(bf->bf_state.bfs_seqno))) {
			device_printf(sc->sc_dev,
			    "%s: bf=%p, seqno=%d, outside?!\n",
			    __func__, bf, SEQNO(bf->bf_state.bfs_seqno));
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/*
		 * this packet is part of an aggregate.
		 */
		ATH_TXQ_REMOVE(tid, bf, bf_list);

		/* The TID lock is required for the BAW update */
		ath_tx_addto_baw(sc, an, tid, bf);
		bf->bf_state.bfs_addedbaw = 1;

		/*
		 * XXX enforce ACK for aggregate frames (this needs to be
		 * XXX handled more gracefully?)
		 */
		if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
			device_printf(sc->sc_dev,
			    "%s: HAL_TXDESC_NOACK set for an aggregate frame?\n",
			    __func__);
			bf->bf_state.bfs_txflags &= (~HAL_TXDESC_NOACK);
		}

		/*
		 * Add the now owned buffer (which isn't
		 * on the software TXQ any longer) to our
		 * aggregate frame list.
		 */
		TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
		nframes++;

		/* Completion handler */
		bf->bf_comp = ath_tx_aggr_comp;

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Calculate delimiters needed for the current frame
		 */
		bf->bf_state.bfs_ndelim =
		    ath_compute_num_delims(sc, bf_first,
		    bf->bf_state.bfs_pktlen);

		/*
		 * Calculate the padding needed from this set of delimiters,
		 * used when calculating if the next frame will fit in
		 * the aggregate.
		 */
		bpad = PADBYTES(al_delta) + (bf->bf_state.bfs_ndelim << 2);

		/*
		 * Chain the buffers together
		 */
		if (bf_prev)
			bf_prev->bf_next = bf;
		bf_prev = bf;

		/*
		 * XXX TODO: if any sub-frames have RTS/CTS enabled;
		 * enable it for the entire aggregate.
		 */

#if 0
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_state.bfs_pktlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif

	}

finish:
	/*
	 * Just in case the list was empty when we tried to
	 * dequeue a packet ..
	 */
	if (bf_first) {
		bf_first->bf_state.bfs_al = al;
		bf_first->bf_state.bfs_nframes = nframes;
	}
	return status;
}
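
The BAW_WITHIN() tests above ask whether a sequence number lies inside the
block-ack window.  802.11 sequence numbers are 12 bits, so the comparison
has to be made modulo 4096; the sketch below shows the usual shape of such
a check (illustrative, not necessarily the driver's exact macro):

#define SEQ_SPACE	4096	/* 12-bit 802.11 sequence number space */

/* Is seqno within [start, start + wnd) modulo the sequence space? */
static inline int
baw_within(int start, int wnd, int seqno)
{

	return ((seqno - start) & (SEQ_SPACE - 1)) < wnd;
}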
Example #22
/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements.
 *
 * Input:		arg	-- ptr to OSL internal request context
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments -- # of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/* 
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/* 
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
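
On FreeBSD, bus_dmamap_load() may defer the mapping and invoke the callback
later, which is why all post-mapping work, including the error path, lives
in the callback above.  A minimal sketch of the pattern, with a hypothetical
request structure and callback name:

struct my_request {
	int	error;	/* mapping error recorded for the caller, if any */
	/* ... driver-specific request state ... */
};

static void
my_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct my_request *req = arg;

	if (error != 0) {
		/* e.g. EFBIG: the buffer needed too many segments */
		req->error = error;
		return;
	}
	/* Build the controller's SG list from segs[0 .. nseg - 1] here. */
}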
Example #23
static int
ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	/* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo). This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
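
The self-linking scheme described in the comments reduces to a three-step
append.  The sketch below uses an illustrative descriptor type and assumes
ds_paddr is the new descriptor's bus address:

struct rxdesc_sketch {
	uint32_t	ds_link;	/* bus address of the next descriptor */
	uint32_t	ds_data;	/* bus address of the rx buffer */
};

static void
rxlist_append(uint32_t **rxlinkp, struct rxdesc_sketch *ds, uint32_t ds_paddr)
{

	ds->ds_link = ds_paddr;		/* new tail links to itself */
	if (*rxlinkp != NULL)
		**rxlinkp = ds_paddr;	/* old tail's self-link is "fixed" */
	*rxlinkp = &ds->ds_link;	/* remember the new tail's link field */
}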
Example #24
/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/* 
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);

			mtx_unlock_spin(sc->io_lock);
		} else {
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/* 
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		free(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}
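
The ordering rule applied above for the bounce ("alignment") buffer is: on
a read, sync POSTREAD first and only then copy out of the bounce buffer; on
a write, the copy into the bounce buffer happened before the PREWRITE sync.
A condensed sketch of the read side, using FreeBSD's three-argument
bus_dmamap_sync() (helper name illustrative):

static void
bounce_read_done(bus_dma_tag_t tag, bus_dmamap_t map,
	void *bounce_buf, void *real_buf, size_t len)
{
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
	bcopy(bounce_buf, real_buf, len);	/* copy the real data in */
	bus_dmamap_unload(tag, map);
}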
Example #25
static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc	*rxd;
	struct ifnet		*ifp = sc->kr_ifp;
	int			cons, prog, packet_len, count, error;
	struct kr_desc		*cur_rx;
	struct mbuf		*m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;

		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
		/* Assume it's an error */
		error = 1;

		if (packet_len != count)
			ifp->if_ierrors++;
		else if (count < 64)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			ifp->if_ipackets++;

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev, 
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
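
The chain of error checks in kr_rx() amounts to one acceptance predicate; a
sketch using the driver's DEVCS flag names (the helper itself is
illustrative):

static int
kr_rx_frame_ok(int packet_len, int count, uint32_t devcs)
{

	return packet_len == count &&		/* lengths agree */
	    count >= 64 &&			/* not a runt frame */
	    (devcs & KR_DMARX_DEVCS_LD) != 0 &&	/* last descriptor seen */
	    (devcs & KR_DMARX_DEVCS_ROK) != 0;	/* receive-OK status */
}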
Example #26
static int
at91_usart_bus_attach(struct uart_softc *sc)
{
	int err;
	int i;
	struct at91_usart_softc *atsc;

	atsc = (struct at91_usart_softc *)sc;

	if (at91_usart_requires_rts0_workaround(sc))
		atsc->flags |= USE_RTS0_WORKAROUND;

	/*
	 * See if we have a TIMEOUT bit.  We disable all interrupts as
	 * a side effect.  Boot loaders may have enabled them.  Since
	 * a TIMEOUT interrupt can't happen without other setup, the
	 * apparent race here can't actually happen.
	 */
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);
	WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT);
	if (RD4(&sc->sc_bas, USART_IMR) & USART_CSR_TIMEOUT)
		atsc->flags |= HAS_TIMEOUT;
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);

	/*
	 * Allocate transmit DMA tag and map.  We allow a transmit buffer
	 * to be any size, but it must map to a single contiguous physical
	 * extent.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &atsc->tx_tag);
	if (err != 0)
		goto errout;
	err = bus_dmamap_create(atsc->tx_tag, 0, &atsc->tx_map);
	if (err != 0)
		goto errout;

	if (atsc->flags & HAS_TIMEOUT) {
		/*
		 * Allocate receive DMA tags, maps, and buffers.
		 * The receive buffers should be aligned to arm_dcache_align,
		 * otherwise partial cache line flushes on every receive
		 * interrupt are pretty much guaranteed.
		 */
		err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
		    arm_dcache_align, 0, BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR, NULL, NULL, sc->sc_rxfifosz, 1,
		    sc->sc_rxfifosz, BUS_DMA_ALLOCNOW, NULL, NULL,
		    &atsc->rx_tag);
		if (err != 0)
			goto errout;
		for (i = 0; i < 2; i++) {
			err = bus_dmamem_alloc(atsc->rx_tag,
			    (void **)&atsc->ping_pong[i].buffer,
			    BUS_DMA_NOWAIT, &atsc->ping_pong[i].map);
			if (err != 0)
				goto errout;
			err = bus_dmamap_load(atsc->rx_tag,
			    atsc->ping_pong[i].map,
			    atsc->ping_pong[i].buffer, sc->sc_rxfifosz,
			    at91_getaddr, &atsc->ping_pong[i].pa, 0);
			if (err != 0)
				goto errout;
			bus_dmamap_sync(atsc->rx_tag, atsc->ping_pong[i].map,
			    BUS_DMASYNC_PREREAD);
		}
		atsc->ping = &atsc->ping_pong[0];
		atsc->pong = &atsc->ping_pong[1];
	}

	/* Turn on rx and tx */
	DELAY(1000);		/* Give pending character a chance to drain.  */
	WR4(&sc->sc_bas, USART_CR, USART_CR_RSTSTA | USART_CR_RSTRX | USART_CR_RSTTX);
	WR4(&sc->sc_bas, USART_CR, USART_CR_RXEN | USART_CR_TXEN);

	/*
	 * Set up the PDC to receive data.  We use the ping-pong buffers
	 * so that we can more easily bounce between the two, and so that
	 * we get an interrupt halfway through the software 'fifo', which
	 * helps avoid overruns.
	 */
	if (atsc->flags & HAS_TIMEOUT) {
		WR4(&sc->sc_bas, PDC_RPR, atsc->ping->pa);
		WR4(&sc->sc_bas, PDC_RCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_RNPR, atsc->pong->pa);
		WR4(&sc->sc_bas, PDC_RNCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_PTCR, PDC_PTCR_RXTEN);

		/*
		 * Set the receive timeout to be 1.5 character times
		 * assuming 8N1.
		 */
		WR4(&sc->sc_bas, USART_RTOR, 15);
		WR4(&sc->sc_bas, USART_CR, USART_CR_STTTO);
		WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT |
		    USART_CSR_RXBUFF | USART_CSR_ENDRX);
	} else {
		WR4(&sc->sc_bas, USART_IER, USART_CSR_RXRDY);
	}
	WR4(&sc->sc_bas, USART_IER, USART_CSR_RXBRK | USART_DCE_CHANGE_BITS);

	/* Prime sc->hwsig with the initial hw line states. */
	at91_usart_bus_getsig(sc);

errout:
	return (err);
}
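
When the PDC reports the "ping" buffer full, the hardware is already
receiving into "pong"; software drains the full buffer, re-queues it as the
next-pointer target, and swaps the two.  A minimal sketch of that rotation
(illustrative; the real interrupt path also reloads PDC_RNPR/PDC_RNCR):

struct pp_sketch {
	void		*buffer;	/* KVA of the receive buffer */
	bus_addr_t	pa;		/* bus address programmed into the PDC */
};

static void
pingpong_rotate(struct pp_sketch **ping, struct pp_sketch **pong)
{
	struct pp_sketch *drained = *ping;

	*ping = *pong;		/* hardware is now filling the old pong */
	*pong = drained;	/* drained buffer becomes next in line */
}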
Example #27
static int
esp_pci_dma_intr(struct ncr53c9x_softc *sc)
{
	struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
	bus_dma_tag_t xferdmat;
	bus_dmamap_t xferdmam;
	size_t dmasize;
	int datain, i, resid, trans;
	uint32_t dmastat;
	char *p = NULL;

	xferdmat = esc->sc_xferdmat;
	xferdmam = esc->sc_xferdmam;
	datain = esc->sc_datain;

	dmastat = READ_DMAREG(esc, DMA_STAT);

	if ((dmastat & DMASTAT_ERR) != 0) {
		/* XXX not tested... */
		WRITE_DMAREG(esc, DMA_CMD, DMACMD_ABORT | (datain != 0 ?
		    DMACMD_DIR : 0));

		device_printf(esc->sc_dev, "DMA error detected; Aborting.\n");
		bus_dmamap_sync(xferdmat, xferdmam, datain != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xferdmat, xferdmam);
		return (-1);
	}

	if ((dmastat & DMASTAT_ABT) != 0) {
		/* XXX what should be done? */
		device_printf(esc->sc_dev, "DMA aborted.\n");
		WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ?
		    DMACMD_DIR : 0));
		esc->sc_active = 0;
		return (0);
	}

	KASSERT(esc->sc_active != 0, ("%s: DMA wasn't active", __func__));

	/* DMA has stopped. */

	esc->sc_active = 0;

	dmasize = esc->sc_dmasize;
	if (dmasize == 0) {
		/* A "Transfer Pad" operation completed. */
		NCR_DMA(("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
		    __func__, READ_ESPREG(esc, NCR_TCL) |
		    (READ_ESPREG(esc, NCR_TCM) << 8),
		    READ_ESPREG(esc, NCR_TCL), READ_ESPREG(esc, NCR_TCM)));
		return (0);
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the ESP counter registers get decremented as
	 * bytes are clocked into the FIFO.
	 */
	if (datain == 0 &&
	    (resid = (READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF)) != 0)
		NCR_DMA(("%s: empty esp FIFO of %d ", __func__, resid));

	if ((sc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * "Terminal count" is off, so read the residue
		 * out of the ESP counter registers.
		 */
		if (datain != 0) {
			resid = READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF;
			while (resid > 1)
				resid =
				    READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF;
			WRITE_DMAREG(esc, DMA_CMD, DMACMD_BLAST | DMACMD_DIR);

			for (i = 0; i < 0x8000; i++) /* XXX 0x8000 ? */
				if ((READ_DMAREG(esc, DMA_STAT) &
				    DMASTAT_BCMP) != 0)
					break;

			/* See the below comments... */
			if (resid != 0)
				p = *esc->sc_dmaaddr;
		}

		resid += READ_ESPREG(esc, NCR_TCL) |
		    (READ_ESPREG(esc, NCR_TCM) << 8) |
		    (READ_ESPREG(esc, NCR_TCH) << 16);
	} else
		while ((dmastat & DMASTAT_DONE) == 0)
			dmastat = READ_DMAREG(esc, DMA_STAT);

	WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ?
	    DMACMD_DIR : 0));

	/* Sync the transfer buffer. */
	bus_dmamap_sync(xferdmat, xferdmam, datain != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(xferdmat, xferdmam);

	trans = dmasize - resid;

	/*
	 * From the technical manual notes:
	 *
	 * "In some odd byte conditions, one residual byte will be left
	 *  in the SCSI FIFO, and the FIFO flags will never count to 0.
	 *  When this happens, the residual byte should be retrieved
	 *  via PIO following completion of the BLAST operation."
	 */
	if (p != NULL) {
		p += trans;
		*p = READ_ESPREG(esc, NCR_FIFO);
		trans++;
	}

	if (trans < 0) {			/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		device_printf(dev, "xfer (%d) > req (%d)\n", trans, dmasize);
#endif
		trans = dmasize;
	}

	NCR_DMA(("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n", __func__,
	    READ_ESPREG(esc, NCR_TCL), READ_ESPREG(esc, NCR_TCM),
	    READ_ESPREG(esc, NCR_TCH), trans, resid));

	*esc->sc_dmalen -= trans;
	*esc->sc_dmaaddr = (char *)*esc->sc_dmaaddr + trans;

	return (0);
}
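
The residue computed above is the 24-bit transfer counter reassembled from
the three TC registers, plus whatever is still sitting in the FIFO; the
bytes actually moved are then the request size minus that residue.  As a
worked sketch (helper name illustrative):

static int
esp_residue(uint8_t tcl, uint8_t tcm, uint8_t tch, int fifo_resid)
{

	return fifo_resid + (tcl | (tcm << 8) | (tch << 16));
}

/* so that: trans = dmasize - esp_residue(tcl, tcm, tch, fifo_resid); */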
Example #28
/**
 * mrsas_data_load_cb:        Callback entry point   
 * input:                     Pointer to command packet as argument 
 *                            Pointer to segment
 *                            Number of segments
 *                            Error 
 *
 * This is the callback function of the bus dma map load.  It builds 
 * the SG list.  
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
    struct mrsas_softc *sc = cmd->sc;
    MRSAS_RAID_SCSI_IO_REQUEST *io_request;
    pMpi25IeeeSgeChain64_t sgl_ptr;
    int i = 0, sg_processed = 0;

    if (error)
    {
        cmd->error_code = error;
        device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
        if (error == EFBIG) {
            cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
            return;
        }
    }

    if (cmd->flags & MRSAS_DIR_IN)
        bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
                                            BUS_DMASYNC_PREREAD);
    if (cmd->flags & MRSAS_DIR_OUT)
        bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
                                            BUS_DMASYNC_PREWRITE);
    if (nseg > sc->max_num_sge) {
        device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
        return;
    }

    io_request = cmd->io_request;
    sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
        pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
        sgl_ptr_end += sc->max_sge_in_main_msg - 1;
        sgl_ptr_end->Flags = 0;
    }

    if (nseg != 0) {
        for (i=0; i < nseg; i++) {
            sgl_ptr->Address = segs[i].ds_addr;
            sgl_ptr->Length = segs[i].ds_len;
            sgl_ptr->Flags = 0;
            if ((sc->device_id == MRSAS_INVADER) ||
                (sc->device_id == MRSAS_FURY)) {
                if (i == nseg - 1)
                    sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
            }
            sgl_ptr++;
            sg_processed = i + 1;
            /*
             * Prepare chain element
             */
            if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
                (nseg > sc->max_sge_in_main_msg)) {
                pMpi25IeeeSgeChain64_t sg_chain;

                if ((sc->device_id == MRSAS_INVADER) ||
                    (sc->device_id == MRSAS_FURY)) {
                    if ((cmd->io_request->IoFlags &
                        MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
                        MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
                        cmd->io_request->ChainOffset =
                            sc->chain_offset_io_request;
                    else
                        cmd->io_request->ChainOffset = 0;
                } else
                    cmd->io_request->ChainOffset =
                        sc->chain_offset_io_request;
                sg_chain = sgl_ptr;
                if ((sc->device_id == MRSAS_INVADER) ||
                    (sc->device_id == MRSAS_FURY))
                    sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
                else
                    sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
                        MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
                sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
                    (nseg - sg_processed));
                sg_chain->Address = cmd->chain_frame_phys_addr;
                sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
            }
        }
    }
    cmd->sge_count = nseg;
}
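
The chain bookkeeping above follows from one invariant: once nseg exceeds
max_sge_in_main_msg, the last slot of the main message is spent on the
chain element itself, so the chain frame must carry the remaining entries.
A sketch of that count (illustrative helper, 4-space style as in the
driver):

static int
sges_in_chain(int nseg, int max_sge_in_main_msg)
{
    if (nseg <= max_sge_in_main_msg)
        return 0;    /* everything fits in the main message */
    /* the last main-message slot holds the chain element */
    return nseg - (max_sge_in_main_msg - 1);
}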
Example #29
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet		*ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;

	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;
	
	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];
	
	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0) 
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;
	
		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	mpf->m_pkthdr.flowid = sgc->rss_hash;
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

	(*ifp->if_input)(ifp, mpf);

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
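
The three word assignments above strip the VLAN tag in place: the 12 bytes
of destination and source MAC address are slid forward by one 32-bit word,
overwriting the 4-byte 802.1Q tag, and m_adj() then drops the now-dead
first four bytes.  The same trick over a raw frame pointer (illustrative;
assumes the frame start is 4-byte aligned):

static void
strip_vlan_inplace(uint32_t *frame)
{

	/* words 0..2 hold the two 6-byte MAC addresses */
	frame[3] = frame[2];	/* slide the MACs forward by 4 bytes */
	frame[2] = frame[1];
	frame[1] = frame[0];
	/* caller trims ETHER_VLAN_ENCAP_LEN (4) bytes off the front */
}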
Example #30
static void
at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd)
{
	uint32_t cmdr, mr;
	struct mmc_data *data;

	sc->curcmd = cmd;
	data = cmd->data;

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	if (sc->host.ios.bus_mode == opendrain)
		cmdr |= MCI_CMDR_OPDCMD;

	/* Set up response handling.  Allow max timeout for responses. */

	if (MMC_RSP(cmd->flags) == MMC_RSP_NONE)
		cmdr |= MCI_CMDR_RSPTYP_NO;
	else {
		cmdr |= MCI_CMDR_MAXLAT;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= MCI_CMDR_RSPTYP_136;
		else
			cmdr |= MCI_CMDR_RSPTYP_48;
	}

	/*
	 * If there is no data transfer, just set up the right interrupt mask
	 * and start the command.
	 *
	 * The interrupt mask needs to be CMDRDY plus all non-data-transfer
	 * errors. It's important to leave the transfer-related errors out, to
	 * avoid spurious timeout or crc errors on a STOP command following a
	 * multiblock read.  When a multiblock read is in progress, sending a
	 * STOP in the middle of a block occasionally triggers such errors, but
	 * we're totally uninterested in them because we've already gotten all
	 * the data we wanted without error before sending the STOP command.
	 */

	if (data == NULL) {
		uint32_t ier = MCI_SR_CMDRDY |
		    MCI_SR_RTOE | MCI_SR_RENDE |
		    MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE;

		at91_mci_pdc_disable(sc);

		if (cmd->opcode == MMC_STOP_TRANSMISSION)
			cmdr |= MCI_CMDR_TRCMD_STOP;

		/* Ignore response CRC on CMD2 and ACMD41, per standard. */

		if (cmd->opcode == MMC_SEND_OP_COND ||
		    cmd->opcode == ACMD_SD_SEND_OP_COND)
			ier &= ~MCI_SR_RCRCE;

		if (mci_debug)
			printf("CMDR %x (opcode %d) ARGR %x no data\n",
			    cmdr, cmd->opcode, cmd->arg);

		WR4(sc, MCI_ARGR, cmd->arg);
		WR4(sc, MCI_CMDR, cmdr);
		WR4(sc, MCI_IDR, 0xffffffff);
		WR4(sc, MCI_IER, ier);
		return;
	}

	/* There is data, set up the transfer-related parts of the command. */

	if (data->flags & MMC_DATA_READ)
		cmdr |= MCI_CMDR_TRDIR;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE))
		cmdr |= MCI_CMDR_TRCMD_START;

	if (data->flags & MMC_DATA_STREAM)
		cmdr |= MCI_CMDR_TRTYP_STREAM;
	else if (data->flags & MMC_DATA_MULTI) {
		cmdr |= MCI_CMDR_TRTYP_MULTIPLE;
		sc->flags |= (data->flags & MMC_DATA_READ) ?
		    CMD_MULTIREAD : CMD_MULTIWRITE;
	}

	/*
	 * Disable PDC until we're ready.
	 *
	 * Set block size and turn on PDC mode for dma xfer.
	 * Note that the block size is the smaller of the amount of data to be
	 * transferred, or 512 bytes.  The 512 size is fixed by the standard;
	 * smaller blocks are possible, but never larger.
	 */

	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	mr = RD4(sc, MCI_MR) & ~MCI_MR_BLKLEN;
	mr |= min(data->len, 512) << 16;
	WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE | MCI_MR_PDCPADV);

	/*
	 * Set up DMA.
	 *
	 * Use bounce buffers even if we don't need to byteswap, because doing
	 * multi-block IO with large DMA buffers is way fast (compared to
	 * single-block IO), even after incurring the overhead of also copying
	 * from/to the caller's buffers (which may be in non-contiguous physical
	 * pages).
	 *
	 * In an ideal non-byteswap world we could create a dma tag that allows
	 * for discontiguous segments and do the IO directly from/to the
	 * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the
	 * discontiguous buffers through the PDC. Someday.
	 *
	 * If a read is bigger than 2k, split it in half so that we can start
	 * byte-swapping the first half while the second half is on the wire.
	 * It would be best if we could split it into 8k chunks, but we can't
	 * always keep up with the byte-swapping due to other system activity,
	 * and if an RXBUFF interrupt happens while we're still handling the
	 * byte-swap from the prior buffer (IE, we haven't returned from
	 * handling the prior interrupt yet), then data will get dropped on the
	 * floor and we can't easily recover from that.  The right fix for that
	 * would be to have the interrupt handling only keep the DMA flowing and
	 * enqueue filled buffers to be byte-swapped in a non-interrupt context.
	 * Even that won't work on the write side of things though; in that
	 * context we have to have all the data ready to go before starting the
	 * dma.
	 *
	 * XXX what about stream transfers?
	 */
	sc->xfer_offset = 0;
	sc->bbuf_curidx = 0;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) {
		uint32_t len;
		uint32_t remaining = data->len;
		bus_addr_t paddr;
		int err;

		if (remaining > (BBCOUNT*BBSIZE))
			panic("IO read size exceeds MAXDATA\n");

		if (data->flags & MMC_DATA_READ) {
			if (remaining > 2048) // XXX
				len = remaining / 2;
			else
				len = remaining;
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO read dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREREAD);
			WR4(sc, PDC_RPR, paddr);
			WR4(sc, PDC_RCR, len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO read dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREREAD);
				WR4(sc, PDC_RNPR, paddr);
				WR4(sc, PDC_RNCR, len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN);
		} else {
			len = min(BBSIZE, remaining);
			/*
			 * If this is MCI1 revision 2xx controller, apply
			 * a work-around for the "Data Write Operation and
			 * number of bytes" erratum.
			 */
			if ((sc->sc_cap & CAP_MCI1_REV2XX) && len < 12) {
				len = 12;
				memset(sc->bbuf_vaddr[0], 0, 12);
			}
			at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len);
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO write dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREWRITE);
			WR4(sc, PDC_TPR, paddr);
			WR4(sc, PDC_TCR, len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				at91_bswap_buf(sc, sc->bbuf_vaddr[1],
				    ((char *)data->data)+BBSIZE, len);
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO write dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREWRITE);
				WR4(sc, PDC_TNPR, paddr);
				WR4(sc, PDC_TNCR, len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			/* do not enable PDC xfer until CMDRDY asserted */
		}
		data->xfer_len = 0; /* XXX what's this? appears to be unused. */
	}

	if (mci_debug)
		printf("CMDR %x (opcode %d) ARGR %x with data len %d\n",
		       cmdr, cmd->opcode, cmd->arg, cmd->data->len);

	WR4(sc, MCI_ARGR, cmd->arg);
	WR4(sc, MCI_CMDR, cmdr);
	WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
}