Example #1
0
/*
 * msi_eq_free: release the DMA resources backing an MSI event queue,
 * undoing the load/map/alloc/create sequence in reverse order, then
 * free the descriptor structure itself.
 */
void
msi_eq_free(bus_dma_tag_t t, struct msi_eq *meq)
{
	bus_size_t size;

	/* Recompute the page-rounded size used at allocation time. */
	size = roundup(meq->meq_nentries * sizeof(struct msi_msg), PAGE_SIZE);

	bus_dmamap_unload(t, meq->meq_map);
	bus_dmamem_unmap(t, meq->meq_va, size);
	bus_dmamem_free(t, &meq->meq_seg, 1);
	bus_dmamap_destroy(t, meq->meq_map);
	free(meq, M_DEVBUF, 0);	/* size 0: allocation size not tracked here */
}
Example #2
0
/*
 * aha_free: tear down controller resources in the reverse order of
 * their creation.  init_level records how far attachment progressed;
 * every case deliberately falls through so teardown resumes at the
 * matching stage and continues all the way down.
 */
void
aha_free(struct aha_softc *aha)
{
	switch (aha->init_level) {
	default:
	case 8:
	{
		struct sg_map_node *sg_map;

		/* Release each S/G map page: unload the map, free the
		 * DMA memory, then free the bookkeeping node. */
		while ((sg_map = SLIST_FIRST(&aha->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&aha->sg_maps, links);
			bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap);
			bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr,
			    sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(aha->sg_dmat);
	}
	/* FALLTHROUGH */
	case 7:
		bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap);
	/* FALLTHROUGH */
	case 6:
		bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array,
		    aha->ccb_dmamap);
	/* FALLTHROUGH */
	case 5:
		bus_dma_tag_destroy(aha->ccb_dmat);
	/* FALLTHROUGH */
	case 4:
		bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap);
	/* FALLTHROUGH */
	case 3:
		bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes,
		    aha->mailbox_dmamap);
	/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(aha->buffer_dmat);
	/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(aha->mailbox_dmat);
	/* FALLTHROUGH */
	case 0:
		break;
	}
	mtx_destroy(&aha->lock);
}
Example #3
0
/*
 * mrsas_unmap_request:	Unmap and unload data
 * input:				Adapter instance soft state
 * 						Pointer to command packet
 *
 * This function unmaps and unloads data from OS.  The data map is
 * synced for CPU access in the direction(s) the transfer used, and the
 * unload itself is serialized with the adapter's io_lock.  No-op when
 * the command carried no data buffer.
 */
void
mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
    if (cmd->data != NULL) {
        if (cmd->flags & MRSAS_DIR_IN)
            bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
        if (cmd->flags & MRSAS_DIR_OUT)
            bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
        mtx_lock(&sc->io_lock);
        bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
        mtx_unlock(&sc->io_lock);
    }
}
Example #4
0
/*
 * Free a packet buffer.  If the mbuf still carries an RX or TX DMA map
 * in its context, unload and destroy that map before releasing the
 * mbuf chain.
 */
extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
    const int dmaflags = M_HASRXDMAMAP|M_HASTXDMAMAP;

    if ((m->m_flags & dmaflags) != 0) {
	bus_dmamap_t dmam = M_GETCTX(m, bus_dmamap_t);

	bus_dmamap_unload(sc->sc_dmatag, dmam);
	bus_dmamap_destroy(sc->sc_dmatag, dmam);
	m->m_flags &= ~dmaflags;
    }
    m_freem(m);
}
Example #5
0
/*
 * esp_pci_dma_stop: abort the current DMA transfer on the controller
 * and drop the transfer mapping.  Used on error/reset paths.
 */
static void
esp_pci_dma_stop(struct ncr53c9x_softc *sc)
{
	struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;

	/* DMA stop */
	/* XXX what should we do here ? */
	WRITE_DMAREG(esc, DMA_CMD,
	    DMACMD_ABORT | (esc->sc_datain != 0 ? DMACMD_DIR : 0));
	/* NOTE(review): unload is unconditional — presumably a transfer
	 * map is always loaded when this runs; confirm against callers. */
	bus_dmamap_unload(esc->sc_xferdmat, esc->sc_xferdmam);

	esc->sc_active = 0;
}
/*
 * DMA completion callback for an I2S buffer transfer: unload the
 * buffer's DMA map, then invoke the client's completion callback with
 * the owning device's interrupt lock held.
 */
static void
s3c2440_i2s_xfer_complete(dmac_xfer_t xfer, void *cookie)
{
	struct s3c2xx0_softc *sc = s3c2xx0_softc; /* Shortcut */
	s3c2440_i2s_buf_t buf = cookie;
	struct s3c2440_i2s_softc *i2s = buf->i2b_parent;

	bus_dmamap_unload(sc->sc_dmat, buf->i2b_dmamap);

	mutex_spin_enter(i2s->sc_intr_lock);
	(buf->i2b_cb)(buf->i2b_cb_cookie);
	mutex_spin_exit(i2s->sc_intr_lock);
}
Example #7
0
/*
 * pcscp_dma_stop: abort any DMA in progress on the controller and
 * unload the transfer map.  Used on error/reset paths.
 */
void
pcscp_dma_stop(struct ncr53c9x_softc *sc)
{
	struct pcscp_softc *esc = (struct pcscp_softc *)sc;

	/* dma stop */
	/* XXX What should we do here ? */
	WRITE_DMAREG(esc, DMA_CMD,
	    DMACMD_ABORT | (esc->sc_datain ? DMACMD_DIR : 0));
	bus_dmamap_unload(esc->sc_dmat, esc->sc_xfermap);

	esc->sc_active = 0;
}
Example #8
0
/*
 * Stop transmission on the interface: stop the tick timer, mark the
 * interface down, mask interrupts, disable the MAC and DMA engines,
 * and release any still-queued transmit mbufs.  When 'disable' is set,
 * the receive queue is drained as well.
 */
void
bce_stop(struct ifnet *ifp, int disable)
{
	struct bce_softc *sc = ifp->if_softc;
	int             i;
	u_int32_t val;

	/* Stop the 1 second timer */
	timeout_del(&sc->bce_timeout);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac, then poll (up to 200 * 10us) until the disable
	 * bit reads back clear. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);

	/* Release any queued transmit buffers. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if (sc->bce_cdata.bce_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_tx_map[i]);
			m_freem(sc->bce_cdata.bce_tx_chain[i]);
			sc->bce_cdata.bce_tx_chain[i] = NULL;
		}
	}

	/* drain receive queue */
	if (disable)
		bce_rxdrain(sc);
}
Example #9
0
/*
 * Issue an "adapter info" command to the controller and copy the
 * result into sc->adapter_info.
 *
 * A private DMA tag and buffer are created for the transfer; the
 * caller's thread then sleeps (up to 30s) for the completion callback.
 * Error paths release only the resources actually acquired — the
 * original code freed the DMA buffer and destroyed the tag even when
 * tag creation or buffer allocation had failed.
 *
 * Returns 0 on success, ENOMEM on allocation failure, ETIMEDOUT when
 * the adapter does not answer in time.
 */
static int ips_send_adapter_info_cmd(ips_command_t *command)
{
	int error = 0;
	ips_softc_t *sc = command->sc;
	ips_cmd_status_t *status = command->arg;

	if (bus_dma_tag_create(	/* parent    */	sc->adapter_dmatag,
				/* alignment */	1,
				/* boundary  */	0,
				/* lowaddr   */	BUS_SPACE_MAXADDR_32BIT,
				/* highaddr  */	BUS_SPACE_MAXADDR,
				/* filter    */	NULL,
				/* filterarg */	NULL,
				/* maxsize   */	IPS_ADAPTER_INFO_LEN,
				/* numsegs   */	1,
				/* maxsegsize*/	IPS_ADAPTER_INFO_LEN,
				/* flags     */	0,
				&command->data_dmatag) != 0) {
		printf("ips: can't alloc dma tag for adapter status\n");
		error = ENOMEM;
		goto exit;		/* nothing created yet */
	}
	if(bus_dmamem_alloc(command->data_dmatag, &command->data_buffer, 
	   BUS_DMA_NOWAIT, &command->data_dmamap)){
		error = ENOMEM;
		goto destroy_tag;	/* tag exists, no memory to free */
	}
	command->callback = ips_wakeup_callback;
	/* Arm the sleep before the load: the callback may fire at once. */
	asleep(status, 0, "ips", 30*hz);
	bus_dmamap_load(command->data_dmatag, command->data_dmamap, 
			command->data_buffer,IPS_ADAPTER_INFO_LEN, 
			ips_adapter_info_callback, command, BUS_DMA_NOWAIT);

	if (await(-1, -1))
		error = ETIMEDOUT;
	else {
		bus_dmamap_sync(command->data_dmatag, command->data_dmamap, 
				BUS_DMASYNC_POSTREAD);
		memcpy(&(sc->adapter_info), command->data_buffer, 
			IPS_ADAPTER_INFO_LEN);
	}
	bus_dmamap_unload(command->data_dmatag, command->data_dmamap);

	bus_dmamem_free(command->data_dmatag, command->data_buffer, 
			command->data_dmamap);
destroy_tag:
	bus_dma_tag_destroy(command->data_dmatag);
exit:
	ips_insert_free_cmd(sc, command);
	return error;
}
Example #10
0
/*
 * cas_encap: load an outgoing mbuf chain onto the TX descriptor ring.
 *
 * *bixp holds the current ring index on entry and is advanced to the
 * next free index on return.  Returns ENOBUFS if the mbuf cannot be
 * DMA-loaded or the ring lacks room; 0 on success.
 */
int
cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	/* Always keep two descriptors free so the ring never fills. */
	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* One descriptor per DMA segment; the first carries
	 * START_OF_PACKET, the last END_OF_PACKET. */
	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].cd_addr =
		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == CAS_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;
	/* Swap slot map pointers so the loaded map is parked on the slot
	 * of the packet's last descriptor (cur) — presumably where the
	 * completion path unloads it; confirm against the TX interrupt
	 * handler. */
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	/* Hand the new descriptors to the hardware. */
	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}
Example #11
0
/*
 * vdsk_dring_free: release the DMA resources backing a virtual-disk
 * descriptor ring, then free the ring structure itself.
 */
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
{
	bus_size_t size;

	/* Recompute the page-rounded size used at allocation time. */
	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

	bus_dmamap_unload(t, vd->vd_map);
	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
	free(vd, M_DEVBUF);
}
Example #12
0
/*
 * Drain the receive queue: unload and free any mbuf still attached to
 * an RX descriptor slot.
 */
void
bce_rxdrain(struct bce_softc *sc)
{
	int             slot;

	for (slot = 0; slot < BCE_NRXDESC; slot++) {
		struct mbuf *m = sc->bce_cdata.bce_rx_chain[slot];

		if (m == NULL)
			continue;
		bus_dmamap_unload(sc->bce_dmatag,
		    sc->bce_cdata.bce_rx_map[slot]);
		m_freem(m);
		sc->bce_cdata.bce_rx_chain[slot] = NULL;
	}
}
Example #13
0
/*
 * iee_detach: detach the interface — stop it if running, undo the
 * ethernet/ifnet attachment, then release the shared-memory DMA area
 * in reverse order of its setup.  'flags' is unused here.
 */
void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr, sc->sc_shmem_sz);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs);
}
Example #14
0
/*
 * rtwn_free_rx_list: free the RX descriptor ring and all per-slot RX
 * buffers.  Runs at splnet so the interrupt path cannot touch the ring
 * while it is being dismantled.
 */
void
rtwn_free_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i, s;

	s = splnet();

	/* Descriptor ring: unload, unmap and free the memory before
	 * destroying the map itself. */
	if (rx_ring->map) {
		if (rx_ring->desc) {
			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
			    sizeof (struct r92c_rx_desc_pci) *
			    RTWN_RX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
			    rx_ring->nsegs);
			rx_ring->desc = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
		rx_ring->map = NULL;
	}

	/* Per-slot RX mbufs and their DMA maps. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
		rx_data->map = NULL;
	}

	splx(s);
}
Example #15
0
/*
 * tws_unmap_request: sync a request's data DMA map for CPU access in
 * the direction(s) of the transfer, then unload it under the adapter's
 * io_lock.  No-op when the request carried no data buffer.
 */
void 
tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
{
    if (req->data == NULL)
        return;

    if (req->flags & TWS_DIR_IN)
        bus_dmamap_sync(sc->data_tag, req->dma_map, BUS_DMASYNC_POSTREAD);
    if (req->flags & TWS_DIR_OUT)
        bus_dmamap_sync(sc->data_tag, req->dma_map, BUS_DMASYNC_POSTWRITE);

    mtx_lock(&sc->io_lock);
    bus_dmamap_unload(sc->data_tag, req->dma_map);
    mtx_unlock(&sc->io_lock);
}
Example #16
0
/*
 * lsi64854_detach: release the buffer DMA resources created at setup
 * time.  Teardown only happens when a setup routine was installed
 * (sc->setup != NULL).  Always returns 0.
 */
int
lsi64854_detach(struct lsi64854_softc *sc)
{

	if (sc->setup != NULL) {
		/* NOTE(review): PRE* sync ops on a teardown path look
		 * unusual; the direction mirrors the channel's WRITE
		 * bit — confirm against the controller's setup logic. */
		bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
		    (L64854_GCSR(sc) & L64854_WRITE) != 0 ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_buffer_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_buffer_dmat);
	}

	return (0);
}
/*
 * s3c2440_i2s_free: release an I2S buffer — free any pending DMA
 * transfer object, then undo the buffer's DMA load/map/alloc in
 * reverse order and free the buffer structure itself.
 */
void
s3c2440_i2s_free(s3c2440_i2s_buf_t buf)
{
	struct s3c2xx0_softc *sc = s3c2xx0_softc; /* Shortcut */

	if (buf->i2b_xfer != NULL) {
		s3c2440_dmac_free_xfer(buf->i2b_xfer);
	}

	bus_dmamap_unload(sc->sc_dmat, buf->i2b_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, buf->i2b_dmamap);
	/* NOTE(review): unmap is passed &buf->i2b_addr — presumably
	 * i2b_addr holds the mapped KVA; confirm the field's type. */
	bus_dmamem_unmap(sc->sc_dmat, &buf->i2b_addr, buf->i2b_size);
	bus_dmamem_free(sc->sc_dmat, buf->i2b_segs, buf->i2b_nsegs);
	kmem_free(buf, sizeof(struct s3c2440_i2s_buf));
}
Example #18
0
/*
 * Return a TPD to the free list and clear its used bit.  When the TPD
 * still references an mbuf, its DMA map is assumed to have been used
 * to load that mbuf: the map is unloaded, the softc's tx-mbuf
 * accounting is released and the mbuf chain is freed first.
 */
static void
hatm_free_tpd(struct hatm_softc *sc, struct tpd *tpd)
{
	struct mbuf *m = tpd->mbuf;

	if (m != NULL) {
		bus_dmamap_unload(sc->tx_tag, tpd->map);
		hatm_free_txmbuf(sc);
		m_freem(m);
		tpd->mbuf = NULL;
	}

	/* back onto the free list */
	SLIST_INSERT_HEAD(&sc->tpd_free, tpd, link);
	TPD_CLR_USED(sc, tpd->no);
	sc->tpd_nfree++;
}
Example #19
0
/**
 *	ti_mmchs_intr_xfer_compl - called if a 'transfer complete' IRQ was received
 *	@sc: pointer to the driver context
 *	@cmd: the command that was sent previously
 *
 *	Syncs the DMA buffer for CPU access — direction chosen from the
 *	command register's DDIR bit — and unloads the map.
 *
 *	LOCKING:
 *	Called from interrupt context
 *
 *	RETURNS:
 *	Always 1: the transaction is complete.
 */
static int
ti_mmchs_intr_xfer_compl(struct ti_mmchs_softc *sc, struct mmc_command *cmd)
{
	uint32_t cmd_reg = ti_mmchs_read_4(sc, MMCHS_CMD);
	int was_read = (cmd_reg & MMCHS_CMD_DDIR) != 0;

	/* Sync-up the DMA buffer so the caller can access the new memory,
	 * then drop the mapping (common to both directions). */
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap,
	    was_read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	sc->sc_dmamapped--;

	/* Debugging dump of the data received */
#if 0
	{
		int i;
		uint8_t *p = (uint8_t*) sc->sc_cmd_data_vaddr;
		for (i=0; i<sc->sc_cmd_data_len; i++) {
			if ((i % 16) == 0)
				printf("\n0x%04x : ", i);
			printf("%02X ", *p++);
		}
		printf("\n");
	}
#endif

	/* We are done, transfer complete */
	return 1;
}
Example #20
0
/********************************************************************************
 * Free a command cluster: destroy each command's data map, then unload
 * and free the DMA memory holding the cluster's packets, and finally
 * free the cluster structure itself.
 */
static void
mly_free_command_cluster(struct mly_command_cluster *mcc)
{
    struct mly_softc	*sc = mcc->mcc_command[0].mc_sc;
    int			i;

    debug_called(1);

    /* All commands in the cluster share command 0's softc. */
    for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++)
	bus_dmamap_destroy(sc->mly_buffer_dmat, mcc->mcc_command[i].mc_datamap);

    bus_dmamap_unload(sc->mly_packet_dmat, mcc->mcc_packetmap);
    bus_dmamem_free(sc->mly_packet_dmat, mcc->mcc_packet, mcc->mcc_packetmap);
    free(mcc, M_DEVBUF);
}
Example #21
0
/*
 * Release all resources of a USB DMA memory block: unload and destroy
 * the map, unmap the kernel VA, free the memory segments and the block
 * structure itself.  Must not be called from interrupt context (the
 * DIAGNOSTIC check below bails out when no process context exists).
 */
void
usb_block_real_freemem(usb_dma_block_t *p)
{
#ifdef DIAGNOSTIC
	if (!curproc) {
		printf("usb_block_real_freemem: in interrupt context\n");
		return;
	}
#endif
	bus_dmamap_unload(p->tag, p->map);
	bus_dmamap_destroy(p->tag, p->map);
	bus_dmamem_unmap(p->tag, p->kaddr, p->size);
	bus_dmamem_free(p->tag, p->segs, p->nsegs);
	free(p, M_USB);
}
Example #22
0
/*
 * Drain the receive queue: free every mbuf still held by an RX
 * descriptor and unload its DMA map.
 */
void
epic_rxdrain(struct epic_softc *sc)
{
	int idx;

	for (idx = 0; idx < EPIC_NRXDESC; idx++) {
		struct epic_descsoft *ds = EPIC_DSRX(sc, idx);

		if (ds->ds_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}
}
Example #23
0
/*
 * Initialization of interface.  Resets the chip, reclaims transmit
 * descriptors and mbufs left over from a previous run, reinitializes
 * the receive ring, starts the chip and queues the setup frame.
 */
void
zeinit(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_cdata *zc = sc->sc_zedata;
	int i;

	/*
	 * Reset the interface.
	 */
	if (zereset(sc))
		return;

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		/* dm_nsegs > 0 means the map is still loaded. */
		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
		if (sc->sc_txmbuf[i]) {
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
	}


	/*
	 * Init receive descriptors.  ZE_FRAMELEN_OW presumably marks the
	 * descriptor as chip-owned — confirm against the chip docs.
	 */
	for (i = 0; i < RXDESCS; i++)
		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
	sc->sc_nextrx = 0;

	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
	    ZE_NICSR6_SR|ZE_NICSR6_DC);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	ze_setup(sc);

}
Example #24
0
static int
epe_intr(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t ndq = 0, irq, *cur;

	irq = EPE_READ(IntStsC);
begin:
	cur = (uint32_t *)(EPE_READ(RXStsQCurAdd) -
		sc->ctrlpage_dsaddr + (char*)sc->ctrlpage);
	CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(uint32_t),
		RX_QLEN * 4 * sizeof(uint32_t), 
		BUS_DMASYNC_PREREAD);
	while (sc->RXStsQ_cur != cur) {
		if ((sc->RXStsQ_cur[0] & (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) == 
			(RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) {
			uint32_t bi = (sc->RXStsQ_cur[1] >> 16) & 0x7fff;
			uint32_t fl = sc->RXStsQ_cur[1] & 0xffff;
			struct mbuf *m;

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_unload(sc->sc_dmat, 
					sc->rxq[bi].m_dmamap);
				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
				sc->rxq[bi].m->m_pkthdr.len = 
					sc->rxq[bi].m->m_len = fl;
				bpf_mtap(ifp, sc->rxq[bi].m);
                                (*ifp->if_input)(ifp, sc->rxq[bi].m);
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat, 
					sc->rxq[bi].m_dmamap, 
					m->m_ext.ext_buf, MCLBYTES,
					NULL, BUS_DMA_NOWAIT);
				sc->RXDQ[bi * 2] = 
					sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr;
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				ifp->if_ierrors++;
			} 
		} else {
Example #25
0
/*
 * Function name:	twa_free
 * Description:		Performs clean-up at the time of going down.
 *			Detaches from CAM, releases DMA resources,
 *			frees all memory, tears down the interrupt and
 *			bus resources, and destroys the control device.
 *			Each step is guarded so partial attaches can be
 *			unwound safely.
 *
 * Input:		sc	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	None
 */
static void
twa_free(struct twa_softc *sc)
{
	struct twa_request	*tr;

	twa_dbg_dprint_enter(3, sc);

	/* Detach from CAM */
	twa_cam_detach(sc);

	/* Destroy dma handles. */

	bus_dmamap_unload(sc->twa_dma_tag, sc->twa_cmd_map); 
	/* Drain the free-request queue, destroying each request's map. */
	while ((tr = twa_dequeue_free(sc)) != NULL)
		bus_dmamap_destroy(sc->twa_dma_tag, tr->tr_dma_map);

	/* Free all memory allocated so far. */
	if (sc->twa_req_buf)
		free(sc->twa_req_buf, TWA_MALLOC_CLASS);
	if (sc->twa_cmd_pkt_buf)
		bus_dmamem_free(sc->twa_dma_tag, sc->twa_cmd_pkt_buf,
					sc->twa_cmd_map);
	/* NOTE(review): only queue slot 0 is freed here — presumably the
	 * AEN queue entries were allocated as one block; confirm. */
	if (sc->twa_aen_queue[0])
		free (sc->twa_aen_queue[0], M_DEVBUF);

	/* Destroy the data-transfer DMA tag. */
	if (sc->twa_dma_tag)
		bus_dma_tag_destroy(sc->twa_dma_tag);

	/* Disconnect the interrupt handler. */
	if (sc->twa_intr_handle)
		bus_teardown_intr(sc->twa_bus_dev, sc->twa_irq_res,
					sc->twa_intr_handle);
	if (sc->twa_irq_res != NULL)
		bus_release_resource(sc->twa_bus_dev, SYS_RES_IRQ,
					0, sc->twa_irq_res);

	/* Release the register window mapping. */
	if (sc->twa_io_res != NULL)
		bus_release_resource(sc->twa_bus_dev, SYS_RES_IOPORT,
					TWA_IO_CONFIG_REG, sc->twa_io_res);

	/* Destroy the control device. */
	if (sc->twa_ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->twa_ctrl_dev);

	sysctl_ctx_free(&sc->twa_sysctl_ctx);
}
Example #26
0
/*
 * Completion handler for an ioctl command: sync the user data buffer
 * for CPU access in the direction of the transfer, sync the command
 * packet, unload the data map, propagate the status and return the
 * command to the free pool.
 */
static void ips_ioctl_finish(ips_command_t *command)
{
	ips_ioctl_t *ioctl_cmd = command->arg;
	if(ioctl_cmd->readwrite & IPS_IOCTL_READ){
		bus_dmamap_sync(ioctl_cmd->dmatag, ioctl_cmd->dmamap, 
				BUS_DMASYNC_POSTREAD);
	} else if(ioctl_cmd->readwrite & IPS_IOCTL_WRITE){
		bus_dmamap_sync(ioctl_cmd->dmatag, ioctl_cmd->dmamap, 
				BUS_DMASYNC_POSTWRITE);
	}
	/* Command packet was written by the host to the adapter. */
	bus_dmamap_sync(command->sc->command_dmatag, command->command_dmamap, 
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ioctl_cmd->dmatag, ioctl_cmd->dmamap);
	ioctl_cmd->status.value = command->status.value;
	ips_insert_free_cmd(command->sc, command);
}
/*
 * Abort the in-flight output DMA transfer for this buffer and unload
 * its DMA map.  Always returns 0.  The commented-out loop suggests the
 * abort was once retried until success; currently one attempt is made.
 */
int
s3c2440_i2s_halt_output(s3c2440_i2s_buf_t buf)
{
	/*int retval;*/
	struct s3c2xx0_softc *sc = s3c2xx0_softc; /* Shortcut */

	DPRINTF(("Aborting DMA transfer\n"));
	/*do {
	  retval =*/ s3c2440_dmac_abort_xfer(buf->i2b_xfer);
/*} while(retval != 0);*/
	DPRINTF(("Aborting DMA transfer: SUCCESS\n"));

	bus_dmamap_unload(sc->sc_dmat, buf->i2b_dmamap);

	return 0;
}
Example #28
0
File: asc.c Project: MarginC/kame
/*
 * asc_dma_reset: reset the RAMBO DMA engine — zero the block count,
 * clear the FIFO and error state, then unload the transfer map if one
 * is loaded and mark the engine idle.
 */
static void
asc_dma_reset(struct ncr53c9x_softc *sc)
{
	struct asc_softc *esc = (struct asc_softc *)sc;

 	bus_space_write_2(esc->sc_bst, esc->dm_bsh, RAMBO_BLKCNT, 0);
	bus_space_write_4(esc->sc_bst, esc->dm_bsh, RAMBO_MODE,
			  RB_CLRFIFO|RB_CLRERROR);
	DELAY(10);
 	bus_space_write_4(esc->sc_bst, esc->dm_bsh, RAMBO_MODE, 0);

	if (esc->sc_flags & DMA_MAPLOADED)
		bus_dmamap_unload(esc->sc_dmat, esc->sc_dmamap);

	esc->sc_flags = DMA_IDLE;
}
Example #29
0
void
asc_vsbus_dma_stop(struct ncr53c9x_softc *sc)
{
    struct asc_vsbus_softc *asc = (struct asc_vsbus_softc *)sc;

    if (asc->sc_flags & ASC_MAPLOADED) {
        bus_dmamap_sync(asc->sc_dmat, asc->sc_dmamap,
                        0, asc->sc_dmasize,
                        asc->sc_flags & ASC_FROMMEMORY
                        ? BUS_DMASYNC_POSTWRITE
                        : BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(asc->sc_dmat, asc->sc_dmamap);
    }

    asc->sc_flags &= ~(ASC_DMAACTIVE|ASC_MAPLOADED);
}
Example #30
0
/*
 * epe_gctx: reclaim completed transmit descriptors ("garbage collect").
 *
 * Compares the driver's TX status queue cursor with the hardware's
 * current position; for each completed entry, unloads and frees the
 * mbuf and releases every TXDQ slot that referenced it.  Returns the
 * number of descriptors reclaimed (0 when nothing completed).
 */
static int
epe_gctx(struct epe_softc *sc)
{
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t *cur, ndq = 0;

	/* Handle transmit completions: translate the hardware's bus
	 * address back into a pointer within the control page. */
	cur = (uint32_t *)(EPE_READ(TXStsQCurAdd) -
		sc->ctrlpage_dsaddr + (char*)sc->ctrlpage);

	if (sc->TXStsQ_cur != cur) { 
		CTRLPAGE_DMASYNC(TX_QLEN * 2 * sizeof(uint32_t), 
			TX_QLEN * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
	} else {
		return 0;
	}

	do {
		uint32_t tbi = *sc->TXStsQ_cur & 0x7fff;
		struct mbuf *m = sc->txq[tbi].m;

		/* A clear TxWE bit is counted as an output error. */
		if ((*sc->TXStsQ_cur & TXStsQ_TxWE) == 0) {
			ifp->if_oerrors++;
		}
		bus_dmamap_unload(sc->sc_dmat, sc->txq[tbi].m_dmamap);
		m_freem(m);
		/* Release every consecutive slot that referenced this
		 * mbuf (multi-fragment packets span several entries). */
		do {
			sc->txq[tbi].m = NULL;
			ndq++;
			tbi = (tbi + 1) % TX_QLEN;
		} while (sc->txq[tbi].m == m);

		ifp->if_opackets++;
		sc->TXStsQ_cur++;
		if (sc->TXStsQ_cur >= sc->TXStsQ + TX_QLEN) {
			sc->TXStsQ_cur = sc->TXStsQ;
		}
	} while (sc->TXStsQ_cur != cur); 

	sc->TXDQ_avail += ndq;
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Disable end-of-tx-chain interrupt */
		EPE_WRITE(IntEn, IntEn_REOFIE);
	}
	return ndq;
}