static int auich_alloc_cdata(struct auich_softc *sc) { bus_dma_segment_t seg; int error, rseg; /* * Allocate the control data structure, and create and load the * DMA map for it. */ if ((error = bus_dmamem_alloc(sc->dmat, sizeof(struct auich_cdata), PAGE_SIZE, 0, &seg, 1, &rseg, 0)) != 0) { aprint_error_dev(sc->sc_dev, "unable to allocate control data, error = %d\n", error); goto fail_0; } if ((error = bus_dmamem_map(sc->dmat, &seg, rseg, sizeof(struct auich_cdata), (void **) &sc->sc_cdata, sc->sc_dmamap_flags)) != 0) { aprint_error_dev(sc->sc_dev, "unable to map control data, error = %d\n", error); goto fail_1; } if ((error = bus_dmamap_create(sc->dmat, sizeof(struct auich_cdata), 1, sizeof(struct auich_cdata), 0, 0, &sc->sc_cddmamap)) != 0) { aprint_error_dev(sc->sc_dev, "unable to create control data DMA map, " "error = %d\n", error); goto fail_2; } if ((error = bus_dmamap_load(sc->dmat, sc->sc_cddmamap, sc->sc_cdata, sizeof(struct auich_cdata), NULL, 0)) != 0) { aprint_error_dev(sc->sc_dev, "unable tp load control data DMA map, " "error = %d\n", error); goto fail_3; } sc->pcmo.dmalist = sc->sc_cdata->ic_dmalist_pcmo; sc->pcmi.dmalist = sc->sc_cdata->ic_dmalist_pcmi; sc->mici.dmalist = sc->sc_cdata->ic_dmalist_mici; return 0; fail_3: bus_dmamap_destroy(sc->dmat, sc->sc_cddmamap); fail_2: bus_dmamem_unmap(sc->dmat, (void *) sc->sc_cdata, sizeof(struct auich_cdata)); fail_1: bus_dmamem_free(sc->dmat, &seg, rseg); fail_0: return error; }
/*
 * Allocate `size' bytes of DMA-able memory for the auvia device and
 * record the allocation on the softc's sc_dmas list.  Returns the
 * kernel virtual address of the buffer, or 0 if any step fails
 * (everything acquired so far is rolled back).
 */
void *
auvia_malloc(void *addr, int direction, size_t size, int pool, int flags)
{
	struct auvia_softc *sc = addr;
	struct auvia_dma *p;
	int error, rseg;

	p = malloc(sizeof(*p), pool, flags);
	if (p == NULL)
		return 0;
	p->size = size;

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &p->seg,
	    1, &rseg, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: unable to allocate dma, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_alloc;
	}

	error = bus_dmamem_map(sc->sc_dmat, &p->seg, rseg, size, &p->addr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		printf("%s: unable to map dma, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_map;
	}

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &p->map);
	if (error != 0) {
		printf("%s: unable to create dma map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_create;
	}

	error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: unable to load dma map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_load;
	}

	/* Success: chain the bookkeeping record onto the softc. */
	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;

	/* Error unwind, reverse order of acquisition. */
fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, &p->seg, 1);
fail_alloc:
	free(p, pool);
	return 0;
}
/*------------------------------------------------------------------------*
 * usb_pc_dmamap_destroy
 *
 * Release the DMA map attached to a page cache, if one exists, and
 * clear the tag/map fields so a repeated call is harmless.
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc == NULL || pc->tag == NULL)
		return;

	bus_dmamap_destroy(pc->tag, pc->map);
	pc->tag = NULL;
	pc->map = NULL;
}
static int auacer_freemem(struct auacer_softc *sc, struct auacer_dma *p) { bus_dmamap_unload(sc->dmat, p->map); bus_dmamap_destroy(sc->dmat, p->map); bus_dmamem_unmap(sc->dmat, p->addr, p->size); bus_dmamem_free(sc->dmat, p->segs, p->nsegs); return 0; }
/*
 * Tear down a radeon TTM backend object: destroy its DMA map, free the
 * segment array and the embedded ttm_dma_tt state, then release the
 * container itself.
 */
void
radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	bus_dmamap_destroy(gtt->rdev->dmat, gtt->map);
	free(gtt->segs, M_DRM);
	/*
	 * Fixed mangled argument: source had "ttm_dma_tt_fini(>t->ttm)",
	 * which is not valid C; the intended argument is &gtt->ttm
	 * (the "&gt" was lost to entity mangling).
	 */
	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}
/*
 * Detach the Marvell MALO HAL: release the command buffer DMA memory,
 * destroy the map and tag, tear down the HAL mutex and free the HAL
 * state itself.  Resources are released in reverse order of creation.
 */
void
malo_hal_detach(struct malo_hal *mh)
{
	/* FreeBSD-style bus_dmamem_free(tag, vaddr, map) releases the
	 * memory obtained from bus_dmamem_alloc(). */
	bus_dmamem_free(mh->mh_dmat, mh->mh_cmdbuf, mh->mh_dmamap);
	bus_dmamap_destroy(mh->mh_dmat, mh->mh_dmamap);
	bus_dma_tag_destroy(mh->mh_dmat);
	mtx_destroy(&mh->mh_mtx);
	free(mh, M_DEVBUF);
}
static int yds_freemem(struct yds_softc *sc, struct yds_dma *p) { bus_dmamap_unload(sc->sc_dmatag, p->map); bus_dmamap_destroy(sc->sc_dmatag, p->map); bus_dmamem_unmap(sc->sc_dmatag, p->addr, p->size); bus_dmamem_free(sc->sc_dmatag, p->segs, p->nsegs); return 0; }
drm_dma_handle_t * drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr) { drm_dma_handle_t *h; int error, nsegs; /* Need power-of-two alignment, so fail the allocation if it isn't. */ if ((align & (align - 1)) != 0) { DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n", (int)align); return NULL; } h = malloc(sizeof(drm_dma_handle_t), M_DRM, M_ZERO | M_NOWAIT); if (h == NULL) return NULL; if ((error = bus_dmamem_alloc(dev->pa.pa_dmat, size, align, 0, h->segs, 1, &nsegs, BUS_DMA_NOWAIT)) != 0) { printf("drm: Unable to allocate DMA, error %d\n", error); goto fail; } if ((error = bus_dmamem_map(dev->pa.pa_dmat, h->segs, nsegs, size, &h->addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { printf("drm: Unable to map DMA, error %d\n", error); goto free; } if ((error = bus_dmamap_create(dev->pa.pa_dmat, size, 1, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &h->map)) != 0) { printf("drm: Unable to create DMA map, error %d\n", error); goto unmap; } if ((error = bus_dmamap_load(dev->pa.pa_dmat, h->map, h->addr, size, NULL, BUS_DMA_NOWAIT)) != 0) { printf("drm: Unable to load DMA map, error %d\n", error); goto destroy; } h->busaddr = DRM_PCI_DMAADDR(h); h->vaddr = h->addr; h->size = size; return h; destroy: bus_dmamap_destroy(dev->pa.pa_dmat, h->map); unmap: bus_dmamem_unmap(dev->pa.pa_dmat, h->addr, size); free: bus_dmamem_free(dev->pa.pa_dmat, h->segs, 1); fail: free(h, M_DRM); return NULL; }
/*
 * Allocate `size' bytes of DMA memory for the auvia device and link the
 * bookkeeping record onto sc->sc_dmas.  Returns the kernel virtual
 * address of the buffer, or NULL on failure after releasing any
 * partially-acquired resources.
 */
static void *
auvia_malloc_dmamem(void *addr, int direction, size_t size)
{
	struct auvia_softc *sc;
	struct auvia_dma *p;
	int error, rseg;

	p = kmem_alloc(sizeof(*p), KM_SLEEP);
	if (p == NULL)
		return NULL;

	sc = addr;
	p->size = size;

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &p->seg,
	    1, &rseg, BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate DMA, error = %d\n", error);
		goto fail_alloc;
	}

	error = bus_dmamem_map(sc->sc_dmat, &p->seg, rseg, size, &p->addr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map DMA, error = %d\n", error);
		goto fail_map;
	}

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, &p->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create DMA map, error = %d\n", error);
		goto fail_create;
	}

	error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load DMA map, error = %d\n", error);
		goto fail_load;
	}

	/* Success: chain onto the softc's allocation list. */
	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;

	/* Error unwind, reverse order of acquisition. */
fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, &p->seg, 1);
fail_alloc:
	kmem_free(p, sizeof(*p));
	return NULL;
}
/*
 * rtk_detach:
 *	Detach a rtk interface: unhook timers, PHYs, media and the
 *	network stack, then release all transmit/receive DMA resources.
 *	Returns 0; a second call is a no-op because RTK_ATTACHED is
 *	cleared on the way out.
 */
int
rtk_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct rtk_tx_desc *txd;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);

	/* Detach from the entropy source and the network stack. */
	rnd_detach_source(&sc->rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Destroy only the transmit maps that were actually created. */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
	/* Release the receive buffer DMA resources in reverse order. */
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf, RTK_RXBUFLEN + 16);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);

	/* we don't want to run again */
	sc->sc_flags &= ~RTK_ATTACHED;

	return 0;
}
/* * Release the bus DMA mappings and memory in dmah, and deallocate it. */ void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah) { bus_dmamap_unload(dmah->dmah_tag, dmah->dmah_map); bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map); bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size); bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1); dmah->dmah_tag = NULL; /* XXX paranoia */ kmem_free(dmah, sizeof(*dmah)); }
/*
 * Tear down all resources held by the aha controller.  init_level
 * records how far attachment progressed; each case releases one stage
 * and deliberately falls through to the next lower level so that a
 * partially-attached controller is cleaned up correctly.
 */
void
aha_free(struct aha_softc *aha)
{
	switch (aha->init_level) {
	default:
	case 8:
	{
		struct sg_map_node *sg_map;

		/* Release every scatter/gather map segment. */
		while ((sg_map = SLIST_FIRST(&aha->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&aha->sg_maps, links);
			bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap);
			bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr,
			    sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(aha->sg_dmat);
	}
	/* FALLTHROUGH */
	case 7:
		bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap);
		/* FALLTHROUGH */
	case 6:
		bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array,
		    aha->ccb_dmamap);
		bus_dmamap_destroy(aha->ccb_dmat, aha->ccb_dmamap);
		/* FALLTHROUGH */
	case 5:
		bus_dma_tag_destroy(aha->ccb_dmat);
		/* FALLTHROUGH */
	case 4:
		bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes,
		    aha->mailbox_dmamap);
		bus_dmamap_destroy(aha->mailbox_dmat, aha->mailbox_dmamap);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(aha->buffer_dmat);
		/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(aha->mailbox_dmat);
		/* FALLTHROUGH */
	case 0:
		break;
	}
	mtx_destroy(&aha->lock);
}
/*
 * Release a handle allocated with drm_pci_alloc(): unload and destroy
 * the map, unmap and free the memory, then free the handle.  A NULL
 * handle is ignored.
 */
void
drm_pci_free(drm_device_t *dev, drm_dma_handle_t *h)
{
	bus_dma_tag_t dmat;

	if (h == NULL)
		return;

	dmat = dev->pa.pa_dmat;
	bus_dmamap_unload(dmat, h->map);
	bus_dmamap_destroy(dmat, h->map);
	bus_dmamem_unmap(dmat, h->addr, h->size);
	bus_dmamem_free(dmat, h->segs, 1);
	free(h, M_DRM);
}
/*
 * Free an MSI event queue: release the DMA resources backing the ring
 * (unload, unmap, free, destroy) and then the descriptor itself.  The
 * mapped size is recomputed the same way msi_eq_alloc() rounded it.
 */
void
msi_eq_free(bus_dma_tag_t t, struct msi_eq *meq)
{
	bus_size_t size = roundup(meq->meq_nentries *
	    sizeof(struct msi_msg), PAGE_SIZE);

	bus_dmamap_unload(t, meq->meq_map);
	bus_dmamem_unmap(t, meq->meq_va, size);
	bus_dmamem_free(t, &meq->meq_seg, 1);
	bus_dmamap_destroy(t, meq->meq_map);
	free(meq, M_DEVBUF, 0);
}
/*
 * Free a pdq data buffer mbuf.  If the mbuf carries a receive or
 * transmit DMA map in its context, unload and destroy that map and
 * clear the ownership flags before handing the chain to m_freem().
 */
extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
	if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
		bus_dmamap_t dmap = M_GETCTX(m, bus_dmamap_t);

		bus_dmamap_unload(sc->sc_dmatag, dmap);
		bus_dmamap_destroy(sc->sc_dmatag, dmap);
		m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
	}
	m_freem(m);
}
/*
 * Detach an iee interface: stop the hardware if it is running, detach
 * from the network stack, then release the shared-memory DMA resources
 * in reverse order of allocation.  `flags' is accepted for the detach
 * protocol but not used here.
 */
void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Quiesce the hardware before tearing down its shared memory. */
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	/* Release shared-memory DMA state: unload, destroy, unmap, free. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr, sc->sc_shmem_sz);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs);
}
/*
 * Free a vdsk descriptor ring: release the DMA resources backing the
 * ring and then the ring descriptor itself.  The mapped size is
 * recomputed exactly as vdsk_dring_alloc() rounded it.
 */
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
{
	bus_size_t size = roundup(vd->vd_nentries * sizeof(struct vd_desc),
	    PAGE_SIZE);

	bus_dmamap_unload(t, vd->vd_map);
	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
	free(vd, M_DEVBUF);
}
struct ldc_queue * ldc_queue_alloc(int nentries) #endif { struct ldc_queue *lq; bus_size_t size; vaddr_t va = 0; #if OPENBSD_BUSDMA int nsegs; #endif lq = kmem_zalloc(sizeof(struct ldc_queue), KM_NOSLEEP); if (lq == NULL) return NULL; mutex_init(&lq->lq_mtx, MUTEX_DEFAULT, IPL_TTY); size = roundup(nentries * sizeof(struct ldc_pkt), PAGE_SIZE); #if OPENBSD_BUSDMA if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &lq->lq_map) != 0) return (NULL); if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &lq->lq_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) goto destroy; if (bus_dmamem_map(t, &lq->lq_seg, 1, size, (void *)&va, BUS_DMA_NOWAIT) != 0) goto free; if (bus_dmamap_load(t, lq->lq_map, (void*)va, size, NULL, BUS_DMA_NOWAIT) != 0) goto unmap; #else va = (vaddr_t)kmem_zalloc(size, KM_NOSLEEP); #endif lq->lq_va = (vaddr_t)va; lq->lq_nentries = nentries; return (lq); #if OPENBSD_BUSDMA unmap: bus_dmamem_unmap(t, (void*)va, size); free: bus_dmamem_free(t, &lq->lq_seg, 1); destroy: bus_dmamap_destroy(t, lq->lq_map); #endif return (NULL); }
/*
 * Detach the njata32 controller.  DMA resources are only torn down
 * when the command page was mapped during attach (NJATA32_CMDPG_MAPPED);
 * a wdcdetach() failure is propagated untouched so the caller can abort
 * the detach.
 */
int
njata32_detach(struct njata32_softc *sc, int flags)
{
	int rv, devno;

	if (sc->sc_flags & NJATA32_CMDPG_MAPPED) {
		/* Detach the wdc layer first; bail out if it refuses. */
		if ((rv = wdcdetach(sc->sc_wdcdev.sc_atac.atac_dev, flags)))
			return rv;

		/* free DMA resource */
		/* Per-drive transfer maps, then the scatter/gather table. */
		for (devno = 0; devno < NJATA32_NUM_DEV; devno++) {
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_dev[devno].d_dmamap_xfer);
		}
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_sgt);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_sgt);
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_sgtpg,
		    sizeof(struct njata32_dma_page));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_sgt_seg,
		    sc->sc_sgt_nsegs);
	}

	return 0;
}
/*
 * Free the receive descriptor ring and every per-slot receive buffer.
 * Runs at splnet() so the interrupt path cannot touch the ring while
 * it is being torn down; cleared pointers make a repeat call safe for
 * the ring itself.
 */
void
rtwn_free_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i, s;

	s = splnet();

	/* Tear down the descriptor ring: unload/unmap/free, then the map. */
	if (rx_ring->map) {
		if (rx_ring->desc) {
			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
			    sizeof (struct r92c_rx_desc_pci) *
			    RTWN_RX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
			    rx_ring->nsegs);
			rx_ring->desc = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
		rx_ring->map = NULL;
	}

	/* Release each slot's mbuf (if loaded) and its DMA map. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		/* NOTE(review): rx_data->map is destroyed unconditionally;
		 * presumably every slot's map was created during alloc --
		 * confirm before calling this on a partially-set-up ring. */
		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
		rx_data->map = NULL;
	}

	splx(s);
}
/*
 * Detach the LSI64854 DMA engine.  If a transfer was set up, perform a
 * final sync whose direction is chosen from the WRITE bit in the
 * engine's CSR, then unload and destroy the buffer map and its tag.
 * Always returns 0.
 */
int
lsi64854_detach(struct lsi64854_softc *sc)
{
	if (sc->setup != NULL) {
		/* Sync direction follows the programmed transfer direction. */
		bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
		    (L64854_GCSR(sc) & L64854_WRITE) != 0 ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_buffer_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_buffer_dmat);
	}

	return (0);
}
/*
 * Really free a USB DMA block: unload and destroy its map, unmap and
 * free the backing memory, then the block descriptor itself.  On
 * DIAGNOSTIC kernels, calls from interrupt context (no curproc) are
 * refused with a message rather than risking a sleep.
 */
void
usb_block_real_freemem(usb_dma_block_t *p)
{
#ifdef DIAGNOSTIC
	if (!curproc) {
		printf("usb_block_real_freemem: in interrupt context\n");
		return;
	}
#endif
	/* Release in reverse order of allocation. */
	bus_dmamap_unload(p->tag, p->map);
	bus_dmamap_destroy(p->tag, p->map);
	bus_dmamem_unmap(p->tag, p->kaddr, p->size);
	bus_dmamem_free(p->tag, p->segs, p->nsegs);
	free(p, M_USB);
}
/*
 * Free an I2S DMA buffer: cancel any pending DMA transfer, release the
 * bus_dma resources in reverse order of allocation, then free the
 * buffer descriptor itself.
 */
void
s3c2440_i2s_free(s3c2440_i2s_buf_t buf)
{
	struct s3c2xx0_softc *sc = s3c2xx0_softc; /* Shortcut */

	/* Cancel an in-flight transfer before tearing down its buffer. */
	if (buf->i2b_xfer != NULL) {
		s3c2440_dmac_free_xfer(buf->i2b_xfer);
	}

	bus_dmamap_unload(sc->sc_dmat, buf->i2b_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, buf->i2b_dmamap);
	/*
	 * NOTE(review): &buf->i2b_addr passes the address of the field,
	 * while bus_dmamem_unmap() normally takes the mapped KVA itself.
	 * This may be deliberate if i2b_addr is an embedded buffer --
	 * confirm the field's type before changing.
	 */
	bus_dmamem_unmap(sc->sc_dmat, &buf->i2b_addr, buf->i2b_size);
	bus_dmamem_free(sc->sc_dmat, buf->i2b_segs, buf->i2b_nsegs);
	kmem_free(buf, sizeof(struct s3c2440_i2s_buf));
}
/*
 * Destroy the DMA map associated with ISA DRQ channel `chan'.
 * An out-of-range channel (valid range is 0..7) is a kernel bug:
 * report it and panic, exactly as before.
 */
void
_isa_dmamap_destroy(struct isa_dma_state *ids, int chan)
{
	if (chan < 0 || chan > 7) {
		printf("%s: bogus drq %d\n", device_xname(ids->ids_dev),
		    chan);
		panic("_isa_dmamap_destroy");
	}

	bus_dmamap_destroy(ids->ids_dmat, ids->ids_dmamaps[chan]);
}
/********************************************************************************
 * Free a command cluster: destroy each member command's data map, then
 * release the shared packet allocation and the cluster structure itself.
 */
static void
mly_free_command_cluster(struct mly_command_cluster *mcc)
{
	/* All commands in a cluster share the same softc. */
	struct mly_softc *sc = mcc->mcc_command[0].mc_sc;
	int i;

	debug_called(1);

	/* Per-command data maps first... */
	for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++)
		bus_dmamap_destroy(sc->mly_buffer_dmat,
		    mcc->mcc_command[i].mc_datamap);
	/* ...then the shared packet memory and the cluster. */
	bus_dmamap_unload(sc->mly_packet_dmat, mcc->mcc_packetmap);
	bus_dmamem_free(sc->mly_packet_dmat, mcc->mcc_packet,
	    mcc->mcc_packetmap);
	free(mcc, M_DEVBUF);
}
/*
 * Function name:	twa_free
 * Description:		Performs clean-up at the time of going down.
 *			Releases CAM state, DMA handles, allocated memory,
 *			the interrupt handler and bus resources, and the
 *			control device, guarding each release so a
 *			partially-attached controller detaches cleanly.
 *
 * Input:		sc -- ptr to per ctlr structure
 * Output:		None
 * Return value:	None
 */
static void
twa_free(struct twa_softc *sc)
{
	struct twa_request *tr;

	twa_dbg_dprint_enter(3, sc);

	/* Detach from CAM */
	twa_cam_detach(sc);

	/* Destroy dma handles. */
	bus_dmamap_unload(sc->twa_dma_tag, sc->twa_cmd_map);
	/* Drain the free-request queue, destroying each request's map. */
	while ((tr = twa_dequeue_free(sc)) != NULL)
		bus_dmamap_destroy(sc->twa_dma_tag, tr->tr_dma_map);

	/* Free all memory allocated so far. */
	if (sc->twa_req_buf)
		free(sc->twa_req_buf, TWA_MALLOC_CLASS);
	if (sc->twa_cmd_pkt_buf)
		bus_dmamem_free(sc->twa_dma_tag, sc->twa_cmd_pkt_buf,
		    sc->twa_cmd_map);
	if (sc->twa_aen_queue[0])
		free (sc->twa_aen_queue[0], M_DEVBUF);

	/* Destroy the data-transfer DMA tag. */
	if (sc->twa_dma_tag)
		bus_dma_tag_destroy(sc->twa_dma_tag);

	/* Disconnect the interrupt handler. */
	if (sc->twa_intr_handle)
		bus_teardown_intr(sc->twa_bus_dev, sc->twa_irq_res,
		    sc->twa_intr_handle);
	if (sc->twa_irq_res != NULL)
		bus_release_resource(sc->twa_bus_dev, SYS_RES_IRQ,
		    0, sc->twa_irq_res);

	/* Release the register window mapping. */
	if (sc->twa_io_res != NULL)
		bus_release_resource(sc->twa_bus_dev, SYS_RES_IOPORT,
		    TWA_IO_CONFIG_REG, sc->twa_io_res);

	/* Destroy the control device. */
	if (sc->twa_ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->twa_ctrl_dev);

	sysctl_ctx_free(&sc->twa_sysctl_ctx);
}
/*
 * Allocate, map and load a DMA-safe buffer of `size' bytes for the vs
 * device, recording everything in *vd.  Returns 0 on success, or the
 * bus_dma error code after rolling back any partially-acquired
 * resources.
 */
static int
vs_allocmem(struct vs_softc *sc, size_t size, size_t align, size_t boundary,
    struct vs_dma *vd)
{
	int error;

#ifdef DIAGNOSTIC
	if (size > DMAC_MAXSEGSZ)
		panic ("vs_allocmem: maximum size exceeded, %d", (int) size);
#endif

	vd->vd_size = size;

	if ((error = bus_dmamem_alloc(vd->vd_dmat, vd->vd_size, align,
	    boundary, vd->vd_segs,
	    sizeof (vd->vd_segs) / sizeof (vd->vd_segs[0]),
	    &vd->vd_nsegs, BUS_DMA_WAITOK)) != 0)
		goto out;

	if ((error = bus_dmamem_map(vd->vd_dmat, vd->vd_segs, vd->vd_nsegs,
	    vd->vd_size, &vd->vd_addr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT)) != 0)
		goto free;

	if ((error = bus_dmamap_create(vd->vd_dmat, vd->vd_size, 1,
	    DMAC_MAXSEGSZ, 0, BUS_DMA_WAITOK, &vd->vd_map)) != 0)
		goto unmap;

	if ((error = bus_dmamap_load(vd->vd_dmat, vd->vd_map, vd->vd_addr,
	    vd->vd_size, NULL, BUS_DMA_WAITOK)) != 0)
		goto destroy;

	return 0;

	/* Error unwind, reverse order of acquisition. */
destroy:
	bus_dmamap_destroy(vd->vd_dmat, vd->vd_map);
unmap:
	bus_dmamem_unmap(vd->vd_dmat, vd->vd_addr, vd->vd_size);
free:
	bus_dmamem_free(vd->vd_dmat, vd->vd_segs, vd->vd_nsegs);
out:
	return error;
}
/*
 * Allocate and map a vdsk descriptor ring of `nentries' vd_desc
 * entries; every descriptor starts in the FREE state.  Returns NULL on
 * failure with all partially-acquired resources released.
 *
 * Fix: the original leaked `vd' both on the early return after
 * bus_dmamap_create() failure and at the end of the goto-cleanup chain.
 */
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
{
	struct vdsk_dring *vd;
	bus_size_t size;
	caddr_t va;
	int nsegs;
	int i;

	vd = malloc(sizeof(struct vdsk_dring), M_DEVBUF, M_NOWAIT);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		goto fail;	/* was: return (NULL), leaking vd */

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

	/* Unwind in reverse order, then release the ring struct itself. */
unmap:
	bus_dmamem_unmap(t, va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
fail:
	free(vd, M_DEVBUF);
	return (NULL);
}
ldc_queue_free(struct ldc_queue *lq) #endif { bus_size_t size; size = roundup(lq->lq_nentries * sizeof(struct ldc_pkt), PAGE_SIZE); #if OPENBSD_BUSDMA bus_dmamap_unload(t, lq->lq_map); bus_dmamem_unmap(t, &lq->lq_va, size); bus_dmamem_free(t, &lq->lq_seg, 1); bus_dmamap_destroy(t, lq->lq_map); #else kmem_free((void *)lq->lq_va, size); #endif kmem_free(lq, size); }
/*
 * Free a DMA allocation made for the imxenet controller.  Safe to call
 * on an already-released descriptor (dma_tag is NULL after the first
 * call).  The map is synced before teardown so any outstanding DMA is
 * completed/visible first.
 */
void
imxenet_dma_free(struct imxenet_softc *sc, struct imxenet_dma_alloc *dma)
{
	/* Already freed (or never allocated). */
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		/* Flush/complete pending DMA before releasing anything. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	}
	/* Mark the descriptor released so a second call is a no-op. */
	dma->dma_tag = NULL;
}