Exemplo n.º 1
0
/*
 * Release all resources held by the HAL instance: the command-buffer
 * DMA memory, its map and tag, the HAL mutex, and finally the softc
 * itself.  The caller must not touch *mh afterwards.
 */
void
malo_hal_detach(struct malo_hal *mh)
{

	/*
	 * NOTE(review): mh_dmamap is freed/destroyed without a preceding
	 * bus_dmamap_unload(); presumably the map was already unloaded
	 * (or never loaded) by the time detach runs -- verify against
	 * the attach path.
	 */
	bus_dmamem_free(mh->mh_dmat, mh->mh_cmdbuf, mh->mh_dmamap);
	bus_dmamap_destroy(mh->mh_dmat, mh->mh_dmamap);
	bus_dma_tag_destroy(mh->mh_dmat);
	mtx_destroy(&mh->mh_mtx);
	free(mh, M_DEVBUF);
}
Exemplo n.º 2
0
/*
 * Tear down a DMA memory block in the canonical busdma order:
 * sync for CPU access, unload the map, free the memory, then destroy
 * the tag.  Always reports success (0).
 */
i40e_status
i40e_free_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma)
{
	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamem_free(dma->tag, dma->va, dma->map);
	bus_dma_tag_destroy(dma->tag);
	return (0);
}
Exemplo n.º 3
0
/*
 * mrsas_alloc_mfi_cmds:	Allocates the command packets
 * input:					Adapter instance soft state
 *
 * Each IOCTL or passthru command that is issued to the FW are wrapped in a
 * local data structure called mrsas_mfi_cmd.  The frame embedded in this
 * mrsas_mfi is issued to FW. The array is used only to look up the
 * mrsas_mfi_cmd given the context. The free commands are maintained in a
 * linked list.
 */
int
mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *cmd;

	max_cmd = MRSAS_MAX_MFI_CMDS;

	/*
	 * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mfi_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mfi_cmd_list[i]) {
			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_list[j], M_MRSAS);
			free(sc->mfi_cmd_list, M_MRSAS);
			sc->mfi_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mfi_cmd_list[i];
		memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
		cmd->index = i;
		cmd->ccb_ptr = NULL;
		cmd->sc = sc;
		TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	}

	/* create a frame pool and assign one frame to each command */
	if (mrsas_create_frame_pool(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
		/* Free the frames */
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, cmd);
		}
		if (sc->mficmd_frame_tag != NULL)
			bus_dma_tag_destroy(sc->mficmd_frame_tag);
		return (ENOMEM);
	}
	return (0);
}
Exemplo n.º 4
0
/*
 * Release ahb controller resources in reverse order of allocation.
 * ahb->init_level records how far attach progressed; each case falls
 * through so that all earlier stages are undone as well.
 */
static void    
ahbfree(struct ahb_softc *ahb)
{
	switch (ahb->init_level) {
	default:
	case 4:
		bus_dmamap_unload(ahb->ecb_dmat, ahb->ecb_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dmamem_free(ahb->ecb_dmat, ahb->ecb_array,
				ahb->ecb_dmamap);
		bus_dmamap_destroy(ahb->ecb_dmat, ahb->ecb_dmamap);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(ahb->ecb_dmat);
		/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(ahb->buffer_dmat);
		/* FALLTHROUGH */
	case 0:
		break;
	}
	free(ahb, M_DEVBUF);
}
Exemplo n.º 5
0
/**
 * \brief Free a DMA-accessible consistent memory block.
 *
 * Releases the wired memory, destroys the bus_dma tag that mapped it,
 * and frees the handle itself.  A NULL handle is a harmless no-op.
 */
void
drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	if (dmah != NULL) {
		/* Memory first, then the tag that described it. */
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
	}
}
Exemplo n.º 6
0
/*
 * Release a DMA region described by an efsys_mem_t: unload the map,
 * free the memory, destroy the tag, and clear the cached address
 * fields so stale pointers cannot be reused.
 */
void
sfxge_dma_free(efsys_mem_t *esmp)
{

	bus_dmamap_unload(esmp->esm_tag, esmp->esm_map);
	bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map);
	bus_dma_tag_destroy(esmp->esm_tag);

	esmp->esm_addr = 0;
	esmp->esm_base = NULL;
}
Exemplo n.º 7
0
/*
 * Issue a DRIVE_INFO command to the adapter and copy the returned
 * logical-drive table into the softc.
 *
 * Bug fix: the original code jumped to a single "exit" label on every
 * failure, so a failed bus_dma_tag_create() led to bus_dmamem_free()
 * and bus_dma_tag_destroy() being called with an uninitialized tag and
 * buffer, and a failed bus_dmamem_alloc() freed memory that was never
 * allocated.  Cleanup is now layered so each label releases only what
 * was actually acquired.
 *
 * Returns 0 on success, ENOMEM on allocation failure, or ETIMEDOUT if
 * the adapter does not answer within 10 seconds.
 */
static int ips_send_drive_info_cmd(ips_command_t *command)
{
	int error = 0;
	ips_softc_t *sc = command->sc;
	ips_cmd_status_t *status = command->arg;
	ips_drive_info_t *driveinfo;

	if (bus_dma_tag_create(	/* parent    */	sc->adapter_dmatag,
				/* alignment */	1,
				/* boundary  */	0,
				/* lowaddr   */	BUS_SPACE_MAXADDR_32BIT,
				/* highaddr  */	BUS_SPACE_MAXADDR,
				/* filter    */	NULL,
				/* filterarg */	NULL,
				/* maxsize   */	IPS_DRIVE_INFO_LEN,
				/* numsegs   */	1,
				/* maxsegsize*/	IPS_DRIVE_INFO_LEN,
				/* flags     */	0,
				&command->data_dmatag) != 0) {
		printf("ips: can't alloc dma tag for drive status\n");
		error = ENOMEM;
		goto exit;
	}
	if (bus_dmamem_alloc(command->data_dmatag, &command->data_buffer,
			     BUS_DMA_NOWAIT, &command->data_dmamap)) {
		error = ENOMEM;
		goto destroy_tag;
	}
	command->callback = ips_wakeup_callback;
	/* Arm the sleep before the load so the callback cannot race us. */
	asleep(status, 0, "ips", 10*hz);
	bus_dmamap_load(command->data_dmatag, command->data_dmamap,
			command->data_buffer, IPS_DRIVE_INFO_LEN,
			ips_drive_info_callback, command, BUS_DMA_NOWAIT);
	if (await(-1, -1))
		error = ETIMEDOUT;
	else {
		bus_dmamap_sync(command->data_dmatag, command->data_dmamap,
				BUS_DMASYNC_POSTREAD);
		driveinfo = command->data_buffer;
		memcpy(sc->drives, driveinfo->drives, sizeof(ips_drive_t) * 8);
		sc->drivecount = driveinfo->drivecount;
		device_printf(sc->dev, "logical drives: %d\n", sc->drivecount);
	}
	bus_dmamap_unload(command->data_dmatag, command->data_dmamap);
	bus_dmamem_free(command->data_dmatag, command->data_buffer,
			command->data_dmamap);
destroy_tag:
	bus_dma_tag_destroy(command->data_dmatag);
exit:
	/* Return the command to the free pool on every path. */
	ips_insert_free_cmd(sc, command);
	return error;
}
Exemplo n.º 8
0
/*
 * Free the per-channel ATA DMA resources created at init time: the
 * work area (only if it was actually loaded), its tag, and the
 * top-level channel tag.  Each pointer is cleared after release so the
 * function is safe against partial initialization and repeat calls.
 */
void 
ata_dmafini(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma.work_bus) {
	bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
	bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
	ch->dma.work_bus = 0;
	ch->dma.work = NULL;
    }
    if (ch->dma.work_tag) {
	bus_dma_tag_destroy(ch->dma.work_tag);
	ch->dma.work_tag = NULL;
    }
    if (ch->dma.dmatag) {
	bus_dma_tag_destroy(ch->dma.dmatag);
	ch->dma.dmatag = NULL;
    }
}
Exemplo n.º 9
0
/*
 * Allocate and DMA-map the copperhead status queue, then program the
 * controller's queue start/end/head/tail registers with its bus
 * address.  Returns 0 on success or ENOMEM, releasing the tag and
 * memory on failure.
 */
static int ips_copperhead_queue_init(ips_softc_t *sc)
{
	int error;
	bus_dma_tag_t dmatag;
	bus_dmamap_t dmamap;
       	if (bus_dma_tag_create(	/* parent    */	sc->adapter_dmatag,
				/* alignemnt */	1,
				/* boundary  */	0,
				/* lowaddr   */	BUS_SPACE_MAXADDR_32BIT,
				/* highaddr  */	BUS_SPACE_MAXADDR,
				/* filter    */	NULL,
				/* filterarg */	NULL,
				/* maxsize   */	sizeof(ips_copper_queue_t),
				/* numsegs   */	1,
				/* maxsegsize*/	sizeof(ips_copper_queue_t),
				/* flags     */	0,
				/* lockfunc  */ NULL,
				/* lockarg   */ NULL,
				&dmatag) != 0) {
                device_printf(sc->dev, "can't alloc dma tag for statue queue\n");
		error = ENOMEM;
		return error;
        }
	if(bus_dmamem_alloc(dmatag, (void *)&(sc->copper_queue), 
	   		    BUS_DMA_NOWAIT, &dmamap)){
		error = ENOMEM;
		goto exit;
	}
	bzero(sc->copper_queue, sizeof(ips_copper_queue_t));
	sc->copper_queue->dmatag = dmatag;
	sc->copper_queue->dmamap = dmamap;
	sc->copper_queue->nextstatus = 1;
	bus_dmamap_load(dmatag, dmamap, 
			&(sc->copper_queue->status[0]), IPS_MAX_CMD_NUM * 4, 
			ips_copperhead_queue_callback, sc->copper_queue, 
			BUS_DMA_NOWAIT);
	/*
	 * NOTE(review): the load callback is expected to have filled in
	 * base_phys_addr by the time bus_dmamap_load() returns (small,
	 * contiguous buffer with BUS_DMA_NOWAIT); a zero address is
	 * treated as a failed load -- verify against the callback.
	 */
	if(sc->copper_queue->base_phys_addr == 0){
		error = ENOMEM;
		goto exit;
	}
	ips_write_4(sc, COPPER_REG_SQSR, sc->copper_queue->base_phys_addr);
	ips_write_4(sc, COPPER_REG_SQER, sc->copper_queue->base_phys_addr + 
		    IPS_MAX_CMD_NUM * 4);
	ips_write_4(sc, COPPER_REG_SQHR, sc->copper_queue->base_phys_addr + 4);
	ips_write_4(sc, COPPER_REG_SQTR, sc->copper_queue->base_phys_addr);

	
	return 0;
exit:
	if (sc->copper_queue != NULL)
		bus_dmamem_free(dmatag, sc->copper_queue, dmamap);
	bus_dma_tag_destroy(dmatag);
	return error;
}
Exemplo n.º 10
0
/*
 * Free the PCI GART table allocated for the ATI chip: release the DMA
 * memory and tag, free the handle, and clear the gart_info reference
 * so it cannot be freed twice.
 */
static void
drm_ati_free_pcigart_table(struct drm_device *dev,
			   struct drm_ati_pcigart_info *gart_info)
{
	struct drm_dma_handle *dmah = gart_info->dmah;

	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
	bus_dma_tag_destroy(dmah->tag);
	free(dmah, DRM_MEM_DMA);
	gart_info->dmah = NULL;
}
Exemplo n.º 11
0
/*
 * Release the RX ring: first the descriptor area (sync, unload, free,
 * destroy tag), then every per-entry mbuf and its DMA map, and finally
 * the data tag.  Pointers are NULLed as they go so the function is
 * idempotent and safe after partial setup.
 */
static void
rtwn_pci_free_rx_list(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *rx_ring = &pc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i;

	if (rx_ring->desc_dmat != NULL) {
		if (rx_ring->desc != NULL) {
			bus_dmamap_sync(rx_ring->desc_dmat,
			    rx_ring->desc_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(rx_ring->desc_dmat,
			    rx_ring->desc_map);
			bus_dmamem_free(rx_ring->desc_dmat, rx_ring->desc,
			    rx_ring->desc_map);
			rx_ring->desc = NULL;
		}
		bus_dma_tag_destroy(rx_ring->desc_dmat);
		rx_ring->desc_dmat = NULL;
	}

	for (i = 0; i < RTWN_PCI_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_sync(rx_ring->data_dmat,
			    rx_data->map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->data_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(rx_ring->data_dmat, rx_data->map);
		rx_data->map = NULL;
	}
	if (rx_ring->data_dmat != NULL) {
		bus_dma_tag_destroy(rx_ring->data_dmat);
		rx_ring->data_dmat = NULL;
	}
}
Exemplo n.º 12
0
/*
 * Release every resource held by the atiixp softc: codec, interrupt
 * handler, register and IRQ resources, DMA tags/maps, and finally the
 * softc mutex.  Every step is individually guarded so a partially
 * attached softc can be torn down; safe to call with sc == NULL.
 */
static void
atiixp_release_resource(struct atiixp_info *sc)
{
	if (sc == NULL)
		return;
	if (sc->codec) {
		ac97_destroy(sc->codec);
		sc->codec = NULL;
	}
	if (sc->ih) {
		bus_teardown_intr(sc->dev, sc->irq, sc->ih);
		sc->ih = NULL;
	}
	if (sc->reg) {
		bus_release_resource(sc->dev, sc->regtype, sc->regid, sc->reg);
		sc->reg = NULL;
	}
	if (sc->irq) {
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irqid, sc->irq);
		sc->irq = NULL;
	}
	if (sc->parent_dmat) {
		bus_dma_tag_destroy(sc->parent_dmat);
		sc->parent_dmat = NULL;
	}
	/* Unload the SGD map before freeing the table that backs it. */
	if (sc->sgd_dmamap)
		bus_dmamap_unload(sc->sgd_dmat, sc->sgd_dmamap);
	if (sc->sgd_table) {
		bus_dmamem_free(sc->sgd_dmat, sc->sgd_table, sc->sgd_dmamap);
		sc->sgd_table = NULL;
	}
	sc->sgd_dmamap = NULL;
	if (sc->sgd_dmat) {
		bus_dma_tag_destroy(sc->sgd_dmat);
		sc->sgd_dmat = NULL;
	}
	if (sc->lock) {
		snd_mtxfree(sc->lock);
		sc->lock = NULL;
	}
}
Exemplo n.º 13
0
/*
 * PCI detach for the OHCI controller: stop the USB core if it was
 * initialized, destroy the busdma tags, then tear down the interrupt,
 * the child bus device, and the IRQ/memory bus resources.
 */
static int
ohci_pci_detach(device_t self)
{
	ohci_softc_t *sc = device_get_softc(self);

	if (sc->sc_flags & OHCI_SCFLG_DONEINIT) {
		ohci_detach(sc, 0);
		sc->sc_flags &= ~OHCI_SCFLG_DONEINIT;
	}

	/*
	 * NOTE(review): the tags are not NULLed after destruction, so a
	 * second detach call would double-destroy -- presumably detach
	 * runs at most once per device.
	 */
	if (sc->sc_bus.parent_dmatag != NULL)
		bus_dma_tag_destroy(sc->sc_bus.parent_dmatag);
	if (sc->sc_bus.buffer_dmatag != NULL)
		bus_dma_tag_destroy(sc->sc_bus.buffer_dmatag);

	if (sc->irq_res && sc->ih) {
		int err = bus_teardown_intr(self, sc->irq_res, sc->ih);

		if (err)
			/* XXX or should we panic? */
			device_printf(self, "Could not tear down irq, %d\n",
			    err);
		sc->ih = NULL;
	}
	if (sc->sc_bus.bdev) {
		device_delete_child(self, sc->sc_bus.bdev);
		sc->sc_bus.bdev = NULL;
	}
	if (sc->irq_res) {
		bus_release_resource(self, SYS_RES_IRQ, 0, sc->irq_res);
		sc->irq_res = NULL;
	}
	if (sc->io_res) {
		bus_release_resource(self, SYS_RES_MEMORY, PCI_CBMEM,
		    sc->io_res);
		sc->io_res = NULL;
		sc->iot = 0;
		sc->ioh = 0;
	}
	return 0;
}
Exemplo n.º 14
0
/*
 * Release aha controller resources in reverse order of allocation.
 * aha->init_level records how far attach progressed; each case falls
 * through to undo all earlier stages.  The lock is destroyed last.
 */
void
aha_free(struct aha_softc *aha)
{
	switch (aha->init_level) {
	default:
	case 8:
	{
		struct sg_map_node *sg_map;

		/* Drain and free the whole S/G map list. */
		while ((sg_map = SLIST_FIRST(&aha->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&aha->sg_maps, links);
			bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap);
			bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr,
			    sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(aha->sg_dmat);
	}
		/* FALLTHROUGH */
	case 7:
		bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap);
		/* FALLTHROUGH */
	case 6:
		bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array,
		    aha->ccb_dmamap);
		bus_dmamap_destroy(aha->ccb_dmat, aha->ccb_dmamap);
		/* FALLTHROUGH */
	case 5:
		bus_dma_tag_destroy(aha->ccb_dmat);
		/* FALLTHROUGH */
	case 4:
		bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes,
		    aha->mailbox_dmamap);
		bus_dmamap_destroy(aha->mailbox_dmat, aha->mailbox_dmamap);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(aha->buffer_dmat);
		/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(aha->mailbox_dmat);
		/* FALLTHROUGH */
	case 0:
		break;
	}
	mtx_destroy(&aha->lock);
}
Exemplo n.º 15
0
/*
 * Last-close handler for the devcfg character device: mark the device
 * closed and destroy the DMA tag that was created at open time.
 *
 * NOTE(review): sc->dma_tag is not reset to NULL after the destroy;
 * this relies on the open path always recreating the tag before any
 * subsequent close -- verify against zy7_devcfg_open.
 */
static int
zy7_devcfg_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct zy7_devcfg_softc *sc = dev->si_drv1;

	DEVCFG_SC_LOCK(sc);
	sc->is_open = 0;
	bus_dma_tag_destroy(sc->dma_tag);
	DEVCFG_SC_UNLOCK(sc);

	return (0);
}
Exemplo n.º 16
0
/*
 * PCM detach: fail early (leaving the device intact) if pcm_unregister
 * refuses; otherwise stop the hardware and release memory/IRQ
 * resources, DMA tags, the mutex, and the softc in reverse order of
 * attach.
 */
static int
ds_pci_detach(device_t dev)
{
    	int r;
	struct sc_info *sc;

	r = pcm_unregister(dev);
	if (r)
    		return r;

	sc = pcm_getdevinfo(dev);
	ds_uninit(sc);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg);
	bus_teardown_intr(dev, sc->irq, sc->ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
	bus_dma_tag_destroy(sc->buffer_dmat);
	bus_dma_tag_destroy(sc->control_dmat);
	snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
       	return 0;
}
Exemplo n.º 17
0
/*
 * Detach the VIA audio device.  Fails early (device stays attached)
 * if pcm_unregister() refuses; otherwise releases I/O port, IRQ, and
 * DMA resources in reverse order of attach, then the mutex and softc.
 *
 * NOTE(review): kfree()/M_DEVBUF suggests this variant targets
 * DragonFly BSD, unlike the FreeBSD free(9) used elsewhere.
 */
static int
via_detach(device_t dev)
{
	int r;
	struct via_info *via = NULL;

	r = pcm_unregister(dev);
	if (r) return r;

	via = pcm_getdevinfo(dev);
	bus_release_resource(dev, SYS_RES_IOPORT, via->regid, via->reg);
	bus_teardown_intr(dev, via->irq, via->ih);
	bus_release_resource(dev, SYS_RES_IRQ, via->irqid, via->irq);
	bus_dma_tag_destroy(via->parent_dmat);
	bus_dmamap_unload(via->sgd_dmat, via->sgd_dmamap);
	bus_dmamem_free(via->sgd_dmat, via->sgd_table, via->sgd_dmamap);
	bus_dma_tag_destroy(via->sgd_dmat);
	snd_mtxfree(via->lock);
	kfree(via, M_DEVBUF);
	return 0;
}
Exemplo n.º 18
0
/*
 * Release a multi-segment DMA allocation: free each segment in order,
 * then destroy the shared tag and the allocation descriptor itself.
 * am->nseg is consumed as the loop counter; *am is freed on return.
 */
void
fwdma_free_multiseg(struct fwdma_alloc_multi *am)
{
	struct fwdma_seg *seg;

	seg = &am->seg[0];
	while (am->nseg--) {
		fwdma_free_size(am->dma_tag, seg->dma_map,
			seg->v_addr, am->ssize);
		seg++;
	}
	bus_dma_tag_destroy(am->dma_tag);
	free(am, M_FW);
}
Exemplo n.º 19
0
/*
 * Release a TX ring: descriptor area first (sync, unload, free,
 * destroy tag), then any mbufs still attached to ring entries, then
 * the data tag.  Finally reset the ring's software state and clear its
 * bit in the queue-full mask.
 */
static void
rtwn_pci_free_tx_list(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *tx_ring = &pc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i;

	if (tx_ring->desc_dmat != NULL) {
		if (tx_ring->desc != NULL) {
			bus_dmamap_sync(tx_ring->desc_dmat,
			    tx_ring->desc_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->desc_dmat,
			    tx_ring->desc_map);
			bus_dmamem_free(tx_ring->desc_dmat, tx_ring->desc,
			    tx_ring->desc_map);
		}
		bus_dma_tag_destroy(tx_ring->desc_dmat);
	}

	/*
	 * NOTE(review): unlike rtwn_pci_free_rx_list(), the descriptor
	 * pointer/tag are not reset to NULL here and the per-entry data
	 * maps are not bus_dmamap_destroy()ed; presumably the maps are
	 * destroyed elsewhere or the ring is never reused after this --
	 * verify against the rest of the driver.
	 */
	for (i = 0; i < RTWN_PCI_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];

		if (tx_data->m != NULL) {
			bus_dmamap_sync(tx_ring->data_dmat, tx_data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->data_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
		}
	}
	if (tx_ring->data_dmat != NULL) {
		bus_dma_tag_destroy(tx_ring->data_dmat);
		tx_ring->data_dmat = NULL;
	}

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->last = tx_ring->cur = 0;
}
Exemplo n.º 20
0
/*
 * Free every per-channel ATA DMA resource: the work area, the S/G
 * list, the data map, and then all tags (sg, data, parent).  Each
 * step is guarded and pointers are cleared afterwards, so the function
 * tolerates partial initialization and repeated calls.
 */
static void
ata_dmafree(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma->work_bus) {
	bus_dmamap_unload(ch->dma->work_tag, ch->dma->work_map);
	bus_dmamem_free(ch->dma->work_tag, ch->dma->work, ch->dma->work_map);
	ch->dma->work_bus = 0;
	ch->dma->work_map = NULL;
	ch->dma->work = NULL;
    }
    if (ch->dma->work_tag) {
	bus_dma_tag_destroy(ch->dma->work_tag);
	ch->dma->work_tag = NULL;
    }
    if (ch->dma->sg_bus) {
	bus_dmamap_unload(ch->dma->sg_tag, ch->dma->sg_map);
	bus_dmamem_free(ch->dma->sg_tag, ch->dma->sg, ch->dma->sg_map);
	ch->dma->sg_bus = 0;
	ch->dma->sg_map = NULL;
	ch->dma->sg = NULL;
    }
    if (ch->dma->data_map) {
	bus_dmamap_destroy(ch->dma->data_tag, ch->dma->data_map);
	ch->dma->data_map = NULL;
    }
    if (ch->dma->sg_tag) {
	bus_dma_tag_destroy(ch->dma->sg_tag);
	ch->dma->sg_tag = NULL;
    }
    if (ch->dma->data_tag) {
	bus_dma_tag_destroy(ch->dma->data_tag);
	ch->dma->data_tag = NULL;
    }
    if (ch->dma->dmatag) {
	bus_dma_tag_destroy(ch->dma->dmatag);
	ch->dma->dmatag = NULL;
    }
}
Exemplo n.º 21
0
/******************************************************************************
agtiapi_MemFree()

Purpose:
  Free agtiapi_MemAlloc() allocated memory
Parameters: 
  ag_card_info_t *pCardInfo (IN)  Pointer to card info structure
Return: none
******************************************************************************/
STATIC void agtiapi_MemFree( ag_card_info_t *pCardInfo )
{
  U32 idx;

  // release memory vs. alloc in agtiapi_MemAlloc; cached case
  for( idx = 0; idx < pCardInfo->cacheIndex; idx++ ) {
    if( pCardInfo->tiCachedMem[idx] ) {
      free( pCardInfo->tiCachedMem[idx], M_PMC_MMAL );
      AGTIAPI_PRINTK( "agtiapi_MemFree: TI_CACHED_MEM Mem[%d] %p\n",
              idx, pCardInfo->tiCachedMem[idx] );
    }
  }

  // release memory vs. alloc in agtiapi_typhAlloc; used in agtiapi_MemAlloc
  // each stage is guarded individually so a partial allocation unwinds safely
  struct agtiapi_softc *pmsc = pCardInfo->pCard; // get card reference
  if( pmsc->typh_busaddr != 0 ) {
    bus_dmamap_unload( pmsc->typh_dmat, pmsc->typh_mapp );
  }
  if( pmsc->typh_mem != NULL )  {
    bus_dmamem_free( pmsc->typh_dmat, pmsc->typh_mem, pmsc->typh_mapp );
  }
  if( pmsc->typh_dmat != NULL ) {
    bus_dma_tag_destroy( pmsc->typh_dmat );
  }
//reference values:
//  pCardInfo->dmaIndex
//  pCardInfo->tiDmaMem[idx].dmaVirtAddr
//  pCardInfo->tiDmaMem[idx].memSize
//  pCardInfo->tiDmaMem[idx].type == TI_CACHED_DMA_MEM
//  pCardInfo->tiDmaMem[idx].type == TI_DMA_MEM

/* This code is redundant.  Commenting out for now to maintain a placekeeper.
   Free actually takes place in agtiapi_ReleaseHBA as calls on osti_dmat. dm
  // release possible lower layer dynamic memory
  for( idx = 0; idx < AGTIAPI_DYNAMIC_MAX; idx++ ) {
    if( pCardInfo->dynamicMem[idx].dmaVirtAddr != NULL ) {
      printf( "agtiapi_MemFree: dynMem[%d] virtAddr"
	            " %p / %lx size: %d\n",
              idx, pCardInfo->dynamicMem[idx].dmaVirtAddr,
              (long unsigned int)pCardInfo->dynamicMem[idx].dmaPhysAddr,
              pCardInfo->dynamicMem[idx].memSize );
      if( pCardInfo->dynamicMem[idx].dmaPhysAddr )
	      some form of free call would go here  (
                    pCardInfo->dynamicMem[idx].dmaVirtAddr,
                    pCardInfo->dynamicMem[idx].memSize, ... );
      else
        free case for cacheable memory would go here
    }
  }
*/
  return;
}
Exemplo n.º 22
0
/*
 * Stop the DBDMA channel, release its descriptor slots and the tag
 * that mapped them, then free the channel structure.  Always returns 0.
 *
 * NOTE(review): sc_dmamap is freed without a preceding
 * bus_dmamap_unload(); presumably the map is unloaded as part of
 * channel teardown -- verify against the allocation path.
 */
int
dbdma_free_channel(dbdma_channel_t *chan)
{

    dbdma_stop(chan);

    bus_dmamem_free(chan->sc_dmatag, chan->sc_slots, chan->sc_dmamap);
    bus_dma_tag_destroy(chan->sc_dmatag);

    free(chan, M_DBDMA);

    return (0);
}
Exemplo n.º 23
0
/*
 * Device detach: quiesce the channel, drain outstanding references,
 * tear down interrupts and the timer, then release PCI and DMA
 * resources in reverse order of acquisition.
 */
static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();

	/* Block new work and remove the channel from the global table
	 * before waiting for in-flight references to drain. */
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	/*
	 * NOTE(review): hw_desc_tag is destroyed unconditionally; this
	 * assumes attach always created it before detach can run --
	 * verify against the attach path.
	 */
	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}
Exemplo n.º 24
0
/*
 * Allocate a single-segment DMA buffer for Hyper-V: create a dedicated
 * tag, allocate coherent memory, and load it so dma->hv_paddr receives
 * the bus address.  Returns the kernel virtual address, or NULL on
 * failure with everything previously acquired released again.
 */
void *
hyperv_dmamem_alloc(bus_dma_tag_t parent_dtag, bus_size_t alignment,
    bus_addr_t boundary, bus_size_t size, struct hyperv_dma *dma, int flags)
{
	void *vaddr;
	int rc;

	/* One segment covering the whole allocation. */
	rc = bus_dma_tag_create(parent_dtag, /* parent */
	    alignment,		/* alignment */
	    boundary,		/* boundary */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    size,		/* maxsize */
	    1,			/* nsegments */
	    size,		/* maxsegsize */
	    0,			/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &dma->hv_dtag);
	if (rc != 0)
		return NULL;

	rc = bus_dmamem_alloc(dma->hv_dtag, &vaddr,
	    (flags & HYPERV_DMA_WAITMASK) | BUS_DMA_COHERENT, &dma->hv_dmap);
	if (rc != 0)
		goto destroy_tag;

	rc = bus_dmamap_load(dma->hv_dtag, dma->hv_dmap, vaddr, size,
	    hyperv_dma_map_paddr, &dma->hv_paddr, BUS_DMA_NOWAIT);
	if (rc != 0)
		goto free_mem;

	return vaddr;

free_mem:
	bus_dmamem_free(dma->hv_dtag, vaddr, dma->hv_dmap);
destroy_tag:
	bus_dma_tag_destroy(dma->hv_dtag);
	return NULL;
}
Exemplo n.º 25
0
/*
 * Attach the USB audio PCM bridge: allocate and zero the softc, create
 * the parent DMA tag, initialize the mixer, and register the PCM
 * device and channels.  Returns 0 on success or ENXIO on failure.
 *
 * Bug fix: the mixer_init() failure path previously returned ENXIO
 * directly, leaking both the softc and the parent DMA tag; it now
 * routes through the common "bad" cleanup path.
 */
static int
ua_attach(device_t dev)
{
	struct ua_info *ua;
	char status[SND_STATUSLEN];
	unsigned int bufsz;

	ua = (struct ua_info *)malloc(sizeof *ua, M_DEVBUF, M_NOWAIT);
	if (!ua)
		return ENXIO;
	bzero(ua, sizeof *ua);

	ua->sc_dev = dev;

	/* Clamp the requested buffer size to the supported range. */
	bufsz = pcm_getbuffersize(dev, 4096, UAUDIO_PCM_BUFF_SIZE, 65536);

	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
				/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
				/*highaddr*/BUS_SPACE_MAXADDR,
				/*filter*/NULL, /*filterarg*/NULL,
				/*maxsize*/bufsz, /*nsegments*/1,
				/*maxsegz*/0x3fff, /*flags*/0,
				&ua->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (mixer_init(dev, &ua_mixer_class, ua)) {
		/* Nothing registered yet; safe to free everything. */
		goto bad;
	}

	snprintf(status, SND_STATUSLEN, "at addr ?");

	if (pcm_register(dev, ua, 1, 0)) {
		/*
		 * NOTE(review): the mixer initialized above is left in
		 * place here (matching the original code's behavior aside
		 * from the memory release); a complete fix would also
		 * call mixer_uninit() -- verify its contract first.
		 */
		return(ENXIO);
	}

	pcm_addchan(dev, PCMDIR_PLAY, &ua_chan_class, ua);
#ifndef NO_RECORDING
	pcm_addchan(dev, PCMDIR_REC, &ua_chan_class, ua);
#endif
	pcm_setstatus(dev, status);

	return 0;
bad:
	if (ua->parent_dmat)
		bus_dma_tag_destroy(ua->parent_dmat);
	free(ua, M_DEVBUF);

	return ENXIO;
}
Exemplo n.º 26
0
/*
 * Free everything the ida attach path set up: per-QCB data maps, the
 * hardware QCB area, the DMA tags, the interrupt handler, and the
 * IRQ/register bus resources.  Each step is guarded so a partially
 * attached softc can be torn down safely.
 *
 * NOTE(review): kfree() suggests this is a DragonFly BSD variant of
 * the driver; the FreeBSD version uses free(9).
 */
void
ida_free(struct ida_softc *ida)
{
	int i;

	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		kfree(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
                bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}
Exemplo n.º 27
0
/*
 * Release all adapter-level resources so the driver can unload.
 * Refuses with EBUSY while the control device is open or the command
 * queue is non-empty; returns 0 on success or an errno from the disk
 * device teardown.
 */
int ips_adapter_free(ips_softc_t *sc)
{
	int error = 0;
	if(sc->state & IPS_DEV_OPEN)
		return EBUSY;
	if((error = ips_diskdev_free(sc)))
		return error;
	if(ips_cmdqueue_free(sc)){
		device_printf(sc->dev,
		     "trying to exit when command queue is not empty!\n");
		return EBUSY;
	}
	DEVICE_PRINTF(1, sc->dev, "free\n");
	/* Stop the periodic timer before tearing down the tags it uses. */
	callout_drain(&sc->timer);

	if(sc->sg_dmatag)
		bus_dma_tag_destroy(sc->sg_dmatag);
	if(sc->command_dmatag)
		bus_dma_tag_destroy(sc->command_dmatag);
	if(sc->device_file)
	        destroy_dev(sc->device_file);
        return 0;
}
Exemplo n.º 28
0
/*
 * PCI detach: tear down the interrupt, detach the am79900 MAC core,
 * then release the descriptor DMA memory, both DMA tags, and the
 * IRQ/IOPORT bus resources in reverse order of attach.  The lock is
 * destroyed last.
 */
static int
le_pci_detach(device_t dev)
{
	struct le_pci_softc *lesc;
	struct lance_softc *sc;

	lesc = device_get_softc(dev);
	sc = &lesc->sc_am79900.lsc;

	bus_teardown_intr(dev, lesc->sc_ires, lesc->sc_ih);
	am79900_detach(&lesc->sc_am79900);
	bus_dmamap_unload(lesc->sc_dmat, lesc->sc_dmam);
	bus_dmamem_free(lesc->sc_dmat, sc->sc_mem, lesc->sc_dmam);
	bus_dma_tag_destroy(lesc->sc_dmat);
	bus_dma_tag_destroy(lesc->sc_pdmat);
	bus_release_resource(dev, SYS_RES_IRQ,
	    rman_get_rid(lesc->sc_ires), lesc->sc_ires);
	bus_release_resource(dev, SYS_RES_IOPORT,
	    rman_get_rid(lesc->sc_rres), lesc->sc_rres);
	LE_LOCK_DESTROY(sc);

	return (0);
}
Exemplo n.º 29
0
/*
 * Detach path: quiesce the controller, then free the two bounce
 * buffers and the tag they were allocated from.
 *
 * NOTE(review): the bounce-buffer maps are freed without a preceding
 * bus_dmamap_unload(); verify no transfer can still be mapped when
 * detach runs.  The unconditional EBUSY return (marked XXX upstream)
 * deliberately prevents the detach from completing.
 */
static int
at91_mci_detach(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);

	at91_mci_fini(dev);
	at91_mci_deactivate(dev);

	bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[0], sc->bbuf_map[0]);
	bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[1], sc->bbuf_map[1]);
	bus_dma_tag_destroy(sc->dmatag);

	return (EBUSY);	/* XXX */
}
Exemplo n.º 30
0
/*
 * PCI detach: tear down the interrupt, detach the ncr53c9x core, then
 * destroy the transfer map and both DMA tags and release the bus
 * resources.
 *
 * NOTE(review): if ncr53c9x_detach() fails we return with the
 * interrupt already torn down but all other resources still held --
 * this mirrors the upstream driver; verify callers treat a failed
 * detach as fatal.
 */
static int
esp_pci_detach(device_t dev)
{
	struct ncr53c9x_softc *sc;
	struct esp_pci_softc *esc;
	int error;

	esc = device_get_softc(dev);
	sc = &esc->sc_ncr53c9x;

	bus_teardown_intr(esc->sc_dev, esc->sc_res[ESP_PCI_RES_INTR],
	    esc->sc_ih);
	error = ncr53c9x_detach(sc);
	if (error != 0)
		return (error);
	bus_dmamap_destroy(esc->sc_xferdmat, esc->sc_xferdmam);
	bus_dma_tag_destroy(esc->sc_xferdmat);
	bus_dma_tag_destroy(esc->sc_pdmat);
	bus_release_resources(dev, esp_pci_res_spec, esc->sc_res);
	NCR_LOCK_DESTROY(sc);

	return (0);
}
}