Example #1
0
/* Internal functions */
int
cs428x_allocmem(struct cs428x_softc *sc, 
		size_t size, int pool, int flags,
		struct cs428x_dma *p)
{
	int error;
	size_t align;

	align   = sc->dma_align;
	p->size = sc->dma_size;
	/* allocate memory for upper audio driver */
	p->dum  = malloc(size, pool, flags);
	if (p->dum == NULL)
		return 1;

	error = bus_dmamem_alloc(sc->sc_dmatag, p->size, align, 0,
				 p->segs, sizeof(p->segs)/sizeof(p->segs[0]),
				 &p->nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to allocate dma. error=%d\n",
		       sc->sc_dev.dv_xname, error);
		goto allfree;
	}

	error = bus_dmamem_map(sc->sc_dmatag, p->segs, p->nsegs, p->size,
			       &p->addr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		printf("%s: unable to map dma, error=%d\n",
		       sc->sc_dev.dv_xname, error);
		goto free;
	}

	error = bus_dmamap_create(sc->sc_dmatag, p->size, 1, p->size,
				  0, BUS_DMA_NOWAIT, &p->map);
	if (error) {
		printf("%s: unable to create dma map, error=%d\n",
		       sc->sc_dev.dv_xname, error);
		goto unmap;
	}

	error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
				BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load dma map, error=%d\n",
		       sc->sc_dev.dv_xname, error);
		goto destroy;
	}
	return 0;

 destroy:
	bus_dmamap_destroy(sc->sc_dmatag, p->map);
 unmap:
	bus_dmamem_unmap(sc->sc_dmatag, p->addr, p->size);
 free:
	bus_dmamem_free(sc->sc_dmatag, p->segs, p->nsegs);
 allfree:
	free(p->dum, pool);

	return error;
}
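/*
 * A minimal sketch (not taken from the driver) of the matching teardown:
 * the four bus_dma steps above are undone in reverse order and the
 * upper-layer buffer is released.  The helper name and the pool argument
 * are assumptions that simply mirror the allocator.
 */
int
cs428x_freemem(struct cs428x_softc *sc, int pool, struct cs428x_dma *p)
{
	bus_dmamap_unload(sc->sc_dmatag, p->map);
	bus_dmamap_destroy(sc->sc_dmatag, p->map);
	bus_dmamem_unmap(sc->sc_dmatag, p->addr, p->size);
	bus_dmamem_free(sc->sc_dmatag, p->segs, p->nsegs);
	free(p->dum, pool);

	return 0;
}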
static int
coram_allocmem(struct coram_softc *sc, size_t size, size_t align,
    struct coram_dma *p)
{
	int err;

	p->size = size;
	err = bus_dmamem_alloc(sc->sc_dmat, p->size, align, 0,
	    p->segs, sizeof(p->segs) / sizeof(p->segs[0]),
	    &p->nsegs, BUS_DMA_NOWAIT);
	if (err)
		return err;
	err = bus_dmamem_map(sc->sc_dmat, p->segs, p->nsegs, p->size,
	    &p->addr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (err)
		goto free;
	err = bus_dmamap_create(sc->sc_dmat, p->size, 1, p->size, 0,
	    BUS_DMA_NOWAIT, &p->map);
	if (err)
		goto unmap;
	err = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, p->size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto destroy;

	return 0;
destroy:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, p->size);
free:
	bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);

	return err;
}
void
le_sbdio_attach(device_t parent, device_t self, void *aux)
{
	struct le_sbdio_softc *lesc = device_private(self);
	struct sbdio_attach_args *sa = aux;
	struct lance_softc *sc = &lesc->sc_am7990.lsc;
	bus_dma_segment_t seg;
	int rseg;

	sc->sc_dev = self;
	lesc->sc_dmat = sa->sa_dmat;
	lesc->sc_bst  = sa->sa_bust;

	if (bus_space_map(lesc->sc_bst, sa->sa_addr1, 8 /* XXX */,
	    BUS_SPACE_MAP_LINEAR, &lesc->sc_bsh) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	/* Allocate DMA memory for the chip. */
	if (bus_dmamem_alloc(lesc->sc_dmat, LE_MEMSIZE, 0, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't allocate DMA memory\n");
		return;
	}
	if (bus_dmamem_map(lesc->sc_dmat, &seg, rseg, LE_MEMSIZE,
	    (void **)&sc->sc_mem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0) {
		aprint_error(": can't map DMA memory\n");
		return;
	}
	if (bus_dmamap_create(lesc->sc_dmat, LE_MEMSIZE, 1, LE_MEMSIZE,
	    0, BUS_DMA_NOWAIT, &lesc->sc_dmamap) != 0) {
		aprint_error(": can't create DMA map\n");
		return;
	}
	if (bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmamap, sc->sc_mem,
	    LE_MEMSIZE, NULL, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": can't load DMA map\n");
		return;
	}

	sc->sc_memsize = LE_MEMSIZE;
	sc->sc_addr = lesc->sc_dmamap->dm_segs[0].ds_addr;
	sc->sc_conf3 = LE_C3_BSWP | LE_C3_BCON;
	(*platform.ether_addr)(sc->sc_enaddr);

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;
#ifdef LEDEBUG
	sc->sc_debug = 0xff;
#endif
	sc->sc_rdcsr = le_sbdio_rdcsr;
	sc->sc_wrcsr = le_sbdio_wrcsr;

	am7990_config(&lesc->sc_am7990);
	intr_establish(sa->sa_irq, am7990_intr, sc);
}
Example #4
0
/**
 *	ti_mmchs_activate - activates the driver
 *	@dev: mmc device handle
 *
 *	Maps in the register set and requests an IRQ handler for the MMC controller.
 *
 *	LOCKING:
 *	None required
 *
 *	RETURNS:
 *	0 on success
 *	ENOMEM if the IRQ or DMA resources could not be set up
 */
static int
ti_mmchs_activate(device_t dev)
{
	struct ti_mmchs_softc *sc = device_get_softc(dev);
	unsigned long addr;
	int rid;
	int err;

	/* Get the memory resource for the register mapping */
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource for the MMC controller */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL)
		goto errout;

	/* Allocate DMA tags and maps */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MAXPHYS, 1, MAXPHYS, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->sc_dmatag);
	if (err != 0)
		goto errout;

	err = bus_dmamap_create(sc->sc_dmatag, 0,  &sc->sc_dmamap);
	if (err != 0)
		goto errout;

	/* Initialise the DMA channels to be used by the controller */
	err = ti_mmchs_init_dma_channels(sc);
	if (err != 0)
		goto errout;

	/* Set the register offset */
	if (ti_chip() == CHIP_OMAP_3)
		sc->sc_reg_off = OMAP3_MMCHS_REG_OFFSET;
	else if (ti_chip() == CHIP_OMAP_4)
		sc->sc_reg_off = OMAP4_MMCHS_REG_OFFSET;
	else
		panic("Unknown OMAP device\n");

	/* Get the physical address of the MMC data register, needed for DMA */
	addr = vtophys(rman_get_start(sc->sc_mem_res));
	sc->sc_data_reg_paddr = addr + sc->sc_reg_off + MMCHS_DATA;

	/* Set the initial power state to off */
	sc->sc_cur_power_mode = power_off;

	return (0);

errout:
	ti_mmchs_deactivate(dev);
	return (ENOMEM);
}
/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return 0;			/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return 1;			/* failure */
}
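/*
 * Hedged sketch of the symmetric teardown, reconstructed only from the
 * fields used above (pc->tag, pc->map); the real FreeBSD counterpart is
 * usb_pc_dmamap_destroy() and may differ in detail, e.g. by unloading a
 * loaded buffer first.
 */
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc != NULL && pc->tag != NULL) {
		/* destroy DMA map and forget the tag */
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}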
Example #6
0
void
cn30xxfpa_buf_dma_alloc(struct cn30xxfpa_buf *fb)
{
	int status;
	int nsegs;
	caddr_t va;

	status = bus_dmamap_create(fb->fb_dmat, fb->fb_len,
	    fb->fb_len / PAGE_SIZE,	/* # of segments */
	    fb->fb_len,			/* we don't use s/g for FPA buf */
	    PAGE_SIZE,			/* OCTEON hates >PAGE_SIZE boundary */
	    0, &fb->fb_dmah);
	if (status != 0)
		panic("%s failed", "bus_dmamap_create");

	status = bus_dmamem_alloc(fb->fb_dmat, fb->fb_len, 128, 0,
	    fb->fb_dma_segs, fb->fb_dma_nsegs, &nsegs, 0);
	if (status != 0 || fb->fb_dma_nsegs != nsegs)
		panic("%s failed", "bus_dmamem_alloc");

	status = bus_dmamem_map(fb->fb_dmat, fb->fb_dma_segs, fb->fb_dma_nsegs,
	    fb->fb_len, &va, 0);
	if (status != 0)
		panic("%s failed", "bus_dmamem_map");

	status = bus_dmamap_load(fb->fb_dmat, fb->fb_dmah, va, fb->fb_len,
	    NULL,		/* kernel */
	    0);
	if (status != 0)
		panic("%s failed", "bus_dmamap_load");

	fb->fb_addr = (vaddr_t)va;
	fb->fb_paddr = fb->fb_dma_segs[0].ds_addr;
}
Example #7
0
static int
dcons_crom_attach(device_t dev)
{
#ifdef NEED_NEW_DRIVER
	kprintf("dcons_crom: you need newer firewire driver\n");
	return (-1);
#else
	struct dcons_crom_softc *sc;

        sc = (struct dcons_crom_softc *) device_get_softc(dev);
	sc->fd.fc = device_get_ivars(dev);
	sc->fd.dev = dev;
	sc->fd.post_explore = NULL;
	sc->fd.post_busreset = (void *) dcons_crom_post_busreset;

	/* map dcons buffer */
	bus_dma_tag_create(
		/*parent*/ sc->fd.fc->dmat,
		/*alignment*/ sizeof(u_int32_t),
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/ dcons_conf->size,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ BUS_DMA_ALLOCNOW,
		&sc->dma_tag);
	bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map);
	bus_dmamap_load(sc->dma_tag, sc->dma_map,
	    (void *)dcons_conf->buf, dcons_conf->size,
	    dmamap_cb, sc, 0);
	return (0);
#endif
}
Example #8
0
/*
 * Finish attaching this DMA device.
 * Front-end must fill in these fields:
 *	sc_bustag
 *	sc_dmatag
 *	sc_regs
 *	sc_burst
 *	sc_channel (one of SCSI, ENET, PP)
 *	sc_client (one of SCSI, ENET, PP `soft_c' pointers)
 */
void
lsi64854_attach(struct lsi64854_softc *sc)
{
	uint32_t csr;

	/* Indirect functions */
	switch (sc->sc_channel) {
	case L64854_CHANNEL_SCSI:
		sc->intr = lsi64854_scsi_intr;
		sc->setup = lsi64854_setup;
		break;
	case L64854_CHANNEL_ENET:
		sc->intr = lsi64854_enet_intr;
		break;
	case L64854_CHANNEL_PP:
		sc->setup = lsi64854_setup_pp;
		break;
	default:
		aprint_error(": unknown channel");
	}
	sc->reset = lsi64854_reset;

	/* Allocate a dmamap */
	if (bus_dmamap_create(sc->sc_dmatag, MAX_DMA_SZ, 1, MAX_DMA_SZ,
	    0, BUS_DMA_WAITOK, &sc->sc_dmamap) != 0) {
		aprint_error(": DMA map create failed\n");
		return;
	}

	csr = L64854_GCSR(sc);
	sc->sc_rev = csr & L64854_DEVID;
	if (sc->sc_rev == DMAREV_HME) {
		return;
	}
	aprint_normal(": DMA rev ");
	switch (sc->sc_rev) {
	case DMAREV_0:
		aprint_normal("0");
		break;
	case DMAREV_ESC:
		aprint_normal("esc");
		break;
	case DMAREV_1:
		aprint_normal("1");
		break;
	case DMAREV_PLUS:
		aprint_normal("1+");
		break;
	case DMAREV_2:
		aprint_normal("2");
		break;
	default:
		aprint_normal("unknown (0x%x)", sc->sc_rev);
	}

	DPRINTF(LDB_ANY, (", burst 0x%x, csr 0x%x", sc->sc_burst, csr));
	aprint_normal("\n");
}
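/*
 * Hypothetical front-end sketch illustrating the contract described in
 * the comment above lsi64854_attach(): the bus glue fills in the tags,
 * register handle, burst size, channel and client pointer before calling
 * the attach routine.  The softc layout, function name and parameters
 * here are illustrative assumptions, not code from a real front-end.
 */
struct example_esp_softc {
	struct lsi64854_softc	sc_dma;		/* DMA engine state */
	/* front-end (SCSI) state would follow */
};

static void
example_dma_attach(struct example_esp_softc *esc, bus_space_tag_t bt,
    bus_dma_tag_t dt, bus_space_handle_t regs, int burst)
{
	struct lsi64854_softc *sc = &esc->sc_dma;

	sc->sc_bustag  = bt;			/* register access */
	sc->sc_dmatag  = dt;			/* used by bus_dmamap_create() above */
	sc->sc_regs    = regs;			/* DMA engine registers */
	sc->sc_burst   = burst;
	sc->sc_channel = L64854_CHANNEL_SCSI;	/* SCSI, ENET or PP */
	sc->sc_client  = esc;			/* back-pointer for the front-end */

	lsi64854_attach(sc);
}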
Example #9
0
/*
 * Manage DMA'able memory.
 */
int
imxenet_dma_malloc(struct imxenet_softc *sc, bus_size_t size,
    struct imxenet_dma_alloc *dma)
{
	int r;

	dma->dma_tag = sc->sc_dma_tag;
	r = bus_dmamem_alloc(dma->dma_tag, size, ENET_ALIGNMENT, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: imxenet_dma_malloc: bus_dmammem_alloc failed; "
			"size %lu, error %d\n", sc->sc_dev.dv_xname,
			(unsigned long)size, r);
		goto fail_0;
	}

	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (r != 0) {
		printf("%s: imxenet_dma_malloc: bus_dmammem_map failed; "
			"size %lu, error %d\n", sc->sc_dev.dv_xname,
			(unsigned long)size, r);
		goto fail_1;
	}

	r = bus_dmamap_create(dma->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("%s: imxenet_dma_malloc: bus_dmamap_create failed; "
			"error %u\n", sc->sc_dev.dv_xname, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
			    dma->dma_vaddr, size, NULL,
			    BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: imxenet_dma_malloc: bus_dmamap_load failed; "
			"error %u\n", sc->sc_dev.dv_xname, r);
		goto fail_3;
	}

	dma->dma_size = size;
	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;

	return (r);
}
Example #10
0
#if OPENBSD_BUSDMA
struct ldc_map *
ldc_map_alloc(bus_dma_tag_t t, int nentries)
#else
struct ldc_map *
ldc_map_alloc(int nentries)
#endif
{
	struct ldc_map *lm;
	bus_size_t size;
	vaddr_t va = 0;

#if OPENBSD_BUSDMA
	int nsegs;
#endif
	lm = kmem_zalloc(sizeof(struct ldc_map), KM_NOSLEEP);
	if (lm == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct ldc_map_slot), PAGE_SIZE);

#if OPENBSD_BUSDMA
	if (bus_dmamap_create(t, size, 1, size, 0,
			      BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &lm->lm_map) != 0) {
		DPRINTF(("ldc_map_alloc() - bus_dmamap_create() failed\n"));
		return (NULL);
	}

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &lm->lm_seg, 1,
			     &nsegs, BUS_DMA_NOWAIT) != 0) {
		DPRINTF(("ldc_map_alloc() - bus_dmamem_alloc() failed\n"));
		goto destroy;
	}

	if (bus_dmamem_map(t, &lm->lm_seg, 1, size, (void *)&va,
			   BUS_DMA_NOWAIT) != 0) {
		DPRINTF(("ldc_map_alloc() - bus_dmamem_map() failed\n"));
		goto free;
	}
	if (bus_dmamap_load(t, lm->lm_map, (void*)va, size, NULL,
			    BUS_DMA_NOWAIT) != 0) {
		DPRINTF(("ldc_map_alloc() - bus_dmamap_load() failed\n"));
		goto unmap;
	}
#else
	va = (vaddr_t)kmem_zalloc(size, KM_NOSLEEP);
#endif
	lm->lm_slot = (struct ldc_map_slot *)va;
	lm->lm_nentries = nentries;
	bzero(lm->lm_slot, nentries * sizeof(struct ldc_map_slot));
	return (lm);

#if OPENBSD_BUSDMA
unmap:
	bus_dmamem_unmap(t, (void*)va, size);
free:
	bus_dmamem_free(t, &lm->lm_seg, 1);
destroy:
	bus_dmamap_destroy(t, lm->lm_map);
#endif
	return (NULL);
}
void *
pxa2x0_i2s_allocm(void *hdl, int direction, size_t size, int type, int flags)
{
	struct device *sc_dev = hdl;
	struct pxa2x0_i2s_softc *sc =
	    (struct pxa2x0_i2s_softc *)((struct device *)hdl + 1);
	struct pxa2x0_i2s_dma *p;
	int error;
	int rseg;

	p = malloc(sizeof(*p), type, flags);
	if (!p)
		return 0;

	p->size = size;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, &p->seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate dma, error = %d\n",
		    sc_dev->dv_xname, error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &p->seg, rseg, size, &p->addr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map dma, error = %d\n",
		    sc_dev->dv_xname, error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &p->map)) != 0) {
		printf("%s: unable to create dma map, error = %d\n",
		    sc_dev->dv_xname, error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load dma map, error = %d\n",
		    sc_dev->dv_xname, error);
		goto fail_load;
	}

	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;

fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, &p->seg, 1);
fail_alloc:
	free(p, type);
	return 0;
}
static void *
fms_malloc(void *addr, int direction, size_t size)
{
	struct fms_softc *sc;
	struct fms_dma *p;
	int error;
	int rseg;

	sc = addr;
	p = kmem_alloc(sizeof(*p), KM_SLEEP);
	if (p == NULL)
		return NULL;

	p->size = size;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &p->seg,
				      1, &rseg, BUS_DMA_WAITOK)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate DMA, error = %d\n", error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &p->seg, rseg, size, &p->addr,
				    BUS_DMA_WAITOK | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map DMA, error = %d\n",
		       error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
				       BUS_DMA_WAITOK, &p->map)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create DMA map, error = %d\n",
		       error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
				     BUS_DMA_WAITOK)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load DMA map, error = %d\n",
		       error);
		goto fail_load;
	}

	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;


fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, &p->seg, 1);
fail_alloc:
	kmem_free(p, sizeof(*p));
	return NULL;
}
Example #13
0
static void
ahaallocccbs(struct aha_softc *aha)
{
	struct aha_ccb *next_ccb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	aha_sg_t *segs;
	int newcount;
	int i;

	next_ccb = &aha->aha_ccb_array[aha->num_ccbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of CCBS */
	if (bus_dmamem_alloc(aha->sg_dmat, (void **)&sg_map->sg_vaddr,
	    BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&aha->sg_maps, sg_map, links);

	bus_dmamap_load(aha->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
	    PAGE_SIZE, ahamapsgs, aha, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

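	/* One PAGE_SIZE S/G allocation holds this many per-CCB segment lists. */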
	newcount = (PAGE_SIZE / (AHA_NSEG * sizeof(aha_sg_t)));
	for (i = 0; aha->num_ccbs < aha->max_ccbs && i < newcount; i++) {
		int error;

		next_ccb->sg_list = segs;
		next_ccb->sg_list_phys = physaddr;
		next_ccb->flags = ACCB_FREE;
		callout_init_mtx(&next_ccb->timer, &aha->lock, 0);
		error = bus_dmamap_create(aha->buffer_dmat, /*flags*/0,
		    &next_ccb->dmamap);
		if (error != 0)
			break;
		SLIST_INSERT_HEAD(&aha->free_aha_ccbs, next_ccb, links);
		segs += AHA_NSEG;
		physaddr += (AHA_NSEG * sizeof(aha_sg_t));
		next_ccb++;
		aha->num_ccbs++;
	}

	/* Reserve a CCB for error recovery */
	if (aha->recovery_accb == NULL) {
		aha->recovery_accb = SLIST_FIRST(&aha->free_aha_ccbs);
		SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links);
	}
}
static int
auich_alloc_cdata(struct auich_softc *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control data structure, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->dmat,
				      sizeof(struct auich_cdata),
				      PAGE_SIZE, 0, &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->dmat, &seg, rseg,
				    sizeof(struct auich_cdata),
				    (void **) &sc->sc_cdata,
				    sc->sc_dmamap_flags)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->dmat, sizeof(struct auich_cdata), 1,
				       sizeof(struct auich_cdata), 0, 0,
				       &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->dmat, sc->sc_cddmamap,
				     sc->sc_cdata, sizeof(struct auich_cdata),
				     NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable tp load control data DMA map, "
		    "error = %d\n", error);
		goto fail_3;
	}

	sc->pcmo.dmalist = sc->sc_cdata->ic_dmalist_pcmo;
	sc->pcmi.dmalist = sc->sc_cdata->ic_dmalist_pcmi;
	sc->mici.dmalist = sc->sc_cdata->ic_dmalist_mici;

	return 0;

 fail_3:
	bus_dmamap_destroy(sc->dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->dmat, (void *) sc->sc_cdata,
	    sizeof(struct auich_cdata));
 fail_1:
	bus_dmamem_free(sc->dmat, &seg, rseg);
 fail_0:
	return error;
}
Example #15
0
void *
auvia_malloc(void *addr, int direction, size_t size, int pool, int flags)
{
	struct auvia_softc *sc = addr;
	struct auvia_dma *p;
	int error;
	int rseg;

	p = malloc(sizeof(*p), pool, flags);
	if (!p)
		return 0;

	p->size = size;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &p->seg,
	    1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate dma, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &p->seg, rseg, size, &p->addr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map dma, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &p->map)) != 0) {
		printf("%s: unable to create dma map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load dma map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_load;
	}

	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;


fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, &p->seg, 1);
fail_alloc:
	free(p, pool);
	return 0;
}
Example #16
0
static void
ata_dmaalloc(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dc_cb_args dcba;
    int i;

    /* alloc and setup needed dma slots */
    bzero(ch->dma.slot, sizeof(struct ata_dmaslot) * ATA_DMA_SLOTS);
    for (i = 0; i < ch->dma.dma_slots; i++) {
	struct ata_dmaslot *slot = &ch->dma.slot[i];

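	/*
	 * slot->sg_tag/sg_map below hold the S/G (PRD) table itself;
	 * slot->data_tag/data_map further down describe the actual I/O buffers.
	 */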
	if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, PAGE_SIZE,
			       ch->dma.max_address, BUS_SPACE_MAXADDR,
			       NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE,
			       0, NULL, NULL, &slot->sg_tag)) {
            device_printf(ch->dev, "FAILURE - create sg_tag\n");
            goto error;
	}

	if (bus_dmamem_alloc(slot->sg_tag, (void **)&slot->sg, BUS_DMA_WAITOK,
			     &slot->sg_map)) {
	    device_printf(ch->dev, "FAILURE - alloc sg_map\n");
	    goto error;
        }

	if (bus_dmamap_load(slot->sg_tag, slot->sg_map, slot->sg, MAXTABSZ,
			    ata_dmasetupc_cb, &dcba, 0) || dcba.error) {
	    device_printf(ch->dev, "FAILURE - load sg\n");
	    goto error;
	}
	slot->sg_bus = dcba.maddr;

	if (bus_dma_tag_create(ch->dma.dmatag,
			       ch->dma.alignment, ch->dma.boundary,
                               ch->dma.max_address, BUS_SPACE_MAXADDR,
                               NULL, NULL, ch->dma.max_iosize,
                               ATA_DMA_ENTRIES, ch->dma.segsize,
                               BUS_DMA_ALLOCNOW, NULL, NULL, &slot->data_tag)) {
	    device_printf(ch->dev, "FAILURE - create data_tag\n");
	    goto error;
	}

	if (bus_dmamap_create(slot->data_tag, 0, &slot->data_map)) {
	    device_printf(ch->dev, "FAILURE - create data_map\n");
	    goto error;
        }
    }

    return;

error:
    device_printf(dev, "WARNING - DMA allocation failed, disabling DMA\n");
    ata_dmafree(dev);
}
static int
at91_spi_attach(device_t dev)
{
	struct at91_spi_softc *sc = device_get_softc(dev);
	int err, i;

	sc->dev = dev;
	err = at91_spi_activate(dev);
	if (err)
		goto out;

	/*
	 * Allocate DMA tags and maps
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2058, 1,
	    2048, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->dmatag);
	if (err != 0)
		goto out;
	for (i = 0; i < 4; i++) {
		err = bus_dmamap_create(sc->dmatag, 0,  &sc->map[i]);
		if (err != 0)
			goto out;
	}

	// reset the SPI
	WR4(sc, SPI_CR, SPI_CR_SWRST);
	WR4(sc, SPI_IDR, 0xffffffff);

	WR4(sc, SPI_MR, (0xf << 24) | SPI_MR_MSTR | SPI_MR_MODFDIS |
	    (0xE << 16));

	WR4(sc, SPI_CSR0, SPI_CSR_CPOL | (4 << 16) | (2 << 8));
	WR4(sc, SPI_CR, SPI_CR_SPIEN);

	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS);
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS);
	WR4(sc, PDC_RNPR, 0);
	WR4(sc, PDC_RNCR, 0);
	WR4(sc, PDC_TNPR, 0);
	WR4(sc, PDC_TNCR, 0);
	WR4(sc, PDC_RPR, 0);
	WR4(sc, PDC_RCR, 0);
	WR4(sc, PDC_TPR, 0);
	WR4(sc, PDC_TCR, 0);
	RD4(sc, SPI_RDR);
	RD4(sc, SPI_SR);

	device_add_child(dev, "spibus", -1);
	bus_generic_attach(dev);
out:;
	if (err)
		at91_spi_deactivate(dev);
	return (err);
}
Example #18
0
drm_dma_handle_t *
drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr)
{
    drm_dma_handle_t *h;
    int error, nsegs;


    /* Need power-of-two alignment, so fail the allocation if it isn't. */
    if ((align & (align - 1)) != 0) {
        DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
                  (int)align);
        return NULL;
    }

    h = malloc(sizeof(drm_dma_handle_t), M_DRM, M_ZERO | M_NOWAIT);

    if (h == NULL)
        return NULL;
    if ((error = bus_dmamem_alloc(dev->pa.pa_dmat, size, align, 0,
                                  h->segs, 1, &nsegs, BUS_DMA_NOWAIT)) != 0) {
        printf("drm: Unable to allocate DMA, error %d\n", error);
        goto fail;
    }
    if ((error = bus_dmamem_map(dev->pa.pa_dmat, h->segs, nsegs, size,
                                &h->addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
        printf("drm: Unable to map DMA, error %d\n", error);
        goto free;
    }
    if ((error = bus_dmamap_create(dev->pa.pa_dmat, size, 1, size, 0,
                                   BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &h->map)) != 0) {
        printf("drm: Unable to create DMA map, error %d\n", error);
        goto unmap;
    }
    if ((error = bus_dmamap_load(dev->pa.pa_dmat, h->map, h->addr, size,
                                 NULL, BUS_DMA_NOWAIT)) != 0) {
        printf("drm: Unable to load DMA map, error %d\n", error);
        goto destroy;
    }
    h->busaddr = DRM_PCI_DMAADDR(h);
    h->vaddr = h->addr;
    h->size = size;

    return h;

destroy:
    bus_dmamap_destroy(dev->pa.pa_dmat, h->map);
unmap:
    bus_dmamem_unmap(dev->pa.pa_dmat, h->addr, size);
free:
    bus_dmamem_free(dev->pa.pa_dmat, h->segs, 1);
fail:
    free(h, M_DRM);
    return NULL;

}
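/*
 * Illustrative caller sketch (hypothetical function and sizes): allocate
 * a page-aligned, physically contiguous 64KB buffer anywhere in the
 * 32-bit address space and use the two addresses the handle exposes.
 */
static drm_dma_handle_t *
example_alloc_ring(drm_device_t *dev)
{
	drm_dma_handle_t *ring;

	ring = drm_pci_alloc(dev, 64 * 1024, PAGE_SIZE, 0xffffffffUL);
	if (ring == NULL)
		return NULL;

	/* ring->busaddr is what the device gets programmed with;
	 * ring->vaddr is the kernel mapping the CPU reads and writes. */
	return ring;
}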
Example #19
0
void
isci_request_construct(struct ISCI_REQUEST *request,
    SCI_CONTROLLER_HANDLE_T scif_controller_handle,
    bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
{

	request->controller_handle = scif_controller_handle;
	request->dma_tag = io_buffer_dma_tag;
	request->physical_address = physical_address;
	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
	callout_init(&request->timer, CALLOUT_MPSAFE);
}
Example #20
0
/*
 * Attach the wdsc driver
 */
void
wdsc_attach(struct device *parent, struct device *self, void *aux)
{
	struct wdsc_softc *wsc = (struct wdsc_softc *)self;
	struct wd33c93_softc *sc = &wsc->sc_wd33c93;
	struct hpc_attach_args *haa = aux;
	int err;

	sc->sc_regt = haa->ha_st;
	wsc->sc_dmat = haa->ha_dmat;

	wsc->sc_hpcdma.hpc = haa->hpc_regs;

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff + 3, 1, &sc->sc_asr_regh)) != 0) {
		printf(": unable to map asr reg, err=%d\n", err);
		return;
	}

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff + 3 + 4,  1, &sc->sc_data_regh)) != 0) {
		printf(": unable to map data reg, err=%d\n", err);
		return;
	}

	if (bus_dmamap_create(wsc->sc_dmat, MAXPHYS,
	    wsc->sc_hpcdma.hpc->scsi_dma_segs,
	    wsc->sc_hpcdma.hpc->scsi_dma_segs_size,
	    wsc->sc_hpcdma.hpc->scsi_dma_segs_size,
	    BUS_DMA_WAITOK, &wsc->sc_dmamap) != 0) {
		printf(": failed to create dmamap\n");
		return;
	}

	sc->sc_dmasetup = wdsc_dmasetup;
	sc->sc_dmago    = wdsc_dmago;
	sc->sc_dmastop  = wdsc_dmastop;
	sc->sc_reset	= wdsc_reset;

	sc->sc_id = 0;					/* Host ID = 0 */
	sc->sc_clkfreq = 200;				/* 20MHz */
	sc->sc_dmamode = SBIC_CTL_BURST_DMA;

	if (hpc_intr_establish(haa->ha_irq, IPL_BIO,
	     wd33c93_intr, wsc, self->dv_xname) == NULL) {
		printf(": unable to establish interrupt!\n");
		return;
	}

	hpcdma_init(haa, &wsc->sc_hpcdma, wsc->sc_hpcdma.hpc->scsi_dma_segs);
	wd33c93_attach(sc, &wdsc_switch);
}
Example #21
0
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_get error %d\n", __func__, r));
		goto err1;
	}
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

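	/* The control map covers only the virtio request/response headers,
	 * i.e. the part of struct vioscsi_req that precedes vr_xs. */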
	r = bus_dmamap_create(vsc->sc_dmat,
	    offsetof(struct vioscsi_req, vr_xs), 1,
	    offsetof(struct vioscsi_req, vr_xs), 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create xs error %d\n", __func__, r));
		goto err2;
	}
	r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create data error %d\n", __func__, r));
		goto err3;
	}
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
	    BUS_DMA_NOWAIT);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create ctrl error %d\n", __func__, r));
		goto err4;
	}

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;

err4:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
err3:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
err2:
	virtio_enqueue_abort(vsc, vq, slot);
err1:
	return NULL;
}
Example #22
0
#if OPENBSD_BUSDMA
struct ldc_queue *
ldc_queue_alloc(bus_dma_tag_t t, int nentries)
#else
struct ldc_queue *
ldc_queue_alloc(int nentries)
#endif
{
	struct ldc_queue *lq;
	bus_size_t size;
	vaddr_t va = 0;
#if OPENBSD_BUSDMA
	int nsegs;
#endif

	lq = kmem_zalloc(sizeof(struct ldc_queue), KM_NOSLEEP);
	if (lq == NULL)
		return NULL;

	mutex_init(&lq->lq_mtx, MUTEX_DEFAULT, IPL_TTY);

	size = roundup(nentries * sizeof(struct ldc_pkt), PAGE_SIZE);
#if OPENBSD_BUSDMA
	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &lq->lq_map) != 0)
		return (NULL);

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &lq->lq_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &lq->lq_seg, 1, size, (void *)&va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	 if (bus_dmamap_load(t, lq->lq_map, (void*)va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;
#else
	 va = (vaddr_t)kmem_zalloc(size, KM_NOSLEEP);
#endif
	lq->lq_va = (vaddr_t)va;
	lq->lq_nentries = nentries;
	return (lq);
#if OPENBSD_BUSDMA
unmap:
	bus_dmamem_unmap(t, (void*)va, size);
free:
	bus_dmamem_free(t, &lq->lq_seg, 1);
destroy:
	bus_dmamap_destroy(t, lq->lq_map);
#endif
	return (NULL);
}
static int
dcons_crom_attach(device_t dev)
{
#ifdef NEED_NEW_DRIVER
	printf("dcons_crom: you need newer firewire driver\n");
	return (-1);
#else
	struct dcons_crom_softc *sc;
	int error;

	if (dcons_conf->buf == NULL)
		return (ENXIO);
        sc = (struct dcons_crom_softc *) device_get_softc(dev);
	sc->fd.fc = device_get_ivars(dev);
	sc->fd.dev = dev;
	sc->fd.post_explore = NULL;
	sc->fd.post_busreset = (void *) dcons_crom_post_busreset;

	/* map dcons buffer */
	error = bus_dma_tag_create(
		/*parent*/ sc->fd.fc->dmat,
		/*alignment*/ sizeof(u_int32_t),
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/ dcons_conf->size,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ BUS_DMA_ALLOCNOW,
#if __FreeBSD_version >= 501102
		/*lockfunc*/busdma_lock_mutex,
		/*lockarg*/&Giant,
#endif
		&sc->dma_tag);
	if (error != 0)
		return (error);
	error = bus_dmamap_create(sc->dma_tag, BUS_DMA_COHERENT, &sc->dma_map);
	if (error != 0)
		return (error);
	error = bus_dmamap_load(sc->dma_tag, sc->dma_map,
	    (void *)dcons_conf->buf, dcons_conf->size,
	    dmamap_cb, sc, 0);
	if (error != 0)
		return (error);
	sc->ehand = EVENTHANDLER_REGISTER(dcons_poll, dcons_crom_poll,
			 (void *)sc, 0);
	return (0);
#endif
}
Example #24
0
static int
vs_allocmem(struct vs_softc *sc, size_t size, size_t align, size_t boundary,
	struct vs_dma *vd)
{
	int error;

#ifdef DIAGNOSTIC
	if (size > DMAC_MAXSEGSZ)
		panic ("vs_allocmem: maximum size exceeded, %d", (int) size);
#endif

	vd->vd_size = size;

	error = bus_dmamem_alloc(vd->vd_dmat, vd->vd_size, align, boundary,
				 vd->vd_segs,
				 sizeof (vd->vd_segs) / sizeof (vd->vd_segs[0]),
				 &vd->vd_nsegs, BUS_DMA_WAITOK);
	if (error)
		goto out;

	error = bus_dmamem_map(vd->vd_dmat, vd->vd_segs, vd->vd_nsegs,
			       vd->vd_size, &vd->vd_addr,
			       BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (error)
		goto free;

	error = bus_dmamap_create(vd->vd_dmat, vd->vd_size, 1, DMAC_MAXSEGSZ,
				  0, BUS_DMA_WAITOK, &vd->vd_map);
	if (error)
		goto unmap;

	error = bus_dmamap_load(vd->vd_dmat, vd->vd_map, vd->vd_addr,
				vd->vd_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	return 0;

 destroy:
	bus_dmamap_destroy(vd->vd_dmat, vd->vd_map);
 unmap:
	bus_dmamem_unmap(vd->vd_dmat, vd->vd_addr, vd->vd_size);
 free:
	bus_dmamem_free(vd->vd_dmat, vd->vd_segs, vd->vd_nsegs);
 out:
	return error;
}
Example #25
0
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
{
	struct vdsk_dring *vd;
	bus_size_t size;
	caddr_t va;
	int nsegs;
	int i;

	vd = malloc(sizeof(struct vdsk_dring), M_DEVBUF, M_NOWAIT);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		return (NULL);

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

unmap:
	bus_dmamem_unmap(t, va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);

	return (NULL);
}
Example #26
0
/*
 * Setup a DMA channel's bounce buffer.
 */
int
isa_dma_init(int chan, u_int bouncebufsize, int flag __unused)
{
	static int initted = 0;
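	/* 8-bit channels (0-3) must not cross a 64KB boundary,
	 * 16-bit channels (4-7) a 128KB one. */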
	bus_addr_t boundary = chan >= 4 ? 0x20000 : 0x10000;

	if (!initted) {
		/*
		 * Reset the DMA hardware.
		 */
		outb(DMA1_RESET, 0);
		outb(DMA2_RESET, 0);
		isa_dmacascade(4);
	    
		initted = 1;
	}

#ifdef DIAGNOSTIC
	if (chan & ~VALID_DMA_MASK)
		panic("isa_dma_init: channel out of range");

	if (dma_tag[chan] || dma_map[chan])
		panic("isa_dma_init: impossible request"); 
#endif

	if (bus_dma_tag_create(/*parent*/NULL,
			       /*alignment*/2,
			       /*boundary*/boundary,
			       /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/bouncebufsize,
			       /*nsegments*/1, /*maxsegz*/0x3ffff,
			       /*flags*/0,
			       /*lockfunc*/busdma_lock_mutex,
			       /*lockarg*/&Giant,
			       &dma_tag[chan]) != 0) {
		panic("isa_dma_init: unable to create dma tag\n");
	}
	
	if (bus_dmamap_create(dma_tag[chan], 0, &dma_map[chan])) {
		panic("isa_dma_init: unable to create dma map\n");
	}

	return (0);
}
Example #27
0
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}
Example #28
0
int
_isa_dmamap_create(struct isa_dma_state *ids, int chan, bus_size_t size, int flags)
{
	int error;

	if (chan < 0 || chan > 7) {
		printf("%s: bogus drq %d\n", device_xname(ids->ids_dev), chan);
		return (EINVAL);
	}

	if (size > ids->ids_maxsize[chan])
		return (EINVAL);

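	/* The channel's maximum transfer size doubles as the dmamap boundary,
	 * so no segment can cross the ISA controller's 64KB/128KB page limit. */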
	error = bus_dmamap_create(ids->ids_dmat, size, 1, size,
	    ids->ids_maxsize[chan], flags, &ids->ids_dmamaps[chan]);

	return (error);
}
void
rlcattach(device_t parent, device_t self, void *aux)
{
	struct rlc_softc *sc = device_private(self);
	struct uba_attach_args *ua = aux;
	struct rlc_attach_args ra;
	int i, error;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec,
		rlcintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		device_xname(sc->sc_dev), "intr");
	uba_reset_establish(rlcreset, self);

	printf("\n");

	/*
	 * The RL11 can only have one transfer going at a time,
	 * and max transfer size is one track, so only one dmamap
	 * is needed.
	 */
	error = bus_dmamap_create(sc->sc_dmat, MAXRLXFER, 1, MAXRLXFER, 0,
	    BUS_DMA_ALLOCNOW, &sc->sc_dmam);
	if (error) {
		aprint_error(": Failed to allocate DMA map, error %d\n", error);
		return;
	}
	bufq_alloc(&sc->sc_q, "disksort", BUFQ_SORT_CYLINDER);
	for (i = 0; i < RL_MAXDPC; i++) {
		waitcrdy(sc);
		RL_WREG(RL_DA, RLDA_GS|RLDA_RST);
		RL_WREG(RL_CS, RLCS_GS|(i << RLCS_USHFT));
		waitcrdy(sc);
		ra.type = RL_RREG(RL_MP);
		ra.hwid = i;
		if ((RL_RREG(RL_CS) & RLCS_ERR) == 0)
			config_found(sc->sc_dev, &ra, rlcprint);
	}
}
Example #30
0
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct vm_page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}

	gtt->segs = mallocarray(gtt->ttm.ttm.num_pages,
	    sizeof(bus_dma_segment_t), M_DRM, M_WAITOK | M_ZERO);
	if (gtt->segs == NULL) {
		ttm_dma_tt_fini(&gtt->ttm);
		free(gtt, M_DRM, 0);
		return NULL;
	}

	if (bus_dmamap_create(rdev->dmat, size, gtt->ttm.ttm.num_pages, size,
			      0, BUS_DMA_WAITOK, &gtt->map)) {
		free(gtt->segs, M_DRM, 0);
		ttm_dma_tt_fini(&gtt->ttm);
		free(gtt, M_DRM, 0);
		return NULL;
	}

	return &gtt->ttm.ttm;
}