/*
 * Allocate a DMA map, and set up DMA channel.
 */
int
mca_dmamap_create(bus_dma_tag_t t, bus_size_t size, int flags,
    bus_dmamap_t *dmamp, int dmach)
{
	int error;
	struct x86_isa_dma_cookie *cookie;

#ifdef DEBUG
	/* Sanity check */
	if (dmach < 0 || dmach >= 16) {
		printf("mca_dmamap_create: invalid DMA channel %d\n", dmach);
		return (EINVAL);
	}

	if (size > 65536) {
		panic("mca_dmamap_create: dmamap sz %ld > 65536",
		    (long)size);
	}
#endif

	/*
	 * MCA DMA transfer can be maximum 65536 bytes long and must
	 * be in one chunk. No specific boundary constraints are present.
	 */
	if ((error = _bus_dmamap_create(t, size, 1, 65536, 0, flags, dmamp)))
		return (error);

	cookie = (struct x86_isa_dma_cookie *)(*dmamp)->_dm_cookie;

	if (cookie == NULL) {
		/*
		 * Allocate our cookie if not yet done.
		 */
		cookie = malloc(sizeof(struct x86_isa_dma_cookie), M_DMAMAP,
		    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK) |
		    M_ZERO);
		if (cookie == NULL) {
			return ENOMEM;
		}
		(*dmamp)->_dm_cookie = cookie;
	}

	/* Encode DMA channel */
	cookie->id_flags &= 0x0f;
	cookie->id_flags |= dmach << 4;

	/*
	 * Mark the dmamap as using the DMA controller. Some devices
	 * drive DMA themselves, and don't need the MCA DMA controller.
	 * To distinguish the two, use a flag for dmamaps which use the
	 * DMA controller.
	 */
	(*dmamp)->_dm_flags |= _MCABUS_DMA_USEDMACTRL;

	return (0);
}
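/*
 * Added illustration (not part of the original source): the channel
 * number lives in the top nibble of id_flags, so decoding it is the
 * inverse of the encode step above.  This helper is a sketch that
 * assumes the same cookie layout; it does not exist in the tree.
 */
static inline int
mca_dma_get_channel(const struct x86_isa_dma_cookie *cookie)
{

	/* High nibble holds the channel written by mca_dmamap_create(). */
	return (cookie->id_flags >> 4) & 0x0f;
}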
/*
 * Create a DMA map. If BUS_DMA_ALLOCNOW is specified and
 * nsegments is 1, allocate jazzdmatlb here, too.
 */
int
jazz_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arc_bus_dmamap *map;
	jazz_tlbmap_t tlbmap;
	int error, npte;

	if (nsegments > 1) {
		/*
		 * BUS_DMA_ALLOCNOW is allowed only with one segment for now.
		 * XXX needs re-think.
		 */
		flags &= ~BUS_DMA_ALLOCNOW;
	}

	if ((flags & BUS_DMA_ALLOCNOW) == 0)
		return _bus_dmamap_create(t, size, nsegments, maxsegsz,
		    boundary, flags, dmamp);

	tlbmap = malloc(sizeof(struct jazz_tlbmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (tlbmap == NULL)
		return ENOMEM;

	npte = jazz_dma_page_round(maxsegsz) / JAZZ_DMA_PAGE_SIZE + 1;
	tlbmap->ptebase =
	    jazz_dmatlb_alloc(npte, boundary, flags, &tlbmap->vaddr);
	if (tlbmap->ptebase == NULL) {
		free(tlbmap, M_DMAMAP);
		return ENOMEM;
	}

	error = _bus_dmamap_create(t, size, 1, maxsegsz, boundary,
	    flags, dmamp);
	if (error != 0) {
		jazz_dmatlb_free(tlbmap->vaddr, npte);
		free(tlbmap, M_DMAMAP);
		return error;
	}
	map = *dmamp;
	map->_dm_cookie = (void *)tlbmap;

	return 0;
}
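/*
 * Added usage sketch (illustrative only): BUS_DMA_ALLOCNOW is silently
 * dropped above unless nsegments is 1, so a driver that wants the
 * jazzdmatlb PTEs reserved at create time must ask for a single
 * segment.  Note the npte computation reserves one extra PTE so a
 * maxsegsz transfer whose start is not DMA-page aligned still fits.
 * "example_softc" and its members are hypothetical.
 */
static int
example_jazz_create(struct example_softc *sc)
{

	/* One segment, so BUS_DMA_ALLOCNOW survives the check above. */
	return jazz_bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_dmamap);
}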
int
alpha_sgmap_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	int error;

	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	/* XXX BUS_DMA_ALLOCNOW */

	if (error == 0)
		*dmamp = map;
	else
		alpha_sgmap_dmamap_destroy(t, map);

	return (error);
}
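/*
 * Added note (not from the original source): because of the early
 * return, error is always 0 at the "if (error == 0)" test above, so
 * the else branch is dead code, presumably a placeholder for the
 * XXX BUS_DMA_ALLOCNOW work.  A hypothetical sketch of honouring
 * ALLOCNOW, by analogy with the VAX variant below; the sgmap argument
 * is left abstract because the tag layout is not shown here.
 */
#if 0	/* illustrative only, not the actual alpha implementation */
	if (flags & BUS_DMA_ALLOCNOW) {
		error = alpha_sgmap_alloc(map, round_page(size),
		    sgmap /* from the tag; assumption */, flags);
		if (error)
			alpha_sgmap_dmamap_destroy(t, map);
	}
#endif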
/*
 * Create a UBA SGMAP-mapped DMA map.
 */
int
uba_bus_dmamap_create_sgmap(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	int error;

	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;

	if (flags & BUS_DMA_ALLOCNOW) {
		error = vax_sgmap_alloc(map, vax_round_page(size),
		    t->_sgmap, flags);
		if (error)
			uba_bus_dmamap_destroy_sgmap(t, map);
	}

	return (error);
}
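/*
 * Added usage sketch (illustrative only): drivers normally reach this
 * routine indirectly, through bus_dmamap_create() on a tag that
 * dispatches to the SGMAP variant.  A device that must not sleep at
 * transfer time can pass BUS_DMA_ALLOCNOW so the SGMAP space is
 * reserved up front.  "example_softc" and its members are
 * hypothetical.
 */
static int
example_uba_create(struct example_softc *sc)
{

	return bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_dmamap);
}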
/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24 bits of address space. This means
	 * we can't DMA to pages over 16M. In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary. On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer. On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages. To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers. We check and remember that here.
	 *
	 * ...or, there is an opposite case. The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
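/*
 * Added illustration (not from the original source): the bounce test
 * above, factored into a predicate, with concrete numbers.  With the
 * usual 16MB ISA window and 64MB of RAM, avail_end exceeds
 * t->_wbase + t->_wsize, so high pages must bounce; independently, a
 * 64KB map over 4KB pages can need 17 segments, which overflows a
 * single-segment caller such as the 8237 DMA controller.
 */
static inline bool
isadma_might_bounce(bus_dma_tag_t t, bus_dmamap_t map)
{

	/* Same predicate as in isadma_bounce_dmamap_create() above. */
	return avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt;
}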
/*
 * Create an Integrator DMA map.
 */
static int
integrator_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct integrator_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	DEBUG(printf("I_bus_dmamap_create(tag %x, size %x, nseg %d, max %x,"
	    " boundary %x, flags %x, dmamap %p)\n", (unsigned)t,
	    (unsigned)size, nsegments, (unsigned)maxsegsz,
	    (unsigned)boundary, flags, dmamp));

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct integrator_dma_cookie);

	/*
	 * Some CM boards have private memory which is significantly
	 * faster than the normal memory stick. To support this
	 * memory we have to bounce any DMA transfers.
	 *
	 * In order to DMA to arbitrary buffers, we use "bounce
	 * buffers" - pages in the main PCI-visible memory. On DMA
	 * reads, DMA happens to the bounce buffers, and is copied
	 * into the caller's buffer. On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those pages.
	 * To software using the DMA mapping interface, this looks
	 * simply like a data cache.
	 *
	 * If we have private RAM in the system, we may need bounce
	 * buffers. We check and remember that here.
	 */
#if 0
	cookieflags = ID_MIGHT_NEED_BOUNCE;
#else
	cookieflags = 0;
#endif
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct integrator_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		DEBUG(printf("I_bus_dmamap_create bouncebuf alloc\n"));
		error = integrator_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
		printf("I_bus_dmamap_create failed (%d)\n", error);
	}
	return (error);
}
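/*
 * Added note (not from the original source): with the "#if 0" above,
 * cookieflags is always 0, so ID_MIGHT_NEED_BOUNCE is never set and
 * the bounce path is effectively compiled out, although the cookie
 * still reserves room for the segment array.  A hypothetical way to
 * enable bouncing only on boards with private CM memory;
 * cm_has_private_mem() does not exist and is an assumption made for
 * illustration.
 */
#if 0	/* illustrative only */
	cookieflags = cm_has_private_mem() ? ID_MIGHT_NEED_BOUNCE : 0;
#endif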