/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/* Start out with "no valid mappings" in case we fail below. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * First try a straight load; we only bounce if that fails
	 * and this map is allowed to bounce.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0)
		return (0);
	if ((cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * The direct load failed; go through the bounce buffer.
	 * Allocate bounce pages first if we don't already have them.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Remember the caller's buffer and load the DMA map with the
	 * bounce buffer instead.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Release the bounce pages, unless they were reserved
		 * for this map's exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
static int
vme_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error = _bus_dmamap_load(t, map, buf, buflen, p, flags);

	/*
	 * On success, mask the first segment's address with the VME
	 * slave mask (presumably to make it slave-window relative).
	 */
	if (error == 0)
		map->dm_segs[0].ds_addr &= DVMA_VME_SLAVE_MASK;

	return error;
}
/*
 * Function for loading a direct-mapped DMA map with a linear buffer.
 */
int
jazz_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/* No pre-allocated DMA TLB: do a normal load... */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
		if (error == 0) {
			/* ...then allocate DMA TLB for each segment. */
			error = jazz_bus_dmamap_alloc_sgmap(t, map->dm_segs,
			    map->dm_nsegs, map->_dm_boundary, flags);
		}
		return error;
	}

	/* Just use the pre-allocated DMA TLB for the buffer. */
	{
		jazz_tlbmap_t tlbmap = (jazz_tlbmap_t)map->_dm_cookie;
		struct vmspace *vm;
		bus_size_t off;

		vm = (p != NULL) ? p->p_vmspace : vmspace_kernel();
		off = jazz_dma_page_offs(buf);

		jazz_dmatlb_map_va(vm, (vaddr_t)buf, buflen, tlbmap->ptebase);

		map->dm_segs[0].ds_addr = tlbmap->vaddr + off;
		map->dm_segs[0].ds_len = buflen;
		map->dm_segs[0]._ds_vaddr = (vaddr_t)buf;
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->_dm_vmspace = vm;

		/*
		 * Buffers in KSEG1 are uncached on MIPS, so the
		 * mapping can be flagged DMA-coherent.
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= ARC_DMAMAP_COHERENT;
	}
	return 0;
}
/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/* Start out with "no valid mappings" in case we fail below. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * First try a straight load; we only bounce if that fails
	 * and this map is allowed to bounce.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0)
		return (0);
	if ((cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)
		return (error);

	/*
	 * The direct load failed; go through the bounce buffer.
	 * Allocate bounce pages first if we don't already have them.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map,
		    m0->m_pkthdr.len, flags);
		if (error)
			return (error);
	}

	/*
	 * Remember the caller's mbuf chain and load the DMA map with
	 * the bounce buffer instead.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Release the bounce pages, unless they were reserved
		 * for this map's exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
/*
 * Load an ISA DMA map with a linear buffer, bouncing through low
 * memory if any page of the buffer lies above the ISA threshold.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid
	 * mappings" (matches the other bounce-capable load routines).
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen, map->_dm_segcnt,
		    map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
			/*
			 * Fail here; previously we fell through and
			 * marked the map ID_IS_BOUNCING even though
			 * the bounce load failed (and the bounce
			 * buffer may just have been freed), leaving a
			 * stale flag for _isa_bus_dmamap_sync() and
			 * unload to trip over.
			 */
			return (error);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}