/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * Constrains the allocation to the machine's managed physical
 * memory as described by pmap_limits.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	/* Delegate to the range allocator, bounded by managed RAM. */
	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, pmap_limits.avail_start,
	    trunc_page(pmap_limits.avail_end));
}
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * This variant places no addressing restrictions on the allocation:
 * the acceptable range is the entire bus address space.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	/* Low bound 0, high bound all-ones: no restriction. */
	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
}
/*
 * DMA-safe memory allocation for tags that may carry a list of
 * DMA-able physical ranges.  If the tag has ranges, each non-empty
 * range is tried in order until one can satisfy the request;
 * otherwise all of physical memory is acceptable.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error;
	int i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	dr = t->_ranges;
	if (dr == NULL) {
		/* No restrictions: allocate anywhere in managed RAM. */
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	} else {
		/*
		 * Walk the tag's ranges, skipping empty ones, until an
		 * allocation succeeds.  If every range fails (or all are
		 * empty) the last error — ENOMEM by default — stands.
		 */
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0)
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return error;
}
/*
 * Allocate memory safe for ISA DMA.
 *
 * The upper bound is capped at the ISA DMA bounce threshold when
 * physical memory extends beyond it; otherwise the end of available
 * memory is used.
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	paddr_t high;

	if (avail_end <= ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(avail_end);
	else
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high);
}
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * XXX With fully working iommus and bounce buffers this could fall
 * back to the unrestricted range { 0, -1 }.  Until then, stay
 * conservative and keep DMA memory below the 4GB boundary.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	/* Restrict the allocation to the first 4GB of physical space. */
	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)0xffffffff);
}
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * The tag may carry bounce-buffer address limits; the allocation is
 * constrained to [t->_bounce_alloc_lo, high], where high is the
 * tag's upper limit when set and below the end of available memory.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	bus_addr_t high;

	/*
	 * Use the end of available memory unless the tag specifies a
	 * lower upper limit.
	 */
	if (t->_bounce_alloc_hi == 0 || _BUS_AVAIL_END <= t->_bounce_alloc_hi)
		high = trunc_page(_BUS_AVAIL_END);
	else
		high = trunc_page(t->_bounce_alloc_hi);

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high);
}
/*
 * Allocate memory safe for ISA DMA.
 *
 * Scans the physical memory segments for the highest available
 * address, then caps the allocation range at the ISA DMA bounce
 * threshold if memory extends beyond it.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t avail_end;
	paddr_t seg_end;
	paddr_t high;
	int bank;

	/* Find the end of the highest physical memory segment. */
	avail_end = 0;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		seg_end = vm_physmem[bank].avail_end << PGSHIFT;
		if (seg_end > avail_end)
			avail_end = seg_end;
	}

	/* Never allocate above the ISA DMA bounce threshold. */
	if (avail_end <= ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(avail_end);
	else
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high);
}