Example #1
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
		&lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
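
A typical caller pairs the load with bus_dmamap_sync() before starting the transfer. The following sketch shows the usual linear-buffer pattern from a driver's point of view; xx_start_dma() is hypothetical, and the flag choice assumes a device-read (transmit) transfer.

#include <sys/param.h>
#include <sys/bus.h>

static int
xx_start_dma(bus_dma_tag_t dmat, bus_dmamap_t dmamap, void *buf,
    bus_size_t len)
{
	int error;

	/* p == NULL selects the kernel vmspace, as in the code above. */
	error = bus_dmamap_load(dmat, dmamap, buf, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0)
		return error;

	/* Write back CPU caches before the device reads the buffer. */
	bus_dmamap_sync(dmat, dmamap, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program dmamap->dm_segs[0 .. dm_nsegs - 1] into the device ... */
	return 0;
}
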
Example #2
File: bus.c Project: ryo/netbsd-src
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
	return error;
}
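
For reference, a network driver's transmit path usually reaches this backend through bus_dmamap_load_mbuf() and then walks dm_segs. A hedged sketch follows; xx_put_txdesc() is a made-up stand-in for whatever descriptor format the hardware uses.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>

/* Hypothetical descriptor writer; a real driver fills its TX ring here. */
void	xx_put_txdesc(bus_addr_t addr, bus_size_t len, bool last);

static int
xx_encap(bus_dma_tag_t dmat, bus_dmamap_t txmap, struct mbuf *m0)
{
	int error, i;

	error = bus_dmamap_load_mbuf(dmat, txmap, m0, BUS_DMA_NOWAIT);
	if (error != 0)
		return error;	/* e.g. EFBIG: caller can defragment and retry */

	/* One hardware descriptor per DMA segment. */
	for (i = 0; i < txmap->dm_nsegs; i++)
		xx_put_txdesc(txmap->dm_segs[i].ds_addr,
		    txmap->dm_segs[i].ds_len, i == txmap->dm_nsegs - 1);

	bus_dmamap_sync(dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	return 0;
}
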
Example #3
/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct vmspace * const vm = vmspace_kernel();
	const bool coherent_p =
	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) != 0;
	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
	bus_size_t mapsize = 0;
	bool first = true;
	int curseg = 0;
	int error = 0;

	for (; error == 0 && nsegs-- > 0; segs++) {
		void *kva;
#ifdef _LP64
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(segs->ds_addr);
		}
#else
		if (segs->ds_addr >= MIPS_PHYS_MASK)
			return EFBIG;
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
		}
#endif	/* _LP64 */
		mapsize += segs->ds_len;
		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
		    vm, flags, &curseg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = mapsize;
		map->dm_nsegs = curseg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (coherent_p)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
	/*
 * If bus_dmamem_alloc returned memory that needs bouncing,
 * that's a bug which we will not work around.
	 */
	return error;
}
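
Unlike the linear and mbuf loaders, this entry point consumes segments straight from bus_dmamem_alloc(), so no process or kernel virtual address is involved on the caller's side. A sketch of the usual pairing through the public bus_dmamap_load_raw() wrapper (names hypothetical, error handling trimmed to the essentials):

#include <sys/param.h>
#include <sys/bus.h>

static int
xx_alloc_ring(bus_dma_tag_t dmat, bus_size_t size, bus_dmamap_t map,
    bus_dma_segment_t *seg)
{
	int error, rseg;

	/* Allocate DMA-safe physical memory: one segment, page aligned. */
	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	/* Load the raw segments directly; no proc, no KVA required. */
	error = bus_dmamap_load_raw(dmat, map, seg, rseg, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		bus_dmamem_free(dmat, seg, rseg);
	return error;
}
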
Example #4
static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	size_t buflen, int buftype, int flags)
{
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int seg, error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = buftype;
	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags, &seg, true);
	if (error)
		return (error);

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->dm_nsegs = seg + 1;
	map->_dm_vmspace = vm;
	/*
	 * If our cache is coherent, then the map must be coherent too.
	 */
	if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
		map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
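
Setting _BUS_DMA_IS_BOUNCING at the end is what later routes sync operations through the bounce copy. The following is a simplified sketch of how a _bus_dmamap_sync() implementation typically handles a bounced linear buffer; field names follow the cookie above, but the real MIPS code also has mbuf and raw cases, so treat this as illustrative only.

#include <sys/param.h>
#include <sys/systm.h>		/* memcpy */
#include <sys/bus.h>

static void
sketch_bounce_copy(struct mips_bus_dma_cookie *cookie, bus_addr_t offset,
    bus_size_t len, int ops)
{
	char *orig = (char *)cookie->id_origbuf + offset;
	char *bounce = (char *)cookie->id_bouncebuf + offset;

	/* Device is about to read: stage caller data into bounce pages. */
	if (ops & BUS_DMASYNC_PREWRITE)
		memcpy(bounce, orig, len);

	/* Device has written: copy bounce pages back to the caller. */
	if (ops & BUS_DMASYNC_POSTREAD)
		memcpy(orig, bounce, len);
}
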
Example #5
File: bus.c Project: ryo/netbsd-src
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= NEWSMIPS_DMAMAP_COHERENT;
	}
	return error;
}
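
The KSEG1 range check above works because KSEG1 is the uncached window into physical memory. A caller that wants its subsequent load marked coherent on such ports typically maps the memory with BUS_DMA_COHERENT, which yields an uncached (KSEG1) kernel address; a hypothetical helper:

#include <sys/param.h>
#include <sys/bus.h>

static int
xx_alloc_coherent(bus_dma_tag_t dmat, bus_size_t size,
    bus_dma_segment_t *seg, void **kvap)
{
	int error, rseg;

	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	/* BUS_DMA_COHERENT requests an uncached mapping where supported. */
	error = bus_dmamem_map(dmat, seg, rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		bus_dmamem_free(dmat, seg, rseg);
	return error;
}
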
Example #6
/*
 * Function for loading a direct-mapped DMA map with a linear buffer.
 */
int
jazz_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) != 0) {
		/* just use pre-allocated DMA TLB for the buffer */
		jazz_tlbmap_t tlbmap;
		bus_size_t off;
		struct vmspace *vm;

		if (p != NULL) {
			vm = p->p_vmspace;
		} else {
			vm = vmspace_kernel();
		}

		tlbmap = (jazz_tlbmap_t)map->_dm_cookie;
		off = jazz_dma_page_offs(buf);
		jazz_dmatlb_map_va(vm, (vaddr_t)buf, buflen, tlbmap->ptebase);

		map->dm_segs[0].ds_addr = tlbmap->vaddr + off;
		map->dm_segs[0].ds_len = buflen;
		map->dm_segs[0]._ds_vaddr = (vaddr_t)buf;
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->_dm_vmspace = vm;

		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= ARC_DMAMAP_COHERENT;

		return 0;
	}

	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0) {
		/* allocate DMA TLB for each dmamap segment */
		error = jazz_bus_dmamap_alloc_sgmap(t, map->dm_segs,
		    map->dm_nsegs, map->_dm_boundary, flags);
	}
	return error;
}
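
The fast path above is selected at map-creation time: BUS_DMA_ALLOCNOW tells bus_dmamap_create() to reserve mapping resources (here, the DMA TLB entries) up front, so the load itself cannot fail for lack of them. An illustrative creation call, with arbitrary size/segment parameters:

#include <sys/param.h>
#include <sys/bus.h>

static int
xx_create_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{
	/* One segment, up to MAXPHYS, no boundary; reserve resources now. */
	return bus_dmamap_create(dmat, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, mapp);
}
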
Example #7
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	int seg, error;
	struct mbuf *m;
	struct vmspace *vm = vmspace_kernel();
	bool first;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vm, flags, &seg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	return (error);
}
Example #8
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int seg, error;
	struct vmspace *vm;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    vm, flags, &seg, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		STAT_INCR(loads);

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		else if (MIPS_KSEG1_P(buf))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#ifdef _LP64
		else if (MIPS_XKPHYS_P((vaddr_t)buf)
		    && MIPS_XKPHYS_TO_CCA((vaddr_t)buf) == MIPS3_PG_TO_CCA(MIPS3_PG_UNCACHED))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#endif
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}
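
From the caller's side the bounce fallback is invisible: the same bus_dmamap_load() call either direct-maps or bounces, and the completion path is identical either way. A sketch of the receive-completion side (names hypothetical):

#include <sys/param.h>
#include <sys/bus.h>

static void
xx_rx_done(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* POSTREAD copies back from the bounce buffer if one was used. */
	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmat, map);
}
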
Example #9
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}