Example #1
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_addr_t lastaddr = 0;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
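
Every wrapper in this collection delegates the real work to a per-port _bus_dmamap_load_buffer(). As orientation only, here is a minimal sketch of what such a helper typically does on a direct-mapped port; the parameter list matches the calls above, but the pmap_extract()-based walk and the segment-merging policy are illustrative assumptions, not any particular port's code.

/*
 * Sketch: walk 'buf' page by page, translate each virtual address,
 * and either extend the current DMA segment or start a new one.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags,
    bus_addr_t *lastaddrp, int *segp, int first)
{
	vaddr_t vaddr = (vaddr_t)buf;
	bus_addr_t lastaddr = *lastaddrp;
	paddr_t curaddr;
	bus_size_t sgsize;
	int seg = *segp;
	pmap_t pmap = (p != NULL) ?
	    p->p_vmspace->vm_map.pmap : pmap_kernel();

	while (buflen > 0) {
		if (!pmap_extract(pmap, vaddr, &curaddr))
			return (EFAULT);

		/* Run to the end of this page, capped at buflen. */
		sgsize = PAGE_SIZE - (vaddr & PGOFSET);
		if (sgsize > buflen)
			sgsize = buflen;

		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else if (curaddr == lastaddr &&
		    map->dm_segs[seg].ds_len + sgsize <= map->dm_maxsegsz) {
			/* Physically contiguous: extend the segment. */
			map->dm_segs[seg].ds_len += sgsize;
		} else {
			if (++seg >= map->_dm_segcnt)
				return (EFBIG);
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;
	return (0);
}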
Example #2
int
mbus_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	lastaddr = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(NULL, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}

	return (error);
}
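
For context, a sketch of how a network driver's transmit path might consume the mbuf variant above; sc, sc_dmat, txmap and the desc[] descriptor array are driver-local names assumed for illustration.

	int i, error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);	/* e.g. EFBIG: chain has too many segments */

	/* Flush CPU caches before the device reads the packet. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* One hardware descriptor per DMA segment. */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		desc[i].addr = txmap->dm_segs[i].ds_addr;
		desc[i].len = txmap->dm_segs[i].ds_len;
	}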
Example #3
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
		&lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
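
The load above sits in the middle of the usual map lifecycle. A sketch, with an illustrative one-segment, MAXPHYS-sized map and a kernel buffer (p == NULL); t, buf and len are assumed to be in scope.

	bus_dmamap_t map;
	int error;

	error = bus_dmamap_create(t, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error == 0) {
		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
		/* ... run the transfer, wait for completion ... */
		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(t, map);
	}
	bus_dmamap_destroy(t, map);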
Example #4
int
mbus_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
		 struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (size > map->_dm_size)
		return (EINVAL);

	seg = 0;
	lastaddr = 0;
	error = _bus_dmamap_load_buffer(NULL, map, addr, size, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = size;
		map->dm_nsegs = seg + 1;
	}

	return (error);
}
Example #5
/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
                     int *nsegs, int flags)
{
    vm_paddr_t paddr;
    bus_size_t len, tlen;
    int error, i, ma_offs;

    if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
        error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
                                        bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
        return (error);
    }

    error = 0;
    tlen = bio->bio_bcount;
    ma_offs = bio->bio_ma_offset;
    for (i = 0; tlen > 0; i++, tlen -= len) {
        len = min(PAGE_SIZE - ma_offs, tlen);
        paddr = VM_PAGE_TO_PHYS(bio->bio_ma[i]) + ma_offs;
        error = _bus_dmamap_load_phys(dmat, map, paddr, len,
                                      flags, NULL, nsegs);
        if (error != 0)
            break;
        ma_offs = 0;
    }
    return (error);
}
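
The unmapped path walks bio_ma[] one page at a time; only the first chunk can start at a nonzero offset. A worked example of the lengths it produces, assuming 4096-byte pages:

/*
 * bio_bcount = 10000, bio_ma_offset = 512:
 *
 *   i = 0: len = min(4096 - 512, 10000) = 3584   (page 0, offset 512)
 *   i = 1: len = min(4096, 6416)        = 4096   (page 1, offset 0)
 *   i = 2: len = min(4096, 2320)        = 2320   (page 2, offset 0)
 *
 * ma_offs is reset to 0 after the first pass, so every later chunk
 * is page-aligned.
 */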
Example #6
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
#if 0
	struct arm32_dma_range *dr;
#endif
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Mbuf chains should almost never have coherent (i.e.
	 * un-cached) mappings, so clear that flag now.
	 */
	map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
 		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
 		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_proc = NULL;	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
Example #7
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_proc = p;
	}
	return (error);
}
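
A sketch of feeding this uio variant from kernel code; the single-iovec uio is an illustrative assumption, and uio_procp can stay NULL because uio_segflg is not UIO_USERSPACE.

	struct iovec iov;
	struct uio uio;
	int error;

	iov.iov_base = buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;	/* kernel addresses */
	uio.uio_procp = NULL;

	error = _bus_dmamap_load_uio(t, map, &uio, BUS_DMA_NOWAIT);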
Example #8
int
mbus_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (resid > map->_dm_size)
		return (EINVAL);

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(NULL, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
Example #9
/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	struct vmspace * const vm = vmspace_kernel();
	const bool coherent_p = (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT);
	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
	bus_size_t mapsize = 0;
	bool first = true;
	int curseg = 0;
	int error = 0;

	for (; error == 0 && nsegs-- > 0; segs++) {
		void *kva;
#ifdef _LP64
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(segs->ds_addr);
		}
#else
		if (segs->ds_addr >= MIPS_PHYS_MASK)
			return EFBIG;
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
		}
#endif	/* _LP64 */
		mapsize += segs->ds_len;
		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
		    vm, flags, &curseg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = mapsize;
		map->dm_nsegs = curseg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (coherent_p)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
	/*
	 * If bus_dmamem_alloc returned memory that needs bouncing,
	 * that's a bug which we will not work around.
	 */
	return error;
}
Example #10
/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
                     int *nsegs, int flags)
{
    int error;

    if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
        error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
                                        bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
    } else {
        error = _bus_dmamap_load_ma(dmat, map, bio->bio_ma,
                                    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs);
    }
    return (error);
}
Example #11
static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	size_t buflen, int buftype, int flags)
{
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int seg, error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = buftype;
	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags, &seg, true);
	if (error)
		return (error);

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->dm_nsegs = seg + 1;
	map->_dm_vmspace = vm;
	/*
	 * If our cache is coherent, then the map must be coherent too.
	 */
	if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
		map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
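
Once _BUS_DMA_IS_BOUNCING is set, the port's _bus_dmamap_sync() must copy between the caller's buffer and the bounce pages. A minimal sketch of that step for the linear case, using the cookie fields shown above; offset, len and ops stand for the usual sync arguments.

	if (cookie->id_buftype == _BUS_DMA_BUFTYPE_LINEAR) {
		if (ops & BUS_DMASYNC_PREWRITE) {
			/* Device will read: stage data into the bounce pages. */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}
		if (ops & BUS_DMASYNC_POSTREAD) {
			/* Device wrote: copy the bounce pages back out. */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}
	}
	/*
	 * Mbuf and uio buffer types need m_copydata()/m_copyback()- and
	 * uiomove()-style copies instead of a flat memcpy().
	 */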
Example #12
/*
 * Load a list of virtual addresses.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
                       bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
                       int flags)
{
    int error;

    error = 0;
    for (; sglist_cnt > 0; sglist_cnt--, list++) {
        error = _bus_dmamap_load_buffer(dmat, map,
                                        (void *)(uintptr_t)list->ds_addr, list->ds_len, pmap,
                                        flags, NULL, nsegs);
        if (error)
            break;
    }
    return (error);
}
Example #13
File: bus.c  Project: ryo/netbsd-src
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= NEWSMIPS_DMAMAP_COHERENT;
	}
	return error;
}
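
The address-range test above works because a 32-bit MIPS kernel has fixed, TLB-free address windows:

/*
 *   KSEG0  0x80000000 - 0x9fffffff   unmapped, cached
 *   KSEG1  0xa0000000 - 0xbfffffff   unmapped, uncached
 *   KSEG2  0xc0000000 - ...          TLB-mapped
 *
 * A buffer in [MIPS_KSEG1_START, MIPS_KSEG2_START) is accessed
 * uncached, so DMA to or from it needs no cache maintenance and the
 * map can safely be marked COHERENT.
 */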
Example #14
File: bus.c  Project: ryo/netbsd-src
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return error;
}
Example #15
/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
                         struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
    struct mbuf *m;
    int error;

    error = 0;
    for (m = m0; m != NULL && error == 0; m = m->m_next) {
        if (m->m_len > 0) {
            error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
                                            m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF,
                                            segs, nsegs);
        }
    }
    CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
         __func__, dmat, flags, error, *nsegs);
    return (error);
}
Example #16
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
    bus_dma_segment_t *segs;
    struct memdesc mem;
    int error;
    int nsegs;

    if ((flags & BUS_DMA_NOWAIT) == 0) {
        mem = memdesc_vaddr(buf, buflen);
        _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
    }

    nsegs = -1;
    error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
                                    flags, NULL, &nsegs);
    nsegs++;

    CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
         __func__, dmat, flags, error, nsegs);

    if (error == EINPROGRESS)
        return (error);

    segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
    if (error)
        (*callback)(callback_arg, segs, 0, error);
    else
        (*callback)(callback_arg, segs, nsegs, 0);

    /*
     * Return ENOMEM to the caller so that it can pass it up the stack.
     * This error only happens when NOWAIT is set, so deferral is disabled.
     */
    if (error == ENOMEM)
        return (error);

    return (0);
}
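
A sketch of the callback convention this entry point implies; mydev_dma_cb and the xfer structure are hypothetical driver names. Without BUS_DMA_NOWAIT the load may return EINPROGRESS and complete later through the same callback.

/* Hypothetical completion callback: record the segments. */
static void
mydev_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct mydev_xfer *xfer = arg;

    if (error != 0)
        return;                     /* load failed; map not usable */
    xfer->paddr = segs[0].ds_addr;  /* single-segment tag assumed */
}

    error = bus_dmamap_load(sc->sc_dmat, xfer->map, xfer->buf, xfer->len,
                            mydev_dma_cb, xfer, 0);
    if (error == EINPROGRESS)
        return (0);     /* deferred: callback fires when resources free up */
    if (error != 0)
        return (error);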
Example #17
File: bus.c  Project: ryo/netbsd-src
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
	return error;
}
Example #18
/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_proc = p;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
Example #19
/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
                     int *nsegs, int flags)
{
    bus_size_t resid;
    bus_size_t minlen;
    struct iovec *iov;
    pmap_t pmap;
    caddr_t addr;
    int error, i;

    if (uio->uio_segflg == UIO_USERSPACE) {
        KASSERT(uio->uio_td != NULL,
                ("bus_dmamap_load_uio: USERSPACE but no proc"));
        pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
    } else
        pmap = kernel_pmap;
    resid = uio->uio_resid;
    iov = uio->uio_iov;
    error = 0;

    for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
        /*
         * Now at the first iovec to load.  Load each iovec
         * until we have exhausted the residual count.
         */

        addr = (caddr_t) iov[i].iov_base;
        minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
        if (minlen > 0) {
            error = _bus_dmamap_load_buffer(dmat, map, addr,
                                            minlen, pmap, flags, NULL, nsegs);
            resid -= minlen;
        }
    }

    return (error);
}
Example #20
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
Example #21
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct memdesc *mem, bus_dmamap_callback_t *callback,
                    void *callback_arg, int flags)
{
    bus_dma_segment_t *segs;
    int error;
    int nsegs;

    if ((flags & BUS_DMA_NOWAIT) == 0)
        _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

    nsegs = -1;
    error = 0;
    switch (mem->md_type) {
    case MEMDESC_VADDR:
        error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
                                        mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
        break;
    case MEMDESC_PADDR:
        error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
                                      mem->md_opaque, flags, NULL, &nsegs);
        break;
    case MEMDESC_VLIST:
        error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
                                       mem->md_opaque, kernel_pmap, &nsegs, flags);
        break;
    case MEMDESC_PLIST:
        error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
                                       mem->md_opaque, &nsegs, flags);
        break;
    case MEMDESC_BIO:
        error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
                                     &nsegs, flags);
        break;
    case MEMDESC_UIO:
        error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
                                     &nsegs, flags);
        break;
    case MEMDESC_MBUF:
        error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
                                         NULL, &nsegs, flags);
        break;
    case MEMDESC_CCB:
        error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
                                     flags);
        break;
    }
    nsegs++;

    CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
         __func__, dmat, flags, error, nsegs);

    if (error == EINPROGRESS)
        return (error);

    segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
    if (error)
        (*callback)(callback_arg, segs, 0, error);
    else
        (*callback)(callback_arg, segs, nsegs, 0);

    /*
     * Return ENOMEM to the caller so that it can pass it up the stack.
     * This error only happens when NOWAIT is set, so deferral is disabled.
     */
    if (error == ENOMEM)
        return (error);

    return (0);
}
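
bus_dmamap_load_mem() gives one entry point for every buffer shape dispatched above. A sketch of pushing a plain kernel buffer through it; memdesc_vaddr() is the constructor already used by bus_dmamap_load() in example #16, while sc, map and mydev_dma_cb are assumed driver names.

    struct memdesc mem;
    int error;

    mem = memdesc_vaddr(buf, buflen);
    error = bus_dmamap_load_mem(sc->sc_dmat, map, &mem,
                                mydev_dma_cb, sc, BUS_DMA_NOWAIT);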
Example #22
/*
 * Load a cam control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
                     int *nsegs, int flags)
{
    struct ccb_hdr *ccb_h;
    void *data_ptr;
    int error;
    uint32_t dxfer_len;
    uint16_t sglist_cnt;

    error = 0;
    ccb_h = &ccb->ccb_h;
    switch (ccb_h->func_code) {
    case XPT_SCSI_IO: {
        struct ccb_scsiio *csio;

        csio = &ccb->csio;
        data_ptr = csio->data_ptr;
        dxfer_len = csio->dxfer_len;
        sglist_cnt = csio->sglist_cnt;
        break;
    }
    case XPT_CONT_TARGET_IO: {
        struct ccb_scsiio *ctio;

        ctio = &ccb->ctio;
        data_ptr = ctio->data_ptr;
        dxfer_len = ctio->dxfer_len;
        sglist_cnt = ctio->sglist_cnt;
        break;
    }
    case XPT_ATA_IO: {
        struct ccb_ataio *ataio;

        ataio = &ccb->ataio;
        data_ptr = ataio->data_ptr;
        dxfer_len = ataio->dxfer_len;
        sglist_cnt = 0;
        break;
    }
    default:
        panic("_bus_dmamap_load_ccb: Unsupported func code %d",
              ccb_h->func_code);
    }

    switch ((ccb_h->flags & CAM_DATA_MASK)) {
    case CAM_DATA_VADDR:
        error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
                                        kernel_pmap, flags, NULL, nsegs);
        break;
    case CAM_DATA_PADDR:
        error = _bus_dmamap_load_phys(dmat, map,
                                      (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
                                      nsegs);
        break;
    case CAM_DATA_SG:
        error = _bus_dmamap_load_vlist(dmat, map,
                                       (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
                                       nsegs, flags);
        break;
    case CAM_DATA_SG_PADDR:
        error = _bus_dmamap_load_plist(dmat, map,
                                       (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
        break;
    case CAM_DATA_BIO:
        error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
                                     nsegs, flags);
        break;
    default:
        panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
              ccb_h->flags);
    }
    return (error);
}
Example #23
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
Example #24
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	int seg, i, error;
	bool first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &seg, first);
		first = false;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, uio, uio->uio_resid,
		    _BUS_DMA_BUFTYPE_UIO, flags);
	}
#endif
	return (error);
}
Example #25
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	int seg, error;
	struct mbuf *m;
	struct vmspace * vm = vmspace_kernel();
	bool first;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vm, flags, &seg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie * cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	return (error);
}
Example #26
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int seg, error;
	struct vmspace *vm;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    vm, flags, &seg, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		STAT_INCR(loads);

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		else if (MIPS_KSEG1_P(buf))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#ifdef _LP64
		else if (MIPS_XKPHYS_P((vaddr_t)buf)
		    && MIPS_XKPHYS_TO_CCA((vaddr_t)buf) == MIPS3_PG_TO_CCA(MIPS3_PG_UNCACHED))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#endif
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}