Example #1
static __inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	vaddr_t addr = (vaddr_t) map->_dm_origbuf;

	addr += offset;

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREREAD:
		if (((addr | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range(addr, len);
		else
			cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(addr, len);
		break;

	case BUS_DMASYNC_POSTREAD:
		cpu_dcache_inv_range(addr, len);
		break;
	}
}
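The PREREAD case above invalidates without a prior write-back only when both the start address and the length are cache-line aligned; otherwise a plain invalidate could throw away unrelated dirty data sharing the first or last line, so the conservative write-back-and-invalidate is used. A minimal sketch of that test, with a hypothetical DCACHE_LINE_SIZE standing in for the kernel's arm_dcache_align_mask global:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DCACHE_LINE_SIZE 32u	/* hypothetical; real code reads the CPU's value */

/*
 * True when [addr, addr + len) covers only whole cache lines, so a
 * pure invalidate cannot destroy dirty data in a neighbouring line.
 */
static bool
range_is_cacheline_aligned(uintptr_t addr, size_t len)
{
	return ((addr | len) & (DCACHE_LINE_SIZE - 1)) == 0;
}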
Example #2
void
gemini_ipm_copyin(void *dst, bus_addr_t ba, size_t len)
{
	void *src;

	DPRINTFN(2, ("%s:%d: %p %#lx %ld\n",
		__FUNCTION__, __LINE__, dst, ba, len));
	src = gemini_ba_to_va(ba);
	memcpy(dst, src, len);
	/* Drop the cached copy of the shared buffer so later reads fetch fresh data. */
	cpu_dcache_inv_range((vaddr_t)src, len);
}
Example #3
static void
_bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops)
{
	KASSERT((va & PAGE_MASK) == (pa & PAGE_MASK));

#ifdef DEBUG_DMA
	printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x\n",
	    va, pa, len, ops);
#endif

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(va, len);
		cpu_sdcache_wbinv_range(va, pa, len);
		break;

	case BUS_DMASYNC_PREREAD: {
		const size_t line_size = arm_dcache_align;
		const size_t line_mask = arm_dcache_align_mask;
		vsize_t misalignment = va & line_mask;
		if (misalignment) {
			va -= misalignment;
			pa -= misalignment;
			len += misalignment;
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
			if (len <= line_size)
				break;
			va += line_size;
			pa += line_size;
			len -= line_size;
		}
		misalignment = len & line_mask;
		len -= misalignment;
		if (len > 0) {
			cpu_dcache_inv_range(va, len);
			cpu_sdcache_inv_range(va, pa, len);
		}
		if (misalignment) {
			va += len;
			pa += len;
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
		}
		break;
	}

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(va, len);
		cpu_sdcache_wb_range(va, pa, len);
		break;
	}
}
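Example #3 refines the same idea instead of falling back wholesale: the partially covered first and last cache lines are written back and invalidated, and only the fully covered middle is invalidated. A condensed sketch of that edge handling, with dcache_inv() and dcache_wbinv() as hypothetical stand-ins for the machine-dependent primitives:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for cpu_dcache_inv_range()/cpu_dcache_wbinv_range(). */
extern void dcache_inv(uintptr_t va, size_t len);
extern void dcache_wbinv(uintptr_t va, size_t len);

/*
 * Invalidate [va, va + len) without losing dirty data in partially
 * covered lines: edge lines get write-back + invalidate, the fully
 * covered middle gets a plain invalidate.
 */
static void
safe_dcache_inv_range(uintptr_t va, size_t len, size_t line_size)
{
	const uintptr_t mask = line_size - 1;
	size_t skew = va & mask;

	if (skew) {				/* partial leading line */
		va -= skew;
		len += skew;
		dcache_wbinv(va, line_size);
		if (len <= line_size)
			return;
		va += line_size;
		len -= line_size;
	}
	skew = len & mask;			/* partial trailing line */
	len -= skew;
	if (len > 0)
		dcache_inv(va, len);
	if (skew)
		dcache_wbinv(va + len, line_size);
}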
Example #4
static __inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov;
	bus_size_t minlen, ioff;
	vaddr_t addr;

	for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
		/* Find the beginning iovec. */
		if (ioff >= iov->iov_len) {
			ioff -= iov->iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = iov->iov_len - ioff;
		if (len < minlen)
			minlen = len;

		addr = (vaddr_t) iov->iov_base;
		addr += ioff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (((addr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(addr, minlen);
			else
				cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr, minlen);
			break;
		}
		ioff = 0;
		len -= minlen;
	}
}
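The traversal here is the generic "offset into a buffer chain" pattern, which the mbuf variant in Example #7 repeats: skip whole elements until the offset lands inside one, then clamp each operation to whatever is left of the current element. Reduced to an array of plain buffers (struct buf and process() are hypothetical):

#include <stddef.h>

struct buf {
	char   *base;
	size_t  len;
};

/* Hypothetical per-chunk operation standing in for the cache sync. */
extern void process(char *p, size_t n);

/* Apply process() to len bytes starting offset bytes into the chain. */
static void
walk_chain(const struct buf *b, size_t nbufs, size_t offset, size_t len)
{
	for (size_t i = 0; i < nbufs && len != 0; i++) {
		if (offset >= b[i].len) {	/* still before the range */
			offset -= b[i].len;
			continue;
		}
		size_t chunk = b[i].len - offset;
		if (chunk > len)
			chunk = len;
		process(b[i].base + offset, chunk);
		offset = 0;
		len -= chunk;
	}
}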
Example #5
static int
aau_bzero(void *dst, int len, int flags)
{
	struct i80321_aau_softc *sc = aau_softc;
	i80321_aaudesc_t *desc;
	int ret;
	int csr;
	int descnb = 0;
	int tmplen = len;
	int to_nextpagedst;
	int min_hop;
	vm_paddr_t pa, tmppa;

	if (!sc)
		return (-1);
	mtx_lock_spin(&sc->mtx);
	if (sc->flags & BUSY) {
		mtx_unlock_spin(&sc->mtx);
		return (-1);
	}
	sc->flags |= BUSY;
	mtx_unlock_spin(&sc->mtx);
	desc = sc->aauring[0].desc;
	if (flags & IS_PHYSICAL) {
		desc->local_addr = (vm_paddr_t)dst;
		desc->next_desc = 0;
		desc->count = len;
		desc->descr_ctrl = (2 << 1) | (1U << 31); /* Fill, enable dest write. */
		bus_dmamap_sync(sc->dmatag, sc->aauring[0].map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		test_virt_addr(dst, len);
		if ((vm_offset_t)dst & (31))
			cpu_dcache_wb_range((vm_offset_t)dst & ~31, 32);
		if (((vm_offset_t)dst + len) & 31)
			cpu_dcache_wb_range(((vm_offset_t)dst + len) & ~31,
			    32);
		cpu_dcache_inv_range((vm_offset_t)dst, len);
		while (tmplen > 0) {
			pa = vtophys(dst);
			to_nextpagedst = ((vm_offset_t)dst & ~PAGE_MASK) +
			    PAGE_SIZE - (vm_offset_t)dst;
			while (to_nextpagedst < tmplen) {
				tmppa = vtophys((vm_offset_t)dst +
				    to_nextpagedst);
				if (tmppa != pa + to_nextpagedst)
					break;
				to_nextpagedst += PAGE_SIZE;
			}
			min_hop = to_nextpagedst;
			if (min_hop < 64) {
				tmplen -= min_hop;
				bzero(dst, min_hop);
				cpu_dcache_wbinv_range((vm_offset_t)dst,
				    min_hop);

				dst = (void *)((vm_offset_t)dst + min_hop);
				if (tmplen <= 0 && descnb > 0) {
					sc->aauring[descnb - 1].desc->next_desc
					    = 0;
					bus_dmamap_sync(sc->dmatag,
					    sc->aauring[descnb - 1].map,
					    BUS_DMASYNC_PREWRITE);
				}
				continue;
			}
			desc->local_addr = pa;
			desc->count = tmplen > min_hop ? min_hop : tmplen;
			desc->descr_ctrl = (2 << 1) | (1U << 31); /* Fill, enable dest write. */
			if (min_hop < tmplen) {
				tmplen -= min_hop;
				dst = (void *)((vm_offset_t)dst + min_hop);
			} else
				tmplen = 0;
			if (descnb + 1 >= AAU_RING_SIZE) {
				mtx_lock_spin(&sc->mtx);
				sc->flags &= ~BUSY;
				mtx_unlock_spin(&sc->mtx);
				return (-1);
			}
			if (tmplen > 0) {
				desc->next_desc = sc->aauring[descnb + 1].
				    phys_addr;
				bus_dmamap_sync(sc->dmatag,
				    sc->aauring[descnb].map,
				    BUS_DMASYNC_PREWRITE);
				desc = sc->aauring[descnb + 1].desc;
				descnb++;
			} else {
				desc->next_desc = 0;
				bus_dmamap_sync(sc->dmatag,
				    sc->aauring[descnb].map,
				    BUS_DMASYNC_PREWRITE);
			}
		}

	}
	AAU_REG_WRITE(sc, 0x0c /* Descriptor addr */,
	    sc->aauring[0].phys_addr);
	AAU_REG_WRITE(sc, 0 /* Control register */, 1 << 0 /* Start transfer */);
	/* Wait until the transfer is done. */
	while ((csr = AAU_REG_READ(sc, 0x4)) & (1 << 10))
		continue;
	if (csr & (1 << 5)) /* error */
		ret = -1;
	else
		ret = 0;
	/* Clear the interrupt. */
	AAU_REG_WRITE(sc, 0x4, csr);
	/* Stop the AAU. */
	AAU_REG_WRITE(sc, 0, 0);
	mtx_lock_spin(&sc->mtx);
	sc->flags &= ~BUSY;
	mtx_unlock_spin(&sc->mtx);
	return (ret);
}
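The inner vtophys() loop above is what lets a single descriptor span several pages: the segment keeps growing as long as the next page turns out to be physically adjacent to the run so far. Isolated into a helper (virt_to_phys() is a hypothetical stand-in for vtophys()/pmap_extract()):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (PAGE_SIZE - 1)

/* Hypothetical stand-in for vtophys()/pmap_extract(). */
extern uintptr_t virt_to_phys(uintptr_t va);

/*
 * How many bytes starting at va are physically contiguous, capped at
 * maxlen.  The first step only reaches the next page boundary; after
 * that the run grows a whole page at a time.
 */
static size_t
contig_run(uintptr_t va, size_t maxlen)
{
	uintptr_t pa = virt_to_phys(va);
	size_t run = PAGE_SIZE - (va & PAGE_MASK);

	while (run < maxlen && virt_to_phys(va + run) == pa + run)
		run += PAGE_SIZE;
	return run < maxlen ? run : maxlen;
}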
Example #6
static int
dma_memcpy(void *dst, void *src, int len, int flags)
{
	struct i80321_dma_softc *sc;
	i80321_dmadesc_t *desc;
	int ret;
	int csr;
	int descnb = 0;
	int tmplen = len;
	int to_nextpagesrc, to_nextpagedst;
	int min_hop;
	vm_paddr_t pa, pa2, tmppa;
	pmap_t pmap = vmspace_pmap(curthread->td_proc->p_vmspace);

	if (!softcs[0] || !softcs[1])
		return (-1);
	mtx_lock_spin(&softcs[0]->mtx);
	if (softcs[0]->flags & BUSY) {
		mtx_unlock_spin(&softcs[0]->mtx);
		mtx_lock_spin(&softcs[1]->mtx);
		if (softcs[1]->flags & BUSY) {
			mtx_unlock_spin(&softcs[1]->mtx);
			return (-1);
		}
		sc = softcs[1];
	} else
		sc = softcs[0];
	sc->flags |= BUSY;
	mtx_unlock_spin(&sc->mtx);
	desc = sc->dmaring[0].desc;
	if (flags & IS_PHYSICAL) {
		desc->next_desc = 0;
		desc->low_pciaddr = (vm_paddr_t)src;
		desc->high_pciaddr = 0;
		desc->local_addr = (vm_paddr_t)dst;
		desc->count = len;
		desc->descr_ctrl = 1 << 6; /* Local memory to local memory. */
		bus_dmamap_sync(sc->dmatag, 
		    sc->dmaring[0].map, 
		    BUS_DMASYNC_PREWRITE);
	} else {
		if (!virt_addr_is_valid(dst, len, 1, !(flags & DST_IS_USER)) || 
		    !virt_addr_is_valid(src, len, 0, !(flags & SRC_IS_USER))) {
			mtx_lock_spin(&sc->mtx);
			sc->flags &= ~BUSY;
			mtx_unlock_spin(&sc->mtx);
			return (-1);
		}
		cpu_dcache_wb_range((vm_offset_t)src, len);
		if ((vm_offset_t)dst & (31))
			cpu_dcache_wb_range((vm_offset_t)dst & ~31, 32);
		if (((vm_offset_t)dst + len) & 31)
			cpu_dcache_wb_range(((vm_offset_t)dst + len) & ~31,
			    32);
		cpu_dcache_inv_range((vm_offset_t)dst, len);
		while (tmplen > 0) {
			pa = (flags & SRC_IS_USER) ?
			    pmap_extract(pmap, (vm_offset_t)src) :
				    vtophys(src);
			pa2 = (flags & DST_IS_USER) ?
			    pmap_extract(pmap, (vm_offset_t)dst) :
				    vtophys(dst);
			to_nextpagesrc = ((vm_offset_t)src & ~PAGE_MASK) +
			    PAGE_SIZE - (vm_offset_t)src;
			to_nextpagedst = ((vm_offset_t)dst & ~PAGE_MASK) +
			    PAGE_SIZE - (vm_offset_t)dst;
			while (to_nextpagesrc < tmplen) {
				tmppa = (flags & SRC_IS_USER) ?
				    pmap_extract(pmap, (vm_offset_t)src +
				    to_nextpagesrc) :
					    vtophys((vm_offset_t)src +
						to_nextpagesrc);
				if (tmppa != pa + to_nextpagesrc)
					break;
				to_nextpagesrc += PAGE_SIZE;
			}
			while (to_nextpagedst < tmplen) {
				tmppa = (flags & DST_IS_USER) ?
				    pmap_extract(pmap, (vm_offset_t)dst +
				    to_nextpagedst) :
					    vtophys((vm_offset_t)dst +
						to_nextpagedst);
				if (tmppa != pa2 + to_nextpagedst)
					break;
				to_nextpagedst += PAGE_SIZE;
			}
			min_hop = to_nextpagedst > to_nextpagesrc ?
			    to_nextpagesrc : to_nextpagedst;
			if (min_hop < 64) {
				tmplen -= min_hop;
				memcpy(dst, src, min_hop);
				cpu_dcache_wbinv_range((vm_offset_t)dst,
				    min_hop);

				src = (void *)((vm_offset_t)src + min_hop);
				dst = (void *)((vm_offset_t)dst + min_hop);
				if (tmplen <= 0 && descnb > 0) {
					sc->dmaring[descnb - 1].desc->next_desc
					    = 0;
					bus_dmamap_sync(sc->dmatag, 
					    sc->dmaring[descnb - 1].map, 
					    BUS_DMASYNC_PREWRITE);
				}
				continue;
			}
			desc->low_pciaddr = pa;
			desc->high_pciaddr = 0;
			desc->local_addr = pa2;
			desc->count = tmplen > min_hop ? min_hop : tmplen;
			desc->descr_ctrl = 1 << 6;
			if (min_hop < tmplen) {
				tmplen -= min_hop;
				src = (void *)((vm_offset_t)src + min_hop);
				dst = (void *)((vm_offset_t)dst + min_hop);
			} else
				tmplen = 0;
			if (descnb + 1 >= DMA_RING_SIZE) {
				mtx_lock_spin(&sc->mtx);
				sc->flags &= ~BUSY;
				mtx_unlock_spin(&sc->mtx);
				return (-1);
			}
			if (tmplen > 0) {
				desc->next_desc = sc->dmaring[descnb + 1].
				    phys_addr;
				bus_dmamap_sync(sc->dmatag, 
				    sc->dmaring[descnb].map, 
				    BUS_DMASYNC_PREWRITE);
				desc = sc->dmaring[descnb + 1].desc;
				descnb++;
			} else {
				desc->next_desc = 0;
				bus_dmamap_sync(sc->dmatag,
				    sc->dmaring[descnb].map,
				    BUS_DMASYNC_PREWRITE);
			}
		}

	}
	DMA_REG_WRITE(sc, 4 /* Status register */,
	    DMA_REG_READ(sc, 4) | DMA_CLEAN_MASK);
	DMA_REG_WRITE(sc, 0x10 /* Descriptor addr */,
	    sc->dmaring[0].phys_addr);
	DMA_REG_WRITE(sc, 0 /* Control register */, 1 | 2 /* Start transfer */);
	/* Wait until the transfer is done. */
	while ((csr = DMA_REG_READ(sc, 0x4)) & (1 << 10))
		continue;
	if (csr & 0x2e) /* error */
		ret = -1;
	else
		ret = 0;
	DMA_REG_WRITE(sc, 0, 0);
	mtx_lock_spin(&sc->mtx);
	sc->flags &= ~BUSY;
	mtx_unlock_spin(&sc->mtx);
	return (ret);
}
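Both engines consume the same singly linked descriptor format: each descriptor except the last holds the physical address of its successor, and every descriptor is flushed with BUS_DMASYNC_PREWRITE before the chain head is handed to the hardware. The linking step, stripped to its essentials (struct desc, struct ring_slot, and flush_slot() are hypothetical):

#include <stdint.h>

/* Hypothetical descriptor layout as the DMA engine reads it. */
struct desc {
	uint32_t next_desc;	/* physical address of successor, 0 = last */
	uint32_t local_addr;
	uint32_t count;
	uint32_t descr_ctrl;
};

struct ring_slot {
	struct desc *desc;	/* CPU-visible mapping */
	uint32_t     phys;	/* address the engine uses */
};

/* Hypothetical stand-in for bus_dmamap_sync(..., BUS_DMASYNC_PREWRITE). */
extern void flush_slot(struct ring_slot *slot);

/* Link slot n to its successor (or terminate) and push it to memory. */
static void
chain_slot(struct ring_slot *ring, int n, int have_more)
{
	ring[n].desc->next_desc = have_more ? ring[n + 1].phys : 0;
	flush_slot(&ring[n]);
}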
Example #7
static __inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mbuf *m, *m0 = map->_dm_origbuf;
	bus_size_t minlen, moff;
	vaddr_t maddr;

	for (moff = offset, m = m0; m != NULL && len != 0;
	     m = m->m_next) {
		/* Find the beginning mbuf. */
		if (moff >= m->m_len) {
			moff -= m->m_len;
			continue;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = m->m_len - moff;
		if (len < minlen)
			minlen = len;

		maddr = mtod(m, vaddr_t);
		maddr += moff;

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			/* if (! M_ROMAP(m)) */{
				cpu_dcache_wbinv_range(maddr, minlen);
				break;
			}
			/* else FALLTHROUGH */

		case BUS_DMASYNC_PREREAD:
			if (((maddr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(maddr, minlen);
			else
				cpu_dcache_wbinv_range(maddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			/* if (! M_ROMAP(m)) */
				cpu_dcache_wb_range(maddr, minlen);
			break;
		}
		moff = 0;
		len -= minlen;
	}
}
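The commented-out M_ROMAP tests are the optimization the block comment describes. If a predicate for read-only mappings existed, the switch would collapse as below: no dirty lines can exist for a read-only range, so the write-backs can be skipped and PREREAD|PREWRITE falls through to the invalidate path. A hypothetical shape, with mbuf_mapping_is_readonly() as the assumed predicate:

/*
 * Hypothetical rewrite of the switch above, assuming a predicate
 * mbuf_mapping_is_readonly() (the M_ROMAP the comments allude to).
 */
static void
sync_mbuf_chunk(struct mbuf *m, vaddr_t maddr, bus_size_t minlen, int ops)
{
	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		if (!mbuf_mapping_is_readonly(m)) {
			cpu_dcache_wbinv_range(maddr, minlen);
			break;
		}
		/* Read-only: nothing dirty, invalidating suffices. */
		/* FALLTHROUGH */
	case BUS_DMASYNC_PREREAD:
		if (((maddr | minlen) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range(maddr, minlen);
		else
			cpu_dcache_wbinv_range(maddr, minlen);
		break;
	case BUS_DMASYNC_PREWRITE:
		if (!mbuf_mapping_is_readonly(m))
			cpu_dcache_wb_range(maddr, minlen);
		break;
	}
}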