Example 1
void
dmac3_start(struct dmac3_softc *sc, vaddr_t addr, int len, int direction)
{
	struct dmac3reg *reg = sc->sc_reg;
	paddr_t pa;
	vaddr_t start, end, v;
	volatile uint32_t *p;

	if (reg->csr & DMAC3_CSR_ENABLE)
		dmac3_reset(sc);

	/*
	 * Build the controller's page map: one entry per page of the
	 * buffer, each written back and invalidated from the D-cache
	 * before the device touches it.
	 */
	start = mips_trunc_page(addr);
	end   = mips_round_page(addr + len);
	p = sc->sc_dmamap;
	for (v = start; v < end; v += PAGE_SIZE) {
		pa = kvtophys(v);
		mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE);
		*p++ = 0;
		*p++ = (pa >> PGSHIFT) | 0xc0000000;
	}
	/* Terminate the page map. */
	*p++ = 0;
	*p++ = 0x003fffff;

	/* Rebase to the in-page offset within the controller's DMA window. */
	addr &= PGOFSET;
	addr += sc->sc_dmaaddr;

	/* Program the transfer and enable the channel. */
	reg->len = len;
	reg->addr = addr;
	reg->intr = DMAC3_INTR_EOPIE | DMAC3_INTR_INTEN;
	reg->csr = DMAC3_CSR_ENABLE | direction | BURST_MODE | APAD_MODE;
}
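
A minimal caller sketch for dmac3_start(): it takes a kernel virtual
address, a byte count, and a direction flag, builds the per-page map, and
kicks the channel.  DMAC3_CSR_SEND below is a hypothetical stand-in; the
real direction flags are defined in the DMAC3 register header.

static void
example_dmac3_send(struct dmac3_softc *sc, void *buf, int len)
{
	/* Memory-to-device transfer of 'len' bytes starting at 'buf'. */
	dmac3_start(sc, (vaddr_t)buf, len, DMAC3_CSR_SEND);
}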
Example 2
int
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	int ret;
	jmp_buf jb;
	void *prev_jb;

	prev_jb = kdb_jmpbuf(jb);
	ret = setjmp(jb);

	if (ret == 0) {
		/*
		 * 'addr' could be a memory-mapped I/O address.  Try to
		 * do an atomic load/store in units of the size
		 * requested.  size == 8 is only atomic on a 64-bit or
		 * n32 kernel.
		 */
		if ((size == 2 || size == 4 || size == 8) &&
		    ((addr & (size - 1)) == 0) &&
		    (((vm_offset_t)data & (size - 1)) == 0)) {
			switch (size) {
			case 2:
				*(uint16_t *)addr = *(uint16_t *)data;
				break;
			case 4:
				*(uint32_t *)addr = *(uint32_t *)data;
				break;
			case 8:
				*(uint64_t *)addr = *(uint64_t *)data;
				break;
			}
		} else {
			char *dst;
			size_t len = size;

			dst = (char *)addr;
			while (len-- > 0)
				*dst++ = *data++;
		}

		mips_icache_sync_range((db_addr_t) addr, size);
		mips_dcache_wbinv_range((db_addr_t) addr, size);
	}
	(void)kdb_jmpbuf(prev_jb);
	return (ret);
}
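
A usage sketch for the aligned fast path above (the register address is
made up): because 'addr' may be memory-mapped I/O, a properly aligned 2-,
4-, or 8-byte request is issued as a single store of that width rather
than a byte loop, which is what device registers generally require.

uint32_t val = 0x1;

/* One 32-bit store to a hypothetical 4-byte-aligned MMIO address. */
(void)db_write_bytes((vm_offset_t)0xb8000400, sizeof(val), (char *)&val);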
Example 3
/*
 * Synchronize an ISA DMA map.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just do the normal sync operation
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	/*
	 * Flush the data cache for PREREAD.  The flush writes dirty
	 * line(s) back to memory and invalidates them, so stale cache
	 * contents cannot mask the data the device is about to
	 * deposit.
	 *
	 * For PREWRITE, copy the original buffer to the bounce buffer
	 * and flush the data cache, so that the contents of the data
	 * buffer in memory reflect reality.
	 *
	 * For POSTREAD, copy the bounce buffer back to the original
	 * buffer.
	 */

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
			wbflush();
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	wbflush();

	/* XXXJRT */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
		mips_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf + offset,
		    len);
}
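
For context, callers bracket each transfer with one PRE sync and one POST
sync; the panic above enforces that the two are never mixed in a single
call.  A sketch with hypothetical tag and map variables:

/* Before starting DMA: make memory match what the CPU has written. */
isadma_bounce_dmamap_sync(t, map, 0, map->dm_mapsize,
    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

/* ... program the ISA device and wait for its completion interrupt ... */

/* After completion: make the CPU's view match what the device wrote. */
isadma_bounce_dmamap_sync(t, map, 0, map->dm_mapsize,
    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);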
Example 4
/*
 * Implement uiomove(9) from physical memory using a combination
 * of the direct mapping and sf_bufs to reduce the creation and
 * destruction of ephemeral mappings.  
 */
int
uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
	struct sf_buf *sf;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_paddr_t pa;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));
	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		page_offset = offset & PAGE_MASK;
		cnt = ulmin(cnt, PAGE_SIZE - page_offset);
		m = ma[offset >> PAGE_SHIFT];
		pa = VM_PAGE_TO_PHYS(m);
		if (MIPS_DIRECT_MAPPABLE(pa)) {
			sf = NULL;
			cp = (char *)MIPS_PHYS_TO_DIRECT(pa) + page_offset;
			/*
			 * Flush all mappings of this page, the KSEG0
			 * alias first, so every cached copy is written
			 * back and the copy below operates on correct
			 * data.
			 */
			mips_dcache_wbinv_range((vm_offset_t)cp, cnt);
			pmap_flush_pvcache(m);
		} else {
			sf = sf_buf_alloc(m, 0);
			cp = (char *)sf_buf_kva(sf) + page_offset;
		}
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				if (sf != NULL)
					sf_buf_free(sf);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		if (sf != NULL)
			sf_buf_free(sf);
		else
			mips_dcache_wbinv_range((vm_offset_t)cp, cnt);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
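
The mapping strategy is the point of this routine: pages whose physical
addresses fall in the direct-mapped window are copied through a zero-cost
direct-map address, and only the remaining pages pay for an ephemeral
sf_buf mapping.  Calling it is a one-liner; a sketch assuming the caller
has already set up a busied page array 'ma', a byte offset 'off', a count
'n', and a struct uio 'uio':

int error = uiomove_fromphys(ma, off, n, uio);
if (error != 0)
	return (error);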
Example 5
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back cache
 * found in the MIPS-3/MIPS-4 CPUs available for the Algorithmics.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;

#ifdef DIAGNOSTIC
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %"PRIxPADDR 
			" (map size is %"PRIxPSIZE")",
				offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
	    && (ops & BUS_DMASYNC_PREWRITE)) {
		STAT_INCR(write_bounces);
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (cookie->id_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync: unknown buffer type %d\n",
			    cookie->id_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory or this isn't a
	 * PREREAD or PREWRITE, no cache flush is necessary.  Check to see
	 * if we need to bounce it.
	 */
	if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)
	    || (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		goto bounce_it;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	const bool useindex = (!VMSPACE_IS_KERNEL_P(map->_dm_vmspace)
	    && map->_dm_vmspace != curproc->p_vmspace);

	bus_dma_segment_t *seg = map->dm_segs;
	bus_dma_segment_t * const lastseg = seg + map->dm_nsegs;
	/*
	 * Skip segments until offset falls within a segment.
	 */
	for (; offset >= seg->ds_len; seg++) {
		offset -= seg->ds_len;
	}

	for (; seg < lastseg && len != 0; seg++, offset = 0, len -= minlen) {
		/*
		 * Now at the first segment to sync; nail each segment until we
		 * have exhausted the length.
		 */
		vaddr_t vaddr = seg->_ds_vaddr + offset;
		minlen = ulmin(len, seg->ds_len - offset);

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %p "
		    "(0x%"PRIxBUSADDR"+%"PRIxBUSADDR
		    ", 0x%"PRIxBUSADDR"+0x%"PRIxBUSADDR
		    ") (olen = %"PRIxBUSADDR")...", seg,
		    vaddr - offset, offset,
		    vaddr - offset, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(vaddr, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(vaddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(vaddr, minlen);
#else
			mips_dcache_inv_range(vaddr, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(vaddr, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
	}

  bounce_it:
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	if ((ops & BUS_DMASYNC_POSTREAD) == 0
	    || cookie == NULL
	    || (cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0)
		return;

	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (cookie->id_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset,
		    (char *)cookie->id_bouncebuf + offset, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, 
		    (char *)cookie->id_bouncebuf + offset);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
		    cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: unknown buffer type %d\n",
		    cookie->id_buftype);
		break;
#endif
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	;
}
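
The Hit-versus-Index decision above is worth restating on its own.  A
minimal sketch as a hypothetical predicate (not part of the source): Hit
ops act on a virtual address only if it is currently present in the
cache, so they require the buffer's mapping to be visible in the running
context; any other vmspace must fall back to Index ops, which walk the
cache by set and way.

static inline bool
dma_can_use_hit_ops(bus_dmamap_t map)
{
	/* Kernel mappings are always visible; so is the current vmspace. */
	return VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace;
}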
Example 6
File: bus.c Project: ryo/netbsd-src
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4000 version.
 */
void
_bus_dmamap_sync_r4k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r4k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
		      offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad length");
#endif

	/*
	 * The R4000 cache is virtually-indexed, write-back.  This means
	 * we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or if it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(addr + offset, minlen);
#else
			mips_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
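
The offset and length bookkeeping in that loop is the error-prone part,
so here it is extracted into a self-contained userland program with
made-up segment sizes; running it prints which bytes of which segment
would be flushed (segment 1, 0x400 bytes at offset 0x800).

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	size_t seg_len[] = { 0x1000, 0x2000 };	/* hypothetical map */
	size_t offset = 0x1800, len = 0x400, minlen;

	for (size_t i = 0; i < 2 && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= seg_len[i]) {
			offset -= seg_len[i];
			continue;
		}
		/* Clamp to what remains of this segment. */
		minlen = len < seg_len[i] - offset ?
		    len : seg_len[i] - offset;
		printf("segment %zu: sync 0x%zx bytes at offset 0x%zx\n",
		    i, minlen, offset);
		offset = 0;
		len -= minlen;
	}
	return 0;
}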
Example 7
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	vaddr_t vaddr, start, end, preboundary, firstboundary, lastboundary;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *      PREREAD -- Invalidate D-cache.  Note we might have
	 *      to also write-back here if we have to use an Index
	 *      op, or if the buffer start/end is not cache-line aligned.
	 *
	 *      PREWRITE -- Write-back the D-cache.  If we have to use
	 *      an Index op, we also have to invalidate.  Note that if
	 *      we are doing PREREAD|PREWRITE, we can collapse everything
	 *      into a single op.
	 *
	 *      POSTREAD -- Nothing.
	 *
	 *      POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & EWS4800MIPS_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
		map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		vaddr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx+%lx, 0x%lx+0x%lx) (olen = %ld)...", i,
		    vaddr, offset, vaddr, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(vaddr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		start = vaddr + offset;
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(start, minlen);
			break;

		case BUS_DMASYNC_PREREAD: {
			const struct mips_cache_info * const mci = &mips_cache_info;
			end = start + minlen;
			preboundary = start & ~mci->mci_dcache_align_mask;
			firstboundary = (start + mci->mci_dcache_align_mask)
			    & ~mci->mci_dcache_align_mask;
			lastboundary = end & ~mci->mci_dcache_align_mask;
			if (preboundary < start && preboundary < lastboundary)
				mips_dcache_wbinv_range(preboundary,
				    mci->mci_dcache_align);
			if (firstboundary < lastboundary)
				mips_dcache_inv_range(firstboundary,
				    lastboundary - firstboundary);
			if (lastboundary < end)
				mips_dcache_wbinv_range(lastboundary,
				    mci->mci_dcache_align);
			break;
		}

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(start, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
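
This is the only variant in the collection whose PREREAD path avoids
writing back cache lines the buffer fully covers.  A self-contained
recomputation of its boundary arithmetic, assuming a 32-byte cache line
and a deliberately misaligned buffer:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t mask  = 0x1f;			  /* 32-byte line (assumed) */
	uintptr_t start = 0x1010, end = start + 0x30;
	uintptr_t pre   = start & ~mask;	  /* 0x1000 */
	uintptr_t first = (start + mask) & ~mask; /* 0x1020 */
	uintptr_t last  = end & ~mask;		  /* 0x1040 */

	/* Partial head line: write back + invalidate to keep neighbors. */
	if (pre < start && pre < last)
		printf("wbinv 0x%lx..0x%lx\n",
		    (unsigned long)pre, (unsigned long)(pre + mask));
	/* Fully covered middle: a plain invalidate is safe and cheaper. */
	if (first < last)
		printf("inv   0x%lx..0x%lx\n",
		    (unsigned long)first, (unsigned long)(last - 1));
	/* Partial tail line: write back + invalidate again. */
	if (last < end)
		printf("wbinv 0x%lx..0x%lx\n",
		    (unsigned long)last, (unsigned long)(last + mask));
	return 0;
}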
Example 8
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_hpcmips_bd_map_sync(bus_dma_tag_t t, bus_dmamap_t mapx, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct bus_dmamap_hpcmips *map = (struct bus_dmamap_hpcmips *)mapx;
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_hpcmips_bd_map_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->bdm.dm_mapsize)
		panic("_hpcmips_bd_map_sync: bad offset %lu (map size is %lu)",
		    offset, map->bdm.dm_mapsize);
	if (len == 0 || (offset + len) > map->bdm.dm_mapsize)
		panic("_hpcmips_bd_map_sync: bad length");
#endif

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & HPCMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Flush data cache for PREWRITE, so that the contents of
	 * the data buffer in memory reflect reality.
	 *
	 * Given the test above, we know we're doing one of these
	 * two operations, so no additional tests are necessary.
	 */

	/*
	 * The R2000 and R3000 have a physically indexed
	 * cache.  Loop through the DMA segments, looking
	 * for the appropriate offset, and flush the D-cache
	 * at that physical address.
	 *
	 * The R4000 has a virtually indexed primary data cache.  We
	 * do the same loop, instead using the virtual address stashed
	 * away in the segments when the map was loaded.
	 */
	for (i = 0; i < map->bdm.dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->bdm.dm_segs[i].ds_len) {
			offset -= map->bdm.dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->bdm.dm_segs[i].ds_len - offset ?
		    len : map->bdm.dm_segs[i].ds_len - offset;

		if (CPUISMIPS3)
			addr = map->_dm_segs[i]._ds_vaddr;
		else
			addr = map->bdm.dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("_hpcmips_bd_map_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		if (CPUISMIPS3)
			mips_dcache_wbinv_range(addr + offset, minlen);
		else {
			/*
			 * We can't have a TLB miss; use KSEG0.
			 */
			mips_dcache_wbinv_range(
				MIPS_PHYS_TO_KSEG0(map->bdm.dm_segs[i].ds_addr
				    + offset),
				minlen);
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
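
The MIPS-1 branch above works because KSEG0 is a fixed, untranslated
window: physical addresses 0x00000000-0x1fffffff appear, cached, at
virtual 0x80000000, so a physical segment address becomes a flushable
virtual address with no TLB involvement.  A sketch of that conversion
(re-derived here with hypothetical names; the kernel's own macro is
MIPS_PHYS_TO_KSEG0()):

#define EX_MIPS_KSEG0_START	0x80000000UL
#define EX_PHYS_TO_KSEG0(pa)	((unsigned long)(pa) | EX_MIPS_KSEG0_START)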