Example #1
void
tx3920_icache_sync_range_16wb(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile(".set push; .set mips2; sync; .set pop");

	tx3920_icache_do_inv_16(va, eva);
}
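The round_line/trunc_line helpers used above are not part of the listing; they simply align the range to I-cache line boundaries. A minimal sketch of what such macros could look like for the 16-byte line size implied by the _16wb suffix (the definitions in the actual source file may differ):

/* Hypothetical helpers: align a range to 16-byte cache lines. */
#define	trunc_line(va)	((va) & ~(vaddr_t)15)		/* round start down */
#define	round_line(va)	(((va) + 15) & ~(vaddr_t)15)	/* round end up */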
Example #2
void
r10k_icache_sync_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 64;
	}
}
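The loop above walks the range one 64-byte R10000 I-cache line at a time; cache_op_r4k_line boils down to a single MIPS "cache" instruction on the line containing va. A hedged sketch of such a wrapper (the macro in the real cache_r4k.h header may be written differently):

/*
 * Sketch only: issue one "cache" instruction with operation `op' on the
 * cache line containing `va'.  `op' must be a compile-time constant
 * because of the "i" constraint.
 */
#define	cache_op_r4k_line(va, op)					\
	__asm volatile(							\
		".set push\n\t"						\
		".set noreorder\n\t"					\
		"cache %1, 0(%0)\n\t"					\
		".set pop"						\
		:							\
		: "r" (va), "i" (op)					\
		: "memory")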
Example #3
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}
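cache_r4k_op_32lines_16 covers 32 consecutive 16-byte lines (512 bytes) per call; the real header hand-unrolls the 32 "cache" instructions to keep loop overhead out of the hot path. A behaviourally equivalent sketch written as a plain loop, reusing the cache_op_r4k_line sketch above:

/*
 * Sketch only: operate on 32 consecutive 16-byte lines starting at `va'.
 * The real macro unrolls these 32 operations by hand; the loop here is
 * just to keep the illustration short.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
	do {								\
		for (vaddr_t _lva = (va); _lva < (va) + 32 * 16;	\
		    _lva += 16)						\
			cache_op_r4k_line(_lva, (op));			\
	} while (/*CONSTCOND*/ 0)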
Example #4
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back cache
 * found in the MIPS-3/MIPS-4 CPUs available for the Algorithmics.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;

#ifdef DIAGNOSTIC
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %"PRIxPADDR 
			" (map size is %"PRIxPSIZE")",
				offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
	    && (ops & BUS_DMASYNC_PREWRITE)) {
		STAT_INCR(write_bounces);
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (cookie->id_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync: unknown buffer type %d\n",
			    cookie->id_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory or this isn't a
	 * PREREAD or PREWRITE, no cache flush is necessary.  Check to see
	 * if we need to bounce it.
	 */
	if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)
	    || (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		goto bounce_it;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	const bool useindex = (!VMSPACE_IS_KERNEL_P(map->_dm_vmspace)
	    && map->_dm_vmspace != curproc->p_vmspace);

	bus_dma_segment_t *seg = map->dm_segs;
	bus_dma_segment_t * const lastseg = seg + map->dm_nsegs;
	/*
	 * Skip segments until the offset is within a segment.
	 */
	for (; offset >= seg->ds_len; seg++) {
		offset -= seg->ds_len;
	}
		
	for (; seg < lastseg && len != 0; seg++, offset = 0, len -= minlen) {
		/*
		 * Now at the first segment to sync; nail each segment until we
		 * have exhausted the length.
		 */
		vaddr_t vaddr = seg->_ds_vaddr + offset;
		minlen = ulmin(len, seg->ds_len - offset);

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %p "
		    "(0x%"PRIxBUSADDR"+%"PRIxBUSADDR
		    ", 0x%"PRIxBUSADDR"+0x%"PRIxBUSADDR
		    ") (olen = %"PRIxBUSADDR")...", seg,
		    vaddr - offset, offset,
		    vaddr - offset, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(vaddr, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(vaddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(vaddr, minlen);
#else
			mips_dcache_inv_range(vaddr, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(vaddr, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
	}

  bounce_it:
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	if ((ops & BUS_DMASYNC_POSTREAD) == 0
	    || cookie == NULL
	    || (cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0)
		return;

	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (cookie->id_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset,
		    (char *)cookie->id_bouncebuf + offset, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, 
		    (char *)cookie->id_bouncebuf + offset);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
		    cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: unknown buffer type %d\n",
		    cookie->id_buftype);
		break;
#endif
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	;
}
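Drivers never call this backend directly; they reach it through the machine-independent bus_dmamap_sync(9) entry point. A hypothetical driver fragment showing the usual bracketing of a device-to-memory transfer (the softc fields and rx_start_hw() are invented placeholders, not part of the code above):

#include <sys/bus.h>

/* Hypothetical softc; only the fields used below. */
struct example_softc {
	bus_dma_tag_t	sc_dmat;	/* DMA tag for this bus */
	bus_dmamap_t	sc_rxmap;	/* already created and loaded */
};

void rx_start_hw(struct example_softc *);	/* hypothetical hardware kick */

static void
example_rx(struct example_softc *sc, bus_size_t len)
{
	/* Hand the buffer to the device: PREREAD before the DMA starts. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, 0, len,
	    BUS_DMASYNC_PREREAD);

	rx_start_hw(sc);		/* hypothetical: start the transfer */

	/* ... once the device signals completion ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, 0, len,
	    BUS_DMASYNC_POSTREAD);

	/* The CPU may now safely read the freshly DMA'd data. */
}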
Example #5
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4000 version.
 */
void
_bus_dmamap_sync_r4k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r4k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
		      offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad length");
#endif

	/*
	 * The R4000 cache is virtually-indexed, write-back.  This means
	 * we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or if it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(addr + offset, minlen);
#else
			mips_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
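The segment walk that both backends share is the fiddly part: offset is first consumed by the segments that lie entirely before the sync window, and once the first synced segment is reached it drops to zero while len shrinks by minlen each round. A small standalone sketch of just that bookkeeping (no cache ops, hypothetical seg_t type) that compiles and runs as ordinary userland C:

#include <stdio.h>

/* Hypothetical stripped-down segment type, just for the walk. */
typedef struct { unsigned long ds_len; } seg_t;

static void
walk(seg_t *segs, int nsegs, unsigned long offset, unsigned long len)
{
	for (int i = 0; i < nsegs && len != 0; i++) {
		if (offset >= segs[i].ds_len) {	/* before the window: skip */
			offset -= segs[i].ds_len;
			continue;
		}
		unsigned long minlen = len < segs[i].ds_len - offset ?
		    len : segs[i].ds_len - offset;
		printf("seg %d: sync offset %lu, length %lu\n",
		    i, offset, minlen);
		offset = 0;		/* later segments start at 0 */
		len -= minlen;
	}
}

int
main(void)
{
	seg_t segs[] = { { 4096 }, { 4096 }, { 4096 } };
	walk(segs, 3, 6000, 3000);	/* window spans segments 1 and 2 */
	return 0;
}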
Example #6
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	vaddr_t vaddr, start, end, preboundary, firstboundary, lastboundary;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *      PREREAD -- Invalidate D-cache.  Note we might have
	 *      to also write-back here if we have to use an Index
	 *      op, or if the buffer start/end is not cache-line aligned.
	 *
	 *      PREWRITE -- Write-back the D-cache.  If we have to use
	 *      an Index op, we also have to invalidate.  Note that if
	 *      we are doing PREREAD|PREWRITE, we can collapse everything
	 *      into a single op.
	 *
	 *      POSTREAD -- Nothing.
	 *
	 *      POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & EWS4800MIPS_DMAMAP_COHERENT)
		return;

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
		map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		vaddr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx+%lx, 0x%lx+0x%lx) (olen = %ld)...", i,
		    vaddr, offset, vaddr, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(vaddr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		start = vaddr + offset;
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(start, minlen);
			break;

		case BUS_DMASYNC_PREREAD: {
			const struct mips_cache_info * const mci = &mips_cache_info;
			end = start + minlen;
			preboundary = start & ~mci->mci_dcache_align_mask;
			firstboundary = (start + mci->mci_dcache_align_mask)
			    & ~mci->mci_dcache_align_mask;
			lastboundary = end & ~mci->mci_dcache_align_mask;
			if (preboundary < start && preboundary < lastboundary)
				mips_dcache_wbinv_range(preboundary,
				    mci->mci_dcache_align);
			if (firstboundary < lastboundary)
				mips_dcache_inv_range(firstboundary,
				    lastboundary - firstboundary);
			if (lastboundary < end)
				mips_dcache_wbinv_range(lastboundary,
				    mci->mci_dcache_align);
			break;
		}

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(start, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
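The PREREAD case above is the only one that distinguishes partial from whole cache lines: a partly covered line at either end may hold unrelated dirty data, so it gets a write-back-and-invalidate, while the fully covered middle only needs an invalidate. A standalone sketch of that boundary arithmetic, assuming a 32-byte line where the real code takes the alignment from mips_cache_info:

#include <stdio.h>

#define	DCACHE_ALIGN		32UL		/* assumed line size */
#define	DCACHE_ALIGN_MASK	(DCACHE_ALIGN - 1)

/* Report which ranges would be wbinv'd vs. inv'd for [start, start+len). */
static void
preread_ranges(unsigned long start, unsigned long len)
{
	unsigned long end = start + len;
	unsigned long preboundary = start & ~DCACHE_ALIGN_MASK;
	unsigned long firstboundary = (start + DCACHE_ALIGN_MASK) &
	    ~DCACHE_ALIGN_MASK;
	unsigned long lastboundary = end & ~DCACHE_ALIGN_MASK;

	if (preboundary < start && preboundary < lastboundary)
		printf("wbinv partial head line at 0x%lx\n", preboundary);
	if (firstboundary < lastboundary)
		printf("inv whole lines 0x%lx..0x%lx\n",
		    firstboundary, lastboundary - 1);
	if (lastboundary < end)
		printf("wbinv partial tail line at 0x%lx\n", lastboundary);
}

int
main(void)
{
	preread_ranges(0x1010, 0x68);	/* unaligned head and tail */
	return 0;
}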