/*ARGSUSED1*/
int
cachectl(struct proc *p, int req, vaddr_t addr, int len)
{
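	/*
	 * User-requested cache control: flush and/or invalidate the
	 * data and instruction caches over a range of the process's
	 * address space, or over the whole cache.
	 */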
	int error = 0;

#if defined(M68040) || defined(M68060)
	if (mmutype <= MMU_68040) {	/* 68040 or 68060 */
		int inc = 0;
		int doall = 0;
		paddr_t pa = 0;
		vaddr_t end = 0;
#ifdef COMPAT_HPUX
		extern struct emul emul_hpux;

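		/*
		 * Under HP-UX emulation, any length other than one
		 * cache line (16 bytes) or one page means operating
		 * on the whole cache.
		 */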
		if ((p->p_emul == &emul_hpux) &&
		    len != 16 && len != NBPG)
			doall = 1;
#endif

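		/*
		 * With no address, or with a range so large that a
		 * line-by-line walk would cost more than whole-cache
		 * operations, operate on everything.  Purge requests
		 * are exempt from the size test: purging the whole
		 * cache would discard unrelated dirty data, which is
		 * also why a forced whole-cache purge is downgraded
		 * to a flush below.
		 */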
		if (addr == 0 ||
		    ((req & ~CC_EXTPURGE) != CC_PURGE && len > 2*NBPG))
			doall = 1;

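		/*
		 * Pick the stepping granularity: 16-byte cache lines
		 * for short ranges, whole pages otherwise, and align
		 * the start address to match.
		 */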
		if (!doall) {
			end = addr + len;
			if (len <= 1024) {
				addr = addr & ~0xF;
				inc = 16;
			} else {
				addr = addr & ~PGOFSET;
				inc = NBPG;
			}
		}
		do {
			/*
			 * Convert to a physical address if needed.
			 * If the translation fails, perform the
			 * operation on the entire cache instead
			 * (XXX is this a rational thing to do?).
			 */
			if (!doall &&
			    (pa == 0 || ((int)addr & PGOFSET) == 0)) {
				if (pmap_extract(
				    p->p_vmspace->vm_map.pmap,
				    addr, &pa) == FALSE)
					doall = 1;
			}
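			/*
			 * The cache-op macros wrap the '040 cpush/cinv
			 * instructions: DCFx flushes (push dirty lines,
			 * then invalidate), DCPx/ICPx purge (invalidate
			 * without pushing); the suffix selects line (L),
			 * page (P) or all (A) scope.
			 */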
			switch (req) {
			case CC_EXTPURGE|CC_IPURGE:
			case CC_IPURGE:
				if (doall) {
					DCFA();
					ICPA();
				} else if (inc == 16) {
					DCFL(pa);
					ICPL(pa);
				} else if (inc == NBPG) {
					DCFP(pa);
					ICPP(pa);
				}
				break;

			case CC_EXTPURGE|CC_PURGE:
			case CC_PURGE:
				if (doall)
					DCFA();	/* note: flush not purge */
				else if (inc == 16)
					DCPL(pa);
				else if (inc == NBPG)
					DCPP(pa);
				break;

			case CC_EXTPURGE|CC_FLUSH:
			case CC_FLUSH:
				if (doall)
					DCFA();
				else if (inc == 16)
					DCFL(pa);
				else if (inc == NBPG)
					DCFP(pa);
				break;

			default:
				error = EINVAL;
				break;
			}
			if (doall)
				break;
			pa += inc;
			addr += inc;
		} while (addr < end);
		return (error);
	}
#endif
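	/*
	 * 68020/68030: these caches cannot be operated on by line or
	 * page, so invalidate them wholesale; a physically-addressed
	 * external cache, if configured, is invalidated as well.
	 */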
	switch (req) {
	case CC_EXTPURGE|CC_PURGE:
	case CC_EXTPURGE|CC_FLUSH:
#if defined(CACHE_HAVE_PAC)
		if (ectype == EC_PHYS)
			PCIA();
		/* FALLTHROUGH */
#endif
	case CC_PURGE:
	case CC_FLUSH:
		DCIU();
		break;
	case CC_EXTPURGE|CC_IPURGE:
#if defined(CACHE_HAVE_PAC)
		if (ectype == EC_PHYS)
			PCIA();
		else
#endif
		DCIU();
		/* FALLTHROUGH */
	case CC_IPURGE:
		ICIA();
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
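
/*
 * Push and/or invalidate the CPU data cache over the physical ranges
 * covered by a DMA map, ahead of the DMA transfer itself.
 */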
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#if defined(M68040) || defined(M68060)
	bus_addr_t p, e, ps, pe;
	bus_size_t seglen;
	bus_dma_segment_t *seg;
	int i;
#endif

#if defined(M68020) || defined(M68030)
#if defined(M68040) || defined(M68060)
	if (cputype == CPU_68020 || cputype == CPU_68030)
#endif
		/* assume no L2 physical cache */
		return;
#endif

#if defined(M68040) || defined(M68060)
	/* If the whole DMA map is uncached, do nothing. */
	if ((map->_dm_flags & BUS_DMA_COHERENT) != 0)
		return;

	/* Short-circuit for unsupported `ops' */
	if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0)
		return;
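	/*
	 * (The POST ops need no work in this implementation: all
	 * cache maintenance is done before the transfer starts.)
	 */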

	/*
	 * flush/purge the cache.
	 */
	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		seg = &map->dm_segs[i];
		if (seg->ds_len <= offset) {
			/* Segment irrelevant - before requested offset */
			offset -= seg->ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		seglen = seg->ds_len - offset;
		if (seglen > len)
			seglen = len;

		/* Skip cache-inhibited segments, but still consume them */
		if ((seg->_ds_flags & BUS_DMA_COHERENT) != 0) {
			offset = 0;
			len -= seglen;
			continue;
		}

		ps = seg->ds_addr + offset;
		pe = ps + seglen;

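		/*
		 * Work up from the smallest operation the hardware
		 * offers to the largest and back down: single 16-byte
		 * lines up to a 128-byte boundary, unrolled groups of
		 * eight lines up to a page boundary, whole pages, then
		 * groups and single lines again for the tail.
		 */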
		if (ops & BUS_DMASYNC_PREWRITE) {
			p = ps & ~CACHELINE_MASK;
			e = (pe + CACHELINE_MASK) & ~CACHELINE_MASK;

			/* flush single lines up to a 128-byte boundary */
			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/*
			 * Flush eight lines (128 bytes) per pass until
			 * p reaches a page boundary.  This may run a
			 * few lines past e, which is harmless for a
			 * flush.
			 */
			while ((p < e) && (p & PAGE_MASK) != 0) {
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush page */
			while (p + PAGE_SIZE <= e) {
				DCFP(p);
				p += PAGE_SIZE;
			}

			/* flush eight lines (128 bytes) per pass */
			while (p + CACHELINE_SIZE * 8 <= e) {
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush the remaining single lines */
			while (p < e) {
				DCFL(p);
				p += CACHELINE_SIZE;
			}
		}

		/*
		 * Normally, the `PREREAD' flag instructs us to purge the
		 * cache for the specified offset and length. However, if
		 * the offset/length is not aligned to a cacheline boundary,
		 * we may end up purging some legitimate data from the
		 * start/end of the cache. In such a case, *flush* the
		 * cachelines at the start and end of the required region.
		 */
		else if (ops & BUS_DMASYNC_PREREAD) {
			/* flush cacheline on start boundary */
			if (ps & CACHELINE_MASK) {
				DCFL(ps & ~CACHELINE_MASK);
			}

			p = (ps + CACHELINE_MASK) & ~CACHELINE_MASK;
			e = pe & ~CACHELINE_MASK;

			/* purge single lines up to a 128-byte boundary */
			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/*
			 * Purge eight lines (128 bytes) per pass until
			 * p reaches a page boundary.  Unlike a flush,
			 * a purge must never run past e (that would
			 * discard unrelated dirty data), so stop while
			 * at least eight full lines remain and let the
			 * tail loops below finish the job.
			 */
			while ((p & PAGE_MASK) != 0 &&
			    p + CACHELINE_SIZE * 8 <= e) {
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge page */
			while (p + PAGE_SIZE <= e) {
				DCPP(p);
				p += PAGE_SIZE;
			}

			/* purge eight lines (128 bytes) per pass */
			while (p + CACHELINE_SIZE * 8 <= e) {
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge the remaining single lines */
			while (p < e) {
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* flush cacheline on end boundary */
			if (p < pe) {
				DCFL(p);
			}
		}
		offset = 0;
		len -= seglen;
	}
#endif	/* defined(M68040) || defined(M68060) */
}