Example #1
/*
 * Flush the cpu cache lines backing each page, so that memory holds
 * current data before (or after) the gpu touches it.
 */
static inline void
drm_clflush_pages(struct vm_page *pages[], unsigned long num_pages)
{
	unsigned long i;

	for (i = 0; i < num_pages; i++)
		pmap_flush_page(VM_PAGE_TO_PHYS(pages[i]));
}
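
A typical caller flushes an object's backing pages around gpu access. A minimal sketch; the object structure and names below are invented for illustration and are not part of the source above:

/*
 * Hypothetical caller: "my_gem_obj", "pages" and "npages" are
 * illustrative names only.
 */
struct my_gem_obj {
	struct vm_page	**pages;	/* backing pages */
	unsigned long	  npages;	/* number of backing pages */
};

static void
my_gem_flush_object(struct my_gem_obj *obj)
{
	/* write cached contents back to memory before the gpu reads them */
	drm_clflush_pages(obj->pages, obj->npages);
}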
Example #2
/*
 * bus_dmamap_sync routine for intagp.
 *
 * This is tailored to the usage that drm with the GEM memory manager
 * makes of it: since intagp is for intel IGDs, it shouldn't be used for
 * anything other than gpu-based work. Essentially, for the intel GEM
 * driver we use bus_dma as an abstraction to convert our memory into a
 * gtt address and to deal with any cache incoherency that we create.
 *
 * We use the clflush instruction to deal with clearing the caches, since
 * our cache is physically indexed: we can map the page anywhere, flush it,
 * and it'll work. On i386 we need to check cpuid for the presence of
 * clflush; however, all cpus with a new enough intel GMCH should have it
 * (a minimal detection sketch follows this example).
 */
void
intagp_dma_sync(bus_dma_tag_t tag, bus_dmamap_t dmam,
    bus_addr_t offset, bus_size_t size, int ops)
{
	bus_dma_segment_t	*segp;
	struct sg_page_map	*spm;
	void			*addr;
	paddr_t	 		 pa;
	bus_addr_t		 poff, endoff, soff;

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("intagp_dma_sync: mix PRE and POST");
	if (offset >= dmam->dm_mapsize)
		panic("intagp_dma_sync: bad offset %lu (size = %lu)",
		    offset, dmam->dm_mapsize);
	if (size == 0 || (offset + size) > dmam->dm_mapsize)
		panic("intagp_dma_sync: bad length");
#endif /* DIAGNOSTIC */
	
	/* Coherent mappings need no sync. */
	if (dmam->_dm_flags & BUS_DMA_COHERENT)
		return;

	/*
	 * We need to clflush the object out of the cpu caches in all
	 * cases but postwrite:
	 *
	 * - postread: due to gpu incoherency we need to flush lines
	 * brought in by speculative reads (which are not written back
	 * on intel cpus).
	 *
	 * - preread: we need to flush data from the caches that will
	 * very soon be stale.
	 *
	 * - prewrite: we need to make sure our data hits memory before
	 * the gpu hoovers it up.
	 *
	 * The chipset may also need flushing, but that fits badly into
	 * bus_dma and is done in the driver instead.
	 */
	soff = trunc_page(offset);
	endoff = round_page(offset + size);
	if (ops & BUS_DMASYNC_POSTREAD || ops & BUS_DMASYNC_PREREAD ||
	    ops & BUS_DMASYNC_PREWRITE) {
		if (curcpu()->ci_cflushsz == 0) {
			/*
			 * No clflush: fall back to wbinvd() and flush
			 * the whole cache. We're MD code anyway, so
			 * calling it directly is ok.
			 */
			wbinvd();
			return;
		}

		/* order earlier memory writes before the cache-line flushes */
		mfence();
		spm = dmam->_dm_cookie;
		switch (spm->spm_buftype) {
		case BUS_BUFTYPE_LINEAR:
			/* contiguous kva: flush one page at a time */
			addr = spm->spm_origbuf + soff;
			while (soff < endoff) {
				pmap_flush_cache((vaddr_t)addr, PAGE_SIZE);
				soff += PAGE_SIZE;
				addr += PAGE_SIZE;
			}
			break;
		case BUS_BUFTYPE_RAW:
			segp = (bus_dma_segment_t *)spm->spm_origbuf;
			poff = 0;

			/* walk forward to the segment containing soff */
			while (poff < soff) {
				if (poff + segp->ds_len > soff)
					break;
				poff += segp->ds_len;
				segp++;
			}
			/* first time round may not start at seg beginning */
			pa = segp->ds_addr + (soff - poff);
			/* flush page by page, crossing segment boundaries */
			while (poff < endoff) {
				for (; pa < segp->ds_addr + segp->ds_len &&
				    poff < endoff; pa += PAGE_SIZE) {
					pmap_flush_page(pa);
					poff += PAGE_SIZE;
				}
				segp++;
				if (poff < endoff)
					pa = segp->ds_addr;
			}
			break;
		/* You do not want to load mbufs or uios onto a graphics card */
		case BUS_BUFTYPE_MBUF:
			/* FALLTHROUGH */
		case BUS_BUFTYPE_UIO:
			/* FALLTHROUGH */
		default:
			panic("intagp_dma_sync: bad buftype %d",
			    spm->spm_buftype);
		}
		/* make sure the flushes have completed before returning */
		mfence();
	}
}
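
The pre/post cases above map onto the usual bus_dma call order from a driver. A hedged sketch, assuming a hypothetical softc "sc" holding a dma tag and an already-loaded map; only bus_dmamap_sync(9) itself is the real API:

	/* cpu filled the buffer: push the data out before the gpu reads it */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* ... submit work, wait for the gpu to finish writing results ... */

	/* drop speculatively-read lines so the cpu sees the gpu's writes */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);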
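
The clflush detection mentioned in the header comment happens at cpu identify time in the kernel and lands in ci_cflushsz, but the cpuid bits are easy to show in isolation. A minimal userland sketch using gcc/clang's <cpuid.h>: cpuid leaf 1 sets EDX bit 19 (CLFSH) when clflush is present, and EBX bits 15:8 hold the flush line size in 8-byte units:

#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int cflushsz = 0;

	/* cpuid leaf 1: EDX bit 19 = CLFSH, EBX[15:8] * 8 = line size */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (edx & (1U << 19)))
		cflushsz = ((ebx >> 8) & 0xff) * 8;

	if (cflushsz == 0)
		printf("no clflush; would fall back to wbinvd()\n");
	else
		printf("clflush line size: %u bytes\n", cflushsz);
	return 0;
}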