Example #1
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		free(cookie, M_DMAMAP);
	} else
#endif
	STAT_INCR(destroys);	/* non-bounce maps only when bouncing is compiled in */
	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);
	free(map, M_DMAMAP);
}
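
The STAT_INCR() calls in this and the other kernel examples below count events; the excerpts never show the macro itself. As a minimal sketch, assuming an option name and counter prefix that are purely illustrative, the usual kernel pattern maps each statistic onto an event counter and compiles to nothing otherwise:

/*
 * Hypothetical sketch only: BUS_DMA_STATS and the bus_dma_ prefix are
 * assumptions, not the real option or counter names.
 */
#ifdef BUS_DMA_STATS
extern struct evcnt bus_dma_creates, bus_dma_destroys, bus_dma_unloads;
#define	STAT_INCR(name)	((bus_dma_ ## name).ev_count++)
#else
#define	STAT_INCR(name)	((void)0)	/* statistics compiled out */
#endif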
Example #2
/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				STAT_INCR(bounced_unloads);
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~_BUS_DMAMAP_COHERENT;
}
Example #3
int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
			bus_size_t size, int flags)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _isa_bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_isa_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}
Example #4
// call with *e locked
static store_page *_allocate_page(store_engine *e, unsigned int bucket,
        unsigned int free_bucket) {
    assert(!e->page_buckets[bucket] || e->page_buckets[bucket]->allocated == e->page_size);
    store_page *tmp = NULL;
    // if a specific free bucket was requested, check there first
    if (free_bucket != 0 && e->free_page_buckets[free_bucket] != NULL) {
        assert(e->page_free > 0);
        tmp = e->free_page_buckets[free_bucket];
        e->free_page_buckets[free_bucket] = tmp->next;
    }
    // failing that, try the global list.
    if (tmp == NULL && e->page_freelist != NULL) {
        tmp = e->page_freelist;
        e->page_freelist = tmp->next;
    }
    E_DEBUG("EXTSTORE: allocating new page\n");
    // page_freelist can be empty if the only free pages are specialized and
    // we didn't just request one.
    if (e->page_free > 0 && tmp != NULL) {
        tmp->next = e->page_buckets[bucket];
        e->page_buckets[bucket] = tmp;
        tmp->active = true;
        tmp->free = false;
        tmp->closed = false;
        tmp->version = _next_version(e);
        tmp->bucket = bucket;
        e->page_free--;
        STAT_INCR(e, page_allocs, 1);
    } else {
        extstore_run_maint(e);
    }
    if (tmp)
        E_DEBUG("EXTSTORE: got page %u\n", tmp->id);
    return tmp;
}
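
Unlike the kernel examples, this extstore excerpt calls a three-argument STAT_INCR(engine, field, amount). A plausible shape for such a macro, assuming a per-engine stats struct guarded by a mutex (the member names here are assumptions, not necessarily extstore's):

#include <pthread.h>

/*
 * Sketch matching the (e, stat, amount) call form above; e->stats_mutex
 * and e->stats.<field> are assumed members of store_engine.
 */
#define STAT_INCR(e, stat, amount) do {			\
	pthread_mutex_lock(&(e)->stats_mutex);		\
	(e)->stats.stat += (amount);			\
	pthread_mutex_unlock(&(e)->stats_mutex);	\
} while (0)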
Example #5
static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	size_t buflen, int buftype, int flags)
{
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int seg, error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = buftype;
	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags, &seg, true);
	if (error)
		return (error);

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->dm_nsegs = seg + 1;
	map->_dm_vmspace = vm;
	/*
	 * If our cache is coherent, then the map must be coherent too.
	 */
	if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
		map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
Example #6
static void
uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
{
	int free_list, color, pgflidx;
#ifdef DEBUG
	struct vm_page *tp;
#endif

	KASSERT(mutex_owned(&uvm_fpageqlock));

#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif

	free_list = uvm_page_lookup_freelist(pg);
	color = VM_PGCOLOR_BUCKET(pg);
	pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
	for (tp = LIST_FIRST(&uvm.page_free[
		free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
	     tp != NULL;
	     tp = LIST_NEXT(tp, pageq.list)) {
		if (tp == pg)
			break;
	}
	if (tp == NULL)
		panic("uvm_pglistalloc: page not on freelist");
#endif
	LIST_REMOVE(pg, pageq.list);	/* global */
	LIST_REMOVE(pg, listq.list);	/* cpu */
	uvmexp.free--;
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;
	VM_FREE_PAGE_TO_CPU(pg)->pages[pgflidx]--;
	pg->flags = PG_CLEAN;
	pg->pqflags = 0;
	pg->uobject = NULL;
	pg->uanon = NULL;
	TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
	STAT_INCR(uvm_pglistalloc_npages);
}
Example #7
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back cache
 * found in the MIPS-3/MIPS-4 CPUs available for the Algorithmics.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;

#ifdef DIAGNOSTIC
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %"PRIxPADDR 
			" (map size is %"PRIxPSIZE")",
				offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
	    && (ops & BUS_DMASYNC_PREWRITE)) {
		STAT_INCR(write_bounces);
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (cookie->id_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync: unknown buffer type %d\n",
			    cookie->id_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory or this isn't a
	 * PREREAD or PREWRITE, no cache flush is necessary.  Check to see
	 * if we need to bounce it.
	 */
	if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)
	    || (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		goto bounce_it;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	const bool useindex = (!VMSPACE_IS_KERNEL_P(map->_dm_vmspace)
	    && map->_dm_vmspace != curproc->p_vmspace);

	bus_dma_segment_t *seg = map->dm_segs;
	bus_dma_segment_t * const lastseg = seg + map->dm_nsegs;
	/*
	 * Skip segments until offset is within a segment.
	 */
	for (; offset >= seg->ds_len; seg++) {
		offset -= seg->ds_len;
	}
		
	for (; seg < lastseg && len != 0; seg++, offset = 0, len -= minlen) {
		/*
		 * Now at the first segment to sync; nail each segment until we
		 * have exhausted the length.
		 */
		vaddr_t vaddr = seg->_ds_vaddr + offset;
		minlen = ulmin(len, seg->ds_len - offset);

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %p "
		    "(0x%"PRIxBUSADDR"+%"PRIxBUSADDR
		    ", 0x%"PRIxBUSADDR"+0x%"PRIxBUSADDR
		    ") (olen = %"PRIxBUSADDR")...", seg,
		    vaddr - offset, offset,
		    vaddr - offset, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(vaddr, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(vaddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(vaddr, minlen);
#else
			mips_dcache_inv_range(vaddr, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(vaddr, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
	}

  bounce_it:
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	if ((ops & BUS_DMASYNC_POSTREAD) == 0
	    || cookie == NULL
	    || (cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0)
		return;

	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (cookie->id_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset,
		    (char *)cookie->id_bouncebuf + offset, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, 
		    (char *)cookie->id_bouncebuf + offset);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
		    cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: unknown buffer type %d\n",
		    cookie->id_buftype);
		break;
#endif
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	;	/* null statement keeps the bounce_it: label valid when bouncing is compiled out */
}
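
The PRE/POST pairing this function enforces is easiest to see from the caller's side. A hedged sketch of one device-read transfer, where struct mydev_softc, mydev_start_dma() and mydev_wait_dma() are placeholders rather than a real driver:

struct mydev_softc {
	bus_dma_tag_t	sc_dmat;
	bus_dmamap_t	sc_dmamap;
};

static void mydev_start_dma(struct mydev_softc *);	/* placeholder */
static void mydev_wait_dma(struct mydev_softc *);	/* placeholder */

static int
mydev_read(struct mydev_softc *sc, void *buf, bus_size_t len)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL, BUS_DMA_NOWAIT);
	if (error)
		return error;

	/* PREREAD: invalidate stale cache lines before the device writes. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_PREREAD);

	mydev_start_dma(sc);
	mydev_wait_dma(sc);

	/* POSTREAD: copy back from the bounce buffer, if one is in use. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	return 0;
}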
Example #8
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	int seg, i, error;
	bool first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &seg, first);
		first = false;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, uio, uio->uio_resid,
		    _BUS_DMA_BUFTYPE_UIO, flags);
	}
#endif
	return (error);
}
Example #9
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	int seg, error;
	struct mbuf *m;
	struct vmspace * vm = vmspace_kernel();
	bool first;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vm, flags, &seg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie * cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	return (error);
}
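
A typical consumer of the mbuf variant above is a NIC transmit path. A minimal sketch, assuming placeholder driver names (struct mynic_softc, mynic_set_desc()) that are not part of the example:

struct mynic_softc {
	bus_dma_tag_t	sc_dmat;
	bus_dmamap_t	sc_txmap;
};

/* Placeholder: program one hardware descriptor. */
static void mynic_set_desc(struct mynic_softc *, int, bus_addr_t, bus_size_t);

static int
mynic_encap(struct mynic_softc *sc, struct mbuf *m)
{
	bus_dmamap_t dmap = sc->sc_txmap;
	int error, i;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
	if (error)	/* real drivers also retry EFBIG after compacting the chain */
		return error;

	for (i = 0; i < dmap->dm_nsegs; i++)
		mynic_set_desc(sc, i, dmap->dm_segs[i].ds_addr,
		    dmap->dm_segs[i].ds_len);

	/* PREWRITE: flush the packet out of the cache before starting the device. */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	return 0;
}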
Example #10
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int seg, error;
	struct vmspace *vm;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    vm, flags, &seg, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		STAT_INCR(loads);

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		else if (MIPS_KSEG1_P(buf))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#ifdef _LP64
		else if (MIPS_XKPHYS_P((vaddr_t)buf)
		    && MIPS_XKPHYS_TO_CCA((vaddr_t)buf) == MIPS3_PG_TO_CCA(MIPS3_PG_UNCACHED))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#endif
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}
Example #11
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct mips_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;
	const int mallocflags = M_ZERO |
	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);

	int error = 0;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct mips_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP, mallocflags)) == NULL)
		return (ENOMEM);

	map = mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = t->_bounce_thresh;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_vmspace = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;
	size_t cookiesize;

	if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh)
		map->_dm_bounce_thresh = 0;
	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (map->_dm_bounce_thresh != 0)
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		return 0;
	}

	cookiesize = sizeof(struct mips_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP, mallocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct mips_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
 out:
	if (error)
		_bus_dmamap_destroy(t, map);
#else
	STAT_INCR(creates);
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

	return (error);
}
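
The "(nsegments - 1)" sizing above is the classic pre-C99 trailing-array idiom: the map struct already embeds one bus_dma_segment_t, so only the remaining segments need extra room. A self-contained illustration with stand-in types (the demo_* names are inventions for this sketch):

#include <stdlib.h>

typedef struct { unsigned long ds_addr, ds_len; } demo_segment_t;

struct demo_map {
	int		dm_nsegs;
	demo_segment_t	dm_segs[1];	/* grows past the end of the struct */
};

static struct demo_map *
demo_map_alloc(int nsegments)
{
	/* One segment lives inside the struct, hence nsegments - 1 extra. */
	return malloc(sizeof(struct demo_map) +
	    sizeof(demo_segment_t) * (nsegments - 1));
}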
Example #12
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			struct proc *p, int flags)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
			return (error);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}
Example #13
/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
	int flags)
{
	struct powerpc_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
Example #14
/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	struct powerpc_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
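
Examples #12 through #14 are variants of the same pattern. Example #12 decides up front, bouncing only when _isa_dma_check_buffer() reports pages above the bounce threshold; Examples #13 and #14 instead try the normal load first and fall back to a bounce buffer when it fails on a map whose cookie has ID_MIGHT_NEED_BOUNCE set. In all three, setting ID_IS_BOUNCING is the handshake with the sync routine, which copies between the caller's buffer and the bounce buffer at PREWRITE/POSTREAD time, just as the MIPS _bus_dmamap_sync() in Example #7 does with _BUS_DMA_IS_BOUNCING.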
Example #15
File: lcache.c  Project: kstephens/ll
ll_define_primitive_end


#if 1
#define _ll_lookup_op_check() ((void)0)
#endif

#ifdef _ll_lookup_w_cache
#undef _ll_lookup_w_cache
#endif

void _ll_lookup_w_cache (ll_lcache *c)
{
  if ( c ) {
  struct ll_lcache_elem *p = c->_elems;
  struct ll_lcache_elem *e = ll_lcache_end(c);
  ll_v isa = ll_RCVR_TYPE;
  ll_v op;
  ll_v op_version;
  _ll_lookup_op_check();

  op = ll_AR_OP;
  ll_assert_ref(op);
  op_version = ll_THIS_ISA(operation, op)->_version;

#if 1
#define STAT_INCR(N) ((void) 0)
#else
#define STAT_INCR(N) \
  ++ _ll_lcache_stat_global.N; \
  ++ c->_stat.N
#endif
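  /*
   * Statistics are compiled out by the "#if 1" above; change it to
   * "#if 0" to count into both the global and the per-cache stat
   * structs.  The two-statement expansion is safe only because every
   * STAT_INCR() use below is a standalone statement.
   */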

  STAT_INCR(_lookup_n);

  while ( p < e ) {
    STAT_INCR(_probe_n);

    if ( ll_EQ(p->_isa, isa) && ll_EQ(p->_op, op) ) {
      /* Has op been associated with any new types or methods? */
      if ( ll_NE(p->_version, op_version) ) {
	STAT_INCR(_op_changed_n);
        goto update_cache;
      }
      
      STAT_INCR(_hit_n);

      /* Update hit count. */
      /* Trim cache hits? */
      if ( (++ p->_hits) == 0 ) {
	struct ll_lcache_elem *x = c->_elems;

	STAT_INCR(_overflow_n);

	-- p->_hits;
        while ( x < e ) {
	  (x ++)->_hits >>= 2;
	}
      }

      /* Unload cache. */
      ll_AR_METH = p->_meth;
      ll_AR_TYPE = p->_type;
      ll_AR_TYPE_OFFSET = p->_off;

      /* Keep cache sorted by decreasing hits. */
      while ( p > c->_elems && p->_hits > (p - 1)->_hits ) {
      	struct ll_lcache_elem temp = *(p - 1);

	STAT_INCR(_reorder_n);

	*(p - 1) = *p;
	*p = temp;
	-- p;
      }
      
      return;
    } else if ( p->_op == 0 || ll_EQ(p->_op, ll_undef) ) {
      /* An empty cache slot was found, use it. */
fill_cache:
      p->_op = op;
      p->_isa = isa;
      p->_hits = 1;

update_cache:
      /* Keep track of the operation version. */
      p->_version = op_version;

      STAT_INCR(_miss_n);
      
      /* Do real lookup. */
      _ll_lookup();
      
      /* Fill cache entry. */
      p->_meth = ll_AR_METH;
      p->_type = ll_AR_TYPE;
      p->_off  = ll_AR_TYPE_OFFSET;

      /* Mark cache entry in-use. */
      if ( ! c->_flags ) {
	c->_flags |= 1;

	/* Only add inline C code. */
	if ( c->_file && c->_line ) {
	  /* Add to lcache list. */
	  ++ _ll_lcache_count;
	  c->_next = _ll_lcache_list;
	  _ll_lcache_list = c;
	}
      }

      return;
    }
    
    /* Try next slot. */
    ++ p;
  }
  
  /* Reuse last cache slot. */
  -- p;
  goto fill_cache;

#undef STAT_INCR
  } else {