Example #1
/*
 * Unload a dvmamap.
 */
void
viommu_dvmamap_unload(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map)
{
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;
	bus_addr_t dvmaddr = map->_dm_dvmastart;
	bus_size_t sgsize = map->_dm_dvmasize;
	int error;

#ifdef DEBUG
	if (ims == NULL)
		panic("viommu_dvmamap_unload: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_unload: null iommu");
#endif /* DEBUG */

	is = ims->ims_iommu;

	/* Remove the IOMMU entries */
	viommu_iomap_unload_map(is, ims);

	/* Clear the iomap */
	iommu_iomap_clear_pages(ims);

	bus_dmamap_unload(t->_parent, map);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	mtx_enter(&is->is_mtx);
	error = extent_free(is->is_dvmamap, dvmaddr, sgsize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mtx_leave(&is->is_mtx);
	if (error != 0)
		printf("warning: %qd of DVMA space lost\n", sgsize);
}
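
The sketch below is not part of the source; it illustrates, in terms of the
standard bus_dma(9) interface, the caller-side lifecycle that ends in the
unload path above.  The function name and its error handling are
illustrative only; on a viommu-backed tag the load and unload calls
dispatch to viommu_dvmamap_load() and viommu_dvmamap_unload().

int
example_dma_cycle(bus_dma_tag_t tag, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int err;

	/* Create a single-segment map sized for the buffer. */
	err = bus_dmamap_create(tag, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
	if (err)
		return (err);

	/* Load establishes the pa->dvma translations in the IOMMU. */
	err = bus_dmamap_load(tag, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (err) {
		bus_dmamap_destroy(tag, map);
		return (err);
	}

	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... start the device DMA and wait for it to complete ... */
	bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_POSTWRITE);

	/* Unload removes the IOMMU entries and frees the DVMA range. */
	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	return (0);
}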
Example #2
/*
 * Load a dvmamap from an array of segs or an mlist (if the first
 * "segs" entry's mlist is non-null).  It calls viommu_dvmamap_load_seg()
 * or viommu_dvmamap_load_mlist() for part of the second pass through the
 * mapping.  This is ugly.  A better solution would probably be to have
 * function pointers for implementing the traversal.  That way, there
 * could be one core load routine shared by the three required algorithms
 * (buffer, seg, and mlist), and each traversal algorithm would need only
 * one implementation instead of two (one for populating the iomap and
 * one for populating the dvma map).  (A sketch of this idea follows the
 * function.)
 */
int
viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	int i;
	int left;
	int err = 0;
	bus_size_t sgsize;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;

#ifdef DIAGNOSTIC
	if (ims == NULL)
		panic("viommu_dvmamap_load_raw: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_load_raw: null iommu");
#endif
	is = ims->ims_iommu;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		panic("iommu_dvmamap_load_raw: map still in use");
#endif
		bus_dmamap_unload(t0, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = MAX(segs[0]._ds_align, PAGE_SIZE);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	iommu_iomap_clear_pages(ims);
	if (segs[0]._ds_mlist) {
		struct pglist *mlist = segs[0]._ds_mlist;
		struct vm_page *m;
		TAILQ_FOREACH(m, mlist, pageq) {
			err = iommu_iomap_insert_page(ims, VM_PAGE_TO_PHYS(m));

			if (err) {
				printf("iomap insert error: %d for "
				    "pa 0x%lx\n", err, VM_PAGE_TO_PHYS(m));
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	} else {
		/* Count up the total number of pages we need */
		for (i = 0, left = size; left > 0 && i < nsegs; i++) {
			bus_addr_t a, aend;
			bus_size_t len = segs[i].ds_len;
			bus_addr_t addr = segs[i].ds_addr;
			int seg_len = MIN(left, len);

			if (len < 1)
				continue;

			aend = round_page(addr + seg_len);
			for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
				err = iommu_iomap_insert_page(ims, a);
				if (err) {
					printf("iomap insert error: %d for "
					    "pa 0x%llx\n", err, a);
					iommu_iomap_clear_pages(ims);
					return (EFBIG);
				}
			}

			left -= seg_len;
		}
	}
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	mtx_enter(&is->is_mtx);
	if (flags & BUS_DMA_24BIT) {
		sgstart = MAX(is->is_dvmamap->ex_start, 0xff000000);
		sgend = MIN(is->is_dvmamap->ex_end, 0xffffffff);
	} else {
		sgstart = is->is_dvmamap->ex_start;
		sgend = is->is_dvmamap->ex_end;
	}

	/* 
	 * If our segment size is larger than the boundary we need to 
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
	    sgsize, align, 0, (sgsize > boundary) ? 0 : boundary, 
	    EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
	mtx_leave(&is->is_mtx);

	if (err != 0) {
		iommu_iomap_clear_pages(ims);
		return (err);
	}

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1) {
		printf("viommu_dvmamap_load_raw(): extent_alloc(%d, %x) "
		    "failed!\n", (int)sgsize, flags);
#ifdef DDB
		if (iommudebug & IDB_BREAK)
			Debugger();
#else
		panic("viommu_dvmamap_load_raw: extent_alloc failed");
#endif
	}
#endif

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	map->dm_mapsize = size;

	viommu_iomap_load_map(is, ims, dvmaddr, flags);

	if (segs[0]._ds_mlist)
		err = viommu_dvmamap_load_mlist(t, is, map, segs[0]._ds_mlist,
		    flags, size, boundary);
	else
		err = viommu_dvmamap_load_seg(t, is, map, segs, nsegs,
		    flags, size, boundary);

	if (err)
		viommu_dvmamap_unload(t, t0, map);

	return (err);
}
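
The comment at the top of viommu_dvmamap_load_raw() proposes replacing the
duplicated seg/mlist traversals with function pointers.  The sketch below
outlines that idea; every name in it is hypothetical and none of it exists
in the tree.

/* Visit one physical range; "ctx" carries pass-specific state. */
typedef int (*dvma_visit_t)(void *ctx, paddr_t pa, psize_t len);

/* One traversal per source type (kva buffer, seg array, mlist). */
typedef int (*dvma_traverse_t)(void *src, dvma_visit_t visit, void *ctx);

static int
viommu_dvmamap_load_core(struct iommu_state *is, struct iommu_map_state *ims,
    bus_dmamap_t map, dvma_traverse_t traverse, void *src)
{
	int err;

	/* Pass 1: insert every physical page into the iomap. */
	err = traverse(src, iomap_insert_visitor, ims);
	if (err)
		return (err);

	/* ... allocate DVMA space and program the IOMMU, as above ... */

	/* Pass 2: append each physical range to the dvma map. */
	return (traverse(src, dvmamap_append_visitor, map));
}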
Example #3
/*
 * Load a contiguous kva buffer into a dmamap.  The physical pages are
 * not assumed to be contiguous.  Two passes are made through the buffer,
 * and both call pmap_extract() for the same va->pa translations.  It
 * is possible to run out of pa->dvma mappings; the code should be smart
 * enough to resize the iomap (when the "flags" permit allocation).  It
 * is trivial to compute the number of entries required: round the start
 * of the buffer down and its end up to page boundaries, then divide the
 * difference by the page size.  (See the sketch after this function.)
 */
int
viommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	int err = 0;
	bus_size_t sgsize;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary;
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;
	pmap_t pmap;

#ifdef DIAGNOSTIC
	if (ims == NULL)
		panic("viommu_dvmamap_load: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_load: null iommu");
#endif
	is = ims->ims_iommu;

	if (map->dm_nsegs) {
		/*
		 * Is it still in use? _bus_dmamap_load should have taken care
		 * of this.
		 */
#ifdef DIAGNOSTIC
		panic("iommu_dvmamap_load: map still in use");
#endif
		bus_dmamap_unload(t0, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen < 1 || buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("viommu_dvmamap_load(): buflen %d exceeds "
		     "map size %d!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = MAX(map->dm_segs[0]._ds_align, PAGE_SIZE);

	pmap = p ? p->p_vmspace->vm_map.pmap : pmap_kernel();

	/* Count up the total number of pages we need */
	iommu_iomap_clear_pages(ims);
	{ /* Scope */
		bus_addr_t a, aend;
		bus_addr_t addr = (bus_addr_t)buf;
		int seg_len = buflen;

		aend = round_page(addr + seg_len);
		for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
			paddr_t pa;

			if (pmap_extract(pmap, a, &pa) == FALSE) {
				printf("iomap pmap error addr 0x%llx\n", a);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}

			err = iommu_iomap_insert_page(ims, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%llx pa 0x%lx "
				    "(buf %p len %lld/%llx)\n",
				    err, a, pa, buf, buflen, buflen);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	}
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	mtx_enter(&is->is_mtx);
	if (flags & BUS_DMA_24BIT) {
		sgstart = MAX(is->is_dvmamap->ex_start, 0xff000000);
		sgend = MIN(is->is_dvmamap->ex_end, 0xffffffff);
	} else {
		sgstart = is->is_dvmamap->ex_start;
		sgend = is->is_dvmamap->ex_end;
	}

	/* 
	 * If our segment size is larger than the boundary we need to 
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
	    sgsize, align, 0, (sgsize > boundary) ? 0 : boundary, 
	    EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
	mtx_leave(&is->is_mtx);

#ifdef DEBUG
	if (err || (dvmaddr == (bus_addr_t)-1)) {
		printf("viommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		if (iommudebug & IDB_BREAK)
			Debugger();
#endif
	}
#endif
	if (err != 0) {
		iommu_iomap_clear_pages(ims);
		return (err);
	}

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	map->dm_mapsize = buflen;

	viommu_iomap_load_map(is, ims, dvmaddr, flags);

	{ /* Scope */
		bus_addr_t a, aend;
		bus_addr_t addr = (bus_addr_t)buf;
		int seg_len = buflen;

		aend = round_page(addr + seg_len);
		for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
			bus_addr_t pgstart;
			bus_addr_t pgend;
			paddr_t pa;
			int pglen;

			/* Yuck... Redoing the same pmap_extract... */
			if (pmap_extract(pmap, a, &pa) == FALSE) {
				printf("iomap pmap error addr 0x%llx\n", a);
				err = EFBIG;
				break;
			}

			pgstart = pa | (MAX(a, addr) & PAGE_MASK);
			pgend = pa | (MIN(a + PAGE_SIZE - 1,
			    addr + seg_len - 1) & PAGE_MASK);
			pglen = pgend - pgstart + 1;

			if (pglen < 1)
				continue;

			err = viommu_dvmamap_append_range(t, map, pgstart,
			    pglen, flags, boundary);
			if (err == EFBIG)
				break;
			else if (err) {
				printf("iomap load seg page: %d for "
				    "va 0x%llx pa %lx (%llx - %llx) "
				    "for %d/0x%x\n",
				    err, a, pa, pgstart, pgend, pglen, pglen);
				break;
			}
		}
	}

	if (err)
		viommu_dvmamap_unload(t, t0, map);

	return (err);
}
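
As the header comment of viommu_dvmamap_load() notes, the number of iomap
entries a load will need is easy to compute up front.  A minimal sketch
(the helper name is hypothetical); rounding only the length would
undercount a buffer that straddles an extra page:

/*
 * Pages touched by [va, va + len): round the start down and the end up
 * to page boundaries, then divide by the page size.  E.g. with 8KB
 * pages, a 16-byte buffer starting 8 bytes before a page boundary
 * touches two pages, although round_page(16) / PAGE_SIZE == 1.
 */
static u_long
dvma_pages_needed(vaddr_t va, bus_size_t len)
{
	return ((round_page(va + len) - trunc_page(va)) / PAGE_SIZE);
}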