/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
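All of the vunmapbuf() variants in this collection undo the same pattern: phys_map virtual space reserved VA-only and filled in by hand. For context, here is a minimal sketch of the mapping side under the NetBSD API. This is illustrative only; real vmapbuf() implementations differ per port in their pmap calls and error handling.

/* Hedged sketch of the vmapbuf() side; not any one port's actual code. */
void
vmapbuf_sketch(struct buf *bp, vsize_t len)
{
	vaddr_t kva, uva, off;
	paddr_t pa;

	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);

	/* Reserve kernel VA only; no physical pages are allocated. */
	kva = uvm_km_alloc(phys_map, len, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);

	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);

	/* Mirror each resident user page into the reserved kernel VA. */
	while (len > 0) {
		(void)pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    uva, &pa);
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}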
Example #2
/*
 * Free pages from dvma_malloc()
 */
void 
dvma_free(void *addr, size_t size)
{
	vsize_t sz = m68k_round_page(size);

	uvm_km_free(phys_map, (vaddr_t)addr, sz, UVM_KMF_WIRED);
}
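Note the flag: dvma_free() passes UVM_KMF_WIRED because dvma_malloc() handed out wired, physically backed pages, unlike the VAONLY frees elsewhere in this collection. A hedged sketch of the allocation it undoes follows; the real sun2/sun3 code also sets up DVMA translations, omitted here.

/* Illustrative counterpart to dvma_free(); not the real dvma_malloc(). */
void *
dvma_malloc_sketch(size_t size)
{
	vsize_t sz = m68k_round_page(size);

	/* Wired, zeroed kernel pages; must be freed with UVM_KMF_WIRED. */
	return (void *)uvm_km_alloc(phys_map, sz, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
}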
Example #3
inline static void
mpbios_unmap(struct mp_map *handle)
{
	pmap_kremove(handle->baseva, handle->vsize);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, handle->baseva, handle->vsize, UVM_KMF_VAONLY);
}
Example #4
int 
sun68k_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
{
	bus_size_t	offset;
	vaddr_t va = (vaddr_t)bh;

	/*
	 * Adjust the user's request to be page-aligned.
	 */
	offset = va & PGOFSET;
	va -= offset;
	size += offset;
	size = m68k_round_page(size);
	if (size == 0) {
		printf("sun68k_bus_unmap: zero size\n");
		return (EINVAL);
	}

	/*
	 * If any part of the request is in the PROM's address space,
	 * don't unmap it.
	 */
#ifdef	DIAGNOSTIC
	if ((va >= SUN_MONSTART && va < SUN_MONEND) !=
	    ((va + size) >= SUN_MONSTART && (va + size) < SUN_MONEND))
		panic("sun_bus_unmap: bad PROM mapping");
#endif
	if (va >= SUN_MONSTART && va < SUN_MONEND)
		return (0);

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
	return (0);
}
Example #5
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((uintptr_t)kva & PGOFSET)
		panic("_bus_dmamem_unmap: bad alignment on %p", kva);
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2 or XKSEG).
	 */
	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
		return;
#ifdef _LP64
	if (MIPS_XKPHYS_P((vaddr_t)kva))
		return;
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
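The early returns mirror how the map side hands out addresses: on MIPS, a single-segment cacheable request is satisfied through the fixed KSEG0/KSEG1 (or XKPHYS) windows and never consumes kernel_map virtual space, so there is nothing to free. A hedged sketch of that shortcut on the map side (32-bit case, assumed simplification):

/* Illustrative: how _bus_dmamem_map() can satisfy a one-segment
 * request from the direct-mapped segments. */
static int
dmamem_map_direct_sketch(bus_dma_segment_t *segs, int nsegs, int flags,
    void **kvap)
{
	if (nsegs != 1)
		return EINVAL;	/* caller falls back to kernel_map VA */
	if (flags & BUS_DMA_COHERENT)
		*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
	else
		*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
	return 0;
}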
Example #6
File: obio.c Project: ajinkya93/OpenBSD
void
obio_iomem_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
{
	u_long va, endva;
	bus_addr_t bpa;

	if (bsh >= SH3_P2SEG_BASE && bsh <= SH3_P2SEG_END) {
		/* maybe CS0,1,2,3,4,7 */
		return;
	}

	/* CS5,6 */
	va = trunc_page(bsh);
	endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
	if (endva <= va)
		panic("obio_io_unmap: overflow");
#endif

	pmap_extract(pmap_kernel(), va, &bpa);
	bpa += bsh & PGOFSET;

	pmap_kremove(va, endva - va);

	/*
	 * Free the kernel virtual mapping.
	 */
	uvm_km_free(kernel_map, va, endva - va);
}
Example #7
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	vaddr_t va;
	size_t s;

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);

	/*
	 * Re-enable caching on the range.
	 * XXXSCW: There should be some way to indicate that the pages
	 * were mapped DMA_MAP_COHERENT in the first place...
	 */
	for (s = 0, va = (vaddr_t)kva; s < size;
	    s += PAGE_SIZE, va += PAGE_SIZE)
		_pmap_set_page_cacheable(pmap_kernel(), va);

	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
Example #8
static int
xennet_xenbus_detach(device_t self, int flags)
{
	struct xennet_xenbus_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s0, s1;
	RING_IDX i;

	DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self)));
	s0 = splnet();
	xennet_stop(ifp, 1);
	/* wait for pending TX to complete, and collect pending RX packets */
	xennet_handler(sc);
	while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
		xennet_handler(sc);
	}
	xennet_free_rx_buffer(sc);

	s1 = splvm();
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
		uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
		    UVM_KMF_WIRED);
	}
	splx(s1);
		
	ether_ifdetach(ifp);
	if_detach(ifp);
	while (xengnt_status(sc->sc_tx_ring_gntref)) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
	}
	xengnt_revoke_access(sc->sc_tx_ring_gntref);
	uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
	    UVM_KMF_WIRED);
	while (xengnt_status(sc->sc_rx_ring_gntref)) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
	}
	xengnt_revoke_access(sc->sc_rx_ring_gntref);
	uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
	    UVM_KMF_WIRED);
	softint_disestablish(sc->sc_softintr);
	event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
	splx(s0);
	DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self)));
	return 0;
}
Example #9
static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size)
{
#ifdef _KERNEL
	uvm_km_free(module_map, (vaddr_t)chunk, size, UVM_KMF_WIRED);
#else
	munmap(chunk, size);
#endif
}
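free_chunk() is the release half of sljit's executable-memory allocator: in-kernel it returns wired pages to module_map, in userland it unmaps. A hedged sketch of the matching allocator under the same #ifdef split; the exact UVM flags are an assumption, not sljit's verbatim code.

static SLJIT_INLINE void* alloc_chunk_sketch(sljit_uw size)
{
#ifdef _KERNEL
	/* Wired, zeroed, executable kernel memory from module_map. */
	return (void *)uvm_km_alloc(module_map, size, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_EXEC);
#else
	void *chunk = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	return (chunk != MAP_FAILED) ? chunk : NULL;
#endif
}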
Example #10
static inline void
dev_mem_relva(paddr_t pa, vaddr_t va)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
#else
	KASSERT(dev_mem_addr == va);
#endif
}
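dev_mem_relva() only has virtual space to give back on cache-aliasing machines; everywhere else /dev/mem reuses one preallocated VA. A hedged sketch of the paired dev_mem_getva(); the color-matching argument is an assumption about how aliasing is avoided, not verified against the real mm.c.

static inline vaddr_t
dev_mem_getva_sketch(paddr_t pa)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	/* VA-only page whose cache color matches pa, so the temporary
	 * kernel mapping cannot alias other mappings of the same page. */
	return uvm_km_alloc(kernel_map, PAGE_SIZE,
	    atop(pa) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
#else
	return dev_mem_addr;	/* single statically allocated VA */
#endif
}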
Example #11
void
armv7_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{

	if (bsh > (u_long)KERNEL_BASE) 
		return;

	uvm_km_free(kernel_map, bsh, size);
}
Example #12
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	if (IS_XKPHYS((vaddr_t)kva))
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}
Example #13
/*
 * Undo vmaprange.
 */
void
vunmaprange(vaddr_t kaddr, vsize_t len)
{
	vaddr_t addr;
	vsize_t off;

	addr = trunc_page(kaddr);
	off = kaddr - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
}
Example #14
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)PHYS_TO_UNCACHED(pa);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
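				/* Unwind: drop any partial mappings and
				 * release the whole VA range. */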
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			if (flags & BUS_DMA_COHERENT)
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Example #15
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}
Example #16
void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
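		/* Roll back the big-pipe count and global KVA accounting
		 * before returning the buffer's mapping to kernel_map. */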
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
		uvm_km_free(kernel_map, (vaddr_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
}
Example #17
/*
 * _bus_dmamem_unmap_common --
 *	Remove a mapping created with _bus_dmamem_map_common().
 */
void
_bus_dmamem_unmap_common(bus_dma_tag_t t,
			 void *kva,
			 size_t size)
{

	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);

	size = round_page(size);
	/* XXX pmap_kremove()?  See above... */
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
Example #18
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif
	if (kva >= (caddr_t)PMAP_DIRECT_BASE && kva <= (caddr_t)PMAP_DIRECT_END)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}
Example #19
void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	struct extent *ex;
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct extent and bus physical address.
	 */
	if (t == X86_BUS_SPACE_IO) {
		ex = ioport_ex;
		bpa = bsh;
	} else if (t == X86_BUS_SPACE_MEM) {
		ex = iomem_ex;
		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
			goto ok;

		va = trunc_page(bsh);
		endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("bus_space_unmap: overflow");
#endif

		(void)pmap_extract(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		pmap_kremove(va, endva - va);
		pmap_update(pmap_kernel());

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va);
	} else
		panic("bus_space_unmap: bad bus space tag");

ok:
	if (extent_free(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
		printf("bus_space_unmap: %s 0x%lx, size 0x%lx\n",
		    (t == X86_BUS_SPACE_IO) ? "port" : "pa", bpa, size);
		printf("bus_space_unmap: can't free region\n");
	}
}
Example #20
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}
Example #21
void
obio_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	endva = round_page(bsh + size);
	va = trunc_page(bsh);

	pmap_kremove(va, endva - va);
	uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
}
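The pmap_devmap_find_va() test appears in several bs_unmap routines here: devices entered in the static devmap table at bootstrap never borrowed VA from kernel_map, so unmapping them is a no-op. A hedged sketch of the corresponding check on the map side:

/* Illustrative: the map side prefers a static devmap entry and only
 * falls back to dynamic VA allocation when none covers the range. */
static int
bs_map_static_sketch(bus_addr_t bpa, bus_size_t size, vaddr_t *vap)
{
	const struct pmap_devmap *pd;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Statically mapped at bootstrap; reuse that VA. */
		*vap = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}
	return EINVAL;	/* caller allocates VA and pmap_kenter()s it */
}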
Example #22
File: xbd.c Project: MarginC/kame
static void
unmap_align(struct xbdreq *xr)
{
	int s;

	if (xr->xr_bp->b_flags & B_READ)
		memcpy(xr->xr_bp->b_data, (void *)xr->xr_aligned,
		    xr->xr_bp->b_bcount);
	DPRINTF(XBDB_IO, ("unmap_align(%p): bp %p addr %p align 0x%08lx "
	    "size 0x%04lx\n", xr, xr->xr_bp, xr->xr_bp->b_data,
	    xr->xr_aligned, xr->xr_bp->b_bcount));
	s = splvm();
	uvm_km_free(kmem_map, xr->xr_aligned, xr->xr_bp->b_bcount);
	splx(s);
	xr->xr_aligned = (vaddr_t)0;
}
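unmap_align() is the tail of a bounce-buffer scheme: when the original buffer is not suitably aligned for the ring, the driver transfers through an aligned copy, copies reads back, and frees the bounce pages. A hedged sketch of the map_align() side, using the old-style kmem allocator this era of code would have had; illustrative, not the verbatim xbd.c function.

static int
map_align_sketch(struct xbdreq *xr)
{
	int s;

	s = splvm();
	/* Page-aligned bounce buffer for the whole transfer. */
	xr->xr_aligned = uvm_km_kmemalloc(kmem_map, NULL,
	    xr->xr_bp->b_bcount, UVM_KMF_NOWAIT);
	splx(s);
	if (xr->xr_aligned == 0)
		return ENOMEM;
	/* Writes are staged into the bounce buffer before I/O starts;
	 * reads are copied back in unmap_align() above. */
	if ((xr->xr_bp->b_flags & B_READ) == 0)
		memcpy((void *)xr->xr_aligned, xr->xr_bp->b_data,
		    xr->xr_bp->b_bcount);
	return 0;
}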
Example #23
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	KASSERT((bp->b_flags & B_PHYS) != 0);

	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example #24
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example #25
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = mips_trunc_page(bp->b_data);
	len = mips_round_page((vaddr_t)bp->b_data - kva + len);
	pmap_kremove(kva, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example #26
void
vme_unmap(struct vme_softc *sc, struct extent *ext, u_int awidth,
    vaddr_t vaddr, paddr_t paddr, bus_size_t size)
{
	const struct vme_range *r;
	vaddr_t va;
	paddr_t pa, addr;
	psize_t len;

	va = trunc_page(vaddr);
	pa = trunc_page(paddr);
	len = round_page(paddr + size) - pa;

	/*
	 * Retrieve the address range this mapping comes from.
	 */
	for (r = sc->sc_ranges; r->vr_width != 0; r++) {
		if (r->vr_width != awidth)
			continue;
		addr = paddr - r->vr_base;
		if (r->vr_width == awidth &&
		    r->vr_start <= addr && r->vr_end >= addr + size - 1)
			break;
	}
	if (r->vr_width == 0) {
#ifdef DIAGNOSTIC
		printf("%s: nonsensical A%d mapping at va 0x%08lx pa 0x%08lx\n",
		    __func__, AWIDTH(awidth), vaddr, paddr);
#endif
		return;
	}

	/*
	 * Undo the mapping.
	 */
	pmap_kremove(va, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, len);

	/*
	 * Unregister mapping.
	 */
	if (ext != NULL) {
		pa -= r->vr_base;
		extent_free(ext, atop(pa), atop(len), EX_NOWAIT | EX_MALLOCOK);
	}
}
Example #27
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	pmap_kremove(kva, len);
	uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example #28
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;

	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
		*kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
		return (0);
	}

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
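				/* Failed partway: undo the mappings and
				 * give back the entire sva..sva+ssize range. */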
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #29
File: ifpga_io.c Project: ryo/netbsd-src
void
ifpga_mem_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t startva, endva;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	startva = trunc_page(bsh);
	endva = round_page(bsh + size);

	pmap_remove(pmap_kernel(), startva, endva);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, startva, endva - startva, UVM_KMF_VAONLY);
}
Example #30
void
mpcore_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t	va;
	vsize_t	sz;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	va = trunc_page(bsh);
	sz = round_page(bsh + size) - va;

	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);
}
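Two generations of the interface appear in the examples above. The NetBSD calls take a fourth flags argument that must name what was allocated: UVM_KMF_VAONLY returns bare address space (the caller removes any mappings first with pmap_remove() or pmap_kremove(), as every example here does), while UVM_KMF_WIRED also releases the backing pages. The three-argument OpenBSD-style calls carry no flags and tear down the mappings as part of the free. A minimal pairing sketch under the NetBSD API:

/* Pairing rule (illustrative): free with the same kind of allocation
 * you made.  Mismatched flags are a bug. */
static void
km_pairing_sketch(void)
{
	vaddr_t va;

	/* Address space only: caller manages mappings by hand. */
	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY);
	if (va != 0)
		uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);

	/* Wired memory: backing pages are allocated and freed with it. */
	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
	if (va != 0)
		uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
}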