Example #1
/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != 0) {
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious) {
				m = PHYS_TO_VM_PAGE(pa);
				vm_page_lock(m);
				vm_page_unwire(m, TRUE);
				vm_page_unlock(m);
			}
		}
	}
}
Example #2
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}
Example #3
/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
#ifdef _LP64
	if (!MIPS_XKPHYS_P(va))
		return false;
	paddr_t pa = MIPS_XKPHYS_TO_PHYS(va);
#else
	if (!MIPS_KSEG0_P(va))
		return false;
	paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
#endif

#ifdef MIPS3_PLUS
	if (MIPS_CACHE_VIRTUAL_ALIAS)
		mips_dcache_inv_range((vaddr_t)va, USPACE);
#endif

	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}
	return true;
}
Example #4
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	DPRINTF(("bus_dmamem_free: t = %p, segs = %p, nsegs = %d\n", t, segs, nsegs));

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_free: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			DPRINTF(("bus_dmamem_free: m = %p\n", m));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}
Example #5
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	int i;
	bus_size_t minlen, wlen;
	bus_addr_t pa, addr;
	struct vm_page *pg;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i].ds_addr + offset;

		switch (op) {
		case BUS_DMASYNC_POSTWRITE:
			for (pa = trunc_page(addr), wlen = 0;
			    pa < round_page(addr + minlen);
			    pa += PAGE_SIZE) {
				pg = PHYS_TO_VM_PAGE(pa);
				if (pg != NULL)
					atomic_clearbits_int(&pg->pg_flags,
					    PG_PMAP_EXE);
			}
		}
		
	}
}
Example #6
/*
 * bool __mm_mem_addr(paddr_t pa):
 *	Check whether the specified physical address belongs to a memory device.
 */
bool
__mm_mem_addr(paddr_t pa)
{

	return ((atop(pa) < vm_physmem[0].start || PHYS_TO_VM_PAGE(pa) != NULL)
	    ? true : false);
}
Example #7
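/*
 * vclrmgfifo() clears the memguard_fifo_pool reference stored in the
 * wired page that backs the given virtual address.
 */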
static void
vclrmgfifo(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vclrmgfifo!"));
	p->pageq.tqe_next = NULL;
}
Example #8
/*
 * Return a small UMA allocation to the VM system: recover the physical
 * address from the direct (TLB) mapping, then unwire and free the page.
 */
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}
Example #9
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}
Example #10
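/*
 * vsetmgfifo() records a memguard_fifo_pool reference for the page
 * underlying the given virtual address by storing the pointer in the
 * wired page's otherwise-unused pageq.tqe_next field.
 */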
static void
vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vsetmgfifo!"));
	p->pageq.tqe_next = (vm_page_t)mgfifo;
}
Example #11
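/*
 * Translate a physical address into a kernel virtual address.  The CCSR
 * window and the per-CPU BMAN/QMAN portal windows are checked first;
 * otherwise the pv list of the backing vm_page (or the direct map, if
 * available) is used.
 */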
void *
XX_PhysToVirt(physAddress_t addr)
{
	struct pv_entry *pv;
	vm_page_t page;
	int cpu;

	/* Check CCSR */
	if (addr >= ccsrbar_pa && addr < ccsrbar_pa + ccsrbar_size)
		return ((void *)((vm_offset_t)(addr - ccsrbar_pa) +
		    ccsrbar_va));

	cpu = PCPU_GET(cpuid);

	/* Handle BMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[BM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu])));

	/* Handle QMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu])));

	page = PHYS_TO_VM_PAGE(addr);
	pv = TAILQ_FIRST(&page->md.pv_list);

	if (pv != NULL)
		return ((void *)(pv->pv_va + ((vm_offset_t)addr & PAGE_MASK)));

	if (PMAP_HAS_DMAP)
		return ((void *)(uintptr_t)PHYS_TO_DMAP(addr));

	printf("NetCommSW: "
	    "Unable to translate physical address 0x%09jx!\n", (uintmax_t)addr);

	return (NULL);
}
Example #12
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)PHYS_TO_UNCACHED(pa);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			if (flags & BUS_DMA_COHERENT)
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Example #13
/*
 * vtomgfifo() converts a virtual address of the first page allocated for
 * an item to a memguard_fifo_pool reference for the corresponding item's
 * size.
 *
 * vsetmgfifo() sets a reference in an underlying page for the specified
 * virtual address to an appropriate memguard_fifo_pool.
 *
 * These routines are very similar to those defined by UMA in uma_int.h.
 * The difference is that these routines store the mgfifo in one of the
 * page's fields that is unused when the page is wired rather than the
 * object field, which is used.
 */
static struct memguard_fifo *
vtomgfifo(vm_offset_t va)
{
	vm_page_t p;
	struct memguard_fifo *mgfifo;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vtomgfifo!"));
	mgfifo = (struct memguard_fifo *)p->pageq.tqe_next;
	return mgfifo;
}
Example #14
/*
 * ddb command: print the vm_page structure that corresponds to the given
 * physical address.
 */
void
db_page_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif)
{

	if (!have_addr) {
		db_printf("Need paddr for page\n");
		return;
	}

	db_printf("pa %llx pg %p\n", (unsigned long long)addr,
	    PHYS_TO_VM_PAGE(addr));
}
Example #15
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;
	vm_paddr_t pa;

	pa = DMAP_TO_PHYS((vm_offset_t)mem);
	m = PHYS_TO_VM_PAGE(pa);
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
}
Example #16
/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
			   VM_PROT_WRITE, &entry, &uobject,
			   &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock_queues();
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_lock_queues();
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
Example #17
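/*
 * Unwire a previously wired user address range and map the resulting
 * Mach return code to an errno value.  (Dirty-bit propagation is
 * compiled out behind FIXME.)
 */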
int
vsunlock(
	user_addr_t addr,
	user_size_t len,
	__unused int dirtied)
{
#if FIXME  /* [ */
	pmap_t		pmap;
	vm_page_t	pg;
	vm_map_offset_t	vaddr;
	ppnum_t		paddr;
#endif  /* FIXME ] */
	kern_return_t	kret;
	vm_map_t	map;

	map = current_map();

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = vm_map_trunc_page(addr, PAGE_MASK);
		     vaddr < vm_map_round_page(addr+len, PAGE_MASK);
		     vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	kret = vm_map_unwire(map,
			     vm_map_trunc_page(addr,
					       vm_map_page_mask(map)),
			     vm_map_round_page(addr+len,
					       vm_map_page_mask(map)),
			     FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
Example #18
static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in v2sizev!", p));
	return (&p->plinks.memguard.v);
}
Example #19
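/*
 * Return a small UMA allocation's page to the VM system; when no
 * hardware direct map is present, the temporary kernel mapping is
 * removed first.
 */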
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;

	if (!hw_direct_map)
		pmap_remove(kernel_pmap,(vm_offset_t)mem,
		    (vm_offset_t)mem + PAGE_SIZE);

	m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
	atomic_subtract_int(&hw_uma_mdpages, 1);
}
Example #20
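/*
 * Free the physical pages backing the kernel virtual range [start, end);
 * each page is looked up through pmap_kernel() and returned to the VM
 * system.
 */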
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	for (va = start; va < end; va += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), va, &pa))
			continue;
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("uvm_km_pgremove_intrsafe: no page");
		uvm_pagefree(pg);
	}
}
Example #21
/*
 * No requirements.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
    vm_paddr_t pa;
    vm_page_t m;

    if (size == 0)
        panic("vm_contig_pg_kmap: size must not be 0");
    size = round_page(size);

    pa = pmap_extract(&kernel_pmap, (vm_offset_t)addr);
    pmap_qremove((vm_offset_t)addr, size / PAGE_SIZE);
    kmem_free(&kernel_map, (vm_offset_t)addr, size);

    m = PHYS_TO_VM_PAGE(pa);
    vm_page_free_contig(m, size);
}
Example #22
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    lwkt_gettoken(&vm_token);
    crit_enter();
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    xio->xio_npages = i;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
Example #23
/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *vva)
{
	vaddr_t va = (vaddr_t) vva;
	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
		return false;

	/*
	 * Since the pages are physically contiguous, the vm_page structure
	 * will be as well.
	 */
	struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
	KASSERT(pg != NULL);
	for (size_t i = 0; i < UPAGES; i++, pg++) {
		uvm_pagefree(pg);
	}
	return true;
}
Example #24
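/*
 * Free DMA-safe memory described by the given segments, purging the data
 * cache and the instruction/data TLB entries for each page before the
 * pages are handed back to the VM system.
 */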
void
mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist pglist;
	paddr_t pa, epa;

	TAILQ_INIT(&pglist);
	for(; nsegs--; segs++)
		for (pa = segs->ds_addr, epa = pa + segs->ds_len;
		     pa < epa; pa += PAGE_SIZE) {
			struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
			if (!pg)
				panic("mbus_dmamem_free: no page for pa");
			TAILQ_INSERT_TAIL(&pglist, pg, pageq);
			pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
			pdtlb(HPPA_SID_KERNEL, pa);
			pitlb(HPPA_SID_KERNEL, pa);
		}
	uvm_pglistfree(&pglist);
}
Example #25
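/*
 * Copy len bytes from the kernel buffer src into physical memory
 * starting at dst, driving uiomove_fromphys() with the vm_page array
 * that backs the destination range.
 */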
int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
    vm_page_t m[PHYS_PAGE_COUNT(len)];
    struct iovec iov[1];
    struct uio uio;
    int i;

    iov[0].iov_base = src;
    iov[0].iov_len = len;
    uio.uio_iov = iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = len;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_WRITE;
    for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
        m[i] = PHYS_TO_VM_PAGE(dst);
    return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}
Example #26
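/*
 * Copy len bytes from physical memory starting at src into the kernel
 * buffer dst; the inverse of physcopyin().
 */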
int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
    vm_page_t m[PHYS_PAGE_COUNT(len)];
    struct iovec iov[1];
    struct uio uio;
    int i;

    iov[0].iov_base = dst;
    iov[0].iov_len = len;
    uio.uio_iov = iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = len;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_READ;
    for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
        m[i] = PHYS_TO_VM_PAGE(src);
    return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}
Example #27
/*
 * _bus_dmamem_free_common --
 *	Free memory allocated with _bus_dmamem_alloc_range_common()
 *	back to the VM system.
 */
void
_bus_dmamem_free_common(bus_dma_tag_t t,
			bus_dma_segment_t *segs,
			int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}
Example #28
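/*
 * Pager fault handler for privcmd mappings: resolve the faulting offset
 * to the fictitious page that backs the foreign mapping, free any page
 * already passed in *mres, and hand the new page back busied.
 */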
static int
privcmd_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct privcmd_map *map = object->handle;
	vm_pindex_t pidx;
	vm_page_t page, oldm;

	if (map->mapped != true)
		return (VM_PAGER_FAIL);

	pidx = OFF_TO_IDX(offset);
	if (pidx >= map->size || BIT_ISSET(map->size, pidx, map->err))
		return (VM_PAGER_FAIL);

	page = PHYS_TO_VM_PAGE(map->phys_base_addr + offset);
	if (page == NULL)
		return (VM_PAGER_FAIL);

	KASSERT((page->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", page));
	KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
	KASSERT(vm_page_busied(page) == 0, ("page %p is busy", page));

	if (*mres != NULL) {
		oldm = *mres;
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	}

	vm_page_insert(page, object, pidx);
	page->valid = VM_PAGE_BITS_ALL;
	vm_page_xbusy(page);
	*mres = page;
	return (VM_PAGER_OK);
}
Example #29
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_hpcmips_bd_mem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}
Example #30
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE((*t->_device_to_pa)(addr));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}