Example 1
/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t addr, off;
    pmap_t kpmap;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vunmapbuf");
    addr = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - addr;
    len = round_page(off + len);
    kpmap = vm_map_pmap(phys_map);
    pmap_remove(kpmap, addr, addr + len);
    pmap_update(kpmap);
    uvm_km_free_wakeup(phys_map, addr, len);
    bp->b_data = bp->b_saveaddr;
    bp->b_saveaddr = 0;
}
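Taken together, the vmapbuf()/vunmapbuf() pair brackets a raw transfer so the driver sees the user's pages through a temporary kernel window. A minimal usage sketch follows (an illustration only: xx_strategy() is a hypothetical driver entry point, and on the ports in this collection that declare vmapbuf() as returning void the error check would be dropped).

/*
 * Usage sketch: map the user buffer into kernel VA, hand it to the
 * driver, wait for completion, then tear the mapping down.
 */
static int
raw_rw(struct buf *bp, vsize_t len)
{
	int error;

	error = vmapbuf(bp, len);	/* may sleep waiting for KVA */
	if (error)
		return error;
	xx_strategy(bp);		/* hypothetical driver entry point */
	error = biowait(bp);		/* wait for the I/O to complete */
	vunmapbuf(bp, len);		/* remove the temporary mapping */
	return error;
}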
Example 2
/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
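The trunc_page()/round_page() arithmetic above recurs in nearly every example in this collection; the following self-checking fragment (an illustration assuming 4 KiB pages) makes it concrete.

/*
 * Worked example: a 0x100-byte buffer at 0x12345678 needs exactly one
 * page of KVA once the in-page offset is folded into the length.
 */
static void
rounding_check(void)
{
	vaddr_t data = 0x12345678;
	vsize_t len = 0x100;
	vaddr_t addr = trunc_page(data);	/* 0x12345000 */
	vaddr_t off = data - addr;		/* 0x678 */

	len = round_page(off + len);		/* 0x1000: one full page */
	KASSERT(addr == 0x12345000);
	KASSERT(off == 0x678 && len == 0x1000);
}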
Example 3
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* Cache-inhibit the page if necessary */
			if ((flags & BUS_DMA_COHERENT) != 0)
				_pmap_set_page_cacheinhibit(pmap_kernel(), va);

			segs[curseg]._ds_flags &= ~BUS_DMA_COHERENT;
			segs[curseg]._ds_flags |= (flags & BUS_DMA_COHERENT);
		}
	}
	pmap_update(pmap_kernel());

	if ((flags & BUS_DMA_COHERENT) != 0)
		TBIAS();

	return 0;
}
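_bus_dmamem_map() normally sits behind the machine-independent bus_dmamem_map(9) entry point. Below is a hedged sketch of the allocate-then-map sequence a driver would perform (assumptions: `tag' comes from the attach arguments, a single segment suffices, and error unwinding is minimal).

/*
 * Usage sketch: allocate DMA-safe pages and map them coherently.
 */
static void *
dma_buf_alloc(bus_dma_tag_t tag, size_t size, bus_dma_segment_t *seg)
{
	void *kva;
	int rseg;

	if (bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
		return NULL;
	if (bus_dmamem_map(tag, seg, rseg, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
		bus_dmamem_free(tag, seg, rseg);
		return NULL;
	}
	return kva;
}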
Example 4
/* mem bs */
int
ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;
	pt_entry_t	*pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM.  */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return(0);
}
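A driver reaches a routine like the one above through the bus_space_map(9) interface. The consumer-side sketch below is an illustration: REG_STATUS is a made-up register offset, and iot would come from the attach arguments.

#define REG_STATUS	0x04	/* hypothetical status register offset */

static int
probe_status(bus_space_tag_t iot, bus_addr_t base, bus_size_t size)
{
	bus_space_handle_t ioh;
	uint32_t sts;

	if (bus_space_map(iot, base, size, 0, &ioh) != 0)
		return EIO;
	sts = bus_space_read_4(iot, ioh, REG_STATUS);
	bus_space_unmap(iot, ioh, size);
	return (sts != 0) ? 0 : ENXIO;
}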
Example 5
int
armv7_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int flag, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	pt_entry_t *pte;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* Some I/O registers (e.g. UART ports for the console)
		   are mapped to fixed addresses by a board-specific
		   routine. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
			/*
			 * XXX: pmap_kenter_pa() also does PTE_SYNC();
			 * a bit of a waste.
			 */
		}
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example 6
/*
 * void _bus_space_unmap(bus_space_tag bst, bus_space_handle bsh,
 *                        bus_size_t size, bus_addr_t *adrp)
 *
 *   This function unmaps memory- or I/O-space mapped by the function
 *   _bus_space_map().  It works much the same as bus_space_unmap(),
 *   but it does not consult the kernel's built-in extent maps, and it
 *   returns the physical address of the bus space for the convenience
 *   of the extra extent manager.
 */
void
_bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
    bus_addr_t *adrp)
{
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct bus physical address.
	 */
	if (t == X86_BUS_SPACE_IO) {
		bpa = bsh;
	} else if (t == X86_BUS_SPACE_MEM) {
		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
			goto ok;

		va = trunc_page(bsh);
		endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("_bus_space_unmap: overflow");
#endif

		(void) pmap_extract(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		pmap_kremove(va, endva - va);
		pmap_update(pmap_kernel());

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va);
	} else
		panic("bus_space_unmap: bad bus space tag");

ok:
	if (adrp != NULL)
		*adrp = bpa;
}
Example 7
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;


#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, atop(faddr) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect pmap_extract() to
	 * succeed for every page.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
			VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
Example 8
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int 
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	size = m68k_round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		paddr_t pa;

		if (size == 0)
			panic("_bus_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa | PMAP_NC,
			   VM_PROT_READ | VM_PROT_WRITE,
			   VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example 9
int
ifpga_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int cacheable, bus_space_handle_t *bshp)
{
    bus_addr_t startpa, endpa;
    vaddr_t va;
    const struct pmap_devmap *pd;
    bus_addr_t pa = bpa + (bus_addr_t) t;

    if ((pd = pmap_devmap_find_pa(pa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (pa - pd->pd_pa);
        return 0;
    }

    /* Round the allocation to page boundaries */
    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    /* Get some VM.  */
    va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
                      UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    if (va == 0)
        return ENOMEM;

    /* Store the bus space handle */
    *bshp = va + (bpa & PGOFSET);

    /* Now map the pages */
    /* The cookie is the physical base address for the I/O area */
    while (startpa < endpa) {
        /* XXX pmap_kenter_pa maps pages cacheable -- not what
           we want.  */
        pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
                   VM_PROT_READ | VM_PROT_WRITE, 0);
        va += PAGE_SIZE;
        startpa += PAGE_SIZE;
    }
    pmap_update(pmap_kernel());

    return 0;
}
Example 10
/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pmap;
	vaddr_t kva;
	vsize_t off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	pmap = vm_map_pmap(phys_map);
	pmap_remove(pmap, kva, kva + len);
	pmap_update(pmap);
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example 11
void
bus_mapout(void *ptr, int sz)
{
	vaddr_t va;
	int off;

	va = (vaddr_t)ptr;

	/* If it was a PROM mapping, do NOT free it! */
	if ((va >= SUN3_MONSTART) && (va < SUN3_MONEND))
		return;

	off = va & PGOFSET;
	va -= off;
	sz += off;
	sz = m68k_round_page(sz);

	pmap_remove(pmap_kernel(), va, va + sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);
}
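For symmetry, here is a minimal sketch of the mapping-in side. This is a reconstruction under NetBSD-style interfaces, not the actual sun3 bus_mapin(); the PMAP_NC cache-inhibit bit follows the convention seen in Example 8.

static void *
sketch_mapin(paddr_t pa, int sz)
{
	vaddr_t va, v;
	int off;

	off = pa & PGOFSET;
	pa -= off;
	sz = m68k_round_page(sz + off);

	va = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_VAONLY);
	if (va == 0)
		return NULL;
	for (v = va; sz > 0; v += PAGE_SIZE, pa += PAGE_SIZE, sz -= PAGE_SIZE)
		pmap_kenter_pa(v, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}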
Example 12
void
au_himem_unmap(void *cookie, bus_space_handle_t bsh, bus_size_t size, int acct)
{
	au_himem_cookie_t	*c = (au_himem_cookie_t *)cookie;
	vaddr_t			va;
	vsize_t			realsz;
	paddr_t			pa;
	int			s;

	va = (vaddr_t)TRUNC_PAGE(bsh);
	realsz = (vsize_t)ROUND_PAGE((bsh % PAGE_SIZE) + size);

	s = splhigh();

	/* make sure that any pending writes are flushed */
	wbflush();

	/*
	 * we have to get the bus address, so that we can free it in the
	 * extent manager.  this is the unfortunate thing about using
	 * virtual memory instead of just a 1:1 mapping scheme.
	 */
	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		panic("au_himem_unmap: virtual address invalid!");

	/* now remove it from the pmap */
	pmap_kremove(va, realsz);
	pmap_update(pmap_kernel());
	splx(s);

	/* finally we can release both virtual and bus address ranges */
	uvm_km_free(kernel_map, va, realsz, UVM_KMF_VAONLY);

	if (acct) {
		bus_addr_t		addr;
		addr = ((pa - c->c_physoff) + (bsh % PAGE_SIZE));
		extent_free(c->c_extent, addr, size, EX_NOWAIT);
	}
}
Example 13
void
vmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t faddr, taddr, off;
    paddr_t fpa;
    pmap_t kpmap, upmap;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");
    bp->b_saveaddr = bp->b_data;
    faddr = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - faddr;
    len = round_page(off + len);
    taddr = uvm_km_valloc_prefer_wait(phys_map, len, faddr);
    bp->b_data = (caddr_t)(taddr + off);
    /*
     * The region is locked, so we expect pmap_extract() to succeed.
     * XXX: unwise to expect this in a multithreaded environment.
     * Anything can happen to a pmap between the time we lock a
     * region, release the pmap lock, and then relock it for
     * the pmap_extract().
     *
     * No need to flush the TLB since we expect nothing to be mapped
     * where we just allocated (the TLB will be flushed when our
     * mapping is removed).
     */
    upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    kpmap = vm_map_pmap(phys_map);
    while (len) {
        pmap_extract(upmap, faddr, &fpa);
        pmap_enter(kpmap, taddr, fpa,
                   PROT_READ | PROT_WRITE, PMAP_WIRED);
        faddr += PAGE_SIZE;
        taddr += PAGE_SIZE;
        len -= PAGE_SIZE;
    }
    pmap_update(kpmap);
}
Example 14
/*
 * Deallocate all memory resources for this buffer. We need to be careful
 * to not drop kvm since we have no way to reclaim it. So, if the buffer
 * has kvm, we need to free it later. We put it on the front of the
 * freelist just so it gets picked up faster.
 *
 * Also, lots of assertions count on bp->b_data being NULL, so we
 * set it temporarily to NULL.
 *
 * Return non-zero if we take care of the freeing later.
 */
int
buf_dealloc_mem(struct buf *bp)
{
	caddr_t data;
	int s;

	s = splbio();

	data = bp->b_data;
	bp->b_data = NULL;

	if (data) {
		if (bp->b_flags & B_BUSY)
			bcstats.busymapped--;
		pmap_kremove((vaddr_t)data, bp->b_bufsize);
		pmap_update(pmap_kernel());
	}

	if (bp->b_pobj)
		buf_free_pages(bp);

	if (data == NULL) {
		splx(s);
		return (0);
	}

	bp->b_data = data;
	if (!(bp->b_flags & B_BUSY)) {		/* XXX - need better test */
		TAILQ_REMOVE(&buf_valist, bp, b_valist);
		bcstats.kvaslots_avail--;
	} else
		CLR(bp->b_flags, B_BUSY);
	SET(bp->b_flags, B_RELEASED);
	TAILQ_INSERT_HEAD(&buf_valist, bp, b_valist);
	bcstats.kvaslots_avail++;
	splx(s);

	return (1);
}
Example 15
void
footbridge_mem_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t startva, endva;

	/*
	 * Check for mappings below 1MB as we have this space permanently
	 * mapped. In practice it is only the VGA hole that takes
	 * advantage of this.
	 */
	if (bsh >= DC21285_PCI_ISA_MEM_VBASE
	    && bsh < (DC21285_PCI_ISA_MEM_VBASE + DC21285_PCI_ISA_MEM_VSIZE)) {
		return;
	}

	startva = trunc_page(bsh);
	endva = round_page(bsh + size);

	pmap_kremove(startva, endva - startva);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, startva, endva - startva, UVM_KMF_VAONLY);
}
Example 16
/*
 * _bus_dmamem_map_common --
 *	Map memory allocated with _bus_dmamem_alloc_range_common() into
 *	the kernel virtual address space.
 */
int
_bus_dmamem_map_common(bus_dma_tag_t t,
		       bus_dma_segment_t *segs,
		       int nsegs,
		       size_t size,
		       void **kvap,
		       int flags,
		       int pmapflags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (__predict_false(va == 0))
		return (ENOMEM);
	
	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			KASSERT(size != 0);
			/* XXX pmap_kenter_pa()? */
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    pmapflags | PMAP_WIRED |
			    	VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example 17
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
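The KSEG0/KSEG1 shortcut the comment refers to comes from the map side. Below is a hedged sketch of that fast path (single-segment case only; the MIPS_PHYS_TO_KSEG* macros are the standard MIPS segment translations).

/*
 * Sketch of the direct-mapped fast path this unmap routine must
 * tolerate: one physically contiguous segment can be handed out via
 * KSEG1 (uncached) or KSEG0 (cached) with no KVA allocation at all.
 */
static int
kseg_map_try(bus_dma_segment_t *segs, int nsegs, int flags, void **kvap)
{
	if (nsegs != 1)
		return EINVAL;
	if (flags & BUS_DMA_COHERENT)
		*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
	else
		*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
	return 0;
}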
Example 18
static inline void
cpu_activate_pcb(struct lwp *l)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct pcb *pcb = lwp_getpcb(l);
#ifdef DIAGNOSTIC
	vaddr_t uarea = (vaddr_t)pcb;
	vaddr_t maxsp = uarea + USPACE;
#endif
	KASSERT(tf == (void *)(uarea + PAGE_SIZE));

	/*
	 * Stash the physical address of FP regs for later perusal
	 */
	tf->tf_cr30 = (u_int)pcb->pcb_fpregs;

#ifdef DIAGNOSTIC
	/* Create the kernel stack red zone. */
	pmap_remove(pmap_kernel(), maxsp - PAGE_SIZE, maxsp);
	pmap_update(pmap_kernel());
#endif
}
Example 19
int
x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	paddr_t pa, endpa;
	vaddr_t va;
	bus_size_t map_size;
	int pmap_flags = PMAP_NOCACHE;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);

#ifdef DIAGNOSTIC
	if (endpa <= pa && endpa != 0)
		panic("bus_mem_add_mapping: overflow");
#endif

	map_size = endpa - pa;

	va = uvm_km_valloc(kernel_map, map_size);
	if (va == 0)
		return (ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));

	if (flags & BUS_SPACE_MAP_CACHEABLE)
		pmap_flags = 0;
	else if (flags & BUS_SPACE_MAP_PREFETCHABLE)
		pmap_flags = PMAP_WC;

	for (; map_size > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
		pmap_kenter_pa(va, pa | pmap_flags,
		    VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	return 0;
}
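The three cache modes selected above correspond to typical consumers; a framebuffer, for instance, is usually mapped write-combined. A hedged usage sketch (fb_pa and fb_sz are hypothetical):

static int
map_framebuffer(bus_space_tag_t memt, bus_addr_t fb_pa, bus_size_t fb_sz,
    bus_space_handle_t *bshp)
{
	/* BUS_SPACE_MAP_PREFETCHABLE selects PMAP_WC in the code above */
	return bus_space_map(memt, fb_pa, fb_sz,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, bshp);
}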
Example 20
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	KASSERT((bp->b_flags & B_PHYS) != 0);

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	/*
	 * The region is locked, so we expect pmap_extract() to succeed.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * Anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * No need to flush the TLB since we expect nothing to be mapped
	 * where we just allocated (the TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
Example 21
int
ixp12x0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	       int flags, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;
	pt_entry_t	*pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);
		
	if ((va = uvm_km_valloc(kernel_map, endpa - startpa)) == 0)
		return ENOMEM;

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return 0;
}
Example 22
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example 23
static const void *
mpbios_map(paddr_t pa, int len, struct mp_map *handle)
{
	paddr_t pgpa = x86_trunc_page(pa);
	paddr_t endpa = x86_round_page(pa + len);
	vaddr_t va = uvm_km_alloc(kernel_map, endpa - pgpa, 0, UVM_KMF_VAONLY);
	vaddr_t retva = va + (pa & PGOFSET);

	handle->pa = pa;
	handle->pg = pgpa;
	handle->psize = len;
	handle->baseva = va;
	handle->vsize = endpa-pgpa;

	do {
		pmap_kenter_pa(va, pgpa, VM_PROT_READ, 0);
		va += PAGE_SIZE;
		pgpa += PAGE_SIZE;
	} while (pgpa < endpa);
	pmap_update(pmap_kernel());

	return (const void *)retva;
}
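The handle fields recorded above carry everything the matching teardown needs. The following reconstruction is a sketch based on that saved state, not code copied from the source:

static void
sketch_mpbios_unmap(struct mp_map *handle)
{
	pmap_kremove(handle->baseva, handle->vsize);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, handle->baseva, handle->vsize,
	    UVM_KMF_VAONLY);
}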
Example 24
vaddr_t
buf_unmap(struct buf *bp)
{
	vaddr_t va;
	int s;

	KASSERT((bp->b_flags & B_BUSY) == 0);
	KASSERT(bp->b_data != NULL);

	s = splbio();
	TAILQ_REMOVE(&buf_valist, bp, b_valist);
	va = (vaddr_t)bp->b_data;
	bp->b_data = 0;
	pmap_kremove(va, bp->b_bufsize);
	pmap_update(pmap_kernel());

	if (bp->b_flags & B_RELEASED)
		pool_put(&bufpool, bp);

	splx(s);

	return (va);
}
Example 25
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr;
	vsize_t off;

#ifdef	DIAGNOSTIC
	if (!(bp->b_flags & B_PHYS))
		panic("vunmapbuf");
#endif
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	/*
	 * The mappings were entered by vmapbuf() as wired kernel
	 * mappings, so tear them down with pmap_kremove().
	 */
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example 26
/*
 * mm_init: initialize memory device driver.
 */
void
mm_init(void)
{
	vaddr_t pg;

	mutex_init(&dev_mem_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Read-only zero-page. */
	pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	KASSERT(pg != 0);
	pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());
	dev_zero_page = (void *)pg;

#ifndef __HAVE_MM_MD_CACHE_ALIASING
	/* KVA for mappings during I/O. */
	dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	KASSERT(dev_mem_addr != 0);
#else
	dev_mem_addr = 0;
#endif
}
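A hedged sketch of how the read-only zero page prepared above would be consumed on the /dev/zero read path (a simplified reconstruction; the real driver handles more cases):

static int
dev_zero_read(struct uio *uio)
{
	int error = 0;

	while (uio->uio_resid > 0 && error == 0)
		error = uiomove(dev_zero_page,
		    MIN(uio->uio_resid, PAGE_SIZE), uio);
	return error;
}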
Example 27
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap __unused;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (new to) */
	paddr_t pa; 		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);

	return 0;
}
Example 28
int
mpcore_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int flag, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	const struct pmap_devmap	*pd;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	const int pmapflags =
	    (flag & (BUS_SPACE_MAP_CACHEABLE|BUS_SPACE_MAP_PREFETCHABLE))
		? 0
		: PMAP_NOCACHE;
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example 29
/*
 * Map an IO request into kernel virtual address space.
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva;
	paddr_t pa;
	vsize_t size, off;
	int npf;
	struct proc *p;
	struct vm_map *map;
	struct pmap *upmap, *kpmap;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	p = bp->b_proc;
	map = &p->p_vmspace->vm_map;
	upmap = vm_map_pmap(map);
	kpmap = vm_map_pmap(phys_map);
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);

	kva = uvm_km_valloc_prefer_wait(phys_map, size, uva);
	bp->b_data = (caddr_t)(kva + off);
	npf = btoc(size);
	while (npf--) {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update(kpmap);
}
Example 30
void
fic_init(void)
{
	int i;

	extern paddr_t avail_start, avail_end;

	boothowto = RB_SINGLE; /* XXX for now */
	boothowto |= RB_KDB; /* XXX for now */

	delay_divisor = 30; /* XXX */

	/*
	 * Tell the VM system about available physical memory.  The
	 * fic uses one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * map and init interrupt controller
	 */
	physaccess((void*)virtual_avail, (void*)0x44000000,
	    PAGE_SIZE, PG_RW|PG_CI);
	sicinit((void*)virtual_avail);
	virtual_avail += PAGE_SIZE;

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
}