Example 1
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    lwkt_gettoken(&vm_token);
    crit_enter();
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    xio->xio_npages = i;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
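A note on the arithmetic the loop above depends on: the buffer's offset within its first page is kbase & PAGE_MASK, the first chunk is the smaller of PAGE_SIZE - offset and kbytes, and every later chunk is at most a full page. The following is a minimal userspace sketch of that walk, assuming 4 KiB pages and stand-in trunc_page()/PAGE_MASK macros written to mirror the usual kernel definitions; the buffer address and length are hypothetical.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's page macros (assumed 4 KiB pages). */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)

int
main(void)
{
	uintptr_t kbase = 0x1234;		/* hypothetical buffer start */
	size_t kbytes = 10000;			/* hypothetical buffer length */
	uintptr_t addr = trunc_page(kbase);
	size_t offset = kbase & PAGE_MASK;	/* offset within the first page */
	size_t n = PAGE_SIZE - offset;		/* room left in the first page */
	int pages = 0;

	if (n > kbytes)
		n = kbytes;

	/* Walk the buffer a page at a time, as the loop above does. */
	while (n != 0) {
		pages++;
		kbytes -= n;
		n = kbytes > PAGE_SIZE ? PAGE_SIZE : kbytes;
		addr += PAGE_SIZE;
	}
	assert(kbytes == 0);
	printf("offset %zu, %d pages touched\n", offset, pages);
	return 0;
}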
Example 2
static int
terasic_mtl_reg_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct terasic_mtl_softc *sc;
	int error;

	sc = dev->si_drv1;
	error = 0;
	if (trunc_page(offset) == offset &&
	    offset + PAGE_SIZE > offset &&
	    rman_get_size(sc->mtl_reg_res) >= offset + PAGE_SIZE) {
		*paddr = rman_get_start(sc->mtl_reg_res) + offset;
		*memattr = VM_MEMATTR_UNCACHEABLE;
	} else
		error = ENODEV;
	return (error);
}
Example 3
/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data pointer.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t addr, off;
    pmap_t kpmap;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vunmapbuf");
    addr = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - addr;
    len = round_page(off + len);
    kpmap = vm_map_pmap(phys_map);
    pmap_remove(kpmap, addr, addr + len);
    pmap_update(kpmap);
    uvm_km_free_wakeup(phys_map, addr, len);
    bp->b_data = bp->b_saveaddr;
    bp->b_saveaddr = 0;
}
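The three lines computing addr, off, and len above are the standard way to recover the whole-page region backing an unaligned buffer: truncate the start down to a page boundary, remember the in-page offset, and round the offset plus length up. A small sketch of just that arithmetic, assuming 4 KiB pages and hypothetical macro definitions matching the usual ones:

#include <stdint.h>
#include <stdio.h>

/* Assumed 4 KiB pages; trunc_page/round_page mirror the usual definitions. */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	uintptr_t b_data = 0x7f001234;		/* hypothetical buffer address */
	size_t len = 6000;			/* hypothetical I/O length */

	uintptr_t addr = trunc_page(b_data);	/* first page of the mapping */
	size_t off = b_data - addr;		/* data offset inside that page */
	size_t maplen = round_page(off + len);	/* whole pages to tear down */

	printf("remove %#lx..%#lx (%zu bytes, %lu pages)\n",
	    (unsigned long)addr, (unsigned long)(addr + maplen),
	    maplen, maplen / PAGE_SIZE);
	return 0;
}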
Example 4
/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example 5
/*
 * Create writable aliases of memory we need
 * to write to, as the kernel is mapped read-only.
 */
void *codepatch_maprw(vaddr_t *nva, vaddr_t dest)
{
	paddr_t kva = trunc_page((paddr_t)dest);
	paddr_t po = (paddr_t)dest & PAGE_MASK;
	paddr_t pa1, pa2;

	if (*nva == 0)
		*nva = (vaddr_t)km_alloc(2 * PAGE_SIZE, &kv_any, &kp_none,
					&kd_waitok);

	pmap_extract(pmap_kernel(), kva, &pa1);
	pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
	pmap_kenter_pa(*nva, pa1, PROT_READ | PROT_WRITE);
	pmap_kenter_pa(*nva + PAGE_SIZE, pa2, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());

	return (void *)(*nva + po);
}
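The function above works because one physical page may be entered at two virtual addresses with different protections: the write goes through the scratch PROT_READ | PROT_WRITE alias while the kernel's own mapping stays read-only. The sketch below reproduces the idea in userspace on Linux (memfd_create() and MAP_SHARED are assumed available); it is an analogue of the concept, not the kernel interfaces used above.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("alias-demo", 0);

	if (fd < 0 || ftruncate(fd, pagesz) != 0)
		return 1;

	/* Read-only view, standing in for the read-only kernel text. */
	char *ro = mmap(NULL, pagesz, PROT_READ, MAP_SHARED, fd, 0);
	/* Writable alias of the same underlying page. */
	char *rw = mmap(NULL, pagesz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (ro == MAP_FAILED || rw == MAP_FAILED)
		return 1;

	memcpy(rw + 0x123, "patched", 7);	/* write through the alias...   */
	printf("%.7s\n", ro + 0x123);		/* ...and the RO view sees it   */
	return 0;
}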
Example 6
void
mpcore_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t	va;
	vsize_t	sz;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	va = trunc_page(bsh);
	sz = round_page(bsh + size) - va;

	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);
}
Example 7
void
ep93xx_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t	va;
	vaddr_t	endva;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	endva = round_page(bsh + size);
	va = trunc_page(bsh);

	pmap_remove(pmap_kernel(), va, endva);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
}
Example 8
void
generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
	vm_offset_t sva, va, endva;

	if (pmap_devmap_find_va((vm_offset_t)t, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	endva = round_page((vm_offset_t)t + size);
	sva = trunc_page((vm_offset_t)t);

	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove(va);
	/* Free the saved start of the range; va has advanced past it. */
	kmem_free(kernel_map, sva, endva - sva);
}
Example 9
int
sparc64_bus_mem_unmap(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	if (tag->bst_cookie == NULL ||
	    (sva = (vm_offset_t)rman_get_virtual(tag->bst_cookie)) == 0)
		return (0);
	sva = trunc_page(sva);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	kmem_free(kernel_map, sva, size);
	return (0);
}
Example 10
/* mem bs */
int
ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;
	pt_entry_t	*pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM.  */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example 11
int
mlock (const void *addr, size_t len)
{
  mach_port_t hostpriv;
  vm_address_t page;
  error_t err;

  err = __get_privileged_ports (&hostpriv, NULL);
  if (err)
    return __hurd_fail (EPERM);

  page = trunc_page ((vm_address_t) addr);
  len = round_page ((vm_address_t) addr + len) - page;
  err = __vm_wire (hostpriv, __mach_task_self (), page, len,
		   VM_PROT_READ);
  __mach_port_deallocate (__mach_task_self (), hostpriv);

  return err ? __hurd_fail (err) : 0;
}
Example 12
int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;

	if (cmd->ev_len == 0)
		return (0);
	
	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return (0);
}
Example 13
static int
at91_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	vm_paddr_t pa, endpa;

	pa = trunc_page(bpa);
	if (pa >= 0xfff00000) {
		*bshp = pa - 0xf0000000 + 0xd0000000;
		return (0);
	}
	if (pa >= 0xdff00000)
		return (0);
	endpa = round_page(bpa + size);

	*bshp = (vm_offset_t)pmap_mapdev(pa, endpa - pa);
		       
	return (0);
}
Example 14
void
bs_remap_earlyboot(void)
{
	int i;
	vm_offset_t pa, spa;

	if (hw_direct_map)
		return;

	for (i = 0; i < earlyboot_map_idx; i++) {
		spa = earlyboot_mappings[i].addr;

		pa = trunc_page(spa);
		while (pa < spa + earlyboot_mappings[i].size) {
			pmap_kenter(pa,pa);
			pa += PAGE_SIZE;
		}
	}
}
Example 15
static void copy_bootstrap(void *e, struct exec_info *boot_exec_info)
{
	register vm_map_t	user_map = current_task()->map;
	int err;

    printf("loading...\n");
	if (err = exec_load(boot_read, read_exec, e, boot_exec_info))
		panic("Cannot load user-bootstrap image: error code %d", err);

#if	MACH_KDB
	/*
	 * Enter the bootstrap symbol table.
	 */

#if 0 /*XXX*/
	if (load_bootstrap_symbols)
	(void) X_db_sym_init(
		(char*) boot_start+lp->sym_offset,
		(char*) boot_start+lp->sym_offset+lp->sym_size,
		"bootstrap",
		(char *) user_map);
#endif

#if 0 /*XXX*/
	if (load_fault_in_text)
	  {
	    vm_offset_t lenp = round_page(lp->text_start+lp->text_size) -
	      		     trunc_page(lp->text_start);
	    vm_offset_t i = 0;

	    while (i < lenp)
	      {
		vm_fault(user_map, text_page_start +i, 
		        load_protect_text ?  
			 VM_PROT_READ|VM_PROT_EXECUTE :
			 VM_PROT_READ|VM_PROT_EXECUTE | VM_PROT_WRITE,
			 0,0,0);
		i = round_page (i+1);
	      }
	  }
#endif
#endif	/* MACH_KDB */
}
Example 16
/*
 * void _bus_space_unmap(bus_space_tag bst, bus_space_handle bsh,
 *                        bus_size_t size, bus_addr_t *adrp)
 *
 *   This function unmaps memory- or io-space mapped by the function
 *   _bus_space_map().  It works much the same as bus_space_unmap(),
 *   but it does not touch the kernel's built-in extents and instead
 *   returns the physical address of the bus space, for the
 *   convenience of the extra extent manager.
 */
void
_bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
    bus_addr_t *adrp)
{
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct bus physical address.
	 */
	if (t == X86_BUS_SPACE_IO) {
		bpa = bsh;
	} else if (t == X86_BUS_SPACE_MEM) {
		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
			goto ok;

		va = trunc_page(bsh);
		endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("_bus_space_unmap: overflow");
#endif

		(void) pmap_extract(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		pmap_kremove(va, endva - va);
		pmap_update(pmap_kernel());

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va);
	} else
		panic("bus_space_unmap: bad bus space tag");

ok:
	if (adrp != NULL)
		*adrp = bpa;
}
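The DIAGNOSTIC check above guards against bsh + size wrapping around the top of the address space, in which case round_page() would produce an end address below the start and the unmap would cover the wrong range. A tiny sketch of the failure mode, with assumed 32-bit addresses and 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

/* Assumed 32-bit bus addresses and 4 KiB pages. */
#define PAGE_SIZE	4096u
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	uint32_t bsh = 0xfffff800u;	/* hypothetical handle near the top of VA */
	uint32_t size = 0x1000u;	/* size that pushes the end past 2^32 */

	uint32_t va = trunc_page(bsh);
	uint32_t endva = round_page(bsh + size);	/* bsh + size wraps, endva ends up tiny */

	if (endva <= va)
		printf("overflow caught: va=%#x endva=%#x\n", va, endva);
	return 0;
}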
Example 17
/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;
	vm_offset_t wrap;
	vm_offset_t gpa;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;

	if (curthread->td_vmm) {
		if (vmm_vm_get_gpa(curproc, (register_t *)&gpa, (register_t) addr))
			panic("%s: could not get GPA\n", __func__);
		addr = (c_caddr_t) gpa;
	}

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(wrap), prot, TRUE);
	map->hint = save_hint;
	vm_map_unlock_read(map);
	
	return (rv == TRUE);
}
Example 18
/*
 * Look for an ACPI System Resource Affinity Table ("SRAT"),
 * allocate space for cpu information, and initialize globals.
 */
int
acpi_pxm_init(int ncpus, vm_paddr_t maxphys)
{
	unsigned int idx, size;
	vm_paddr_t addr;

	if (resource_disabled("srat", 0))
		return (-1);

	max_cpus = ncpus;
	last_cpu = -1;
	maxphyaddr = maxphys;
	srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_physaddr == 0)
		return (-1);

	/*
	 * Allocate data structure:
	 *
	 * Find the last physical memory region and steal some memory from
	 * it. This is done because at this point in the boot process
	 * malloc is still not usable.
	 */
	for (idx = 0; phys_avail[idx + 1] != 0; idx += 2);
	KASSERT(idx != 0, ("phys_avail is empty!"));
	idx -= 2;

	size =  sizeof(*cpus) * max_cpus;
	addr = trunc_page(phys_avail[idx + 1] - size);
	KASSERT(addr >= phys_avail[idx],
	    ("Not enough memory for SRAT table items"));
	phys_avail[idx + 1] = addr - 1;

	/*
	 * We cannot rely on PHYS_TO_DMAP because this code is also used in
	 * i386, so use pmap_mapbios to map the memory, this will end up using
	 * the default memory attribute (WB), and the DMAP when available.
	 */
	cpus = (struct cpu_info *)pmap_mapbios(addr, size);
	bzero(cpus, size);
	return (0);
}
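At this point in boot malloc is not yet usable, so the code carves the cpu array out of the top of the last phys_avail[] region and shrinks that region. A self-contained sketch of just that bookkeeping, with a hypothetical phys_avail[] layout and assumed 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

/* Assumed 4 KiB pages and a hypothetical phys_avail[] layout
 * (start0, end0, start1, end1, ..., terminated by zeroes). */
#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)

static uint64_t phys_avail[] = {
	0x00100000, 0x00800000,
	0x01000000, 0x40000000,
	0, 0
};

int
main(void)
{
	uint64_t size = 24 * 1024;	/* bytes needed for the cpu array */
	unsigned idx;

	/* Find the last non-empty region, as the loop above does. */
	for (idx = 0; phys_avail[idx + 1] != 0; idx += 2)
		;
	idx -= 2;

	/* Steal a page-aligned chunk off its top and shrink the region. */
	uint64_t addr = trunc_page(phys_avail[idx + 1] - size);
	phys_avail[idx + 1] = addr - 1;

	printf("stole %llu bytes at %#llx; region now ends at %#llx\n",
	    (unsigned long long)size, (unsigned long long)addr,
	    (unsigned long long)phys_avail[idx + 1]);
	return 0;
}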
Example 19
int
armv7_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int flag, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	pt_entry_t *pte;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* Some IO registers (ex. UART ports for console)
		   are mapped to fixed address by board specific
		   routine. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
			/* XXX: pmap_kenter_pa() also does PTE_SYNC(). a bit of
			 *      waste.
			 */
		}
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example 20
/*
 * kgdb_acc:
 *
 *	Determine if the mapping at va..(va+len) is valid.
 */
int
kgdb_acc(vaddr_t va, size_t len)
{
	vaddr_t last_va;
	pt_entry_t *pte;

	va = trunc_page(va);
	last_va = round_page(va + len);

	do  {
		if (va < VM_MIN_KERNEL_ADDRESS)
			return (0);
		pte = pmap_l3pte(pmap_kernel(), va, NULL);
		if (pte == NULL || pmap_pte_v(pte) == 0)
			return (0);
		va += PAGE_SIZE;
	} while (va < last_va);

	return (1);
}
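kgdb_acc() answers "is every page in [va, va + len) mapped?" by walking the kernel page table one page at a time. A userspace analogue of the same question can be asked with mincore(2), which fails with ENOMEM when the range contains an unmapped page; the Linux prototype (unsigned char vector) is assumed in this sketch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Nonzero if every page covering [addr, addr + len) is mapped. */
static int
range_mapped(void *addr, size_t len)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	uintptr_t start = (uintptr_t)addr & ~(pagesz - 1);	/* trunc_page */
	uintptr_t end = ((uintptr_t)addr + len + pagesz - 1) & ~(pagesz - 1); /* round_page */
	unsigned char vec[(end - start) / pagesz];

	/* mincore() fails with ENOMEM if part of the range is unmapped. */
	return mincore((void *)start, end - start, vec) == 0;
}

int
main(void)
{
	char *p = malloc(10000);

	printf("heap buffer mapped: %d\n", range_mapped(p, 10000));
	printf("low page mapped:    %d\n", range_mapped((void *)0x1000, 16));
	free(p);
	return 0;
}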
Example 21
/*
 * Allocate and map memory for devices that may need to be mapped before
 * Mach VM is running.
 *
 * This maps all pages containing [PHYS_ADDR:PHYS_ADDR + SIZE].
 * Contiguous requests to those pages will reuse the previously
 * established mapping.
 *
 * Warning: this leaks memory maps for now; do not use it yet for anything
 * other than Mach shutdown.
 */
vm_offset_t
io_map_cached(
	vm_offset_t	phys_addr,
	vm_size_t	size)
{
  static vm_offset_t base;
  static vm_size_t length;
  static vm_offset_t map;

  if (! map
      || (phys_addr < base)
      || (base + length < phys_addr + size))
    {
      base = trunc_page(phys_addr);
      length = round_page(phys_addr - base + size);
      map = io_map(base, length);
    }

  return map + (phys_addr - base);
}
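The static base/length/map triple implements a one-entry cache: a request is served from the existing window unless it starts below base or ends beyond base + length. A userspace sketch of the same reuse check, with io_map() replaced by a counting stub and assumed 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

static int remaps;

/* Stub standing in for io_map(); just counts calls and invents a VA. */
static uintptr_t
io_map_stub(uintptr_t base, size_t length)
{
	(void)length;
	remaps++;
	return 0xd0000000UL + (base & 0x0fffffffUL);
}

static uintptr_t
io_map_cached_demo(uintptr_t phys_addr, size_t size)
{
	static uintptr_t base, map;
	static size_t length;

	if (!map || phys_addr < base || base + length < phys_addr + size) {
		base = trunc_page(phys_addr);
		length = round_page(phys_addr - base + size);
		map = io_map_stub(base, length);
	}
	return map + (phys_addr - base);
}

int
main(void)
{
	io_map_cached_demo(0xfee00000UL, 0x20);	/* first use: maps a window   */
	io_map_cached_demo(0xfee00010UL, 0x10);	/* inside the window: reused  */
	io_map_cached_demo(0xfef00000UL, 0x20);	/* outside: maps a new window */
	printf("io_map called %d times\n", remaps);	/* prints 2 */
	return 0;
}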
Example 22
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;


#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, atop(faddr) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
			VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
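The colormask/UVM_KMF_COLORMATCH arguments deserve a note: on machines with virtually-indexed caches, the temporary kernel mapping should land on the same page "color" (the low bits of the virtual page number) as the user address, so both mappings index the same cache sets and do not alias. A minimal sketch of the color computation, assuming four page colors and 4 KiB pages; the addresses are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define COLORMASK	3u	/* assumed: four page colors */

static unsigned
page_color(uintptr_t va)
{
	return (unsigned)(va >> PAGE_SHIFT) & COLORMASK;
}

int
main(void)
{
	uintptr_t user_va = 0xbfff6000UL;	/* hypothetical user buffer */
	uintptr_t kern_va = 0xc0012000UL;	/* hypothetical kernel alias */

	printf("user color %u, kernel color %u (%s)\n",
	    page_color(user_va), page_color(kern_va),
	    page_color(user_va) == page_color(kern_va) ? "match" : "mismatch");
	return 0;
}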
Example 23
int
ifpga_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int cacheable, bus_space_handle_t *bshp)
{
    bus_addr_t startpa, endpa;
    vaddr_t va;
    const struct pmap_devmap *pd;
    bus_addr_t pa = bpa + (bus_addr_t) t;

    if ((pd = pmap_devmap_find_pa(pa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (pa - pd->pd_pa);
        return 0;
    }

    /* Round the allocation to page boundaries */
    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    /* Get some VM.  */
    va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
                      UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    if (va == 0)
        return ENOMEM;

    /* Store the bus space handle */
    *bshp = va + (bpa & PGOFSET);

    /* Now map the pages */
    /* The cookie is the physical base address for the I/O area */
    while (startpa < endpa) {
        /* XXX pmap_kenter_pa maps pages cacheable -- not what
           we want.  */
        pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
                   VM_PROT_READ | VM_PROT_WRITE, 0);
        va += PAGE_SIZE;
        startpa += PAGE_SIZE;
    }
    pmap_update(pmap_kernel());

    return 0;
}
Example 24
static int
imx_pcic_mem_map(pcmcia_chipset_handle_t pch, int kind, bus_addr_t card_addr,
    bus_size_t size, struct pcmcia_mem_handle *pmh, bus_size_t *offsetp,
    int *windowp)
{
	struct imx_pcic_socket *so = (struct imx_pcic_socket *)pch;
	int error;
	bus_addr_t pa;
 
printf("%s: card_addr %lx\n", __func__, card_addr);
	pa = trunc_page(card_addr);
	*offsetp = card_addr - pa;
printf("%s: offset %lx\n", __func__, *offsetp);
	size = round_page(card_addr + size) - pa;
	pmh->realsize = size;

	pa += IMX_PCIC_SOCKET_BASE;
	pa += IMX_PCIC_SOCKET_OFFSET * so->socket;
printf("%s: pa %lx\n", __func__, pa);
printf("%s: kind %x\n", __func__, kind);

	switch (kind & ~PCMCIA_WIDTH_MEM_MASK) {
	case PCMCIA_MEM_ATTR:   
		pa += IMX_PCIC_ATTR_OFFSET;
		break;
	case PCMCIA_MEM_COMMON:
		pa += IMX_PCIC_COMMON_OFFSET;
		break;
	default:
		panic("imx_pcic_mem_map: bogus kind");
	}

printf("%s: pa %lx\n", __func__, pa);
Debugger();
	error = bus_space_map(so->sc->sc_iot, pa, size, 0, &pmh->memh);
	if (error)
		return error;

	*windowp = (int)pmh->memh;
	return 0;
}
Example 25
/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}
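The only subtlety in the reallocation above is the copy length: it must be the smaller of the old and the new size, with the old size taken from the guard metadata via v2sizep(). A plain-malloc sketch of the same allocate-copy-free flow (the memguard bookkeeping itself is not reproduced):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *
realloc_copy_min(void *addr, size_t old_size, size_t new_size)
{
	void *newaddr = malloc(new_size);

	if (newaddr == NULL)
		return NULL;			/* old block left untouched */
	memcpy(newaddr, addr, old_size < new_size ? old_size : new_size);
	free(addr);
	return newaddr;
}

int
main(void)
{
	char *p = malloc(8);

	memcpy(p, "guarded", 8);
	p = realloc_copy_min(p, 8, 4);		/* shrink: copies only 4 bytes */
	printf("%.4s\n", p);
	free(p);
	return 0;
}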
Example 26
void
mbus_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
{
	u_long sva, eva;

	sva = trunc_page(bsh);
	eva = round_page(bsh + size);

#ifdef DIAGNOSTIC
	if (eva <= sva)
		panic("bus_space_unmap: overflow");
#endif

	pmap_kremove(sva, eva - sva);

	if (extent_free(hppa_ex, bsh, size, EX_NOWAIT)) {
		printf("bus_space_unmap: ps 0x%lx, size 0x%lx\n",
		    bsh, size);
		printf("bus_space_unmap: can't free region\n");
	}
}
Example 27
void
net_filter_free(filter_fct_t fp, unsigned int len)
{
    struct exec_page_header *p, **linkp;
    int offset, blockno, nblocks, blockmask;

    nblocks = (len + BLOCK_SIZE - 1) / BLOCK_SIZE;
    p = (struct exec_page_header *) trunc_page(fp);
    offset = (vm_address_t) fp - (vm_address_t) (p + 1);
    assert(offset % BLOCK_SIZE == 0);
    blockno = offset / BLOCK_SIZE;
    blockmask = ((1 << nblocks) - 1) << blockno;
    assert((p->usedmap & blockmask) == blockmask);
    p->usedmap &= ~blockmask;
    if (p->usedmap == 0) {
	for (linkp = &exec_pages; (*linkp) != p; linkp = &(*linkp)->next)
	    assert(*linkp != NULL);
	*linkp = p->next;
	kmem_free(kernel_map, (vm_offset_t) p, PAGE_SIZE);
    }
}
Example 28
/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pmap;
	vaddr_t kva;
	vsize_t off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	pmap = vm_map_pmap(phys_map);
	pmap_remove(pmap, kva, kva + len);
	pmap_update(pmap);
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example 29
static int
at91_bs_map(bus_space_tag_t tag, bus_addr_t bpa, bus_size_t size, int flags,
            bus_space_handle_t *bshp)
{
    vm_paddr_t pa, endpa;

    pa = trunc_page(bpa);
    if (pa >= AT91_PA_BASE + 0xff00000) {
        *bshp = bpa - AT91_PA_BASE + AT91_BASE;
        return (0);
    }
    if (pa >= AT91_BASE + 0xff00000) {
        *bshp = bpa;
        return (0);
    }
    endpa = round_page(bpa + size);

    *bshp = (vm_offset_t)pmap_mapdev(pa, endpa - pa) + (bpa - pa);

    return (0);
}
Example 30
/*
 * Map a range of user addresses into the kernel.
 */
vaddr_t
vmaprange(struct proc *p, vaddr_t uaddr, vsize_t len, int prot)
{
	vaddr_t faddr, taddr, kaddr;
	vsize_t off;
	paddr_t pa;

	faddr = trunc_page(uaddr);
	off = uaddr - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	kaddr = taddr + off;
	for (; len > 0; len -= PAGE_SIZE) {
		(void) pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    faddr, &pa);
		pmap_kenter_pa(taddr, pa, prot, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	return (kaddr);
}