Code Example #1
File: busypage.c Project: 2asoft/freebsd
void
rumptest_busypage(void)
{
	struct lwp *newl;
	int rv;

	cv_init(&tcv, "napina");

	uobj = uao_create(1, 0);
	mutex_enter(uobj->vmobjlock);
	testpg = uvm_pagealloc(uobj, 0, NULL, 0);
	mutex_exit(uobj->vmobjlock);
	if (testpg == NULL)
		panic("couldn't create vm page");

	rv = kthread_create(PRI_NONE, KTHREAD_MUSTJOIN | KTHREAD_MPSAFE, NULL,
	    thread, NULL, &newl, "jointest");
	if (rv)
		panic("thread creation failed: %d", rv);

	mutex_enter(uobj->vmobjlock);
	while (!threadrun)
		cv_wait(&tcv, uobj->vmobjlock);

	uvm_page_unbusy(&testpg, 1);
	mutex_exit(uobj->vmobjlock);

	rv = kthread_join(newl);
	if (rv)
		panic("thread join failed: %d", rv);

}
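The test above relies on file-scope state (tcv, uobj, testpg, threadrun) and a companion thread() that are defined elsewhere in busypage.c. Below is a minimal sketch of that missing context, reconstructed from the calls the test makes; the thread body, in particular the PG_WANTED/UVM_UNLOCK_AND_WAIT wait protocol, is an assumption and not quoted source.

static struct uvm_object *uobj;
static struct vm_page *testpg;
static kcondvar_t tcv;
static bool threadrun = false;

/*
 * Companion thread (sketch): announce that it is running, then sleep
 * until the main thread calls uvm_page_unbusy() on the test page.
 */
static void
thread(void *arg)
{

	mutex_enter(uobj->vmobjlock);
	threadrun = true;
	cv_signal(&tcv);

	/* wait for the busy page; the macro drops vmobjlock for us */
	testpg->flags |= PG_WANTED;
	UVM_UNLOCK_AND_WAIT(testpg, uobj->vmobjlock, false, "tw", 0);

	kthread_exit(0);
}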
Code Example #2
File: uvm_km.c Project: repos-holder/openbsd-patches
void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve already allocated kernel space 
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (base != start && uvm_map(&kernel_map_store, &base, start - base,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");
	
	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}
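uvm_map() takes a single packed word for protection, maximum protection, inheritance, advice, and behavior flags; UVM_MAPFLAG builds it. A sketch of the packing, with illustrative bit positions (the authoritative definition lives in <uvm/uvm_extern.h>; the UVM_INH_* and UVM_FLAG_* constants already carry their bit positions in their values):

/*
 * Sketch of UVM_MAPFLAG: current protection in the low bits, max
 * protection and advice shifted above it, inheritance and the
 * UVM_FLAG_* bits OR'ed in.  Bit positions are illustrative only.
 */
#define UVM_MAPFLAG(prot, maxprot, inh, advice, flags) \
	((prot) | ((maxprot) << 8) | (inh) | ((advice) << 12) | (flags))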
Code Example #3
int
exec_sigcode_map(struct proc *p, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (e->e_sigobject == NULL) {
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);	/* permanent reference */

		va = vm_map_min(kernel_map);	/* hint */
		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
		    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}
		memcpy((void *)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	/* Just a hint to uvm_mmap where to put it. */
	p->p_sigcode = uvm_map_hint(p, VM_PROT_READ|VM_PROT_EXECUTE);
	uao_reference(e->e_sigobject);
	if (uvm_map(&p->p_vmspace->vm_map, &p->p_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	return (0);
}
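exec_sigcode_map() maps round_page(sz) bytes because map sizes must be whole pages. The page-rounding macros are standard BSD fare; for reference, their usual shape (a sketch; the real macros come from the kernel's param.h, where PAGE_MASK is PAGE_SIZE - 1):

/* Typical BSD page-rounding macros; shown here only for reference. */
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_page(x)	((x) & ~PAGE_MASK)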
Code Example #4
File: kern_exec.c Project: orumin/openbsd-efivars
int
exec_sigcode_map(struct process *pr, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (e->e_sigobject == NULL) {
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);	/* permanent reference */

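		/*
		 * Unlike the previous example, va is not seeded with a
		 * hint: without UVM_FLAG_FIXED this tree's uvm_map()
		 * picks the address itself, so va is output-only here.
		 */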
		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}
		memcpy((void *)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	pr->ps_sigcode = 0; /* no hint */
	uao_reference(e->e_sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	return (0);
}
Code Example #5
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->gemo_shm_uao = uao_create(size, 0);
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);
#endif

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
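The backing objects created in drm_gem_object_init() need a symmetric teardown when the GEM object dies. A sketch of that release path under the same #ifdef split; the function name and the uvm_obj_destroy() pairing are assumptions inferred from the init path above, not quoted source:

void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifdef __NetBSD__
	/*
	 * Sketch: drop the anonymous backing object and tear down the
	 * uvm_object set up by uvm_obj_init() in the init path.
	 */
	if (obj->gemo_shm_uao != NULL)
		uao_detach(obj->gemo_shm_uao);
	uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
	if (obj->filp != NULL)
		fput(obj->filp);
#endif
}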
Code Example #6
File: uvm_init.c Project: dzavalishin/oskit
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */

	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */

	memset(&uvm, 0, sizeof(uvm));
#ifndef OSKIT
	averunnable.fscale = FSCALE;
#endif

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */

	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */

	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.   the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */

	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.   after this call the
	 * kernel memory allocator (malloc) can be used.
	 */

	kmeminit();

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory systems (both amap and anons)
	 */

	amap_init();		/* init amap module */
	uvm_anon_init();	/* allocate initial anons */

	/*
	 * the VM system is now up!  now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */

	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * done!
	 */

	return;
}
Code Example #7
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */

	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */

	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */

	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */

	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */

	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.   the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */

	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.   after this call the
	 * kernel memory allocator (malloc) can be used.
	 */

	uvm_km_page_init();
	kmeminit();
#if !defined(__HAVE_PMAP_DIRECT)
	kthread_create_deferred(uvm_km_createthread, NULL);
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */

	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */

	amap_init();		/* init amap module */

	/*
	 * the VM system is now up!  now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */

	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve unmapped guard pages around the malloc/pool "dead beef"
	 * poison addresses, so that a use-after-free access through a
	 * poisoned pointer faults instead of touching live kernel memory
	 */
#ifdef DEADBEEF0
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF1);
#endif
	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();
}
Code Example #8
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */

	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */

	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;
	uvm_amap_init();

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */

	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */

	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object.
	 */

	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.   the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */

	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.   after this call the
	 * kernel memory allocator (malloc) can be used. this includes
	 * setting up the kmem_map.
	 */

	kmeminit();

#ifdef DEBUG
	debug_init();
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */

	uvm_pager_init();

	/*
	 * step 8: init the uvm_loan() facility.
	 */

	uvm_loan_init();

	/*
	 * Initialize pools.  This must be done before anyone manipulates
	 * any vm_maps because we use a pool for some map entry structures.
	 */

	pool_subsystem_init();

	/*
	 * init slab memory allocator kmem(9).
	 */

	kmem_init();

	/*
	 * the VM system is now up!  now that kmem is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */

	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	uvmpdpol_reinit();

	/*
	 * init anonymous memory systems
	 */

	uvm_anon_init();

	uvm_uarea_init();

	/*
	 * init readahead module
	 */

	uvm_ra_init();
}
Code Example #9
File: sysv_shm.c Project: appleorange1/bitrig
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;
	
	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}
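The object created here is only mapped into user space later, when a process calls shmat(2). A sketch of how the attach path typically consumes it; attach_va, prot, flags and error stand in for values sys_shmat() derives from its arguments (hypothetical names, not quoted source):

	/* sketch of the sys_shmat() side (hypothetical fragment) */
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    MAP_INHERIT_SHARE, MADV_RANDOM, flags));
	if (error) {
		uao_detach(shm_handle->shm_object);	/* undo on failure */
		return (error);
	}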
Code Example #10
File: uvm_init.c Project: sofuture/bitrig
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */

	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: set up stats.
	 */
	averunnable.fscale = FSCALE;

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */

	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */

	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */

	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.   the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */

	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.   after this call the
	 * kernel memory allocator (malloc) can be used.
	 */

	kmeminit();

	/*
	 * step 6.5: init the dma allocator, which is backed by pools.
	 */
	dma_alloc_init();

	/*
	 * step 7: init all pagers and the pager_map.
	 */

	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */

	amap_init();		/* init amap module */

	/*
	 * step 9: init uvm_km_page allocator memory.
	 */
	uvm_km_page_init();

	/*
	 * the VM system is now up!  now that malloc is up we can
	 * enable paging of kernel objects.
	 */

	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve unmapped guard pages around the malloc/pool "dead beef"
	 * poison addresses, so that a use-after-free access through a
	 * poisoned pointer faults instead of touching live kernel memory
	 */
#ifdef DEADBEEF0
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1);
#endif
	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();

#ifndef SMALL_KERNEL
	/*
	 * Switch kernel and kmem_map over to a best-fit allocator,
	 * instead of walking the tree.
	 */
	uvm_map_set_uaddr(kernel_map, &kernel_map->uaddr_any[3],
	    uaddr_bestfit_create(vm_map_min(kernel_map),
	    vm_map_max(kernel_map)));
	uvm_map_set_uaddr(kmem_map, &kmem_map->uaddr_any[3],
	    uaddr_bestfit_create(vm_map_min(kmem_map),
	    vm_map_max(kmem_map)));
#endif /* !SMALL_KERNEL */
}
Code Example #11
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * Ensure that the hardware set the page size, zero the UVM structure.
	 */

	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;

	/*
	 * Init the page sub-system.  This includes allocating the vm_page
	 * structures, and setting up all the page queues (and locks).
	 * Available memory will be put in the "free" queue, kvm_start and
	 * kvm_end will be set to the area of kernel virtual memory which
	 * is available for general use.
	 */

	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * Init the map sub-system.
	 */

	uvm_map_init();

	/*
	 * Setup the kernel's virtual memory data structures.  This includes
	 * setting up the kernel_map/kernel_object.
	 * Bootstrap all kernel memory allocators.
	 */

	uao_init();
	uvm_km_bootstrap(kvm_start, kvm_end);

	/*
	 * Setup uvm_map caches and init the amap.
	 */

	uvm_map_init_caches();
	uvm_amap_init();

	/*
	 * Init the pmap module.  The pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */

	pmap_init();

	/*
	 * Make kernel memory allocators ready for use.
	 * After this call the pool/kmem memory allocators can be used.
	 */

	uvm_km_init();

#ifdef DEBUG
	debug_init();
#endif

	/*
	 * Init all pagers and the pager_map.
	 */

	uvm_pager_init();

	/*
	 * Initialize the uvm_loan() facility.
	 */

	uvm_loan_init();

	/*
	 * Init emap subsystem.
	 */

	uvm_emap_sysinit();

	/*
	 * The VM system is now up!  Now that kmem is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */

	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	uvmpdpol_reinit();

	/*
	 * Init anonymous memory systems.
	 */

	uvm_anon_init();

	uvm_uarea_init();

	/*
	 * Init readahead mechanism.
	 */

	uvm_ra_init();
}