/* * Functions to allocate node id's starting from 1. Based on vmem routines. * The vmem arena is extended in NM_INOQUANT chunks. */ uint64_t namenodeno_alloc(void) { uint64_t nno; mutex_enter(&nm_inolock); nno = (uint64_t)(uintptr_t) vmem_alloc(nm_inoarena, 1, VM_NOSLEEP + VM_FIRSTFIT); if (nno == 0) { (void) vmem_add(nm_inoarena, (void *)(vmem_size(nm_inoarena, VMEM_ALLOC | VMEM_FREE) + 1), NM_INOQUANT, VM_SLEEP); nno = (uint64_t)(uintptr_t) vmem_alloc(nm_inoarena, 1, VM_SLEEP + VM_FIRSTFIT); ASSERT(nno != 0); } mutex_exit(&nm_inolock); ASSERT32(nno <= ULONG_MAX); return (nno); }
/*
 * Initialize (and optionally allocate) a vmem arena.
 *
 * vm         - caller-supplied arena storage, or NULL to allocate one here
 * name       - arena name, copied (truncated) into vm_name
 * base/size  - initial span added to the arena when size != 0
 * quantum    - allocation unit; must be a power of two (checked via
 *              SIZE2ORDER/ORDER2SIZE round-trip KASSERT below)
 * importfn/releasefn/arg - backing-source callbacks and their cookie
 * qcache_max - quantum-cache limit (used only when QCACHE is compiled in)
 * flags      - exactly one of VM_SLEEP/VM_NOSLEEP, plus modifiers
 * ipl        - interrupt priority level for the arena lock
 *
 * Returns the initialized arena, or NULL on allocation/span failure.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	/* Exactly one of VM_SLEEP / VM_NOSLEEP must be set. */
	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	/* Allocate the arena itself if the caller did not supply one. */
	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	/* quantum must be a power of two for this mask/shift pair to work. */
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}

	/* Start with the embedded single-bucket hash; grown later as needed. */
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			/*
			 * NOTE(review): vmem_destroy1() is assumed to tear
			 * down whatever was initialized above, including
			 * freeing vm when it was xmalloc'd here — confirm
			 * it is safe for a caller-supplied vm as well.
			 */
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	/* Publish the arena on the global list only once fully initialized. */
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}
/*
 * Widen an existing id space so that it also hands out ids in
 * (low, high].  The span is added to the arena with VM_SLEEP, so the
 * call cannot fail; the vmem return value is deliberately discarded.
 */
void
id_space_extend(id_space_t *isp, id_t low, id_t high)
{
	void *newbase = (void *)(uintptr_t)(low + 1);

	(void) vmem_add(isp, newbase, high - low, VM_SLEEP);
}