/*
 * bt_refill: replenish vm's private reserve of boundary tags until it
 * holds more than BT_MINRESERVE entries.  Tags are taken first from the
 * static bootstrap freelist, then from vmem_btag_pool (sleeping only if
 * VM_SLEEP is set in flags).
 *
 * => Returns 0 on success, ENOMEM if the reserve could not be filled.
 * => Called with vm unlocked; takes and drops VMEM_LOCK internally.
 */
static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	/* Fast path: reserve is already sufficient. */
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return 0;
	}

	/* Grab statically allocated bootstrap tags from the global list. */
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	/*
	 * Top up from the pool.  The vmem lock is dropped across pool_get()
	 * because the pool may itself need to allocate.
	 */
	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool,
		    (flags & VM_SLEEP) ? PR_WAITOK : PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL && (flags & VM_SLEEP) == 0)
			break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	/*
	 * Fix: decide success/failure while still holding the vmem lock
	 * (vm_nfreetags was previously read after VMEM_UNLOCK), and fail
	 * whenever the reserve is still at or below BT_MINRESERVE.  The
	 * old "== 0" test reported a partially filled reserve as success,
	 * which let bt_alloc()'s refill loop spin forever on VM_NOSLEEP.
	 */
	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	/* Also refill the arenas that tag/metadata allocations come from. */
	if (kmem_meta_arena != NULL) {
		bt_refill(kmem_arena,
		    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING);
		bt_refill(kmem_va_meta_arena,
		    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING);
		bt_refill(kmem_meta_arena,
		    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING);
	}

	return 0;
}
/*
 * bt_refill: replenish vm's private reserve of boundary tags until it
 * holds more than BT_MINRESERVE entries, first from the static bootstrap
 * freelist, then from vmem_btag_pool without sleeping (PR_NOWAIT).
 *
 * => Returns 0 on success, ENOMEM if the reserve remains at or below
 *    BT_MINRESERVE.
 * => Called with vm unlocked; takes and drops VMEM_LOCK internally.
 */
static int
bt_refill(vmem_t *vm)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	/* Fast path: reserve is already sufficient. */
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return 0;
	}

	/* Grab statically allocated bootstrap tags from the global list. */
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	/*
	 * Top up from the pool.  The vmem lock is dropped across pool_get()
	 * because the pool may itself need to allocate.
	 */
	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL)
			break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	/* Decide success/failure while still holding the vmem lock. */
	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}

	VMEM_UNLOCK(vm);

	/* Also refill the arenas that tag/metadata allocations come from. */
	if (kmem_meta_arena != NULL) {
		(void)bt_refill(kmem_arena);
		(void)bt_refill(kmem_va_meta_arena);
		(void)bt_refill(kmem_meta_arena);
	}

	return 0;
}
/*
 * bt_alloc: take one boundary tag from vm's private reserve, refilling
 * the reserve first if it is at or below BT_MINRESERVE.
 *
 * => With VM_POPULATING set (bt_refill() is itself allocating), the
 *    reserve may be dipped into without refilling.
 * => With VM_NOSLEEP, may return NULL on refill failure; otherwise
 *    retries until a tag is available.
 */
static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		/* Drop the lock across the refill attempt. */
		VMEM_UNLOCK(vm);
		if (bt_refill(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}

			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */
			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}

	/* Reserve is non-empty (or we are populating); take the head tag. */
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}
/*
 * bt_alloc: take one boundary tag from vm's private reserve, refilling
 * the reserve first if it is at or below BT_MINRESERVE.
 *
 * => With VM_POPULATING set (bt_refill() is itself allocating), the
 *    reserve may be dipped into without refilling.
 * => With VM_NOSLEEP, may return NULL on refill failure; otherwise
 *    retries until a tag is available.
 */
static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		/* Drop the lock across the refill attempt. */
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			/*
			 * Fix: only VM_NOSLEEP callers may see failure.
			 * The old code returned NULL unconditionally here,
			 * making sleeping (VM_SLEEP) allocations fail
			 * spuriously when the tag pool was momentarily
			 * exhausted.  There is no single event to wait on
			 * (several different things could free up a tag),
			 * so nudge the pagedaemon, sleep briefly, and
			 * retry.  This should be a very rare case.
			 */
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}
			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}

	/* Reserve is non-empty (or we are populating); take the head tag. */
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}
/*
 * vmem_init: initialize a vmem arena.
 *
 * => vm may be NULL, in which case the arena structure is allocated with
 *    xmalloc() (honouring flags); otherwise the caller-supplied storage
 *    is initialized in place.
 * => Exactly one of VM_SLEEP/VM_NOSLEEP must be set in flags (asserted).
 * => quantum must be a power of two > 0 (asserted via SIZE2ORDER round
 *    trip).
 * => importfn/releasefn/arg describe the backing span source; base/size,
 *    if size != 0, seed the arena with an initial span via vmem_add().
 * => Returns the initialized arena, or NULL on allocation/seeding failure.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	/* Allocate the arena structure itself if the caller did not. */
	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	/* quantum must be a power of two for the mask/shift to be valid. */
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	/* Start with the embedded single-bucket hash; it can grow later. */
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	/* Seed the arena with an initial span, if one was given. */
	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	/* Publish the arena on the global list. */
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}