/*
 * bt_alloc: take one boundary tag off vm's per-arena free-tag list,
 * refilling the list via bt_refill() whenever it is at or below the
 * BT_MINRESERVE watermark (unless this allocation is itself populating
 * the reserve, flagged by VM_POPULATING).
 *
 * => returns the tag, or NULL only for VM_NOSLEEP callers when the
 *    reserve cannot be replenished.
 * => called with vm unlocked; takes and releases VMEM_LOCK internally.
 * => NOTE(review): with VM_POPULATING set the loop is skipped, so the
 *    free list is assumed non-empty in that case — LIST_FIRST() would
 *    otherwise return NULL and LIST_REMOVE would dereference it; confirm
 *    callers guarantee a populated reserve.
 */
static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		/* Drop the lock: bt_refill() takes it itself. */
		VMEM_UNLOCK(vm);
		if (bt_refill(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}
			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */
			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}
/*
 * bt_refill: top up vm's free boundary-tag list above BT_MINRESERVE.
 * Tags come first from the global static reserve, then from
 * vmem_btag_pool (sleeping there only if VM_SLEEP is set).
 *
 * => returns 0 on success, ENOMEM if the reserve is still short.
 * => called with vm unlocked.
 *
 * Fixes vs. previous revision:
 *  - the failure check now runs under VMEM_LOCK and before unlocking
 *    (the old code read vm->vm_nfreetags after VMEM_UNLOCK — an
 *    unsynchronized access);
 *  - failure is "still at or below the reserve watermark", not
 *    "exactly zero tags", matching the loop goal — otherwise a partial
 *    refill returned 0 and the caller could spin forever;
 *  - a NULL return from pool_get() always breaks the loop, so a NULL
 *    tag can never be inserted into the free list.
 */
static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		/* Already above the watermark; nothing to do. */
		VMEM_UNLOCK(vm);
		return 0;
	}

	/* First draw down the global static reserve. */
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	/*
	 * Still short: allocate from the pool, dropping the arena lock
	 * around each pool_get() since it may sleep (PR_WAITOK).
	 */
	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool,
		    (flags & VM_SLEEP) ? PR_WAITOK : PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL) {
			/* Never insert a NULL tag; give up. */
			break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		/* Checked under the lock, before releasing it. */
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	/*
	 * Also replenish the arenas that back the tag pool itself, so a
	 * later pool allocation cannot find their reserves empty.
	 * VM_POPULATING stops those calls from recursing back here
	 * through bt_alloc().  Their failures are non-fatal: our own
	 * reserve is already topped up.
	 */
	if (kmem_meta_arena != NULL) {
		(void)bt_refill(kmem_arena,
		    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING);
		(void)bt_refill(kmem_va_meta_arena,
		    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING);
		(void)bt_refill(kmem_meta_arena,
		    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING);
	}

	return 0;
}
/*
 * bt_refill: top up vm's free boundary-tag list above BT_MINRESERVE.
 * Tags come first from the global static reserve, then from
 * vmem_btag_pool without sleeping (PR_NOWAIT).
 *
 * => returns 0 on success, ENOMEM if the reserve is still at or below
 *    the watermark afterwards.
 * => called with vm unlocked; takes and releases VMEM_LOCK internally.
 */
static int
bt_refill(vmem_t *vm)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		/* Already above the watermark; nothing to do. */
		VMEM_UNLOCK(vm);
		return 0;
	}

	/* First draw down the global static reserve. */
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	/*
	 * Still short: allocate from the pool, dropping the arena lock
	 * around each pool_get().  PR_NOWAIT — this path must not sleep.
	 */
	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL) break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	/* Failure is judged under the lock, against the same watermark. */
	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	/*
	 * Also replenish the arenas that back the tag pool itself.  The
	 * nested calls terminate: a second call on the same arena hits the
	 * early "already above the watermark" return before recursing.
	 */
	if (kmem_meta_arena != NULL) {
		(void)bt_refill(kmem_arena);
		(void)bt_refill(kmem_va_meta_arena);
		(void)bt_refill(kmem_meta_arena);
	}

	return 0;
}
/*
 * vmem_rehash: replace vm's busy-tag hash table with one of newhashsize
 * buckets, re-hashing every busy tag into it.
 *
 * => returns 0 on success, ENOMEM if the new table cannot be allocated,
 *    EBUSY if the arena lock is contended (rehashing is opportunistic,
 *    so we never block on it).
 *
 * Fixes vs. previous revision:
 *  - the alloc/free element size is sizeof(struct vmem_hashlist), not
 *    sizeof a pointer — the old expression only happened to work
 *    because a LIST_HEAD is exactly one pointer wide;
 *  - the loop indices are size_t, matching newhashsize/oldhashsize
 *    (the old int index was a signed/unsigned mismatch and truncated
 *    on very large tables).
 */
static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	size_t i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist = xmalloc(sizeof(struct vmem_hashlist) * newhashsize,
	    flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	/*
	 * Move every busy tag over.  bt_insbusy() hashes against the new
	 * vm_hashlist/vm_hashsize installed above.
	 */
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	/* vm_hash0 is the embedded bootstrap table — never freed. */
	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist) * oldhashsize);
	}

	return 0;
}
/*
 * bt_freetrim: shrink vm's cached free-tag list down to freelimit
 * entries.  Tags that came from the static boot-time array go back on
 * the global static freelist; dynamically allocated tags go back to
 * vmem_btag_pool.
 *
 * Fix vs. previous revision: the upper bound of the static-array test
 * is the element count (sizeof/sizeof[0]).  The old expression
 * `static_bts + sizeof(static_bts)` added the *byte* size as an
 * *element* count, pointing sizeof(bt_t)-times past the array — an
 * out-of-bounds pointer computation that misclassified pool-allocated
 * tags as static, leaking them onto the static freelist instead of
 * returning them to the pool.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		if (bt >= static_bts &&
		    bt < static_bts +
		    sizeof(static_bts) / sizeof(static_bts[0])) {
			/* Static tag: back to the global reserve. */
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			/* Pool tag: defer the pool_put() (see below). */
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}
	VMEM_UNLOCK(vm);

	/* Return pool tags with the arena lock dropped. */
	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		pool_put(&vmem_btag_pool, t);
	}
}
/*
 * bt_free: return one boundary tag to vm's per-arena free-tag list.
 * The tag is cached on the list rather than released to the backing
 * pool; bt_freetrim() trims the cache back to a limit separately.
 *
 * => called with vm unlocked.
 */
static void
bt_free(vmem_t *vm, bt_t *bt)
{
	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	VMEM_UNLOCK(vm);
}
/*
 * bt_alloc: take one boundary tag off vm's per-arena free-tag list,
 * refilling the list via bt_refill() whenever it is at or below the
 * BT_MINRESERVE watermark (unless this allocation is itself populating
 * the reserve, flagged by VM_POPULATING).
 *
 * => returns the tag, or NULL only for VM_NOSLEEP callers when the
 *    reserve cannot be replenished.
 * => called with vm unlocked.
 *
 * Fix vs. previous revision: bt_refill() is still called with
 * VM_NOSLEEP (so the refill itself never sleeps), but its failure no
 * longer makes a VM_SLEEP caller fail — previously any transient tag
 * shortage returned NULL even to callers that asked to wait, violating
 * VM_SLEEP semantics.  Sleep-capable callers now pause briefly and
 * retry, matching the sibling bt_alloc() implementation in this file.
 */
static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}
			/*
			 * There is no single event to wait on — several
			 * different things could make a retry succeed —
			 * so nudge the pagedaemon, sleep for one tick,
			 * and try again.
			 */
			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}
/*
 * vmem_add1: donate the span [addr, addr + size) to arena vm.
 *
 * Two boundary tags are needed: one of spanbttype (BT_TYPE_SPAN or
 * BT_TYPE_SPAN_STATIC) to record the span itself, and one BT_TYPE_FREE
 * tag covering the whole span as allocatable space.
 *
 * => returns 0 on success, ENOMEM if either tag cannot be allocated
 *    (in which case nothing is changed).
 * => exactly one of VM_SLEEP/VM_NOSLEEP must be set in flags.
 */
static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	/* Get both tags up front so failure leaves the arena untouched. */
	btspan = bt_alloc(vm, flags);
	if (btspan == NULL)
		return ENOMEM;
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	/* Publish: span tag first, then the free tag right after it. */
	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}