/*
 * Reserve a virtual address range of region_size bytes from the kernel
 * section allocator. Returns 0 if the kernel section space is exhausted.
 */
static uintptr_t get_ksect_addr(size_t region_size)
{
    uintptr_t retval;

    if (l_ksect_next.mtx_type == MTX_TYPE_UNDEF)
        mtx_init(&l_ksect_next, MTX_TYPE_SPIN, 0);
    mtx_lock(&l_ksect_next);

    /* Align large allocations to section boundaries and smaller ones to
     * coarse page boundaries. */
    if (region_size >= MMU_PGSIZE_SECTION)
        retval = memalign_size(ksect_next, MMU_PGSIZE_SECTION);
    else
        retval = memalign_size(ksect_next, MMU_PGSIZE_COARSE);

    if (retval > configKSECT_END) {
        mtx_unlock(&l_ksect_next);
        return 0;
    }
    ksect_next = retval + region_size;

    mtx_unlock(&l_ksect_next);

    return retval;
}
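/*
 * Illustrative sketch, not part of the original source: memalign_size() is
 * assumed here to round its first argument up to the next multiple of a
 * power-of-two alignment such as MMU_PGSIZE_COARSE or MMU_PGSIZE_SECTION.
 * The name memalign_size_sketch and this definition are assumptions for
 * illustration only; the real helper may differ.
 */
#if 0
static size_t memalign_size_sketch(size_t size, size_t align)
{
    /* Round up, assuming align is a power of two. */
    return (size + align - 1) & ~(align - 1);
}
#endif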
struct buf * geteblk(size_t size)
{
    size_t iblock; /* Block index of the allocation */
    const size_t orig_size = size;
    size = memalign_size(size, MMU_PGSIZE_COARSE);
    const size_t pcount = VREG_PCOUNT(size);
    struct vregion * vreg;
    struct buf * retval = NULL;

    if (get_iblocks(&iblock, pcount, &vreg))
        return NULL;

    retval = kcalloc(1, sizeof(struct buf));
    if (!retval)
        return NULL; /* Can't allocate the buf struct */

    mtx_init(&(retval->lock), MTX_TYPE_SPIN | MTX_TYPE_TICKET, 0);

    /* Update the target struct */
    retval->b_mmu.paddr = VREG_I2ADDR(vreg, iblock);
    retval->b_mmu.num_pages = pcount;
    retval->b_data = retval->b_mmu.paddr; /* Currently this way as
                                           * kernel space is 1:1 */
    retval->b_bufsize = VREG_BYTESIZE(pcount);
    retval->b_bcount = orig_size;
    retval->b_flags = B_BUSY;
    retval->refcount = 1;
    retval->allocator_data = vreg;
    retval->vm_ops = &vra_ops;
    retval->b_uflags = VM_PROT_READ | VM_PROT_WRITE;
    vm_updateusr_ap(retval);

    vreg->count += pcount;

    /* Update stats */
    vmem_used += size;
    last_vreg = vreg;

    return retval;
}
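/*
 * Hypothetical usage sketch, not from the original source: allocate an
 * anonymous kernel buffer with geteblk() and access it through b_data.
 * The example function name and the 4 KiB size are assumptions; releasing
 * the buffer is expected to go through its refcount/vm_ops and is not
 * shown here.
 */
#if 0
static void geteblk_example(void)
{
    struct buf * bp = geteblk(4096);

    if (!bp)
        return; /* Allocation failed. */

    /* b_data is directly addressable because kernel space is mapped 1:1. */
    memset((void *)bp->b_data, 0, bp->b_bcount);
}
#endif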
void allocbuf(struct buf * bp, size_t size)
{
    const size_t orig_size = size;
    const size_t new_size = memalign_size(size, MMU_PGSIZE_COARSE);
    const size_t pcount = VREG_PCOUNT(new_size);
    const ssize_t blockdiff = pcount - VREG_PCOUNT(bp->b_bufsize);
    size_t iblock;
    size_t bcount = VREG_PCOUNT(bp->b_bufsize);
    struct vregion * vreg = bp->allocator_data;

    if (!vreg)
        panic("bp->allocator_data not set");

    if (bp->b_bufsize == new_size)
        return;

    mtx_spinlock(&bp->lock);

    if (blockdiff > 0) { /* Grow the buffer */
        const size_t sblock = VREG_PCOUNT(bp->b_data - vreg->kaddr) + bcount;

        if (bitmap_block_search_s(sblock, &iblock, blockdiff, vreg->map,
                                  vreg->size) == 0) {
            if (iblock == sblock) {
                /* The blocks right after the buffer are free,
                 * so grow in place. */
                bitmap_block_update(vreg->map, 1, sblock, blockdiff);
            } else { /* Must allocate a new region */
                struct vregion * nvreg;
                uintptr_t new_addr;
                const size_t old_iblock =
                    VREG_PCOUNT(bp->b_data - vreg->kaddr);

                if (get_iblocks(&iblock, pcount, &nvreg))
                    panic("OOM during allocbuf()");

                new_addr = VREG_I2ADDR(nvreg, iblock);
                memcpy((void *)(new_addr), (void *)(bp->b_data),
                       bp->b_bufsize);
                bp->b_mmu.paddr = new_addr;
                bp->b_data = bp->b_mmu.paddr; /* Currently this way as
                                               * kernel space is 1:1 */
                bp->allocator_data = nvreg;

                /* Free blocks from the old vreg */
                bitmap_block_update(vreg->map, 0, old_iblock, bcount);
                vreg = nvreg;
            }
        }
    } else {
#if 0
        /*
         * We don't usually want to shrink because it's hard to get the
         * memory back later.
         */
        const size_t sblock = VREG_PCOUNT(bp->b_data - vreg->kaddr) + bcount +
                              blockdiff;

        bitmap_block_update(vreg->map, 0, sblock, -blockdiff);
#endif
    }

    vreg->count += blockdiff;
    bp->b_bufsize = new_size;
    bp->b_bcount = orig_size;
    bp->b_mmu.num_pages = pcount;

    mtx_unlock(&bp->lock);
}
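/*
 * Hypothetical usage sketch, not from the original source: grow a buffer
 * returned by geteblk() with allocbuf(). The sizes and the example function
 * name are assumptions for illustration. allocbuf() extends the buffer in
 * place when the bitmap blocks following it are free and otherwise copies
 * the contents into a freshly allocated region.
 */
#if 0
static void allocbuf_example(void)
{
    struct buf * bp = geteblk(1 * MMU_PGSIZE_COARSE);

    if (!bp)
        return; /* Allocation failed. */

    /* Request three coarse pages; b_bufsize, b_bcount, and
     * b_mmu.num_pages are updated accordingly. */
    allocbuf(bp, 3 * MMU_PGSIZE_COARSE);
}
#endif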