/**
 * Drop a reference to a vralloc'd buffer and free it on last release.
 * @param region is the buffer whose refcount is decremented; when the count
 *               reaches zero the buffer's blocks are returned to the owning
 *               vregion, allocation stats are updated, and the buf struct is
 *               kfree'd.  The backing vregion node itself is released only if
 *               it becomes empty and is not the cached last_vreg.
 * NOTE(review): the spinlock protects only the refcount update; the caller is
 * presumably expected to hold the final reference exclusively once the count
 * hits zero — confirm against the locking convention used elsewhere.
 */
void vrfree(struct buf * region)
{
    struct vregion * vreg;
    size_t iblock;
    /* Number of allocation blocks backing this buffer. */
    const size_t bcount = VREG_PCOUNT(region->b_bufsize);

    /* Decrement the refcount under the lock; bail out while holding no lock
     * if other references remain. */
    mtx_spinlock(&(region->lock));
    region->refcount--;
    if (region->refcount > 0) {
        mtx_unlock(&(region->lock));
        return;
    }
    mtx_unlock(&(region->lock));

    /* Last reference dropped: return the blocks to the owning vregion. */
    vreg = (struct vregion *)(region->allocator_data);
    /* Index of the buffer's first block within the vregion. */
    iblock = VREG_PCOUNT(region->b_data - vreg->kaddr);
    bitmap_block_update(vreg->map, 0, iblock, bcount);
    vreg->count -= bcount;
    vmem_used -= region->b_bufsize; /* Update stats */
    kfree(region);

    /* Release an empty vregion node, but keep the cached last_vreg around
     * so the next allocation has somewhere to start searching. */
    if (vreg->count <= 0 && last_vreg != vreg) {
        vreg_free_node(vreg);
    }
}
/**
 * Allocate a contiguous run of blocks from a bitmap.
 * @param[out] start receives the index of the first block of the run.
 * @param len    is the number of contiguous blocks required.
 * @param bitmap is the allocation bitmap to search and mark.
 * @param size   is the size of the bitmap in bytes.
 * @return 0 on success and the run is marked as allocated;
 *         otherwise the search's error code is returned and the
 *         bitmap is left untouched.
 */
int bitmap_block_alloc(size_t * start, size_t len, bitmap_t * bitmap, size_t size)
{
    int err;

    err = bitmap_block_search(start, len, bitmap, size);
    if (err != 0)
        return err;

    /* Run found; mark it as taken before reporting success. */
    bitmap_block_update(bitmap, 1, *start, len);

    return 0;
}
/**
 * Find and reserve pcount contiguous blocks from some vregion.
 * Searches the vregion list starting at the cached last_vreg; if no existing
 * region has room, a new region is allocated and the search is retried.
 * @param[out] iblock   receives the index of the first reserved block.
 * @param pcount        is the number of contiguous blocks needed.
 * @param[out] vreg_ret receives the vregion the blocks were taken from.
 * @return 0 on success; -ENOMEM if a new vregion could not be allocated.
 * NOTE(review): assumes vreg_alloc_node() links the new node into the list
 * reachable from the returned pointer, so the retry scan terminates — confirm.
 */
static int get_iblocks(size_t * iblock, size_t pcount, struct vregion ** vreg_ret)
{
    struct vregion * vreg = last_vreg;

retry_vra:
    /* Walk the region list looking for a free run of pcount blocks. */
    do {
        if (bitmap_block_search(iblock, pcount, vreg->map, vreg->size) == 0)
            break; /* Found block */
    } while ((vreg = vreg->node.next));
    if (!vreg) { /* Not found */
        /* No existing region can hold the request; grow and retry. */
        vreg = vreg_alloc_node(pcount);
        if (!vreg)
            return -ENOMEM;
        goto retry_vra;
    }
    /* Reserve the run in the owning region's bitmap. */
    bitmap_block_update(vreg->map, 1, *iblock, pcount);
    *vreg_ret = vreg;
    return 0;
}
/**
 * Allocate a contiguous run of blocks whose start index satisfies an
 * alignment constraint.
 * @param[out] start receives the index of the first block of the run.
 * @param len    is the number of contiguous blocks required.
 * @param bitmap is the allocation bitmap to search and mark.
 * @param size   is the size of the bitmap in bytes.
 * @param balign is the required alignment of the start index, in blocks.
 * @return 0 on success and the run is marked allocated; -1 if the search
 *         window was exhausted; otherwise the search's error code.
 */
int bitmap_block_align_alloc(size_t * start, size_t len, bitmap_t * bitmap,
                             size_t size, size_t balign)
{
    int retval;
    size_t begin = 0;

    for (;;) {
        /* Exhausted every bit position without an aligned hit. */
        if (begin >= size * 8) {
            retval = -1;
            goto out;
        }

        retval = bitmap_block_search_s(begin, start, len, bitmap, size);
        if (retval != 0)
            goto out;

        if ((*start % balign) == 0)
            break; /* Found an aligned run. */

        /* Misaligned hit: resume searching at the next aligned index. */
        begin = *start + (balign - (*start % balign));
    }

    bitmap_block_update(bitmap, 1, *start, len);
out:
    return retval;
}
void allocbuf(struct buf * bp, size_t size) { const size_t orig_size = size; const size_t new_size = memalign_size(size, MMU_PGSIZE_COARSE); const size_t pcount = VREG_PCOUNT(new_size); const size_t blockdiff = pcount - VREG_PCOUNT(bp->b_bufsize); size_t iblock; size_t bcount = VREG_PCOUNT(bp->b_bufsize); struct vregion * vreg = bp->allocator_data; if (!vreg) panic("bp->allocator_data not set"); if (bp->b_bufsize == new_size) return; mtx_spinlock(&bp->lock); if (blockdiff > 0) { const size_t sblock = VREG_PCOUNT((bp->b_data - vreg->kaddr)) + bcount; if (bitmap_block_search_s(sblock, &iblock, blockdiff, vreg->map, vreg->size) == 0) { if (iblock == sblock + 1) { bitmap_block_update(vreg->map, 1, sblock, blockdiff); } else { /* Must allocate a new region */ struct vregion * nvreg; uintptr_t new_addr; if (get_iblocks(&iblock, pcount, &nvreg)) panic("OOM during allocbuf()"); new_addr = VREG_I2ADDR(nvreg, iblock); memcpy((void *)(new_addr), (void *)(bp->b_data), bp->b_bufsize); bp->b_mmu.paddr = new_addr; bp->b_data = bp->b_mmu.paddr; /* Currently this way as * kernel space is 1:1 */ bp->allocator_data = nvreg; /* Free blocks from old vreg */ bitmap_block_update(vreg->map, 0, iblock, bcount); vreg = nvreg; mtx_unlock(&bp->lock); } } } else { #if 0 /* We don't usually want to shrunk because it's hard to get memory back. */ const size_t sblock = VREG_PCOUNT((bp->b_data - vreg->kaddr)) + bcount + blockdiff; bitmap_block_update(vreg->map, 0, sblock, -blockdiff); #endif } vreg->count += blockdiff; bp->b_bufsize = new_size; bp->b_bcount = orig_size; bp->b_mmu.num_pages = pcount; mtx_unlock(&bp->lock); }