void brel(void *buf) { struct bfhead *b, *bn; b = BFH(getPointerOffset(buf, -(int32_t)sizeof(struct bhead))); #ifdef BufStats numrel++; /* Increment number of brel() calls */ #endif assert(buf != NULL); #ifdef BECtl if (b->bh.bsize == 0) { /* Directly-acquired buffer? */ struct bdhead *bdh; bdh = BDH(getPointerOffset(buf, -(int32_t)sizeof(struct bdhead))); assert(b->bh.prevfree == 0); #ifdef BufStats totalloc -= bdh->tsize; assert(totalloc >= 0); numdrel++; /* Number of direct releases */ #endif /* BufStats */ #ifdef FreeWipe V memset((int8_t *) buf, 0x55, (MemSize) (bdh->tsize - (int32_t)sizeof(struct bdhead))); #endif /* FreeWipe */ assert(relfcn != NULL); (*relfcn)((void *) bdh); /* Release it directly. */ } else { #endif /* BECtl */ /* Buffer size must be negative, indicating that the buffer is allocated. */ if (b->bh.bsize >= 0) { bn = NULL; } assert(b->bh.bsize < 0); /* Back pointer in next buffer must be zero, indicating the same thing: */ assert(BH((int8_t *) b - b->bh.bsize)->prevfree == 0); #ifdef BufStats totalloc += b->bh.bsize; assert(totalloc >= 0); #endif /* If the back link is nonzero, the previous buffer is free. */ if (b->bh.prevfree != 0) { /* The previous buffer is free. Consolidate this buffer with it by adding the length of this buffer to the previous free buffer. Note that we subtract the size in the buffer being released, since it's negative to indicate that the buffer is allocated. */ register bufsize size = b->bh.bsize; /* Make the previous buffer the one we're working on. */ assert(BH(b - b->bh.prevfree)->bsize == b->bh.prevfree); b = BFH(getPointerOffset(b, -b->bh.prevfree)); b->bh.bsize -= size; } else { /* The previous buffer isn't allocated. Insert this buffer on the free list as an isolated free block. 
*/ assert(freelist.ql.blink->ql.flink == &freelist); assert(freelist.ql.flink->ql.blink == &freelist); b->ql.flink = &freelist; b->ql.blink = freelist.ql.blink; freelist.ql.blink = b; b->ql.blink->ql.flink = b; b->bh.bsize = -b->bh.bsize; } /* Now we look at the next buffer in memory, located by advancing from the start of this buffer by its size, to see if that buffer is free. If it is, we combine this buffer with the next one in memory, dechaining the second buffer from the free list. */ bn = BFH(getPointerOffset(b, b->bh.bsize)); if (bn->bh.bsize > 0) { /* The buffer is free. Remove it from the free list and add its size to that of our buffer. */ assert(BH(bn + bn->bh.bsize)->prevfree == bn->bh.bsize); assert(bn->ql.blink->ql.flink == bn); assert(bn->ql.flink->ql.blink == bn); bn->ql.blink->ql.flink = bn->ql.flink; bn->ql.flink->ql.blink = bn->ql.blink; b->bh.bsize += bn->bh.bsize; /* Finally, advance to the buffer that follows the newly consolidated free block. We must set its backpointer to the head of the consolidated free block. We know the next block must be an allocated block because the process of recombination guarantees that two free blocks will never be contiguous in memory. */ bn = BFH(getPointerOffset(b, b->bh.bsize)); } #ifdef FreeWipe V memset(getPointerOffset(b, sizeof(struct bfhead)), 0x55, (MemSize) (b->bh.bsize - (int32_t)sizeof(struct bfhead))); #endif assert(bn->bh.bsize < 0); /* The next buffer is allocated. Set the backpointer in it to point to this buffer; the previous free buffer in memory. */ bn->bh.prevfree = b->bh.bsize; #ifdef BECtl /* If a block-release function is defined, and this free buffer constitutes the entire block, release it. Note that pool_len is defined in such a way that the test will fail unless all pool blocks are the same size. 
*/ if ((relfcn != NULL) && (((bufsize) b->bh.bsize) == (pool_len - (int32_t)sizeof(struct bhead)))) { assert(b->bh.prevfree == 0); assert(BH(b + b->bh.bsize)->bsize == ESent); assert(BH(b + b->bh.bsize)->prevfree == b->bh.bsize); /* Unlink the buffer from the free list */ b->ql.blink->ql.flink = b->ql.flink; b->ql.flink->ql.blink = b->ql.blink; (*relfcn)(b); #ifdef BufStats numprel++; /* Nr of expansion block releases */ numpblk--; /* Total number of blocks */ assert(numpblk == numpget - numprel); #endif /* BufStats */ } } #endif /* BECtl */ }
void* BGet::bget(bufsize requested_size) { bufsize size = requested_size; struct bfhead *b; #ifdef BestFit struct bfhead *best; #endif void *buf; assert(size >= 0); if (size < SizeQ) { /* Need at least room for the */ size = SizeQ; /* queue links. */ } #ifdef SizeQuant #if SizeQuant > 1 size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1)); #endif #endif size += sizeof(struct bhead); /* Add overhead in allocated buffer to size required. */ b = freelist.ql.flink; #ifdef BestFit best = &freelist; #endif /* Scan the free list searching for the first buffer big enough to hold the requested size buffer. */ #ifdef BestFit while (b != &freelist) { if (b->bh.bsize >= size) { if ((best == &freelist) || (b->bh.bsize < best->bh.bsize)) { best = b; } } b = b->ql.flink; /* Link to next buffer */ } b = best; #endif /* BestFit */ while (b != &freelist) { if ((bufsize) b->bh.bsize >= size) { /* Buffer is big enough to satisfy the request. Allocate it to the caller. We must decide whether the buffer is large enough to split into the part given to the caller and a free buffer that remains on the free list, or whether the entire buffer should be removed from the free list and given to the caller in its entirety. We only split the buffer if enough room remains for a header plus the minimum quantum of allocation. */ if ((b->bh.bsize - size) > (SizeQ + (sizeof(struct bhead)))) { struct bhead *ba, *bn; ba = BH(((char *) b) + (b->bh.bsize - size)); bn = BH(((char *) ba) + size); assert(bn->prevfree == b->bh.bsize); /* Subtract size from length of free block. */ b->bh.bsize -= size; /* Link allocated buffer to the previous free buffer. */ ba->prevfree = b->bh.bsize; /* Plug negative size into user buffer. */ ba->bsize = -(bufsize) size; /* Mark buffer after this one not preceded by free block. 
*/ bn->prevfree = 0; buf = (void *) ((((char *) ba) + sizeof(struct bhead))); return buf; } else { struct bhead *ba; ba = BH(((char *) b) + b->bh.bsize); assert(ba->prevfree == b->bh.bsize); /* The buffer isn't big enough to split. Give the whole shebang to the caller and remove it from the free list. */ assert(b->ql.blink->ql.flink == b); assert(b->ql.flink->ql.blink == b); b->ql.blink->ql.flink = b->ql.flink; b->ql.flink->ql.blink = b->ql.blink; /* Negate size to mark buffer allocated. */ b->bh.bsize = -(b->bh.bsize); /* Zero the back pointer in the next buffer in memory to indicate that this buffer is allocated. */ ba->prevfree = 0; /* Give user buffer starting at queue links. */ buf = (void *) &(b->ql); return buf; } } b = b->ql.flink; /* Link to next buffer */ } #ifdef BECtl /* No buffer available with requested size free. */ /* Don't give up yet -- look in the reserve supply. */ if (acqfcn != NULL) { if (size > exp_incr - sizeof(struct bhead)) { /* Request is too large to fit in a single expansion block. Try to satisy it by a direct buffer acquisition. */ struct bdhead *bdh; size += sizeof(struct bdhead) - sizeof(struct bhead); if ((bdh = BDH((*acqfcn)((bufsize) size))) != NULL) { /* Mark the buffer special by setting the size field of its header to zero. */ bdh->bh.bsize = 0; bdh->bh.prevfree = 0; bdh->tsize = size; buf = (void *) (bdh + 1); return buf; } } else { /* Try to obtain a new expansion block */ void *newpool; if ((newpool = (*acqfcn)((bufsize) exp_incr)) != NULL) { bpool(newpool, exp_incr); buf = bget(requested_size); /* This can't, I say, can't get into a loop. */ return buf; } } } /* Still no buffer available */ #endif /* BECtl */ return NULL; }
void *bget(bufsize requested_size) { uint32_t size = (uint32_t)requested_size; struct bfhead *b; #ifdef BestFit struct bfhead *best; #endif void *buf = NULL; #ifdef BECtl int32_t compactseq = 0; int8_t acquireLoop = 1; /* boolean used for first while loop control */ int8_t BECtlLoop = 1; /* boolean used for second while loop control */ #endif int32_t compfcnResult = 0; /* used to store compfcn call result */ int32_t foundBuffer = 0; /* Boolean indicating if we found a buffer. */ /* It is used to avoid multiple calls to return, */ /* which is not MISRA compliant */ #ifdef BECtl /* If an acquire function was provided in the call to bectl(), wrap a loop around the allocation process to allow acquire to intervene in case we don't find a suitable buffer in the chain. */ while (acquireLoop != 0) { acquireLoop = 0; #endif assert(size > 0); if (size < SizeQ) { /* Need at least room for the */ size = SizeQ; /* queue links. */ } #ifdef SizeQuant #if SizeQuant > 1 size = (size + ((uint32_t)SizeQuant - 1)) & (~((uint32_t)SizeQuant - 1)); #endif #endif size += sizeof(struct bhead); /* Add overhead in allocated buffer to size required. */ #ifdef BECtl /* If a compact function was provided in the call to bectl(), wrap a loop around the allocation process to allow compaction to intervene in case we don't find a suitable buffer in the chain. */ while (BECtlLoop != 0) { #endif b = freelist.ql.flink; #ifdef BestFit best = &freelist; #endif /* Scan the free list searching for the first buffer big enough to hold the requested size buffer. */ #ifdef BestFit while (b != &freelist) { if (b->bh.bsize >= (int32_t)size) { if ((best == &freelist) || (b->bh.bsize < best->bh.bsize)) { best = b; } } b = b->ql.flink; /* Link to next buffer */ } b = best; #endif /* BestFit */ while ((b != &freelist) && (foundBuffer == 0)) { if ((bufsize) b->bh.bsize >= (int32_t)size) { /* Buffer is big enough to satisfy the request. Allocate it to the caller. 
We must decide whether the buffer is large enough to split into the part given to the caller and a free buffer that remains on the free list, or whether the entire buffer should be removed from the free list and given to the caller in its entirety. We only split the buffer if enough room remains for a header plus the minimum quantum of allocation. */ if ((b->bh.bsize - (int32_t)size) > (int32_t)(SizeQ + (sizeof(struct bhead)))) { struct bhead *ba, *bn; ba = BH(getPointerOffset(b, (b->bh.bsize - (bufsize)size))); bn = BH(getPointerOffset(ba, (int32_t)size)); assert(bn->prevfree == b->bh.bsize); /* Subtract size from length of free block. */ b->bh.bsize -= (bufsize)size; /* Link allocated buffer to the previous free buffer. */ ba->prevfree = b->bh.bsize; /* Plug negative size into user buffer. */ ba->bsize = -(bufsize) size; /* Mark buffer after this one not preceded by free block. */ bn->prevfree = 0; #ifdef BufStats totalloc += (int32_t)size; numget++; /* Increment number of bget() calls */ #endif buf = (void *) getPointerOffset(ba, sizeof(struct bhead)); foundBuffer = 1; } else { struct bhead *ba; ba = BH(getPointerOffset(b, b->bh.bsize)); assert(ba->prevfree == b->bh.bsize); /* The buffer isn't big enough to split. Give the whole shebang to the caller and remove it from the free list. */ assert(b->ql.blink->ql.flink == b); assert(b->ql.flink->ql.blink == b); b->ql.blink->ql.flink = b->ql.flink; b->ql.flink->ql.blink = b->ql.blink; #ifdef BufStats totalloc += b->bh.bsize; numget++; /* Increment number of bget() calls */ #endif /* Negate size to mark buffer allocated. */ b->bh.bsize = -(b->bh.bsize); /* Zero the back pointer in the next buffer in memory to indicate that this buffer is allocated. */ ba->prevfree = 0; /* Give user buffer starting at queue links. */ buf = (void *) &(b->ql); foundBuffer = 1; } } if (foundBuffer == 0) { b = b->ql.flink; /* Link to next buffer */ } } #ifdef BECtl if (foundBuffer == 0 ) { /* We failed to find a buffer. 
If there's a compact function defined, notify it of the size requested. If it returns TRUE, try the allocation again. */ compactseq++; if (compfcn != NULL) { compfcnResult = (*compfcn)((bufsize)size, compactseq); } if ((compfcn == NULL) || (compfcnResult == 0)) { BECtlLoop = 0; } } else { BECtlLoop = 0; } } if (foundBuffer == 0 ) { /* No buffer available with requested size free. */ /* Don't give up yet -- look in the reserve supply. */ if (acqfcn != NULL) { if ((int32_t)size > (exp_incr - (int32_t)sizeof(struct bhead))) { /* Request is too large to fit in a single expansion block. Try to satisy it by a direct buffer acquisition. */ struct bdhead *bdh; size += (sizeof(struct bdhead) - sizeof(struct bhead)); bdh = BDH((*acqfcn)((bufsize) size)); if (bdh != NULL) { /* Mark the buffer special by setting the size field of its header to zero. */ bdh->bh.bsize = 0; bdh->bh.prevfree = 0; bdh->tsize = (bufsize)size; #ifdef BufStats totalloc += (int32_t)size; numget++; /* Increment number of bget() calls */ numdget++; /* Direct bget() call count */ #endif /* Bufstats */ buf = (void *) getPointerOffset(bdh,1); } } else { /* Try to obtain a new expansion block */ void *newpool; newpool = (*acqfcn)((bufsize) exp_incr); if (newpool != NULL) { bpool(newpool, exp_incr); acquireLoop = 1; } } } } /* Still no buffer available */ } #endif /* BECtl */ return buf; }
/* bget
 * Allocate a buffer of at least requested_size bytes from the given
 * pool (first fit; best fit when BestFit is defined).  Returns a
 * pointer to the user data area, or NULL if there is no more room
 * available.
 *
 * Allocation strategy: the request is floored at SizeQ (room for the
 * free-list queue links), rounded up to SizeQuant, and increased by the
 * block-header overhead.  A free buffer large enough is either split
 * (tail given to the caller, head kept on the free list) or handed over
 * whole; a negative bsize in the header marks the block allocated.
 * Under BECtl a failed search invokes the pool's compaction callback
 * and, failing that, the acquisition callback (direct acquisition for
 * oversize requests, otherwise a new expansion block plus one retry).
 */
void* bget(
    _In_ BytePool_t *pool,
    _In_ long requested_size)
{
    long size = requested_size;
    struct bfhead *b;
#ifdef BestFit
    struct bfhead *best;
#endif
    void *buf;
#ifdef BECtl
    int compactseq = 0;        /* # of compaction attempts for this request */
#endif

    assert(pool != NULL);
    assert(size > 0);

    if (size < SizeQ) {        /* Need at least room for the */
        size = SizeQ;          /*    queue links.  */
    }
#ifdef SizeQuant
#if SizeQuant > 1
    /* Round the request up to the next allocation quantum. */
    size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif

    size += sizeof(struct bhead); /* Add overhead in allocated buffer
                                     to size required. */

#ifdef BECtl
    /* If a compact function was provided in the call to bectl(), wrap a
       loop around the allocation process to allow compaction to
       intervene in case we don't find a suitable buffer in the chain. */
    while (1) {
#endif
        b = pool->freelist.ql.flink;
#ifdef BestFit
        best = &pool->freelist;
#endif

        /* Scan the free list searching for the first buffer big enough
           to hold the requested size buffer. */
#ifdef BestFit
        while (b != &pool->freelist) {
            if (b->bh.bsize >= size) {
                if ((best == &pool->freelist) || (b->bh.bsize < best->bh.bsize)) {
                    best = b;
                }
            }
            b = b->ql.flink;   /* Link to next buffer */
        }
        b = best;
#endif /* BestFit */

        while (b != &pool->freelist) {
            if ((long) b->bh.bsize >= size) {
                /* Buffer is big enough to satisfy the request.  Allocate
                   it to the caller.  We must decide whether the buffer is
                   large enough to split into the part given to the caller
                   and a free buffer that remains on the free list, or
                   whether the entire buffer should be removed from the
                   free list and given to the caller in its entirety.  We
                   only split the buffer if enough room remains for a
                   header plus the minimum quantum of allocation. */
                if ((b->bh.bsize - size) > (SizeQ + (sizeof(struct bhead)))) {
                    struct bhead *ba, *bn;

                    /* Carve the allocation off the TAIL of the free block. */
                    ba = BH(((char *) b) + (b->bh.bsize - size));
                    bn = BH(((char *) ba) + size);
                    assert(bn->prevfree == b->bh.bsize);
                    /* Subtract size from length of free block. */
                    b->bh.bsize -= size;
                    /* Link allocated buffer to the previous free buffer. */
                    ba->prevfree = b->bh.bsize;
                    /* Plug negative size into user buffer. */
                    ba->bsize = -(long) size;
                    /* Mark buffer after this one not preceded by free
                       block. */
                    bn->prevfree = 0;
#ifdef BufStats
                    pool->totalloc += size;
                    pool->numget++;   /* Increment number of bget() calls */
#endif
                    buf = (void *) ((((char *) ba) + sizeof(struct bhead)));
                    return buf;
                } else {
                    struct bhead *ba;

                    ba = BH(((char *) b) + b->bh.bsize);
                    assert(ba->prevfree == b->bh.bsize);

                    /* The buffer isn't big enough to split.  Give the
                       whole shebang to the caller and remove it from the
                       free list. */
                    assert(b->ql.blink->ql.flink == b);
                    assert(b->ql.flink->ql.blink == b);
                    b->ql.blink->ql.flink = b->ql.flink;
                    b->ql.flink->ql.blink = b->ql.blink;
#ifdef BufStats
                    pool->totalloc += b->bh.bsize;
                    pool->numget++;   /* Increment number of bget() calls */
#endif
                    /* Negate size to mark buffer allocated. */
                    b->bh.bsize = -(b->bh.bsize);
                    /* Zero the back pointer in the next buffer in memory
                       to indicate that this buffer is allocated. */
                    ba->prevfree = 0;
                    /* Give user buffer starting at queue links. */
                    buf = (void *) &(b->ql);
                    return buf;
                }
            }
            b = b->ql.flink;   /* Link to next buffer */
        }
#ifdef BECtl
        /* We failed to find a buffer.  If there's a compact function
           defined, notify it of the size requested.  If it returns TRUE,
           try the allocation again. */
        if ((pool->compfcn == NULL) || (!(*pool->compfcn)(size, ++compactseq))) {
            break;
        }
    }

    /* No buffer available with requested size free. */
    /* Don't give up yet -- look in the reserve supply. */
    if (pool->acqfcn != NULL) {
        /* NOTE(review): sizeof() is unsigned, so this comparison is
           performed in the unsigned domain; fine while exp_incr >=
           sizeof(struct bhead) — confirm that invariant holds. */
        if (size > pool->exp_incr - sizeof(struct bhead)) {
            /* Request is too large to fit in a single expansion block.
               Try to satisy it by a direct buffer acquisition. */
            struct bdhead *bdh;

            size += sizeof(struct bdhead) - sizeof(struct bhead);
            if ((bdh = BDH((*pool->acqfcn)((long) size))) != NULL) {
                /* Mark the buffer special by setting the size field of
                   its header to zero. */
                bdh->bh.bsize = 0;
                bdh->bh.prevfree = 0;
                bdh->tsize = size;
#ifdef BufStats
                pool->totalloc += size;
                pool->numget++;    /* Increment number of bget() calls */
                pool->numdget++;   /* Direct bget() call count */
#endif
                buf = (void *) (bdh + 1);
                return buf;
            }
        } else {
            /* Try to obtain a new expansion block */
            void *newpool;

            if ((newpool = (*pool->acqfcn)((long) pool->exp_incr)) != NULL) {
                /* NOTE(review): &pool passes a BytePool_t**; other bget
                   variants in this file call bpool() without a pool
                   address — verify bpool's third-parameter type expects
                   a pointer-to-pointer here. */
                bpool(newpool, pool->exp_incr, &pool);
                buf = bget(pool, requested_size);
                /* This can't, I say, can't get into a loop. */
                return buf;
            }
        }
    }
    /* Still no buffer available */
#endif /* BECtl */

    return NULL;
}