/***
*int _heap_addblock(block, size) - add a block of memory to the heap
*
*Purpose:
*       Link a caller-supplied block of memory into the heap's address-
*       ordered descriptor list as a single free block.  Handles every
*       placement returned by _heap_findaddr(): empty heap, before the
*       first block, after the last block, an exact match with an existing
*       "dummy" (place-holder) descriptor, or within a dummy region.
*
*Entry:
*       void *block  - base address of the memory to add
*       size_t size  - size of the block, in bytes
*
*Exit:
*        0 = block successfully linked into the heap
*       -1 = error (out of empty descriptors, or block overlaps memory
*            already in the heap that is not a dummy region)
*
*Notes:
*       Caller is assumed to hold the heap lock; nothing here locks.
*
*Exceptions:
*
*******************************************************************************/

int __cdecl _heap_addblock (
        void * block,
        size_t size
        )
{
        _PBLKDESC pdesc;
        REG1 _PBLKDESC pnewdesc;
        /* Pre-reserved empty descriptors.  Only the first three are ever
         * filled; the fourth stays NULL so the error path below can walk
         * the array to a guaranteed terminator. */
        _PBLKDESC pdescs[4] = { NULL, NULL, NULL, NULL };
        _PBLKDESC *ppdesc = pdescs;     /* next unconsumed reserved descriptor */
        size_t lastsize;
        int find;                       /* _heap_findaddr() result code */

        /*
         * Make sure we enough empty descriptors to do the job! Do it here
         * and now because recovering from an out-of-descriptors condition
         * is too dicey later on.  (Worst case, the "within a dummy" path,
         * needs up to three: one for the new block and up to two for the
         * dummy fragments created by the _before() calls.)
         */
        if ( ((pdescs[0] = __getempty()) == NULL) ||
             ((pdescs[1] = __getempty()) == NULL) ||
             ((pdescs[2] = __getempty()) == NULL) )
        {
                goto error;
        }

        /*
         * Find where the address fits into the heap.
         */
        find = _heap_findaddr(block, &pdesc);

        /*
         * Fill in the new heap descriptor.
         *      (1) If the new address is an exact fit, use the dummy
         *      descriptor that already exists for it.
         *      (2) If the address is NOT in the heap, allocate a new one.
         */
        if ( find == _HEAPFIND_EXACT ) {
                /* An exact address match is only legal against a dummy
                 * (place-holder) entry; real memory here means overlap. */
                if ( !(_IS_DUMMY(pdesc)) )
                        goto error;
                pnewdesc = pdesc;
        }
        else {
                pnewdesc = *(ppdesc++);
        }

        pnewdesc->pblock = block;       /* pointer to block */
        _SET_FREE(pnewdesc);            /* set me free (why don't ya, babe) */
        *(_PBLKDESC*)block = pnewdesc;  /* init back pointer */

        /*
         * Put the block in the heap
         *
         *      find = result of _heap_findaddr() call
         *      pnewdesc = points to desc to be inserted
         *      pdesc = filled in by _heap_findaddr() call as appropriate
         */
        switch (find) {

        case(_HEAPFIND_EMPTY):
                /*
                 * No memory in heap yet: new block becomes the whole heap
                 * and also the initial rover position.
                 */
                _heap_desc.sentinel.pblock = (char *) block + size;
                _before(pnewdesc, size, &_heap_desc.sentinel, &ppdesc);
                _heap_desc.pfirstdesc = _heap_desc.proverdesc = pnewdesc;
                break;

        case(_HEAPFIND_BEFORE):
                /*
                 * New block is before the heap
                 */
                _before(pnewdesc, size, _heap_desc.pfirstdesc, &ppdesc);
                _heap_desc.pfirstdesc = pnewdesc;
                break;

        case(_HEAPFIND_AFTER):
                /*
                 * New block is after the heap
                 *
                 * Find the current last block in the heap (the block that
                 * contains the last valid heap address, i.e. sentinel - 1).
                 */
                if ( _heap_findaddr((void *)((char *)
                     (_heap_desc.sentinel.pblock) - 1), &pdesc)
                     != _HEAPFIND_WITHIN )
                        _heap_abort();

                /* Capture the old last block's size BEFORE moving the
                 * sentinel; _MEMSIZE presumably derives it from the next
                 * descriptor's address. */
                lastsize = _MEMSIZE(pdesc);

                /*
                 * Start insertion by placing new block immediately
                 * in front of the sentinel
                 */
                _heap_desc.sentinel.pblock = (char *) block + size;
                pnewdesc->pnextdesc = &_heap_desc.sentinel;

                /*
                 * Finish insertion by placing new block after the
                 * old last block (with a possible intervening dummy
                 * block being created)
                 */
                _before(pdesc, lastsize, pnewdesc, &ppdesc);
                break;

        case(_HEAPFIND_EXACT):
                /*
                 * Block is already in the heap (and we've checked
                 * that it was a "dummy" before this call).
                 *
                 * [NOTES: (1) pnewdesc and pdesc are the same,
                 * (2) pnewdesc is already linked to the previous
                 * heap entry, (3) pdesc->pnextdesc is still valid!
                 * (4) Also, if pdesc->pnextdesc is the sentinel,
                 * then simply update the sentinel size (calling
                 * before will cause an error if the previous last
                 * block was bigger than the current one!).
                 * (see code at top of this routine).]
                 */
                if (pdesc->pnextdesc == &_heap_desc.sentinel)
                        _heap_desc.sentinel.pblock =
                            (char *) _ADDRESS(pdesc) + size;
                else
                        _before(pnewdesc, size, pdesc->pnextdesc, &ppdesc);
                break;

        default:
                /*
                 * New block is within heap: only legal if it falls inside
                 * an existing dummy (place-holder) region.
                 *
                 * NOTE(review): on this error path pdescs[0] has already
                 * been consumed as pnewdesc (ppdesc was advanced), so the
                 * cleanup loop below will not return it to the empty list
                 * -- looks like a descriptor leak; confirm against the
                 * empty-descriptor bookkeeping elsewhere in the heap code.
                 */
                if (!(_IS_DUMMY(pdesc)))
                        goto error;

                /*
                 * If the last block in the heap is a dummy region
                 * and a new region is allocated which lies within
                 * that region, we need to update sentinel.pblock.
                 */
                if (pdesc->pnextdesc == &_heap_desc.sentinel) {
                        void * newend = (char *) _ADDRESS(pnewdesc) + size;
                        if (_heap_desc.sentinel.pblock < newend)
                                _heap_desc.sentinel.pblock = newend;
                }

                /* Link the new block after the dummy, then re-link the
                 * (possibly shrunk) dummy in front of it. */
                _before(pnewdesc, size, pdesc->pnextdesc, &ppdesc);
                _before(pdesc, _MEMSIZE(pdesc), pnewdesc, &ppdesc);
                break;
        }

        /*
         * Update rover, if appropriate: pull it back to the new block when
         * the block precedes the rover and is big enough to be worth it.
         */
        if ( (block < _ADDRESS(_heap_desc.proverdesc)) &&
             (_BLKSIZE(pnewdesc) >= _heap_resetsize) )
                _heap_desc.proverdesc = pnewdesc;

        /*
         * Good return
         */
        /* good: unreferenced label to be removed */
        return(0);

        /*
         * Error return: give back any reserved descriptors that were never
         * consumed (pdescs[3] is the permanent NULL terminator).
         */
error:
        while ( *ppdesc != NULL ) {
                _PUTEMPTY(*ppdesc);
                ppdesc++;
        }
        return(-1);
}
/***
*int _heapmin(void) - minimize the heap
*
*Purpose:
*       Coalesce the heap, then walk the region table and return as much
*       trailing free memory in each region to the OS as possible.  If any
*       region was shrunk, reset the rover and coalesce any consecutive
*       dummy (place-holder) descriptors that minimization produced.
*
*Entry:
*       (none)
*
*Exit:
*       Always returns 0.  Aborts via _heap_abort() on internal
*       inconsistency (failed coalesce or a region end address not found
*       within any heap block).
*
*Exceptions:
*
*******************************************************************************/

int __cdecl _heapmin(void)
{
        REG1 int index;                 /* region table index */
        _PBLKDESC pdesc;
        REG2 _PBLKDESC pdesc2;
        void * regend;                  /* last allocated address in region */
        int region_min_count = 0;       /* # of regions actually shrunk */

#if defined(_M_MPPC) || defined(_M_M68K)
        struct _heap_region_ *pHeapRegions;
#endif

        /*
         * Lock the heap
         */
        _mlock(_HEAP_LOCK);

        /*
         * Coalesce the heap (should return NULL)
         */
        if ( _heap_search((unsigned)_HEAP_COALESCE) != NULL )
                _heap_abort();

        /*
         * Loop through the region descriptor table freeing as much
         * memory to the OS as possible.
         */

#if defined(_M_MPPC) || defined(_M_M68K)
        for ( index=0 ; index < _heap_region_table_cur ; index++ ) {

                /* Re-fetch each iteration: the table lives in a movable
                 * handle on Mac and may relocate. */
                pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
                if ( (pHeapRegions + index)->_regbase == NULL )
                        continue;       /* region entry is empty */

                /*
                 * Get the entry that contains the last address of
                 * the region (allocated so far, that is).
                 */
                regend = (char *) ( (pHeapRegions + index)->_regbase) +
                         (pHeapRegions + index)->_currsize - 1;
#else  /* !defined(_M_MPPC) && !defined(_M_M68K) */
        for ( index=0 ; index < _HEAP_REGIONMAX ; index++ ) {

                if ( _heap_regions[index]._regbase == NULL )
                        continue;       /* region entry is empty */

                /*
                 * Get the entry that contains the last address of
                 * the region (allocated so far, that is).
                 */
                regend = (char *) _heap_regions[index]._regbase +
                         _heap_regions[index]._currsize - 1;
#endif  /* defined(_M_MPPC) || defined(_M_M68K) */

                if ( _heap_findaddr(regend, &pdesc) != _HEAPFIND_WITHIN )
                        _heap_abort(); /* last address not within a block */

                /*
                 * See if the containing block is free
                 */
                if ( !(_IS_FREE(pdesc)) )
                        continue;       /* block is not free */

                /*
                 * Region ends with a free block, go free as much mem
                 * as possible.
                 */
                region_min_count += _heapmin_region(index, regend, pdesc);

        } /* region loop */

        /*
         * By minimizing the heap, we've likely invalidated the rover and
         * may have produced contiguous dummy blocks so:
         *
         *      (1) reset the rover
         *      (2) coalesce contiguous dummy blocks
         */
        if ( region_min_count ) {

                /*
                 * Set proverdesc to pfirstdesc
                 */
                _heap_desc.proverdesc = _heap_desc.pfirstdesc;

                for ( pdesc = _heap_desc.pfirstdesc ;
                      pdesc != &_heap_desc.sentinel ;
                      pdesc = pdesc->pnextdesc ) {

                        /*
                         * Check and remove consecutive dummy blocks
                         */
                        if ( _IS_DUMMY(pdesc) ) {

                                /* Fold every immediately-following dummy
                                 * into pdesc, returning its descriptor to
                                 * the empty list.  Note the inner loop
                                 * re-reads pdesc->pnextdesc each pass. */
                                for ( pdesc2 = pdesc->pnextdesc ;
                                      _IS_DUMMY(pdesc2) ;
                                      pdesc2 = pdesc->pnextdesc ) {

                                        /*
                                         * coalesce the dummy blocks
                                         */
                                        pdesc->pnextdesc = pdesc2->pnextdesc;
                                        _PUTEMPTY(pdesc2);

                                } /* dummy loop */

                        } /* if */

                } /* heap loop */

        } /* region_min_count */

        /*
         * Good return
         */
        /* goodrtn: unreferenced label to be removed */

        /*
         * Release the heap lock
         */
        _munlock(_HEAP_LOCK);

        return(0);
}


/***
*_heapmin_region() - Minimize a region
*
*Purpose:
*       Free as much of a region back to the OS as possible.
*
*Entry:
*       int index       =       index of the region in the region table
*       void * regend   =       last valid address in region
*       pdesc           =       pointer to the last block of memory in the
*                               region (it has already been determined that
*                               this block is free)
*
*Exit:
*       int 1 = minimized region
*           0 = no change to region
*
*Exceptions:
*
*******************************************************************************/

static int __cdecl _heapmin_region (
        int index,
        void * regend,
        REG1 _PBLKDESC pdesc
        )
{
        unsigned size;                  /* free bytes at end of region */
        REG2 _PBLKDESC pnew;

#if defined(_M_MPPC) || defined(_M_M68K)
        struct _heap_region_ *pHeapRegions;
#endif

        /*
         * Init some variables
         *
         *      regend = 1st address AFTER region
         *      size = amount of free memory at end of current region
         */
        regend = (char *) regend + 1;   /* "regend++" give compiler error... */

        size = ((char *)regend - (char *)_ADDRESS(pdesc));

        /*
         * See if there's enough free memory to release to the OS.
         * (NOTE: Need more than a page since we may need a back pointer.)
         */
        if ( size <= _PAGESIZE_ )
                return(0);              /* 0 = no change to region */

        /*
         * We're going to free some memory to the OS. See if the
         * free block crosses the end of the region and, if so,
         * split up the block appropriately.
         */
        if ( (_MEMSIZE(pdesc) - size) != 0 ) {

                /*
                 * The free block spans the end of the region.
                 * Divide it up: the spill-over past the region boundary
                 * gets its own free descriptor starting at regend.
                 */

                /*
                 * Get an empty descriptor.  Bail out harmlessly if none
                 * are available -- the region simply isn't minimized.
                 */
                if ( (pnew = __getempty()) == NULL )
                        return(0);

                pnew->pblock = regend;          /* init block pointer */
                * (_PBLKDESC*)regend = pnew;    /* init back pointer */
                _SET_FREE(pnew);                /* set the block free */

                pnew->pnextdesc = pdesc->pnextdesc;     /* link it in */
                pdesc->pnextdesc = pnew;
        }

        /*
         * At this point, we have a free block of memory that goes
         * up to (but not exceeding) the end of the region.
         *
         *      pdesc = descriptor of the last free block in region
         *      size = amount of free mem at end of region (i.e.,
         *             _MEMSIZE(pdesc))
         *      regend = 1st address AFTER end of region
         */

        /*
         * See if we should return the whole region of only part of it.
         */
#if defined(_M_MPPC) || defined(_M_M68K)
        pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
        if ( _ADDRESS(pdesc) == (pHeapRegions + index)->_regbase ) {
#else
        if ( _ADDRESS(pdesc) == _heap_regions[index]._regbase ) {
#endif
                /*
                 * Whole region is free, return it to OS
                 */
                _heap_free_region(index);

                /*
                 * Put a dummy block in the heap to hold space for
                 * the memory we just freed up.
                 */
                _SET_DUMMY(pdesc);
        }
        else {
                /*
                 * Whole region is NOT free, return part of it to OS
                 */
#if !defined(_M_MPPC) && !defined(_M_M68K)
                /* NOTE: no partial-region release on Mac builds; the
                 * region is left as-is there. */
                _free_partial_region(pdesc, size, index);
#endif
        }

        /*
         * Exit paths
         */
        return(1);              /* 1 = minimized region */
}


/***
*_free_partial_region() - Free part of a region to the OS
*
*Purpose:
*       Free a portion of a region to the OS
*
*Entry:
*       pdesc = descriptor of last free block in region
*       size = amount of free mem at end of region (i.e., _MEMSIZE(pdesc))
*       index = index of region
*
*Exit:
*
*Exceptions:
*
*******************************************************************************/

static void __cdecl _free_partial_region (
        REG1 _PBLKDESC pdesc,
        unsigned size,
        int index
        )
{
        unsigned left;          /* sub-page remainder we keep in the heap */
        void * base;            /* first page-aligned address to release */
        REG2 _PBLKDESC pnew;

#if defined(_M_MPPC) || defined(_M_M68K)
        struct _heap_region_ *pHeapRegions;
#endif

        /*
         * Init a few variables.
         */
        left = (size & (_PAGESIZE_-1));
        base = (char *)_ADDRESS(pdesc);

        /*
         * We return memory to the OS in page multiples. If the
         * free block is not page aligned, we'll insert a new free block
         * to fill in the difference.
         */
        if ( left != 0 ) {
                /*
                 * The block is not a multiple of pages so we need
                 * to adjust variables accordingly: keep the leading
                 * sub-page piece, release only the aligned tail.
                 */
                size -= left;
                base = (char *)base + left;
        }

        /*
         * Return the free pages to the OS.
         */
#if defined(_M_MPPC) || defined(_M_M68K)
        if (base) {
                DisposePtr(base);
        }

        /*
         * Adjust the region table entry
         */
        pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
        (pHeapRegions + index)->_currsize -= size;
#else  /* !defined(_M_MPPC) && !defined(_M_M68K) */
        /* Decommit (not release) so the address range stays reserved
         * for the region. */
        if (!VirtualFree(base, size, MEM_DECOMMIT))
                _heap_abort();

        /*
         * Adjust the region table entry
         */
        _heap_regions[index]._currsize -= size;
#endif  /* defined(_M_MPPC) || defined(_M_M68K) */

        /*
         * Adjust the heap according to whether we released the whole
         * free block or not. (Don't worry about consecutive dummies,
         * we'll coalesce them later.)
         *
         *      base = address of block we just gave back to OS
         *      size = size of block we gave back to OS
         *      left = size of block we did NOT give back to OS
         */
        if ( left == 0 ) {
                /*
                 * The free block was released to the OS in its
                 * entirety. Make the free block a dummy place holder.
                 */
                _SET_DUMMY(pdesc);
        }
        else {
                /*
                 * Did NOT release the whole free block to the OS.
                 * There's a block of free memory we want to leave
                 * in the heap. Insert a dummy entry after it.
                 *
                 * NOTE(review): unlike _heapmin_region, a __getempty()
                 * failure here aborts rather than backing out -- the
                 * pages are already decommitted, so there is no safe
                 * recovery at this point.
                 */
                if ( (pnew = __getempty()) == NULL )
                        _heap_abort();

                pnew->pblock = (char *)base;
                _SET_DUMMY(pnew);

                pnew->pnextdesc = pdesc->pnextdesc;
                pdesc->pnextdesc = pnew;
        }
}