/*
 * _nheapgrow() - grow the near heap toward its maximum size.
 *
 * On Windows-286 or non-16-bit targets the heap grows on demand, so a
 * dummy allocate/free pair is enough to make sure the heap exists.  On
 * 16-bit targets the DGROUP segment is explicitly enlarged (up to 64K)
 * and the newly gained space is absorbed into the heap by a matching
 * _nmalloc/_nfree of the whole gain.
 */
_WCRTLINK void _nheapgrow( void )
{
#if defined( __WINDOWS_286__ ) || !defined( _M_I86 )
    _nfree( _nmalloc( 1 ) );            /* get something into the heap */
#else
    unsigned max_paras;                 /* paragraphs DGROUP may grow to */
    unsigned curr_paras;                /* paragraphs currently used by DGROUP */
    unsigned diff_paras;                /* paragraphs still available */
    unsigned expand;                    /* bytes to hand to the allocator */

    _AccessNHeap();
    /* calculate # pages which always has extra slack space (ie. 0x10) */
    curr_paras = (( _curbrk + 0x10 ) & ~0x0f ) >> 4;
    if( curr_paras == 0 ) {
        /* we're already at 64k (paragraph count wrapped to zero) */
        _ReleaseNHeap();
        return;
    }
#if defined(__QNX__)
    /* ask QNX to grow the data segment to a full 64K */
    if( qnx_segment_realloc( _DGroup(), 65536L ) == -1 ) {
        _ReleaseNHeap();
        return;
    }
    max_paras = PARAS_IN_64K;
#elif defined(__OS2__)
    /* size 0 requests the maximum reallocation for the segment */
    if( DosReallocSeg( 0, _DGroup() ) ) {
        _ReleaseNHeap();
        return;
    }
    max_paras = PARAS_IN_64K;
#else
    if( _RWD_osmode != DOS_MODE ) {     /* 23-apr-91 */
        max_paras = PARAS_IN_64K;
    } else {
        /* real-mode DOS: limited by the memory owned by this PSP */
        max_paras = TinyMaxSet( _RWD_psp );
        /* subtract off code size */
        max_paras -= _DGroup() - _RWD_psp;
        if( max_paras > PARAS_IN_64K ) {
            max_paras = PARAS_IN_64K;
        }
    }
#endif
    if( max_paras <= curr_paras ) {
        /* '<' -> something is wrong, '==' -> can't change size */
        _ReleaseNHeap();
        return;
    }
    diff_paras = max_paras - curr_paras;
    expand = (( diff_paras + 1 ) << 4 ) - ( _curbrk & 0x0f );
    expand += __LastFree();             /* compensate for _expand's adjustment */
    _ReleaseNHeap();
    /* allocate and free the gained space so the heap manager absorbs it;
       block-header overhead is subtracted so the request fits exactly */
    _nfree( _nmalloc( expand - ( sizeof( size_t ) + sizeof(frl) ) ) );
#endif
}
/*
 * __brk() - set the near-heap break value (QNX flavour).
 *
 * Resizes the DGROUP segment so it covers brk_value bytes, then records
 * the new break.  Returns the previous break value on success, or
 * (void _WCNEAR *)-1 with errno set to ENOMEM when the request dips
 * below _STACKTOP or the segment reallocation fails.
 */
void _WCNEAR *__brk( unsigned brk_value )
{
    unsigned    prev_brk;
    unsigned    paragraphs;
    __segment   dgroup;

    /* never shrink the data segment below the top of the stack */
    if( brk_value < _STACKTOP ) {
        _RWD_errno = ENOMEM;
        return( (void _WCNEAR *)-1 );
    }
    /* 16-byte paragraphs needed to cover brk_value */
    paragraphs = ( brk_value + 0x0f ) >> 4;
    if( paragraphs == 0 ) {
        paragraphs = 0x1000;    /* count wrapped: use a full 64K segment */
    }
    /* try setting the block of memory */
    _AccessNHeap();
    dgroup = _DGroup();
    if( qnx_segment_realloc( dgroup, ((unsigned long)paragraphs) << 4 ) == -1 ) {
        _RWD_errno = ENOMEM;
        _ReleaseNHeap();
        return( (void _WCNEAR *)-1 );
    }
    prev_brk = _curbrk;         /* return old value of _curbrk */
    _curbrk = brk_value;        /* set new break value */
    _ReleaseNHeap();
    return( (void _WCNEAR *)prev_brk );
}
/*
 * _nexpand() - resize the near-heap allocation at 'stg' in place.
 *
 * Asks the heap manager to expand (or shrink) the block to req_size
 * without moving it.  When the manager reports the heap itself could be
 * grown (__HM_TRYGROW), the DGROUP segment is expanded at most once and
 * the request retried.  Returns 'stg' on success, NULL on failure.
 */
_WCRTLINK void _WCNEAR *_nexpand( void _WCNEAR *stg, size_t req_size )
{
    int     status;
    int     grown = 0;          /* nonzero once __ExpandDGROUP was tried */
    size_t  growth_size;

    _AccessNHeap();
    for( ;; ) {
        status = __HeapManager_expand( _DGroup(), (unsigned)stg,
                                       req_size, &growth_size );
        if( status == __HM_SUCCESS ) {
            _ReleaseNHeap();
            return( stg );
        }
        if( status == __HM_FAIL || !__IsCtsNHeap() )
            break;
        if( status == __HM_TRYGROW ) {
            if( grown )
                break;                          /* already expanded once */
            if( __ExpandDGROUP( growth_size ) == 0 )
                break;                          /* could not grow DGROUP */
            grown = 1;                          /* retry in-place expand */
        }
    }
    _ReleaseNHeap();
    return( NULL );
}
/*
 * _bheapwalk() - walk the entries of a based-heap segment.
 *
 * Requests against DGROUP are forwarded to the near-heap walker;
 * _NULLSEG selects the default based heap (__bheap).  Returns the
 * status reported by the underlying walker.
 */
_WCRTLINK int _bheapwalk( __segment seg, struct _heapinfo *entry )
{
    int status;

    if( seg == _DGroup() )
        return( _nheapwalk( entry ) );
    _AccessFHeap();
    status = __HeapWalk( entry, ( seg == _NULLSEG ) ? __bheap : seg, seg );
    _ReleaseFHeap();
    return( status );
}
/*
 * _bheapset() - fill the free blocks of a based-heap segment.
 *
 * DGROUP requests go to the near-heap version; _NULLSEG fans out over
 * every segment in the based-heap chain.  A single segment is checked
 * with _bheapchk() first, and filled only when the check passes.
 * Returns _HEAPOK or the first failing status encountered.
 */
_WCRTLINK int _bheapset( __segment seg, unsigned int fill )
{
    int rc;

    if( seg == _DGroup() )
        return( _nheapset( fill ) );
    if( seg != _NULLSEG ) {
        /* single segment: validate, then fill */
        rc = _bheapchk( seg );
        if( rc == _HEAPOK )
            rc = __HeapSet( seg, fill );
        return( rc );
    }
    /* _NULLSEG: apply to every segment in the chain, stop on first error */
    for( seg = __bheapbeg; seg != _NULLSEG; seg = HEAP( seg )->nextseg ) {
        rc = _bheapset( seg, fill );
        if( rc != _HEAPOK )
            return( rc );
    }
    return( _HEAPOK );
}
/*
 * _nmalloc() - allocate 'amt' bytes from the near heap.
 *
 * Scans the chain of miniheaps starting from a rover (cached position
 * of the last known big-enough heap) and asks __MemAllocator for a
 * block; when no miniheap can satisfy the request the DGROUP segment is
 * expanded once, and __nmemneed() is given a chance to release memory
 * before giving up.  Returns a near pointer, or NULL on failure.
 */
_WCRTLINK void _WCNEAR *_nmalloc( size_t amt )
{
    unsigned largest;
    unsigned size;
    unsigned ptr;
    unsigned char expanded;     /* DGROUP already expanded this round */
    mheapptr miniheap_ptr;
# if defined(__WARP__)
    int use_obj_any;
# endif     // __WARP__

    /* reject zero and sizes so large that '-sizeof(struct heapblk)'
       (unsigned wrap-around) shows the overhead would not fit */
    if( (amt == 0) || (amt > -sizeof(struct heapblk)) ) {
        return( (void _WCNEAR *)NULL );
    }
    // Try to determine which miniheap to begin allocating from.
    // first, round up the amount
    size = (amt + TAG_SIZE + ROUND_SIZE) & ~ROUND_SIZE;
    if( size < FRL_SIZE ) {
        size = FRL_SIZE;
    }
    _AccessNHeap();
    ptr = 0;
    expanded = 0;
    for(;;) {
# if defined(__WARP__)
        // Need to update each pass in case 1st DosAllocMem determines OBJ_ANY not supported
        use_obj_any = _os2_obj_any_supported && _os2_use_obj_any;
# endif
        // Figure out where to start looking for free blocks
        if( size > __LargestSizeB4MiniHeapRover ) {
            /* nothing before the rover is big enough; start at the rover */
            miniheap_ptr = __MiniHeapRover;
            if( miniheap_ptr == NULL ) {
                __LargestSizeB4MiniHeapRover = 0;   // force to be updated
                miniheap_ptr = __nheapbeg;
            }
        } else {
            __LargestSizeB4MiniHeapRover = 0;       // force to be updated
            miniheap_ptr = __nheapbeg;
        }
        // Search for free block
        for(;;) {
            if( miniheap_ptr == NULL ) {
                break;          // Expand heap and retry maybe
            }
            __MiniHeapRover = miniheap_ptr;
            largest = miniheap_ptr->largest_blk;
# if defined(__WARP__)
            /* only consider miniheaps of the matching OBJ_ANY flavour */
            if( use_obj_any == ( miniheap_ptr->used_obj_any != 0 ) ) {
# endif     // __WARP__
            if( largest >= amt ) {
                ptr = __MemAllocator( amt, _DGroup(), (unsigned)miniheap_ptr );
                if( ptr != 0 ) {
                    goto lbl_release_heap;
                }
            }
# if defined(__WARP__)
            }
# endif     // __WARP__
            /* track the biggest free block seen before the rover */
            if( largest > __LargestSizeB4MiniHeapRover ) {
                __LargestSizeB4MiniHeapRover = largest;
            }
            miniheap_ptr = miniheap_ptr->next;
        } /* forever */
        // OS/2 only - if not block of requested type, will allocate one and find in 2nd pass
        // Try to expand heap and retry
        if( expanded || !__ExpandDGROUP( amt ) ) {
            if( !__nmemneed( amt ) ) {
                break;          // give up
            }
            /* __nmemneed released something; allow expanding again */
            expanded = 0;
        } else {
            expanded = 1;
        }
    } /* forever */
    /* fall through with ptr == 0 on failure */
lbl_release_heap:
    _ReleaseNHeap();
    return( (void _WCNEAR *)ptr );
}
/*
 * __HeapManager_expand() - resize an allocation in place (16-bit variant).
 *
 * 'offset' is the user pointer inside segment 'seg'; the block header
 * (length tag) sits TAG_SIZE bytes below it, with bit 0 of 'len' marking
 * an allocated block.  Growing consumes adjacent free blocks ahead of
 * the allocation; shrinking splits off the tail and frees it.
 *
 * Returns __HM_SUCCESS, __HM_FAIL (a used block is in the way), or
 * __HM_TRYGROW (hit the END_TAG: caller may grow the heap by
 * *growth_size bytes and retry).
 */
int __HeapManager_expand( __segment seg, unsigned offset, size_t req_size,
                          size_t *growth_size )
{
#if defined( _M_I86 )
    typedef struct freelistp __based(seg) *fptr;
    typedef char __based(void) *cptr;

    struct miniheapblkp __based(seg) *hblk;
#else
    typedef struct freelistp _WCNEAR *fptr;
    typedef char _WCNEAR *cptr;

    mheapptr hblk;
#endif
    fptr p1;
    fptr p2;
    fptr pnext;
    fptr pprev;
    size_t new_size;
    size_t old_size;
    size_t free_size;

    /* round (new_size + tag) to multiple of pointer size */
    new_size = (req_size + TAG_SIZE + ROUND_SIZE) & ~ROUND_SIZE;
    if( new_size < req_size )
        new_size = ~0;          //go for max (rounding overflowed)
    if( new_size < FRL_SIZE ) {
        new_size = FRL_SIZE;
    }
    p1 = (fptr) ((cptr)offset - TAG_SIZE);      /* block header */
    old_size = p1->len & ~1;                    /* strip 'in use' bit */
    if( new_size > old_size ) {
        /* enlarging the current allocation */
        p2 = (fptr) ((cptr)p1 + old_size);      /* block right after ours */
        *growth_size = new_size - old_size;
        for(;;) {
            free_size = p2->len;
            if( p2->len == END_TAG ) {
                /* ran into the end of the heap; caller may grow it */
                return( __HM_TRYGROW );
            } else if( free_size & 1 ) {        /* next piece is allocated */
                break;
            } else {
                /* next piece is free: absorb some or all of it */
                pnext = p2->next;
                pprev = p2->prev;
                if( seg == _DGroup() ) {        // near heap
                    /* find the miniheap containing 'offset' so its
                       bookkeeping (rover, numfree) can be updated */
                    for( hblk = __nheapbeg; hblk->next; hblk = hblk->next ) {
                        if( (fptr)hblk <= (fptr)offset
                          && (fptr)((PTR)hblk+hblk->len) > (fptr)offset )
                            break;
                    }
                }
#if defined( _M_I86 )
                else {                          // Based heap
                    /* based-heap control block lives at offset 0 */
                    hblk = 0;
                }
#endif
                if( hblk->rover == p2 ) {       /* 09-feb-91 */
                    hblk->rover = p2->prev;
                }
                if( free_size < *growth_size
                  || free_size - *growth_size < FRL_SIZE ) {
                    /* unlink small free block (whole block absorbed) */
                    pprev->next = pnext;
                    pnext->prev = pprev;
                    p1->len += free_size;
                    hblk->numfree--;
                    if( free_size >= *growth_size ) {
                        return( __HM_SUCCESS );
                    }
                    /* still short; keep eating the following blocks */
                    *growth_size -= free_size;
                    p2 = (fptr) ((cptr)p2 + free_size);
                } else {
                    /* free block is big enough to split: shift its
                       header up by *growth_size and relink it */
                    p2 = (fptr) ((cptr)p2 + *growth_size);
                    p2->len = free_size - *growth_size;
                    p2->prev = pprev;
                    p2->next = pnext;
                    pprev->next = p2;
                    pnext->prev = p2;
                    p1->len += *growth_size;
                    return( __HM_SUCCESS );
                }
            }
        }
        /* no suitable free blocks behind, have to move block */
        return( __HM_FAIL );
    } else {
        /* shrinking the current allocation */
        if( old_size - new_size >= FRL_SIZE ) {
            /* block big enough to split */
            p1->len = new_size | 1;             /* keep head, mark in use */
            p1 = (fptr) ((cptr)p1 + new_size);
            p1->len = (old_size - new_size) | 1;    /* tail marked in use so
                                                       _bfree/_nfree accepts it */
            if( seg == _DGroup() ) {            // near heap
                for( hblk = __nheapbeg; hblk->next; hblk = hblk->next ) {
                    if( (fptr)hblk <= (fptr)offset
                      && (fptr)((PTR)hblk+hblk->len) > (fptr)offset )
                        break;
                }
            }
#if defined( _M_I86 )
            else                                // Based heap
                hblk = 0;
#endif
            /* _bfree will decrement 'numalloc' 08-jul-91 */
            hblk->numalloc++;
#if defined( _M_I86 )
            _bfree( seg, (cptr)p1 + TAG_SIZE ); /* free the top portion */
#else
            _nfree( (cptr)p1 + TAG_SIZE );
#endif
        }
    }
    return( __HM_SUCCESS );
}
/*
 * _fmalloc() - allocate 'amt' bytes from the far heap.
 *
 * Walks the chain of far-heap segments from a rover, trying
 * __MemAllocator in each and growing a segment via __GrowSeg when it is
 * too small; new segments come from __AllocSeg and are linked onto the
 * chain.  When everything fails, __fmemneed() may release memory for a
 * retry, and finally the near heap is tried.  Returns a far pointer, or
 * NULL (seg 0, offset 0) on failure.
 */
_WCRTLINK void _WCFAR *_fmalloc( size_t amt )
{
    unsigned size;
    unsigned offset;
    unsigned short seg;
    unsigned short prev_seg;
    struct heapblk _WCFAR *p;

    /* reject zero and sizes where adding header+tag overhead would wrap
       (unsigned negation gives the largest representable request) */
    if( amt == 0 || amt > - (sizeof(struct heapblk) + TAG_SIZE*2) ) {
        return( (void _WCFAR *)NULL );
    }
    // Try to determine which segment to begin allocating from.
    // first, round up the amount
    size = (amt + TAG_SIZE + ROUND_SIZE) & ~ROUND_SIZE;
    if( size < FRL_SIZE ) {
        size = FRL_SIZE;
    }
    _AccessFHeap();
    for(;;) {
        if( size > __LargestSizeB4Rover ) {
            /* nothing before the rover can fit; start at the rover */
            seg = __fheapRover;
        } else {
            __LargestSizeB4Rover = 0;   // force value to be updated
            seg = __fheap;
        }
        for(;;) {
            if( seg == 0 ) {
                /* end of chain: try to allocate a fresh segment */
                seg = __AllocSeg( amt );
                if( seg == 0 )
                    break;
                if( __fheap == 0 ) {
                    __fheap = seg;
                } else {
                    /* NOTE(review): 'p'/'prev_seg' rely on a previous
                       iteration having walked an existing segment —
                       presumed to hold whenever __fheap != 0; confirm */
                    p->nextseg = seg;
                    p = MK_FP( seg, 0 );
                    p->prevseg = prev_seg;
                }
            }
            for(;;) {
                __fheapRover = seg;
                offset = __MemAllocator( amt, seg, 0 );
                if( offset != 0 )
                    goto release_heap;
                /* segment too small: grow it and retry in place */
                if( __GrowSeg( seg, amt ) == 0 )
                    break;
            }
            prev_seg = seg;
            p = MK_FP( seg, 0 );
            if( p->largest_blk > __LargestSizeB4Rover ) {
                __LargestSizeB4Rover = p->largest_blk;
            }
            seg = p->nextseg;
        }
        /* whole chain exhausted; let the application release memory */
        if( __fmemneed( amt ) == 0 )
            break;
    }
    if( seg == 0 ) {
        /* last resort: satisfy the request from the near heap */
        offset = (unsigned)_nmalloc( amt );
        if( offset != 0 )
            seg = _DGroup();
    }
release_heap:
    _ReleaseFHeap();
    return( MK_FP( seg, offset ) );
}
/*
 * _nfree() - free a near-heap allocation.
 *
 * Locates the miniheap containing 'stg' — first probing the free-rover,
 * the alloc-rover and their immediate neighbours, then scanning the
 * whole chain — and hands the block to __MemFree.  A pointer not inside
 * any miniheap is silently ignored.  free(NULL) is a no-op.
 */
_WCRTLINK void _nfree( void _WCNEAR *stg )
{
    mheapptr p1,p2;

    if( !stg )
        return;
    _AccessNHeap();
    do {        // first try some likely locations
        p1 = __MiniHeapFreeRover;
        if( p1 ) {
            if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                break;
            }
            p2 = p1;
            p1 = p1->prev;
            if( p1 ) {
                if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                    break;
                }
            }
            p1 = p2->next;
            if( p1 ) {
                if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                    break;
                }
            }
        }
        p1 = __MiniHeapRover;
        if( p1 ) {
            if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                break;
            }
            p2 = p1;
            p1 = p1->prev;
            if( p1 ) {
                if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                    break;
                }
            }
            p1 = p2->next;
            if( p1 ) {
                if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                    break;
                }
            }
        }
        // not found near rover, so search the list
        for( p1 = __nheapbeg; p1; p1 = p1->next ) {
            if( (PTR)p1 <= (PTR)stg && (PTR)p1+p1->len > (PTR)stg ) {
                // break twice!
                goto found_it;
            }
        }
        // this pointer is not in the heap
        _ReleaseNHeap();
        return;
    } while( 0 );
found_it:
    // we found the miniheap, free the storage
    __MemFree( (unsigned)stg, _DGroup(), (unsigned) p1 );
    __MiniHeapFreeRover = p1;
    /* freed below the alloc-rover: remember the largest block there so
       the next _nmalloc can start its scan earlier */
    if( p1 < __MiniHeapRover ) {
        if( p1->largest_blk > __LargestSizeB4MiniHeapRover ) {
            __LargestSizeB4MiniHeapRover = p1->largest_blk;
        }
    }
    _ReleaseNHeap();
}
/*
 * _nfree() - free a near-heap allocation (macro-based variant).
 *
 * Same strategy as the pointer-arithmetic version: probe the free-rover,
 * the alloc-rover and their neighbours with IS_IN_HEAP(), fall back to a
 * full scan of the miniheap chain, then release the block via __MemFree.
 * A pointer outside every miniheap is silently ignored; NULL is a no-op.
 */
_WCRTLINK void _nfree( void_nptr cstg )
{
    heapblk_nptr    heap;
    heapblk_nptr    heap2;

    if( cstg == NULL )
        return;
    _AccessNHeap();
    do {        // first try some likely locations
        heap = __MiniHeapFreeRover;
        if( heap != NULL ) {
            if( IS_IN_HEAP( cstg, heap ) ) {
                break;
            }
            heap2 = heap;
            heap = heap->prev.nptr;
            if( heap != NULL ) {
                if( IS_IN_HEAP( cstg, heap ) ) {
                    break;
                }
            }
            heap = heap2->next.nptr;
            if( heap != NULL ) {
                if( IS_IN_HEAP( cstg, heap ) ) {
                    break;
                }
            }
        }
        heap = __MiniHeapRover;
        if( heap != NULL ) {
            if( IS_IN_HEAP( cstg, heap ) ) {
                break;
            }
            heap2 = heap;
            heap = heap->prev.nptr;
            if( heap != NULL ) {
                if( IS_IN_HEAP( cstg, heap ) ) {
                    break;
                }
            }
            heap = heap2->next.nptr;
            if( heap != NULL ) {
                if( IS_IN_HEAP( cstg, heap ) ) {
                    break;
                }
            }
        }
        // not found near rover, so search the list
        for( heap = __nheapbeg; heap != NULL; heap = heap->next.nptr ) {
            if( IS_IN_HEAP( cstg, heap ) ) {
                // break twice!
                goto found_it;
            }
        }
        // this pointer is not in the heap
        _ReleaseNHeap();
        return;
    } while( 0 );
found_it:
    // we found the miniheap, free the storage
#ifdef _M_I86
    __MemFree( cstg, _DGroup(), heap );
#else
    __MemFree( cstg, heap );
#endif
    __MiniHeapFreeRover = heap;
    /* freed below the alloc-rover: remember the largest block there so
       the next allocation can start its scan earlier */
    if( heap < __MiniHeapRover ) {
        if( __LargestSizeB4MiniHeapRover < heap->largest_blk ) {
            __LargestSizeB4MiniHeapRover = heap->largest_blk;
        }
    }
    _ReleaseNHeap();
}
/*
 * __HeapManager_expand() - resize an allocation in place (macro variant).
 *
 * Same contract as the 16-bit version: 'offset' is the user pointer in
 * segment 'seg' with its length tag TAG_SIZE bytes below.  Growing eats
 * adjacent free blocks; shrinking splits off the tail and frees it.
 * Returns __HM_SUCCESS, __HM_FAIL, or __HM_TRYGROW (caller may grow the
 * heap by *growth_size and retry).
 *
 * NOTE: the '#if defined( _M_I86 )' / brace interleave below is
 * deliberate — on non-16-bit builds the 'else' arm (based heap) does
 * not exist and the dangling '}' simply closes the 'if'.
 */
int __HeapManager_expand( __segment seg, unsigned offset, size_t req_size,
                          size_t *growth_size )
{
    miniheapblkp SEG_BPTR( seg ) hblk;
    freelistp SEG_BPTR( seg ) p1;
    freelistp SEG_BPTR( seg ) p2;
    freelistp SEG_BPTR( seg ) pnext;
    freelistp SEG_BPTR( seg ) pprev;
    size_t new_size;
    size_t old_size;
    size_t free_size;

    /* round (new_size + tag) to multiple of pointer size */
    new_size = __ROUND_UP_SIZE( req_size + TAG_SIZE, ROUND_SIZE );
    if( new_size < req_size )
        new_size = ~0;          //go for max (rounding overflowed)
    if( new_size < FRL_SIZE ) {
        new_size = FRL_SIZE;
    }
    p1 = FRL_BPTR( seg, offset, -TAG_SIZE );    /* block header */
    old_size = MEMBLK_SIZE( p1 );
    if( new_size > old_size ) {
        /* enlarging the current allocation */
        p2 = FRL_BPTR( seg, p1, old_size );     /* block right after ours */
        *growth_size = new_size - old_size;
        for( ;; ) {
            if( p2->len == END_TAG ) {
                /* hit the end of the heap; caller may grow it */
                return( __HM_TRYGROW );
            } else if( IS_MEMBLK_USED( p2 ) ) {
                /* next piece is allocated */
                break;
            } else {
                /* next piece is free: absorb some or all of it */
                free_size = p2->len;
                pnext = p2->next;
                pprev = p2->prev;
                if( seg == _DGroup() ) {
                    // near heap: find the miniheap containing 'offset'
                    for( hblk = __nheapbeg; hblk->next != NULL; hblk = hblk->next ) {
                        if( FRL_BPTR( seg, hblk, 0 ) <= FRL_BPTR( seg, offset, 0 )
                          && FRL_BPTR( seg, hblk, hblk->len ) > FRL_BPTR( seg, offset, 0 ) ) {
                            break;
                        }
                    }
#if defined( _M_I86 )
                } else {
                    // Based heap: control block is at offset 0
                    hblk = 0;
#endif
                }
                if( hblk->rover == p2 ) {
                    hblk->rover = p2->prev;
                }
                if( free_size < *growth_size
                  || free_size - *growth_size < FRL_SIZE ) {
                    /* unlink small free block (whole block absorbed) */
                    pprev->next = pnext;
                    pnext->prev = pprev;
                    p1->len += free_size;
                    hblk->numfree--;
                    if( free_size >= *growth_size ) {
                        return( __HM_SUCCESS );
                    }
                    /* still short; keep eating the following blocks */
                    *growth_size -= free_size;
                    p2 = FRL_BPTR( seg, p2, free_size );
                } else {
                    /* free block big enough to split: shift its header
                       up by *growth_size and relink it */
                    p2 = FRL_BPTR( seg, p2, *growth_size );
                    p2->len = free_size - *growth_size;
                    p2->prev = pprev;
                    p2->next = pnext;
                    pprev->next = p2;
                    pnext->prev = p2;
                    p1->len += *growth_size;
                    return( __HM_SUCCESS );
                }
            }
        }
        /* no suitable free blocks behind, have to move block */
        return( __HM_FAIL );
    } else {
        /* shrinking the current allocation */
        if( old_size - new_size >= FRL_SIZE ) {
            /* block big enough to split */
            SET_MEMBLK_SIZE_USED( p1, new_size );
            p1 = FRL_BPTR( seg, p1, new_size );
            /* tail marked 'used' so the free routine below accepts it */
            SET_MEMBLK_SIZE_USED( p1, old_size - new_size );
            if( seg == _DGroup() ) {
                // near heap
                for( hblk = __nheapbeg; hblk->next != NULL; hblk = hblk->next ) {
                    if( FRL_BPTR( seg, hblk, 0 ) <= FRL_BPTR( seg, offset, 0 )
                      && FRL_BPTR( seg, hblk, hblk->len ) > FRL_BPTR( seg, offset, 0 ) ) {
                        break;
                    }
                }
#if defined( _M_I86 )
            } else {
                // Based heap
                hblk = 0;
#endif
            }
            /* _bfree will decrement 'numalloc' 08-jul-91 */
            hblk->numalloc++;
#if defined( _M_I86 )
            _bfree( seg, FRL_BPTR( seg, p1, TAG_SIZE ) );   /* free the top portion */
#else
            _nfree( FRL_BPTR( seg, p1, TAG_SIZE ) );
#endif
        }
    }
    return( __HM_SUCCESS );
}