/* Extend the process's data space by INCREMENT.  If INCREMENT is
   negative, shrink the data space by -INCREMENT.  Return the start of
   the newly allocated region, or (void *) -1 on error.  */
void *
__sbrk (intptr_t increment)
{
  /* If this is not part of the dynamic library, or the library is used
     via dynamic loading in a statically linked program, refresh
     __curbrk from the kernel's brk value.  That way two separate
     instances of __brk and __sbrk can share the heap, returning
     interleaved pieces of it.  */
  if (__curbrk == NULL || __libc_multiple_libcs)
    {
      if (__brk (0) < 0)		/* Initialize the break.  */
	return (void *) -1;
    }

  /* A zero increment is just a query for the current break.  */
  if (increment == 0)
    return __curbrk;

  void *oldbrk = __curbrk;

  /* Refuse requests that would wrap around the address space:
     growing past the top, or shrinking below address zero.  */
  int wraps;
  if (increment > 0)
    wraps = (uintptr_t) oldbrk + (uintptr_t) increment < (uintptr_t) oldbrk;
  else
    wraps = (uintptr_t) oldbrk < (uintptr_t) -increment;
  if (wraps)
    {
      __set_errno (ENOMEM);
      return (void *) -1;
    }

  if (__brk (oldbrk + increment) < 0)
    return (void *) -1;

  /* sbrk returns the PREVIOUS break, i.e. the base of the new space.  */
  return oldbrk;
}
void* sbrk(ptrdiff_t increment) { // Initialize __bionic_brk if necessary. if (__bionic_brk == NULL) { __bionic_brk = __brk(NULL); } // Don't ask the kernel if we already know the answer. if (increment == 0) { return __bionic_brk; } // Avoid overflow. uintptr_t old_brk = reinterpret_cast<uintptr_t>(__bionic_brk); if ((increment > 0 && static_cast<uintptr_t>(increment) > (UINTPTR_MAX - old_brk)) || (increment < 0 && static_cast<uintptr_t>(-increment) > old_brk)) { errno = ENOMEM; return reinterpret_cast<void*>(-1); } void* desired_brk = reinterpret_cast<void*>(old_brk + increment); __bionic_brk = __brk(desired_brk); if (__bionic_brk < desired_brk) { errno = ENOMEM; return reinterpret_cast<void*>(-1); } return reinterpret_cast<void*>(old_brk); }
static inline void frob_brk (void) { __brk (0); /* Initialize the break. */ #if ! __ASSUME_BRK_PAGE_ROUNDED /* If the dynamic linker was executed as a program, then the break may start immediately after our data segment. However, dl-minimal.c has already stolen the remainder of the page for internal allocations. If we don't adjust the break location recorded by the kernel, the normal program startup will inquire, find the value at our &_end, and start allocating its own data there, clobbering dynamic linker data structures allocated there during startup. Later Linux kernels have changed this behavior so that the initial break value is rounded up to the page boundary before we start. */ extern char *__curbrk attribute_hidden; extern char _end[] attribute_hidden; char *const endpage = (void *) 0 + (((__curbrk - (char *) 0) + GLRO(dl_pagesize) - 1) & -GLRO(dl_pagesize)); if (__builtin_expect (__curbrk >= _end && __curbrk < endpage, 0)) __brk (endpage); #endif }
// Set the program break to end_data.  Returns 0 on success; on failure
// returns -1 with errno set to ENOMEM (the kernel leaves the break
// unchanged when it cannot honor the request, so __brk() returning a
// value other than end_data means the request was refused).
int brk(void* end_data) {
  void* new_brk = __brk(end_data);
  if (new_brk != end_data) {
    // Fix: report the failure via errno, matching the sibling brk()
    // implementation and the POSIX contract for brk().
    errno = ENOMEM;
    return -1;
  }
  __bionic_brk = new_brk;
  return 0;
}
// Set the program break to end_data.  Returns 0 on success, or -1 with
// errno = ENOMEM when the kernel sets the break below what was asked.
int brk(void* end_data) {
  void* new_break = __brk(end_data);
  __bionic_brk = new_break;
  if (new_break < end_data) {
    errno = ENOMEM;
    return -1;
  }
  return 0;
}
// Set the program break to end_data.  Returns 0 on success, -1 on
// failure (the cached break is only updated when the kernel accepted
// the exact address requested).
int brk(void* end_data) {
  char* result = __brk( end_data );
  if (result != end_data) {
    return -1;
  }
  __bionic_brk = result;
  return 0;
}
// Grow (or shrink) the data segment by `increment` bytes and return the
// start of the new region, or (void*)-1 on failure.
void *sbrk(ptrdiff_t increment) {
  // Lazily initialize the cached break on first use.
  if (__bionic_brk == NULL) {
    __bionic_brk = __brk((void*)0);
  }

  // Round the cached break up to the SBRK_ALIGN boundary; that aligned
  // address is what the caller receives.
  char* aligned = (char*)(((long)__bionic_brk + SBRK_ALIGN - 1) & ~(SBRK_ALIGN - 1));
  char* wanted = aligned + increment;

  char* result = __brk(wanted);
  if (result == (void*)-1) {
    return result;
  }
  if (result < wanted) {
    // Kernel gave us less than requested.
    errno = ENOMEM;
    return (void*)-1;
  }
  __bionic_brk = result;
  return aligned;
}
/* Set the break to endds.  Maps __brk's (void *)-1 failure sentinel to
   the conventional -1/0 return of brk().  */
_WCRTLINK int brk( void *endds )
{
    if( __brk( (unsigned)endds ) == (void *)-1 )
        return( -1 );
    return( 0 );
}
/* Adjust the break by INCREMENT relative to the recorded _curbrk and
   return __brk's result (the new space, or (void *)-1 on failure).  */
_WCRTLINK void _WCNEAR *sbrk( int increment )
{
    return( __brk( increment + _curbrk ) );
}
/* Grow the near heap (DGROUP) by at least `amount` bytes.
   Returns 1 on success, 0 on failure.  On targets with their own heap
   managers it simply creates a new near-heap block; otherwise it moves
   the break with __brk() and either extends ("slices") the last
   mini-heap in place or links up a brand-new mini-heap, then donates
   the new space to the free list via _nfree().  */
int __ExpandDGROUP( unsigned amount )
{
#if defined(__WINDOWS__) || defined(__WARP__) || defined(__NT__) \
 || defined(__CALL21__) || defined(__RDOS__)
    // first try to free any available storage
    _nheapshrink();
    return( __CreateNewNHeap( amount ) );
#else
    mheapptr        p1;
    frlptr          flp;
    unsigned        brk_value;
    tag             *last_tag;
    unsigned        new_brk_value;
    void _WCNEAR    *brk_ret;

#if defined(__DOS_EXT__)
    if( !__IsCtsNHeap() ) {
        return( __CreateNewNHeap( amount ) );   // Won't slice either
    }
    // Rational non-zero based system should go through.
#endif
    /* Bail out early when heap growth is disabled or the break is
       already pinned at its sentinel value.  */
    if( !__heap_enabled )
        return( 0 );
    if( _curbrk == ~1u )
        return( 0 );
    if( __AdjustAmount( &amount ) == 0 )
        return( 0 );
#if defined(__DOS_EXT__)
    if( _IsPharLap() && !_IsFlashTek() ) {
        _curbrk = SegmentLimit();
    }
#endif
    /* Compute the requested new break; clamp to the sentinel on
       unsigned wrap-around.  */
    new_brk_value = amount + _curbrk;
    if( new_brk_value < _curbrk ) {
        new_brk_value = ~1u;
    }
    brk_ret = __brk( new_brk_value );
    if( brk_ret == (void _WCNEAR *)-1 ) {
        return( 0 );
    }
    brk_value = (unsigned)brk_ret;
    if( brk_value > /*0xfff8*/ ~7u ) {
        return( 0 );
    }
    if( new_brk_value <= brk_value ) {
        return( 0 );
    }
    /* How much usable space did we actually gain (minus the heap tag)?
       The subtraction-overflow test guards against amount < TAG_SIZE.  */
    amount = new_brk_value - brk_value;
    if( amount - TAG_SIZE > amount ) {
        return( 0 );
    } else {
        amount -= TAG_SIZE;
    }
    /* Find the mini-heap that ends at (or spans) the old break, if any.  */
    for( p1 = __nheapbeg; p1 != NULL; p1 = p1->next ) {
        if( p1->next == NULL ) break;
        if( (unsigned)p1 <= brk_value
          && ((unsigned)p1) + p1->len + TAG_SIZE >= brk_value ) {
            break;
        }
    }
    if( (p1 != NULL) && ((brk_value - TAG_SIZE) == (unsigned)( (PTR)p1 + p1->len) ) ) {
        /* we are extending the previous heap block (slicing) */
        /* nb. account for the end-of-heap tag */
        brk_value -= TAG_SIZE;
        amount += TAG_SIZE;
        flp = (frlptr) brk_value;
        /* adjust current entry in heap list */
        p1->len += amount;
        /* fix up end of heap links */
        last_tag = (tag *) ( (PTR)flp + amount );
        last_tag[0] = END_TAG;
    } else {
        if( amount < sizeof( miniheapblkp ) + sizeof( frl ) ) {
            /* there isn't enough for a heap block (struct miniheapblkp)
               and one free block (frl) */
            return( 0 );
        }
        // Initializing the near heap if __nheapbeg == NULL,
        // otherwise, a new mini-heap is getting linked up
        p1 = (mheapptr)brk_value;
        p1->len = amount;
        flp = __LinkUpNewMHeap( p1 );
        amount = flp->len;
    }
    /* build a block for _nfree() */
    SET_MEMBLK_SIZE_USED( flp, amount );
    /* Mark it allocated, then free it so it lands on the free list.  */
    ++p1->numalloc;                         /* 28-dec-90 */
    p1->largest_blk = ~0;       /* set to largest value to be safe */
    _nfree( (PTR)flp + TAG_SIZE );
    return( 1 );
#endif
}
/* Adjust the break by INCREMENT relative to the recorded _curbrk and
   return __brk's result (the new space, or the failure sentinel).  */
_WCRTLINK void_nptr sbrk( int increment )
{
    return( __brk( increment + _curbrk ) );
}
static inline void
frob_brk (void)
{
  /* Query the kernel once so the recorded break value is initialized.  */
  __brk (0);
}
/* Give unused near-heap memory back to the system.
   Returns 0 on success (including "nothing to do"), -1 if __brk fails.
   On break-based targets it trims the trailing free block of the LAST
   mini-heap (or drops that mini-heap entirely) and lowers _curbrk; on
   the other targets it instead releases every mini-heap that is
   completely free.  */
_WCRTLINK int _nheapshrink( void )
{
    mheapptr    mhp;
#if !defined(__WARP__) && \
    !defined(__WINDOWS_286__) && \
    !defined(__WINDOWS_386__) && \
    !defined(__NT__) && \
    !defined(__CALL21__) && \
    !defined(__SNAP__)
    // Shrink by adjusting _curbrk
    frlptr      last_free;
    frlptr      end_tag;
    unsigned    new_brk;

    _AccessNHeap();
#if defined(__DOS_EXT__)
    if( !_IsRationalZeroBase() && !_IsCodeBuilder() ) {
#endif
        if( __nheapbeg == NULL ) {
            _ReleaseNHeap();
            return( 0 );        // No near heap, can't shrink
        }
        /* Goto the end of miniheaplist (if there's more than 1 blk) */
        for( mhp = __nheapbeg; mhp->next; mhp = mhp->next );
        /* check that last free block is at end of heap */
        last_free = mhp->freehead.prev;
        end_tag = (frlptr) ( (PTR)last_free + last_free->len );
        if( end_tag->len != END_TAG ) {
            _ReleaseNHeap();
            return( 0 );
        }
        if( end_tag != (frlptr) ((PTR)mhp + mhp->len ) ) {
            _ReleaseNHeap();
            return( 0 );
        }
#if defined(__DOS_EXT__)
        // only shrink if we can shave off at least 4k
        if( last_free->len < 0x1000 ) {
            _ReleaseNHeap();
            return( 0 );
        }
#else
        /* Not worth shrinking if the tail free block is no bigger than
           a free-list entry.  */
        if( last_free->len <= sizeof( frl ) ) {
            _ReleaseNHeap();
            return( 0 );
        }
#endif
        /* make sure there hasn't been an external change in _curbrk */
        if( sbrk( 0 ) != &(end_tag->prev) ) {
            _ReleaseNHeap();
            return( 0 );
        }
        /* calculate adjustment factor */
        if( mhp->len-last_free->len > sizeof( struct miniheapblkp ) ) {
            // this miniheapblk is still being used
#if defined(__DOS_EXT__)
            frlptr  new_last_free;
            /* Round down to a page boundary (minus the tag) so the break
               stays page aligned for the DOS extender.  */
            new_last_free = (frlptr)((((unsigned)last_free + 0xfff) & ~0xfff) - TAG_SIZE);
            if( new_last_free == last_free ) {
#endif
                // remove entire entry
                mhp->len -= last_free->len;
                --mhp->numfree;
                // Relink the freelist entries, and update the rover
                mhp->freehead.prev = last_free->prev;
                last_free->prev->next = &mhp->freehead;
                if( mhp->rover == last_free )
                    mhp->rover = last_free->prev;
#if defined(__DOS_EXT__)
            } else {
                // just shrink the last free entry
                mhp->len -= last_free->len;
                last_free->len = (PTR)new_last_free - (PTR)last_free;
                mhp->len += last_free->len;
                last_free = new_last_free;
            }
#endif
            last_free->len = END_TAG;
            new_brk = (unsigned) ((PTR)last_free + TAG_SIZE );
        } else {
            // we can remove this miniheapblk
            if( mhp->prev ) {   // Not the first miniheapblk
                mhp->prev->next = NULL;
                new_brk = (unsigned)mhp;//->prev + (unsigned)mhp->prev->len;
            } else {            // Is the first miniheapblk
                new_brk = (unsigned)__nheapbeg;
                __nheapbeg = NULL;
            }
            // Update rover info
            if( __MiniHeapRover == mhp ) {
                __MiniHeapRover = __nheapbeg;
                __LargestSizeB4MiniHeapRover = 0;
            }
        }
        if( __brk( new_brk ) == (void _WCNEAR *) -1 ) {
            _ReleaseNHeap();
            return( -1 );
        }
        _ReleaseNHeap();
        return( 0 );
#if defined(__DOS_EXT__)
    }
    __FreeDPMIBlocks();         // For RSI/zero-base and Intel CB
    _ReleaseNHeap();
    return( 0 );
#endif
#else
    // Shrink by releasing mini-heaps
    {
        mheapptr    pnext;

        _AccessNHeap();
        /* A mini-heap is fully free when its single free block accounts
           for the whole block minus the header.  */
        for( mhp = __nheapbeg; mhp; mhp = pnext ) {
            pnext = mhp->next;
            if( mhp->len - sizeof(struct miniheapblkp) == (mhp->freehead.prev)->len )
                __ReleaseMiniHeap( mhp );
        }
        _ReleaseNHeap();
        return( 0 );
    }
#endif
}