Example #1
_WCRTLINK void _nheapgrow( void )
    {
#if defined( __WINDOWS_286__ ) || !defined( _M_I86 )
        _nfree( _nmalloc( 1 ) );        /* get something into the heap */
#else
        unsigned max_paras;
        unsigned curr_paras;
        unsigned diff_paras;
        unsigned expand;

        _AccessNHeap();
        /* calculate # of paragraphs in use; always includes extra slack space (i.e. 0x10) */
        curr_paras = (( _curbrk + 0x10 ) & ~0x0f ) >> 4;
        if( curr_paras == 0 ) {
            /* we're already at 64k */
            _ReleaseNHeap();
            return;
        }
#if defined(__QNX__)
        if( qnx_segment_realloc( _DGroup(), 65536L ) == -1 ) {
            _ReleaseNHeap();
            return;
        }
        max_paras = PARAS_IN_64K;
#elif defined(__OS2__)
        if( DosReallocSeg( 0, _DGroup() ) )  {
            _ReleaseNHeap();
            return;
        }
        max_paras = PARAS_IN_64K;
#else
        if( _RWD_osmode != DOS_MODE ) {                     /* 23-apr-91 */
            max_paras = PARAS_IN_64K;
        } else {
            max_paras = TinyMaxSet( _RWD_psp );
            /* subtract off code size */
            max_paras -= _DGroup() - _RWD_psp;
            if( max_paras > PARAS_IN_64K ) {
                max_paras = PARAS_IN_64K;
            }
        }
#endif
        if( max_paras <= curr_paras ) {
            /* '<' -> something is wrong, '==' -> can't change size */
            _ReleaseNHeap();
            return;
        }
        diff_paras = max_paras - curr_paras;
        expand = (( diff_paras + 1 ) << 4 ) - ( _curbrk & 0x0f );
        expand += __LastFree(); /* compensate for _expand's adjustment */
        _ReleaseNHeap();
        _nfree( _nmalloc( expand - ( sizeof( size_t ) + sizeof(frl) ) ) );
#endif
    }
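A minimal usage sketch, not taken from the library sources: it assumes the usual Open Watcom <malloc.h> declarations for _nheapgrow(), _nmalloc() and _nfree(), and simply grows the near heap toward its 64KB limit before drawing a near allocation from it.

#include <malloc.h>

void grow_then_allocate( void )
{
    char __near *p;

    _nheapgrow();                   /* extend DGROUP toward 64KB first */
    p = _nmalloc( 512 );            /* near allocation now comes from DGROUP */
    if( p != NULL ) {
        p[0] = '\0';                /* use the block */
        _nfree( p );                /* return it to the near heap */
    }
}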
Example #2
void f5( void )
{
    void *p;
    void __near *q;

    q = _nmalloc( 20 );
    p = malloc( 10 );
    free( p );
    _nfree( q );
}
Example #3
void *__ReAllocDPMIBlock( frlptr p1, unsigned req_size )
{
    mheapptr            mhp;
    struct dpmi_hdr     *dpmi;
    struct dpmi_hdr     *prev_dpmi;
    unsigned            size;
    frlptr              flp, flp2;

    if( !__heap_enabled )
        return( 0 );
    __FreeDPMIBlocks();
    prev_dpmi = NULL;
    for( mhp = __nheapbeg; mhp; mhp = mhp->next ) {
        if( ((PTR)mhp + sizeof(struct miniheapblkp) == (PTR)p1)
          && (mhp->numalloc == 1) ) {
            // The mini-heap contains only this memblk
            __unlink( mhp );
            dpmi = ((struct dpmi_hdr *)mhp) - 1;
            if( dpmi->dos_seg_value != 0 )
                return( NULL );
            size = mhp->len + sizeof(struct dpmi_hdr) + TAG_SIZE;
            size += ( req_size - (p1->len-TAG_SIZE) );
            size += BLKSIZE_ALIGN_MASK;
            size &= ~BLKSIZE_ALIGN_MASK;
            prev_dpmi = dpmi;
            dpmi = TinyDPMIRealloc( dpmi, size );
            if( dpmi == NULL ) {
                dpmi = prev_dpmi;
                return( NULL );         // indicate resize failed
            }
            dpmi->dos_seg_value = 0;
            mhp = (mheapptr)( dpmi + 1 );
            mhp->len = size - sizeof(struct dpmi_hdr) - TAG_SIZE;
            flp = __LinkUpNewMHeap( mhp );
            mhp->numalloc = 1;

            // round up to even number
            req_size = (req_size + 1) & ~1;
            size = flp->len - req_size;
            if( size >= FRL_SIZE ) {    // Enough to spare a free block
                flp->len = req_size | 1;// adjust size and set allocated bit
                // Make up a free block at the end
                flp2 = (frlptr)((PTR)flp + req_size);
                flp2->len = size | 1;
                ++mhp->numalloc;
                mhp->largest_blk = 0;
                _nfree( (PTR)flp2 + TAG_SIZE );
            } else {
                flp->len |= 1; // set allocated bit
            }
            return( flp );
        }
    }
    return( NULL );
}
Example #4
_WCRTLINK void _WCNEAR *_nrealloc( void _WCI86NEAR *stg, size_t req_size )
    {
        void _WCNEAR *p;
        size_t     old_size;

        if( stg == NULL ) {
            return( _nmalloc( req_size ) );
        }
        if( req_size == 0 ) {
            _nfree( stg );
            return( (void _WCNEAR *) NULL );
        }
        old_size = _nmsize( stg );
        p = _nexpand( stg, req_size );  /* try to expand it in place */
        if( p == NULL ) {               /* if couldn't be expanded in place */
            #if defined(__DOS_EXT__)
            if( _IsRational() ) {
                frlptr  flp, newflp;

                flp = (frlptr) ((PTR)stg - TAG_SIZE);
                newflp = __ReAllocDPMIBlock( flp, req_size + TAG_SIZE );
                if( newflp ) {
                    return( (void _WCNEAR *)((PTR)newflp + TAG_SIZE) );
                }
            }
            #endif
            p = _nmalloc( req_size );   /* - allocate a new block */
            if( p != NULL ) {           /* - if we got one */
                memcpy( p, stg, old_size );  /* copy it */
                _nfree( stg );                  /* and free old one */
            } else {
                _nexpand( stg, old_size );      /* reset back to old size */
            }
        }
        return( p );
    }
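A small usage sketch (not part of the runtime) exercising the three paths visible in _nrealloc() above: a NULL pointer behaves like _nmalloc(), a request size of zero frees the block and returns NULL, and a larger size is satisfied either by expanding in place or by allocating, copying and freeing.

#include <malloc.h>

void nrealloc_demo( void )
{
    char __near *p;

    p = _nrealloc( NULL, 16 );      /* NULL pointer: same as _nmalloc( 16 ) */
    if( p == NULL )
        return;
    p[0] = 'x';
    p = _nrealloc( p, 64 );         /* grow: expanded in place or moved */
    if( p != NULL ) {
        _nrealloc( p, 0 );          /* size 0: block is freed, NULL returned */
    }
}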
Example #5
int __ExpandDGROUP( unsigned amount )
{
#if defined(__WINDOWS__) || defined(__WARP__) || defined(__NT__) \
  || defined(__CALL21__) || defined(__RDOS__)
    // first try to free any available storage
    _nheapshrink();
    return( __CreateNewNHeap( amount ) );
#else
    mheapptr    p1;
    frlptr      flp;
    unsigned    brk_value;
    tag         *last_tag;
    unsigned    new_brk_value;
    void        _WCNEAR *brk_ret;

  #if defined(__DOS_EXT__)
    if( !__IsCtsNHeap() ) {
        return( __CreateNewNHeap( amount ) );   // Won't slice either
    }
    // Rational non-zero based system should go through.
  #endif
    if( !__heap_enabled )
        return( 0 );
    if( _curbrk == ~1u )
        return( 0 );
    if( __AdjustAmount( &amount ) == 0 )
        return( 0 );
  #if defined(__DOS_EXT__)
    if( _IsPharLap() && !_IsFlashTek() ) {
        _curbrk = SegmentLimit();
    }
  #endif
    new_brk_value = amount + _curbrk;
    if( new_brk_value < _curbrk ) {
        new_brk_value = ~1u;
    }
    brk_ret = __brk( new_brk_value );
    if( brk_ret == (void _WCNEAR *)-1 ) {
        return( 0 );
    }
    brk_value = (unsigned)brk_ret;
    if( brk_value >  /*0xfff8*/ ~7u ) {
        return( 0 );
    }
    if( new_brk_value <= brk_value ) {
        return( 0 );
    }
    amount = new_brk_value - brk_value;
    if( amount - TAG_SIZE > amount ) {
        return( 0 );
    } else {
        amount -= TAG_SIZE;
    }
    for( p1 = __nheapbeg; p1 != NULL; p1 = p1->next ) {
        if( p1->next == NULL )
            break;
        if( (unsigned)p1 <= brk_value && ((unsigned)p1) + p1->len + TAG_SIZE >= brk_value ) {
            break;
        }
    }
    if( (p1 != NULL) && ((brk_value - TAG_SIZE) == (unsigned)( (PTR)p1 + p1->len) ) ) {
        /* we are extending the previous heap block (slicing) */
        /* nb. account for the end-of-heap tag */
        brk_value -= TAG_SIZE;
        amount += TAG_SIZE;
        flp = (frlptr) brk_value;
        /* adjust current entry in heap list */
        p1->len += amount;
        /* fix up end of heap links */
        last_tag = (tag *) ( (PTR)flp + amount );
        last_tag[0] = END_TAG;
    } else {
        if( amount < sizeof( miniheapblkp ) + sizeof( frl ) ) {
        /*  there isn't enough for a heap block (struct miniheapblkp) and
            one free block (frl) */
            return( 0 );
        }
        // Initializing the near heap if __nheapbeg == NULL,
        // otherwise, a new mini-heap is getting linked up
        p1 = (mheapptr)brk_value;
        p1->len = amount;
        flp = __LinkUpNewMHeap( p1 );
        amount = flp->len;
    }
    /* build a block for _nfree() */
    SET_MEMBLK_SIZE_USED( flp, amount );
    ++p1->numalloc;                         /* 28-dec-90 */
    p1->largest_blk = ~0;    /* set to largest value to be safe */
    _nfree( (PTR)flp + TAG_SIZE );
    return( 1 );
#endif
}
Example #6
static int __CreateNewNHeap( unsigned amount )
{
    mheapptr        p1;
    frlptr          flp;
    unsigned        brk_value;
  #if defined(__WARP__)
    ULONG           os2_alloc_flags;
  #endif

    if( !__heap_enabled )
        return( 0 );
    if( _curbrk == ~1u )
        return( 0 );
    if( __AdjustAmount( &amount ) == 0 )
        return( 0 );
  #if defined(__WINDOWS_286__)
    brk_value = (unsigned)LocalAlloc( LMEM_FIXED, amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
  #elif defined(__WINDOWS_386__)
    brk_value = (unsigned)DPMIAlloc( amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
  #elif defined(__WARP__)
    {
        PBYTE           p;
        APIRET          apiret;

        os2_alloc_flags = PAG_COMMIT | PAG_READ | PAG_WRITE;
        if( _os2_obj_any_supported && _os2_use_obj_any ) {
            os2_alloc_flags |= OBJ_ANY;
        }
        apiret = DosAllocMem( (PPVOID)&p, amount, os2_alloc_flags );
        if( apiret )
            return( 0 );

        brk_value = (unsigned)p;
    }
  #elif defined(__NT__)
    brk_value = (unsigned)VirtualAlloc( NULL, amount, MEM_COMMIT,
                                        PAGE_EXECUTE_READWRITE );
    //brk_value = (unsigned) LocalAlloc( LMEM_FIXED, amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
  #elif defined(__CALL21__)
    {
        tag _WCNEAR *tmp_tag;

        tmp_tag = (tag _WCNEAR *)TinyMemAlloc( amount );
        if( tmp_tag == NULL ) {
            return( 0 );
        }
        /* make sure it will not look like the end of a heap */
        tmp_tag[0] = ! END_TAG;
        brk_value = (unsigned)&tmp_tag[2];
        amount -= 2 * TAG_SIZE; // 11-jun-95, subtract extra tag
    }
  #elif defined(__DOS_EXT__)
    // if( !__IsCtsNHeap() ) {
    {
        tag         *tmp_tag;

        if( _IsRational() ) {
            tmp_tag = RationalAlloc( amount );
            if( tmp_tag ) {
                amount = *tmp_tag;
            }
        } else {    /* CodeBuilder */
            tmp_tag = TinyCBAlloc( amount );
            amount -= TAG_SIZE;
        }
        if( tmp_tag == NULL ) {
            return( 0 );
        }
        brk_value = (unsigned)tmp_tag;
    }
    // Pharlap, RSI/non-zero can never call this function
  #elif defined(__RDOS__)
    brk_value = (unsigned)RdosAllocateMem( amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
  #endif
    if( amount - TAG_SIZE > amount ) {
        return( 0 );
    } else {
        amount -= TAG_SIZE;
    }
    if( amount < sizeof( miniheapblkp ) + sizeof( frl ) ) {
        /* there isn't enough for a heap block (struct miniheapblkp) and
           one free block (frl) */
        return( 0 );
    }
    /* we've got a new heap block */
    p1 = (mheapptr)brk_value;
    p1->len = amount;
  #if defined(__WARP__)
    // Remember if block was allocated with OBJ_ANY - may be in high memory
    p1->used_obj_any = ( _os2_obj_any_supported && _os2_use_obj_any );
  #endif
    // Now link it up
    flp = __LinkUpNewMHeap( p1 );
    amount = flp->len;
    /* build a block for _nfree() */
    SET_MEMBLK_SIZE_USED( flp, amount );
    ++p1->numalloc;
    p1->largest_blk = 0;
    _nfree( (PTR)flp + TAG_SIZE );
    return( 1 );
}
Example #7
int __HeapManager_expand( __segment seg,
                          unsigned offset,
                          size_t req_size,
                          size_t *growth_size )
{
    #if defined( _M_I86 )
        typedef struct freelistp __based(seg) *fptr;
        typedef char __based(void) *cptr;

        struct miniheapblkp __based(seg) *hblk;
    #else
        typedef struct freelistp _WCNEAR *fptr;
        typedef char _WCNEAR *cptr;

        mheapptr hblk;
    #endif
    fptr        p1;
    fptr        p2;
    fptr        pnext;
    fptr        pprev;
    size_t      new_size;
    size_t      old_size;
    size_t      free_size;

    /* round (new_size + tag) to multiple of pointer size */
    new_size = (req_size + TAG_SIZE + ROUND_SIZE) & ~ROUND_SIZE;
    if( new_size < req_size ) new_size = ~0; //go for max
    if( new_size < FRL_SIZE ) {
        new_size = FRL_SIZE;
    }
    p1 = (fptr) ((cptr)offset - TAG_SIZE);
    old_size = p1->len & ~1;
    if( new_size > old_size ) {
        /* enlarging the current allocation */
        p2 = (fptr) ((cptr)p1 + old_size);
        *growth_size = new_size - old_size;
        for(;;) {
            free_size = p2->len;
            if( p2->len == END_TAG ) {
                return( __HM_TRYGROW );
            } else if( free_size & 1 ) { /* next piece is allocated */
                break;
            } else {
                pnext = p2->next;
                pprev = p2->prev;

                if( seg == _DGroup() ) { // near heap
                    for( hblk = __nheapbeg; hblk->next; hblk = hblk->next ) {
                        if( (fptr)hblk <= (fptr)offset &&
                            (fptr)((PTR)hblk+hblk->len) > (fptr)offset ) break;
                    }
                }
                #if defined( _M_I86 )
                    else {      // Based heap
                        hblk = 0;
                    }
                #endif

                if( hblk->rover == p2 ) { /* 09-feb-91 */
                    hblk->rover = p2->prev;
                }
                if( free_size < *growth_size  ||
                    free_size - *growth_size < FRL_SIZE ) {
                    /* unlink small free block */
                    pprev->next = pnext;
                    pnext->prev = pprev;
                    p1->len += free_size;
                    hblk->numfree--;
                    if( free_size >= *growth_size ) {
                        return( __HM_SUCCESS );
                    }
                    *growth_size -= free_size;
                    p2 = (fptr) ((cptr)p2 + free_size);
                } else {
                    p2 = (fptr) ((cptr)p2 + *growth_size);
                    p2->len = free_size - *growth_size;
                    p2->prev = pprev;
                    p2->next = pnext;
                    pprev->next = p2;
                    pnext->prev = p2;
                    p1->len += *growth_size;
                    return( __HM_SUCCESS );
                }
            }
        }
        /* no suitable free blocks behind, have to move block */
        return( __HM_FAIL );
    } else {
        /* shrinking the current allocation */
        if( old_size - new_size >= FRL_SIZE ) {
            /* block big enough to split */
            p1->len = new_size | 1;
            p1 = (fptr) ((cptr)p1 + new_size);
            p1->len = (old_size - new_size) | 1;
            if( seg == _DGroup() ) { // near heap
                for( hblk = __nheapbeg; hblk->next; hblk = hblk->next ) {
                    if( (fptr)hblk <= (fptr)offset &&
                        (fptr)((PTR)hblk+hblk->len) > (fptr)offset ) break;
                }
            }
            #if defined( _M_I86 )
                else    // Based heap
                    hblk = 0;
            #endif
            /* _bfree will decrement 'numalloc' 08-jul-91 */
            hblk->numalloc++;
            #if defined( _M_I86 )
                _bfree( seg, (cptr)p1 + TAG_SIZE );
                /* free the top portion */
            #else
                _nfree( (cptr)p1 + TAG_SIZE );
            #endif
        }
    }
    return( __HM_SUCCESS );
}
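__HeapManager_expand() does not retry on its own; it reports __HM_SUCCESS, __HM_TRYGROW or __HM_FAIL and leaves recovery to its caller. The sketch below is hypothetical (the wrapper name and the exact retry policy are illustrative, not the library's real _nexpand()), but it is modelled on pieces already shown here: __ExpandDGROUP() from Example #5 for growing the heap, and the allocate/copy/free fallback from _nrealloc() in Example #4.

/* Hypothetical caller, assuming __HM_TRYGROW means the block runs into the
   end-of-heap tag and the near heap may still be grown. */
static void __near *try_expand( unsigned offset, size_t req_size )
{
    size_t  growth_size;
    int     rc;

    rc = __HeapManager_expand( _DGroup(), offset, req_size, &growth_size );
    if( rc == __HM_TRYGROW ) {
        /* grow DGROUP by the missing amount, then retry the expansion once */
        if( __ExpandDGROUP( growth_size ) ) {
            rc = __HeapManager_expand( _DGroup(), offset, req_size, &growth_size );
        }
    }
    if( rc != __HM_SUCCESS ) {
        return( NULL );             /* caller falls back to allocate/copy/free */
    }
    return( (void __near *)offset );    /* block was resized in place */
}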
Example #8
_WCRTLINK void free( void *stg )
{
    _nfree( stg );
}
Example #9
_WCRTLINK void free( void *cstg )
{
    _nfree( cstg );
}
Example #10
static int __AdjustAmount( unsigned *amount )
{
    unsigned old_amount = *amount;
    unsigned amt;
    #if ! ( defined(__WINDOWS_286__) || \
            defined(__WINDOWS_386__) || \
            defined(__WARP__)        || \
            defined(__NT__)             \
        )
        unsigned last_free_amt;
    #endif

    amt = old_amount;
    amt = ( amt + TAG_SIZE + ROUND_SIZE) & ~ROUND_SIZE;
    if( amt < old_amount ) {
        return( 0 );
    }
    #if ! ( defined(__WINDOWS_286__) || \
            defined(__WINDOWS_386__) || \
            defined(__WARP__)        || \
            defined(__NT__)             \
        )
        #if defined(__DOS_EXT__)
            if( _IsRationalZeroBase() || _IsCodeBuilder() ) {
                // Allocating extra to identify the dpmi block
                amt += sizeof(struct dpmi_hdr);
            } else {
        #else
            {
        #endif
                last_free_amt = __LastFree();   /* adjust for last free block */
                if( last_free_amt >= amt ) {
                    amt = 0;
                } else {
                    amt -= last_free_amt;
                }
            }
    #endif
    /* amount is even here */
    /*
      extra amounts        (22-feb-91 AFS)

       (1) adding a new heap needs:
           frl                    free block req'd for _nmalloc request
                                  (frl is the MINIMUM because the block
                                  may be freed)
           tag                    end of miniheap descriptor
           struct miniheapblkp    start of miniheap descriptor
       (2) extending heap needs:
           tag               free block req'd for _nmalloc request
    */
    *amount = amt;
    amt += ( (TAG_SIZE) + sizeof(frl) + sizeof(struct miniheapblkp) );
    if( amt < *amount ) return( 0 );
    if( amt < _amblksiz ) {
        /*
          _amblksiz may not be even so round down to an even number
          nb. pathological case: where _amblksiz == 0xffff, we don't
                                 want the usual round up to even
        */
        amt = _amblksiz & ~1u;
    }
    #if defined(__WINDOWS_386__) || \
        defined(__WARP__)        || \
        defined(__NT__)          || \
        defined(__CALL21__)      || \
        defined(__DOS_EXT__)     || \
        defined(__RDOS__)
        /* make sure amount is a multiple of 4k */
        *amount = amt;
        amt += 0x0fff;
        if( amt < *amount ) return( 0 );
        amt &= ~0x0fff;
    #endif
    *amount = amt;
    return( *amount != 0 );
}

#if defined(__WINDOWS_286__) || \
    defined(__WINDOWS_386__) || \
    defined(__WARP__)        || \
    defined(__NT__)          || \
    defined(__CALL21__)      || \
    defined(__DOS_EXT__)     || \
    defined(__RDOS__)
static int __CreateNewNHeap( unsigned amount )
{
    mheapptr        p1;
    frlptr          flp;
    unsigned        brk_value;

    if( !__heap_enabled ) return( 0 );
    if( _curbrk == ~1u ) return( 0 );
    if( __AdjustAmount( &amount ) == 0 ) return( 0 );
#if defined(__WINDOWS_286__)
    brk_value = (unsigned) LocalAlloc( LMEM_FIXED, amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
#elif defined(__WINDOWS_386__)
    brk_value = (unsigned) DPMIAlloc( amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
#elif defined(__WARP__)
    {
        PBYTE           p;

        if( DosAllocMem( (PPVOID)&p, amount, PAG_COMMIT|PAG_READ|PAG_WRITE ) ) {
            return( 0 );
        }
        brk_value = (unsigned)p;
    }
#elif defined(__NT__)
    brk_value = (unsigned) VirtualAlloc( NULL, amount, MEM_COMMIT,
                                        PAGE_EXECUTE_READWRITE );
    //brk_value = (unsigned) LocalAlloc( LMEM_FIXED, amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
#elif defined(__CALL21__)
    {
        tag _WCNEAR *tmp_tag;

        tmp_tag = (tag _WCNEAR *)TinyMemAlloc( amount );
        if( tmp_tag == NULL ) {
            return( 0 );
        }
        /* make sure it will not look like the end of a heap */
        tmp_tag[0] = ! END_TAG;
        brk_value = (unsigned) &tmp_tag[2];
        amount -= 2 * TAG_SIZE; // 11-jun-95, subtract extra tag
    }
#elif defined(__DOS_EXT__)
    // if( _IsRationalZeroBase() || _IsCodeBuilder() ) {
    {
        tag         *tmp_tag;

        if( _IsRational() ) {
            tmp_tag = RationalAlloc( amount );
            if( tmp_tag ) amount = *tmp_tag;
        } else {    /* CodeBuilder */
            tmp_tag = TinyCBAlloc( amount );
            amount -= TAG_SIZE;
        }
        if( tmp_tag == NULL ) {
            return( 0 );
        }
        brk_value = (unsigned) tmp_tag;
    }
    // Pharlap, RSI/non-zero can never call this function
#elif defined(__RDOS__)
    brk_value = (unsigned) RdosAllocateMem( amount );
    if( brk_value == 0 ) {
        return( 0 );
    }
#endif
    if( amount - TAG_SIZE > amount ) {
        return( 0 );
    } else {
        amount -= TAG_SIZE;
    }
    if( amount < sizeof( struct miniheapblkp ) + sizeof( frl ) ) {
        /* there isn't enough for a heap block (struct miniheapblkp) and
           one free block (frl) */
        return( 0 );
    }
    /* we've got a new heap block */
    p1 = (mheapptr) brk_value;
    p1->len = amount;
    // Now link it up
    flp = __LinkUpNewMHeap( p1 );
    amount = flp->len;
    /* build a block for _nfree() */
    flp->len = amount | 1;
    ++p1->numalloc;                         /* 28-dec-90 */
    p1->largest_blk = 0;
    _nfree( (PTR)flp + TAG_SIZE );
    return( 1 );
}
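The page rounding near the end of __AdjustAmount() is the usual power-of-two round-up with an overflow check. A standalone sketch with a made-up input (the 0x0fff mask comes from the code above; the sample value is arbitrary):

#include <stdio.h>

/* Round a request up to a 4KB multiple, as __AdjustAmount() does for targets
   that allocate whole pages; a wrap past zero means the request was too big. */
static unsigned round_to_4k( unsigned amt )
{
    unsigned rounded;

    rounded = amt + 0x0fff;
    if( rounded < amt ) {
        return( 0 );                /* wrapped: caller should give up */
    }
    return( rounded & ~0x0fffu );
}

int main( void )
{
    printf( "%#x -> %#x\n", 0x2345u, round_to_4k( 0x2345u ) );  /* prints 0x2345 -> 0x3000 */
    return( 0 );
}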
Example #11
_WCRTLINK void _nheapgrow( void )
{
    _nfree( _nmalloc( 1 ) );        /* get something into the heap */
}
Example #12
int __HeapManager_expand( __segment seg, unsigned offset, size_t req_size, size_t *growth_size )
{
    miniheapblkp    SEG_BPTR( seg ) hblk;
    freelistp       SEG_BPTR( seg ) p1;
    freelistp       SEG_BPTR( seg ) p2;
    freelistp       SEG_BPTR( seg ) pnext;
    freelistp       SEG_BPTR( seg ) pprev;
    size_t          new_size;
    size_t          old_size;
    size_t          free_size;

    /* round (new_size + tag) to multiple of pointer size */
    new_size = __ROUND_UP_SIZE( req_size + TAG_SIZE, ROUND_SIZE );
    if( new_size < req_size )
        new_size = ~0; //go for max
    if( new_size < FRL_SIZE ) {
        new_size = FRL_SIZE;
    }
    p1 = FRL_BPTR( seg, offset, -TAG_SIZE );
    old_size = MEMBLK_SIZE( p1 );
    if( new_size > old_size ) {
        /* enlarging the current allocation */
        p2 = FRL_BPTR( seg, p1, old_size );
        *growth_size = new_size - old_size;
        for( ;; ) {
            if( p2->len == END_TAG ) {
                return( __HM_TRYGROW );
            } else if( IS_MEMBLK_USED( p2 ) ) { /* next piece is allocated */
                break;
            } else {
                free_size = p2->len;
                pnext = p2->next;
                pprev = p2->prev;

                if( seg == _DGroup() ) { // near heap
                    for( hblk = __nheapbeg; hblk->next != NULL; hblk = hblk->next ) {
                        if( FRL_BPTR( seg, hblk, 0 ) <= FRL_BPTR( seg, offset, 0 )
                          && FRL_BPTR( seg, hblk, hblk->len ) > FRL_BPTR( seg, offset, 0 ) ) {
                            break;
                        }
                    }
#if defined( _M_I86 )
                } else {      // Based heap
                    hblk = 0;
#endif
                }

                if( hblk->rover == p2 ) {
                    hblk->rover = p2->prev;
                }
                if( free_size < *growth_size || free_size - *growth_size < FRL_SIZE ) {
                    /* unlink small free block */
                    pprev->next = pnext;
                    pnext->prev = pprev;
                    p1->len += free_size;
                    hblk->numfree--;
                    if( free_size >= *growth_size ) {
                        return( __HM_SUCCESS );
                    }
                    *growth_size -= free_size;
                    p2 = FRL_BPTR( seg, p2, free_size );
                } else {
                    p2 = FRL_BPTR( seg, p2, *growth_size );
                    p2->len = free_size - *growth_size;
                    p2->prev = pprev;
                    p2->next = pnext;
                    pprev->next = p2;
                    pnext->prev = p2;
                    p1->len += *growth_size;
                    return( __HM_SUCCESS );
                }
            }
        }
        /* no suitable free blocks behind, have to move block */
        return( __HM_FAIL );
    } else {
        /* shrinking the current allocation */
        if( old_size - new_size >= FRL_SIZE ) {
            /* block big enough to split */
            SET_MEMBLK_SIZE_USED( p1, new_size );
            p1 = FRL_BPTR( seg, p1, new_size );
            SET_MEMBLK_SIZE_USED( p1, old_size - new_size );
            if( seg == _DGroup() ) { // near heap
                for( hblk = __nheapbeg; hblk->next != NULL; hblk = hblk->next ) {
                    if( FRL_BPTR( seg, hblk, 0 ) <= FRL_BPTR( seg, offset, 0 )
                      && FRL_BPTR( seg, hblk, hblk->len ) > FRL_BPTR( seg, offset, 0 ) ) {
                        break;
                    }
                }
#if defined( _M_I86 )
            } else {    // Based heap
                hblk = 0;
#endif
            }
            /* _bfree will decrement 'numalloc' 08-jul-91 */
            hblk->numalloc++;
#if defined( _M_I86 )
            _bfree( seg, FRL_BPTR( seg, p1, TAG_SIZE ) );
            /* free the top portion */
#else
            _nfree( FRL_BPTR( seg, p1, TAG_SIZE ) );
#endif
        }
    }
    return( __HM_SUCCESS );
}