/* Unlock a memory pointer so it can be released if there is no references inside of harbour variables */ void * hb_gcUnlock( void * pBlock ) { if( pBlock ) { PHB_GARBAGE pAlloc = HB_GC_PTR( pBlock ); if( pAlloc->locked ) { HB_GC_LOCK(); if( pAlloc->locked ) { if( --pAlloc->locked == 0 ) { pAlloc->used = s_uUsedFlag; hb_gcUnlink( &s_pLockedBlock, pAlloc ); hb_gcLink( &s_pCurrBlock, pAlloc ); HB_GC_AUTO_INC(); } } HB_GC_UNLOCK(); } } return pBlock; }
/* Allocate a collectible memory block of nSize bytes with the given
 * mark/clear function table and link it into the scanned block list.
 * Returns the caller-visible payload pointer (GC header hidden).
 */
void * hb_gcAllocRaw( HB_SIZE nSize, const HB_GC_FUNCS * pFuncs )
{
   PHB_GARBAGE pAlloc = HB_GARBAGE_NEW( nSize );

   pAlloc->locked = 0;
   pAlloc->pFuncs = pFuncs;
   pAlloc->used   = s_uUsedFlag;

   HB_GC_LOCK();
#ifdef HB_GC_AUTO
   /* automatic collection: when the block count exceeds the
    * threshold, drop the lock, run a full sweep, then re-mark this
    * block as used because the sweep may have flipped the flag */
   if( s_ulBlocks > s_ulBlocksCheck )
   {
      HB_GC_UNLOCK();
      hb_gcCollectAll( HB_TRUE );
      HB_GC_LOCK();
      pAlloc->used = s_uUsedFlag;
   }
   HB_GC_AUTO_INC();
#endif
   hb_gcLink( &s_pCurrBlock, pAlloc );
   HB_GC_UNLOCK();

   /* hide the internal data */
   return HB_BLOCK_PTR( pAlloc );
}
/* Unlock a memory pointer so it can be released if there is no references inside of harbour variables */ void *hb_gcUnlock( void *pBlock ) { if( pBlock ) { HB_GARBAGE_PTR pAlloc = ( HB_GARBAGE_PTR ) pBlock; --pAlloc; if( pAlloc->locked ) { if( --pAlloc->locked == 0 ) { HB_CRITICAL_LOCK( hb_garbageAllocMutex ); hb_gcUnlink( &s_pLockedBlock, pAlloc ); hb_gcLink( &s_pCurrBlock, pAlloc ); pAlloc->used = s_uUsedFlag; HB_CRITICAL_UNLOCK( hb_garbageAllocMutex ); } } } return pBlock; }
/* Allocate a GC block that starts out locked (lock count 1), so it
 * survives collection until the caller explicitly unlocks it.
 * Returns the caller-visible payload pointer (GC header hidden).
 */
void * hb_gcAllocate( HB_SIZE nSize, const HB_GC_FUNCS * pFuncs )
{
   PHB_GARBAGE pAlloc = HB_GARBAGE_NEW( nSize );

   pAlloc->pFuncs = pFuncs;
   pAlloc->locked = 1;
   pAlloc->used   = s_uUsedFlag;

   HB_GC_LOCK();
   hb_gcLink( &s_pLockedBlock, pAlloc );
   HB_GC_UNLOCK();

   /* hide the internal data */
   return HB_BLOCK_PTR( pAlloc );
}
/* Allocate a GC-protected ("gripped") HB_ITEM, initialized to NIL or
 * to a copy of pOrigin, and keep it on the locked block list so the
 * collector never frees it while gripped.
 * Returns the item pointer, or NULL if allocation failed.
 * NOTE(review): with GC_RECYCLE the block may come from the
 * s_pAvailableItems free list; the mutex is released on BOTH branches
 * before any further work — the unlock placement differs per branch
 * on purpose, so the order of statements here is significant.
 */
HB_ITEM_PTR hb_gcGripGet( HB_ITEM_PTR pOrigin )
{
   HB_GARBAGE_PTR pAlloc;

#ifdef GC_RECYCLE
   HB_CRITICAL_LOCK( hb_garbageAllocMutex );
   if( s_pAvailableItems )
   {
      /* reuse a previously released item block */
      pAlloc = s_pAvailableItems;
      hb_gcUnlink( &s_pAvailableItems, s_pAvailableItems );
      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );
   }
   else
   {
      /* free list empty: allocate outside the critical section */
      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );
      pAlloc = ( HB_GARBAGE_PTR ) hb_xgrab( sizeof( HB_ITEM ) + sizeof( HB_GARBAGE ) );
   }
#else
   pAlloc = HB_GARBAGE_NEW( sizeof( HB_ITEM ) + sizeof( HB_GARBAGE ) );
#endif
   if( pAlloc )
   {
      /* the HB_ITEM payload lives immediately after the GC header */
      HB_ITEM_PTR pItem = ( HB_ITEM_PTR )( pAlloc + 1 );

      pAlloc->pFunc = hb_gcGripRelease;
      pAlloc->locked = 1;            /* gripped items start locked */
      pAlloc->used = s_uUsedFlag;
      pItem->type = HB_IT_NIL;

      if( pOrigin )
      {
         hb_itemCopy( pItem, pOrigin );
      }

      /* publish on the locked list under the allocator mutex */
      HB_THREAD_GUARD( hb_garbageAllocMutex, hb_gcLink( &s_pLockedBlock, pAlloc ) );
      return pItem;
   }
   else
   {
      return NULL;
   }
}
/* Lock a memory pointer so it will not be released even while no
 * Harbour variable refers to it (e.g. when stored in external code).
 * Locks nest; each call must be balanced by hb_gcUnlock().
 */
void * hb_gcLock( void * pBlock )
{
   if( pBlock )
   {
      PHB_GARBAGE pAlloc = HB_GC_PTR( pBlock );

      HB_GC_LOCK();
      /* first lock: move the block out of the collector-scanned
       * list onto the locked list */
      if( pAlloc->locked == 0 )
      {
         hb_gcUnlink( &s_pCurrBlock, pAlloc );
         hb_gcLink( &s_pLockedBlock, pAlloc );
         HB_GC_AUTO_DEC();
      }
      ++pAlloc->locked;
      HB_GC_UNLOCK();
   }
   return pBlock;
}
/* Allocate a collectible memory block of ulSize bytes with the given
 * cleanup function and link it into the scanned block list.
 * Returns the caller-visible payload pointer (GC header hidden), or
 * NULL if allocation failed.
 * NOTE(review): with GC_RECYCLE, base-array sized blocks are reused
 * from the s_pAvailableBaseArrays free list; the mutex unlock
 * placement differs per branch on purpose — the free-list pop must
 * stay inside the critical section while hb_xgrab() must not.
 */
void * hb_gcAlloc( ULONG ulSize, HB_GARBAGE_FUNC_PTR pCleanupFunc )
{
   HB_GARBAGE_PTR pAlloc;

#ifdef GC_RECYCLE
   HB_CRITICAL_LOCK( hb_garbageAllocMutex );
   if( s_pAvailableBaseArrays && ulSize == sizeof( HB_BASEARRAY ) )
   {
      /* reuse a previously released base-array block */
      pAlloc = s_pAvailableBaseArrays;
      hb_gcUnlink( &s_pAvailableBaseArrays, s_pAvailableBaseArrays );
      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );
   }
   else
   {
      /* no recyclable block: allocate outside the critical section */
      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );
      pAlloc = ( HB_GARBAGE_PTR ) hb_xgrab( ulSize + sizeof( HB_GARBAGE ) );
   }
#else
   pAlloc = HB_GARBAGE_NEW( ulSize + sizeof( HB_GARBAGE ) );
#endif
   if( pAlloc )
   {
      pAlloc->pFunc = pCleanupFunc;
      pAlloc->ulHolders = 0;
      pAlloc->locked = 0;
      pAlloc->used = s_uUsedFlag;

      /* publish on the scanned list and update allocation counters */
      HB_CRITICAL_LOCK( hb_garbageAllocMutex );
      s_uAllocated++;
      s_uAllocatedCnt++;
      hb_gcLink( &s_pCurrBlock, pAlloc );
      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );

      HB_TRACE( HB_TR_DEBUG, ( "hb_gcAlloc %p in %p", pAlloc + 1, pAlloc ) );

      /* hide the internal data */
      return (void *)( pAlloc + 1 );
   }
   else
   {
      return NULL;
   }
}
/* Create a GC-protected ("gripped") HB_ITEM initialized to NIL, or
 * to a copy of pOrigin when given, and keep it on the locked list so
 * the collector never frees it while gripped.
 */
PHB_ITEM hb_gcGripGet( PHB_ITEM pOrigin )
{
   PHB_GARBAGE pAlloc = HB_GARBAGE_NEW( sizeof( HB_ITEM ) );
   PHB_ITEM pItem;

   pItem = ( PHB_ITEM ) HB_BLOCK_PTR( pAlloc );
   pAlloc->pFuncs = &s_gcGripFuncs;
   pAlloc->used   = s_uUsedFlag;
   pAlloc->locked = 1;              /* gripped items start locked */
   pItem->type    = HB_IT_NIL;

   HB_GC_LOCK();
   hb_gcLink( &s_pLockedBlock, pAlloc );
   HB_GC_UNLOCK();

   /* copy outside the lock — the item is already protected */
   if( pOrigin )
      hb_itemCopy( pItem, pOrigin );

   return pItem;
}
/* Attach a new reference to a GC block. If the block is locked, the
 * last lock is converted into the new reference instead of bumping
 * the reference counter: the block moves back to the scanned list
 * and hb_xRefInc() is skipped (the reference the lock was holding is
 * handed over to the new owner).
 */
void hb_gcAttach( void * pBlock )
{
   PHB_GARBAGE pAlloc = HB_GC_PTR( pBlock );

   if( pAlloc->locked )
   {
      HB_GC_LOCK();
      /* re-check the counter under the lock */
      if( pAlloc->locked )
      {
         if( --pAlloc->locked == 0 )
         {
            pAlloc->used = s_uUsedFlag;
            hb_gcUnlink( &s_pLockedBlock, pAlloc );
            hb_gcLink( &s_pCurrBlock, pAlloc );
            HB_GC_AUTO_INC();
            /* lock converted into the reference: suppress the
             * hb_xRefInc() below */
            pAlloc = NULL;
         }
      }
      HB_GC_UNLOCK();
   }
   if( pAlloc )
      hb_xRefInc( pAlloc );
}
/* Run a full mark-and-sweep collection: suspend all threads, mark
 * reachable blocks from the VM stack, statics, classes and the
 * locked list, then move every still-unmarked block to the deleted
 * list, run its clear() callback and free it (or resurrect it when
 * a destructor re-referenced it).
 */
void hb_gcCollectAll( HB_BOOL fForce )
{
   /* MTNOTE: it's not necessary to protect s_bCollecting with mutex
    * because it can be changed at RT only inside this procedure
    * when all other threads are stopped by hb_vmSuspendThreads(),
    * [druzus]
    */
   if( ! s_bCollecting && hb_vmSuspendThreads( fForce ) )
   {
      PHB_GARBAGE pAlloc, pDelete;

      /* nothing to collect, or another collection won the race */
      if( ! s_pCurrBlock || s_bCollecting )
      {
         hb_vmResumeThreads();
         return;
      }

      s_bCollecting = HB_TRUE;

      /* Step 1 - mark */
      /* All blocks are already marked because we are flipping
       * the used/unused flag */

      /* Step 2 - sweep */
      /* check all known places for blocks they are referring */
      hb_vmIsStackRef();
      hb_vmIsStaticRef();
      hb_clsIsClassRef();

      /* check list of locked block for blocks referenced from
       * locked block */
      if( s_pLockedBlock )
      {
         pAlloc = s_pLockedBlock;
         do
         {
            pAlloc->pFuncs->mark( HB_BLOCK_PTR( pAlloc ) );
            pAlloc = pAlloc->pNext;
         }
         while( s_pLockedBlock != pAlloc );
      }

      /* Step 3 - finalize */
      /* Release all blocks that are still marked as unused */

      /*
       * infinite loop can appear when we are executing clean-up functions
       * scanning s_pCurrBlock. It's possible that one of them will free
       * the GC block which we are using as stop condition. Only blocks
       * for which we set HB_GC_DELETE flag are guarded against releasing.
       * To avoid such situation first we are moving blocks which will be
       * deleted to separate list. It's additional operation but it can
       * even increase the speed when we are deleting only few percent
       * of all allocated blocks because in next passes we will scan only
       * deleted block list. [druzus]
       */
      pAlloc = NULL;    /* sentinel: first surviving block = stop condition */
      do
      {
         if( s_pCurrBlock->used == s_uUsedFlag )
         {
            /* still carries the pre-flip flag -> unreachable, delete */
            pDelete = s_pCurrBlock;
            pDelete->used |= HB_GC_DELETE | HB_GC_DELETELST;
            hb_gcUnlink( &s_pCurrBlock, pDelete );
            hb_gcLink( &s_pDeletedBlock, pDelete );
            HB_GC_AUTO_DEC();
         }
         else
         {
            /* at least one block will not be deleted,
             * set new stop condition */
            if( ! pAlloc )
               pAlloc = s_pCurrBlock;
            s_pCurrBlock = s_pCurrBlock->pNext;
         }
      }
      while( pAlloc != s_pCurrBlock );

      /* Step 4 - flip flag */
      /* Reverse used/unused flag so we don't have to mark all blocks
       * during next collecting */
      s_uUsedFlag ^= HB_GC_USED_FLAG;

#ifdef HB_GC_AUTO
      /* store number of marked blocks for automatic GC activation */
      s_ulBlocksMarked = s_ulBlocks;
      if( s_ulBlocksAuto == 0 )
         s_ulBlocksCheck = HB_GC_AUTO_MAX;
      else
      {
         s_ulBlocksCheck = s_ulBlocksMarked + s_ulBlocksAuto;
         /* overflow guard on the threshold addition */
         if( s_ulBlocksCheck <= s_ulBlocksMarked )
            s_ulBlocksCheck = HB_GC_AUTO_MAX;
      }
#endif

      /* call memory manager cleanup function */
      hb_xclean();

      /* resume suspended threads */
      hb_vmResumeThreads();

      /* do we have any deleted blocks? */
      if( s_pDeletedBlock )
      {
         /* call a cleanup function */
         pAlloc = s_pDeletedBlock;
         do
         {
            s_pDeletedBlock->pFuncs->clear( HB_BLOCK_PTR( s_pDeletedBlock ) );
            s_pDeletedBlock = s_pDeletedBlock->pNext;
         }
         while( pAlloc != s_pDeletedBlock );

         /* release all deleted blocks */
         do
         {
            pDelete = s_pDeletedBlock;
            hb_gcUnlink( &s_pDeletedBlock, pDelete );
            if( hb_xRefCount( pDelete ) != 0 )
            {
               /* a clear() callback re-referenced the block:
                * resurrect it on the scanned list and report the
                * destructor error */
               pDelete->used = s_uUsedFlag;
               pDelete->locked = 0;
               HB_GC_LOCK();
               hb_gcLink( &s_pCurrBlock, pDelete );
               HB_GC_AUTO_INC();
               HB_GC_UNLOCK();
               if( hb_vmRequestQuery() == 0 )
                  hb_errRT_BASE( EG_DESTRUCTOR, 1302, NULL, "Reference to freed block", 0 );
            }
            else
               HB_GARBAGE_FREE( pDelete );
         }
         while( s_pDeletedBlock );
      }

      s_bCollecting = HB_FALSE;
   }
}