Example #1
/* release a memory block allocated with hb_gcAlloc*() */
void hb_gcFree( void * pBlock )
{
   if( pBlock )
   {
      PHB_GARBAGE pAlloc = HB_GC_PTR( pBlock );

      /* Don't release the block that will be deleted during finalization */
      if( ! ( pAlloc->used & HB_GC_DELETE ) )
      {
         HB_GC_LOCK();
         if( pAlloc->locked )
            hb_gcUnlink( &s_pLockedBlock, pAlloc );
         else
         {
            hb_gcUnlink( &s_pCurrBlock, pAlloc );
            HB_GC_AUTO_DEC();
         }
         HB_GC_UNLOCK();

         HB_GARBAGE_FREE( pAlloc );
      }
   }
   else
   {
      hb_errInternal( HB_EI_XFREENULL, NULL, NULL, NULL );
   }
}
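
The allocation side is not shown above, so here is a minimal, hedged usage sketch. It assumes the hb_gcAllocate( HB_SIZE, const HB_GC_FUNCS * ) API and the hb_gcDummyMark helper from the same Harbour sources; neither appears in the excerpt. The finalizer wiring (pFuncs->clear) matches what hb_gcReleaseAll() and hb_gcCollectAll() invoke in the examples below.

#include <stdio.h>
#include "hbapi.h"

typedef struct
{
   FILE * handle;
} MY_RESOURCE;

/* finalizer: the GC calls this through pFuncs->clear before the block dies */
static HB_GARBAGE_FUNC( my_resource_clear )
{
   MY_RESOURCE * pRes = ( MY_RESOURCE * ) Cargo;

   if( pRes->handle )
   {
      fclose( pRes->handle );
      pRes->handle = NULL;
   }
}

static const HB_GC_FUNCS s_myResFuncs =
{
   my_resource_clear,
   hb_gcDummyMark       /* the block holds no item references to mark */
};

static void my_example( void )
{
   MY_RESOURCE * pRes = ( MY_RESOURCE * ) hb_gcAllocate( sizeof( MY_RESOURCE ),
                                                         &s_myResFuncs );
   pRes->handle = tmpfile();

   /* ... use the resource ... */

   hb_gcFree( pRes );   /* unlink and HB_GARBAGE_FREE(), as shown above */
}
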
Example #2
/* MTNOTE: This is executed at the end of the HVM cleanup code, just before
 *         application exit, when other threads have already been destroyed,
 *         so it does not need additional protection code for MT mode, [druzus]
 */
void hb_gcReleaseAll( void )
{
   if( s_pCurrBlock )
   {
      PHB_GARBAGE pAlloc, pDelete;

      s_bCollecting = HB_TRUE;

      pAlloc = s_pCurrBlock;
      do
      {
         /* call a cleanup function */
         s_pCurrBlock->used |= HB_GC_DELETE | HB_GC_DELETELST;
         s_pCurrBlock->pFuncs->clear( HB_BLOCK_PTR( s_pCurrBlock ) );

         s_pCurrBlock = s_pCurrBlock->pNext;

      }
      while( s_pCurrBlock && pAlloc != s_pCurrBlock );

      do
      {
         HB_TRACE( HB_TR_INFO, ( "Release %p", s_pCurrBlock ) );
         pDelete = s_pCurrBlock;
         hb_gcUnlink( &s_pCurrBlock, pDelete );
         HB_GC_AUTO_DEC();
         HB_GARBAGE_FREE( pDelete );

      }
      while( s_pCurrBlock );
   }

   s_bCollecting = HB_FALSE;
}
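
Both functions above lean on hb_gcUnlink() and on the stop conditions pAlloc != s_pCurrBlock and while( s_pCurrBlock ), which only work if the blocks form a circular doubly linked list whose head pointer is cleared when the last node is removed. Below is a standalone sketch of that invariant, using a simplified node type rather than the real PHB_GARBAGE; the link/unlink bodies are what the loop shapes above imply, not a verbatim copy of the Harbour functions.

#include <stdio.h>
#include <stdlib.h>

typedef struct _NODE
{
   struct _NODE * pNext;
   struct _NODE * pPrev;
   int id;
} NODE;

/* insert pAlloc at the logical end of the ring *pList */
static void gc_link( NODE ** pList, NODE * pAlloc )
{
   if( *pList )
   {
      pAlloc->pNext = *pList;
      pAlloc->pPrev = ( *pList )->pPrev;
      pAlloc->pPrev->pNext = pAlloc;
      ( *pList )->pPrev = pAlloc;
   }
   else
      *pList = pAlloc->pNext = pAlloc->pPrev = pAlloc;
}

/* detach pAlloc; clearing the head when the ring empties is what lets
 * the while( s_pCurrBlock ) loop above terminate */
static void gc_unlink( NODE ** pList, NODE * pAlloc )
{
   pAlloc->pPrev->pNext = pAlloc->pNext;
   pAlloc->pNext->pPrev = pAlloc->pPrev;
   if( *pList == pAlloc )
   {
      *pList = pAlloc->pNext;
      if( *pList == pAlloc )
         *pList = NULL;      /* last node removed */
   }
}

int main( void )
{
   NODE * pHead = NULL, * pWalk, nodes[ 3 ];
   int i;

   for( i = 0; i < 3; ++i )
   {
      nodes[ i ].id = i;
      gc_link( &pHead, &nodes[ i ] );
   }

   /* one full pass, same shape as the clear() loop in hb_gcReleaseAll() */
   pWalk = pHead;
   do
   {
      printf( "visit %d\n", pHead->id );
      pHead = pHead->pNext;
   }
   while( pHead && pWalk != pHead );

   /* drain the ring, same shape as the release loop */
   do
      gc_unlink( &pHead, pHead );
   while( pHead );

   return 0;
}
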
Example #3
/* release a memory block allocated with hb_gcAlloc() */
void hb_gcFree( void *pBlock )
{
   HB_TRACE( HB_TR_DEBUG, ( "hb_gcFree(%p)", pBlock ) );

   if( hb_gc_bReleaseAll )
   {
      HB_TRACE( HB_TR_DEBUG, ( "Aborted - hb_gcFree(%p)", pBlock ) );
      return;
   }

   if( pBlock )
   {
      HB_GARBAGE_PTR pAlloc = ( HB_GARBAGE_PTR ) pBlock;
      --pAlloc;

      if( pAlloc->locked )
      {
         HB_TRACE( HB_TR_DEBUG, ( "hb_gcFree(%p) *LOCKED* %p", pBlock, pAlloc ) );

         HB_THREAD_GUARD( hb_garbageAllocMutex, hb_gcUnlink( &s_pLockedBlock, pAlloc ) );

         HB_GARBAGE_FREE( pAlloc );
      }
      else
      {
         /* Might already be marked for deletion. */
         HB_CRITICAL_LOCK( hb_garbageAllocMutex );
         if( ! ( pAlloc->used & HB_GC_DELETE ) )
         {
            s_uAllocated--;
            s_uAllocatedCnt--;
            hb_gcUnlink( &s_pCurrBlock, pAlloc );
            HB_GARBAGE_FREE( pAlloc );
         }
         HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );
      }
   }
   else
   {
      hb_errInternal( HB_EI_XFREENULL, NULL, NULL, NULL );
   }
}
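
The --pAlloc line above is the whole trick: the GC header is allocated immediately before the payload the caller sees, so the header can be recovered with pointer arithmetic (the newer sources wrap the same step in HB_GC_PTR(), as in Example #1). A standalone sketch of the layout, with a simplified stand-in for the real header:

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in; the real header also carries the list links,
 * lock counter and cleanup-function pointer used above */
typedef struct _HB_GARBAGE
{
   unsigned used;
} HB_GARBAGE, * PHB_GARBAGE;

int main( void )
{
   /* allocate header + payload in one chunk, hand out the payload */
   PHB_GARBAGE pAlloc = ( PHB_GARBAGE ) malloc( sizeof( HB_GARBAGE ) + 32 );
   void * pBlock = ( void * ) ( pAlloc + 1 );

   /* hb_gcFree()-style recovery: step one header back from the payload */
   PHB_GARBAGE pBack = ( PHB_GARBAGE ) pBlock;
   --pBack;

   printf( "round trip ok: %d\n", pBack == pAlloc );
   free( pAlloc );
   return 0;
}
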
Example #4
void hb_gcRefFree( void * pBlock )
{
   if( pBlock )
   {
      PHB_GARBAGE pAlloc = HB_GC_PTR( pBlock );

      if( hb_xRefDec( pAlloc ) )
      {
         /* Don't release the block that will be deleted during finalization */
         if( ! ( pAlloc->used & HB_GC_DELETE ) )
         {
            pAlloc->used |= HB_GC_DELETE;

            /* execute clean-up function */
            pAlloc->pFuncs->clear( pBlock );

            if( hb_xRefCount( pAlloc ) != 0 )
            {
               if( pAlloc->used & HB_GC_DELETE )
               {
                  pAlloc->used = s_uUsedFlag;
                  if( hb_vmRequestQuery() == 0 )
                     hb_errRT_BASE( EG_DESTRUCTOR, 1301, NULL, "Reference to freed block", 0 );
               }
            }
            else
            {
               HB_GC_LOCK();
               if( pAlloc->locked )
                  hb_gcUnlink( &s_pLockedBlock, pAlloc );
               else
               {
                  hb_gcUnlink( &s_pCurrBlock, pAlloc );
                  HB_GC_AUTO_DEC();
               }
               HB_GC_UNLOCK();
               HB_GARBAGE_FREE( pAlloc );
            }
         }
      }
   }
   else
   {
      hb_errInternal( HB_EI_XFREENULL, NULL, NULL, NULL );
   }
}
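
hb_gcRefFree() is the release half of a reference-counting pair. A hedged usage sketch follows, assuming the matching hb_gcRefInc() from the same Harbour API (it is not shown in the excerpt); pBlock stands for a block obtained from hb_gcAllocate(). Each logical owner holds one reference and releases it exactly once; the clear() finalizer runs only when the last reference goes away.

#include "hbapi.h"

/* hypothetical illustration: one block shared between two owners */
static void my_two_owners( void * pBlock )
{
   hb_gcRefInc( pBlock );    /* second owner: bump the reference count */

   hb_gcRefFree( pBlock );   /* owner 1 done: count 2 -> 1, block lives */
   hb_gcRefFree( pBlock );   /* owner 2 done: count 1 -> 0, clear() runs,
                                block is unlinked and freed as above */
}
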
Example #5
void hb_gcGripDrop( HB_ITEM_PTR pItem )
{

   HB_TRACE( HB_TR_DEBUG, ( "hb_gcGripDrop(%p)", pItem ) );

   if( hb_gc_bReleaseAll )
   {
      HB_TRACE( HB_TR_DEBUG, ( "Aborted - hb_gcGripDrop(%p)", pItem ) );
      return;
   }

   if( pItem )
   {
      HB_GARBAGE_PTR pAlloc = ( HB_GARBAGE_PTR ) pItem;
      --pAlloc;

      HB_TRACE( HB_TR_INFO, ( "Drop %p %p", pItem, pAlloc ) );

      if( pAlloc->pFunc == hb_gcGripRelease )
      {
         if( HB_IS_COMPLEX( pItem ) )
         {
            hb_itemClear( pItem );    /* clear value stored in this item */
         }
      }

      HB_CRITICAL_LOCK( hb_garbageAllocMutex );

      hb_gcUnlink( &s_pLockedBlock, pAlloc );

      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );

      HB_GARBAGE_FREE( pAlloc );
   }

}
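
A hedged usage sketch of the grip API this function releases, assuming hb_gcGripGet() from the same sources: it allocates a GC-locked item (kept on the s_pLockedBlock list, as the unlink above implies), optionally copying a source item, and that item survives every collection until hb_gcGripDrop() is called.

#include "hbapi.h"

static void my_keep_item_alive( HB_ITEM_PTR pSource )
{
   /* grip a private, collection-proof copy of the item */
   HB_ITEM_PTR pSafe = hb_gcGripGet( pSource );

   /* ... pSafe survives any hb_gcCollectAll() here ... */

   hb_gcGripDrop( pSafe );   /* clear the value, unlink, free - as above */
}
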
Example #6
/* Check all memory blocks to see if they can be released
 */
void hb_gcCollectAll( HB_BOOL fForce )
{
   /* MTNOTE: it's not necessary to protect s_bCollecting with a mutex
    *         because at runtime it can be changed only inside this
    *         procedure, when all other threads are stopped by
    *         hb_vmSuspendThreads(), [druzus]
    */
   if( ! s_bCollecting && hb_vmSuspendThreads( fForce ) )
   {
      PHB_GARBAGE pAlloc, pDelete;

      if( ! s_pCurrBlock || s_bCollecting )
      {
         hb_vmResumeThreads();
         return;
      }

      s_bCollecting = HB_TRUE;

      /* Step 1 - mark */
      /* All blocks are already marked because we are flipping
       * the used/unused flag
       */

      /* Step 2 - sweep */
      /* check all known places for the blocks they refer to */
      hb_vmIsStackRef();
      hb_vmIsStaticRef();
      hb_clsIsClassRef();

      /* check the list of locked blocks for blocks referenced from
       * locked blocks
       */
      if( s_pLockedBlock )
      {
         pAlloc = s_pLockedBlock;
         do
         {
            pAlloc->pFuncs->mark( HB_BLOCK_PTR( pAlloc ) );
            pAlloc = pAlloc->pNext;
         }
         while( s_pLockedBlock != pAlloc );
      }

      /* Step 3 - finalize */
      /* Release all blocks that are still marked as unused */

      /*
       * An infinite loop can appear when clean-up functions are executed
       * while scanning s_pCurrBlock: one of them may free the GC block
       * which we are using as the stop condition. Only blocks for which
       * we set the HB_GC_DELETE flag are guarded against releasing.
       * To avoid such a situation we first move the blocks which will be
       * deleted to a separate list. It is an additional operation, but it
       * can even increase speed when only a few percent of all allocated
       * blocks are deleted, because subsequent passes scan only the
       * deleted-block list. [druzus]
       */

      pAlloc = NULL; /* for stop condition */
      do
      {
         if( s_pCurrBlock->used == s_uUsedFlag )
         {
            pDelete = s_pCurrBlock;
            pDelete->used |= HB_GC_DELETE | HB_GC_DELETELST;
            hb_gcUnlink( &s_pCurrBlock, pDelete );
            hb_gcLink( &s_pDeletedBlock, pDelete );
            HB_GC_AUTO_DEC();
         }
         else
         {
            /* at least one block will not be deleted, set a new stop condition */
            if( ! pAlloc )
               pAlloc = s_pCurrBlock;
            s_pCurrBlock = s_pCurrBlock->pNext;
         }
      }
      while( pAlloc != s_pCurrBlock );

      /* Step 4 - flip flag */
      /* Reverse the used/unused flag so we don't have to mark all blocks
       * during the next collection
       */
      s_uUsedFlag ^= HB_GC_USED_FLAG;

#ifdef HB_GC_AUTO
      /* store number of marked blocks for automatic GC activation */
      s_ulBlocksMarked = s_ulBlocks;
      if( s_ulBlocksAuto == 0 )
         s_ulBlocksCheck = HB_GC_AUTO_MAX;
      else
      {
         s_ulBlocksCheck = s_ulBlocksMarked + s_ulBlocksAuto;
         if( s_ulBlocksCheck <= s_ulBlocksMarked )
            s_ulBlocksCheck = HB_GC_AUTO_MAX;
      }
#endif

      /* call memory manager cleanup function */
      hb_xclean();

      /* resume suspended threads */
      hb_vmResumeThreads();

      /* do we have any deleted blocks? */
      if( s_pDeletedBlock )
      {
         /* call a cleanup function */
         pAlloc = s_pDeletedBlock;
         do
         {
            s_pDeletedBlock->pFuncs->clear( HB_BLOCK_PTR( s_pDeletedBlock ) );

            s_pDeletedBlock = s_pDeletedBlock->pNext;
         }
         while( pAlloc != s_pDeletedBlock );

         /* release all deleted blocks */
         do
         {
            pDelete = s_pDeletedBlock;
            hb_gcUnlink( &s_pDeletedBlock, pDelete );
            if( hb_xRefCount( pDelete ) != 0 )
            {
               pDelete->used = s_uUsedFlag;
               pDelete->locked = 0;
               HB_GC_LOCK();
               hb_gcLink( &s_pCurrBlock, pDelete );
               HB_GC_AUTO_INC();
               HB_GC_UNLOCK();
               if( hb_vmRequestQuery() == 0 )
                  hb_errRT_BASE( EG_DESTRUCTOR, 1302, NULL, "Reference to freed block", 0 );
            }
            else
               HB_GARBAGE_FREE( pDelete );
         }
         while( s_pDeletedBlock );
      }

      s_bCollecting = HB_FALSE;
   }
}
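
Step 4's XOR deserves a closer look: instead of clearing the mark on every surviving block after each pass, the collector redefines which stamp value means "unmarked". A standalone sketch of the invariant (the flag values are assumptions; only the pattern matters):

#include <stdio.h>

#define HB_GC_USED_FLAG 1           /* assumed value */

int main( void )
{
   unsigned s_uUsedFlag = 0;        /* assumed initial value */
   unsigned used = s_uUsedFlag;     /* new block starts "unmarked" */

   /* mark phase reaches the block: flip its stamp away from the flag */
   used ^= HB_GC_USED_FLAG;

   /* finalize: used != s_uUsedFlag, so the block survives the sweep */
   printf( "survives: %d\n", used != s_uUsedFlag );

   /* Step 4: flip the global flag instead of touching every survivor;
    * the block now compares as "unmarked" again for the next pass */
   s_uUsedFlag ^= HB_GC_USED_FLAG;
   printf( "ready for next pass: %d\n", used == s_uUsedFlag );
   return 0;
}
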
Example #7
/* Check all memory blocks to see if they can be released
*/
void hb_gcCollectAll( BOOL bForce )
{
   HB_GARBAGE_PTR pAlloc, pDelete;

   HB_TRACE( HB_TR_INFO, ( "hb_gcCollectAll(%i), %p, %i", bForce, s_pCurrBlock, s_bCollecting ) );

   /* is another garbage collection pass in action? */
   #ifdef HB_THREAD_SUPPORT
      HB_CRITICAL_LOCK( hb_garbageAllocMutex );
      if ( s_pCurrBlock == NULL || ( bForce == FALSE && s_uAllocated < HB_GC_COLLECTION_JUSTIFIED ) )
      {
         HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );
         return;
      }
      HB_CRITICAL_UNLOCK( hb_garbageAllocMutex );

      /* Force this thread to be an idle inspector: only this thread can run
         past this point; depending on settings, this thread may prevent others
         from regaining control, or just wait for a time when no thread is active. */
      hb_threadWaitForIdle();

   #else
      if( s_bCollecting )  /* note: 1) it is volatile and 2) a single spurious failure is harmless */
      {
         return;
      }
      /* Even without locking, reading a non-critical variable here
      should not be a problem */
      if( s_pCurrBlock == NULL || ( bForce == FALSE && s_uAllocated < HB_GC_COLLECTION_JUSTIFIED ) )
      {
         s_bCollecting = FALSE;
         return;
      }
   #endif

   /* By hypothesis, only one thread will be granted the right to be here,
   so further checking s_pCurrBlock for consistency is useless. */

   /* Now that we are the rightful owner of the GC process, we must
   * forbid all other threads from acting on the objects that are
   * going to be (at different times):
   * - scanned,
   * - freed (in their members),
   * - modified/released (in their structure).
   */

   s_bCollecting = TRUE;
   s_uAllocated = 0;

   /* Step 1 - mark */
   /* All blocks are already marked because we are flipping
   * the used/unused flag
   */

   HB_TRACE( HB_TR_INFO, ( "Sweep Scan" ) );

   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "Sweep Scan\n" );
   #endif

   /* Step 2 - MARK */
   /* check all known places for the blocks they refer to */
   #ifdef HB_THREAD_SUPPORT
      hb_threadIsLocalRef();
   #else
      hb_vmIsLocalRef();
   #endif

   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After LocalRef\n" );
   #endif

   hb_vmIsStaticRef();
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After StaticRef\n" );
   #endif

   hb_vmIsGlobalRef();
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After Globals\n" );
   #endif

   #ifndef HB_THREAD_SUPPORT
   /* JC1: under MT, each threadIsLocalRef does its memvar reffing */
   hb_memvarsIsMemvarRef();
   #endif
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After MemvarRef\n" );
   #endif

   hb_clsIsClassRef();
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After ClassRef\n" );
   #endif

   if( HB_IS_GCITEM( &hb_vm_BreakBlock ) )
   {
      hb_gcItemRef( &hb_vm_BreakBlock );
   }
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After BreakBlock\n" );
   #endif

   HB_TRACE( HB_TR_INFO, ( "Locked Scan" ) );

   /* check the list of locked blocks for blocks referenced from
   * locked blocks
   */

   if( s_pLockedBlock )
   {
      pAlloc = s_pLockedBlock;

      do
      {
         /* it is not a very elegant method, but it works well */
         if( pAlloc->pFunc == hb_gcGripRelease )
         {
            hb_gcItemRef( ( HB_ITEM_PTR ) ( pAlloc + 1 ) );
         }
         else if( pAlloc->pFunc == hb_arrayReleaseGarbage )
         {
            HB_ITEM FakedItem;

            FakedItem.type = HB_IT_ARRAY;
            FakedItem.item.asArray.value = ( PHB_BASEARRAY )( pAlloc + 1 );

            hb_gcItemRef( &FakedItem );
         }
         else if( pAlloc->pFunc == hb_hashReleaseGarbage )
         {
            HB_ITEM FakedItem;

            FakedItem.type = HB_IT_HASH;
            FakedItem.item.asHash.value = ( PHB_BASEHASH )( pAlloc + 1 );

            hb_gcItemRef( &FakedItem );
         }
         else if( pAlloc->pFunc == hb_codeblockDeleteGarbage )
         {
            HB_ITEM FakedItem;

            FakedItem.type = HB_IT_BLOCK;
            FakedItem.item.asBlock.value = ( PHB_CODEBLOCK )( pAlloc + 1 );

            hb_gcItemRef( &FakedItem );
         }

         pAlloc = pAlloc->pNext;
      }
      while ( s_pLockedBlock != pAlloc );
   }
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After Lock scan\n" );
   #endif

   HB_TRACE( HB_TR_INFO, ( "Cleanup Scan" ) );

   /* Step 3 - Call Cleanup Functions  */

   pAlloc = s_pCurrBlock;
   do
   {
      if( s_pCurrBlock->used == s_uUsedFlag )
      {
         s_pCurrBlock->used |= HB_GC_DELETE;

         /* call the cleanup function - now for non-blocks. */
         if( s_pCurrBlock->pFunc )
         {
            HB_TRACE( HB_TR_INFO, ( "Cleanup, %p", s_pCurrBlock ) );
            ( s_pCurrBlock->pFunc )( ( void *)( s_pCurrBlock + 1 ) );
            HB_TRACE( HB_TR_INFO, ( "DONE Cleanup, %p", s_pCurrBlock ) );
         }
      }

      s_pCurrBlock = s_pCurrBlock->pNext;
   }
   while ( s_pCurrBlock && ( s_pCurrBlock != pAlloc ) );
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After Cleanup scan\n" );
   #endif

   HB_TRACE( HB_TR_INFO, ( "Release Scan" ) );

   /* Step 4 - Release all blocks that are still marked as unused */
   pAlloc = s_pCurrBlock;
   do
   {
      NewTopBlock:

      if( s_pCurrBlock->used & HB_GC_DELETE )
      {
         HB_TRACE( HB_TR_INFO, ( "Delete, %p", s_pCurrBlock ) );

         pDelete = s_pCurrBlock;
         hb_gcUnlink( &s_pCurrBlock, s_pCurrBlock );

         /*
            Releasing the top block in the list, so we must mark the new top
            into pAlloc, but we still need to process this new top. Without
            this goto, the while condition would immediately fail. Using extra
            flags and new conditions would adversely affect performance.
         */
         if( pDelete == pAlloc )
         {
            HB_TRACE( HB_TR_INFO, ( "New Top, %p", pDelete ) );

            pAlloc = s_pCurrBlock;
            HB_GARBAGE_FREE( pDelete );

            if( s_pCurrBlock )
            {
               goto NewTopBlock;
            }
         }
         else
         {
            HB_TRACE( HB_TR_INFO, ( "Free, %p", pDelete ) );
            HB_GARBAGE_FREE( pDelete );
            HB_TRACE( HB_TR_INFO, ( "DONE Free, %p", pDelete ) );
         }
      }
      else
      {
         s_pCurrBlock = s_pCurrBlock->pNext;
      }
   }
   while ( s_pCurrBlock && ( pAlloc != s_pCurrBlock ) );
   #ifdef TRACE_COLLECT
      TraceLog( NULL,  "After Release scan\n" );
   #endif

   s_pCurrBlock = pAlloc;

   /* Step 5 - flip flag */
   /* Reverse the used/unused flag so we don't have to mark all blocks
   * during the next collection
   */
   s_uUsedFlag ^= HB_GC_USED_FLAG;

   /* Step 6: garbage collection requests are now allowed again. */
   s_bCollecting = FALSE;

   /* Step 7: release all the locks on the scanned objects */
   /* Put this thread back into the machine execution count */

   #if defined( HB_THREAD_SUPPORT )
      hb_threadIdleEnd();
   #endif

}
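
The FakedItem blocks in the locked-list scan are worth isolating: the locked list stores bare payloads, so the scanner builds a temporary item on the C stack, tags it with the payload's real type, and hands it to the generic recursive marker. A standalone model of that pattern follows; every type here is a simplified stand-in, not the real HB_ITEM.

#include <stdio.h>

/* simplified stand-ins for the item type tag and item structure */
typedef enum { IT_NIL, IT_ARRAY } ITEM_TYPE;

typedef struct
{
   ITEM_TYPE type;
   void * value;
} ITEM;

/* stand-in for the generic recursive marker hb_gcItemRef() */
static void item_ref( ITEM * pItem )
{
   if( pItem->type == IT_ARRAY )
      printf( "marking array payload %p\n", pItem->value );
}

/* the trick from the locked-list scan: wrap a bare payload in a
 * temporary stack item so the generic marker can process it */
static void scan_locked_payload( void * pPayload )
{
   ITEM FakedItem;

   FakedItem.type = IT_ARRAY;
   FakedItem.value = pPayload;
   item_ref( &FakedItem );
}

int main( void )
{
   int payload = 0;
   scan_locked_payload( &payload );
   return 0;
}
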