Example no. 1
static void CollectorOff(Proc_t *proc)
{
  Thread_t *threadIterator = NULL;
  int isFirst;
  int nextGCType = Minor;      /* GCType will be written to during this function for the next GC
				  and so we save its value here for reading */
  procChangeState(proc, GCWork, 608);
  proc->segmentType |= FlipOff;

  if (collectDiag >= 2)
    printf("Proc %d: entered CollectorOff\n", proc->procid);
  assert(SetIsEmpty(&proc->work.objs));        /* Local stack must be empty */
  assert(GCStatus == GCPendingOff);
  memBarrier();

  PadCopyRange(&proc->copyRange);              /* Pad so that paranoid check works */

  isFirst = (weakBarrier(barriers,proc) == 0);
  if (isFirst) {
    ResetJob();
  }
  strongBarrier(barriers,proc);

  /* Local stacks must be empty. */
  assert(isLocalWorkEmpty(&proc->work));

  /* Replace all roots with replica */
  if (isFirst) 
    minor_global_scan(proc);   /* Even for a major GC, since global locs were already flipped to tenured when the GC started */
  while ((threadIterator = NextJob()) != NULL) {
    complete_root_scan(proc, threadIterator);
    if (threadIterator->request == MajorGCRequestFromC) /* Runtime explicitly requests major GC */
      nextGCType = Major;
  }

  procChangeState(proc, GCWork, 611);
  proc->numRoot += SetLength(&proc->work.roots) + SetLength(&proc->work.globals);
  while (!SetIsEmpty(&proc->work.roots)) {
    ploc_t root = (ploc_t) SetPop(&proc->work.roots);
    flipRootLoc(GCType, root);
  }
  while (!SetIsEmpty(&proc->work.globals)) {
    ptr_t global = SetPop(&proc->work.globals);
    ploc_t replicaLoc = DupGlobal(global);
    flipRootLoc(GCType, replicaLoc);
  }
  FetchAndAdd(&totalReplicated, proc->segUsage.bytesReplicated + proc->cycleUsage.bytesReplicated);
  strongBarrier(barriers,proc);

  /* Only the designated thread needs to perform the following */
  if (isFirst) {
    if (GCType == Minor) {
      double liveRatio = 0.0;
      int i, copied = 0;
      paranoid_check_all(nursery, fromSpace, fromSpace, NULL, largeSpace);
      minor_global_promote(proc);
      for (i=0; i<NumProc; i++) {
	Proc_t *p = getNthProc(i);
	copied += bytesCopied(&p->cycleUsage) + bytesCopied(&p->segUsage);
      }
      liveRatio = (double) (copied) / (double) Heap_GetUsed(nursery);
      add_statistic(&minorSurvivalStatistic, liveRatio);
    }
    else { /* Major */
      discardNextSharedStack(workStack); /* Discard nextBackObj/nextBackLocs on major GC */
      paranoid_check_all(nursery, fromSpace, toSpace, NULL, largeSpace);
      gc_large_endCollect();
      HeapAdjust2(totalRequest, totalUnused, totalReplicated,  
		  CollectionRate, doAgressive ? 2 : 1,
		  nursery, fromSpace, toSpace);
      reducedTenuredSize = Heap_GetSize(toSpace);
      expandedTenuredSize = reducedToExpanded(reducedTenuredSize, CollectionRate, doAgressive ? 2 : 1);
      Heap_Resize(fromSpace, 0, 1);
      typed_swap(Heap_t *, fromSpace, toSpace);
      NumMajorGC++;                          
    }
    typed_swap(int, primaryGlobalOffset, replicaGlobalOffset);
    typed_swap(int, primaryArrayOffset, replicaArrayOffset);
    typed_swap(int, primaryStackletOffset, replicaStackletOffset);
    Heap_Resize(nursery,reducedNurserySize,1);
    NumGC++;
    GCStatus = GCOff;
    if (Heap_GetAvail(fromSpace) < tenuredReserve + Heap_GetSize(nursery)) {
      /*  The next GC needs to be a major GC, so we must begin allocating in fromSpace immediately.
	  We permit allocation to continue so that we don't flip on again too soon.  However, allocation
	  is restricted so that the major collection starts soon and an accurate survival rate
	  can be computed. */
      GCType = Major;        
      fromSpace->top = fromSpace->cursor + (minOffRequest * NumProc) / sizeof(val_t);
    }
    else
      GCType = nextGCType;
  }

  /* All system threads need to reset their limit pointer */
  ResetAllocation(proc, NULL);
  proc->writelistCursor = proc->writelistStart;

  strongBarrier(barriers,proc);
  establishCopyRange(proc);    /* Called here so that copyRanges are initialized for use in GCRelease */

  if (collectDiag >= 2)
    printf("Proc %d: leaving CollectorOff\n", proc->procid);
}
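
The flip at the end of Example no. 1 relies on typed_swap to exchange the heaps and the primary/replica offsets in place. The macro itself is not shown in this listing; a minimal sketch of the three-statement pattern it is assumed to follow, using a block-scoped temporary, is:

/* Hypothetical definition for illustration only; the runtime's actual macro may differ.
   Swaps two lvalues of the given type in place using a temporary. */
#define typed_swap(type, a, b) \
  do {                         \
    type swap_tmp_ = (a);      \
    (a) = (b);                 \
    (b) = swap_tmp_;           \
  } while (0)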
Example no. 2
static void CollectorTransition(Proc_t *proc)
{

  int isFirst = 0;
  Thread_t *threadIterator = NULL;

  /* Major vs Minor of current GC was determined at end of last GC */
  procChangeState(proc, GCWork, 604);
  proc->segmentType |= (FlipTransition | ((GCType == Major) ? MajorWork : MinorWork));

  switch (GCStatus) {
    case GCAgressive:             /* Signalling to other processors that collector is turning on */
      GCStatus = GCPendingOn;
      StopAllThreads();
      break;
    case GCPendingOn:             /* Responding to signal that collector is turning on */
      break;
    default: 
      DIE("CollectorTransition");
  }

  /* Collection cannot proceed until all processors have stopped running mutators.
     While waiting for the processors, the "first" processor begins to do some
     preliminary work.  This work must be completed before any processor begins collection.
     As a result, the "first" processor is counted twice.
  */
  isFirst = (weakBarrier(barriers,proc) == 0);
  if (isFirst) {
    ResetJob();                               /* Reset counter so all user threads are scanned */
  }
  resetSharedStack(workStack,&proc->work, 0);
  strongBarrier(barriers,proc);

  /* Reset root lists, compute thread-specific roots in parallel,
     determine whether a major GC was explicitly requested. */
  FetchAndAdd(&totalUnused, sizeof(val_t) * (proc->allocLimit - proc->allocCursor));
  assert(SetIsEmpty(&proc->work.roots));
  while ((threadIterator = NextJob()) != NULL) {
    discard_root_scan(proc,threadIterator);
    if (threadIterator->used == 0)
      continue; 
    initial_root_scan(proc,threadIterator);
    if (threadIterator->requestInfo >= 0)  /* Allocation request */
      FetchAndAdd(&totalRequest, threadIterator->requestInfo);
  }
  strongBarrier(barriers,proc);

  /* The "first" processor is in charge of the globals but
     must wait until all threads are processed before knowing if GC is major. 
     The major GC does not take effect until the first minor GC is completed.
  */

  if (isFirst) 
    major_global_scan(proc);    /* Always a major_global_scan because we must flip all globals */
  strongBarrier(barriers, proc);

  /* Check local stack empty, prepare copy range,
     forward all the roots (first proc handles backpointers),
     transfer work from local to shared work stack */
  procChangeState(proc, GCWork, 607);
  assert(SetIsEmpty(&proc->work.objs));
  proc->numRoot += SetLength(&proc->work.roots) +
	SetLength(&proc->work.globals);

  /* Omit popSharedObjStack */
  pushSharedStack(0,workStack, &proc->work);
  GCStatus = GCOn;
  strongBarrier(barriers, proc);
}
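
Both CollectorTransition and CollectorOn elect a designated processor with the weakBarrier/strongBarrier pair: the processor whose arrival index is 0 performs the one-time shared setup, and the strong barrier keeps every processor from continuing until that setup is complete. A minimal sketch of the pattern, assuming the runtime's Proc_t, barriers, weakBarrier and strongBarrier declarations, with hypothetical do_shared_setup and do_parallel_work helpers standing in for the real work:

/* Sketch only: do_shared_setup and do_parallel_work are hypothetical stand-ins. */
extern void do_shared_setup(void);
extern void do_parallel_work(Proc_t *proc);

static void barrier_election_sketch(Proc_t *proc)
{
  /* weakBarrier is assumed to return this processor's arrival index;
     the first arriver (index 0) becomes the designated processor. */
  int isFirst = (weakBarrier(barriers, proc) == 0);
  if (isFirst)
    do_shared_setup();               /* one-time global preparation */
  strongBarrier(barriers, proc);     /* no one proceeds until the setup is visible */
  do_parallel_work(proc);            /* per-processor collection work */
}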
Example no. 3
CSchemaBoolean& CSchemaBoolean::operator=( const bool bValue )
{
	SetIsEmpty( false );	/* assigning a value marks the field as no longer empty */
	m_Value = bValue;
	return *this;
}
Example no. 4
static void CollectorOn(Proc_t *proc)
{
  int isFirst = 0;
  Thread_t *threadIterator = NULL;

  /* Major vs Minor of current GC was determined at end of last GC */
  procChangeState(proc, GCWork, 600);
  proc->segmentType |= (FlipOn | ((GCType == Major) ? MajorWork : MinorWork));

  switch (GCStatus) {
    case GCOff:                   /* Signalling to other processors that collector is turning on */
      GCStatus = (GCType == Major ? doAgressive : doMinorAgressive) ? GCPendingAgressive : GCPendingOn;
      StopAllThreads();
      break;
    case GCPendingOn:             /* Responding to signal that collector is turning on */
    case GCPendingAgressive:
      break;
    default: 
      DIE("CollectorOn");
  }

  /* Collection cannot proceed until all processors have stopped running mutators.
     While waiting for the processors, the "first" processor begins to do some
     preliminary work.  This work must be completed before any processor begins collection.
     As a result, the "first" processor is counted twice.
  */
  isFirst = (weakBarrier(barriers,proc) == 0);
  if (isFirst) {
    Heap_ResetFreshPages(proc,nursery);
    if (GCType == Major) 
      Heap_ResetFreshPages(proc,fromSpace);
    if (GCType == Minor) {
      if (Heap_GetAvail(fromSpace) < Heap_GetSize(nursery)) {
	printf("Warning: fromSpace has less available space than total nursery size.\n"
	       "         Probably due to fromSpace pointer array allocation.\n");
	Heap_Resize(fromSpace, Heap_GetSize(fromSpace) + reducedNurserySize, 0);
	assert(Heap_GetAvail(fromSpace) >= Heap_GetSize(nursery));
      }
    }
    totalUnused = 0;
    totalRequest = 0;
    totalReplicated = 0;
    ResetJob();                               /* Reset counter so all user threads are scanned */
  }
  strongBarrier(barriers,proc);

  /* Reset root lists, compute thread-specific roots in parallel,
     determine whether a major GC was explicitly requested. */
  FetchAndAdd(&totalUnused, sizeof(val_t) * (proc->allocLimit - proc->allocCursor));
  assert(SetIsEmpty(&proc->work.roots));
  while ((threadIterator = NextJob()) != NULL) {
    initial_root_scan(proc,threadIterator);
    if (threadIterator->requestInfo >= 0)  /* Allocation request */
      FetchAndAdd(&totalRequest, threadIterator->requestInfo);
  }
  strongBarrier(barriers,proc);

  /* The "first" processor is in charge of the globals but
     must wait until all threads are processed before knowing if GC is major. 
     The major GC does not take effect until the first minor GC is completed.
  */
  if (isFirst) {
    paranoid_check_all(nursery, fromSpace, NULL, NULL, largeSpace);
    major_global_scan(proc);    /* Always a major_global_scan because we must flip all globals */
    if (GCType == Major) {
      Heap_Resize(fromSpace,expandedTenuredSize,0);
      Heap_Resize(toSpace,expandedTenuredSize,1);
      gc_large_startCollect();
    }
    else {
      /* Heap_Resize(nursery,expandedNurserySize,0); XXXXX */
       Heap_Resize(nursery,maximumNurserySize,0); /* Allocating large arrays makes this necessary */
    }
  }
  resetSharedStack(workStack,&proc->work, 1);
  strongBarrier(barriers, proc);

  /* Check local stack empty, prepare copy range,
     forward all the roots (first proc handles backpointers),
     transfer work from local to shared work stack */
  procChangeState(proc, GCWork, 603);

  assert(SetIsEmpty(&proc->work.objs));
  establishCopyRange(proc);

  proc->numRoot += SetLength(&proc->work.roots) +
	SetLength(&proc->work.globals);

  /* Omit popSharedObjStack */
  pushSharedStack(0,workStack, &proc->work);
  GCStatus = (GCType == Major ? doAgressive : doMinorAgressive) ? GCAgressive : GCOn;
  strongBarrier(barriers, proc);
}
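
CollectorOn accumulates each processor's pending allocation request into the shared totalRequest with FetchAndAdd, which the code treats as an atomic read-modify-write. A minimal sketch of the assumed semantics, using C11 atomics as a stand-in (the runtime's own primitive may be implemented differently):

#include <stdatomic.h>

/* Hypothetical stand-in illustrating the semantics assumed of FetchAndAdd:
   atomically add delta to *loc and return the value *loc held before the add. */
static long fetch_and_add_sketch(_Atomic long *loc, long delta)
{
  return atomic_fetch_add(loc, delta);
}

/* Usage mirroring CollectorOn: each processor contributes its request size. */
static _Atomic long totalRequest_sketch = 0;

static void contribute_request(long requestInfo)
{
  if (requestInfo >= 0)                        /* allocation request */
    (void) fetch_and_add_sketch(&totalRequest_sketch, requestInfo);
}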
Example no. 5
static void GCCollect_Semi(Proc_t *proc)
{
  long totalRequested = 0, totalUnused = 0;
  mem_t allocCursor = proc->allocCursor;
  mem_t allocLimit = proc->allocLimit;
  Thread_t *curThread = NULL;
  ploc_t globalLoc, rootLoc;

  /* Check that processor is unmapped, write list is not overflowed, allocation region intact */
  assert(NumProc == 1);
  assert(allocCursor <= allocLimit);
  assert(totalRequested >= 0);
  assert(primaryGlobalOffset == 0);
  assert(SetIsEmpty(&proc->work.roots));
  paranoid_check_all(fromSpace, NULL, NULL, NULL, NULL);

  /* Compute the roots from the stack and register set and globals */
  proc->segmentType |= MajorWork | FlipOn | FlipOff;
  totalUnused += sizeof(val_t) * (proc->allocLimit - proc->allocCursor);
  ResetJob();
  while ((curThread = NextJob()) != NULL) {
    if (curThread->requestInfo >= 0)
      totalRequested += curThread->requestInfo;
    thread_root_scan(proc,curThread);
  }
  major_global_scan(proc);

  /* Get toSpace ready for collection. Forward roots. Process gray objects */
  procChangeState(proc, GCWork, 203);
  Heap_Resize(toSpace, Heap_GetSize(fromSpace), 1);
  SetCopyRange(&proc->copyRange, proc, toSpace, NULL);

  if (!forceSpaceCheck)
    AllocEntireCopyRange(&proc->copyRange);                      /* no spacecheck - uniprocessor only */

  if (ordering == ImplicitOrder) {
    if (!forceSpaceCheck) {
      while (rootLoc = (ploc_t) SetPop(&proc->work.roots))
	locCopy1_noSpaceCheck(proc, rootLoc, fromSpace);
      while (globalLoc = (ploc_t) SetPop(&proc->work.globals)) 
	locCopy1_noSpaceCheck(proc, (ploc_t) globalLoc, fromSpace);
      scanUntil_locCopy1_noSpaceCheck(proc,toSpace->range.low,fromSpace);
    }
    else {
      while (rootLoc = (ploc_t) SetPop(&proc->work.roots))
	locCopy1(proc, rootLoc, fromSpace);
      while (globalLoc = (ploc_t) SetPop(&proc->work.globals)) 
	locCopy1(proc, (ploc_t) globalLoc, fromSpace);
      scanUntil_locCopy1(proc,toSpace->range.low,fromSpace);
    }
  }
  else if (ordering == HybridOrder) {
    assert(forceSpaceCheck == 1);
    while (rootLoc = (ploc_t) SetPop(&proc->work.roots))
      locCopy1(proc, rootLoc, fromSpace);
    while (globalLoc = (ploc_t) SetPop(&proc->work.globals)) 
      locCopy1(proc, (ploc_t) globalLoc, fromSpace);
    while (1) {
      mem_t start = NULL, stop;
      if (start = (mem_t) SetPop2(&proc->work.grayRegion, &stop))
	scanRegion_locCopy1(proc,start,stop,fromSpace);
      AddGrayCopyRange(&proc->copyRange);
      if (SetIsEmpty(&proc->work.grayRegion))
	break;
    }
    assert(SetIsEmpty(&proc->work.objs));
    assert(SetIsEmpty(&proc->work.grayRegion));
  }
  else {
    ptr_t gray;

    while (rootLoc = (ploc_t) SetPop(&proc->work.roots))
      locCopy1_replicaSet(proc, rootLoc,fromSpace); 
    while (globalLoc = (ploc_t) SetPop(&proc->work.globals))
      locCopy1_replicaSet(proc, globalLoc,fromSpace); 
    if (ordering == StackOrder) {
      while (gray = (ptr_t) SetPop(&proc->work.objs)) 
	(void) scanObj_locCopy1_replicaSet(proc,gray,fromSpace);   
    }
    else if (ordering == QueueOrder) {
      while (gray = (ptr_t) SetDequeue(&proc->work.objs)) 
	(void) scanObj_locCopy1_replicaSet(proc,gray,fromSpace);   
    }
    else
      DIE("bad ordering");
  }

  if (!forceSpaceCheck)
    ReturnCopyRange(&proc->copyRange);                           /* no spacecheck - uniprocessor only */

  ClearCopyRange(&proc->copyRange);
  assert(SetIsEmpty(&proc->work.roots));

  paranoid_check_all(fromSpace, NULL, toSpace, NULL, NULL);
  HeapAdjust1(totalRequested, totalUnused, 
	      0, CollectionRate, 0,
	      fromSpace, toSpace);
  Heap_Resize(fromSpace,0,1);
  typed_swap(Heap_t *, fromSpace, toSpace);
  NumGC++;

  ResetAllocation(proc, fromSpace);                        /* One processor can grab all of fromSpace for further allocation */
  assert(proc->writelistCursor == proc->writelistStart);
}
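
In the ImplicitOrder branch of GCCollect_Semi the gray set is never materialized: roots are forwarded into toSpace, and scanUntil_locCopy1_noSpaceCheck then walks toSpace from its base, forwarding the fields of each copied object until the scan pointer catches up with the allocation pointer. A minimal sketch of that classic Cheney-style scan, with hypothetical forward and object_size helpers and a simplified all-pointer object layout:

#include <stddef.h>

typedef void *word_t;

/* Sketch only: forward() is assumed to copy the fromSpace object that *slot
   points at into toSpace (bumping *alloc past the copy) and to update *slot;
   object_size() is assumed to return the object's length in words.  Real
   objects carry headers describing which fields are pointers; here every
   word is treated as a pointer for brevity. */
static void cheney_scan_sketch(word_t *scan, word_t **alloc,
                               void (*forward)(word_t *slot, word_t **alloc),
                               size_t (*object_size)(word_t *obj))
{
  while (scan < *alloc) {                /* gray region is [scan, *alloc) */
    size_t len = object_size(scan);
    for (size_t i = 0; i < len; i++)
      forward(&scan[i], alloc);          /* copying a target grows *alloc */
    scan += len;
  }
}

The StackOrder and QueueOrder branches make the gray set explicit instead, popping objects from a stack or dequeuing them from a queue, which yields depth-first versus breadth-first copy order over the same object graph.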