Void stopAnyPrinting() {               /* terminate printing of expression,*/
    if (printing) {                    /* after successful termination or  */
        printing = FALSE;              /* runtime error (e.g. interrupt)   */
        Putchar('\n');
        if (showStats) {
#define plural(v)   v, (v==1?"":"s")
#if HUGS_FOR_WINDOWS
            { int svColor = SetForeColor(BLUE);
#endif
            Printf("(%lu reduction%s, ",plural(numReductions));
            Printf("%lu cell%s",plural(numCells));
            if (numGcs>0)
                Printf(", %u garbage collection%s",plural(numGcs));
            Printf(")\n");
#if HUGS_FOR_WINDOWS
            SetForeColor(svColor); }
#endif
#undef plural
        }
#if OBSERVATIONS
        printObserve(ALLTAGS);
        if (obsCount) {
            ERRMSG(0) "Internal: observation sanity counter > 0\n"
            EEND;
        }
        if (showStats) {
            Int n = countObserve();
            if (n > 0) Printf("%d observations recorded\n", n);
        }
#endif
        FlushStdout();
        garbageCollect();
    }
}
HeapPtr allocHeapCell(Tag tag, Heap* globHeap, HeapPtr* first, HeapPtr* second) {
    int nextFree = globHeap->nextFreeCell;
    if (nextFree >= globHeap->maxSize) {
        // printf("Trying GC!\n");
        nextFree = garbageCollect(globHeap, first, second);
        // printf("%d Items copied during GC\n", nextFree);
    }
    HeapPtr heap = globHeap->toSpace;
    heap[nextFree].tag = tag;
    heap[nextFree].delayed = 0;
    switch (tag) {
    case FUN:
        heap[nextFree].fun.arity = -1;
        heap[nextFree].fun.code = NULL;
        break;
    case APP:
        heap[nextFree].app.leftArg = NULL;
        heap[nextFree].app.rightArg = NULL;
        break;
    case CONSTR:
        heap[nextFree].constr.id = -1;
        heap[nextFree].constr.arity = -1;
        break;
    case INTEGER:
        heap[nextFree].num = 0;
        break;
    default:
        heap[nextFree].indirection = NULL;
        break;
    } /* end of switch statement */
    globHeap->nextFreeCell += 1;
    return &heap[nextFree];
}
int main(int argc, char * argv[]) {
    init();
    printf("\nScheme!\n\n");
    int garbage = 0;
    if (argc > 1) {
        if (!strcmp(argv[1], "ng")) {
            garbage = 1;
        }
    }
    while (1) {
        printf("s_exp >> ");
        List test = S_Expression();
        printf("\n");
        List result = eval(test, environment, 0);
        printf("value >> ");
        printList(result);
        printf("\n\n");
        if (!garbage) {
            garbageCollect();
        }
    }
    printf("\n\n");
    return EXIT_SUCCESS;
}
int main() {
    garbageCollect();
    return 0;
}
T* memAlloc(const size_t &elements)
{
    managerInit();
    T* ptr = NULL;
    size_t alloc_bytes = divup(sizeof(T) * elements, 1024) * 1024;

    if (elements > 0) {

        // FIXME: Add better checks for garbage collection
        // Perhaps look at total memory available as a metric
        if (memory_map.size() > MAX_BUFFERS || used_bytes >= MAX_BYTES) {
            garbageCollect();
        }

        for (mem_iter iter = memory_map.begin(); iter != memory_map.end(); iter++) {
            mem_info info = iter->second;
            if (info.is_free && info.bytes == alloc_bytes) {
                iter->second.is_free = false;
                used_bytes += alloc_bytes;
                return (T *)iter->first;
            }
        }

        // Perform garbage collection if memory can not be allocated
        ptr = (T *)malloc(alloc_bytes);
        mem_info info = {false, alloc_bytes};
        memory_map[ptr] = info;
        used_bytes += alloc_bytes;
    }
    return ptr;
}
HeapPtr allocConstr(int id1, int arity1, Heap *h) {
    int n = numHeapCells(h);
    if (n < arity1 + 1) {
        garbageCollect(h, NULL, NULL);
        n = numHeapCells(h);
        if (n < arity1 + 1) {
            puts("Heap overflow: exiting");
            exit(1);
        }
    }
    HeapPtr constrNode = allocHeapCell(CONSTR, h, NULL, NULL);
    constrNode->constr.id = id1;
    constrNode->constr.arity = arity1;
    if (arity1 > 0) {
        n = h->nextFreeCell;
        int i;
        for (i = 0; i < arity1; i++)
            allocHeapCell(FIELD_PTR, h, NULL, NULL);
        constrNode->constr.fields = h->toSpace + n;
    } else {
        constrNode->constr.fields = NULL;
    }
    return constrNode;
}
explicit SharedPtrHandler(se::Class* clazz)
    : clazz_(clazz)
{
    schedule(
        [this](float delta) {
            // garbageCollect();
        },
        this, 0.0f, "shared_ptr_handler");
}
void cMap_ResourceIDToScaledDC::free()
{
    for (int i = 0; i < GetSize(); i++)
        ElementAt(i)._lifespan = 0;
    garbageCollect();   // Garbage collection will get rid of everything now.
    ASSERT(!GetSize()); // Just to check that it works.
}
cl::Buffer *bufferAlloc(const size_t &bytes)
{
    int n = getActiveDeviceId();
    cl::Buffer *ptr = NULL;
    size_t alloc_bytes = divup(bytes, memory_resolution) * memory_resolution;

    if (bytes > 0) {

        // FIXME: Add better checks for garbage collection
        // Perhaps look at total memory available as a metric
        if (memory_maps[n].size() >= MAX_BUFFERS || used_bytes[n] >= MAX_BYTES) {
            garbageCollect();
        }

        for (mem_iter iter = memory_maps[n].begin(); iter != memory_maps[n].end(); ++iter) {
            mem_info info = iter->second;
            if (info.is_free && !info.is_unlinked && info.bytes == alloc_bytes) {
                iter->second.is_free = false;
                used_bytes[n] += alloc_bytes;
                used_buffers[n]++;
                return iter->first;
            }
        }

        try {
            ptr = new cl::Buffer(getContext(), CL_MEM_READ_WRITE, alloc_bytes);
        } catch (...) {
            garbageCollect();
            ptr = new cl::Buffer(getContext(), CL_MEM_READ_WRITE, alloc_bytes);
        }

        mem_info info = {false, false, alloc_bytes};
        memory_maps[n][ptr] = info;
        used_bytes[n] += alloc_bytes;
        used_buffers[n]++;
        total_bytes[n] += alloc_bytes;
    }
    return ptr;
}
T* memAlloc(const size_t &elements)
{
    managerInit();
    int n = getActiveDeviceId();
    T* ptr = NULL;
    size_t alloc_bytes = divup(sizeof(T) * elements, memory_resolution) * memory_resolution;

    if (elements > 0) {

        // FIXME: Add better checks for garbage collection
        // Perhaps look at total memory available as a metric
        if (memory_maps[n].size() >= MAX_BUFFERS || used_bytes[n] >= MAX_BYTES) {
            garbageCollect();
        }

        for (mem_iter iter = memory_maps[n].begin(); iter != memory_maps[n].end(); ++iter) {
            mem_info info = iter->second;
            if (info.is_free && !info.is_unlinked && info.bytes == alloc_bytes) {
                iter->second.is_free = false;
                used_bytes[n] += alloc_bytes;
                used_buffers[n]++;
                return (T *)iter->first;
            }
        }

        // Perform garbage collection if memory can not be allocated
        if (cudaMalloc((void **)&ptr, alloc_bytes) != cudaSuccess) {
            garbageCollect();
            CUDA_CHECK(cudaMalloc((void **)(&ptr), alloc_bytes));
        }

        mem_info info = {false, false, alloc_bytes};
        memory_maps[n][ptr] = info;
        used_bytes[n] += alloc_bytes;
        used_buffers[n]++;
        total_bytes[n] += alloc_bytes;
    }
    return ptr;
}
void ThreadFactory::shutdown()
{
    ENTER("ThreadFactory.shutdown");
    lock();
    garbageCollect(true);
    if (threads->getCount() != 0) {
        fprintf(stderr, "ThreadFactory.shutdown: Error: There are still some active threads\n");
    }
    unlock();
    EXIT("ThreadFactory.shutdown");
}
int s3c_g3d_release(struct inode *inode, struct file *file)
{
    int *newid = file->private_data;

    if (mutex_lock_processID != 0 &&
        mutex_lock_processID == (unsigned int)file->private_data) {
        mutex_unlock(&mem_sfr_lock);
        printk("Abnormal close of pid # %d\n", task_pid_nr(current));
    }

    garbageCollect(newid);
    vfree(newid);
    return 0;
}
void DerpVM::garbageCollectWithThreshold(void)
{
    if (gcObjects.size() > objectCountGcThreshold) {
        garbageCollect();

        // Readjust the object count threshold.
        objectCountGcThreshold = higherPow2(gcObjects.size());
        if (objectCountGcThreshold <= GARBAGECOLLECT_MIN_THRESHOLD) {
            objectCountGcThreshold = GARBAGECOLLECT_MIN_THRESHOLD;
        }
    }
}
Thread * ThreadFactory::createThread(Runnable *toRun, const char *name, bool isDaemon)
{
    ENTER("ThreadFactory.createThread");
    lock();
    garbageCollect();
    Thread *t = new Thread(toRun, name, isDaemon);
    threads->append(t);
    unlock();
    EXIT("ThreadFactory.createThread");
    return t;
}
RideCache::RideCache(Context *context) : context(context)
{
    progress_ = 100;
    exiting = false;

    // get the new zone configuration fingerprint
    fingerprint = static_cast<unsigned long>(context->athlete->zones()->getFingerprint())
                + static_cast<unsigned long>(context->athlete->paceZones()->getFingerprint())
                + static_cast<unsigned long>(context->athlete->hrZones()->getFingerprint())
                + static_cast<unsigned long>(context->athlete->routes->getFingerprint());

    // set the list
    // populate ride list
    RideItem *last = NULL;
    QStringListIterator i(RideFileFactory::instance().listRideFiles(context->athlete->home->activities()));
    while (i.hasNext()) {
        QString name = i.next();
        QDateTime dt;
        if (RideFile::parseRideFileName(name, &dt)) {
            last = new RideItem(context->athlete->home->activities().canonicalPath(), name, dt, context);
            connect(last, SIGNAL(rideDataChanged()), this, SLOT(itemChanged()));
            connect(last, SIGNAL(rideMetadataChanged()), this, SLOT(itemChanged()));
            rides_ << last;
        }
    }

    // load the store - will unstale once cache restored
    load();

    // now sort it
    qSort(rides_.begin(), rides_.end(), rideCacheLessThan);

    // set model once we have the basics
    model_ = new RideCacheModel(context, this);

    // now refresh just in case.
    refresh();

    // do we have any stale items ?
    connect(context, SIGNAL(configChanged(qint32)), this, SLOT(configChanged(qint32)));

    // future watching
    connect(&watcher, SIGNAL(finished()), this, SLOT(garbageCollect()));
    connect(&watcher, SIGNAL(finished()), this, SLOT(save()));
    connect(&watcher, SIGNAL(finished()), context, SLOT(notifyRefreshEnd()));
    connect(&watcher, SIGNAL(started()), context, SLOT(notifyRefreshStart()));
    connect(&watcher, SIGNAL(progressValueChanged(int)), this, SLOT(progressing(int)));
}
T* memAlloc(const size_t &elements)
{
    managerInit();
    T* ptr = NULL;
    size_t alloc_bytes = divup(sizeof(T) * elements, memory_resolution) * memory_resolution;

    if (elements > 0) {
        std::lock_guard<std::mutex> lock(memory_map_mutex);

        // FIXME: Add better checks for garbage collection
        // Perhaps look at total memory available as a metric
        if (memory_map.size() > MAX_BUFFERS || used_bytes >= MAX_BYTES) {
            garbageCollect();
        }

        for (mem_iter iter = memory_map.begin(); iter != memory_map.end(); ++iter) {
            mem_info info = iter->second;
            if (info.is_free && !info.is_unlinked && info.bytes == alloc_bytes) {
                iter->second.is_free = false;
                used_bytes += alloc_bytes;
                used_buffers++;
                return (T *)iter->first;
            }
        }

        // Perform garbage collection if memory can not be allocated
        ptr = (T *)malloc(alloc_bytes);
        if (ptr == NULL) {
            AF_ERROR("Can not allocate memory", AF_ERR_NO_MEM);
        }
        mem_info info = {false, false, alloc_bytes};
        memory_map[ptr] = info;
        used_bytes += alloc_bytes;
        used_buffers++;
        total_bytes += alloc_bytes;
    }
    return ptr;
}
Object GarbageCollector::allocate(size_t size)
{
    assert(variableReference_ && globalVariable_);
    Object address = (Object)from_space_->allocateMemory(size);
    if (address == 0) {
        garbageCollect();
        address = from_space_->allocateMemory(size);
        if (address == 0) {
            // error
            throw std::runtime_error("Allocate memory failure!");
        }
    }
    return address;
}
~Manager()
{
    // Destructors should not throw exceptions
    try {
        for (int i = 0; i < getDeviceCount(); i++) {
            setDevice(i);
            garbageCollect();
        }
        pinnedGarbageCollect();
    } catch (AfError &ex) {
        const char* perr = getenv("AF_PRINT_ERRORS");
        if (perr && perr[0] != '0') {
            fprintf(stderr, "%s\n", ex.what());
        }
    }
}
/* This routine removes the current element from the list. If none is
 * available it does nothing. */
void listRemove(list_t *list, void (*garbageCollect)(void *))
{
    /* Any elements in the list available? */
    if (list->current != NULL) {
        struct listElement *p;

        /* Yes, let's remove the current one. */
        p = list->current;

        /* Is it the first element in the list? */
        if (p->previous != NULL) {
            /* No, so let the element before it know what the new element
             * after it will be. */
            list->current->previous->next = p->next;
        } else {
            /* Yes, so update the first meta data. */
            list->first = p->next;
        }

        /* Is it the last element in the list? */
        if (p->next != NULL) {
            /* No, so let the element after it know what the new element
             * before it will be. */
            p->next->previous = p->previous;
        } else {
            /* Yes, so update the last meta data. */
            list->last = p->previous;
        }

        /* Now we need to update the current meta data. Does an element
         * after this element exist? */
        if (p->next != NULL) {
            /* Yes, this next element will be the new current element. */
            list->current = p->next;
        } else {
            /* No, the element before this one will be the new current element. */
            list->current = p->previous;
        }

        /* Run the garbage collector for this item if required. */
        if (garbageCollect != NULL) {
            garbageCollect(p->data);
        }

        /* Return system resources. */
        free(p);

        /* Decrease the amount of elements known to the list. */
        list->elements--;
    }
}
DerpVM::~DerpVM(void)
{
    // This will clean up global state except for things where
    // something still holds a reference.
    globalContext.clearAllVariables();
    garbageCollect();

    // Destroy all objects that have no external references.
    // Orphan everything else.
    while (gcObjects.size()) {
        if (gcObjects[0]->externalRefCount > 1) {
            unregisterObject(gcObjects[0]);
        } else {
            delete gcObjects[0];
        }
    }
}
/* This routine gracefully returns all system resources and empties the
 * list. */
void listDestroy(list_t *list, void (*garbageCollect)(void *))
{
    struct listElement *p;

    /* Search until no elements are left... */
    while (list->first != NULL) {
        /* First save a reference to our next element. */
        p = list->first->next;

        /* Run the garbage collector for this item if required. */
        if (garbageCollect != NULL) {
            garbageCollect(list->first->data);
        }

        /* Then return resources for the current one. */
        free(list->first);

        /* And at last advance to our next element. */
        list->first = p;
    }

    /* Reset all our meta data. */
    list->first = list->last = list->current = NULL;
    list->elements = 0;
}
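/* Usage sketch (not from the sources above): it illustrates the callback
 * convention shared by listRemove and listDestroy, where the garbageCollect
 * parameter acts as an optional per-item destructor for the element payload.
 * How the list gets populated is left out; the list_t value is assumed to
 * come from the surrounding list API and to hold heap-allocated payloads. */
#include <stdlib.h>

static void freeItem(void *data) {
    free(data);                 /* per-item destructor passed as the callback */
}

static void exampleListCleanup(list_t *list) {
    listRemove(list, NULL);     /* NULL: unlink and free the node, leave its data alone */
    listRemove(list, freeItem); /* callback: free the current payload, then the node */
    listDestroy(list, freeItem);/* free every remaining payload and empty the list */
}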
void MemoryManager::imageWrite(FILE* fp)
{
    long i, size;

    garbageCollect();

    fw(fp, (char *) &symbols, sizeof(object));

    for (i = 0; i < objectTable.size(); i++) {
        if (objectTable[i].referenceCount > 0) {
            dummyObject.di = i;
            dummyObject.cl = objectTable[i]._class;
            dummyObject.ds = size = objectTable[i].size;
            fw(fp, (char *) &dummyObject, sizeof(dummyObject));
            if (size < 0)
                size = ((- size) + 1) / 2;
            if (size != 0)
                fw(fp, (char *) objectTable[i].memory, sizeof(object) * size);
        }
    }
}
inline void Solver::checkGarbage(double gf)
{
    if (ca.wasted() > ca.size() * gf)
        garbageCollect();
}
void StringPool::garbageCollectIfNeeded()
{
    if (strings.size() > minNumberOfStringsForGarbageCollection
         && Time::getApproximateMillisecondCounter() > lastGarbageCollectionTime + garbageCollectionInterval)
        garbageCollect();
}
object MemoryManager::allocObject(size_t memorySize)
{
    int i;
    size_t position;
    bool done;
    TObjectFreeListIterator tpos;

    /* first try the free lists, this is fastest */
    if ((tpos = objectFreeList.find(memorySize)) != objectFreeList.end() &&
        tpos->second != nilobj) {
        position = tpos->second;
        objectFreeList.erase(tpos);
        objectFreeListInv.erase(position);
    }
    /* if not there, next try making a size zero object and making it bigger */
    else if ((tpos = objectFreeList.find(0)) != objectFreeList.end() &&
             tpos->second != nilobj) {
        position = tpos->second;
        objectFreeList.erase(tpos);
        objectFreeListInv.erase(position);
        objectTable[position].size = memorySize;
        objectTable[position].memory = mBlockAlloc(memorySize);
    } else {
        /* not found, must work a bit harder */
        done = false;

        /* first try making a bigger object smaller */
        TObjectFreeListIterator tbigger = objectFreeList.upper_bound(memorySize);
        if (tbigger != objectFreeList.end() && tbigger->second != nilobj) {
            position = tbigger->second;
            objectFreeList.erase(tbigger);
            objectFreeListInv.erase(position);

            /* just trim it a bit */
            objectTable[position].size = memorySize;
            done = true;
        }

        /* next try making a smaller object bigger */
        if (!done) {
            TObjectFreeListIterator tsmaller = objectFreeList.lower_bound(memorySize);
            if (tsmaller != objectFreeList.begin() &&
                (--tsmaller != objectFreeList.begin()) &&
                tsmaller->second != nilobj) {
                position = tsmaller->second;
                objectFreeList.erase(tsmaller);
                objectFreeListInv.erase(position);
                objectTable[position].size = memorySize;
                free(objectTable[position].memory);
                objectTable[position].memory = mBlockAlloc(memorySize);
                done = true;
            }
        }

        /* if we STILL don't have it then there is nothing */
        /* more we can do */
        if (!done) {
            if (debugging)
                fprintf(stderr, "Failed to find an available object, trying GC\n");
            if (garbageCollect() > 0) {
                return allocObject(memorySize);
            } else {
                if (debugging)
                    fprintf(stderr, "No suitable objects available after GC, growing store.\n");
                growObjectStore(growAmount);
                return allocObject(memorySize);
            }
        }
    }

    /* set class and type */
    objectTable[position].referenceCount = 0;
    objectTable[position]._class = nilobj;
    objectTable[position].size = memorySize;
    return (position << 1);
}