void
barf(const char *s, ...)
{
    va_list ap;
    va_start(ap, s);
    (*fatalInternalErrorFn)(s, ap);
    va_end(ap);
    stg_exit(EXIT_INTERNAL_ERROR); // just in case fatalInternalErrorFn() returns
}
/* Returns NULL on failure; errno set */
static void *
my_mmap (void *addr, W_ size, int operation)
{
    void *ret;

#if darwin_HOST_OS
    // Without MAP_FIXED, Apple's mmap ignores addr.
    // With MAP_FIXED, it overwrites already mapped regions, which is
    // also not what we want.
    // mmap(0, ... MAP_FIXED ...) is worst of all: it unmaps the program text
    // and replaces it with zeroes, causing instant death.
    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
    // Let's just use the underlying Mach microkernel calls directly;
    // they're much nicer.

    kern_return_t err = 0;
    ret = addr;
    if (operation & MEM_RESERVE)
    {
        if (addr) // try to allocate at the given address
            err = vm_allocate(mach_task_self(), (vm_address_t*) &ret,
                              size, FALSE);
        if (!addr || err) // otherwise try to allocate anywhere
            err = vm_allocate(mach_task_self(), (vm_address_t*) &ret,
                              size, TRUE);
    }

    if (err) {
        // don't know what the error codes mean exactly; assume it's
        // not our problem, though.
        errorBelch("memory allocation failed (requested %" FMT_Word " bytes)",
                   size);
        stg_exit(EXIT_FAILURE);
    }

    if (operation & MEM_COMMIT) {
        vm_protect(mach_task_self(), (vm_address_t)ret, size, FALSE,
                   VM_PROT_READ|VM_PROT_WRITE);
    }

#else

    int prot, flags;
    if (operation & MEM_COMMIT)
        prot = PROT_READ | PROT_WRITE;
    else
        prot = PROT_NONE;

    if (operation == MEM_RESERVE)
# if defined(MAP_NORESERVE)
        flags = MAP_NORESERVE;
# else
#  ifdef USE_LARGE_ADDRESS_SPACE
#   error USE_LARGE_ADDRESS_SPACE needs MAP_NORESERVE
#  endif
        errorBelch("my_mmap(,,MEM_RESERVE) not supported on this platform");
# endif
    else if (operation == MEM_COMMIT)
        flags = MAP_FIXED;
    else
        flags = 0;
static void
failure(char *msg)
{
    debugTrace(DEBUG_hpc, "hpc failure: %s\n", msg);
    fprintf(stderr, "Hpc failure: %s\n", msg);
    if (tixFilename) {
        fprintf(stderr, "(perhaps remove %s file?)\n", tixFilename);
    } else {
        fprintf(stderr, "(perhaps remove .tix file?)\n");
    }
    stg_exit(1);
}
void setExecutable (void *p, W_ len, bool exec)
{
    DWORD dwOldProtect = 0;
    if (VirtualProtect (p, len,
                        exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE,
                        &dwOldProtect) == 0)
    {
        sysErrorBelch("setExecutable: failed to protect 0x%p; old protection: "
                      "%lu\n", p, (unsigned long)dwOldProtect);
        stg_exit(EXIT_FAILURE);
    }
}
void *
stgCallocBytes (size_t count, size_t size, char *msg)
{
    void *space;

    if ((space = calloc(count, size)) == NULL) {
        /* don't fflush(stdout); WORKAROUND bug in Linux glibc */
        rtsConfig.mallocFailHook((W_) count*size, msg);
        stg_exit(EXIT_INTERNAL_ERROR);
    }
    return space;
}
void *
stgReallocBytes (void *p, size_t n, char *msg)
{
    void *space;

    if ((space = realloc(p, n)) == NULL) {
        /* don't fflush(stdout); WORKAROUND bug in Linux glibc */
        rtsConfig.mallocFailHook((W_) n, msg);
        stg_exit(EXIT_INTERNAL_ERROR);
    }
    return space;
}
void *
stgCallocBytes (int n, int m, char *msg)
{
    char *space;

    if ((space = (char *) calloc((size_t) n, (size_t) m)) == NULL) {
        /* don't fflush(stdout); WORKAROUND bug in Linux glibc */
        MallocFailHook((W_) n*m, msg); /*msg*/
        stg_exit(EXIT_INTERNAL_ERROR);
    }
    return space;
}
void
initTicker (Time interval, TickProc handle_tick)
{
    tick_interval = interval;
    tick_proc = handle_tick;

    timer_queue = CreateTimerQueue();
    if (timer_queue == NULL) {
        sysErrorBelch("CreateTimerQueue");
        stg_exit(EXIT_FAILURE);
    }
}
void *
stgReallocBytes (void *p, int n, char *msg)
{
    char *space;
    size_t n2;

    n2 = (size_t) n;
    if ((space = (char *) realloc(p, n2)) == NULL) {
        /* don't fflush(stdout); WORKAROUND bug in Linux glibc */
        MallocFailHook((W_) n, msg); /*msg*/
        stg_exit(EXIT_INTERNAL_ERROR);
    }
    return space;
}
void
initCondition( Condition* pCond )
{
    HANDLE h = CreateEvent(NULL,
                           FALSE,  /* auto reset */
                           FALSE,  /* initially not signalled */
                           NULL);  /* unnamed => process-local. */

    if ( h == NULL ) {
        sysErrorBelch("initCondition: unable to create");
        stg_exit(EXIT_FAILURE);
    }
    *pCond = h;
    return;
}
void
startTicker(void)
{
    BOOL r;

    r = CreateTimerQueueTimer(&timer,
                              timer_queue,
                              tick_callback,
                              0,
                              0,
                              TimeToUS(tick_interval) / 1000, // ms
                              WT_EXECUTEINTIMERTHREAD);
    if (r == 0) {
        sysErrorBelch("CreateTimerQueueTimer");
        stg_exit(EXIT_FAILURE);
    }
}
Task *
newBoundTask (void)
{
    Task *task;

    if (!tasksInitialized) {
        errorBelch("newBoundTask: RTS is not initialised; call hs_init() first");
        stg_exit(EXIT_FAILURE);
    }

    task = allocTask();

    task->stopped = rtsFalse;

    newInCall(task);

    debugTrace(DEBUG_sched, "new task (taskCount: %d)", taskCount);
    return task;
}
/* VirtualAlloc MEM_COMMIT can't cross boundaries of VirtualAlloc
   MEM_RESERVE, so we might need to do many VirtualAlloc MEM_COMMITs.
   We simply walk the (ordered) allocated blocks. */
static void
commitBlocks(char* base, W_ size)
{
    alloc_rec* it;
    it = allocs;
    for ( ; it != 0 && (it->base + it->size) <= base; it = it->next) {}
    for ( ; it != 0 && size > 0; it = it->next) {
        W_ size_delta;
        void* temp;
        size_delta = it->size - (base - it->base);
        if (size_delta > size) size_delta = size;
        temp = VirtualAlloc(base, size_delta, MEM_COMMIT, PAGE_READWRITE);
        if (temp == 0) {
            sysErrorBelch("getMBlocks: VirtualAlloc MEM_COMMIT failed");
            stg_exit(EXIT_HEAPOVERFLOW);
        }
        size -= size_delta;
        base += size_delta;
    }
}
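/* Illustrative sketch, not RTS code: the two-step VirtualAlloc pattern
   that commitBlocks has to respect. Address space is reserved first and
   committed later in pieces, but a single MEM_COMMIT call may not span
   two separate reservations -- hence the walk over the alloc_rec list
   above. All names below are hypothetical. */
#include <windows.h>
#include <stdio.h>

static void reserveThenCommitDemo(void)
{
    SIZE_T size = 1024 * 1024;  /* 1 MiB */

    /* Reserve address space only; no physical backing yet. */
    char *base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
    if (base == NULL) {
        fprintf(stderr, "reserve failed: %lu\n", GetLastError());
        return;
    }

    /* Commit the first 64 KiB; this stays inside the single reservation
       made above, which is the constraint commitBlocks works around. */
    if (VirtualAlloc(base, 64 * 1024, MEM_COMMIT, PAGE_READWRITE) == NULL) {
        fprintf(stderr, "commit failed: %lu\n", GetLastError());
    }

    VirtualFree(base, 0, MEM_RELEASE);
}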
void osBindMBlocksToNode(
    void *addr,
    StgWord size,
    uint32_t node)
{
    if (osNumaAvailable())
    {
        void* temp;
        if (RtsFlags.GcFlags.numa) {
            /* Note [base memory]
               I would like to use addr here to specify the base memory
               of the allocation. The problem is that the address we are
               requesting is too high. I can't figure out if it's because
               of my NUMA emulation or a bug in the code. On Windows, -xb
               is also broken (it does nothing), so that can't be used to
               tweak it (see #12577). So for now, just let the OS decide.
            */
            temp = VirtualAllocExNuma(
                       GetCurrentProcess(),
                       NULL, // addr? See Note [base memory]
                       size,
                       MEM_RESERVE | MEM_COMMIT,
                       PAGE_READWRITE,
                       node
                   );

            if (!temp) {
                if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY) {
                    errorBelch("out of memory");
                }
                else {
                    sysErrorBelch(
                        "osBindMBlocksToNode: VirtualAllocExNuma MEM_RESERVE %"
                        FMT_Word " bytes at address %p failed",
                        size, addr);
                }
                stg_exit(EXIT_FAILURE);
            }
        }
    }
}
long WINAPI
__hs_exception_handler(struct _EXCEPTION_POINTERS *exception_data)
{
    long action = EXCEPTION_CONTINUE_SEARCH;

    // When the system unwinds the VEH stack after having handled an excn,
    // return immediately.
    if ((exception_data->ExceptionRecord->ExceptionFlags & EH_UNWINDING) == 0)
    {
        // Error handling cases covered by this implementation.
        switch (exception_data->ExceptionRecord->ExceptionCode) {
            case EXCEPTION_FLT_DIVIDE_BY_ZERO:
            case EXCEPTION_INT_DIVIDE_BY_ZERO:
                fprintf(stdout, "divide by zero\n");
                action = EXCEPTION_CONTINUE_EXECUTION;
                break;
            case EXCEPTION_STACK_OVERFLOW:
                fprintf(stdout, "C stack overflow in generated code\n");
                action = EXCEPTION_CONTINUE_EXECUTION;
                break;
            case EXCEPTION_ACCESS_VIOLATION:
                fprintf(stdout,
                        "Segmentation fault/access violation in generated code\n");
                action = EXCEPTION_CONTINUE_EXECUTION;
                break;
            default:;
        }

        // If an error has occurred and we've decided to continue execution
        // then we've done so to prevent something else from handling the
        // error. But the correct action is still to exit as fast as possible.
        if (EXCEPTION_CONTINUE_EXECUTION == action)
        {
            fflush(stdout);
            stg_exit(1);
        }
    }

    return action;
}
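/* Illustrative sketch, assuming the usual Win32 VEH setup; this is not
   necessarily the RTS's actual registration code. A handler with the
   signature above is installed with AddVectoredExceptionHandler; passing
   1 asks the system to call it ahead of other vectored handlers. The
   names installVEHDemo/removeVEHDemo and vehCookie are hypothetical. */
#include <windows.h>

extern long WINAPI __hs_exception_handler(struct _EXCEPTION_POINTERS *);

static PVOID vehCookie;  /* cookie identifying the registered handler */

static void installVEHDemo(void)
{
    vehCookie = AddVectoredExceptionHandler(1, __hs_exception_handler);
}

static void removeVEHDemo(void)
{
    if (vehCookie != NULL) {
        RemoveVectoredExceptionHandler(vehCookie);
        vehCookie = NULL;
    }
}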
void
sendIOManagerEvent (HsWord32 event)
{
#if defined(THREADED_RTS)
    ACQUIRE_LOCK(&event_buf_mutex);

    // debugBelch("sendIOManagerEvent: %d\n", event);
    if (io_manager_event != INVALID_HANDLE_VALUE) {
        if (next_event == EVENT_BUFSIZ) {
            errorBelch("event buffer overflowed; event dropped");
        } else {
            if (!SetEvent(io_manager_event)) {
                sysErrorBelch("sendIOManagerEvent");
                stg_exit(EXIT_FAILURE);
            }
            event_buf[next_event++] = (StgWord32)event;
        }
    }

    RELEASE_LOCK(&event_buf_mutex);
#endif
}
void
startWorkerTask (Capability *cap)
{
    int r;
    OSThreadId tid;
    Task *task;

    // A worker always gets a fresh Task structure.
    task = newTask(rtsTrue);

    // The lock here is to synchronise with taskStart(), to make sure
    // that we have finished setting up the Task structure before the
    // worker thread reads it.
    ACQUIRE_LOCK(&task->lock);

    // We don't emit a task creation event here, but in workerStart,
    // where the kernel thread id is known.
    task->cap = cap;

    // Give the capability directly to the worker; we can't let anyone
    // else get in, because the new worker Task has nowhere to go to
    // sleep so that it could be woken up again.
    ASSERT_LOCK_HELD(&cap->lock);
    cap->running_task = task;

    r = createOSThread(&tid, (OSThreadProc*)workerStart, task);
    if (r != 0) {
        sysErrorBelch("failed to create OS thread");
        stg_exit(EXIT_FAILURE);
    }

    debugTrace(DEBUG_sched, "new worker task (taskCount: %d)", taskCount);

    task->id = tid;

    // ok, finished with the Task struct.
    RELEASE_LOCK(&task->lock);
}
void
initTicker (Time interval, TickProc handle_tick)
{
    itimer_interval = interval;
    stopped = 0;
    exited = 0;

    initCondition(&start_cond);
    initMutex(&mutex);

    /*
     * We can't use the RTS's createOSThread here as we need to remain
     * attached to the thread we create so we can later join to it if
     * requested.
     */
    if (! pthread_create(&thread, NULL, itimer_thread_func, (void*)handle_tick)) {
#if defined(HAVE_PTHREAD_SETNAME_NP)
        pthread_setname_np(thread, "ghc_ticker");
#endif
    } else {
        sysErrorBelch("Itimer: Failed to spawn thread");
        stg_exit(EXIT_FAILURE);
    }
}
static void
readTix(void)
{
    unsigned int i;
    HpcModuleInfo *tmpModule;
    const HpcModuleInfo *lookup;

    ws();
    expect('T');
    expect('i');
    expect('x');
    ws();
    expect('[');
    ws();

    while (tix_ch != ']') {
        tmpModule = (HpcModuleInfo *)stgMallocBytes(sizeof(HpcModuleInfo),
                                                    "Hpc.readTix");
        tmpModule->from_file = true;
        expect('T');
        expect('i');
        expect('x');
        expect('M');
        expect('o');
        expect('d');
        expect('u');
        expect('l');
        expect('e');
        ws();
        tmpModule->modName = expectString();
        ws();
        tmpModule->hashNo = (unsigned int)expectWord64();
        ws();
        tmpModule->tickCount = (int)expectWord64();
        tmpModule->tixArr = (StgWord64 *)calloc(tmpModule->tickCount,
                                                sizeof(StgWord64));
        ws();
        expect('[');
        ws();
        for (i = 0; i < tmpModule->tickCount; i++) {
            tmpModule->tixArr[i] = expectWord64();
            ws();
            if (tix_ch == ',') {
                expect(',');
                ws();
            }
        }
        expect(']');
        ws();

        lookup = lookupHashTable(moduleHash, (StgWord)tmpModule->modName);
        if (lookup == NULL) {
            debugTrace(DEBUG_hpc, "readTix: new HpcModuleInfo for %s",
                       tmpModule->modName);
            insertHashTable(moduleHash, (StgWord)tmpModule->modName, tmpModule);
        } else {
            ASSERT(lookup->tixArr != 0);
            ASSERT(!strcmp(tmpModule->modName, lookup->modName));
            debugTrace(DEBUG_hpc, "readTix: existing HpcModuleInfo for %s",
                       tmpModule->modName);
            if (tmpModule->hashNo != lookup->hashNo) {
                fprintf(stderr, "in module '%s'\n", tmpModule->modName);
                failure("module mismatch with .tix/.mix file hash number");
                if (tixFilename != NULL) {
                    fprintf(stderr, "(perhaps remove %s ?)\n", tixFilename);
                }
                stg_exit(EXIT_FAILURE);
            }
            for (i = 0; i < tmpModule->tickCount; i++) {
                lookup->tixArr[i] = tmpModule->tixArr[i];
            }
            stgFree(tmpModule->tixArr);
            stgFree(tmpModule->modName);
            stgFree(tmpModule);
        }

        if (tix_ch == ',') {
            expect(',');
            ws();
        }
    }
    expect(']');
    fclose(tixFile);
}
StgPtr
allocate (Capability *cap, W_ n)
{
    bdescr *bd;
    StgPtr p;

    TICK_ALLOC_HEAP_NOCTR(WDS(n));
    CCS_ALLOC(cap->r.rCCCS, n);
    if (cap->r.rCurrentTSO != NULL) {
        // cap->r.rCurrentTSO->alloc_limit -= n*sizeof(W_)
        ASSIGN_Int64((W_*)&(cap->r.rCurrentTSO->alloc_limit),
                     (PK_Int64((W_*)&(cap->r.rCurrentTSO->alloc_limit))
                      - n*sizeof(W_)));
    }

    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        // The largest number of words such that
        // the computation of req_blocks will not overflow.
        W_ max_words = (HS_WORD_MAX & ~(BLOCK_SIZE-1)) / sizeof(W_);
        W_ req_blocks;

        if (n > max_words)
            req_blocks = HS_WORD_MAX; // signal overflow below
        else
            req_blocks = (W_)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;

        // Attempting to allocate an object larger than maxHeapSize
        // should definitely be disallowed.  (bug #1791)
        if ((RtsFlags.GcFlags.maxHeapSize > 0 &&
             req_blocks >= RtsFlags.GcFlags.maxHeapSize) ||
            req_blocks >= HS_INT32_MAX)   // avoid overflow when
                                          // calling allocGroup() below
        {
            heapOverflow();
            // heapOverflow() doesn't exit (see #2592), but we aren't
            // in a position to do a clean shutdown here: we
            // either have to allocate the memory or exit now.
            // Allocating the memory would be bad, because the user
            // has requested that we not exceed maxHeapSize, so we
            // just exit.
            stg_exit(EXIT_HEAPOVERFLOW);
        }

        ACQUIRE_SM_LOCK
        bd = allocGroup(req_blocks);
        dbl_link_onto(bd, &g0->large_objects);
        g0->n_large_blocks += bd->blocks; // might be larger than req_blocks
        g0->n_new_large_words += n;
        RELEASE_SM_LOCK;
        initBdescr(bd, g0, g0);
        bd->flags = BF_LARGE;
        bd->free = bd->start + n;
        cap->total_allocated += n;
        return bd->start;
    }

    /* small allocation (<LARGE_OBJECT_THRESHOLD) */

    bd = cap->r.rCurrentAlloc;
    if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {

        if (bd) finishedNurseryBlock(cap, bd);

        // The CurrentAlloc block is full, we need to find another
        // one.  First, we try taking the next block from the
        // nursery:
        bd = cap->r.rCurrentNursery->link;

        if (bd == NULL) {
            // The nursery is empty: allocate a fresh block (we can't
            // fail here).
            ACQUIRE_SM_LOCK;
            bd = allocBlock();
            cap->r.rNursery->n_blocks++;
            RELEASE_SM_LOCK;
            initBdescr(bd, g0, g0);
            bd->flags = 0;
            // If we had to allocate a new block, then we'll GC
            // pretty quickly now, because MAYBE_GC() will
            // notice that CurrentNursery->link is NULL.
        } else {
            newNurseryBlock(bd);
            // we have a block in the nursery: take it and put
            // it at the *front* of the nursery list, and use it
            // to allocate() from.
            //
            // Previously the nursery looked like this:
            //
            //           CurrentNursery
            //                  /
            //    +-+    +-+
            // nursery -> ... |A| -> |B| -> ...
            //    +-+    +-+
            //
            // After doing this, it looks like this:
            //
            //                      CurrentNursery
            //                            /
            //    +-+    +-+
            // nursery -> |B| -> ... -> |A| -> ...
            //    +-+    +-+
            //             |
            //             CurrentAlloc
            //
            // The point is to get the block out of the way of the
            // advancing CurrentNursery pointer, while keeping it
            // on the nursery list so we don't lose track of it.
            cap->r.rCurrentNursery->link = bd->link;
            if (bd->link != NULL) {
                bd->link->u.back = cap->r.rCurrentNursery;
            }
        }
        dbl_link_onto(bd, &cap->r.rNursery->blocks);
        cap->r.rCurrentAlloc = bd;
        IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
    }
    p = bd->free;
    bd->free += n;

    IF_DEBUG(sanity, ASSERT(*((StgWord8*)p) == 0xaa));
    return p;
}
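/* Worked example, illustrative only: why allocate() clamps n at
   max_words before computing req_blocks. BLOCK_ROUND_UP multiplies n by
   sizeof(W_) first, and for very large n that product wraps around
   (unsigned arithmetic is modular), which would make req_blocks come out
   tiny and let the maxHeapSize check pass when it should fail. The demo
   uses standalone stand-ins (DEMO_BLOCK_SIZE and a demo rounding macro)
   for the RTS's BLOCK_SIZE and BLOCK_ROUND_UP. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BLOCK_SIZE 4096u
#define DEMO_BLOCK_ROUND_UP(x) \
    (((x) + DEMO_BLOCK_SIZE - 1) & ~(uintptr_t)(DEMO_BLOCK_SIZE - 1))

int main(void)
{
    uintptr_t n = UINTPTR_MAX / 2;            /* absurdly large word count */
    uintptr_t bytes = n * sizeof(uintptr_t);  /* wraps around */
    uintptr_t blocks = DEMO_BLOCK_ROUND_UP(bytes) / DEMO_BLOCK_SIZE;

    /* This prints a tiny block count (0 here), not the astronomically
       large one actually requested. */
    printf("req_blocks without the guard: %zu\n", (size_t)blocks);

    /* The guard from allocate(): any n above max_words is treated as an
       overflow instead of being multiplied. */
    uintptr_t max_words =
        (UINTPTR_MAX & ~(uintptr_t)(DEMO_BLOCK_SIZE - 1)) / sizeof(uintptr_t);
    printf("n > max_words, so signal overflow: %d\n", n > max_words);
    return 0;
}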
static StgCompactNFDataBlock *
compactAllocateBlockInternal(Capability            *cap,
                             StgWord                aligned_size,
                             StgCompactNFDataBlock *first,
                             AllocateOp             operation)
{
    StgCompactNFDataBlock *self;
    bdescr *block, *head;
    uint32_t n_blocks;
    generation *g;

    n_blocks = aligned_size / BLOCK_SIZE;

    // Attempting to allocate an object larger than maxHeapSize
    // should definitely be disallowed.  (bug #1791)
    if ((RtsFlags.GcFlags.maxHeapSize > 0 &&
         n_blocks >= RtsFlags.GcFlags.maxHeapSize) ||
        n_blocks >= HS_INT32_MAX)   // avoid overflow when
                                    // calling allocGroup() below
    {
        reportHeapOverflow();
        // reportHeapOverflow() doesn't exit (see #2592), but we aren't
        // in a position to do a clean shutdown here: we
        // either have to allocate the memory or exit now.
        // Allocating the memory would be bad, because the user
        // has requested that we not exceed maxHeapSize, so we
        // just exit.
        stg_exit(EXIT_HEAPOVERFLOW);
    }

    // It is imperative that first is the first block in the compact
    // (or NULL if the compact does not exist yet)
    // because the evacuate code does not update the generation of
    // blocks other than the first (so we would get the statistics
    // wrong and crash in Sanity)
    if (first != NULL) {
        block = Bdescr((P_)first);
        g = block->gen;
    } else {
        g = g0;
    }

    ACQUIRE_SM_LOCK;
    block = allocGroup(n_blocks);
    switch (operation) {
    case ALLOCATE_NEW:
        ASSERT(first == NULL);
        ASSERT(g == g0);
        dbl_link_onto(block, &g0->compact_objects);
        g->n_compact_blocks += block->blocks;
        g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    case ALLOCATE_IMPORT_NEW:
        dbl_link_onto(block, &g0->compact_blocks_in_import);
        /* fallthrough */
    case ALLOCATE_IMPORT_APPEND:
        ASSERT(first == NULL);
        ASSERT(g == g0);
        g->n_compact_blocks_in_import += block->blocks;
        g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    case ALLOCATE_APPEND:
        g->n_compact_blocks += block->blocks;
        if (g == g0)
            g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    default:
#if defined(DEBUG)
        ASSERT(!"code should not be reached");
#else
        RTS_UNREACHABLE;
#endif
    }
    RELEASE_SM_LOCK;

    cap->total_allocated += aligned_size / sizeof(StgWord);

    self = (StgCompactNFDataBlock*) block->start;
    self->self = self;
    self->next = NULL;

    head = block;
    initBdescr(head, g, g);
    head->flags = BF_COMPACT;
    for (block = head + 1, n_blocks--; n_blocks > 0; block++, n_blocks--) {
        block->link = head;
        block->blocks = 0;
        block->flags = BF_COMPACT;
    }

    return self;
}
static void *
my_mmap (void *addr, W_ size, int operation)
{
    void *ret;

#if darwin_HOST_OS
    // Without MAP_FIXED, Apple's mmap ignores addr.
    // With MAP_FIXED, it overwrites already mapped regions, which is
    // also not what we want.
    // mmap(0, ... MAP_FIXED ...) is worst of all: it unmaps the program text
    // and replaces it with zeroes, causing instant death.
    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
    // Let's just use the underlying Mach microkernel calls directly;
    // they're much nicer.

    kern_return_t err = 0;
    ret = addr;
    if (operation & MEM_RESERVE)
    {
        if (addr) // try to allocate at the given address
            err = vm_allocate(mach_task_self(), (vm_address_t*) &ret,
                              size, FALSE);
        if (!addr || err) // otherwise try to allocate anywhere
            err = vm_allocate(mach_task_self(), (vm_address_t*) &ret,
                              size, TRUE);
    }

    if (err) {
        // don't know what the error codes mean exactly; assume it's
        // not our problem, though.
        errorBelch("memory allocation failed (requested %" FMT_Word " bytes)",
                   size);
        stg_exit(EXIT_FAILURE);
    }

    if (operation & MEM_COMMIT) {
        vm_protect(mach_task_self(), (vm_address_t)ret, size, FALSE,
                   VM_PROT_READ|VM_PROT_WRITE);
    }

#else

    int prot, flags;
    if (operation & MEM_COMMIT)
        prot = PROT_READ | PROT_WRITE;
    else
        prot = PROT_NONE;

    if (operation == MEM_RESERVE)
        flags = MAP_NORESERVE;
    else if (operation == MEM_COMMIT)
        flags = MAP_FIXED;
    else
        flags = 0;

#if defined(irix_HOST_OS)
    {
        if (operation & MEM_RESERVE) {
            int fd = open("/dev/zero", O_RDONLY);
            ret = mmap(addr, size, prot, flags | MAP_PRIVATE, fd, 0);
            close(fd);
        } else {
            ret = mmap(addr, size, prot, flags | MAP_PRIVATE, -1, 0);
        }
    }
#elif hpux_HOST_OS
    ret = mmap(addr, size, prot, flags | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#elif linux_HOST_OS
    ret = mmap(addr, size, prot, flags | MAP_ANON | MAP_PRIVATE, -1, 0);
    if (ret == (void *)-1 && errno == EPERM) {
        // Linux may return EPERM if it tried to give us
        // a chunk of address space below mmap_min_addr,
        // See Trac #7500.
        if (addr != 0 && (operation & MEM_RESERVE)) {
            // Try again with no hint address.
            // It's not clear that this can ever actually help,
            // but since our alternative is to abort, we may as well try.
            ret = mmap(0, size, prot, flags | MAP_ANON | MAP_PRIVATE, -1, 0);
        }
        if (ret == (void *)-1 && errno == EPERM) {
            // Linux is not willing to give us any mapping,
            // so treat this as an out-of-memory condition
            // (really out of virtual address space).
            errno = ENOMEM;
        }
    }
#else
    ret = mmap(addr, size, prot, flags | MAP_ANON | MAP_PRIVATE, -1, 0);
#endif
#endif

    if (ret == (void *)-1) {
        if (errno == ENOMEM ||
            (errno == EINVAL && sizeof(void*) == 4 && size >= 0xc0000000)) {
            // If we request more than 3Gig, then we get EINVAL
            // instead of ENOMEM (at least on Linux).
            errorBelch("out of memory (requested %" FMT_Word " bytes)", size);
            stg_exit(EXIT_FAILURE);
        } else {
            barf("getMBlock: mmap: %s", strerror(errno));
        }
    }

    return ret;
}
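/* Illustrative sketch, not RTS code: the POSIX analogue of the
   MEM_RESERVE/MEM_COMMIT split handled by my_mmap. Address space is
   "reserved" with PROT_NONE plus MAP_NORESERVE, and a sub-range is later
   "committed" by remapping it in place with MAP_FIXED; MAP_FIXED is safe
   here only because it overwrites our own reservation. The function name
   is hypothetical; MAP_NORESERVE is not universal, and MAP_ANON may be
   spelled MAP_ANONYMOUS on some systems. */
#include <sys/mman.h>
#include <stdio.h>

static void reserveThenCommitDemo(void)
{
    size_t size = 1024 * 1024;  /* 1 MiB */

    /* Reserve only: no access rights, no swap accounting. */
    void *base = mmap(NULL, size, PROT_NONE,
                      MAP_NORESERVE | MAP_ANON | MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) {
        perror("reserve");
        return;
    }

    /* Commit the first 64 KiB of the reservation. */
    if (mmap(base, 64 * 1024, PROT_READ | PROT_WRITE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) == MAP_FAILED) {
        perror("commit");
    }

    munmap(base, size);
}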
void
vbarf(const char *s, va_list ap)
{
    (*fatalInternalErrorFn)(s, ap);
    stg_exit(EXIT_INTERNAL_ERROR); // just in case fatalInternalErrorFn() returns
}
void setupRtsFlags(int *argc, char *argv[], int *rts_argc, char *rts_argv[])
{
    rtsBool error = rtsFalse;
    I_ mode;
    I_ arg, total_arg;

    setProgName (argv);
    total_arg = *argc;
    arg = 1;

    *argc = 1;
    *rts_argc = 0;

    // process arguments from the ghc_rts_opts global variable first.
    // (arguments from the GHCRTS environment variable and the command
    // line override these).
    {
        if (ghc_rts_opts != NULL) {
            splitRtsFlags(ghc_rts_opts, rts_argc, rts_argv);
        }
    }

    // process arguments from the GHCRTS environment variable next
    // (arguments from the command line override these).
    {
        char *ghc_rts = getenv("GHCRTS");

        if (ghc_rts != NULL) {
            if (rtsOptsEnabled != rtsOptsNone) {
                splitRtsFlags(ghc_rts, rts_argc, rts_argv);
            } else {
                errorBelch("Warning: Ignoring GHCRTS variable as RTS options "
                           "are disabled.\n  Link with -rtsopts to enable them.");
                // We don't actually exit, just warn
            }
        }
    }

    // Split arguments (argv) into PGM (argv) and RTS (rts_argv) parts
    //   argv[0] must be PGM argument -- leave in argv
    for (mode = PGM; arg < total_arg; arg++) {
        // The '--RTS' argument disables all future +RTS ... -RTS processing.
        if (strequal("--RTS", argv[arg])) {
            arg++;
            break;
        }
        // The '--' argument is passed through to the program, but
        // disables all further +RTS ... -RTS processing.
        else if (strequal("--", argv[arg])) {
            break;
        }
        else if (strequal("+RTS", argv[arg])) {
            if (rtsOptsEnabled != rtsOptsNone) {
                mode = RTS;
            } else {
                errorBelch("RTS options are disabled. "
                           "Link with -rtsopts to enable them.");
                stg_exit(EXIT_FAILURE);
            }
        }
        else if (strequal("-RTS", argv[arg])) {
            mode = PGM;
        }
        else if (mode == RTS && *rts_argc < MAX_RTS_ARGS-1) {
            rts_argv[(*rts_argc)++] = argv[arg];
        }
        else if (mode == PGM) {
            argv[(*argc)++] = argv[arg];
        }
        else {
            barf("too many RTS arguments (max %d)", MAX_RTS_ARGS-1);
        }
    }
    // process remaining program arguments
    for (; arg < total_arg; arg++) {
        argv[(*argc)++] = argv[arg];
    }
    argv[*argc] = (char *) 0;
    rts_argv[*rts_argc] = (char *) 0;

    // Process RTS (rts_argv) part: mainly to determine statsfile
    for (arg = 0; arg < *rts_argc; arg++) {
        if (rts_argv[arg][0] != '-') {
            fflush(stdout);
            errorBelch("unexpected RTS argument: %s", rts_argv[arg]);
            error = rtsTrue;

        } else {

            switch(rts_argv[arg][1]) {
            case '-':
                if (strequal("info", &rts_argv[arg][2])) {
                    printRtsInfo();
                    stg_exit(0);
                }
                break;
            default:
                break;
            }

            if (rtsOptsEnabled != rtsOptsAll) {
                errorBelch("Most RTS options are disabled. "
                           "Link with -rtsopts to enable them.");
                stg_exit(EXIT_FAILURE);
            }

            switch(rts_argv[arg][1]) {

            /* process: general args, then PROFILING-only ones, then
               CONCURRENT-only, TICKY-only (same order as defined in
               RtsFlags.lh); within those groups, mostly in
               case-insensitive alphabetical order. Final group is x*,
               which allows for more options. */

#ifdef TICKY_TICKY
# define TICKY_BUILD_ONLY(x) x
#else
# define TICKY_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -ticky", rts_argv[arg]); \
error = rtsTrue;
#endif

#ifdef PROFILING
# define PROFILING_BUILD_ONLY(x) x
#else
# define PROFILING_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -prof", rts_argv[arg]); \
error = rtsTrue;
#endif

#ifdef TRACING
# define TRACING_BUILD_ONLY(x) x
#else
# define TRACING_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -eventlog or -debug", rts_argv[arg]); \
error = rtsTrue;
#endif

#ifdef THREADED_RTS
# define THREADED_BUILD_ONLY(x) x
#else
# define THREADED_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -threaded", rts_argv[arg]); \
error = rtsTrue;
#endif

#ifdef DEBUG
# define DEBUG_BUILD_ONLY(x) x
#else
# define DEBUG_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -debug", rts_argv[arg]); \
error = rtsTrue;
#endif

            /* =========== GENERAL ========================== */
            case '?':
                error = rtsTrue;
                break;

            /* This isn't going to allow us to keep related options
               together as we add more --* flags. We really need a
               proper options parser. */
            case '-':
                if (strequal("install-signal-handlers=yes", &rts_argv[arg][2])) {
                    RtsFlags.MiscFlags.install_signal_handlers = rtsTrue;
                }
                else if (strequal("install-signal-handlers=no", &rts_argv[arg][2])) {
                    RtsFlags.MiscFlags.install_signal_handlers = rtsFalse;
                }
                else if (strequal("machine-readable", &rts_argv[arg][2])) {
                    RtsFlags.MiscFlags.machineReadable = rtsTrue;
                }
                else if (strequal("info", &rts_argv[arg][2])) {
                    printRtsInfo();
                    stg_exit(0);
                }
                else {
                    errorBelch("unknown RTS option: %s", rts_argv[arg]);
                    error = rtsTrue;
                }
                break;

            case 'A':
                RtsFlags.GcFlags.minAllocAreaSize =
                    decodeSize(rts_argv[arg], 2, BLOCK_SIZE, HS_INT_MAX)
                        / BLOCK_SIZE;
                break;

#ifdef USE_PAPI
            case 'a':
                switch(rts_argv[arg][2]) {
                case '1':
                    RtsFlags.PapiFlags.eventType = PAPI_FLAG_CACHE_L1;
                    break;
                case '2':
                    RtsFlags.PapiFlags.eventType = PAPI_FLAG_CACHE_L2;
                    break;
                case 'b':
                    RtsFlags.PapiFlags.eventType = PAPI_FLAG_BRANCH;
                    break;
                case 's':
                    RtsFlags.PapiFlags.eventType = PAPI_FLAG_STALLS;
                    break;
                case 'e':
                    RtsFlags.PapiFlags.eventType = PAPI_FLAG_CB_EVENTS;
                    break;
                case '+':
                case '#':
                    if (RtsFlags.PapiFlags.numUserEvents >= MAX_PAPI_USER_EVENTS) {
                        errorBelch("maximum number of PAPI events reached");
                        stg_exit(EXIT_FAILURE);
                    }
                    nat eventNum  = RtsFlags.PapiFlags.numUserEvents++;
                    char kind     = rts_argv[arg][2];
                    nat eventKind = kind == '+' ? PAPI_PRESET_EVENT_KIND
                                                : PAPI_NATIVE_EVENT_KIND;

                    RtsFlags.PapiFlags.userEvents[eventNum] = rts_argv[arg] + 3;
                    RtsFlags.PapiFlags.eventType = PAPI_USER_EVENTS;
                    RtsFlags.PapiFlags.userEventsKind[eventNum] = eventKind;
                    break;
                default:
                    bad_option( rts_argv[arg] );
                }
                break;
#endif

            case 'B':
                RtsFlags.GcFlags.ringBell = rtsTrue;
                break;

            case 'c':
                if (rts_argv[arg][2] != '\0') {
                    RtsFlags.GcFlags.compactThreshold = atof(rts_argv[arg]+2);
                } else {
                    RtsFlags.GcFlags.compact = rtsTrue;
                }
                break;

            case 'w':
                RtsFlags.GcFlags.sweep = rtsTrue;
                break;

            case 'F':
                RtsFlags.GcFlags.oldGenFactor = atof(rts_argv[arg]+2);
                if (RtsFlags.GcFlags.oldGenFactor < 0)
                    bad_option( rts_argv[arg] );
                break;

            case 'D':
                DEBUG_BUILD_ONLY(
                {
                    char *c;
                    for (c = rts_argv[arg] + 2; *c != '\0'; c++) {
                        switch (*c) {
                        case 's':
                            RtsFlags.DebugFlags.scheduler = rtsTrue;
                            break;
                        case 'i':
                            RtsFlags.DebugFlags.interpreter = rtsTrue;
                            break;
                        case 'w':
                            RtsFlags.DebugFlags.weak = rtsTrue;
                            break;
                        case 'G':
                            RtsFlags.DebugFlags.gccafs = rtsTrue;
                            break;
                        case 'g':
                            RtsFlags.DebugFlags.gc = rtsTrue;
                            break;
                        case 'b':
                            RtsFlags.DebugFlags.block_alloc = rtsTrue;
                            break;
                        case 'S':
                            RtsFlags.DebugFlags.sanity = rtsTrue;
                            break;
                        case 't':
                            RtsFlags.DebugFlags.stable = rtsTrue;
                            break;
                        case 'p':
                            RtsFlags.DebugFlags.prof = rtsTrue;
                            break;
                        case 'l':
                            RtsFlags.DebugFlags.linker = rtsTrue;
                            break;
                        case 'a':
                            RtsFlags.DebugFlags.apply = rtsTrue;
                            break;
                        case 'm':
                            RtsFlags.DebugFlags.stm = rtsTrue;
                            break;
                        case 'z':
                            RtsFlags.DebugFlags.squeeze = rtsTrue;
                            break;
                        case 'c':
                            RtsFlags.DebugFlags.hpc = rtsTrue;
                            break;
                        case 'r':
                            RtsFlags.DebugFlags.sparks = rtsTrue;
                            break;
                        default:
                            bad_option( rts_argv[arg] );
                        }
                    }
                    // -Dx also turns on -v. Use -l to direct trace
                    // events to the .eventlog file instead.
                    RtsFlags.TraceFlags.tracing = TRACE_STDERR;
                })
                break;

            case 'K':
                RtsFlags.GcFlags.maxStkSize =
                    decodeSize(rts_argv[arg], 2, sizeof(W_), HS_WORD_MAX)
                        / sizeof(W_);
                break;

            case 'k':
                switch(rts_argv[arg][2]) {
                case 'c':
                    RtsFlags.GcFlags.stkChunkSize =
                        decodeSize(rts_argv[arg], 3, sizeof(W_), HS_WORD_MAX)
                            / sizeof(W_);
                    break;
                case 'b':
                    RtsFlags.GcFlags.stkChunkBufferSize =
                        decodeSize(rts_argv[arg], 3, sizeof(W_), HS_WORD_MAX)
                            / sizeof(W_);
                    break;
                case 'i':
                    RtsFlags.GcFlags.initialStkSize =
                        decodeSize(rts_argv[arg], 3, sizeof(W_), HS_WORD_MAX)
                            / sizeof(W_);
                    break;
                default:
                    RtsFlags.GcFlags.initialStkSize =
                        decodeSize(rts_argv[arg], 2, sizeof(W_), HS_WORD_MAX)
                            / sizeof(W_);
                    break;
                }
                break;

            case 'M':
                RtsFlags.GcFlags.maxHeapSize =
                    decodeSize(rts_argv[arg], 2, BLOCK_SIZE, HS_WORD_MAX)
                        / BLOCK_SIZE;
                /* user gives size in *bytes* but "maxHeapSize" is in *blocks* */
                break;

            case 'm':
                RtsFlags.GcFlags.pcFreeHeap = atof(rts_argv[arg]+2);
                if (RtsFlags.GcFlags.pcFreeHeap < 0 ||
                    RtsFlags.GcFlags.pcFreeHeap > 100)
                    bad_option( rts_argv[arg] );
                break;

            case 'G':
                RtsFlags.GcFlags.generations =
                    decodeSize(rts_argv[arg], 2, 1, HS_INT_MAX);
                break;

            case 'H':
                if (rts_argv[arg][2] == '\0') {
                    RtsFlags.GcFlags.heapSizeSuggestionAuto = rtsTrue;
                } else {
                    RtsFlags.GcFlags.heapSizeSuggestion = (nat)
                        (decodeSize(rts_argv[arg], 2, BLOCK_SIZE, HS_WORD_MAX)
                            / BLOCK_SIZE);
                }
                break;

#ifdef RTS_GTK_FRONTPANEL
            case 'f':
                RtsFlags.GcFlags.frontpanel = rtsTrue;
                break;
#endif

            case 'I': /* idle GC delay */
                if (rts_argv[arg][2] == '\0') {
                    /* use default */
                } else {
                    I_ cst; /* tmp */

                    /* Convert to millisecs */
                    cst = (I_) ((atof(rts_argv[arg]+2) * 1000));
                    RtsFlags.GcFlags.idleGCDelayTime = cst;
                }
                break;

            case 'S':
                RtsFlags.GcFlags.giveStats = VERBOSE_GC_STATS;
                goto stats;

            case 's':
                RtsFlags.GcFlags.giveStats = SUMMARY_GC_STATS;
                goto stats;

            case 't':
                RtsFlags.GcFlags.giveStats = ONELINE_GC_STATS;
                goto stats;

            stats:
                {
                    int r;
                    r = open_stats_file(arg, *argc, argv,
                                        *rts_argc, rts_argv, NULL,
                                        &RtsFlags.GcFlags.statsFile);
                    if (r == -1) { error = rtsTrue; }
                }
                break;

            case 'Z':
                RtsFlags.GcFlags.squeezeUpdFrames = rtsFalse;
                break;

            /* =========== PROFILING ========================== */

            case 'P': /* detailed cost centre profiling (time/alloc) */
            case 'p': /* cost centre profiling (time/alloc) */
                PROFILING_BUILD_ONLY(
                switch (rts_argv[arg][2]) {
                case 'x':
                    RtsFlags.CcFlags.doCostCentres = COST_CENTRES_XML;
                    break;
                case 'a':
                    RtsFlags.CcFlags.doCostCentres = COST_CENTRES_ALL;
                    break;
                default:
                    if (rts_argv[arg][1] == 'P') {
                        RtsFlags.CcFlags.doCostCentres = COST_CENTRES_VERBOSE;
                    } else {
                        RtsFlags.CcFlags.doCostCentres = COST_CENTRES_SUMMARY;
                    }
                    break;
                }
                ) break;

            case 'R':
                PROFILING_BUILD_ONLY(
                    RtsFlags.ProfFlags.maxRetainerSetSize = atof(rts_argv[arg]+2);
                ) break;

            case 'L':
                PROFILING_BUILD_ONLY(
                    RtsFlags.ProfFlags.ccsLength = atof(rts_argv[arg]+2);
                    if (RtsFlags.ProfFlags.ccsLength <= 0) {
                        bad_option(rts_argv[arg]);
                    }
                ) break;
void
osReleaseFreeMemory(void)
{
    alloc_rec *prev_a, *a;
    alloc_rec head_a;
    block_rec *prev_fb, *fb;
    block_rec head_fb;
    char *a_end, *fb_end;

    /* go through allocs and free_blocks in lockstep, looking for allocs
       that are completely free, and uncommit them */

    head_a.base = 0;
    head_a.size = 0;
    head_a.next = allocs;
    head_fb.base = 0;
    head_fb.size = 0;
    head_fb.next = free_blocks;
    prev_a = &head_a;
    a = allocs;
    prev_fb = &head_fb;
    fb = free_blocks;

    while (a != NULL) {
        a_end = a->base + a->size;
        /* If a is freeable then there is a single freeblock in fb that
           covers it. The end of this free block must be >= the end of
           a, so skip anything in fb that ends before a. */
        while (fb != NULL && fb->base + fb->size < a_end) {
            prev_fb = fb;
            fb = fb->next;
        }
        if (fb == NULL) {
            /* If we have nothing left in fb, then neither a nor
               anything later in the list is freeable, so we are done. */
            break;
        } else {
            fb_end = fb->base + fb->size;
            /* We have a candidate fb. But does it really cover a? */
            if (fb->base <= a->base) {
                /* Yes, the alloc is within the free block. Now we need
                   to know if it sticks out at either end. */
                if (fb_end == a_end) {
                    if (fb->base == a->base) {
                        /* fb and a are identical, so just free fb */
                        prev_fb->next = fb->next;
                        stgFree(fb);
                        fb = prev_fb->next;
                    } else {
                        /* fb begins earlier, so truncate it to not
                           include a */
                        fb->size = a->base - fb->base;
                    }
                } else {
                    /* fb ends later, so we'll make fb just be the part
                       after a. First though, if it also starts earlier,
                       we make a new free block record for the before
                       bit. */
                    if (fb->base != a->base) {
                        block_rec *new_fb;

                        new_fb = (block_rec *)stgMallocBytes(
                                     sizeof(block_rec), "osReleaseFreeMemory");
                        new_fb->base = fb->base;
                        new_fb->size = a->base - fb->base;
                        new_fb->next = fb;
                        prev_fb->next = new_fb;
                    }
                    fb->size = fb_end - a_end;
                    fb->base = a_end;
                }
                /* Now we can free the alloc */
                prev_a->next = a->next;
                if (!VirtualFree((void *)a->base, 0, MEM_RELEASE)) {
                    sysErrorBelch("freeAllMBlocks: VirtualFree MEM_RELEASE "
                                  "failed");
                    stg_exit(EXIT_FAILURE);
                }
                stgFree(a);
                a = prev_a->next;
            } else {
                /* Otherwise this alloc is not freeable, so go on to
                   the next one */
                prev_a = a;
                a = a->next;
            }
        }
    }

    allocs = head_a.next;
    free_blocks = head_fb.next;
}
static void
fdOutOfRange (int fd)
{
    errorBelch("file descriptor %d out of range for select (0--%d).\n"
               "Recompile with -threaded to work around this.",
               fd, (int)FD_SETSIZE);
    stg_exit(EXIT_FAILURE);
}
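/* Illustrative sketch, not the RTS's actual select loop: the kind of
   guard that calls fdOutOfRange. An fd_set is a fixed-size bitmap, so
   FD_SET on a descriptor >= FD_SETSIZE would write out of bounds; the
   check has to happen before the descriptor is added. The function name
   is hypothetical. */
#include <sys/select.h>

static void watchFdDemo(int fd, fd_set *readfds)
{
    if (fd >= (int)FD_SETSIZE) {
        fdOutOfRange(fd);   /* does not return */
    }
    FD_SET(fd, readfds);
}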