void initStats1 (void) { nat i; if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) { statsPrintf(" Alloc Copied Live GC GC TOT TOT Page Flts\n"); statsPrintf(" bytes bytes bytes user elap user elap\n"); } GC_coll_cpu = (Time *)stgMallocBytes( sizeof(Time)*RtsFlags.GcFlags.generations, "initStats"); GC_coll_elapsed = (Time *)stgMallocBytes( sizeof(Time)*RtsFlags.GcFlags.generations, "initStats"); GC_coll_max_pause = (Time *)stgMallocBytes( sizeof(Time)*RtsFlags.GcFlags.generations, "initStats"); for (i = 0; i < RtsFlags.GcFlags.generations; i++) { GC_coll_cpu[i] = 0; GC_coll_elapsed[i] = 0; GC_coll_max_pause[i] = 0; } }
/* Grow the capabilities array from 'from' entries to 'to' entries.
 *
 * Existing Capability structures are carried over by pointer; the new
 * slots get freshly allocated, initialised Capabilities.  Only the old
 * pointer array (not the Capabilities it referenced) is freed.
 */
void moreCapabilities (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;
    Capability **old_capabilities = capabilities;

    capabilities = stgMallocBytes(to * sizeof(Capability*), "moreCapabilities");

    if (to == 1) {
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
        capabilities[0] = &MainCapability;
        initCapability(&MainCapability, 0);
    }
    else
    {
        // BUG FIX: this loop must be the 'else' of the to == 1 case
        // above; previously it ran unconditionally and immediately
        // overwrote capabilities[0] with a freshly-allocated
        // Capability, discarding &MainCapability (and leaving
        // MainCapability uninitialised).
        for (i = 0; i < to; i++) {
            if (i < from) {
                capabilities[i] = old_capabilities[i];
            } else {
                capabilities[i] = stgMallocBytes(sizeof(Capability),
                                                 "moreCapabilities");
                initCapability(capabilities[i], i);
            }
        }
    }

    debugTrace(DEBUG_sched, "allocated %d more capabilities", to - from);

    if (old_capabilities != NULL) {
        stgFree(old_capabilities);
    }
#endif
}
void startupHpc(void) { char *hpc_tixdir; char *hpc_tixfile; if (moduleHash == NULL) { // no modules were registered with hs_hpc_module, so don't bother // creating the .tix file. return; } if (hpc_inited != 0) { return; } hpc_inited = 1; hpc_pid = getpid(); hpc_tixdir = getenv("HPCTIXDIR"); hpc_tixfile = getenv("HPCTIXFILE"); debugTrace(DEBUG_hpc,"startupHpc"); /* XXX Check results of mallocs/strdups, and check we are requesting enough bytes */ if (hpc_tixfile != NULL) { tixFilename = strdup(hpc_tixfile); } else if (hpc_tixdir != NULL) { /* Make sure the directory is present; * conditional code for mkdir lifted from lndir.c */ #ifdef WIN32 mkdir(hpc_tixdir); #else mkdir(hpc_tixdir,0777); #endif /* Then, try open the file */ tixFilename = (char *) stgMallocBytes(strlen(hpc_tixdir) + strlen(prog_name) + 12, "Hpc.startupHpc"); sprintf(tixFilename,"%s/%s-%d.tix",hpc_tixdir,prog_name,(int)hpc_pid); } else { tixFilename = (char *) stgMallocBytes(strlen(prog_name) + 6, "Hpc.startupHpc"); sprintf(tixFilename, "%s.tix", prog_name); } if (init_open(fopen(tixFilename,"r"))) { readTix(); } }
/* Initialise Capability number i.
 *
 * Sets up the (empty) run queue, the THREADED_RTS bookkeeping fields,
 * the entry points invoked from compiled Haskell code (cap->f), the
 * per-generation mutable lists, and the STM free lists.
 */
static void initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;   // not currently running Haskell code

    // empty run queue
    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task = NULL; // indicates cap is free
    cap->spare_workers = NULL;
    cap->n_spare_workers = 0;
    cap->suspended_ccalls = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->inbox = (Message*)END_TSO_QUEUE;
    // spark statistics counters
    cap->sparks_created = 0;
    cap->sparks_dud = 0;
    cap->sparks_converted = 0;
    cap->sparks_gcd = 0;
    cap->sparks_fizzled = 0;
#endif

    // entry points called directly from generated code
    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1 = (StgFunPtr)__stg_gc_enter_1;
    cap->f.stgGCFun = (StgFunPtr)__stg_gc_fun;

    // one mutable list per generation
    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                          RtsFlags.GcFlags.generations,
                                          "initCapability");

    // NOTE(review): only mut_lists is cleared here; saved_mut_lists
    // entries are left uninitialised — presumably always written before
    // being read during GC.  Confirm against the GC code.
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    // STM free lists and counters
    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
    cap->pinned_object_block = NULL;
}
/* Build a foreign-export "adjustor" with libffi: a fresh machine-code
 * entry point that, when called with the given calling convention,
 * invokes wptr (passing hptr as user data).
 *
 * typeString encodes the signature: typeString[0] is the result type,
 * the remaining characters are the argument types (decoded by
 * char_to_ffi_type).
 *
 * Returns the executable code pointer.  The cif, arg_types and closure
 * allocations deliberately live for the lifetime of the adjustor.
 */
void*
createAdjustor (int cconv,
                StgStablePtr hptr,
                StgFunPtr wptr,
                char *typeString)
{
    ffi_cif *cif;
    ffi_type **arg_types;
    nat n_args, i;
    ffi_type *result_type;
    ffi_closure *cl;
    int r, abi;
    void *code;

    // one char for the result type, the rest are arguments
    n_args = strlen(typeString) - 1;
    cif = stgMallocBytes(sizeof(ffi_cif), "createAdjustor");
    arg_types = stgMallocBytes(n_args * sizeof(ffi_type*), "createAdjustor");

    result_type = char_to_ffi_type(typeString[0]);
    for (i=0; i < n_args; i++) {
        arg_types[i] = char_to_ffi_type(typeString[i+1]);
    }
    switch (cconv) {
#if defined(mingw32_HOST_OS) && defined(i386_HOST_ARCH)
    case 0: /* stdcall */
        abi = FFI_STDCALL;
        break;
#endif
    case 1: /* ccall */
        abi = FFI_DEFAULT_ABI;
        break;
    default:
        // barf() does not return, so abi cannot be read uninitialised
        barf("createAdjustor: convention %d not supported on this platform", cconv);
    }

    r = ffi_prep_cif(cif, abi, n_args, result_type, arg_types);
    if (r != FFI_OK) barf("ffi_prep_cif failed: %d", r);

    // allocateExec gives a writable closure plus its executable alias
    cl = allocateExec(sizeof(ffi_closure), &code);
    if (cl == NULL) {
        barf("createAdjustor: failed to allocate memory");
    }

    r = ffi_prep_closure(cl, cif, (void*)wptr, hptr/*userdata*/);
    if (r != FFI_OK) barf("ffi_prep_closure failed: %d", r);

    return (void*)code;
}
/* Extend the storage-manager state when the number of capabilities
 * grows from 'from' to 'to': resize the nurseries array, re-link each
 * Capability to its nursery, allocate nursery blocks, give each new
 * capability a block per mutable list, and tell the GC about the new
 * threads.
 */
void storageAddCapabilities (nat from, nat to)
{
    nat n, g, i;

    // grow (or, on first call, create) the per-capability nursery array
    if (from > 0) {
        nurseries = stgReallocBytes(nurseries, to * sizeof(struct nursery_),
                                    "storageAddCapabilities");
    } else {
        nurseries = stgMallocBytes(to * sizeof(struct nursery_),
                                   "storageAddCapabilities");
    }

    // we've moved the nurseries, so we have to update the rNursery
    // pointers from the Capabilities.
    for (i = 0; i < to; i++) {
        capabilities[i].r.rNursery = &nurseries[i];
    }

    /* The allocation area. Policy: keep the allocation area
     * small to begin with, even if we have a large suggested heap
     * size. Reason: we're going to do a major collection first, and we
     * don't want it to be a big one. This vague idea is borne out by
     * rigorous experimental evidence.
     */
    allocNurseries(from, to);

    // allocate a block for each mut list
    // (g starts at 1 — generation 0 apparently keeps no mut list)
    for (n = from; n < to; n++) {
        for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
            capabilities[n].mut_lists[g] = allocBlock();
        }
    }

    initGcThreads(from, to);
}
/* Double the stable pointer table.
 *
 * The old table is copied into a larger allocation and then retained
 * (not freed) so that concurrent readers holding a pointer into the old
 * table stay valid; the swap of the global table pointer is the only
 * publication step.
 */
static void enlargeStablePtrTable(void)
{
    uint32_t old_SPT_size = SPT_size;
    spEntry *new_stable_ptr_table;

    // 2nd and subsequent times
    SPT_size *= 2;

    /* We temporarily retain the old version instead of freeing it; see Note
     * [Enlarging the stable pointer table].
     */
    new_stable_ptr_table =
        stgMallocBytes(SPT_size * sizeof *stable_ptr_table,
                       "enlargeStablePtrTable");
    memcpy(new_stable_ptr_table,
           stable_ptr_table,
           old_SPT_size * sizeof *stable_ptr_table);
    ASSERT(n_old_SPTs < MAX_N_OLD_SPTS);
    old_SPTs[n_old_SPTs++] = stable_ptr_table;

    /* When using the threaded RTS, the update of stable_ptr_table is assumed to
     * be atomic, so that another thread simultaneously dereferencing a stable
     * pointer will always read a valid address.
     */
    stable_ptr_table = new_stable_ptr_table;

    // thread the newly-added upper half onto the free list
    initSpEntryFreeList(stable_ptr_table + old_SPT_size, old_SPT_size, NULL);
}
/* Split the string s into whitespace-separated tokens and append each
 * token (as a freshly-allocated copy) to rts_argv, bumping *rts_argc.
 * Aborts via barf() if more than MAX_RTS_ARGS-1 arguments accumulate.
 */
static void splitRtsFlags(char *s, int *rts_argc, char *rts_argv[])
{
    char *c1, *c2;

    c1 = s;
    do {
        /* BUG FIX: cast to unsigned char before isspace() — passing a
         * plain char with a negative value is undefined behaviour
         * (CERT STR37-C). */
        while (isspace((unsigned char)*c1)) { c1++; };
        c2 = c1;
        while (!isspace((unsigned char)*c2) && *c2 != '\0') { c2++; };

        if (c1 == c2) { break; }    // only trailing whitespace left

        if (*rts_argc < MAX_RTS_ARGS-1) {
            // copy the token [c1,c2) and NUL-terminate it
            s = stgMallocBytes(c2-c1+1, "RtsFlags.c:splitRtsFlags()");
            strncpy(s, c1, c2-c1);
            s[c2-c1] = '\0';
            rts_argv[(*rts_argc)++] = s;
        } else {
            barf("too many RTS arguments (max %d)", MAX_RTS_ARGS-1);
        }

        c1 = c2;
    } while (*c1 != '\0');
}
/* Windows does it differently, though arguably the most sanely.
 * GetEnvironmentStrings() returns a pointer to a block of
 * environment vars with a double null terminator:
 *   Var1=Value1\0
 *   Var2=Value2\0
 *   ...
 *   VarN=ValueN\0\0
 * But because everyone else (ie POSIX) uses a vector of strings, we convert
 * to that format. Fortunately this is just a matter of making an array of
 * offsets into the environment block.
 *
 * Note that we have to call FreeEnvironmentStrings() at the end.
 */
void getProgEnvv(int *out_envc, char **out_envv[])
{
    int envc, i;
    char *env;
    char *envp;
    char **envv;

    /* For now, use the 'A'nsi not 'W'ide variant.
       Note: corresponding Free below must use the same 'A'/'W' variant. */
    env = GetEnvironmentStringsA();

    // first pass: count the "Var=Value" entries
    envc = 0;
    for (envp = env; *envp != 0; envp += strlen(envp) + 1) {
        envc++;
    }

    // envc entries plus one extra slot (see below)
    envv = stgMallocBytes(sizeof(char*) * (envc+1), "getProgEnvv");

    // second pass: record a pointer to each entry
    i = 0;
    for (envp = env; *envp != 0; envp += strlen(envp) + 1) {
        envv[i] = envp;
        i++;
    }
    /* stash whole env in last+1 entry — freeProgEnvv presumably uses it
       to call FreeEnvironmentStringsA on the original block */
    envv[envc] = env;

    *out_envc = envc;
    *out_envv = envv;
}
/* Register a module's tick-box array with the HPC machinery.
 *
 * First registration creates an entry (and zeroes tixArr); a repeat
 * registration (e.g. after reading a .tix file) validates the tick
 * count and hash, copies the saved counts into the real tixArr, and
 * re-points the entry at the live modName/tixArr.
 */
void
hs_hpc_module(char *modName,
              StgWord32 modCount,
              StgWord32 modHashNo,
              StgWord64 *tixArr)
{
    HpcModuleInfo *tmpModule;
    uint32_t i;

    if (moduleHash == NULL) {
        moduleHash = allocStrHashTable();
    }

    tmpModule = lookupHashTable(moduleHash, (StgWord)modName);
    if (tmpModule == NULL)
    {
        // Did not find entry so add one on.
        tmpModule = (HpcModuleInfo *)stgMallocBytes(sizeof(HpcModuleInfo),
                                                    "Hpc.hs_hpc_module");
        tmpModule->modName = modName;
        tmpModule->tickCount = modCount;
        tmpModule->hashNo = modHashNo;

        tmpModule->tixArr = tixArr;
        for(i=0;i < modCount;i++) {
            tixArr[i] = 0;
        }
        tmpModule->next = modules;
        tmpModule->from_file = false;
        modules = tmpModule;
        insertHashTable(moduleHash, (StgWord)modName, tmpModule);
    }
    else
    {
        if (tmpModule->tickCount != modCount) {
            failure("inconsistent number of tick boxes");
        }
        ASSERT(tmpModule->tixArr != 0);
        if (tmpModule->hashNo != modHashNo) {
            fprintf(stderr,"in module '%s'\n",tmpModule->modName);
            failure("module mismatch with .tix/.mix file hash number");
            if (tixFilename != NULL) {
                fprintf(stderr,"(perhaps remove %s ?)\n",tixFilename);
            }
            stg_exit(EXIT_FAILURE);
        }
        // The existing tixArr was made up when we read the .tix file,
        // whereas this is the real tixArr, so copy the data from the
        // .tix into the real tixArr.
        for(i=0;i < modCount;i++) {
            tixArr[i] = tmpModule->tixArr[i];
        }

        if (tmpModule->from_file) {
            stgFree(tmpModule->modName);
            stgFree(tmpModule->tixArr);
        }
        // BUG FIX: re-point the entry at the live data.  Previously the
        // freed modName/tixArr pointers were left in tmpModule, so any
        // later use (e.g. the fprintf of tmpModule->modName above, or
        // writing the .tix file at shutdown) dereferenced freed memory.
        tmpModule->modName = modName;
        tmpModule->tixArr = tixArr;
        tmpModule->from_file = false;
    }
}
/* Print every retainer set that was assigned an id (id < 0 marks sets
 * that had a non-zero cost at some point) to prof_file, sorted by
 * descending id via an in-place bubble sort.
 */
void
outputAllRetainerSet(FILE *prof_file)
{
    nat i, j;
    nat numSet;
    RetainerSet *rs, **rsArray, *tmp;

    // find out the number of retainer sets which have had a non-zero cost at
    // least once during retainer profiling
    numSet = 0;
    for (i = 0; i < HASH_TABLE_SIZE; i++)
        for (rs = hashTable[i]; rs != NULL; rs = rs->link) {
            if (rs->id < 0) numSet++;
        }

    if (numSet == 0)      // retainer profiling was not done at all.
        return;

    // allocate memory
    rsArray = stgMallocBytes(numSet * sizeof(RetainerSet *),
                             "outputAllRetainerSet()");

    // prepare for sorting: collect every marked set into rsArray
    j = 0;
    for (i = 0; i < HASH_TABLE_SIZE; i++)
        for (rs = hashTable[i]; rs != NULL; rs = rs->link) {
            if (rs->id < 0) {
                rsArray[j] = rs;
                j++;
            }
        }

    ASSERT(j == numSet);

    // sort rsArray[] according to the id of each retainer set
    // (bubble sort; ids are negative, so this orders by descending id,
    // i.e. ascending |id|)
    for (i = numSet - 1; i > 0; i--) {
        for (j = 0; j <= i - 1; j++) {
            // if (-(rsArray[j]->id) < -(rsArray[j + 1]->id))
            if (rsArray[j]->id < rsArray[j + 1]->id) {
                tmp = rsArray[j];
                rsArray[j] = rsArray[j + 1];
                rsArray[j + 1] = tmp;
            }
        }
    }

    fprintf(prof_file, "\nRetainer sets created during profiling:\n");
    for (i = 0;i < numSet; i++) {
        // NOTE(review): %u receives a negated (int) id — strictly a
        // format/argument type mismatch; consider casting to unsigned.
        fprintf(prof_file, "SET %u = {", -(rsArray[i]->id));
        // NOTE(review): the loops below assume num >= 1 (the last
        // element is printed unconditionally); confirm empty sets
        // cannot reach this point.
        for (j = 0; j < rsArray[i]->num - 1; j++) {
            printRetainer(prof_file, rsArray[i]->element[j]);
            fprintf(prof_file, ", ");
        }
        printRetainer(prof_file, rsArray[i]->element[j]);
        fprintf(prof_file, "}\n");
    }

    stgFree(rsArray);
}
/* Iterate over the frames of a backtrace from the outermost frame
 * inwards, invoking cb on each frame address.  Iteration stops early if
 * cb returns non-zero; that value is returned (0 otherwise).
 *
 * The chunks form a list from innermost to outermost, so we first
 * collect them into an array and then walk it backwards.
 */
int libdwForEachFrameOutwards(Backtrace *bt,
                              int (*cb)(StgPtr, void*),
                              void *user_data)
{
    int n_chunks = bt->n_frames / BACKTRACE_CHUNK_SZ;
    if (bt->n_frames % BACKTRACE_CHUNK_SZ != 0)
        n_chunks++;

    BacktraceChunk **chunks =
        stgMallocBytes(n_chunks * sizeof(BacktraceChunk *),
                       "libdwForEachFrameOutwards");

    // First build a list of chunks, ending with the inner-most chunk
    int chunk_idx;
    chunks[0] = bt->last;
    for (chunk_idx = 1; chunk_idx < n_chunks; chunk_idx++) {
        chunks[chunk_idx] = chunks[chunk_idx-1]->next;
    }

    // Now iterate back through the frames
    int res = 0;
    for (chunk_idx = n_chunks-1; chunk_idx >= 0 && res == 0; chunk_idx--) {
        unsigned int i;
        BacktraceChunk *chunk = chunks[chunk_idx];
        for (i = 0; i < chunk->n_frames; i++) {
            res = cb(chunk->frames[i], user_data);
            if (res != 0) break;
        }
    }
    // BUG FIX: this array came from stgMallocBytes, so release it with
    // stgFree (consistent with the rest of the RTS), not plain free().
    stgFree(chunks);
    return res;
}
/* Acquire an advisory single-writer/multi-reader lock on the file
 * identified by (dev, ino), on behalf of descriptor fd.
 *
 * Returns 0 on success, -1 if the request conflicts with an existing
 * lock.  lock->readers == -1 encodes "held for writing"; positive
 * values count the readers.
 */
int lockFile(int fd, dev_t dev, ino_t ino, int for_writing)
{
    Lock key, *lock;

    // stack-allocated key used only for the hash lookup
    key.device = dev;
    key.inode  = ino;

    lock = lookupHashTable(obj_hash, (StgWord)&key);

    if (lock == NULL)
    {
        // no existing lock on this file: create one
        lock = stgMallocBytes(sizeof(Lock), "lockFile");
        lock->device = dev;
        lock->inode  = ino;
        lock->readers = for_writing ? -1 : 1;
        // keyed by the Lock itself (the hash/compare functions
        // evidently inspect device/inode through the pointer)
        insertHashTable(obj_hash, (StgWord)lock, (void *)lock);
        // remember which lock this fd participates in, for unlockFile
        insertHashTable(fd_hash, fd, lock);
        return 0;
    }
    else
    {
        // single-writer/multi-reader locking:
        if (for_writing || lock->readers < 0) {
            return -1;
        }
        lock->readers++;
        return 0;
    }
}
/* Allocate a fresh, empty backtrace chunk whose successor is 'next'. */
static BacktraceChunk *backtraceAllocChunk(BacktraceChunk *next) {
    BacktraceChunk *fresh =
        stgMallocBytes(sizeof(BacktraceChunk), "backtraceAllocChunk");
    fresh->n_frames = 0;
    fresh->next = next;
    return fresh;
}
/* Return the address range [alloc_base, alloc_base+alloc_size) to the
 * sorted free-block list, coalescing with the neighbouring free block
 * on either side when the ranges are contiguous.
 *
 * A stack-allocated dummy head (temp) lets the code treat "insert at
 * front" uniformly with insertion in the middle of the list.
 */
static void
insertFree(char* alloc_base, W_ alloc_size) {
    block_rec temp;
    block_rec* it;
    block_rec* prev;

    temp.base=0; temp.size=0; temp.next=free_blocks;
    it = free_blocks;
    prev = &temp;

    // advance until 'it' is the first free block at or above alloc_base
    for( ; it!=0 && it->base<alloc_base; prev=it, it=it->next) {}

    if(it!=0 && alloc_base+alloc_size == it->base) {
        if(prev->base + prev->size == alloc_base) {
            /* Merge it, alloc, prev */
            prev->size += alloc_size + it->size;
            prev->next = it->next;
            stgFree(it);
        } else {
            /* Merge it, alloc */
            it->base = alloc_base;
            it->size += alloc_size;
        }
    } else if(prev->base + prev->size == alloc_base) {
        /* Merge alloc, prev */
        prev->size += alloc_size;
    } else {
        /* Merge none: link a fresh record between prev and it */
        block_rec* rec;
        rec = (block_rec*)stgMallocBytes(sizeof(block_rec),
                                         "getMBlocks: insertFree");
        rec->base=alloc_base;
        rec->size=alloc_size;
        rec->next = it;
        prev->next=rec;
    }
    // temp.next tracks the (possibly new) head of the list
    free_blocks=temp.next;
}
/* Reserve (n+1) megablocks of address space from the OS (the extra
 * megablock provides slack for alignment) and record the reservation in
 * the 'allocs' list, kept sorted by base address.
 *
 * On failure, frees the record and returns it as NULL after reporting;
 * genuine out-of-memory terminates the program.
 */
static
alloc_rec*
allocNew(uint32_t n) {
    alloc_rec* rec;
    rec = (alloc_rec*)stgMallocBytes(sizeof(alloc_rec),"getMBlocks: allocNew");
    rec->size = ((W_)n+1)*MBLOCK_SIZE;
    // MEM_RESERVE only: address space is reserved here, committed later
    rec->base =
        VirtualAlloc(NULL, rec->size, MEM_RESERVE, PAGE_READWRITE);
    if(rec->base==0) {
        stgFree((void*)rec);
        rec=0;
        if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY) {

            errorBelch("Out of memory\n");
            stg_exit(EXIT_HEAPOVERFLOW);
        } else {
            sysErrorBelch(
                "getMBlocks: VirtualAlloc MEM_RESERVE %d blocks failed", n);
        }
    } else {
        // insert into 'allocs' sorted by base address, using a dummy
        // head so front-insertion needs no special case
        alloc_rec temp;
        temp.base=0; temp.size=0; temp.next=allocs;

        alloc_rec* it;
        it=&temp;
        for(; it->next!=0 && it->next->base<rec->base; it=it->next) ;
        rec->next=it->next;
        it->next=rec;

        allocs=temp.next;
    }
    return rec;
}
/* MP_sync synchronises all nodes in a parallel computation:
 *  sets:
 *    thisPE - GlobalTaskId: node's own task Id
 *             (logical node address for messages)
 *  Returns: Bool: success (1) or failure (0)
 *
 * MPI Version:
 *   the number of nodes is checked by counting nodes in WORLD
 *   (could also be done by sync message)
 *   Own ID is known before, but returned only here.
 */
rtsBool MP_sync(void) {
    // initialise counters/constants and allocate/attach the buffer
    // buffer size default is 20, use RTS option -qq<N> to change it
    maxMsgs = RtsFlags.ParFlags.sendBufferSize;

    // and resulting buffer space

    // checks inside RtsFlags.c:
    //   DATASPACEWORDS * sizeof(StgWord) < INT_MAX / 2
    //   maxMsgs <= max(20,nPEs)
    // However, they might be just too much in combination.
    if (INT_MAX / sizeof(StgWord) < DATASPACEWORDS * maxMsgs) {
        IF_PAR_DEBUG(mpcomm,
                     debugBelch("requested buffer sizes too large, adjusting...\n"));
        // shrink the message count until the buffer fits in an int
        do {
            maxMsgs--;
        } while (maxMsgs > 0 &&
                 INT_MAX / sizeof(StgWord) < DATASPACEWORDS * maxMsgs);
        if (maxMsgs == 0) {
            // should not be possible with checks inside RtsFlags.c, see above
            barf("pack buffer too large to allocate, aborting program.");
        } else {
            IF_PAR_DEBUG(mpcomm,
                         debugBelch("send buffer size reduced to %d messages.\n",
                                    maxMsgs));
        }
    }

    bufsize = maxMsgs * DATASPACEWORDS * sizeof(StgWord);

    mpiMsgBuffer = (void*) stgMallocBytes(bufsize, "mpiMsgBuffer");

    // one outstanding MPI_Request slot per in-flight message
    requests = (MPI_Request*)
        stgMallocBytes(maxMsgs * sizeof(MPI_Request),"requests");

    msgCount = 0; // when maxMsgs reached

    // MPI ranks are 0-based; PE numbers are 1-based
    thisPE = mpiMyRank + 1;

    IF_PAR_DEBUG(mpcomm,
                 debugBelch("Node %d synchronising.\n", thisPE));

    MPI_Barrier(MPI_COMM_WORLD); // unnecessary...
    // but currently used to synchronize system times

    return rtsTrue;
}
/* Allocate a CostCentre and fill in its identifying strings.  The
 * caller retains ownership of label/module/srcloc, which must outlive
 * the cost centre (they are stored, not copied).
 *
 * NOTE(review): stgMallocBytes does not zero memory, so any other
 * CostCentre fields are left uninitialised here — presumably the caller
 * sets them; confirm at the call sites.
 */
CostCentre *mkCostCentre (char *label, char *module, char *srcloc)
{
    CostCentre *cc = stgMallocBytes (sizeof(CostCentre), "mkCostCentre");
    cc->label = label;
    cc->module = module;
    cc->srcloc = srcloc;
    return cc;
}
/* Extend the storage-manager state when the number of capabilities
 * grows from 'from' to 'to'.
 *
 * With -n<size> (nurseryChunkSize) there may be more nursery chunks
 * than capabilities; otherwise one nursery per capability.  Resizes the
 * nurseries array, re-links every Capability's rNursery pointer,
 * allocates the new nurseries, assigns them to the new capabilities,
 * gives each new capability one block per mutable list, and informs the
 * GC of the new threads.
 */
void storageAddCapabilities (uint32_t from, uint32_t to)
{
    uint32_t n, g, i, new_n_nurseries;

    if (RtsFlags.GcFlags.nurseryChunkSize == 0) {
        new_n_nurseries = to;
    } else {
        // enough chunks to cover the total requested allocation area
        memcount total_alloc = to * RtsFlags.GcFlags.minAllocAreaSize;
        new_n_nurseries =
            stg_max(to, total_alloc / RtsFlags.GcFlags.nurseryChunkSize);
    }

    if (from > 0) {
        nurseries = stgReallocBytes(nurseries,
                                    new_n_nurseries * sizeof(struct nursery_),
                                    "storageAddCapabilities");
    } else {
        nurseries = stgMallocBytes(new_n_nurseries * sizeof(struct nursery_),
                                   "storageAddCapabilities");
    }

    // we've moved the nurseries, so we have to update the rNursery
    // pointers from the Capabilities.
    for (i = 0; i < to; i++) {
        capabilities[i]->r.rNursery = &nurseries[i];
    }

    /* The allocation area.  Policy: keep the allocation area
     * small to begin with, even if we have a large suggested heap
     * size.  Reason: we're going to do a major collection first, and we
     * don't want it to be a big one.  This vague idea is borne out by
     * rigorous experimental evidence.
     */
    allocNurseries(n_nurseries, new_n_nurseries);
    n_nurseries = new_n_nurseries;

    /*
     * Assign each of the new capabilities a nursery.  Remember to start from
     * next_nursery, because we may have already consumed some of the earlier
     * nurseries.
     */
    assignNurseriesToCapabilities(from,to);

    // allocate a block for each mut list
    // (g starts at 1 — generation 0 apparently keeps no mut list)
    for (n = from; n < to; n++) {
        for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
            capabilities[n]->mut_lists[g] =
                allocBlockOnNode(capNoToNumaNode(n));
        }
    }

#if defined(THREADED_RTS) && defined(llvm_CC_FLAVOR) && (CC_SUPPORTS_TLS == 0)
    newThreadLocalKey(&gctKey);
#endif

    initGcThreads(from, to);
}
/* Copy at most n characters of s into a freshly-allocated,
 * NUL-terminated buffer and return it.
 * (borrowed from the MUSL libc project) */
char *stgStrndup(const char *s, size_t n)
{
    size_t len = strnlen(s, n);          // actual length, capped at n
    char *copy = stgMallocBytes(len + 1, "stgStrndup");
    if (!copy)
        return NULL;
    memcpy(copy, s, len);
    copy[len] = 0;
    return copy;
}
/* Pin spe_closure with a stable pointer and file it in the static
 * pointer table under the given 128-bit key. */
void hs_spt_insert(StgWord64 key[2], void *spe_closure)
{
    // Cannot remove this indirection yet because getStablePtr()
    // might return NULL, in which case hs_spt_lookup() returns NULL
    // instead of the actual closure pointer.
    StgStablePtr *slot = stgMallocBytes(sizeof(StgStablePtr),
                                        "hs_spt_insert: entry");
    *slot = getStablePtr(spe_closure);
    hs_spt_insert_stableptr(key, slot);
}
/* Load the symbol table of the given executable via BFD and record
 * every "real" symbol's address -> name mapping in add_to_fname_table,
 * for use by the debugger/interpreter.
 *
 * NOTE(review): abfd is deliberately not bfd_close()d — the info.name
 * strings stored in the hash table appear to point into BFD-owned
 * storage, so closing would dangle them; confirm against the BFD docs.
 */
extern void DEBUG_LoadSymbols( const char *name )
{
    bfd* abfd;
    char **matching;

    bfd_init();
    abfd = bfd_openr(name, "default");
    if (abfd == NULL) {
        barf("can't open executable %s to get symbol table", name);
    }
    if (!bfd_check_format_matches (abfd, bfd_object, &matching)) {
        barf("mismatch");
    }

    {
        long storage_needed;
        asymbol **symbol_table;
        long number_of_symbols;
        long num_real_syms = 0;
        long i;

        storage_needed = bfd_get_symtab_upper_bound (abfd);

        if (storage_needed < 0) {
            barf("can't read symbol table");
        }
        symbol_table = (asymbol **) stgMallocBytes(storage_needed,
                                                   "DEBUG_LoadSymbols");

        number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);

        if (number_of_symbols < 0) {
            barf("can't canonicalise symbol table");
        }

        if (add_to_fname_table == NULL)
            add_to_fname_table = allocHashTable();

        for( i = 0; i != number_of_symbols; ++i ) {
            symbol_info info;
            bfd_get_symbol_info(abfd,symbol_table[i],&info);
            // keep only symbols the isReal predicate accepts
            if (isReal(info.type, info.name)) {
                insertHashTable(add_to_fname_table,
                                info.value, (void*)info.name);
                num_real_syms += 1;
            }
        }

        IF_DEBUG(interpreter,
                 debugBelch("Loaded %ld symbols. Of which %ld are real symbols\n",
                            number_of_symbols, num_real_syms)
                 );

        // only the asymbol* array is ours; the symbols/names stay in abfd
        stgFree(symbol_table);
    }
}
/* One-time setup of the stable pointer table (idempotent: a non-zero
 * SPT_size means we already ran). */
void
initStablePtrTable(void)
{
    if (SPT_size > 0)
        return;                      // already initialised

    SPT_size = INIT_SPT_SIZE;
    stable_ptr_table = stgMallocBytes(SPT_size * sizeof(spEntry),
                                      "initStablePtrTable");
    // thread every entry onto the free list
    initSpEntryFreeList(stable_ptr_table, INIT_SPT_SIZE, NULL);

#if defined(THREADED_RTS)
    initMutex(&stable_ptr_mutex);
#endif
}
// Begin a new arena Arena * newArena( void ) { Arena *arena; arena = stgMallocBytes(sizeof(Arena), "newArena"); arena->current = allocBlock_lock(); arena->current->link = NULL; arena->free = arena->current->start; arena->lim = arena->current->start + BLOCK_SIZE_W; arena_blocks++; return arena; }
static char *expectString(void) { char tmp[256], *res; // XXX int tmp_ix = 0; expect('"'); while (tix_ch != '"') { tmp[tmp_ix++] = tix_ch; tix_ch = getc(tixFile); } tmp[tmp_ix++] = 0; expect('"'); res = stgMallocBytes(tmp_ix,"Hpc.expectString"); strcpy(res,tmp); return res; }
/* One-time setup of both the stable NAME table and the stable POINTER
 * table (each guarded by its own size check, so either may already have
 * been initialised independently).
 */
void
initStableTables(void)
{
    if (SNT_size > 0) return;
    SNT_size = INIT_SNT_SIZE;
    stable_name_table = stgMallocBytes(SNT_size * sizeof *stable_name_table,
                                       "initStableNameTable");
    /* we don't use index 0 in the stable name table, because that
     * would conflict with the hash table lookup operations which
     * return NULL if an entry isn't found in the hash table.
     */
    initSnEntryFreeList(stable_name_table + 1,INIT_SNT_SIZE-1,NULL);
    addrToStableHash = allocHashTable();

    // NOTE(review): if the pointer table was already set up, we return
    // here and skip the mutex init below — presumably fine because the
    // mutex was initialised on that earlier path too; confirm.
    if (SPT_size > 0) return;
    SPT_size = INIT_SPT_SIZE;
    stable_ptr_table = stgMallocBytes(SPT_size * sizeof *stable_ptr_table,
                                      "initStablePtrTable");
    initSpEntryFreeList(stable_ptr_table,INIT_SPT_SIZE,NULL);

#ifdef THREADED_RTS
    initMutex(&stable_mutex);
#endif
}
/* Look for n megablocks' worth of space in the free-block list.
 *
 * Takes the first block that is large enough (first-fit).  If its base
 * is already megablock-aligned the space is carved off the front;
 * otherwise the block is split around an aligned region, inserting a
 * record for the tail.  Returns NULL when no block fits.
 */
static void*
findFreeBlocks(uint32_t n) {
    void* ret=0;
    block_rec* it;
    block_rec temp;
    block_rec* prev;

    W_ required_size;
    it=free_blocks;
    required_size = n*MBLOCK_SIZE;
    // dummy head so deletion of the first element needs no special case
    temp.next=free_blocks; temp.base=0; temp.size=0;
    prev=&temp;
    /* TODO: Don't just take first block, find smallest sufficient block */
    for( ; it!=0 && it->size<required_size; prev=it, it=it->next ) {}
    if(it!=0) {
        if( (((W_)it->base) & MBLOCK_MASK) == 0) { /* MBlock aligned */
            ret = (void*)it->base;
            if(it->size==required_size) {
                // exact fit: unlink and free the record
                prev->next=it->next;
                stgFree(it);
            } else {
                // shrink the record from the front
                it->base += required_size;
                it->size -=required_size;
            }
        } else {
            char* need_base;
            block_rec* next;
            int new_size;   // gap before the aligned region; < MBLOCK_SIZE,
                            // so int is wide enough
            // round up to the next megablock boundary within the block
            need_base =
                (char*)(((W_)it->base) & ((W_)~MBLOCK_MASK)) + MBLOCK_SIZE;
            next = (block_rec*)stgMallocBytes(
                    sizeof(block_rec)
                    , "getMBlocks: findFreeBlocks: splitting");
            new_size = need_base - it->base;
            // 'next' records the leftover space after the carved region
            next->base = need_base +required_size;
            next->size = it->size - (new_size+required_size);
            it->size = new_size;    // 'it' keeps the unaligned prefix
            next->next = it->next;
            it->next = next;
            ret=(void*)need_base;
        }
    }
    free_blocks=temp.next;
    return ret;
}
/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        // a contiguous array of Capability structures (note: this is
        // the older layout — an array of structs, not of pointers)
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task to each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
/* Allocate and initialise a Task, link it onto the global all_tasks
 * list, and update the task/worker counters (all under all_tasks_mutex).
 *
 * The Task is rounded up to a cache-line multiple to avoid false
 * sharing between Tasks allocated back to back.
 */
static Task*
newTask (rtsBool worker)
{
    Task *task;

// round the allocation up to a 64-byte cache-line multiple
// (NOTE: macro is defined at function scope and never #undef'd)
#define ROUND_TO_CACHE_LINE(x) ((((x)+63) / 64) * 64)
    task = stgMallocBytes(ROUND_TO_CACHE_LINE(sizeof(Task)), "newTask");

    task->cap           = NULL;
    task->worker        = worker;
    task->stopped       = rtsFalse;
    task->running_finalizers = rtsFalse;
    task->n_spare_incalls = 0;
    task->spare_incalls = NULL;
    task->incall        = NULL;

#if defined(THREADED_RTS)
    initCondition(&task->cond);
    initMutex(&task->lock);
    task->wakeup = rtsFalse;
#endif

    task->next = NULL;

    // push onto the doubly-linked all_tasks list
    ACQUIRE_LOCK(&all_tasks_mutex);

    task->all_prev = NULL;
    task->all_next = all_tasks;
    if (all_tasks != NULL) {
        all_tasks->all_prev = task;
    }
    all_tasks = task;

    taskCount++;
    if (worker) {
        workerCount++;
        currentWorkerCount++;
        if (currentWorkerCount > peakWorkerCount) {
            peakWorkerCount = currentWorkerCount;
        }
    }
    RELEASE_LOCK(&all_tasks_mutex);

    return task;
}
/* Minimal Capability initialisation (non-threaded layout): number the
 * capability, set the GC entry points used by compiled code, and give
 * it one (initially empty) mutable list per generation. */
static void
initCapability( Capability *cap, nat i )
{
    nat gen;

    cap->no = i;
    cap->in_haskell = rtsFalse;

    // entry points invoked from generated code
    cap->f.stgGCEnter1 = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun    = (F_)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    for (gen = 0; gen < RtsFlags.GcFlags.generations; gen++) {
        cap->mut_lists[gen] = NULL;
    }
}