STATIC_INLINE void
unthread( StgPtr p, StgWord free )
{
    StgWord q, r;
    StgPtr q0;

    q = *p;
loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q))
    {
    case 0:
        // nothing to do; the chain is length zero
        return;
    case 1:
        q0 = (StgPtr)(q-1);
        r = *q0;  // r is the info ptr, tagged with the pointer-tag
        *q0 = free;
        *p = (StgWord)UNTAG_CLOSURE((StgClosure *)r);
        return;
    case 2:
        q0 = (StgPtr)(q-2);
        r = *q0;
        *q0 = free;
        q = r;
        goto loop;
    default:
        barf("unthread");
    }
}
Ticks getThreadCPUTime(void)
{
#if USE_PAPI
    long long usec;
    if ((usec = PAPI_get_virt_usec()) < 0) {
        barf("PAPI_get_virt_usec: %lld", usec);
    }
    return ((usec * TICKS_PER_SECOND) / 1000000);
#elif !defined(BE_CONSERVATIVE) && \
      defined(HAVE_CLOCK_GETTIME) && \
      defined(_SC_THREAD_CPUTIME) && \
      defined(CLOCK_THREAD_CPUTIME_ID) && \
      defined(HAVE_SYSCONF)
    {
        static int checked_sysconf = 0;
        static int sysconf_result = 0;

        if (!checked_sysconf) {
            sysconf_result = sysconf(_SC_THREAD_CPUTIME);
            checked_sysconf = 1;
        }
        if (sysconf_result != -1) {
            // clock_gettime() gives us per-thread CPU time.  It isn't
            // reliable on Linux, but it's the best we have.
            struct timespec ts;
            int res;
            res = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
            if (res == 0) {
                return ((Ticks)ts.tv_sec * TICKS_PER_SECOND +
                        ((Ticks)ts.tv_nsec * TICKS_PER_SECOND) / 1000000000);
            }
        }
    }
#endif
    return getProcessCPUTime();
}
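For reference, the per-thread clock used in the #elif branch above is plain POSIX and can be exercised outside the RTS. The following is a minimal standalone sketch (not part of the original source); the busy loop and the thread_cpu_ns helper are illustrative, only clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) is the call the RTS relies on. Older glibc may require linking with -lrt.

#include <stdio.h>
#include <time.h>

/* Sketch: read the calling thread's CPU time in nanoseconds, or -1 on error. */
static long long thread_cpu_ns(void)
{
    struct timespec ts;
    if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) != 0) {
        return -1;
    }
    return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
    long long before = thread_cpu_ns();
    volatile double x = 0.0;
    for (int i = 0; i < 10000000; i++) x += i;   /* burn some CPU */
    long long after = thread_cpu_ns();
    printf("thread CPU time: %lld ns\n", after - before);
    return 0;
}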
/*
 * Ask a yes/no question, prompting from the varargs format string and
 * using the default if the user just hits return.
 */
Boolean
y_or_n(Boolean def, const char *msg, ...)
{
    va_list args;
    int ch = 0;
    FILE *tty;

    va_start(args, msg);
    /*
     * Need to open /dev/tty because file collection may have been
     * collected on stdin
     */
    tty = fopen("/dev/tty", "r");
    if (!tty)
        barf("Can't open /dev/tty!\n");

    while (ch != 'Y' && ch != 'N') {
        vfprintf(stderr, msg, args);
        if (def)
            fprintf(stderr, " [yes]? ");
        else
            fprintf(stderr, " [no]? ");
        fflush(stderr);
        if (AutoAnswer) {
            ch = (AutoAnswer == YES) ? 'Y' : 'N';
            fprintf(stderr, "%c\n", ch);
        }
        else
            ch = toupper(fgetc(tty));
        if (ch == '\n')
            ch = (def) ? 'Y' : 'N';
    }
    fclose(tty);
    va_end(args);
    return (ch == 'Y') ? TRUE : FALSE;
}
void ShutdownIOManager ( bool wait_threads )
{
    int num;
    MMRESULT mmresult;

    SetEvent(ioMan->hExitEvent);

    if (wait_threads) {
        /* Wait for all worker threads to die. */
        for (;;) {
            EnterCriticalSection(&ioMan->manLock);
            num = ioMan->numWorkers;
            LeaveCriticalSection(&ioMan->manLock);
            if (num == 0)
                break;
            Sleep(10);
        }
        FreeWorkQueue(ioMan->workQueue);
        CloseHandle(ioMan->hExitEvent);
        DeleteCriticalSection(&ioMan->active_work_lock);
        DeleteCriticalSection(&ioMan->manLock);

        mmresult = timeEndPeriod(ioMan->sleepResolution);
        if (mmresult != MMSYSERR_NOERROR) {
            barf("timeEndPeriod failed");
        }

        free(ioMan);
        ioMan = NULL;
    }
}
// str is a line of digits, optionally starting with a line number.
// Parse it and return the counts in a newly allocated Counts struct.
// If lnno is non-NULL, treat the first number as a line number and
// assign it to *lnno instead of incorporating it in the counts array.
static Counts* splitUpCountsLine ( SOURCE* s, /*OUT*/UWord* lnno, char* str )
{
#define N_TMPC 50
   Bool    ok;
   Counts* counts;
   ULong   tmpC[N_TMPC];
   Int     n_tmpC = 0;

   while (1) {
      ok = parse_ULong( &tmpC[n_tmpC], &str );
      if (!ok)
         break;
      n_tmpC++;
      if (n_tmpC >= N_TMPC)
         barf(s, "N_TMPC too low.  Increase and recompile.");
   }
   if (*str != 0)
      parseError(s, "garbage in counts line");
   if (lnno ? (n_tmpC < 2) : (n_tmpC < 1))
      parseError(s, "too few counts in count line");

   if (lnno) {
      *lnno = (UWord)tmpC[0];
      counts = new_Counts( n_tmpC-1, /*COPIED*/&tmpC[1] );
   } else {
      counts = new_Counts( n_tmpC, /*COPIED*/&tmpC[0] );
   }

   return counts;
#undef N_TMPC
}
static void splitRtsFlags(char *s, int *rts_argc, char *rts_argv[])
{
    char *c1, *c2;

    c1 = s;
    do {
        while (isspace(*c1)) { c1++; };
        c2 = c1;
        while (!isspace(*c2) && *c2 != '\0') { c2++; };

        if (c1 == c2) { break; }

        if (*rts_argc < MAX_RTS_ARGS-1) {
            s = stgMallocBytes(c2-c1+1, "RtsFlags.c:splitRtsFlags()");
            strncpy(s, c1, c2-c1);
            s[c2-c1] = '\0';
            rts_argv[(*rts_argc)++] = s;
        } else {
            barf("too many RTS arguments (max %d)", MAX_RTS_ARGS-1);
        }

        c1 = c2;
    } while (*c1 != '\0');
}
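As a usage illustration only (this snippet is not part of the original file), a whitespace-separated option string is split into individually malloc'd arguments. MAX_RTS_ARGS and stgMallocBytes come from the surrounding RTS, and the option text is arbitrary example input:

    char *argv[MAX_RTS_ARGS];
    int argc = 0;
    char opts[] = "-H128m  -K1m -S";

    splitRtsFlags(opts, &argc, argv);
    /* argv[0] = "-H128m", argv[1] = "-K1m", argv[2] = "-S", argc = 3;
       each entry is a fresh copy, independent of the opts buffer. */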
void *
osGetMBlocks(nat n)
{
    caddr_t ret;
    W_ size = MBLOCK_SIZE * (W_)n;

    if (next_request == 0) {
        // use gen_map_mblocks the first time.
        ret = gen_map_mblocks(size);
    } else {
        ret = my_mmap(next_request, size);

        if (((W_)ret & MBLOCK_MASK) != 0) {
            // misaligned block!
#if 0 // defined(DEBUG)
            errorBelch("warning: getMBlock: misaligned block %p returned "
                       "when allocating %d megablock(s) at %p",
                       ret, n, next_request);
#endif
            // unmap this block...
            if (munmap(ret, size) == -1) {
                barf("getMBlock: munmap failed");
            }
            // and do it the hard way
            ret = gen_map_mblocks(size);
        }
    }

    // Next time, we'll try to allocate right after the block we just got.
    // ToDo: check that we haven't already grabbed the memory at next_request
    next_request = ret + size;

    return ret;
}
static ffi_type * char_to_ffi_type(char c)
{
    switch (c) {
    case 'v':  return &ffi_type_void;
    case 'f':  return &ffi_type_float;
    case 'd':  return &ffi_type_double;
    case 'L':  return &ffi_type_sint64;
    case 'l':  return &ffi_type_uint64;
    case 'W':  return &ffi_type_sint32;
    case 'w':  return &ffi_type_uint32;
    case 'S':  return &ffi_type_sint16;
    case 's':  return &ffi_type_uint16;
    case 'B':  return &ffi_type_sint8;
    case 'b':  return &ffi_type_uint8;
    case 'p':  return &ffi_type_pointer;
    default:   barf("char_to_ffi_type: unknown type '%c'", c);
    }
}
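To show how a mapping like this is typically consumed, the sketch below builds a libffi call interface from a signature string. The helper name prep_cif_from_string and the convention that the first character encodes the result type are assumptions for illustration; the libffi calls themselves (ffi_prep_cif and friends) are the standard API.

#include <ffi.h>
#include <string.h>

/* Hypothetical helper: prepare an ffi_cif from a signature string such as
 * "dWd" using the char_to_ffi_type mapping above.  The caller supplies the
 * arg_types array, which must stay alive as long as the cif is used.
 * Returns 1 on success, 0 on failure. */
static int prep_cif_from_string(ffi_cif *cif, const char *sig,
                                ffi_type **arg_types)
{
    unsigned int nargs = (unsigned int)strlen(sig) - 1;
    unsigned int i;

    for (i = 0; i < nargs; i++) {
        arg_types[i] = char_to_ffi_type(sig[i + 1]);
    }
    return ffi_prep_cif(cif, FFI_DEFAULT_ABI, nargs,
                        char_to_ffi_type(sig[0]), arg_types) == FFI_OK;
}

A prepared cif would then be handed to ffi_call together with the target function pointer, a result buffer, and an array of argument value pointers.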
/* One receiver per fd */
static void *receiver(struct receiver_context* ctx)
{
    unsigned int i;

    if (process_mode)
        close(ctx->in_fds[1]);

    /* Wait for start... */
    ready(ctx->ready_out, ctx->wakefd);

    /* Receive them all */
    for (i = 0; i < ctx->num_packets; i++) {
        char data[datasize];
        int ret, done = 0;

again:
        ret = read(ctx->in_fds[0], data + done, datasize - done);
        if (ret < 0)
            barf("SERVER: read");
        done += ret;
        if (done < datasize)
            goto again;
    }

    if (ctx) {
        free(ctx);
    }

    return NULL;
}
void *
osGetMBlocks(uint32_t n)
{
    void* ret;

    ret = findFreeBlocks(n);
    if (ret == 0) {
        alloc_rec* alloc;
        alloc = allocNew(n);
        /* We already belch in allocNew if it fails */
        if (alloc == 0) {
            stg_exit(EXIT_FAILURE);
        } else {
            insertFree(alloc->base, alloc->size);
            ret = findFreeBlocks(n);
        }
    }

    if (ret != 0) {
        /* (In)sanity tests */
        if (((W_)ret & MBLOCK_MASK) != 0) {
            barf("getMBlocks: misaligned block returned");
        }
        commitBlocks(ret, (W_)MBLOCK_SIZE*n);
    }

    return ret;
}
// Traverse a threaded chain and pull out the info pointer at the end.
// The info pointer is also tagged with the appropriate pointer tag
// for this closure, which should be attached to the pointer
// subsequently passed to unthread().
STATIC_INLINE StgWord
get_threaded_info( StgPtr p )
{
    StgWord q;

    q = (W_)GET_INFO(UNTAG_CLOSURE((StgClosure *)p));

loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q))
    {
    case 0:
        ASSERT(LOOKS_LIKE_INFO_PTR(q));
        return q;
    case 1:
    {
        StgWord r = *(StgPtr)(q-1);
        ASSERT(LOOKS_LIKE_INFO_PTR(UNTAG_CLOSURE((StgClosure *)r)));
        return r;
    }
    case 2:
        q = *(StgPtr)(q-2);
        goto loop;
    default:
        barf("get_threaded_info");
    }
}
int lookup(char *name)
{
    struct addrinfo hints;
    char addrstr[128];
    struct sockaddr_in *sa;
    struct addrinfo *ai;
    int r;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_INET;
    hints.ai_socktype = SOCK_DGRAM;
    hints.ai_flags = AI_PASSIVE;
    hints.ai_protocol = IPPROTO_UDP;
    hints.ai_canonname = NULL;
    hints.ai_addr = NULL;
    hints.ai_next = NULL;

    /* getaddrinfo() reports failure via a nonzero return code, not errno */
    r = getaddrinfo(name, service, &hints, &ai);
    if (r != 0)
        barf(gai_strerror(r));

    /* grab the first address */
    sa = (struct sockaddr_in *)ai->ai_addr;
    inet_ntop(AF_INET, &sa->sin_addr, addrstr, sizeof(addrstr));
    bcopy(ai->ai_addr, &ctx.saddr, ai->ai_addrlen);
    freeaddrinfo(ai);

    return r;
}
void
sendMessage(Capability *from_cap, Capability *to_cap, Message *msg)
{
    ACQUIRE_LOCK(&to_cap->lock);

#ifdef DEBUG
    {
        const StgInfoTable *i = msg->header.info;
        if (i != &stg_MSG_THROWTO_info &&
            i != &stg_MSG_BLACKHOLE_info &&
            i != &stg_MSG_TRY_WAKEUP_info &&
            i != &stg_IND_info && // can happen if a MSG_BLACKHOLE is revoked
            i != &stg_WHITEHOLE_info) {
            barf("sendMessage: %p", i);
        }
    }
#endif

    msg->link = to_cap->inbox;
    to_cap->inbox = msg;

    recordClosureMutated(from_cap,(StgClosure*)msg);

    if (to_cap->running_task == NULL) {
        to_cap->running_task = myTask();
        // precond for releaseCapability_()
        releaseCapability_(to_cap,rtsFalse);
    } else {
        interruptCapability(to_cap);
    }

    RELEASE_LOCK(&to_cap->lock);
}
static void
thread_static( StgClosure* p )
{
    const StgInfoTable *info;

    // keep going until we've threaded all the objects on the linked
    // list...
    while (p != END_OF_STATIC_LIST) {

        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
            thread(&((StgInd *)p)->indirectee);
            p = *IND_STATIC_LINK(p);
            continue;

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK(p);
            continue;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK(p);
            continue;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,p);
            continue;

        default:
            barf("thread_static: strange closure %d", (int)(info->type));
        }
    }
}
rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}
/* Sender sprays LOOPS messages down each file descriptor */
static void sender(int out_fd[NUM_FDS], int ready_out, int wakefd)
{
    char data[DATASIZE];
    unsigned int i, j;

    ready(ready_out, wakefd);

    /* Now pump to every receiver. */
    for (i = 0; i < LOOPS; i++) {
        for (j = 0; j < NUM_FDS; j++) {
            int ret;
            unsigned long done = 0;

again:
            ret = write(out_fd[j], data + done, sizeof(data)-done);
            if (ret < 0)
                barf("SENDER: write");
            done += ret;
            if (done < sizeof(data))
                goto again;
        }
    }
}
NewTextureProgram::NewTextureProgram(TextureVertexShader *v,
                                     TextureFragmentShader *f)
    : vs(v), fs(f)
{
    program = glCreateProgram();
    glAttachShader(program, vs->shader);
    glAttachShader(program, fs->shader);
    glLinkProgram(program);

    GLint length = 0;
    glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
    if (length > 1) {
        GLchar buf[length];
        glGetProgramInfoLog(program, length, &length, buf);
        printf("linking texture program:\n%s\n\n", buf);
    }

    GLint success = 0;
    glGetProgramiv(program, GL_LINK_STATUS, &success);
    if (!success)
        barf();

#define ATTRIBUTE(name, type) \
    name##Attribute = glGetAttribLocation(program, #name); \
    if (name##Attribute == -1) barf();
#define UNIFORM(name, type) \
    name##Uniform = glGetUniformLocation(program, #name); \
    if (name##Uniform == -1) barf();

#include "vertex-texture.glsl.def"
#include "fragment-texture.glsl.def"

#undef ATTRIBUTE
#undef UNIFORM
}
// True if anything read, False if at EOF
static Bool readline ( SOURCE* s )
{
   int ch, i = 0;
   line[0] = 0;
   while (1) {
      if (i >= M_LINEBUF-10)
         parseError(s, "Unexpected long line in input file");
      ch = getc(s->fp);
      if (ch != EOF) {
         line[i++] = ch;
         line[i] = 0;
         if (ch == '\n') {
            line[i-1] = 0;
            s->lno++;
            break;
         }
      } else {
         if (ferror(s->fp)) {
            perror(argv0);
            barf(s, "I/O error while reading input file");
         } else {
            // hit EOF
            break;
         }
      }
   }
   return line[0] != 0;
}
/* Sender sprays loops messages down each file descriptor */
static void *sender(struct sender_context *ctx)
{
    char data[datasize];
    unsigned int i, j;

    ready(ctx->ready_out, ctx->wakefd);
    memset(&data, '-', datasize);

    /* Now pump to every receiver. */
    for (i = 0; i < loops; i++) {
        for (j = 0; j < ctx->num_fds; j++) {
            int ret, done = 0;

again:
            ret = write(ctx->out_fds[j], data + done, sizeof(data)-done);
            if (ret < 0)
                barf("SENDER: write");
            done += ret;
            if (done < sizeof(data))
                goto again;
        }
    }

    return NULL;
}
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_REQUEST_SEQ_GC:   // (cap)
        debugBelch("cap %d: requesting sequential GC\n", cap->no);
        break;
    case EVENT_REQUEST_PAR_GC:   // (cap)
        debugBelch("cap %d: requesting parallel GC\n", cap->no);
        break;
    case EVENT_GC_START:         // (cap)
        debugBelch("cap %d: starting GC\n", cap->no);
        break;
    case EVENT_GC_END:           // (cap)
        debugBelch("cap %d: finished GC\n", cap->no);
        break;
    case EVENT_GC_IDLE:          // (cap)
        debugBelch("cap %d: GC idle\n", cap->no);
        break;
    case EVENT_GC_WORK:          // (cap)
        debugBelch("cap %d: GC working\n", cap->no);
        break;
    case EVENT_GC_DONE:          // (cap)
        debugBelch("cap %d: GC done\n", cap->no);
        break;
    default:
        barf("traceGcEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
void
setThreadLocalVar (ThreadLocalKey *key, void *value)
{
    int r;
    if ((r = pthread_setspecific(*key, value)) != 0) {
        barf("setThreadLocalVar: %s", strerror(r));
    }
}
std::shared_ptr<Graph> Graph::copy() {
  auto new_g = std::make_shared<Graph>();
  auto env = [](Value *) -> Value* {
    barf("Graph::copy() encountered a use of a value not in scope. Run lint!");
  };
  new_g->block()->cloneFrom(this->block(), env);
  return new_g;
}
void
newThreadLocalKey (ThreadLocalKey *key)
{
    int r;
    if ((r = pthread_key_create(key, NULL)) != 0) {
        barf("newThreadLocalKey: %s", strerror(r));
    }
}
/*void barf() {
    std::cout << "hello" << std::endl;
}*/

int main() {
    Student s("Bobby Teenager", "Sophomore", 2.5);
    s.boost_grade();
    s.display();
    barf();
    barfo();
}
void
freeThreadLocalKey (ThreadLocalKey *key)
{
    int r;
    if ((r = pthread_key_delete(*key)) != 0) {
        barf("freeThreadLocalKey: %s", strerror(r));
    }
}
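Taken together, newThreadLocalKey, setThreadLocalVar, and freeThreadLocalKey are thin error-checked wrappers over the pthread thread-local-storage API. The standalone sketch below shows the same create/set/get/delete lifecycle using the pthread calls directly; it is illustrative only, and the reading side (pthread_getspecific) is assumed here since the corresponding wrapper is not shown above.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void *worker(void *arg)
{
    /* Each thread sees only the value it stored under the shared key. */
    pthread_setspecific(key, arg);
    printf("thread sees: %s\n", (const char *)pthread_getspecific(key));
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_key_create(&key, NULL);      /* cf. newThreadLocalKey */
    pthread_create(&t1, NULL, worker, "one");
    pthread_create(&t2, NULL, worker, "two");
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    pthread_key_delete(key);             /* cf. freeThreadLocalKey */
    return 0;
}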
static void
insert( StgWord value, const char *name )
{
    if ( table_size >= max_table_size ) {
        barf( "Symbol table overflow\n" );
    }
    table[table_size].value = value;
    table[table_size].name  = name;
    table_size = table_size + 1;
}
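For completeness, the obvious counterpart is a lookup over the same table. The helper below is not part of the original source; it is a sketch assuming the table/table_size layout used by insert() above.

/* Hypothetical counterpart to insert(): linear scan of the symbol table.
 * Returns the stored name for value, or NULL if it is not present. */
static const char *
lookup_symbol( StgWord value )
{
    int i;
    for ( i = 0; i < table_size; i++ ) {
        if ( table[i].value == value ) {
            return table[i].name;
        }
    }
    return NULL;
}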
void check( int i )
{
    if( HugeArr[i].a[0]  != 1  ) barf( "HugeArr", i, __LINE__ );
    if( HugeArr[i].a[1]  != i  ) barf( "HugeArr", i, __LINE__ );
    if( HugeArr[i].a[2]  != 99 ) barf( "HugeArr", i, __LINE__ );
    if( HugeArr[i].a[31] != 99 ) barf( "HugeArr", i, __LINE__ );
    if( HugeMem[i].a[0]  != 1  ) barf( "HugeMem", i, __LINE__ );
    if( HugeMem[i].a[1]  != i  ) barf( "HugeMem", i, __LINE__ );
    if( HugeMem[i].a[2]  != 99 ) barf( "HugeMem", i, __LINE__ );
    if( HugeMem[i].a[31] != 99 ) barf( "HugeMem", i, __LINE__ );
}
/* One group of senders and receivers */
static unsigned int group(int ready_out, int wakefd)
{
    unsigned int i;
    int out_fds[NUM_FDS];

    for (i = 0; i < NUM_FDS; i++) {
        int fds[2];

        /* Create the pipe between client and server */
        fdpair(fds);

        /* Fork the receiver. */
        switch (fork()) {
        case -1:
            barf("fork()");
        case 0:
            close(fds[1]);
            receiver(NUM_FDS*LOOPS, fds[0], ready_out, wakefd);
            exit(0);
        }

        out_fds[i] = fds[1];
        close(fds[0]);
    }

    /* Now we have all the fds, fork the senders */
    for (i = 0; i < NUM_FDS; i++) {
        switch (fork()) {
        case -1:
            barf("fork()");
        case 0:
            sender(out_fds, ready_out, wakefd);
            exit(0);
        }
    }

    /* Close the fds we have left */
    for (i = 0; i < NUM_FDS; i++)
        close(out_fds[i]);

    /* Return number of children to reap */
    return NUM_FDS * 2;
}
static void fdpair(int fds[2])
{
    if (use_pipes) {
        if (pipe(fds) == 0)
            return;
    } else {
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0)
            return;
    }
    barf("Creating fdpair");
}
/* Examine the client and work out which arch it is for */
static const char *select_arch( const char *clientname,
                                cpu_type_t default_cputype,
                                const char *default_arch )
{
   uint8_t buf[4096];
   ssize_t bytes;
   int fd = open(find_client(clientname), O_RDONLY);
   if (fd < 0) {
      barf("%s: %s", clientname, strerror(errno));
   }
   bytes = read(fd, buf, sizeof(buf));
   close(fd);
   if (bytes != sizeof(buf)) {
      return NULL;
   }

   // If it's thin, return that arch.
   {
      struct mach_header *mh = (struct mach_header *)buf;
      if (mh->magic == MH_MAGIC || mh->magic == MH_MAGIC_64) {
         return name_for_cputype(mh->cputype);
      } else if (mh->magic == MH_CIGAM || mh->magic == MH_CIGAM_64) {
         return name_for_cputype(OSSwapInt32(mh->cputype));
      }
   }

   // If it's fat, look for a good arch.
   {
      struct fat_header *fh = (struct fat_header *)buf;
      if (ntohl(fh->magic) == FAT_MAGIC) {
         uint32_t nfat_arch = ntohl(fh->nfat_arch);
         int i;
         // If only one fat arch, use it.
         if (nfat_arch == 1) {
            struct fat_arch *fa = (struct fat_arch *)(fh+1);
            return name_for_cputype(ntohl(fa->cputype));
         }
         // Scan fat headers for default arch.
         if (fat_has_cputype(fh, default_cputype)) {
            return default_arch;
         }
         // Scan fat headers for any supported arch.
         for (i = 0; i < valid_archs_count; i++) {
            if (fat_has_cputype(fh, valid_archs[i].cputype)) {
               return valid_archs[i].valgrind_name;
            }
         }
      }
   }

   return NULL;
}