// Report a caught V8 exception on stderr. When V8 supplies a Message we
// print "(filename):(line): (message)", the offending source line, a caret
// underline under the error span, and the JS stack trace if one exists;
// otherwise just the exception value.
//
// Fix: the original declared "const char *stack_trace_string" inside the
// innermost block, shadowing the "v8::Local<v8::Value> stack_trace_string"
// declared just above it. Renamed the C string to stack_trace_cstr.
void Fragment::Script::ScriptEngine::ReportException(v8::Isolate *isolate, v8::TryCatch *try_catch) {
    v8::HandleScope handle_scope(isolate);
    v8::String::Utf8Value exception(try_catch->Exception());
    const char *exception_string = ToCString(exception);
    v8::Local<v8::Message> message = try_catch->Message();
    if (message.IsEmpty()) {
        // V8 didn't provide any extra information about this error; just
        // print the exception.
        fprintf(stderr, "%s\n", exception_string);
    } else {
        // Print (filename):(line number): (message).
        v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
        v8::Local<v8::Context> context(isolate->GetCurrentContext());
        const char *filename_string = ToCString(filename);
        int linenum = message->GetLineNumber(context).FromJust();
        fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
        // Print line of source code.
        v8::String::Utf8Value sourceline(
            message->GetSourceLine(context).ToLocalChecked());
        const char *sourceline_string = ToCString(sourceline);
        fprintf(stderr, "%s\n", sourceline_string);
        // Print wavy underline (GetUnderline is deprecated).
        int start = message->GetStartColumn(context).FromJust();
        for (int i = 0; i < start; i++) {
            fprintf(stderr, " ");
        }
        int end = message->GetEndColumn(context).FromJust();
        for (int i = start; i < end; i++) {
            fprintf(stderr, "^");
        }
        fprintf(stderr, "\n");
        // Append the stack trace when it is a non-empty string.
        v8::Local<v8::Value> stack_trace_string;
        if (try_catch->StackTrace(context).ToLocal(&stack_trace_string) &&
            stack_trace_string->IsString() &&
            v8::Local<v8::String>::Cast(stack_trace_string)->Length() > 0) {
            v8::String::Utf8Value stack_trace(stack_trace_string);
            // Renamed (was stack_trace_string, shadowing the Local above).
            const char *stack_trace_cstr = ToCString(stack_trace);
            fprintf(stderr, "%s\n", stack_trace_cstr);
        }
    }
}
// Print a caught V8 exception to stdout: the message, its source location,
// the offending source line with a caret underline, and the stack trace
// (or a placeholder when none is available).
void ReportException (v8::TryCatch* try_catch) {
  v8::HandleScope handle_scope;
  v8::String::Utf8Value exception(try_catch->Exception());
  const char* exception_string = ToCString(exception);
  v8::Handle<v8::Message> message = try_catch->Message();

  if (message.IsEmpty()) {
    // V8 didn't provide any extra information about this error; just
    // print the exception.
    printf("%s\n", exception_string);
    return;
  }

  // (filename):(line number): (message).
  v8::String::Utf8Value filename(message->GetScriptResourceName());
  printf("%s:%i: %s\n", ToCString(filename), message->GetLineNumber(),
         exception_string);

  // The offending line of source code.
  v8::String::Utf8Value sourceline(message->GetSourceLine());
  printf("%s\n", ToCString(sourceline));

  // Caret underline beneath the error span (GetUnderline is deprecated):
  // spaces up to the start column, carets up to the end column.
  const int start = message->GetStartColumn();
  const int end = message->GetEndColumn();
  for (int col = 0; col < end; col++) {
    printf("%s", col < start ? " " : "^");
  }
  printf("\n");

  v8::String::Utf8Value stack_trace(try_catch->StackTrace());
  if (stack_trace.length() > 0) {
    printf("%s\n", ToCString(stack_trace));
  } else {
    printf("no stack trace available\n");
  }
}
/*
 * g_on_error_stack_trace:
 * @prg_name: path of the running program (passed to gdb); no-op when NULL.
 *
 * Print a stack trace of the calling process. On Unix/BeOS this forks a
 * child that attaches gdb to this process (via stack_trace()) and waits
 * for it to finish; on Symbian it simply aborts; elsewhere (Windows) it
 * breaks into an attached debugger, or aborts when none is present.
 */
EXPORT_C void
g_on_error_stack_trace (const gchar *prg_name)
{
#if defined(G_OS_UNIX) || defined(G_OS_BEOS)
  pid_t pid;
  gchar buf[16];
  /* argv for gdb: args[1] = program path, args[2] = our pid (filled below). */
  gchar *args[4] = { "gdb", NULL, NULL, NULL };
  int status;

  if (!prg_name)
    return;

  /* Render our own pid as a decimal string for gdb's attach argument. */
  _g_sprintf (buf, "%u", (guint) getpid ());

  args[1] = (gchar*) prg_name;
  args[2] = buf;

  pid = fork ();
  if (pid == 0)
    {
      /* Child: exec gdb against the parent, then exit without running
       * atexit handlers (hence _exit, not exit). */
      stack_trace (args);
      _exit (0);
    }
  else if (pid == (pid_t) -1)
    {
      perror ("unable to fork gdb");
      return;
    }

  /* Parent blocks until gdb has finished printing the trace. */
  waitpid (pid, &status, 0);
#elif defined(__SYMBIAN32__)
  abort();
#else
  if (IsDebuggerPresent ())
    G_BREAKPOINT ();
  else
    abort ();
#endif
}
/*
 * Finish a log record on the current log stream. For ERROR/FATAL levels
 * the current errno and its text are appended (plus a stack trace on
 * FATAL); every other level except LOG_ALWAYS just gets its line
 * terminated. The stream is flushed and the level returned unchanged.
 */
int gdsql_log_tail(int level) {
    FILE* out = get_stream();
    if (out == 0)
        return level;

    int severe = (level == LOG_FATAL) || (level == LOG_ERROR);
    if (severe) {
        /* Capture errno before any stdio call can clobber it. */
        int saved_errno = errno;
        const char* text = error_text(saved_errno);
        fprintf(out, " (%d: %s)\n", saved_errno, text != 0 ? text : "UNKNOWN");
        if (level == LOG_FATAL)
            stack_trace(level, 2);
    } else if (level != LOG_ALWAYS) {
        fprintf(out, "\n");
    }

    fflush(out);
    return level;
}
static void agent_deschedule(struct sched_state *s, int tid) { struct agent *a = agent_by_tid_or_null(&s->rq, tid); if (a != NULL) { Q_REMOVE(&s->rq, a, nobe); Q_INSERT_FRONT(&s->dq, a, nobe); /* If it's not on the runqueue, we must have already special-case moved * it off in the thread-change event. */ } else if (agent_by_tid_or_null(&s->sq, tid) == NULL) { /* Either it's on the sleep queue, or it vanished. */ if (agent_by_tid_or_null(&s->dq, tid) != NULL) { conf_object_t *cpu = SIM_get_object("cpu0"); char *stack = stack_trace(cpu, GET_CPU_ATTR(cpu, eip), s->cur_agent->tid); lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "TID %d is " "already off the runqueue at tell_off_rq(); " "probably incorrect annotations?\n", tid); lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Current stack: %s\n" COLOUR_DEFAULT, stack); assert(0); } } }
/* Handle returning from a guest malloc call: sanity-check that we were
 * inside an allocation, log it, validate the returned address for the
 * address space in question, and (when the allocation succeeded) record
 * a new heap chunk with its allocation stack trace. base == 0 means the
 * guest allocator returned NULL (out of memory). */
static void mem_exit_bad_place(struct ls_state *ls, bool in_kernel, unsigned int base)
{
	struct mem_state *m = in_kernel ? &ls->kern_mem : &ls->user_mem;

	/* We must currently be inside an alloc, and not nested in free/init. */
	assert(m->in_alloc && "attempt to exit malloc without being in!");
	assert(!m->in_free && "attempt to exit malloc while in free!");
	assert(!m->in_mm_init && "attempt to exit malloc while in init!");

	if (in_kernel != testing_userspace())
		lsprintf(DEV, "Malloc [0x%x | %d]\n", base, m->alloc_request_size);

	/* The result must live in the right address space (user mallocs may
	 * legitimately fail and return 0). */
	if (in_kernel) {
		assert(KERNEL_MEMORY(base));
	} else {
		assert(base == 0 || USER_MEMORY(base));
	}

	if (base != 0) {
		/* Record the new heap chunk, tagged with where it was
		 * allocated from. */
		struct chunk *c = MM_XMALLOC(1, struct chunk);
		c->base = base;
		c->len = m->alloc_request_size;
		c->id = m->heap_next_id;
		c->malloc_trace = stack_trace(ls);
		c->free_trace = NULL;

		m->heap_size += m->alloc_request_size;
		assert(m->heap_next_id != INT_MAX && "need a wider type");
		m->heap_next_id++;
		insert_chunk(&m->heap, c, false);
	} else {
		lsprintf(INFO, "%s seems to be out of memory.\n", K_STR(in_kernel));
	}

	m->in_alloc = false;
}
/* vm86 fault handling */
/*
 * General-protection-fault handler for the vm86/int10 real-mode emulator.
 * Decodes the instruction at CS:IP that trapped and emulates the
 * privileged I/O instructions (in/out/ins/outs, with rep and operand-size
 * prefixes), advancing IP past each handled instruction.
 *
 * Returns TRUE when the instruction was emulated and execution may
 * resume, FALSE when emulation must stop (hlt, a 0x0f trap, or an
 * unrecognized opcode).
 */
static Bool
vm86_GP_fault(xf86Int10InfoPtr pInt)
{
    unsigned char *csp, *lina;
    CARD32 org_eip;
    int pref_seg;
    int done, is_rep, prefix66, prefix67;

    /* csp walks forward over prefixes; lina stays at the start so the
     * prefix count (csp - lina) can be added to IP below. */
    csp = lina = SEG_ADR((unsigned char *), X86_CS, IP);
    is_rep = 0;
    prefix66 = prefix67 = 0;
    pref_seg = -1;

    /* eat up prefixes */
    done = 0;
    do {
	switch (MEM_RB(pInt, (int)csp++)) {
	case 0x66:		/* operand prefix */
	    prefix66=1;
	    break;
	case 0x67:		/* address prefix */
	    prefix67=1;
	    break;
	case 0x2e:		/* CS */
	    pref_seg=X86_CS;
	    break;
	case 0x3e:		/* DS */
	    pref_seg=X86_DS;
	    break;
	case 0x26:		/* ES */
	    pref_seg=X86_ES;
	    break;
	case 0x36:		/* SS */
	    pref_seg=X86_SS;
	    break;
	case 0x65:		/* GS */
	    pref_seg=X86_GS;
	    break;
	case 0x64:		/* FS */
	    pref_seg=X86_FS;
	    break;
	case 0xf0:		/* lock */
	    break;
	case 0xf2:		/* repnz */
	case 0xf3:		/* rep */
	    is_rep=1;
	    break;
	default:
	    done=1;
	}
    } while (!done);
    csp--;			/* oops one too many */
    /* NOTE(review): org_eip is saved here but not used on any visible
     * path below -- presumably kept for debugging. TODO confirm. */
    org_eip = X86_EIP;
    /* Skip IP past the prefix bytes; each case below then advances it
     * past the opcode (and immediate, if any). */
    X86_IP += (csp - lina);

    switch (MEM_RB(pInt, (int)csp)) {
    case 0x6c:			/* insb */
	/* NOTE: ES can't be overwritten; prefixes 66,67 should use esi,edi,ecx
	 * but is anyone using extended regs in real mode? */
	/* WARNING: no test for DI wrapping! */
	X86_EDI += port_rep_inb(pInt, X86_DX, SEG_EADR((CARD32), X86_ES, DI),
				X86_FLAGS & DF, is_rep ? LWECX : 1);
	if (is_rep) LWECX_ZERO;
	X86_IP++;
	break;

    case 0x6d:			/* (rep) insw / insd */
	/* NOTE: ES can't be overwritten */
	/* WARNING: no test for _DI wrapping! */
	if (prefix66) {
	    X86_DI += port_rep_inl(pInt, X86_DX, SEG_ADR((CARD32), X86_ES, DI),
				   X86_EFLAGS & DF, is_rep ? LWECX : 1);
	}
	else {
	    X86_DI += port_rep_inw(pInt, X86_DX, SEG_ADR((CARD32), X86_ES, DI),
				   X86_FLAGS & DF, is_rep ? LWECX : 1);
	}
	if (is_rep) LWECX_ZERO;
	X86_IP++;
	break;

    case 0x6e:			/* (rep) outsb */
	/* outs honors a segment-override prefix; default is DS. */
	if (pref_seg < 0) pref_seg = X86_DS;
	/* WARNING: no test for _SI wrapping! */
	X86_SI += port_rep_outb(pInt, X86_DX, (CARD32)LIN_PREF_SI,
				X86_FLAGS & DF, is_rep ? LWECX : 1);
	if (is_rep) LWECX_ZERO;
	X86_IP++;
	break;

    case 0x6f:			/* (rep) outsw / outsd */
	if (pref_seg < 0) pref_seg = X86_DS;
	/* WARNING: no test for _SI wrapping! */
	if (prefix66) {
	    X86_SI += port_rep_outl(pInt, X86_DX, (CARD32)LIN_PREF_SI,
				    X86_EFLAGS & DF, is_rep ? LWECX : 1);
	}
	else {
	    X86_SI += port_rep_outw(pInt, X86_DX, (CARD32)LIN_PREF_SI,
				    X86_FLAGS & DF, is_rep ? LWECX : 1);
	}
	if (is_rep) LWECX_ZERO;
	X86_IP++;
	break;

    case 0xe5:			/* inw xx, inl xx */
	/* csp[1] is the immediate port number. */
	if (prefix66) X86_EAX = x_inl(csp[1]);
	else X86_AX = x_inw(csp[1]);
	X86_IP += 2;
	break;

    case 0xe4:			/* inb xx */
	X86_AL = x_inb(csp[1]);
	X86_IP += 2;
	break;

    case 0xed:			/* inw dx, inl dx */
	if (prefix66) X86_EAX = x_inl(X86_DX);
	else X86_AX = x_inw(X86_DX);
	X86_IP += 1;
	break;

    case 0xec:			/* inb dx */
	X86_AL = x_inb(X86_DX);
	X86_IP += 1;
	break;

    case 0xe7:			/* outw xx */
	if (prefix66) x_outl(csp[1], X86_EAX);
	else x_outw(csp[1], X86_AX);
	X86_IP += 2;
	break;

    case 0xe6:			/* outb xx */
	x_outb(csp[1], X86_AL);
	X86_IP += 2;
	break;

    case 0xef:			/* outw dx */
	if (prefix66) x_outl(X86_DX, X86_EAX);
	else x_outw(X86_DX, X86_AX);
	X86_IP += 1;
	break;

    case 0xee:			/* outb dx */
	x_outb(X86_DX, X86_AL);
	X86_IP += 1;
	break;

    case 0xf4:			/* hlt: stop emulating. */
#ifdef DEBUG
	ErrorF("hlt at %p\n", lina);
#endif
	return FALSE;

    case 0x0f:
	xf86DrvMsg(pInt->scrnIndex, X_ERROR,
		   "CPU 0x0f Trap at CS:EIP=0x%4.4x:0x%8.8lx\n",
		   X86_CS, X86_EIP);
	goto op0ferr;

    default:
	xf86DrvMsg(pInt->scrnIndex, X_ERROR, "unknown reason for exception\n");

    op0ferr:
	/* Shared failure path: dump state and give up. */
	dump_registers(pInt);
	stack_trace(pInt);
	dump_code(pInt);
	xf86DrvMsg(pInt->scrnIndex, X_ERROR, "cannot continue\n");
	return FALSE;
    }				/* end of switch() */
    return TRUE;
}
asmlinkage void csyscall(struct pt_regs* regs) { unsigned long num = regs->gregs[13]; extern void stack_trace(void); extern void leave_kernel(struct pt_regs* regs); CHECK_STACK(); #if 0 if (user_mode(regs)) { printk("syscall %d; pc == 0x%8x\n", num, get_pc()); stack_trace(); } #endif if (num >= 0 && num < __NR_nocall) { switch(num) { /* * system calls that need the regs */ case __NR_fork: case __NR_clone: case __NR_execve: case __NR_sigsuspend: regs->gregs[0] = ((int (*)(int))(syscall_tab[num]))((int)regs); break; #ifdef DEBUG /* help debug user applications */ case __NR_dbg_break: printk("break: %s\n", regs->gregs[0]); system_break(); break; case __NR_dbg_hexprint: printk("value: %x\n", regs->gregs[0]); break; #endif case __NR_mmap: regs->gregs[0] = sys_mmap(regs); #if 0 dprintk("mmap: returning 0x%8x\n", regs->gregs[0]); #endif break; default: regs->gregs[0] = syscall_tab[num](regs->gregs[0], regs->gregs[1], regs->gregs[2], regs->gregs[3], regs->gregs[4]); break; } } else { regs->gregs[0] = -ENOSYS; } #if 0 printk("csyscall: returning %p\n", regs->gregs[0]); stack_trace(); #endif leave_kernel(regs); }
/*
 * Main per-instruction scheduler hook. Keeps landslide's model of the
 * guest kernel's scheduler in sync (thread switches, lifecycle events,
 * mutex state), then decides whether to fire a timer interrupt to force
 * a preemption at this instruction. Driven entirely by the kern_*
 * annotation predicates applied to ls->eip.
 */
void sched_update(struct ls_state *ls)
{
	struct sched_state *s = &ls->sched;
	int old_tid = s->cur_agent->tid;
	int new_tid;

	/* wait until the guest is ready */
	if (!s->guest_init_done) {
		if (kern_sched_init_done(ls->eip)) {
			s->guest_init_done = true;
			/* Deprecated since kern_get_current_tid went away. */
			// assert(old_tid == new_tid && "init tid mismatch");
		} else {
			return;
		}
	}

	/* The Importance of Being Assertive, A Trivial Style Guideline for
	 * Serious Programmers, by Ben Blum */
	if (s->entering_timer) {
		assert(ls->eip == kern_get_timer_wrap_begin() &&
		       "simics is a clown and tried to delay our interrupt :<");
		s->entering_timer = false;
	} else {
		if (kern_timer_entering(ls->eip)) {
			lsprintf(DEV, "A timer tick that wasn't ours (0x%x).\n",
				 (int)READ_STACK(ls->cpu0, 0));
			ls->eip = avoid_timer_interrupt_immediately(ls->cpu0);
		}
	}

	/**********************************************************************
	 * Update scheduler state.
	 **********************************************************************/

	if (kern_thread_switch(ls->cpu0, ls->eip, &new_tid) && new_tid != old_tid) {
		/*
		 * So, fork needs to be handled twice, both here and below in the
		 * runnable case. And for kernels that trigger both, both places will
		 * need to have a check for whether the newly forked thread exists
		 * already.
		 *
		 * Sleep and vanish actually only need to happen here. They should
		 * check both the rq and the dq, 'cause there's no telling where the
		 * thread got moved to before. As for the descheduling case, that needs
		 * to check for a new type of action flag "asleep" or "vanished" (and
		 * I guess using last_vanished_agent might work), and probably just
		 * assert that that condition holds if the thread couldn't be found
		 * for the normal descheduling case.
		 */
		/* Has to be handled before updating cur_agent, of course. */
		handle_sleep(s);
		handle_vanish(s);
		handle_unsleep(s, new_tid);

		/* Careful! On some kernels, the trigger for a new agent forking
		 * (where it first gets added to the RQ) may happen AFTER its
		 * tcb is set to be the currently running thread. This would
		 * cause this case to be reached before agent_fork() is called,
		 * so agent_by_tid would fail. Instead, we have an option to
		 * find it later. (see the kern_thread_runnable case below.) */
		struct agent *next = agent_by_tid_or_null(&s->rq, new_tid);
		if (next == NULL) next = agent_by_tid_or_null(&s->dq, new_tid);

		if (next != NULL) {
			lsprintf(DEV, "switched threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		/* This fork check is for kernels which context switch to a
		 * newly-forked thread before adding it to the runqueue - and
		 * possibly won't do so at all (if current_extra_runnable). We
		 * need to do agent_fork now. (agent_fork *also* needs to be
		 * handled below, for kernels which don't c-s to the new thread
		 * immediately.) */
		} else if (handle_fork(s, new_tid, false)) {
			next = agent_by_tid_or_null(&s->dq, new_tid);
			assert(next != NULL && "Newly forked thread not on DQ");
			lsprintf(DEV, "switching threads %d -> %d\n", old_tid,
				 new_tid);
			s->last_agent = s->cur_agent;
			s->cur_agent = next;
		} else {
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Couldn't find "
				 "new thread %d; current %d; did you forget to "
				 "tell_landslide_forking()?\n" COLOUR_DEFAULT,
				 new_tid, s->cur_agent->tid);
			assert(0);
		}
		/* Some debug info to help the studence. */
		if (s->cur_agent->tid == kern_get_init_tid()) {
			lsprintf(DEV, "Now running init.\n");
		} else if (s->cur_agent->tid == kern_get_shell_tid()) {
			lsprintf(DEV, "Now running shell.\n");
		} else if (kern_has_idle() && s->cur_agent->tid == kern_get_idle_tid()) {
			lsprintf(DEV, "Now idling.\n");
		}
	}

	s->current_extra_runnable = kern_current_extra_runnable(ls->cpu0);

	/* Out-params for the annotation predicates below. NOTE(review):
	 * these are only written by the predicate that matches on a given
	 * invocation; see the mutex_locking_done note further down. */
	int target_tid;
	int mutex_addr;

	/* Timer interrupt handling. */
	if (kern_timer_entering(ls->eip)) {
		// XXX: same as the comment in the below condition.
		if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
			assert(!ACTION(s, handling_timer));
		} else {
			lsprintf(DEV, "WARNING: allowing a nested timer on "
				 "tid %d's stack\n", s->cur_agent->tid);
		}
		ACTION(s, handling_timer) = true;
		lsprintf(INFO, "%d timer enter from 0x%x\n", s->cur_agent->tid,
			 (unsigned int)READ_STACK(ls->cpu0, 0));
	} else if (kern_timer_exiting(ls->eip)) {
		if (ACTION(s, handling_timer)) {
			// XXX: This condition is a hack to compensate for when
			// simics "sometimes", when keeping a schedule-in-
			// flight, takes the caused timer interrupt immediately,
			// even before the iret.
			if (!kern_timer_exiting(READ_STACK(ls->cpu0, 0))) {
				ACTION(s, handling_timer) = false;
				s->just_finished_reschedule = true;
			}
			/* If the schedule target was in a timer interrupt when we
			 * decided to schedule him, then now is when the operation
			 * finishes landing. (otherwise, see below)
			 * FIXME: should this be inside the above if statement? */
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		} else {
			lsprintf(INFO, "WARNING: exiting a non-timer interrupt "
				 "through a path shared with the timer..? (from 0x%x, #%d)\n", (int)READ_STACK(ls->cpu0, 0), (int)READ_STACK(ls->cpu0, -2));
		}
	/* Context switching. */
	} else if (kern_context_switch_entering(ls->eip)) {
		/* It -is- possible for a context switch to interrupt a
		 * context switch if a timer goes off before c-s disables
		 * interrupts. TODO: if we care, make this an int counter. */
		ACTION(s, context_switch) = true;
		/* Maybe update the voluntary resched trace. See schedule.h */
		if (!ACTION(s, handling_timer)) {
			lsprintf(DEV, "Voluntary resched tid ");
			print_agent(DEV, s->cur_agent);
			printf(DEV, "\n");
			s->voluntary_resched_tid = s->cur_agent->tid;
			if (s->voluntary_resched_stack != NULL)
				MM_FREE(s->voluntary_resched_stack);
			s->voluntary_resched_stack = stack_trace(ls->cpu0, ls->eip, s->cur_agent->tid);
		}
	} else if (kern_context_switch_exiting(ls->eip)) {
		assert(ACTION(s, cs_free_pass) || ACTION(s, context_switch));
		ACTION(s, context_switch) = false;
		ACTION(s, cs_free_pass) = false;
		/* For threads that context switched of their own accord. */
		if (!HANDLING_INTERRUPT(s)) {
			s->just_finished_reschedule = true;
			if (ACTION(s, schedule_target)) {
				ACTION(s, schedule_target) = false;
				s->schedule_in_flight = NULL;
			}
		}
	/* Lifecycle. */
	} else if (kern_forking(ls->eip)) {
		assert_no_action(s, "forking");
		ACTION(s, forking) = true;
	} else if (kern_sleeping(ls->eip)) {
		assert_no_action(s, "sleeping");
		ACTION(s, sleeping) = true;
	} else if (kern_vanishing(ls->eip)) {
		assert_no_action(s, "vanishing");
		ACTION(s, vanishing) = true;
	} else if (kern_readline_enter(ls->eip)) {
		assert_no_action(s, "readlining");
		ACTION(s, readlining) = true;
	} else if (kern_readline_exit(ls->eip)) {
		assert(ACTION(s, readlining));
		ACTION(s, readlining) = false;
	/* Runnable state change (incl. consequences of fork, vanish, sleep). */
	} else if (kern_thread_runnable(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to become runnable. Was it just spawned? */
		if (!handle_fork(s, target_tid, true)) {
			agent_wake(s, target_tid);
		}
	} else if (kern_thread_descheduling(ls->cpu0, ls->eip, &target_tid)) {
		/* A thread is about to deschedule. Is it vanishing/sleeping? */
		agent_deschedule(s, target_tid);
	/* Mutex tracking and noob deadlock detection */
	} else if (kern_mutex_locking(ls->cpu0, ls->eip, &mutex_addr)) {
		//assert(!ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = true;
		s->cur_agent->blocked_on_addr = mutex_addr;
	} else if (kern_mutex_blocking(ls->cpu0, ls->eip, &target_tid)) {
		/* Possibly not the case - if this thread entered mutex_lock,
		 * then switched and someone took it, these would be set already
		 * assert(s->cur_agent->blocked_on == NULL);
		 * assert(s->cur_agent->blocked_on_tid == -1); */
		lsprintf(DEV, "mutex: on 0x%x tid %d blocks, owned by %d\n",
			 s->cur_agent->blocked_on_addr, s->cur_agent->tid,
			 target_tid);
		s->cur_agent->blocked_on_tid = target_tid;
		if (deadlocked(s)) {
			lsprintf(BUG, COLOUR_BOLD COLOUR_RED "DEADLOCK! ");
			print_deadlock(BUG, s->cur_agent);
			printf(BUG, "\n");
			found_a_bug(ls);
		}
	} else if (kern_mutex_locking_done(ls->eip)) {
		//assert(ACTION(s, mutex_locking));
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_locking) = false;
		s->cur_agent->blocked_on = NULL;
		s->cur_agent->blocked_on_tid = -1;
		s->cur_agent->blocked_on_addr = -1;
		/* no need to check for deadlock; this can't create a cycle. */
		/* NOTE(review): mutex_addr is NOT written on this path --
		 * kern_mutex_locking_done() takes only the eip, and the
		 * kern_mutex_locking() branch that does set it ran in an
		 * earlier, separate invocation of this function. As written
		 * this passes an indeterminate value; presumably the mutex
		 * address saved in blocked_on_addr (cleared just above) was
		 * intended. TODO confirm and fix. */
		mutex_block_others(&s->rq, mutex_addr, s->cur_agent,
				   s->cur_agent->tid);
	} else if (kern_mutex_unlocking(ls->cpu0, ls->eip, &mutex_addr)) {
		/* It's allowed to have a mutex_unlock call inside a mutex_lock
		 * (and it can happen), or mutex_lock inside of mutex_lock, but
		 * not the other way around. */
		assert(!ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = true;
		mutex_block_others(&s->rq, mutex_addr, NULL, -1);
	} else if (kern_mutex_unlocking_done(ls->eip)) {
		assert(ACTION(s, mutex_unlocking));
		ACTION(s, mutex_unlocking) = false;
	}

	/**********************************************************************
	 * Exercise our will upon the guest kernel
	 **********************************************************************/

	/* Some checks before invoking the arbiter. First see if an operation of
	 * ours is already in-flight. */
	if (s->schedule_in_flight) {
		if (s->schedule_in_flight == s->cur_agent) {
			/* the in-flight schedule operation is cleared for
			 * landing. note that this may cause another one to
			 * be triggered again as soon as the context switcher
			 * and/or the timer handler finishes; it is up to the
			 * arbiter to decide this. */
			assert(ACTION(s, schedule_target));
			/* this condition should trigger in the middle of the
			 * switch, rather than after it finishes. (which is also
			 * why we leave the schedule_target flag turned on).
			 * the special case is for newly forked agents that are
			 * schedule targets - they won't exit timer or c-s above
			 * so here is where we have to clear it for them. */
			if (ACTION(s, just_forked)) {
				/* Interrupts are "probably" off, but that's why
				 * just_finished_reschedule is persistent. */
				lsprintf(DEV, "Finished flying to %d.\n",
					 s->cur_agent->tid);
				ACTION(s, schedule_target) = false;
				ACTION(s, just_forked) = false;
				s->schedule_in_flight = NULL;
				s->just_finished_reschedule = true;
			} else {
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s));
			}
			/* The schedule_in_flight flag itself is cleared above,
			 * along with schedule_target. Sometimes sched_recover
			 * sets in_flight and needs it not cleared here. */
		} else {
			/* An undesirable thread has been context-switched away
			 * from either from an interrupt handler (timer/kbd) or
			 * of its own accord. We need to wait for it to get back
			 * to its own execution before triggering an interrupt
			 * on it; in the former case, this will be just after it
			 * irets; in the latter, just after the c-s returns. */
			if (kern_timer_exiting(ls->eip) ||
			    (!HANDLING_INTERRUPT(s) &&
			     kern_context_switch_exiting(ls->eip)) ||
			    ACTION(s, just_forked)) {
				/* an undesirable agent just got switched to;
				 * keep the pending schedule in the air. */
				// XXX: this seems to get taken too soon? change
				// it somehow to cause_.._immediately. and then
				// see the asserts/comments in the action
				// handling_timer sections above.
				/* some kernels (pathos) still have interrupts
				 * off or scheduler locked at this point; so
				 * properties of !R */
				if (interrupts_enabled(ls->cpu0) &&
				    kern_ready_for_timer_interrupt(ls->cpu0)) {
					lsprintf(INFO, "keeping schedule in-"
						 "flight at 0x%x\n", ls->eip);
					cause_timer_interrupt(ls->cpu0);
					s->entering_timer = true;
					s->delayed_in_flight = false;
				} else {
					lsprintf(INFO, "Want to keep schedule "
						 "in-flight at 0x%x; have to "
						 "delay\n", ls->eip);
					s->delayed_in_flight = true;
				}
				/* If this was the special case where the
				 * undesirable thread was just forked, keeping
				 * the schedule in flight will cause it to do a
				 * normal context switch. So just_forked is no
				 * longer needed. */
				ACTION(s, just_forked) = false;
			} else if (s->delayed_in_flight &&
				   interrupts_enabled(ls->cpu0) &&
				   kern_ready_for_timer_interrupt(ls->cpu0)) {
				lsprintf(INFO, "Delayed in-flight timer tick "
					 "at 0x%x\n", ls->eip);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
				s->delayed_in_flight = false;
			} else {
				/* they'd better not have "escaped" */
				assert(ACTION(s, cs_free_pass) ||
				       ACTION(s, context_switch) ||
				       HANDLING_INTERRUPT(s) ||
				       !interrupts_enabled(ls->cpu0) ||
				       !kern_ready_for_timer_interrupt(ls->cpu0));
			}
		}
		/* in any case we have no more decisions to make here */
		return;
	} else if (ACTION(s, just_forked)) {
		ACTION(s, just_forked) = false;
		s->just_finished_reschedule = true;
	}
	assert(!s->schedule_in_flight);

	/* Can't do anything before the test actually starts. */
	if (ls->test.current_test == NULL) {
		return;
	}

	/* XXX TODO: This will "leak" an undesirable thread to execute an
	 * instruction if the timer/kbd handler is an interrupt gate, so check
	 * also if we're about to iret and then examine the eflags on the
	 * stack. Also, "sti" and "popf" are interesting, so check for those.
	 * Also, do trap gates enable interrupts if they were off? o_O */
	if (!interrupts_enabled(ls->cpu0)) {
		return;
	}

	/* If a schedule operation is just finishing, we should allow the thread
	 * to get back to its own execution before making another choice. Note
	 * that when we previously decided to interrupt the thread, it will have
	 * executed the single instruction we made the choice at then taken the
	 * interrupt, so we return to the next instruction, not the same one. */
	if (ACTION(s, schedule_target)) {
		return;
	}

	/* TODO: have an extra mode which will allow us to preempt the timer
	 * handler. */
	if (HANDLING_INTERRUPT(s) || !kern_ready_for_timer_interrupt(ls->cpu0)) {
		return;
	}

	/* As kernel_specifics.h says, no preempting during mutex unblocking. */
	if (ACTION(s, mutex_unlocking)) {
		return;
	}

	/* Okay, are we at a choice point? */
	bool voluntary;
	bool just_finished_reschedule = s->just_finished_reschedule;
	s->just_finished_reschedule = false;
	/* TODO: arbiter may also want to see the trace_entry_t */
	if (arbiter_interested(ls, just_finished_reschedule, &voluntary)) {
		struct agent *a;
		bool our_choice;
		/* TODO: as an optimisation (in serialisation state / etc), the
		 * arbiter may return NULL if there was only one possible
		 * choice. */
		if (arbiter_choose(ls, &a, &our_choice)) {
			/* Effect the choice that was made... */
			if (a != s->cur_agent) {
				lsprintf(CHOICE, "from agent %d, arbiter chose "
					 "%d at 0x%x (called at 0x%x)\n",
					 s->cur_agent->tid, a->tid, ls->eip,
					 (unsigned int)READ_STACK(ls->cpu0, 0));
				set_schedule_target(s, a);
				cause_timer_interrupt(ls->cpu0);
				s->entering_timer = true;
			}
			/* Record the choice that was just made. */
			if (ls->test.test_ever_caused &&
			    ls->test.start_population != s->most_agents_ever) {
				save_setjmp(&ls->save, ls, a->tid, our_choice,
					    false, voluntary);
			}
		} else {
			lsprintf(BUG, "no agent was chosen at eip 0x%x\n",
				 ls->eip);
		}
	}
	/* XXX TODO: it may be that not every timer interrupt triggers a context
	 * switch, so we should watch out if a handler doesn't enter the c-s. */
}
bool dbgsymengine::stack_trace(std::ostream& os, CONTEXT * pctx, unsigned skip, const char * fmt) { if (!fmt) return false; dbgsymengine sym(0); return stack_trace(os, sym, pctx, skip, fmt); }
// Write a trace of the stored state to the given stream, delegating
// all formatting to the free-standing stack_trace helper.
void dump(StreamT &os) const
{
    stack_trace(state_, os);
}
int main(int argc,char** argv) { // fixme chdir("/Users/josh/Dev/ThunderBeastGames/Mongrel/artifacts"); try { GArgs.Init(argc, argv); if (SDL_Init(SDL_INIT_VIDEO) < 0) { Sys_Error("SDL_InitSubSystem(): %s\n",SDL_GetError()); } // Install signal handlers signal(SIGABRT, signal_handler); signal(SIGFPE, signal_handler); signal(SIGILL, signal_handler); signal(SIGSEGV, signal_handler); signal(SIGTERM, signal_handler); signal(SIGINT, signal_handler); signal(SIGKILL, signal_handler); signal(SIGQUIT, signal_handler); Host_Init(); while (1) { Host_Frame(); // Cmd processing was moved here (and removed from Host_Init/Host_Frame // a side effect of removing it from Host_Init is that it becomes impossible // to set some variables before refresh inits, etc // Though, for instance the +map command tries to load a level before the // refresh/graphics driver is initialized, which causes texture generation for example // to fail (refresh is initialized in first Host_Frame screen update //Process console commands GCmdBuf.Exec(); } } catch (VavoomError &e) { Host_Shutdown(); stack_trace(); printf("\n%s\n", e.message); dprintf("\n\nERROR: %s\n", e.message); SDL_Quit(); exit(1); } catch (...) { Host_Shutdown(); dprintf("\n\nExiting due to external exception\n"); fprintf(stderr, "\nExiting due to external exception\n"); throw; } }
// Default constructor: capture the call-site backtrace at the moment
// the exception object is created.
exception::exception()
	: std::exception()
	, _trace(stack_trace())
{
}
/*
 * One step of a pre-whitened NLMS (normalized least-mean-squares)
 * adaptive filter for echo cancellation.
 *
 * e:      filter state (tap weights w, sample history x, whitened
 *         history xf, running power estimate dotp_xf_xf, write index j)
 * tx:     near-end (desired) sample
 * rx:     far-end (reference) sample, pushed into the history
 * update: nonzero to adapt the tap weights this step
 *
 * Returns the error sample err = tx - w.x (the echo-cancelled output).
 */
static float nlms_pw(echo *e, float tx, float rx, int update)
{
    int j = e->j;

    /* Newest sample goes at index j; x and xf are sliding windows. */
    e->x[j] = rx;
    e->xf[j] = iir_highpass(e->Fx, rx); /* pre-whitening of x */

    float dotp_w_x = dotp(e->w, e->x+j);
    float err = tx - dotp_w_x;
    float ef = iir_highpass(e->Fe, err); /* pre-whitening of err */

    if (isnan(ef)) {
        DEBUG_LOG("%s\n", "ef went NaN");
        stack_trace(1);
    }

    /* Iterative update */
    /* Running power of xf over the window: add the new sample's energy,
     * drop the sample that just slid out. */
    e->dotp_xf_xf += (e->xf[j] * e->xf[j]
                      - e->xf[j+NLMS_LEN-1] * e->xf[j+NLMS_LEN-1]);

    /* Exact-zero power would divide by zero below; presumably only
     * reachable on pathological input. Exact float == comparison is
     * deliberate here. */
    if (e->dotp_xf_xf == 0.0) {
        DEBUG_LOG("%s\n", "dotp_xf_xf went to zero");
        int i;
        for (i = 0; i < NLMS_LEN; i++) {
            DEBUG_LOG("%.02f ", e->xf[j+i]);
        }
        DEBUG_LOG("%s\n\n", "");
        stack_trace(1);
    }

    if (update) {
        /* Normalized step: scale the whitened error by the window power. */
        float u_ef = STEPSIZE * ef / e->dotp_xf_xf;
        if (isinf(u_ef)) {
            DEBUG_LOG("%s\n", "u_ef went infinite");
            DEBUG_LOG("ef: %f\tdotp_xf_xf: %f\n", ef, e->dotp_xf_xf);
            stack_trace(1);
        }
        /* Update tap weights */
        /* Unrolled by two; assumes NLMS_LEN is even -- TODO confirm. */
        int i;
        for (i = 0; i < NLMS_LEN; i += 2) {
            e->w[i] += u_ef*e->xf[j+i];
            e->w[i+1] += u_ef*e->xf[j+i+1];
        }
    }

    /* Keep us within our sample buffers */
    /* The window fills downward from NLMS_EXT; when j hits -1, slide the
     * whole history up and restart at the top. */
    if (--e->j < 0) {
        e->j = NLMS_EXT;
        memmove(e->x+e->j+1, e->x, (NLMS_LEN-1)*sizeof(float));
        memmove(e->xf+e->j+1, e->xf, (NLMS_LEN-1)*sizeof(float));
    }

    return err;
}
/*
 * Format a caught V8 exception (or a failure code) as a printable string.
 *
 * try_catch: the V8 TryCatch that may hold a pending exception.
 * hr:        fallback result code, used when nothing was caught.
 *
 * Preference order: stack trace (from its first " at " frame line),
 * then a synthesized "message\n at file:line:col" string, then the bare
 * exception text; with no caught exception, the message for hr (when
 * negative) or the empty string.
 */
exlib::string GetException(TryCatch &try_catch, result_t hr)
{
    if (try_catch.HasCaught()) {
        v8::String::Utf8Value exception(try_catch.Exception());
        v8::Local<v8::Message> message = try_catch.Message();
        if (message.IsEmpty())
            /* No Message object: only the exception value is available. */
            return ToCString(exception);
        else {
            v8::Local<v8::Value> trace_value = try_catch.StackTrace();
            if (!IsEmpty(trace_value)) {
                v8::String::Utf8Value stack_trace(trace_value);
                const char* s = ToCString(stack_trace);
                /* Scan line by line for the first line starting with the
                 * " at " frame prefix (7-char compare), then return the
                 * exception text followed by the trace from that line on. */
                const char* s1 = qstrchr(s, '\n');
                while (s1 && qstrcmp(s1 + 1, " at ", 7))
                    s1 = qstrchr(s1 + 1, '\n');
                if (s1)
                    return exlib::string(ToCString(exception)) + s1;
            }

            /* No usable stack trace: build "message\n at file:line:col". */
            exlib::string strError;

            v8::String::Utf8Value filename(message->GetScriptResourceName());

            if (qstrcmp(ToCString(exception), "SyntaxError: ", 13)) {
                strError.append(ToCString(exception));
                strError.append("\n at ");
            } else {
                /* SyntaxError: drop the 13-char "SyntaxError: " prefix. */
                strError.append((ToCString(exception) + 13));
                strError.append("\n at ");
            }

            strError.append(ToCString(filename));

            int32_t lineNumber = message->GetLineNumber();
            if (lineNumber > 0) {
                char numStr[32];

                /* Append ":line:column" (column is converted to 1-based). */
                strError.append(1, ':');
                sprintf(numStr, "%d", lineNumber);
                strError.append(numStr);

                strError.append(1, ':');
                sprintf(numStr, "%d", message->GetStartColumn() + 1);
                strError.append(numStr);
            }

            return strError;
        }
    } else if (hr < 0)
        return getResultMessage(hr);

    return "";
}
/*
 * Interactive driver for the stack library. Commands on stdin:
 *   s <ints...>  push up to 128 integers ("set"), echoing them back
 *   g <n>        pop n integers ("get") and print them
 * Whitespace is skipped between commands; any other character ends the
 * session. After each command the stack contents are traced. Exits with
 * -1 on any allocation or stack-operation failure.
 */
int main(void)
{
	int ch, rc;
	int values[128];
	size_t i, count;

	stack_t *st = stack_initialize(sizeof(int), DEFAULT_RELEASE_FUNCTION);
	if (!st) {
		fputs("Error!\n", stderr);
		return -1;
	}

	for (;;) {
		ignore_space(stdin);
		ch = fgetc(stdin);

		if (ch == 's') {
			/* "set": read integers (peeking one char ahead for a
			 * sign or digit) until the line or capacity ends. */
			fputs("set ", stdout);
			for (i = 0; i != 128; i++) {
				ignore_space(stdin);
				ch = fgetc(stdin);
				ungetc(ch, stdin);
				if (ch == '\n' ||
				    (ch != '+' && ch != '-' && !isdigit(ch)))
					break;
				values[i] = read_integer(stdin);
				fprintf(stdout, "%d ", values[i]);
			}
			ignore_to_newline(stdin);

			rc = stack_push_many_elements(st, i, values);
			if (rc) {
				fputs("Error!\n", stderr);
				return -1;
			}
			fputc('\n', stdout);
			fputs("stack trace : ", stdout);
			stack_trace(st);
		} else if (ch == 'g') {
			/* "get": pop the requested number of elements. */
			fputs("get ", stdout);
			ignore_space(stdin);
			count = read_integer(stdin);
			ignore_to_newline(stdin);

			rc = stack_pop_many_elements(st, count, values);
			if (rc) {
				fputs("Error!\n", stderr);
				return -1;
			}
			for (i = 0; i != count; i++)
				fprintf(stdout, "%d ", values[i]);
			fputc('\n', stdout);
			fputs("stack trace : ", stdout);
			stack_trace(st);
		} else if (!isspace(ch)) {
			/* Any other non-space character ends the session. */
			break;
		}
	}

	stack_release(st);
	return 0;
}