/** * Try to execute |step|, adjusting for |req| if needed. Return 0 if * |step| was made, or nonzero if there was a trap or |step| needs * more work. */ static int try_one_trace_step(struct context* ctx, const struct rep_trace_step* step, const struct dbg_request* req) { int stepi = (DREQ_STEP == req->type && get_threadid(ctx) == req->target); switch (step->action) { case TSTEP_RETIRE: return 0; case TSTEP_ENTER_SYSCALL: return enter_syscall(ctx, step, stepi); case TSTEP_EXIT_SYSCALL: return exit_syscall(ctx, step, stepi); case TSTEP_DETERMINISTIC_SIGNAL: return emulate_deterministic_signal(ctx, step->signo, stepi); case TSTEP_PROGRAM_ASYNC_SIGNAL_INTERRUPT: return emulate_async_signal(ctx, step->target.rcb, step->target.regs, step->target.signo, stepi); default: fatal("Unhandled step type %d", step->action); return 0; } }
/* Expand the generator, gen_fmt, into the buffer gen that has size gensize.
 * Return 0 if it fits, 1 if it does not (or if formatting fails).
 * %p is converted to the process ID.
 * %t is converted to the thread ID.
 * If (gen_fmt[0] == 0) then just the pid[.tid] default is used.
 * At most one %p and one %t are allowed (only the first occurrence of
 * each is expanded; any later occurrences are copied literally).
 */
static int expand_gen(const char *gen_fmt, char *gen, int gensize)
{
    int needed;
    char *pp;   /* first "%p" in gen_fmt, or NULL */
    char *pt;   /* first "%t" in gen_fmt, or NULL */

    pp = strstr(gen_fmt, "%p");
    pt = strstr(gen_fmt, "%t");
    if (gen_fmt[0] == 0) {
        /* Use default generator */
#ifdef NOTHREADID
        needed = snprintf(gen, gensize, "%ld", (long)getpid());
#else
#ifdef LUSETHREAD
        needed = snprintf(gen, gensize, "%ld.%ld", (long)getpid(),
                          get_threadid());
#else
        needed = snprintf(gen, gensize, "%ld", (long)getpid());
#endif
#endif
    } else if ((pt == NULL) && (pp == NULL)) {
        /* No conversions: copy verbatim. */
        needed = snprintf(gen, gensize, "%s", gen_fmt);
    } else if (pt == NULL) {
        /* Only %p: prefix + pid + suffix. */
        needed = snprintf(gen, gensize, "%.*s%ld%s",
                          (int)(pp - gen_fmt), gen_fmt,
                          (long)getpid(), pp + 2);
    } else if (pp == NULL) {
        /* Only %t: prefix + tid + suffix. */
        needed = snprintf(gen, gensize, "%.*s%ld%s",
                          (int)(pt - gen_fmt), gen_fmt,
                          get_threadid(), pt + 2);
    } else if (pp < pt) {
        /* %p before %t. */
        needed = snprintf(gen, gensize, "%.*s%ld%.*s%ld%s",
                          (int)(pp - gen_fmt), gen_fmt, (long)getpid(),
                          (int)(pt - pp - 2), pp + 2,
                          get_threadid(), pt + 2);
    } else {
        /* %t before %p. */
        needed = snprintf(gen, gensize, "%.*s%ld%.*s%ld%s",
                          (int)(pt - gen_fmt), gen_fmt, get_threadid(),
                          (int)(pp - pt - 2), pt + 2,
                          (long)getpid(), pp + 2);
    }
    /* snprintf returns a negative value on an output error; treat that
     * as "does not fit" rather than silently reporting success. */
    if (needed < 0 || needed >= gensize)
        return 1;
    return 0;
}
/* * !!! WARNING !!! * * This function should be used ONLY after a fatal signal. We walk through the * JCR chain without doing any lock, Bacula should not be running. */ void dbg_print_jcr(FILE *fp) { char buf1[128], buf2[128], buf3[128], buf4[128]; if (!jcrs) { return; } fprintf(fp, "Attempt to dump current JCRs. njcrs=%d\n", jcrs->size()); for (JCR *jcr = (JCR *)jcrs->first(); jcr ; jcr = (JCR *)jcrs->next(jcr)) { fprintf(fp, "threadid=%p JobId=%d JobStatus=%c jcr=%p name=%s\n", get_threadid(jcr->my_thread_id), (int)jcr->JobId, jcr->JobStatus, jcr, jcr->Job); fprintf(fp, "threadid=%p killable=%d JobId=%d JobStatus=%c " "jcr=%p name=%s\n", get_threadid(jcr->my_thread_id), jcr->is_killable(), (int)jcr->JobId, jcr->JobStatus, jcr, jcr->Job); fprintf(fp, "\tuse_count=%i\n", jcr->use_count()); fprintf(fp, "\tJobType=%c JobLevel=%c\n", jcr->getJobType(), jcr->getJobLevel()); bstrftime(buf1, sizeof(buf1), jcr->sched_time); bstrftime(buf2, sizeof(buf2), jcr->start_time); bstrftime(buf3, sizeof(buf3), jcr->end_time); bstrftime(buf4, sizeof(buf4), jcr->wait_time); fprintf(fp, "\tsched_time=%s start_time=%s\n\tend_time=%s wait_time=%s\n", buf1, buf2, buf3, buf4); fprintf(fp, "\tdb=%p db_batch=%p batch_started=%i\n", jcr->db, jcr->db_batch, jcr->batch_started); /* * Call all the jcr debug hooks */ for(int i=0; i < dbg_jcr_handler_count; i++) { dbg_jcr_hook_t *hook = dbg_jcr_hooks[i]; hook(jcr, fp); } } }
void producer() { int i; for(i = 0; i < PRODUCE_LIMIT;) { if(count == BUF_SIZE) yield(); else { printf("%d --- %d --- produce \n",id,get_threadid()); buffer[count] = id; count++; id++; i++; } } }
/* Consume items from the shared buffer.  Terminates after finding the
 * buffer empty on two consecutive passes. */
void consumer()
{
	int idle_passes = 0;
	int item;

	while (idle_passes != 2) {
		if (count == 0) {
			/* Nothing to consume: yield and note the idle pass. */
			yield();
			idle_passes++;
		} else {
			idle_passes = 0;
			item = buffer[--count];
			printf("%d --- %d --- consume\n", item, get_threadid());
		}
	}
}
static void replay_one_trace_frame(struct dbg_context* dbg, struct context* ctx) { struct dbg_request req; struct rep_trace_step step; int event = ctx->trace.stop_reason; int stop_sig = 0; debug("%d: replaying event %s, state %s", ctx->rec_tid, strevent(event), statename(ctx->trace.state)); if (ctx->syscallbuf_hdr) { debug(" (syscllbufsz:%u, abrtcmt:%u)", ctx->syscallbuf_hdr->num_rec_bytes, ctx->syscallbuf_hdr->abort_commit); } /* Advance the trace until we've exec()'d the tracee before * processing debugger requests. Otherwise the debugger host * will be confused about the initial executable image, * rr's. */ if (validate) { req = process_debugger_requests(dbg, ctx); assert(dbg_is_resume_request(&req)); } /* print some kind of progress */ if (ctx->trace.global_time % 10000 == 0) { fprintf(stderr, "time: %u\n",ctx->trace.global_time); } if (ctx->child_sig != 0) { assert(event == -ctx->child_sig || event == -(ctx->child_sig | DET_SIGNAL_BIT)); ctx->child_sig = 0; } /* Ask the trace-interpretation code what to do next in order * to retire the current frame. */ memset(&step, 0, sizeof(step)); switch (event) { case USR_INIT_SCRATCH_MEM: { /* for checksumming: make a note that this area is * scratch and need not be validated. */ struct mmapped_file file; read_next_mmapped_file_stats(&file); replay_init_scratch_memory(ctx, &file); add_scratch((void*)ctx->trace.recorded_regs.eax, file.end - file.start); step.action = TSTEP_RETIRE; break; } case USR_EXIT: rep_sched_deregister_thread(&ctx); /* Early-return because |ctx| is gone now. 
*/ return; case USR_ARM_DESCHED: case USR_DISARM_DESCHED: rep_skip_desched_ioctl(ctx); /* TODO */ step.action = TSTEP_RETIRE; break; case USR_SYSCALLBUF_ABORT_COMMIT: ctx->syscallbuf_hdr->abort_commit = 1; step.action = TSTEP_RETIRE; break; case USR_SYSCALLBUF_FLUSH: rep_process_flush(ctx, rr_flags->redirect); /* TODO */ step.action = TSTEP_RETIRE; break; case USR_SYSCALLBUF_RESET: ctx->syscallbuf_hdr->num_rec_bytes = 0; step.action = TSTEP_RETIRE; break; case USR_SCHED: step.action = TSTEP_PROGRAM_ASYNC_SIGNAL_INTERRUPT; step.target.rcb = ctx->trace.rbc; step.target.regs = &ctx->trace.recorded_regs; step.target.signo = 0; break; case SIG_SEGV_RDTSC: step.action = TSTEP_DETERMINISTIC_SIGNAL; step.signo = SIGSEGV; break; default: /* Pseudosignals are handled above. */ assert(event > LAST_RR_PSEUDOSIGNAL); if (FIRST_DET_SIGNAL <= event && event <= LAST_DET_SIGNAL) { step.action = TSTEP_DETERMINISTIC_SIGNAL; step.signo = (-event & ~DET_SIGNAL_BIT); stop_sig = step.signo; } else if (event < 0) { assert(FIRST_ASYNC_SIGNAL <= event && event <= LAST_ASYNC_SIGNAL); step.action = TSTEP_PROGRAM_ASYNC_SIGNAL_INTERRUPT; step.target.rcb = ctx->trace.rbc; step.target.regs = &ctx->trace.recorded_regs; step.target.signo = -event; stop_sig = step.target.signo; } else { assert(event > 0); /* XXX not so pretty ... */ validate |= (ctx->trace.state == STATE_SYSCALL_EXIT && event == SYS_execve); rep_process_syscall(ctx, rr_flags->redirect, &step); } } /* See the comment below about *not* resetting the hpc for * buffer flushes. Here, we're processing the *other* event, * just after the buffer flush, where the rcb matters. To * simplify the advance-to-target code that follows (namely, * making debugger interrupts simpler), pretend like the * execution in the BUFFER_FLUSH didn't happen by resetting * the rbc and compensating down the target rcb. 
*/ if (TSTEP_PROGRAM_ASYNC_SIGNAL_INTERRUPT == step.action) { uint64_t rcb_now = read_rbc(ctx->hpc); assert(step.target.rcb >= rcb_now); step.target.rcb -= rcb_now; reset_hpc(ctx, 0); } /* Advance until |step| has been fulfilled. */ while (try_one_trace_step(ctx, &step, &req)) { struct user_regs_struct regs; /* Currently we only understand software breakpoints * and successful stepi's. */ assert(SIGTRAP == ctx->child_sig && "Unknown trap"); read_child_registers(ctx->child_tid, ®s); if (ip_is_breakpoint((void*)regs.eip)) { /* SW breakpoint: $ip is just past the * breakpoint instruction. Move $ip back * right before it. */ regs.eip -= sizeof(int_3_insn); write_child_registers(ctx->child_tid, ®s); } else { /* Successful stepi. Nothing else to do. */ assert(DREQ_STEP == req.type && req.target == get_threadid(ctx)); } /* Don't restart with SIGTRAP anywhere. */ ctx->child_sig = 0; /* Notify the debugger and process any new requests * that might have triggered before resuming. */ dbg_notify_stop(dbg, get_threadid(ctx), 0x05/*gdb mandate*/); req = process_debugger_requests(dbg, ctx); assert(dbg_is_resume_request(&req)); } if (dbg && stop_sig) { dbg_notify_stop(dbg, get_threadid(ctx), stop_sig); } /* We flush the syscallbuf in response to detecting *other* * events, like signal delivery. Flushing the syscallbuf is a * sort of side-effect of reaching the other event. But once * we've flushed the syscallbuf during replay, we still must * reach the execution point of the *other* event. For async * signals, that requires us to have an "intact" rbc, with the * same value as it was when the last buffered syscall was * retired during replay. We'll be continuing from that rcb * to reach the rcb we recorded at signal delivery. So don't * reset the counter for buffer flushes. (It doesn't matter * for non-async-signal types, which are deterministic.) 
*/ switch (ctx->trace.stop_reason) { case USR_SYSCALLBUF_ABORT_COMMIT: case USR_SYSCALLBUF_FLUSH: case USR_SYSCALLBUF_RESET: break; default: reset_hpc(ctx, 0); } debug_memory(ctx); }
/** * Reply to debugger requests until the debugger asks us to resume * execution. */ static struct dbg_request process_debugger_requests(struct dbg_context* dbg, struct context* ctx) { if (!dbg) { return continue_all_tasks; } while (1) { struct dbg_request req = dbg_get_request(dbg); struct context* target = NULL; if (dbg_is_resume_request(&req)) { return req; } target = (req.target > 0) ? rep_sched_lookup_thread(req.target) : ctx; switch (req.type) { case DREQ_GET_CURRENT_THREAD: { dbg_reply_get_current_thread(dbg, get_threadid(ctx)); continue; } case DREQ_GET_IS_THREAD_ALIVE: dbg_reply_get_is_thread_alive(dbg, !!target); continue; case DREQ_GET_MEM: { size_t len; byte* mem = read_mem(target, req.mem.addr, req.mem.len, &len); dbg_reply_get_mem(dbg, mem, len); sys_free((void**)&mem); continue; } case DREQ_GET_OFFSETS: /* TODO */ dbg_reply_get_offsets(dbg); continue; case DREQ_GET_REG: { struct user_regs_struct regs; dbg_regvalue_t val; read_child_registers(target->child_tid, ®s); val.value = get_reg(®s, req.reg, &val.defined); dbg_reply_get_reg(dbg, val); continue; } case DREQ_GET_REGS: { struct user_regs_struct regs; struct dbg_regfile file; int i; dbg_regvalue_t* val; read_child_registers(target->child_tid, ®s); memset(&file, 0, sizeof(file)); for (i = DREG_EAX; i < DREG_NUM_USER_REGS; ++i) { val = &file.regs[i]; val->value = get_reg(®s, i, &val->defined); } val = &file.regs[DREG_ORIG_EAX]; val->value = get_reg(®s, DREG_ORIG_EAX, &val->defined); dbg_reply_get_regs(dbg, &file); continue; } case DREQ_GET_STOP_REASON: { dbg_reply_get_stop_reason(dbg, target->rec_tid, target->child_sig); continue; } case DREQ_GET_THREAD_LIST: { pid_t* tids; size_t len; rep_sched_enumerate_tasks(&tids, &len); dbg_reply_get_thread_list(dbg, tids, len); sys_free((void**)&tids); continue; } case DREQ_INTERRUPT: /* Tell the debugger we stopped and await * further instructions. 
*/ dbg_notify_stop(dbg, get_threadid(ctx), 0); continue; case DREQ_SET_SW_BREAK: set_sw_breakpoint(target, &req); dbg_reply_watchpoint_request(dbg, 0); continue; case DREQ_REMOVE_SW_BREAK: remove_sw_breakpoint(target, &req); dbg_reply_watchpoint_request(dbg, 0); break; case DREQ_REMOVE_HW_BREAK: case DREQ_REMOVE_RD_WATCH: case DREQ_REMOVE_WR_WATCH: case DREQ_REMOVE_RDWR_WATCH: case DREQ_SET_HW_BREAK: case DREQ_SET_RD_WATCH: case DREQ_SET_WR_WATCH: case DREQ_SET_RDWR_WATCH: dbg_reply_watchpoint_request(dbg, -1); continue; default: fatal("Unknown debugger request %d", req.type); } } }