/*
 * Serialize one pandalog entry and append it to the gzipped log file.
 *
 * Fills in the required pc/instr fields (the calling plugin has already
 * filled in everything else): inside the main loop they reflect current
 * guest state, otherwise they are set to -1 as a "not applicable" marker.
 *
 * On-disk format: a raw size_t length prefix followed by the packed
 * protobuf bytes.  NOTE(review): the size_t prefix makes the log format
 * word-size dependent; a fixed-width type would be more portable, but
 * changing it would break existing log readers.
 */
void pandalog_write_entry(Panda__LogEntry *entry) {
    // fill in required fields.
    // NOTE: any other fields will already have been filled in
    // by the plugin that made this call.
    if (panda_in_main_loop) {
        entry->pc = panda_current_pc(cpu_single_env);
        entry->instr = rr_get_guest_instr_count ();
    }
    else {
        entry->pc = -1;
        entry->instr = -1;
    }
    size_t n = panda__log_entry__get_packed_size(entry);
    resize_pandalog(n);
    panda__log_entry__pack(entry, pandalog_buf);
    // write size of log entry
    // gzwrite returns the number of uncompressed bytes written, or 0 on
    // error.  Bug fix: the old check (x == 0) silently accepted short
    // writes; require the full byte count, and report errors on stderr.
    int x = gzwrite(pandalog_file, (void *) &n, sizeof(n));
    if (x != (int) sizeof(n)) {
        fprintf(stderr, "gzwrite for pandalog failed\n");
    }
    // and then the entry itself
    x = gzwrite(pandalog_file, pandalog_buf, n);
    if (x != (int) n) {
        fprintf(stderr, "gzwrite for pandalog failed\n");
    }
}
/*
 * Return the current address-space identifier (asid) for the guest.
 * Architecture-independent wrapper; targets with no notion of an asid
 * get 0.
 */
target_ulong panda_current_asid(CPUState *env) {
#if defined(TARGET_I386) || defined(TARGET_X86_64)
    // x86: CR3 holds the page-directory base, which identifies the
    // current address space.
    return env->cr[3];
#elif defined(TARGET_ARM)
    // ARM: the translation-table base covering the current pc.
    return arm_get_vaddr_table(env, panda_current_pc(env));
#else
    // No asid implemented for this target.
    return 0;
#endif
}
/* returns current asid or address-space id. architecture-independent */ target_ulong panda_current_asid(CPUState *env) { #if (defined TARGET_I386 || defined TARGET_X86_64) return env->cr[3]; #elif defined(TARGET_ARM) return arm_get_vaddr_table(env, panda_current_pc(env)); #else #error "panda_current_asid() not implemented for target architecture." return 0; #endif }
/*
 * Write one pandalog entry into the current in-memory chunk, flushing
 * the chunk first when this entry would overflow it AND belongs to a
 * different instruction than the previous entry.
 *
 * Invariant: all log entries for a given guest instruction live in a
 * single chunk.
 *
 * In-chunk format: uint32_t packed size, then the packed protobuf bytes.
 */
void pandalog_write_entry(Panda__LogEntry *entry) {
    // fill in required fields.
    if (panda_in_main_loop) {
        entry->pc = panda_current_pc(first_cpu);
        entry->instr = rr_get_guest_instr_count ();
    }
    else {
        entry->pc = -1;
        entry->instr = -1;
    }
    size_t n = panda__log_entry__get_packed_size(entry);
    // possibly compress and write current chunk and move on to next chunk
    // but dont do so if it would spread log entries for same instruction between chunks
    // invariant: all log entries for an instruction belong in a single chunk
    if ((instr_last_entry != -1) // first entry written
        && (instr_last_entry != entry->instr)
        && (thePandalog->chunk.buf_p + n >= thePandalog->chunk.buf + thePandalog->chunk.size)) {
        // entry won't fit in current chunk
        // and new entry is a different instr from last entry written
        write_current_chunk();
    }
    // A run of entries for a single instruction can overflow the chunk.
    // TRL 2016-05-10: this legitimately happens when pandalogging from
    // uninit_plugin -- many entries for the very last instruction in the
    // trace -- so grow the buffer instead of asserting.
    if (thePandalog->chunk.buf_p + sizeof(uint32_t) + n
        >= thePandalog->chunk.buf + ((int)(floor(thePandalog->chunk.size)))) {
        uint32_t offset = thePandalog->chunk.buf_p - thePandalog->chunk.buf;
        // Bug fix: offset * 2 alone is not guaranteed to fit the new
        // entry (and is 0 when the chunk is empty).  Grow to whichever
        // is larger: double the used size, or exactly what this entry
        // needs (size prefix included).
        uint32_t needed = offset + sizeof(uint32_t) + n;
        uint32_t new_size = offset * 2;
        if (new_size < needed) {
            new_size = needed;
        }
        printf ("reallocing chunk.buf to %u bytes\n", new_size);
        // Bug fix: realloc via a temporary.  The old code overwrote
        // chunk.buf with the realloc result and computed buf_p from it
        // *before* the NULL assert -- on failure that leaked the old
        // buffer and did pointer arithmetic on NULL.
        unsigned char *new_buf = (unsigned char *) realloc(thePandalog->chunk.buf, new_size);
        assert (new_buf != NULL);
        thePandalog->chunk.buf = new_buf;
        thePandalog->chunk.buf_p = new_buf + offset;
        // NOTE(review): chunk.size is deliberately left unchanged, as in
        // the original -- it is the nominal chunk size used to decide
        // when to flush; confirm that is the intended semantics.
    }
    // now write the entry itself to the buffer. size then entry itself
    *((uint32_t *) thePandalog->chunk.buf_p) = n;
    thePandalog->chunk.buf_p += sizeof(uint32_t);
    // and then the entry itself (packed)
    panda__log_entry__pack(entry, thePandalog->chunk.buf_p);
    thePandalog->chunk.buf_p += n;
    // remember instr for last entry
    instr_last_entry = entry->instr;
    thePandalog->chunk.ind_entry ++;
}
/*
 * taint2 callback: invoked on a conditional branch whose condition lives
 * in an llvm register.  If any of the 'size' bytes backing the register
 * is tainted, either record an (asid, pc) summary entry or emit a full
 * TaintedBranch pandalog entry with one taint query per tainted byte.
 */
void tbranch_on_branch_taint2(Addr a, uint64_t size) {
    if (pandalog) {
        // a is an llvm reg
        assert (a.typ == LADDR);
        // count number of tainted bytes on this reg
        uint32_t num_tainted = 0;
        for (uint32_t o=0; o<size; o++) {
            Addr ao = a;
            ao.off = o;
            num_tainted += (taint2_query(ao) != 0);
        }
        if (num_tainted > 0) {
            if (summary) {
                // summary mode: just remember where tainted branches occur
                CPUState *cpu = first_cpu;
                target_ulong asid = panda_current_asid(cpu);
                tainted_branch[asid].insert(panda_current_pc(cpu));
            }
            else {
                Panda__TaintedBranch *tb = (Panda__TaintedBranch *) malloc(sizeof(Panda__TaintedBranch));
                assert (tb != NULL); // bug fix: malloc result was unchecked
                *tb = PANDA__TAINTED_BRANCH__INIT;
                tb->call_stack = pandalog_callstack_create();
                tb->n_taint_query = num_tainted;
                tb->taint_query = (Panda__TaintQuery **) malloc (sizeof (Panda__TaintQuery *) * num_tainted);
                assert (tb->taint_query != NULL);
                // query each tainted byte again to build the pandalog records
                uint32_t i=0;
                for (uint32_t o=0; o<size; o++) {
                    Addr ao = a;
                    ao.off = o;
                    if (taint2_query(ao)) {
                        tb->taint_query[i++] = taint2_query_pandalog(ao, o);
                    }
                }
                Panda__LogEntry ple = PANDA__LOG_ENTRY__INIT;
                ple.tainted_branch = tb;
                pandalog_write_entry(&ple);
                // tear down in reverse order of construction
                pandalog_callstack_free(tb->call_stack);
                for (uint32_t j=0; j<num_tainted; j++) { // j: avoid shadowing i above
                    pandalog_taint_query_free(tb->taint_query[j]);
                }
                free(tb->taint_query); // bug fix: the query array was leaked
                free(tb);
            }
        }
    }
}
/*
 * Write one pandalog entry into the current in-memory chunk, flushing
 * the chunk first when this entry would overflow it AND belongs to a
 * different instruction than the previous entry.
 *
 * Invariant: all log entries for a given guest instruction live in a
 * single chunk.
 *
 * In-chunk format: uint32_t packed size, then the packed protobuf bytes.
 */
void pandalog_write_entry(Panda__LogEntry *entry) {
    // fill in required fields.
    if (panda_in_main_loop) {
        entry->pc = panda_current_pc(cpu_single_env);
        entry->instr = rr_get_guest_instr_count ();
    }
    else {
        // not in the main loop: pc/instr are meaningless, mark with -1
        entry->pc = -1;
        entry->instr = -1;
    }
    size_t n = panda__log_entry__get_packed_size(entry);
    // possibly compress and write current chunk and move on to next chunk
    // but dont do so if it would spread log entries for same instruction between chunks
    // invariant: all log entries for an instruction belong in a single chunk
    if ((instr_last_entry != -1) // first entry written
        && (instr_last_entry != entry->instr)
        && (thePandalog->chunk.buf_p + n >= thePandalog->chunk.buf + thePandalog->chunk.size)) {
        // entry won't fit in current chunk
        // and new entry is a different instr from last entry written
        write_current_chunk();
    }
    // sanity check. If this fails, that means a large number of pandalog entries
    // for same instr went off the end of a chunk, which was already allocated bigger than needed.
    // possible. but I'd rather assert its not and understand why before adding auto realloc here.
    // NOTE(review): this assert CAN fire legitimately -- e.g. many entries logged
    // for the very last instruction from a plugin's uninit -- since only
    // SLACK_MULT headroom exists beyond the nominal chunk size; consider
    // growing the buffer instead of asserting.
    assert (thePandalog->chunk.buf_p + sizeof(uint32_t) + n < thePandalog->chunk.buf + ((int)(floor(thePandalog->chunk.size * SLACK_MULT))));
    // now write the entry itself to the buffer. size then entry itself
    *((uint32_t *) thePandalog->chunk.buf_p) = n;
    thePandalog->chunk.buf_p += sizeof(uint32_t);
    // and then the entry itself (packed)
    panda__log_entry__pack(entry, thePandalog->chunk.buf_p);
    thePandalog->chunk.buf_p += n;
    // remember instr for last entry
    instr_last_entry = entry->instr;
    thePandalog->chunk.ind_entry ++;
}
/*
 * Append one dynamic-value log entry.
 *
 * Two output modes:
 *  - tubtf_on: encode the entry as a fixed-width tubtf element
 *    (cr3, pc, typ, arg1..arg4 as 64-bit columns) and emit it via
 *    tubtf_write_el_64().
 *  - otherwise: memcpy the raw DynValEntry struct into dynval_buf.
 *
 * NOTE(review): 'env' is not a parameter -- presumably a file-scope or
 * global CPU state pointer; confirm where it is defined.
 */
void write_dynval_buffer(DynValBuffer *dynval_buf, DynValEntry *entry){
    if (tubtf_on) {
        // XXX Fixme: note that when using tubtf format, we still create that DynValBuffer. Waste of memory
        uint64_t cr3, pc, typ;
        uint64_t arg1, arg2, arg3, arg4;
        arg1 = arg2 = arg3 = arg4 = 0;
        // only the 64-bit column width is handled here
        assert (tubtf->colw == TUBTF_COLW_64);
        uint32_t element_size = tubtf_element_size();
        // assert that there must be enough room in dynval buffer
        // NOTE(review): this checks dynval_buf, but the tubtf path writes
        // through tubtf_write_el_64(), not into dynval_buf -- confirm this
        // guard protects the right buffer.
        uint32_t bytes_used = dynval_buf->ptr - dynval_buf->start;
        uint32_t bytes_left = dynval_buf->max_size - bytes_used;
        assert (bytes_left > element_size);
        cr3 = panda_current_asid(env); // virtual address space -- cr3 for x86
        pc = panda_current_pc(env);
        typ = 0;
        switch (entry->entrytype) {
        case ADDRENTRY: {
            // memory access: pack the address descriptor into arg1/arg2
            LogOp op = entry->entry.memaccess.op;
            assert (op == LOAD ||op == STORE);
            Addr *a = &(entry->entry.memaccess.addr);
            typ = TUBTFE_LLVM_DV_LOAD;
            if (op == STORE) {
                typ = TUBTFE_LLVM_DV_STORE;
            }
            // a->type fits easily in a byte -- 1 .. 5
            // arg1 layout: [off << 16 | flag << 8 | typ]
            arg1 = (a->typ) | ((a->flag & 0xff) << 8) | (a->off << 16);
            uint64_t val;
            // select the union member that matches the address type
            switch (a->typ) {
            case HADDR: val = a->val.ha; break;
            case MADDR: val = a->val.ma; break;
            case IADDR: val = a->val.ia; break;
            case LADDR: val = a->val.la; break;
            case GREG: val = a->val.gr; break;
            case GSPEC: val = a->val.gs; break;
            case UNK: val = a->val.ua; break;
            case CONST: val = a->val.con; break;
            case RET: val = a->val.ret; break;
            default: assert (1==0); // unknown address type
            }
            arg2 = val;
            break;
        }
        case PADDRENTRY: {
            // port I/O access: same packing, PADDR union member only
            LogOp op = entry->entry.portaccess.op;
            assert (op == PLOAD ||op == PSTORE);
            Addr *a = &(entry->entry.portaccess.addr);
            typ = TUBTFE_LLVM_DV_LOAD;
            if (op == PSTORE) {
                typ = TUBTFE_LLVM_DV_STORE;
            }
            // a->type fits easily in a byte -- 1 .. 5
            arg1 = (a->typ) | ((a->flag & 0xff) << 8) | (a->off << 16);
            uint64_t val;
            switch (a->typ) {
            case PADDR: val = a->val.pa; break;
            default: assert (1==0); // only PADDR is valid for port accesses
            }
            arg2 = val;
            break;
        }
        case BRANCHENTRY: {
            typ = TUBTFE_LLVM_DV_BRANCH;
            arg1 = entry->entry.branch.br;
            break;
        }
        case SELECTENTRY: {
            typ = TUBTFE_LLVM_DV_SELECT;
            arg1 = entry->entry.select.sel;
            break;
        }
        case SWITCHENTRY: {
            typ = TUBTFE_LLVM_DV_SWITCH;
            arg1 = entry->entry.switchstmt.cond;
            break;
        }
        case EXCEPTIONENTRY: {
            // no break: last case, falls out of the switch
            typ = TUBTFE_LLVM_EXCEPTION;
        }
        }
        tubtf_write_el_64(cr3, pc, typ, arg1, arg2, arg3, arg4);
    }
    else {
        // raw mode: append the whole struct to the dynval buffer
        uint32_t bytes_used = dynval_buf->ptr - dynval_buf->start;
        assert(dynval_buf->max_size - bytes_used >= sizeof(DynValEntry));
        memcpy(dynval_buf->ptr, entry, sizeof(DynValEntry));
        dynval_buf->ptr += sizeof(DynValEntry);
        dynval_buf->cur_size = dynval_buf->ptr - dynval_buf->start;
    }
}
// Support all features of label and query program void i386_hypercall_callback(CPUState *env){ #if 0 if (EAX == 0xabcd) { printf ("\n hypercall pc=0x%x\n", (int) panda_current_pc(env)); for (uint32_t i=0; i<8; i++) { printf ("reg[%d] = 0x%x\n", i, (int) env->regs[i]); } } #endif //printf("taint2: Hypercall! B " TARGET_FMT_lx " C " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", // env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX]); #if 0 // Label op. // EBX contains addr of that data // ECX contains size of data // EDX contains the label; ~0UL for autoenc. if ((env->regs[R_EAX] == 7 || env->regs[R_EAX] == 8)) { printf ("hypercall -- EAX=0x%x\n", EAX); target_ulong addr = panda_virt_to_phys(env, env->regs[R_EBX]); target_ulong size = env->regs[R_ECX]; target_ulong label = env->regs[R_EDX]; if (!taintEnabled){ printf("taint2: Label operation detected @ %lu\n", rr_get_guest_instr_count()); printf("taint2: Labeling " TARGET_FMT_lx " to " TARGET_FMT_lx " with label " TARGET_FMT_lx ".\n", addr, addr + size, label); __taint2_enable_taint(); } LabelSetP ls = NULL; if (label != (target_ulong)~0UL) { ls = label_set_singleton(label); } // otherwise autoinc. qemu_log_mask(CPU_LOG_TAINT_OPS, "label: %lx[%lx+%lx] <- %lx (%lx)\n", (uint64_t)shadow->ram, (uint64_t)addr, (uint64_t)size, (uint64_t)label, (uint64_t)ls); for (unsigned i = 0; i < size; i++) { //printf("label %u\n", i); shadow->ram->set(addr + i, label_set_singleton(i)); } } #endif if (pandalog && env->regs[R_EAX] == 0xabcd) { // LAVA Hypercall target_ulong addr = panda_virt_to_phys(env, ECX); if ((int)addr == -1) { printf ("panda hypercall with ptr to invalid PandaHypercallStruct: vaddr=0x%x paddr=0x%x\n", (uint32_t) ECX, (uint32_t) addr); } else { PandaHypercallStruct phs; panda_virtual_memory_rw(env, ECX, (uint8_t *) &phs, sizeof(phs), false); if (phs.action == 11) { // it's a lava query lava_taint_query(phs); } if (phs.action == 12) { // it's an attack point sighting lava_attack_point(phs); } } } }