void uninit_plugin(void *self) { /* * XXX: Here, we unload our pass from the PassRegistry. This seems to work * fine, until we reload this plugin again into QEMU and we get an LLVM * assertion saying the pass is already registered. This seems like a bug * with LLVM. Switching between TCG and LLVM works fine when passes aren't * added to LLVM. */ llvm::PassRegistry *pr = llvm::PassRegistry::getPassRegistry(); const llvm::PassInfo *pi = //pr->getPassInfo(&llvm::PandaInstrFunctionPass::ID); pr->getPassInfo(llvm::StringRef("PandaInstr")); if (!pi){ printf("Unable to find 'PandaInstr' pass in pass registry\n"); } else { pr->unregisterPass(*pi); } if (taintfpm) delete taintfpm; // Delete function pass manager and pass if (shadow) tp_free(shadow); panda_disable_llvm(); panda_disable_memcb(); panda_enable_tb_chaining(); }
static inline void test_check_buffer_initialized(void) { struct tp req; tp_init(&req, NULL, 0, tp_realloc, NULL); tp_select(&req, 0, 0, 0, 0); /* could fail on assert */ tp_tuple(&req); tp_sz(&req, "key"); tp_free(&req); }
static inline int test_check_read_reply(int fd) { struct tp rep; tp_init(&rep, NULL, 0, tp_realloc, NULL); while (1) { ssize_t to_read = tp_req(&rep); if (to_read <= 0) break; ssize_t new_size = tp_ensure(&rep, to_read); if (new_size == -1) { // no memory (?) return 1; } ssize_t res = read(fd, rep.p, to_read); if (res == 0) { // eof return 1; } else if (res < 0) { // error return 1; } tp_use(&rep, res); } ssize_t server_code = tp_reply(&rep); if (server_code != 0) { printf("error: %-.*s\n", tp_replyerrorlen(&rep), tp_replyerror(&rep)); tp_free(&rep); return 1; } if (tp_replyop(&rep) == 17) { /* select */ reply_print(&rep); } else if (tp_replyop(&rep) == 13) { /* insert */ } else { return 1; } tp_free(&rep); return 0; }
/* Tear down the taint plugin: release shadow memory and restore the
 * normal (non-LLVM, chained-TB) QEMU execution configuration. */
void uninit_plugin(void *self) {
    printf ("uninit taint plugin\n");

    if (shadow) {
        tp_free(shadow);
    }

    panda_disable_llvm();
    panda_disable_memcb();
    panda_enable_tb_chaining();
}
/* Free the heap storage owned by value v, dispatching on its type tag.
 * Raises a TypeError for any tag without an owned allocation. */
void tp_delete(TP,tp_obj v) {
    switch (v.type) {
    case TP_LIST:
        _tp_list_free(tp, v.list.val);
        return;
    case TP_DICT:
        _tp_dict_free(tp, v.dict.val);
        return;
    case TP_STRING:
        tp_free(tp, v.string.info);
        return;
    case TP_DATA:
        /* run the user-supplied destructor before releasing the info block */
        if (v.data.info->free) {
            v.data.info->free(tp,v);
        }
        tp_free(tp, v.data.info);
        return;
    case TP_FNC:
        tp_free(tp, v.fnc.info);
        return;
    }
    tp_raise(,tp_string("(tp_delete) TypeError: ?"));
}
/* Release the storage owned by value v according to its type tag.
 * Unknown tags raise a runtime error instead of silently leaking. */
void tp_delete(TP,tp_obj v) {
    switch (v.type) {
    case TP_LIST:
        _tp_list_free(v.list.val);
        return;
    case TP_DICT:
        _tp_dict_free(v.dict.val);
        return;
    case TP_STRING:
        tp_free(v.string.info);
        return;
    case TP_DATA:
        /* invoke the optional per-type destructor first */
        if (v.data.meta && v.data.meta->free) {
            v.data.meta->free(tp,v);
        }
        tp_free(v.data.info);
        return;
    case TP_FNC:
        tp_free(v.fnc.val);
        return;
    }
    tp_raise(,"tp_delete(%s)",STR(v));
}
void uninit_plugin(void *self) { printf ("uninit taint plugin\n"); if (tainted_instructions) { for ( auto &kvp : shadow->tpc ) { uint64_t asid = kvp.first; printf ("asid = %lx\n", asid); for ( auto &pc : kvp.second ) { printf ("instr is tainted : asid=0x%lx : pc=0x%lx \n", asid, pc); } } } /* * XXX: Here, we unload our pass from the PassRegistry. This seems to work * fine, until we reload this plugin again into QEMU and we get an LLVM * assertion saying the pass is already registered. This seems like a bug * with LLVM. Switching between TCG and LLVM works fine when passes aren't * added to LLVM. */ llvm::PassRegistry *pr = llvm::PassRegistry::getPassRegistry(); const llvm::PassInfo *pi = //pr->getPassInfo(&llvm::PandaInstrFunctionPass::ID); pr->getPassInfo(llvm::StringRef("PandaInstr")); if (!pi){ printf("Unable to find 'PandaInstr' pass in pass registry\n"); } else { pr->unregisterPass(*pi); } if (taintfpm) delete taintfpm; // Delete function pass manager and pass if (shadow) tp_free(shadow); if (tob_io_thread) tob_delete(tob_io_thread); panda_disable_llvm(); panda_disable_memcb(); panda_enable_tb_chaining(); }
/*
 * Connect to a local tarantool instance (127.0.0.1:33013), send an
 * insert followed by a select in one write, then validate both replies.
 * Returns 0 on success, 1 on failure.
 *
 * BUG FIX: the original leaked the socket fd on the connect-failure,
 * short-write, and first-reply-failure paths, and leaked the request
 * buffer on the short-write path; all resources are now released on
 * every exit.
 */
static inline int test_check_read(void) {
    int fd;
    struct sockaddr_in tt;
    if ((fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
        printf("Failed to create socket\n");
        return 1;
    }
    memset(&tt, 0, sizeof(tt));
    tt.sin_family = AF_INET;
    tt.sin_addr.s_addr = inet_addr("127.0.0.1");
    tt.sin_port = htons(33013);
    if (connect(fd, (struct sockaddr *) &tt, sizeof(tt)) < 0) {
        printf("Failed to connect\n");
        close(fd); /* originally leaked */
        return 1;
    }
    struct tp req;
    tp_init(&req, NULL, 0, tp_realloc, NULL);
    tp_insert(&req, 0, 0);
    tp_tuple(&req);
    tp_sz(&req, "_i32");
    tp_sz(&req, "0e72ae1a-d0be-4e49-aeb9-aebea074363c");
    tp_select(&req, 0, 0, 0, 1);
    tp_tuple(&req);
    tp_sz(&req, "_i32");
    int rc = write(fd, tp_buf(&req), tp_used(&req));
    if (rc != tp_used(&req)) {
        tp_free(&req); /* originally leaked */
        close(fd);
        return 1;
    }
    tp_free(&req);
    rc = test_check_read_reply(fd);
    if (rc != 0) {
        close(fd); /* originally leaked */
        return 1;
    }
    rc = test_check_read_reply(fd);
    close(fd);
    return (rc != 0) ? 1 : 0;
}
threadpool *tp_init(size_t num_ts) { threadpool *pool; if ((pool = (threadpool *) malloc(sizeof(threadpool))) == null) { goto err; } // init the pool pool->num_ts = 0; pool->shutdown = false; pool->started = 0; pool->tasks = h_init(32, tp_taskcomp); pool->ts = malloc(sizeof(pthread_t) * num_ts); // setup mutex and conditional notification if (pthread_mutex_init(&pool->lock, null) != 0 || pthread_cond_init(&pool->notify, null) != 0 || pool->ts == null) { goto err; } // spin up worker threads size_t i; for (i = 0; i < num_ts; i++) { if (pthread_create(&pool->ts[i], null, tp_thread, (void *) pool) != 0) { tp_dest(pool, 0); return null; } pool->num_ts++; pool->started++; } return pool; err: // initialization has failed somewhere, cleanup and return null if (pool) { tp_free(pool); } return null; }
/*
 * Shut the pool down (gracefully when tpexit_graceful is set), join all
 * worker threads, and free the pool. Returns 0 on success or a tp_*
 * error code.
 *
 * BUG FIX: when the pool was already shutting down, the original
 * returned tp_shutdown with pool->lock still held, deadlocking any
 * subsequent caller that touches the lock; it is now released.
 */
int tp_dest(threadpool *pool, int flags) {
    int i, err = 0;

    if (pool == null) {
        return tp_invalid;
    }
    if (pthread_mutex_lock(&pool->lock) != 0) {
        return tp_lockfail;
    }

    // check that we're not shutting down already
    if (!pool->shutdown) {
        pool->shutdown = (flags & tpexit_graceful) ? tpsdown_soft : tpsdown_now;

        // wakeup worker threads
        if (pthread_cond_broadcast(&pool->notify) != 0 ||
            pthread_mutex_unlock(&pool->lock) != 0) {
            err = tp_lockfail;
        }

        // recall worker threads
        for (i = 0; i < pool->num_ts; i++) {
            if (pthread_join(pool->ts[i], null) != 0) {
                err = tp_threadfail;
            }
        }
    } else {
        err = tp_shutdown;
        pthread_mutex_unlock(&pool->lock); // originally left locked
    }

    // don't free if we hit an error
    if (!err) {
        tp_free(pool);
    }
    return err;
}