bool
analyzer_t::init_file_reader(const std::string &trace_path, int verbosity_in)
{
    verbosity = verbosity_in;
    if (trace_path.empty()) {
        ERRMSG("Trace file name is empty\n");
        return false;
    }
    // Fall back to serial analysis if any tool lacks parallel shard support.
    for (int i = 0; i < num_tools; ++i) {
        if (parallel && !tools[i]->parallel_shard_supported()) {
            parallel = false;
            break;
        }
    }
    if (parallel && directory_iterator_t::is_directory(trace_path)) {
        directory_iterator_t end;
        directory_iterator_t iter(trace_path);
        if (!iter) {
            ERRMSG("Failed to list directory %s: %s", trace_path.c_str(),
                   iter.error_string().c_str());
            return false;
        }
        // Open one reader per per-thread trace shard in the directory.
        for (; iter != end; ++iter) {
            const std::string fname = *iter;
            if (fname == "." || fname == "..")
                continue;
            const std::string path = trace_path + DIRSEP + fname;
            std::unique_ptr<reader_t> reader = get_reader(path, verbosity);
            if (!reader) {
                return false;
            }
            thread_data.push_back(analyzer_shard_data_t(
                static_cast<int>(thread_data.size()), std::move(reader), path));
            VPRINT(this, 2, "Opened reader for %s\n", path.c_str());
        }
        // Like raw2trace, we use a simple round-robin static work assignment.
        // This could be improved later with a dynamic work queue for better
        // load balancing.
        if (worker_count <= 0)
            worker_count = std::thread::hardware_concurrency();
        worker_tasks.resize(worker_count);
        int worker = 0;
        for (size_t i = 0; i < thread_data.size(); ++i) {
            VPRINT(this, 2, "Worker %d assigned trace shard %zu\n", worker, i);
            worker_tasks[worker].push_back(&thread_data[i]);
            thread_data[i].worker = worker;
            worker = (worker + 1) % worker_count;
        }
    } else {
        parallel = false;
        serial_trace_iter = get_reader(trace_path, verbosity);
        if (!serial_trace_iter) {
            return false;
        }
        VPRINT(this, 2, "Opened serial reader for %s\n", trace_path.c_str());
    }
    // It's ok if trace_end is a different type from serial_trace_iter; they
    // will still compare equal when both are at EOF.
    trace_end = std::unique_ptr<default_file_reader_t>(new default_file_reader_t());
    return true;
}
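The loop above implements the round-robin static work assignment its comment describes: shard i lands on worker i % worker_count. As a standalone sketch under assumed names (Shard stands in for analyzer_shard_data_t; everything else here is hypothetical), the scheme reduces to:

#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

// Hypothetical stand-in for analyzer_shard_data_t: just records its worker.
struct Shard {
    int id;
    int worker = -1;
};

int main() {
    unsigned worker_count = std::thread::hardware_concurrency();
    if (worker_count == 0) // hardware_concurrency() may report 0
        worker_count = 1;

    std::vector<Shard> shards(10);
    std::vector<std::vector<Shard *>> worker_tasks(worker_count);

    // Static round-robin: shard i goes to worker i % worker_count.
    unsigned worker = 0;
    for (std::size_t i = 0; i < shards.size(); ++i) {
        shards[i].id = static_cast<int>(i);
        shards[i].worker = static_cast<int>(worker);
        worker_tasks[worker].push_back(&shards[i]);
        worker = (worker + 1) % worker_count;
    }

    for (unsigned w = 0; w < worker_count; ++w)
        std::cout << "worker " << w << " has " << worker_tasks[w].size()
                  << " shards\n";
}

Static assignment costs nothing at runtime and is deterministic, but, as the original comment concedes, a shared work queue balances load better when shard sizes are skewed.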
unsigned int arpt_do_table(struct sk_buff *skb,
                           unsigned int hook,
                           const struct net_device *in,
                           const struct net_device *out,
                           struct xt_table *table)
{
    static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
    unsigned int verdict = NF_DROP;
    const struct arphdr *arp;
    struct arpt_entry *e, *back;
    const char *indev, *outdev;
    void *table_base;
    const struct xt_table_info *private;
    struct xt_action_param acpar;
    unsigned int addend;

    /* Drop packets too short to hold an ARP header. */
    if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
        return NF_DROP;

    indev = in ? in->name : nulldevname;
    outdev = out ? out->name : nulldevname;

    local_bh_disable();
    get_reader(&(table->private_lock));
    addend = xt_write_recseq_begin();
    private = table->private;
    /* ... remainder of rule traversal elided ... */
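The local_bh_disable() / xt_write_recseq_begin() pair marks this CPU as inside a table traversal so that a concurrent ruleset replacement can wait for in-flight traversals to drain. The following is not the kernel's implementation, only a minimal user-space sketch of the general sequence-counter idea it builds on: the writer holds the counter at an odd value while updating, and readers retry until they observe the same even value on both sides of their read.

#include <atomic>
#include <cstdio>

// Minimal seqlock-style sketch (illustration only, not the kernel's xtables
// code). The writer makes the counter odd while it updates the payload;
// readers retry until they see an unchanged even counter.
struct SeqData {
    std::atomic<unsigned> seq{0};
    int a = 0, b = 0; // protected payload
};

void write_update(SeqData &d, int a, int b) {
    d.seq.fetch_add(1, std::memory_order_acq_rel); // odd: write in progress
    d.a = a;
    d.b = b;
    d.seq.fetch_add(1, std::memory_order_release); // even again: stable
}

void read_consistent(const SeqData &d, int &a, int &b) {
    unsigned start, end;
    do {
        do { // wait for an even (stable) counter value
            start = d.seq.load(std::memory_order_acquire);
        } while (start & 1u);
        a = d.a;
        b = d.b;
        end = d.seq.load(std::memory_order_acquire);
    } while (end != start); // a writer intervened; retry
}

int main() {
    SeqData d;
    write_update(d, 1, 2);
    int a, b;
    read_consistent(d, a, b);
    std::printf("a=%d b=%d\n", a, b);
}

A production version would additionally need the payload accesses to be atomic or fenced to be data-race-free under concurrency; the kernel's per-CPU recseq variant sidesteps contention by giving each CPU its own counter.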
void cilk_read_reducer(void *reducer, void *rip, const char *function, int line)
{
    viewread_stack_t *stack = &ctx_stack;
    reader_t *last_reader = get_reader((reducer_t)reducer, &memory);
    DisjointSet_t *rep = DisjointSet_find_set(last_reader->node);
    /* The read races if the last reader's bag is a P-bag (logically in
     * parallel with the current strand) or the spawn counts disagree
     * (the reads see different views). */
    if (P == rep->type ||
        last_reader->spawns !=
            stack->bot->ancestor_spawns + stack->bot->local_spawns) {
        fprintf(stderr, "View-read race detected between %p and %p (%s:%d)\n",
                (void *)(last_reader->reader), rip, function, line);
    } else {
        if (P != rep->type) {
            /* fprintf(stderr, "Safe read; rep %p\n", rep); */
            assert(stack->bot->ancestor_spawns + stack->bot->local_spawns ==
                   last_reader->spawns);
        } else {
            /* fprintf(stderr, "spawn counts are both %lx\n", */
            /*         stack->bot->ancestor_spawns + stack->bot->local_spawns); */
        }
    }
    /* Record this strand as the latest reader of the reducer. */
    update_shadowmem(&memory, (reducer_t)reducer, (uintptr_t)rip,
                     stack->bot->ancestor_spawns + stack->bot->local_spawns,
                     stack->bot->ss_bag);
}
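DisjointSet_find_set points at a union-find structure over the S- and P-bags that SP-bags-style race detectors use to decide whether two strands may run in parallel. As a generic, self-contained sketch (these names are illustrative, not the tool's API), find with path compression and union by rank looks like:

#include <cstdio>
#include <utility>
#include <vector>

// Union-find with path halving and union by rank; a generic sketch of the
// structure behind DisjointSet_find_set, not the detector's actual code.
struct DisjointSets {
    std::vector<int> parent, rank_;
    explicit DisjointSets(int n) : parent(n), rank_(n, 0) {
        for (int i = 0; i < n; ++i)
            parent[i] = i;
    }
    int find(int x) {
        while (parent[x] != x) {
            parent[x] = parent[parent[x]]; // path halving
            x = parent[x];
        }
        return x;
    }
    void unite(int a, int b) {
        a = find(a);
        b = find(b);
        if (a == b)
            return;
        if (rank_[a] < rank_[b])
            std::swap(a, b);
        parent[b] = a; // attach the shallower tree under the deeper one
        if (rank_[a] == rank_[b])
            ++rank_[a];
    }
};

int main() {
    DisjointSets ds(4);
    ds.unite(0, 1);
    ds.unite(2, 3);
    std::printf("same set: %d\n", ds.find(0) == ds.find(1)); // 1
    std::printf("same set: %d\n", ds.find(1) == ds.find(2)); // 0
}

With both optimizations, m operations on n elements take O(m α(n)) time, effectively constant per query, which is what keeps the per-read find_set call in the detector cheap.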