/*
 * Profiling demo: times a nested prime-search loop twice, once with
 * per-iteration inner-loop profiling enabled and once without, so the
 * overhead of the inner probes can be compared in the two reports.
 * Relies on globals (jiffies_per_tick, clock_frequency, ...) and the
 * start/stop/output_profiling API defined elsewhere in this project.
 */
void nested()
{
    /* Configure the VxWorks clock and capture timestamp parameters. */
    sysClkRateSet(1000);
    sysTimestampEnable();
    jiffies_per_tick = sysTimestampPeriod();
    clock_frequency = sysTimestampFreq();
    /* NOTE(review): if these globals are integer-typed, the division below
     * truncates before the float multiply — confirm their declarations. */
    microseconds_per_tick = (jiffies_per_tick / clock_frequency) * 1000000.0;
    microseconds_per_jiffy = microseconds_per_tick / jiffies_per_tick;

    int n, div, latest_prime;

    /* Pass 1: profile the outer loop WITH inner-loop probes active. */
    start_profiling();
    for (n = 2; n < 100; n++) {
        start_profiling_in();
        /* Trial division: stop at the first factor or once div > n/div. */
        div = 2;
        while (div <= n / div && (n % div) != 0)
            div++;
        stop_profiling_in();
        if (div > n / div)
            latest_prime = n;   /* no factor found: n is prime */
    }
    stop_profiling();
    output_profiling("nested for outer loop including inner loop profiling");

    /* Pass 2: identical work, but without the inner-loop probes. */
    start_profiling();
    for (n = 2; n < 100; n++) {
        div = 2;
        while (div <= n / div && (n % div) != 0)
            div++;
        if (div > n / div)
            latest_prime = n;
    }
    stop_profiling();
    output_profiling("nested for outer loop without inner loop profiling");
}
// Attempts to attach the xdebug profiler to the current thread. Assumes it // is not already attached. Raises an error on failure. static void attach_xdebug_profiler() { assert(!XDEBUG_GLOBAL(ProfilerAttached)); if (s_profiler_factory->start(ProfilerKind::XDebug, 0, false)) { XDEBUG_GLOBAL(ProfilerAttached) = true; // Enable profiling and tracing if we need to auto profiler = xdebug_profiler(); if (XDebugProfiler::isProfilingNeeded()) { start_profiling(profiler); } if (XDebugProfiler::isTracingNeeded()) { start_tracing(profiler); } profiler->setCollectMemory(XDEBUG_GLOBAL(CollectMemory)); profiler->setCollectTime(XDEBUG_GLOBAL(CollectTime)); profiler->setMaxNestingLevel(XDEBUG_GLOBAL(MaxNestingLevel)); } else { raise_error("Could not start xdebug profiler. Another profiler is " "likely already attached to this thread."); } }
/*
 * /proc write handler driving the IBS profiler / Carrefour policies.
 * The first byte of the write selects a command:
 *   'b' start profiling          'e' stop + enable carrefour
 *   'x' restart w/o carrefour    'k' stop + disable carrefour
 *   'i'/'I' interleaving off/on  'T' load per-node access counts
 *   'Z' per-pid replication      'r'/'R' replication off/on
 *   'M'/'m' migration on/off     'F'/'f' sampling rate accurate/cheap
 * Returns count on success, -EFAULT/-ENOMEM on copy/allocation failure.
 */
static ssize_t ibs_proc_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
    char c;

    if (count) {
        if (get_user(c, buf))
            return -EFAULT;

        if (c == 'b' && !running) {
            start_profiling();
            running = 1;
        } else if (c == 'e' && running) {
            enable_carrefour = 1;
            stop_profiling();
            running = 0;
        } else if (c == 'x' && running) {
            enable_carrefour = 0;
            stop_profiling();
#if ADAPTIVE_SAMPLING
            /* Carrefour disabled: fall back to the cheap sampling rate. */
            sampling_rate = sampling_rate_cheap;
#endif
            start_profiling();
        } else if (c == 'k' && running) {
            enable_carrefour = 0;
            stop_profiling();
            running = 0;
        }
#if ENABLE_INTERLEAVING
        else if (c == 'i') {
            enable_interleaving = 0;
        } else if (c == 'I') {
            enable_interleaving = 1;
        }
#endif
        else if (c == 'T') {
            /* Payload is "T<n>,<n>,..." — one access count per node. */
            if (count > 1) {
                /* BUG FIX: allocate count+1 so the buffer is always
                 * NUL-terminated for the kstrtoul() below. */
                char *buf_tmp = kmalloc(count + 1, GFP_KERNEL | __GFP_ZERO);
                char *index;
                char *next_idx;
                int node = 0;

                /* BUG FIX: kmalloc() result was used unchecked. */
                if (!buf_tmp)
                    return -ENOMEM;

                /* BUG FIX: buf_tmp used to leak on this error path. */
                if (copy_from_user(buf_tmp, buf, count)) {
                    kfree(buf_tmp);
                    return -EFAULT;
                }

                /* Skip the leading 'T'. */
                index = buf_tmp + 1;
                for (next_idx = index; next_idx < buf_tmp + count; next_idx++) {
                    if (*next_idx == ',' || next_idx == (buf_tmp + count - 1)) {
                        /* BUG FIX: initialized so the error printk below
                         * never reads an indeterminate value. */
                        unsigned long value = 0;

                        if (*next_idx == ',')
                            *next_idx = 0;

                        /* BUG FIX: was kstrtol() with an unsigned long*,
                         * a pointer-type mismatch; kstrtoul() matches. */
                        if (kstrtoul(index, 10, &value) < 0) {
                            printk("Value is %s (%lu)\n", index, value);
                            printk(KERN_WARNING "Strange bug\n");
                            memset(&nr_accesses_node, 0,
                                   sizeof(unsigned long) * MAX_NUMNODES);
                            break;
                        }
                        nr_accesses_node[node++] = value;
                        index = next_idx + 1;
                    }
                }
                kfree(buf_tmp);
            }
        }
#if ENABLE_REPLICATION && REPLICATION_PER_TID
        else if (c == 'Z') {
            /* BUG FIX: sscanf() used to parse the __user pointer directly;
             * copy the command into kernel space first. */
            char zbuf[64];
            size_t zlen = count < sizeof(zbuf) - 1 ? count : sizeof(zbuf) - 1;
            int pid, enabled, ret;

            if (copy_from_user(zbuf, buf, zlen))
                return -EFAULT;
            zbuf[zlen] = '\0';

            ret = sscanf(zbuf, "Z\t%d\t%d\n", &pid, &enabled);
            if (ret != 2) {
                printk("Error %s\n", zbuf);
            } else {
                printk("Replication for pid %d => %d\n", pid, enabled);
                change_replication_state(pid, enabled);
                if (enabled)
                    enable_replication = 1;
            }
        }
#elif ENABLE_REPLICATION
        else if (c == 'r') {
            enable_replication = 0;
        } else if (c == 'R') {
            enable_replication = 1;
        }
#endif
#if ENABLE_MIGRATION
        else if (c == 'M') {
            enable_migration = 1;
        } else if (c == 'm') {
            enable_migration = 0;
        }
#endif
#if ADAPTIVE_SAMPLING
        else if (c == 'F') {
            /* Increase the IBS sampling frequency. */
            sampling_rate = sampling_rate_accurate;
        } else if (c == 'f') {
            /* Decrease the IBS sampling frequency. */
            sampling_rate = sampling_rate_cheap;
        }
#endif
    }
    return count;
}
/*
 * Driver for the SLI crash-hypothesis analysis.
 * Usage: <binary> <typesdb> <callgraph> <staticdb> [range|rip] [mode]
 * where mode is one of "assertions", "doublefree", or "icall", and the
 * optional positional argument is either a single VexRip to analyse or a
 * "s...d" start/end percentage range over all known instructions.
 */
int main(int argc, char *argv[])
{
    init_sli();
    __set_profiling(root);
    if (argc < 5)
        errx(1, "not enough arguments");
    argv++;
    argc--;
    /* Four mandatory database paths, consumed positionally. */
    const char *binary = argv[0];
    const char *typesdb = argv[1];
    const char *callgraph = argv[2];
    const char *staticdb = argv[3];
    argv += 4;
    argc -= 4;
    /* Optional trailing mode keyword selects which sites to analyse. */
    bool assert_mode = false;
    bool double_free_mode = false;
    bool indirect_call_mode = false;
    if (!strcmp(argv[argc - 1], "assertions")) {
        assert_mode = true;
        argc--;
    } else if (!strcmp(argv[argc - 1], "doublefree")) {
        double_free_mode = true;
        argc--;
    } else if (!strcmp(argv[argc - 1], "icall")) {
        indirect_call_mode = true;
        argc--;
    }
    if (argc > 2)
        errx(1, "Too many arguments");
    /* Build the oracle from the target ELF plus its type database.  The
     * MachineState/Thread are only needed to construct it. */
    VexPtr<Oracle> oracle;
    {
        MachineState *ms = MachineState::readELFExec(binary);
        Thread *thr = ms->findThread(ThreadId(1));
        oracle = new Oracle(ms, thr, typesdb);
    }
    oracle->loadCallGraph(oracle, callgraph, staticdb, ALLOW_GC);
    DumpFix df(oracle);
    LibVEX_gc(ALLOW_GC);
    /* Default to the full 0..100% slice unless a range argument is given. */
    int start_percentage;
    int end_percentage;
    start_percentage = 0;
    end_percentage = 100;
    AllowableOptimisations opt =
        AllowableOptimisations::defaultOptimisations
        .enableassumePrivateStack()
        .setAddressSpace(oracle->ms->addressSpace);
    if (assert_mode || double_free_mode)
        opt = opt.enableallPointersGood();
    if (double_free_mode)
        opt = opt.enablefreeMightRace();
    if (argc == 1 || argc == 2) {
        DynAnalysisRip vr;
        const char *succ;
        /* Single-RIP mode: analyse just this instruction, optionally
         * restricted to one store CFG out of an expected total. */
        if (parseDynAnalysisRip(&vr, argv[0], &succ)) {
            int only_store_cfg = -1;
            int expected_nr_store_cfgs = -1;
            argc--;
            argv++;
            if (argc == 1) {
                if (sscanf(argv[0], "%d/%d", &only_store_cfg,
                           &expected_nr_store_cfgs) != 2 ||
                    only_store_cfg < 0 ||
                    expected_nr_store_cfgs <= 0 ||
                    only_store_cfg >= expected_nr_store_cfgs) {
                    errx(1, "expected final argument to be <store_cfg>/<nr_store_cfgs>, not %s", argv[0]);
                }
            }
            consider_rip(vr, 1, oracle, df, opt, only_store_cfg,
                         expected_nr_store_cfgs, ALLOW_GC);
            df.finish();
            return 0;
        }
        if (argc != 1 ||
            sscanf(argv[0], "%d...%d", &start_percentage, &end_percentage) != 2)
            errx(1, "expect argument to be either a VexRip or s...d where s and d are start and end percentages, not %s", argv[0]);
    }
    /* Choose the work list: an explicit schedule (env var or mode-specific
     * oracle query) or an iterator over every known instruction. */
    std::vector<DynAnalysisRip> schedule;
    VexPtr<TypesDb::all_instrs_iterator> instrIterator;
    unsigned long total_instructions;
    bool use_schedule = false;
    /* Shut compiler up */
    total_instructions = -1;
    if (getenv("SOS22_MINIMAL_DIRECT_INSTR_SCHEDULE")) {
        loadSchedule(getenv("SOS22_MINIMAL_DIRECT_INSTR_SCHEDULE"), schedule);
        use_schedule = true;
    } else if (assert_mode) {
        oracle->findAssertions(schedule);
        use_schedule = true;
    } else if (double_free_mode) {
        oracle->findFrees(schedule);
        use_schedule = true;
    } else if (indirect_call_mode) {
        oracle->findIndirectCalls(schedule);
        use_schedule = true;
    } else {
        instrIterator = oracle->type_db->enumerateAllInstructions();
        total_instructions = oracle->type_db->nrDistinctInstructions();
    }
    if (use_schedule)
        total_instructions = schedule.size();
    printf("%ld instructions to protect\n", total_instructions);
    /* There are a couple of important properties here:

       -- 0...100 must precisely cover the entire range

       -- a...b and b...c must, between them, cover precisely the
          same range as a...c

       i.e. no duplicates or gaps. */
    unsigned long start_instr = (total_instructions * start_percentage) / 100;
    unsigned long end_instr =
        end_percentage == 100 ?
        total_instructions - 1 :
        (total_instructions * end_percentage) / 100 - 1;
    /* NOTE(review): the for-loop below runs idx = start_instr..end_instr
     * INCLUSIVE (end_instr - start_instr + 1 items), while this count —
     * used by the iterator path's while-loop — is end_instr - start_instr.
     * The two paths therefore appear to process different numbers of
     * instructions for the same range; confirm which is intended. */
    unsigned long instructions_to_process = end_instr - start_instr;
    printf("Processing instructions %ld to %ld\n", start_instr, end_instr);
    unsigned long cntr = 0;
    InstructionConsumer ic(start_instr, instructions_to_process,
                           total_instructions, opt);
    if (use_schedule) {
        /* Schedule path: index directly into the precomputed list,
         * with profiling around the whole sweep. */
        initialise_profiling();
        start_profiling();
        for (unsigned long idx = start_instr; idx <= end_instr; idx++) {
            ic(oracle, df, schedule[idx], cntr);
            cntr++;
        }
        stop_profiling();
        dump_profiling_data();
    } else {
        /* Skip the ones we've been told to skip. */
        for (unsigned long a = 0; a < start_instr; a++)
            instrIterator->advance();
        while (cntr < instructions_to_process) {
            assert(!instrIterator->finished());
            DynAnalysisRip dar;
            instrIterator->fetch(&dar);
            instrIterator->advance();
            ic(oracle, df, dar, cntr);
            cntr++;
        }
    }
    df.finish();
    return 0;
}