/**
 * Worker side of the memory benchmark: rendezvous with the other
 * participants, run the benchmark, rendezvous again, and report success.
 */
static int run_worker(coreid_t mycore)
{
    errval_t err;

    // Wait until every participant is ready.
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_WAIT, 0);
    err = ns_barrier_worker((int)mycore, "mem_bench_ready");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "barrier_worker failed");
    }

    // Do the measured work.
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_RUN, 0);
    run_benchmark(mycore, MAX_REQUESTS);

    // Wait until every participant has finished.
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_WAIT, 0);
    err = ns_barrier_worker((int)mycore, "mem_bench_finished");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "barrier_worker failed");
    }

    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_DONE, 0);
    return EXIT_SUCCESS;
}
int main(int argc, char *argv[]) { struct adafs_touch_state ts[2] = { ADAFS_TOUCH_STATE_INIT(LEN_BITS), ADAFS_TOUCH_STATE_INIT(LEN_BITS) }; enum state s; double log_int; double timeout; if (argc != 4) { fprintf(stderr, "Usage: %s EventLog MultiThreshold IntervalThreshold\n", argv[0]); return -1; } freopen(argv[1], "r", stdin); THR_M = atof(argv[2]); THR_INT = atof(argv[3]); s = ST_CON; timeout = THR_INT; while (scanf("%lf", &log_int) == 1) { while (log_int > timeout) { log_int -= timeout; trace_event(EV_TIMER, log_int + timeout, s); timeout = transfer(ts, &s, EV_TIMER, log_int + timeout); trace_state(s, timeout); } trace_event(EV_USER, log_int, s); timeout = transfer(ts, &s, EV_USER, log_int); trace_state(s, timeout); } printf("%s:\t%d\t%d\t%f\t%f\n", argv[1], num_conflicts, num_pred, total_len, total_len/num_pred); return 0; }
/** Called when a worker spawns its first task to set its bot value so other
 * workers can steal tasks from it. */
static inline void set_bot(struct generic_task_desc * val)
{
    trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING, 0);
    struct worker_desc * me = (struct worker_desc *) thread_get_tls();
    LOCK(me->lock);
    trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING_END, 0);
    // Publish the steal point under the worker's lock.
    me->bot = val;
    UNLOCK(me->lock);
}
/** Initializes _tweed_top_ to start of this worker's task block */
struct generic_task_desc * set_top(void)
{
    trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING, 0);
    struct worker_desc * me = (struct worker_desc *) thread_get_tls();
    LOCK(me->lock);
    trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING_END, 0);
    // Reset bot to the base of this worker's task-descriptor stack.
    me->bot = workers[me->id].task_desc_stack;
    UNLOCK(me->lock);
    return workers[me->id].task_desc_stack;
}
// This function is called whenever new data arrives for client void handle_data_arrived(char *payload, size_t data_len) { volatile uint8_t *b =(uint8_t *)payload; if (read_incoming) { for (int i = 0; i< data_len; i++) { acc += (b[i]); } } if (is_server) { return; } #if TRACE_ONLY_LLNET trace_event(TRACE_SUBSYS_LLNET, TRACE_EVENT_LLNET_APPRX, 0); #endif // TRACE_ONLY_LLNET // record completion time cycles_t tsc = rdtsc(); cycles_t result[1] = { tsc - sent_at, }; if (bench_ctl_add_run(bench_ctl, result)) { uint64_t tscperus = tscperms / 1000; printf("cycles per us %"PRIu64"\n", tscperus); // Output our results bench_ctl_dump_csv_bincounting(bench_ctl, 0, 100, 9 * tscperus, 25 * tscperus, out_prefix, tscperus); bench_ctl_dump_analysis(bench_ctl, 0, out_prefix, tscperus); // bench_ctl_dump_csv(bench_ctl, out_prefix, tscperus); #if TRACE_ONLY_LLNET trace_event(TRACE_SUBSYS_LLNET, TRACE_EVENT_LLNET_STOP, 0); size_t trsz = trace_dump(trbuf, sizeof(trbuf) - 1, NULL); trbuf[trsz] = 0; printf("\n\n\n\nTrace results:\n%s\n\n\n", trbuf); #endif // TRACE_ONLY_LLNET bench_ctl_destroy(bench_ctl); terminate_benchmark(); printf("pkt content some is %zd\n", acc); return; } start_next_iteration(); } // end function: handle_data_arrived
// Scroll a text banner across the pixel-display bindings.
// Optional argv: argv[1] = per-pixel busy-wait width, argv[2] = frames per
// scroll position.
static int demo(int argc, char *argv[])
{
    int core;
    int pixwidth = PIXEL_WIDTH;
    int frames = FRAMES;

    if (!pixels_inited) pixels_init();

    if (argc == 3) {
        pixwidth = atoi(argv[1]);
        frames = atoi(argv[2]);
    }

    // Total scroll width in pixel columns (8 columns per character).
    int width = 8 * strlen(scroller);

    for (int x = 0; x < width - RENDER_WIDTH; x++) {
        // Repeat each frame a few times to slow down scrolling!
        for (int f = 0; f < frames; f++) {
            trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);
            for (int i = 0; i < RENDER_WIDTH; i++) {
                int xpos = (x + i) % width;
                // Map the character under this column to its C64 glyph.
                char ascii = scroller[xpos >> 3];
                char c64char = c64map(ascii);
                int xsub = xpos & 7;

                acks = 0;
                // One display binding per row (offset by 2); send a
                // 'display' message for every lit pixel in this column.
                for (core = 0; core < 8; core++) {
                    unsigned char bits = font[c64char*8 + (7-core)];
                    if (bits & (1<<(7-xsub)) ) {
                        my_pixels_bindings[core+2].tx_vtbl.display(&my_pixels_bindings[core+2], NOP_CONT, pixwidth);
                        acks++;
                    }
                }

                // Drain messages until all acks arrive (acks is decremented
                // elsewhere), then pad the column to a fixed duration.
                uint64_t now = rdtsc();
                while (acks) {
                    messages_wait_and_handle_next();
                }
                while (rdtsc() - now < pixwidth) ;
            }
            trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);
        }
    }
    return 0;
}
// spantest: measure how long it takes to span this domain onto N extra
// cores (argv[1] = number of additional threads).
int main(int argc, char *argv[])
{
    errval_t err;

    if (argc != 2) {
        printf("Usage %s: <Num additional threads>\n", argv[0]);
        exit(-1);
    }

    //printf("main running on %d\n", disp_get_core_id());

    // Total dispatcher count = requested extra threads + this core.
    int cores = strtol(argv[1], NULL, 10) + 1;
    NPROC = cores -1;
    BARINIT(barrier, NPROC);

    uint64_t before = rdtsc();
    times[0] = before;
    trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);

    // Create one new dispatcher per consecutive core.
    for (int i = 1; i < cores; i++) {
        err = domain_new_dispatcher(i + disp_get_core_id(),
                                    domain_spanned_callback,
                                    (void*)(uintptr_t)i);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "domain_new_dispatcher failed");
        }
    }

    // Yield until every spanned dispatcher has called back
    // (ndispatchers is incremented by the callback).
    while (ndispatchers < cores) {
        thread_yield();
    }
    uint64_t finish = rdtsc();
    trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);

    //sys_print("\nDone\n", 6);
    printf("spantest: Done in %"PRIu64" cycles\n", finish-before);
    //trace_dump();

    // Start a worker thread on each new dispatcher.
    for (int i = 1; i < cores; i++) {
        err = domain_thread_create_on(i, remote, NULL);
        assert(err_is_ok(err));
    }

    messages_handler_loop();
    return 0;
}
// bomptest: spin all OpenMP threads on a shared counter for a fixed number
// of cycles and report the elapsed time.
int main(int argc, char *argv[])
{
    // NOTE(review): workcnt is incremented by all threads without atomics;
    // volatile alone does not make this race-free — presumably acceptable
    // for a load-generating benchmark.
    volatile uint64_t workcnt = 0;
    int nthreads;

    debug_printf("bomptest started.\n");
    bench_init();

#if CONFIG_TRACE
    // Arm tracing between the BENCH_START and BENCH_STOP events.
    errval_t err = trace_control(TRACE_EVENT(TRACE_SUBSYS_ROUTE,
                                             TRACE_EVENT_ROUTE_BENCH_START,
                                             0),
                                 TRACE_EVENT(TRACE_SUBSYS_ROUTE,
                                             TRACE_EVENT_ROUTE_BENCH_STOP,
                                             0), 0);
    assert(err_is_ok(err));
#endif

    if (argc == 2) {
        nthreads = atoi(argv[1]);
        backend_span_domain(nthreads, STACK_SIZE);
        bomp_custom_init(NULL);
        omp_set_num_threads(nthreads);
    } else {
        assert(!"Specify number of threads");
    }

    trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_START, 0);
    uint64_t start = bench_tsc();

    // The parallel region is the while loop below: every OpenMP thread
    // spins incrementing workcnt until the cycle budget is exhausted.
#pragma omp parallel
    while (rdtsc() < start + 805000000ULL) {
        workcnt++;
    }

    uint64_t end = bench_tsc();
    trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_STOP, 0);
    printf("done. time taken: %" PRIu64 " cycles.\n", end - start);

#if CONFIG_TRACE
    // NOTE(review): malloc result is not checked before use.
    char *buf = malloc(4096*4096);
    trace_dump(buf, 4096*4096, NULL);
    printf("%s\n", buf);
#endif

    // Never returns: keep the domain alive.
    for (;;);
    return 0;
}
/**
 * Memory-exhaustion benchmark: repeatedly allocate RAM caps of MEM_BITS
 * size until ram_alloc fails, then report how many were obtained.
 */
static void run_benchmark(coreid_t core, int requests)
{
    errval_t err;
    struct capref ramcap;
    int count = -1;
    int bits = MEM_BITS;

    debug_printf("starting benchmark. allocating mem of size: %d\n", bits);

    sleep_init();

    // Keep allocating until the first failure.
    do {
        count++;
        trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_ALLOC, count);
        err = ram_alloc(&ramcap, bits);
    } while (err_is_ok(err));

    debug_printf("done benchmark. allocated %d caps (%lu bytes)\n",
                 count, count * (1UL << bits));
}
/**
 * Set the RBED scheduling parameters of a dispatcher capability.
 * Removes the dispatcher from the run queue, updates its task properties,
 * and re-inserts it via make_runnable.  A release time of 0 means "now"
 * (kernel_now).  No-op (other than the type assert) unless the kernel is
 * built with CONFIG_SCHEDULER_RBED.
 */
struct sysret sys_dispatcher_properties(struct capability *to,
                                        enum task_type type,
                                        unsigned long deadline,
                                        unsigned long wcet,
                                        unsigned long period,
                                        unsigned long release,
                                        unsigned short weight)
{
    assert(to->type == ObjType_Dispatcher);

#ifdef CONFIG_SCHEDULER_RBED
    struct dcb *dcb = to->u.dispatcher.dcb;

    // Sanity-check the requested parameters: valid task class, WCET must
    // fit within both deadline and period, best-effort needs a weight.
    assert(type >= TASK_TYPE_BEST_EFFORT && type <= TASK_TYPE_HARD_REALTIME);
    assert(wcet <= deadline);
    assert(wcet <= period);
    assert(type != TASK_TYPE_BEST_EFFORT || weight > 0);

    trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE, 152);
    scheduler_remove(dcb);

    /* Set task properties */
    dcb->type = type;
    dcb->deadline = deadline;
    dcb->wcet = wcet;
    dcb->period = period;
    dcb->release_time = (release == 0) ? kernel_now : release;
    dcb->weight = weight;

    make_runnable(dcb);
#endif

    return SYSRET(SYS_ERR_OK);
}
/**
 * Register a new member: trigger the member-add backend event, log the
 * new member in, and record a member-registration trace event.
 * NOTE(review): HDF_GET_STR / MEVENT_TRIGGER and friends are macros that
 * may return from this function on error — control flow is hidden in them;
 * verify against their definitions.
 */
NEOERR* member_new_data_add(CGI *cgi, HASH *dbh, HASH *evth, session_t *ses)
{
    mevent_t *evt = hash_lookup(evth, "member");
    char *mnick, *mname, *mid;

    MCS_NOT_NULLB(cgi->hdf, evt);

    // Fetch and validate nickname and login name from the query.
    HDF_GET_STR(cgi->hdf, PRE_QUERY".mnick", mnick);
    HDF_GET_STR(cgi->hdf, PRE_QUERY".mname", mname);
    LEGAL_CHECK_NICK(mnick);
    LEGAL_CHECK_NAME(mname);

    // Forward the whole query subtree to the backend and fire the event.
    hdf_copy(evt->hdfsnd, NULL, hdf_get_obj(cgi->hdf, PRE_QUERY));
    MEVENT_TRIGGER(evt, mname, REQ_CMD_MEMBER_ADD, FLAGS_SYNC);

    // The backend replies with the new member id.
    mid = hdf_get_value(evt->hdfrcv, "mid", NULL);
    member_after_login(cgi, evth, mname, mnick, mid);

    // Stash nick/name into the reserved event slots for the trace record.
    char *s;
    HDF_FETCH_STR(cgi->hdf, PRE_QUERY".mnick", s);
    hdf_set_value(cgi->hdf, PRE_RESERVE".event.es_one", s);
    HDF_FETCH_STR(cgi->hdf, PRE_QUERY".mname", s);
    hdf_set_value(cgi->hdf, PRE_RESERVE".event.es_two", s);

    HDF *node = hdf_get_obj(cgi->hdf, PRE_RESERVE".event");
    return nerr_pass(trace_event(node, evth, ses, TRACE_TYPE_MEMBER_REG));
}
/**
 * Dispatch one perf event record by its header type.
 * Returns 0 for handled/ignored records, -1 for unknown types.
 */
static int process_event(event_t *event, unsigned long offset,
                         unsigned long head)
{
    trace_event(event);

    switch (event->header.type) {
    case PERF_RECORD_COMM:
        return process_comm_event(event, offset, head);

    case PERF_RECORD_SAMPLE:
        return process_sample_event(event, offset, head);

    /* Known but uninteresting record types are silently accepted. */
    case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
    case PERF_RECORD_EXIT ... PERF_RECORD_READ:
        return 0;

    case PERF_RECORD_MAX:
    default:
        return -1;
    }
}
/** Handle stolen task: spin (yielding when allowed) until the thief marks
 *  the task complete, then reclaim its slot by resetting bot. */
int handle_stolen_task(struct generic_task_desc * _tweed_top_)
{
    trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_WAIT,
                GET_THIEF(_tweed_top_)->core_id);

    // Wait for the thief to flag completion.
    while ((_tweed_top_->balarm & TWEED_TASK_COMPLETE) == 0) {
        if (!waiting(_tweed_top_)) {
            thread_yield();
        }
    }

    trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_WAIT_END,
                GET_THIEF(_tweed_top_)->core_id);

    // update bot
    set_bot(_tweed_top_);
    return 0;
}
// First message from the server in the multihop benchmark: move the
// binding onto a private waitset, optionally arm tracing, then start the
// experiment.
static void fsb_init_msg(struct bench_binding *b, coreid_t id)
{
    errval_t err;

    // change waitset of the binding
    waitset_init(&signal_waitset);
    err = b->change_waitset(b, &signal_waitset);
    assert(err_is_ok(err));
    binding = b;
    reply_received = true;

#if CONFIG_TRACE
    // configure tracing between BENCH_START and BENCH_STOP
    err = trace_control(TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                                    TRACE_EVENT_MULTIHOP_BENCH_START,
                                    0),
                        TRACE_EVENT(TRACE_SUBSYS_MULTIHOP,
                                    TRACE_EVENT_MULTIHOP_BENCH_STOP,
                                    0), 0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_control failed");
    }
#endif

    // start tracing
    err = trace_event(TRACE_SUBSYS_MULTIHOP,
                      TRACE_EVENT_MULTIHOP_BENCH_START, 0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_event failed");
    }

    experiment();
}
static void handle_cache_load_done(void) { if (!are_all_pages_loaded()) { return; } DEBUGPRINT("initial_cache_load: entire cache loaded done\n"); cache_loading_phase = false; /* FIXME: stop the trace. */ #if ENABLE_WEB_TRACING trace_event(TRACE_SUBSYS_NET, TRACE_EVENT_NET_STOP, 0); char *buf = malloc(4096*4096); trace_dump(buf, 4096*4096, NULL); printf("%s\n", buf); #endif // ENABLE_WEB_TRACING // lwip_benchmark_control(1, BMS_STOP_REQUEST, 0, 0); // Report the cache loading time printf("Cache loading time %"PU"\n", in_seconds(rdtsc() - last_ts)); // lwip_print_interesting_stats(); /* continue with the web-server initialization. */ init_callback(); /* do remaining initialization! */ }
/**
 * Heap-exhaustion benchmark: malloc (and touch) MALLOC_SIZE blocks until
 * malloc fails, reporting progress every 500 allocations.
 */
static void run_benchmark(coreid_t core, int requests)
{
    debug_printf("starting benchmark. mallocing mem of size: %d\n",
                 MALLOC_SIZE);

    int count = -1;
    char *buf;

    do {
        count++;
        trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_ALLOC, count);
        buf = malloc(MALLOC_SIZE);
        if (buf != NULL) {
            // Touch the block so pages are actually backed.
            memset(buf, 'a', MALLOC_SIZE);
        }
        if ((count % 500 == 0) && (count > 0)) {
            debug_printf("performed %d allocs\n", count);
        }
    } while (buf != NULL);

    debug_printf("done benchmark. allocated %d memory %d times: total: %lu\n",
                 MALLOC_SIZE, count, (unsigned long)MALLOC_SIZE * count);
}
// FIXME: error handling (not asserts) needed in this function static void mem_allocate_handler(struct mem_binding *b, uint8_t bits, genpaddr_t minbase, genpaddr_t maxlimit) { struct capref *cap = malloc(sizeof(struct capref)); errval_t err, ret; trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits); /* refill slot allocator if needed */ err = slot_prealloc_refill(mm_ram.slot_alloc_inst); assert(err_is_ok(err)); /* refill slab allocator if needed */ while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) { struct capref frame; err = msa.a.alloc(&msa.a, &frame); assert(err_is_ok(err)); err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL); assert(err_is_ok(err)); void *buf; err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL); if (err_is_fail(err)) { DEBUG_ERR(err, "vspace_map_one_frame failed"); assert(buf); } slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8); } ret = mymm_alloc(cap, bits, minbase, maxlimit); if (err_is_ok(ret)) { mem_avail -= 1UL << bits; } else { // DEBUG_ERR(ret, "allocation of %d bits in % " PRIxGENPADDR "-%" PRIxGENPADDR " failed", // bits, minbase, maxlimit); *cap = NULL_CAP; } /* Reply */ err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap), ret, *cap); if (err_is_fail(err)) { if (err_no(err) == FLOUNDER_ERR_TX_BUSY) { struct pending_reply *r = malloc(sizeof(struct pending_reply)); assert(r != NULL); r->b = b; r->err = ret; r->cap = cap; err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r)); assert(err_is_ok(err)); } else { DEBUG_ERR(err, "failed to reply to memory request"); allocate_response_done(cap); } } }
/** Steal work from another worker's task stack.
 *  Returns 1 if a task was stolen and executed, 0 otherwise.  The victim's
 *  lock is held while claiming the task and released before running it. */
static int steal(struct generic_task_desc * _tweed_top_,
                 struct worker_desc * victim) {
    struct generic_task_desc * stolenTask;
    struct worker_desc * me = (struct worker_desc *) thread_get_tls();

    LOCK(victim->lock);

    stolenTask = victim->bot;

    // check if there is actually work to steal
    if (stolenTask != NULL && stolenTask->balarm == TWEED_TASK_NEW) {
        // try to steal task
        tweed_task_func_t func = steal_task(stolenTask, me);
        if (func == NULL) {
            // we didn't succeed in the steal, back off
#ifndef TWEED_USE_CAS
            // Roll back the claim state set during the failed steal.
            stolenTask->balarm = TWEED_TASK_INLINED;
            stolenTask->thief = NULL;
#endif
            UNLOCK(victim->lock);
            return 0; // didn't steal anything
        } else {
            // we have stolen the task, update bot
            atomic_inc(&(victim->bot), stolenTask->size);
            UNLOCK(victim->lock);

            // and run task
            trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_STEAL,
                        victim->core_id);
            func(_tweed_top_, stolenTask);
            trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_STEAL_END,
                        victim->core_id);

            // signal task completion (the owner spins on this flag)
            stolenTask->balarm |= TWEED_TASK_COMPLETE;

            return 1;
        }
    } else {
        UNLOCK(victim->lock);
        return 0; // didn't steal anything
    }
}
/*******************************************************
 * Create a new traceable event type
 * Parameters :
 *     pm_event_type, string describing event type
 *     pm_event_desc, string used for standard formatting
 *     pm_format_type, type of formatting used to log event data
 *     pm_format_data, data specific to format
 *     pm_owner_pid, PID of event's owner (0 if none)
 * Return values :
 *     New Event ID if all is OK
 *     -ENOMEM, Unable to allocate new event
 *******************************************************/
int _trace_create_event(char* pm_event_type,
                        char* pm_event_desc,
                        int pm_format_type,
                        char* pm_format_data,
                        pid_t pm_owner_pid)
{
    struct custom_event_desc* p_new_event;   /* Newly created event */

    /* Create event */
    if ((p_new_event = (struct custom_event_desc*)
             kmalloc(sizeof(struct custom_event_desc), GFP_ATOMIC)) == NULL)
        return -ENOMEM;

    /* Initialize event properties */
    p_new_event->event.type[0] = '\0';
    p_new_event->event.desc[0] = '\0';
    p_new_event->event.form[0] = '\0';

    /* Set basic event properties */
    if (pm_event_type != NULL)
        strncpy(p_new_event->event.type, pm_event_type,
                CUSTOM_EVENT_TYPE_STR_LEN);
    if (pm_event_desc != NULL)
        strncpy(p_new_event->event.desc, pm_event_desc,
                CUSTOM_EVENT_DESC_STR_LEN);
    if (pm_format_data != NULL)
        strncpy(p_new_event->event.form, pm_format_data,
                CUSTOM_EVENT_FORM_STR_LEN);

    /* Ensure that strings are bound (strncpy does not guarantee it) */
    p_new_event->event.type[CUSTOM_EVENT_TYPE_STR_LEN - 1] = '\0';
    p_new_event->event.desc[CUSTOM_EVENT_DESC_STR_LEN - 1] = '\0';
    p_new_event->event.form[CUSTOM_EVENT_FORM_STR_LEN - 1] = '\0';

    /* Set format type */
    p_new_event->event.format_type = pm_format_type;

    /* Set event's owner */
    p_new_event->owner_pid = pm_owner_pid;

    /* Insert new event in event list.
     * BUG FIX: the unique-ID assignment (next_event_id++) is now done
     * under custom_list_lock; it was previously performed without any
     * locking, so two concurrent creators could obtain the same ID. */
    write_lock(&custom_list_lock);
    p_new_event->event.id = next_event_id;
    next_event_id++;
    p_new_event->next = custom_events;
    p_new_event->prev = custom_events->prev;
    custom_events->prev->next = p_new_event;
    custom_events->prev = p_new_event;
    write_unlock(&custom_list_lock);

    /* Log the event creation event */
    trace_event(TRACE_EV_NEW_EVENT, &(p_new_event->event));

    /* Return new event ID */
    return p_new_event->event.id;
}
/**
 * THC service handler for a per-core steal request: perform the steal and
 * ship the resulting cap (and error code) back to the requester.
 */
static void percore_steal_handler(struct mem_thc_service_binding_t *sv,
                                  uint8_t bits,
                                  genpaddr_t minbase, genpaddr_t maxlimit)
{
    struct capref stolen;
    errval_t outcome = percore_steal_handler_common(bits, minbase, maxlimit,
                                                    &stolen);
    sv->send.steal(sv, outcome, stolen);
    trace_event(TRACE_SUBSYS_MEMSERV,
                TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
/******************************************************* * Trace a formatted event * Parameters : * pm_event_id, the event Id provided upon creation * ..., printf-like data that will be used to fill the * event string. * Return values : * 0, all is OK * -ENOMEDIUM, there isn't a registered tracer or this * event doesn't exist. * -EBUSY, tracing hasn't started yet *******************************************************/ int trace_std_formatted_event(int pm_event_id, ...) { int l_string_size; /* Size of the string outputed by vsprintf() */ char l_string[CUSTOM_EVENT_FINAL_STR_LEN]; /* Final formatted string */ va_list l_var_arg_list; /* Variable argument list */ trace_custom l_custom; /* Custom event */ struct custom_event_desc* p_event_desc; /* Generic event description pointer */ /* Lock the table for reading */ read_lock(&custom_list_lock); /* Go through the event description list */ for(p_event_desc = custom_events->next; p_event_desc != custom_events; p_event_desc = p_event_desc->next) if(p_event_desc->event.id == pm_event_id) break; /* If we haven't found anything */ if(p_event_desc == custom_events) { /* Unlock the table for reading */ read_unlock(&custom_list_lock); /* No such thing */ return -ENOMEDIUM; } /* Set custom event Id */ l_custom.id = pm_event_id; /* Initialize variable argument list access */ va_start(l_var_arg_list, pm_event_id); /* Print the description out to the temporary buffer */ l_string_size = vsprintf(l_string, p_event_desc->event.desc, l_var_arg_list); /* Unlock the table for reading */ read_unlock(&custom_list_lock); /* Facilitate return to caller */ va_end(l_var_arg_list); /* Set the size of the event */ l_custom.data_size = (uint32_t) (l_string_size + 1); /* Set the pointer to the event data */ l_custom.data = l_string; /* Log the custom event */ return trace_event(TRACE_EV_CUSTOM, &l_custom); }
/* Queue-driver interrupt entry point: poll for newly received packets when
 * the RX cause is set, and always reclaim completed TX buffers. */
void qd_interrupt(bool is_rx, bool is_tx)
{
#if TRACE_ETHERSRV_MODE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_NI_I, 0);
#endif // TRACE_ETHERSRV_MODE

    if (is_rx) {
        size_t count = check_for_new_packets(0);
        if (count == 0) {
            //printf("No RX\n");
        }
    }

    check_for_free_txbufs();
}
// called when a message is received static inline void message_received(void) { errval_t err; // save timestamp timestamps[i].time1 = bench_tsc(); // trace receive event err = trace_event(TRACE_SUBSYS_MULTIHOP, TRACE_EVENT_MULTIHOP_MESSAGE_RECEIVE, 0); if (err_is_fail(err)) { USER_PANIC_ERR(err, "trace_event failed"); } reply_received = true; }
// Echo handler for the multihop buffer benchmark: trace the receive and
// send the payload straight back to the requester.
static void fsb_buffer_request(struct bench_binding *b, uint8_t *payload,
                               size_t size)
{
    errval_t err;

    err = trace_event(TRACE_SUBSYS_MULTIHOP,
                      TRACE_EVENT_MULTIHOP_MESSAGE_RECEIVE, 0);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "trace_event failed");
    }

    err = b->tx_vtbl.fsb_buffer_reply(b, MKCONT(continue_signal, NULL),
                                      payload, size);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "error while sending reply message in client\n");
    }

    // NOTE(review): payload is freed right after the send is issued —
    // presumably the stub copies/marshals the buffer before returning;
    // verify against the messaging backend in use.
    free(payload);
}
/******************************************************* * Relog the declarations of custom events. This is * necessary to make sure that even though the event * creation might not have taken place during a trace, * that all custom events be part of all traces. Hence, * if a custom event occurs during a trace, we can be * sure that it's definition is part of the trace. * Parameters : * NONE * Return values : * NONE *******************************************************/ void trace_reregister_custom_events(void) { struct custom_event_desc* p_event_desc; /* Generic event description pointer */ /* Lock the table for reading */ read_lock(&custom_list_lock); /* Go through the event description list */ for(p_event_desc = custom_events->next; p_event_desc != custom_events; p_event_desc = p_event_desc->next) /* Log the event creation event */ trace_event(TRACE_EV_NEW_EVENT, &(p_event_desc->event)); /* Unlock the table for reading */ read_unlock(&custom_list_lock); }
/**
 * THC service handler for a per-core allocation request: allocate, reply
 * with the result, then drop our copy of the capability.
 */
static void percore_allocate_handler(struct mem_thc_service_binding_t *sv,
                                     uint8_t bits,
                                     genpaddr_t minbase, genpaddr_t maxlimit)
{
    errval_t ret;
    struct capref cap;

    ret = percore_allocate_handler_common(bits, minbase, maxlimit, &cap);
    sv->send.allocate(sv, ret, cap);

    // Our copy of the cap is no longer needed once it has been sent.
    if (!capref_is_null(cap)) {
        errval_t del_err = cap_delete(cap);
        if (err_is_fail(del_err)) {
            // BUG FIX: the original passed 'err' (undeclared in this
            // function) to DEBUG_ERR instead of the cap_delete result.
            DEBUG_ERR(del_err, "cap_delete after send. This memory will leak.");
        }
    }

    trace_event(TRACE_SUBSYS_MEMSERV,
                TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
// Informs the benchmarking code about initialization of connection void handle_connection_opened(void) { printf("Benchmark connection opened\n"); #if TRACE_ONLY_LLNET errval_t err; err = trace_control(TRACE_EVENT(TRACE_SUBSYS_LLNET, TRACE_EVENT_LLNET_START, 0), TRACE_EVENT(TRACE_SUBSYS_LLNET, TRACE_EVENT_LLNET_STOP, 0), 0); assert(err_is_ok(err)); trace_event(TRACE_SUBSYS_LLNET, TRACE_EVENT_LLNET_START, 0); #endif // TRACE_ONLY_LLNET started_at = rdtsc(); start_next_iteration(); }
/* * This function transmits a packet via the network. * * It has to make sure to properly mark the last packet of an array of * packets, so we can properly collect fully transmitted packets * asynchronously later on. Only the last packet is marked with a * write-back indication and we scan for that to know the packet was * written out. */ static errval_t transmit_pbuf_list_fn(struct driver_buffer *buffers, size_t count) { size_t i; size_t totallen = 0; size_t start = 0; DEBUG("Add buffer callback %d:\n", count); // TODO: Make sure there is room in TX queue for (i = 0; i < count; i++) { totallen += buffers[i].len; } // Prepare checksum offload if (buf_use_ipxsm(buffers)) { e10k_q_l4_type_t l4t = 0; uint8_t l4len = 0; if (buf_use_tcpxsm(buffers)) { l4t = e10k_q_tcp; l4len = buf_tcphdrlen(buffers); } else if (buf_use_udpxsm(buffers)) { l4t = e10k_q_udp; l4len = UDPHDR_LEN; } e10k_queue_add_txcontext(q, 0, ETHHDR_LEN, IPHDR_LEN, l4len, l4t); e10k_queue_add_txbuf_ctx(q, buffers[0].pa, buffers[0].len, buffers[0].opaque, 1, (count == 1), totallen, 0, true, l4len != 0); start++; } for (i = start; i < count; i++) { e10k_queue_add_txbuf(q, buffers[i].pa, buffers[i].len, buffers[i].opaque, (i == 0), (i == count - 1), totallen); } e10k_queue_bump_txtail(q); #if TRACE_ETHERSRV_MODE trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_DRV_SEE, 0); #endif // TRACE_ETHERSRV_MODE return SYS_ERR_OK; }
// send one message to server static void start_next_iteration(void) { int ret; #if TRACE_ONLY_LLNET trace_event(TRACE_SUBSYS_LLNET, TRACE_EVENT_LLNET_APPTX, 0); #endif // TRACE_ONLY_LLNET sent_at = rdtsc(); if (use_udp) { // send UDP datagram ret = send_udp_message_client(); assert(ret == 0); } else { // send TCP msg ret = send_message_client(data_to_send, payload_size); assert(ret == 0); } } // end function: start_next_iteration
/* Reclaim one completed TX descriptor, if any.
 * Returns true when a buffer was reclaimed and handed to handle_tx_done,
 * false when no completed descriptor was pending. */
static bool handle_free_tx_slot_fn(void)
{
    void *op;

    if (e10k_queue_get_txbuf(q, &op) != 0) {
        return false;
    }

    DEBUG("handle_free_tx_slot_fn: Packet %p done\n", op);
    stats_dump();

#if TRACE_ETHERSRV_MODE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_DRVTXDONE, 0);
#endif // TRACE_ETHERSRV_MODE

    handle_tx_done(op);
    return true;
}