/**
 * \brief Return the measured TSC read overhead, in cycles.
 *
 * Lazily runs bench_init() on first use so the overhead value is valid.
 *
 * \return cached tsc_overhead measured during bench_init()
 */
cycles_t bench_tscoverhead(void)
{
    if (!bench_is_initialized) {
        bench_init();
    }
    return tsc_overhead;
}
/**
 * Multihop benchmark driver.
 *
 * With no arguments, acts as the server: spawns a copy of itself as the
 * client, then exports the bench service and waits for connections.
 * With any argument, acts as the client: looks up "multihop_server" and
 * binds to it.  Both roles end in the message handler loop.
 */
int main(int argc, char *argv[])
{
    errval_t err;

    /* Set my core id */
    my_core_id = disp_get_core_id();
    strcpy(my_name, argv[0]);

    printf("entered\n");
    bench_init();
    printf("bench_init done\n");

    if (argc == 1) { /* server */

        /* 1. spawn domain,
           2. setup a server,
           3. wait for client to connect,
           4. run experiment */
        char *xargv[] = { my_name, "dummy", "dummy", "dummy", NULL };
        err = spawn_program(1, my_name, xargv, NULL, SPAWN_FLAGS_DEFAULT, NULL);
        assert(err_is_ok(err));

        /* Setup a server */
        err = bench_export(NULL, export_cb, connect_cb, get_default_waitset(),
                           IDC_BIND_FLAGS_DEFAULT);
        assert(err_is_ok(err));
    } else {
        /* Connect to the server */
        printf("ns lookup\n");
        err = nameservice_blocking_lookup("multihop_server", &iref);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "nameservice_blocking_lookup failed");
            abort();
        }

        printf("bench_bind\n");
        // bind a first time for signaling
        err = bench_bind(iref, bind_signal_cb, NULL, get_default_waitset(),
                         IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "bind failed");
            abort();
        }
    }

    // dispatch events forever; the experiment runs from the callbacks
    messages_handler_loop();
    return 0;
}
/**
 * Groonga table-open benchmark: compares "normal" vs "factory" table
 * creation, both persistent (on disk under the temp dir) and temporary,
 * running each variant n times through the bench reporter.
 */
int main(int argc, gchar **argv)
{
    grn_rc rc;
    BenchmarkData data;
    BenchReporter *reporter;
    gint n = 100;  /* iterations per registered benchmark */

    rc = grn_init();
    if (rc != GRN_SUCCESS) {
        g_print("failed to initialize Groonga: <%d>: %s\n",
                rc, grn_get_global_error_message());
        return EXIT_FAILURE;
    }

    bench_init(&argc, &argv);

    /* shared fixture handed to every setup/run/teardown callback */
    data.context = g_new(grn_ctx, 1);
    data.base_dir = g_build_filename(g_get_tmp_dir(), "groonga-bench", NULL);
    data.name = "table";
    data.name_size = strlen(data.name);
    data.path = g_build_filename(data.base_dir, "table", NULL);
    data.flags = DEFAULT_FLAGS;
    data.key_type = NULL;
    data.value_size = DEFAULT_VALUE_SIZE;
    data.encoding = GRN_ENC_DEFAULT;

    reporter = bench_reporter_new();
    bench_reporter_register(reporter, "normal (persistent)", n,
                            bench_setup, bench_normal, bench_teardown, &data);
    bench_reporter_register(reporter, "factory (persistent)", n,
                            bench_setup, bench_factory, bench_teardown, &data);
    bench_reporter_register(reporter, "normal (temporary)", n,
                            bench_setup, bench_normal_temporary,
                            bench_teardown, &data);
    bench_reporter_register(reporter, "factory (temporary)", n,
                            bench_setup, bench_factory_temporary,
                            bench_teardown, &data);
    bench_reporter_run(reporter);
    g_object_unref(reporter);

    /* remove the on-disk artifacts created by the persistent runs */
    bench_utils_remove_path_recursive_force(data.base_dir);

    g_free(data.path);
    g_free(data.base_dir);
    g_free(data.context);

    bench_quit();
    grn_fin();

    return EXIT_SUCCESS;
}
int main(int argc, char **argv){ int i = 0; unsigned long long start, end, delta; pthread_t tid[MAX_NUM_THREADS]; bench_log("Welcome to %s:\n\n", argv[0]); getopts(argc, argv); bench_init(1 /* start */); g_msgbuf = make_msgbuf(g_msgsize); if ( g_msgsize < 5 ) { bench_log(" Ridiculously short message %ld--exiting\n", g_msgsize); exit(1); } bench_log(" Starting %d thread(s)\n", g_numthreads); start = gettimestamp_milis(); for ( i = 0; i < g_numthreads; i++) pthread_create(&tid[i], NULL, thread_work, (void *)&i); for ( i = 0; i < g_numthreads; i++) pthread_join(tid[i], NULL); end = gettimestamp_milis(); delta = (end - start); #ifdef _WIN32 bench_log( "\n Time = %I64d milisec\n", delta); #else bench_log( "\n Time = %lld milisec\n", delta); #endif bench_init(0); return(0); }
/**
 * \brief computes the difference of two time stamps, respecting overflow
 *
 * This function also accounts for the overhead when taking timestamps
 *
 * \param tsc_start timestamp of start
 * \param tsc_end timestamp of end
 *
 * \return elapsed cycles
 */
cycles_t bench_time_diff(cycles_t tsc_start, cycles_t tsc_end)
{
    // lazily initialize so bench_tscoverhead() below returns a valid value
    if (!bench_is_initialized) {
        bench_init();
    }
    cycles_t result;
    if (tsc_end < tsc_start) {
        // counter wrapped between the two samples
        // NOTE(review): LONG_MAX is used as the wrap point; if cycles_t is a
        // 64-bit unsigned counter the elapsed value would actually be
        // (counter max - tsc_start) + tsc_end + 1 — confirm the counter width.
        result = (LONG_MAX - tsc_start) + tsc_end - bench_tscoverhead();
    } else {
        // NOTE(review): wraps around if the raw difference is smaller than
        // the measured TSC overhead — verify callers tolerate that.
        result = (tsc_end - tsc_start - bench_tscoverhead());
    }
    return result;
}
/**
 * Calibration loop: takes 300 samples, each timing an empty volatile spin
 * of ITERATIONS steps with the TSC, and prints each measured cycle count
 * on its own line.
 */
int main(int argc, char *argv[])
{
    bench_init();

    for (int round = 0; round < 300; round++) {
        uint64_t t0 = bench_tsc();
        for (volatile int spin = 0; spin < ITERATIONS; spin++) {
            /* empty busy-wait body */
        }
        uint64_t t1 = bench_tsc();
        printf("%"PRIu64"\n", t1 - t0);
    }

    return EXIT_SUCCESS;
}
/**
 * BOMP/OpenMP benchmark: spans the domain over the requested thread count,
 * then has all OpenMP threads spin incrementing workcnt for a fixed
 * TSC-cycle window, reporting the elapsed cycles (plus an optional trace
 * dump when CONFIG_TRACE is set).
 */
int main(int argc, char *argv[])
{
    volatile uint64_t workcnt = 0;
    int nthreads;

    debug_printf("bomptest started.\n");
    bench_init();

#if CONFIG_TRACE
    errval_t err = trace_control(TRACE_EVENT(TRACE_SUBSYS_ROUTE,
                                             TRACE_EVENT_ROUTE_BENCH_START,
                                             0),
                                 TRACE_EVENT(TRACE_SUBSYS_ROUTE,
                                             TRACE_EVENT_ROUTE_BENCH_STOP,
                                             0), 0);
    assert(err_is_ok(err));
#endif

    if(argc == 2) {
        nthreads = atoi(argv[1]);
        backend_span_domain(nthreads, STACK_SIZE);
        bomp_custom_init(NULL);
        omp_set_num_threads(nthreads);
    } else {
        assert(!"Specify number of threads");
    }

    trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_START, 0);
    uint64_t start = bench_tsc();

    // every OpenMP thread spins until the same TSC deadline (~805M cycles)
#pragma omp parallel
    while(rdtsc() < start + 805000000ULL) {
        workcnt++;
    }

    uint64_t end = bench_tsc();
    trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_STOP, 0);

    printf("done. time taken: %" PRIu64 " cycles.\n", end - start);

#if CONFIG_TRACE
    char *buf = malloc(4096*4096);
    trace_dump(buf, 4096*4096, NULL);
    printf("%s\n", buf);
#endif

    // keep the domain alive; never exits
    for(;;);
    return 0;
}
/**
 * Groonga geo-distance benchmark: times bench_geo_distance under six
 * setup combinations (rectangular/spherical/hubeny x WGS84/TOKYO).
 *
 * Environment: GROONGA_BENCH_N overrides the iteration count;
 * GROONGA_BENCH_REPORT_RESULT (any value) enables result reporting.
 */
int main(int argc, gchar **argv)
{
    BenchmarkData data;
    BenchReporter *reporter;
    gint n = 1000;  /* default iterations per benchmark */

    grn_init();
    bench_init(&argc, &argv);

    data.report_result = g_getenv("GROONGA_BENCH_REPORT_RESULT") != NULL;
    data.context = g_new(grn_ctx, 1);

    {
        const gchar *groonga_bench_n;
        groonga_bench_n = g_getenv("GROONGA_BENCH_N");
        if (groonga_bench_n) {
            n = atoi(groonga_bench_n);
        }
    }

    reporter = bench_reporter_new();

    /* register one benchmark per setup variant; only the setup differs */
#define REGISTER(label, setup)                        \
  bench_reporter_register(reporter, label, n,         \
                          bench_setup_ ## setup,      \
                          bench_geo_distance,         \
                          bench_teardown,             \
                          &data)
    REGISTER("rectangular (WGS84)", rectangular_wgs84);
    REGISTER("rectangular (TOKYO)", rectangular_tgs);
    REGISTER("spherical (WGS84)", spherical_wgs84);
    REGISTER("spherical (TOKYO)", spherical_tgs);
    REGISTER("hubeny (WGS84)", hubeny_wgs84);
    REGISTER("hubeny (TOKYO)", hubeny_tgs);
#undef REGISTER

    bench_reporter_run(reporter);
    g_object_unref(reporter);

    g_free(data.context);

    bench_quit();
    grn_fin();

    return 0;
}
/**
 * PASL benchmark entry point: builds a name->constructor map of all
 * registered benchmarks, selects one via the "bench" command-line argument,
 * and hands the init/run/output/destroy phases to the PASL scheduler.
 *
 * Note: several sort variants ("quicksort", "mergesort",
 * "mergesort_seqmerge", "cilksort", "mergesort_ex") all map to sort_bench();
 * the variant is presumably selected elsewhere from the argument name.
 */
int main(int argc, char** argv) {
  benchmark_type bench;
  auto init = [&] {
    pasl::util::cmdline::argmap<std::function<benchmark_type()>> m;
    m.add("fib", [&] { return fib_bench(); });
    m.add("mfib", [&] { return mfib_bench(); });
    m.add("map_incr", [&] { return map_incr_bench(); });
    m.add("reduce", [&] { return reduce_bench(); });
    m.add("scan", [&] { return scan_bench(); });
    m.add("mcss", [&] { return mcss_bench(); });
    m.add("dmdvmult", [&] { return dmdvmult_bench(); });
    m.add("merge", [&] { return merge_bench(); });
    m.add("quicksort", [&] { return sort_bench(); });
    m.add("mergesort", [&] { return sort_bench(); });
    m.add("mergesort_seqmerge", [&] { return sort_bench(); });
    m.add("cilksort", [&] { return sort_bench(); });
    m.add("graph", [&] { return graph_bench(); });
    m.add("duplicate", [&] { return duplicate_bench(); });
    m.add("ktimes", [&] { return ktimes_bench(); });
    m.add("map_incr_ex", [&] { return map_incr_bench(true); });
    m.add("sum_ex", [&] { return reduce_bench(reduce_plus_ex); });
    m.add("max_ex", [&] { return reduce_bench(reduce_max_ex); });
    m.add("reduce_ex", [&] { return reduce_bench(reduce_ex); });
    m.add("duplicate_ex", [&] { return duplicate_bench(true); });
    m.add("ktimes_ex", [&] { return ktimes_bench(true); });
    m.add("filter_ex", [&] { return filter_bench(); });
    m.add("mergesort_ex", [&] { return sort_bench(); });
    bench = m.find_by_arg("bench")();
    bench_init(bench);
  };
  auto run = [&] (bool) { bench_run(bench); };
  auto output = [&] { bench_output(bench); };
  auto destroy = [&] { bench_destroy(bench); };
  pasl::sched::launch(argc, argv, init, run, output, destroy);
}
/**
 * One-shot initialization of the reference-counting subsystem: sets up
 * locks, benchmarking, the death-row list, and starts the doomer thread
 * that deletes doomed objects.
 */
void ref_init(void)
{
    // NOTE(review): inited++ is not atomic; assumes first call happens
    // before any concurrency — confirm callers guarantee that.
    if (inited++) return;
    mutex_init();
    bench_init();
    bench_event_ctor(&dooming, "del doomed objs");
    mutex_ctor(&death_row_mutex, "death row");
    SLIST_INIT(&death_row);
    log_category_ref_init();
    rwlock_ctor(&rwlock, "doomer");
    int err = pthread_create(&doomer_pth, NULL, doomer_thread, NULL);
    if (err) {
        // failure is logged but not fatal: doomed objects just won't be reaped
        SLOG(LOG_ERR, "Cannot pthread_create(): %s", strerror(err));
    }
}
/**
 * VFS/NFS benchmark: mounts an NFS export at /nfs, then either runs one
 * experiment with the (chunksize, repetitions) given on the command line,
 * or sweeps 4 KiB runs with increasing repetition counts.
 */
int main(int argc, char *argv[])
{
    errval_t err;

    // initialization
    vfs_init();
    bench_init();

    // mount nfs
    err = vfs_mkdir("/nfs");
    assert(err_is_ok(err));
    err = vfs_mount("/nfs", "nfs://10.110.4.4/local/nfs");
    assert(err_is_ok(err));

    // argument processing
    if (argc == 3) {
        printf("Started vfs_bench in command-line mode\n");
        int32_t chunksize = atol(argv[1]);
        int32_t repetitions = atol(argv[2]);
        single_run(chunksize, repetitions);
    } else {
        printf("Started vfs_bench.\n");
        // sweep: 2000, 4000, ..., 38000 repetitions at 4 KiB chunks
        for (int32_t i = 1; i < 20; i++) {
            single_run(4096, i * 2000);
        }
    }

    //err = vfs_unmount("/nfs"); // unmount is NYI
    //assert(err_is_ok(err));
    err = vfs_rmdir("/nfs");
    assert(err_is_ok(err));
    return 0;
}
int main(int argc, char** argv) { benchmark_type bench; auto init = [&] { pasl::util::cmdline::argmap<std::function<benchmark_type()>> m; m.add("reduce", [&] { return reduce_bench(); }); m.add("mergesort", [&] { return mergesort_bench(); }); m.add("bfs", [&] { return bfs_bench(); }); bench = m.find_by_arg("bench")(); bench_init(bench); }; auto run = [&] (bool) { bench_run(bench); }; auto output = [&] { bench_output(bench); }; auto destroy = [&] { bench_destroy(bench); }; pasl::sched::launch(argc, argv, init, run, output, destroy); }
/**
 * Two-process round-trip latency comparison between BMI and MPI.
 *
 * Rank 0 acts as the server, the other rank as the client; each side runs
 * the BMI series and then the MPI series over identically sized buffers
 * and prints per-iteration timings.
 */
int main( int argc, char *argv[])
{
    int ret = -1;
    int world_rank = 0;
    MPI_Comm comm;
    PVFS_BMI_addr_t *bmi_peer_array;
    int *mpi_peer_array;
    int num_clients;
    struct bench_options opts;
    struct mem_buffers mpi_send_bufs;
    struct mem_buffers mpi_recv_bufs;
    struct mem_buffers bmi_send_bufs;
    struct mem_buffers bmi_recv_bufs;
    enum bmi_buffer_type buffer_type = BMI_EXT_ALLOC;
    double mpi_time, bmi_time;
    bmi_context_id context;

    /* start up benchmark environment */
    ret = bench_init(&opts, argc, argv, &num_clients, &world_rank, &comm,
                     &bmi_peer_array, &mpi_peer_array, &context);
    if (ret < 0) {
        fprintf(stderr, "bench_init() failure.\n");
        return (-1);
    }

    /* verify that we didn't get any weird parameters */
    if (num_clients > 1 || opts.num_servers > 1) {
        fprintf(stderr, "Too many procs specified.\n");
        return (-1);
    }

    /* setup MPI buffers */
    ret = alloc_buffers(&mpi_send_bufs, ITERATIONS, opts.message_len);
    ret += alloc_buffers(&mpi_recv_bufs, ITERATIONS, opts.message_len);
    if (ret < 0) {
        fprintf(stderr, "alloc_buffers() failure.\n");
        return (-1);
    }

    /* setup BMI buffers (differs depending on command line args) */
    if (opts.flags & BMI_ALLOCATE_MEMORY) {
        /* let BMI pre-register the memory itself */
        buffer_type = BMI_PRE_ALLOC;
        ret = BMI_alloc_buffers(&bmi_send_bufs, ITERATIONS, opts.message_len,
                                bmi_peer_array[0], BMI_SEND);
        ret += BMI_alloc_buffers(&bmi_recv_bufs, ITERATIONS, opts.message_len,
                                 bmi_peer_array[0], BMI_RECV);
        if (ret < 0) {
            fprintf(stderr, "BMI_alloc_buffers() failure.\n");
            return (-1);
        }
    } else {
        buffer_type = BMI_EXT_ALLOC;
        ret = alloc_buffers(&bmi_send_bufs, ITERATIONS, opts.message_len);
        ret += alloc_buffers(&bmi_recv_bufs, ITERATIONS, opts.message_len);
        if (ret < 0) {
            fprintf(stderr, "alloc_buffers() failure.\n");
            return (-1);
        }
    }

    /* mark all send buffers */
    ret = mark_buffers(&bmi_send_bufs);
    ret += mark_buffers(&mpi_send_bufs);
    if (ret < 0) {
        fprintf(stderr, "mark_buffers() failure.\n");
        return (-1);
    }

    /******************************************************************/
    /* Actually measure some stuff */

    /* BMI series */
    if (world_rank == 0) {
        ret = bmi_server(&opts, &bmi_recv_bufs, &bmi_send_bufs,
                         bmi_peer_array[0], buffer_type, &bmi_time, context);
    } else {
        ret = bmi_client(&opts, &bmi_recv_bufs, &bmi_send_bufs,
                         bmi_peer_array[0], buffer_type, &bmi_time, context);
    }
    if (ret < 0) {
        return (-1);
    }

    /* MPI series */
    if (world_rank == 0) {
        ret = mpi_server(&opts, &mpi_recv_bufs, &mpi_send_bufs,
                         mpi_peer_array[0], &mpi_time);
    } else {
        ret = mpi_client(&opts, &mpi_recv_bufs, &mpi_send_bufs,
                         mpi_peer_array[0], &mpi_time);
    }
    if (ret < 0) {
        return (-1);
    }

    /******************************************************************/

#if 0
    if (!(opts.flags & REUSE_BUFFERS)) {
        /* verify received buffers */
        ret = check_buffers(&mpi_recv_bufs);
        if (ret < 0) {
            fprintf(stderr, "MPI buffer verification failed.\n");
            return (-1);
        }
        ret = check_buffers(&bmi_recv_bufs);
        if (ret < 0) {
            fprintf(stderr, "BMI buffer verification failed.\n");
            return (-1);
        }
    }
#endif

    /* print out results */
    if (world_rank == 0) {
        bench_args_dump(&opts);
        printf("number of iterations: %d\n", ITERATIONS);
        printf
            ("all times measure round trip in seconds unless otherwise noted\n");
        printf("\"ave\" field is computed as (total time)/iterations\n");
    }

    /* enforce output ordering */
    fflush(stdout);
    MPI_Barrier(MPI_COMM_WORLD);

    if (world_rank != 0) {
        printf("%d\t%f\t%f\t(size,total,ave)", bmi_recv_bufs.size, bmi_time,
               (bmi_time / ITERATIONS));
        printf(" bmi server\n");
        printf("%d\t%f\t%f\t(size,total,ave)", mpi_recv_bufs.size, mpi_time,
               (mpi_time / ITERATIONS));
        printf(" mpi server\n");
    }

    /* enforce output ordering */
    fflush(stdout);
    MPI_Barrier(MPI_COMM_WORLD);

    if (world_rank == 0) {
        printf("%d\t%f\t%f\t(size,total,ave)", bmi_recv_bufs.size, bmi_time,
               (bmi_time / ITERATIONS));
        printf(" bmi client\n");
        printf("%d\t%f\t%f\t(size,total,ave)", mpi_recv_bufs.size, mpi_time,
               (mpi_time / ITERATIONS));
        printf(" mpi client\n");
    }

    /* free buffers */
    free_buffers(&mpi_send_bufs);
    free_buffers(&mpi_recv_bufs);
    if (opts.flags & BMI_ALLOCATE_MEMORY) {
        BMI_free_buffers(&bmi_send_bufs, bmi_peer_array[0], BMI_SEND);
        BMI_free_buffers(&bmi_recv_bufs, bmi_peer_array[0], BMI_RECV);
    } else {
        free_buffers(&bmi_send_bufs);
        free_buffers(&bmi_recv_bufs);
    }

    /* shutdown interfaces */
    BMI_close_context(context);
    BMI_finalize();
    MPI_Finalize();
    return 0;
}
/**
 * \brief Use cmdline args to figure out which core the monitor is running on
 * and which cores to boot.
 *
 * With exactly two arguments this is the BSP monitor (boots the BSP core);
 * otherwise it is an app monitor.  After boot it optionally wires up
 * tracing, then dispatches events forever.
 */
int main(int argc, char *argv[])
{
    printf("monitor: invoked as:");
    for (int i = 0; i < argc; i++) {
        printf(" %s", argv[i]);
    }
    printf("\n");

    errval_t err;

    /* Initialize the library */
    bench_init();

    /* Set core id */
    err = invoke_kernel_get_core_id(cap_kernel, &my_core_id);
    assert(err_is_ok(err));
    disp_set_core_id(my_core_id);

    // Setup all channels and channel support code
    err = monitor_client_setup_monitor();
    assert(err_is_ok(err));

    if (argc == 2) { /* Bsp monitor */
        err = boot_bsp_core(argc, argv);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "failed to boot BSP core");
            return EXIT_FAILURE;
        }
    } else { /* Non bsp monitor */
        err = boot_app_core(argc, argv);
        if(err_is_fail(err)) {
            USER_PANIC_ERR(err, "starting app monitor");
            return EXIT_FAILURE;
        }
    }

#if defined(TRACING_EXISTS) && defined(CONFIG_TRACE)
    err = trace_my_setup();
    assert(err_is_ok(err));
    trace_reset_buffer();

    struct capref tracecap;
    err = trace_setup_on_core(&tracecap);
    if (err_is_fail(err)) {
        // a missing trace buffer is non-fatal; just warn
        if(err_no(err) != TRACE_ERR_NO_BUFFER) {
            DEBUG_ERR(err, "trace_setup_on_core failed");
            printf("Warning: tracing not available on core %d\n", my_core_id);
        }
    } else {
        err = invoke_trace_setup(tracecap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "invoke_trace_setup failed");
            printf("Warning: tracing not available on core %d\n", my_core_id);
        }
    }
#endif // tracing

    domain_mgmt_init();

#ifdef MONITOR_HEARTBEAT
    struct deferred_event ev;
    mon_heartbeat(&ev);
#endif

    // main event loop; also re-points RAM allocation locally when asked
    for(;;) {
        err = event_dispatch(get_default_waitset());
        if(err_is_fail(err)) {
            USER_PANIC_ERR(err, "event_dispatch");
        }

        if(update_ram_alloc_binding) {
            update_ram_alloc_binding = false;
            err = ram_alloc_set(NULL);
            if(err_is_fail(err)) {
                DEBUG_ERR(err, "ram_alloc_set to local allocator failed. "
                          "Will stick with intermon memory allocation.");
            }
        }
    }
}
/** * \brief initializes a DMA client device with the giving capability * * \param info stores information how to find the device driver service * \param dev returns a pointer to the device structure * * \returns SYS_ERR_OK on success * errval on error */ errval_t dma_client_device_init(struct dma_client_info *info, struct dma_client_device **dev) { errval_t err; struct dma_client_device *cdev = calloc(1, sizeof(*cdev)); if (cdev == NULL) { return LIB_ERR_MALLOC_FAIL; } #if DMA_BENCH_ENABLED bench_init(); #endif struct dma_device *dma_dev = (struct dma_device *) cdev; CLIENTDEV_DEBUG("initialzing new client device\n", device_id); iref_t service_iref = 0; switch (info->type) { case DMA_CLIENT_INFO_TYPE_ADDR: assert(!"NYI: lookup based on physical address range"); break; case DMA_CLIENT_INFO_TYPE_IREF: service_iref = info->args.iref; break; case DMA_CLIENT_INFO_TYPE_NAME: CLIENTDEV_DEBUG("looking up iref for name {%s}\n", device_id, info->args.name); err = nameservice_blocking_lookup(info->args.name, &service_iref); if (err_is_fail(err)) { free(cdev); return err; } CLIENTDEV_DEBUG("driver service {%s} @ iref:%"PRIxIREF"\n", device_id, info->args.name, service_iref); break; default: return DMA_ERR_DEVICE_UNSUPPORTED; break; } if (cdev->info.iref == 0) { err = dma_manager_lookup_by_iref(service_iref, &cdev->info); if (err_is_fail(err)) { CLIENTDEV_DEBUG("ERROR: obtaining driver info from DMA manager: %s\n", device_id, err_getstring(err)); free(cdev); return err; } } assert(service_iref != 0); dma_dev->type = DMA_DEV_TYPE_CLIENT; dma_dev->id = device_id++; dma_dev->channels.count = DMA_CLIENT_DEVICE_CONNECTIONS; dma_dev->channels.c = calloc(dma_dev->channels.count, sizeof(*dma_dev->channels.c)); if (dma_dev->channels.c == NULL) { free(cdev); return LIB_ERR_MALLOC_FAIL; } /* channel enumeration */ CLIENTDEV_DEBUG("doing channel enumeration. 
discovered %u channels\n", cdev->common.id, cdev->common.channels.count); for (uint8_t i = 0; i < dma_dev->channels.count; ++i) { struct dma_channel **chan = &dma_dev->channels.c[i]; err = dma_client_channel_init(cdev, i, service_iref, (struct dma_client_channel **) chan); if (err_is_fail(err)) { free(cdev->common.channels.c); free(cdev); return err; } } dma_dev->f.deregister_memory = dma_client_deregister_memory; dma_dev->f.register_memory = dma_client_register_memory; dma_dev->f.poll = dma_client_device_poll; *dev = cdev; return SYS_ERR_OK; }
/**
 * \brief initializes the XOMP worker library
 *
 * \param wid Xomp worker id
 *
 * \returns SYS_ERR_OK on success
 *          errval on failure
 *
 * BUG FIXES: two error paths built an error with err_push() but discarded
 * the result and fell through (after vspace_map_one_frame() and after
 * xeon_phi_client_init()); both now return the pushed error.  The tlsinfo
 * allocation is also checked.
 */
errval_t xomp_worker_init(xomp_wid_t wid)
{
    errval_t err;

    worker_id = wid;

    XWI_DEBUG("initializing worker {%016lx} iref:%u\n", worker_id, svc_iref);

#if XOMP_BENCH_WORKER_EN
    bench_init();
#endif

    /* the message frame is passed in via a well-known cap slot */
    struct capref frame = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_ARGCN
    };

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_INVALID_MSG_FRAME);
    }

    /* workers spawned via the service only map the TLS region;
     * stand-alone workers map the full frame and need the symval cache */
    size_t frame_size = 0;
    if (svc_iref) {
        frame_size = XOMP_TLS_SIZE;
    } else {
        frame_size = XOMP_FRAME_SIZE;
        err = spawn_symval_cache_init(0);
        if (err_is_fail(err)) {
            return err;
        }
    }

    if ((1UL << id.bits) < XOMP_TLS_SIZE) {
        return XOMP_ERR_INVALID_MSG_FRAME;
    }

    msgframe = frame;

    err = vspace_map_one_frame(&msgbuf, frame_size, frame, NULL, NULL);
    if (err_is_fail(err)) {
        /* FIX: the pushed error was previously discarded and execution
         * continued with an unmapped msgbuf */
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }

    if (svc_iref) {
        tls = msgbuf;
    } else {
        tls = ((uint8_t *) msgbuf) + XOMP_MSG_FRAME_SIZE;
    }

    XWI_DEBUG("messaging frame mapped: [%016lx] @ [%016lx]\n", id.base,
              (lvaddr_t )msgbuf);

    struct bomp_thread_local_data *tlsinfo = malloc(sizeof(*tlsinfo));
    if (tlsinfo == NULL) {
        return LIB_ERR_MALLOC_FAIL; /* FIX: malloc was unchecked */
    }
    tlsinfo->thr = thread_self();
    tlsinfo->work = (struct bomp_work *) tls;
    tlsinfo->work->data = tlsinfo->work + 1;
    g_bomp_state->backend.set_tls(tlsinfo);

#ifdef __k1om__
    if (worker_id & XOMP_WID_GATEWAY_FLAG) {
        err = xomp_gateway_init();
    } else {
        if (!svc_iref) {
            err = xomp_gateway_bind_svc();
        } else {
            err = SYS_ERR_OK;
        }
    }
    if (err_is_fail(err)) {
        return err;
    }
#endif

#ifdef __k1om__
    if (!svc_iref) {
        err = xeon_phi_client_init(disp_xeon_phi_id());
        if (err_is_fail(err)) {
            /* FIX: pushed error was previously discarded */
            return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
        }
        xeon_phi_client_set_callbacks(&callbacks);
    }
#endif

    struct waitset *ws = get_default_waitset();

// XXX: disabling DMA on the host as there is no replication used at this moment
#if XOMP_WORKER_ENABLE_DMA && defined(__k1om__)
    /* XXX: use lib numa */
#ifndef __k1om__
    uint8_t numanode = 0;
    if (disp_get_core_id() > 20) {
        numanode = 1;
    }
    err = dma_manager_wait_for_driver(dma_device_type, numanode);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not wait for the DMA driver");
    }
#endif
    char svc_name[30];
#ifdef __k1om__
    snprintf(svc_name, 30, "%s", XEON_PHI_DMA_SERVICE_NAME);
#else
    snprintf(svc_name, 30, "%s.%u", IOAT_DMA_SERVICE_NAME, numanode);
#endif

    struct dma_client_info dma_info = {
        .type = DMA_CLIENT_INFO_TYPE_NAME,
        .device_type = dma_device_type,
        .args.name = svc_name
    };
    err = dma_client_device_init(&dma_info, &dma_dev);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "DMA device initialization");
    }
#endif

    /* connect back to the master, either via iref or the shared frame */
    if (svc_iref) {
        err = xomp_bind(svc_iref, master_bind_cb, NULL, ws,
                        IDC_EXPORT_FLAGS_DEFAULT);
    } else {
        struct xomp_frameinfo fi = {
            .sendbase = id.base,
            .inbuf = ((uint8_t *) msgbuf) + XOMP_MSG_CHAN_SIZE,
            .inbufsize = XOMP_MSG_CHAN_SIZE,
            .outbuf = ((uint8_t *) msgbuf),
            .outbufsize = XOMP_MSG_CHAN_SIZE
        };
        err = xomp_connect(&fi, master_bind_cb, NULL, ws,
                           IDC_EXPORT_FLAGS_DEFAULT);
    }

    if (err_is_fail(err)) {
        /* TODO: Clean up */
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }

    XWI_DEBUG("Waiting until bound to master...\n");

    while (!is_bound) {
        messages_wait_and_handle_next();
    }

    if (xbinding == NULL) {
        return XOMP_ERR_WORKER_INIT_FAILED;
    }

    return SYS_ERR_OK;
}
/**
 * \brief initializes a IOAT DMA device with the given capability
 *
 * \param mmio capability representing the device's MMIO registers
 * \param dev  returns a pointer to the device structure
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_init(struct capref mmio,
                              struct ioat_dma_device **dev)
{
    errval_t err;

    struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device));
    if (ioat_device == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

#if DMA_BENCH_ENABLED
    bench_init();
#endif

    struct dma_device *dma_dev = &ioat_device->common;

    struct frame_identity mmio_id;
    err = invoke_frame_identify(mmio, &mmio_id);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    dma_dev->id = device_id++;
    dma_dev->mmio.paddr = mmio_id.base;
    dma_dev->mmio.bytes = (1UL << mmio_id.bits);
    dma_dev->mmio.frame = mmio;

    IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%u kB}\n",
                  dma_dev->id, mmio_id.base, 1 << mmio_id.bits);

    // map the device registers uncached so accesses hit the hardware
    err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
                                    dma_dev->mmio.bytes, dma_dev->mmio.frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    ioat_dma_initialize(&ioat_device->device, NULL,
                        (void *) dma_dev->mmio.vaddr);

    ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device);

    IOATDEV_DEBUG("device registers mapped at 0x%016lx. IOAT version: %u.%u\n",
                  dma_dev->id, dma_dev->mmio.vaddr,
                  ioat_dma_cbver_major_extract(ioat_device->version),
                  ioat_dma_cbver_minor_extract(ioat_device->version));

    // dispatch on the hardware revision for version-specific bring-up
    switch (ioat_dma_cbver_major_extract(ioat_device->version)) {
        case ioat_dma_cbver_1x:
            err = device_init_ioat_v1(ioat_device);
            break;
        case ioat_dma_cbver_2x:
            err = device_init_ioat_v2(ioat_device);
            break;
        case ioat_dma_cbver_3x:
            err = device_init_ioat_v3(ioat_device);
            break;
        default:
            err = DMA_ERR_DEVICE_UNSUPPORTED;
    }

    if (err_is_fail(err)) {
        vspace_unmap((void*) dma_dev->mmio.vaddr);
        free(ioat_device);
        return err;
    }

    dma_dev->f.deregister_memory = NULL;
    dma_dev->f.register_memory = NULL;
    dma_dev->f.poll = ioat_dma_device_poll_channels;

    *dev = ioat_device;

    return err;
}
/**
 * After driver.c loads a new version of a program as a shared library, it
 * accesses the symbol "kitsune_init_inplace" from the library and calls it,
 * passing in:
 *
 * 1. env - the jmp_buf to use with longjmp when updating
 * 2. prev_handle - the handle to the previous version shared library
 * 4. argc/argv - the arguments to pass on to main
 *
 * If prev_handle is non-NULL we are performing a dynamic update: child
 * threads are quiesced, stack/static variables from the old version are
 * made available, and migration/transformation machinery is initialized
 * before the new version's main() is entered.
 */
int kitsune_init_inplace(jmp_buf *env, void *prev_handle, void *cur_handle,
                         char **next_code, const char *bench_filename,
                         int argc, char **argv)
{
    /* We've begun executing code in this version. */
    is_loading = 0;

    jmp_env = env;
    prev_ver_handle = prev_handle;
    cur_ver_handle = cur_handle;
    next_version_code = next_code;

    bench_init(bench_filename);

#ifdef ENABLE_THREADING
    ktthread_init();
#endif

    /*
     * If the handle to the previous version was NULL, we infer that we are the
     * first version starting up.
     */
    if (kitsune_is_updating()) {
        kitsune_has_updated_p = 1;

#ifdef ENABLE_THREADING
        /*
         * Wait for all child threads to reach update points (or terminate)
         */
        ktthread_main_wait();
        bench_quiesce_finish();
#endif

        /*
         * Setup handle to the old version's stack variables that were captured
         * from the stackvars API/compiler.
         */
        stackvars_flip();
        addresscheck_init();
        transform_init();

        /*
         * Get the pointer to the saved static variables.
         */
        registervars_migrate();
    }

    /*
     * Initialize the log.
     */
    if(!kitsune_logging_init(argv[0])) {
        printf("Couldn't initialize logging!\n");
        abort();
    }

    /* initialize the memory allocation tracker tree */
    alloctrack_init();

    /*
     * We may wish to perform some initialization (e.g., altering the set of
     * threads or the argc/argv arguments to the program) before entering
     * main().  Here, we call such a transformer if it exists.
     */
    if (kitsune_is_updating()) {
        state_xform_fn_t ps_fn = kitsune_get_cur_val("_kitsune_prestart_xform");
        if (ps_fn) {
            kitsune_log("Calling prestart transformation function.");
            ps_fn();
        }
    }

    /*
     * After saving the information passed from the driver, we invoke the main
     * function of current version shared library.
     */
    bench_restart_start();
    kitsune_log("Entering target program: %s\n", argv[0]);
    return main(argc, argv);
}
static int prepare_xomp(int argc, char *argv[]) { errval_t err; xomp_wloc_t location = XOMP_WORKER_LOC_MIXED; for (int i = 3; i < argc; ++i) { if (!strncmp(argv[i], "--location=", 11)) { char *p = strchr(argv[i], '='); p++; if (!strcmp(p, "local")) { location = XOMP_WORKER_LOC_LOCAL; } } } if (location == XOMP_WORKER_LOC_MIXED) { debug_printf("waiting for xeon phi to be ready\n"); err = xeon_phi_domain_blocking_lookup("xeon_phi.0.ready", NULL); EXPECT_SUCCESS(err, "nameservice_blocking_lookup"); err = xeon_phi_domain_blocking_lookup("xeon_phi.1.ready", NULL); EXPECT_SUCCESS(err, "nameservice_blocking_lookup"); #if XOMP_BENCH_ENABLED xomp_master_bench_enable(BENCH_RUN_COUNT, nthreads, XOMP_MASTER_BENCH_MEM_ADD); #endif } struct xomp_spawn local_info = { .argc = argc, .argv = argv, #ifdef __k1om__ .path = "/k1om/sbin/benchmarks/bomp_mm", #else .path = "/x86_64/sbin/benchmarks/bomp_mm", #endif }; struct xomp_spawn remote_info = { .argc = argc, .argv = argv, .path = "/k1om/sbin/benchmarks/bomp_mm", }; struct xomp_args xomp_arg = { .type = XOMP_ARG_TYPE_DISTINCT, .core_stride = 0, // use default .args = { .distinct = { .nthreads = nthreads, .worker_loc = location, .nphi = 2, .local = local_info, .remote = remote_info } } }; cycles_t tsc_start = bench_tsc(); if (bomp_xomp_init(&xomp_arg)) { debug_printf("bomp init failed!\n"); exit(1); } cycles_t tsc_end = bench_tsc(); timer_xompinit = bench_time_diff(tsc_start, tsc_end); return (location == XOMP_WORKER_LOC_LOCAL); } int main(int argc, char *argv[]) { errval_t err; xomp_wid_t wid; bench_init(); err = xomp_worker_parse_cmdline(argc, argv, &wid); if (err_is_ok(err)) { struct xomp_args xw_arg = { .type = XOMP_ARG_TYPE_WORKER, .args = { .worker = { .id = wid } } }; bomp_xomp_init(&xw_arg); } if (argc < 4) { debug_printf("Usage: %s <size> <numthreats>\n", argv[0]); exit(1); } nthreads = strtoul(argv[1], NULL, 10); if (nthreads == 0) { debug_printf("num threads must be >0\n"); exit(1); } DEBUG("\n"); 
DEBUG("======================================================\n"); debug_printf("Num Threads: %u\n", nthreads); uint8_t is_shared = 0; for (int i = 2; i < argc; ++i) { if (!strcmp(argv[i], "bomp")) { prepare_bomp(); is_shared = 1; } else if (!strcmp(argv[i], "xomp")) { is_shared = prepare_xomp(argc, argv); } else { debug_printf("ignoring argument {%s}\n", argv[i]); } } debug_printf("-------------------------------------\n"); debug_printf("init time: %lu\n", timer_xompinit); debug_printf("-------------------------------------\n"); #if XOMP_BENCH_ENABLED xomp_master_bench_print_results(); #endif while (1) ; }
/**
 * Groonga select-filter benchmark: times the same range-filter select over
 * four table sizes, with and without mruby, n iterations per pattern.
 */
int main(int argc, gchar **argv)
{
    grn_rc rc;
    BenchReporter *reporter;
    gint n = 10;  /* iterations per pattern */

    rc = grn_init();
    if (rc != GRN_SUCCESS) {
        g_print("failed to initialize Groonga: <%d>: %s\n",
                rc, grn_get_global_error_message());
        return EXIT_FAILURE;
    }

    g_print("Process %d times in each pattern\n", n);

    bench_init(&argc, &argv);

    reporter = bench_reporter_new();

    {
        BenchmarkData data_small_with_mruby;
        BenchmarkData data_small_without_mruby;
        BenchmarkData data_medium_with_mruby;
        BenchmarkData data_medium_without_mruby;
        BenchmarkData data_large_with_mruby;
        BenchmarkData data_large_without_mruby;
        BenchmarkData data_very_large_with_mruby;
        BenchmarkData data_very_large_without_mruby;

        /* builds one dataset of n_records_ rows and registers a select
         * command filtering rank in (min, max], labelled for the report */
#define REGISTER(data, n_records_, min, max, use_mruby_)            \
        do {                                                        \
            gchar *label;                                           \
            label = g_strdup_printf("(%6d, %6d] (%7d): %7s mruby",  \
                                    min, max, n_records_,           \
                                    use_mruby_ ? "with" : "without"); \
            data.use_mruby = use_mruby_;                            \
            data.n_records = n_records_;                            \
            data.command =                                          \
                "select Entries --cache no "                        \
                "--filter 'rank > " #min " && rank <= " #max "'";   \
            bench_startup(&data);                                   \
            bench_reporter_register(reporter, label,                \
                                    n,                              \
                                    NULL,                           \
                                    bench,                          \
                                    NULL,                           \
                                    &data);                         \
            g_free(label);                                          \
        } while(FALSE)

        REGISTER(data_small_with_mruby,          1000,    500,    600, GRN_TRUE);
        REGISTER(data_small_without_mruby,       1000,    500,    600, GRN_FALSE);
        REGISTER(data_medium_with_mruby,        10000,   5000,   5100, GRN_TRUE);
        REGISTER(data_medium_without_mruby,     10000,   5000,   5100, GRN_FALSE);
        REGISTER(data_large_with_mruby,        100000,  50000,  50100, GRN_TRUE);
        REGISTER(data_large_without_mruby,     100000,  50000,  50100, GRN_FALSE);
        REGISTER(data_very_large_with_mruby,  1000000, 500000, 500100, GRN_TRUE);
        REGISTER(data_very_large_without_mruby, 1000000, 500000, 500100, GRN_FALSE);
#undef REGISTER

        bench_reporter_run(reporter);

        bench_shutdown(&data_small_with_mruby);
        bench_shutdown(&data_small_without_mruby);
        bench_shutdown(&data_medium_with_mruby);
        bench_shutdown(&data_medium_without_mruby);
        bench_shutdown(&data_large_with_mruby);
        bench_shutdown(&data_large_without_mruby);
        bench_shutdown(&data_very_large_with_mruby);
        bench_shutdown(&data_very_large_without_mruby);
    }

    g_object_unref(reporter);

    grn_fin();

    return EXIT_SUCCESS;
}
int main(int argc, char* argv[]) { size_t size_wanted = 1<<20; size_t runs = 100; struct reset_opt *reset = NULL; struct measure_opt *measure = NULL; bool dump = false; assert(argc>0); if (argc == 1) { usage(argv[0]); return 0; } bool args_ok = true; for (int arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "help") == 0 || strcmp(argv[arg], "--help") == 0 || strcmp(argv[arg], "-h") == 0) { usage(argv[0]); return 0; } if (strncmp(argv[arg], "size=", 5) == 0) { size_wanted = atol(argv[arg]+5); } if (strncmp(argv[arg], "logsize=", 8) == 0) { size_t logsize = atol(argv[arg]+8); if (logsize > 31) { printf("ERROR: logsize too big\n"); args_ok = false; } else { size_wanted = 1 << logsize; } } else if (strncmp(argv[arg], "count=", 6) == 0) { size_wanted = atol(argv[arg]+6)*sizeof(struct cte); } else if (strncmp(argv[arg], "logcount=", 9) == 0) { size_t logcount = atol(argv[arg]+9); if (logcount > (31-OBJBITS_CTE)) { printf("ERROR: logcount too big\n"); args_ok = false; } else { size_wanted = (1 << logcount)*sizeof(struct cte); } } else if (strncmp(argv[arg], "runs=", 5) == 0) { runs = atol(argv[arg]+5); } else if (strncmp(argv[arg], "reset=", 6) == 0) { char *name = argv[arg]+6; int i; for (i = 0; reset_opts[i].name; i++) { if (strcmp(reset_opts[i].name, name) == 0) { reset = &reset_opts[i]; break; } } if (!reset_opts[i].name) { args_ok = false; printf("ERROR: unkown reset \"%s\"\n", name); } } else if (strncmp(argv[arg], "measure=", 8) == 0) { char *name = argv[arg]+8; if (strcmp(name, "dump") == 0) { measure = NULL; dump = true; } else { int i; for (i = 0; measure_opts[i].name; i++) { if (strcmp(measure_opts[i].name, name) == 0) { measure = &measure_opts[i]; break; } } if (measure_opts[i].name) { dump = false; } else { args_ok = false; printf("ERROR: unkown measure \"%s\"\n", name); } } } else { args_ok = false; printf("ERROR: unkown argument %s\n", argv[arg]); } } if (!args_ok) { usage(argv[0]); return 1; } assert(size_wanted > 0); assert(runs > 0); assert(reset); 
assert(measure || dump); errval_t err; struct capref frame; size_t size; err = frame_alloc(&frame, size_wanted, &size); assert_err(err, "alloc"); assert(size >= size_wanted); printf("got %lu bytes\n", size); struct memobj *m; struct vregion *v; void *addr; err = vspace_map_one_frame(&addr, size, frame, &m, &v); assert_err(err, "map"); if (dump) { reset_and_dump(addr, size_wanted, runs, reset->fn, reset->name); } else { bench_init(); char *bench_name = malloc(strlen(reset->name)+strlen(measure->name)+2); strcpy(bench_name, reset->name); strcat(bench_name, ":"); strcat(bench_name, measure->name); test(addr, size_wanted, runs, reset->fn, measure->fn, bench_name); free(bench_name); } printf("client done\n"); vregion_destroy(v); cap_destroy(frame); return 0; }
/**
 * Shared-memory clock benchmark.  With no arguments, acts as the server:
 * queries the core count, spawns clients on all other cores, exports the
 * bench service, allocates and initializes the shared clock frame, then
 * waits for all clients before starting the experiment.  With an argument,
 * acts as a client and binds to the "server" service.
 */
int main(int argc, char *argv[])
{
    errval_t err;

    my_core_id = disp_get_core_id();
    bench_init();

    if (argc == 1) { /* server */
        struct monitor_binding *mb = get_monitor_binding();
        mb->rx_vtbl.num_cores_reply = num_cores_reply;

        // Get number of cores in the system
        err = mb->tx_vtbl.num_cores_request(mb, NOP_CONT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "error sending num_core_request");
        }

        // Spawn client on another core
        char *xargv[] = {"shared_mem_clock_bench", "dummy", NULL};
        err = spawn_program_on_all_cores(false, xargv[0], xargv, NULL,
                                         SPAWN_FLAGS_DEFAULT, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "error spawning on other cores");
        }

        // Export service
        err = bench_export(NULL, export_cb, connect_cb, get_default_waitset(),
                           IDC_EXPORT_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "export failed");
        }

        // Allocate a cap for the shared memory
        err = frame_alloc(&clock_frame, BASE_PAGE_SIZE, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "frame_alloc failed");
        }
        err = clock_init(clock_frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "clock_init failed");
        }

        // Wait for all connections to be established
        start_experiment_flag = false;
        while(!start_experiment_flag) {
            messages_wait_and_handle_next();
        }

        // Start experiments
        start_experiment();
    } else { /* client */
        // Lookup service
        iref_t iref;
        err = nameservice_blocking_lookup("server", &iref);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "nameservice_blocking_lookup failed");
        }

        // Bind to service
        err = bench_bind(iref, bind_cb, NULL, get_default_waitset(),
                         IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "bind failed");
        }
    }

    messages_handler_loop();
}