void test_unbuffered() {
    unlink("/tmp/.socket_stream_test");

    auto server = fd::easy::unix_socket::server("/tmp/.socket_stream_test");
    auto client = fd::easy::unix_socket::client("/tmp/.socket_stream_test");
    auto conn = server.accept();

    auto start = std::chrono::high_resolution_clock::now();
    std::thread server_thread(unbuffered_server, std::ref(conn));
    std::thread client_thread(unbuffered_client, std::ref(client));
    server_thread.join();
    client_thread.join();
    auto end = std::chrono::high_resolution_clock::now();

    std::chrono::duration<double> diff = end - start;
    double throughput = loop_count * sizeof(message) / diff.count();
    std::cout << "---- Unbuffered ----\n"
              << "Elapsed: " << diff.count() << " s\n"
              << "Throughput: " << throughput / 1e6 << " MB/s\n";
}
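/*
 * unbuffered_server / unbuffered_client are not shown above.  A minimal,
 * self-contained sketch of the same unbuffered ping-pong pattern over a
 * socketpair (plain POSIX descriptors instead of the fd::easy wrappers;
 * the message buffer and loop count are stand-ins, and short reads are
 * ignored for brevity) -- an illustration, not the original helpers:
 */
#include <sys/socket.h>
#include <unistd.h>
#include <thread>

static void pingpong_demo() {
    static const char message[64] = "ping";
    const size_t loop_count = 100000;

    int fds[2];
    socketpair(AF_UNIX, SOCK_STREAM, 0, fds);   // fds[0] = "server", fds[1] = "client"

    std::thread server([&] {                    // echo each message straight back
        char buf[sizeof(message)];
        for (size_t i = 0; i < loop_count; ++i) {
            read(fds[0], buf, sizeof(buf));
            write(fds[0], buf, sizeof(buf));
        }
    });
    std::thread client([&] {                    // send, then wait for the echo
        char buf[sizeof(message)];
        for (size_t i = 0; i < loop_count; ++i) {
            write(fds[1], message, sizeof(message));
            read(fds[1], buf, sizeof(buf));
        }
    });
    client.join();
    server.join();
    close(fds[0]);
    close(fds[1]);
}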
void handle_client(tcp_acceptor::client_ptr &client,
                   boost::shared_ptr<directory> directory)
{
    boost::shared_ptr<abstract_client> const shared_client =
        movable_ptr<abstract_client>::to_shared(client);
    boost::thread client_thread(handle_request_threaded, shared_client, directory);
    client_thread.detach();
}
void Server::accept_handler(boost::shared_ptr<Connection> connection,
                            const boost::system::error_code &error)
{
    if (!error) {
        std::cout << "New Server Connection Accepted!" << std::endl;

        // Hand the accepted connection off to its own worker thread.
        boost::thread client_thread(boost::bind(&Connection::thread_process, connection));

        // Queue the next asynchronous accept.
        boost::shared_ptr<Connection> new_connection(new Connection(boost_io_service, filters));
        tcp_acceptor.async_accept(new_connection->get_socket(),
                                  boost::bind(&Server::accept_handler, this,
                                              new_connection,
                                              boost::asio::placeholders::error));
    } else {
        std::cout << error.message() << std::endl;
    }
}
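/*
 * A minimal sketch of how this accept chain is typically primed before
 * io_service::run().  It reuses only the members already referenced in the
 * handler above (boost_io_service, filters, tcp_acceptor); the method name
 * start_accept is an assumption, not the original code:
 */
void Server::start_accept()
{
    boost::shared_ptr<Connection> new_connection(new Connection(boost_io_service, filters));
    tcp_acceptor.async_accept(new_connection->get_socket(),
                              boost::bind(&Server::accept_handler, this,
                                          new_connection,
                                          boost::asio::placeholders::error));
}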
static int run_benchmark(char *socket_type, thread_args *client_args,
                         thread_args *server_args) {
  gpr_thd_id tid;
  int rv = 0;

  rv = create_socket(socket_type, &client_args->fds, &server_args->fds);
  if (rv < 0) {
    return rv;
  }

  gpr_log(GPR_INFO, "Starting test %s %s %d", client_args->strategy_name,
          socket_type, client_args->msg_size);

  /* Run the server in its own thread and the client inline. */
  gpr_thd_new(&tid, server_thread_wrap, server_args, NULL);
  client_thread(client_args);
  return 0;
}
int main(int argc, char *argv[]) {
    if (argc != 2) {
        std::cout << "sync interval [msec] must be given as argument!" << std::endl;
        return -1;
    }
    canopen::using_master_thread = true;
    canopen::syncInterval = std::chrono::milliseconds(std::stoi(std::string(argv[1])));

    auto chainDesc = canopen::parseChainDescription("single_device.yaml");
    canopen::initChainMap(chainDesc);
    std::cout << "1" << std::endl;

    if (!canopen::openConnection("/dev/pcan32")) {
        std::cout << "Cannot open CAN device; aborting." << std::endl;
        return -1;
    }
    std::cout << "2" << std::endl;

    canopen::initListenerThread();
    std::cout << "3" << std::endl;
    canopen::initIncomingPDOProcessorThread();
    std::cout << "4" << std::endl;
    std::this_thread::sleep_for(std::chrono::seconds(2));
    canopen::initMasterThread();
    std::cout << "5" << std::endl;
    canopen::initNMT();

    for (auto it : canopen::chainMap)
        it.second->CANopenInit();

    // client_thread simulates callback invocations from a client:
    std::thread client_thread(clientFunc);
    client_thread.detach();

    while (true)
        std::this_thread::sleep_for(std::chrono::milliseconds(10));

    return 0;
}
int main (int argc, char ** argv)
{
    int ch, ret, seed = 0, d = 0;

    /* set default value */
    memset (&tcpgen, 0, sizeof (tcpgen));
    tcpgen.flow_dist = FLOWDIST_SAME;
    tcpgen.flow_num = 1;
    tcpgen.data_len = 984;    /* 1024 byte packet excluding ether header */

    while ((ch = getopt (argc, argv, "d:B:scn:t:x:i:l:rm:pDv")) != -1) {
        switch (ch) {
        case 'd' :
            ret = inet_pton (AF_INET, optarg, &tcpgen.dst);
            if (ret != 1) {    /* inet_pton returns 1 only on success */
                D ("invalid dst address %s", optarg);
                perror ("inet_pton");
                return -1;
            }
            break;
        case 'B' :
            ret = inet_pton (AF_INET, optarg, &tcpgen.src);
            if (ret != 1) {
                D ("invalid src address %s", optarg);
                perror ("inet_pton");
                return -1;
            }
            break;
        case 's' :
            tcpgen.server_mode = 1;
            break;
        case 'c' :
            tcpgen.client_mode = 1;
            break;
        case 'n' :
            tcpgen.flow_num = atoi (optarg);
            if (tcpgen.flow_num > MAX_FLOWNUM) {
                D ("max number of flows is %d", MAX_FLOWNUM);
                return -1;
            }
            break;
        case 't' :
            if (strncmp (optarg, "same", 4) == 0)
                tcpgen.flow_dist = FLOWDIST_SAME;
            else if (strncmp (optarg, "random", 6) == 0)
                tcpgen.flow_dist = FLOWDIST_RANDOM;
            else if (strncmp (optarg, "power", 5) == 0)
                tcpgen.flow_dist = FLOWDIST_POWER;
            else {
                D ("invalid distribution pattern %s", optarg);
                return -1;
            }
            break;
        case 'x' :
            tcpgen.count = atoi (optarg);
            break;
        case 'i' :
            tcpgen.interval = atoi (optarg);
            break;
        case 'l' :
            tcpgen.data_len = atoi (optarg);
            break;
        case 'r' :
            tcpgen.randomized = 1;
            break;
        case 'm' :
            seed = atoi (optarg);
            break;
        case 'p' :
            tcpgen.thread_mode = 1;
            break;
        case 'D' :
            d = 1;
            break;
        case 'v' :
            tcpgen.verbose = 1;
            break;
        default :
            usage ();
            return -1;
        }
    }

    if (seed)
        srand (seed);
    else
        srand (time (NULL));

    if (d)
        daemon (1, 0);

    if (tcpgen.server_mode)
        server_thread (NULL);
    else if (tcpgen.client_mode)
        client_thread (NULL);

    D ("tcpgen finished");

    return 0;
}
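/*
 * Hypothetical invocations (addresses are placeholders, and the binary name
 * tcpgen is inferred from the struct and the final log line), based only on
 * the options parsed above:
 *
 *   tcpgen -s                                           # server mode
 *   tcpgen -c -B 10.0.0.1 -d 10.0.0.2 -n 64 -t random   # client: 64 flows, random distribution
 */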
static void
do_client(const char *server, short port, char *filename, afs_int32 command,
          afs_int32 times, afs_int32 bytes, afs_int32 sendbytes,
          afs_int32 readbytes, int dumpstats, int nojumbo, int maxmtu,
          int maxwsize, int minpeertimeout, int udpbufsz, int nostats,
          int hotthread, int threads)
{
    struct rx_connection *conn;
    afs_uint32 addr;
    struct rx_securityClass *secureobj;
    int secureindex;
    int ret;
    char stamp[2048];
    struct client_data *params;

#ifdef AFS_PTHREAD_ENV
    int i;
    pthread_t thread[MAX_THREADS];
    pthread_attr_t tattr;
    void *status;
#endif

    params = calloc(1, sizeof(struct client_data));

#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0) {
        printf("Can't initialize winsock.\n");
        exit(1);
    }
#endif

    if (hotthread)
        rx_EnableHotThread();

    if (nostats)
        rx_enable_stats = 0;

    addr = str2addr(server);

    rx_SetUdpBufSize(udpbufsz);

    ret = rx_Init(0);
    if (ret)
        errx(1, "rx_Init failed");

    if (nojumbo)
        rx_SetNoJumbo();
    if (maxmtu)
        rx_SetMaxMTU(maxmtu);
    if (maxwsize) {
        rx_SetMaxReceiveWindow(maxwsize);
        rx_SetMaxSendWindow(maxwsize);
    }
    if (minpeertimeout)
        rx_SetMinPeerTimeout(minpeertimeout);

    get_sec(0, &secureobj, &secureindex);

    switch (command) {
    case RX_PERF_RPC:
        sprintf(stamp, "RPC: threads\t%d, times\t%d, write bytes\t%d, read bytes\t%d",
                threads, times, sendbytes, readbytes);
        break;
    case RX_PERF_RECV:
        sprintf(stamp, "RECV: threads\t%d, times\t%d, bytes\t%d",
                threads, times, bytes);
        break;
    case RX_PERF_SEND:
        sprintf(stamp, "SEND: threads\t%d, times\t%d, bytes\t%d",
                threads, times, bytes);
        break;
    case RX_PERF_FILE:
        sprintf(stamp, "FILE %s: threads\t%d, times\t%d, bytes\t%d",
                filename, threads, times, bytes);
        break;
    }

    conn = rx_NewConnection(addr, htons(port), RX_SERVER_ID, secureobj, secureindex);
    if (conn == NULL)
        errx(1, "failed to contact server");

#ifdef AFS_PTHREAD_ENV
    pthread_attr_init(&tattr);
    pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE);
#endif

    params->conn = conn;
    params->filename = filename;
    params->command = command;
    params->times = times;
    params->bytes = bytes;
    params->sendbytes = sendbytes;
    params->readbytes = readbytes;

    start_timer();

#ifdef AFS_PTHREAD_ENV
    for (i = 0; i < threads; i++) {
        pthread_create(&thread[i], &tattr, client_thread, params);
        if ((i + 1) % RX_MAXCALLS == 0) {
            /* An rx connection carries at most RX_MAXCALLS concurrent calls,
             * so hand the next batch of threads a fresh connection. */
            conn = rx_NewConnection(addr, htons(port), RX_SERVER_ID,
                                    secureobj, secureindex);
            if (conn != NULL) {
                struct client_data *new_params = malloc(sizeof(struct client_data));
                memcpy(new_params, params, sizeof(struct client_data));
                new_params->conn = conn;
                params = new_params;
            }
        }
    }
#else
    client_thread(params);
#endif

#ifdef AFS_PTHREAD_ENV
    for (i = 0; i < threads; i++)
        pthread_join(thread[i], &status);
#endif

    switch (command) {
    case RX_PERF_RPC:
        end_and_print_timer(stamp, (long long)threads * times * (sendbytes + readbytes));
        break;
    case RX_PERF_RECV:
    case RX_PERF_SEND:
    case RX_PERF_FILE:
        end_and_print_timer(stamp, (long long)threads * times * bytes);
        break;
    }

    DBFPRINT(("done for good\n"));

    if (dumpstats) {
        rx_PrintStats(stdout);
        rx_PrintPeerStats(stdout, rx_PeerOf(conn));
    }
    rx_Finalize();

#ifdef AFS_PTHREAD_ENV
    pthread_attr_destroy(&tattr);
#endif

    free(params);
}
int server_run_one(SERVER *sp, int client_fd, int nofork)
{
    CLIENT *cp;
    pthread_t tid;
#ifndef HAVE_THREADS
    pid_t pid;
    int status;
#endif
    int ecode;
    socklen_t slen;

    pthread_mutex_lock(&sp->clients_mtx);
    sp->clients_cur++;
    if (debug)
        fprintf(stderr, "server_run_one: Number of clients is now: %d\n",
                sp->clients_cur);
    pthread_mutex_unlock(&sp->clients_mtx);

    if (debug)
        fprintf(stderr, "server_run_one(..., %d, %d): Start\n", client_fd, nofork);

    A_NEW(cp);
    cp->fd = client_fd;
    cp->sp = sp;

    slen = sizeof(cp->rsin);
    if (getpeername(cp->fd, (struct sockaddr *) &cp->rsin, &slen) < 0) {
        syslog(LOG_ERR, "getpeername(%d): %m", cp->fd);
        if (debug)
            fprintf(stderr, "server_run_one(...): End: FAILURE\n");
        return EXIT_FAILURE;
    }

    slen = sizeof(cp->lsin);
    if (getsockname(cp->fd, (struct sockaddr *) &cp->lsin, &slen) < 0) {
        syslog(LOG_ERR, "getsockname(%d): %m", cp->fd);
        if (debug)
            fprintf(stderr, "server_run_one(...): End: FAILURE\n");
        return EXIT_FAILURE;
    }

    if (nofork) {
        (void) client_thread(cp);
        if (debug)
            fprintf(stderr, "server_run_one(...): End: OK\n");
        return EXIT_SUCCESS;
    } else {
#ifdef HAVE_THREADS
        ecode = pthread_create(&tid, &sp->ca_detached, client_thread, (void *) cp);
        if (ecode) {
            syslog(LOG_ERR, "pthread_create(client_thread) failed: %s",
                   strerror(ecode));
            if (debug)
                fprintf(stderr, "server_run_one(...): End: FAILURE\n");
            return EXIT_FAILURE;
        }
#else
        /* Try to reap the status of as many subprocesses as possible */
        /*
        ** XXX: This will break if we are using multiple
        ** SERVER's in a single process and aren't using
        ** threads.
        */
        while (sp->clients_cur > 0 &&
               (pid = waitpid((pid_t) -1, &status, WNOHANG)) > 0) {
            if (WIFEXITED(status) || WIFSIGNALED(status)) {
                sp->clients_cur--;
            }
        }

        if (sp->clients_max > 0) {
            /* Wait for at least one slot to be available */
            while (sp->clients_cur >= sp->clients_max &&
                   (pid = waitpid((pid_t) -1, &status, 0)) > 0) {
                if (WIFEXITED(status) || WIFSIGNALED(status)) {
                    sp->clients_cur--;
                }
            }
        }

        while ((status = fork()) < 0 && errno == EAGAIN) {
            /* Fork failed - too many processes */
            sleep(1);
            pid = waitpid((pid_t) -1, &status,
                          (sp->clients_cur > 0) ? 0 : WNOHANG);
            if (pid > 0 && (WIFEXITED(status) || WIFSIGNALED(status))) {
                sp->clients_cur--;
            }
        }

        if (status < 0) {
            syslog(LOG_ERR, "fork() failed: %m");
            s_abort();
        }

        if (status == 0) {
            /* In child process */
            (void) client_thread(cp);
            _exit(EXIT_SUCCESS);
        }

        sp->clients_cur++;
        s_close(cp->fd);
        a_free(cp);
#endif
    }

    if (debug)
        fprintf(stderr, "server_run_one(...): End: OK\n");
    return EXIT_SUCCESS;
}
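/*
 * client_thread() is passed to pthread_create() above and also called
 * directly in the nofork and fork paths, so it has to have the standard
 * pthread start-routine shape.  A hypothetical skeleton (the real body
 * lives elsewhere in this codebase):
 */
void *client_thread(void *arg)
{
    CLIENT *cp = (CLIENT *) arg;

    /* ... serve the peer connected on cp->fd ... */

    s_close(cp->fd);   /* assumed cleanup, mirroring the fork-mode parent path */
    a_free(cp);
    return NULL;
}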
void start_client(config_t *config)
{
    thread_t *threads = NULL;
    int i;
    int j;
    int rc;
    int *status;
    int mode;
    conn_stats_t **tstat;
    pthread_attr_t attr;
    struct rlimit limits;
    struct sigaction action;
    sigset_t mask;

    getrlimit(RLIMIT_NPROC, &limits);
    if (limits.rlim_max < config->threads + 2) {
        error_at_line(0, 0, __FILE__, __LINE__,
                      "Insufficient resources: NPROC < %d\n",
                      config->threads + 2);
        return;
    }
    limits.rlim_cur = limits.rlim_max;
    setrlimit(RLIMIT_NPROC, &limits);

    threads = malloc(sizeof(thread_t) * config->threads);
    memset(threads, 0, sizeof(thread_t) * config->threads);

    for (i = 0; i < config->threads; i++) {
        threads[i].thread_idx = i;

        if (config->recv_mode == ASYNC) {
            if (i % 2) {
                mode = THREAD_ASYNC_RECV;
            } else {
                mode = THREAD_ASYNC_SEND;
            }
        } else {
            mode = THREAD_SYNC;
        }

        switch (mode) {
        case THREAD_SYNC:
            break;
        case THREAD_ASYNC_RECV:
            threads[i].mode = THREAD_ASYNC_RECV;
            break;
        case THREAD_ASYNC_SEND:
            threads[i].mode = THREAD_ASYNC_SEND;
            threads[i].read = &threads[i + 1];
            threads[i].efd_write = epoll_create1(0);
            threads[i].write = &threads[i];
            //Since we are in ASYNC mode, we are hiding the extra 2x threads
            //for stats keeping.
            threads[i].thread_stats = &(config->stats->thread_stats[i / 2]);
            break;
        default:
            break;
        }
        threads[i].mode = mode;

        if (mode == THREAD_SYNC || mode == THREAD_ASYNC_SEND) {
            threads[i].conns = malloc(sizeof(connection_t) * config->conns_per_thread);
            memset(threads[i].conns, 0, sizeof(connection_t) * config->conns_per_thread);
            threads[i].write = &threads[i];
            threads[i].efd_write = epoll_create1(0);
            if (mode == THREAD_SYNC) {
                threads[i].read = &threads[i];
                threads[i].efd_read = epoll_create1(0);
                threads[i].thread_stats = &(config->stats->thread_stats[i]);
            } else {
                threads[i].read = &threads[i + 1];
                //Since we are in ASYNC mode, we are hiding the extra 2x threads
                //for stats keeping.
                threads[i].thread_stats = &(config->stats->thread_stats[i / 2]);
            }
        } else {
            threads[i].read = &threads[i];
            threads[i].write = &threads[i - 1];
            threads[i].efd_read = epoll_create1(0);
            threads[i].thread_stats = &(config->stats->thread_stats[i / 2]);
            threads[i].conns = threads[i - 1].conns;
        }
        threads[i].cfg = config;

        /* Only threads that own a connection array initialize it: every
         * thread in sync mode, and the even (send) threads in async mode. */
        if (mode == THREAD_SYNC || i % 2 == 0) {
            for (j = 0; j < config->conns_per_thread; j++) {
                tstat = &threads[i].conns[j].conn_stats;
                threads[i].conns[j].conn_stats = &threads[i].thread_stats->conn_stats[j];
                threads[i].conns[j].cfg = config;
                init_connection_data(config, &threads[i].conns[j], mode);
            }
        }
    }

    if (config->mcast_wait) {
        //printf("mcast_wait\n");
        mcast_wait();
    }

    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = run_timeout;
    sigemptyset(&action.sa_mask);
    sigaction(SIG_RUNTIMEEXP, &action, NULL);

    sigemptyset(&mask);
    sigaddset(&mask, SIG_RUNTIMEEXP);
    if (sigprocmask(SIG_UNBLOCK, &mask, NULL) == -1) {
        perror("sigprocmask");
    }

    if (config->threads == 1) {
        rc = (long int) client_thread((void *) &threads[0]);
    } else {
        for (i = 0; i < config->threads; i++) {
            pthread_attr_init(&attr);
            pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
            rc = pthread_create(&threads[i].id, &attr, client_thread, (void *) &threads[i]);
            pthread_attr_destroy(&attr);
        }
        for (i = 0; i < config->threads; i++) {
            //printf("waiting on thread %d...\n", i);
            rc = pthread_join(threads[i].id, (void **) &status);
            //printf("joined thread %d, status=%ld\n", (int) threads[i].id, (long int) status);
        }
    }

    sigemptyset(&mask);
    sigaddset(&mask, SIG_RUNTIMEEXP);
    if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) {
        perror("sigprocmask");
    }

    action.sa_flags = 0;
    action.sa_handler = SIG_IGN;
    sigemptyset(&action.sa_mask);
    sigaction(SIG_RUNTIMEEXP, &action, NULL);

    for (i = 0; i < config->threads; i++) {
        if (threads[i].conns) {
            for (j = 0; j < config->conns_per_thread; j++) {
                if (threads[i].conns[j].req_buffer) {
                    free(threads[i].conns[j].req_buffer);
                }
                if (threads[i].conns[j].rx_buff) {
                    free(threads[i].conns[j].rx_buff);
                }
                if (threads[i].conns[j].tx_buffer) {
                    free(threads[i].conns[j].tx_buffer);
                }
                if (threads[i].conns[j].frame_buffer) {
                    free(threads[i].conns[j].frame_buffer);
                }
                //if(threads[i].conns[j].out_transactions) {
                //    free(threads[i].conns[j].out_transactions);
                //}
            }
            free(threads[i].conns);
            threads[i].read->conns = NULL;
            threads[i].write->conns = NULL;
        }
    }
    free(threads);
    return;
}
int main(int argc, char *argv[])
{
    char pathtobin[1024], testfile[1024];
    struct server_data_t server_data;
    struct client_data_t client_data;
    void *thr_exit_status = NULL;
    int parm, provided, rank, numranks, data;
    off_t maxbytes;
    MPI_Comm thread_comm;
    pthread_t thread;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm_dup(MPI_COMM_WORLD, &thread_comm);
    client_data.thread_comm = &thread_comm;
    server_data.thread_comm = &thread_comm;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* default parameters */
    strcpy(testfile, "testmpiio");
    client_data.maxreqs = OUTS_REQ;
    client_data.bsize = BLOCKSIZE;
    client_data.csize = CHUNKSIZE;
    maxbytes = MAXBYTES;

    /* reading input parameters */
    while ((parm = getopt(argc, argv, "b:r:f:c:s:h")) != -1) {
        switch (parm) {
        case 'b':
            client_data.bsize = units_convert(optarg);
            break;
        case 'r':
            client_data.maxreqs = units_convert(optarg);
            break;
        case 'f':
            strcpy(testfile, optarg);
            break;
        case 'c':
            client_data.csize = units_convert(optarg);
            break;
        case 's':
            maxbytes = units_convert(optarg);
            break;
        case 'h':
            print_help(rank);
            MPI_Finalize();
            exit(0);
        default:
            print_help(rank);
            MPI_Finalize();
            exit(0);
        }
    }

    /* check for correct data */
    if ((client_data.bsize == 0) || (client_data.maxreqs == 0) ||
        (testfile[0] == '\0') || (client_data.csize == 0) || (maxbytes == 0)) {
        print_help(rank);
        MPI_Finalize();
        exit(0);
    }

    if (client_data.bsize > client_data.csize) {
        if (rank == 0) {
            printf("Requested blocksize larger than chunk size. Exiting.\n");
        }
        MPI_Finalize();
        exit(0);
    }

    server_data.num = (off_t) maxbytes / client_data.csize;
    server_data.csize = client_data.csize;
    if (client_data.csize > client_data.csize * server_data.num) {
        if (rank == 0) {
            printf("Requested chunk size larger than requested bytes to write. Exiting.\n");
        }
        MPI_Finalize();
        exit(0);
    }

    /* print what we are going to do */
    snprintf(client_data.fname, sizeof client_data.fname, "%s.%05d", testfile, rank);
    if (rank == 0) {
        printf("Rank %05d | Blocksize: %d\n", rank, client_data.bsize);
        printf("Rank %05d | Max outstanding requests: %d\n", rank, client_data.maxreqs);
        printf("Rank %05d | File name: %s\n", rank, client_data.fname);
        printf("Rank %05d | Chunk size %lli\n", rank, client_data.csize);
        printf("Rank %05d | Number of chunks %lli\n", rank, server_data.num);
        printf("Rank %05d | Requested bytes to write: %lli\n", rank, maxbytes);
        printf("Rank %05d | Bytes to write: %lli\n", rank, client_data.csize * server_data.num);
    }

    /* how many ranks do we have? */
    client_data.rank = rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numranks);
    server_data.numranks = numranks;

    /* run manager process if rank==0 */
    if (rank == 0) {
        pthread_create(&thread, NULL, server_thread, &server_data);
    }

    /* run client on all ranks */
    client_thread(&client_data);

    if (rank == 0) {
        pthread_join(thread, &thr_exit_status);
    }

    /* exiting */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    exit(0);
}
int main(int argc, char **argv)
{
    int i, opt, num_threads;
    Octstr *proxy;
    List *exceptions;
    long proxy_port;
    int proxy_ssl = 0;
    Octstr *proxy_username;
    Octstr *proxy_password;
    Octstr *exceptions_regex;
    char *p;
    long threads[MAX_THREADS];
    time_t start, end;
    double run_time;
    FILE *fp;
    int ssl = 0;

    gwlib_init();

    proxy = NULL;
    proxy_port = -1;
    exceptions = gwlist_create();
    proxy_username = NULL;
    proxy_password = NULL;
    exceptions_regex = NULL;
    num_threads = 1;
    file = 0;
    fp = NULL;

    while ((opt = getopt(argc, argv, "hv:qr:p:P:Se:E:t:i:a:u:sc:H:B:m:f")) != EOF) {
        switch (opt) {
        case 'v':
            log_set_output_level(atoi(optarg));
            break;
        case 'q':
            verbose = 0;
            break;
        case 'r':
            max_requests = atoi(optarg);
            break;
        case 't':
            num_threads = atoi(optarg);
            if (num_threads > MAX_THREADS)
                num_threads = MAX_THREADS;
            break;
        case 'i':
            interval = atof(optarg);
            break;
        case 'u':
            file = 1;
            fp = fopen(optarg, "a");
            if (fp == NULL)
                panic(0, "Cannot open message text file %s", optarg);
            msg_text = octstr_read_file(optarg);
            if (msg_text == NULL)
                panic(0, "Cannot read message text");
            debug("", 0, "message text is");
            octstr_dump(msg_text, 0);
            octstr_url_encode(msg_text);
            fclose(fp);
            break;
        case 'h':
            help();
            exit(0);
        case 'p':
            proxy = octstr_create(optarg);
            break;
        case 'P':
            proxy_port = atoi(optarg);
            break;
        case 'S':
            proxy_ssl = 1;
            break;
        case 'e':
            p = strtok(optarg, ":");
            while (p != NULL) {
                gwlist_append(exceptions, octstr_create(p));
                p = strtok(NULL, ":");
            }
            break;
        case 'E':
            exceptions_regex = octstr_create(optarg);
            break;
        case 'a':
            p = strtok(optarg, ":");
            if (p != NULL) {
                auth_username = octstr_create(p);
                p = strtok(NULL, "");
                if (p != NULL)
                    auth_password = octstr_create(p);
            }
            break;
        case 's':
            ssl = 1;
            break;
        case 'c':
            octstr_destroy(ssl_client_certkey_file);
            ssl_client_certkey_file = octstr_create(optarg);
            break;
        case 'H':
            fp = fopen(optarg, "a");
            if (fp == NULL)
                panic(0, "Cannot open header text file %s", optarg);
            extra_headers = octstr_read_file(optarg);
            if (extra_headers == NULL)
                panic(0, "Cannot read header text");
            debug("", 0, "headers are");
            octstr_dump(extra_headers, 0);
            split_headers(extra_headers, &split);
            fclose(fp);
            break;
        case 'B':
            content_file = octstr_create(optarg);
            break;
        case 'm':
            method_name = octstr_create(optarg);
            break;
        case 'f':
            follow_redirect = 0;
            break;
        case '?':
        default:
            error(0, "Invalid option %c", opt);
            help();
            panic(0, "Stopping.");
        }
    }

    if (optind == argc) {
        help();
        exit(0);
    }

#ifdef HAVE_LIBSSL
    /*
     * check if we are doing a SSL-enabled client version here
     * load the required cert and key file
     */
    if (ssl || proxy_ssl) {
        if (ssl_client_certkey_file != NULL) {
            use_global_client_certkey_file(ssl_client_certkey_file);
        } else {
            panic(0, "client certkey file needs to be given!");
        }
    }
#endif

    if (method_name != NULL) {
        method = http_name2method(method_name);
    }

    if (proxy != NULL && proxy_port > 0) {
        http_use_proxy(proxy, proxy_port, proxy_ssl, exceptions,
                       proxy_username, proxy_password, exceptions_regex);
    }
    octstr_destroy(proxy);
    octstr_destroy(proxy_username);
    octstr_destroy(proxy_password);
    octstr_destroy(exceptions_regex);
    gwlist_destroy(exceptions, octstr_destroy_item);

    urls = argv + optind;
    num_urls = argc - optind;

    time(&start);
    if (num_threads == 1)
        client_thread(http_caller_create());
    else {
        for (i = 0; i < num_threads; ++i)
            threads[i] = gwthread_create(client_thread, http_caller_create());
        for (i = 0; i < num_threads; ++i)
            gwthread_join(threads[i]);
    }
    time(&end);

    run_time = difftime(end, start);
    info(0, "%ld requests in %f seconds, %f requests/s.",
         (max_requests * num_threads), run_time,
         (max_requests * num_threads) / run_time);

    octstr_destroy(ssl_client_certkey_file);
    octstr_destroy(auth_username);
    octstr_destroy(auth_password);
    octstr_destroy(extra_headers);
    octstr_destroy(content_file);
    gwlist_destroy(split, octstr_destroy_item);

    gwlib_shutdown();

    return 0;
}
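/*
 * Hypothetical invocation (binary name and URL are assumptions), based only
 * on the options parsed above: 4 worker threads, 100 requests per thread,
 * against a single URL given as the trailing argument:
 *
 *   test_http -t 4 -r 100 http://127.0.0.1:8080/
 */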