static int send_destroy_ctx(struct pingpong_context *ctx, struct perftest_parameters *user_param, struct mcast_parameters *mcg_params) { int i; if (user_param->use_mcg) { for (i=0; i < user_param->num_of_qps; i++) { if (ibv_detach_mcast(ctx->qp[i],&mcg_params->base_mgid,mcg_params->base_mlid)) { fprintf(stderr, "Couldn't dettach QP to MultiCast group\n"); return FAILURE; } } if (!strcmp(link_layer_str(user_param->link_type),"IB")) { if (join_multicast_group(SUBN_ADM_METHOD_DELETE,mcg_params)) { fprintf(stderr,"Couldn't Unregister the Mcast group on the SM\n"); return FAILURE; } memcpy(mcg_params->mgid.raw,mcg_params->base_mgid.raw,16); if (join_multicast_group(SUBN_ADM_METHOD_DELETE,mcg_params)) { fprintf(stderr,"Couldn't Unregister the Mcast group on the SM\n"); return FAILURE; } } } return destroy_ctx(ctx,user_param); }
/*
 * Create an IPv4 UDP socket bound to udp_port on all interfaces and join
 * the 224.0.0.42 multicast group.  A failed group join is only logged;
 * the bound socket is still returned.
 * Returns the socket fd, or -1 if socket creation or bind fails.
 */
int udp_setup(int udp_port)
{
	int udp_socket;
	struct sockaddr_in server_udp_addr;

	/* create UDP socket */
	if ((udp_socket = socket(PF_INET, SOCK_DGRAM, 0)) != -1) {
		/* bind UDP socket to port x */
		memset(&server_udp_addr, 0, sizeof(server_udp_addr));
		server_udp_addr.sin_family = AF_INET;
		server_udp_addr.sin_port = htons(udp_port);
		server_udp_addr.sin_addr.s_addr = INADDR_ANY;

		/* fixed: pass the actual address size, not the generic
		 * sizeof(struct sockaddr) */
		if (bind(udp_socket, (struct sockaddr*)&server_udp_addr,
				sizeof(server_udp_addr)) == -1) {
			fprintf(stderr, "Could not bind socket.\n");
			close(udp_socket);
			return -1;
		}

		/* best effort: keep the bound socket even if the join fails */
		if (join_multicast_group(udp_socket, "224.0.0.42") == -1) {
			fprintf(stderr, "Could not join multicast group.\n");
		}
	} else {
		fprintf(stderr, "Could not create socket.\n");
	}

	return udp_socket;
}
static int set_mcast_group(struct pingpong_context *ctx, struct perftest_parameters *user_param, struct mcast_parameters *mcg_params) { struct ibv_port_attr port_attr; if (ibv_query_gid(ctx->context,user_param->ib_port,user_param->gid_index,&mcg_params->port_gid)) { return 1; } if (ibv_query_pkey(ctx->context,user_param->ib_port,DEF_PKEY_IDX,&mcg_params->pkey)) { return 1; } if (ibv_query_port(ctx->context,user_param->ib_port,&port_attr)) { return 1; } mcg_params->sm_lid = port_attr.sm_lid; mcg_params->sm_sl = port_attr.sm_sl; mcg_params->ib_port = user_param->ib_port; if (!strcmp(link_layer_str(user_param->link_type),"IB")) { /* Request for Mcast group create registery in SM. */ if (join_multicast_group(SUBN_ADM_METHOD_SET,mcg_params)) { fprintf(stderr,"Couldn't Register the Mcast group on the SM\n"); return 1; } } return 0; }
/*
 * Open the UDP input socket for this io: bind it to the configured
 * host/port, enlarge the receive buffer and, for multicast addresses,
 * join the group.  Stores the fd in io->fd and returns 1 on success,
 * -1 on failure.
 */
int udp_connect_input(struct io *io) {
	struct sockaddr_storage addr;
	int addrlen = sizeof(addr);
	int sock = -1;

	memset(&addr, 0, sizeof(addr));
	ts_LOGf("Connecting input to %s port %s\n", io->hostname, io->service);

	if (bind_addr(io->hostname, io->service, SOCK_DGRAM, &addr, &addrlen, &sock) < 0)
		return -1;

	/* Enlarge the receive buffer to ~2 MB, rounded to whole TS packets. */
	int rcvbuf = (2000000 / 1316) * 1316;
	setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (void *)&rcvbuf, sizeof(rcvbuf));

	/* Multicast destinations must be joined before data arrives. */
	if (is_multicast(&addr) && join_multicast_group(sock, io->ttl, &addr) < 0) {
		close(sock);
		return -1;
	}

	io->fd = sock;
	ts_LOGf("Input connected to fd:%d\n", io->fd);
	return 1;
}
static int send_destroy_ctx( struct pingpong_context *ctx, struct perftest_parameters *user_param, struct mcast_parameters *mcg_params) { int i; if (user_param->use_mcg) { if (user_param->duplex || user_param->machine == SERVER) { for (i=0; i < user_param->num_of_qps; i++) { if (ibv_detach_mcast(ctx->qp[i],&mcg_params->mgid,mcg_params->mlid)) { fprintf(stderr, "Couldn't attach QP to MultiCast group"); return FAILURE; } } } /* Removal Request for Mcast group in SM if needed. */ if (!strcmp(link_layer_str(user_param->link_type),"IB")) { if (join_multicast_group(SUBN_ADM_METHOD_DELETE,mcg_params)) { fprintf(stderr,"Couldn't Unregister the Mcast group on the SM\n"); return FAILURE; } } } return destroy_ctx(ctx,user_param); }
/*
 * Sets additional interface details and adds routes to interface
 * addresses. Joins/drops multicast group for interfaces.
 */
void if_install(struct interface *ifp)
{
	struct in6_addr *addr;

	/* ASSERT: ifp != lo0 */
	install_address(&ifp->if_lladdr, ifp);/* DELETE or ADD local routes */
	install_address(&ifp->if_sladdr, ifp);
	install_address(&ifp->if_ip6addr, ifp);

	if (ifp->if_lladdr && ifp->if_flag & IFF_UP) {
		/* at least one lladdr to send updates */
		/* Pre-fill the cmsg used when sending, so outgoing packets carry
		 * this interface's link-local address as the source.
		 * NOTE(review): ci_cmsg()/ci_info() are project macros over
		 * if_cinfo -- presumably a cmsghdr + in6_pktinfo pair; confirm. */
		ci_cmsg(ifp->if_cinfo).cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
		ci_cmsg(ifp->if_cinfo).cmsg_level = IPPROTO_IPV6;
		ci_cmsg(ifp->if_cinfo).cmsg_type = IPV6_PKTINFO;
		addr = &ci_info(ifp->if_cinfo).ipi6_addr;
		*addr = ifp->if_lladdr->pl_pref.prf_addr;
		ifp->if_flag |= IFF_RUNNING; /* has linklocal */
	} else
		ifp->if_flag &= ~IFF_RUNNING; /* doesn't have linklocal */

	/*
	 * Join multicast group for the interface if not joined.
	 * ... Even if it has no linklocal, it can receive multicast
	 * ( but at least one IPv6 address needed ?)
	 */
	if (!(ifp->if_flag & IFF_JOINED) && (ifp->if_flag & IFF_UP))
		join_multicast_group(ifp);

	return;
}
int reconfigure_mdns_socket(void) { int err; // Unregister and reregister for MDNS multicast group membership. Doing this // will start giving us MDNS traffic on the new interface. // Leave the group with our previous table of interface IPs err = leave_multicast_group(); if (err != 0) { closesocket(mdns_socket); return err; } // Update the table of interface IPs err = refresh_ip_table(); if (err != 0) { closesocket(mdns_socket); return err; } // Rejoin with the new table err = join_multicast_group(); if (err != 0) { closesocket(mdns_socket); return err; } return 0; }
int udp_connect_output(struct io *io) { struct sockaddr_storage addr; int addrlen = sizeof(addr); int sock = -1; memset(&addr, 0, sizeof(addr)); ts_LOGf("Connecting output to %s port %s ttl: %d\n", io->hostname, io->service, io->ttl); if (bind_addr(io->hostname, io->service, SOCK_DGRAM, &addr, &addrlen, &sock) < 0) return -1; /* Set send buffer size to ~2.0MB */ int bufsize = (2000000 / 1316) * 1316; setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (void *)&bufsize, sizeof(bufsize)); if (is_multicast(&addr)) { if (join_multicast_group(sock, io->ttl, &addr) < 0) { close(sock); return -1; } else { if (addr.ss_family == AF_INET) { if (setsockopt(sock, IPPROTO_IP, IP_MULTICAST_IF, &io->intf, sizeof(io->intf)) < 0) { ts_LOGf("ERROR: setsockopt(IP_MUTICAST_IF %s): %s\n", inet_ntoa(io->intf), strerror(errno)); close(sock); return -1; } } if (addr.ss_family == AF_INET6 && io->v6_if_index > -1) { if (setsockopt(sock, IPPROTO_IPV6, IPV6_MULTICAST_IF, (void *)&io->v6_if_index, sizeof(io->v6_if_index)) < 0) { ts_LOGf("ERROR: setsockopt(IPV6_MUTICAST_IF %d): %s\n", io->v6_if_index, strerror(errno)); close(sock); return -1; } } } } if (addr.ss_family == AF_INET && io->tos > -1) { if (setsockopt(sock, IPPROTO_IP, IP_TOS, &io->tos, sizeof(io->tos)) < 0) { ts_LOGf("ERROR: setsockopt(IP_TOS 0x%02x): %s\n", io->tos, strerror(errno)); } } if (connect(sock, (struct sockaddr *)&addr, addrlen) < 0) { ts_LOGf("ERROR: udp_connect(): %s\n", strerror(errno)); close(sock); return -1; } io->fd = sock; ts_LOGf("Output connected to fd:%d\n", io->fd); return 1; }
int init_mdns_socket(void) { int err; struct sockaddr_in bindaddr; // Create the MDNS socket mdns_socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); if (mdns_socket == -1) { printf("Failed to create socket (Error: %d)\n", platform_last_error()); return -1; } // Bind to the relay port memset(&bindaddr, 0, sizeof(bindaddr)); bindaddr.sin_family = AF_INET; bindaddr.sin_port = htons(MDNS_RELAY_PORT); bindaddr.sin_addr.S_un.S_addr = htonl(INADDR_ANY); // Bind to the MDNS port on all interfaces err = bind(mdns_socket, (struct sockaddr*)&bindaddr, sizeof(bindaddr)); if (err != 0) { printf("Failed to bind socket (Error: %d)\n", platform_last_error()); closesocket(mdns_socket); return -1; } // Initialize IP table mutex platform_mutex_init(&iface_table_mutex); // Load initial IP table err = refresh_ip_table(); if (err != 0) { printf("Failed to load initial IP table\n"); closesocket(mdns_socket); return -1; } // Join the multicast group using the IP table err = join_multicast_group(); if (err != 0) { printf("Failed to join multicast group\n"); closesocket(mdns_socket); return -1; } return 0; }
/*
 * Open the UDP input socket for this io: bind it, enlarge the receive
 * buffer, join the multicast group when the destination is multicast,
 * and optionally restrict reception to a single source address
 * (source-specific multicast) when io->isrc is set.  Stores the fd in
 * io->fd and returns 1 on success, -1 on failure.
 */
int udp_connect_input(struct io *io) {
	struct sockaddr_storage addr;
	int addrlen = sizeof(addr);
	int sock = -1;

	memset(&addr, 0, sizeof(addr));

	/* Log differently depending on whether a source filter is configured. */
	if (!io->isrc.s_addr)
		ts_LOGf("Connecting input to %s port %s\n", io->hostname, io->service);
	else
		ts_LOGf("Connecting input to %s port %s source %s\n", io->hostname, io->service, inet_ntoa(io->isrc));

	if (get_input_socket(io->hostname, io->service, SOCK_DGRAM, &addr, &addrlen, &sock) < 0)
		return -1;

	/* Set receive buffer size to ~2.0MB */
	/* (rounded down to a whole number of 1316-byte TS payloads) */
	int bufsize = (2000000 / 1316) * 1316;
	setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (void *)&bufsize, sizeof(bufsize));

	if (is_multicast(&addr)) {
		if (join_multicast_group(sock, io->ttl, &addr) < 0) {
			close(sock);
			return -1;
		} else {
#ifdef IP_ADD_SOURCE_MEMBERSHIP
			/* IPv4 only: also request source-specific membership so the
			 * kernel delivers traffic only from io->isrc.  A failure here
			 * is logged but deliberately not fatal -- the plain group join
			 * above already succeeded. */
			if (io->isrc.s_addr && addr.ss_family == AF_INET) {
				/* Source-specific multicast */
				struct sockaddr_in *src = (struct sockaddr_in *)&addr;
				struct ip_mreq_source imr;
				memset(&imr, 0, sizeof(imr));
				imr.imr_multiaddr = src->sin_addr;
				imr.imr_sourceaddr = io->isrc;
				if (setsockopt(sock, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP, (char *)&imr, sizeof(struct ip_mreq_source)) < 0) {
					char str_addr[INET6_ADDRSTRLEN];
					my_inet_ntop(addr.ss_family, (struct sockaddr *)&addr, str_addr, sizeof(str_addr));
					ts_LOGf("ERROR: Can't set multicast group %s source %s: %s\n", str_addr, inet_ntoa(io->isrc), strerror(errno));
				}
			}
#endif
		}
	}

	io->fd = sock;
	ts_LOGf("Input connected to fd:%d\n", io->fd);
	return 1;
}
static int set_mcast_group(struct pingpong_context *ctx, struct perftest_parameters *user_param, struct mcast_parameters *mcg_params) { int i; struct ibv_port_attr port_attr; if (ibv_query_gid(ctx->context,user_param->ib_port,user_param->gid_index,&mcg_params->port_gid)) { return 1; } if (ibv_query_pkey(ctx->context,user_param->ib_port,DEF_PKEY_IDX,&mcg_params->pkey)) { return 1; } if (ibv_query_port(ctx->context,user_param->ib_port,&port_attr)) { return 1; } mcg_params->sm_lid = port_attr.sm_lid; mcg_params->sm_sl = port_attr.sm_sl; mcg_params->ib_port = user_param->ib_port; mcg_params->user_mgid = user_param->user_mgid; set_multicast_gid(mcg_params,ctx->qp[0]->qp_num,(int)user_param->machine); if (!strcmp(link_layer_str(user_param->link_type),"IB")) { // Request for Mcast group create registery in SM. if (join_multicast_group(SUBN_ADM_METHOD_SET,mcg_params)) { fprintf(stderr," Failed to Join Mcast request\n"); return 1; } } for (i=0; i < user_param->num_of_qps; i++) { if (ibv_attach_mcast(ctx->qp[i],&mcg_params->mgid,mcg_params->mlid)) { fprintf(stderr, "Couldn't attach QP to MultiCast group"); return 1; } } mcg_params->mcast_state |= MCAST_IS_ATTACHED; return 0; }
/*
 * Resolve the multicast, bind and interface addresses, create a listening
 * socket joined to the multicast group, and spawn a listener thread whose
 * handle is appended to msgStorage->threadHandles.  All failures fall
 * through to the cleanup labels; resolved addresses are always freed.
 */
static void start_listening(messageStorage *msgStorage, const char *multicastAddress, const char *bindAddress)
{
	struct addrinfo *multicastAddr = NULL, *bindAddr = NULL, *interfaceAddr = NULL;
	listenerThreadParams *parameter = NULL;
	const DWORD receiveTimeout = 500;	/* ms, for SO_RCVTIMEO */
	const UINT reuseAddr = 1;
	HANDLE hThread;
	/* fixed: was initialized to 0, so any failure before socket() made the
	 * cleanup path call closesocket(0) on a handle we never owned */
	SOCKET s = INVALID_SOCKET;

	/* Resolve the multicast address */
	multicastAddr = resolve_address(multicastAddress, SEND_PORT, AF_UNSPEC, SOCK_DGRAM, IPPROTO_UDP);
	if (multicastAddr == NULL)
		goto cleanup;

	/* Resolve the binding address, matching the multicast address family */
	bindAddr = resolve_address(bindAddress, SEND_PORT, multicastAddr->ai_family, multicastAddr->ai_socktype, multicastAddr->ai_protocol);
	if (bindAddr == NULL)
		goto cleanup;

	/* Resolve the multicast interface (port "0" = any) */
	interfaceAddr = resolve_address(bindAddress, "0", multicastAddr->ai_family, multicastAddr->ai_socktype, multicastAddr->ai_protocol);
	if (interfaceAddr == NULL)
		goto cleanup;

	/* Create the socket */
	s = socket(multicastAddr->ai_family, multicastAddr->ai_socktype, multicastAddr->ai_protocol);
	if (s == INVALID_SOCKET)
		goto cleanup;

	/* Ensure the socket address can be reused */
	if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *)&reuseAddr, sizeof(reuseAddr)) == SOCKET_ERROR)
		goto cleanup;

	/* Bind the socket to the local interface so we can receive data */
	if (bind(s, bindAddr->ai_addr, bindAddr->ai_addrlen) == SOCKET_ERROR)
		goto cleanup;

	/* Join the multicast group */
	if (join_multicast_group(s, multicastAddr, interfaceAddr) == SOCKET_ERROR)
		goto cleanup;

	/* Set the outgoing interface */
	if (set_send_interface(s, interfaceAddr) == SOCKET_ERROR)
		goto cleanup;

	/* For IPv6, ensure the scope ID is zero */
	if (multicastAddr->ai_family == AF_INET6)
		((SOCKADDR_IN6 *)multicastAddr->ai_addr)->sin6_scope_id = 0;

	/* Set a 500ms receive timeout */
	if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, (const char *)&receiveTimeout, sizeof(receiveTimeout)) == SOCKET_ERROR)
		goto cleanup;

	/* Hand the socket over to a listener thread; ownership of `parameter`
	 * passes to the thread on success */
	parameter = heap_alloc(sizeof(listenerThreadParams));
	parameter->msgStorage = msgStorage;
	parameter->listeningSocket = s;

	hThread = CreateThread(NULL, 0, listening_thread, parameter, 0, NULL);
	if (hThread == NULL)
		goto cleanup;

	msgStorage->threadHandles[msgStorage->numThreadHandles] = hThread;
	msgStorage->numThreadHandles++;
	goto cleanup_addresses;

cleanup:
	if (s != INVALID_SOCKET)
		closesocket(s);
	heap_free(parameter);

cleanup_addresses:
	freeaddrinfo(multicastAddr);
	freeaddrinfo(bindAddr);
	freeaddrinfo(interfaceAddr);
}
/*
 * Entry point for the send-latency perftest: parses arguments, opens the
 * IB device, establishes the client/server socket connection, sets up QPs
 * (optionally joined to a multicast group), runs the latency iterations
 * for one or all message sizes, prints reports, and tears everything down.
 * Returns 0 on success, non-zero on any failure.
 */
int main(int argc, char *argv[])
{
	int i = 0;
	int size_max_pow = 24;
	int ret_val;
	struct report_options report;
	struct pingpong_context ctx;
	struct pingpong_dest *my_dest = NULL;
	struct pingpong_dest *rem_dest = NULL;
	struct mcast_parameters mcg_params;
	struct ibv_device *ib_dev = NULL;
	struct perftest_parameters user_param;
	struct perftest_comm user_comm;

	/* init default values to user's parameters */
	memset(&ctx, 0, sizeof(struct pingpong_context));
	memset(&user_param, 0, sizeof(struct perftest_parameters));
	memset(&user_comm , 0, sizeof(struct perftest_comm));
	memset(&mcg_params, 0, sizeof(struct mcast_parameters));

	user_param.verb = SEND;
	user_param.tst = LAT;
	/* NOTE(review): strncpy leaves version unterminated if VERSION fills
	 * the buffer -- confirm sizeof(user_param.version) > strlen(VERSION). */
	strncpy(user_param.version, VERSION, sizeof(user_param.version));
	user_param.r_flag = &report;

	// Configure the parameters values according to user arguments or defalut values.
	ret_val = parser(&user_param,argv,argc);
	if (ret_val) {
		if (ret_val != VERSION_EXIT && ret_val != HELP_EXIT)
			fprintf(stderr," Parser function exited with Error\n");
		return 1;
	}

	/* XRC and DC transports use twice the number of QPs. */
	if(user_param.use_xrc || user_param.connection_type == DC) {
		user_param.num_of_qps *= 2;
	}

	//Checking that the user did not run with RawEth. for this we have raw_etherent_bw test.
	if (user_param.connection_type == RawEth) {
		fprintf(stderr," This test cannot run Raw Ethernet QPs (you have chosen RawEth as connection type\n");
		return FAILURE;
	}

	// Finding the IB device selected (or defalut if no selected).
	ib_dev = ctx_find_dev(user_param.ib_devname);
	if (!ib_dev) {
		fprintf(stderr," Unable to find the Infiniband/RoCE device\n");
		return 1;
	}

	if (user_param.use_mcg)
		GET_STRING(mcg_params.ib_devname,ibv_get_device_name(ib_dev));

	// Getting the relevant context from the device
	ctx.context = ibv_open_device(ib_dev);
	if (!ctx.context) {
		fprintf(stderr, " Couldn't get context for the device\n");
		return 1;
	}

	// See if MTU and link type are valid and supported.
	if (check_link(ctx.context,&user_param)) {
		fprintf(stderr, " Couldn't get context for the device\n");
		return FAILURE;
	}

	// copy the relevant user parameters to the comm struct + creating rdma_cm resources.
	if (create_comm_struct(&user_comm,&user_param)) {
		fprintf(stderr," Unable to create RDMA_CM resources\n");
		return 1;
	}

	if (user_param.output == FULL_VERBOSITY && user_param.machine == SERVER) {
		printf("\n************************************\n");
		printf("* Waiting for client to connect... *\n");
		printf("************************************\n");
	}

	// Initialize the connection and print the local data.
	if (establish_connection(&user_comm)) {
		fprintf(stderr," Unable to init the socket connection\n");
		return FAILURE;
	}

	exchange_versions(&user_comm, &user_param);
	check_sys_data(&user_comm, &user_param);

	// See if MTU and link type are valid and supported.
	if (check_mtu(ctx.context,&user_param, &user_comm)) {
		fprintf(stderr, " Couldn't get context for the device\n");
		return FAILURE;
	}

	// Print basic test information.
	ctx_print_test_info(&user_param);

	/* One destination entry per QP, for both sides. */
	ALLOCATE(my_dest , struct pingpong_dest , user_param.num_of_qps);
	memset(my_dest, 0, sizeof(struct pingpong_dest)*user_param.num_of_qps);
	ALLOCATE(rem_dest , struct pingpong_dest , user_param.num_of_qps);
	memset(rem_dest, 0, sizeof(struct pingpong_dest)*user_param.num_of_qps);

	// Allocating arrays needed for the test.
	alloc_ctx(&ctx,&user_param);

	// Create (if nessacery) the rdma_cm ids and channel.
	if (user_param.work_rdma_cm == ON) {

		if (user_param.machine == CLIENT) {
			if (retry_rdma_connect(&ctx,&user_param)) {
				fprintf(stderr,"Unable to perform rdma_client function\n");
				return FAILURE;
			}
		} else {
			if (create_rdma_resources(&ctx,&user_param)) {
				fprintf(stderr," Unable to create the rdma_resources\n");
				return FAILURE;
			}
			if (rdma_server_connect(&ctx,&user_param)) {
				fprintf(stderr,"Unable to perform rdma_client function\n");
				return FAILURE;
			}
		}

	} else {
		// create all the basic IB resources (data buffer, PD, MR, CQ and events channel)
		if (ctx_init(&ctx,&user_param)) {
			fprintf(stderr, " Couldn't create IB resources\n");
			return FAILURE;
		}
	}

	// Set up the Connection.
	if (send_set_up_connection(&ctx,&user_param,my_dest,&mcg_params,&user_comm)) {
		fprintf(stderr," Unable to set up socket connection\n");
		return 1;
	}

	for (i=0; i < user_param.num_of_qps; i++)
		ctx_print_pingpong_data(&my_dest[i],&user_comm);

	user_comm.rdma_params->side = REMOTE;

	for (i=0; i < user_param.num_of_qps; i++) {
		// shaking hands and gather the other side info.
		if (ctx_hand_shake(&user_comm,&my_dest[i],&rem_dest[i])) {
			fprintf(stderr,"Failed to exchange data between server and clients\n");
			return 1;
		}
		ctx_print_pingpong_data(&rem_dest[i],&user_comm);
	}

	if (user_param.work_rdma_cm == OFF) {
		if (ctx_check_gid_compatibility(&my_dest[0], &rem_dest[0])) {
			fprintf(stderr,"\n Found Incompatibility issue with GID types.\n");
			fprintf(stderr," Please Try to use a different IP version.\n\n");
			return 1;
		}
	}

	if (user_param.use_mcg) {
		/* Keep our own MGID aside and join the remote side's MGID. */
		memcpy(mcg_params.base_mgid.raw,mcg_params.mgid.raw,16);
		memcpy(mcg_params.mgid.raw,rem_dest[0].gid.raw,16);
		mcg_params.base_mlid = mcg_params.mlid;
		mcg_params.is_2nd_mgid_used = ON;
		if (!strcmp(link_layer_str(user_param.link_type),"IB")) {
			// Request for Mcast group create registery in SM.
			if (join_multicast_group(SUBN_ADM_METHOD_SET,&mcg_params)) {
				fprintf(stderr," Failed to Join Mcast request\n");
				return 1;
			}
		}

		/*
		 * The next stall in code (50 ms sleep) is a work around for fixing the
		 * the bug this test had in Multicast for the past 1 year.
		 * It appears, that when a switch involved, it takes ~ 10 ms for the join
		 * request to propogate on the IB fabric, thus we need to wait for it.
		 * what happened before this fix was reaching the post_send
		 * code segment in about 350 ns from here, and the switch(es) dropped
		 * the packet because join request wasn't finished.
		 */
		usleep(50000);
	}

	if (user_param.work_rdma_cm == OFF) {
		// Prepare IB resources for rtr/rts.
		if (ctx_connect(&ctx,rem_dest,&user_param,my_dest)) {
			fprintf(stderr," Unable to Connect the HCA's through the link\n");
			return 1;
		}
	}

	// shaking hands and gather the other side info.
	if (ctx_hand_shake(&user_comm,&my_dest[0],&rem_dest[0])) {
		fprintf(stderr,"Failed to exchange data between server and clients\n");
		return 1;
	}

	if (user_param.use_event) {
		if (ibv_req_notify_cq(ctx.send_cq, 0)) {
			fprintf(stderr, "Couldn't request RCQ notification\n");
			return 1;
		}
		if (ibv_req_notify_cq(ctx.recv_cq, 0)) {
			fprintf(stderr, "Couldn't request RCQ notification\n");
			return 1;
		}
	}

	if (user_param.output == FULL_VERBOSITY) {
		printf(RESULT_LINE);
		printf("%s",(user_param.test_type == ITERATIONS) ? RESULT_FMT_LAT : RESULT_FMT_LAT_DUR);
		printf((user_param.cpu_util_data.enable ? RESULT_EXT_CPU_UTIL : RESULT_EXT));
	}

	ctx_set_send_wqes(&ctx,&user_param,rem_dest);

	if (user_param.test_method == RUN_ALL) {

		/* UD payload is bounded by the MTU, so cap the largest power of 2. */
		if (user_param.connection_type == UD)
			size_max_pow = (int)UD_MSG_2_EXP(MTU_SIZE(user_param.curr_mtu)) + 1;

		for (i = 1; i < size_max_pow ; ++i) {
			user_param.size = (uint64_t)1 << i;

			// Post recevie recv_wqes fo current message size
			if (ctx_set_recv_wqes(&ctx,&user_param)) {
				fprintf(stderr," Failed to post receive recv_wqes\n");
				return 1;
			}

			// Sync between the client and server so the client won't send packets
			// Before the server has posted his receive wqes (in UC/UD it will result in a deadlock).
			if (ctx_hand_shake(&user_comm,&my_dest[0],&rem_dest[0])) {
				fprintf(stderr,"Failed to exchange data between server and clients\n");
				return 1;
			}

			if(run_iter_lat_send(&ctx, &user_param))
				return 17;

			user_param.test_type == ITERATIONS ? print_report_lat(&user_param) : print_report_lat_duration(&user_param);
		}

	} else {

		// Post recevie recv_wqes fo current message size
		if (ctx_set_recv_wqes(&ctx,&user_param)) {
			fprintf(stderr," Failed to post receive recv_wqes\n");
			return 1;
		}

		// Sync between the client and server so the client won't send packets
		// Before the server has posted his receive wqes (in UC/UD it will result in a deadlock).
		if (ctx_hand_shake(&user_comm,my_dest,rem_dest)) {
			fprintf(stderr,"Failed to exchange data between server and clients\n");
			return 1;
		}

		if(run_iter_lat_send(&ctx, &user_param))
			return 17;

		user_param.test_type == ITERATIONS ? print_report_lat(&user_param) : print_report_lat_duration(&user_param);
	}

	if (user_param.output == FULL_VERBOSITY) {
		printf(RESULT_LINE);
	}

	if (ctx_close_connection(&user_comm,my_dest,rem_dest)) {
		fprintf(stderr,"Failed to close connection between server and client\n");
		fprintf(stderr," Trying to close this side resources\n");
	}

	return send_destroy_ctx(&ctx,&user_param,&mcg_params);
}