/*
 * Accept loop for the daemon's UNIX listening socket.
 *
 * arg: heap-allocated thread_list_t entry carrying the listening fd and
 *      the daemon configuration; it is freed immediately after unpacking.
 *
 * Uses select() over two fds: the listening socket and the read end of a
 * self-pipe (s_fd_accept_terminate) used to deliver control commands
 * (ACCEPT_THREAD_STOP to exit the loop, ACCEPT_THREAD_CLEAN to reap
 * handler threads whose `stopped` flag is set). Each accepted connection
 * gets its own handler thread, tracked in a singly linked list.
 *
 * NOTE(review): the pipe fds created here are never closed on exit, and
 * rc is reused for select()/read()/accept() results — the loop only
 * terminates via the explicit rc = -1 on ACCEPT_THREAD_STOP or a
 * select() failure.
 *
 * Returns NULL (pthread entry point).
 */
static void * _server_socket_accept (void *arg) {
    thread_list_t _entry = (thread_list_t)arg;
    dconfig_data_t *config = _entry->config;
    int fd = _entry->fd;
    thread_list_t thread_list_head = NULL;
    int rc = 0;

    /* The argument was malloc'ed by the spawner; we own it. */
    free (_entry);
    _entry = NULL;

    /* Self-pipe for control commands; common_log(LOG_FATAL,...) is
     * presumably fatal (does not return) — TODO confirm. */
    if (pipe (s_fd_accept_terminate) == -1) {
        common_log (LOG_FATAL, "pipe failed");
    }

    while (rc != -1) {
        fd_set fdset;
        FD_ZERO (&fdset);
        FD_SET (s_fd_accept_terminate[0], &fdset);
        FD_SET (fd, &fdset);

        rc = select (FD_SETSIZE, &fdset, NULL, NULL, NULL);
        if (rc != -1 && rc != 0) {
            if (FD_ISSET (s_fd_accept_terminate[0], &fdset)) {
                accept_command_t cmd;
                /* rc is overwritten with the read() result here; a full
                 * command keeps the loop running unless it is STOP. */
                if ( (rc = read ( s_fd_accept_terminate[0], &cmd, sizeof (cmd)) ) == sizeof (cmd) ) {
                    if (cmd == ACCEPT_THREAD_STOP) {
                        rc = -1;    /* request loop exit */
                    }
                    else if (cmd == ACCEPT_THREAD_CLEAN) {
                        /* Reap finished handler threads: join, close their
                         * fd, unlink from the list, free the node. */
                        thread_list_t entry = thread_list_head;
                        thread_list_t prev = NULL;
                        common_log (LOG_DEBUG, "Cleaning up closed thread");
                        while (entry != NULL) {
                            if (entry->stopped) {
                                thread_list_t temp = entry;
                                common_log (LOG_DEBUG, "Cleaning up closed thread1");
                                pthread_join (entry->thread, NULL);
                                close (entry->fd);
                                if (prev == NULL) {
                                    thread_list_head = entry->next;
                                }
                                else {
                                    prev->next = entry->next;
                                }
                                entry = entry->next;
                                free (temp);
                            }
                            else {
                                prev = entry;
                                entry = entry->next;
                            }
                        }
                    }
                }
            }
            else if (FD_ISSET (fd, &fdset)) {
                /* New client connection on the UNIX socket. */
                struct sockaddr_un addr;
                socklen_t addrlen = sizeof (addr);
                int fd2;
                if ((rc = fd2 = accept (fd, (struct sockaddr *)&addr, &addrlen)) != -1) {
                    thread_list_t entry = NULL;
                    common_log (LOG_DEBUG, "Accepted new socket connection");

                    /* NOTE(review): if LOG_FATAL ever returns, the memset
                     * below would dereference NULL — TODO confirm that
                     * LOG_FATAL aborts the process. */
                    if ((entry = (thread_list_t)malloc (sizeof (struct thread_list_s))) == NULL) {
                        common_log (LOG_FATAL, "malloc failed");
                    }

                    memset (entry, 0, sizeof (struct thread_list_s));
                    entry->next = thread_list_head;
                    entry->fd = fd2;
                    entry->config = config;
                    thread_list_head = entry;

                    if ( pthread_create ( &entry->thread, NULL, _server_socket_command_handler, entry ) ) {
                        common_log (LOG_FATAL, "pthread failed");
                    }
                }
            }
        }
    }

    /* Orderly shutdown: close and join every remaining handler thread. */
    common_log (LOG_DEBUG, "Cleaning up threads");
    while (thread_list_head != NULL) {
        thread_list_t entry = thread_list_head;
        thread_list_head = thread_list_head->next;
        common_log (LOG_DEBUG, "Cleaning up thread1");
        close (entry->fd);
        pthread_join (entry->thread, NULL);
        free (entry);
    }

    return NULL;
}
/* * chitcpd_server_thread_func - Server thread function * * This function will spawn a handler thread (see handler.c) for each * new connection on the UNIX socket. * * args: arguments (a serverinfo_t variable in server_threads_args_t) * * Returns: Nothing. * */ void* chitcpd_server_thread_func(void *args) { socklen_t sunSize; handler_thread_t *handler_thread; server_thread_args_t *sta; serverinfo_t *si; handler_thread_args_t *ha; list_t handler_thread_list; int rc; ChitcpdMsg *req; ChitcpdInitArgs *init_args; ChitcpdConnectionType conntype; ChitcpdMsg resp_outer = CHITCPD_MSG__INIT; ChitcpdResp resp_inner = CHITCPD_RESP__INIT; resp_outer.code = CHITCPD_MSG_CODE__RESP; resp_outer.resp = &resp_inner; /* For naming the handler threads we create (for debugging/logging) */ int next_thread_id = 0; pthread_setname_np(pthread_self(), "unix_server"); /* Unpack arguments */ sta = (server_thread_args_t *) args; si = sta->si; list_init(&handler_thread_list); struct sockaddr_un client_addr; /* Accept connections on the UNIX socket */ for(;;) { socket_t client_socket; /* Accept a connection */ sunSize = sizeof(client_addr); if ((client_socket = accept(si->server_socket, (struct sockaddr *)&client_addr, &sunSize)) == -1) { /* If accept() returns in the CHITCPD_STATE_STOPPING, we don't * care what the error is. We just break out of the loop and * initiate an orderly shutdown. */ if(si->state == CHITCPD_STATE_STOPPING) break; /* If this particular connection fails, no need to kill the entire thread. 
*/ perror("Could not accept() connection on UNIX socket"); continue; } /* We receive a single message, which has to be an INIT message */ rc = chitcpd_recv_msg(client_socket, &req); if (rc < 0) { if(si->state == CHITCPD_STATE_STOPPING) break; else { chilog(ERROR, "Error when receiving lead message through UNIX socket"); shutdown(client_socket, SHUT_RDWR); continue; } } if(req->code != CHITCPD_MSG_CODE__INIT) { chilog(ERROR, "Expected INIT message, instead got message code %i", req->code); chitcpd_msg__free_unpacked(req, NULL); shutdown(client_socket, SHUT_RDWR); continue; } /* Unpack INIT request */ assert(req->init_args != NULL); init_args = req->init_args; conntype = init_args->connection_type; /* There are two types of connections: command connections and debug * connections. * * When a command connection is created, a new thread is created to * handle the incoming chisocket commands on that connection (socket, * send, recv, etc.) * * When a debug connection is created, no additional thread is necessary. * The connection on the UNIX socket is simply "handed off" to a * debug monitor that will be associated with a chiTCP socket. * That UNIX socket is then used to send back debug messages. 
*/ if (conntype == CHITCPD_CONNECTION_TYPE__COMMAND_CONNECTION) { /* Create arguments for handler thread */ ha = malloc(sizeof(handler_thread_args_t)); ha->si = si; handler_thread = malloc(sizeof(handler_thread_t)); handler_thread->handler_socket = client_socket; pthread_mutex_init(&handler_thread->handler_lock, NULL); /* Create handler thread to handle this connection */ ha->client_socket = handler_thread->handler_socket; ha->handler_lock = &handler_thread->handler_lock; snprintf(ha->thread_name, 16, "handler-%d", next_thread_id++); if (pthread_create(&handler_thread->thread, NULL, chitcpd_handler_dispatch, ha) != 0) { perror("Could not create a worker thread"); resp_outer.resp->ret = CHITCP_ETHREAD; resp_outer.resp->error_code = 0; rc = chitcpd_send_msg(client_socket, &resp_outer); free(ha); close(ha->client_socket); close(si->server_socket); // TODO: Perform an orderly shutdown instead of exiting pthread_exit(NULL); } resp_outer.resp->ret = CHITCP_OK; resp_outer.resp->error_code = 0; rc = chitcpd_send_msg(client_socket, &resp_outer); list_append(&handler_thread_list, handler_thread); } else if(conntype == CHITCPD_CONNECTION_TYPE__DEBUG_CONNECTION) { int debug_sockfd, debug_event_flags; ChitcpdDebugArgs *debug_args; /* Unpack debug parameter */ assert(init_args->debug != NULL); debug_args = init_args->debug; debug_sockfd = debug_args->sockfd; debug_event_flags = debug_args->event_flags; rc = chitcpd_init_debug_connection(si, debug_sockfd, debug_event_flags, client_socket); if(rc == CHITCP_OK) { resp_outer.resp->ret = CHITCP_OK; resp_outer.resp->error_code = 0; rc = chitcpd_send_msg(client_socket, &resp_outer); } else { chilog(ERROR, "Error when creating debug connection for socket %i", debug_sockfd); resp_outer.resp->ret = CHITCP_EINIT; resp_outer.resp->error_code = rc; rc = chitcpd_send_msg(client_socket, &resp_outer); shutdown(client_socket, SHUT_RDWR); } } else { chilog(ERROR, "Received INIT message with unknown connection type %i", conntype); 
resp_outer.resp->ret = CHITCP_EINVAL; resp_outer.resp->error_code = 0; rc = chitcpd_send_msg(client_socket, &resp_outer); shutdown(client_socket, SHUT_RDWR); } chitcpd_msg__free_unpacked(req, NULL); } while(!list_empty(&handler_thread_list)) { /* For each handler thread we spawned, we close its socket, which * will force the thread to exit (and we then join it). * * Note that closing a handler thread will also free up all chiTCP * sockets created through that thread, and will also terminate * all associated TCP threads. * * TODO: We should simply detach those threads, since they can exit * before an orderly shutdown and would be left lingering until * we call join here. */ handler_thread_t *ht = list_fetch(&handler_thread_list); /* We don't want to shutdown the handler's socket if an operation is * in progress. The handler thread may have read a command, but * not sent a response back yet */ pthread_mutex_lock(&ht->handler_lock); shutdown(ht->handler_socket, SHUT_RDWR); pthread_mutex_unlock(&ht->handler_lock); pthread_join(ht->thread, NULL); pthread_mutex_destroy(&ht->handler_lock); free(ht); } list_destroy(&handler_thread_list); pthread_exit(NULL); }
// This test is disabled because it is for generating test data.
// It runs a helper thread on a private mmap'ed stack, snapshots that stack
// and the thread's register context, and serializes pid/tid, maps,
// registers, stack bytes and function symbols into "offline_testdata".
TEST(libbacktrace, DISABLED_generate_offline_testdata) {
  // Create a thread to generate the needed stack and registers information.
  const size_t stack_size = 16 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  uintptr_t stack_addr = reinterpret_cast<uintptr_t>(stack);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  // Force the thread to run on the mmap'ed region so its whole stack can be
  // captured by copying [stack, stack + stack_size).
  ASSERT_EQ(0, pthread_attr_setstack(&attr, reinterpret_cast<void*>(stack), stack_size));
  pthread_t thread;
  OfflineThreadArg arg;
  arg.exit_flag = 0;
  ASSERT_EQ(0, pthread_create(&thread, &attr, OfflineThreadFunc, &arg));
  // Wait for the offline thread to generate the stack and unw_context
  // information.
  // NOTE(review): this is a fixed 1s sleep and exit_flag is a plain int
  // shared between threads (unsynchronized) — acceptable for a disabled
  // data-generation test, but not a general pattern.
  sleep(1);
  // Copy the stack information.
  std::vector<uint8_t> stack_data(reinterpret_cast<uint8_t*>(stack),
                                  reinterpret_cast<uint8_t*>(stack) + stack_size);
  arg.exit_flag = 1;
  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(0, munmap(stack, stack_size));

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  ASSERT_TRUE(map != nullptr);

  backtrace_stackinfo_t stack_info;
  stack_info.start = stack_addr;
  stack_info.end = stack_addr + stack_size;
  stack_info.data = stack_data.data();

  // Generate offline testdata.
  std::string testdata;
  // 1. Dump pid, tid
  testdata += android::base::StringPrintf("pid: %d tid: %d\n", getpid(), arg.tid);
  // 2. Dump maps
  for (auto it = map->begin(); it != map->end(); ++it) {
    testdata += android::base::StringPrintf(
        "map: start: %" PRIxPTR " end: %" PRIxPTR " offset: %" PRIxPTR
        " load_bias: %" PRIxPTR " flags: %d name: %s\n",
        it->start, it->end, it->offset, it->load_bias, it->flags, it->name.c_str());
  }
  // 3. Dump registers (raw unw_context bytes as hex)
  testdata += android::base::StringPrintf("registers: %zu ", sizeof(arg.unw_context));
  testdata += RawDataToHexString(&arg.unw_context, sizeof(arg.unw_context));
  testdata.push_back('\n');
  // 4. Dump stack
  testdata += android::base::StringPrintf(
      "stack: start: %" PRIx64 " end: %" PRIx64 " size: %zu ",
      stack_info.start, stack_info.end, stack_data.size());
  testdata += RawDataToHexString(stack_data.data(), stack_data.size());
  testdata.push_back('\n');
  // 5. Dump function symbols
  std::vector<FunctionSymbol> function_symbols = GetFunctionSymbols();
  for (const auto& symbol : function_symbols) {
    testdata += android::base::StringPrintf(
        "function: start: %" PRIxPTR " end: %" PRIxPTR " name: %s\n",
        symbol.start, symbol.end, symbol.name.c_str());
  }
  ASSERT_TRUE(android::base::WriteStringToFile(testdata, "offline_testdata"));
}
int main(int argc, char **argv) { int ch; uint32_t i; int rv; unsigned int iter = 0; glob_arg.ifname[0] = '\0'; glob_arg.output_rings = 0; glob_arg.batch = DEF_BATCH; glob_arg.syslog_interval = DEF_SYSLOG_INT; while ( (ch = getopt(argc, argv, "i:p:b:B:s:")) != -1) { switch (ch) { case 'i': D("interface is %s", optarg); if (strlen(optarg) > MAX_IFNAMELEN - 8) { D("ifname too long %s", optarg); return 1; } if (strncmp(optarg, "netmap:", 7) && strncmp(optarg, "vale", 4)) { sprintf(glob_arg.ifname, "netmap:%s", optarg); } else { strcpy(glob_arg.ifname, optarg); } break; case 'p': if (parse_pipes(optarg)) { usage(); return 1; } break; case 'B': glob_arg.extra_bufs = atoi(optarg); D("requested %d extra buffers", glob_arg.extra_bufs); break; case 'b': glob_arg.batch = atoi(optarg); D("batch is %d", glob_arg.batch); break; case 's': glob_arg.syslog_interval = atoi(optarg); D("syslog interval is %d", glob_arg.syslog_interval); break; default: D("bad option %c %s", ch, optarg); usage(); return 1; } } if (glob_arg.ifname[0] == '\0') { D("missing interface name"); usage(); return 1; } /* extract the base name */ char *nscan = strncmp(glob_arg.ifname, "netmap:", 7) ? 
glob_arg.ifname : glob_arg.ifname + 7; strncpy(glob_arg.base_name, nscan, MAX_IFNAMELEN); for (nscan = glob_arg.base_name; *nscan && !index("-*^{}/@", *nscan); nscan++) ; *nscan = '\0'; if (glob_arg.num_groups == 0) parse_pipes(""); setlogmask(LOG_UPTO(LOG_INFO)); openlog("lb", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1); uint32_t npipes = glob_arg.output_rings; pthread_t stat_thread; ports = calloc(npipes + 1, sizeof(struct port_des)); if (!ports) { D("failed to allocate the stats array"); return 1; } struct port_des *rxport = &ports[npipes]; init_groups(); if (pthread_create(&stat_thread, NULL, print_stats, NULL) == -1) { D("unable to create the stats thread: %s", strerror(errno)); return 1; } /* we need base_req to specify pipes and extra bufs */ struct nmreq base_req; memset(&base_req, 0, sizeof(base_req)); base_req.nr_arg1 = npipes; base_req.nr_arg3 = glob_arg.extra_bufs; rxport->nmd = nm_open(glob_arg.ifname, &base_req, 0, NULL); if (rxport->nmd == NULL) { D("cannot open %s", glob_arg.ifname); return (1); } else { D("successfully opened %s (tx rings: %u)", glob_arg.ifname, rxport->nmd->req.nr_tx_slots); } uint32_t extra_bufs = rxport->nmd->req.nr_arg3; struct overflow_queue *oq = NULL; /* reference ring to access the buffers */ rxport->ring = NETMAP_RXRING(rxport->nmd->nifp, 0); if (!glob_arg.extra_bufs) goto run; D("obtained %d extra buffers", extra_bufs); if (!extra_bufs) goto run; /* one overflow queue for each output pipe, plus one for the * free extra buffers */ oq = calloc(npipes + 1, sizeof(struct overflow_queue)); if (!oq) { D("failed to allocated overflow queues descriptors"); goto run; } freeq = &oq[npipes]; rxport->oq = freeq; freeq->slots = calloc(extra_bufs, sizeof(struct netmap_slot)); if (!freeq->slots) { D("failed to allocate the free list"); } freeq->size = extra_bufs; snprintf(freeq->name, MAX_IFNAMELEN, "free queue"); /* * the list of buffers uses the first uint32_t in each buffer * as the index of the next buffer. 
*/ uint32_t scan; for (scan = rxport->nmd->nifp->ni_bufs_head; scan; scan = *(uint32_t *)NETMAP_BUF(rxport->ring, scan)) { struct netmap_slot s; s.buf_idx = scan; ND("freeq <- %d", s.buf_idx); oq_enq(freeq, &s); } if (freeq->n != extra_bufs) { D("something went wrong: netmap reported %d extra_bufs, but the free list contained %d", extra_bufs, freeq->n); return 1; } rxport->nmd->nifp->ni_bufs_head = 0; run: /* we need to create the persistent vale ports */ if (create_custom_ports(rxport->nmd->req.nr_arg2)) { free_buffers(); return 1; } atexit(delete_custom_ports); atexit(free_buffers); int j, t = 0; for (j = 0; j < glob_arg.num_groups; j++) { struct group_des *g = &groups[j]; int k; for (k = 0; k < g->nports; ++k) { struct port_des *p = &g->ports[k]; char interface[25]; sprintf(interface, "netmap:%s{%d/xT", g->pipename, g->first_id + k); D("opening pipe named %s", interface); p->nmd = nm_open(interface, NULL, 0, rxport->nmd); if (p->nmd == NULL) { D("cannot open %s", interface); return (1); } else { D("successfully opened pipe #%d %s (tx slots: %d)", k + 1, interface, p->nmd->req.nr_tx_slots); p->ring = NETMAP_TXRING(p->nmd->nifp, 0); } D("zerocopy %s", (rxport->nmd->mem == p->nmd->mem) ? 
"enabled" : "disabled"); if (extra_bufs) { struct overflow_queue *q = &oq[t + k]; q->slots = calloc(extra_bufs, sizeof(struct netmap_slot)); if (!q->slots) { D("failed to allocate overflow queue for pipe %d", k); /* make all overflow queue management fail */ extra_bufs = 0; } q->size = extra_bufs; snprintf(q->name, MAX_IFNAMELEN, "oq %s{%d", g->pipename, k); p->oq = q; } } t += g->nports; } if (glob_arg.extra_bufs && !extra_bufs) { if (oq) { for (i = 0; i < npipes + 1; i++) { free(oq[i].slots); oq[i].slots = NULL; } free(oq); oq = NULL; } D("*** overflow queues disabled ***"); } sleep(2); struct pollfd pollfd[npipes + 1]; memset(&pollfd, 0, sizeof(pollfd)); signal(SIGINT, sigint_h); while (!do_abort) { u_int polli = 0; iter++; for (i = 0; i < npipes; ++i) { struct netmap_ring *ring = ports[i].ring; if (nm_ring_next(ring, ring->tail) == ring->cur) { /* no need to poll, there are no packets pending */ continue; } pollfd[polli].fd = ports[i].nmd->fd; pollfd[polli].events = POLLOUT; pollfd[polli].revents = 0; ++polli; } pollfd[polli].fd = rxport->nmd->fd; pollfd[polli].events = POLLIN; pollfd[polli].revents = 0; ++polli; //RD(5, "polling %d file descriptors", polli+1); rv = poll(pollfd, polli, 10); if (rv <= 0) { if (rv < 0 && errno != EAGAIN && errno != EINTR) RD(1, "poll error %s", strerror(errno)); continue; } if (oq) { /* try to push packets from the overflow queues * to the corresponding pipes */ for (i = 0; i < npipes; i++) { struct port_des *p = &ports[i]; struct overflow_queue *q = p->oq; struct group_des *g = p->group; uint32_t j, lim; struct netmap_ring *ring; struct netmap_slot *slot; if (oq_empty(q)) continue; ring = p->ring; lim = nm_ring_space(ring); if (!lim) continue; if (q->n < lim) lim = q->n; for (j = 0; j < lim; j++) { struct netmap_slot s = oq_deq(q), tmp; tmp.ptr = 0; slot = &ring->slot[ring->cur]; if (slot->ptr && !g->last) { tmp.buf_idx = forward_packet(g + 1, slot); /* the forwarding may have removed packets * from the current queue */ if (q->n 
< lim) lim = q->n; } else { tmp.buf_idx = slot->buf_idx; } oq_enq(freeq, &tmp); *slot = s; slot->flags |= NS_BUF_CHANGED; ring->cur = nm_ring_next(ring, ring->cur); } ring->head = ring->cur; forwarded += lim; p->ctr.pkts += lim; } } int batch = 0; for (i = rxport->nmd->first_rx_ring; i <= rxport->nmd->last_rx_ring; i++) { struct netmap_ring *rxring = NETMAP_RXRING(rxport->nmd->nifp, i); //D("prepare to scan rings"); int next_cur = rxring->cur; struct netmap_slot *next_slot = &rxring->slot[next_cur]; const char *next_buf = NETMAP_BUF(rxring, next_slot->buf_idx); while (!nm_ring_empty(rxring)) { struct netmap_slot *rs = next_slot; struct group_des *g = &groups[0]; // CHOOSE THE CORRECT OUTPUT PIPE uint32_t hash = pkt_hdr_hash((const unsigned char *)next_buf, 4, 'B'); if (hash == 0) { non_ip++; // XXX ?? } rs->ptr = hash | (1UL << 32); // prefetch the buffer for the next round next_cur = nm_ring_next(rxring, next_cur); next_slot = &rxring->slot[next_cur]; next_buf = NETMAP_BUF(rxring, next_slot->buf_idx); __builtin_prefetch(next_buf); // 'B' is just a hashing seed rs->buf_idx = forward_packet(g, rs); rs->flags |= NS_BUF_CHANGED; rxring->head = rxring->cur = next_cur; batch++; if (unlikely(batch >= glob_arg.batch)) { ioctl(rxport->nmd->fd, NIOCRXSYNC, NULL); batch = 0; } ND(1, "Forwarded Packets: %"PRIu64" Dropped packets: %"PRIu64" Percent: %.2f", forwarded, dropped, ((float)dropped / (float)forwarded * 100)); } } } pthread_join(stat_thread, NULL); printf("%"PRIu64" packets forwarded. %"PRIu64" packets dropped. Total %"PRIu64"\n", forwarded, dropped, forwarded + dropped); return 0; }
int main() { int failed = 0; int i; int first, last; pthread_t t[NUMTHREADS + 1]; assert((t[0] = pthread_self()).p != NULL); assert(cvthing.notbusy == PTHREAD_COND_INITIALIZER); assert(cvthing.lock == PTHREAD_MUTEX_INITIALIZER); (void) pthread_win32_getabstime_np(&abstime, &reltime); assert((t[0] = pthread_self()).p != NULL); awoken = 0; for (first = 1, last = NUMTHREADS / 2; first < NUMTHREADS; first = last + 1, last = NUMTHREADS) { assert(pthread_mutex_lock(&start_flag) == 0); for (i = first; i <= last; i++) { threadbag[i].started = 0; threadbag[i].threadnum = i; assert(pthread_create(&t[i], NULL, mythread, (void *) &threadbag[i]) == 0); } /* * Code to control or manipulate child threads should probably go here. */ cvthing.shared = 0; assert(pthread_mutex_unlock(&start_flag) == 0); /* * Give threads time to start. */ Sleep(100); assert(pthread_mutex_lock(&cvthing.lock) == 0); cvthing.shared++; assert(pthread_mutex_unlock(&cvthing.lock) == 0); assert(pthread_cond_broadcast(&cvthing.notbusy) == 0); /* * Give threads time to complete. */ for (i = first; i <= last; i++) { assert(pthread_join(t[i], NULL) == 0); } assert(awoken == (i - 1)); } /* * Standard check that all threads started. */ for (i = 1; i <= NUMTHREADS; i++) { failed = !threadbag[i].started; if (failed) { fprintf(stderr, "Thread %d: started %d\n", i, threadbag[i].started); } } /* * Cleanup the CV. */ assert(pthread_mutex_destroy(&cvthing.lock) == 0); assert(cvthing.lock == NULL); assert(pthread_cond_destroy(&cvthing.notbusy) == 0); assert(cvthing.notbusy == NULL); assert(!failed); /* * Check any results here. */ assert(awoken == NUMTHREADS); /* * Success. */ return 0; }
/* MSVM2 training function (Frank-Wolfe algorithm)

   Solves the dual problem wrt alpha:

   min 1/2 alpha' H_tilde alpha - (1/Q-1) sum (alpha)

   s.t. alpha_ik >= 0, for all i,k, such that  1 <= i <= m, 1 <= k != y_i <= Q
        sum_i (alpha_ik - average_alpha_k) = 0, for all k, 1 <= k <= Q

   where H_tilde_ik,jl = (delta_k,l - 1/Q) ( k(x_i,x_j) + (1/2C)delta_i,j )

   Spawns `nprocs` worker threads plus a monitor thread that periodically
   saves the model to model_tmp_file. Pass training_set == NULL to resume
   training on the data already loaded in `model`.

   Returns: 0 if the optimum was reached by some worker thread,
            1 if MSVM_TRAIN_MAXIT iterations were hit,
            the iteration count if training was interrupted by the user.
*/
long MSVM_2fw_train(struct Model *model, struct Data *training_set, long chunk_size, const double accuracy, int cache_memory, const int nprocs, char *alpha0_file, char *model_tmp_file, char *log_file)
{
    long return_status = -1;
    FILE *fp;
    int t;
    pthread_t *threads = (pthread_t *)malloc(sizeof(pthread_t) * nprocs);	// threads id
    void *status;	// for pthread_join
    int rc;		// return code of pthread functions
    pthread_t thread_monitor;
    enum AlphaInitType alphaiszero;

    if(training_set != NULL) {
        // Initialize model for this particular training set
        model_load_data(model, training_set);
    }

    // Chunk indices are drawn with a 32-bit RNG, hence the hard limit.
    if(model->nb_data > 2147483647) {
        printf("Cannot compute table chunk: number of data above random number limit\n");
        exit(0);
    }

    // Check if a kernel_par has been set
    if(model->nature_kernel != LINEAR && model->kernel_par == NULL)
        set_default_kernel_par(model);

    // Log file (NOTE(review): fopen result is not checked; workers receive
    // fp as-is via thread_data.logfile_ptr)
    if (log_file != NULL) {
        printf("\nLog training info in %s ...\n", log_file);
        fp = fopen(log_file,"a");
    }
    else
        fp = NULL;

    // Initialize alpha and b
    if(training_set != NULL)	// otherwise resume training
        init_alpha_b(model, alpha0_file);  // use init_alpha_b(model, NULL); to initialize all alpha to 0

    if(training_set == NULL || alpha0_file != NULL)
        alphaiszero = ALPHA_NOT_ZERO;
    else
        alphaiszero = ALPHA_IS_ZERO;

    EVAL = 0;	// triggered by signal handler to call eval
    STOP = 0;	// triggered by user to stop training

    // Prepare monitoring of model (periodic snapshot to model_tmp_file)
    struct MonitorData monitor_data;
    monitor_data.model_tmp_file = model_tmp_file;
    monitor_data.period = MONITOR_PERIOD;
    monitor_data.model = model;

    pthread_create(&thread_monitor, NULL, MSVM_monitor_model_thread, (void *) &monitor_data);

    // Allocate memory for shared ressources
    double **gradient = matrix(model->nb_data, model->Q);
    double **H_alpha = matrix(model->nb_data, model->Q);
    double **H_tilde_alpha = matrix(model->nb_data, model->Q);
    double best_primal_upper_bound = HUGE_VAL;
    int *activeset = (int *)calloc(model->nb_data+1, sizeof(int));
    double *lp_rhs = (double*)calloc(model->Q, sizeof(double));

    // including the kernel cache:
    struct KernelCache kc;
    unsigned long cache_size = (unsigned long)cache_memory * MBYTES;
    kernel_initialize_cache(cache_size, &kc, model);

    printf("Initialization... ");fflush(stdout);

    // Initialize gradient
    MSVM2fw_init_gradient(alphaiszero, gradient, H_alpha,H_tilde_alpha, model);

    printf("Done.\n");

    // Initialize display
    if(accuracy>0)
        print_training_info(0,model);
    else {
        printf("Training...");fflush(stdout);
    }

    model->iter = 1;	// number of iterations (over all threads)

    // Prepare data for computing threads.
    // A single thread_data struct is shared by all workers; the mutex
    // handshake below serializes hand-off of thread_id to each thread.
    long nb_SV = 0;
    struct ThreadData thread_data;
    thread_data.nprocs = nprocs;
    thread_data.model = model;
    thread_data.kernelcache = &kc;
    thread_data.chunk_size = chunk_size;
    thread_data.accuracy = accuracy;
    thread_data.gradient = gradient;
    thread_data.H_alpha = H_alpha;
    thread_data.H_tilde_alpha = H_tilde_alpha;
    thread_data.best_primal_upper_bound = &best_primal_upper_bound;
    thread_data.activeset = activeset;
    thread_data.nb_SV = &nb_SV;
    thread_data.lp_rhs = lp_rhs;
    thread_data.logfile_ptr = fp;

    // Launch computing threads
    // NOTE(review): the mutex is locked here and presumably unlocked by
    // MSVM2fw_train_thread once it has copied thread_data — confirm in the
    // worker; otherwise this would deadlock for nprocs > 1.
    for(t=0;t<nprocs;t++) {
        pthread_mutex_lock(&thread_data_mutex);	// Wait for thread_data to be read by previous thread
        thread_data.thread_id = t;
        rc = pthread_create(&threads[t], NULL, MSVM2fw_train_thread, (void *) &thread_data);
    }

    // Wait for threads to terminate
    for(t=0;t<nprocs;t++) {
        rc = pthread_join(threads[t],&status);
        // Check if optimum has been reached by this thread
        // (workers return 0 cast to void* on convergence)
        if((long long)status == 0)
            return_status=0;
    }

    // Cancel monitor thread
    rc = pthread_cancel(thread_monitor);
    rc = pthread_join(thread_monitor,&status);

    if(log_file != NULL)
        fclose(fp);

    // matrix() apparently returns 1-indexed row arrays with a single
    // backing block at row 1 — hence free(x[1]) then free(x).
    free(gradient[1]);free(gradient);
    free(H_alpha[1]);free(H_alpha);
    free(H_tilde_alpha[1]);free(H_tilde_alpha);
    free(activeset);
    free(lp_rhs);
    kernel_free_cache(&kc);
    free(threads);

    if(model->iter == MSVM_TRAIN_MAXIT)
        return_status = 1; // max iterations.
    else if(return_status != 0)
        return_status = model->iter; // interrupted by user

    return return_status;
}
void aguardarTerminoDeThreads(pthread_t thread[]){ int i; for(i = 0; i < NTHREADS; i++){ pthread_join(thread[i], NULL); } }
int main(int argc, char *argv[]) { char line,c; int i,j,e1, e2; edge e; createList(&edgeList, sizeof(edge), NULL); pthread_t thread_satcnf, thread_approx_1, thread_approx_2; loop:while(scanf(" %c", &line) != EOF) { switch(line) { case 'V': scanf(" %d", &numNodes); if(numNodes <= 0) { fprintf(stderr,"Error: Invalid number of vertices: %d!\n", numNodes); goto loop; } if(edgeList.length != 0) { destroy(&edgeList); } break; case 'E': scanf(" %c", &c); while(c != '}') { if(!scanf(" <%d,%d>", &e1,&e2)) goto loop; if( (e1 >= numNodes || e1 < 0) || (e2 >= numNodes || e2 < 0)) { fprintf(stderr,"Error: Invalid edge <%d,%d>!\n", e1, e2); destroy(&edgeList); goto loop; } e.p1 = e1; e.p2 = e2; append(&edgeList,&e); scanf("%c", &c); //scan ',' or '}' } thread_function_args thread_args[N]; /*initialize parameters for each thread function*/ for(i=0; i<N; i++) { thread_args[i].numNodes = numNodes; thread_args[i].edgeList = &edgeList; thread_args[i].vc = NULL; } int iter = 1; #ifdef DEBUG iter = 10; double ratio1,ratio2; double *runTimeSatCnf = (double *)malloc(iter*sizeof(double)); double *runTimeApprox1 = (double *)malloc(iter*sizeof(double)); double *runTimeApprox2 = (double *)malloc(iter*sizeof(double)); #endif for(j=0; j<iter; j++) { pthread_create(&thread_satcnf, NULL, &sat_cnf, &thread_args[0]); pthread_create(&thread_approx_1, NULL, &approx1, &thread_args[1]); pthread_create(&thread_approx_2, NULL, &approx2, &thread_args[2]); pthread_join(thread_satcnf, NULL); pthread_join(thread_approx_1, NULL); pthread_join(thread_approx_2, NULL); #ifdef DEBUG runTimeSatCnf[j] = thread_args[0].cputime; runTimeApprox1[j] = thread_args[1].cputime; runTimeApprox2[j] = thread_args[2].cputime; #endif } #ifdef DEBUG ratio1 = thread_args[1].vcSize / (double) thread_args[0].vcSize; ratio2 = thread_args[2].vcSize / (double) thread_args[0].vcSize; for(j=0; j<iter; j++) { //printf("%f,%f\n", runTimeApprox1[j],runTimeApprox2[j]); printf("%f,%f,%f\n", 
runTimeSatCnf[j],runTimeApprox1[j],runTimeApprox2[j]); fflush(stdout); } printf("%f,%f\n", ratio1,ratio2); printf("%f\n", ratio); fflush(stdout); for(i=0; i<N; i++) { free(thread_args[i].vc); } free(runTimeSatCnf); free(runTimeApprox1); free(runTimeApprox2); #else const char *name[N] = {"CNF-SAT-VC", "APPROX-VC-1", "APPROX-VC-2"}; for(i=0; i<N; i++) { printVC(thread_args[i].vcSize, thread_args[i].vc, name[i]); free(thread_args[i].vc); } #endif break; } } destroy(&edgeList); }
/*
 * Output-plugin reader thread (Hindsight).
 *
 * Tails two on-disk message streams (the input queue and the analysis
 * queue), interleaves them oldest-timestamp-first, and feeds each message
 * to output_message(). When both streams are idle it emits an empty
 * message once per second so the plugin's timer events still fire.
 *
 * Under HINDSIGHT_CLI the loop additionally tracks whether each stream has
 * reached its final file, so the process can exit once both are drained
 * after a stop request; otherwise it runs until p->stop is set.
 *
 * On a non-fatal exit path where the plugin was not stopped (the sandbox
 * detached), the thread unregisters and destroys the plugin itself and
 * detaches its own pthread.
 */
static void* input_thread(void *arg)
{
  lsb_heka_message *msg = NULL;
  lsb_heka_message im, *pim = NULL;   /* current input-stream message */
  lsb_init_heka_message(&im, 8);
  lsb_heka_message am, *pam = NULL;   /* current analysis-stream message */
  lsb_init_heka_message(&am, 8);

  hs_output_plugin *p = (hs_output_plugin *)arg;
  hs_log(NULL, p->name, 6, "starting");

  size_t discarded_bytes;
  size_t bytes_read[2] = { 0 };       /* [0]=input stream, [1]=analysis stream */
  int ret = 0;
  lsb_logger logger = {.context = NULL, .cb = hs_log};

#ifdef HINDSIGHT_CLI
  bool input_stop = false, analysis_stop = false;
  while (!(p->stop && input_stop && analysis_stop)) {
#else
  while (!p->stop) {
#endif
    /* ---- input stream: parse one message or read more bytes ---- */
    if (p->input.fh && !pim) {
      if (lsb_find_heka_message(&im, &p->input.ib, true, &discarded_bytes, &logger)) {
        pim = &im;
      } else {
        bytes_read[0] = hs_read_file(&p->input);
      }
      if (!bytes_read[0]) {
#ifdef HINDSIGHT_CLI
        size_t cid = p->input.cp.id;
#endif
        // see if the next file is there yet
        hs_open_file(&p->input, hs_input_dir, p->input.cp.id + 1);
#ifdef HINDSIGHT_CLI
        /* no next file and a stop was requested: input side is drained */
        if (cid == p->input.cp.id && p->stop) {
          input_stop = true;
        }
#endif
      }
    } else if (!p->input.fh) { // still waiting on the first file
      hs_open_file(&p->input, hs_input_dir, p->input.cp.id);
#ifdef HINDSIGHT_CLI
      if (!p->input.fh && p->stop) {
        input_stop = true;
      }
#endif
    }

    /* ---- analysis stream: same pattern as the input stream ---- */
    if (p->analysis.fh && !pam) {
      if (lsb_find_heka_message(&am, &p->analysis.ib, true, &discarded_bytes, &logger)) {
        pam = &am;
      } else {
        bytes_read[1] = hs_read_file(&p->analysis);
      }
      if (!bytes_read[1]) {
#ifdef HINDSIGHT_CLI
        size_t cid = p->analysis.cp.id;
#endif
        // see if the next file is there yet
        hs_open_file(&p->analysis, hs_analysis_dir, p->analysis.cp.id + 1);
#ifdef HINDSIGHT_CLI
        if (cid == p->analysis.cp.id && p->stop) {
          analysis_stop = true;
        }
#endif
      }
    } else if (!p->analysis.fh) { // still waiting on the first file
      hs_open_file(&p->analysis, hs_analysis_dir, p->analysis.cp.id);
#ifdef HINDSIGHT_CLI
      if (!p->analysis.fh && p->stop) {
        analysis_stop = true;
      }
#endif
    }

    // if we have one send the oldest first
    if (pim) {
      if (pam) {
        if (pim->timestamp <= pam->timestamp) {
          msg = pim;
        } else {
          msg = pam;
        }
      } else {
        msg = pim;
      }
    } else if (pam) {
      msg = pam;
    }

    if (msg) {
      /* Record the checkpoint of the message being delivered; the offset
       * subtracts the bytes buffered past the message's own scan position. */
      if (msg == pim) {
        pim = NULL;
        p->cur.input.id = p->input.cp.id;
        p->cur.input.offset = p->input.cp.offset -
            (p->input.ib.readpos - p->input.ib.scanpos);
      } else {
        pam = NULL;
        p->cur.analysis.id = p->analysis.cp.id;
        p->cur.analysis.offset = p->analysis.cp.offset -
            (p->analysis.ib.readpos - p->analysis.ib.scanpos);
      }
      ret = output_message(p, msg);
      if (ret == LSB_HEKA_PM_RETRY) {
        /* Back off one second between retries until delivered or stopped. */
        while (!p->stop && ret == LSB_HEKA_PM_RETRY) {
          const char *err = lsb_heka_get_error(p->hsb);
          hs_log(NULL, p->name, 7, "retry message %llu err: %s", p->sequence_id, err);
          sleep(1);
          ret = output_message(p, msg);
        }
      }
      if (ret > 0) {
        break; // fatal error
      }
      msg = NULL;
    } else if (!bytes_read[0] && !bytes_read[1]) {
      // trigger any pending timer events
      lsb_clear_heka_message(&im); // create an idle/empty message
      msg = &im;
      output_message(p, msg);
      msg = NULL;
      sleep(1);
    }
  }

  shutdown_timer_event(p);
  lsb_free_heka_message(&am);
  lsb_free_heka_message(&im);

  // hold the current checkpoints in memory incase we restart it
  hs_update_input_checkpoint(&p->plugins->cfg->cp_reader,
                             hs_input_dir,
                             p->name,
                             &p->cp.input);

  hs_update_input_checkpoint(&p->plugins->cfg->cp_reader,
                             hs_analysis_dir,
                             p->name,
                             &p->cp.analysis);

  if (p->stop) {
    hs_log(NULL, p->name, 6, "shutting down");
  } else {
    /* Sandbox detached on its own: unregister and self-destruct. */
    hs_log(NULL, p->name, 6, "detaching received: %d msg: %s", ret,
           lsb_heka_get_error(p->hsb));
    pthread_mutex_lock(&p->plugins->list_lock);
    hs_output_plugins *plugins = p->plugins;
    plugins->list[p->list_index] = NULL;
    if (pthread_detach(p->thread)) {
      hs_log(NULL, p->name, 3, "thread could not be detached");
    }
    destroy_output_plugin(p);
    --plugins->list_cnt;
    pthread_mutex_unlock(&plugins->list_lock);
  }
  pthread_exit(NULL);
}


/*
 * Remove the plugin at `idx` from the list: signal it to stop, join its
 * thread, then free it. Caller is presumed to hold any required list lock
 * — TODO confirm against other callers of plugins->list.
 */
static void remove_plugin(hs_output_plugins *plugins, int idx)
{
  hs_output_plugin *p = plugins->list[idx];
  plugins->list[idx] = NULL;
  p->stop = true;
  if (pthread_join(p->thread, NULL)) {
    hs_log(NULL, p->name, 3, "remove_plugin could not pthread_join");
  }
  destroy_output_plugin(p);
  --plugins->list_cnt;
}
// Start the thread running `threadFunction(parameter)` on any core.
// Kills any previously running thread first. Returns true on success;
// on failure clears the stored function/parameter and returns false.
bool Thread::start(classID (threadFunction)(classID), classID parameter){
    //kill the previous thread
    this->kill();
    //test if the function is true
    if(threadFunction){
        //WINDOWS 32
#ifdef WIN32
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL, //
                                      edkThreadFunc, // thread function
                                      (void*)this, // thread parameter
                                      (DWORD)NULL, //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
#elif defined WIN64
        //WINDOWS 64
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL, //
                                      edkThreadFunc, // thread function
                                      (void*)this, // thread parameter
                                      (DWORD)NULL, //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
#elif defined __linux__
        //LINUX
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        // NOTE(review): pthread_create's return value is ignored; on failure
        // threadID may hold stale data -- TODO confirm cleanThread() zeroes it.
        pthread_create(&threadID, &attr, edkThreadFunc, (void*)this);
        //test if create the thread
        if(this->threadID!=(pthread_t)0u){
#elif defined __APPLE__
            //APPLE
#endif
            //copy the function
            this->threadFunc=threadFunction;
            //copy the parameter
            this->funcParameter=parameter;
            //then return true;
            return true;
        }
    }
    //clean
    this->cleanThread();
    //else he clean the func
    this->threadFunc=NULL;
    return false;
}
// Convenience overload: start with no parameter (passes NULL).
bool Thread::start(classID (threadFunction)(classID)){
    return this->start(threadFunction,(void*)NULL);
}
// Start the thread pinned to CPU `core` (must be < this->cores).
// Same success/failure contract as start().
bool Thread::startIn(classID (threadFunction)(classID), classID parameter, edk::uint32 core){
    //kill the previous thread
    this->kill();
    //test if the function is true and if the core exist
    if(threadFunction && core<this->cores){
        //WINDOWS 32
#ifdef WIN32
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL, //
                                      edkThreadFunc, // thread function
                                      (void*)this, // thread parameter
                                      (DWORD)NULL, //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            SetThreadAffinityMask(this->threadID, mask);
#elif defined WIN64
        //WINDOWS 64
        DWORD flag;
        this->threadID = CreateThread(NULL, //
                                      (DWORD)NULL, //
                                      edkThreadFunc, // thread function
                                      (void*)this, // thread parameter
                                      (DWORD)NULL, //
                                      &flag);
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            SetThreadAffinityMask(this->threadID, mask);
#elif defined __linux__
        //LINUX
        pthread_attr_t attr;
        // NOTE(review): cpus is not CPU_ZERO'd here (changeCore() does zero it),
        // so repeated startIn() calls accumulate cores in the mask -- verify intent.
        CPU_SET(core, &this->cpus);
        //start the attribute
        pthread_attr_init(&attr);
        //set the core on the attribute
        pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &this->cpus);
        //set affinity
        pthread_create(&threadID, &attr, edkThreadFunc, (void*)this);
        //test if create the thread
        if(this->threadID!=(pthread_t)0u){
#elif defined __APPLE__
            //APPLE
#endif
            //copy the function
            this->threadFunc=threadFunction;
            //copy the parameter
            this->funcParameter=parameter;
            //then return true;
            return true;
        }
    }
    //clean
    this->cleanThread();
    //else he clean the func
    this->threadFunc=NULL;
    return false;
}
// Convenience overload: pinned start with no parameter.
bool Thread::startIn(classID (threadFunction)(classID), edk::uint32 core){
    return this->startIn(threadFunction, NULL, core);
}
//change the threadCore
// Re-pin a running thread to CPU `core`; returns true on success.
bool Thread::changeCore(edk::uint32 core){
    //test if have the core
    if(core<this->cores){
#ifdef WIN32
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            if(SetThreadAffinityMask(this->threadID, mask)){
                return true;
#elif defined WIN64
        //WINDOWS 64
        //test if create the thread
        if(this->threadID!=(HANDLE)0u){
            DWORD_PTR mask = core;
            if(SetThreadAffinityMask(this->threadID, mask)){
                return true;
#elif defined __linux__
        //test if have the thread
        if(this->threadID!=(pthread_t)0u){
            CPU_ZERO(&this->cpus);
            CPU_SET(core, &this->cpus);
            //set the core
            if(!pthread_setaffinity_np(this->threadID,sizeof(cpu_set_t), &this->cpus)){
                return true;
            }
#elif defined __APPLE__
        //APPLE
#endif
        }
    }
    return false;
}
// Invoke the stored thread function once (with or without its parameter),
// then clear both; returns false if no function was set.
bool Thread::runFunc(){
    if(this->threadFunc){
        //test if have parameter
        if(this->funcParameter){
            //then run the function with its parameter
            this->threadFunc((void*)this->funcParameter);
        }
        else{
            //then run the function without a parameter
            this->threadFunc((void*)NULL);
        }
        //clean the function
        this->threadFunc=NULL;
        this->funcParameter=NULL;
        //return true;
        return true;
    }
    //else return false
    return false;
}
// Non-blocking liveness probe for the managed thread.
bool Thread::isAlive(){
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, 0u) == WAIT_TIMEOUT){
            //thread still alive return true
            return true;
        }
    }
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, 0u) == WAIT_TIMEOUT){
            //thread still alive return true
            return true;
        }
    }
#elif defined __linux__
    //LINUX
    if(this->threadID){
        //Then wait for the thread
        // NOTE(review): 3u is ESRCH on Linux ("no such thread") -- magic number;
        // using a terminated pthread_t here is also technically unspecified.
        if(pthread_kill(this->threadID, 0u)!=3u){
            //thread still alive return true
            return true;
        }
    }
#elif defined __APPLE__
    //APPLE
#endif
    //else return false;
    return false;
}
// Wait up to `milliseconds` for the thread to end.
// Returns true if the thread is STILL alive after the wait (i.e. it did not
// end in time); otherwise cleans up and returns false.
bool Thread::waitEnd(uint64 milliseconds){
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, milliseconds) == WAIT_TIMEOUT){
            //thread still alive then
            return true;
        }
    }
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Then wait for the thread
        if(WaitForSingleObject(threadID, milliseconds) == WAIT_TIMEOUT){
            //thread still alive then
            return true;
        }
    }
#elif defined __linux__//Linux
    //first he sleep
    usleep(milliseconds*1000);
    //test if thread still alive
    if(this->isAlive()){
        //
        return true;
    }
#elif __APPLE__
    //APPLE
#endif
    //clean
    this->cleanThread();
    //else return false;
    return false;
}
// Block until the thread terminates; returns true if there was a thread to wait on.
bool Thread::waitEnd(){
    bool ret=false;
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Then wait for the thread
        WaitForSingleObject(threadID, INFINITE);
        //then return true
        ret = true;
    }
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Then wait for the thread
        WaitForSingleObject(threadID, INFINITE);
        //then return true
        ret = true;
    }
#elif defined __linux__
    //LINUX
    if(this->threadID){
        //then wait the end of the thread
        pthread_join(this->threadID,NULL);
        //then return true
        ret = true;
    }
#elif defined __APPLE__
    //APPLE
#endif
    //clean
    this->cleanThread();
    //return true or false
    return ret;
}
// Forcibly terminate the managed thread (TerminateThread/pthread_cancel),
// then reset internal state. Returns true if a thread was killed.
bool Thread::kill(){
    bool ret = false;
    //WINDOWS 32
#ifdef WIN32
    if(this->threadID){
        //Finish the thread
        TerminateThread(this->threadID ,(DWORD)NULL );
        ret=true;
    }
    //clean ID
    this->threadID=(HANDLE)0u;
#elif defined WIN64
    //WINDOWS 64
    if(this->threadID){
        //Finish the thread
        TerminateThread(this->threadID ,(DWORD)NULL );
        ret=true;
    }
#elif defined __linux__
    //LINUX
    if(this->threadID){
        //Cancel the thread
        pthread_cancel(this->threadID);
        //pthread_attr_destroy(&attr);
        //Finish the thread
        ret=true;
    }
#endif
    //clean
    this->cleanThread();
    //return true or false
    return ret;
}
// Terminate the CALLING thread.
void Thread::killThisThread(){
    //WINDOWS 32
#ifdef WIN32
    //Finish the thread
    // NOTE(review): TerminateThread(NULL, ...) is an invalid handle on Win32;
    // ExitThread() is presumably what was intended -- confirm.
    TerminateThread(NULL ,(DWORD)NULL );
#elif defined WIN64
    //WINDOWS 64
    //Finish the thread
    TerminateThread(NULL ,(DWORD)NULL );
#elif defined __linux__
    //LINUX
    //Exit the calling thread
    pthread_exit(NULL);
#elif defined __linux__
    //APPLE
    // NOTE(review): duplicate __linux__ guard makes this APPLE branch unreachable.
    //Exit the calling thread
    pthread_exit(NULL);
#endif
}
// Intended to stop every thread; currently a stub on Windows and invalid on Linux.
void Thread::killAllThreads(){
    //WINDOWS 32
#ifdef WIN32
    /*
    //Finish the thread
    TerminateThread(NULL ,(DWORD)NULL );
    */
#elif defined WIN64
    //WINDOWS 64
    /*
    //Finish the thread
    TerminateThread(NULL ,(DWORD)NULL );
    */
#elif defined __linux__
    //LINUX
    // NOTE(review): pthread_cancel((pthread_t)NULL) targets no valid thread
    // (undefined behavior) -- this cannot cancel "all threads".
    pthread_cancel((pthread_t)NULL);
#elif defined __linux__
    //APPLE
    // NOTE(review): duplicate __linux__ guard makes this APPLE branch unreachable.
    pthread_cancel((pthread_t)NULL);
#endif
}
#if __x86_64__ || __ppc64__
//get the thread id
// 64-bit build: return the calling thread's id as uint64.
edk::uint64 Thread::getThisThreadID(){
#if WIN64
    return GetCurrentThreadId();
#elif __linux__
    return pthread_self();
#endif
}
#else
//get the thread id
// 32-bit build: return the calling thread's id as uint32.
edk::uint32 Thread::getThisThreadID(){
#if WIN32
    return GetCurrentThreadId();
#elif __linux__
    return pthread_self();
#endif
}
#endif
//return the thread core
// CPU the calling thread is currently scheduled on (always 0 on Windows).
edk::uint32 Thread::getThisThreadCore(){
#if defined(WIN32) || defined(WIN64)
    return 0;
#elif __linux__
    return sched_getcpu();
#endif
}
// Number of CPU cores detected for this machine (shared static).
edk::uint32 Thread::numberOfCores(){
    return edk::multi::Thread::cores;
}
}
int main(int argc, char *argv[]) { if (argc!=5) { fprintf(stderr, "Usage: in_file search_field id_field out_basename\n"); return 1; } char *in, *outbase; int in_len = strlen(argv[1]), out_len = strlen(argv[4]); in = malloc((in_len + 1) * sizeof(char)); if (!in) { fprintf(stderr, "failed to alloc 'in'\n"); return 1; } out_len += 1; outbase = malloc(out_len * sizeof(char)); if (!outbase) { fprintf(stderr, "failed to alloc 'outbase'\n"); return 1; } strcpy(in, argv[1]); strcpy(outbase, argv[4]); char *search_field, *id_field; int sf_len = strlen(argv[2]), idf_len = strlen(argv[3]); search_field = (char *) malloc((sf_len + 1) * sizeof(char)); if (!search_field) { fprintf(stderr, "failed to alloc 'search_field'\n"); return 1; } id_field = (char *) malloc((idf_len + 1) * sizeof(char)); if (!id_field) { fprintf(stderr, "failed to alloc 'id_field'\n"); return 1; } strcpy(search_field, argv[2]); strcpy(id_field, argv[3]); fprintf(stderr, "building indexes for %s...", in); struct chunks *chunks = NULL; struct indexes *indexes = NULL; if (!build_indexes(in, &indexes, &chunks, -1)) { fprintf(stderr, "failed to build indexes\n"); return 1; } fprintf(stderr, "done.\n"); char **out_files = malloc(sizeof(char *) * NUMCORES); if (!out_files) { fprintf(stderr, "failed to alloc out files\n"); return 1; } pthread_t *threads = malloc(sizeof(pthread_t) * NUMCORES); if (!threads) { fprintf(stderr, "failed to alloc threads\n"); return 1; } int *pt_ret = malloc(sizeof(int) * NUMCORES); if (!pt_ret) { fprintf(stderr, "failed to alloc pt_ret\n"); return 1; } struct find_field_args **args = malloc(sizeof(struct find_field_args *) * NUMCORES); if (!args) { fprintf(stderr, "failed to allocate args\n"); return 1; } char corestr[3]; int i, j; for (i=0; i<NUMCORES; i++) { sprintf(corestr, "%d", i); out_files[i] = malloc(sizeof(char) * (out_len + strlen(corestr) + 1)); if (!out_files[i]) { fprintf(stderr, "failed to alloc out file"); return 1; } strcpy(out_files[i], outbase); strcat(out_files[i], 
corestr); args[i] = malloc(sizeof(struct find_field_args)); args[i]->ioargs = malloc(sizeof(struct ioargs)); args[i]->ioargs->in_file = in; args[i]->ioargs->out_file = out_files[i]; args[i]->ioargs->chunk = &chunks[i]; args[i]->search_field = search_field; args[i]->id_field = id_field; int mb = args[i]->ioargs->chunk->size / (1024*1024); fprintf(stderr, "creating new thread[%d] to process %dMB of data\n", i, mb); pt_ret[i] = pthread_create(&threads[i], NULL, find_field, (void *) args[i]); } for (i=0; i<NUMCORES; i++) { pthread_join(threads[i], NULL); fprintf(stderr, "thread[%d] returned with status %d\n", i, pt_ret[i]); free(out_files[i]); free(args[i]->ioargs); free(args[i]); free_line_positions(chunks[i].lp); } if (indexes) { free_index(indexes->index); free_line_positions(indexes->lp); free(indexes); } free(chunks); free(out_files); free(args); free(in); free(outbase); free(search_field); free(id_field); free(pt_ret); free(threads); return 0; }
// Wait for the worker thread to finish, lazily starting it first if it
// was never run; the started flag `s` is cleared afterwards.
void join() {
    if (!s) {
        run();
    }
    pthread_join(thr, 0);
    s = 0;
}
/*
 * Tear down a sockets-provider endpoint (regular or scalable).
 * Refuses with -FI_EBUSY while any reference or rx/tx context is live;
 * otherwise frees owned contexts, signals and joins listener threads,
 * unregisters the service, and releases the endpoint.
 */
static int sock_ep_close(struct fid *fid)
{
	struct sock_ep *sock_ep;
	char c = 0;   /* one-byte wakeup token for the listener signal pipes */

	switch(fid->fclass) {
	case FI_CLASS_EP:
		sock_ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	case FI_CLASS_SEP:
		sock_ep = container_of(fid, struct sock_ep, ep.fid);
		break;
	default:
		return -FI_EINVAL;
	}

	/* Still referenced or owning active contexts: caller must retry later. */
	if (atomic_get(&sock_ep->ref) || atomic_get(&sock_ep->num_rx_ctx) ||
	    atomic_get(&sock_ep->num_tx_ctx))
		return -FI_EBUSY;

	/* Non-scalable endpoints own their single tx/rx context unless shared. */
	if (sock_ep->fclass != FI_CLASS_SEP && !sock_ep->tx_shared) {
		sock_pe_remove_tx_ctx(sock_ep->tx_array[0]);
		sock_tx_ctx_free(sock_ep->tx_array[0]);
	}
	if (sock_ep->fclass != FI_CLASS_SEP && !sock_ep->rx_shared) {
		sock_pe_remove_rx_ctx(sock_ep->rx_array[0]);
		sock_rx_ctx_free(sock_ep->rx_array[0]);
	}
	free(sock_ep->tx_array);
	free(sock_ep->rx_array);

	if (sock_ep->src_addr)
		free(sock_ep->src_addr);
	if (sock_ep->dest_addr)
		free(sock_ep->dest_addr);

	/* Connection-oriented endpoints also run a CM listener thread:
	 * clear its run flag, poke its signal pipe, then join it. */
	if (sock_ep->ep_type == FI_EP_MSG) {
		sock_ep->cm.do_listen = 0;
		if (write(sock_ep->cm.signal_fds[0], &c, 1) != 1) {
			SOCK_LOG_INFO("Failed to signal\n");
		}
		if (sock_ep->cm.listener_thread &&
		    pthread_join(sock_ep->cm.listener_thread, NULL)) {
			SOCK_LOG_ERROR("pthread join failed (%d)\n", errno);
		}
		close(sock_ep->cm.signal_fds[0]);
		close(sock_ep->cm.signal_fds[1]);
	}

	/* Stop and join the endpoint's own listener the same way. */
	sock_ep->listener.do_listen = 0;
	if (write(sock_ep->listener.signal_fds[0], &c, 1) != 1) {
		SOCK_LOG_INFO("Failed to signal\n");
	}
	if (pthread_join(sock_ep->listener.listener_thread, NULL)) {
		SOCK_LOG_ERROR("pthread join failed (%d)\n", errno);
	}
	close(sock_ep->listener.signal_fds[0]);
	close(sock_ep->listener.signal_fds[1]);

	sock_fabric_remove_service(sock_ep->domain->fab,
				   atoi(sock_ep->listener.service));

	atomic_dec(&sock_ep->domain->ref);
	free(sock_ep);
	return 0;
}
int main(int argc, char *argv[]){ int N, i, rc; str_tiratore *tiratore; void *status; pthread_attr_t attr; if(argc!=5){ printf("Numero dei parametri inseriti errato....\n"); exit(EXIT_FAILURE); } N=atoi(argv[1]); //numero appassionati di tiro con l'arco K=atoi(argv[2]); //numero dei tiri a disposizione A=atoi(argv[3]); //numero di archi I=atoi(argv[4]); //numero di freccie if((N<10)||(N>30)||(A<1)||(A>3)||(I<3)||(I>6)){ printf("NON consentito\n"); exit(-1); } tiratore=(str_tiratore *)malloc(N*sizeof(str_tiratore)); //inizializzazione semafori sem_init(&archi,0,A); //semaforo x gli archi sem_init(&freccie,0,I); //semaforo per le freccie //inizializzazione mutex pthread_mutex_init(&bersaglio, NULL); //inizializzazione dei thread pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_JOINABLE); for(i=0; i<N; i++){ tiratore[i].id=i; tiratore[i].parziale=0; tiratore[i].totale=0; rc=pthread_create(&tiratore[i].tid,&attr,routine_tiro,(void *)(&tiratore[i])); if(rc){ printf("ERRORE.....\n"); getchar(); //premere invio exit(-1);} } rc=pthread_create(&gestore,&attr,routine_gestore,NULL); if(rc){ printf("ERRORE.....\n"); getchar(); //premere invio exit(-1);} //rilascio delle risorse per i thread pthread_attr_destroy(&attr); for(i=0; i<N; i++) pthread_join(tiratore[i].tid,&status); //si attende che finiscano tutti i thread printf("\n CLASSIFICA \n"); for(i=0; i<N; i++){ printf("tiratore %d, ha totalizzato %d punti\n",tiratore[i].id,tiratore[i].totale); } //rilascio delle risorse per il mutex pthread_mutex_destroy(&bersaglio); //rilascio delle risorse per i semafori sem_destroy(&archi); sem_destroy(&freccie); pthread_exit(NULL); }
/*
 * Daemonize and persist pid.
 * Acquires the pidfile, opens the listening sockets while still able to
 * report errors to the terminal, forks into the background, configures
 * signal handling, then runs one listener thread per protocol/family and
 * blocks joining them (they normally never return).
 * Returns EXIT_SUCCESS in the parent, EXIT_FAILURE on any setup error.
 */
int daemon_start()
{
	struct sigaction sig_action;
	sigset_t sig_set;
	pid_t otherpid;
	int curPID;
	pthread_t tcp4_thread, udp4_thread;
	pthread_t tcp6_thread, udp6_thread;

	/* Check if we can acquire the pid file */
	pfh = pidfile_open(NULL, 0644, &otherpid);

	if (pfh == NULL) {
		if (errno == EEXIST) {
			errx(EXIT_FAILURE, "Daemon already running, pid: %jd.",
			    (intmax_t)otherpid);
		}
		err(EXIT_FAILURE, "Cannot open or create pidfile");
	}
	init_logger();

	/* Initialize TCP46 and UDP46 sockets (before forking, so failures
	 * still reach the caller's terminal) */
	if (init_tcp() == EXIT_FAILURE)
		return (EXIT_FAILURE);
	if (init_udp() == EXIT_FAILURE)
		return (EXIT_FAILURE);

	/* start daemonizing */
	curPID = fork();

	switch (curPID) {
	case 0:			/* This process is the child */
		break;
	case -1:		/* fork() failed, should exit */
		perror("fork");
		return (EXIT_FAILURE);
	default:		/* fork() successful, should exit
				 * (parent) */
		return (EXIT_SUCCESS);
	}

	/* we are the child, complete the daemonization */

	/* Close standard IO */
	fclose(stdin);
	fclose(stdout);
	fclose(stderr);

	/* Block unnecessary signals */
	sigemptyset(&sig_set);
	sigaddset(&sig_set, SIGCHLD);	/* ignore child - i.e. we don't need
					 * to wait for it */
	sigaddset(&sig_set, SIGTSTP);	/* ignore tty stop signals */
	sigaddset(&sig_set, SIGTTOU);	/* ignore tty background writes */
	sigaddset(&sig_set, SIGTTIN);	/* ignore tty background reads */
	sigprocmask(SIG_BLOCK, &sig_set, NULL);	/* Block the above specified
						 * signals */

	/* Catch necessary signals */
	sig_action.sa_handler = signal_handler;
	sigemptyset(&sig_action.sa_mask);
	sig_action.sa_flags = 0;

	sigaction(SIGTERM, &sig_action, NULL);
	sigaction(SIGHUP, &sig_action, NULL);
	sigaction(SIGINT, &sig_action, NULL);

	/* create new session and process group */
	setsid();

	/* persist pid */
	pidfile_write(pfh);

	/* Create TCP and UDP listener threads */
	pthread_create(&tcp4_thread, NULL, tcp4_handler, NULL);
	pthread_create(&udp4_thread, NULL, udp4_handler, NULL);
#ifdef PF_INET6
	pthread_create(&tcp6_thread, NULL, tcp6_handler, NULL);
	pthread_create(&udp6_thread, NULL, udp6_handler, NULL);
#endif

	/*
	 * Wait for threads to terminate, which normally shouldn't ever
	 * happen
	 */
	pthread_join(tcp4_thread, NULL);
	pthread_join(udp4_thread, NULL);
#ifdef PF_INET6
	pthread_join(tcp6_thread, NULL);
	pthread_join(udp6_thread, NULL);
#endif

	return (EXIT_SUCCESS);
}
/*
 * Factory-mode SIM detection test entry point.
 * Sets up the item view UI, starts a background thread that probes each SIM
 * slot and writes its result into sim->info, then polls sim->info per slot
 * (1, 2 and optionally 3 depending on build flags), counting passes.
 * Returns 0 always; the verdict is stored in sim->mod->test_result
 * (FTM_TEST_PASS only when all three slots passed / were skipped-as-pass).
 */
int sim_entry(struct ftm_param *param, void *priv)
{
    bool exit = false;   /* NOTE(review): shadows exit(3) from <stdlib.h> */
    bool evdoDtSupport = false;
    int passCount = 0;
    struct sim_factory *sim = (struct sim_factory*)priv;
    struct itemview *iv = NULL;

    LOGD(TAG "%s: Start\n", __FUNCTION__);

    strcpy(sim->info, "");
    init_text(&sim->title, param->name, COLOR_YELLOW);
    init_text(&sim->text, &sim->info[0], COLOR_YELLOW);

    /* Lazily create the item view the first time this test runs. */
    if(NULL == sim->iv) {
        iv = ui_new_itemview();
        if(!iv) {
            LOGD(TAG "No memory for item view");
            return -1;
        }
        sim->iv = iv;
    }
    iv = sim->iv;
    iv->set_title(iv, &sim->title);
    iv->set_items(iv, sim_items, 0);
    iv->set_text(iv, &sim->text);
    iv->start_menu(iv,0);
    iv->redraw(iv);

    sim->exit_thread = false;
#ifdef EVDO_DT_VIA_SUPPORT
    evdoDtSupport = true;
#endif
    /* Pick the probe thread and CCCI device node(s) for this modem config. */
    if(MTK_DT_SUPPORT && !evdoDtSupport) {
        snprintf(dev_node_1, 32, "%s", ccci_get_node_name(USR_FACTORY_SIM, MD_SYS1));
        snprintf(dev_node_2, 32, "%s", ccci_get_node_name(USR_FACTORY_SIM, MD_SYS2));
        pthread_create(&sim->update_thread, NULL, sim_update_thread_for_dualtalk, priv);
    } else {
        if (MTK_ENABLE_MD1) {
            snprintf(dev_node, 32, "%s", ccci_get_node_name(USR_FACTORY_SIM, MD_SYS1));
        } else if (MTK_ENABLE_MD2) {
            snprintf(dev_node, 32, "%s", ccci_get_node_name(USR_FACTORY_SIM, MD_SYS2));
        } else if (MTK_ENABLE_MD5){
            snprintf(dev_node, 32, "%s", ccci_get_node_name(USR_FACTORY_SIM, MD_SYS5));
        } else {
            LOGD("not open md1,md2,md5");
        }
        pthread_create(&sim->update_thread, NULL, sim_update_thread, priv);
    }

#if 0   /* disabled interactive menu loop, kept for reference */
    while(!exit) {
        int chosen = iv->run(iv, &exit);
        switch(chosen) {
        case ITEM_SIM1:
            sim->sim_id = SIM_ID_1;
            sim->test_done = false;
            exit = false;
            break;
        case ITEM_SIM2:
            sim->sim_id = SIM_ID_2;
            sim->test_done = false;
            exit = false;
            break;
        case ITEM_PASS:
        case ITEM_FAIL:
            if(ITEM_PASS == chosen) {
                sim->mod->test_result = FTM_TEST_PASS;
            } else {
                sim->mod->test_result = FTM_TEST_FAIL;
            }
            sim->exit_thread = true;
            sim->test_done = true;
            exit = true;
            break;
        default:
            sim->exit_thread = true;
            sim->test_done = true;
            exit = true;
            LOGD(TAG "DEFAULT EXIT\n");
            break;
        } // end switch(chosen)
        if(exit) {
            sim->exit_thread = true;
        }
    } // end while(!exit)
#endif

    //Detect SIM 1
    // strcpy(sim->info, "");
    memset(sim->info, 0, sizeof(sim->info) / sizeof(*(sim->info)));
    sim->sim_id = SIM_ID_1;
    sim->test_done = false;
    /* Poll until the update thread publishes a result string into sim->info.
     * NOTE(review): racy -- if the thread fills info between iterations the
     * pass may be counted on the last pass through strstr, but if it fills it
     * before the strlen() check the strstr() never runs; verify with the
     * update-thread implementation. */
    while (strlen(sim->info) == 0) {
        LOGD (TAG "detect slot 1:enter");
        LOGD (TAG "sim_entry:sim->info:%s, lenth:%d",sim->info,strlen(sim->info));
        usleep(200000);
        if (strstr(sim->info, uistr_info_pass)) {
            passCount++;
        }
    }
    LOGD(TAG "[SLOT 1]passCount = %d\n", passCount);
    LOGD (TAG "begin redraw");
    iv->redraw(iv);
    LOGD (TAG "end redraw");

#if defined(GEMINI) || defined(MTK_GEMINI_3SIM_SUPPORT)|| defined(EVDO_DT_VIA_SUPPORT) || defined(FTM_SIM_USE_USIMSMT)
    //Detect SIM 2
    // strcpy(sim->info, "");
    memset(sim->info, 0, sizeof(sim->info) / sizeof(*(sim->info)));
    sim->sim_id = SIM_ID_2;
    sim->test_done = false;
    while (strlen(sim->info) == 0) {
        LOGD (TAG "detect slot 2:enter");
        LOGD (TAG "sim_entry:sim->info:%s, lenth:%d",sim->info,strlen(sim->info));
        usleep(200000);
        if (strstr(sim->info, uistr_info_pass)) {
            passCount++;
        }
    }
    LOGD(TAG "[SLOT 2]passCount = %d\n", passCount);
    LOGD (TAG "begin redraw");
    iv->redraw(iv);
    LOGD (TAG "end redraw");
#else
    /* single-SIM build: slot 2 counts as an automatic pass */
    passCount++;
    LOGD(TAG "GEMINI is not defined, do not need to check SIM2\n");
#endif

#if defined(MTK_GEMINI_3SIM_SUPPORT)
    //Detect SIM 3
    // strcpy(sim->info, "");
    memset(sim->info, 0, sizeof(sim->info) / sizeof(*(sim->info)));
    sim->sim_id = SIM_ID_3;
    sim->test_done = false;
    while (strlen(sim->info) == 0) {
        LOGD (TAG "detect slot 3:enter");
        LOGD (TAG "sim_entry:sim->info:%s, lenth:%d",sim->info,strlen(sim->info));
        usleep(200000);
        if (strstr(sim->info, uistr_info_pass)) {
            passCount++;
        }
    }
    LOGD(TAG "[SLOT 3]passCount = %d\n", passCount);
    LOGD (TAG "begin redraw");
    iv->redraw(iv);
    LOGD (TAG "end redraw");
#else
    /* no third slot in this build: counts as an automatic pass */
    passCount++;
    LOGD(TAG "MTK_GEMINI_3SIM_SUPPORT is not defined, do not need to check SIM3\n");
#endif

    //Exit SIM detect thread
    sim->exit_thread = true;
    sim->test_done = true;
    pthread_join(sim->update_thread, NULL);

    //Check test result
    if (passCount == 3) {   //SIM1, SIM2 and SIM3 are detected.
        sim->mod->test_result = FTM_TEST_PASS;
    } else {
        sim->mod->test_result = FTM_TEST_FAIL;
    }

    LOGD(TAG "%s: End\n", __FUNCTION__);
    return 0;
}
/*
 * Worker thread for M-SVM2 Frank-Wolfe training.
 * Unpacks the shared ThreadData handoff (protected by thread_data_mutex),
 * then repeatedly: picks a random chunk, computes its kernel rows, and --
 * under model->mutex -- solves the LP step, updates alpha, and refreshes the
 * gradient (serially, or fanned out over numthreads_grad helper threads once
 * enough of the kernel matrix is cached).  Loops until convergence
 * (ratio >= accuracy), max iterations, user STOP, or switch_thread retires
 * this worker.  Exits via pthread_exit with 0 on optimum, -1 otherwise.
 */
void *MSVM2fw_train_thread(void *th_data)
{
	// Recover data
	struct ThreadData *data =  (struct ThreadData *)th_data;
	const int thread_id = data->thread_id;
	const int nprocs = data->nprocs;
	struct Model *model = data->model;
	struct KernelCache *kernelcache = data->kernelcache;
	long chunk_size = data->chunk_size;
	const double accuracy = data->accuracy;
	double **gradient = data->gradient;
	double **H_alpha = data->H_alpha;
	double **H_tilde_alpha = data->H_tilde_alpha;
	double *best_primal_upper_bound = data->best_primal_upper_bound;
	int *activeset = data->activeset;
	long *nb_SV = data->nb_SV;
	double *lp_rhs = data->lp_rhs;
	FILE *fp = data->logfile_ptr;

	pthread_mutex_unlock(&thread_data_mutex);	// Release thread_data for next thread

	// Local variables
	int do_eval;
	char yesno;
	long long return_status = -1;   // -1 until convergence is proven

	// Prepare the cache
	struct TrainingCache cache;
	cache.chunk_size = chunk_size;
	MSVM2fw_alloc_memory(&cache, model->Q, model->nb_data, chunk_size);
	cache.kc = kernelcache;
	cache.activeset = activeset;
	cache.lp_rhs = lp_rhs;

	double **delta = matrix(chunk_size, model->Q);
	double theta_opt;
	int jump = false;

	// accuracy == 0 means "train forever"; skip periodic evaluation
	if(accuracy == 0)
		do_eval = 0;
	else
		do_eval = 1;

	/*
	   Prepare parallel gradient computations:
	   - the gradient vector is split into NUMTHREADS_GRAD parts (along i)
	   - each part is updated by a different thread
	*/
	// max number of threads for gradient updates is nprocs
	pthread_t *grad_threads = (pthread_t *)malloc(sizeof(pthread_t) * nprocs);
	// start with 1 thread (main load on kernel evaluations)
	int numthreads_grad = 1;
	void *status;
	int rc;
	long k;
	struct ThreadGradient_data *grad_data = (struct ThreadGradient_data *)malloc(sizeof(struct ThreadGradient_data) * nprocs);

	// Disable parallel gradient computation for small data sets
	int parallel_gradient_update = 1;
	if(model->nb_data < 5000 || nprocs == 1)
		parallel_gradient_update = 0;

	if(parallel_gradient_update) {
		// All slots share the same matrices; only [start_i, end_i] differs.
		for(k=0;k<nprocs;k++) {
			grad_data[k].gradient = gradient;
			grad_data[k].H_alpha = H_alpha;
			grad_data[k].H_tilde_alpha = H_tilde_alpha;
			grad_data[k].cache = &cache;
			grad_data[k].model = model;
		}
		// Partition data indices [1, nb_data] evenly across the slots
		// (only slot 0 is active while numthreads_grad == 1).
		grad_data[0].start_i = 1;
		grad_data[0].end_i = model->nb_data / numthreads_grad;
		for(k=1;k<numthreads_grad-1;k++) {
			grad_data[k].start_i = grad_data[k-1].end_i + 1;
			grad_data[k].end_i = grad_data[k].start_i + model->nb_data / numthreads_grad -1;
		}
		if(numthreads_grad>1) {
			grad_data[numthreads_grad-1].start_i = grad_data[numthreads_grad-2].end_i + 1;
			grad_data[numthreads_grad-1].end_i = model->nb_data;
		}
	}

#ifdef _WIN32
	// Init POOL: Windows uses a thread pool instead of raw pthread_create
	TP_WORK ** work;
	if(parallel_gradient_update) {
		work = malloc(sizeof(TP_WORK *) * nprocs);
		for(k=0;k<nprocs;k++)
			work[k] = CreateThreadpoolWork(MSVM2fw_update_gradient_thread2, (void *) &grad_data[k], NULL);
	}
#endif

	// Switch to nprocs/4 threads for gradient update when 25% of the kernel matrix is cached
	int percentage_step = 1;
	long percentage = model->nb_data / 4;
	int next_numthreads_grad = nprocs/4;
	if(next_numthreads_grad == 0)
		next_numthreads_grad = 1;

	// Main loop
	int thread_stop = 0;
	do {
		// progress dot between full evaluations
		if((TRAIN_SMALL_STEP < TRAIN_STEP) && (model->iter%TRAIN_SMALL_STEP) == 0) {
			printf(".");
			fflush(stdout);
		}

		// Select a random chunk of data to optimize
		select_random_chunk(&cache,model);

		// Compute the kernel submatrix for this chunk
		compute_K(&cache,model);

		// Enter Critical Section (using and modifying the model)
		pthread_mutex_lock(&(model->mutex));

		jump = MSVM2fw_solve_lp(gradient, &cache, model);
		if(jump == false)
			jump = MSVM2fw_check_opt_sol(gradient,&cache,model);

		if(jump == false) {
			MSVM2fw_compute_delta(delta,&cache,model);
			theta_opt = MSVM2fw_compute_theta_opt(delta, &cache, model);
			*nb_SV += MSVM2fw_compute_new_alpha(theta_opt,&cache,model);

			if(parallel_gradient_update && numthreads_grad > 1) {
				// Update gradient in parallel
				for(k=0;k<numthreads_grad;k++) {
#ifdef _WIN32
					SubmitThreadpoolWork(work[k]);
#else
					rc = pthread_create(&grad_threads[k], NULL, MSVM2fw_update_gradient_thread, (void *) &grad_data[k]);
#endif
				}
				// Wait for gradient computations to terminate
				for(k=0;k<numthreads_grad;k++) {
#ifdef _WIN32
					WaitForThreadpoolWorkCallbacks(work[k], FALSE);
#else
					rc = pthread_join(grad_threads[k],&status);
#endif
				}
			}
			else {
				// old-style non-threaded gradient update (for small data sets)
				MSVM2fw_update_gradient(gradient,H_alpha, H_tilde_alpha, &cache,model);
			}
		}

		if((do_eval && (model->iter%TRAIN_STEP) == 0) || EVAL || STOP || (do_eval && model->ratio >= accuracy) ) {
			if(fp != NULL)
				fprintf(fp,"%ld ",model->iter);
			if(EVAL)
				printf("\n\n*** Evaluating the model at iteration %ld...\n",model->iter);

			// Evaluate how far we are in the optimization
			// (prints more info if interrutped by user)
			model->ratio = MSVM_eval(best_primal_upper_bound, gradient, H_alpha, H_tilde_alpha, model, EVAL, fp);

			print_training_info(*nb_SV, model);

			if(EVAL)	// if interrupted by user (otherwise let the ratio decide if we go on training)
			{
				printf("\n *** Do you want to continue training ([y]/n)? ");
				yesno = getchar();
				if(yesno=='n') {
					STOP = 1;
				}
				EVAL = 0; // reset interruption trigger
			}
		}

		// Release kernel submatrix in cache
		release_K(&cache);

		// Check if a sufficient % of the kernel matrix is cached
		if( parallel_gradient_update && cache.kc->max_idx >= percentage ) {
			// and switch thread to compute gradient updates instead of kernel rows if it is
			thread_stop = switch_thread(nprocs, &numthreads_grad, &next_numthreads_grad, &percentage, &percentage_step, grad_data, thread_id, model->nb_data);
			// (threads are actually stopped to leave the CPUs
			//  to other threads that will compute gradient updates)
		}

		model->iter++;

		// Release mutex: End of critical section (see below)
		pthread_mutex_unlock(&(model->mutex));

	} while(model->iter <= MSVM_TRAIN_MAXIT && (!do_eval || model->ratio < accuracy) && !STOP && !thread_stop);

#ifdef _WIN32
	if(parallel_gradient_update){
		for(k=0;k<numthreads_grad;k++)
			CloseThreadpoolWork(work[k]);
	}
#endif

	// Compute return_status
	if(do_eval && model->ratio >= accuracy)
		return_status = 0; // optimum reached.

	// Free memory
	MSVM2fw_free_memory(&cache);
	// NOTE(review): freeing delta[1] then delta matches the matrix()
	// allocator's 1-based layout -- confirm against its implementation.
	free(delta[1]);free(delta);
	free(grad_threads);
	free(grad_data);

	pthread_exit((void*)return_status);
}
int main (int argc, char **argv) { if(argc > 1) { //fprintf(stdout,"%s\n",argv[1]); int server_fd; unsigned short remote_port; remote_port = 8000; struct sockaddr_in remote_ipv4_address; memset(&remote_ipv4_address,0,sizeof(remote_ipv4_address)); remote_ipv4_address.sin_family = AF_INET; remote_ipv4_address.sin_port = htons(remote_port); inet_pton(AF_INET,argv[1],&remote_ipv4_address.sin_addr); //pause(); ssize_t receive; ssize_t total = 0; ssize_t send; char buffer[BUFFER_SIZE]; while(1) { if((server_fd = socket(PF_INET,SOCK_STREAM,0)) < 0){ fprintf(stderr,"socket create failed,%s\n",strerror(errno)); exit(1); } fprintf(stdout,"Socket create successed,server fd %d\n",server_fd); if(connect(server_fd,(struct sockaddr *)&remote_ipv4_address,sizeof(remote_ipv4_address)) < 0){ fprintf(stderr,"connect to remote server %s : %d failed,%s\n",argv[1],remote_port,strerror(errno)); close(server_fd); exit(1); } fprintf(stdout,"Connected to %s:%d success.\n",argv[1],remote_port); receive = read(STDIN_FILENO,buffer,sizeof(buffer)); if(strncmp(buffer,"ls",2) == 0){ send = write(server_fd,buffer,receive); if(send < 0){ fprintf(stderr,"send command to server failed,%s\n",strerror(errno)); exit(2); } fprintf(stdout,"Send %d bytes to server successed.\n",send); while(1) { receive = read(server_fd,buffer,BUFFER_SIZE - 1); if(receive < 0){ if(errno == EINTR) continue; fprintf(stderr,"receive data failed,%s\n",strerror(errno)); exit(3); }else if(receive == 0){ break; } write(STDOUT_FILENO,buffer,receive); total += receive; } fprintf(stdout,"Received %d bytes.\n",total); total = 0; }else if(strncmp(buffer,"cp",2) == 0){ total = 0; send = write(server_fd,buffer,receive); if(send < 0){ fprintf(stderr,"send command to server failed,%s\n",strerror(errno)); exit(2); } fprintf(stdout,"Send %d bytes to server successed.\n",send); char rbuffer[BUFFER_SIZE]; receive = read(server_fd,rbuffer,BUFFER_SIZE - 1); if(strncmp(rbuffer,"Bad",3) == 0){ write(STDOUT_FILENO,rbuffer,receive); exit(3); } 
if(strncmp(rbuffer,"File",4) == 0){ write(STDOUT_FILENO,rbuffer,receive); exit(3); } char *ptr; buffer[send - 1] = '\0'; fprintf(stdout,"%s\n",buffer); ptr = str_proc(buffer + 2); if(ptr == NULL){ fprintf(stderr,"string process failed\n"); exit(3); } char path[4096]; snprintf(path,sizeof(path),"./res/jpg/%s",ptr); fprintf(stdout,"path %s\n",path); int file_fd = open(path,O_RDWR | O_CREAT | O_TRUNC,0644); if(file_fd < 0){ fprintf(stderr,"open file %s failed,%s\n",path,strerror(errno)); exit(3); } write(file_fd,rbuffer,receive); total += receive; while((receive = read(server_fd,rbuffer,BUFFER_SIZE - 1)) != 0) { write(file_fd,rbuffer,receive); total += receive; } fprintf(stdout,"Download success.Received %d bytes.\n",total); close(file_fd); }else if(strncmp(buffer,"exit",4) == 0){ break; } close(server_fd); } } else{ if(init_fb(&fb_inf) < 0){ fprintf(stderr,"init fb failed,%s\n",strerror(errno)); exit(1); } screen_size = fb_inf.w * fb_inf.h * fb_inf.bpp / 8; fprintf(stdout,"%d\n",screen_size); int err_code; #if 1 pthread_t mou_tid; if((err_code = pthread_create(&mou_tid,NULL,mouse_ops,NULL)) != 0 ) { fprintf(stderr,"create pthread failed,%s\n",strerror(err_code)); exit(5); } #endif #if 1 char *file_desk[1024]; int pic_num = 0; if(read_jpg_dir("./res/jpg",file_desk,&pic_num) < 0){ fprintf(stderr,"read_jpg_dir failed.\n"); int pic_num = 0; if(read_jpg_dir("./res/jpg",file_desk,&pic_num) < 0){ fprintf(stderr,"read_jpg_dir failed.\n"); exit(4); } exit(4); } tranves_file_desk(file_desk,pic_num); fprintf(stdout,"sum %d\n",pic_num); #if 1 err_code = init_ft("./res/fonts/fanxinshu.TTF",36); if(err_code != 0){ fprintf(stderr,"init_ft failed\n"); exit(1); } #endif fun play_funs[DISPLAY_FUNS] = {disp_jpeg2,fang_picture_l,right_mid_left,fang_picture_h,down_in,right_in,bai_ye_chuang,up_down,left_right,rand_picture,crilepicture_big,crilepicture_small,up_mid_down,left_fix_right,Random,Box_radom,dissolve}; unsigned int index = 0; unsigned int fun_ind = 0; int flag = 1; char 
pathname[1024]; welcome_menu: rool_flag = 0; welcome_flag = 1; #if 1 err_code = init_ft("./res/fonts/fanxinshu.TTF",36); if(err_code != 0){ fprintf(stderr,"init_ft failed\n"); exit(1); } #endif pthread_mutex_lock(&mutex_lock); play_funs[0]("./res/welcome/welcome.jpg",fb_inf); display_string("自动播放",200,200,fb_inf,0x0930e); display_string("手动播放",400,400,fb_inf,0xc9112d); display_string("音乐播放",600,600,fb_inf,0xe68500); display_string("退出",950,750,fb_inf,0x9f0521); memcpy(screen_save,fb_inf.fbmem,screen_size); pthread_mutex_unlock(&mutex_lock); while(!rool_flag) sleep(1); welcome_flag = 0; while(flag == 1) { //fprintf(stdout,"%s,%d,flag %d\n",pathname,fun_ind,mouse_global_flag); #if 1 switch(mouse_global_flag) { case 0: #if 1 snprintf(pathname,sizeof(pathname),"./res/jpg/%s",file_desk[index]); pthread_mutex_lock(&mutex_lock); play_funs[fun_ind](pathname,fb_inf); init_ft("./res/fonts/fanxinshu.TTF",22); display_string(file_desk[index],10,20,fb_inf,0xaffff); memcpy(screen_save,fb_inf.fbmem,screen_size); pthread_mutex_unlock(&mutex_lock); fun_ind++; fun_ind = fun_ind % DISPLAY_FUNS; index++; index = index % pic_num; #endif sleep(1); break; case 1: index++; index = index % pic_num; snprintf(pathname,sizeof(pathname),"./res/jpg/%s",file_desk[index]); pthread_mutex_lock(&mutex_lock); play_funs[15](pathname,fb_inf); init_ft("./res/fonts/stsong.ttf",30); display_string("返回",950,750,fb_inf,0x9f0521); init_ft("./res/fonts/fanxinshu.TTF",22); display_string(file_desk[index],10,20,fb_inf,0xaffff); memcpy(screen_save,fb_inf.fbmem,screen_size); pthread_mutex_unlock(&mutex_lock); mouse_global_flag = 10; sleep(1); break; case -1: index--; index = index % pic_num; snprintf(pathname,sizeof(pathname),"./res/jpg/%s",file_desk[index]); pthread_mutex_lock(&mutex_lock); play_funs[16](pathname,fb_inf); init_ft("./res/fonts/stsong.ttf",30); display_string("返回",950,750,fb_inf,0x9f0521); init_ft("./res/fonts/fanxinshu.TTF",22); display_string(file_desk[index],10,20,fb_inf,0xaffff); 
memcpy(screen_save,fb_inf.fbmem,screen_size); pthread_mutex_unlock(&mutex_lock); mouse_global_flag = 10; sleep(1); break; case 2: goto welcome_menu; break; case 3: flag = 0; break; default: break; } #endif } pthread_mutex_lock(&mutex_lock); play_funs[9]("./res/end/end.jpg",fb_inf); init_ft("./res/fonts/fanxinshu.TTF",90); display_string("谢谢观赏",360,300,fb_inf,0xb8264a); memcpy(screen_save,fb_inf.fbmem,screen_size); pthread_mutex_unlock(&mutex_lock); pthread_mutex_destroy(&mutex_lock); pthread_cancel(mou_tid); pthread_join(mou_tid,NULL); destroy_file_desk(file_desk,pic_num); #endif if(munmap(fb_inf.fbmem,fb_inf.w * fb_inf.h * fb_inf.bpp / 8) < 0){ fprintf(stderr,"mmunmap failed,%s\n",strerror(errno)); exit(5); } } return 0; }
/*---------------------------------------------------------------------------*/ int ps_cali_entry(struct ftm_param *param, void *priv) { char *ptr; int chosen; bool exit = false; struct lps_data *dat = (struct lps_data *)priv; struct textview *tv; struct itemview *iv; struct statfs stat; int err,op; FLPLOGD(TAG "%s\n", __FUNCTION__); init_text(&dat->title, param->name, COLOR_YELLOW); init_text(&dat->text, &dat->info[0], COLOR_YELLOW); init_text(&dat->left_btn, "Fail", COLOR_YELLOW); init_text(&dat->center_btn, "Pass", COLOR_YELLOW); init_text(&dat->right_btn, "Back", COLOR_YELLOW); snprintf(dat->info, sizeof(dat->info), "初始化...\n"); dat->exit_thd = false; bUpToDate = false; if (!dat->iv) { iv = ui_new_itemview(); if (!iv) { FLPLOGD(TAG "No memory"); return -1; } dat->iv = iv; } iv = dat->iv; iv->set_title(iv, &dat->title); iv->set_items(iv, ps_cali_items, 0); iv->set_text(iv, &dat->text); pthread_create(&dat->update_thd, NULL, ps_update_iv_thread, priv); do { chosen = iv->run(iv, &exit); pthread_mutex_lock(&dat->lps.evtmutex); op = dat->lps.pending_op; pthread_mutex_unlock(&dat->lps.evtmutex); if ((chosen != ITEM_EXIT) && (op != PS_OP_NONE)) /*some OP is pending*/ continue; switch (chosen) { case ITEM_CLEAR: pthread_mutex_lock(&dat->lps.evtmutex); dat->lps.pending_op = PS_OP_CLEAR; FLPLOGD("chosen clear: %d\n", dat->lps.pending_op); pthread_mutex_unlock(&dat->lps.evtmutex); break; case ITEM_DOCALI: pthread_mutex_lock(&dat->lps.evtmutex); dat->lps.pending_op = PS_OP_CALI_PRE; dat->lps.cali_delay = 50; //50ms dat->lps.cali_num = 20; //Use 20 samples FLPLOGD("chosen DOCALI\n"); pthread_mutex_unlock(&dat->lps.evtmutex); break; case ITEM_EXIT: exit = true; break; } if (exit) { dat->exit_thd = true; break; } } while (1); pthread_join(dat->update_thd, NULL); return 0; }
ThreadLoader::~ThreadLoader() { if (!pthread_equal(_thread, pthread_self())) pthread_join(_thread, 0); }
int main (int argc, char *argv[]) { ENetAddress address; ENetEvent event; ChatContext cc, *pcc = &cc; char host_ip[32] = {0}; char peer_info[32] = {0}; int lastEvent = -1; int enableDebug = 0; int trycount = 0; int ret; memset(pcc, 0, sizeof(*pcc)); /* Initialize the ENet */ if (enet_initialize() != 0) { fprintf(stderr, "An error (%s) occured while initializing ENet.\n", strerror(errno)); return EXIT_FAILURE; } atexit(enet_deinitialize); /* Create the client host */ pcc->client = enet_host_create(NULL, SHUTTLE_MAX_CLIENT_NUM, SHUTTLE_MAX_CHANNEL_NUM, SHUTTLE_MAX_INCOMING_BANDWIDTH, SHUTTLE_MAX_OUTGOING_BANDWIDTH); if (pcc->client == NULL) { fprintf(stderr, "An error (%s) occured while trying to create an ENet client host.\n", strerror(errno)); exit(EXIT_FAILURE); } /* Connect to the server */ enet_address_set_host(&address, SHUTTLE_SERVER_HOST); address.port = SHUTTLE_SERVER_PORT; pcc->peer = enet_host_connect(pcc->client, &address, SHUTTLE_MAX_CHANNEL_NUM, 0); if (pcc->peer == NULL) { fprintf(stderr, "No available peers for initializing an ENet connection.\n"); exit(EXIT_FAILURE); } do { trycount++; printf("(Peer) Try to connect to server: the %dth tryouts.\n", trycount); if (enet_host_service(pcc->client, &event, 1000) > 0 && event.type == ENET_EVENT_TYPE_CONNECT) { /* We can send packet to server only after we have received ENET_EVENT_TYPE_CONNECT */ pcc->connected = 1; enet_address_get_host_ip(&event.peer->address, host_ip, sizeof(host_ip) - 1); snprintf(peer_info, sizeof(peer_info), "[%s:%d]", host_ip, event.peer->address.port); if (event.peer->data) free(event.peer->data); event.peer->data = malloc(strlen(peer_info) + 1); if (event.peer->data) strcpy(event.peer->data, peer_info); printf("(Peer) Connected to server (%s:%d).\n", host_ip, event.peer->address.port); } } while (trycount < 4 && !pcc->connected); if (!pcc->connected) { fprintf(stderr, "Fail to connect to server.\n"); enet_peer_reset(pcc->peer); enet_host_destroy(pcc->client); exit(EXIT_FAILURE); } 
/* We do not block the main event dispatcher */ ret = pthread_create(&pcc->thread, NULL, peer_chater, pcc); if (ret) { pcc->thread = 0; fprintf(stderr, "Fail to create thread.\n"); goto cleanup_pos; } while (1) { /* Event dispatcher: MUST not be hanged up */ int eventStatus = enet_host_service(pcc->client, &event, 10); if (eventStatus >= 0) { switch (event.type) { case ENET_EVENT_TYPE_NONE: /* Silence huge repeated NONE events */ if (lastEvent != ENET_EVENT_TYPE_NONE) { if (enableDebug) printf("(Peer) No event.\n"); } break; case ENET_EVENT_TYPE_CONNECT: /* Store any relevant client information here. */ pcc->connected = 1; enet_address_get_host_ip(&event.peer->address, host_ip, sizeof(host_ip) - 1); snprintf(peer_info, sizeof(peer_info), "[%s:%d]", host_ip, event.peer->address.port); if (event.peer->data) free(event.peer->data); event.peer->data = malloc(strlen(peer_info)); if (event.peer->data) strcpy(event.peer->data, peer_info); printf("(Peer) Connected to server (%s:%d).\n", host_ip, event.peer->address.port); break; case ENET_EVENT_TYPE_RECEIVE: if (event.channelID == SHUTTLE_CHANNEL_NOTIFY) printf("(Peer) Got a notification message : %s.\n", (char*)event.packet->data); else printf("(Peer) Got a chat message: %s.\n", (char*)event.packet->data); /* Clean up the packet now that we're done using it. */ enet_packet_destroy(event.packet); break; case ENET_EVENT_TYPE_DISCONNECT: /* A connected peer has either explicitly disconnected or timed out. */ printf("(Peer) Connection status: %d.\n", pcc->connected); if (event.peer->data) { printf("(Peer) %s is disconnected.\n", (char*)event.peer->data); free(event.peer->data); } else { /* We fail to receive CONNECT event becasue the server is down. */ enet_address_get_host_ip(&event.peer->address, host_ip, sizeof(host_ip) - 1); snprintf(peer_info, sizeof(peer_info), "[%s:%d]", host_ip, event.peer->address.port); printf("(Peer) Unknown (%s) connection is disconnected.\n", peer_info); } /* Reset the peer's information. 
*/ event.peer->data = NULL; pcc->connected = 0; lastEvent = -1; enet_peer_reset(event.peer); /* Reconnect the server */ pcc->peer = enet_host_connect(pcc->client, &address, SHUTTLE_MAX_CHANNEL_NUM, 0); if (pcc->peer == NULL) { fprintf(stderr, "No available peers for initializing an ENet connection.\n"); enet_host_destroy(pcc->client); ret = EXIT_FAILURE; goto cleanup_pos; } break; default: assert(0); break; } lastEvent = event.type; } else { fprintf(stderr, "(Peer) Something went wrong: %d.\n", eventStatus); lastEvent = -1; pcc->connected = 0; enet_peer_reset(pcc->peer); ret = eventStatus; goto cleanup_pos; } } ret = 0; cleanup_pos: /* Terminate the chater thread. */ pcc->terminated = 1; if (pcc->thread) { pthread_join(pcc->thread, NULL); pcc->thread = 0; } enet_host_destroy(pcc->client); printf("Client is terminated.\n"); return ret; }
int main () { std::ifstream ifile; ifile.open("inp-params.txt"); long long N; int ecount, dcount, l1, l2; while(!ifile.eof()) { ifile >> N >> ecount >> dcount >> l1 >> l2; } ifile.close(); LockBasedQueue * q = new LockBasedQueue (N); pthread_t enqueuer, dequeuer; //the threads struct ThreadMeta enqMeta, deqMeta; enqMeta.count = ecount; enqMeta.l = l1; enqMeta.q = q; deqMeta.count = dcount; deqMeta.l = l2; deqMeta.q = q; int check = pthread_create (&enqueuer, NULL, enqueue, (void*)&enqMeta); if (check) { std::cout << "Error in creating enqueuer" << std::endl; } int check2 = pthread_create (&dequeuer, NULL, dequeue, (void*)&deqMeta); if (check2) { std::cout << "Error in creating dequeuer" << std::endl; } pthread_join (enqueuer, NULL); pthread_join (dequeuer, NULL); std::ofstream ofile; ofile.open("output.txt"); for (std::vector<std::string>::iterator it = list.begin(); it != list.end(); ++it) { ofile << *it << std::endl; } delete q; ofile.close(); std::ofstream outfile; outfile.open("serial.txt"); while (enqlist.size() > 0 && deqlist.size() > 0) { if (enqlist[0].restime.count() < deqlist[0].invtime.count()) { outfile << enqlist[0].count + 1 << suffix(enqlist[0].count + 1) << " Enq("; if (enqlist[0].nilflag) { outfile << "nil"; }else { outfile << enqlist[0].val; } outfile << ").inv" << std::endl; outfile << enqlist[0].count + 1 << suffix(enqlist[0].count + 1) << " Enq("; if (enqlist[0].nilflag) { outfile << "nil"; } else { outfile << enqlist[0].val; } outfile << ").res" << std::endl; enqlist.erase(enqlist.begin()); } else if (deqlist[0].restime.count() < enqlist[0].invtime.count()) { outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").inv" << std::endl; outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").res" << std::endl; 
deqlist.erase(deqlist.begin()); } else { if (enqlist[0].val == deqlist[0].val) { outfile << enqlist[0].count + 1 << suffix(enqlist[0].count + 1) << " Enq("; if (enqlist[0].nilflag) { outfile << "nil"; }else { outfile << enqlist[0].val; } outfile << ").inv" << std::endl; outfile << enqlist[0].count + 1 << suffix(enqlist[0].count + 1) << " Enq("; if (enqlist[0].nilflag) { outfile << "nil"; } else { outfile << enqlist[0].val; } outfile << ").res" << std::endl; enqlist.erase(enqlist.begin()); outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").inv" << std::endl; outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").res" << std::endl; deqlist.erase(deqlist.begin()); } else { outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").inv" << std::endl; outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").res" << std::endl; deqlist.erase(deqlist.begin()); } } } if (enqlist.size() == 0) { while(deqlist.size() > 0) { outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").inv" << std::endl; outfile << deqlist[0].count + 1 << suffix(deqlist[0].count + 1) << " Deq("; if (deqlist[0].nilflag) { outfile << "nil"; } else { outfile << deqlist[0].val; } outfile << ").res" << std::endl; deqlist.erase(deqlist.begin()); } } if (deqlist.size() == 0) { while (enqlist.size() > 0) { outfile << enqlist[0].count + 1 << suffix(enqlist[0].count + 1) << " Enq("; if (enqlist[0].nilflag) { outfile << "nil"; }else { 
outfile << enqlist[0].val; } outfile << ").inv" << std::endl; outfile << enqlist[0].count + 1 << suffix(enqlist[0].count + 1) << " Enq("; if (enqlist[0].nilflag) { outfile << "nil"; } else { outfile << enqlist[0].val; } outfile << ").res" << std::endl; enqlist.erase(enqlist.begin()); } } outfile.close(); deqsum = deqsum / (dcount * 1.0); enqsum = enqsum / (ecount * 1.0); std::cout << "Average time to enqueue: " << enqsum << "useconds" << std::endl; std::cout << "Average time to dequeue: " << deqsum << "useconds" << std::endl; }
/**
 * @brief Classifies a set of test points using a set of training points.
 *
 * Runs k-nearest-neighbour classification on the Cell/B.E.: fills the global
 * control block `cb` with problem sizes and effective addresses, spawns one
 * pthread per usable SPE (each loading the cellknn_spu program), hands each
 * SPE its index via a blocking mailbox write, and joins them. The SPEs write
 * the computed labels directly into test_points_results via DMA.
 *
 * @param k The number of k nearest neighbours.
 * @param test_points The set of test points.
 * @param training_points The set of training points.
 *
 * @return An array of calculated labels for the set of test points.
 *         The element at the first position represents the calculated
 *         label of the first test points. Heap-allocated; the caller
 *         must free() it.
 */
unsigned char *classify(int k, Points<unsigned char, unsigned char> &test_points, Points<unsigned char, unsigned char> &training_points) {
    time_t start_time, end_time;
    time(&start_time);

    // Describe the problem to the SPEs through the shared control block.
    // ea_* fields are main-storage effective addresses the SPEs DMA from/to.
    cb.k = k;
    cb.values_size = training_points.getVSize();
    cb.label_size = training_points.getLSize();
    cb.training_dimension = training_points.getDimension();
    cb.training_count = training_points.getCount();
    cb.training_data_size = training_points.getCount() * training_points.getVSize();
    // How many points fit into one DMA transfer, bounded by local-store limits.
    cb.training_points_per_transfer = TRAINING_VALUES_MAX_SIZE / training_points.getVSize();
    cb.test_dimension = test_points.getDimension();
    cb.test_count = test_points.getCount();
    cb.test_data_size = test_points.getCount() * test_points.getVSize();
    cb.test_points_per_transfer = TEST_VALUES_MAX_SIZE / test_points.getVSize();
    cb.ea_training_points = (uint64_t) training_points.getValues(0);
    cb.ea_training_labels = (uint64_t) training_points.getLabel(0);
    cb.ea_test_points = (uint64_t) test_points.getValues(0);
    cb.ea_test_labels = (uint64_t) test_points.getLabel(0);

    // Output buffer the SPEs write calculated labels into.
    Points<unsigned char, unsigned char> test_points_results(test_points.getCount(), test_points.getDimension());
    cb.ea_test_labels_calculated = (uint64_t) ((char *) test_points_results.getLabel(0));

    // Use every usable SPE, capped at MAX_NUM_SPES.
    cb.num_spes = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
    if (cb.num_spes > MAX_NUM_SPES) {
        cb.num_spes = MAX_NUM_SPES;
    }

#ifdef PRINT
    printf("PPE:\t Num spes = %d\n", cb.num_spes);
#endif

    uint32_t num;
    printf("PPE:\t Start calculating\n");
    fflush(stdout);

    // create SPE context and load SPE program into the SPE context
    for (num=0; num<cb.num_spes; num++) {
        if ((data[num].spe_ctx = spe_context_create(SPE_MAP_PS |SPE_CFG_SIGNOTIFY1_OR|SPE_CFG_SIGNOTIFY2_OR, NULL))==NULL) {
            perror("Failed creating context");
            exit(1);
        }
        if (spe_program_load(data[num].spe_ctx, &cellknn_spu)) {
            perror("Failed loading program");
            exit(1);
        }
    }

    // create SPE pthreads
    for (num=0; num<cb.num_spes; num++) {
        if (pthread_create(&data[num].pthread, NULL, &spu_pthread, &data[num])) {
            perror("Failed creating thread");
            exit(1);
        }
    }

    // map SPE's MFC problem state to main storage (get effective address)
    // so SPEs can signal each other / be controlled directly.
    for (num=0; num<cb.num_spes; num++) {
        if ((cb.spu_mfc_ctl[num] = (uint64_t)spe_ps_area_get(data[num].spe_ctx, SPE_CONTROL_AREA))==0) {
            perror("Failed mapping MFC control area");
            exit(1);
        }
        if ((cb.spu_ls[num] = (uint64_t)spe_ls_area_get(data[num].spe_ctx))==0) {
            perror("Failed mapping SPU local store");
            exit(1);
        }
        if ((cb.spu_sig1[num] = (uint64_t)spe_ps_area_get(data[num].spe_ctx, SPE_SIG_NOTIFY_1_AREA))==0) {
            perror("Failed mapping Signal1 area");
            exit(1);
        }
        if ((cb.spu_sig2[num] = (uint64_t)spe_ps_area_get(data[num].spe_ctx, SPE_SIG_NOTIFY_2_AREA))==0) {
            perror("Failed mapping Signal2 area");
            exit(1);
        }
    }

    // send each SPE its number using BLOCKING mailbox write
    for (num=0; num<cb.num_spes; num++) {
        // write 1 entry to in_mailbox - we don't know if we have available
        // space so use blocking.
        // cb parameters have to be loaded (by the SPE) only after it has
        // received its local id!
        spe_in_mbox_write(data[num].spe_ctx, (uint32_t*)&num, 1, SPE_MBOX_ALL_BLOCKING);
    }

    // wait for all SPEs to complete
    for (num=0; num<cb.num_spes; num++) {
        // wait for all the SPE pthread to complete
        if (pthread_join(data[num].pthread, NULL)) {
            perror("Failed joining thread");
            exit(1);
        }
        // destroy the SPE contexts
        if (spe_context_destroy(data[num].spe_ctx)) {
            perror("Failed spe_context_destroy");
            exit(1);
        }
    }

    time(&end_time);
    double difference = difftime(end_time, start_time);
    printf("It took %.2lf seconds to calculate %d test points and %d training points\n",
            difference, cb.test_count, cb.training_count);

    // We have to create a new array, since the Points object is destroyed after this block.
    // This array has to be freed somewhere outside this function.
    unsigned char *result = (unsigned char *) malloc(test_points.getCount() * sizeof(unsigned char));
    for (int i = 0; i < test_points.getCount(); i++) {
        result[i] = test_points_results.getLabel(i)[0];
    }
    return result;
}
int main(void) { pthread_t tid1, tid2, tid3, tid4; void *res; int err; err = pthread_create(&tid1, NULL, tfn1, NULL); if (err != 0) { printf("can't create thread %s\n", strerror(err)); exit(1); } err = pthread_join(tid1, &res); if (err != 0) { printf("can't join thread %s\n", strerror(err)); exit(1); } printf("lst result: %d, %d\n", ((struct a *)res)->b, ((struct a *)res)->c); err = pthread_create(&tid2, NULL, tfn2, NULL); if (err != 0) { printf("can't create thread %s\n", strerror(err)); exit(1); } err = pthread_join(tid2, &res); if (err != 0) { printf("can't join thread %s\n", strerror(err)); exit(1); } printf("2nd result: %d, %d\n", ((struct a *)res)->b, ((struct a *)res)->c); free(res); err = pthread_create(&tid3, NULL, tfn3, NULL); if (err != 0) { printf("can't create thread %s\n", strerror(err)); exit(1); } err = pthread_join(tid3, &res); if (err != 0) { printf("can't join thread %s\n", strerror(err)); exit(1); } printf("3rd result: %d, %d\n", ((struct a *)res)->b, ((struct a *)res)->c); struct a *p; p = (struct a *)malloc(sizeof(struct a)); p->b = 10; p->c = 11; err = pthread_create(&tid4, NULL, tfn4, (void *)p); if (err != 0) { printf("can't create thread %s\n", strerror(err)); exit(1); } err = pthread_join(tid4, &res); if (err != 0) { printf("can't join thread %s\n", strerror(err)); exit(1); } printf("4th result: %d, %d\n", ((struct a *)res)->b, ((struct a *)res)->c); free(p); return 0; }
void Thread::join() { pthread_join(t_id, NULL); }
void kosh_join() { assert( kosh_exit != KE_JOINED ); pthread_join(thd, NULL); kosh_exit = KE_JOINED; }
/*
 * chitcpd_server_network_thread_func - Server thread function
 *
 * This function will spawn a connection thread (see connection.c)
 * for each new connection accepted on the daemon's network socket.
 * On shutdown (si->state == CHITCPD_STATE_STOPPING) it shuts down
 * every live connection socket, joins the connection threads, and exits.
 *
 * args: arguments (a serverinfo_t variable in network_thread_args_t)
 *
 * Returns: Nothing.
 */
void* chitcpd_server_network_thread_func(void *args)
{
    socklen_t sunSize;
    network_thread_args_t *nta;
    socket_t realsocket;
    serverinfo_t *si;
    tcpconnentry_t* connection;
    char addr_str[100];

    pthread_setname_np(pthread_self(), "network_server");

    /* Unpack arguments */
    nta = (network_thread_args_t *) args;
    si = nta->si;

    struct sockaddr_storage client_addr;

    /* Accept connections on the TCP socket */
    for(;;)
    {
        /* Accept a connection */
        sunSize = sizeof(client_addr);
        if ((realsocket = accept(si->network_socket, (struct sockaddr *)&client_addr, &sunSize)) == -1)
        {
            /* If accept() returns in the CHITCPD_STATE_STOPPING, we don't
             * care what the error is. We just break out of the loop and
             * initiate an orderly shutdown. */
            if(si->state == CHITCPD_STATE_STOPPING)
                break;

            /* If this particular connection fails, no need to kill the entire thread. */
            perror("Could not accept() connection on network socket");
            continue;
        }
        chitcp_addr_str((struct sockaddr *) &client_addr, addr_str, sizeof(addr_str));
        chilog(INFO, "TCP connection received from %s", addr_str);

        /* Check whether the connection already exists. */
        connection = chitcpd_get_connection(si, (struct sockaddr *) &client_addr);
        if (connection != NULL)
        {
            /* If this is a loopback connection, there is already an entry in
             * the connection table, but we need to update its receive socket
             * and create its connection thread. */
            if(chitcp_addr_is_loopback((struct sockaddr *) &client_addr))
            {
                connection->realsocket_recv = realsocket;
                if(chitcpd_create_connection_thread(si, connection) != CHITCP_OK)
                {
                    perror("Could not create connection thread.");
                    /* TODO: Perform orderly shutdown */
                    pthread_exit(NULL);
                }
                continue;
            }
            else
            {
                /* Otherwise, this is an error: the peer chiTCP daemon tried
                 * to create a second connection, which shouldn't happen. */
                perror("Peer chiTCP daemon tried to establish more than one connection.");
                close(realsocket);
                close(si->server_socket);
                /* TODO: Perform orderly shutdown instead of just exiting */
                pthread_exit(NULL);
            }
        }

        /* If this is not a loopback connection, we need to add an entry
         * for this connection. The same socket is used for both directions. */
        connection = chitcpd_add_connection(si, realsocket, realsocket, (struct sockaddr*) &client_addr);
        if (!connection)
        {
            perror("Could not create a connection to a peer chiTCP daemon");
            /* TODO: Perform orderly shutdown */
            pthread_exit(NULL);
        }
        if(chitcpd_create_connection_thread(si, connection) != CHITCP_OK)
        {
            perror("Could not create connection thread.");
            /* TODO: Perform orderly shutdown */
            pthread_exit(NULL);
        }
    }

    /* Close all TCP connections. This will force an exit of the
     * corresponding connection threads, which are then joined. */
    for(int i=0; i < si->connection_table_size; i++)
    {
        connection = &si->connection_table[i];
        if(!connection->available)
        {
            shutdown(connection->realsocket_recv, SHUT_RDWR);
            /* Loopback entries share one socket; don't shut it down twice. */
            if (connection->realsocket_recv != connection->realsocket_send)
                shutdown(connection->realsocket_send, SHUT_RDWR);
            pthread_join(connection->thread, NULL);
        }
    }

    pthread_exit(NULL);
}
int mono_gc_pthread_join (pthread_t thread, void **retval) { return pthread_join (thread, retval); }
int recvloop_th(int *socketds, unsigned nsockets, struct cl_engine *engine, unsigned int dboptions, const struct optstruct *opts) { int max_threads, max_queue, readtimeout, ret = 0; unsigned int options = 0; char timestr[32]; #ifndef _WIN32 struct sigaction sigact; sigset_t sigset; struct rlimit rlim; #endif mode_t old_umask; const struct optstruct *opt; char buff[BUFFSIZE + 1]; pid_t mainpid; int idletimeout; unsigned long long val; size_t i, j, rr_last = 0; pthread_t accept_th; pthread_mutex_t fds_mutex = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t recvfds_mutex = PTHREAD_MUTEX_INITIALIZER; struct acceptdata acceptdata = ACCEPTDATA_INIT(&fds_mutex, &recvfds_mutex); struct fd_data *fds = &acceptdata.recv_fds; time_t start_time, current_time; unsigned int selfchk; threadpool_t *thr_pool; #if defined(FANOTIFY) || defined(CLAMAUTH) pthread_t fan_pid; pthread_attr_t fan_attr; struct thrarg *tharg = NULL; /* shut up gcc */ #endif #ifndef _WIN32 memset(&sigact, 0, sizeof(struct sigaction)); #endif /* set up limits */ if((opt = optget(opts, "MaxScanSize"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_SCANSIZE, opt->numarg))) { logg("!cl_engine_set_num(CL_ENGINE_MAX_SCANSIZE) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_SCANSIZE, NULL); if(val) logg("Limits: Global size limit set to %llu bytes.\n", val); else logg("^Limits: Global size limit protection disabled.\n"); if((opt = optget(opts, "MaxFileSize"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_FILESIZE, opt->numarg))) { logg("!cl_engine_set_num(CL_ENGINE_MAX_FILESIZE) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_FILESIZE, NULL); if(val) logg("Limits: File size limit set to %llu bytes.\n", val); else logg("^Limits: File size limit protection disabled.\n"); #ifndef _WIN32 if(getrlimit(RLIMIT_FSIZE, &rlim) == 0) { if(rlim.rlim_cur < (rlim_t) 
cl_engine_get_num(engine, CL_ENGINE_MAX_FILESIZE, NULL)) logg("^System limit for file size is lower than engine->maxfilesize\n"); if(rlim.rlim_cur < (rlim_t) cl_engine_get_num(engine, CL_ENGINE_MAX_SCANSIZE, NULL)) logg("^System limit for file size is lower than engine->maxscansize\n"); } else { logg("^Cannot obtain resource limits for file size\n"); } #endif if((opt = optget(opts, "MaxRecursion"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_RECURSION, opt->numarg))) { logg("!cl_engine_set_num(CL_ENGINE_MAX_RECURSION) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_RECURSION, NULL); if(val) logg("Limits: Recursion level limit set to %u.\n", (unsigned int) val); else logg("^Limits: Recursion level limit protection disabled.\n"); if((opt = optget(opts, "MaxFiles"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_FILES, opt->numarg))) { logg("!cl_engine_set_num(CL_ENGINE_MAX_FILES) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_FILES, NULL); if(val) logg("Limits: Files limit set to %u.\n", (unsigned int) val); else logg("^Limits: Files limit protection disabled.\n"); #ifndef _WIN32 if (getrlimit(RLIMIT_CORE, &rlim) == 0) { logg("*Limits: Core-dump limit is %lu.\n", (unsigned long)rlim.rlim_cur); } #endif /* Engine max sizes */ if((opt = optget(opts, "MaxEmbeddedPE"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_EMBEDDEDPE, opt->numarg))) { logg("!cli_engine_set_num(CL_ENGINE_MAX_EMBEDDEDPE) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_EMBEDDEDPE, NULL); logg("Limits: MaxEmbeddedPE limit set to %llu bytes.\n", val); if((opt = optget(opts, "MaxHTMLNormalize"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_HTMLNORMALIZE, opt->numarg))) { logg("!cli_engine_set_num(CL_ENGINE_MAX_HTMLNORMALIZE) failed: 
%s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_HTMLNORMALIZE, NULL); logg("Limits: MaxHTMLNormalize limit set to %llu bytes.\n", val); if((opt = optget(opts, "MaxHTMLNoTags"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_HTMLNOTAGS, opt->numarg))) { logg("!cli_engine_set_num(CL_ENGINE_MAX_HTMLNOTAGS) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_HTMLNOTAGS, NULL); logg("Limits: MaxHTMLNoTags limit set to %llu bytes.\n", val); if((opt = optget(opts, "MaxScriptNormalize"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_SCRIPTNORMALIZE, opt->numarg))) { logg("!cli_engine_set_num(CL_ENGINE_MAX_SCRIPTNORMALIZE) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_SCRIPTNORMALIZE, NULL); logg("Limits: MaxScriptNormalize limit set to %llu bytes.\n", val); if((opt = optget(opts, "MaxZipTypeRcg"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_ZIPTYPERCG, opt->numarg))) { logg("!cli_engine_set_num(CL_ENGINE_MAX_ZIPTYPERCG) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_ZIPTYPERCG, NULL); logg("Limits: MaxZipTypeRcg limit set to %llu bytes.\n", val); if((opt = optget(opts, "MaxPartitions"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_PARTITIONS, opt->numarg))) { logg("!cli_engine_set_num(MaxPartitions) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MAX_PARTITIONS, NULL); logg("Limits: MaxPartitions limit set to %llu.\n", val); if((opt = optget(opts, "MaxIconsPE"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MAX_ICONSPE, opt->numarg))) { logg("!cli_engine_set_num(MaxIconsPE) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = 
cl_engine_get_num(engine, CL_ENGINE_MAX_ICONSPE, NULL); logg("Limits: MaxIconsPE limit set to %llu.\n", val); if((opt = optget(opts, "PCREMatchLimit"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_PCRE_MATCH_LIMIT, opt->numarg))) { logg("!cli_engine_set_num(PCREMatchLimit) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_PCRE_MATCH_LIMIT, NULL); logg("Limits: PCREMatchLimit limit set to %llu.\n", val); if((opt = optget(opts, "PCRERecMatchLimit"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_PCRE_RECMATCH_LIMIT, opt->numarg))) { logg("!cli_engine_set_num(PCRERecMatchLimit) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_PCRE_RECMATCH_LIMIT, NULL); logg("Limits: PCRERecMatchLimit limit set to %llu.\n", val); if((opt = optget(opts, "PCREMaxFileSize"))->active) { if((ret = cl_engine_set_num(engine, CL_ENGINE_PCRE_MAX_FILESIZE, opt->numarg))) { logg("!cli_engine_set_num(PCREMaxFileSize) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_PCRE_MAX_FILESIZE, NULL); logg("Limits: PCREMaxFileSize limit set to %llu.\n", val); if(optget(opts, "ScanArchive")->enabled) { logg("Archive support enabled.\n"); options |= CL_SCAN_ARCHIVE; if(optget(opts, "ArchiveBlockEncrypted")->enabled) { logg("Archive: Blocking encrypted archives.\n"); options |= CL_SCAN_BLOCKENCRYPTED; } } else { logg("Archive support disabled.\n"); } if(optget(opts, "AlgorithmicDetection")->enabled) { logg("Algorithmic detection enabled.\n"); options |= CL_SCAN_ALGORITHMIC; } else { logg("Algorithmic detection disabled.\n"); } if(optget(opts, "ScanPE")->enabled) { logg("Portable Executable support enabled.\n"); options |= CL_SCAN_PE; } else { logg("Portable Executable support disabled.\n"); } if(optget(opts, "ScanELF")->enabled) { logg("ELF support enabled.\n"); options |= CL_SCAN_ELF; } else { 
logg("ELF support disabled.\n"); } if(optget(opts, "ScanPE")->enabled || optget(opts, "ScanELF")->enabled) { if(optget(opts, "DetectBrokenExecutables")->enabled) { logg("Detection of broken executables enabled.\n"); options |= CL_SCAN_BLOCKBROKEN; } } if(optget(opts, "ScanMail")->enabled) { logg("Mail files support enabled.\n"); options |= CL_SCAN_MAIL; if(optget(opts, "ScanPartialMessages")->enabled) { logg("Mail: RFC1341 handling enabled.\n"); options |= CL_SCAN_PARTIAL_MESSAGE; } } else { logg("Mail files support disabled.\n"); } if(optget(opts, "ScanOLE2")->enabled) { logg("OLE2 support enabled.\n"); options |= CL_SCAN_OLE2; if(optget(opts, "OLE2BlockMacros")->enabled) { logg("OLE2: Blocking all VBA macros.\n"); options |= CL_SCAN_BLOCKMACROS; } } else { logg("OLE2 support disabled.\n"); } if(optget(opts, "ScanPDF")->enabled) { logg("PDF support enabled.\n"); options |= CL_SCAN_PDF; } else { logg("PDF support disabled.\n"); } if(optget(opts, "ScanSWF")->enabled) { logg("SWF support enabled.\n"); options |= CL_SCAN_SWF; } else { logg("SWF support disabled.\n"); } if(optget(opts, "ScanHTML")->enabled) { logg("HTML support enabled.\n"); options |= CL_SCAN_HTML; } else { logg("HTML support disabled.\n"); } if(optget(opts,"PhishingScanURLs")->enabled) { if(optget(opts,"PhishingAlwaysBlockCloak")->enabled) { options |= CL_SCAN_PHISHING_BLOCKCLOAK; logg("Phishing: Always checking for cloaked urls\n"); } if(optget(opts,"PhishingAlwaysBlockSSLMismatch")->enabled) { options |= CL_SCAN_PHISHING_BLOCKSSL; logg("Phishing: Always checking for ssl mismatches\n"); } } if(optget(opts,"PartitionIntersection")->enabled) { options |= CL_SCAN_PARTITION_INTXN; logg("Raw DMG: Always checking for partitons intersections\n"); } if(optget(opts,"HeuristicScanPrecedence")->enabled) { options |= CL_SCAN_HEURISTIC_PRECEDENCE; logg("Heuristic: precedence enabled\n"); } if(optget(opts, "StructuredDataDetection")->enabled) { options |= CL_SCAN_STRUCTURED; if((opt = optget(opts, 
"StructuredMinCreditCardCount"))->enabled) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MIN_CC_COUNT, opt->numarg))) { logg("!cl_engine_set_num(CL_ENGINE_MIN_CC_COUNT) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MIN_CC_COUNT, NULL); logg("Structured: Minimum Credit Card Number Count set to %u\n", (unsigned int) val); if((opt = optget(opts, "StructuredMinSSNCount"))->enabled) { if((ret = cl_engine_set_num(engine, CL_ENGINE_MIN_SSN_COUNT, opt->numarg))) { logg("!cl_engine_set_num(CL_ENGINE_MIN_SSN_COUNT) failed: %s\n", cl_strerror(ret)); cl_engine_free(engine); return 1; } } val = cl_engine_get_num(engine, CL_ENGINE_MIN_SSN_COUNT, NULL); logg("Structured: Minimum Social Security Number Count set to %u\n", (unsigned int) val); if(optget(opts, "StructuredSSNFormatNormal")->enabled) options |= CL_SCAN_STRUCTURED_SSN_NORMAL; if(optget(opts, "StructuredSSNFormatStripped")->enabled) options |= CL_SCAN_STRUCTURED_SSN_STRIPPED; } #ifdef HAVE__INTERNAL__SHA_COLLECT if(optget(opts, "DevCollectHashes")->enabled) options |= CL_SCAN_INTERNAL_COLLECT_SHA; #endif selfchk = optget(opts, "SelfCheck")->numarg; if(!selfchk) { logg("Self checking disabled.\n"); } else { logg("Self checking every %u seconds.\n", selfchk); } /* save the PID */ mainpid = getpid(); if((opt = optget(opts, "PidFile"))->enabled) { FILE *fd; old_umask = umask(0002); if((fd = fopen(opt->strarg, "w")) == NULL) { logg("!Can't save PID in file %s\n", opt->strarg); } else { if (fprintf(fd, "%u\n", (unsigned int) mainpid)<0) { logg("!Can't save PID in file %s\n", opt->strarg); } fclose(fd); } umask(old_umask); } logg("*Listening daemon: PID: %u\n", (unsigned int) mainpid); max_threads = optget(opts, "MaxThreads")->numarg; max_queue = optget(opts, "MaxQueue")->numarg; acceptdata.commandtimeout = optget(opts, "CommandReadTimeout")->numarg; readtimeout = optget(opts, "ReadTimeout")->numarg; #if !defined(_WIN32) && defined(RLIMIT_NOFILE) if 
(getrlimit(RLIMIT_NOFILE, &rlim) == 0) { /* don't warn if default value is too high, silently fix it */ unsigned maxrec; int max_max_queue; unsigned warn = optget(opts, "MaxQueue")->active; const unsigned clamdfiles = 6; /* Condition to not run out of file descriptors: * MaxThreads * MaxRecursion + (MaxQueue - MaxThreads) + CLAMDFILES < RLIMIT_NOFILE * CLAMDFILES is 6: 3 standard FD + logfile + 2 FD for reloading the DB * */ #ifdef C_SOLARIS #ifdef HAVE_ENABLE_EXTENDED_FILE_STDIO if (enable_extended_FILE_stdio(-1, -1) == -1) { logg("^Unable to set extended FILE stdio, clamd will be limited to max 256 open files\n"); rlim.rlim_cur = rlim.rlim_cur > 255 ? 255 : rlim.rlim_cur; } #elif !defined(_LP64) if (rlim.rlim_cur > 255) { rlim.rlim_cur = 255; logg("^Solaris only supports 256 open files for 32-bit processes, you need at least Solaris 10u4, or compile as 64-bit to support more!\n"); } #endif #endif opt = optget(opts,"MaxRecursion"); maxrec = opt->numarg; max_max_queue = rlim.rlim_cur - maxrec * max_threads - clamdfiles + max_threads; if (max_queue < max_threads) { max_queue = max_threads; if (warn) logg("^MaxQueue value too low, increasing to: %d\n", max_queue); } if (max_max_queue < max_threads) { logg("^MaxThreads * MaxRecursion is too high: %d, open file descriptor limit is: %lu\n", maxrec*max_threads, (unsigned long)rlim.rlim_cur); max_max_queue = max_threads; } if (max_queue > max_max_queue) { max_queue = max_max_queue; if (warn) logg("^MaxQueue value too high, lowering to: %d\n", max_queue); } else if (max_queue < 2*max_threads && max_queue < max_max_queue) { max_queue = 2*max_threads; if (max_queue > max_max_queue) max_queue = max_max_queue; /* always warn here */ logg("^MaxQueue is lower than twice MaxThreads, increasing to: %d\n", max_queue); } } #endif logg("*MaxQueue set to: %d\n", max_queue); acceptdata.max_queue = max_queue; if(optget(opts, "ScanOnAccess")->enabled) #if defined(FANOTIFY) || defined(CLAMAUTH) { do { if(pthread_attr_init(&fan_attr)) 
break; pthread_attr_setdetachstate(&fan_attr, PTHREAD_CREATE_JOINABLE); if(!(tharg = (struct thrarg *) malloc(sizeof(struct thrarg)))) break; tharg->opts = opts; tharg->engine = engine; tharg->options = options; if(!pthread_create(&fan_pid, &fan_attr, onas_fan_th, tharg)) break; free(tharg); tharg=NULL; } while(0); if (!tharg) logg("!Unable to start on-access scan\n"); } #else logg("!On-access scan is not available\n"); #endif #ifndef _WIN32 /* set up signal handling */ sigfillset(&sigset); sigdelset(&sigset, SIGINT); sigdelset(&sigset, SIGTERM); sigdelset(&sigset, SIGSEGV); sigdelset(&sigset, SIGHUP); sigdelset(&sigset, SIGPIPE); sigdelset(&sigset, SIGUSR2); /* The behavior of a process is undefined after it ignores a * SIGFPE, SIGILL, SIGSEGV, or SIGBUS signal */ sigdelset(&sigset, SIGFPE); sigdelset(&sigset, SIGILL); sigdelset(&sigset, SIGSEGV); #ifdef SIGBUS sigdelset(&sigset, SIGBUS); #endif sigdelset(&sigset, SIGTSTP); sigdelset(&sigset, SIGCONT); sigprocmask(SIG_SETMASK, &sigset, NULL); /* SIGINT, SIGTERM, SIGSEGV */ sigact.sa_handler = sighandler_th; sigemptyset(&sigact.sa_mask); sigaddset(&sigact.sa_mask, SIGINT); sigaddset(&sigact.sa_mask, SIGTERM); sigaddset(&sigact.sa_mask, SIGHUP); sigaddset(&sigact.sa_mask, SIGPIPE); sigaddset(&sigact.sa_mask, SIGUSR2); sigaction(SIGINT, &sigact, NULL); sigaction(SIGTERM, &sigact, NULL); sigaction(SIGHUP, &sigact, NULL); sigaction(SIGPIPE, &sigact, NULL); sigaction(SIGUSR2, &sigact, NULL); #endif idletimeout = optget(opts, "IdleTimeout")->numarg; for (i=0;i < nsockets;i++) if (fds_add(&acceptdata.fds, socketds[i], 1, 0) == -1) { logg("!fds_add failed\n"); cl_engine_free(engine); return 1; } #ifdef _WIN32 event_wake_accept = CreateEvent(NULL, TRUE, FALSE, NULL); event_wake_recv = CreateEvent(NULL, TRUE, FALSE, NULL); #else if (pipe(acceptdata.syncpipe_wake_recv) == -1 || (pipe(acceptdata.syncpipe_wake_accept) == -1)) { logg("!pipe failed\n"); exit(-1); } syncpipe_wake_recv_w = acceptdata.syncpipe_wake_recv[1]; if 
(fds_add(fds, acceptdata.syncpipe_wake_recv[0], 1, 0) == -1 || fds_add(&acceptdata.fds, acceptdata.syncpipe_wake_accept[0], 1, 0)) { logg("!failed to add pipe fd\n"); exit(-1); } #endif if ((thr_pool = thrmgr_new(max_threads, idletimeout, max_queue, scanner_thread)) == NULL) { logg("!thrmgr_new failed\n"); exit(-1); } if (pthread_create(&accept_th, NULL, acceptloop_th, &acceptdata)) { logg("!pthread_create failed\n"); exit(-1); } time(&start_time); for(;;) { int new_sd; /* Block waiting for connection on any of the sockets */ pthread_mutex_lock(fds->buf_mutex); fds_cleanup(fds); /* signal that we can accept more connections */ if (fds->nfds <= (unsigned)max_queue) pthread_cond_signal(&acceptdata.cond_nfds); new_sd = fds_poll_recv(fds, selfchk ? (int)selfchk : -1, 1, event_wake_recv); #ifdef _WIN32 ResetEvent(event_wake_recv); #else if (!fds->nfds) { /* at least the dummy/sync pipe should have remained */ logg("!All recv() descriptors gone: fatal\n"); pthread_mutex_lock(&exit_mutex); progexit = 1; pthread_mutex_unlock(&exit_mutex); pthread_mutex_unlock(fds->buf_mutex); break; } #endif if (new_sd == -1 && errno != EINTR) { logg("!Failed to poll sockets, fatal\n"); pthread_mutex_lock(&exit_mutex); progexit = 1; pthread_mutex_unlock(&exit_mutex); } if(fds->nfds) i = (rr_last + 1) % fds->nfds; for (j = 0; j < fds->nfds && new_sd >= 0; j++, i = (i+1) % fds->nfds) { size_t pos = 0; int error = 0; struct fd_buf *buf = &fds->buf[i]; if (!buf->got_newdata) continue; #ifndef _WIN32 if (buf->fd == acceptdata.syncpipe_wake_recv[0]) { /* dummy sync pipe, just to wake us */ if (read(buf->fd, buff, sizeof(buff)) < 0) { logg("^Syncpipe read failed\n"); } continue; } #endif if (buf->got_newdata == -1) { if (buf->mode == MODE_WAITREPLY) { logg("$mode WAIT_REPLY -> closed\n"); buf->fd = -1; thrmgr_group_terminate(buf->group); thrmgr_group_finished(buf->group, EXIT_ERROR); continue; } else { logg("$client read error or EOF on read\n"); error = 1; } } if (buf->fd != -1 && 
buf->got_newdata == -2) { logg("$Client read timed out\n"); mdprintf(buf->fd, "COMMAND READ TIMED OUT\n"); error = 1; } rr_last = i; if (buf->mode == MODE_WAITANCILL) { buf->mode = MODE_COMMAND; logg("$mode -> MODE_COMMAND\n"); } while (!error && buf->fd != -1 && buf->buffer && pos < buf->off && buf->mode != MODE_WAITANCILL) { client_conn_t conn; const char *cmd = NULL; int rc; /* New data available to read on socket. */ memset(&conn, 0, sizeof(conn)); conn.scanfd = buf->recvfd; buf->recvfd = -1; conn.sd = buf->fd; conn.options = options; conn.opts = opts; conn.thrpool = thr_pool; conn.engine = engine; conn.group = buf->group; conn.id = buf->id; conn.quota = buf->quota; conn.filename = buf->dumpname; conn.mode = buf->mode; conn.term = buf->term; /* Parse & dispatch command */ cmd = parse_dispatch_cmd(&conn, buf, &pos, &error, opts, readtimeout); if (conn.mode == MODE_COMMAND && !cmd) break; if (!error) { if (buf->mode == MODE_WAITREPLY && buf->off) { /* Client is not supposed to send anything more */ logg("^Client sent garbage after last command: %lu bytes\n", (unsigned long)buf->off); buf->buffer[buf->off] = '\0'; logg("$Garbage: %s\n", buf->buffer); error = 1; } else if (buf->mode == MODE_STREAM) { rc = handle_stream(&conn, buf, opts, &error, &pos, readtimeout); if (rc == -1) break; else continue; } } if (error && error != CL_ETIMEOUT) { conn_reply_error(&conn, "Error processing command."); } } if (error) { if (buf->dumpfd != -1) { close(buf->dumpfd); if (buf->dumpname) { cli_unlink(buf->dumpname); free(buf->dumpname); } buf->dumpfd = -1; } thrmgr_group_terminate(buf->group); if (thrmgr_group_finished(buf->group, EXIT_ERROR)) { if (buf->fd < 0) { logg("$Skipping shutdown of bad socket after error (FD %d)\n", buf->fd); } else { logg("$Shutting down socket after error (FD %d)\n", buf->fd); shutdown(buf->fd, 2); closesocket(buf->fd); } } else logg("$Socket not shut down due to active tasks\n"); buf->fd = -1; } } pthread_mutex_unlock(fds->buf_mutex); /* handle 
progexit */ pthread_mutex_lock(&exit_mutex); if (progexit) { pthread_mutex_unlock(&exit_mutex); pthread_mutex_lock(fds->buf_mutex); for (i=0;i < fds->nfds; i++) { if (fds->buf[i].fd == -1) continue; thrmgr_group_terminate(fds->buf[i].group); if (thrmgr_group_finished(fds->buf[i].group, EXIT_ERROR)) { logg("$Shutdown closed fd %d\n", fds->buf[i].fd); shutdown(fds->buf[i].fd, 2); closesocket(fds->buf[i].fd); fds->buf[i].fd = -1; } } pthread_mutex_unlock(fds->buf_mutex); break; } pthread_mutex_unlock(&exit_mutex); /* SIGHUP */ if (sighup) { logg("SIGHUP caught: re-opening log file.\n"); logg_close(); sighup = 0; if(!logg_file && (opt = optget(opts, "LogFile"))->enabled) logg_file = opt->strarg; } /* SelfCheck */ if(selfchk) { time(¤t_time); if((current_time - start_time) >= (time_t)selfchk) { if(reload_db(engine, dboptions, opts, TRUE, &ret)) { pthread_mutex_lock(&reload_mutex); reload = 1; pthread_mutex_unlock(&reload_mutex); } time(&start_time); } } /* DB reload */ pthread_mutex_lock(&reload_mutex); if(reload) { pthread_mutex_unlock(&reload_mutex); engine = reload_db(engine, dboptions, opts, FALSE, &ret); if(ret) { logg("Terminating because of a fatal error.\n"); if(new_sd >= 0) closesocket(new_sd); break; } pthread_mutex_lock(&reload_mutex); reload = 0; time(&reloaded_time); pthread_mutex_unlock(&reload_mutex); #if defined(FANOTIFY) || defined(CLAMAUTH) if(optget(opts, "ScanOnAccess")->enabled && tharg) { tharg->engine = engine; } #endif time(&start_time); } else { pthread_mutex_unlock(&reload_mutex); } } pthread_mutex_lock(&exit_mutex); progexit = 1; pthread_mutex_unlock(&exit_mutex); #ifdef _WIN32 SetEvent(event_wake_accept); #else if (write(acceptdata.syncpipe_wake_accept[1], "", 1) < 0) { logg("^Write to syncpipe failed\n"); } #endif /* Destroy the thread manager. 
* This waits for all current tasks to end */ logg("*Waiting for all threads to finish\n"); thrmgr_destroy(thr_pool); #if defined(FANOTIFY) || defined(CLAMAUTH) if(optget(opts, "ScanOnAccess")->enabled && tharg) { logg("Stopping on-access scan\n"); pthread_mutex_lock(&logg_mutex); pthread_kill(fan_pid, SIGUSR1); pthread_mutex_unlock(&logg_mutex); pthread_join(fan_pid, NULL); free(tharg); } #endif if(engine) { thrmgr_setactiveengine(NULL); cl_engine_free(engine); } pthread_join(accept_th, NULL); fds_free(fds); pthread_mutex_destroy(fds->buf_mutex); pthread_cond_destroy(&acceptdata.cond_nfds); #ifdef _WIN32 CloseHandle(event_wake_accept); CloseHandle(event_wake_recv); #else close(acceptdata.syncpipe_wake_accept[1]); close(acceptdata.syncpipe_wake_recv[1]); #endif if(dbstat.entries) cl_statfree(&dbstat); logg("*Shutting down the main socket%s.\n", (nsockets > 1) ? "s" : ""); for (i = 0; i < nsockets; i++) shutdown(socketds[i], 2); if((opt = optget(opts, "PidFile"))->enabled) { if(unlink(opt->strarg) == -1) logg("!Can't unlink the pid file %s\n", opt->strarg); else logg("Pid file removed.\n"); } time(¤t_time); logg("--- Stopped at %s", cli_ctime(¤t_time, timestr, sizeof(timestr))); return ret; }
int main(int argc, char *argv[]) { if (argc != 5) { printf("You should input five arguments: TCP/UDP, server ip address, buffer size and number of threads.\n"); return(1); } //Get parameters from the input. char *server_ip = argv[2]; int buffer_size = atoi(argv[3]); int num_thr = atoi(argv[4]); if (buffer_size == 65536) { buffer_size = 65507; } //Define and initialize the socket related parameters. struct sockaddr_in server_addr; server_addr.sin_family = AF_INET; server_addr.sin_addr.s_addr = inet_addr(server_ip); server_addr.sin_port = htons(4567); //Initializing threads and assign related parameters. struct thread_arg *thr_arg = (struct thread_arg *)malloc(sizeof(struct thread_arg)); thr_arg -> server_addr = server_addr; thr_arg -> buf_size = buffer_size; pthread_t threads[num_thr]; //Define the start time and end time. int i = 0; struct timeval start_time, end_time; //Start executing communication using multi-threads, get the start time and end time. gettimeofday(&start_time, NULL); if (strcmp(argv[1], "TCP") == 0) { for (i = 0; i < num_thr; ++i) { pthread_create(&threads[i], NULL, tcpClient, thr_arg); } for (i = 0; i < num_thr; ++i) { pthread_join(threads[i], NULL); } } else { for (i = 0; i < num_thr; ++i) { pthread_create(&threads[i], NULL, udpClient, thr_arg); } for (i = 0; i < num_thr; ++i) { pthread_join(threads[i], NULL); } } gettimeofday(&end_time, NULL); //Calculate the execte time and throughput. double execute_time = (1000.0 * (end_time.tv_sec - start_time.tv_sec) + (end_time.tv_usec - start_time.tv_usec) / 1000.0); double throughput = (num_thr * LOOPS * buffer_size / (1024.0 * 1024.0)) / (execute_time / 1000.0); double latency = execute_time / (num_thr * LOOPS * buffer_size); printf("%d threads: the latency is %10.9f ms and the throughput is %10f MB/S\n", num_thr, latency, throughput); return 0; }