// Destructor: drains pending work, stops and joins all worker threads,
// releases the command queue slots and the current allocation block, and
// (on profiled Windows builds) pops up a report of where pipeline time went.
PixelPipeline::~PixelPipeline() {
	// Block until every queued command has been consumed before tearing down.
	wait_for_workers();
#if defined(WIN32) && defined(PROFILE_PIPELINE)
	profiler.end_time = __rdtsc();
#endif
	// Tell workers to exit their run loops, then join each thread.
	event_stop.set();
	for (std::vector<Thread>::size_type i = 0; i < worker_threads.size(); i++)
		worker_threads[i].join();
	// Free and null out every slot of the fixed-size command queue.
	for (size_t i = 0; i < queue_max; i++)
	{
		delete command_queue[i];
		command_queue[i] = 0;
	}
	// cur_block is raw char[] storage holding a refcounted block (see the
	// matching cast on delete[]); only free it when this pipeline holds the
	// last reference.  NOTE(review): refcount is read without synchronization
	// here -- presumably workers are all stopped by now; confirm.
	if (cur_block && cur_block->refcount == 1)
		delete[] (char*) cur_block;
#if defined(WIN32) && defined(PROFILE_PIPELINE)
	// Report each profiled phase as a percentage of total run time.
	MessageBoxA(0, cl_format("Queue = %1\r\nSetEvent = %2\r\nWaitForWorkers = %3\r\nWaitForSpace = %4\r\nAllocFree = %5",
		(int)(profiler.queue_time*100/(profiler.end_time-profiler.start_time)),
		(int)(profiler.set_event_time*100/(profiler.end_time-profiler.start_time)),
		(int)(profiler.wait_for_workers_time*100/(profiler.end_time-profiler.start_time)),
		(int)(profiler.wait_for_space_time*100/(profiler.end_time-profiler.start_time)),
		(int)(profiler.alloc_time*100/(profiler.end_time-profiler.start_time))).c_str(), "DEBUG", MB_OK);
#endif
}
/*
 * Signal handler.
 *
 * Runs in the parent on a terminating signal: un-registers the RPC
 * services, and -- when worker children were forked -- broadcasts
 * SIGTERM to the whole process group and reaps the children before
 * removing lock files.  The final xlog(L_FATAL, ...) logs and
 * presumably terminates the process (there is no exit() here --
 * confirm against xlog's implementation).
 *
 * NOTE(review): the functions called here (xlog, wait_for_workers,
 * unregister_services, ...) are unlikely to be async-signal-safe;
 * tolerable only because the process is exiting anyway -- verify.
 */
static void killer (int sig)
{
	unregister_services();
	if (num_threads > 1) {
		/* play Kronos and eat our children */
		/* kill(0, ...) signals every process in our process group,
		 * including the forked workers (and ourselves, though this
		 * handler is already running). */
		kill(0, SIGTERM);
		wait_for_workers();
	}
	cleanup_lockfiles();
	xlog (L_FATAL, "Caught signal %d, un-registering and exiting.", sig);
}
/*
 * Fork num_threads worker children and wait for them.
 *
 * Each child resets its signal dispositions and returns to the caller
 * (falling into my_svc_run there).  The parent never returns: it blocks
 * until every worker has exited, tears down registrations and lock
 * files, and exits.
 */
static void fork_workers(void)
{
	int worker;
	pid_t child;

	xlog(L_NOTICE, "mountd: starting %d threads\n", num_threads);

	for (worker = 0; worker < num_threads; worker++) {
		child = fork();
		if (child < 0)
			xlog(L_FATAL, "mountd: cannot fork: %s\n", strerror(errno));
		if (child != 0)
			continue;	/* parent: go spawn the next worker */

		/* Worker child: re-enable the default action on SIGTERM et
		 * al so that workers die naturally when sent them.  Only the
		 * parent unregisters with pmap and hence needs to do special
		 * SIGTERM handling. */
		{
			struct sigaction sa;
			sa.sa_handler = SIG_DFL;
			sa.sa_flags = 0;
			sigemptyset(&sa.sa_mask);
			sigaction(SIGHUP, &sa, NULL);
			sigaction(SIGINT, &sa, NULL);
			sigaction(SIGTERM, &sa, NULL);
		}
		/* fall into my_svc_run in caller */
		return;
	}

	/* in parent: reap all workers, then shut the service down */
	wait_for_workers();
	unregister_services();
	cleanup_lockfiles();
	xlog(L_NOTICE, "mountd: no more workers, exiting\n");
	exit(0);
}
int main(int argc, char* argv[]) { app_params p = {0}; parse_args(argc, argv, &p); struct rte_ring *tx_rings[p.nb_rx_queue]; rx_worker_params rx_params[p.nb_rx_workers]; tx_worker_params tx_params[p.nb_tx_workers]; // initialize kaf_init(p.nb_tx_workers, p.kafka_topic, p.kafka_config_path, p.kafka_stats_path); init_receive(p.enabled_port_mask, p.nb_rx_queue, p.nb_rx_desc); init_transmit(tx_rings, p.nb_rx_queue, p.tx_ring_size); // start receive and transmit workers start_workers(rx_params, tx_params, tx_rings, &p); monitor_workers(rx_params, p.nb_rx_workers, tx_params, p.nb_tx_workers); wait_for_workers(); // clean up kaf_close(); return 0; }
/*
 * Timed wrapper around wait_for_workers(): brackets the call with the
 * project's BENCH* instrumentation macros, starting the clock against
 * slot BS-2 and accumulating the elapsed time into slot BS-1.
 * NOTE(review): BENCHDECLARE/BENCHBEGIN/BENCHADD and BS are defined
 * elsewhere -- the slot arithmetic here is presumed, confirm against
 * the macro definitions.
 */
static void wait_for_workers_timeit()
{
	BENCHDECLARE
	BENCHBEGIN(BS-2)
	wait_for_workers();
	BENCHADD(BS-1)
}