// Front-end pump: shuttles messages between the ROUTER and DEALER sockets.
// zmq_proxy() blocks and only returns once the owning context is torn down,
// at which point we log the shutdown.
void PoolFrontend::ProxyLoop()
{
    zmq_proxy(mRouter, mDealer, 0);
    printf("PoolFrontend shutdown.\n");
}
int main (void) { void *context = zmq_ctx_new (); // Socket to talk to clients void *clients = zmq_socket (context, ZMQ_ROUTER); zmq_bind (clients, "tcp://*:5555"); // Socket to talk to workers void *workers = zmq_socket (context, ZMQ_DEALER); zmq_bind (workers, "inproc://workers"); // Launch pool of worker threads int thread_nbr; for (thread_nbr = 0; thread_nbr < 5; thread_nbr++) { pthread_t worker; pthread_create (&worker, NULL, worker_routine, context); } // Connect work threads to client threads via a queue proxy zmq_proxy (clients, workers, NULL); // We never get here but clean up anyhow zmq_close (clients); zmq_close (workers); zmq_ctx_destroy (context); return 0; }
int main() { void *context = zmq_ctx_new(); // Socket to talk to clients void *clients = zmq_socket(context, ZMQ_ROUTER); zmq_bind(clients, "tcp://*:5555"); // Socket to talk to workers void *workers = zmq_socket(context, ZMQ_DEALER); zmq_bind(workers, "inproc://workers"); // Launch pool of worker threads const int THREAD_NUM = 5; int thread_nbr; WorkerThread *threads[THREAD_NUM]; for (thread_nbr = 0; thread_nbr < THREAD_NUM; ++thread_nbr) { threads[thread_nbr] = new WorkerThread(thread_nbr, context); threads[thread_nbr]->run(); } zmq_proxy(clients, workers, NULL); for (thread_nbr = 0; thread_nbr < THREAD_NUM; ++thread_nbr) { delete threads[thread_nbr]; } zmq_close(clients); zmq_close(workers); zmq_ctx_destroy(context); return 0; }
// Reply broker: binds a ROUTER for clients on tcp://*:5555 and a DEALER for
// workers on inproc://workers, spawns argv[1] worker threads running
// `routine`, then proxies between the two sockets.
int main(int argc, char* argv[])
{
    void* ctx = zmq_ctx_new();
    void* rep = zmq_socket(ctx, ZMQ_ROUTER);
    void* workers = zmq_socket(ctx, ZMQ_DEALER);
    zmq_bind(rep, "tcp://*:5555");
    zmq_bind(workers, "inproc://workers");
    fprintf(stdout, "reply server init success ...\n");

    do {
        // The thread count is mandatory; bail out of the do/while on error
        // (skips worker creation and the proxy, falls through to cleanup).
        if (argc < 2) {
            fprintf(stderr, "arguments error ...\n");
            break;
        }
        int thread_num = atoi(argv[1]);
        for (int n = 0; n < thread_num; ++n) {
            pthread_t pid;
            pthread_create(&pid, NULL, routine, ctx);
        }
        // Blocks until the context is terminated.
        zmq_proxy(rep, workers, NULL);
    } while (0);

    zmq_close(workers);
    zmq_close(rep);
    zmq_ctx_destroy(ctx);
    return 0;
}
void *server_task (void *server_args) { // Frontend socket talks to clients over TCP zctx_t *ctx = zctx_new (); void *frontend = zsocket_new(ctx, ZMQ_ROUTER); char str[20]; strcpy(str, "tcp://*:"); strcat(str, PORT); zsocket_bind(frontend, str); // Backend socket talks to workers over inproc void *backend = zsocket_new (ctx, ZMQ_DEALER); zsocket_bind (backend, "inproc://backend"); // Launch pool of worker threads, precise number is not critical // for (thread_nbr = 0; thread_nbr < 5; thread_nbr++) zthread_fork (ctx, server_worker, server_args); // Connect backend to frontend via a proxy zmq_proxy (frontend, backend, NULL); printf("back\n"); zctx_destroy (&ctx); return NULL; }
int main(int argc, char** argv) { if(argc < 3) { std::cout << "usage: " << argv[0] << " <front-end URI> <back-end URI>" << std::endl; std::cout << "Example: broker \"tcp://*:5555\" \"tcp://*:6666\"\n"; return 0; } void* ctx = zmq_ctx_new(); void* frontend = zmq_socket(ctx, ZMQ_XPUB); void* backend = zmq_socket(ctx, ZMQ_XSUB); const char* brokerURI = argv[1]; const char* backendURI = argv[2]; int rc = zmq_bind(frontend, brokerURI); assert(rc == 0); rc = zmq_bind(backend, backendURI); assert(rc == 0); while(1) { zmq_proxy(frontend, backend, 0); } rc = zmq_close(frontend); assert(rc == 0); rc = zmq_close(backend); assert(rc == 0); rc = zmq_ctx_destroy(ctx); assert(rc == 0); return 0; }
//----------------------------------------------------------------------------- void connection_c::route( connection_c& dealer ) { // sanity checks assert( _role == ROUTER && dealer._role == DEALER ); zmq_proxy( _socket, dealer._socket, NULL ); // Note per zeromq API: zmq_proxy always returns -1 and errno == ETERM // but that annotation is ambiguous at best. }
// Runs a PULL -> PUSH forwarding proxy between the two endpoints. Both
// sockets are bound locally; control is then handed to zmq_proxy(), which
// blocks until the context is terminated. If capture_socket_ptr is non-null
// it receives a copy of every forwarded message.
void run_proxy(zmq::context_t &ctx, const std::string &name, const std::string &frontend_endpoint, const std::string &backend_endpoint, zmq::socket_t *capture_socket_ptr)
{
    boost::format fmt("Starting the proxy... %1% -> %2%");
    log(name, fmt % frontend_endpoint % backend_endpoint);

    zmq::socket_t pull_side(ctx, ZMQ_PULL);
    zmq::socket_t push_side(ctx, ZMQ_PUSH);
    pull_side.bind(frontend_endpoint.c_str());
    push_side.bind(backend_endpoint.c_str());

    zmq_proxy(pull_side, push_side, capture_socket_ptr); // it blocks
    log(name, "End of the proxy");
}
static void * broker_task (void *args) { // Prepare our context and sockets zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_DEALER); zsocket_bind (frontend, "tcp://*:5555"); void *backend = zsocket_new (ctx, ZMQ_DEALER); zsocket_bind (backend, "tcp://*:5556"); zmq_proxy (frontend, backend, NULL); zctx_destroy (&ctx); return NULL; }
/* Initializes and runs the proxy: a ROUTER socket for clients bound to
 * rp->address and a DEALER socket for workers bound to inproc://workers,
 * with rp->workers worker threads attached. On success this call blocks
 * inside zmq_proxy() until the context is terminated, then returns 0.
 * Returns -1 on any setup failure. Fix: partially-created resources are
 * now released on failure (and the corresponding rp fields reset to NULL)
 * instead of being leaked. */
int raptor_proxy_init(RaptorProxy* rp) {
    rp->zctx = zmq_init(1);
    if (!rp->zctx) {
        dzlog_error("zmq_init failed, err: %s", zmq_strerror(errno));
        return -1;
    }
    rp->client_socket = zmq_socket(rp->zctx, ZMQ_ROUTER);
    if (!rp->client_socket) {
        dzlog_error("create client socket failed, err: %s", zmq_strerror(errno));
        goto fail_ctx;
    }
    if (zmq_bind(rp->client_socket, rp->address) != 0) {
        dzlog_error("client socket bind failed, err: %s", zmq_strerror(errno));
        goto fail_client;
    }
    rp->worker_socket = zmq_socket(rp->zctx, ZMQ_DEALER);
    if (!rp->worker_socket) {
        dzlog_error("create worker socket failed, err: %s", zmq_strerror(errno));
        goto fail_client;
    }
    if (zmq_bind(rp->worker_socket, "inproc://workers") != 0) {
        dzlog_error("worker socket bind failed, err: %s", zmq_strerror(errno));
        goto fail_worker;
    }

    /* launch pool of worker threads (detached; the proxy below blocks) */
    int count;
    for (count = 0; count < rp->workers; count++) {
        pthread_t worker;
        pthread_create(&worker, NULL, worker_routine, rp->zctx);
    }

    /* connect work threads to client threads via a queue proxy */
    zmq_proxy (rp->client_socket, rp->worker_socket, NULL);
    return 0;

fail_worker:
    zmq_close(rp->worker_socket);
    rp->worker_socket = NULL;
fail_client:
    zmq_close(rp->client_socket);
    rp->client_socket = NULL;
fail_ctx:
    zmq_term(rp->zctx);
    rp->zctx = NULL;
    return -1;
}
/** * Called by Java's ZMQ::proxy(). */ JNIEXPORT void JNICALL Java_org_zeromq_ZMQ_run_1proxy (JNIEnv *env, jclass cls, jobject frontend_, jobject backend_, jobject capture_) { #if ZMQ_VERSION >= ZMQ_MAKE_VERSION(3,2,2) void *frontend = get_socket (env, frontend_, true); void *backend = get_socket (env, backend_, true); void *capture = NULL; if (capture_ != NULL) capture = get_socket (env, capture_, true); zmq_proxy (frontend, backend, capture); #endif }
int main (void) { // Start child threads zctx_t *ctx = zctx_new (); zthread_fork (ctx, publisher_thread, NULL); zthread_fork (ctx, subscriber_thread, NULL); void *subscriber = zsocket_new (ctx, ZMQ_XSUB); zsocket_connect (subscriber, "tcp://localhost:6000"); void *publisher = zsocket_new (ctx, ZMQ_XPUB); zsocket_bind (publisher, "tcp://*:6001"); void *listener = zthread_fork (ctx, listener_thread, NULL); zmq_proxy (subscriber, publisher, listener); puts (" interrupted"); // Tell attached threads to exit zctx_destroy (&ctx); return 0; }
void ZqlDaemon::run() { _context = zmq_ctx_new(); _frontend_socket = zmq_socket(_context, ZMQ_ROUTER); _backend_socket = zmq_socket(_context, ZMQ_DEALER); zmq_bind(_frontend_socket, "tcp://*:9990"); zmq_bind(_backend_socket, "inproc://zql"); Worker *workers[WORKER_COUNT]; for(int i = 0; i < WORKER_COUNT; i++) { workers[i] = new Worker(this, i + 1); } zmq_proxy(_frontend_socket, _backend_socket, NULL); zmq_close(_frontend_socket); zmq_close(_backend_socket); zmq_ctx_destroy(_context); }
int main (void) { void *context = zmq_ctx_new (); // This is where the weather server sits void *frontend = zmq_socket (context, ZMQ_XSUB); zmq_connect (frontend, "tcp://192.168.55.210:5556"); // This is our public endpoint for subscribers void *backend = zmq_socket (context, ZMQ_XPUB); zmq_bind (backend, "tcp://10.1.1.0:8100"); // Run the proxy until the user interrupts us zmq_proxy (frontend, backend, NULL); zmq_close (frontend); zmq_close (backend); zmq_ctx_destroy (context); return 0; }
int main (void) { zctx_t *ctx = zctx_new (); int size; int io_threads = 4; zctx_set_iothreads (ctx, io_threads); void *subscriber, *publisher; int accept_port = scan_and_bind_socket(ctx, &subscriber, ZMQ_XSUB, DYNAMIC_ADDR); int publish_port = scan_and_bind_socket(ctx, &publisher, ZMQ_XPUB, DYNAMIC_ADDR); char ** registration_response = register_publisher_service(ctx, accept_port, publish_port, &size); free(registration_response); //void *listener = zthread_fork (ctx, listener_thread, NULL); zmq_proxy (subscriber, publisher, NULL); zctx_destroy (&ctx); return 0; }
int main (void) { void *context = zmq_ctx_new (); // Socket facing clients void *frontend = zmq_socket (context, ZMQ_ROUTER); int rc = zmq_bind (frontend, "tcp://*:5559"); assert (rc == 0); // Socket facing services void *backend = zmq_socket (context, ZMQ_DEALER); rc = zmq_bind (backend, "tcp://*:5560"); assert (rc == 0); // Start the proxy zmq_proxy (frontend, backend, NULL); // We never get here... zmq_close (frontend); zmq_close (backend); zmq_ctx_destroy (context); return 0; }
void *server_task () { // Frontend socket talks to clients over TCP zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, "tcp://*:5570"); // Backend socket talks to workers over inproc inproc线程间通信 void *backend = zsocket_new (ctx, ZMQ_DEALER); //zsocket_bind (backend, "inproc://backend"); zsocket_bind (backend, "tcp://5571"); // Launch pool of worker threads, precise number is not critical int thread_nbr; // for (thread_nbr = 0; thread_nbr < 5; thread_nbr++) // zthread_fork (ctx, server_worker, NULL); // Connect backend to frontend via a proxy zmq_proxy (frontend, backend, NULL); zctx_destroy (&ctx); return NULL; }
/*
 * zlmb worker main(): parses CLI options (-e endpoint, -c command, -t thread
 * count, -s syslog, -v verbose). If a command is given, binds a PUSH backend
 * on a per-pid inproc-style endpoint and spawns a pool of _worker_command
 * threads fed from it; then connects a PULL frontend to the configured
 * endpoint. With a backend present it runs zmq_proxy(frontend, backend);
 * otherwise it falls back to a zmq_poll() receive loop that drains and dumps
 * incoming multipart messages until interrupted. Returns 0 on clean exit,
 * -1 on any setup failure (resources released on each error path).
 * NOTE(review): the original statement-packed formatting is preserved below;
 * comments are inserted only at boundaries that cannot split a token/string.
 */
int main (int argc, char **argv) { int i, opt, thread = 1; char *command = NULL; char *frontendpoint = ZLMB_WORKER_SOCKET, *backendpoint = NULL; void *context, *frontend = NULL, *backend = NULL; zlmb_worker_t **worker = NULL; size_t size; const struct option long_options[] = { { "endpoint", 1, NULL, 'e' }, { "command", 1, NULL, 'c' }, { "thread", 1, NULL, 't' }, { "syslog", 0, NULL, 's' }, { "verbose", 0, NULL, 'v' }, { "help", 0, NULL, 'h' }, { NULL, 0, NULL, 0 } }; while ((opt = getopt_long(argc, argv, "e:c:t:svh", long_options, NULL)) != -1) { switch (opt) { case 'e': frontendpoint = optarg; break; case 'c': command = optarg; break; case 't': thread = atoi(optarg); break; case 's': _syslog = 1; break; case 'v': _verbose = 1; break; default: _usage(argv[0], NULL); return -1; } } _LOG_OPEN(ZLMB_SYSLOG_IDENT); _INFO("Connect endpoint: %s\n", frontendpoint); _INFO("Execute command: %s\n", command); _INFO("Thread count: %d\n", thread); context = zmq_ctx_new(); if (!context) { _ERR("ZeroMQ context: %s\n", zmq_strerror(errno)); _LOG_CLOSE(); return -1; } /* if (zmq_ctx_set(context, ZMQ_IO_THREADS, 1) == -1) { _ERR("%s", zmq_strerror(errno)); zmq_ctx_destroy(context); return -1; } if (zmq_ctx_set(context, ZMQ_MAX_SOCKETS, 1024) == -1) { _ERR("%s", zmq_strerror(errno)); zmq_ctx_destroy(context); return -1; } */ /* backend: command */ if (command) { backend = zmq_socket(context, ZMQ_PUSH); if (!backend) { _ERR("ZeroMQ backend socket: %s\n", zmq_strerror(errno)); zmq_ctx_destroy(context); _LOG_CLOSE(); return -1; } if (zlmb_utils_asprintf(&backendpoint, "%s.%d", ZLMB_WORKER_BACKEND_SOCKET, getpid()) == -1) { _ERR("Allocate string backend point.\n"); zmq_ctx_destroy(context); _LOG_CLOSE(); return -1; } if (zmq_bind(backend, backendpoint) == -1) { _ERR("ZeroMQ backend bind: %s: %s\n", backendpoint, zmq_strerror(errno)); zmq_close(backend); zmq_ctx_destroy(context); _LOG_CLOSE(); return -1; } _VERBOSE("ZeroMQ backend bind: %s\n", backendpoint); /* backend: command thread */ 
/* Worker pool setup: allocate a zlmb_worker_t per thread (at least one),
 * point each at the shared context/command/backend endpoint, and start a
 * _worker_command pthread for it. Afterwards the PULL frontend is created
 * and connected, signals installed, and the proxy (or, with no backend,
 * the zmq_poll drain loop that dumps each multipart message) is entered. */
if (thread <= 0) { thread = 1; } size = sizeof(zlmb_worker_t *) * thread; worker = (zlmb_worker_t **)malloc(size); if (!worker) { _ERR("Memory allocate worker command.\n"); zmq_close(backend); zmq_ctx_destroy(context); free(backendpoint); _LOG_CLOSE(); return -1; } memset(worker, 0, size); for (i = 0; i != thread; i++) { worker[i] = (zlmb_worker_t *)malloc(sizeof(zlmb_worker_t)); if (!worker[i]) { _ERR("Memory allocate worker command.\n"); _worker_destroy(worker, thread, 500); zmq_close(backend); zmq_ctx_destroy(context); free(backendpoint); return -1; } worker[i]->thread = 0; worker[i]->context = context; worker[i]->command = command; worker[i]->endpoint = backendpoint; worker[i]->argv = argv; worker[i]->argc = argc; worker[i]->optind = optind; if (pthread_create(&(worker[i]->thread), NULL, _worker_command, (void *)worker[i]) == -1) { _ERR("Create command worker thread(#%d).\n", i+1); _worker_destroy(worker, thread, 500); zmq_close(backend); zmq_ctx_destroy(context); free(backendpoint); _LOG_CLOSE(); return -1; } } } /* frontend */ frontend = zmq_socket(context, ZMQ_PULL); if (!frontend) { _ERR("ZeroMQ frontend socket: %s\n", zmq_strerror(errno)); if (worker) { _worker_destroy(worker, thread, 500); zmq_close(backend); free(backendpoint); } zmq_ctx_destroy(context); _LOG_CLOSE(); return -1; } if (zmq_connect(frontend, frontendpoint) == -1) { _ERR("ZeroMQ frontend connect: %s: %s\n", frontendpoint, zmq_strerror(errno)); if (worker) { _worker_destroy(worker, thread, 500); zmq_close(backend); free(backendpoint); } zmq_close(frontend); zmq_ctx_destroy(context); _LOG_CLOSE(); return -1; } _VERBOSE("ZeroMQ frontend connect: %s\n", frontendpoint); _signals(); _VERBOSE("ZeroMQ start proxy.\n"); if (backend) { zmq_proxy(frontend, backend, NULL); } else { zmq_pollitem_t pollitems[] = { { frontend, 0, ZMQ_POLLIN, 0 } }; _NOTICE("default receive process.\n"); while (!_interrupted) { if (zmq_poll(pollitems, 1, -1) == -1) { break; } if (pollitems[0].revents & ZMQ_POLLIN) { int 
more; size_t moresz = sizeof(more); _DEBUG("ZeroMQ receive in poll event.\n"); while (!_interrupted) { zmq_msg_t zmsg; if (zmq_msg_init(&zmsg) != 0) { break; } _DEBUG("ZeroMQ receive message.\n"); if (zmq_recvmsg(frontend, &zmsg, 0) == -1) { _ERR("ZeroMQ frontend socket receive: %s\n", zmq_strerror(errno)); zmq_msg_close(&zmsg); break; } if (zmq_getsockopt(frontend, ZMQ_RCVMORE, &more, &moresz) == -1) { _ERR("ZeroMQ frontend socket option receive: %s\n", zmq_strerror(errno)); //zmq_msg_close(&zmsg); //break; more = 0; } #ifndef NDEBUG zlmb_dump_printmsg(stderr, &zmsg); #endif zmq_msg_close(&zmsg); if (!more) { break; } } } } } _VERBOSE("ZeroMQ end proxy.\n"); _VERBOSE("ZeroMQ close sockets.\n"); zmq_close(frontend); if (worker) { _worker_destroy(worker, thread, 0); zmq_close(backend); free(backendpoint); } _VERBOSE("ZeroMQ destory context.\n"); zmq_ctx_destroy(context); _LOG_CLOSE(); return 0; }
/*
 * zid dispatcher main(): parses -m (machine number 0..15, required),
 * -b (bind address, default BIND_ADDRESS) and -p (port, default PORT),
 * then binds a ROUTER server socket on tcp://<addr>:<port> and a DEALER
 * dispatcher socket on inproc://zid. NUMBER_OF_WORKERS worker threads are
 * forked, each given a heap-allocated setting_t (machine + thread number;
 * presumably freed by the worker — TODO confirm). zmq_proxy() then runs in
 * the current thread until the context is destroyed.
 * NOTE(review): the original single-line formatting is preserved below.
 */
int main(int argc, char** argv) { long mashine_number = -1; char *bind_address = NULL; long port = -1; int opt; char *endptr; /* parse command line options */ while ((opt = getopt(argc, argv, "m:b:p:h")) != -1) { switch (opt) { case 'm': errno = 0; mashine_number = strtol(optarg, &endptr, 10); if ((errno != 0 && mashine_number == 0) || (*endptr != '\0') || (mashine_number < 0 || mashine_number > 15)) { fprintf(stderr, "invalid mashine number\n"); return EXIT_FAILURE; } break; case 'b': bind_address = strndup(optarg, 45); break; case 'p': errno = 0; port = strtol(optarg, &endptr, 10); if ((errno != 0) || (*endptr != '\0') || (port <= 0 || port > 65535)) { fprintf(stderr, "invalid port number\n"); return EXIT_FAILURE; } break; case 'h': print_usage(stdout); return EXIT_SUCCESS; case '?': print_usage(stderr); return EXIT_FAILURE; default: return EXIT_FAILURE; } } if (mashine_number == -1) { print_usage(stderr); return EXIT_FAILURE; } if (bind_address == NULL) { fprintf(stdout, "No bind address specified. Using default address: %s\n", BIND_ADDRESS); bind_address = BIND_ADDRESS; } if (port == -1) { fprintf(stdout, "No port number specified. Using default port number: %d\n", PORT); port = PORT; } /* create context object */ zctx_t *ctx = zctx_new(); /* create socket objects */ void *server = zsocket_new(ctx, ZMQ_ROUTER); void *dispatcher = zsocket_new(ctx, ZMQ_DEALER); /* bind server/dispatcher socket */ zsocket_bind(server, "tcp://%s:%ld", bind_address, port); zsocket_bind(dispatcher, "inproc://zid"); /* create worker threads */ setting_t *s; int i; for (i = 0; i < NUMBER_OF_WORKERS; i++) { s = (setting_t *) malloc(sizeof(setting_t)); s->mashine_number = mashine_number; s->thread_number = i; zthread_fork(ctx, worker, s); } /* zmq_proxy runs in current thread */ zmq_proxy(server, dispatcher, NULL); zctx_destroy (&ctx); return EXIT_SUCCESS; }
// Two-socket proxy: hands both sockets to zmq_proxy(), which blocks in this
// constructor until the underlying context is terminated.
zmqpp::proxy::proxy(socket &sa, socket &sb)
{
    void *raw_a = static_cast<void *>(sa);
    void *raw_b = static_cast<void *>(sb);
    zmq_proxy(raw_a, raw_b, nullptr);
}
// Three-socket proxy: like the two-socket form, but every message passing
// through is also copied to the capture socket. Blocks until the context
// is terminated.
zmqpp::proxy::proxy(zmqpp::socket &sa, zmqpp::socket &sb, zmqpp::socket &capture)
{
    void *raw_a = static_cast<void *>(sa);
    void *raw_b = static_cast<void *>(sb);
    void *raw_cap = static_cast<void *>(capture);
    zmq_proxy(raw_a, raw_b, raw_cap);
}
/*
 * Daemonized thread-pool server main(). (Original comments are in Russian;
 * summary:) In NDEBUG builds it forks and detaches from the parent, creates
 * a new session (retrying setsid up to SETSID_ATEMPTS_COUNT times), opens
 * syslog, optionally installs SIGTSTP/SIGINT/SIGTERM/SIGQUIT handlers
 * (correct_exit), resets umask, chdirs to /, and redirects the standard
 * descriptors to /dev/null. It then parses -v (verbose) and -t (thread
 * count, default DEFAULT_THREADS_COUNT), takes the server address from the
 * remaining argv or DEFAULT_SERVER_ADDR, initializes a barrier sized
 * threads_count+1, creates the ZeroMQ context plus ROUTER (clients) and
 * DEALER (workers) sockets, starts one synchronizer and threads_count
 * detached operator threads, waits on the barrier so all workers have their
 * sockets open, and finally blocks in zmq_proxy(clients, workers).
 * NOTE(review): the original garbled line breaks fall inside a comment and
 * string literals, so the four lines below are preserved byte-for-byte with
 * no inserted text.
 */
int main(int argc, char *argv[]) { pthread_attr_t pthread_attr; int opt, /* Буфер для распознания опций argv через getopt */ opt_t = 0; /* Указан ли ключ '-t' */ int null_fd; int rc; /* return code */ #if CATCH_SIGNAL struct sigaction sa; sigset_t sa_set; #endif #ifdef NDEBUG pid_t pid, sid; /* Отделяемся от родительского процесса */ pid = fork(); /* Если не проходит даже форк - значит дела совсем плохи - * завершаем работу тут же. */ if (pid < 0) { perror("fork()"); exit(EXIT_FAILURE); } /* Если дочерний процесс порождён успешно, то родительский процесс можно завершить. */ if (pid > 0) { exit(EXIT_SUCCESS); } #endif /* Открытие журнала на запись */ openlog(SELF_NAME, LOG_ODELAY|LOG_PERROR|LOG_PID, LOG_DAEMON); #ifdef NDEBUG /* Создание нового SID для дочернего процесса */ sid = setsid(); /* Если получить sid не удалось - пытаемся сделать это ещё SETSID_ATEMPTS_COUNT раз */ TRY_N_TIMES(SETSID_ATEMPTS_COUNT, (sid = setsid()), (sid < 0), "setsid()", LOG_CRIT); /* Если после вышеописанных попыток sid всё равно не получен - завершаем работу. 
*/ if (sid < 0) { syslog(LOG_EMERG, "setsid(): %s.", strerror(errno)); exit(EXIT_FAILURE); } #endif #if CATCH_SIGNAL sigemptyset(&sa_set); sigaddset(&sa_set, SIGHUP); sigprocmask(SIG_BLOCK, &sa_set, 0); sa.sa_mask = sa_set; sa.sa_flags = SA_NOMASK; sa.sa_handler = correct_exit; sigaction(SIGTSTP, &sa, 0); sigaction(SIGINT, &sa, 0); sigaction(SIGTERM, &sa, 0); sigaction(SIGQUIT, &sa, 0); #endif /* Изменяем файловую маску */ umask(0); /* Изменяем текущий рабочий каталог */ if (chdir("/") < 0) { syslog(LOG_WARNING, "chdir(): %s.", strerror(errno)); } /* Закрываем стандартные файловые дескрипторы - * теперь вместо них будет /dev/null */ null_fd = open(DEV_NULL_PATH, O_RDWR | O_NONBLOCK); if(null_fd < 0){ syslog(LOG_EMERG, "open(\""DEV_NULL_PATH"\"): %s.", strerror(errno)); exit(EXIT_FAILURE); } close_and_dup_stdfd(STDIN_FILENO, null_fd); #ifdef NDEBUG close_and_dup_stdfd(STDOUT_FILENO, null_fd); close_and_dup_stdfd(STDERR_FILENO, null_fd); #endif /* Заполним страктуру значениями по-умолчанию */ /* server_pool.be_verbose = 0; */ server_pool.threads_count = DEFAULT_THREADS_COUNT; /* Проверим все переданные аргументы */ while ((opt = getopt(argc, argv, "vt:")) != -1) { switch (opt) { case 'v': /* verbose */ server_pool.be_verbose = 1; break; /*case 'c': cache file path break;*/ case 't': /* thread count */ server_pool.threads_count = atoi(optarg); opt_t = 1; if(server_pool.threads_count < 1){ syslog(LOG_WARNING, "Threads count incorrect." "Use default: " DEFAULT_THREADS_COUNT_S); } break; default: /* '?' */ break; } } if(opt_t == 0 && server_pool.be_verbose){ syslog(LOG_NOTICE, "Threads count not specified. " "Use default: " DEFAULT_THREADS_COUNT_S); } /* Получаем адрес сервера. */ if (optind >= argc) { if(server_pool.be_verbose){ syslog(LOG_NOTICE, "Server addres not specified. 
" "Use default: '" DEFAULT_SERVER_ADDR "'"); } strncpy(server_pool.server_addr, DEFAULT_SERVER_ADDR, MAX_SERVER_ADDR_SIZE); } else{ strncpy(server_pool.server_addr, argv[optind], MAX_SERVER_ADDR_SIZE); } /* Инициализируем барьер. */ if(pthread_barrier_init(&server_pool.proxy_barr, NULL, server_pool.threads_count + 1) ){ syslog(LOG_ERR, "Error in barrier creating."); server_pool.no_barr = 1; } else{ server_pool.no_barr = 0; } /* Резервируем место под нити */ server_pool.tids = (pthread_t *) malloc(sizeof(pthread_t) * server_pool.threads_count); server_pool.context = zmq_ctx_new(); server_pool.clients = zmq_socket(server_pool.context, ZMQ_ROUTER); server_pool.workers = zmq_socket(server_pool.context, ZMQ_DEALER); zmq_bind(server_pool.clients, server_pool.server_addr); zmq_bind(server_pool.workers, ZMQ_INPROC_ADDR); pthread_attr_init(&pthread_attr); pthread_attr_setdetachstate(&pthread_attr, PTHREAD_CREATE_DETACHED); /* sync pthread init */ if(pthread_create(&server_pool.sync_tid, &pthread_attr, &thread_synchronizer, NULL) != 0){ syslog(LOG_EMERG, "Error in sync thread creating."); correct_exit(); } /* cmd pthread init */ /* client pthreads init */ for(int i = 0; i < server_pool.threads_count; i++){ if(pthread_create(&server_pool.tids[i], &pthread_attr, &thread_operator, (void *) i) != 0){ syslog(LOG_EMERG, "Error in thread creating."); correct_exit(); } } pthread_attr_destroy(&pthread_attr); if(server_pool.be_verbose){ syslog(LOG_INFO, "Initialize complete. Start main cycle."); } /* Перед запуском zmq_proxy необходимо открыть все нужные сокеты. * Ожидаем, когда все клиентские нити сделают это. */ if(!server_pool.no_barr){ rc = pthread_barrier_wait(&server_pool.proxy_barr); if(rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) syslog(LOG_ERR, "Cannot wait on barrier."); else{ pthread_barrier_destroy(&server_pool.proxy_barr); #ifndef NDEBUG syslog(LOG_DEBUG, "Barrier destroy. 
Start proxy."); #endif } } zmq_proxy(server_pool.clients, server_pool.workers, NULL); return EXIT_SUCCESS; }
// Thin wrapper over zmq_proxy(): forwards messages between the two sockets
// (optionally copying each to `capture`) until the context terminates.
// Because zmq_proxy() never returns success, this either blocks forever or
// throws error_t once the proxy stops.
inline void proxy (void *frontend, void *backend, void *capture)
{
    if (zmq_proxy (frontend, backend, capture) != 0)
        throw error_t ();
}
/*
 * Lightweight ZeroMQ message hub for Elektra: reads the XSUB/XPUB bind
 * endpoints from the KDB configuration under
 * /sw/elektra/hub-zeromq/#0/current (keys bind_xsub / bind_xpub, defaulting
 * to tcp://127.0.0.1:6000 and :6001), binds both sockets, then forwards
 * messages between them with zmq_proxy() until the context is destroyed
 * (presumably by the SIGINT handler `onSignal` — defined elsewhere, not
 * visible here). Returns -1 if either bind fails, releasing the sockets
 * and context first. NOTE(review): original single-line formatting kept.
 */
int main (void) { printf ("lightweight zeromq message hub\n"); // exit on SIGINT signal (SIGINT, onSignal); KeySet * config = ksNew (2, KS_END); Key * parentKey = keyNew ("/sw/elektra/hub-zeromq/#0/current", KEY_END); Key * configXSubEndpoint = keyDup (parentKey); keyAddBaseName (configXSubEndpoint, "bind_xsub"); Key * configXPubEndpoint = keyDup (parentKey); keyAddBaseName (configXPubEndpoint, "bind_xpub"); KDB * kdb = kdbOpen (parentKey); if (kdb == NULL) { printf ("could not open KDB. aborting\n"); return -1; } const char * xSubEndpoint = "tcp://127.0.0.1:6000"; const char * xPubEndpoint = "tcp://127.0.0.1:6001"; kdbGet (kdb, config, parentKey); Key * xSubEndpointKey = ksLookup (config, configXSubEndpoint, 0); if (xSubEndpointKey) { xSubEndpoint = keyString (xSubEndpointKey); } Key * xPubEndpointKey = ksLookup (config, configXPubEndpoint, 0); if (xPubEndpointKey) { xPubEndpoint = keyString (xPubEndpointKey); } keyDel (configXSubEndpoint); keyDel (configXPubEndpoint); kdbClose (kdb, parentKey); keyDel (parentKey); context = zmq_ctx_new (); xSubSocket = zmq_socket (context, ZMQ_XSUB); xPubSocket = zmq_socket (context, ZMQ_XPUB); int result; result = zmq_bind (xSubSocket, xSubEndpoint); if (result != 0) { printf ("could not bind XSUB on %s socket: %s\n", xSubEndpoint, zmq_strerror (zmq_errno ())); zmq_close (xSubSocket); zmq_close (xPubSocket); zmq_ctx_destroy (context); return -1; } result = zmq_bind (xPubSocket, xPubEndpoint); if (result != 0) { printf ("could not bind XPUB on %s socket: %s\n", xPubEndpoint, zmq_strerror (zmq_errno ())); zmq_close (xSubSocket); zmq_close (xPubSocket); zmq_ctx_destroy (context); return -1; } printf ("listening on %s (XSUB for zeromqsend)\n", xSubEndpoint); printf ("listening on %s (XPUB for zeromqrecv)\n", xPubEndpoint); ksDel (config); // forward messages between sockets // will return on zmq_ctx_destroy() zmq_proxy (xPubSocket, xSubSocket, NULL); return 0; }