//  peering2-style broker: routes requests between local clients/workers
//  and peer brokers in the "cloud".  argv[1] is this broker's name; the
//  remaining arguments name the peer brokers we connect to.
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering2 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));
    zctx_t *ctx = zctx_new ();

    //  Bind cloud frontend to endpoint; the identity lets peers route
    //  replies back to us by name
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }
    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);
    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Get user to tell us when we can start...
    printf ("Press Enter when all brokers are started: ");
    getchar ();

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);

    //  .split request-reply handling
    //  Here we handle the request-reply flow.  We're using the LRU approach
    //  to poll workers at all times, and clients only when there are one or
    //  more workers available.

    //  Least recently used queue of available workers.
    //  'capacity' counts idle local workers: incremented when a worker
    //  sends us anything, decremented when we hand it a request.
    int capacity = 0;
    zlist_t *workers = zlist_new ();

    while (true) {
        //  First, route any waiting replies from workers
        zmq_pollitem_t backends [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers anyhow, wait indefinitely
        int rc = zmq_poll (backends, 2, capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle reply from local worker
        zmsg_t *msg = NULL;
        if (backends [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            //  The unwrapped envelope frame is the worker's identity;
            //  it is stored in the LRU list (ownership moves to the list)
            zframe_t *address = zmsg_unwrap (msg);
            zlist_append (workers, address);
            capacity++;
            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), LRU_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else
        if (backends [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker address for anything
            zframe_t *address = zmsg_unwrap (msg);
            zframe_destroy (&address);
        }
        //  Route reply to cloud if it's addressed to a broker
        //  (first frame of the reply names the destination)
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
            &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  .split route client requests
        //  Now we route as many client requests as we have worker capacity
        //  for.  We may reroute requests from our local frontend, but not
        //  from the cloud frontend.  We reroute randomly now, just to test
        //  things out.  In the next version we'll do this properly by
        //  calculating cloud capacity:

        while (capacity) {
            zmq_pollitem_t frontends [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            rc = zmq_poll (frontends, 2, 0);
            assert (rc >= 0);
            int reroutable = 0;
            //  We'll do peer brokers first, to prevent starvation
            if (frontends [1].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (cloudfe);
                reroutable = 0;
            }
            else
            if (frontends [0].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (localfe);
                reroutable = 1;
            }
            else
                break;      //  No work, go back to backends

            //  If reroutable, send to cloud 20% of the time
            //  Here we'd normally use cloud status information
            //
            if (reroutable && argc > 2 && randof (5) == 0) {
                //  Route to random broker peer; its name is pushed as
                //  the address frame so cloudbe can deliver it
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
            else {
                //  Pop the least recently used worker and wrap the
                //  request in its identity envelope
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                capacity--;
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
// Bind the wrapped socket to the given endpoint.
// Forwards to zsocket_bind and returns its result unchanged
// (port number on success for wildcard TCP binds, -1 on failure).
int socket::bind(const std::string& address)
{
    const char* endpoint = address.c_str();
    return zsocket_bind(self_, endpoint);
}
//  Activate the broker: bind its socket to the caller-supplied endpoint
//  and log that the MDP broker is now reachable there.
void s_broker_bind (broker_t *self, char *endpoint)
{
    zsocket_bind (self->socket, endpoint);
    zclock_log ("I: MDP broker/0.1.1 is active at %s", endpoint);
}
//  Selftest for the zre_msg codec: for every message type, build a
//  message, send it over an inproc DEALER->ROUTER pair, receive it back,
//  and assert every field round-trips intact.  Returns 0 on success
//  (any failure aborts via assert).
int zre_msg_test (bool verbose)
{
    printf (" * zre_msg: ");

    //  Simple create/destroy test
    zre_msg_t *self = zre_msg_new (0);
    assert (self);
    zre_msg_destroy (&self);

    //  Create pair of sockets we can send through.
    //  Note the DEALER binds and the ROUTER connects - direction of
    //  bind/connect is irrelevant for inproc as long as bind comes first.
    zctx_t *ctx = zctx_new ();
    assert (ctx);
    void *output = zsocket_new (ctx, ZMQ_DEALER);
    assert (output);
    zsocket_bind (output, "inproc://selftest");
    void *input = zsocket_new (ctx, ZMQ_ROUTER);
    assert (input);
    zsocket_connect (input, "inproc://selftest");

    //  Encode/send/decode and verify each message type

    //  HELLO: exercises sequence, ipaddress, mailbox, groups list,
    //  status, and headers dictionary (append/insert take format strings)
    self = zre_msg_new (ZRE_MSG_HELLO);
    zre_msg_sequence_set (self, 123);
    zre_msg_ipaddress_set (self, "Life is short but Now lasts for ever");
    zre_msg_mailbox_set (self, 123);
    zre_msg_groups_append (self, "Name: %s", "Brutus");
    zre_msg_groups_append (self, "Age: %d", 43);
    zre_msg_status_set (self, 123);
    zre_msg_headers_insert (self, "Name", "Brutus");
    zre_msg_headers_insert (self, "Age", "%d", 43);
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    assert (streq (zre_msg_ipaddress (self), "Life is short but Now lasts for ever"));
    assert (zre_msg_mailbox (self) == 123);
    assert (zre_msg_groups_size (self) == 2);
    assert (streq (zre_msg_groups_first (self), "Name: Brutus"));
    assert (streq (zre_msg_groups_next (self), "Age: 43"));
    assert (zre_msg_status (self) == 123);
    assert (zre_msg_headers_size (self) == 2);
    assert (streq (zre_msg_headers_string (self, "Name", "?"), "Brutus"));
    assert (zre_msg_headers_number (self, "Age", 0) == 43);
    zre_msg_destroy (&self);

    //  WHISPER: carries an opaque content frame
    self = zre_msg_new (ZRE_MSG_WHISPER);
    zre_msg_sequence_set (self, 123);
    zre_msg_content_set (self, zframe_new ("Captcha Diem", 12));
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    assert (zframe_streq (zre_msg_content (self), "Captcha Diem"));
    zre_msg_destroy (&self);

    //  SHOUT: group name plus content frame
    self = zre_msg_new (ZRE_MSG_SHOUT);
    zre_msg_sequence_set (self, 123);
    zre_msg_group_set (self, "Life is short but Now lasts for ever");
    zre_msg_content_set (self, zframe_new ("Captcha Diem", 12));
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    assert (streq (zre_msg_group (self), "Life is short but Now lasts for ever"));
    assert (zframe_streq (zre_msg_content (self), "Captcha Diem"));
    zre_msg_destroy (&self);

    //  JOIN: group membership announcement
    self = zre_msg_new (ZRE_MSG_JOIN);
    zre_msg_sequence_set (self, 123);
    zre_msg_group_set (self, "Life is short but Now lasts for ever");
    zre_msg_status_set (self, 123);
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    assert (streq (zre_msg_group (self), "Life is short but Now lasts for ever"));
    assert (zre_msg_status (self) == 123);
    zre_msg_destroy (&self);

    //  LEAVE: mirror of JOIN
    self = zre_msg_new (ZRE_MSG_LEAVE);
    zre_msg_sequence_set (self, 123);
    zre_msg_group_set (self, "Life is short but Now lasts for ever");
    zre_msg_status_set (self, 123);
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    assert (streq (zre_msg_group (self), "Life is short but Now lasts for ever"));
    assert (zre_msg_status (self) == 123);
    zre_msg_destroy (&self);

    //  PING: sequence only
    self = zre_msg_new (ZRE_MSG_PING);
    zre_msg_sequence_set (self, 123);
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    zre_msg_destroy (&self);

    //  PING_OK: sequence only
    self = zre_msg_new (ZRE_MSG_PING_OK);
    zre_msg_sequence_set (self, 123);
    zre_msg_send (&self, output);

    self = zre_msg_recv (input);
    assert (self);
    assert (zre_msg_sequence (self) == 123);
    zre_msg_destroy (&self);

    zctx_destroy (&ctx);
    printf ("OK\n");
    return 0;
}
//  peering3-style broker: routes requests between local clients/workers
//  and peer brokers, and exchanges capacity state with peers over a
//  PUB/SUB pair.  argv[1] is this broker's name; remaining arguments
//  name the peer brokers.
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering3 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));
    zctx_t *ctx = zctx_new ();

    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);
    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Bind cloud frontend to endpoint; our name as identity lets peers
    //  address replies to us
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }
    //  Bind state backend to endpoint (we publish our capacity here)
    void *statebe = zsocket_new (ctx, ZMQ_PUB);
    zsocket_bind (statebe, "ipc://%s-state.ipc", self);

    //  Connect state frontend to all peers (we learn their capacity here)
    void *statefe = zsocket_new (ctx, ZMQ_SUB);
    zsockopt_set_subscribe (statefe, "");
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to state backend at '%s'\n", peer);
        zsocket_connect (statefe, "ipc://%s-state.ipc", peer);
    }
    //  Prepare monitor socket (tasks push status strings here)
    void *monitor = zsocket_new (ctx, ZMQ_PULL);
    zsocket_bind (monitor, "ipc://%s-monitor.ipc", self);

    //  .split start child tasks
    //  After binding and connecting all our sockets, we start our child
    //  tasks - workers and clients:

    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);

    //  Queue of available workers.  local_capacity counts idle local
    //  workers; cloud_capacity is the last capacity a peer reported.
    int local_capacity = 0;
    int cloud_capacity = 0;
    zlist_t *workers = zlist_new ();

    //  .split main loop
    //  The main loop has two parts.  First we poll workers and our two
    //  service sockets (statefe and monitor), in any case.  If we have no
    //  ready workers, there's no point in looking at incoming requests.
    //  These can remain on their internal 0MQ queues:

    while (true) {
        zmq_pollitem_t primary [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 },
            { statefe, 0, ZMQ_POLLIN, 0 },
            { monitor, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers ready, wait indefinitely
        int rc = zmq_poll (primary, 4,
            local_capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Track if capacity changes during this iteration, so we only
        //  broadcast state when it did
        int previous = local_capacity;

        //  Handle reply from local worker
        zmsg_t *msg = NULL;
        if (primary [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            //  Worker identity goes onto the LRU list (ownership moves)
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);
            local_capacity++;
            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else
        if (primary [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker identity for anything
            zframe_t *identity = zmsg_unwrap (msg);
            zframe_destroy (&identity);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
            &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  .split handle state messages
        //  If we have input messages on our statefe or monitor sockets, we
        //  can process these immediately:

        if (primary [2].revents & ZMQ_POLLIN) {
            //  State message is two frames: peer name, then its capacity
            char *peer = zstr_recv (statefe);
            char *status = zstr_recv (statefe);
            cloud_capacity = atoi (status);
            free (peer);
            free (status);
        }
        if (primary [3].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (monitor);
            printf ("%s\n", status);
            free (status);
        }
        //  .split route client requests
        //  Now route as many clients requests as we can handle.  If we
        //  have local capacity, we poll both localfe and cloudfe.  If we
        //  have cloud capacity only, we poll just localfe.  We route any
        //  request locally if we can, else we route to the cloud.

        while (local_capacity + cloud_capacity) {
            zmq_pollitem_t secondary [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            if (local_capacity)
                rc = zmq_poll (secondary, 2, 0);
            else
                rc = zmq_poll (secondary, 1, 0);
            assert (rc >= 0);

            if (secondary [0].revents & ZMQ_POLLIN)
                msg = zmsg_recv (localfe);
            else
            if (secondary [1].revents & ZMQ_POLLIN)
                msg = zmsg_recv (cloudfe);
            else
                break;      //  No work, go back to primary

            if (local_capacity) {
                //  Dispatch to least recently used local worker
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                local_capacity--;
            }
            else {
                //  Route to random broker peer
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
        }
        //  .split broadcast capacity
        //  We broadcast capacity messages to other peers; to reduce
        //  chatter, we do this only if our capacity changed.

        if (local_capacity != previous) {
            //  We stick our own identity onto the envelope
            zstr_sendm (statebe, self);
            //  Broadcast new capacity
            zstr_send (statebe, "%d", local_capacity);
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
//  Process one command message arriving on the agent's control pipe.
//  Each command is a string frame optionally followed by argument frames.
//  Returns 0 on success, -1 if the read was interrupted (no command frame).
static int s_agent_handle_control (agent_t *self)
{
    //  Get the whole message off the control socket in one go
    zmsg_t *request = zmsg_recv (self->control);
    char *command = zmsg_popstr (request);
    if (!command)
        return -1;                  //  Interrupted

    if (streq (command, "SET")) {
        //  Store a metadata name/value pair
        char *name = zmsg_popstr (request);
        char *value = zmsg_popstr (request);
        zhash_insert (self->metadata, name, value);
        //  NOTE(review): freeing 'value' right after insertion is only
        //  safe if self->metadata duplicates values (autofree enabled
        //  where the hash is created) - confirm; otherwise the hash is
        //  left holding a dangling pointer.
        free (name);
        free (value);
    }
    else
    if (streq (command, "VERBOSE")) {
        //  Argument is "0" or "1"; only the first character is examined
        char *verbose = zmsg_popstr (request);
        self->verbose = *verbose == '1';
        free (verbose);
    }
    else
    if (streq (command, "MAX CLIENTS")) {
        char *limit = zmsg_popstr (request);
        self->max_clients = atoi (limit);
        free (limit);
    }
    else
    if (streq (command, "MAX PENDING")) {
        char *limit = zmsg_popstr (request);
        self->max_pending = atoi (limit);
        free (limit);
    }
    else
    if (streq (command, "CLIENT TTL")) {
        char *limit = zmsg_popstr (request);
        self->client_ttl = atoi (limit);
        free (limit);
    }
    else
    if (streq (command, "PENDING TTL")) {
        char *limit = zmsg_popstr (request);
        self->pending_ttl = atoi (limit);
        free (limit);
    }
    else
    if (streq (command, "BIND")) {
        char *endpoint = zmsg_popstr (request);
        int rc = zsocket_bind (self->router, endpoint);
        assert (rc != -1);
        free (endpoint);
    }
    else
    if (streq (command, "UNBIND")) {
        char *endpoint = zmsg_popstr (request);
        int rc = zsocket_unbind (self->router, endpoint);
        assert (rc != -1);
        free (endpoint);
    }
    else
    if (streq (command, "TERMINATE")) {
        //  Only TERMINATE is acknowledged back to the caller
        self->terminated = true;
        zstr_send (self->control, "OK");
    }
    free (command);
    zmsg_destroy (&request);
    return 0;
}
//  Line listener thread: subscribes to per-line messages on the shared
//  inproc publisher, tracks the current channel, and re-publishes
//  CHANNEL_CHANGE and VALUE updates on this line's own PUB socket.
//  cvoid is a heap-allocated lineconfig_t* owned (and freed) by this task.
void line_listener(void * cvoid, zctx_t * context, void * pipe)
{
    lineconfig_t * config = (lineconfig_t*) cvoid;
    // atm, topic == outpipe, but this is coincidental...
    zmsg_t * msg;
    dump_lineconfig(config);
    channel_memory_t channel_memory = { strdup("unknown"), strdup("unknown"), 0 };

    void * lineout = zsocket_new(context, ZMQ_PUB);
    char * outpipe = to_linesocket(config->line_id);
    zclock_log("binding line |%s|", outpipe);
    zsocket_bind(lineout, outpipe);

    void * subscriber = zsocket_new(context, ZMQ_SUB);
    zclock_log("subscribing to line |%d|", config->line_id);
    //  BUG FIX: this previously called zsockopt_set_unsubscribe(), which
    //  leaves a fresh SUB socket with no subscription at all, so the
    //  zmsg_recv() below would block forever.  Subscribe to everything;
    //  the topic assert below checks we only see our own line.
    zsockopt_set_subscribe(subscriber, "");
    char * topic = to_line(config->line_id);
    // zsockopt_set_subscribe(subscriber, topic);
    zclock_log("subscribing to literal line |%s|", topic);
    zsocket_connect(subscriber, "inproc://line");

    child_handshake(pipe);
    zsocket_destroy(context, pipe);

    while(1) {
        msg = zmsg_recv(subscriber);
        if(!msg) {
            zclock_log("line quitting!");
            //  break (not return) so the cleanup below actually runs;
            //  previously free(config) was unreachable
            break;
        }
        char * recv_topic = zmsg_popstr(msg);
        assert(strcmp(recv_topic, topic)==0);
        free(recv_topic);
        assert(zmsg_size(msg) == 2);
        char * channel = zmsg_popstr(msg);

        // originally, I thought it was a neat trick to not mention the
        // channel in every message. unfortunately, this screws up the
        // case where a new trigger gets introduced: at the beginning, it
        // has no idea what the channel currently is.
        // rather than trying to micro-optimise, let's just keep repeating
        // the channel in the value update too.
        if (port_changed(channel, &channel_memory)) {
            //  Create the message only when we will send it - the old
            //  code allocated one zmsg up front and leaked it on paths
            //  that never sent it
            zmsg_t * out = zmsg_new();
            zmsg_pushstr(out, channel_memory.current_channel);
            zmsg_pushstr(out, "CHANNEL_CHANGE");
            zmsg_send(&out, lineout);
        }
        // only send a value if we're all settled down
        if(strcmp(channel, channel_memory.current_channel)==0) {
            zmsg_t * out = zmsg_new();
            zmsg_pushstr(out, channel_memory.current_channel);
            zmsg_push(out, zmsg_pop(msg));
            zmsg_pushstr(out, "VALUE");
            zmsg_send(&out, lineout);
        }
        free(channel);
        zmsg_destroy(&msg);
    }
    free(config);
}
//  CZMQ zsocket selftest: exercises socket creation, bind/connect with
//  format strings, dynamic port binding, plain and zero-copy frame sends.
//  Returns 0 on success (failures abort via assert).
int zsocket_test (bool verbose)
{
    printf (" * zsocket: ");

    //  @selftest
    zctx_t *ctx = zctx_new ();
    assert (ctx);

    //  Create a detached thread, let it run
    char *interf = "*";
    char *domain = "localhost";
    int service = 5560;

    void *writer = zsocket_new (ctx, ZMQ_PUSH);
    assert (writer);
    void *reader = zsocket_new (ctx, ZMQ_PULL);
    assert (reader);
    assert (streq (zsocket_type_str (writer), "PUSH"));
    assert (streq (zsocket_type_str (reader), "PULL"));
    //  zsocket_bind returns the bound port number for TCP endpoints
    int rc = zsocket_bind (writer, "tcp://%s:%d", interf, service);
    assert (rc == service);
    rc = zsocket_connect (reader, "tcp://%s:%d", domain, service);
    assert (rc == 0);
    zstr_send (writer, "HELLO");
    char *message = zstr_recv (reader);
    assert (message);
    assert (streq (message, "HELLO"));
    free (message);

    //  Test binding to ephemeral ports - port must fall in the dynamic range
    int port = zsocket_bind (writer, "tcp://%s:*", interf);
    assert (port >= ZSOCKET_DYNFROM && port <= ZSOCKET_DYNTO);

    assert (zsocket_poll (writer, 100) == false);

    //  "txp" is a deliberate typo: an invalid transport must fail with -1
    rc = zsocket_connect (reader, "txp://%s:%d", domain, service);
    assert (rc == -1);

    //  Test sending frames to socket
    rc = zsocket_sendmem (writer,"ABC", 3, ZFRAME_MORE);
    assert (rc == 0);
    rc = zsocket_sendmem (writer, "DEFG", 4, 0);
    assert (rc == 0);

    zframe_t *frame = zframe_recv (reader);
    assert (frame);
    assert (zframe_streq (frame, "ABC"));
    assert (zframe_more (frame));
    zframe_destroy (&frame);

    frame = zframe_recv (reader);
    assert (frame);
    assert (zframe_streq (frame, "DEFG"));
    assert (!zframe_more (frame));
    zframe_destroy (&frame);

    //  Test zframe_sendmem_zero_copy - the callback frees the strdup'd data
    rc = zsocket_sendmem_zero_copy (writer, strdup ("ABC"), 3,
                                    s_test_free_str_cb, NULL, ZFRAME_MORE);
    assert (rc == 0);
    rc = zsocket_sendmem_zero_copy (writer, strdup ("DEFG"), 4,
                                    s_test_free_str_cb, NULL, 0);
    assert (rc == 0);

    frame = zframe_recv (reader);
    assert (frame);
    assert (zframe_streq (frame, "ABC"));
    assert (zframe_more (frame));
    zframe_destroy (&frame);

    frame = zframe_recv (reader);
    assert (frame);
    assert (zframe_streq (frame, "DEFG"));
    assert (!zframe_more (frame));
    zframe_destroy (&frame);

    zsocket_destroy (ctx, writer);
    zctx_destroy (&ctx);
    //  @end

    printf ("OK\n");
    return 0;
}
//  ID-server entry point: parses -m (machine number 0..15), -b (bind
//  address) and -p (port) options, then runs a ROUTER/DEALER proxy in
//  front of NUMBER_OF_WORKERS worker threads connected over inproc.
int main(int argc, char** argv)
{
    long mashine_number = -1;
    char *bind_address = NULL;
    long port = -1;
    int opt;
    char *endptr;

    /* parse command line options */
    while ((opt = getopt(argc, argv, "m:b:p:h")) != -1) {
        switch (opt) {
            case 'm':
                /* strtol with full validation: no trailing junk,
                   range restricted to 0..15 */
                errno = 0;
                mashine_number = strtol(optarg, &endptr, 10);
                if ((errno != 0 && mashine_number == 0) ||
                    (*endptr != '\0') ||
                    (mashine_number < 0 || mashine_number > 15)) {
                    fprintf(stderr, "invalid mashine number\n");
                    return EXIT_FAILURE;
                }
                break;
            case 'b':
                /* cap the copied address at 45 chars (fits IPv6 text form) */
                bind_address = strndup(optarg, 45);
                break;
            case 'p':
                errno = 0;
                port = strtol(optarg, &endptr, 10);
                if ((errno != 0) ||
                    (*endptr != '\0') ||
                    (port <= 0 || port > 65535)) {
                    fprintf(stderr, "invalid port number\n");
                    return EXIT_FAILURE;
                }
                break;
            case 'h':
                print_usage(stdout);
                return EXIT_SUCCESS;
            case '?':
                print_usage(stderr);
                return EXIT_FAILURE;
            default:
                return EXIT_FAILURE;
        }
    }
    /* -m is mandatory */
    if (mashine_number == -1) {
        print_usage(stderr);
        return EXIT_FAILURE;
    }
    if (bind_address == NULL) {
        fprintf(stdout, "No bind address specified. Using default address: %s\n", BIND_ADDRESS);
        bind_address = BIND_ADDRESS;
    }
    if (port == -1) {
        fprintf(stdout, "No port number specified. Using default port number: %d\n", PORT);
        port = PORT;
    }

    /* create context object */
    zctx_t *ctx = zctx_new();

    /* create socket objects */
    void *server = zsocket_new(ctx, ZMQ_ROUTER);
    void *dispatcher = zsocket_new(ctx, ZMQ_DEALER);

    /* bind server/dispatcher socket */
    zsocket_bind(server, "tcp://%s:%ld", bind_address, port);
    zsocket_bind(dispatcher, "inproc://zid");

    /* create worker threads; each worker frees its own setting_t */
    setting_t *s;
    int i;
    for (i = 0; i < NUMBER_OF_WORKERS; i++) {
        s = (setting_t *) malloc(sizeof(setting_t));
        s->mashine_number = mashine_number;
        s->thread_number = i;
        zthread_fork(ctx, worker, s);
    }

    /* zmq_proxy runs in current thread and blocks until the context
       is terminated */
    zmq_proxy(server, dispatcher, NULL);

    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
//  ROUTER receive benchmark: binds a ROUTER socket, spawns a client
//  thread, then receives 'iter' messages with a per-message alarm()
//  timeout.  Last command-line argument is the iteration count.
int main (int argc, char *argv[])
{
    int rc;
    zctx_t *zctx;
    void *zs;
    pthread_t tid;
    pthread_attr_t attr;
    zmsg_t *zmsg;
    int i, ch;
    const char *uritmpl = "ipc://*";    //  default transport; -t switches to TCP
    int timeout_sec = 1;

    log_init (basename (argv[0]));

    while ((ch = getopt_long (argc, argv, OPTIONS, longopts, NULL)) != -1) {
        switch (ch) {
            case 'r':   /* raw mode */
                raw = true;
                break;
            case 'l':   /* linger (ms) */
                lopt = true;
                linger = strtol (optarg, NULL, 10);
                break;
            case 'i':   /* immediate flag */
                iopt = true;
                imm = strtol (optarg, NULL, 10);
                break;
            case 't':   /* use TCP with an ephemeral port */
                uritmpl = "tcp://*:*";
                break;
            case 'T':   /* per-message receive timeout, seconds */
                timeout_sec = strtoul (optarg, NULL, 10);
                break;
            case 's':   /* message payload size */
                bufsize = strtoul (optarg, NULL, 10);
                break;
            case 'v':   /* verbose */
                vopt = true;
                break;
            case 'S':   /* sender sleep, microseconds */
                sleep_usec = strtoul (optarg, NULL, 10);;
                break;
            default:
                usage ();
                /*NOTREACHED*/
        }
    }
    if (optind != argc - 1)
        usage ();
    iter = strtoul (argv[optind++], NULL, 10);

    /* Create socket and bind to it.
     * Store uri in global variable.
     */
    if (!(zctx = zctx_new ()))
        log_err_exit ("S: zctx_new");
    if (!(zs = zsocket_new (zctx, ZMQ_ROUTER)))
        log_err_exit ("S: zsocket_new");
    zsocket_set_rcvhwm (zs, 0); /* unlimited */
    if (zsocket_bind (zs, "%s", uritmpl) < 0)
        log_err_exit ("S: zsocket_bind");
    uri = zsocket_last_endpoint (zs);   /* resolves the wildcard endpoint */

    /* Spawn thread which will be our client. */
    if ((rc = pthread_attr_init (&attr)))
        log_errn (rc, "S: pthread_attr_init");
    if ((rc = pthread_create (&tid, &attr, thread, NULL)))
        log_errn (rc, "S: pthread_create");

    /* Handle 'iter' client messages with timeout; alarm(0) cancels the
     * pending SIGALRM once the message arrived */
    for (i = 0; i < iter; i++) {
        alarm (timeout_sec);
        if (!(zmsg = zmsg_recv (zs)))
            log_err_exit ("S: zmsg_recv");
        zmsg_destroy (&zmsg);
        alarm (0);
        if (vopt)
            log_msg ("received message %d of %d", i + 1, iter);
    }

    /* Wait for thread to terminate, then clean up. */
    if ((rc = pthread_join (tid, NULL)))
        log_errn (rc, "S: pthread_join");
    zctx_destroy (&zctx); /* destroys sockets too */
    /* IPC endpoints leave a filesystem node behind; remove it */
    if (strstr (uri, "ipc://"))
        (void)unlink (uri);
    log_fini ();
    return 0;
}
//  XPUB throughput sender: binds an XPUB socket, waits for one subscriber
//  to connect (XPUB delivers subscription messages upstream), then sends
//  <roundtrip-count> messages of <message-size> bytes.  Each payload is
//  stamped with its sequence number so the receiver can detect drops.
int main (int argc, char *argv [])
{
    if (argc != 4) {
        printf ("usage: %s <bind-to> <message-size> "
                "<roundtrip-count>\n", argv[0]);
        return 1;
    }
    char* bind_to = argv [1];
    long message_size = atoi (argv [2]);
    long roundtrip_count = atoi (argv [3]);

    //  The sequence-number stamp needs enough room in the payload
    char check_dropped_packets = 1;
    if(message_size < GetNumberOfDigits(roundtrip_count)) {
        printf("CAUTION: Message size too small to check for dropped packets\r\n");
        check_dropped_packets = 0;
    }

    zctx_t *context = zctx_new ();
    void *publisher = zsocket_new (context, ZMQ_XPUB);
    zctx_set_linger(context, 1000);     //  give pending sends 1s at shutdown
    zsocket_set_sndhwm(publisher, 1000);
    int hwm = zsocket_sndhwm(publisher);
    printf("HMW=%d\r\n", hwm);

#ifdef ZMQ_PUB_RELIABLE
    // set PUB_RELIABLE (only on libzmq builds that offer the option)
    int pub_reliable = 1;
    int rc = zmq_setsockopt(publisher, ZMQ_PUB_RELIABLE, &pub_reliable, sizeof(pub_reliable));
    if (rc != 0) {
        printf ("error in zmq_setsockopt (ZMQ_PUB_RELIABLE): %s\n", zmq_strerror (errno));
        return -1;
    }
#endif

    printf("Connecting to %s\r\n", bind_to);
    zsocket_bind (publisher, bind_to);

    //  Wait for sub connection - XPUB receives the subscription message
    printf("Waiting for subscriber.\r\n");
    zmsg_t* connection = zmsg_recv(publisher);
    zmsg_destroy(&connection);
    printf("Subscriber connected!\r\n");

    int i = 0;
    while (i<roundtrip_count && !zctx_interrupted) {
        void* data = malloc(message_size);
        bzero(data, message_size);
        if(check_dropped_packets) {
            //  Stamp the sequence number at the front of the payload
            sprintf(data, "%d", i);
        }
        zmsg_t* msg = zmsg_new();
        //  NOTE(review): CZMQ's zmsg_addstr takes a printf-style format,
        //  so the trailing 4 here is an unused variadic argument for the
        //  literal format "TEST" - harmless, but confirm zmsg_addmem was
        //  not intended.
        zmsg_addstr(msg, "TEST", 4);
        zmsg_addmem(msg, data, message_size);
        zmsg_send (&msg, publisher);
        i++;
        free(data);
    }

    zctx_destroy (&context);
    return 0;
}
//  Paranoid Pirate queue: brokers between clients (frontend) and workers
//  (backend), tracking worker liveness with heartbeats and dispatching
//  requests to the least recently used worker.
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "tcp://*:5555");    //  For clients
    zsocket_bind (backend,  "tcp://*:5556");    //  For workers

    //  List of available workers
    zlist_t *workers = zlist_new ();

    //  Send out heartbeats at regular intervals
    uint64_t heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;

    while (true) {
        zmq_pollitem_t items [] = {
            { backend,  0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1,
            HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            //  Use worker identity for load-balancing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          //  Interrupted

            //  Any sign of life from worker means it's ready; requeue it
            zframe_t *identity = zmsg_unwrap (msg);
            worker_t *worker = s_worker_new (identity);
            s_worker_ready (worker, workers);

            //  Validate control message, or return reply to client.
            //  A one-frame body must be READY or HEARTBEAT; anything
            //  longer is a client reply still carrying its envelope.
            if (zmsg_size (msg) == 1) {
                zframe_t *frame = zmsg_first (msg);
                if (memcmp (zframe_data (frame), PPP_READY, 1)
                &&  memcmp (zframe_data (frame), PPP_HEARTBEAT, 1)) {
                    printf ("E: invalid message from worker");
                    zmsg_dump (msg);
                }
                zmsg_destroy (&msg);
            }
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Now get next client request, route to next worker
            zmsg_t *msg = zmsg_recv (frontend);
            if (!msg)
                break;          //  Interrupted
            zmsg_push (msg, s_workers_next (workers));
            zmsg_send (&msg, backend);
        }
        //  .split handle heartbeating
        //  We handle heartbeating after any socket activity.  First, we
        //  send heartbeats to any idle workers if it's time.  Then, we
        //  purge any dead workers:

        if (zclock_time () >= heartbeat_at) {
            worker_t *worker = (worker_t *) zlist_first (workers);
            while (worker) {
                //  REUSE keeps the identity frame owned by the worker
                //  record while it is sent as the address envelope
                zframe_send (&worker->identity, backend,
                             ZFRAME_REUSE + ZFRAME_MORE);
                zframe_t *frame = zframe_new (PPP_HEARTBEAT, 1);
                zframe_send (&frame, backend, 0);
                worker = (worker_t *) zlist_next (workers);
            }
            heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;
        }
        s_workers_purge (workers);
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        worker_t *worker = (worker_t *) zlist_pop (workers);
        s_worker_destroy (&worker);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
}
int main (int argc, char *argv[]) { if (argc != 4) { exit (-1); } int numb_msgs = atoi (argv[2]); int numb_unimp_threads = atoi (argv[3]); zctx_t *ctx = zctx_new (); void *router_imp = zsocket_new (ctx, ZMQ_ROUTER); zsocket_set_rcvhwm (router_imp, 500000000); zsocket_bind (router_imp, "%s:9000", argv[1]); void *router_unimp = zsocket_new (ctx, ZMQ_ROUTER); zsocket_set_rcvhwm (router_unimp, 500000000); zsocket_bind (router_unimp, "%s:9001", argv[1]); void *pub = zsocket_new (ctx, ZMQ_PUB); zsocket_bind (pub, "%s:9002", argv[1]); int64_t time[3]; int64_t idle=0; int64_t diff; zclock_sleep (1000); //send the signal to start zmsg_t *amsg = zmsg_new (); zmsg_add (amsg, zframe_new ("all", 4)); zmsg_send (&amsg, pub); zmq_pollitem_t pollitem[2] = { {router_imp, 0, ZMQ_POLLIN} , {router_unimp, 0, ZMQ_POLLIN} }; unsigned long av_dropped_priority=0; unsigned long dropped=0; unsigned long av_processed_priority=0; unsigned long processed=0; int i; int imp_counter = 0; int once = 1; int ndiffs = 0; unsigned char drop_priority = 0; for (i = 0; i < 2 * numb_msgs; i++) { diff = zclock_time (); if (zmq_poll (pollitem, 2, -1) == -1) { exit (-1); } diff = zclock_time () - diff; idle = idle + diff; update_drop_rate(&diff,&drop_priority,&ndiffs); if (pollitem[0].revents & ZMQ_POLLIN) { zmsg_t *msg = zmsg_recv (router_imp); if (!msg) { exit (-1); } zmsg_destroy (&msg); imp_counter++; if (imp_counter == numb_msgs) { time[0] = zclock_time (); } } else { if (pollitem[1].revents & ZMQ_POLLIN) { if (once) { time[1] = zclock_time (); once = 0; } zmsg_t *msg = zmsg_recv (router_unimp); if (!msg) { exit (-1); } zframe_t *frame = zmsg_unwrap (msg); zframe_destroy (&frame); unsigned char priority; memcpy (&priority, zframe_data (zmsg_first (msg)), 1); if (priority < drop_priority) { av_dropped_priority+=priority; dropped++; // printf ("dropped:%u\n", priority); zmsg_destroy (&msg); } else { av_processed_priority+=priority; processed++; // printf ("received:%u\n", priority); zmsg_destroy (&msg); } 
} } } time[2] = zclock_time (); printf ("msgs received:%d\n", i); amsg = zmsg_new (); zmsg_add (amsg, zframe_new ("all", 4)); zmsg_send (&amsg, pub); zmsg_t *msg = zmsg_recv (router_imp); zframe_t *frame = zmsg_unwrap (msg); zframe_destroy (&frame); int64_t time_imp[2]; frame = zmsg_pop (msg); memcpy (time_imp, zframe_data (frame), zframe_size (frame)); zframe_destroy (&frame); zmsg_destroy (&msg); int64_t time_unimp[numb_unimp_threads][2]; for(i=0; i<numb_unimp_threads; i++){ msg = zmsg_recv (router_unimp); frame = zmsg_unwrap (msg); zframe_destroy (&frame); frame = zmsg_pop (msg); memcpy (time_unimp[i], zframe_data (frame), zframe_size (frame)); zframe_destroy (&frame); zmsg_destroy (&msg); } //compute average latency printf ("\nTime when important msgs started to be sent: %lld\n", time_imp[0]); printf ("\nTime when important msgs were processed: %lld\n", time[0]); printf ("\nDifference: %lld\n", time[0] - time_imp[0]); for(i=0; i<numb_unimp_threads; i++){ printf ("\nTime when unimportant msgs started to be sent: %lld\n", time_unimp[i][0]); printf ("\nTime when unimportant msgs were processed: %lld\n", time[2]); printf ("\nDifference: %lld\n", time[2] - time_unimp[i][0]); } printf ("\nTime when unimportant msgs started to be processed: %lld\n", time[1]); printf ("idle time:%llu\n",idle); printf ("dropped msgs:%llu\n",dropped); printf ("av_dropped_priority:%llu\n",av_dropped_priority/dropped); printf ("processed msgs:%llu\n",processed); printf ("av_processed_priority:%llu\n",av_processed_priority/processed); }
int main (void) { // initialize logging setlogmask(LOG_UPTO(LOG_DEBUG)); openlog(PROGRAM_NAME, LOG_CONS | LOG_PID | LOG_PERROR, LOG_USER); syslog(LOG_INFO, "broker starting up"); zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); void *backend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, CLIENT_ENDPOINT); zsocket_bind (backend, WORKER_ENDPOINT); uint32_t frx = 0, ftx = 0, brx = 0, btx = 0, nworkers = 0, npoll = 0; // Queue of available workers zlist_t *workers = zlist_new (); while (true) { if (++npoll % 1000 == 0) syslog(LOG_INFO, "broker: frx %04d ftx %04d brx %04d btx %04d / %d workers\n", frx, ftx, brx, btx, nworkers); zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers uint32_t rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Use worker identity for load-balancing zmsg_t *msg = zmsg_recv (backend); if (!msg) break; // Interrupted zframe_t *identity = zmsg_unwrap (msg); zlist_append (workers, identity); // Forward message to client if it's not a READY zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0) { zmsg_destroy (&msg); nworkers++; } else { brx++; zmsg_send (&msg, frontend); ftx++; } } if (items [1].revents & ZMQ_POLLIN) { // Get client request, route to first available worker zmsg_t *msg = zmsg_recv (frontend); frx++; if (msg) { zmsg_wrap (msg, (zframe_t *) zlist_pop (workers)); zmsg_send (&msg, backend); btx++; } } } // When we're done, clean up properly syslog(LOG_INFO, "broker terminating"); while (zlist_size (workers)) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zframe_destroy (&frame); } zlist_destroy (&workers); zctx_destroy (&ctx); return 0; }
int main (int argc, char *argv []) { clonesrv_t *self = (clonesrv_t *) zmalloc (sizeof (clonesrv_t)); if (argc == 2 && streq (argv [1], "-p")) { zclock_log ("I: primary master, waiting for backup (slave)"); self->bstar = bstar_new (BSTAR_PRIMARY, "tcp://*:5003", "tcp://localhost:5004"); bstar_voter (self->bstar, "tcp://*:5556", ZMQ_ROUTER, s_snapshots, self); self->port = 5556; self->peer = 5566; self->primary = TRUE; } else if (argc == 2 && streq (argv [1], "-b")) { zclock_log ("I: backup slave, waiting for primary (master)"); self->bstar = bstar_new (BSTAR_BACKUP, "tcp://*:5004", "tcp://localhost:5003"); bstar_voter (self->bstar, "tcp://*:5566", ZMQ_ROUTER, s_snapshots, self); self->port = 5566; self->peer = 5556; self->primary = FALSE; } else { printf ("Usage: clonesrv4 { -p | -b }\n"); free (self); exit (0); } // Primary server will become first master if (self->primary) self->kvmap = zhash_new (); self->ctx = zctx_new (); self->pending = zlist_new (); bstar_set_verbose (self->bstar, TRUE); // Set up our clone server sockets self->publisher = zsocket_new (self->ctx, ZMQ_PUB); self->collector = zsocket_new (self->ctx, ZMQ_SUB); zsockopt_set_subscribe (self->collector, ""); zsocket_bind (self->publisher, "tcp://*:%d", self->port + 1); zsocket_bind (self->collector, "tcp://*:%d", self->port + 2); // Set up our own clone client interface to peer self->subscriber = zsocket_new (self->ctx, ZMQ_SUB); zsockopt_set_subscribe (self->subscriber, ""); zsocket_connect (self->subscriber, "tcp://localhost:%d", self->peer + 1); // .split main task body // After we've set-up our sockets we register our binary star // event handlers, and then start the bstar reactor. 
This finishes // when the user presses Ctrl-C, or the process receives a SIGINT // interrupt: // Register state change handlers bstar_new_master (self->bstar, s_new_master, self); bstar_new_slave (self->bstar, s_new_slave, self); // Register our other handlers with the bstar reactor zmq_pollitem_t poller = { self->collector, 0, ZMQ_POLLIN }; zloop_poller (bstar_zloop (self->bstar), &poller, s_collector, self); zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_flush_ttl, self); zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_send_hugz, self); // Start the Bstar reactor bstar_start (self->bstar); // Interrupted, so shut down while (zlist_size (self->pending)) { kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending); kvmsg_destroy (&kvmsg); } zlist_destroy (&self->pending); bstar_destroy (&self->bstar); zhash_destroy (&self->kvmap); zctx_destroy (&self->ctx); free (self); return 0; }
int main (void) { zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); void *backend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, "ipc://frontend.ipc"); zsocket_bind (backend, "ipc://backend.ipc"); int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) zthread_new (client_task, NULL); int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) zthread_new (worker_task, NULL); // Queue of available workers zlist_t *workers = zlist_new (); // Here is the main loop for the load balancer. It works the same way // as the previous example, but is a lot shorter because CZMQ gives // us an API that does more with fewer calls: while (true) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Use worker identity for load-balancing zmsg_t *msg = zmsg_recv (backend); if (!msg) break; // Interrupted zframe_t *identity = zmsg_unwrap (msg); zlist_append (workers, identity); // Forward message to client if it's not a READY zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0) zmsg_destroy (&msg); else zmsg_send (&msg, frontend); } if (items [1].revents & ZMQ_POLLIN) { // Get client request, route to first available worker zmsg_t *msg = zmsg_recv (frontend); if (msg) { zmsg_wrap (msg, (zframe_t *) zlist_pop (workers)); zmsg_send (&msg, backend); } } } // When we're done, clean up properly while (zlist_size (workers)) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zframe_destroy (&frame); } zlist_destroy (&workers); zctx_destroy (&ctx); return 0; }
int main(void) {
    // Miner entry point: reads config.txt, connects to the pool frontend
    // over ZeroMQ, then runs a zloop dispatching server replies, signals,
    // timer ticks and worker shares until interrupted.
    gBlock.set_height(0);
    gClientName = sysinfo::GetClientName();
    gClientID = sysinfo::GetClientID();
    // Instance ID mixes the client ID with wall-clock time; it also seeds
    // the C PRNG so each run differs.
    gInstanceID = gClientID * (unsigned)time(0);
    srand(gInstanceID);
    std::string frontHost;
    unsigned frontPort;
    // Configuration / ConfigurationException come from a project config
    // library (presumably config4cpp, given the parse/lookup API) — not
    // visible here; TODO confirm.
    Configuration* cfg = Configuration::create();
    try{
        cfg->parse("config.txt");
        frontHost = cfg->lookupString("", "server", "localhost");
        frontPort = cfg->lookupInt("", "port", 6666);
        gAddr = cfg->lookupString("", "address", "");
        gClientName = cfg->lookupString("", "name", gClientName.c_str());
    }catch(const ConfigurationException& ex){
        // Config parse failure: report, wait for Enter (keeps a console
        // window open), then exit with failure status.
        printf("ERROR: %s\n", ex.c_str());
        printf("hit return to exit...\n");
        std::string line;
        std::getline(std::cin, line);
        exit(EXIT_FAILURE);
    }
    // Fall back to the host-derived name if config left it empty
    if(!gClientName.size())
        gClientName = sysinfo::GetClientName();
    printf("madPrimeMiner-v%d.%d\n", gClientVersion/10, gClientVersion%10);
    printf("ClientName = '%s' ClientID = %u InstanceID = %u\n",
        gClientName.c_str(), gClientID, gInstanceID);
    printf("Address = '%s'\n", gAddr.c_str());
    // A payout address is mandatory; refuse to start without one
    if(!gAddr.size()){
        printf("ERROR: address not specified in config.txt\n");
        printf("hit return to exit...\n");
        std::string line;
        std::getline(std::cin, line);
        exit(EXIT_FAILURE);
    }
    // PULL socket collecting shares from worker threads in-process
    gCtx = zctx_new();
    gWorkers = zsocket_new(gCtx, ZMQ_PULL);
    zsocket_bind(gWorkers, "inproc://shares");
    // XPMClient is the GPU mining engine (project type); a failed
    // Initialize aborts before the connect loop
    gClient = new XPMClient(gCtx);
    gExit = !gClient->Initialize(cfg);
    // Outer loop: one iteration per (re)connection to the frontend
    while(!gExit){
        printf("Connecting to frontend: %s:%d ...\n", frontHost.c_str(), frontPort);
        gBlock.Clear();
        proto::Reply rep;
        // gExit doubles as the inner loop's "not yet connected" flag:
        // it is set true here and cleared only on a fully successful
        // handshake; if the inner loop exits with it still true, we quit.
        gExit = true;
        while(gExit){
            // Recreate the DEALER socket on every attempt (destroy is a
            // no-op on the first pass when gFrontend is still null)
            zsocket_destroy(gCtx, gFrontend);
            gFrontend = zsocket_new(gCtx, ZMQ_DEALER);
            int err = zsocket_connect(gFrontend, "tcp://%s:%d", frontHost.c_str(), frontPort);
            if(err){
                printf("ERROR: invalid hostname and/or port.\n");
                exit(EXIT_FAILURE);
            }
            // Send a CONNECT request carrying protocol version and a
            // fresh request ID/nonce
            proto::Request req;
            req.set_type(proto::Request::CONNECT);
            req.set_reqid(++gNextReqID);
            req.set_version(gClientVersion);
            req.set_height(0);
            GetNewReqNonce(req);
            Send(req, gFrontend);
            // Wait up to 3 seconds for the server's reply
            bool ready = zsocket_poll(gFrontend, 3*1000);
            if(zctx_interrupted)
                break;
            if(!ready)
                continue;   // timed out: retry with a fresh socket
            Receive(rep, gFrontend);
            if(rep.error() != proto::Reply::NONE){
                printf("ERROR: %s\n", proto::Reply::ErrType_Name(rep.error()).c_str());
                if(rep.has_errstr())
                    printf("Message from server: %s\n", rep.errstr().c_str());
            }
            // No server info means the handshake failed; give up on this
            // connection attempt
            if(!rep.has_sinfo())
                break;
            gServerInfo = rep.sinfo();
            // Bring up the auxiliary connections; any failure aborts
            bool ret = false;
            ret |= !ConnectBitcoin();
            ret |= !ConnectSignals();
            if(ret)
                break;
            gExit = false;  // handshake complete
        }
        zsocket_disconnect(gFrontend, "tcp://%s:%d", frontHost.c_str(), frontPort);
        if(gExit)
            break;
        // Build the reactor: server replies, signal messages, worker
        // shares, plus a 60-second housekeeping timer
        zloop_t* wloop = zloop_new();
        zmq_pollitem_t item_server = {gServer, 0, ZMQ_POLLIN, 0};
        int err = zloop_poller(wloop, &item_server, &HandleReply, 0);
        assert(!err);
        zmq_pollitem_t item_signals = {gSignals, 0, ZMQ_POLLIN, 0};
        err = zloop_poller(wloop, &item_signals, &HandleSignal, 0);
        assert(!err);
        zmq_pollitem_t item_workers = {gWorkers, 0, ZMQ_POLLIN, 0};
        err = zloop_poller(wloop, &item_workers, &HandleWorkers, 0);
        assert(!err);
        err = zloop_timer(wloop, 60*1000, 0, &HandleTimer, 0);
        assert(err >= 0);
        gHeartBeat = true;
        // Re-arm gExit: unless a handler clears it, leaving the reactor
        // ends the program rather than reconnecting
        gExit = true;
        // Start mining from the block in the handshake reply, or ask for
        // work explicitly if none was included
        if(rep.has_block())
            HandleNewBlock(rep.block());
        else
            RequestWork();
        // Toggle() presumably starts/stops the GPU engine around the
        // reactor run — TODO confirm against XPMClient
        gClient->Toggle();
        zloop_start(wloop);
        gClient->Toggle();
        zloop_destroy(&wloop);
        // Drop per-connection sockets before the next reconnect attempt
        zsocket_destroy(gCtx, gServer);
        zsocket_destroy(gCtx, gSignals);
        gServer = 0;
        gSignals = 0;
    }
    // Final cleanup of the engine, remaining sockets and the context
    delete gClient;
    zsocket_destroy(gCtx, gWorkers);
    zsocket_destroy(gCtx, gFrontend);
    zctx_destroy(&gCtx);
    return EXIT_SUCCESS;
}