bstar_t * bstar_new (int primary, char *local, char *remote) { bstar_t *self; self = (bstar_t *) zmalloc (sizeof (bstar_t)); // Initialize the Binary Star self->ctx = zctx_new (); self->loop = zloop_new (); self->state = primary? STATE_PRIMARY: STATE_BACKUP; // Create publisher for state going to peer self->statepub = zsocket_new (self->ctx, ZMQ_PUB); zsocket_bind (self->statepub, local); // Create subscriber for state coming from peer self->statesub = zsocket_new (self->ctx, ZMQ_SUB); zsockopt_set_subscribe (self->statesub, ""); zsocket_connect (self->statesub, remote); // Set-up basic reactor events zloop_timer (self->loop, BSTAR_HEARTBEAT, 0, s_send_state, self); zmq_pollitem_t poller = { self->statesub, 0, ZMQ_POLLIN }; zloop_poller (self->loop, &poller, s_recv_state, self); return self; }
//  Create a new socket of the given type within this context.
//  SUB sockets come back subscribed to everything, so callers
//  receive messages by default until they narrow the filter.
void *
zsocket_new (zctx_t *ctx, int type)
{
    void *socket = zctx__socket_new (ctx, type);
    if (type != ZMQ_SUB)
        return socket;
    zsockopt_set_subscribe (socket, "");
    return socket;
}
int main (int argc, char *argv []) { // First argument is this broker's name // Other arguments are our peers' names // if (argc < 2) { printf ("syntax: peering1 me {you}...\n"); exit (EXIT_FAILURE); } char *self = argv [1]; printf ("I: preparing broker at %s...\n", self); srandom ((unsigned) time (NULL)); zctx_t *ctx = zctx_new (); // Bind state backend to endpoint void *statebe = zsocket_new (ctx, ZMQ_PUB); zsocket_bind (statebe, "ipc://%s-state.ipc", self); // Connect statefe to all peers void *statefe = zsocket_new (ctx, ZMQ_SUB); zsockopt_set_subscribe (statefe, ""); int argn; for (argn = 2; argn < argc; argn++) { char *peer = argv [argn]; printf ("I: connecting to state backend at '%s'\n", peer); zsocket_connect (statefe, "ipc://%s-state.ipc", peer); } // .split main loop // The main loop sends out status messages to peers, and collects // status messages back from peers. The zmq_poll timeout defines // our own heartbeat: while (1) { // Poll for activity, or 1 second timeout zmq_pollitem_t items [] = { { statefe, 0, ZMQ_POLLIN, 0 } }; int rc = zmq_poll (items, 1, 1000 * ZMQ_POLL_MSEC); if (rc == -1) break; // Interrupted // Handle incoming status messages if (items [0].revents & ZMQ_POLLIN) { char *peer_name = zstr_recv (statefe); char *available = zstr_recv (statefe); printf ("%s - %s workers free\n", peer_name, available); free (peer_name); free (available); } else { // Send random values for worker availability zstr_sendm (statebe, self); zstr_sendf (statebe, "%d", randof (10)); } } zctx_destroy (&ctx); return EXIT_SUCCESS; }
//  Child thread: subscribe to topics "A" and "B" on the local
//  publisher, consume five messages, then clean up and exit.
static void
subscriber_thread (void *args, zctx_t *ctx, void *pipe)
{
    void *subscriber = zsocket_new (ctx, ZMQ_SUB);
    zsocket_connect (subscriber, "tcp://localhost:6001");
    zsockopt_set_subscribe (subscriber, "A");
    zsockopt_set_subscribe (subscriber, "B");

    int received = 0;
    while (received < 5) {
        char *string = zstr_recv (subscriber);
        if (string == NULL)
            break;              //  Interrupted
        free (string);
        received++;
    }
    zsocket_destroy (ctx, subscriber);
}
int main (int argc, char *argv []) { // Arguments can be either of: // -p primary server, at tcp://localhost:5001 // -b backup server, at tcp://localhost:5002 zctx_t *ctx = zctx_new (); void *statepub = zsocket_new (ctx, ZMQ_PUB); void *statesub = zsocket_new (ctx, ZMQ_SUB); zsockopt_set_subscribe (statesub, ""); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); bstar_t fsm = { 0 }; if (argc == 2 && streq (argv [1], "-p")) { printf ("I: Primary master, waiting for backup (slave)\n"); zsocket_bind (frontend, "tcp://*:5001"); zsocket_bind (statepub, "tcp://*:5003"); zsocket_connect (statesub, "tcp://localhost:5004"); fsm.state = STATE_PRIMARY; } else if (argc == 2 && streq (argv [1], "-b")) { printf ("I: Backup slave, waiting for primary (master)\n"); zsocket_bind (frontend, "tcp://*:5002"); zsocket_bind (statepub, "tcp://*:5004"); zsocket_connect (statesub, "tcp://localhost:5003"); fsm.state = STATE_BACKUP; } else { printf ("Usage: bstarsrv { -p | -b }\n"); zctx_destroy (&ctx); exit (0); } // Set timer for next outgoing state message int64_t send_state_at = zclock_time () + HEARTBEAT; while (!zctx_interrupted) { zmq_pollitem_t items [] = { { frontend, 0, ZMQ_POLLIN, 0 }, { statesub, 0, ZMQ_POLLIN, 0 } }; int time_left = (int) ((send_state_at - zclock_time ())); if (time_left < 0) time_left = 0; int rc = zmq_poll (items, 2, time_left * ZMQ_POLL_MSEC); if (rc == -1) break; // Context has been shut down if (items [0].revents & ZMQ_POLLIN) { // Have a client request zmsg_t *msg = zmsg_recv (frontend); fsm.event = CLIENT_REQUEST; if (s_state_machine (&fsm) == FALSE) // Answer client by echoing request back zmsg_send (&msg, frontend); else zmsg_destroy (&msg); } if (items [1].revents & ZMQ_POLLIN) { // Have state from our peer, execute as event char *message = zstr_recv (statesub); fsm.event = atoi (message); free (message); if (s_state_machine (&fsm)) break; // Error, so exit fsm.peer_expiry = zclock_time () + 2 * HEARTBEAT; } // If we timed-out, send state to peer if 
(zclock_time () >= send_state_at) { char message [2]; sprintf (message, "%d", fsm.state); zstr_send (statepub, message); send_state_at = zclock_time () + HEARTBEAT; } } if (zctx_interrupted) printf ("W: interrupted\n"); // Shutdown sockets and context zctx_destroy (&ctx); return 0; }
//  Ephemeral port monitor thread: subscribes to one line's VALUE and
//  CLEAR_MONITORS topics and republishes values to config->out_socket
//  for up to 60 seconds, or until told to clear, or until the channel
//  changes from the one being watched.
void
watch_port (void *cvoid, zctx_t *context, void *pipe)
{
    zclock_log ("watch_port started!");
    monitorconfig_t *config = (monitorconfig_t *) cvoid;
    dump_monitorconfig (config);

    void *linein = zsocket_new (context, ZMQ_SUB);
    char *listen_socket = to_linesocket (config->line_id);
    char line_id [16];
    snprintf (line_id, 15, "%d", config->line_id);
    zsocket_connect (linein, listen_socket);
    zsockopt_set_unsubscribe (linein, "");
    zsockopt_set_subscribe (linein, "CLEAR_MONITORS");
    zsockopt_set_subscribe (linein, "VALUE");

    //  Have set up subscription, can signal parent that we're OK
    child_handshake (pipe);
    zsocket_destroy (context, pipe);    //  No longer require pipe

    void *lineout = zsocket_new (context, ZMQ_PUB);
    zsocket_connect (lineout, config->out_socket);

    time_t until = time (NULL) + 60;
    while (time (NULL) < until) {
        zmsg_t *msg = zmsg_recv (linein);
        if (!msg) {
            //  Interrupted: break (not return) so the socket cleanup
            //  below still runs — the original returned here and
            //  skipped it.
            zclock_log ("monitor quitting!");
            break;
        }
        zframe_t *cmd = zmsg_pop (msg);
        if (zframe_streq (cmd, "CLEAR_MONITORS")) {
            zclock_log ("ephemeral monitor quitting");
            zmsg_destroy (&msg);
            zframe_destroy (&cmd);
            break;
        }
        else if (zframe_streq (cmd, "VALUE")) {
            //  TODO perhaps some rate limiting necessary
            assert (zmsg_size (msg) == 2);
            zframe_t *value = zmsg_pop (msg);
            int res = *(int *) zframe_data (value);
            char *new_channel = zmsg_popstr (msg);
            if (strcmp (new_channel, config->channel) != 0) {
                zclock_log ("monitor on %d: listening for %s, channel changed to %s quitting",
                    config->line_id, config->channel, new_channel);
                free (new_channel);
                zframe_destroy (&value);
                zmsg_destroy (&msg);
                zframe_destroy (&cmd);
                break;
            }
            zmsg_t *to_send = zmsg_new ();
            char buf [1024];
            snprintf (buf, 1023, "%d", res);
            zmsg_pushstr (to_send, buf);
            zmsg_pushstr (to_send, line_id);
            zmsg_pushstr (to_send, config->source_worker);
            zclock_log ("%s sending line %s -> %s", config->source_worker, line_id, buf);
            zmsg_send (&to_send, lineout);
            //  zmsg_pop transferred ownership of the value frame to us
            //  and we never pushed it into to_send, so it must be
            //  destroyed here — the original's "owned by zmsg" comment
            //  was wrong and leaked a frame per message. Same for the
            //  popped channel string.
            zframe_destroy (&value);
            free (new_channel);
        }
        //  else ignore unknown commands
        zmsg_destroy (&msg);
        zframe_destroy (&cmd);
    }
    zclock_log ("monitor on %d: listening for %s, expiring naturally",
        config->line_id, config->channel);

    //  Cleanup.
    //  NOTE(review): listen_socket from to_linesocket is never freed —
    //  if to_linesocket heap-allocates, this leaks; confirm ownership.
    zsocket_destroy (context, linein);
    zsocket_destroy (context, lineout);
}
int main (void) { // Prepare our context and subscriber zctx_t *ctx = zctx_new (); void *snapshot = zsocket_new (ctx, ZMQ_DEALER); zsocket_connect (snapshot, "tcp://localhost:5556"); void *subscriber = zsocket_new (ctx, ZMQ_SUB); zsockopt_set_subscribe (subscriber, ""); zsocket_connect (subscriber, "tcp://localhost:5557"); void *publisher = zsocket_new (ctx, ZMQ_PUSH); zsocket_connect (publisher, "tcp://localhost:5558"); zhash_t *kvmap = zhash_new (); srandom ((unsigned) time (NULL)); // Get state snapshot int64_t sequence = 0; zstr_send (snapshot, "ICANHAZ?"); while (TRUE) { kvmsg_t *kvmsg = kvmsg_recv (snapshot); if (!kvmsg) break; // Interrupted if (streq (kvmsg_key (kvmsg), "KTHXBAI")) { sequence = kvmsg_sequence (kvmsg); printf ("I: received snapshot=%d\n", (int) sequence); kvmsg_destroy (&kvmsg); break; // Done } kvmsg_store (&kvmsg, kvmap); } int64_t alarm = zclock_time () + 1000; while (!zctx_interrupted) { zmq_pollitem_t items [] = { { subscriber, 0, ZMQ_POLLIN, 0 } }; int tickless = (int) ((alarm - zclock_time ())); if (tickless < 0) tickless = 0; int rc = zmq_poll (items, 1, tickless * ZMQ_POLL_MSEC); if (rc == -1) break; // Context has been shut down if (items [0].revents & ZMQ_POLLIN) { kvmsg_t *kvmsg = kvmsg_recv (subscriber); if (!kvmsg) break; // Interrupted // Discard out-of-sequence kvmsgs, incl. heartbeats if (kvmsg_sequence (kvmsg) > sequence) { sequence = kvmsg_sequence (kvmsg); kvmsg_store (&kvmsg, kvmap); printf ("I: received update=%d\n", (int) sequence); } else kvmsg_destroy (&kvmsg); } // If we timed-out, generate a random kvmsg if (zclock_time () >= alarm) { kvmsg_t *kvmsg = kvmsg_new (0); kvmsg_fmt_key (kvmsg, "%d", randof (10000)); kvmsg_fmt_body (kvmsg, "%d", randof (1000000)); kvmsg_send (kvmsg, publisher); kvmsg_destroy (&kvmsg); alarm = zclock_time () + 1000; } } printf (" Interrupted\n%d messages in\n", (int) sequence); zhash_destroy (&kvmap); zctx_destroy (&ctx); return 0; }
//  Trigger thread: watches one line's VALUE stream and fires the
//  configured trigger function against the target worker via a
//  majordomo client. Goes dormant when the line's channel changes
//  away from ours, and wakes again on CHANNEL_CHANGE back to ours.
void
trigger (void *cvoid, zctx_t *context, void *control)
{
    triggerconfig_t *c = (triggerconfig_t *) cvoid;

    //  Set up msgpack stuff
    zclock_log ("watch_port started!");
    msgpack_zone mempool;
    msgpack_zone_init (&mempool, 2048);

    //  TODO
    char *user_id = "17";
    //  TODO get broker in somehow
    char *broker = "tcp://au.ninjablocks.com:5773";
    mdcli_t *client = mdcli_new (broker, 1);    //  VERBOSE

    triggermemory_t trigger_memory;
    msgpack_object *addins_obj = parse_msgpack (&mempool, c->addins);
    if (!parse_addins (addins_obj, &trigger_memory)) {
        //  Bad message: release the client and zone before bailing
        //  out (the original leaked both on this path)
        zclock_log ("bad trigger definition");
        msgpack_object_print (stdout, *addins_obj);
        send_sync ("bad trigger", control);
        mdcli_destroy (&client);
        msgpack_zone_destroy (&mempool);
        return;
    }
    zclock_log ("Creating trigger: target %s, rule_id %s, name %s",
        c->target_worker, c->rule_id, c->trigger_name);
    dump_trigger (&trigger_memory);

    triggerfunction trigger_func;
    if (!(trigger_func = find_trigger (c->channel, c->trigger_name))) {
        zclock_log ("no trigger found for channel %s, trigger %s", c->channel, c->trigger_name);
        send_sync ("no such trigger", control);
        mdcli_destroy (&client);        //  Leaked in the original
        msgpack_zone_destroy (&mempool);
        return;
    }
    void *line = zsocket_new (context, ZMQ_SUB);
    //  What line are we on? This comes in the addins.
    char *linesocket = to_linesocket (trigger_memory.line_id);
    zclock_log ("trigger is connecting to listen on %s", linesocket);
    zsocket_connect (line, linesocket);
    zsockopt_set_unsubscribe (line, "");
    zsockopt_set_subscribe (line, "VALUE");
    recv_sync ("ping", control);
    send_sync ("pong", control);

    zmq_pollitem_t items [] = {
        { line, 0, ZMQ_POLLIN, 0 },
        { control, 0, ZMQ_POLLIN, 0 }
    };
    while (1) {
        //  Listen on control and line
        zmq_poll (items, 2, -1);

        if (items [1].revents & ZMQ_POLLIN) {
            zclock_log ("rule %s received message on control pipe", c->rule_id);
            //  Control message; really only expecting DESTROY
            zmsg_t *msg = zmsg_recv (control);
            char *str = zmsg_popstr (msg);
            zmsg_destroy (&msg);
            if (strcmp ("Destroy", str) == 0) {
                zclock_log ("rule %s will quit on request", c->rule_id);
                free (str);
                send_sync ("ok", control);
                zclock_log ("rule %s quitting on request", c->rule_id);
                break;
            }
            else {
                zclock_log ("unexpected command %s for rule %s", str, c->rule_id);
                free (str);
                send_sync ("ok", control);
            }
        }
        if (items [0].revents & ZMQ_POLLIN) {
            //  Serial update
            zmsg_t *msg = zmsg_recv (line);
            zframe_t *cmd = zmsg_pop (msg);
            if (zframe_streq (cmd, "CHANNEL_CHANGE")) {
                //  Must have been dormant to have gotten this
                char *new_channel = zmsg_popstr (msg);
                if (strcmp (c->channel, new_channel) == 0) {
                    //  We're relevant again: reactivate and start
                    //  looking at reset levels
                    zclock_log ("line %d: changed channel from %s to %s: trigger coming back to life",
                        trigger_memory.line_id, c->channel, new_channel);
                    zsockopt_set_subscribe (line, "VALUE");
                    zsockopt_set_unsubscribe (line, "CHANNEL_CHANGE");
                }
                free (new_channel);
            }
            else if (zframe_streq (cmd, "VALUE")) {
                zframe_t *vframe = zmsg_pop (msg);
                int value;
                memcpy (&value, zframe_data (vframe), sizeof (int));
                char *update_channel = zmsg_popstr (msg);
                if (strcmp (c->channel, update_channel) != 0) {
                    //  Channel changed, go dormant; this is legit
                    //  according to my tests at
                    //  https://gist.github.com/2042350
                    zclock_log ("line %d: changed channel from %s to %s: trigger going dormant",
                        trigger_memory.line_id, c->channel, update_channel);
                    zsockopt_set_subscribe (line, "CHANNEL_CHANGE");
                    zsockopt_set_unsubscribe (line, "VALUE");
                }
                else if (trigger_func (&trigger_memory, value)) {
                    send_trigger (client, c->target_worker, c->rule_id, value, user_id);
                }
                free (update_channel);
                //  The popped value frame is owned by us now; destroy
                //  it (leaked every VALUE message in the original)
                zframe_destroy (&vframe);
            }
            else {
                //  Shouldn't ever happen. zframe_strdup allocates, so
                //  capture and free the result (leaked in the original)
                char *cmd_str = zframe_strdup (cmd);
                zclock_log ("shouldn't have received command %s\n", cmd_str);
                free (cmd_str);
            }
            zmsg_destroy (&msg);
            zframe_destroy (&cmd);
        }
    }
    mdcli_destroy (&client);    //  Was never destroyed in the original
    msgpack_zone_destroy (&mempool);
}
int main (int argc, char *argv [])
{
    //  Peering broker (zguide peering3 pattern). First argument is this
    //  broker's name; other arguments are our peers' names. Routes client
    //  requests to local workers when capacity exists, else to a random
    //  peer broker over the "cloud" sockets.
    if (argc < 2) {
        printf ("syntax: peering3 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    //  NOTE(review): 'self' has no local declaration — it is a file-scope
    //  global defined elsewhere in this file.
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));
    zctx_t *ctx = zctx_new ();

    //  Prepare local frontend (clients) and backend (workers)
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);
    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Bind cloud frontend to endpoint; our name is the ROUTER identity
    //  so peers can address replies to us
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }
    //  Bind state backend to endpoint
    void *statebe = zsocket_new (ctx, ZMQ_PUB);
    zsocket_bind (statebe, "ipc://%s-state.ipc", self);

    //  Connect state frontend to all peers
    void *statefe = zsocket_new (ctx, ZMQ_SUB);
    zsockopt_set_subscribe (statefe, "");
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to state backend at '%s'\n", peer);
        zsocket_connect (statefe, "ipc://%s-state.ipc", peer);
    }
    //  Prepare monitor socket (clients/workers push status strings here)
    void *monitor = zsocket_new (ctx, ZMQ_PULL);
    zsocket_bind (monitor, "ipc://%s-monitor.ipc", self);

    //  .split start child tasks
    //  After binding and connecting all our sockets, we start our child
    //  tasks - workers and clients:
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);

    //  Queue of available workers (zframe_t identities, FIFO)
    int local_capacity = 0;
    int cloud_capacity = 0;
    zlist_t *workers = zlist_new ();

    //  .split main loop
    //  The main loop has two parts. First we poll workers and our two service
    //  sockets (statefe and monitor), in any case. If we have no ready workers,
    //  there's no point in looking at incoming requests. These can remain on
    //  their internal 0MQ queues:
    while (true) {
        zmq_pollitem_t primary [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 },
            { statefe, 0, ZMQ_POLLIN, 0 },
            { monitor, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers ready, wait indefinitely
        int rc = zmq_poll (primary, 4, local_capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Track if capacity changes during this iteration
        int previous = local_capacity;

        //  Handle reply from local worker
        zmsg_t *msg = NULL;
        if (primary [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            //  The unwrapped frame is the worker's identity; it goes
            //  back on the available-worker queue
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);
            local_capacity++;
            //  If it's READY, don't route the message any further
            //  (only the first byte of WORKER_READY is compared)
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else if (primary [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker identity for anything
            zframe_t *identity = zmsg_unwrap (msg);
            zframe_destroy (&identity);
        }
        //  Route reply to cloud if it's addressed to a broker
        //  (first frame matches one of our peers' names)
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
            &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  .split handle state messages
        //  If we have input messages on our statefe or monitor sockets we
        //  can process these immediately:
        if (primary [2].revents & ZMQ_POLLIN) {
            //  State message: peer name frame, then capacity as a string
            char *peer = zstr_recv (statefe);
            char *status = zstr_recv (statefe);
            cloud_capacity = atoi (status);
            free (peer);
            free (status);
        }
        if (primary [3].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (monitor);
            printf ("%s\n", status);
            free (status);
        }
        //  .split route client requests
        //  Now route as many clients requests as we can handle. If we have
        //  local capacity we poll both localfe and cloudfe. If we have cloud
        //  capacity only, we poll just localfe. We route any request locally
        //  if we can, else we route to the cloud.
        while (local_capacity + cloud_capacity) {
            zmq_pollitem_t secondary [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            if (local_capacity)
                rc = zmq_poll (secondary, 2, 0);
            else
                rc = zmq_poll (secondary, 1, 0);
            assert (rc >= 0);

            if (secondary [0].revents & ZMQ_POLLIN)
                msg = zmsg_recv (localfe);
            else
            if (secondary [1].revents & ZMQ_POLLIN)
                msg = zmsg_recv (cloudfe);
            else
                break;          //  No work, go back to primary

            if (local_capacity) {
                //  Pop the oldest available worker and wrap the request
                //  in its identity envelope
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                local_capacity--;
            }
            else {
                //  Route to random broker peer (argv indices 2..argc-1)
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
        }
        //  .split broadcast capacity
        //  We broadcast capacity messages to other peers; to reduce chatter
        //  we do this only if our capacity changed.
        if (local_capacity != previous) {
            //  We stick our own identity onto the envelope
            zstr_sendm (statebe, self);
            //  Broadcast new capacity.
            //  NOTE(review): this relies on zstr_send accepting a format
            //  string (early czmq API); newer czmq separates this into
            //  zstr_sendf, as used elsewhere in this file — confirm the
            //  library version in use.
            zstr_send (statebe, "%d", local_capacity);
        }
    }
    //  When we're done, clean up properly: destroy any queued worker
    //  identity frames before the list itself
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
int main (int argc, char *argv []) { clonesrv_t *self = (clonesrv_t *) zmalloc (sizeof (clonesrv_t)); if (argc == 2 && streq (argv [1], "-p")) { zclock_log ("I: primary master, waiting for backup (slave)"); self->bstar = bstar_new (BSTAR_PRIMARY, "tcp://*:5003", "tcp://localhost:5004"); bstar_voter (self->bstar, "tcp://*:5556", ZMQ_ROUTER, s_snapshots, self); self->port = 5556; self->peer = 5566; self->primary = TRUE; } else if (argc == 2 && streq (argv [1], "-b")) { zclock_log ("I: backup slave, waiting for primary (master)"); self->bstar = bstar_new (BSTAR_BACKUP, "tcp://*:5004", "tcp://localhost:5003"); bstar_voter (self->bstar, "tcp://*:5566", ZMQ_ROUTER, s_snapshots, self); self->port = 5566; self->peer = 5556; self->primary = FALSE; } else { printf ("Usage: clonesrv4 { -p | -b }\n"); free (self); exit (0); } // Primary server will become first master if (self->primary) self->kvmap = zhash_new (); self->ctx = zctx_new (); self->pending = zlist_new (); bstar_set_verbose (self->bstar, TRUE); // Set up our clone server sockets self->publisher = zsocket_new (self->ctx, ZMQ_PUB); self->collector = zsocket_new (self->ctx, ZMQ_SUB); zsockopt_set_subscribe (self->collector, ""); zsocket_bind (self->publisher, "tcp://*:%d", self->port + 1); zsocket_bind (self->collector, "tcp://*:%d", self->port + 2); // Set up our own clone client interface to peer self->subscriber = zsocket_new (self->ctx, ZMQ_SUB); zsockopt_set_subscribe (self->subscriber, ""); zsocket_connect (self->subscriber, "tcp://localhost:%d", self->peer + 1); // .split main task body // After we've set-up our sockets we register our binary star // event handlers, and then start the bstar reactor. 
This finishes // when the user presses Ctrl-C, or the process receives a SIGINT // interrupt: // Register state change handlers bstar_new_master (self->bstar, s_new_master, self); bstar_new_slave (self->bstar, s_new_slave, self); // Register our other handlers with the bstar reactor zmq_pollitem_t poller = { self->collector, 0, ZMQ_POLLIN }; zloop_poller (bstar_zloop (self->bstar), &poller, s_collector, self); zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_flush_ttl, self); zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_send_hugz, self); // Start the Bstar reactor bstar_start (self->bstar); // Interrupted, so shut down while (zlist_size (self->pending)) { kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending); kvmsg_destroy (&kvmsg); } zlist_destroy (&self->pending); bstar_destroy (&self->bstar); zhash_destroy (&self->kvmap); zctx_destroy (&self->ctx); free (self); return 0; }
//  Line listener thread: subscribes to the shared inproc line feed,
//  rebroadcasts CHANNEL_CHANGE and VALUE messages for one line on its
//  own PUB socket. The channel is repeated in every VALUE update so a
//  newly-started trigger always learns the current channel.
void
line_listener (void *cvoid, zctx_t *context, void *pipe)
{
    lineconfig_t *config = (lineconfig_t *) cvoid;
    //  atm, topic == outpipe, but this is coincidental...
    dump_lineconfig (config);
    channel_memory_t channel_memory = { strdup ("unknown"), strdup ("unknown"), 0 };

    void *lineout = zsocket_new (context, ZMQ_PUB);
    char *outpipe = to_linesocket (config->line_id);
    zclock_log ("binding line |%s|", outpipe);
    zsocket_bind (lineout, outpipe);

    void *subscriber = zsocket_new (context, ZMQ_SUB);
    zclock_log ("subscribing to line |%d|", config->line_id);
    //  NOTE(review): zsocket_new in this codebase auto-subscribes SUB
    //  sockets to ""; this unsubscribe removes that, and the explicit
    //  topic subscribe below is commented out, leaving no visible
    //  active filter — confirm the intended subscription set.
    zsockopt_set_unsubscribe (subscriber, "");
    char *topic = to_line (config->line_id);
    //  zsockopt_set_subscribe (subscriber, topic);
    zclock_log ("subscribing to literal line |%s|", topic);
    zsocket_connect (subscriber, "inproc://line");

    //  Subscription in place; tell parent we're ready
    child_handshake (pipe);
    zsocket_destroy (context, pipe);

    while (1) {
        zmsg_t *msg = zmsg_recv (subscriber);
        if (!msg) {
            //  Interrupted: break (not return) so the cleanup below
            //  runs — in the original the only loop exit was a return,
            //  which made the trailing free (config) unreachable.
            zclock_log ("line quitting!");
            break;
        }
        char *recv_topic = zmsg_popstr (msg);
        assert (strcmp (recv_topic, topic) == 0);
        free (recv_topic);
        assert (zmsg_size (msg) == 2);
        char *channel = zmsg_popstr (msg);

        //  The original allocated one zmsg up front and then either
        //  sent it, reassigned it, or abandoned it — leaking a zmsg on
        //  every message where no CHANNEL_CHANGE fired. Allocate each
        //  outgoing message only where it is actually sent.
        if (port_changed (channel, &channel_memory)) {
            zmsg_t *out = zmsg_new ();
            zmsg_pushstr (out, channel_memory.current_channel);
            zmsg_pushstr (out, "CHANNEL_CHANGE");
            zmsg_send (&out, lineout);
        }
        //  Only send a value if we're all settled down; the channel is
        //  deliberately repeated in every VALUE update (see header note)
        if (strcmp (channel, channel_memory.current_channel) == 0) {
            zmsg_t *out = zmsg_new ();
            zmsg_pushstr (out, channel_memory.current_channel);
            zmsg_push (out, zmsg_pop (msg));
            zmsg_pushstr (out, "VALUE");
            zmsg_send (&out, lineout);
        }
        free (channel);
        zmsg_destroy (&msg);
    }
    //  Cleanup (now reachable).
    //  NOTE(review): outpipe/topic from to_linesocket/to_line and the
    //  strdup'd channel_memory strings are not freed here — confirm
    //  their ownership before adding frees.
    zsocket_destroy (context, subscriber);
    zsocket_destroy (context, lineout);
    free (config);
}