static void
s_broker_worker_msg (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 1);      //  At least, command

    zframe_t *command = zmsg_pop (msg);
    char *id_string = zframe_strhex (sender);
    int worker_ready = (zhash_lookup (self->workers, id_string) != NULL);
    free (id_string);
    worker_t *worker = s_worker_require (self, sender);

    if (zframe_streq (command, MDPW_READY)) {
        if (worker_ready) {
            //  Not first command in session
            //  TODO: rethink this; in effect it acts as a heartbeat synonym
            s_worker_delete (worker, 1);
        }
        else {
            if (zframe_size (sender) >= 4
            &&  memcmp (zframe_data (sender), "mmi.", 4) == 0) {
                //  Reserved service name
                //  TODO: rethink this; in effect it acts as a heartbeat synonym
                s_worker_delete (worker, 1);
            }
            else {
                //  Attach worker to service and mark as idle
                zframe_t *service_frame = zmsg_pop (msg);
                worker->service = s_service_require (self, service_frame);
                worker->service->workers++;
                s_worker_waiting (worker);
                zframe_destroy (&service_frame);
            }
        }
    }
    else
    if (zframe_streq (command, MDPW_REPLY)) {
        if (worker_ready) {
            //  Remove and save client return envelope and insert the
            //  protocol header and service name, then rewrap envelope.
            zframe_t *client = zmsg_unwrap (msg);
            zmsg_pushstr (msg, worker->service->name);
            zmsg_pushstr (msg, MDPC_CLIENT);
            zmsg_wrap (msg, client);
            zmsg_send (&msg, self->socket);
            s_worker_waiting (worker);
        }
        else {
            //  The worker-broker link simply dropped;
            //  treated like a missed heartbeat
            s_worker_delete (worker, 1);
        }
    }
    else
    if (zframe_streq (command, MDPW_HEARTBEAT)) {
        if (worker_ready)
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
        else {
            //  The worker-broker link simply dropped;
            //  treated like a missed heartbeat
            s_worker_delete (worker, 1);
        }
    }
    else
    if (zframe_streq (command, MDPW_DISCONNECT))
        s_worker_delete (worker, 0);
    else {
        zclock_log ("E: invalid input message");
        zmsg_dump (msg);
    }
    zframe_destroy (&command);
    zmsg_destroy (&msg);
}
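The broker code above calls s_worker_delete() and s_worker_waiting() without showing them. Here is a minimal sketch of what they typically look like, assuming the usual zguide mdbroker bookkeeping (a broker-wide waiting list, a per-service waiting list, and a worker hash keyed by hex identity); the worker_t fields and the s_worker_send() helper are assumptions, not part of this listing:

//  Sketch only: assumes worker_t carries broker, service, id_string, expiry
static void
s_worker_delete (worker_t *self, int disconnect)
{
    assert (self);
    if (disconnect)
        s_worker_send (self, MDPW_DISCONNECT, NULL, NULL);
    if (self->service) {
        zlist_remove (self->service->waiting, self);
        self->service->workers--;
    }
    zlist_remove (self->broker->waiting, self);
    //  Deleting from the hash invokes the worker's free-function
    zhash_delete (self->broker->workers, self->id_string);
}

static void
s_worker_waiting (worker_t *self)
{
    //  Queue to broker and service waiting lists
    zlist_append (self->broker->waiting, self);
    zlist_append (self->service->waiting, self);
    self->expiry = zclock_time () + HEARTBEAT_EXPIRY;
    s_service_dispatch (self->service, NULL);
}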
static void s_broker_client_msg(broker_t *self, zframe_t *sender, zmsg_t *msg) { assert(zmsg_size(msg) >= 2); zframe_t *service_frame = zmsg_pop(msg); service_t *service = s_service_require(self, service_frame); zmsg_wrap(msg, zframe_dup(sender)); if (zframe_size(service_frame) >= 4 && memcmp(zframe_data(service_frame), "mmi.", 4) == 0){ char *return_code; if (zframe_streq(service_frame, "mmi.service")){ char *name = zframe_strdup(zmsg_last(msg)); service_t *service = (service_t *)zhash_lookup(self->services, name); return_code = service && service->workers ? "200" : "404"; free(name); } else return_code = "501"; zframe_reset(zmsg_last(msg), return_code, strlen(return_code)); zframe_t *client = zmsg_unwrap(msg); zmsg_prepend(msg, &service_frame); zmsg_pushstr(msg, MDPC_CLIENT); zmsg_wrap(msg, client); zmsg_send(&msg, self->socket); } else s_service_dispatch(service, msg); zframe_destroy(&service_frame); }
// Handle input from worker, on backend int s_handle_backend(zloop_t *loop, zmq_pollitem_t *poller, void *arg) { // Use worker identity for load-balancing lbbroker_t *self = (lbbroker_t *)arg; zmsg_t *msg = zmsg_recv(self->backend); if (msg) { zframe_t *identity = zmsg_unwrap(msg); zlist_append(self->workers, identity); // Enable reader on frontend if we went from 0 to 1 workers if (zlist_size(self->workers) == 1) { zmq_pollitem_t poller = { self->frontend, 0, ZMQ_POLLIN }; zloop_poller(loop, &poller, s_handle_frontend, self); } // Forward message to client if it's not a READY zframe_t *frame = zmsg_first(msg); if (memcmp(zframe_data(frame), WORKER_READY, strlen(WORKER_READY)) == 0) { zmsg_destroy(&msg); } else { zmsg_send(&msg, self->frontend); } } return 0; }
static void
s_worker_process (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 1);      //  At least, command

    zframe_t *command = zmsg_pop (msg);
    char *identity = zframe_strhex (sender);
    int worker_ready = (zhash_lookup (self->workers, identity) != NULL);
    free (identity);
    worker_t *worker = s_worker_require (self, sender);

    if (zframe_streq (command, MDPW_READY)) {
        if (worker_ready)               //  Not first command in session
            s_worker_delete (self, worker, 1);
        else
        if (zframe_size (sender) >= 4   //  Reserved service name
        &&  memcmp (zframe_data (sender), "mmi.", 4) == 0)
            s_worker_delete (self, worker, 1);
        else {
            //  Attach worker to service and mark as idle
            zframe_t *service_frame = zmsg_pop (msg);
            worker->service = s_service_require (self, service_frame);
            worker->service->workers++;
            s_worker_waiting (self, worker);
            zframe_destroy (&service_frame);
        }
    }
    else
    if (zframe_streq (command, MDPW_REPLY)) {
        if (worker_ready) {
            //  Remove & save client return envelope and insert the
            //  protocol header and service name, then rewrap envelope.
            zframe_t *client = zmsg_unwrap (msg);
            zmsg_pushstr (msg, worker->service->name);
            zmsg_pushstr (msg, MDPC_CLIENT);
            zmsg_wrap (msg, client);
            zmsg_send (&msg, self->socket);
            s_worker_waiting (self, worker);
        }
        else
            s_worker_delete (self, worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_HEARTBEAT)) {
        if (worker_ready)
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
        else
            s_worker_delete (self, worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_DISCONNECT))
        s_worker_delete (self, worker, 0);
    else {
        zclock_log ("E: invalid input message");
        zmsg_dump (msg);
    }
    zframe_destroy (&command);
    zmsg_destroy (&msg);
}
int main (void) { zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); void *backend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, "tcp://*:5555"); // For clients zsocket_bind (backend, "tcp://*:5556"); // For workers // Queue of available workers zlist_t *workers = zlist_new (); // The body of this example is exactly the same as lruqueue2. // .skip while (1) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Use worker address for LRU routing zmsg_t *msg = zmsg_recv (backend); if (!msg) break; // Interrupted zframe_t *address = zmsg_unwrap (msg); zlist_append (workers, address); // Forward message to client if it's not a READY zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), LRU_READY, 1) == 0) zmsg_destroy (&msg); else zmsg_send (&msg, frontend); } if (items [1].revents & ZMQ_POLLIN) { // Get client request, route to first available worker zmsg_t *msg = zmsg_recv (frontend); if (msg) { zmsg_wrap (msg, (zframe_t *) zlist_pop (workers)); zmsg_send (&msg, backend); } } } // When we're done, clean up properly while (zlist_size (workers)) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zframe_destroy (&frame); } zlist_destroy (&workers); zctx_destroy (&ctx); return 0; // .until }
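The READY handshake above compares only the first byte of LRU_READY. The define itself is not shown in this listing; in the zguide examples this signal is conventionally a one-byte frame, so a plausible definition (an assumption here) is:

#define LRU_READY   "\001"      //  Signals worker is ready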
zmsg_t *create_sendback_message(zmsg_t *msg) { zframe_t *cli_identity = zmsg_unwrap(msg); zmsg_t *sendback_msg = zmsg_new(); zmsg_wrap(sendback_msg, cli_identity); return sendback_msg; }
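A hedged usage sketch: create_sendback_message() strips the routing envelope from an incoming request and starts a reply addressed to the same client. The socket name sweb and the JSON payload are illustrative only:

zmsg_t *request = zmsg_recv (sweb);                 //  Request from a client
zmsg_t *reply = create_sendback_message (request);  //  Takes its envelope
zmsg_addstr (reply, "{\"status\":\"ok\"}");         //  Illustrative payload
zmsg_send (&reply, sweb);
zmsg_destroy (&request);                            //  Envelope already removed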
static void s_worker_process (broker_t *self, char *sender, zmsg_t *msg) { assert (zmsg_parts (msg) >= 1); // At least, command char *command = zmsg_pop (msg); int worker_ready = (zhash_lookup (self->workers, sender) != NULL); worker_t *worker = s_worker_require (self, sender); if (streq (command, MDPW_READY)) { if (worker_ready) // Not first command in session s_worker_delete (self, worker, 1); else if (strlen (sender) >= 4 // Reserved service name && memcmp (sender, "mmi.", 4) == 0) s_worker_delete (self, worker, 1); else { // Attach worker to service and mark as idle char *service_name = zmsg_pop (msg); worker->service = s_service_require (self, service_name); worker->service->workers++; s_worker_waiting (self, worker); free (service_name); } } else if (streq (command, MDPW_REPLY)) { if (worker_ready) { // Remove & save client return envelope and insert the // protocol header and service name, then rewrap envelope. char *client = zmsg_unwrap (msg); zmsg_wrap (msg, MDPC_CLIENT, worker->service->name); zmsg_wrap (msg, client, ""); free (client); zmsg_send (&msg, self->socket); s_worker_waiting (self, worker); } else s_worker_delete (self, worker, 1); } else if (streq (command, MDPW_HEARTBEAT)) { if (worker_ready) worker->expiry = s_clock () + HEARTBEAT_EXPIRY; else s_worker_delete (self, worker, 1); } else if (streq (command, MDPW_DISCONNECT)) s_worker_delete (self, worker, 0); else { s_console ("E: invalid input message (%d)", (int) *command); zmsg_dump (msg); } free (command); zmsg_destroy (&msg); }
static void
s_broker_worker_msg (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 1);

    zframe_t *command = zmsg_pop (msg);
    char *id_string = zframe_strhex (sender);
    int worker_ready = (zhash_lookup (self->workers, id_string) != NULL);
    free (id_string);
    worker_t *worker = s_worker_require (self, sender);

    if (zframe_streq (command, MDPW_READY)) {
        if (worker_ready)
            s_worker_delete (worker, 1);
        else
        if (zframe_size (sender) >= 4
        &&  memcmp (zframe_data (sender), "mmi.", 4) == 0)
            s_worker_delete (worker, 1);
        else {
            zframe_t *service_frame = zmsg_pop (msg);
            worker->service = s_service_require (self, service_frame);
            worker->service->workers++;
            s_worker_waiting (worker);
            zframe_destroy (&service_frame);
        }
    }
    else
    if (zframe_streq (command, MDPW_REPLY)) {
        if (worker_ready) {
            zframe_t *client = zmsg_unwrap (msg);
            zmsg_pushstr (msg, worker->service->name);
            zmsg_pushstr (msg, MDPC_CLIENT);
            zmsg_wrap (msg, client);
            zmsg_send (&msg, self->socket);
            s_worker_waiting (worker);
        }
        else
            s_worker_delete (worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_HEARTBEAT)) {
        if (worker_ready)
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
        else
            s_worker_delete (worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_DISCONNECT))
        s_worker_delete (worker, 0);
    else {
        zclock_log ("E: invalid input message");
        zmsg_dump (msg);
    }
    zframe_destroy (&command);
    zmsg_destroy (&msg);
}
int main (void) { s_version_assert (2, 1); // Prepare our context and sockets void *context = zmq_init (1); void *frontend = zmq_socket (context, ZMQ_XREP); void *backend = zmq_socket (context, ZMQ_XREP); zmq_bind (frontend, "tcp://*:5555"); // For clients zmq_bind (backend, "tcp://*:5556"); // For workers // Queue of available workers int available_workers = 0; char *worker_queue [MAX_WORKERS]; while (1) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers if (available_workers) zmq_poll (items, 2, -1); else zmq_poll (items, 1, -1); // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { zmsg_t *zmsg = zmsg_recv (backend); // Use worker address for LRU routing assert (available_workers < MAX_WORKERS); worker_queue [available_workers++] = zmsg_unwrap (zmsg); // Return reply to client if it's not a READY if (strcmp (zmsg_address (zmsg), "READY") == 0) zmsg_destroy (&zmsg); else zmsg_send (&zmsg, frontend); } if (items [1].revents & ZMQ_POLLIN) { // Now get next client request, route to next worker zmsg_t *zmsg = zmsg_recv (frontend); // REQ socket in worker needs an envelope delimiter zmsg_wrap (zmsg, worker_queue [0], ""); zmsg_send (&zmsg, backend); // Dequeue and drop the next worker address free (worker_queue [0]); DEQUEUE (worker_queue); available_workers--; } } // We never exit the main loop return 0; }
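The DEQUEUE macro used above is not defined in this listing; in the original zguide lruqueue example it is a plain array shift, roughly (treat the exact form as an assumption):

//  Dequeue operation for queue implemented as array of anything
#define DEQUEUE(q) memmove (&(q)[0], &(q)[1], sizeof (q) - sizeof (q [0]))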
void
pss_response (void *spss, req_store_t *req_store, void *sweb, void *sgraph)
{
    zmsg_t *msg = zmsg_recv (spss);
    zframe_t *null = zmsg_unwrap (msg);
    zframe_destroy (&null);
    json_error_t error;

    //  Frame data is not null-terminated, so bound the printf
    printf ("\nbroker:spss received: %.*s\n",
        (int) zframe_size (zmsg_first (msg)),
        (const char *) zframe_data (zmsg_first (msg)));

    const char *data = (const char *) zframe_data (zmsg_first (msg));
    size_t data_size = zframe_size (zmsg_first (msg));
    json_t *pss_resp_json = json_loadb (data, data_size, 0, &error);
    zmsg_destroy (&msg);

    //  Identify the request
    int32_t requestId =
        json_integer_value (json_object_get (pss_resp_json, "requestId"));
    req_t *req = request_store_req (req_store, requestId);
    json_t *response = json_object_get (pss_resp_json, "response");
    json_incref (response);
    json_decref (pss_resp_json);
    const char *resp_type =
        json_string_value (json_object_get (response, "type"));
    const char *req_type =
        json_string_value (json_object_get
                           (json_object_get
                            (json_object_get (req->request, "clientRequest"),
                             "request"), "type"));
    if ((strcmp (resp_type, "searchResponse") == 0)
    &&  (strcmp (req_type, "searchRequest") == 0))
        pss_response_searchResponse (req, response, requestId, sgraph);
    else
    if ((strcmp (resp_type, "newNodeResponse") == 0)
    &&  (strcmp (req_type, "newNode") == 0))
        pss_response_newNodeResponse (req, response, requestId, sweb,
                                      req_store);
    else
    if ((strcmp (resp_type, "delNode") == 0)
    &&  (strcmp (req_type, "delNode") == 0))
        pss_response_delNode (req, response, requestId, sweb, req_store);
    else {
        //  Unknown or mismatched response type: ignore
    }
}
static void
s_broker_client_msg (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 2);      //  Service name + body

    zframe_t *service_frame = zmsg_pop (msg);
    service_t *service = s_service_require (self, service_frame);
    //  TODO: should not create the service when the request comes from a client

    //  Set reply return identity to client sender
    zmsg_wrap (msg, zframe_dup (sender));

    //  If we got a MMI service request, process that internally
    if (zframe_size (service_frame) >= 4
    &&  memcmp (zframe_data (service_frame), "mmi.", 4) == 0) {
        char *return_code;
        if (zframe_streq (service_frame, "mmi.service")) {
            char *name = zframe_strdup (zmsg_last (msg));
            service_t *service =
                (service_t *) zhash_lookup (self->services, name);
            if (service) {
                if (service->workers)
                    return_code = "200";
                else
                    return_code = "404";
            }
            else
                return_code = "401";
            free (name);
        }
        else
            return_code = "501";
        zframe_reset (zmsg_last (msg), return_code, strlen (return_code));

        //  Remove & save client return envelope and insert the
        //  protocol header and service name, then rewrap envelope.
        zframe_t *client = zmsg_unwrap (msg);
        zmsg_push (msg, zframe_dup (service_frame));
        zmsg_pushstr (msg, MDPC_CLIENT);
        zmsg_wrap (msg, client);
        zmsg_send (&msg, self->socket);
    }
    else
        //  Else dispatch the message to the requested service
        s_service_dispatch (service, msg);
    zframe_destroy (&service_frame);
}
static void s_service_internal (broker_t *self, char *service_name, zmsg_t *msg) { if (streq (service_name, "mmi.service")) { service_t *service = (service_t *) zhash_lookup (self->services, zmsg_body (msg)); if (service && service->workers) zmsg_body_set (msg, "200"); else zmsg_body_set (msg, "404"); } else zmsg_body_set (msg, "501"); // Remove & save client return envelope and insert the // protocol header and service name, then rewrap envelope. char *client = zmsg_unwrap (msg); zmsg_wrap (msg, MDPC_CLIENT, service_name); zmsg_wrap (msg, client, ""); free (client); zmsg_send (&msg, self->socket); }
static void s_service_internal (broker_t *self, zframe_t *service_frame, zmsg_t *msg) { char *return_code; if (zframe_streq (service_frame, "mmi.service")) { char *name = zframe_strdup (zmsg_last (msg)); service_t *service = (service_t *) zhash_lookup (self->services, name); return_code = service && service->workers? "200": "404"; free (name); } else return_code = "501"; zframe_reset (zmsg_last (msg), return_code, strlen (return_code)); // Remove & save client return envelope and insert the // protocol header and service name, then rewrap envelope. zframe_t *client = zmsg_unwrap (msg); zmsg_push (msg, zframe_dup (service_frame)); zmsg_pushstr (msg, MDPC_CLIENT); zmsg_wrap (msg, client); zmsg_send (&msg, self->socket); }
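How this looks from the client side: a hedged sketch of querying mmi.service, assuming the zguide mdcliapi client API (mdcli_new / mdcli_send / mdcli_destroy); the endpoint and the "echo" service name are illustrative:

mdcli_t *session = mdcli_new ("tcp://localhost:5555", 0);
zmsg_t *request = zmsg_new ();
zmsg_addstr (request, "echo");      //  Name of the service we ask about
zmsg_t *reply = mdcli_send (session, "mmi.service", &request);
if (reply) {
    char *reply_code = zframe_strdup (zmsg_first (reply));
    printf ("Lookup echo service: %s\n", reply_code);   //  "200" or "404"
    free (reply_code);
    zmsg_destroy (&reply);
}
mdcli_destroy (&session);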
zmsg_t * mdp_worker_recv (mdp_worker_t *self, zframe_t **reply_to_p) { while (TRUE) { zmq_pollitem_t items [] = { { self->worker, 0, ZMQ_POLLIN, 0 } }; int rc = zmq_poll (items, 1, self->heartbeat * ZMQ_POLL_MSEC); if (rc == -1) break; // Interrupted if (items [0].revents & ZMQ_POLLIN) { zmsg_t *msg = zmsg_recv (self->worker); if (!msg) break; // Interrupted if (self->verbose) { zclock_log ("I: received message from broker:"); zmsg_dump (msg); } self->liveness = HEARTBEAT_LIVENESS; // Don't try to handle errors, just assert noisily assert (zmsg_size (msg) >= 3); zframe_t *empty = zmsg_pop (msg); assert (zframe_streq (empty, "")); zframe_destroy (&empty); zframe_t *header = zmsg_pop (msg); assert (zframe_streq (header, MDPW_WORKER)); zframe_destroy (&header); zframe_t *command = zmsg_pop (msg); if (zframe_streq (command, MDPW_REQUEST)) { // We should pop and save as many addresses as there are // up to a null part, but for now, just save one... zframe_t *reply_to = zmsg_unwrap (msg); if (reply_to_p) *reply_to_p = reply_to; else zframe_destroy (&reply_to); zframe_destroy (&command); // Here is where we actually have a message to process; we // return it to the caller application return msg; // We have a request to process } else if (zframe_streq (command, MDPW_HEARTBEAT)) ; // Do nothing for heartbeats else if (zframe_streq (command, MDPW_DISCONNECT)) s_mdp_worker_connect_to_broker (self); else { zclock_log ("E: invalid input message"); zmsg_dump (msg); } zframe_destroy (&command); zmsg_destroy (&msg); } else if (--self->liveness == 0) { if (self->verbose) zclock_log ("W: disconnected from broker - retrying..."); zclock_sleep (self->reconnect); s_mdp_worker_connect_to_broker (self); } // Send HEARTBEAT if it's time if (zclock_time () > self->heartbeat_at) { s_mdp_worker_send_to_broker (self, MDPW_HEARTBEAT, NULL, NULL); self->heartbeat_at = zclock_time () + self->heartbeat; } } if (zctx_interrupted) printf ("W: interrupt received, killing worker...\n"); return NULL; }
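A hedged sketch of the receive loop an application would build on this function; mdp_worker_send() is assumed to exist as the sending counterpart and is not part of the listing above:

zframe_t *reply_to = NULL;
while (true) {
    zmsg_t *request = mdp_worker_recv (worker, &reply_to);
    if (request == NULL)
        break;                  //  Worker was interrupted
    //  Echo the request straight back as the reply
    mdp_worker_send (worker, &request, reply_to);
    zframe_destroy (&reply_to);
}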
int main (int argc, char *argv []) { // First argument is this broker's name // Other arguments are our peers' names // if (argc < 2) { printf ("syntax: peering3 me {you}...\n"); exit (EXIT_FAILURE); } self = argv [1]; printf ("I: preparing broker at %s...\n", self); srandom ((unsigned) time (NULL)); zctx_t *ctx = zctx_new (); // Prepare local frontend and backend void *localfe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (localfe, "ipc://%s-localfe.ipc", self); void *localbe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (localbe, "ipc://%s-localbe.ipc", self); // Bind cloud frontend to endpoint void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER); zsockopt_set_identity (cloudfe, self); zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self); // Connect cloud backend to all peers void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER); zsockopt_set_identity (cloudbe, self); int argn; for (argn = 2; argn < argc; argn++) { char *peer = argv [argn]; printf ("I: connecting to cloud frontend at '%s'\n", peer); zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer); } // Bind state backend to endpoint void *statebe = zsocket_new (ctx, ZMQ_PUB); zsocket_bind (statebe, "ipc://%s-state.ipc", self); // Connect state frontend to all peers void *statefe = zsocket_new (ctx, ZMQ_SUB); zsockopt_set_subscribe (statefe, ""); for (argn = 2; argn < argc; argn++) { char *peer = argv [argn]; printf ("I: connecting to state backend at '%s'\n", peer); zsocket_connect (statefe, "ipc://%s-state.ipc", peer); } // Prepare monitor socket void *monitor = zsocket_new (ctx, ZMQ_PULL); zsocket_bind (monitor, "ipc://%s-monitor.ipc", self); // .split start child tasks // After binding and connecting all our sockets, we start our child // tasks - workers and clients: int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) zthread_new (worker_task, NULL); // Start local clients int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) zthread_new (client_task, NULL); // Queue of available workers int local_capacity = 0; int cloud_capacity = 0; zlist_t *workers = zlist_new (); // .split main loop // The main loop has two parts. First we poll workers and our two service // sockets (statefe and monitor), in any case. If we have no ready workers, // there's no point in looking at incoming requests. These can remain on // their internal 0MQ queues: while (true) { zmq_pollitem_t primary [] = { { localbe, 0, ZMQ_POLLIN, 0 }, { cloudbe, 0, ZMQ_POLLIN, 0 }, { statefe, 0, ZMQ_POLLIN, 0 }, { monitor, 0, ZMQ_POLLIN, 0 } }; // If we have no workers ready, wait indefinitely int rc = zmq_poll (primary, 4, local_capacity? 
1000 * ZMQ_POLL_MSEC: -1); if (rc == -1) break; // Interrupted // Track if capacity changes during this iteration int previous = local_capacity; // Handle reply from local worker zmsg_t *msg = NULL; if (primary [0].revents & ZMQ_POLLIN) { msg = zmsg_recv (localbe); if (!msg) break; // Interrupted zframe_t *identity = zmsg_unwrap (msg); zlist_append (workers, identity); local_capacity++; // If it's READY, don't route the message any further zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0) zmsg_destroy (&msg); } // Or handle reply from peer broker else if (primary [1].revents & ZMQ_POLLIN) { msg = zmsg_recv (cloudbe); if (!msg) break; // Interrupted // We don't use peer broker identity for anything zframe_t *identity = zmsg_unwrap (msg); zframe_destroy (&identity); } // Route reply to cloud if it's addressed to a broker for (argn = 2; msg && argn < argc; argn++) { char *data = (char *) zframe_data (zmsg_first (msg)); size_t size = zframe_size (zmsg_first (msg)); if (size == strlen (argv [argn]) && memcmp (data, argv [argn], size) == 0) zmsg_send (&msg, cloudfe); } // Route reply to client if we still need to if (msg) zmsg_send (&msg, localfe); // .split handle state messages // If we have input messages on our statefe or monitor sockets we // can process these immediately: if (primary [2].revents & ZMQ_POLLIN) { char *peer = zstr_recv (statefe); char *status = zstr_recv (statefe); cloud_capacity = atoi (status); free (peer); free (status); } if (primary [3].revents & ZMQ_POLLIN) { char *status = zstr_recv (monitor); printf ("%s\n", status); free (status); } // .split route client requests // Now route as many clients requests as we can handle. If we have // local capacity we poll both localfe and cloudfe. If we have cloud // capacity only, we poll just localfe. We route any request locally // if we can, else we route to the cloud. while (local_capacity + cloud_capacity) { zmq_pollitem_t secondary [] = { { localfe, 0, ZMQ_POLLIN, 0 }, { cloudfe, 0, ZMQ_POLLIN, 0 } }; if (local_capacity) rc = zmq_poll (secondary, 2, 0); else rc = zmq_poll (secondary, 1, 0); assert (rc >= 0); if (secondary [0].revents & ZMQ_POLLIN) msg = zmsg_recv (localfe); else if (secondary [1].revents & ZMQ_POLLIN) msg = zmsg_recv (cloudfe); else break; // No work, go back to primary if (local_capacity) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zmsg_wrap (msg, frame); zmsg_send (&msg, localbe); local_capacity--; } else { // Route to random broker peer int random_peer = randof (argc - 2) + 2; zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer])); zmsg_send (&msg, cloudbe); } } // .split broadcast capacity // We broadcast capacity messages to other peers; to reduce chatter // we do this only if our capacity changed. if (local_capacity != previous) { // We stick our own identity onto the envelope zstr_sendm (statebe, self); // Broadcast new capacity zstr_send (statebe, "%d", local_capacity); } } // When we're done, clean up properly while (zlist_size (workers)) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zframe_destroy (&frame); } zlist_destroy (&workers); zctx_destroy (&ctx); return EXIT_SUCCESS; }
int main (void) { zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); void *backend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, "ipc://frontend.ipc"); zsocket_bind (backend, "ipc://backend.ipc"); int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) zthread_new (client_task, NULL); int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) zthread_new (worker_task, NULL); // Queue of available workers zlist_t *workers = zlist_new (); // Here is the main loop for the load-balancer. It works the same way // as the previous example, but is a lot shorter because CZMQ gives // us an API that does more with fewer calls: while (true) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Use worker identity for load-balancing zmsg_t *msg = zmsg_recv (backend); if (!msg) break; // Interrupted zframe_t *identity = zmsg_unwrap (msg); zlist_append (workers, identity); // Forward message to client if it's not a READY zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0) zmsg_destroy (&msg); else zmsg_send (&msg, frontend); } if (items [1].revents & ZMQ_POLLIN) { // Get client request, route to first available worker zmsg_t *msg = zmsg_recv (frontend); if (msg) { zmsg_wrap (msg, (zframe_t *) zlist_pop (workers)); zmsg_send (&msg, backend); } } } // When we're done, clean up properly while (zlist_size (workers)) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zframe_destroy (&frame); } zlist_destroy (&workers); zctx_destroy (&ctx); return 0; }
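The broker polls for a one-byte WORKER_READY signal and otherwise just relays envelopes. A minimal sketch of the matching worker_task, following the zguide lbbroker convention of a "\001" ready frame (the define and the echo reply are assumptions here; client_task would be the REQ-side mirror image):

#define WORKER_READY   "\001"      //  Signals worker is ready

static void *
worker_task (void *args)
{
    zctx_t *ctx = zctx_new ();
    void *worker = zsocket_new (ctx, ZMQ_REQ);
    zsocket_connect (worker, "ipc://backend.ipc");

    //  Tell broker we're ready for work
    zframe_t *frame = zframe_new (WORKER_READY, 1);
    zframe_send (&frame, worker, 0);

    //  Process messages as they arrive
    while (true) {
        zmsg_t *msg = zmsg_recv (worker);
        if (!msg)
            break;              //  Interrupted
        zframe_reset (zmsg_last (msg), "OK", 2);
        zmsg_send (&msg, worker);
    }
    zctx_destroy (&ctx);
    return NULL;
}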
int main (int argc, char *argv []) { // First argument is this broker's name // Other arguments are our peers' names // if (argc < 2) { printf ("syntax: peering2 me {you}...\n"); return 0; } self = argv [1]; printf ("I: preparing broker at %s...\n", self); srandom ((unsigned) time (NULL)); zctx_t *ctx = zctx_new (); // Bind cloud frontend to endpoint void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_set_identity (cloudfe, self); zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self); // Connect cloud backend to all peers void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_set_identity (cloudbe, self); int argn; for (argn = 2; argn < argc; argn++) { char *peer = argv [argn]; printf ("I: connecting to cloud frontend at '%s'\n", peer); zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer); } // Prepare local frontend and backend void *localfe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (localfe, "ipc://%s-localfe.ipc", self); void *localbe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (localbe, "ipc://%s-localbe.ipc", self); // Get user to tell us when we can start... printf ("Press Enter when all brokers are started: "); getchar (); // Start local workers int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) zthread_new (worker_task, NULL); // Start local clients int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) zthread_new (client_task, NULL); // .split request-reply handling // Here, we handle the request-reply flow. We're using load-balancing // to poll workers at all times, and clients only when there are one // or more workers available. // Least recently used queue of available workers int capacity = 0; zlist_t *workers = zlist_new (); while (true) { // First, route any waiting replies from workers zmq_pollitem_t backends [] = { { localbe, 0, ZMQ_POLLIN, 0 }, { cloudbe, 0, ZMQ_POLLIN, 0 } }; // If we have no workers, wait indefinitely int rc = zmq_poll (backends, 2, capacity? 1000 * ZMQ_POLL_MSEC: -1); if (rc == -1) break; // Interrupted // Handle reply from local worker zmsg_t *msg = NULL; if (backends [0].revents & ZMQ_POLLIN) { msg = zmsg_recv (localbe); if (!msg) break; // Interrupted zframe_t *identity = zmsg_unwrap (msg); zlist_append (workers, identity); capacity++; // If it's READY, don't route the message any further zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0) zmsg_destroy (&msg); } // Or handle reply from peer broker else if (backends [1].revents & ZMQ_POLLIN) { msg = zmsg_recv (cloudbe); if (!msg) break; // Interrupted // We don't use peer broker identity for anything zframe_t *identity = zmsg_unwrap (msg); zframe_destroy (&identity); } // Route reply to cloud if it's addressed to a broker for (argn = 2; msg && argn < argc; argn++) { char *data = (char *) zframe_data (zmsg_first (msg)); size_t size = zframe_size (zmsg_first (msg)); if (size == strlen (argv [argn]) && memcmp (data, argv [argn], size) == 0) zmsg_send (&msg, cloudfe); } // Route reply to client if we still need to if (msg) zmsg_send (&msg, localfe); // .split route client requests // Now we route as many client requests as we have worker capacity // for. We may reroute requests from our local frontend, but not from // the cloud frontend. We reroute randomly now, just to test things // out. 
        //  In the next version, we'll do this properly by calculating
        //  cloud capacity:
        while (capacity) {
            zmq_pollitem_t frontends [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            rc = zmq_poll (frontends, 2, 0);
            assert (rc >= 0);
            int reroutable = 0;
            //  We'll do peer brokers first, to prevent starvation
            if (frontends [1].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (cloudfe);
                reroutable = 0;
            }
            else
            if (frontends [0].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (localfe);
                reroutable = 1;
            }
            else
                break;          //  No work, go back to backends

            //  If reroutable, send to cloud 20% of the time
            //  Here we'd normally use cloud status information
            if (reroutable && argc > 2 && randof (5) == 0) {
                //  Route to random broker peer
                int peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [peer], strlen (argv [peer]));
                zmsg_send (&msg, cloudbe);
            }
            else {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                capacity--;
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
void
zmsg_test (bool verbose)
{
    printf (" * zmsg: ");
    int rc = 0;
    //  @selftest
    //  Create two PAIR sockets and connect over inproc
    zsock_t *output = zsock_new_pair ("@inproc://zmsg.test");
    assert (output);
    zsock_t *input = zsock_new_pair (">inproc://zmsg.test");
    assert (input);

    //  Test send and receive of single-frame message
    zmsg_t *msg = zmsg_new ();
    assert (msg);
    zframe_t *frame = zframe_new ("Hello", 5);
    assert (frame);
    zmsg_prepend (msg, &frame);
    assert (zmsg_size (msg) == 1);
    assert (zmsg_content_size (msg) == 5);
    rc = zmsg_send (&msg, output);
    assert (msg == NULL);
    assert (rc == 0);
    msg = zmsg_recv (input);
    assert (msg);
    assert (zmsg_size (msg) == 1);
    assert (zmsg_content_size (msg) == 5);
    zmsg_destroy (&msg);

    //  Test send and receive of multi-frame message
    msg = zmsg_new ();
    assert (msg);
    rc = zmsg_addmem (msg, "Frame0", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame1", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame2", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame3", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame4", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame5", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame6", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame7", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame8", 6);
    assert (rc == 0);
    rc = zmsg_addmem (msg, "Frame9", 6);
    assert (rc == 0);
    zmsg_t *copy = zmsg_dup (msg);
    assert (copy);
    rc = zmsg_send (&copy, output);
    assert (rc == 0);
    rc = zmsg_send (&msg, output);
    assert (rc == 0);
    copy = zmsg_recv (input);
    assert (copy);
    assert (zmsg_size (copy) == 10);
    assert (zmsg_content_size (copy) == 60);
    zmsg_destroy (&copy);
    msg = zmsg_recv (input);
    assert (msg);
    assert (zmsg_size (msg) == 10);
    assert (zmsg_content_size (msg) == 60);

    //  Create empty file for null test
    FILE *file = fopen ("zmsg.test", "w");
    assert (file);
    fclose (file);
    file = fopen ("zmsg.test", "r");
    zmsg_t *null_msg = zmsg_load (NULL, file);
    assert (null_msg == NULL);
    fclose (file);
    remove ("zmsg.test");

    //  Save to a file, read back
    file = fopen ("zmsg.test", "w");
    assert (file);
    rc = zmsg_save (msg, file);
    assert (rc == 0);
    fclose (file);

    file = fopen ("zmsg.test", "r");
    rc = zmsg_save (msg, file);
    assert (rc == -1);
    fclose (file);
    zmsg_destroy (&msg);

    file = fopen ("zmsg.test", "r");
    msg = zmsg_load (NULL, file);
    assert (msg);
    fclose (file);
    remove ("zmsg.test");
    assert (zmsg_size (msg) == 10);
    assert (zmsg_content_size (msg) == 60);

    //  Remove all frames except first and last
    int frame_nbr;
    for (frame_nbr = 0; frame_nbr < 8; frame_nbr++) {
        zmsg_first (msg);
        frame = zmsg_next (msg);
        zmsg_remove (msg, frame);
        zframe_destroy (&frame);
    }
    //  Test message frame manipulation
    assert (zmsg_size (msg) == 2);
    frame = zmsg_last (msg);
    assert (zframe_streq (frame, "Frame9"));
    assert (zmsg_content_size (msg) == 12);
    frame = zframe_new ("Address", 7);
    assert (frame);
    zmsg_prepend (msg, &frame);
    assert (zmsg_size (msg) == 3);
    rc = zmsg_addstr (msg, "Body");
    assert (rc == 0);
    assert (zmsg_size (msg) == 4);
    frame = zmsg_pop (msg);
    zframe_destroy (&frame);
    assert (zmsg_size (msg) == 3);
    char *body = zmsg_popstr (msg);
    assert (streq (body, "Frame0"));
    free (body);
    zmsg_destroy (&msg);

    //  Test encoding/decoding
    msg = zmsg_new ();
    assert (msg);
    byte *blank = (byte *) zmalloc (100000);
    assert (blank);
    rc = zmsg_addmem (msg, blank, 0);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 1);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 253);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 254);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 255);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 256);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 65535);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 65536);
    assert (rc == 0);
    rc = zmsg_addmem (msg, blank, 65537);
    assert (rc == 0);
    free (blank);
    assert (zmsg_size (msg) == 9);
    byte *buffer;
    size_t buffer_size = zmsg_encode (msg, &buffer);
    zmsg_destroy (&msg);
    msg = zmsg_decode (buffer, buffer_size);
    assert (msg);
    free (buffer);
    zmsg_destroy (&msg);

    //  Test submessages
    msg = zmsg_new ();
    assert (msg);
    zmsg_t *submsg = zmsg_new ();
    zmsg_pushstr (msg, "matr");
    zmsg_pushstr (submsg, "joska");
    rc = zmsg_addmsg (msg, &submsg);
    assert (rc == 0);
    assert (submsg == NULL);
    submsg = zmsg_popmsg (msg);
    assert (submsg == NULL);    //  String "matr" is not an encoded zmsg_t, so was discarded
    submsg = zmsg_popmsg (msg);
    assert (submsg);
    body = zmsg_popstr (submsg);
    assert (streq (body, "joska"));
    free (body);
    zmsg_destroy (&submsg);
    frame = zmsg_pop (msg);
    assert (frame == NULL);
    zmsg_destroy (&msg);

    //  Test comparison of two messages
    msg = zmsg_new ();
    zmsg_addstr (msg, "One");
    zmsg_addstr (msg, "Two");
    zmsg_addstr (msg, "Three");
    zmsg_t *msg_other = zmsg_new ();
    zmsg_addstr (msg_other, "One");
    zmsg_addstr (msg_other, "Two");
    zmsg_addstr (msg_other, "One-Hundred");
    zmsg_t *msg_dup = zmsg_dup (msg);
    zmsg_t *empty_msg = zmsg_new ();
    zmsg_t *empty_msg_2 = zmsg_new ();
    assert (zmsg_eq (msg, msg_dup));
    assert (!zmsg_eq (msg, msg_other));
    assert (zmsg_eq (empty_msg, empty_msg_2));
    assert (!zmsg_eq (msg, NULL));
    assert (!zmsg_eq (NULL, empty_msg));
    assert (!zmsg_eq (NULL, NULL));
    zmsg_destroy (&msg);
    zmsg_destroy (&msg_other);
    zmsg_destroy (&msg_dup);
    zmsg_destroy (&empty_msg);
    zmsg_destroy (&empty_msg_2);

    //  Test signal messages
    msg = zmsg_new_signal (0);
    assert (zmsg_signal (msg) == 0);
    zmsg_destroy (&msg);
    msg = zmsg_new_signal (-1);
    assert (zmsg_signal (msg) == 255);
    zmsg_destroy (&msg);

    //  Now try methods on an empty message
    msg = zmsg_new ();
    assert (msg);
    assert (zmsg_size (msg) == 0);
    assert (zmsg_unwrap (msg) == NULL);
    assert (zmsg_first (msg) == NULL);
    assert (zmsg_last (msg) == NULL);
    assert (zmsg_next (msg) == NULL);
    assert (zmsg_pop (msg) == NULL);
    //  Sending an empty message is valid and destroys the message
    assert (zmsg_send (&msg, output) == 0);
    assert (!msg);

    zsock_destroy (&input);
    zsock_destroy (&output);
    //  @end
    printf ("OK\n");
}
int main (int argc, char *argv[]) { // Prepare our context and sockets void *context = zmq_init (1); void *frontend = zmq_socket (context, ZMQ_XREP); void *backend = zmq_socket (context, ZMQ_XREP); zmq_bind (frontend, "ipc://frontend.ipc"); zmq_bind (backend, "ipc://backend.ipc"); int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) { pthread_t client; pthread_create (&client, NULL, client_thread, context); } int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) { pthread_t worker; pthread_create (&worker, NULL, worker_thread, context); } // Logic of LRU loop // - Poll backend always, frontend only if 1+ worker ready // - If worker replies, queue worker as ready and forward reply // to client if necessary // - If client requests, pop next worker and send request to it // Queue of available workers int available_workers = 0; char *worker_queue [NBR_WORKERS]; while (1) { // Initialize poll set zmq_pollitem_t items [] = { // Always poll for worker activity on backend { backend, 0, ZMQ_POLLIN, 0 }, // Poll front-end only if we have available workers { frontend, 0, ZMQ_POLLIN, 0 } }; if (available_workers) zmq_poll (items, 2, -1); else zmq_poll (items, 1, -1); // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { zmsg_t *zmsg = zmsg_recv (backend); // Use worker address for LRU routing assert (available_workers < NBR_WORKERS); worker_queue [available_workers++] = zmsg_unwrap (zmsg); // Forward message to client if it's not a READY if (strcmp (zmsg_address (zmsg), "READY") == 0) zmsg_destroy (&zmsg); else { zmsg_send (&zmsg, frontend); if (--client_nbr == 0) break; // Exit after N messages } } if (items [1].revents & ZMQ_POLLIN) { // Now get next client request, route to next worker zmsg_t *zmsg = zmsg_recv (frontend); zmsg_wrap (zmsg, worker_queue [0], ""); zmsg_send (&zmsg, backend); // Dequeue and drop the next worker address free (worker_queue [0]); DEQUEUE (worker_queue); available_workers--; } } sleep (1); zmq_term (context); return 0; }
int main (void)
{
    //  Initialize logging
    setlogmask (LOG_UPTO (LOG_DEBUG));
    openlog (PROGRAM_NAME, LOG_CONS | LOG_PID | LOG_PERROR, LOG_USER);
    syslog (LOG_INFO, "broker starting up");

    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, CLIENT_ENDPOINT);
    zsocket_bind (backend, WORKER_ENDPOINT);

    uint32_t frx = 0, ftx = 0, brx = 0, btx = 0, nworkers = 0, npoll = 0;

    //  Queue of available workers
    zlist_t *workers = zlist_new ();

    while (true) {
        if (++npoll % 1000 == 0)
            syslog (LOG_INFO,
                "broker: frx %04u ftx %04u brx %04u btx %04u / %u workers",
                frx, ftx, brx, btx, nworkers);
        zmq_pollitem_t items [] = {
            { backend, 0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers;
        //  rc must be signed since zmq_poll returns -1 on interrupt
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            //  Use worker identity for load-balancing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          //  Interrupted
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);

            //  Forward message to client if it's not a READY
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0) {
                zmsg_destroy (&msg);
                nworkers++;
            }
            else {
                brx++;
                zmsg_send (&msg, frontend);
                ftx++;
            }
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Get client request, route to first available worker
            zmsg_t *msg = zmsg_recv (frontend);
            frx++;
            if (msg) {
                zmsg_wrap (msg, (zframe_t *) zlist_pop (workers));
                zmsg_send (&msg, backend);
                btx++;
            }
        }
    }
    //  When we're done, clean up properly
    syslog (LOG_INFO, "broker terminating");
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
}
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "tcp://127.0.0.1:5555");
    zsocket_bind (backend, "tcp://127.0.0.1:5556");

    zlist_t *workers = zlist_new ();
    //  Send out heartbeats at regular intervals
    uint64_t heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;

    while (true) {
        zmq_pollitem_t items [] = {
            { backend, 0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers; time out so
        //  the heartbeat logic below still runs
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1,
                           HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;
            //  Any sign of life from the worker means it's ready
            zframe_t *identity = zmsg_unwrap (msg);
            worker_t *worker = s_worker_new (identity);
            s_worker_ready (worker, workers);
            //  Validate control message, or return reply to client
            if (zmsg_size (msg) == 1) {
                zframe_t *frame = zmsg_first (msg);
                if (memcmp (zframe_data (frame), PPP_READY, 1)
                &&  memcmp (zframe_data (frame), PPP_HEARTBEAT, 1)) {
                    printf ("E: invalid message from worker");
                    zmsg_dump (msg);
                }
                zmsg_destroy (&msg);
            }
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zmsg_recv (frontend);
            if (!msg)
                break;
            zframe_t *identity = s_workers_next (workers);
            zmsg_prepend (msg, &identity);
            zmsg_send (&msg, backend);
        }
        //  Send heartbeats to idle workers if it's time
        if (zclock_time () >= heartbeat_at) {
            worker_t *worker = (worker_t *) zlist_first (workers);
            while (worker) {
                zframe_send (&worker->identity, backend,
                             ZFRAME_REUSE + ZFRAME_MORE);
                zframe_t *frame = zframe_new (PPP_HEARTBEAT, 1);
                zframe_send (&frame, backend, 0);
                worker = (worker_t *) zlist_next (workers);
            }
            heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;
        }
        s_workers_purge (workers);
    }
    while (zlist_size (workers)) {
        worker_t *worker = (worker_t *) zlist_pop (workers);
        s_worker_destroy (&worker);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
}
void benchmark_notree (void *push, void *pub, void *router, unsigned int N_KEYS, unsigned int *keys,int N_THREADS) { printf ("\nCleaning the pagecache"); /*cleaning cache */ system ("./script.sh"); printf ("\n starting random read without a rb_btree"); int64_t diff = zclock_time (); unsigned int iter; int stop; unsigned int counter = 0; for (iter = 0; iter < N_KEYS; iter++) { unsigned int key = keys[iter]; size_t vallen; zframe_t *frame = zframe_new (&key, 4); zframe_send (&frame, push, 0); } stop = 1; zframe_t *frame = zframe_new (&stop, 4); zframe_send (&frame, pub, 0); iter = 0; while (iter < N_THREADS) { unsigned int temp; zmsg_t *msg = zmsg_recv (router); zframe_t *frame = zmsg_unwrap (msg); zframe_destroy (&frame); frame=zmsg_first (msg); if (zframe_size (frame) == strlen ("m")) { } else { memcpy (&temp, zframe_data (frame), 4); counter = counter + temp; iter++; } zmsg_destroy (&msg); } printf ("\nkeys processed:%u", counter); diff = zclock_time () - diff; float stat = ((float) counter * 1000) / (float) diff; printf ("\nrandom read without an rb_tree: %f keys per sec\n", stat); }
int main (void) { zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); void *backend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, "tcp://*:5555"); // For clients zsocket_bind (backend, "tcp://*:5556"); // For workers // List of available workers zlist_t *workers = zlist_new (); // Send out heartbeats at regular intervals uint64_t heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL; while (1) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll (items, zlist_size (workers)? 2: 1, HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Use worker address for LRU routing zmsg_t *msg = zmsg_recv (backend); if (!msg) break; // Interrupted // Any sign of life from worker means it's ready zframe_t *address = zmsg_unwrap (msg); worker_t *worker = s_worker_new (address); s_worker_ready (worker, workers); // Validate control message, or return reply to client if (zmsg_size (msg) == 1) { zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), PPP_READY, 1) && memcmp (zframe_data (frame), PPP_HEARTBEAT, 1)) { printf ("E: invalid message from worker"); zmsg_dump (msg); } zmsg_destroy (&msg); } else zmsg_send (&msg, frontend); } if (items [1].revents & ZMQ_POLLIN) { // Now get next client request, route to next worker zmsg_t *msg = zmsg_recv (frontend); if (!msg) break; // Interrupted zmsg_push (msg, s_workers_next (workers)); zmsg_send (&msg, backend); } // .split handle heartbeating // We handle heartbeating after any socket activity. First we send // heartbeats to any idle workers if it's time. Then we purge any // dead workers: if (zclock_time () >= heartbeat_at) { worker_t *worker = (worker_t *) zlist_first (workers); while (worker) { zframe_send (&worker->address, backend, ZFRAME_REUSE + ZFRAME_MORE); zframe_t *frame = zframe_new (PPP_HEARTBEAT, 1); zframe_send (&frame, backend, 0); worker = (worker_t *) zlist_next (workers); } heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL; } s_workers_purge (workers); } // When we're done, clean up properly while (zlist_size (workers)) { worker_t *worker = (worker_t *) zlist_pop (workers); s_worker_destroy (&worker); } zlist_destroy (&workers); zctx_destroy (&ctx); return 0; }
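The Paranoid Pirate queue above relies on a small worker bookkeeping API (s_worker_new, s_worker_ready, s_workers_next, s_workers_purge, s_worker_destroy) that this listing does not show. Here is a sketch along the lines of the zguide ppqueue helpers; the worker_t fields and the HEARTBEAT_LIVENESS define are assumptions, and s_worker_destroy (omitted) just frees the address frame, the identity string, and the struct:

typedef struct {
    zframe_t *address;          //  Address frame of worker
    char *identity;             //  Printable identity, for list lookups
    int64_t expiry;             //  Expires at this time
} worker_t;

//  Construct new worker
static worker_t *
s_worker_new (zframe_t *address)
{
    worker_t *self = (worker_t *) zmalloc (sizeof (worker_t));
    self->address = address;
    self->identity = zframe_strhex (address);
    self->expiry = zclock_time () + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS;
    return self;
}

//  Worker is ready: remove any stale entry and append as freshest
static void
s_worker_ready (worker_t *self, zlist_t *workers)
{
    worker_t *worker = (worker_t *) zlist_first (workers);
    while (worker) {
        if (streq (self->identity, worker->identity)) {
            zlist_remove (workers, worker);
            s_worker_destroy (&worker);
            break;
        }
        worker = (worker_t *) zlist_next (workers);
    }
    zlist_append (workers, self);
}

//  Return next available worker address
static zframe_t *
s_workers_next (zlist_t *workers)
{
    worker_t *worker = (worker_t *) zlist_pop (workers);
    assert (worker);
    zframe_t *frame = worker->address;
    worker->address = NULL;     //  Caller takes ownership of the frame
    s_worker_destroy (&worker);
    return frame;
}

//  Purge expired workers; the list holds oldest first, so we can stop
//  at the first worker that is still alive
static void
s_workers_purge (zlist_t *workers)
{
    worker_t *worker = (worker_t *) zlist_first (workers);
    while (worker) {
        if (zclock_time () < worker->expiry)
            break;              //  Worker is alive, we're done here
        zlist_remove (workers, worker);
        s_worker_destroy (&worker);
        worker = (worker_t *) zlist_first (workers);
    }
}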
void benchmark_tree (void *push, void *pub, void *router, unsigned int N_KEYS, unsigned int *keys, struct dbkey_rb_t dbkey_rb,int N_THREADS ) { /*cleaning cache */ system ("./script.sh"); float stat; int64_t diff = zclock_time (); unsigned int iter; int stop; unsigned int counter = 0; int more_requested = 0; iter = 0; while (iter < N_KEYS) { dbkey_t *dbkey; size_t vallen; int64_t diff2 = zclock_time (); while (1) { if(zclock_time()-diff2>1){ zframe_t *frame = zframe_recv_nowait (router); if (frame != NULL) { zframe_destroy (&frame); frame = zframe_recv_nowait (router); if (zframe_size (frame) == strlen ("m")) { zframe_destroy (&frame); break; } } diff2=zclock_time(); } dbkey = (dbkey_t *) malloc (sizeof (dbkey_t)); dbkey->key = keys[iter]; RB_INSERT (dbkey_rb_t, &dbkey_rb, dbkey); if (iter == N_KEYS - 1) { iter++; break; } else { iter++; } } dbkey_t *tr_iter = RB_MIN (dbkey_rb_t, &dbkey_rb); while (tr_iter) { zframe_t *frame = zframe_new (&(tr_iter->key), 4); zframe_send (&frame, push, 0); dbkey_t *temp = tr_iter; tr_iter = RB_NEXT (dbkey_rb_t, &dbkey_rb, tr_iter); RB_REMOVE (dbkey_rb_t, &dbkey_rb, temp); free (temp); } } stop = 1; zframe_t *frame = zframe_new (&stop, 4); zframe_send (&frame, pub, 0); iter = 0; while (iter < N_THREADS) { unsigned int temp; zmsg_t *msg = zmsg_recv (router); zframe_t *frame = zmsg_unwrap (msg); zframe_destroy (&frame); frame=zmsg_first (msg); if (zframe_size (frame) == strlen ("m")) { } else { memcpy (&temp, zframe_data (frame), 4); counter = counter + temp; iter++; } zmsg_destroy (&msg); } printf ("\nkeys processed:%u", counter); diff = zclock_time () - diff; stat = ((float) counter * 1000) / (float) diff; printf ("\nrandom read with an rb_tree: %f keys per sec\n", stat); }
int main(int argc, char* argv[]) { int i, client_num, worker_num; zctx_t* ctx; void* frontend; void* backend; zlist_t* workers; if (argc < 3) { fprintf(stderr, "arguments error ...\n"); return 1; } client_num = atoi(argv[1]); worker_num = atoi(argv[2]); ctx = zctx_new(); frontend = zsocket_new(ctx, ZMQ_ROUTER); backend = zsocket_new(ctx, ZMQ_ROUTER); zsocket_bind(frontend, "ipc://frontend.ipc"); zsocket_bind(backend, "ipc://backend.ipc"); for (i = 0; i < client_num; ++i) zthread_new(client_routine, NULL); for (i = 0; i < worker_num; ++i) zthread_new(worker_routine, NULL); workers = zlist_new(); while (1) { zmq_pollitem_t items[] = { {backend, 0, ZMQ_POLLIN, 0}, {frontend, 0, ZMQ_POLLIN, 0}, }; int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1); if (-1 == rc) break; if (items[0].revents & ZMQ_POLLIN) { zmsg_t* msg; zframe_t* identity; zframe_t* frame; msg = zmsg_recv(backend); if (NULL == msg) break; identity = zmsg_unwrap(msg); zlist_append(workers, identity); frame = zmsg_first(msg); if (0 == memcmp(zframe_data(frame), WORKER_READY, 1)) zmsg_destroy(&msg); else zmsg_send(&msg, frontend); } if (items[1].revents & ZMQ_POLLIN) { zmsg_t* msg = zmsg_recv(frontend); if (NULL != msg) { zmsg_wrap(msg, (zframe_t*)zlist_pop(workers)); zmsg_send(&msg, backend); } } } while (zlist_size(workers)) { zframe_t* frame = (zframe_t*)zlist_pop(workers); zframe_destroy(&frame); } zlist_destroy(&workers); zctx_destroy(&ctx); return 0; }
int main(int argc, char* argv[]) { zctx_t* ctx; void* cloudfe; void* cloudbe; int i; void* localfe; void* localbe; int capacity = 0; zlist_t* workers; if (argc < 2) { fprintf(stderr, "syntax: peering me {you} ...\n"); return 1; } self = argv[1]; fprintf(stdout, "I: preparing broker at %s ...\n", self); srand((unsigned int)time(NULL)); ctx = zctx_new(); cloudfe = zsocket_new(ctx, ZMQ_ROUTER); zsockopt_set_identity(cloudfe, self); zsocket_bind(cloudfe, "ipc://%s-cloud.ipc", self); cloudbe = zsocket_new(ctx, ZMQ_ROUTER); zsockopt_set_identity(cloudbe, self); for (i = 2; i < argc; ++i) { char* peer = argv[i]; fprintf(stdout, "I: connecting to cloud frontend at '%s'\n", peer); zsocket_connect(cloudbe, "ipc://%s-cloud.ipc", peer); } localfe = zsocket_new(ctx, ZMQ_ROUTER); zsocket_bind(localfe, "ipc://%s-localfe.ipc", self); localbe = zsocket_new(ctx, ZMQ_ROUTER); zsocket_bind(localbe, "ipc://%s-localbe.ipc", self); fprintf(stdout, "Press Enter when all brokers are started: "); getchar(); for (i = 0; i < NUM_WORKERS; ++i) zthread_new(worker_routine, NULL); for (i = 0; i < NUM_CLIENTS; ++i) zthread_new(client_routine, NULL); workers = zlist_new(); while (1) { zmsg_t* msg; zmq_pollitem_t backends[] = { {localbe, 0, ZMQ_POLLIN, 0}, {cloudbe, 0, ZMQ_POLLIN, 0}, }; int r = zmq_poll(backends, 2, capacity ? 1000 * ZMQ_POLL_MSEC : -1); if (-1 == r) break; msg = NULL; if (backends[0].revents & ZMQ_POLLIN) { zframe_t* identity; zframe_t* frame; msg = zmsg_recv(localbe); if (!msg) break; identity = zmsg_unwrap(msg); zlist_append(workers, identity); ++capacity; frame = zmsg_first(msg); if (0 == memcmp(zframe_data(frame), WORKER_READY, 1)) zmsg_destroy(&msg); } else if (backends[1].revents & ZMQ_POLLIN) { zframe_t* identity; msg = zmsg_recv(cloudbe); if (!msg) break; identity = zmsg_unwrap(msg); zframe_destroy(&identity); } for (i = 2; msg && i < argc; ++i) { char* data = (char*)zframe_data(zmsg_first(msg)); size_t size = zframe_size(zmsg_first(msg)); if (size == strlen(argv[i]) && 0 == memcmp(data, argv[i], size)) zmsg_send(&msg, cloudfe); } if (msg) zmsg_send(&msg, localfe); while (capacity) { int reroutable = 0; zmq_pollitem_t frontends[] = { {localfe, 0, ZMQ_POLLIN, 0}, {cloudfe, 0, ZMQ_POLLIN, 0}, }; r = zmq_poll(frontends, 2, 0); assert(r >= 0); if (frontends[1].revents & ZMQ_POLLIN) { msg = zmsg_recv(cloudfe); reroutable = 0; } else if (frontends[0].revents & ZMQ_POLLIN) { msg = zmsg_recv(localfe); reroutable = 1; } else break; if (0 != reroutable && argc > 2 && 0 == rand() % 5) { int random_peer = rand() % (argc - 2) + 2; zmsg_pushmem(msg, argv[random_peer], strlen(argv[random_peer])); zmsg_send(&msg, cloudbe); } else { zframe_t* frame = (zframe_t*)zlist_pop(workers); zmsg_wrap(msg, frame); zmsg_send(&msg, localbe); --capacity; } } } while (zlist_size(workers)) { zframe_t* frame = (zframe_t*)zlist_pop(workers); zframe_destroy(&frame); } zlist_destroy(&workers); zctx_destroy(&ctx); return 0; }
int main(void) { zctx_t *ctx = zctx_new(); void *frontend = zsocket_new(ctx, ZMQ_ROUTER); void *backend = zsocket_new(ctx, ZMQ_ROUTER); // IPC doesn't yet work on MS Windows. #if (defined (WIN32)) zsocket_bind(frontend, "tcp://*:5672"); zsocket_bind(backend, "tcp://*:5673"); #else zsocket_bind(frontend, "ipc://frontend.ipc"); zsocket_bind(backend, "ipc://backend.ipc"); #endif int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) zthread_new(client_task, NULL); int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) zthread_new(worker_task, NULL); // Queue of available workers zlist_t *workers = zlist_new(); // .split main load-balancer loop // Here is the main loop for the load balancer. It works the same way // as the previous example, but is a lot shorter because CZMQ gives // us an API that does more with fewer calls: while (1) { zmq_pollitem_t items[] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items[0].revents & ZMQ_POLLIN) { // Use worker identity for load-balancing zmsg_t *msg = zmsg_recv(backend); if (!msg) break; // Interrupted #if 0 // zmsg_unwrap is DEPRECATED as over-engineered, poor style zframe_t *identity = zmsg_unwrap(msg); #else zframe_t *identity = zmsg_pop(msg); zframe_t *delimiter = zmsg_pop(msg); zframe_destroy(&delimiter); #endif zlist_append(workers, identity); // Forward message to client if it's not a READY zframe_t *frame = zmsg_first(msg); if (memcmp(zframe_data(frame), WORKER_READY, strlen(WORKER_READY)) == 0) { zmsg_destroy(&msg); } else { zmsg_send(&msg, frontend); if (--client_nbr == 0) break; // Exit after N messages } } if (items[1].revents & ZMQ_POLLIN) { // Get client request, route to first available worker zmsg_t *msg = zmsg_recv(frontend); if (msg) { #if 0 // zmsg_wrap is DEPRECATED as unsafe zmsg_wrap(msg, (zframe_t *)zlist_pop(workers)); #else zmsg_pushmem(msg, NULL, 0); // delimiter zmsg_push(msg, (zframe_t *)zlist_pop(workers)); #endif zmsg_send(&msg, backend); } } } // When we're done, clean up properly while (zlist_size(workers)) { zframe_t *frame = (zframe_t *)zlist_pop(workers); zframe_destroy(&frame); } zlist_destroy(&workers); zctx_destroy(&ctx); return 0; }
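Note the equivalences the #else branches above spell out: zmsg_unwrap() is a pop of the identity frame followed by popping and destroying the empty delimiter, and zmsg_wrap() is a push of an empty delimiter followed by a push of the identity frame. The manual versions therefore produce exactly the same wire format as the deprecated calls they replace.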
void web_request(void *sweb, req_store_t * req_store, void *spss, void *sgraph) { zmsg_t *msg = zmsg_recv(sweb); zframe_t *address = zmsg_unwrap(msg); json_t *req_json; json_error_t error; printf("\nbroker:sweb received: %s\n", (const char *)zframe_data(zmsg_first(msg))); const char *data; size_t data_size = zframe_size(zmsg_first(msg)); data = zframe_data(zmsg_first(msg)); req_json = json_loadb(data, data_size, 0, &error); zmsg_destroy(&msg); int32_t requestId = request_store_add(req_store, address, req_json); json_t *clientRequest = json_object_get(req_json, "clientRequest"); json_t *request = json_object_get(clientRequest, "request"); const char *type = json_string_value(json_object_get(request, "type")); if (strcmp(type, "searchRequest") == 0) web_request_searchRequest(requestId, request, spss); else if (strcmp(type, "newNode") == 0) web_request_newNode(requestId, request, sgraph); else if (strcmp(type, "newPosition") == 0) web_request_newPosition(requestId, request, spss); else if (strcmp(type, "newLink") == 0) web_request_newLink(requestId, request, sgraph); else if (strcmp(type, "delLink") == 0) web_request_delLink(requestId, request, sgraph); else if (strcmp(type, "delNode") == 0) web_request_delNode(requestId, request, sgraph); else if (strcmp(type, "newNodeData") == 0) web_request_newNodeData(requestId, request, sgraph); else if (strcmp(type, "newLinkData") == 0) web_request_newLinkData(requestId, request, sgraph); else { //TODO process request //malformed request printf("\ni received a malformed request : %s", type); //delete request zframe_destroy(&address); request_store_delete(req_store, requestId); } }
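The request shape the dispatcher expects can be read off the json_object_get chain above; an illustrative example (field values are made up):

/*  Illustrative client request, matching the parsing above:
    {
        "clientRequest": {
            "request": {
                "type": "searchRequest"
            }
        }
    }
    web_request() stores the whole object in req_store under a fresh
    requestId, then dispatches on clientRequest.request.type.
*/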
int main (int argc, char *argv []) { // First argument is this broker's name // Other arguments are our peers' names // if (argc < 2) { printf ("syntax: peering3 me {you}...\n"); exit (EXIT_FAILURE); } self = argv [1]; printf ("I: preparing broker at %s...\n", self); srandom ((unsigned) time (NULL)); // Prepare our context and sockets zctx_t *ctx = zctx_new (); char endpoint [256]; // Bind cloud frontend to endpoint void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER); zsockopt_set_identity (cloudfe, self); zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self); // Bind state backend / publisher to endpoint void *statebe = zsocket_new (ctx, ZMQ_PUB); zsocket_bind (statebe, "ipc://%s-state.ipc", self); // Connect cloud backend to all peers void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER); zsockopt_set_identity (cloudbe, self); int argn; for (argn = 2; argn < argc; argn++) { char *peer = argv [argn]; printf ("I: connecting to cloud frontend at '%s'\n", peer); zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer); } // Connect statefe to all peers void *statefe = zsocket_new (ctx, ZMQ_SUB); for (argn = 2; argn < argc; argn++) { char *peer = argv [argn]; printf ("I: connecting to state backend at '%s'\n", peer); zsocket_connect (statefe, "ipc://%s-state.ipc", peer); } // Prepare local frontend and backend void *localfe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (localfe, "ipc://%s-localfe.ipc", self); void *localbe = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (localbe, "ipc://%s-localbe.ipc", self); // Prepare monitor socket void *monitor = zsocket_new (ctx, ZMQ_PULL); zsocket_bind (monitor, "ipc://%s-monitor.ipc", self); // Start local workers int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) zthread_new (ctx, worker_task, NULL); // Start local clients int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) zthread_new (ctx, client_task, NULL); // Interesting part // ------------------------------------------------------------- // Publish-subscribe flow // - Poll statefe and process capacity updates // - Each time capacity changes, broadcast new value // Request-reply flow // - Poll primary and process local/cloud replies // - While worker available, route localfe to local or cloud // Queue of available workers int local_capacity = 0; int cloud_capacity = 0; zlist_t *workers = zlist_new (); while (1) { zmq_pollitem_t primary [] = { { localbe, 0, ZMQ_POLLIN, 0 }, { cloudbe, 0, ZMQ_POLLIN, 0 }, { statefe, 0, ZMQ_POLLIN, 0 }, { monitor, 0, ZMQ_POLLIN, 0 } }; // If we have no workers anyhow, wait indefinitely int rc = zmq_poll (primary, 4, local_capacity? 
1000 * ZMQ_POLL_MSEC: -1); if (rc == -1) break; // Interrupted // Track if capacity changes during this iteration int previous = local_capacity; // Handle reply from local worker zmsg_t *msg = NULL; if (primary [0].revents & ZMQ_POLLIN) { msg = zmsg_recv (localbe); if (!msg) break; // Interrupted zframe_t *address = zmsg_unwrap (msg); zlist_append (workers, address); local_capacity++; // If it's READY, don't route the message any further zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), LRU_READY, 1) == 0) zmsg_destroy (&msg); } // Or handle reply from peer broker else if (primary [1].revents & ZMQ_POLLIN) { msg = zmsg_recv (cloudbe); if (!msg) break; // Interrupted // We don't use peer broker address for anything zframe_t *address = zmsg_unwrap (msg); zframe_destroy (&address); } // Route reply to cloud if it's addressed to a broker for (argn = 2; msg && argn < argc; argn++) { char *data = (char *) zframe_data (zmsg_first (msg)); size_t size = zframe_size (zmsg_first (msg)); if (size == strlen (argv [argn]) && memcmp (data, argv [argn], size) == 0) zmsg_send (&msg, cloudfe); } // Route reply to client if we still need to if (msg) zmsg_send (&msg, localfe); // Handle capacity updates if (primary [2].revents & ZMQ_POLLIN) { char *status = zstr_recv (statefe); cloud_capacity = atoi (status); free (status); } // Handle monitor message if (primary [3].revents & ZMQ_POLLIN) { char *status = zstr_recv (monitor); printf ("%s\n", status); free (status); } // Now route as many clients requests as we can handle // - If we have local capacity we poll both localfe and cloudfe // - If we have cloud capacity only, we poll just localfe // - Route any request locally if we can, else to cloud // while (local_capacity + cloud_capacity) { zmq_pollitem_t secondary [] = { { localfe, 0, ZMQ_POLLIN, 0 }, { cloudfe, 0, ZMQ_POLLIN, 0 } }; if (local_capacity) rc = zmq_poll (secondary, 2, 0); else rc = zmq_poll (secondary, 1, 0); assert (rc >= 0); if (secondary [0].revents & ZMQ_POLLIN) msg = zmsg_recv (localfe); else if (secondary [1].revents & ZMQ_POLLIN) msg = zmsg_recv (cloudfe); else break; // No work, go back to primary if (local_capacity) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zmsg_wrap (msg, frame); zmsg_send (&msg, localbe); local_capacity--; } else { // Route to random broker peer int random_peer = randof (argc - 2) + 2; zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer])); zmsg_send (&msg, cloudbe); } } if (local_capacity != previous) { // We stick our own address onto the envelope zstr_sendm (statebe, self); // Broadcast new capacity zstr_sendf (statebe, "%d", local_capacity); } } // When we're done, clean up properly while (zlist_size (workers)) { zframe_t *frame = (zframe_t *) zlist_pop (workers); zframe_destroy (&frame); } zlist_destroy (&workers); zctx_destroy (&ctx); return EXIT_SUCCESS; }
int main (int argc, char *argv[]) { if (argc != 4) { exit (-1); } int numb_msgs = atoi (argv[2]); int numb_unimp_threads = atoi (argv[3]); zctx_t *ctx = zctx_new (); void *router_imp = zsocket_new (ctx, ZMQ_ROUTER); zsocket_set_rcvhwm (router_imp, 500000000); zsocket_bind (router_imp, "%s:9000", argv[1]); void *router_unimp = zsocket_new (ctx, ZMQ_ROUTER); zsocket_set_rcvhwm (router_unimp, 500000000); zsocket_bind (router_unimp, "%s:9001", argv[1]); void *pub = zsocket_new (ctx, ZMQ_PUB); zsocket_bind (pub, "%s:9002", argv[1]); int64_t time[3]; int64_t idle=0; int64_t diff; zclock_sleep (1000); //send the signal to start zmsg_t *amsg = zmsg_new (); zmsg_add (amsg, zframe_new ("all", 4)); zmsg_send (&amsg, pub); zmq_pollitem_t pollitem[2] = { {router_imp, 0, ZMQ_POLLIN} , {router_unimp, 0, ZMQ_POLLIN} }; unsigned long av_dropped_priority=0; unsigned long dropped=0; unsigned long av_processed_priority=0; unsigned long processed=0; int i; int imp_counter = 0; int once = 1; int ndiffs = 0; unsigned char drop_priority = 0; for (i = 0; i < 2 * numb_msgs; i++) { diff = zclock_time (); if (zmq_poll (pollitem, 2, -1) == -1) { exit (-1); } diff = zclock_time () - diff; idle = idle + diff; update_drop_rate(&diff,&drop_priority,&ndiffs); if (pollitem[0].revents & ZMQ_POLLIN) { zmsg_t *msg = zmsg_recv (router_imp); if (!msg) { exit (-1); } zmsg_destroy (&msg); imp_counter++; if (imp_counter == numb_msgs) { time[0] = zclock_time (); } } else { if (pollitem[1].revents & ZMQ_POLLIN) { if (once) { time[1] = zclock_time (); once = 0; } zmsg_t *msg = zmsg_recv (router_unimp); if (!msg) { exit (-1); } zframe_t *frame = zmsg_unwrap (msg); zframe_destroy (&frame); unsigned char priority; memcpy (&priority, zframe_data (zmsg_first (msg)), 1); if (priority < drop_priority) { av_dropped_priority+=priority; dropped++; // printf ("dropped:%u\n", priority); zmsg_destroy (&msg); } else { av_processed_priority+=priority; processed++; // printf ("received:%u\n", priority); zmsg_destroy (&msg); } } } } time[2] = zclock_time (); printf ("msgs received:%d\n", i); amsg = zmsg_new (); zmsg_add (amsg, zframe_new ("all", 4)); zmsg_send (&amsg, pub); zmsg_t *msg = zmsg_recv (router_imp); zframe_t *frame = zmsg_unwrap (msg); zframe_destroy (&frame); int64_t time_imp[2]; frame = zmsg_pop (msg); memcpy (time_imp, zframe_data (frame), zframe_size (frame)); zframe_destroy (&frame); zmsg_destroy (&msg); int64_t time_unimp[numb_unimp_threads][2]; for(i=0; i<numb_unimp_threads; i++){ msg = zmsg_recv (router_unimp); frame = zmsg_unwrap (msg); zframe_destroy (&frame); frame = zmsg_pop (msg); memcpy (time_unimp[i], zframe_data (frame), zframe_size (frame)); zframe_destroy (&frame); zmsg_destroy (&msg); } //compute average latency printf ("\nTime when important msgs started to be sent: %lld\n", time_imp[0]); printf ("\nTime when important msgs were processed: %lld\n", time[0]); printf ("\nDifference: %lld\n", time[0] - time_imp[0]); for(i=0; i<numb_unimp_threads; i++){ printf ("\nTime when unimportant msgs started to be sent: %lld\n", time_unimp[i][0]); printf ("\nTime when unimportant msgs were processed: %lld\n", time[2]); printf ("\nDifference: %lld\n", time[2] - time_unimp[i][0]); } printf ("\nTime when unimportant msgs started to be processed: %lld\n", time[1]); printf ("idle time:%llu\n",idle); printf ("dropped msgs:%llu\n",dropped); printf ("av_dropped_priority:%llu\n",av_dropped_priority/dropped); printf ("processed msgs:%llu\n",processed); printf ("av_processed_priority:%llu\n",av_processed_priority/processed); }