Example #1
static void
s_broker_client_msg(broker_t *self, zframe_t *sender, zmsg_t *msg)
{
	assert(zmsg_size(msg) >= 2);

	zframe_t *service_frame = zmsg_pop(msg);
	service_t *service = s_service_require(self, service_frame);

	zmsg_wrap(msg, zframe_dup(sender));

	if (zframe_size(service_frame) >= 4 && memcmp(zframe_data(service_frame), "mmi.", 4) == 0){
		char *return_code;
		if (zframe_streq(service_frame, "mmi.service")){
			char *name = zframe_strdup(zmsg_last(msg));
			service_t *service = (service_t *)zhash_lookup(self->services, name);
			return_code = service && service->workers ? "200" : "404";
			free(name);
		}
		else
			return_code = "501";

		zframe_reset(zmsg_last(msg), return_code, strlen(return_code));

		zframe_t *client = zmsg_unwrap(msg);
		zmsg_prepend(msg, &service_frame);
		zmsg_pushstr(msg, MDPC_CLIENT);
		zmsg_wrap(msg, client);
		zmsg_send(&msg, self->socket);
	}
	else 
		s_service_dispatch(service, msg);
	zframe_destroy(&service_frame);
}
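Note: the zmsg_wrap/zmsg_unwrap pair used throughout these examples comes from the CZMQ-era zmsg API. A minimal sketch of the assumed semantics (hypothetical helper names, illustration only, not the library source): wrap pushes an empty delimiter and then the address frame, so the address travels first; unwrap pops the address and drops the delimiter behind it, if any.

//  Sketch of the assumed zmsg_wrap/zmsg_unwrap semantics (hypothetical
//  helper names; illustration only, not the CZMQ source)
static void
my_zmsg_wrap (zmsg_t *self, zframe_t *frame)
{
    zmsg_pushmem (self, "", 0);     //  Empty delimiter frame
    zmsg_push (self, frame);        //  Address frame travels first
}

static zframe_t *
my_zmsg_unwrap (zmsg_t *self)
{
    zframe_t *frame = zmsg_pop (self);      //  Address frame
    zframe_t *empty = zmsg_first (self);
    if (empty && zframe_size (empty) == 0) {
        empty = zmsg_pop (self);            //  Drop the delimiter, if any
        zframe_destroy (&empty);
    }
    return frame;
}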
Example #2
static void
s_worker_process (broker_t *self, char *sender, zmsg_t *msg)
{
    assert (zmsg_parts (msg) >= 1);     //  At least, command

    char *command = zmsg_pop (msg);
    int worker_ready = (zhash_lookup (self->workers, sender) != NULL);
    worker_t *worker = s_worker_require (self, sender);

    if (streq (command, MDPW_READY)) {
        if (worker_ready)               //  Not first command in session
            s_worker_delete (self, worker, 1);
        else
        if (strlen (sender) >= 4  //  Reserved service name
        &&  memcmp (sender, "mmi.", 4) == 0)
            s_worker_delete (self, worker, 1);
        else {
            //  Attach worker to service and mark as idle
            char *service_name = zmsg_pop (msg);
            worker->service = s_service_require (self, service_name);
            worker->service->workers++;
            s_worker_waiting (self, worker);
            free (service_name);
        }
    }
    else
    if (streq (command, MDPW_REPLY)) {
        if (worker_ready) {
            //  Remove & save client return envelope and insert the
            //  protocol header and service name, then rewrap envelope.
            char *client = zmsg_unwrap (msg);
            zmsg_wrap (msg, MDPC_CLIENT, worker->service->name);
            zmsg_wrap (msg, client, "");
            free (client);
            zmsg_send (&msg, self->socket);
            s_worker_waiting (self, worker);
        }
        else
            s_worker_delete (self, worker, 1);
    }
    else
    if (streq (command, MDPW_HEARTBEAT)) {
        if (worker_ready)
            worker->expiry = s_clock () + HEARTBEAT_EXPIRY;
        else
            s_worker_delete (self, worker, 1);
    }
    else
    if (streq (command, MDPW_DISCONNECT))
        s_worker_delete (self, worker, 0);
    else {
        s_console ("E: invalid input message (%d)", (int) *command);
        zmsg_dump (msg);
    }
    free (command);
    zmsg_destroy (&msg);
}
Example #3
File: broker.c Project: tnako/DP
static void s_broker_client_msg(broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size(msg) >= 2);     //  Service name + body

    zframe_t *service_frame = zmsg_pop(msg);
    service_t *service = s_service_require(self, service_frame);
    // Should not create the service here when the request comes from a client

    //  Set reply return identity to client sender
    zmsg_wrap(msg, zframe_dup(sender));

    //  If we got a MMI service request, process that internally
    if (zframe_size(service_frame) >= 4 &&  memcmp(zframe_data(service_frame), "mmi.", 4) == 0) {
        char *return_code;

        if (zframe_streq(service_frame, "mmi.service")) {
            char *name = zframe_strdup (zmsg_last (msg));
            service_t *service = (service_t *) zhash_lookup(self->services, name);
            if (service) {
                if (service->workers) {
                    return_code = "200";
                } else {
                    return_code = "404";
                }
            } else {
                return_code = "401";
            }
            free(name);
        } else {
            return_code = "501";
        }

        zframe_reset(zmsg_last(msg), return_code, strlen(return_code));

        //  Remove & save client return envelope and insert the
        //  protocol header and service name, then rewrap envelope.
        zframe_t *client = zmsg_unwrap (msg);
        zmsg_push(msg, zframe_dup(service_frame));
        zmsg_pushstr(msg, MDPC_CLIENT);
        zmsg_wrap(msg, client);
        zmsg_send(&msg, self->socket);
    } else {
        //  Else dispatch the message to the requested service
        s_service_dispatch(service, msg);
    }

    zframe_destroy(&service_frame);
}
Example #4
File: broker.c Project: tnako/DP
static void s_broker_worker_msg(broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size(msg) >= 1);     //  At least, command

    zframe_t *command = zmsg_pop(msg);
    char *id_string = zframe_strhex(sender);
    int worker_ready = (zhash_lookup(self->workers, id_string) != NULL);
    free (id_string);
	
    worker_t *worker = s_worker_require(self, sender);

    if (zframe_streq(command, MDPW_READY)) {
        if (worker_ready) {               //  Not first command in session
            s_worker_delete(worker, 1);
            // TODO: think this through; in effect this acts as a heartbeat
        } else {
            if (zframe_size(sender) >= 4 && memcmp(zframe_data (sender), "mmi.", 4) == 0) {
                s_worker_delete(worker, 1);
                // TODO: think this through; in effect this acts as a heartbeat
            } else {
                //  Attach worker to service and mark as idle
                zframe_t *service_frame = zmsg_pop(msg);
                worker->service = s_service_require(self, service_frame);
                worker->service->workers++;
                s_worker_waiting(worker);
                zframe_destroy(&service_frame);
            }
        }
    } else if (zframe_streq(command, MDPW_REPLY)) {
        if (worker_ready) {
            //  Remove and save client return envelope and insert the
            //  protocol header and service name, then rewrap envelope.
            zframe_t *client = zmsg_unwrap(msg);
            zmsg_pushstr(msg, worker->service->name);
            zmsg_pushstr(msg, MDPC_CLIENT);
            zmsg_wrap(msg, client);
            zmsg_send(&msg, self->socket);
            s_worker_waiting(worker);
        } else {
            // The link between worker and broker was simply lost;
            // treated the same as a missed heartbeat
            s_worker_delete(worker, 1);
        }
    } else if (zframe_streq(command, MDPW_HEARTBEAT)) {
        if (worker_ready) {
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
        } else {
            // The link between worker and broker was simply lost;
            // treated the same as a missed heartbeat
            s_worker_delete(worker, 1);
        }
    } else if (zframe_streq (command, MDPW_DISCONNECT)) {
        s_worker_delete(worker, 0);
    } else {
        zclock_log ("E: invalid input message");
        zmsg_dump (msg);
    }
    zframe_destroy (&command);    //  command is a zframe_t, not a malloc'd string
    zmsg_destroy (&msg);
}
Example #5
static void
s_worker_process (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 1);     //  At least, command

    zframe_t *command = zmsg_pop (msg);
    char *identity = zframe_strhex (sender);
    int worker_ready = (zhash_lookup (self->workers, identity) != NULL);
    free (identity);
    worker_t *worker = s_worker_require (self, sender);

    if (zframe_streq (command, MDPW_READY)) {
        if (worker_ready)               //  Not first command in session
            s_worker_delete (self, worker, 1);
        else
        if (zframe_size (sender) >= 4  //  Reserved service name
        &&  memcmp (zframe_data (sender), "mmi.", 4) == 0)
            s_worker_delete (self, worker, 1);
        else {
            //  Attach worker to service and mark as idle
            zframe_t *service_frame = zmsg_pop (msg);
            worker->service = s_service_require (self, service_frame);
            worker->service->workers++;
            s_worker_waiting (self, worker);
            zframe_destroy (&service_frame);
        }
    }
    else
    if (zframe_streq (command, MDPW_REPLY)) {
        if (worker_ready) {
            //  Remove & save client return envelope and insert the
            //  protocol header and service name, then rewrap envelope.
            zframe_t *client = zmsg_unwrap (msg);
            zmsg_pushstr (msg, worker->service->name);
            zmsg_pushstr (msg, MDPC_CLIENT);
            zmsg_wrap (msg, client);
            zmsg_send (&msg, self->socket);
            s_worker_waiting (self, worker);
        }
        else
            s_worker_delete (self, worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_HEARTBEAT)) {
        if (worker_ready)
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
        else
            s_worker_delete (self, worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_DISCONNECT))
        s_worker_delete (self, worker, 0);
    else {
        zclock_log ("E: invalid input message");
        zmsg_dump (msg);
    }
    zframe_destroy (&command);    //  command is a zframe_t, not a malloc'd string
    zmsg_destroy (&msg);
}
Example #6
File: spqueue.c Project: Carl4/zguide
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "tcp://*:5555");    //  For clients
    zsocket_bind (backend,  "tcp://*:5556");    //  For workers

    //  Queue of available workers
    zlist_t *workers = zlist_new ();
    
    //  The body of this example is exactly the same as lruqueue2.
    //  .skip
    while (1) {
        zmq_pollitem_t items [] = {
            { backend,  0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            //  Use worker address for LRU routing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          //  Interrupted
            zframe_t *address = zmsg_unwrap (msg);
            zlist_append (workers, address);

            //  Forward message to client if it's not a READY
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), LRU_READY, 1) == 0)
                zmsg_destroy (&msg);
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Get client request, route to first available worker
            zmsg_t *msg = zmsg_recv (frontend);
            if (msg) {
                zmsg_wrap (msg, (zframe_t *) zlist_pop (workers));
                zmsg_send (&msg, backend);
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
    //  .until
}
Example #7
zmsg_t *create_sendback_message(zmsg_t *msg)
{
    zframe_t *cli_identity = zmsg_unwrap(msg);

    zmsg_t *sendback_msg = zmsg_new();
    zmsg_wrap(sendback_msg, cli_identity);

    return sendback_msg;
}
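A possible caller for the helper above (hypothetical sketch; 'socket' and the reply body are invented for illustration): pull the envelope off an incoming request and reuse it for the reply.

//  Hypothetical usage of create_sendback_message on a ROUTER socket
zmsg_t *request = zmsg_recv (socket);               //  'socket' is assumed
zmsg_t *reply = create_sendback_message (request);  //  Takes the envelope
zmsg_addstr (reply, "OK");                          //  Invented reply body
zmsg_send (&reply, socket);
zmsg_destroy (&request);                            //  Rest of request unused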
Example #8
static void
s_broker_worker_msg(broker_t *self, zframe_t *sender, zmsg_t *msg)
{
	assert(zmsg_size(msg) >= 1);

	zframe_t *command = zmsg_pop(msg);
	char *id_string = zframe_strhex(sender);
	int worker_ready = (zhash_lookup(self->workers, id_string) != NULL);
	free(id_string);
	worker_t *worker = s_worker_require(self, sender);

	if (zframe_streq(command, MDPW_READY)){
		if (worker_ready)
			s_worker_delete(worker, 1);
		else 
		if (zframe_size(sender) >= 4 && memcmp(zframe_data(sender), "mmi.", 4) == 0)
			s_worker_delete(worker, 1);
		else {
			zframe_t *service_frame = zmsg_pop(msg);
			worker->service = s_service_require(self, service_frame);
			worker->service->workers++;
			s_worker_waiting(worker);
			zframe_destroy(&service_frame);
		}
	}
	else
	if (zframe_streq(command, MDPW_REPLY)){
		if (worker_ready){
			zframe_t *client = zmsg_unwrap(msg);
			zmsg_pushstr(msg, worker->service->name);
			zmsg_pushstr(msg, MDPC_CLIENT);
			zmsg_wrap(msg, client);
			zmsg_send(&msg, self->socket);
			s_worker_waiting(worker);
		}
		else 
			s_worker_delete(worker, 1);
	}
	else
	if (zframe_streq(command, MDPW_HEARTBEAT)){
		if (worker_ready)
			worker->expiry = zclock_time() + HEARTBEAT_EXPIRY;
		else
			s_worker_delete(worker, 1);
	}
	else
	if (zframe_streq(command, MDPW_DISCONNECT))
		s_worker_delete(worker, 0);
	else {
		zclock_log("E: invalid input message");
		zmsg_dump(msg);
	}
	zframe_destroy(&command);    //  command is a zframe_t, not a malloc'd string
	zmsg_destroy(&msg);
}
Example #9
File: spqueue.c Project: tzuryby/zguide
int main (void)
{
    s_version_assert (2, 1);

    //  Prepare our context and sockets
    void *context = zmq_init (1);
    void *frontend = zmq_socket (context, ZMQ_XREP);
    void *backend  = zmq_socket (context, ZMQ_XREP);
    zmq_bind (frontend, "tcp://*:5555");    //  For clients
    zmq_bind (backend,  "tcp://*:5556");    //  For workers

    //  Queue of available workers
    int available_workers = 0;
    char *worker_queue [MAX_WORKERS];

    while (1) {
        zmq_pollitem_t items [] = {
            { backend,  0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers
        if (available_workers)
            zmq_poll (items, 2, -1);
        else
            zmq_poll (items, 1, -1);

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *zmsg = zmsg_recv (backend);
            //  Use worker address for LRU routing
            assert (available_workers < MAX_WORKERS);
            worker_queue [available_workers++] = zmsg_unwrap (zmsg);

            //  Return reply to client if it's not a READY
            if (strcmp (zmsg_address (zmsg), "READY") == 0)
                zmsg_destroy (&zmsg);
            else
                zmsg_send (&zmsg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Now get next client request, route to next worker
            zmsg_t *zmsg = zmsg_recv (frontend);
            //  REQ socket in worker needs an envelope delimiter
            zmsg_wrap (zmsg, worker_queue [0], "");
            zmsg_send (&zmsg, backend);

            //  Dequeue and drop the next worker address
            free (worker_queue [0]);
            DEQUEUE (worker_queue);
            available_workers--;
        }
    }
    //  We never exit the main loop
    return 0;
}
Example #10
void
mdp_worker_send (mdp_worker_t *self, zmsg_t **report_p, zframe_t *reply_to)
{
    assert (report_p);
    zmsg_t *report = *report_p;
    assert (report);
    assert (reply_to);
    // Add client address
    zmsg_wrap (report, zframe_dup (reply_to));
    s_mdp_worker_send_to_broker (self, MDPW_REPORT, NULL, report);
    zmsg_destroy (report_p);
}
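A short usage sketch for mdp_worker_send, assuming the API above ('session', the report body, and a reply_to frame saved from an earlier request are assumptions):

//  Hypothetical caller: send one report back to a known client
zmsg_t *report = zmsg_new ();
zmsg_addstr (report, "partial result");         //  Invented report body
mdp_worker_send (session, &report, reply_to);   //  Takes ownership of report
//  report is now NULL; reply_to is duplicated, so the caller still owns it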
Example #11
static void
s_service_internal (broker_t *self, char *service_name, zmsg_t *msg)
{
    if (streq (service_name, "mmi.service")) {
        service_t *service = 
            (service_t *) zhash_lookup (self->services, zmsg_body (msg));
        if (service && service->workers)
            zmsg_body_set (msg, "200");
        else
            zmsg_body_set (msg, "404");
    }
    else
        zmsg_body_set (msg, "501");

    //  Remove & save client return envelope and insert the
    //  protocol header and service name, then rewrap envelope.
    char *client = zmsg_unwrap (msg);
    zmsg_wrap (msg, MDPC_CLIENT, service_name);
    zmsg_wrap (msg, client, "");
    free (client);
    zmsg_send (&msg, self->socket);
}
Example #12
void pss_response_delNode(req_t * req, json_t * response, int32_t requestId,
			  void *sweb, req_store_t * req_store)
{
	const char *ack = json_string_value(json_object_get(response,
							    "ack"));

	if (strcmp(ack, "ok") == 0) {

		json_t *web_resp = json_object();
		json_object_set_new(web_resp, "type", json_string("newData"));
		//TODO at the moment only the original node gets the update, which is good enough for me
		json_t *sessionIds = json_array();
		json_array_append(sessionIds,
				  json_object_get(req->request, "sessionId"));
		json_object_set_new(web_resp, "sessionIds", sessionIds);

		json_t *newData = json_object();
		json_t *deletedNodes = json_array();
		json_array_append(deletedNodes,
				  json_object_get
				  (json_object_get
				   (json_object_get
				    (req->request,
				     "clientRequest"), "request"), "id"));
		json_object_set_new(newData, "deletedNodes", deletedNodes);

		json_object_set_new(web_resp, "newData", newData);

		zmsg_t *res = zmsg_new();
		char *web_res_str = json_dumps(web_resp,
					       JSON_COMPACT);

		printf("\nbroker:sweb sent: %s\n", web_res_str);
		zmsg_addstr(res, web_res_str);
		free(web_res_str);
		zmsg_wrap(res, req->address);
		zmsg_send(&res, sweb);
		json_decref(web_resp);

	} else {

		if (strcmp(ack, "fail") == 0) {
			//TODO ??
		}
	}
	request_store_delete(req_store, requestId);
	json_decref(response);

}
Example #13
// Handle input from client, on frontend
int s_handle_frontend(zloop_t *loop, zmq_pollitem_t *poller, void *arg)
{
	lbbroker_t *self = (lbbroker_t *)arg;
	zmsg_t *msg = zmsg_recv(self->frontend);
	if (msg) {
		zmsg_wrap(msg, (zframe_t *)zlist_pop(self->workers));
		zmsg_send(&msg, self->backend);

		// Cancel reader on frontend if we went from 1 to 0 workers
		if (zlist_size(self->workers) == 0) {
			zmq_pollitem_t poller = { self->frontend, 0, ZMQ_POLLIN };
			zloop_poller_end(loop, &poller);
		}
	}
	return 0;
}
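The matching backend handler is not shown here; a sketch of what it might look like, inferred from the frontend handler above (s_handle_backend and the WORKER_READY check are assumptions, mirroring the other load-balancer examples on this page):

// Hypothetical counterpart to s_handle_frontend: a worker became available
int s_handle_backend(zloop_t *loop, zmq_pollitem_t *poller, void *arg)
{
	lbbroker_t *self = (lbbroker_t *)arg;
	zmsg_t *msg = zmsg_recv(self->backend);
	if (msg) {
		zframe_t *identity = zmsg_unwrap(msg);
		zlist_append(self->workers, identity);

		// Re-enable reader on frontend if we went from 0 to 1 workers
		if (zlist_size(self->workers) == 1) {
			zmq_pollitem_t poller = { self->frontend, 0, ZMQ_POLLIN };
			zloop_poller(loop, &poller, s_handle_frontend, self);
		}
		// Forward reply to client if it's not a READY signal
		zframe_t *frame = zmsg_first(msg);
		if (memcmp(zframe_data(frame), WORKER_READY, 1) == 0)
			zmsg_destroy(&msg);
		else
			zmsg_send(&msg, self->frontend);
	}
	return 0;
}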
Example #14
static void
s_client_process (broker_t *self, char *sender, zmsg_t *msg)
{
    assert (zmsg_parts (msg) >= 2);     //  Service name + body

    char *service_name = zmsg_pop (msg);
    service_t *service = s_service_require (self, service_name);
    //  Set reply return address to client sender
    zmsg_wrap (msg, sender, "");
    if (strlen (service_name) >= 4
    &&  memcmp (service_name, "mmi.", 4) == 0)
        s_service_internal (self, service_name, msg);
    else
        s_service_dispatch (self, service, msg);
    free (service_name);
}
Example #15
File: broker.c Project: tnako/DP
static void s_worker_send(worker_t *self, char *command, zmsg_t *msg)
{
    msg = (msg ? zmsg_dup(msg): zmsg_new());
	
    zmsg_pushstr(msg, command);
    zmsg_pushstr(msg, MDPW_WORKER);

    //  Stack routing envelope to start of message
    zmsg_wrap(msg, zframe_dup(self->identity));

    if (self->broker->verbose) {
        zclock_log ("I: sending %s to worker", mdps_commands [(int) *command]);
        zmsg_dump(msg);
    }
    zmsg_send(&msg, self->broker->socket);
}
Example #16
static void
s_client_process (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 2);     //  Service name + body

    zframe_t *service_frame = zmsg_pop (msg);
    service_t *service = s_service_require (self, service_frame);

    //  Set reply return address to client sender
    zmsg_wrap (msg, zframe_dup (sender));
    if (zframe_size (service_frame) >= 4
    &&  memcmp (zframe_data (service_frame), "mmi.", 4) == 0)
        s_service_internal (self, service_frame, msg);
    else
        s_service_dispatch (self, service, msg);
    zframe_destroy (&service_frame);
}
Example #17
static void
s_worker_send(worker_t *self, char *command, char *option, zmsg_t *msg)
{
	msg = msg ? zmsg_dup(msg) : zmsg_new();

	if (option)
		zmsg_pushstr(msg, option);
	zmsg_pushstr(msg, command);
	zmsg_pushstr(msg, MDPW_WORKER);

	zmsg_wrap(msg, zframe_dup(self->identity));

	if (self->broker->verbose){
		zclock_log("I: sending %s to worker", mdps_commands[(int) *command]);
		zmsg_dump(msg);
	}
	zmsg_send(&msg, self->broker->socket);

}
Example #18
void graph_response_newNodeDataResponse(req_t * req, json_t * request,
					json_t * response, int32_t requestId,
					void *sweb, req_store_t * req_store)
{

	if (strcmp
	    (json_string_value(json_object_get(response, "ack")), "ok") == 0) {

		json_t *nodeData = json_object_get(request, "nodeData");
		json_t *id = json_object_get(request, "id");

		json_t *web_resp = json_object();
		json_object_set_new(web_resp, "type", json_string("newData"));
		//TODO at the moment only the original node gets the update, which is good enough for me
		json_t *sessionIds = json_array();
		json_array_append
		    (sessionIds, json_object_get(req->request, "sessionId"));
		json_object_set_new(web_resp, "sessionIds", sessionIds);

		json_t *newData = json_object();
		json_t *newNodeData = json_array();
		json_t *element = json_object();
		json_object_set(element, "id", id);
		json_object_set(element, "nodeData", nodeData);
		json_array_append(newNodeData, element);
		json_object_set_new(newData, "newNodeData", newNodeData);

		json_object_set_new(web_resp, "newData", newData);

		zmsg_t *res = zmsg_new();
		char *web_res_str = json_dumps(web_resp,
					       JSON_COMPACT);
		printf("\nbroker:sweb sent: %s\n", web_res_str);
		zmsg_addstr(res, web_res_str);
		free(web_res_str);
		zmsg_wrap(res, req->address);
		zmsg_send(&res, sweb);
		json_decref(web_resp);
	} else {
	}
	request_store_delete(req_store, requestId);

}
Example #19
static void
s_worker_send (
    broker_t *self, worker_t *worker,
    char *command, char *option, zmsg_t *msg)
{
    msg = msg? zmsg_dup (msg): zmsg_new (NULL);

    //  Stack protocol envelope to start of message
    if (option)                 //  Optional frame after command
        zmsg_push (msg, option);
    zmsg_push (msg, command);
    zmsg_push (msg, MDPW_WORKER);
    //  Stack routing envelope to start of message
    zmsg_wrap (msg, worker->identity, "");

    if (self->verbose) {
        s_console ("I: sending %s to worker",
            mdps_commands [(int) *command]);
        zmsg_dump (msg);
    }
    zmsg_send (&msg, self->socket);
}
Example #20
static void
s_worker_send (broker_t *self, worker_t *worker, char *command,
               char *option, zmsg_t *msg)
{
    msg = msg? zmsg_dup (msg): zmsg_new ();

    //  Stack protocol envelope to start of message
    if (option)
        zmsg_pushstr (msg, option);
    zmsg_pushstr (msg, command);
    zmsg_pushstr (msg, MDPW_WORKER);

    //  Stack routing envelope to start of message
    zmsg_wrap (msg, zframe_dup (worker->address));

    if (self->verbose) {
        zclock_log ("I: sending %s to worker",
            mdps_commands [(int) *command]);
        zmsg_dump (msg);
    }
    zmsg_send (&msg, self->socket);
}
Example #21
static void
s_service_internal (broker_t *self, zframe_t *service_frame, zmsg_t *msg)
{
    char *return_code;
    if (zframe_streq (service_frame, "mmi.service")) {
        char *name = zframe_strdup (zmsg_last (msg));
        service_t *service =
            (service_t *) zhash_lookup (self->services, name);
        return_code = service && service->workers? "200": "404";
        free (name);
    }
    else
        return_code = "501";

    zframe_reset (zmsg_last (msg), return_code, strlen (return_code));

    //  Remove & save client return envelope and insert the
    //  protocol header and service name, then rewrap envelope.
    zframe_t *client = zmsg_unwrap (msg);
    zmsg_push (msg, zframe_dup (service_frame));
    zmsg_pushstr (msg, MDPC_CLIENT);
    zmsg_wrap (msg, client);
    zmsg_send (&msg, self->socket);
}
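For context, a hedged sketch of the client side of this MMI exchange (assuming the MDP framing used by the brokers above and a REQ socket, so the empty delimiter is added automatically; 'client_socket' and the queried service name are invented):

//  Hypothetical MMI probe: ask the broker whether service "echo" has workers
zmsg_t *request = zmsg_new ();
zmsg_addstr (request, "echo");              //  Body: service name to look up
zmsg_pushstr (request, "mmi.service");      //  Service frame
zmsg_pushstr (request, MDPC_CLIENT);        //  Protocol header
zmsg_send (&request, client_socket);
//  The broker echoes the frames back with the body reset to
//  "200" (known, has workers), "404" (unknown) or "501" (unsupported)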
Example #22
int main(void)
{
	zctx_t *ctx = zctx_new();
	void *frontend = zsocket_new(ctx, ZMQ_ROUTER);
	void *backend = zsocket_new(ctx, ZMQ_ROUTER);

	// IPC doesn't yet work on MS Windows.
#if (defined (WIN32))
	zsocket_bind(frontend, "tcp://*:5672");
	zsocket_bind(backend, "tcp://*:5673");
#else
	zsocket_bind(frontend, "ipc://frontend.ipc");
	zsocket_bind(backend, "ipc://backend.ipc");
#endif

	int client_nbr;
	for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
		zthread_new(client_task, NULL);
	int worker_nbr;
	for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
		zthread_new(worker_task, NULL);

	//  Queue of available workers
	zlist_t *workers = zlist_new();

	//  .split main load-balancer loop
	//  Here is the main loop for the load balancer. It works the same way
	//  as the previous example, but is a lot shorter because CZMQ gives
	//  us an API that does more with fewer calls:
	while (1) {
		zmq_pollitem_t items[] = {
				{ backend, 0, ZMQ_POLLIN, 0 },
				{ frontend, 0, ZMQ_POLLIN, 0 }
		};
		//  Poll frontend only if we have available workers
		int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1);
		if (rc == -1)
			break;              //  Interrupted

		//  Handle worker activity on backend
		if (items[0].revents & ZMQ_POLLIN) {
			//  Use worker identity for load-balancing
			zmsg_t *msg = zmsg_recv(backend);
			if (!msg)
				break;          //  Interrupted

#if 0
			// zmsg_unwrap is DEPRECATED as over-engineered, poor style
			zframe_t *identity = zmsg_unwrap(msg);
#else
			zframe_t *identity = zmsg_pop(msg);
			zframe_t *delimiter = zmsg_pop(msg);
			zframe_destroy(&delimiter); 
#endif

			zlist_append(workers, identity);

			//  Forward message to client if it's not a READY
			zframe_t *frame = zmsg_first(msg);
			if (memcmp(zframe_data(frame), WORKER_READY, strlen(WORKER_READY)) == 0) {
				zmsg_destroy(&msg);
			} else {
				zmsg_send(&msg, frontend);
				if (--client_nbr == 0)
					break; // Exit after N messages
			}
		}
		if (items[1].revents & ZMQ_POLLIN) {
			//  Get client request, route to first available worker
			zmsg_t *msg = zmsg_recv(frontend);
			if (msg) {
#if 0
				// zmsg_wrap is DEPRECATED as unsafe
				zmsg_wrap(msg, (zframe_t *)zlist_pop(workers));
#else
				zmsg_pushmem(msg, NULL, 0); // delimiter
				zmsg_push(msg, (zframe_t *)zlist_pop(workers));
#endif

				zmsg_send(&msg, backend);
			}
		}
	}
	//  When we're done, clean up properly
	while (zlist_size(workers)) {
		zframe_t *frame = (zframe_t *)zlist_pop(workers);
		zframe_destroy(&frame);
	}
	zlist_destroy(&workers);
	zctx_destroy(&ctx);
	return 0;
}
Example #23
File: lbbroker.c Project: hbfhaapy/study
int 
main(int argc, char* argv[])
{
  int i, client_num, worker_num;
  zctx_t* ctx;
  void* frontend;
  void* backend;
  zlist_t* workers;

  if (argc < 3) {
    fprintf(stderr, "arguments error ...\n");
    return 1;
  }
  client_num = atoi(argv[1]);
  worker_num = atoi(argv[2]);

  ctx = zctx_new();

  frontend = zsocket_new(ctx, ZMQ_ROUTER);
  backend = zsocket_new(ctx, ZMQ_ROUTER);
  zsocket_bind(frontend, "ipc://frontend.ipc");
  zsocket_bind(backend, "ipc://backend.ipc");

  for (i = 0; i < client_num; ++i)
    zthread_new(client_routine, NULL);
  for (i = 0; i < worker_num; ++i)
    zthread_new(worker_routine, NULL);

  workers = zlist_new();
  while (1) {
    zmq_pollitem_t items[] = {
      {backend, 0, ZMQ_POLLIN, 0}, 
      {frontend, 0, ZMQ_POLLIN, 0}, 
    };
    int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1);
    if (-1 == rc)
      break;

    if (items[0].revents & ZMQ_POLLIN) {
      zmsg_t* msg;
      zframe_t* identity;
      zframe_t* frame;

      msg = zmsg_recv(backend);
      if (NULL == msg)
        break;
      identity = zmsg_unwrap(msg);
      zlist_append(workers, identity);

      frame = zmsg_first(msg);
      if (0 == memcmp(zframe_data(frame), WORKER_READY, 1))
        zmsg_destroy(&msg);
      else 
        zmsg_send(&msg, frontend);
    }
    if (items[1].revents & ZMQ_POLLIN) {
      zmsg_t* msg = zmsg_recv(frontend);
      if (NULL != msg) {
        zmsg_wrap(msg, (zframe_t*)zlist_pop(workers));
        zmsg_send(&msg, backend);
      }
    }
  }

  while (zlist_size(workers)) {
    zframe_t* frame = (zframe_t*)zlist_pop(workers);
    zframe_destroy(&frame);
  }

  zlist_destroy(&workers);
  zctx_destroy(&ctx);
  return 0;
}
Example #24
int main (int argc, char *argv[])
{
    //  Prepare our context and sockets
    void *context = zmq_init (1);
    void *frontend = zmq_socket (context, ZMQ_XREP);
    void *backend  = zmq_socket (context, ZMQ_XREP);
    zmq_bind (frontend, "ipc://frontend.ipc");
    zmq_bind (backend,  "ipc://backend.ipc");

    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) {
        pthread_t client;
        pthread_create (&client, NULL, client_thread, context);
    }
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) {
        pthread_t worker;
        pthread_create (&worker, NULL, worker_thread, context);
    }
    //  Logic of LRU loop
    //  - Poll backend always, frontend only if 1+ worker ready
    //  - If worker replies, queue worker as ready and forward reply
    //    to client if necessary
    //  - If client requests, pop next worker and send request to it

    //  Queue of available workers
    int available_workers = 0;
    char *worker_queue [NBR_WORKERS];

    while (1) {
        //  Initialize poll set
        zmq_pollitem_t items [] = {
            //  Always poll for worker activity on backend
            { backend,  0, ZMQ_POLLIN, 0 },
            //  Poll front-end only if we have available workers
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        if (available_workers)
            zmq_poll (items, 2, -1);
        else
            zmq_poll (items, 1, -1);

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *zmsg = zmsg_recv (backend);
            //  Use worker address for LRU routing
            assert (available_workers < NBR_WORKERS);
            worker_queue [available_workers++] = zmsg_unwrap (zmsg);

            //  Forward message to client if it's not a READY
            if (strcmp (zmsg_address (zmsg), "READY") == 0)
                zmsg_destroy (&zmsg);
            else {
                zmsg_send (&zmsg, frontend);
                if (--client_nbr == 0)
                    break;      //  Exit after N messages
            }
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Now get next client request, route to next worker
            zmsg_t *zmsg = zmsg_recv (frontend);
            zmsg_wrap (zmsg, worker_queue [0], "");
            zmsg_send (&zmsg, backend);

            //  Dequeue and drop the next worker address
            free (worker_queue [0]);
            DEQUEUE (worker_queue);
            available_workers--;
        }
    }
    sleep (1);
    zmq_term (context);
    return 0;
}
Example #25
zmsg_t *
mdp_worker_recv (mdp_worker_t *self, zmsg_t **reply_p)
{
    //  Format and send the reply if we were provided one
    assert (reply_p);
    zmsg_t *reply = *reply_p;
    assert (reply || !self->expect_reply);
    if (reply) {
        assert (self->reply_to);
        zmsg_wrap (reply, self->reply_to);
        s_mdp_worker_send_to_broker (self, MDPW_FINAL, NULL, reply);
        zmsg_destroy (reply_p);
    }
    self->expect_reply = 1;

    while (TRUE) {
        zmq_pollitem_t items [] = {
            { self->worker,  0, ZMQ_POLLIN, 0 } };
        int rc = zmq_poll (items, 1, self->heartbeat * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Interrupted

        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zmsg_recv (self->worker);
            if (!msg)
                break;          //  Interrupted
            if (self->verbose) {
                zclock_log ("I: received message from broker:");
                zmsg_dump (msg);
            }
            self->liveness = HEARTBEAT_LIVENESS;

            //  Don't try to handle errors, just assert noisily
            assert (zmsg_size (msg) >= 3);

            zframe_t *empty = zmsg_pop (msg);
            assert (zframe_streq (empty, ""));
            zframe_destroy (&empty);

            zframe_t *header = zmsg_pop (msg);
            assert (zframe_streq (header, MDPW_WORKER));
            zframe_destroy (&header);

            zframe_t *command = zmsg_pop (msg);
            if (zframe_streq (command, MDPW_REQUEST)) {
                //  We should pop and save as many addresses as there are
                //  up to a null part, but for now, just save one...
                self->reply_to = zmsg_unwrap (msg);
                zframe_destroy (&command);
                //  Here is where we actually have a message to process; we
                //  return it to the caller application
                return msg;     //  We have a request to process
            }
            else
            if (zframe_streq (command, MDPW_HEARTBEAT))
                ;               //  Do nothing for heartbeats
            else
            if (zframe_streq (command, MDPW_DISCONNECT))
                s_mdp_worker_connect_to_broker (self);
            else {
                zclock_log ("E: invalid input message");
                zmsg_dump (msg);
            }
            zframe_destroy (&command);
            zmsg_destroy (&msg);
        }
        else
        if (--self->liveness == 0) {
            if (self->verbose)
                zclock_log ("W: disconnected from broker - retrying...");
            zclock_sleep (self->reconnect);
            s_mdp_worker_connect_to_broker (self);
        }
        //  Send HEARTBEAT if it's time
        if (zclock_time () > self->heartbeat_at) {
            s_mdp_worker_send_to_broker (self, MDPW_HEARTBEAT, NULL, NULL);
            self->heartbeat_at = zclock_time () + self->heartbeat;
        }
    }
    if (zctx_interrupted)
        printf ("W: interrupt received, killing worker...\n");
    return NULL;
}
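A sketch of the receive loop a worker might run around mdp_worker_recv (assuming 'session' is an already-connected mdp_worker_t; the echo behavior is just an example):

//  Hypothetical echo worker built on the API above
zmsg_t *reply = NULL;
while (1) {
    zmsg_t *request = mdp_worker_recv (session, &reply);
    if (request == NULL)
        break;              //  Interrupted
    reply = request;        //  Echo the request straight back as the reply
}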
Example #26
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "ipc://frontend.ipc");
    zsocket_bind (backend, "ipc://backend.ipc");

    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Queue of available workers
    zlist_t *workers = zlist_new ();

    //  Here is the main loop for the load-balancer. It works the same way
    //  as the previous example, but is a lot shorter because CZMQ gives
    //  us an API that does more with fewer calls:
    while (true) {
        zmq_pollitem_t items [] = {
            { backend,  0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            //  Use worker identity for load-balancing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          //  Interrupted
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);

            //  Forward message to client if it's not a READY
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Get client request, route to first available worker
            zmsg_t *msg = zmsg_recv (frontend);
            if (msg) {
                zmsg_wrap (msg, (zframe_t *) zlist_pop (workers));
                zmsg_send (&msg, backend);
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
}
Example #27
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering2 me {you}...\n");
        return 0;
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));

    zctx_t *ctx = zctx_new ();

    //  Bind cloud frontend to endpoint
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }
    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);
    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Get user to tell us when we can start...
    printf ("Press Enter when all brokers are started: ");
    getchar ();

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);

    //  .split request-reply handling
    //  Here, we handle the request-reply flow. We're using load-balancing
    //  to poll workers at all times, and clients only when there are one 
    //  or more workers available.

    //  Least recently used queue of available workers
    int capacity = 0;
    zlist_t *workers = zlist_new ();

    while (true) {
        //  First, route any waiting replies from workers
        zmq_pollitem_t backends [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers, wait indefinitely
        int rc = zmq_poll (backends, 2,
            capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle reply from local worker
        zmsg_t *msg = NULL;
        if (backends [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);
            capacity++;

            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else
        if (backends [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker identity for anything
            zframe_t *identity = zmsg_unwrap (msg);
            zframe_destroy (&identity);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
            &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  .split route client requests
        //  Now we route as many client requests as we have worker capacity
        //  for. We may reroute requests from our local frontend, but not from 
        //  the cloud frontend. We reroute randomly now, just to test things
        //  out. In the next version, we'll do this properly by calculating
        //  cloud capacity:

        while (capacity) {
            zmq_pollitem_t frontends [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            rc = zmq_poll (frontends, 2, 0);
            assert (rc >= 0);
            int reroutable = 0;
            //  We'll do peer brokers first, to prevent starvation
            if (frontends [1].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (cloudfe);
                reroutable = 0;
            }
            else
            if (frontends [0].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (localfe);
                reroutable = 1;
            }
            else
                break;      //  No work, go back to backends

            //  If reroutable, send to cloud 20% of the time
            //  Here we'd normally use cloud status information
            //
            if (reroutable && argc > 2 && randof (5) == 0) {
                //  Route to random broker peer
                int peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [peer], strlen (argv [peer]));
                zmsg_send (&msg, cloudbe);
            }
            else {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                capacity--;
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
Example #28
File: peering.c Project: hbfhaapy/study
int 
main(int argc, char* argv[])
{
  zctx_t* ctx;
  void* cloudfe;
  void* cloudbe;
  int i;
  void* localfe;
  void* localbe;
  int capacity = 0;
  zlist_t* workers;

  if (argc < 2) {
    fprintf(stderr, "syntax: peering me {you} ...\n");
    return 1;
  }
  self = argv[1];
  fprintf(stdout, "I: preparing broker at %s ...\n", self);
  srand((unsigned int)time(NULL));

  ctx = zctx_new();

  cloudfe = zsocket_new(ctx, ZMQ_ROUTER);
  zsockopt_set_identity(cloudfe, self);
  zsocket_bind(cloudfe, "ipc://%s-cloud.ipc", self);

  cloudbe = zsocket_new(ctx, ZMQ_ROUTER);
  zsockopt_set_identity(cloudbe, self);
  for (i = 2; i < argc; ++i) {
    char* peer = argv[i];
    fprintf(stdout, "I: connecting to cloud frontend at '%s'\n", peer);
    zsocket_connect(cloudbe, "ipc://%s-cloud.ipc", peer);
  }

  localfe = zsocket_new(ctx, ZMQ_ROUTER);
  zsocket_bind(localfe, "ipc://%s-localfe.ipc", self);
  localbe = zsocket_new(ctx, ZMQ_ROUTER);
  zsocket_bind(localbe, "ipc://%s-localbe.ipc", self);

  fprintf(stdout, "Press Enter when all brokers are started: ");
  getchar();

  for (i = 0; i < NUM_WORKERS; ++i)
    zthread_new(worker_routine, NULL);
  for (i = 0; i < NUM_CLIENTS; ++i)
    zthread_new(client_routine, NULL);

  workers = zlist_new();
  while (1) {
    zmsg_t* msg;
    zmq_pollitem_t backends[] = {
      {localbe, 0, ZMQ_POLLIN, 0}, 
      {cloudbe, 0, ZMQ_POLLIN, 0}, 
    };
    int r = zmq_poll(backends, 2, capacity ? 1000 * ZMQ_POLL_MSEC : -1);
    if (-1 == r)
      break;

    msg = NULL;
    if (backends[0].revents & ZMQ_POLLIN) {
      zframe_t* identity;
      zframe_t* frame;
      msg = zmsg_recv(localbe);
      if (!msg)
        break;

      identity = zmsg_unwrap(msg);
      zlist_append(workers, identity);
      ++capacity;

      frame = zmsg_first(msg);
      if (0 == memcmp(zframe_data(frame), WORKER_READY, 1))
        zmsg_destroy(&msg);
    }
    else if (backends[1].revents & ZMQ_POLLIN) {
      zframe_t* identity;
      msg = zmsg_recv(cloudbe);
      if (!msg)
        break;

      identity = zmsg_unwrap(msg);
      zframe_destroy(&identity);
    }
    for (i = 2; msg && i < argc; ++i) {
      char* data = (char*)zframe_data(zmsg_first(msg));
      size_t size = zframe_size(zmsg_first(msg));
      if (size == strlen(argv[i]) && 0 == memcmp(data, argv[i], size))
        zmsg_send(&msg, cloudfe);
    }
    if (msg)
      zmsg_send(&msg, localfe);

    while (capacity) {
      int reroutable = 0;
      zmq_pollitem_t frontends[] = {
        {localfe, 0, ZMQ_POLLIN, 0}, 
        {cloudfe, 0, ZMQ_POLLIN, 0},
      };
      r = zmq_poll(frontends, 2, 0);
      assert(r >= 0);

      if (frontends[1].revents & ZMQ_POLLIN) {
        msg = zmsg_recv(cloudfe);
        reroutable = 0;
      }
      else if (frontends[0].revents & ZMQ_POLLIN) {
        msg = zmsg_recv(localfe);
        reroutable = 1;
      }
      else
        break;

      if (0 != reroutable && argc > 2 && 0 == rand() % 5) {
        int random_peer = rand() % (argc - 2) + 2;
        zmsg_pushmem(msg, argv[random_peer], strlen(argv[random_peer]));
        zmsg_send(&msg, cloudbe);
      }
      else {
        zframe_t* frame = (zframe_t*)zlist_pop(workers);
        zmsg_wrap(msg, frame);
        zmsg_send(&msg, localbe);
        --capacity;
      }
    }
  }

  while (zlist_size(workers)) {
    zframe_t* frame = (zframe_t*)zlist_pop(workers);
    zframe_destroy(&frame);
  }
  zlist_destroy(&workers);

  zctx_destroy(&ctx);
  return 0;
}
Example #29
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering3 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));

    //  Prepare our context and sockets
    zctx_t *ctx = zctx_new ();
    char endpoint [256];

    //  Bind cloud frontend to endpoint
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Bind state backend / publisher to endpoint
    void *statebe = zsocket_new (ctx, ZMQ_PUB);
    zsocket_bind (statebe, "ipc://%s-state.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }

    //  Connect statefe to all peers
    void *statefe = zsocket_new (ctx, ZMQ_SUB);
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to state backend at '%s'\n", peer);
        zsocket_connect (statefe, "ipc://%s-state.ipc", peer);
    }
    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);

    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Prepare monitor socket
    void *monitor = zsocket_new (ctx, ZMQ_PULL);
    zsocket_bind (monitor, "ipc://%s-monitor.ipc", self);

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (ctx, worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (ctx, client_task, NULL);

    //  Interesting part
    //  -------------------------------------------------------------
    //  Publish-subscribe flow
    //  - Poll statefe and process capacity updates
    //  - Each time capacity changes, broadcast new value
    //  Request-reply flow
    //  - Poll primary and process local/cloud replies
    //  - While worker available, route localfe to local or cloud

    //  Queue of available workers
    int local_capacity = 0;
    int cloud_capacity = 0;
    zlist_t *workers = zlist_new ();

    while (1) {
        zmq_pollitem_t primary [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 },
            { statefe, 0, ZMQ_POLLIN, 0 },
            { monitor, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers anyhow, wait indefinitely
        int rc = zmq_poll (primary, 4,
                           local_capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Track if capacity changes during this iteration
        int previous = local_capacity;

        //  Handle reply from local worker
        zmsg_t *msg = NULL;

        if (primary [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            zframe_t *address = zmsg_unwrap (msg);
            zlist_append (workers, address);
            local_capacity++;

            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), LRU_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else if (primary [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker address for anything
            zframe_t *address = zmsg_unwrap (msg);
            zframe_destroy (&address);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
                    &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  Handle capacity updates
        if (primary [2].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (statefe);
            cloud_capacity = atoi (status);
            free (status);
        }
        //  Handle monitor message
        if (primary [3].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (monitor);
            printf ("%s\n", status);
            free (status);
        }

        //  Now route as many clients requests as we can handle
        //  - If we have local capacity we poll both localfe and cloudfe
        //  - If we have cloud capacity only, we poll just localfe
        //  - Route any request locally if we can, else to cloud
        //
        while (local_capacity + cloud_capacity) {
            zmq_pollitem_t secondary [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            if (local_capacity)
                rc = zmq_poll (secondary, 2, 0);
            else
                rc = zmq_poll (secondary, 1, 0);
            assert (rc >= 0);

            if (secondary [0].revents & ZMQ_POLLIN)
                msg = zmsg_recv (localfe);
            else if (secondary [1].revents & ZMQ_POLLIN)
                msg = zmsg_recv (cloudfe);
            else
                break;      //  No work, go back to primary

            if (local_capacity) {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                local_capacity--;
            }
            else {
                //  Route to random broker peer
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
        }
        if (local_capacity != previous) {
            //  We stick our own address onto the envelope
            zstr_sendm (statebe, self);
            //  Broadcast new capacity
            zstr_sendf (statebe, "%d", local_capacity);
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
Example #30
void graph_response_retrieveResponse(req_t * req, json_t * response,
				     int32_t requestId, void *sweb,
				     req_store_t * req_store)
{

	json_t *nodeArray = json_object_get(req->response, "nodeArray");
	json_t *nodeDataArray = json_object_get(response, "nodeArray");

//TODO handle this case
/*	assert(json_array_size(nodeArray) == json_array_size(nodeDataArray));  */
	//add the content data to the location data
	int i;
	for (i = 0; i < json_array_size(nodeArray); i++) {

		json_t *node = json_array_get(nodeArray, i);

		int64_t id = json_integer_value(json_object_get(node, "id"));
		int j;
		for (j = 0; j < json_array_size(nodeDataArray); j++) {

			json_t *dataNode = json_array_get(nodeDataArray, j);
			if (id ==
			    json_integer_value(json_object_get
					       (dataNode, "id"))) {

				json_object_set(node, "node", dataNode);
				break;
			}

		}

		//TODO Put an assert here so that we are sure that all nodes have content

	}

	json_t *web_resp = json_object();

	json_object_set(web_resp, "sessionId",
			json_object_get(req->request, "sessionId"));
	json_object_set_new(web_resp, "type", json_string("response"));
	json_t *clientResponse = json_object();
	json_object_set(clientResponse, "clientRequestId",
			json_object_get(json_object_get
					(req->request,
					 "clientRequest"), "clientRequestId"));
	json_t *cl_response = json_object();
	json_object_set_new(cl_response, "type", json_string("searchResponse"));
	json_object_set(cl_response, "nodeArray", nodeArray);
	json_object_set_new(clientResponse, "response", cl_response);
	json_object_set_new(web_resp, "clientResponse", clientResponse);
	zmsg_t *res = zmsg_new();
	char *web_res_str = json_dumps(web_resp,
				       JSON_COMPACT);
	printf("\nbroker:sweb sent: %s\n", web_res_str);
	zmsg_addstr(res, web_res_str);
	free(web_res_str);
	zmsg_wrap(res, req->address);
	zmsg_send(&res, sweb);
	request_store_delete(req_store, requestId);
	json_decref(web_resp);

}