Example #1
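//  REQ client over the VTX TCP transport: sends "ICANHAZ?" requests,
//  waits up to 500 ms for a reply, and recreates the socket if none arrives.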
static void
test_tcp_req (void *args, zctx_t *ctx, void *pipe)
{
    vtx_t *vtx = vtx_new (ctx);
    int rc = vtx_tcp_load (vtx, FALSE);
    assert (rc == 0);
    char *port = zstr_recv (pipe);

    void *client = vtx_socket (vtx, ZMQ_REQ);
    assert (client);
    rc = vtx_connect (vtx, client, "tcp://localhost:%s", port);
    assert (rc == 0);
    int sent = 0;
    int recd = 0;

    while (!zctx_interrupted) {
        zstr_send (client, "ICANHAZ?");
        sent++;
        zmq_pollitem_t items [] = {
            { pipe, 0, ZMQ_POLLIN, 0 },
            { client, 0, ZMQ_POLLIN, 0 }
        };
        int rc = zmq_poll (items, 2, 500 * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Context has been shut down
        if (items [0].revents & ZMQ_POLLIN) {
            free (zstr_recv (pipe));
            zstr_send (pipe, "OK");
            break;
        }
        if (items [1].revents & ZMQ_POLLIN) {
            free (zstr_recv (client));
            recd++;
        }
        else {
            //  No response, close socket and start a new one
            vtx_close (vtx, client);
            client = vtx_socket (vtx, ZMQ_REQ);
            rc = vtx_connect (vtx, client, "tcp://localhost:%s", port);
        }
    }
    zclock_log ("I: REQ: sent=%d recd=%d", sent, recd);
    free (port);
    vtx_destroy (&vtx);
}
Example #2
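/* Worker loop: polls the event PULL socket and the agent socket, handles
 * acknowledgements, retries pending events, and logs statistics roughly
 * every two minutes. */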
static void
_zmq2agent_worker (struct _zmq2agent_ctx_s *ctx)
{
	/* XXX(jfs): a dedicated PRNG avoids taking GLib's global PRNG lock on
	   each call, and seeding it from GLib's PRNG avoids a syscall to the
	   special file /dev/urandom */
	GRand *r = g_rand_new_with_seed (g_random_int ());

	gint64 last_debug = oio_ext_monotonic_time ();

	zmq_pollitem_t pi[2] = {
		{ctx->zpull, -1, ZMQ_POLLIN, 0},
		{ctx->zagent, -1, ZMQ_POLLIN, 0},
	};

	for (gboolean run = TRUE; run ;) {
		int rc = zmq_poll (pi, 2, 1000);
		if (rc < 0) {
			int err = zmq_errno();
			if (err != ETERM && err != EINTR)
				GRID_WARN("ZMQ poll error : (%d) %s", err, zmq_strerror(err));
			if (err != EINTR)
				break;
		}
		if (pi[1].revents)
			_zmq2agent_receive_acks (ctx);
		_retry_events (ctx);
		if (pi[0].revents)
			run = _zmq2agent_receive_events (r, ctx);

		/* Periodically write stats in the log */
		gint64 now = oio_ext_monotonic_time ();
		if ((now - last_debug) > 2 * G_TIME_SPAN_MINUTE) {
			GRID_INFO("ZMQ2AGENT recv=%"G_GINT64_FORMAT" sent=%"G_GINT64_FORMAT
					" ack=%"G_GINT64_FORMAT"+%"G_GINT64_FORMAT" queue=%u",
					ctx->q->counter_received, ctx->q->counter_sent,
					ctx->q->counter_ack, ctx->q->counter_ack_notfound,
					ctx->q->gauge_pending);
			last_debug = now;
		}
	}

	g_rand_free (r);
	GRID_INFO ("Thread stopping [NOTIFY-ZMQ2AGENT]");
}
Example #3
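//  Wait up to 'timeout' msecs for activity on any registered reader.
//  Returns the first ready reader, or NULL on timeout, interrupt, or error
//  (the expired/terminated flags say which).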
void *
zpoller_wait (zpoller_t *self, int timeout)
{
    assert (self);
    self->expired = false;
    if (zsys_interrupted && !self->nonstop) {
        self->terminated = true;
        return NULL;
    }
    else
        self->terminated = false;

#ifdef ZMQ_HAVE_POLLER
    zmq_poller_event_t event;
    if (!zmq_poller_wait (self->zmq_poller, &event, timeout * ZMQ_POLL_MSEC))
        return event.user_data;
    else
    if (errno == ETIMEDOUT || errno == EAGAIN)
        self->expired = true;
    else
    if (zsys_interrupted && !self->nonstop)
        self->terminated = true;

    return NULL;
#else
    if (self->need_rebuild)
        s_rebuild_poll_set (self);
    int rc = zmq_poll (self->poll_set, (int) self->poll_size, timeout * ZMQ_POLL_MSEC);
    if (rc > 0) {
        uint reader = 0;
        for (reader = 0; reader < self->poll_size; reader++)
            if (self->poll_set [reader].revents & ZMQ_POLLIN)
                return self->poll_readers [reader];
    }
    else
    if (rc == -1 || (zsys_interrupted && !self->nonstop))
        self->terminated = true;
    else
    if (rc == 0)
        self->expired = true;

    return NULL;
#endif
}
Example #4
//  Request-reply client using REQ socket
//  To simulate load, clients issue a burst of requests and then
//  sleep for a random period.
//
static void *
client_task (void *args)
{
    zctx_t *ctx = zctx_new ();
    void *client = zsocket_new (ctx, ZMQ_REQ);
    zsocket_connect (client, "ipc://%s-localfe.ipc", self);
    void *monitor = zsocket_new (ctx, ZMQ_PUSH);
    zsocket_connect (monitor, "ipc://%s-monitor.ipc", self);

    while (1) {
        sleep (randof (5));
        int burst = randof (15);
        while (burst--) {
            char task_id [5];
            sprintf (task_id, "%04X", randof (0x10000));

            //  Send request with random hex ID
            zstr_send (client, task_id);

            //  Wait max ten seconds for a reply, then complain
            zmq_pollitem_t pollset [1] = { { client, 0, ZMQ_POLLIN, 0 } };
            int rc = zmq_poll (pollset, 1, 10 * 1000 * ZMQ_POLL_MSEC);
            if (rc == -1)
                break;          //  Interrupted

            if (pollset [0].revents & ZMQ_POLLIN) {
                char *reply = zstr_recv (client);
                if (!reply)
                    break;              //  Interrupted
                //  Worker is supposed to answer us with our task id
                puts (reply);
                assert (streq (reply, task_id));
                free (reply);
            }
            else {
                zstr_sendf (monitor,
                            "E: CLIENT EXIT - lost task %s", task_id);
                return NULL;
            }
        }
    }
    zctx_destroy (&ctx);
    return NULL;
}
Example #5
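	// Poll the wrapped socket for the requested events; returns the ready
	// events, or 0 on error or when no socket handle is set.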
	UInt32 CZmq::PollEvent(UInt32 iEvents, Int32 iTimeout)
	{
		if (m_pHandle)
		{
			zmq_pollitem_t items[] = 
			{
				{ m_pHandle, 0, iEvents, 0 },
			};

			Int32 iRet = zmq_poll(items, 1, iTimeout);
			if(iRet == UTIL_ERROR)
			{
				FillErr();
				return 0;
			}
			return items[0].revents;
		}
		return 0;
	}
Example #6
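// Poll all registered items once; append entries whose sockets are readable
// or writable to the supplied arrays, and record the keys of items that
// report ZMQ_POLLERR.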
int ZMQPollData::poll(int64_t timeout, VRefParam readable, VRefParam writable) {
  errors.clear();

  auto rVar = readable.getVariantOrNull();
  Array rArr;
  if (rVar && rVar->isArray()) {
    rArr = rVar->asArrRef();
    rArr.clear();
  }

  auto wVar = writable.getVariantOrNull();
  Array wArr;
  if (wVar && wVar->isArray()) {
    wArr = wVar->asArrRef();
    wArr.clear();
  }

  assert(items.size() == php_items.size());

  int rc = zmq_poll(items.data(), items.size(), timeout);
  if (rc == -1) {
    return -1;
  }

  if (rc > 0) {
    for (size_t i = 0; i < items.size(); i++) {
      if (rVar && (items[i].revents & ZMQ_POLLIN)) {
        rArr.append(php_items[i].entry);
      }
      if (wVar && (items[i].revents & ZMQ_POLLOUT)) {
        wArr.append(php_items[i].entry);
      }

      if (items[i].revents & ZMQ_POLLERR) {
        errors.append(php_items[i].key);
      }
    }
  }

  readable.assignIfRef(rArr);
  writable.assignIfRef(wArr);
  return rc;
}
Example #7
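//  Verify REQ round-robin: a bound REQ socket must distribute successive
//  requests across its connected REP peers in connection order.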
void test_round_robin_out (void *ctx)
{
    void *req = zmq_socket (ctx, ZMQ_REQ);
    assert (req);

    int rc = zmq_bind (req, bind_address);
    assert (rc == 0);

    const size_t services = 5;
    void *rep [services];
    for (size_t peer = 0; peer < services; peer++) {
        rep [peer] = zmq_socket (ctx, ZMQ_REP);
        assert (rep [peer]);

        int timeout = 100;
        rc = zmq_setsockopt (rep [peer], ZMQ_RCVTIMEO, &timeout, sizeof (int));
        assert (rc == 0);

        rc = zmq_connect (rep [peer], connect_address);
        assert (rc == 0);
    }
    //  We have to give the connects time to finish otherwise the requests 
    //  will not properly round-robin. We could alternatively connect the
    //  REQ sockets to the REP sockets.
    struct timespec t = { 0, 250 * 1000000 };
    nanosleep (&t, NULL);
    
    // Send one request per peer; the bound REQ must round-robin, so each
    // REP peer receives exactly one request, in connection order
    for (size_t peer = 0; peer < services; peer++) {
        s_send_seq (req, "ABC", SEQ_END);
        s_recv_seq (rep [peer], "ABC", SEQ_END);
        s_send_seq (rep [peer], "DEF", SEQ_END);
        s_recv_seq (req, "DEF", SEQ_END);
    }

    close_zero_linger (req);
    for (size_t peer = 0; peer < services; peer++)
        close_zero_linger (rep [peer]);

    // Wait for disconnects.
    rc = zmq_poll (0, 0, 100);
    assert (rc == 0);
}
Example #8
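//  Returns the reply message, or NULL if there was no reply within the
//  timeout or the call was interrupted.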
zmsg_t *
mdcli_recv (mdcli_t *self)
{
    assert (self);

    //  Poll socket for a reply, with timeout
    zmq_pollitem_t items [] = { { self->client, 0, ZMQ_POLLIN, 0 } };
    int rc = zmq_poll (items, 1, self->timeout * ZMQ_POLL_MSEC);
    if (rc == -1)
        return NULL;            //  Interrupted

    //  If we got a reply, process it
    if (items [0].revents & ZMQ_POLLIN) {
        zmsg_t *msg = zmsg_recv (self->client);
        if (self->verbose) {
            zclock_log ("I: received reply:");
            zmsg_dump (msg);
        }
        //  Don't try to handle errors, just assert noisily
        assert (zmsg_size (msg) >= 4);

        zframe_t *empty = zmsg_pop (msg);
        assert (zframe_streq (empty, ""));
        zframe_destroy (&empty);

        zframe_t *header = zmsg_pop (msg);
        assert (zframe_streq (header, MDPC_CLIENT));
        zframe_destroy (&header);

        zframe_t *service = zmsg_pop (msg);
        zframe_destroy (&service);

        return msg;     //  Success
    }
    if (zctx_interrupted)
        printf ("W: interrupt received, killing client...\n");
    else
    if (self->verbose)
        zclock_log ("W: permanent error, abandoning request");

    return NULL;
}
Example #9
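//  Send one echo request to the given endpoint and wait safely for the
//  reply; returns the reply message, or NULL if none arrived in time.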
static zmsg_t *
s_try_request (zctx_t *ctx, char *endpoint, zmsg_t *request)
{
    printf ("I: trying echo service at %s...\n", endpoint);
    void *client = zsocket_new (ctx, ZMQ_REQ);
    zsocket_connect (client, endpoint);

    //  Send request, wait safely for reply
    zmsg_t *msg = zmsg_dup (request);
    zmsg_send (&msg, client);
    zmq_pollitem_t items [] = { { client, 0, ZMQ_POLLIN, 0 } };
    zmq_poll (items, 1, REQUEST_TIMEOUT * ZMQ_POLL_MSEC);
    zmsg_t *reply = NULL;
    if (items [0].revents & ZMQ_POLLIN)
        reply = zmsg_recv (client);

    //  Close socket in any case, we're done with it now
    zsocket_destroy (ctx, client);
    return reply;
}
Example #10
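//  Returns the reply message, or NULL on timeout or interrupt.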
zmsg_t *
mdcli_recv (mdcli_t *self)
{
    assert (self);

    //  Poll socket for a reply, with timeout
    zmq_pollitem_t items [] = { { self->client, 0, ZMQ_POLLIN, 0 } };
    zmq_poll (items, 1, self->timeout * 1000);

    //  If we got a reply, process it
    if (items [0].revents & ZMQ_POLLIN) {
        zmsg_t *msg = zmsg_recv (self->client);
        if (self->verbose) {
            s_console ("I: received reply:");
            zmsg_dump (msg);
        }
        //  Don't try to handle errors, just assert noisily
        assert (zmsg_parts (msg) >= 4);

        char *empty = zmsg_pop (msg);
        assert (streq (empty, ""));
        free (empty);

        char *header = zmsg_pop (msg);
        assert (streq (header, MDPC_CLIENT));
        free (header);

        char *service = zmsg_pop (msg);
        //  (the service frame is not verified here, just discarded)
        free (service);

        return msg;     //  Success
    }
    if (s_interrupted)
        printf ("W: interrupt received, killing client...\n");
    else
    if (self->verbose)
        s_console ("W: permanent error, abandoning request");

    return NULL;
}
Example #11
int main (void) 
{
    void *context = zmq_init (1);

    //  Connect to task ventilator
    void *receiver = zmq_socket (context, ZMQ_PULL);
    zmq_connect (receiver, "tcp://localhost:5557");

    //  Connect to weather server
    void *subscriber = zmq_socket (context, ZMQ_SUB);
    zmq_connect (subscriber, "tcp://localhost:5556");
    zmq_setsockopt (subscriber, ZMQ_SUBSCRIBE, "10001 ", 6);

    //  Initialize poll set
    zmq_pollitem_t items [] = {
        { receiver, 0, ZMQ_POLLIN, 0 },
        { subscriber, 0, ZMQ_POLLIN, 0 }
    };
    //  Process messages from both sockets
    while (1) {
        zmq_msg_t message;
        zmq_poll (items, 2, -1);
        if (items [0].revents & ZMQ_POLLIN) {
            zmq_msg_init (&message);
            zmq_recv (receiver, &message, 0);
            //  Process task
            zmq_msg_close (&message);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            zmq_msg_init (&message);
            zmq_recv (subscriber, &message, 0);
            //  Process weather update
            zmq_msg_close (&message);
        }
    }
    //  We never get here
    zmq_close (receiver);
    zmq_close (subscriber);
    zmq_term (context);
    return 0;
}
Example #12
int main(void)
{
    // connect to task ventilator
    void *context = zmq_ctx_new();
    void *receiver = zmq_socket(context, ZMQ_PULL);
    zmq_connect(receiver, "tcp://localhost:5557");

    // connect to weather server
    void *subscriber = zmq_socket(context, ZMQ_SUB);
    zmq_connect(subscriber, "tcp://localhost:5556");
    zmq_setsockopt(subscriber, ZMQ_SUBSCRIBE, "", 0);

    // Process messages from both sockets
    while (1) {
        char msg[256];
        zmq_pollitem_t items[] = {
                { receiver, 0, ZMQ_POLLIN, 0 },
                { subscriber, 0, ZMQ_POLLIN, 0 },
        };
        zmq_poll(items, 2, -1);
        if (items[0].revents & ZMQ_POLLIN) {
            int size = zmq_recv(receiver, msg, 255, 0);
            if (size != -1) {
                break;
            }
        }
        if (items[1].revents & ZMQ_POLLIN) {
            //int size = zmq_recv(subscriber, msg, 255, 0);
            char *msg = s_recv(subscriber);
            if (msg != NULL) {
                // process weather update
                puts(msg);
                free(msg);
            }
        }
    }
    zmq_close(receiver);
    zmq_close(subscriber);
    zmq_ctx_destroy(context);
    return 0;
}
Example #13
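// Poll a PULL socket (task ventilator) and a SUB socket (weather updates)
// and print whatever arrives on either one.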
int main(void)
{
        void *context = zmq_ctx_new();

        void *receiver = zmq_socket(context, ZMQ_PULL);
        zmq_connect(receiver, "tcp://localhost:5557");

        void *subscriber = zmq_socket(context, ZMQ_SUB);
        zmq_connect(subscriber, "tcp://localhost:5556");
        zmq_setsockopt(subscriber, ZMQ_SUBSCRIBE, "10001 ", 6);

        zmq_pollitem_t items[] = {
                {receiver, 0, ZMQ_POLLIN, 0},
                {subscriber, 0, ZMQ_POLLIN, 0},
        };

        while(1) {
                zmq_msg_t message;
                zmq_poll(items, 2, -1);
                if (items[0].revents & ZMQ_POLLIN) {
                        zmq_msg_init(&message);
                        zmq_msg_recv(&message, receiver, 0);

                        printf("receiver: %s\n", (char *)zmq_msg_data(&message));
                        zmq_msg_close(&message);
                }
                if (items[1].revents & ZMQ_POLLIN) {
                        zmq_msg_init(&message);
                        zmq_msg_recv(&message, subscriber, 0);

                        printf("subscribe: %s\n", (char *)zmq_msg_data(&message));
                        zmq_msg_close(&message);
                }
        }

        zmq_close(receiver);
        zmq_close(subscriber);
        zmq_ctx_destroy(context);
        return 0;
}
Example #14
/* Wrapper around zmq_poll: zmq_poll itself may return zero before the
 * specified timeout has elapsed, so keep polling until data arrives or
 * the full timeout is spent. */
int
my_zmqpoll(zmq_pollitem_t *items, const int nitems, const long timeout)
{
	struct timeval tv, te;
	int rc, ret;
	long tmleft;

	/* Populate te with timeout value */
	te.tv_sec = timeout / 1000000;
	te.tv_usec = timeout - (te.tv_sec * 1000000);

	rc = gettimeofday(&tv, NULL);
	assert(rc == 0);

	/* Add current time to the timeout (end time) */
	te.tv_sec += tv.tv_sec;
	te.tv_usec += tv.tv_usec;
	te.tv_sec += te.tv_usec / 1000000;
	te.tv_usec %= 1000000;

	/* Loop over, return either >0, or 0 after a timeout */
	tmleft = timeout;
	while (1) {
		ret = zmq_poll(items, nitems, tmleft);
		assert(ret >= 0);

		rc = gettimeofday(&tv, NULL);
		assert(rc == 0);

		if (ret == 0) {
			/* Keep on looping unless time's up */
			if (te.tv_sec < tv.tv_sec ||
					(te.tv_sec == tv.tv_sec && te.tv_usec <= tv.tv_usec))
				return ret;
			tmleft = ((te.tv_sec - tv.tv_sec) * 1000000) + (te.tv_usec - tv.tv_usec);
		} else {
			return ret;
		}
	}
}
Example #15
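//  zbeacon actor body: polls the command pipe and the UDP socket, and
//  transmits the beacon whenever the ping interval elapses.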
void
zbeacon (zsock_t *pipe, void *args)
{
    self_t *self = s_self_new (pipe);
    assert (self);
    //  Signal successful initialization
    zsock_signal (pipe, 0);

    while (!self->terminated) {
        //  Poll on API pipe and on UDP socket
        zmq_pollitem_t pollitems [] = {
            { zsock_resolve (self->pipe), 0, ZMQ_POLLIN, 0 },
            { NULL, self->udpsock, ZMQ_POLLIN, 0 }
        };
        long timeout = -1;
        if (self->transmit) {
            timeout = (long) (self->ping_at - zclock_mono ());
            if (timeout < 0)
                timeout = 0;
        }
        int pollset_size = self->udpsock? 2: 1;
        if (zmq_poll (pollitems, pollset_size, timeout * ZMQ_POLL_MSEC) == -1)
            break;              //  Interrupted

        if (pollitems [0].revents & ZMQ_POLLIN)
            s_self_handle_pipe (self);
        if (pollitems [1].revents & ZMQ_POLLIN)
            s_self_handle_udp (self);

        if (self->transmit
        &&  zclock_mono () >= self->ping_at) {
            //  Send beacon to any listening peers
            if (zsys_udp_send (self->udpsock, self->transmit, &self->broadcast, sizeof (inaddr_t)))
                //  Try to recreate UDP socket on interface
                s_self_prepare_udp (self);
            self->ping_at = zclock_mono () + self->interval;
        }
    }
    s_self_destroy (&self);
}
Example #16
static flux_msg_t *op_recv (void *impl, int flags)
{
    ctx_t *ctx = impl;
    assert (ctx->magic == MODHANDLE_MAGIC);
    zmq_pollitem_t zp = {
        .events = ZMQ_POLLIN, .socket = ctx->sock, .revents = 0, .fd = -1,
    };
    flux_msg_t *msg = NULL;

    if (connect_socket (ctx) < 0)
        goto done;
    if ((flags & FLUX_O_NONBLOCK)) {
        int n;
        if ((n = zmq_poll (&zp, 1, 0L)) < 0)
            goto done; /* poll error */
        if (n == 0) {
            errno = EWOULDBLOCK; /* nothing ready: EWOULDBLOCK | EAGAIN */
            goto done;
        }
        assert (zp.revents == ZMQ_POLLIN);
    }
    msg = zmsg_recv (ctx->sock);
done:
    return msg;
}

static int op_event_subscribe (void *impl, const char *topic)
{
    ctx_t *ctx = impl;
    assert (ctx->magic == MODHANDLE_MAGIC);
    JSON in = Jnew ();
    int rc = -1;

    if (connect_socket (ctx) < 0)
        goto done;
    Jadd_str (in, "topic", topic);
    if (flux_json_rpc (ctx->h, FLUX_NODEID_ANY, "cmb.sub", in, NULL) < 0)
        goto done;
    rc = 0;
done:
    Jput (in);
    return rc;
}
Example #17
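//  ROUTER side of the VTX TCP test: answers every request with
//  "CHEEZBURGER" until the pipe tells it to stop.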
static void
test_tcp_router (void *args, zctx_t *ctx, void *pipe)
{
    vtx_t *vtx = vtx_new (ctx);
    int rc = vtx_tcp_load (vtx, FALSE);
    assert (rc == 0);
    char *port = zstr_recv (pipe);

    void *router = vtx_socket (vtx, ZMQ_ROUTER);
    assert (router);
    rc = vtx_bind (vtx, router, "tcp://*:%s", port);
    assert (rc == 0);
    int sent = 0;

    while (!zctx_interrupted) {
        zmq_pollitem_t items [] = {
            { pipe, 0, ZMQ_POLLIN, 0 },
            { router, 0, ZMQ_POLLIN, 0 }
        };
        int rc = zmq_poll (items, 2, 500 * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Context has been shut down
        if (items [1].revents & ZMQ_POLLIN) {
            char *address = zstr_recv (router);
            free (zstr_recv (router));
            zstr_sendm (router, address);
            zstr_send (router, "CHEEZBURGER");
            free (address);
            sent++;
        }
        if (items [0].revents & ZMQ_POLLIN) {
            free (zstr_recv (pipe));
            zstr_send (pipe, "OK");
            break;
        }
    }
    zclock_log ("I: ROUTER: sent=%d", sent);
    free (port);
    vtx_destroy (&vtx);
}
Example #18
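// Poll a PULL and a SUB socket obtained from a custom heartbeat_* wrapper
// API and discard whatever arrives (the wrapper presumably exposes the raw
// 0MQ handles that zmq_setsockopt and zmq_poll expect).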
int main(void)
{
    struct heartbeat_socket_info * receiver;
    struct heartbeat_socket_info * subscriber;

    receiver = heartbeat_connect_socket(ZMQ_PULL, "tcp://localhost:5557");
    subscriber = heartbeat_connect_socket(ZMQ_SUB, "tcp://localhost:5556");
    zmq_setsockopt(subscriber, ZMQ_SUBSCRIBE, "10001", 5);   // length must match the filter string
    char *string1;
    char *string2;

    while (1) {
        char msg[256];
        zmq_pollitem_t items[] = {
            { receiver, 0, ZMQ_POLLIN, 0},
            { subscriber, 0, ZMQ_POLLIN, 0}
        };
        zmq_poll(items, 2, -1);
        if (items[0].revents & ZMQ_POLLIN) {
            string1 = heartbeat_recv_msg(receiver, 0);
            if (string1 != NULL)   // strlen() never returns -1; check for a message instead
            {
            }
            free(string1);
        }

        if (items[1].revents & ZMQ_POLLIN) {
            string2 = heartbeat_recv_msg(subscriber, 0);
            if (string2 != NULL)   // strlen() never returns -1; check for a message instead
            {
            }
            free(string2);
        }
    }
    heartbeat_disconnect_socket(subscriber);
    heartbeat_disconnect_socket(receiver);

    return 0;
}
Example #19
static void
chat_task (void *args, zctx_t *ctx, void *pipe) 
{
    zyre_t *node = zyre_new (ctx);
    zyre_start (node);
    zyre_join (node, "CHAT");
    
    zmq_pollitem_t items [] = {
        { pipe, 0, ZMQ_POLLIN, 0 },
        { zyre_socket (node), 0, ZMQ_POLLIN, 0 }
    };
    while (true) {
        if (zmq_poll (items, 2, -1) == -1)
            break;              //  Interrupted
            
        //  Activity on my pipe
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zmsg_recv (pipe);
            zyre_shout (node, "CHAT", &msg);
        }
        //  Activity on my node handle
        if (items [1].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zyre_recv (node);
            zmsg_dump (msg);
            char *command = zmsg_popstr (msg);
            if (streq (command, "SHOUT")) {
                //  Discard sender and group name
                free (zmsg_popstr (msg));
                free (zmsg_popstr (msg));
                char *message = zmsg_popstr (msg);
                printf ("%s", message);
                free (message);
            }
            free (command);
            zmsg_destroy (&msg);
        }
    }
    zyre_destroy (&node);
}
Example #20
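/* Build a zmq_pollitem_t array from the list of R socket pointers, poll
 * once with the given timeout, and return zmq_poll's result to R. */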
/* Poll related. */
SEXP R_zmq_poll(SEXP R_socket, SEXP R_type, SEXP R_timeout){
	int C_ret = -1, C_errno, i;

	PBD_POLLITEM_LENGTH = LENGTH(R_socket);
	if(PBD_POLLITEM_LENGTH > PBD_POLLITEM_MAXSIZE){
		REprintf("Too many sockets (%d) are asked.\n", PBD_POLLITEM_LENGTH);
	}

	PBD_POLLITEM = (zmq_pollitem_t *) malloc(PBD_POLLITEM_LENGTH * sizeof(zmq_pollitem_t));
	for(i = 0; i < PBD_POLLITEM_LENGTH; i++){
		PBD_POLLITEM[i].socket = R_ExternalPtrAddr(VECTOR_ELT(R_socket, i));
		PBD_POLLITEM[i].events = (short) INTEGER(R_type)[i];
	}

	C_ret = zmq_poll(PBD_POLLITEM, PBD_POLLITEM_LENGTH, (long) INTEGER(R_timeout)[0]);
	if(C_ret == -1){
		C_errno = zmq_errno();
		warning("R_zmq_poll: %d strerror: %s\n",
			C_errno, zmq_strerror(C_errno));
	}
	return(AsInt(C_ret));
} /* End of R_zmq_poll(). */
Example #21
//  Finally here's the server thread itself, which polls its two
//  sockets and processes incoming messages
static void
server_thread (void *args, zctx_t *ctx, void *pipe)
{
    server_t *self = server_new (ctx, pipe);
    zmq_pollitem_t items [] = {
        { self->pipe, 0, ZMQ_POLLIN, 0 },
        { self->router, 0, ZMQ_POLLIN, 0 }
    };
    self->monitor_at = zclock_time () + self->monitor;
    while (!self->stopped && !zctx_interrupted) {
        //  Calculate tickless timer, up to interval seconds
        uint64_t tickless = zclock_time () + self->monitor;
        zhash_foreach (self->clients, client_tickless, &tickless);

        //  Poll until at most next timer event
        int rc = zmq_poll (items, 2,
            (tickless - zclock_time ()) * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Context has been shut down

        //  Process incoming message from either socket
        if (items [0].revents & ZMQ_POLLIN)
            server_control_message (self);

        if (items [1].revents & ZMQ_POLLIN)
            server_client_message (self);

        //  Send heartbeats to idle clients as needed
        zhash_foreach (self->clients, client_ping, self);

        //  If clock went past timeout, then monitor server
        if (zclock_time () >= self->monitor_at) {
            monitor_the_server (self, NULL);
            self->monitor_at = zclock_time () + self->monitor;
        }
    }
    server_destroy (&self);
}
Example #22
zmsg_t *
flclient_request (flclient_t *self, zmsg_t **request_p)
{
    assert (self);
    assert (*request_p);
    zmsg_t *request = *request_p;
    
    //  Prefix request with sequence number and empty envelope
    char sequence_text [10];
    sprintf (sequence_text, "%u", ++self->sequence);
    zmsg_push (request, sequence_text);
    zmsg_push (request, "");
    
    //  Blast the request to all connected servers
    int server;
    for (server = 0; server < self->servers; server++) {
        zmsg_t *msg = zmsg_dup (request);
        zmsg_send (&msg, self->socket);
    }
    //  Wait for a matching reply to arrive from anywhere
    //  Since we can poll several times, calculate each one
    zmsg_t *reply = NULL;
    uint64_t endtime = s_clock () + GLOBAL_TIMEOUT;
    while (s_clock () < endtime) {
        zmq_pollitem_t items [] = { { self->socket, 0, ZMQ_POLLIN, 0 } };
        zmq_poll (items, 1, (endtime - s_clock ()) * 1000);
        if (items [0].revents & ZMQ_POLLIN) {
            reply = zmsg_recv (self->socket);
            assert (zmsg_parts (reply) == 3);
            free (zmsg_pop (reply));
            if (atoi (zmsg_address (reply)) == self->sequence)
                break;
            zmsg_destroy (&reply);
        }
    }
    zmsg_destroy (request_p);
    return reply;
}
Example #23
int
oor_api_recv(oor_api_connection_t *conn, void *buffer, int flags)
{
    int nbytes;
    int zmq_flags = 0;
    zmq_pollitem_t items [1];
    int poll_timeout;
    int poll_rc;

    if (flags == OOR_API_DONTWAIT){
        zmq_flags = ZMQ_DONTWAIT;
        poll_timeout = 0; //Return immediately
    }else{
    	poll_timeout = -1; //Wait indefinitely
    }

    items[0].socket = conn->socket;
    items[0].events = ZMQ_POLLIN; //Check for incoming packets on socket

    // Poll for packets on socket for poll_timeout time
    poll_rc = zmq_poll (items, 1, poll_timeout);

    if (poll_rc == -1) { // Poll failed or was interrupted
        return (OOR_API_ERROR);
    }

    if (poll_rc == 0) { // There is nothing to read on the socket
        return (OOR_API_NOTHINGTOREAD);
    }

    OOR_LOG(LDBG_3,"LMAPI: Data available in API socket\n");

    nbytes = zmq_recv(conn->socket, buffer, MAX_API_PKT_LEN, zmq_flags);
    OOR_LOG(LDBG_3,"LMAPI: Bytes read from API socket: %d. ",nbytes);

    if (nbytes == -1){
    	OOR_LOG(LERR,"LMAPI: Error while ZMQ receiving: %s\n",zmq_strerror (errno));
    	return (OOR_API_ERROR);
    }

    return (nbytes);
}
Example #24
//  Finally here's the client thread itself, which polls its two
//  sockets and processes incoming messages
static void
client_thread (void *args, zctx_t *ctx, void *pipe)
{
    client_t *self = client_new (ctx, pipe);
    int pollset_size = 1;
    zmq_pollitem_t pollset [1 + MAX_SERVERS] = {   //  Control pipe plus one slot per server
        { self->pipe, 0, ZMQ_POLLIN, 0 }
    };
    while (!self->stopped && !zctx_interrupted) {
        //  Rebuild pollset if we need to
        int server_nbr;
        if (self->dirty) {
            for (server_nbr = 0; server_nbr < self->nbr_servers; server_nbr++) {
                pollset [1 + server_nbr].socket = self->servers [server_nbr]->dealer;
                pollset [1 + server_nbr].events = ZMQ_POLLIN;
            }
            pollset_size = 1 + self->nbr_servers;
        }
        if (zmq_poll (pollset, pollset_size, self->heartbeat * ZMQ_POLL_MSEC) == -1)
            break;              //  Context has been shut down

        //  Process incoming messages; either of these can
        //  throw events into the state machine
        if (pollset [0].revents & ZMQ_POLLIN)
            client_control_message (self);

        //  Here, array of sockets to servers
        for (server_nbr = 0; server_nbr < self->nbr_servers; server_nbr++) {
            if (pollset [1 + server_nbr].revents & ZMQ_POLLIN) {
                server_t *server = self->servers [server_nbr];
                client_server_message (self, server);
            }
        }
    }
    client_destroy (&self);
}
Example #25
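	// Poll the monitor socket without blocking; if an event is pending,
	// read it, optionally copy the raw zmq_event_t into pData, and return
	// the event type.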
	UInt32 CZmq::UpdateMonitor(OctetsStream* pData)
	{
		if (m_pMonitor)
		{
			zmq_pollitem_t items[] = 
			{
				{ m_pMonitor, 0, HEVENT_READ, 0 },
			};

			Int32 iRet = zmq_poll(items, 1, 0);
			if(iRet == UTIL_ERROR)
			{
				FillErr();
				return 0;
			}

			if (items[0].revents & HEVENT_READ)
			{
				zmq_event_t sEvent;
				Int32 iRecv = zmq_recv(m_pMonitor, &sEvent, sizeof(sEvent), 0);
				if(iRecv == UTIL_ERROR)
				{
					FillErr();
					return 0;
				}
				else if (iRecv == sizeof(sEvent))
				{
					if (pData)
						pData->Replace(&sEvent, sizeof(sEvent));

					return sEvent.event;
				}
			}
		}
		return 0;
	}
Example #26
// This must be called on the socket thread.
// BUGBUG: zeromq 4.2 has an overflow bug in timer parameterization.
// The timeout is typed as 'long' by zeromq. This is 32 bit on windows and
// actually less (potentially 1000 or 1 second) on other platforms.
// On non-windows platforms negative doesn't actually produce infinity.
identifiers poller::wait(int32_t timeout_milliseconds)
{
    ///////////////////////////////////////////////////////////////////////////
    // Critical Section
    shared_lock lock(mutex_);

    const auto size = pollers_.size();
    BITCOIN_ASSERT(size <= max_int32);

    const auto item_count = static_cast<int32_t>(size);
    const auto items = reinterpret_cast<zmq_pollitem_t*>(pollers_.data());
    const auto signaled = zmq_poll(items, item_count, timeout_milliseconds);

    // Either one of the sockets was terminated or a signal intervened.
    if (signaled < 0)
    {
        terminated_ = true;
        return{};
    }

    // No events have been signaled and no failure, so the timer expired.
    if (signaled == 0)
    {
        expired_ = true;
        return{};
    }

    identifiers result;
    for (const auto& poller: pollers_)
        if ((poller.revents & ZMQ_POLLIN) != 0)
            result.push(poller.socket);

    // At least one event was signaled, but this poll-in set may be empty.
    return result;
    ///////////////////////////////////////////////////////////////////////////
}
Example #27
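//  SUB side of the VTX TCP test: counts received messages until the pipe
//  tells it to stop.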
static void
test_tcp_sub (void *args, zctx_t *ctx, void *pipe)
{
    vtx_t *vtx = vtx_new (ctx);
    int rc = vtx_tcp_load (vtx, FALSE);
    assert (rc == 0);
    char *port = zstr_recv (pipe);

    void *subscriber = vtx_socket (vtx, ZMQ_SUB);
    assert (subscriber);
    rc = vtx_connect (vtx, subscriber, "tcp://localhost:%s", port);
    assert (rc == 0);
    int recd = 0;

    while (!zctx_interrupted) {
        zmq_pollitem_t items [] = {
            { pipe, 0, ZMQ_POLLIN, 0 },
            { subscriber, 0, ZMQ_POLLIN, 0 }
        };
        int rc = zmq_poll (items, 2, 500 * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Context has been shut down
        if (items [0].revents & ZMQ_POLLIN) {
            free (zstr_recv (pipe));
            zstr_send (pipe, "OK");
            break;
        }
        if (items [1].revents & ZMQ_POLLIN) {
            free (zstr_recv (subscriber));
            recd++;
        }
    }
    zclock_log ("I: SUB: recd=%d", recd);
    free (port);
    vtx_destroy (&vtx);
}
Example #28
bool
heartbeats_collector_t::get_metainfo_from_endpoint(const inetv4_endpoint_t& endpoint,
												   std::string& response_t)
{
	// create req socket
	std::auto_ptr<zmq::socket_t> zmq_socket;
	zmq_socket.reset(new zmq::socket_t(*(context()->zmq_context()), ZMQ_REQ));
	std::string ex_err;

	// connect to host
	std::string host_ip_str = nutils::ipv4_to_str(endpoint.host.ip);
	std::string connection_str = "tcp://" + host_ip_str + ":";
	connection_str += boost::lexical_cast<std::string>(endpoint.port);

	int timeout = 0;
	zmq_socket->setsockopt(ZMQ_LINGER, &timeout, sizeof(timeout));
	zmq_socket->setsockopt(ZMQ_IDENTITY, m_uuid.c_str(), m_uuid.length());
	zmq_socket->connect(connection_str.c_str());

	// send request for cocaine metadata
	Json::Value msg(Json::objectValue);
	Json::FastWriter writer;

	msg["version"] = 2;
	msg["action"] = "info";

	std::string info_request = writer.write(msg);
	zmq::message_t message(info_request.length());
	memcpy((void *)message.data(), info_request.c_str(), info_request.length());

	bool sent_request_ok = true;

	try {
		sent_request_ok = zmq_socket->send(message);
	}
	catch (const std::exception& ex) {
		sent_request_ok = false;
		ex_err = ex.what();
	}

	if (!sent_request_ok) {
		// in case of bad send
		std::string error_msg = "heartbeats - could not send metadata request to endpoint: " + endpoint.as_string();
		log(PLOG_WARNING, error_msg + ex_err);

		return false;
	}

	// create polling structure
	zmq_pollitem_t poll_items[1];
	poll_items[0].socket = *zmq_socket;
	poll_items[0].fd = 0;
	poll_items[0].events = ZMQ_POLLIN;
	poll_items[0].revents = 0;

	// poll for response
	int res = zmq_poll(poll_items, 1, host_socket_ping_timeout * 1000);
	if (res <= 0) {
		return false;
	}

	if ((ZMQ_POLLIN & poll_items[0].revents) != ZMQ_POLLIN) {
		return false;
	}

	// receive cocaine control data
	zmq::message_t reply;
	bool received_response_ok = true;

	try {
		received_response_ok = zmq_socket->recv(&reply);
		response_t = std::string(static_cast<char*>(reply.data()), reply.size());
	}
	catch (const std::exception& ex) {
		received_response_ok = false;
		ex_err = ex.what();
	}

	if (!received_response_ok) {
		std::string error_msg = "heartbeats - could not receive metadata response from endpoint: " + endpoint.as_string();
		log(PLOG_WARNING, error_msg + ex_err);

		return false;
	}

	return true;
}
Example #29
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering3 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));

    //  Prepare our context and sockets
    zctx_t *ctx = zctx_new ();
    char endpoint [256];

    //  Bind cloud frontend to endpoint
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Bind state backend / publisher to endpoint
    void *statebe = zsocket_new (ctx, ZMQ_PUB);
    zsocket_bind (statebe, "ipc://%s-state.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }

    //  Connect statefe to all peers
    void *statefe = zsocket_new (ctx, ZMQ_SUB);
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to state backend at '%s'\n", peer);
        zsocket_connect (statefe, "ipc://%s-state.ipc", peer);
    }
    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);

    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Prepare monitor socket
    void *monitor = zsocket_new (ctx, ZMQ_PULL);
    zsocket_bind (monitor, "ipc://%s-monitor.ipc", self);

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (ctx, worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (ctx, client_task, NULL);

    //  Interesting part
    //  -------------------------------------------------------------
    //  Publish-subscribe flow
    //  - Poll statefe and process capacity updates
    //  - Each time capacity changes, broadcast new value
    //  Request-reply flow
    //  - Poll primary and process local/cloud replies
    //  - While worker available, route localfe to local or cloud

    //  Queue of available workers
    int local_capacity = 0;
    int cloud_capacity = 0;
    zlist_t *workers = zlist_new ();

    while (1) {
        zmq_pollitem_t primary [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 },
            { statefe, 0, ZMQ_POLLIN, 0 },
            { monitor, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers anyhow, wait indefinitely
        int rc = zmq_poll (primary, 4,
                           local_capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Track if capacity changes during this iteration
        int previous = local_capacity;

        //  Handle reply from local worker
        zmsg_t *msg = NULL;

        if (primary [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            zframe_t *address = zmsg_unwrap (msg);
            zlist_append (workers, address);
            local_capacity++;

            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), LRU_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else if (primary [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker address for anything
            zframe_t *address = zmsg_unwrap (msg);
            zframe_destroy (&address);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
                    &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  Handle capacity updates
        if (primary [2].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (statefe);
            cloud_capacity = atoi (status);
            free (status);
        }
        //  Handle monitor message
        if (primary [3].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (monitor);
            printf ("%s\n", status);
            free (status);
        }

        //  Now route as many clients requests as we can handle
        //  - If we have local capacity we poll both localfe and cloudfe
        //  - If we have cloud capacity only, we poll just localfe
        //  - Route any request locally if we can, else to cloud
        //
        while (local_capacity + cloud_capacity) {
            zmq_pollitem_t secondary [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            if (local_capacity)
                rc = zmq_poll (secondary, 2, 0);
            else
                rc = zmq_poll (secondary, 1, 0);
            assert (rc >= 0);

            if (secondary [0].revents & ZMQ_POLLIN)
                msg = zmsg_recv (localfe);
            else if (secondary [1].revents & ZMQ_POLLIN)
                msg = zmsg_recv (cloudfe);
            else
                break;      //  No work, go back to primary

            if (local_capacity) {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                local_capacity--;
            }
            else {
                //  Route to random broker peer
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
        }
        if (local_capacity != previous) {
            //  We stick our own address onto the envelope
            zstr_sendm (statebe, self);
            //  Broadcast new capacity
            zstr_sendf (statebe, "%d", local_capacity);
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
Example #30
static flux_msg_t *op_recv (void *impl, int flags)
{
    ctx_t *ctx = impl;
    assert (ctx->magic == MODHANDLE_MAGIC);
    zmq_pollitem_t zp = {
        .events = ZMQ_POLLIN, .socket = ctx->sock, .revents = 0, .fd = -1,
    };
    flux_msg_t *msg = NULL;

    if (connect_socket (ctx) < 0)
        goto done;
    if ((flags & FLUX_O_NONBLOCK)) {
        int n;
        if ((n = zmq_poll (&zp, 1, 0L)) <= 0) {
            if (n == 0)
                errno = EWOULDBLOCK;
            goto done;
        }
    }
    msg = flux_msg_recvzsock (ctx->sock);
done:
    return msg;
}

static int op_event_subscribe (void *impl, const char *topic)
{
    ctx_t *ctx = impl;
    assert (ctx->magic == MODHANDLE_MAGIC);
    json_object *in = Jnew ();
    flux_rpc_t *rpc = NULL;
    int rc = -1;

    if (connect_socket (ctx) < 0)
        goto done;
    Jadd_str (in, "topic", topic);
    if (!(rpc = flux_rpc (ctx->h, "cmb.sub", Jtostr (in), FLUX_NODEID_ANY, 0))
            || flux_rpc_get (rpc, NULL) < 0)
        goto done;
    rc = 0;
done:
    Jput (in);
    flux_rpc_destroy (rpc);
    return rc;
}