Example #1
//  If key-value pair has expired, delete it and publish the
//  fact to listening clients.
static int
s_flush_single (char *key, void *data, void *args)
{
    clonesrv_t *self = (clonesrv_t *) args;

    kvmsg_t *kvmsg = (kvmsg_t *) data;
    int64_t ttl;
    sscanf (kvmsg_get_prop (kvmsg, "ttl"), "%" PRId64, &ttl);
    if (ttl && zclock_time () >= ttl) {
        kvmsg_set_sequence (kvmsg, ++self->sequence);
        kvmsg_set_body (kvmsg, (byte *) "", 0);
        kvmsg_send (kvmsg, self->publisher);
        kvmsg_store (&kvmsg, self->kvmap);
        zclock_log ("I: publishing delete=%d", (int) self->sequence);
    }
    return 0;
}
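This callback is designed to be applied across the whole key-value map from a reactor timer. A minimal driver sketch, assuming the clonesrv_t kvmap field shown above and CZMQ's zhash_foreach (the handler name s_flush_ttl is illustrative):

//  Sketch: sweep every stored kvmsg, deleting expired ones
static int
s_flush_ttl (zloop_t *loop, int timer_id, void *args)
{
    clonesrv_t *self = (clonesrv_t *) args;
    if (self->kvmap)
        zhash_foreach (self->kvmap, s_flush_single, self);
    return 0;
}
//  Registered elsewhere as: zloop_timer (loop, 1000, 0, s_flush_ttl, self);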
Example #2
static int timer_event(zloop_t *loop, int timer_id, void *arg)
{
    static size_t last_received_count   = 0;
    static size_t last_received_bytes   = 0;
    static size_t last_decompressed_count = 0;
    static size_t last_decompressed_bytes = 0;

    size_t message_count    = received_messages_count - last_received_count;
    size_t message_bytes    = received_messages_bytes - last_received_bytes;
    size_t decompressed_count = decompressed_messages_count - last_decompressed_count;
    size_t decompressed_bytes = decompressed_messages_bytes - last_decompressed_bytes;

    double avg_msg_size        = message_count ? (message_bytes / 1024.0) / message_count : 0;
    double max_msg_size        = received_messages_max_bytes / 1024.0;
    double avg_decompressed_size = decompressed_count ? (decompressed_bytes / 1024.0) / decompressed_count : 0;
    double max_decompressed_size = decompressed_messages_max_bytes / 1024.0;

    if (!quiet) {
        printf("[I] processed    %zu messages (%.2f KB), avg: %.2f KB, max: %.2f KB\n",
               message_count, message_bytes/1024.0, avg_msg_size, max_msg_size);

        printf("[I] decompressed %zu messages (%.2f KB), avg: %.2f KB, max: %.2f KB\n",
               decompressed_count, decompressed_bytes/1024.0, avg_decompressed_size, max_decompressed_size);
    }

    last_received_count = received_messages_count;
    last_received_bytes = received_messages_bytes;
    received_messages_max_bytes = 0;
    last_decompressed_count = decompressed_messages_count;
    last_decompressed_bytes = decompressed_messages_bytes;
    decompressed_messages_max_bytes = 0;

    global_time = zclock_time();

    static size_t ticks = 0;
    bool terminate = (++ticks % CONFIG_FILE_CHECK_INTERVAL == 0) && config_file_has_changed();
    if (terminate) {
        printf("[I] detected config change. terminating.\n");
        zsys_interrupted = 1;
    }
    if (ticks % HEART_BEAT_INTERVAL == 0)
        device_tracker_reconnect_stale_devices(tracker);

    return 0;
}
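A handler with this signature plugs into a czmq reactor. A minimal registration sketch, mirroring the setup that appears in Example #23 below (1000 is the interval in msec; 0 means the timer repeats forever):

zloop_t *loop = zloop_new ();
assert (loop);
int timer_id = zloop_timer (loop, 1000, 0, timer_event, NULL);
assert (timer_id != -1);
zloop_start (loop);
zloop_destroy (&loop);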
Example #3
static int
s_collector (zloop_t *loop, zmq_pollitem_t *poller, void *args)
{
    clonesrv_t *self = (clonesrv_t *) args;

    kvmsg_t *kvmsg = kvmsg_recv (poller->socket);
    if (kvmsg) {
        kvmsg_set_sequence (kvmsg, ++self->sequence);
        kvmsg_send (kvmsg, self->publisher);
        int ttl = atoi (kvmsg_get_prop (kvmsg, "ttl"));
        if (ttl)
            kvmsg_set_prop (kvmsg, "ttl",
                "%" PRId64, zclock_time () + ttl * 1000);
        kvmsg_store (&kvmsg, self->kvmap);
        zclock_log ("I: publishing update=%d", (int) self->sequence);
    }
    return 0;
}
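Note that the server rewrites the "ttl" property from a relative lifetime in seconds into an absolute expiry in milliseconds, which is exactly what the comparison against zclock_time () in Example #1 expects. A hedged sketch of the client side, which sets only the relative value (the 30-second bound is illustrative):

kvmsg_t *kvmsg = kvmsg_new (0);
kvmsg_fmt_key  (kvmsg, "%d", randof (10000));
kvmsg_fmt_body (kvmsg, "%d", randof (1000000));
kvmsg_set_prop (kvmsg, "ttl", "%d", randof (30));   //  Seconds; server converts
kvmsg_send (kvmsg, publisher);
kvmsg_destroy (&kvmsg);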
Example #4
mlm_msg_t *
mlm_msg_new (
    const char *sender, const char *address, const char *subject,
    const char *tracker, uint timeout, zmsg_t *content)
{
    assert (sender);
    mlm_msg_t *self = (mlm_msg_t *) zmalloc (sizeof (mlm_msg_t));
    if (self) {
        self->sender = sender? strdup (sender): NULL;
        self->address = address? strdup (address): NULL;
        self->subject = subject? strdup (subject): NULL;
        self->tracker = tracker? strdup (tracker): NULL;
        self->expiry = zclock_time () + timeout;
        self->content = content;
        self->refcount = zmq_atomic_counter_new ();
        zmq_atomic_counter_set (self->refcount, 1);
    }
    return self;
}
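The atomic refcount initialized to 1 implies a matching release path. A hedged sketch of what the drop side could look like; the helper name s_msg_drop and the exact field cleanup are assumptions for illustration, not Malamute's actual API:

static void
s_msg_drop (mlm_msg_t **self_p)
{
    assert (self_p);
    mlm_msg_t *self = *self_p;
    //  Free the message only when the last reference is released
    if (self && zmq_atomic_counter_dec (self->refcount) == 0) {
        zmq_atomic_counter_destroy (&self->refcount);
        free (self->sender);
        free (self->address);
        free (self->subject);
        free (self->tracker);
        zmsg_destroy (&self->content);
        free (self);
    }
    *self_p = NULL;
}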
Example #5
void rrwrk_connect_to_broker(rrwrk_t *self)
{
    self->worker = _rrwrk_create_socket(self->ctx, self->worker, ZMQ_DEALER);

    //** Recreate uuid for each new connection
    if (self->uuid_str) {
        free(self->uuid_str);
        uuid_clear(self->uuid);
    }

    uuid_generate(self->uuid);
    self->uuid_str = rr_uuid_str(self->uuid);
    zsocket_set_identity(self->worker, self->uuid_str);

    zsocket_connect(self->worker, self->broker);

    //** Tell broker we are ready for work
    rrwrk_send_to_broker(self, RRWRK_READY, NULL);

    //** If liveness hits zero, queue is considered disconnected
    self->liveness = HEARTBEAT_LIVENESS;
    self->heartbeat_at = zclock_time() + self->heartbeat;
}
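The liveness and heartbeat fields follow the Paranoid Pirate convention: liveness counts missed heartbeats before the peer is declared dead, and heartbeat_at is the next send deadline. Typical constants, assuming the zguide defaults rather than this project's actual values:

#define HEARTBEAT_LIVENESS  3       //  3-5 is reasonable
#define HEARTBEAT_INTERVAL  1000    //  msec, i.e. what self->heartbeat holds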
Example #6
File: bstar.c Project: rryqszq4/MadzMQ
static void
s_update_peer_expiry (bstar_t *self)
{
    self->peer_expiry = zclock_time () + 2 * BSTAR_HEARTBEAT;
}
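The expiry stamped here is consumed by comparing it against the current clock; a minimal sketch of that check, as the FSM examples below also do:

//  Peer is considered dead once two heartbeat periods pass in silence
if (zclock_time () >= self->peer_expiry) {
    //  ... treat peer as gone, e.g. allow failover ...
}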
Example #7
int main (void)
{
    //  Prepare our context and subscriber
    zctx_t *ctx = zctx_new ();
    void *snapshot = zsocket_new (ctx, ZMQ_DEALER);
    zsocket_connect (snapshot, "tcp://localhost:5556");
    void *subscriber = zsocket_new (ctx, ZMQ_SUB);
    zsockopt_set_subscribe (subscriber, "");
    zsocket_connect (subscriber, "tcp://localhost:5557");
    void *publisher = zsocket_new (ctx, ZMQ_PUSH);
    zsocket_connect (publisher, "tcp://localhost:5558");

    zhash_t *kvmap = zhash_new ();
    srandom ((unsigned) time (NULL));

    //  Get state snapshot
    int64_t sequence = 0;
    zstr_send (snapshot, "ICANHAZ?");
    while (TRUE) {
        kvmsg_t *kvmsg = kvmsg_recv (snapshot);
        if (!kvmsg)
            break;          //  Interrupted
        if (streq (kvmsg_key (kvmsg), "KTHXBAI")) {
            sequence = kvmsg_sequence (kvmsg);
            printf ("I: received snapshot=%d\n", (int) sequence);
            kvmsg_destroy (&kvmsg);
            break;          //  Done
        }
        kvmsg_store (&kvmsg, kvmap);
    }
    int64_t alarm = zclock_time () + 1000;
    while (!zctx_interrupted) {
        zmq_pollitem_t items [] = { { subscriber, 0, ZMQ_POLLIN, 0 } };
        int tickless = (int) ((alarm - zclock_time ()));
        if (tickless < 0)
            tickless = 0;
        int rc = zmq_poll (items, 1, tickless * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Context has been shut down

        if (items [0].revents & ZMQ_POLLIN) {
            kvmsg_t *kvmsg = kvmsg_recv (subscriber);
            if (!kvmsg)
                break;          //  Interrupted

            //  Discard out-of-sequence kvmsgs, incl. heartbeats
            if (kvmsg_sequence (kvmsg) > sequence) {
                sequence = kvmsg_sequence (kvmsg);
                kvmsg_store (&kvmsg, kvmap);
                printf ("I: received update=%d\n", (int) sequence);
            }
            else
                kvmsg_destroy (&kvmsg);
        }
        //  If we timed-out, generate a random kvmsg
        if (zclock_time () >= alarm) {
            kvmsg_t *kvmsg = kvmsg_new (0);
            kvmsg_fmt_key  (kvmsg, "%d", randof (10000));
            kvmsg_fmt_body (kvmsg, "%d", randof (1000000));
            kvmsg_send     (kvmsg, publisher);
            kvmsg_destroy (&kvmsg);
            alarm = zclock_time () + 1000;
        }
    }
    printf (" Interrupted\n%d messages in\n", (int) sequence);
    zhash_destroy (&kvmap);
    zctx_destroy (&ctx);
    return 0;
}
Example #8
File: zloop.c Project: xekoukou/czmq
int
zloop_start (zloop_t *self)
{
    assert (self);
    int rc = 0;

    //  Recalculate all timers now
    s_timer_t *timer = (s_timer_t *) zlist_first (self->timers);
    while (timer) {
        timer->when = timer->delay + zclock_time ();
        timer = (s_timer_t *) zlist_next (self->timers);
    }
    //  Main reactor loop
    while (!zctx_interrupted) {
        if (self->dirty) {
            // If s_rebuild_pollset() fails, break out of the loop and
            // return its error
            rc = s_rebuild_pollset (self);
            if (rc)
                break;
        }
        rc = zmq_poll (self->pollset, (int) self->poll_size,
                       s_tickless_timer (self) * ZMQ_POLL_MSEC);
        if (rc == -1 || zctx_interrupted) {
            if (self->verbose)
                zclock_log ("I: zloop: interrupted (%d) - %s", rc, strerror (errno));
            rc = 0;
            break;              //  Context has been shut down
        }
        //  Handle any timers that have now expired
        timer = (s_timer_t *) zlist_first (self->timers);
        while (timer) {
            if (zclock_time () >= timer->when && timer->when != -1) {
                if (self->verbose)
                    zclock_log ("I: zloop: call timer handler");
                rc = timer->handler (self, NULL, timer->arg);
                if (rc == -1)
                    break;      //  Timer handler signaled break
                if (timer->times && --timer->times == 0) {
                    zlist_remove (self->timers, timer);
                    free (timer);
                }
                else
                    timer->when = timer->delay + zclock_time ();
            }
            timer = (s_timer_t *) zlist_next (self->timers);
        }
        //  Handle any pollers that are ready
        size_t item_nbr;
        for (item_nbr = 0; item_nbr < self->poll_size && rc >= 0; item_nbr++) {
            s_poller_t *poller = &self->pollact [item_nbr];
            assert (self->pollset [item_nbr].socket == poller->item.socket);
            
            if ((self->pollset [item_nbr].revents & ZMQ_POLLERR)
            && !poller->ignore_errors) {
                if (self->verbose)
                    zclock_log ("I: zloop: can't poll %s socket (%p, %d): %s",
                        poller->item.socket?
                            zsocket_type_str (poller->item.socket): "FD",
                        poller->item.socket, poller->item.fd,
                        strerror (errno));
                //  Give handler one chance to handle error, then kill
                //  poller because it'll disrupt the reactor otherwise.
                if (poller->errors++) {
                    zloop_poller_end (self, &poller->item);
                    self->pollset [item_nbr].revents = 0;
                }
            }
            else
                poller->errors = 0;     //  A non-error happened

            if (self->pollset [item_nbr].revents) {
                if (self->verbose)
                    zclock_log ("I: zloop: call %s socket handler (%p, %d)",
                        poller->item.socket?
                            zsocket_type_str (poller->item.socket): "FD",
                        poller->item.socket, poller->item.fd);
                rc = poller->handler (self, &self->pollset [item_nbr], poller->arg);
                if (rc == -1)
                    break;      //  Poller handler signaled break
            }
        }
        //  Now handle any timer zombies
        //  This is going to be slow if we have many zombies
        while (zlist_size (self->zombies)) {
            void *arg = zlist_pop (self->zombies);
            timer = (s_timer_t *) zlist_first (self->timers);
            while (timer) {
                if (timer->arg == arg) {
                    zlist_remove (self->timers, timer);
                    free (timer);
                    break;
                }
                timer = (s_timer_t *) zlist_next (self->timers);
            }
        }
        if (rc == -1)
            break;
    }
    return rc;
}
Example #9
static Bool
s_state_machine (bstar_t *fsm)
{
    Bool exception = FALSE;
    //  Primary server is waiting for peer to connect
    //  Accepts CLIENT_REQUEST events in this state
    if (fsm->state == STATE_PRIMARY) {
        if (fsm->event == PEER_BACKUP) {
            printf ("I: connected to backup (slave), ready as master\n");
            fsm->state = STATE_ACTIVE;
        }
        else
        if (fsm->event == PEER_ACTIVE) {
            printf ("I: connected to backup (master), ready as slave\n");
            fsm->state = STATE_PASSIVE;
        }
    }
    else
    //  Backup server is waiting for peer to connect
    //  Rejects CLIENT_REQUEST events in this state
    if (fsm->state == STATE_BACKUP) {
        if (fsm->event == PEER_ACTIVE) {
            printf ("I: connected to primary (master), ready as slave\n");
            fsm->state = STATE_PASSIVE;
        }
        else
        if (fsm->event == CLIENT_REQUEST)
            exception = TRUE;
    }
    else
    //  Server is active
    //  Accepts CLIENT_REQUEST events in this state
    if (fsm->state == STATE_ACTIVE) {
        if (fsm->event == PEER_ACTIVE) {
            //  Two masters would mean split-brain
            printf ("E: fatal error - dual masters, aborting\n");
            exception = TRUE;
        }
    }
    else
    //  Server is passive
    //  CLIENT_REQUEST events can trigger failover if peer looks dead
    if (fsm->state == STATE_PASSIVE) {
        if (fsm->event == PEER_PRIMARY) {
            //  Peer is restarting - become active, peer will go passive
            printf ("I: primary (slave) is restarting, ready as master\n");
            fsm->state = STATE_ACTIVE;
        }
        else
        if (fsm->event == PEER_BACKUP) {
            //  Peer is restarting - become active, peer will go passive
            printf ("I: backup (slave) is restarting, ready as master\n");
            fsm->state = STATE_ACTIVE;
        }
        else
        if (fsm->event == PEER_PASSIVE) {
            //  Two passives would mean cluster would be non-responsive
            printf ("E: fatal error - dual slaves, aborting\n");
            exception = TRUE;
        }
        else
        if (fsm->event == CLIENT_REQUEST) {
            //  Peer becomes master if timeout has passed
            //  It's the client request that triggers the failover
            assert (fsm->peer_expiry > 0);
            if (zclock_time () >= fsm->peer_expiry) {
                //  If peer is dead, switch to the active state
                printf ("I: failover successful, ready as master\n");
                fsm->state = STATE_ACTIVE;
            }
            else
                //  If peer is alive, reject connections
                exception = TRUE;
        }
    }
    return exception;
}
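For reference, the states and events this FSM switches over are plain enums; a sketch matching the definitions used by the zguide's Binary Star servers:

typedef enum {
    STATE_PRIMARY = 1,          //  Primary, waiting for peer to connect
    STATE_BACKUP  = 2,          //  Backup, waiting for peer to connect
    STATE_ACTIVE  = 3,          //  Active - accepting connections
    STATE_PASSIVE = 4           //  Passive - not accepting connections
} state_t;

typedef enum {
    PEER_PRIMARY   = 1,         //  HA peer is pending primary
    PEER_BACKUP    = 2,         //  HA peer is pending backup
    PEER_ACTIVE    = 3,         //  HA peer is active
    PEER_PASSIVE   = 4,         //  HA peer is passive
    CLIENT_REQUEST = 5          //  Client makes request
} event_t;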
Example #10
static void
node_task (void *args, zctx_t *ctx, void *pipe)
{
    zyre_t *node = zyre_new (ctx);
    if (!node)
        return;                 //  Could not create new node
    zyre_set_verbose (node);
    zyre_start (node);

    int64_t counter = 0;
    char *to_peer = NULL;        //  Either of these set,
    char *to_group = NULL;       //    and we set a message
    char *cookie = NULL;

    zpoller_t *poller = zpoller_new (pipe, zyre_socket (node), NULL);
    int64_t trigger = zclock_time () + 1000;
    while (!zctx_interrupted) {
        void *which = zpoller_wait (poller, randof (1000));

        //  Any command from parent means EXIT
        if (which == pipe)
            break;

        //  Process an event from node
        if (which == zyre_socket (node)) {
            zmsg_t *incoming = zyre_recv (node);
            if (!incoming)
                break;              //  Interrupted

            char *event = zmsg_popstr (incoming);
            if (streq (event, "ENTER")) {
                //  Always say hello to new peer
                to_peer = zmsg_popstr (incoming);
            }
            else
            if (streq (event, "EXIT")) {
                //  Always try talk to departed peer
                to_peer = zmsg_popstr (incoming);
            }
            else
            if (streq (event, "WHISPER")) {
                //  Send back response 1/2 the time
                if (randof (2) == 0) {
                    to_peer = zmsg_popstr (incoming);
                    cookie = zmsg_popstr (incoming);
                }
            }
            else
            if (streq (event, "SHOUT")) {
                to_peer = zmsg_popstr (incoming);
                to_group = zmsg_popstr (incoming);
                cookie = zmsg_popstr (incoming);
                //  Send peer response 1/3rd the time
                if (randof (3) > 0) {
                    free (to_peer);
                    to_peer = NULL;
                }
                //  Send group response 1/3rd the time
                if (randof (3) > 0) {
                    free (to_group);
                    to_group = NULL;
                }
            }
            else
            if (streq (event, "JOIN")) {
                char *from_peer = zmsg_popstr (incoming);
                char *group = zmsg_popstr (incoming);
                printf ("I: %s joined %s\n", from_peer, group);
                free (from_peer);
                free (group);
            }
            else
            if (streq (event, "LEAVE")) {
                char *from_peer = zmsg_popstr (incoming);
                char *group = zmsg_popstr (incoming);
                printf ("I: %s left %s\n", from_peer, group);
                free (from_peer);
                free (group);
            }
            else
            if (streq (event, "DELIVER")) {
                char *filename = zmsg_popstr (incoming);
                char *fullname = zmsg_popstr (incoming);
                printf ("I: received file %s\n", fullname);
                free (fullname);
                free (filename);
            }
            free (event);
            zmsg_destroy (&incoming);

            //  Send outgoing messages if needed
            if (to_peer) {
                zyre_whispers (node, to_peer, "%lu", (unsigned long) counter++);
                free (to_peer);
                to_peer = NULL;
            }
            if (to_group) {
                zyre_shouts (node, to_group, "%lu", (unsigned long) counter++);
                free (to_group);
                to_group = NULL;
            }
            if (cookie) {
                free (cookie);
                cookie = NULL;
            }
        }
        if (zclock_time () >= trigger) {
            trigger = zclock_time () + 1000;
            char group [10];
            sprintf (group, "GROUP%03d", randof (MAX_GROUP));
            if (randof (4) == 0)
                zyre_join (node, group);
            else
            if (randof (3) == 0)
                zyre_leave (node, group);
        }
    }
    zpoller_destroy (&poller);
    zyre_destroy (&node);
}
Example #11
int main(void)
{
	zctx_t *ctx = zctx_new();
	void *frontend = zsocket_new(ctx, ZMQ_ROUTER);
	void *backend = zsocket_new(ctx, ZMQ_ROUTER);
	zsocket_bind(frontend, "tcp://127.0.0.1:5555");
	zsocket_bind(backend, "tcp://127.0.0.1:5556");

	zlist_t *workers = zlist_new();

	uint64_t heartbeat_at = zclock_time() + HEARTBEAT_INTERVAL;

	while (true){
		zmq_pollitem_t items [] ={
			{backend, 0, ZMQ_POLLIN, 0},
			{frontend, 0, ZMQ_POLLIN, 0}
		};

		int rc = zmq_poll(items, zlist_size(workers)? 2: 1, HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC);
		if (rc == -1)
			break;

		if (items[0].revents & ZMQ_POLLIN){
			zmsg_t *msg = zmsg_recv(backend);
			if (!msg)
				break;

			zframe_t *identity = zmsg_unwrap(msg);
			worker_t *worker = s_worker_new(identity);
			s_worker_ready(worker, workers);

			if (zmsg_size(msg) == 1){
				zframe_t *frame = zmsg_first(msg);
				if (memcmp(zframe_data(frame), PPP_READY, 1) && memcmp(zframe_data(frame), PPP_HEARTBEAT, 1)){
					printf("E: invalid message from worker");
					zmsg_dump(msg);
				}
				zmsg_destroy(&msg);
			}
			else
				zmsg_send(&msg, frontend);
		}
		if (items[1].revents & ZMQ_POLLIN){
			zmsg_t *msg = zmsg_recv(frontend);
			if (!msg)
				break;
			zframe_t *identity = s_workers_next(workers);
			zmsg_prepend(msg, &identity);
			zmsg_send(&msg, backend);
		}

		if (zclock_time() >= heartbeat_at){
			worker_t *worker = (worker_t *)zlist_first(workers);
			while (worker){
				zframe_send(&worker->identity, backend, ZFRAME_REUSE + ZFRAME_MORE);
				zframe_t *frame = zframe_new(PPP_HEARTBEAT, 1);
				zframe_send(&frame, backend, 0);
				worker = (worker_t *)zlist_next(workers);
			}
			heartbeat_at = zclock_time() + HEARTBEAT_INTERVAL;
		}
		s_workers_purge(workers);
	}

	while (zlist_size(workers)){
		worker_t *worker = (worker_t *)zlist_pop(workers);
		s_worker_destroy(&worker);
	}
	zlist_destroy(&workers);
	zctx_destroy(&ctx);
	return 0;
}
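PPP_READY and PPP_HEARTBEAT, compared byte-wise above, are one-byte signal frames; the zguide's Paranoid Pirate examples define them as:

#define PPP_READY       "\001"      //  Signals worker is ready
#define PPP_HEARTBEAT   "\002"      //  Signals worker heartbeat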
Example #12
File: bstarsrv.c Project: fantix/zguide
static bool
s_state_machine (bstar_t *fsm)
{
    bool exception = false;

    //  These are the PRIMARY and BACKUP states; we're waiting to become
    //  ACTIVE or PASSIVE depending on events we get from our peer:
    if (fsm->state == STATE_PRIMARY) {
        if (fsm->event == PEER_BACKUP) {
            printf ("I: connected to backup (passive), ready active\n");
            fsm->state = STATE_ACTIVE;
        }
        else if (fsm->event == PEER_ACTIVE) {
            printf ("I: connected to backup (active), ready passive\n");
            fsm->state = STATE_PASSIVE;
        }
        //  Accept client connections
    }
    else if (fsm->state == STATE_BACKUP) {
        if (fsm->event == PEER_ACTIVE) {
            printf ("I: connected to primary (active), ready passive\n");
            fsm->state = STATE_PASSIVE;
        }
        else
            //  Reject client connections when acting as backup
            if (fsm->event == CLIENT_REQUEST)
                exception = true;
    }
    else
        //  .split active and passive states
        //  These are the ACTIVE and PASSIVE states:

        if (fsm->state == STATE_ACTIVE) {
            if (fsm->event == PEER_ACTIVE) {
                //  Two actives would mean split-brain
                printf ("E: fatal error - dual actives, aborting\n");
                exception = true;
            }
        }
        else
            //  Server is passive
            //  CLIENT_REQUEST events can trigger failover if peer looks dead
            if (fsm->state == STATE_PASSIVE) {
                if (fsm->event == PEER_PRIMARY) {
                    //  Peer is restarting - become active, peer will go passive
                    printf ("I: primary (passive) is restarting, ready active\n");
                    fsm->state = STATE_ACTIVE;
                }
                else if (fsm->event == PEER_BACKUP) {
                    //  Peer is restarting - become active, peer will go passive
                    printf ("I: backup (passive) is restarting, ready active\n");
                    fsm->state = STATE_ACTIVE;
                }
                else if (fsm->event == PEER_PASSIVE) {
                    //  Two passives would mean cluster would be non-responsive
                    printf ("E: fatal error - dual passives, aborting\n");
                    exception = true;
                }
                else if (fsm->event == CLIENT_REQUEST) {
                    //  Peer becomes active if timeout has passed
                    //  It's the client request that triggers the failover
                    assert (fsm->peer_expiry > 0);
                    if (zclock_time () >= fsm->peer_expiry) {
                        //  If peer is dead, switch to the active state
                        printf ("I: failover successful, ready active\n");
                        fsm->state = STATE_ACTIVE;
                    }
                    else
                        //  If peer is alive, reject connections
                        exception = true;
                }
            }
    return exception;
}
Example #13
int main (int argc, char *argv [])
{
    int verbose = 0;
    int daemonize = 0;
    for (int i = 1; i < argc; i++)
    {
        if (streq(argv[i], "-v")) verbose = 1;
        else if (streq(argv[i], "-d")) daemonize = 1;
        else if (streq(argv[i], "-h"))
        {
            printf("%s [-h] | [-d] [-v] [broker url]\n\t-h This help message\n\t-d Daemon mode.\n\t-v Verbose output\n\tbroker url defaults to tcp://*:5555\n", argv[0]);
            return -1;
        }
    }

    if (daemonize != 0)
    {
        daemon(0, 0);
    }

    broker_t *self = s_broker_new (verbose);
    /* did the user specify a bind address? */
    if (argc > 1)
    {
        s_broker_bind (self, argv[argc-1]);
        printf("Bound to %s\n", argv[argc-1]);
    }
    else
    {
        /* default */
        s_broker_bind (self, "tcp://*:5555");
        printf("Bound to tcp://*:5555\n");
    }

    //  Get and process messages forever or until interrupted
    while (true) {
        zmq_pollitem_t items [] = {
            { self->socket,  0, ZMQ_POLLIN, 0 } };
        int rc = zmq_poll (items, 1, HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Interrupted

        //  Process next input message, if any
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zmsg_recv (self->socket);
            if (!msg)
                break;          //  Interrupted
            if (self->verbose) {
                zclock_log ("I: received message:");
                zmsg_dump (msg);
            }
            zframe_t *sender = zmsg_pop (msg);
            zframe_t *empty  = zmsg_pop (msg);
            zframe_t *header = zmsg_pop (msg);

            if (zframe_streq (header, MDPC_CLIENT))
                s_broker_client_msg (self, sender, msg);
            else
            if (zframe_streq (header, MDPW_WORKER))
                s_broker_worker_msg (self, sender, msg);
            else {
                zclock_log ("E: invalid message:");
                zmsg_dump (msg);
                zmsg_destroy (&msg);
            }
            zframe_destroy (&sender);
            zframe_destroy (&empty);
            zframe_destroy (&header);
        }
        //  Disconnect and delete any expired workers
        //  Send heartbeats to idle workers if needed
        if (zclock_time () > self->heartbeat_at) {
            s_broker_purge (self);
            worker_t *worker = (worker_t *) zlist_first (self->waiting);
            while (worker) {
                s_worker_send (worker, MDPW_HEARTBEAT, NULL, NULL);
                worker = (worker_t *) zlist_next (self->waiting);
            }
            self->heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;
        }
    }
    if (zctx_interrupted)
        printf ("W: interrupt received, shutting down...\n");

    s_broker_destroy (&self);
    return 0;
}
Example #14
static void
s_broker_worker_msg (broker_t *self, zframe_t *sender, zmsg_t *msg)
{
    assert (zmsg_size (msg) >= 1);     //  At least, command

    zframe_t *command = zmsg_pop (msg);
    char *identity = zframe_strhex (sender);
    int worker_ready = (zhash_lookup (self->workers, identity) != NULL);
    free (identity);
    worker_t *worker = s_worker_require (self, sender);

    if (zframe_streq (command, MDPW_READY)) {
        if (worker_ready)               //  Not first command in session
            s_worker_delete (worker, 1);
        else
        if (zframe_size (sender) >= 4  //  Reserved service name
        &&  memcmp (zframe_data (sender), "mmi.", 4) == 0)
            s_worker_delete (worker, 1);
        else {
            //  Attach worker to service and mark as idle
            zframe_t *service_frame = zmsg_pop (msg);
            worker->service = s_service_require (self, service_frame);
            zlist_append (self->waiting, worker);
            zlist_append (worker->service->waiting, worker);
            worker->service->workers++;
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
            s_service_dispatch (worker->service);
            zframe_destroy (&service_frame);
            zclock_log ("worker created");
        }
    }
    else
    if (zframe_streq (command, MDPW_REPORT)) {
        if (worker_ready) {
            //  Remove & save client return envelope and insert the
            //  protocol header and service name, then rewrap envelope.
            zframe_t *client = zmsg_unwrap (msg);
            zmsg_pushstr (msg, worker->service->name);
            zmsg_pushstr (msg, MDPC_REPORT);
            zmsg_pushstr (msg, MDPC_CLIENT);
            zmsg_wrap (msg, client);
            zmsg_send (&msg, self->socket);
        }
        else
            s_worker_delete (worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_HEARTBEAT)) {
        if (worker_ready) {
            if (zlist_size (self->waiting) > 1) {
                // Move worker to the end of the waiting queue,
                // so s_broker_purge will only check old worker(s)
                zlist_remove (self->waiting, worker);
                zlist_append (self->waiting, worker);
            }
            worker->expiry = zclock_time () + HEARTBEAT_EXPIRY;
        }
        else
            s_worker_delete (worker, 1);
    }
    else
    if (zframe_streq (command, MDPW_DISCONNECT))
        s_worker_delete (worker, 0);
    else {
        zclock_log ("E: invalid input message");
        zmsg_dump (msg);
    }
    zframe_destroy (&command);
    zmsg_destroy (&msg);
}
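The MDPW_* commands compared above are one-byte frames from the Majordomo worker protocol. A reference sketch of the usual definitions; the values follow MDP/0.1's mdp.h, while MDPW_REPORT (used by this broker variant in place of MDP/0.1's MDPW_REPLY) is assumed to occupy the same slot:

#define MDPW_READY       "\001"
#define MDPW_REQUEST     "\002"
#define MDPW_REPORT      "\003"     //  Assumption: takes MDPW_REPLY's value
#define MDPW_HEARTBEAT   "\004"
#define MDPW_DISCONNECT  "\005"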
Example #15
//  .split main task
//  We have a single task that implements the worker side of the
//  Paranoid Pirate Protocol (PPP). The interesting parts here are
//  the heartbeating, which lets the worker detect if the queue has
//  died, and vice versa:
static void ppworker_actor(zsock_t *pipe, void *args)
{
    ubx_block_t *b = (ubx_block_t *) args;
    struct czmq_ppworker_info *inf = (struct czmq_ppworker_info *) b->private_data;
    zsock_t *worker = s_worker_socket ();

    //  If liveness hits zero, queue is considered disconnected
    size_t liveness = HEARTBEAT_LIVENESS;
    size_t interval = INTERVAL_INIT;

    //  Send out heartbeats at regular intervals
    uint64_t heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;

    srandom ((unsigned) time (NULL));

    printf("ppworker: actor started.\n");
    // send signal on pipe socket to acknowledge initialisation
    zsock_signal (pipe, 0);

    while (true) {
        zmq_pollitem_t items [] = { { zsock_resolve(worker),  0, ZMQ_POLLIN, 0 } };
        int rc = zmq_poll (items, 1, HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Interrupted

        if (items [0].revents & ZMQ_POLLIN) {
            //  Get message
            //  - 3-part envelope + content -> request
            //  - 1-part HEARTBEAT -> heartbeat
            zmsg_t *msg = zmsg_recv (worker);
            if (!msg)
                break;          //  Interrupted

            if (zmsg_size (msg) == 3) {
                printf ("I: normal reply\n");
                byte *buffer;
                size_t buffer_size = zmsg_encode (msg, &buffer);
                ubx_type_t* type =  ubx_type_get(b->ni, "unsigned char");
                ubx_data_t umsg;
                umsg.data = (void *)buffer;
                umsg.len = buffer_size;
                umsg.type = type;
                __port_write(inf->ports.zmq_in, &umsg);
                zmsg_send (&msg, worker);
                liveness = HEARTBEAT_LIVENESS;
                sleep (1);              //  Do some heavy work
                if (zsys_interrupted)
                    break;
            }
            else
            //  .split handle heartbeats
            //  When we get a heartbeat message from the queue, it means the
            //  queue was (recently) alive, so we must reset our liveness
            //  indicator:
            if (zmsg_size (msg) == 1) {
                zframe_t *frame = zmsg_first (msg);
                if (memcmp (zframe_data (frame), PPP_HEARTBEAT, 1) == 0)
                    liveness = HEARTBEAT_LIVENESS;
                else {
                    printf ("E: invalid message\n");
                    zmsg_dump (msg);
                }
                zmsg_destroy (&msg);
            }
            else {
                printf ("E: invalid message\n");
                zmsg_dump (msg);
            }
            interval = INTERVAL_INIT;
        }
        else
        //  .split detecting a dead queue
        //  If the queue hasn't sent us heartbeats in a while, destroy the
        //  socket and reconnect. This is the simplest most brutal way of
        //  discarding any messages we might have sent in the meantime:
        if (--liveness == 0) {
            printf ("W: heartbeat failure, can't reach queue\n");
            printf ("W: reconnecting in %zd msec...\n", interval);
            zclock_sleep (interval);

            if (interval < INTERVAL_MAX)
                interval *= 2;
            zsock_destroy(&worker);
            worker = s_worker_socket ();
            liveness = HEARTBEAT_LIVENESS;
        }
        //  Send heartbeat to queue if it's time
        if (zclock_time () > heartbeat_at) {
            heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;
            printf ("I: worker heartbeat\n");
            zframe_t *frame = zframe_new (PPP_HEARTBEAT, 1);
            zframe_send (&frame, worker, 0);
        }
    }
}
Example #16
void
benchmark_tree (void *push, void *pub, void *router, unsigned int N_KEYS,
                unsigned int *keys, struct dbkey_rb_t dbkey_rb, int N_THREADS)
{
    //  Clean the page cache first
    system ("./script.sh");

    float stat;
    int64_t diff = zclock_time ();
    unsigned int iter = 0;
    int stop;
    unsigned int counter = 0;

    while (iter < N_KEYS) {
        dbkey_t *dbkey;

        //  Fill the red-black tree until a worker asks for more keys
        int64_t diff2 = zclock_time ();
        while (1) {
            //  Check the router at most once per millisecond
            if (zclock_time () - diff2 > 1) {
                zframe_t *frame = zframe_recv_nowait (router);
                if (frame != NULL) {
                    zframe_destroy (&frame);
                    frame = zframe_recv_nowait (router);
                    //  Guard against a missing second frame before inspecting it
                    if (frame != NULL && zframe_size (frame) == strlen ("m")) {
                        zframe_destroy (&frame);
                        break;
                    }
                    zframe_destroy (&frame);
                }
                diff2 = zclock_time ();
            }
            dbkey = (dbkey_t *) malloc (sizeof (dbkey_t));
            dbkey->key = keys [iter];
            RB_INSERT (dbkey_rb_t, &dbkey_rb, dbkey);
            iter++;
            if (iter == N_KEYS)
                break;
        }
        //  Drain the tree in key order, pushing each key to the workers
        dbkey_t *tr_iter = RB_MIN (dbkey_rb_t, &dbkey_rb);
        while (tr_iter) {
            zframe_t *frame = zframe_new (&(tr_iter->key), 4);
            zframe_send (&frame, push, 0);

            dbkey_t *temp = tr_iter;
            tr_iter = RB_NEXT (dbkey_rb_t, &dbkey_rb, tr_iter);
            RB_REMOVE (dbkey_rb_t, &dbkey_rb, temp);
            free (temp);
        }
    }
    //  Tell the workers to stop
    stop = 1;
    zframe_t *frame = zframe_new (&stop, 4);
    zframe_send (&frame, pub, 0);

    //  Collect the per-thread counters
    iter = 0;
    while (iter < N_THREADS) {
        unsigned int temp;
        zmsg_t *msg = zmsg_recv (router);
        zframe_t *frame = zmsg_unwrap (msg);
        zframe_destroy (&frame);
        frame = zmsg_first (msg);
        if (zframe_size (frame) != strlen ("m")) {
            memcpy (&temp, zframe_data (frame), 4);
            counter = counter + temp;
            iter++;
        }
        zmsg_destroy (&msg);
    }

    printf ("\nkeys processed:%u", counter);
    diff = zclock_time () - diff;
    stat = ((float) counter * 1000) / (float) diff;
    printf ("\nrandom read with an rb_tree:  %f keys per sec\n", stat);
}
Example #17
void
benchmark_notree (void *push, void *pub, void *router, unsigned int N_KEYS,
                  unsigned int *keys, int N_THREADS)
{
    printf ("\nCleaning the pagecache");
    system ("./script.sh");

    printf ("\nstarting random read without an rb_tree");

    int64_t diff = zclock_time ();
    unsigned int iter;
    int stop;
    unsigned int counter = 0;

    //  Push the keys in their original (random) order
    for (iter = 0; iter < N_KEYS; iter++) {
        unsigned int key = keys [iter];
        zframe_t *frame = zframe_new (&key, 4);
        zframe_send (&frame, push, 0);
    }
    //  Tell the workers to stop
    stop = 1;
    zframe_t *frame = zframe_new (&stop, 4);
    zframe_send (&frame, pub, 0);

    //  Collect the per-thread counters
    iter = 0;
    while (iter < N_THREADS) {
        unsigned int temp;
        zmsg_t *msg = zmsg_recv (router);
        zframe_t *frame = zmsg_unwrap (msg);
        zframe_destroy (&frame);
        frame = zmsg_first (msg);
        if (zframe_size (frame) != strlen ("m")) {
            memcpy (&temp, zframe_data (frame), 4);
            counter = counter + temp;
            iter++;
        }
        zmsg_destroy (&msg);
    }

    printf ("\nkeys processed:%u", counter);
    diff = zclock_time () - diff;
    float stat = ((float) counter * 1000) / (float) diff;
    printf ("\nrandom read without an rb_tree:  %f keys per sec\n", stat);
}
Example #18
File: bstar.c Project: rryqszq4/MadzMQ
static int
s_execute_fsm (bstar_t *self)
{
    int rc = 0;
    //  Primary server is waiting for peer to connect
    //  Accepts CLIENT_REQUEST events in this state
    if (self->state == STATE_PRIMARY) {
        if (self->event == PEER_BACKUP) {
            zclock_log ("I: connected to backup (passive), ready as active");
            self->state = STATE_ACTIVE;
            if (self->active_fn)
                (self->active_fn) (self->loop, NULL, self->active_arg);
        }
        else
        if (self->event == PEER_ACTIVE) {
            zclock_log ("I: connected to backup (active), ready as passive");
            self->state = STATE_PASSIVE;
            if (self->passive_fn)
                (self->passive_fn) (self->loop, NULL, self->passive_arg);
        }
        else
        if (self->event == CLIENT_REQUEST) {
            // Allow client requests to turn us into the active if we've
            // waited sufficiently long to believe the backup is not
            // currently acting as active (i.e., after a failover)
            assert (self->peer_expiry > 0);
            if (zclock_time () >= self->peer_expiry) {
                zclock_log ("I: request from client, ready as active");
                self->state = STATE_ACTIVE;
                if (self->active_fn)
                    (self->active_fn) (self->loop, NULL, self->active_arg);
            } else
                // Don't respond to clients yet - it's possible we're
                // performing a failback and the backup is currently active
                rc = -1;
        }
    }
    else
    //  Backup server is waiting for peer to connect
    //  Rejects CLIENT_REQUEST events in this state
    if (self->state == STATE_BACKUP) {
        if (self->event == PEER_ACTIVE) {
            zclock_log ("I: connected to primary (active), ready as passive");
            self->state = STATE_PASSIVE;
            if (self->passive_fn)
                (self->passive_fn) (self->loop, NULL, self->passive_arg);
        }
        else
        if (self->event == CLIENT_REQUEST)
            rc = -1;
    }
    else
    //  Server is active
    //  Accepts CLIENT_REQUEST events in this state
    //  The only way out of ACTIVE is death
    if (self->state == STATE_ACTIVE) {
        if (self->event == PEER_ACTIVE) {
            //  Two actives would mean split-brain
            zclock_log ("E: fatal error - dual actives, aborting");
            rc = -1;
        }
    }
    else
    //  Server is passive
    //  CLIENT_REQUEST events can trigger failover if peer looks dead
    if (self->state == STATE_PASSIVE) {
        if (self->event == PEER_PRIMARY) {
            //  Peer is restarting - become active, peer will go passive
            zclock_log ("I: primary (passive) is restarting, ready as active");
            self->state = STATE_ACTIVE;
        }
        else
        if (self->event == PEER_BACKUP) {
            //  Peer is restarting - become active, peer will go passive
            zclock_log ("I: backup (passive) is restarting, ready as active");
            self->state = STATE_ACTIVE;
        }
        else
        if (self->event == PEER_PASSIVE) {
            //  Two passives would mean cluster would be non-responsive
            zclock_log ("E: fatal error - dual passives, aborting");
            rc = -1;
        }
        else
        if (self->event == CLIENT_REQUEST) {
            //  Peer becomes active if timeout has passed
            //  It's the client request that triggers the failover
            assert (self->peer_expiry > 0);
            if (zclock_time () >= self->peer_expiry) {
                //  If peer is dead, switch to the active state
                zclock_log ("I: failover successful, ready as active");
                self->state = STATE_ACTIVE;
            }
            else
                //  If peer is alive, reject connections
                rc = -1;
        }
        //  Call state change handler if necessary
        if (self->state == STATE_ACTIVE && self->active_fn)
            (self->active_fn) (self->loop, NULL, self->active_arg);
    }
    return rc;
}
Example #19
zmsg_t *
mdp_worker_recv (mdp_worker_t *self, zframe_t **reply_to_p)
{
    while (TRUE) {
        zmq_pollitem_t items [] = {
            { self->worker,  0, ZMQ_POLLIN, 0 } };
        int rc = zmq_poll (items, 1, self->heartbeat * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Interrupted

        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *msg = zmsg_recv (self->worker);
            if (!msg)
                break;          //  Interrupted
            if (self->verbose) {
                zclock_log ("I: received message from broker:");
                zmsg_dump (msg);
            }
            self->liveness = HEARTBEAT_LIVENESS;

            //  Don't try to handle errors, just assert noisily
            assert (zmsg_size (msg) >= 3);

            zframe_t *empty = zmsg_pop (msg);
            assert (zframe_streq (empty, ""));
            zframe_destroy (&empty);

            zframe_t *header = zmsg_pop (msg);
            assert (zframe_streq (header, MDPW_WORKER));
            zframe_destroy (&header);

            zframe_t *command = zmsg_pop (msg);
            if (zframe_streq (command, MDPW_REQUEST)) {
                //  We should pop and save as many addresses as there are
                //  up to a null part, but for now, just save one...
                zframe_t *reply_to = zmsg_unwrap (msg);
                if (reply_to_p)
                    *reply_to_p = reply_to;
                else
                    zframe_destroy (&reply_to);

                zframe_destroy (&command);
                //  Here is where we actually have a message to process; we
                //  return it to the caller application
                return msg;     //  We have a request to process
            }
            else
            if (zframe_streq (command, MDPW_HEARTBEAT))
                ;               //  Do nothing for heartbeats
            else
            if (zframe_streq (command, MDPW_DISCONNECT))
                s_mdp_worker_connect_to_broker (self);
            else {
                zclock_log ("E: invalid input message");
                zmsg_dump (msg);
            }
            zframe_destroy (&command);
            zmsg_destroy (&msg);
        }
        else
        if (--self->liveness == 0) {
            if (self->verbose)
                zclock_log ("W: disconnected from broker - retrying...");
            zclock_sleep (self->reconnect);
            s_mdp_worker_connect_to_broker (self);
        }
        //  Send HEARTBEAT if it's time
        if (zclock_time () > self->heartbeat_at) {
            s_mdp_worker_send_to_broker (self, MDPW_HEARTBEAT, NULL, NULL);
            self->heartbeat_at = zclock_time () + self->heartbeat;
        }
    }
    if (zctx_interrupted)
        printf ("W: interrupt received, killing worker...\n");
    return NULL;
}
Example #20
bool client_state_is_time_to_heartbeat(client_state* state)
{
    //printf("HEARTBEAT %lld, %lld \n",zclock_time(),state->heartbeat_interval);
    return zclock_time() > state->heartbeat_time;
}
Example #21
void client_state_set_heartbeat_time(client_state* state)
{
    state->heartbeat_time =  zclock_time () + state->heartbeat_interval;     
}
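A sketch of how these two helpers are typically used from a poll loop; the heartbeat send itself is illustrative, since only the helpers are shown:

//  After each poll iteration: emit a heartbeat when due, then re-arm
if (client_state_is_time_to_heartbeat (state)) {
    //  ... send a HEARTBEAT frame to the peer here ...
    client_state_set_heartbeat_time (state);
}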
Example #22
int rrwrk_start(rrwrk_t *self, wrk_task_fn *cb)
{
    self->heartbeat_at = zclock_time() + self->heartbeat;
    self->cb = cb;

    //** Start task thread and wait for synchronization signal
    self->pipe = zthread_fork(self->ctx, rrtask_manager_fn, (void *)self);
    assert(self->pipe);
    free(zstr_recv(self->pipe));
    //self->liveness = HEARTBEAT_LIVENESS; //** Don't do reconnect before the first connection established

    while(!zctx_interrupted) {
        zmq_pollitem_t items[] = {{self->worker, 0, ZMQ_POLLIN, 0}, {self->pipe, 0, ZMQ_POLLIN, 0}}; //** Be aware: this must be within while loop!!
        int rc = zmq_poll(items, 2, self->heartbeat * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;	//** Interrupted

        if (items[0].revents & ZMQ_POLLIN) { //** Data from broker is ready
            zmsg_t *msg = zmsg_recv(self->worker);
            if (!msg)
                break; //** Interrupted. Need to do more research to confirm it
            self->liveness = HEARTBEAT_LIVENESS;
            self->last_heartbeat = zclock_time();

            //** Don't try to handle errors, just assert noisily
            assert(zmsg_size(msg) >= 3); //** empty + header + command + ...

            zframe_t *empty = zmsg_pop(msg);
            assert(zframe_streq(empty, ""));
            zframe_destroy(&empty);

            zframe_t *header = zmsg_pop(msg);
            assert(zframe_streq(header, RR_WORKER));
            zframe_destroy(&header);

            zframe_t *command = zmsg_pop(msg);
            if (zframe_streq(command, RRWRK_REQUEST)) {
                assert(zmsg_size(msg) == 3); //** UUID + SOURCE + INPUT DATA
                self->total_received++;
                zmq_pollitem_t item = {self->pipe, 0, ZMQ_POLLOUT, 0};
                int rc = zmq_poll(&item, 1, 0);
                assert(rc != -1);
                if (item.revents & ZMQ_POLLOUT) { //** Dispatch it if worker is ready
                    //** Send task to task manager
                    zmsg_send(&msg, self->pipe);
                } else { //** Otherwise put it on waiting list
                    zlist_push(self->data, zmsg_dup(msg));
                }
            } else if (zframe_streq(command, RRWRK_HEARTBEAT)) {
                ; //** Do nothing for heartbeat
            } else if (zframe_streq(command, RRWRK_DISCONNECT)) {
                rrwrk_connect_to_broker(self);
            } else {
                log_printf(0, "E: invalid input message\n");
            }

            zframe_destroy(&command);
            zmsg_destroy(&msg);
        } else if ((zclock_time() - self->heartbeat) > self->last_heartbeat) {
            if(--self->liveness == 0) {
                rrwrk_print(self);
                log_printf(0, "W: Disconnected from broker - retrying ...\n");
                rrwrk_print(self);
                zclock_sleep(self->reconnect);
                rrwrk_connect_to_broker(self);
            }
        }

        if (items[1].revents & ZMQ_POLLIN) { //** Data from pipe is ready
            zmsg_t *output = zmsg_recv(self->pipe);
            assert(zmsg_size(output) == 3); //** UUID + SOURCE + DATA

            self->total_finished++;

            zmsg_t *reply = zmsg_new();
            //** Adds UUID + SOURCE to reply message
            zframe_t *uuid = zframe_dup(zmsg_first(output));
            zframe_t *source = zframe_dup(zmsg_next(output));
            zmsg_add(reply, uuid);
            zmsg_add(reply, source);

            //** Sends reply to broker
            rrwrk_send_to_broker(self, RRWRK_REPLY, reply);

            //** Sends output to sinker
            //zmsg_send(&output, self->sender);
            rrwrk_send_to_sinker(self, RRWRK_OUTPUT, output);

            zmsg_destroy(&output);
            zmsg_destroy(&reply);
        }

        //** Dispatch task if any
        while (true) {
            zmq_pollitem_t pipe_write = {self->pipe, 0, ZMQ_POLLOUT, 0};
            zmq_poll(&pipe_write, 1, 0);
            if ((pipe_write.revents & ZMQ_POLLOUT) && (zlist_size(self->data))) {
                zmsg_t* data = (zmsg_t *)zlist_pop(self->data);
                zmsg_send(&data, self->pipe);
                printf("Dispatched one task.\n");
            } else
                break;
        }

        //** Send HEARTBEAT if it's time
        if (zclock_time() > self->heartbeat_at) {
            rrwrk_print(self);
            rrwrk_send_to_broker(self, RRWRK_HEARTBEAT, NULL);
            self->heartbeat_at = zclock_time() + self->heartbeat;
        }

    }

    if (zctx_interrupted)
        log_printf(0, "W: interrupt received. Killing worker...\n");

    return -1;
}
Example #23
int main(int argc, char * const *argv)
{
    int rc = 0;
    process_arguments(argc, argv);

    setvbuf(stdout, NULL, _IOLBF, 0);
    setvbuf(stderr, NULL, _IOLBF, 0);

    if (!quiet)
        printf("[I] started %s\n"
               "[I] sub-port:    %d\n"
               "[I] push-port:   %d\n"
               "[I] io-threads:  %lu\n"
               "[I] rcv-hwm:  %d\n"
               "[I] snd-hwm:  %d\n"
               , argv[0], pull_port, pub_port, io_threads, rcv_hwm, snd_hwm);

    // load config
    config_file_exists = zsys_file_exists(config_file_name);
    if (config_file_exists) {
        config_file_init();
        config = zconfig_load((char*)config_file_name);
    }

    // set global config
    zsys_init();
    zsys_set_rcvhwm(10000);
    zsys_set_sndhwm(10000);
    zsys_set_pipehwm(1000);
    zsys_set_linger(100);
    zsys_set_io_threads(io_threads);

    // create socket to receive messages on
    zsock_t *receiver = zsock_new(ZMQ_SUB);
    assert_x(receiver != NULL, "sub socket creation failed", __FILE__, __LINE__);
    zsock_set_rcvhwm(receiver, rcv_hwm);

    // connect to upstream hosts
    char* host = zlist_first(hosts);
    while (host) {
        if (!quiet)
            printf("[I] connecting to: %s\n", host);
        rc = zsock_connect(receiver, "%s", host);
        assert_x(rc == 0, "sub socket connect failed", __FILE__, __LINE__);
        host = zlist_next(hosts);
    }
    tracker = device_tracker_new(hosts, receiver);

    // create socket for publishing
    zsock_t *publisher = zsock_new(ZMQ_PUSH);
    assert_x(publisher != NULL, "pub socket creation failed", __FILE__, __LINE__);
    zsock_set_sndhwm(publisher, snd_hwm);

    rc = zsock_bind(publisher, "tcp://%s:%d", "*", pub_port);
    assert_x(rc == pub_port, "pub socket bind failed", __FILE__, __LINE__);

    // create compressor sockets
    zsock_t *compressor_input = zsock_new(ZMQ_PUSH);
    assert_x(compressor_input != NULL, "compressor input socket creation failed", __FILE__, __LINE__);
    rc = zsock_bind(compressor_input, "inproc://compressor-input");
    assert_x(rc==0, "compressor input socket bind failed", __FILE__, __LINE__);

    zsock_t *compressor_output = zsock_new(ZMQ_PULL);
    assert_x(compressor_output != NULL, "compressor output socket creation failed", __FILE__, __LINE__);
    rc = zsock_bind(compressor_output, "inproc://compressor-output");
    assert_x(rc==0, "compressor output socket bind failed", __FILE__, __LINE__);

    // create compressor agents
    zactor_t *compressors[MAX_COMPRESSORS];
    for (size_t i = 0; i < num_compressors; i++)
        compressors[i] = message_decompressor_new(i);

    // set up event loop
    zloop_t *loop = zloop_new();
    assert(loop);
    zloop_set_verbose(loop, 0);

    // calculate statistics every 1000 ms
    int timer_id = zloop_timer(loop, 1000, 0, timer_event, NULL);
    assert(timer_id != -1);

    // set up state shared by the message handlers
    publisher_state_t publisher_state = {
        .receiver = zsock_resolve(receiver),
        .publisher = zsock_resolve(publisher),
        .compressor_input = zsock_resolve(compressor_input),
        .compressor_output = zsock_resolve(compressor_output),
    };

    // setup handler for compression results
    rc = zloop_reader(loop, compressor_output, read_zmq_message_and_forward, &publisher_state);
    assert(rc == 0);
    zloop_reader_set_tolerant(loop, compressor_output);

    // setup handler for messages incoming from the outside or rabbit_listener
    rc = zloop_reader(loop, receiver, read_zmq_message_and_forward, &publisher_state);
    assert(rc == 0);
    zloop_reader_set_tolerant(loop, receiver);

    // initialize clock
    global_time = zclock_time();

    // setup subscriptions
    if (subscriptions == NULL || zlist_size(subscriptions) == 0) {
        if (!quiet)
            printf("[I] subscribing to all log messages\n");
        zsock_set_subscribe(receiver, "");
    } else {
        char *subscription = zlist_first(subscriptions);
        while (subscription) {
            if (!quiet)
                printf("[I] subscribing to %s\n", subscription);
            zsock_set_subscribe(receiver, subscription);
            subscription = zlist_next(subscriptions);
        }
        zsock_set_subscribe(receiver, "heartbeat");
    }

    // run the loop
    if (!zsys_interrupted) {
        if (verbose)
            printf("[I] starting main event loop\n");
        bool should_continue_to_run = getenv("CPUPROFILE") != NULL;
        do {
            rc = zloop_start(loop);
            should_continue_to_run &= errno == EINTR && !zsys_interrupted;
            log_zmq_error(rc, __FILE__, __LINE__);
        } while (should_continue_to_run);
        if (verbose)
            printf("[I] main event zloop terminated with return code %d\n", rc);
    }

    zloop_destroy(&loop);
    assert(loop == NULL);

    if (!quiet) {
        printf("[I] received %zu messages\n", received_messages_count);
        printf("[I] shutting down\n");
    }

    zlist_destroy(&hosts);
    zlist_destroy(&subscriptions);
    zsock_destroy(&receiver);
    zsock_destroy(&publisher);
    zsock_destroy(&compressor_input);
    zsock_destroy(&compressor_output);
    device_tracker_destroy(&tracker);
    for (size_t i = 0; i < num_compressors; i++)
        zactor_destroy(&compressors[i]);
    zsys_shutdown();

    if (!quiet)
        printf("[I] terminated\n");

    return rc;
}
Example #24
int
main ()
{
    unsigned int N_KEYS;
    unsigned int N_BATCH;

    printf ("\n number of keys:");
    scanf ("%u", &N_KEYS);

    printf ("\n batch size:");
    scanf ("%u", &N_BATCH);

    //  Initialize random generator
    tinymt32_t tinymt32;
    tinymt32_init (&tinymt32, 0);

    unsigned int iter;
    unsigned int sec_iter;

    //  Initialize database options
    char *errptr = NULL;
    leveldb_options_t *options = leveldb_options_create ();
    leveldb_options_set_create_if_missing (options, 1);
    leveldb_options_set_write_buffer_size (options, 120000000);
    leveldb_options_set_max_open_files (options, 800000);

    //  Open the databases
    leveldb_t *db_helper =
        leveldb_open (options, "/mnt/database/database_helper", &errptr);
    leveldb_t *db = leveldb_open (options, "/mnt/database/database", &errptr);

    leveldb_readoptions_t *readoptions = leveldb_readoptions_create ();
    leveldb_writeoptions_t *writeoptions = leveldb_writeoptions_create ();
    leveldb_writebatch_t *batch = leveldb_writebatch_create ();

    int64_t diff = zclock_time ();

    //  Write random key-value pairs into database_helper in batches
    iter = 0;
    while (iter < N_KEYS) {
        sec_iter = 0;
        while ((iter < N_KEYS) && (sec_iter < N_BATCH)) {
            unsigned int key = tinymt32_generate_uint32 (&tinymt32);
            unsigned int val = tinymt32_generate_uint32 (&tinymt32);
            leveldb_writebatch_put (batch,
                (const char *) &key, sizeof (int),
                (const char *) &val, sizeof (int));
            sec_iter++;
            iter++;
        }
        leveldb_write (db_helper, writeoptions, batch, &errptr);
        if (errptr != NULL)
            printf ("\n%s", errptr);
        assert (errptr == NULL);
        leveldb_writebatch_clear (batch);
    }
    leveldb_writebatch_destroy (batch);

    diff = zclock_time () - diff;
    printf ("\nrandom write:  %d", (int) diff);

    //  Copy the keys sequentially (in sorted order) into db
    diff = zclock_time ();
    leveldb_iterator_t *liter = leveldb_create_iterator (db_helper, readoptions);
    leveldb_iter_seek_to_first (liter);
    while (leveldb_iter_valid (liter)) {
        size_t length;
        char key [4];
        memcpy (key, leveldb_iter_key (liter, &length), 4);
        char val [4];
        memcpy (val, leveldb_iter_value (liter, &length), 4);

        leveldb_iter_get_error (liter, &errptr);
        assert (errptr == NULL);
        leveldb_put (db, writeoptions,
            (const char *) &key, sizeof (int),
            (const char *) &val, sizeof (int), &errptr);
        assert (errptr == NULL);
        leveldb_iter_next (liter);
    }
    diff = zclock_time () - diff;

    leveldb_close (db);
    leveldb_close (db_helper);
    return 0;
}
Example #25
File: ppqueue.c Project: Carl4/zguide
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "tcp://*:5555");    //  For clients
    zsocket_bind (backend,  "tcp://*:5556");    //  For workers

    //  List of available workers
    zlist_t *workers = zlist_new ();

    //  Send out heartbeats at regular intervals
    uint64_t heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;

    while (1) {
        zmq_pollitem_t items [] = {
            { backend,  0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1,
            HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            //  Use worker address for LRU routing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          //  Interrupted

            //  Any sign of life from worker means it's ready
            zframe_t *address = zmsg_unwrap (msg);
            worker_t *worker = s_worker_new (address);
            s_worker_ready (worker, workers);

            //  Validate control message, or return reply to client
            if (zmsg_size (msg) == 1) {
                zframe_t *frame = zmsg_first (msg);
                if (memcmp (zframe_data (frame), PPP_READY, 1)
                &&  memcmp (zframe_data (frame), PPP_HEARTBEAT, 1)) {
                    printf ("E: invalid message from worker");
                    zmsg_dump (msg);
                }
                zmsg_destroy (&msg);
            }
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Now get next client request, route to next worker
            zmsg_t *msg = zmsg_recv (frontend);
            if (!msg)
                break;          //  Interrupted
            zmsg_push (msg, s_workers_next (workers));
            zmsg_send (&msg, backend);
        }

        //  .split handle heartbeating
        //  We handle heartbeating after any socket activity. First we send
        //  heartbeats to any idle workers if it's time. Then we purge any
        //  dead workers:
        
        if (zclock_time () >= heartbeat_at) {
            worker_t *worker = (worker_t *) zlist_first (workers);
            while (worker) {
                zframe_send (&worker->address, backend,
                             ZFRAME_REUSE + ZFRAME_MORE);
                zframe_t *frame = zframe_new (PPP_HEARTBEAT, 1);
                zframe_send (&frame, backend, 0);
                worker = (worker_t *) zlist_next (workers);
            }
            heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL;
        }
        s_workers_purge (workers);
    }

    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        worker_t *worker = (worker_t *) zlist_pop (workers);
        s_worker_destroy (&worker);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
}
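The queue above leans on worker_t and the s_worker_* helpers defined elsewhere in ppqueue.c. A minimal sketch of what they look like in the Paranoid Pirate pattern follows; the expiry math assumes HEARTBEAT_LIVENESS is defined next to HEARTBEAT_INTERVAL, and field names are illustrative.

//  Sketch of the helpers the queue assumes: each worker carries its
//  routing address and an expiry deadline refreshed on every sign of life.
typedef struct {
    zframe_t *address;          //  Routing address of worker
    int64_t expiry;             //  Expires at this time
} worker_t;

static worker_t *
s_worker_new (zframe_t *address)
{
    worker_t *self = (worker_t *) zmalloc (sizeof (worker_t));
    self->address = address;
    self->expiry = zclock_time () + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS;
    return self;
}

static void
s_worker_destroy (worker_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        worker_t *self = *self_p;
        zframe_destroy (&self->address);
        free (self);
        *self_p = NULL;
    }
}

//  Workers are kept oldest-expiry-first, so purging can stop at the
//  first worker that is still alive
static void
s_workers_purge (zlist_t *workers)
{
    worker_t *worker = (worker_t *) zlist_first (workers);
    while (worker && zclock_time () >= worker->expiry) {
        zlist_remove (workers, worker);
        s_worker_destroy (&worker);
        worker = (worker_t *) zlist_first (workers);
    }
}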
Example #26
0
int
main (int argc, char *argv [])
{
    //  Get number of remote nodes to simulate, default 100
    //  If we run multiple zre_perf_remote on multiple machines,
    //  max_node must be sum of all the remote node counts.
    int max_node = 100;
    int max_message = 10000;
    int nbr_node = 0;
    int nbr_hello_response = 0;
    int nbr_message = 0;
    int nbr_message_response = 0;

    if (argc > 1)
        max_node = atoi (argv [1]);
    if (argc > 2)
        max_message = atoi (argv [2]);

    zre_node_t *node = zre_node_new ();
    zre_node_join (node, "GLOBAL");

    int64_t start = zclock_time ();
    int64_t elapse;

    char **peers = zmalloc (sizeof (char *) * max_node);

    while (true) {
        zmsg_t *incoming = zre_node_recv (node);
        if (!incoming)
            break;              //  Interrupted

        //  If new peer, say hello to it and wait for it to answer us
        char *event = zmsg_popstr (incoming);
        if (streq (event, "ENTER")) {
            char *peer = zmsg_popstr (incoming);
            peers[nbr_node++] = peer;

            if (nbr_node == max_node) {
                // saw ENTER from all the remote nodes
                elapse = zclock_time () - start;
                printf ("Took %ld ms to coordinate with all remote nodes\n", (long) elapse);
            }
        }
        else
        if (streq (event, "WHISPER")) {
            char *peer = zmsg_popstr (incoming);
            char *cookie = zmsg_popstr (incoming);

            if (streq (cookie, "R:HELLO")) {
                if (++nbr_hello_response == max_node) {
                    // got HELLO response from all the remote nodes
                    elapse = zclock_time () - start;
                    printf ("Took %ld ms to get greeting from all remote nodes\n", (long) elapse);
                }
            }
            free (peer);
            free (cookie);
        }
        free (event);
        zmsg_destroy (&incoming);

        if (nbr_node == max_node && nbr_hello_response == max_node)
            break;
    }

    zmq_pollitem_t pollitems [] = {
        { zre_node_handle (node), 0, ZMQ_POLLIN, 0 }
    };

    //  send WHISPER message
    start = zclock_time ();
    for (nbr_message = 0; nbr_message < max_message; nbr_message++) {
        zmsg_t *outgoing = zmsg_new ();
        zmsg_addstr (outgoing, peers [nbr_message % max_node]);
        zmsg_addstr (outgoing, "S:WHISPER");
        zre_node_whisper (node, &outgoing);

        while (zmq_poll (pollitems, 1, 0) > 0) {
            if (s_node_recv (node, "WHISPER", "R:WHISPER"))
                nbr_message_response++;
        }
    }

    while (nbr_message_response < max_message)
        if (s_node_recv (node, "WHISPER", "R:WHISPER"))
            nbr_message_response++;

    // got WHISPER response from all the remote nodes
    elapse = zclock_time () - start;
    printf ("Took %ld ms to send/receive %d messages. %.2f msg/s\n",
            (long) elapse, max_message, (float) max_message * 1000 / elapse);

    //  send SPOUT message
    start = zclock_time ();
    nbr_message = 0;
    nbr_message_response = 0;

    max_message = max_message / max_node;

    for (nbr_message = 0; nbr_message < max_message; nbr_message++) {
        zmsg_t *outgoing = zmsg_new ();
        zmsg_addstr (outgoing, "GLOBAL");
        zmsg_addstr (outgoing, "S:SHOUT");
        zre_node_shout (node, &outgoing);

        while (zmq_poll (pollitems, 1, 0) > 0) {
            if (s_node_recv (node, "SHOUT", "R:SHOUT"))
                nbr_message_response++;
        }
    }

    while (nbr_message_response < max_message * max_node)
        if (s_node_recv (node, "SHOUT", "R:SHOUT"))
            nbr_message_response++;

    // got SHOUT response from all the remote nodes
    elapse = zclock_time () - start;
    printf ("Took %ld ms to send %d, recv %d GROUP messages. %.2f msg/s\n",
            (long) elapse, max_message, max_node * max_message,
            (float) max_node * max_message * 1000 / elapse);


    zre_node_destroy (&node);
    for (nbr_node = 0; nbr_node < max_node; nbr_node++) {
        free (peers[nbr_node]);
    }
    free (peers);
    return 0;
}
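The loop depends on an s_node_recv helper that is not shown. Below is a hedged sketch of one plausible shape, assuming it pops a single event off the node and reports whether that event matched the expected type and reply cookie; the real helper in the benchmark may also answer incoming requests.

//  Sketch (hypothetical shape): receive one event from the node and
//  return true if it is `expected` carrying `expected_cookie` as payload.
static bool
s_node_recv (zre_node_t *node, char *expected, char *expected_cookie)
{
    bool matched = false;
    zmsg_t *incoming = zre_node_recv (node);
    if (!incoming)
        return false;           //  Interrupted

    char *event = zmsg_popstr (incoming);
    if (event && streq (event, expected)) {
        char *peer = zmsg_popstr (incoming);
        if (streq (expected, "SHOUT")) {
            char *group = zmsg_popstr (incoming);   //  SHOUT carries a group
            free (group);
        }
        char *cookie = zmsg_popstr (incoming);
        if (cookie && streq (cookie, expected_cookie))
            matched = true;
        free (peer);
        free (cookie);
    }
    free (event);
    zmsg_destroy (&incoming);
    return matched;
}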
Example #27
0
int main (int argc, char *argv [])
{
    //  Arguments can be either of:
    //      -p  primary server, at tcp://localhost:5001
    //      -b  backup server, at tcp://localhost:5002
    zctx_t *ctx = zctx_new ();
    void *statepub = zsocket_new (ctx, ZMQ_PUB);
    void *statesub = zsocket_new (ctx, ZMQ_SUB);
    zsockopt_set_subscribe (statesub, "");
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    bstar_t fsm = { 0 };

    if (argc == 2 && streq (argv [1], "-p")) {
        printf ("I: Primary master, waiting for backup (slave)\n");
        zsocket_bind (frontend, "tcp://*:5001");
        zsocket_bind (statepub, "tcp://*:5003");
        zsocket_connect (statesub, "tcp://localhost:5004");
        fsm.state = STATE_PRIMARY;
    }
    else
    if (argc == 2 && streq (argv [1], "-b")) {
        printf ("I: Backup slave, waiting for primary (master)\n");
        zsocket_bind (frontend, "tcp://*:5002");
        zsocket_bind (statepub, "tcp://*:5004");
        zsocket_connect (statesub, "tcp://localhost:5003");
        fsm.state = STATE_BACKUP;
    }
    else {
        printf ("Usage: bstarsrv { -p | -b }\n");
        zctx_destroy (&ctx);
        exit (0);
    }
    //  Set timer for next outgoing state message
    int64_t send_state_at = zclock_time () + HEARTBEAT;

    while (!zctx_interrupted) {
        zmq_pollitem_t items [] = {
            { frontend, 0, ZMQ_POLLIN, 0 },
            { statesub, 0, ZMQ_POLLIN, 0 }
        };
        int time_left = (int) ((send_state_at - zclock_time ()));
        if (time_left < 0)
            time_left = 0;
        int rc = zmq_poll (items, 2, time_left * ZMQ_POLL_MSEC);
        if (rc == -1)
            break;              //  Context has been shut down

        if (items [0].revents & ZMQ_POLLIN) {
            //  Have a client request
            zmsg_t *msg = zmsg_recv (frontend);
            fsm.event = CLIENT_REQUEST;
            if (s_state_machine (&fsm) == FALSE)
                //  Answer client by echoing request back
                zmsg_send (&msg, frontend);
            else
                zmsg_destroy (&msg);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Have state from our peer, execute as event
            char *message = zstr_recv (statesub);
            fsm.event = atoi (message);
            free (message);
            if (s_state_machine (&fsm))
                break;          //  Error, so exit
            fsm.peer_expiry = zclock_time () + 2 * HEARTBEAT;
        }
        //  If we timed-out, send state to peer
        if (zclock_time () >= send_state_at) {
            char message [2];
            sprintf (message, "%d", fsm.state);
            zstr_send (statepub, message);
            send_state_at = zclock_time () + HEARTBEAT;
        }
    }
    if (zctx_interrupted)
        printf ("W: interrupted\n");

    //  Shutdown sockets and context
    zctx_destroy (&ctx);
    return 0;
}
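The server relies on bstar_t and s_state_machine from earlier in the same file. For orientation, a sketch of the Binary Star state and event definitions it assumes (typical of the pattern, not a verbatim copy):

//  Sketch: the Binary Star types the server above assumes. States
//  describe this server; events are peer states plus client activity.
typedef enum {
    STATE_PRIMARY = 1,          //  Primary, waiting for peer to connect
    STATE_BACKUP  = 2,          //  Backup, waiting for peer to connect
    STATE_ACTIVE  = 3,          //  Active - accepting client requests
    STATE_PASSIVE = 4           //  Passive - not accepting requests
} state_t;

typedef enum {
    PEER_PRIMARY   = 1,         //  HA peer is pending primary
    PEER_BACKUP    = 2,         //  HA peer is pending backup
    PEER_ACTIVE    = 3,         //  HA peer is active
    PEER_PASSIVE   = 4,         //  HA peer is passive
    CLIENT_REQUEST = 5          //  Client makes request
} event_t;

typedef struct {
    state_t state;              //  Current state
    event_t event;              //  Current event
    int64_t peer_expiry;        //  When peer is considered dead
} bstar_t;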
Example #28
0
static void
node_actor (zsock_t *pipe, void *args)
{
    zyre_t *node = zyre_new (NULL);
    if (!node)
        return;                 //  Could not create new node
    zyre_set_verbose (node);

    zyre_set_endpoint (node, "inproc://%s", (char *) args);
    free (args);

    //  Connect to test hub
    zyre_gossip_connect (node, "inproc://zyre-hub");
    zyre_start (node);
    zsock_signal (pipe, 0);     //  Signal "ready" to caller

    int counter = 0;
    char *to_peer = NULL;        //  If either of these is set,
    char *to_group = NULL;       //    we send a message
    char *cookie = NULL;

    zpoller_t *poller = zpoller_new (pipe, zyre_socket (node), NULL);
    int64_t trigger = zclock_time () + 1000;
    while (true) {
        void *which = zpoller_wait (poller, randof (1000));
        if (!which)
            break;              //  Interrupted

        //  $TERM from parent means exit; anything else is breach of
        //  contract so we should assert
        if (which == pipe) {
            char *command = zstr_recv (pipe);
            assert (streq (command, "$TERM"));
            zstr_free (&command);
            break;              //  Finished
        }
        //  Process an event from node
        if (which == zyre_socket (node)) {
            zmsg_t *incoming = zyre_recv (node);
            if (!incoming)
                break;          //  Interrupted

            char *event = zmsg_popstr (incoming);
            char *peer = zmsg_popstr (incoming);
            char *name = zmsg_popstr (incoming);
            if (streq (event, "ENTER"))
                //  Always say hello to new peer
                to_peer = strdup (peer);
            else
            if (streq (event, "EXIT"))
                //  Always try to talk to the departed peer
                to_peer = strdup (peer);
            else
            if (streq (event, "WHISPER")) {
                //  Send back response 1/2 the time
                if (randof (2) == 0) {
                    to_peer = strdup (peer);
                    cookie = zmsg_popstr (incoming);
                }
            }
            else
            if (streq (event, "SHOUT")) {
                to_peer = strdup (peer);
                to_group = zmsg_popstr (incoming);
                cookie = zmsg_popstr (incoming);
                //  Send peer response 1/3rd the time
                if (randof (3) > 0) {
                    free (to_peer);
                    to_peer = NULL;
                }
                //  Send group response 1/3rd the time
                if (randof (3) > 0) {
                    free (to_group);
                    to_group = NULL;
                }
            }
            else
            if (streq (event, "JOIN")) {
                char *group = zmsg_popstr (incoming);
                printf ("I: %s joined %s\n", name, group);
                free (group);
            }
            else
            if (streq (event, "LEAVE")) {
                char *group = zmsg_popstr (incoming);
                printf ("I: %s left %s\n", name, group);
                free (group);
            }
            free (event);
            free (peer);
            free (name);
            zmsg_destroy (&incoming);

            //  Send outgoing messages if needed
            if (to_peer) {
                zyre_whispers (node, to_peer, "%d", counter++);
                free (to_peer);
                to_peer = NULL;
            }
            if (to_group) {
                zyre_shouts (node, to_group, "%d", counter++);
                free (to_group);
                to_group = NULL;
            }
            if (cookie) {
                free (cookie);
                cookie = NULL;
            }
        }
        if (zclock_time () >= trigger) {
            trigger = zclock_time () + 1000;
            char group [10];
            sprintf (group, "GROUP%03d", randof (MAX_GROUP));
            if (randof (4) == 0)
                zyre_join (node, group);
            else
            if (randof (3) == 0)
                zyre_leave (node, group);
        }
    }
    zpoller_destroy (&poller);
    zyre_destroy (&node);
}
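node_actor matches CZMQ's zactor_fn signature, so a test harness would plausibly start and stop a node like this (the endpoint name string is hypothetical):

//  Sketch: driving node_actor through the CZMQ actor API. zactor_new
//  runs the function in its own thread and waits for the ready signal;
//  zactor_destroy sends "$TERM" over the pipe, which the actor's
//  command handler above treats as the cue to exit.
zactor_t *actor = zactor_new (node_actor, strdup ("node-001"));
assert (actor);
zclock_sleep (5000);            //  Let the node join, shout, and whisper
zactor_destroy (&actor);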
Example #29
0
int main(void){
  zctx_t *ctx=zctx_new();
  void *worker=_worker_socket(ctx);

  size_t liveness=HEARTBEAT_LIVENESS;
  size_t interval=INTERVAL_INIT;

  uint64_t heartbeat_at=zclock_time()+HEARTBEAT_INTERVAL;

  srandom((unsigned)time(NULL));
  int cycles=0;
  while (true){
    zmq_pollitem_t items[]={{worker, 0, ZMQ_POLLIN, 0}};
    int rc=zmq_poll(items, 1, HEARTBEAT_INTERVAL*ZMQ_POLL_MSEC);
    if (rc==-1){
      break;
    }

    if (items[0].revents & ZMQ_POLLIN){
      zmsg_t *msg=zmsg_recv(worker);
      if (!msg){
        break;
      }

      if (zmsg_size(msg)==3){
        ++cycles;
        if (cycles>3 && randof(5)==0){
          debug_log("I: simulating a crash\n");
          zmsg_destroy(&msg);
          break;
        } else if (cycles>3 && randof(5)==0){
          debug_log("I: simulating CPU overload\n");
          sleep(3);
          if (zctx_interrupted){
            break;
          }
        } else{
          debug_log("I: normal reply\n");
          zmsg_send(&msg, worker);
          sleep(1);
          if (zctx_interrupted){
            break;
          }
        }
      } else if (zmsg_size(msg)==1){
        zframe_t *frame=zmsg_first(msg);
        if (memcmp(zframe_data(frame), PPP_HEARTBEAT, 1)==0){
          liveness=HEARTBEAT_LIVENESS;
        } else{
          debug_log(ERROR_COLOR"E: invalid message\n"NORMAL_COLOR);
          zmsg_dump(msg);
        }
        zmsg_destroy(&msg);
      } else{
        debug_log(ERROR_COLOR"E: invalid message\n"NORMAL_COLOR);
        zmsg_dump(msg);
      }
      interval=INTERVAL_INIT;
    } else if (--liveness==0){
      debug_log(WARN_COLOR"W: heartbeat failure, can't reach queue\n"
                NORMAL_COLOR);
      debug_log(WARN_COLOR"W: reconnecting in %zu msec"STR_ELLIPSIS"\n"
                NORMAL_COLOR, interval);
      zclock_sleep(interval);

      if (interval<INTERVAL_MAX){
        interval*=2;
      }

      zsocket_destroy(ctx, worker);
      worker=_worker_socket(ctx);
      liveness=HEARTBEAT_LIVENESS;
    }

    if (zclock_time()>heartbeat_at){
      heartbeat_at=zclock_time()+HEARTBEAT_INTERVAL;
      debug_log("I: worker heartbeat\n");
      zframe_t *frame=zframe_new(PPP_HEARTBEAT, 1);
      zframe_send(&frame, worker, 0);
    }
  }
  
  zctx_destroy(&ctx);
  return 0;
}
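The _worker_socket helper is not shown; in the Paranoid Pirate worker it creates a DEALER socket, connects to the queue, and announces readiness. A minimal sketch under that assumption (the endpoint is a guess):

//  Sketch of the assumed helper: connect a fresh DEALER socket to the
//  queue and send a one-byte PPP_READY frame so the queue enlists us.
static void *
_worker_socket (zctx_t *ctx)
{
    void *worker = zsocket_new (ctx, ZMQ_DEALER);
    zsocket_connect (worker, "tcp://localhost:5556");   //  Assumed endpoint

    //  Tell the queue we're ready for work
    zframe_t *frame = zframe_new (PPP_READY, 1);
    zframe_send (&frame, worker, 0);
    return worker;
}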
Example #30
0
int
zloop_start (zloop_t *self)
{
    assert (self);
    int rc = 0;

    //  Recalculate all timers now
    s_timer_t *timer = (s_timer_t *) zlist_first (self->timers);
    while (timer) {
        timer->when = timer->delay + zclock_time ();
        timer = (s_timer_t *) zlist_next (self->timers);
    }
    //  Main reactor loop
    while (!zsys_interrupted) {
        if (self->need_rebuild) {
            //  If s_rebuild_pollset() fails, break out of the loop and
            //  return its error
            rc = s_rebuild_pollset (self);
            if (rc)
                break;
        }
        rc = zmq_poll (self->pollset, (int) self->poll_size,
                       s_tickless_timer (self) * ZMQ_POLL_MSEC);
        if (rc == -1 || zsys_interrupted) {
            if (self->verbose)
                zsys_debug ("zloop: interrupted (%d) - %s", rc,
                            zmq_strerror (zmq_errno ()));
            rc = 0;
            break;              //  Context has been shut down
        }
        //  Handle any timers that have now expired
        timer = (s_timer_t *) zlist_first (self->timers);
        while (timer) {
            if (zclock_time () >= timer->when && timer->when != -1) {
                if (self->verbose)
                    zsys_debug ("zloop: call timer id=%d handler", timer->timer_id);
                rc = timer->handler (self, timer->timer_id, timer->arg);
                if (rc == -1)
                    break;      //  Timer handler signaled break
                if (timer->times && --timer->times == 0) {
                    zlist_remove (self->timers, timer);
                    free (timer);
                }
                else
                    timer->when = timer->delay + zclock_time ();
            }
            timer = (s_timer_t *) zlist_next (self->timers);
        }
        //  Handle any readers and pollers that are ready
        size_t item_nbr;
        for (item_nbr = 0; item_nbr < self->poll_size && rc >= 0; item_nbr++) {
            s_reader_t *reader = &self->readact [item_nbr];
            if (reader->handler) {
                if ((self->pollset [item_nbr].revents & ZMQ_POLLERR)
                && !reader->tolerant) {
                    if (self->verbose)
                        zsys_warning ("zloop: can't read %s socket: %s",
                            zsock_type_str (reader->sock),
                            zmq_strerror (zmq_errno ()));
                    //  Give handler one chance to handle error, then kill
                    //  reader because it'll disrupt the reactor otherwise.
                    if (reader->errors++) {
                        zloop_reader_end (self, reader->sock);
                        self->pollset [item_nbr].revents = 0;
                    }
                }
                else
                    reader->errors = 0;     //  A non-error happened

                if (self->pollset [item_nbr].revents) {
                    if (self->verbose)
                        zsys_debug ("zloop: call %s socket handler",
                            zsock_type_str (reader->sock));
                    rc = reader->handler (self, reader->sock, reader->arg);
                    if (rc == -1 || self->need_rebuild)
                        break;
                }
            }
            else {
                s_poller_t *poller = &self->pollact [item_nbr];
                assert (self->pollset [item_nbr].socket == poller->item.socket);
            
                if ((self->pollset [item_nbr].revents & ZMQ_POLLERR)
                && !poller->tolerant) {
                    if (self->verbose)
                        zsys_warning ("zloop: can't poll %s socket (%p, %d): %s",
                            poller->item.socket?
                                zsocket_type_str (poller->item.socket): "FD",
                            poller->item.socket, poller->item.fd,
                            zmq_strerror (zmq_errno ()));
                    //  Give handler one chance to handle error, then kill
                    //  poller because it'll disrupt the reactor otherwise.
                    if (poller->errors++) {
                        zloop_poller_end (self, &poller->item);
                        self->pollset [item_nbr].revents = 0;
                    }
                }
                else
                    poller->errors = 0;     //  A non-error happened

                if (self->pollset [item_nbr].revents) {
                    if (self->verbose)
                        zsys_debug ("zloop: call %s socket handler (%p, %d)",
                            poller->item.socket?
                                zsocket_type_str (poller->item.socket): "FD",
                            poller->item.socket, poller->item.fd);
                    rc = poller->handler (self, &self->pollset [item_nbr], poller->arg);
                    if (rc == -1 || self->need_rebuild)
                        break;
                }
            }
        }
        //  Now handle any timer zombies
        //  This is going to be slow if we have many timers; we might use
        //  a faster lookup on the timer list.
        while (zlist_size (self->zombies)) {
            //  Get timer_id back from pointer
            int timer_id = (byte *) zlist_pop (self->zombies) - (byte *) NULL;
            s_timer_remove (self, timer_id);
        }
        if (rc == -1)
            break;
    }
    self->terminated = true;
    return rc;
}
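To see how this reactor is driven in practice, here is a minimal, self-contained usage sketch: one timer handler that ends the loop by returning -1 after five ticks.

#include <czmq.h>

//  Sketch: minimal zloop usage. A handler returning -1 makes
//  zloop_start () break out of its reactor loop.
static int
s_tick (zloop_t *loop, int timer_id, void *arg)
{
    int *count = (int *) arg;
    printf ("tick %d\n", ++*count);
    return *count < 5? 0: -1;   //  Stop the reactor after five ticks
}

int main (void)
{
    zloop_t *loop = zloop_new ();
    int count = 0;
    //  Fire every 1000 ms, unlimited times; the handler stops us
    zloop_timer (loop, 1000, 0, s_tick, &count);
    zloop_start (loop);
    zloop_destroy (&loop);
    return 0;
}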