//  Pop the least-recently-used worker off the queue and hand its address
//  frame to the caller (ownership transfers). The worker record itself is
//  destroyed; only the routing frame survives.
static zframe_t *
s_workers_next (zlist_t *workers)
{
    worker_t *next = zlist_pop (workers);
    assert (next);
    zframe_t *address_frame = next->address;
    next->address = NULL;       //  Detach frame so destroy won't free it
    s_worker_destroy (&next);
    return address_frame;
}
//  Drop every worker at the head of the queue whose expiry time has
//  already passed. The queue holds workers in heartbeat order (oldest
//  first), so we can stop at the first worker that is still alive.
static void
s_workers_purge (zlist_t *workers)
{
    for (worker_t *w = (worker_t *) zlist_first (workers);
         w != NULL;
         w = (worker_t *) zlist_first (workers)) {
        if (zclock_time () < w->expiry)
            break;              //  Worker is alive, we're done here
        zlist_remove (workers, w);
        s_worker_destroy (&w);
    }
}
//  Mark a worker as ready: remove any stale entry with the same identity
//  from the queue, then append the fresh record at the tail (most
//  recently heartbeated position).
static void
s_worker_ready (worker_t *self, zlist_t *workers)
{
    worker_t *cursor = (worker_t *) zlist_first (workers);
    while (cursor) {
        if (streq (self->identity, cursor->identity)) {
            zlist_remove (workers, cursor);
            s_worker_destroy (&cursor);
            break;              //  Identities are unique; stop here
        }
        cursor = (worker_t *) zlist_next (workers);
    }
    zlist_append (workers, self);
}
int main (void) { zctx_t *ctx = zctx_new (); void *frontend = zsocket_new (ctx, ZMQ_ROUTER); void *backend = zsocket_new (ctx, ZMQ_ROUTER); zsocket_bind (frontend, "tcp://*:5555"); // For clients zsocket_bind (backend, "tcp://*:5556"); // For workers // List of available workers zlist_t *workers = zlist_new (); // Send out heartbeats at regular intervals uint64_t heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL; while (1) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll (items, zlist_size (workers)? 2: 1, HEARTBEAT_INTERVAL * ZMQ_POLL_MSEC); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Use worker address for LRU routing zmsg_t *msg = zmsg_recv (backend); if (!msg) break; // Interrupted // Any sign of life from worker means it's ready zframe_t *address = zmsg_unwrap (msg); worker_t *worker = s_worker_new (address); s_worker_ready (worker, workers); // Validate control message, or return reply to client if (zmsg_size (msg) == 1) { zframe_t *frame = zmsg_first (msg); if (memcmp (zframe_data (frame), PPP_READY, 1) && memcmp (zframe_data (frame), PPP_HEARTBEAT, 1)) { printf ("E: invalid message from worker"); zmsg_dump (msg); } zmsg_destroy (&msg); } else zmsg_send (&msg, frontend); } if (items [1].revents & ZMQ_POLLIN) { // Now get next client request, route to next worker zmsg_t *msg = zmsg_recv (frontend); if (!msg) break; // Interrupted zmsg_push (msg, s_workers_next (workers)); zmsg_send (&msg, backend); } // .split handle heartbeating // We handle heartbeating after any socket activity. First we send // heartbeats to any idle workers if it's time. 
Then we purge any // dead workers: if (zclock_time () >= heartbeat_at) { worker_t *worker = (worker_t *) zlist_first (workers); while (worker) { zframe_send (&worker->address, backend, ZFRAME_REUSE + ZFRAME_MORE); zframe_t *frame = zframe_new (PPP_HEARTBEAT, 1); zframe_send (&frame, backend, 0); worker = (worker_t *) zlist_next (workers); } heartbeat_at = zclock_time () + HEARTBEAT_INTERVAL; } s_workers_purge (workers); } // When we're done, clean up properly while (zlist_size (workers)) { worker_t *worker = (worker_t *) zlist_pop (workers); s_worker_destroy (&worker); } zlist_destroy (&workers); zctx_destroy (&ctx); return 0; }
/**
 * Initialize a pre-allocated pipeline stage object: zero the stage
 * memory, create its synchronization primitives (locks, conds, pause
 * barrier), duplicate the stage name, allocate the worker array, spawn
 * n_workers workers, and install all callback procs and initial state.
 *
 * @param[in,out] sptr  Reference to an already-allocated stage
 *                      (must be non-NULL and point to non-NULL).
 * @retval LAGOPUS_RESULT_OK           on success.
 * @retval LAGOPUS_RESULT_NO_MEMORY    when strdup/malloc fails.
 * @retval LAGOPUS_RESULT_INVALID_ARGS when sptr is invalid or no worker
 *                                     proc matches the given callbacks.
 * @retval (other negative)            whatever a failing create call returned.
 */
static inline lagopus_result_t
s_init_stage(lagopus_pipeline_stage_t *sptr,
             const char *name,
             bool is_heap_allocd,
             size_t n_workers,
             size_t event_size,
             size_t max_batch_size,
             lagopus_pipeline_stage_pre_pause_proc_t pre_pause_proc,
             lagopus_pipeline_stage_sched_proc_t sched_proc,
             lagopus_pipeline_stage_setup_proc_t setup_proc,
             lagopus_pipeline_stage_fetch_proc_t fetch_proc,
             lagopus_pipeline_stage_main_proc_t main_proc,
             lagopus_pipeline_stage_throw_proc_t throw_proc,
             lagopus_pipeline_stage_shutdown_proc_t shutdown_proc,
             lagopus_pipeline_stage_finalize_proc_t final_proc,
             lagopus_pipeline_stage_freeup_proc_t freeup_proc) {
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;

  /*
   * Note that receiving a pipeline stage as a reference
   * (lagopus_pipeline_stage_t *) IS VERY IMPORTANT in order to
   * create workers by calling the s_worker_create().
   */

  if (sptr != NULL && *sptr != NULL) {
    lagopus_pipeline_stage_t ps = *sptr;
    /* Pick the worker main loop matching the supplied callbacks. */
    worker_main_proc_t proc = s_find_worker_proc(fetch_proc, main_proc,
                              throw_proc);

    if (proc != NULL) {
      /* Zero the whole stage allocation before populating it. */
      (void)memset((void *)ps, 0, DEFAULT_STAGE_ALLOC_SZ);

      /* Short-circuit chain: stops at the first failing create and
       * leaves its error code in ret. */
      if (((ret = lagopus_mutex_create(&(ps->m_lock))) ==
           LAGOPUS_RESULT_OK) &&
          ((ret = lagopus_mutex_create(&(ps->m_final_lock))) ==
           LAGOPUS_RESULT_OK) &&
          ((ret = lagopus_cond_create(&(ps->m_cond))) ==
           LAGOPUS_RESULT_OK) &&
          ((ret = lagopus_barrier_create(&(ps->m_pause_barrier),
                                         n_workers)) ==
           LAGOPUS_RESULT_OK) &&
          ((ret = lagopus_mutex_create(&(ps->m_pause_lock))) ==
           LAGOPUS_RESULT_OK) &&
          ((ret = lagopus_cond_create(&(ps->m_pause_cond))) ==
           LAGOPUS_RESULT_OK) &&
          ((ret = lagopus_cond_create(&(ps->m_resume_cond))) ==
           LAGOPUS_RESULT_OK)) {

        if ((ps->m_name = strdup(name)) != NULL &&
            (ps->m_workers = (lagopus_pipeline_worker_t *)
                             malloc(sizeof(lagopus_pipeline_worker_t) *
                                    n_workers)) != NULL) {
          size_t i;

          ps->m_event_size = event_size;
          ps->m_max_batch = max_batch_size;
          ps->m_batch_buffer_size = event_size * max_batch_size;

          /* Loop exits early (via the ret check) on the first failed
           * worker creation. */
          for (i = 0; i < n_workers && ret == LAGOPUS_RESULT_OK; i++) {
            ret = s_worker_create(&(ps->m_workers[i]), sptr, i, proc);
          }
          if (ret == LAGOPUS_RESULT_OK) {
            ps->m_pre_pause_proc = pre_pause_proc;
            ps->m_sched_proc = sched_proc;
            ps->m_setup_proc = setup_proc;
            ps->m_fetch_proc = fetch_proc;
            ps->m_main_proc = main_proc;
            ps->m_throw_proc = throw_proc;
            ps->m_shutdown_proc = shutdown_proc;
            ps->m_final_proc = final_proc;
            ps->m_freeup_proc = freeup_proc;

            ps->m_n_workers = n_workers;
            ps->m_is_heap_allocd = is_heap_allocd;
            ps->m_do_loop = false;
            ps->m_sg_lvl = SHUTDOWN_UNKNOWN;
            ps->m_status = STAGE_STATE_INITIALIZED;
            ps->m_n_canceled_workers = 0LL;
            ps->m_n_shutdown_workers = 0LL;
            ps->m_pause_requested = false;

            ps->m_maint_proc = NULL;
            ps->m_maint_arg = NULL;

            ps->m_post_start_proc = NULL;
            ps->m_post_start_arg = NULL;

            /*
             * finally.
             */
            ret = LAGOPUS_RESULT_OK;
          } else {
            /*
             * NOTE(review): after a failure at index k the loop leaves
             * i == k + 1, so n_created includes the slot whose create
             * call failed — confirm s_worker_destroy() is safe on a
             * worker that failed to create.
             */
            size_t n_created = i;

            for (i = 0; i < n_created; i++) {
              s_worker_destroy(&(ps->m_workers[i]));
            }
          }
        } else {
          /* strdup or malloc failed; free whichever succeeded
           * (free(NULL) is a no-op after the memset above). */
          free((void *)(ps->m_name));
          ps->m_name = NULL;
          free((void *)(ps->m_workers));
          ps->m_workers = NULL;
          ret = LAGOPUS_RESULT_NO_MEMORY;
        }
      }
    } else {
      ret = LAGOPUS_RESULT_INVALID_ARGS;
    }
  } else {
    ret = LAGOPUS_RESULT_INVALID_ARGS;
  }

  return ret;
}
/**
 * Destroy a pipeline stage and release everything s_init_stage() set up.
 *
 * @param ps              The stage (NULL is a no-op).
 * @param is_clean_finish When true, perform an orderly teardown first:
 *                        mark the stage DESTROYING, notify waiters,
 *                        cancel and wait for all workers, destroy the
 *                        worker records, and invoke the stage's freeup
 *                        callback. When false, skip straight to
 *                        resource release.
 *
 * Lock ordering: conds/barrier/pause locks are destroyed while m_lock
 * is still held; m_lock itself is destroyed only after s_unlock_stage(),
 * and the stage memory is freed last (only if heap-allocated).
 */
static inline void
s_destroy_stage(lagopus_pipeline_stage_t ps, bool is_clean_finish) {
  if (ps != NULL) {
    s_lock_stage(ps);
    {
      if (is_clean_finish == true) {
        ps->m_status = STAGE_STATE_DESTROYING;
        s_notify_stage(ps);
        /* Best-effort: results of cancel/wait are intentionally ignored
         * since teardown must proceed regardless. */
        (void)s_cancel_stage(ps, ps->m_n_workers);
        (void)s_wait_stage(ps, ps->m_n_workers, -1LL, true);
        if (ps->m_n_workers > 0 && ps->m_workers != NULL) {
          size_t i;
          for (i = 0; i < ps->m_n_workers; i++) {
            s_worker_destroy(&(ps->m_workers[i]));
          }
        }
        if (ps->m_freeup_proc != NULL) {
          /* Stage-specific resource release hook. */
          (ps->m_freeup_proc)(&ps);
        }
      }

      s_delete_stage(ps);

      /* free(NULL) is a no-op, so these are safe even if init failed
       * part-way through. */
      free((void *)(ps->m_name));
      free((void *)(ps->m_workers));

      if (ps->m_cond != NULL) {
        lagopus_cond_destroy(&(ps->m_cond));
        ps->m_cond = NULL;
      }
      if (ps->m_final_lock != NULL) {
        lagopus_mutex_destroy(&(ps->m_final_lock));
        ps->m_final_lock = NULL;
      }
      if (ps->m_pause_barrier != NULL) {
        lagopus_barrier_destroy(&(ps->m_pause_barrier));
        ps->m_pause_barrier = NULL;
      }
      if (ps->m_pause_lock != NULL) {
        lagopus_mutex_destroy(&(ps->m_pause_lock));
        ps->m_pause_lock = NULL;
      }
      if (ps->m_pause_cond != NULL) {
        lagopus_cond_destroy(&(ps->m_pause_cond));
        ps->m_pause_cond = NULL;
      }
      if (ps->m_resume_cond != NULL) {
        lagopus_cond_destroy(&(ps->m_resume_cond));
        ps->m_resume_cond = NULL;
      }
    }
    s_unlock_stage(ps);

    /* m_lock can only be destroyed once it is no longer held. */
    if (ps->m_lock != NULL) {
      lagopus_mutex_destroy(&(ps->m_lock));
      ps->m_lock = NULL;
    }

    if (ps->m_is_heap_allocd == true) {
      free((void *)ps);
    }
  }
}
int main(void) { zctx_t *ctx = zctx_new(); void *frontend = zsocket_new(ctx, ZMQ_ROUTER); void *backend = zsocket_new(ctx, ZMQ_ROUTER); zsocket_bind(frontend, "tcp://127.0.0.1:5555"); zsocket_bind(backend, "tcp://127.0.0.1:5556"); zlist_t *workers = zlist_new(); uint64_t heartbeat_at = zclock_time() + HEARTBEAT_INTERVAL; while (true){ zmq_pollitem_t items [] ={ {backend, 0, ZMQ_POLLIN, 0}, {frontend, 0, ZMQ_POLLIN, 0} }; int rc = zmq_poll(items, zlist_size(worker)?2:1); if (rc == -1) break; if (items[0].revents & ZMQ_POLLIN){ zmsg_t *msg = zmsg_recv(backend); if (!msg) break; zframe_t *identity = zmsg_unwrap(msg); worker_t *worker = s_worker_new(identity); s_worker_ready(worker, workers); if (zmsg_size(msg) == 1){ zframe_t *frame = zmsg_first(msg); if (memcmp(zframe_data(frame), PPP_READY, 1) && memcmp(zframe_data(frame), PPP_HEARTBEAT, 1)){ printf("E: invalid message from worker"); zmsg_dump(msg); } zmsg_destroy(&msg); } else zmsg_send(&msg, frontend); } if (items[1].revents & ZMQ_POLLIN){ zmsg_t *msg = zmsg_recv(frontend); if (!msg) break; zframe_t *identity = s_workers_next(workers); zmsg_prepend(msg, &identity); zmsg_send(&msg, backend); } if (zclock_time() >= heartbeat_at){ worker_t *worker = (worker_t *)zlist_first(workers); while (worker){ zframe_send(&worker->identity, backend, ZFRAME_REUSE + ZFRAME_MORE); zframe_t *frame = zframe_new(PPP_HEARTBEAT, 1); zframe_send(&frame, backend, 0); worker = (worker_t *)zlist_next(workers); } heartbeat_at = zclock_time() + HEARTBEAT_INTERVAL; } s_workers_purge(workers); } while (zlist_size(workers)){ worker_t *worker = (worker_t *)zlist_pop(workers); s_worker_destroy(&worker); } zlist_destroy(&workers); zctx_destroy(&ctx); return 0; }