// Look up the service named by service_frame, creating and registering it
// on first use. On creation, ownership of the duplicated name string passes
// to the service/hash entry; otherwise the duplicate is freed here.
static service_t *
s_service_require (broker_t *self, zframe_t *service_frame)
{
    assert (service_frame);
    char *name = zframe_strdup (service_frame);
    service_t *service = (service_t *) zhash_lookup (self->services, name);
    if (service == NULL) {
        service = (service_t *) zmalloc (sizeof (service_t));
        service->broker = self;
        service->name = name;               // service takes ownership of name
        service->requests = zlist_new ();   // queued client requests
        service->waiting = zlist_new ();    // workers waiting on this service
        service->blacklist = zlist_new ();  // assumes per-service ban list -- TODO confirm semantics
        zhash_insert (self->services, name, service);
        // Destructor runs when the hash entry is deleted, freeing the service
        zhash_freefn (self->services, name, s_service_destroy);
        if (self->verbose)
            zclock_log ("I: added service: %s", name);
    }
    else
        free (name);    // service already known; drop the duplicate name
    return service;
}
broker_t * broker_new (const char *contexts_uri, const char *executors_uri) { broker_t *self = (broker_t *) malloc (sizeof (broker_t)); assert (self); printf ("[BROKER] binding to frontend %s\n", contexts_uri); self->contexts = zsock_new_router (contexts_uri); assert(self->contexts); printf ("[BROKER] binding to backend %s\n", executors_uri); self->executors = zsock_new_router (executors_uri); assert (self->executors); zsock_set_router_mandatory (self->executors, true); // Only poll on executors until we have executors available. self->poller = zpoller_new (self->executors, NULL); assert (self->poller); self->executor_lb = zlist_new (); assert (self->executor_lb); self->backlog = zlist_new (); assert (self->backlog); return self; }
// Create a new reactor instance with empty reader, poller, timer and
// zombie lists. Returns NULL if any allocation fails; partially built
// state is fully released before returning.
zloop_t *
zloop_new (void)
{
    zloop_t *self = (zloop_t *) zmalloc (sizeof (zloop_t));
    if (!self)
        return NULL;

    self->readers = zlist_new ();
    self->pollers = zlist_new ();
    self->timers = zlist_new ();
    self->zombies = zlist_new ();
    self->last_timer_id = 0;

    //  Back out completely if any list could not be created
    if (self->readers == NULL || self->pollers == NULL
    ||  self->timers == NULL || self->zombies == NULL) {
        zlist_destroy (&self->readers);
        zlist_destroy (&self->pollers);
        zlist_destroy (&self->timers);
        zlist_destroy (&self->zombies);
        free (self);
        self = NULL;
    }
    return self;
}
// Ruby method: create a new 0MQ socket of the given type on this context.
// `type` may be a Fixnum (raw 0MQ socket type) or a Symbol naming a
// ZMQ::* constant. Raises TypeError on anything else. Returns the wrapped
// socket object; the underlying zmq socket is created without the GVL.
static VALUE rb_czmq_ctx_socket(VALUE obj, VALUE type)
{
    VALUE socket;
    int socket_type;
    struct nogvl_socket_args args;
    zmq_sock_wrapper *sock = NULL;
    errno = 0;
    ZmqGetContext(obj);     // unwrap the context pointer into `ctx`
    if (TYPE(type) != T_FIXNUM && TYPE(type) != T_SYMBOL)
        rb_raise(rb_eTypeError, "wrong socket type %s (expected Fixnum or Symbol)",
                 RSTRING_PTR(rb_obj_as_string(type)));
    // Symbols are resolved against constants in the ZMQ module
    socket_type = FIX2INT((SYMBOL_P(type)) ? rb_const_get_at(rb_mZmq, rb_to_id(type)) : type);
    // Allocate the Ruby wrapper with GC mark/free hooks; `sock` is filled in
    socket = Data_Make_Struct(rb_czmq_ctx_socket_klass(socket_type), zmq_sock_wrapper,
                              rb_czmq_mark_sock, rb_czmq_free_sock_gc, sock);
    args.ctx = ctx->ctx;
    args.type = socket_type;
    // Create the raw socket outside the GVL so other Ruby threads can run
    sock->socket = (void*)rb_thread_blocking_region(rb_czmq_nogvl_socket_new, (void *)&args, RUBY_UBF_IO, 0);
    ZmqAssertObjOnAlloc(sock->socket, sock);
#ifndef HAVE_RB_THREAD_BLOCKING_REGION
    // Without native blocking-region support, buffer received data per type
    sock->str_buffer = zlist_new();
    sock->frame_buffer = zlist_new();
    sock->msg_buffer = zlist_new();
#endif
    sock->handler = Qnil;
    sock->flags = 0;
    sock->ctx = ctx->ctx;
    sock->verbose = FALSE;
    sock->state = ZMQ_SOCKET_PENDING;   // not yet bound/connected
    sock->endpoint = Qnil;
    sock->thread = rb_thread_current(); // owning thread -- TODO confirm enforcement elsewhere
    sock->recv_timeout = ZMQ_SOCKET_DEFAULT_TIMEOUT;
    sock->send_timeout = ZMQ_SOCKET_DEFAULT_TIMEOUT;
    rb_obj_call_init(socket, 0, NULL);
    return socket;
}
// Get (or lazily create) the per-handle message dispatcher, cached in the
// handle's aux storage under "flux::dispatch". Returns NULL with
// errno = ENOMEM on allocation failure.
static struct dispatch *dispatch_get (flux_t h)
{
    struct dispatch *d = flux_aux_get (h, "flux::dispatch");
    if (!d) {
        flux_reactor_t *r = flux_get_reactor (h);
        // FIX: calloc replaces malloc+memset -- one call, zero-initialized
        if (!(d = calloc (1, sizeof (*d))))
            goto nomem;
        d->usecount = 1;
        if (!(d->handlers = zlist_new ()))
            goto nomem;
        if (!(d->handlers_new = zlist_new ()))
            goto nomem;
        d->h = h;
        d->w = flux_handle_watcher_create (r, h, FLUX_POLLIN, handle_cb, d);
        if (!d->w)
            goto nomem;
        fastpath_init (&d->norm);
        fastpath_init (&d->group);
        // aux takes ownership; dispatch_destroy runs at handle teardown
        flux_aux_set (h, "flux::dispatch", d, dispatch_destroy);
    }
    return d;
nomem:
    // FIX: guard against d == NULL when the initial allocation failed
    if (d)
        dispatch_destroy (d);
    errno = ENOMEM;
    return NULL;
}
// Create and connect the subscriber's SUB socket: apply configured (or
// default) subscriptions, always add "heartbeat" unless subscribed to
// everything, then connect to each configured device endpoint.
// Mutates state->subscriptions and state->devices when they are empty.
static zsock_t* subscriber_sub_socket_new(subscriber_state_t *state)
{
    zsock_t *socket = zsock_new(ZMQ_SUB);
    assert(socket);
    zsock_set_rcvhwm(socket, state->rcv_hwm);

    // set subscription
    if (!state->subscriptions || zlist_size(state->subscriptions) == 0) {
        if (!state->subscriptions)
            state->subscriptions = zlist_new();
        // fall back to the subscription from the config file ("" = everything)
        zlist_append(state->subscriptions, zconfig_resolve(state->config, "/logjam/subscription", ""));
    }

    char *subscription = zlist_first(state->subscriptions);
    bool subscribed_to_all = false;
    while (subscription) {
        printf("[I] subscriber: subscribing to '%s'\n", subscription);
        if (streq(subscription, ""))
            subscribed_to_all = true;
        zsock_set_subscribe(socket, subscription);
        subscription = zlist_next(state->subscriptions);
    }
    // ensure heartbeats are received even with a narrow subscription
    if (!subscribed_to_all)
        zsock_set_subscribe(socket, "heartbeat");

    if (!state->devices || zlist_size(state->devices) == 0) {
        // convert config file to list of devices
        if (!state->devices)
            state->devices = zlist_new();
        zconfig_t *endpoints = zconfig_locate(state->config, "/logjam/endpoints");
        if (!endpoints) {
            // nothing configured: default to a local logjam device
            zlist_append(state->devices, "tcp://localhost:9606");
        } else {
            zconfig_t *endpoint = zconfig_child(endpoints);
            while (endpoint) {
                char *spec = zconfig_value(endpoint);
                // fill in default port 9606 if the spec omits it -- TODO confirm
                char *new_spec = augment_zmq_connection_spec(spec, 9606);
                zlist_append(state->devices, new_spec);
                endpoint = zconfig_next(endpoint);
            }
        }
    }

    char* device = zlist_first(state->devices);
    while (device) {
        printf("[I] subscriber: connecting SUB socket to logjam-device via %s\n", device);
        int rc = zsock_connect(socket, "%s", device);
        log_zmq_error(rc, __FILE__, __LINE__);
        assert(rc == 0);    // connection spec must be valid; abort otherwise
        device = zlist_next(state->devices);
    }
    return socket;
}
static engine_t * s_engine_new (char *broker, char *service, int verbose) { engine_t *self = (engine_t *) zmalloc (sizeof *self); // Initialize engine state self->worker = mdp_worker_new (NULL, broker, service, verbose); self->sell_orders = zlist_new (); self->buy_orders = zlist_new (); return self; }
// Create a virtual socket (vocket) of the given 0MQ socket type for this
// driver. Looks up routing policy and peering limits from the static
// s_vocket_config table, connects the msgpipe PAIR socket over inproc to
// the owning vtx thread, and registers the vocket with the driver.
// Exits the process on an unknown socktype.
static vocket_t *
vocket_new (driver_t *driver, int socktype, char *vtxname)
{
    assert (driver);
    vocket_t *self = (vocket_t *) zmalloc (sizeof (vocket_t));
    self->driver = driver;
    self->vtxname = strdup (vtxname);
    self->binding_hash = zhash_new ();
    self->peering_hash = zhash_new ();
    self->peering_list = zlist_new ();
    self->live_peerings = zlist_new ();
    self->socktype = socktype;
    // Find configuration entry for this socket type
    uint index;
    for (index = 0; index < tblsize (s_vocket_config); index++)
        if (socktype == s_vocket_config [index].socktype)
            break;
    if (index < tblsize (s_vocket_config)) {
        self->routing = s_vocket_config [index].routing;
        self->nomnom = s_vocket_config [index].nomnom;
        self->min_peerings = s_vocket_config [index].min_peerings;
        self->max_peerings = s_vocket_config [index].max_peerings;
    }
    else {
        zclock_log ("E: invalid vocket type %d", socktype);
        exit (1);
    }
    // Create msgpipe vocket and connect over inproc to vtxname
    self->msgpipe = zsocket_new (driver->ctx, ZMQ_PAIR);
    assert (self->msgpipe);
    zsocket_connect (self->msgpipe, "inproc://%s", vtxname);
    // If we drop on no peerings, start routing input now
    if (self->min_peerings == 0) {
        // Ask reactor to start monitoring vocket's msgpipe pipe
        zmq_pollitem_t item = { self->msgpipe, 0, ZMQ_POLLIN, 0 };
        zloop_poller (driver->loop, &item, s_vocket_input, self);
    }
    // Store this vocket per driver so that driver can cleanly destroy
    // all its vockets when it is destroyed.
    zlist_push (driver->vockets, self);
    //* Start transport-specific work
    self->inbuf_max = VTX_TCP_INBUF_MAX;
    self->outbuf_max = VTX_TCP_OUTBUF_MAX;
    //* End transport-specific work
    return self;
}
// Create a client object for a newly accepted connection on fd.
// Verifies via SO_PEERCRED (requires an AF_UNIX socket) that the peer uid
// matches the session owner, then sets up read/write watchers, message
// I/O buffers, and nonblocking mode.
// Returns NULL after logging and destroying partial state on failure;
// aborts via oom() on allocation failure.
static client_t *client_create (ctx_t *ctx, int fd)
{
    client_t *c;
    socklen_t crlen = sizeof (c->ucred);
    flux_t h = ctx->h;

    c = xzmalloc (sizeof (*c));     // zero-filled; aborts on OOM
    c->fd = fd;
    if (!(c->uuid = zuuid_new ()))
        oom ();
    c->ctx = ctx;
    if (!(c->disconnect_notify = zhash_new ()))
        oom ();
    if (!(c->subscriptions = zlist_new ()))
        oom ();
    if (!(c->outqueue = zlist_new ()))
        oom ();
    // Fetch the peer's credentials from the kernel
    if (getsockopt (fd, SOL_SOCKET, SO_PEERCRED, &c->ucred, &crlen) < 0) {
        flux_log (h, LOG_ERR, "getsockopt SO_PEERCRED: %s", strerror (errno));
        goto error;
    }
    assert (crlen == sizeof (c->ucred));
    /* Deny connections by uid other than session owner for now. */
    if (c->ucred.uid != ctx->session_owner) {
        flux_log (h, LOG_ERR, "connect by uid=%d pid=%d denied",
                  c->ucred.uid, (int)c->ucred.pid);
        goto error;
    }
    c->inw = flux_fd_watcher_create (fd, FLUX_POLLIN, client_read_cb, c);
    c->outw = flux_fd_watcher_create (fd, FLUX_POLLOUT, client_write_cb, c);
    if (!c->inw || !c->outw) {
        flux_log (h, LOG_ERR, "flux_fd_watcher_create: %s", strerror (errno));
        goto error;
    }
    // Only the read watcher starts now; presumably the write watcher is
    // started elsewhere when output is queued -- verify against callers
    flux_fd_watcher_start (h, c->inw);
    flux_msg_iobuf_init (&c->inbuf);
    flux_msg_iobuf_init (&c->outbuf);
    if (set_nonblock (c->fd, true) < 0) {
        flux_log (h, LOG_ERR, "set_nonblock: %s", strerror (errno));
        goto error;
    }
    return (c);
error:
    client_destroy (c);
    return NULL;
}
// Get (or lazily create) the module context cached in the handle's aux
// storage under key "req". Returns NULL with errno set on allocation or
// rank-lookup failure.
static ctx_t *getctx (flux_t h)
{
    int saved_errno;
    ctx_t *ctx = (ctx_t *)flux_aux_get (h, "req");
    if (!ctx) {
        ctx = xzmalloc (sizeof (*ctx));     // zero-filled; aborts on OOM
        ctx->h = h;
        ctx->ping_requests = zhash_new ();
        ctx->clog_requests = zlist_new ();
        if (!ctx->clog_requests || !ctx->ping_requests) {
            saved_errno = ENOMEM;
            goto error;
        }
        if (flux_get_rank (h, &ctx->rank) < 0) {
            saved_errno = errno;    // preserve errno across cleanup
            goto error;
        }
        // aux takes ownership; freectx runs at handle teardown
        flux_aux_set (h, "req", ctx, freectx);
    }
    return ctx;
error:
    freectx (ctx);
    errno = saved_errno;
    return NULL;
}
// Prepare for a scheduling loop iteration: reset the reservation depth
// counter and lazily create the global completion_times list.
// Returns 0 on success, -1 with errno = ENOMEM if list creation fails.
int sched_loop_setup (flux_t h)
{
    curr_reservation_depth = 0;
    if (!completion_times) {
        completion_times = zlist_new ();
        // FIX: allocation failure was silently ignored, leaving a NULL
        // list for later code to trip over; report it to the caller.
        if (!completion_times) {
            errno = ENOMEM;
            return -1;
        }
    }
    return 0;
}
// Create a new CZMQ context wrapper. Returns NULL on allocation failure.
// On UNIX, installs SIGINT/SIGTERM handlers so blocking calls can be
// interrupted cleanly.
zctx_t *
zctx_new (void)
{
    zctx_t *self;
    self = (zctx_t *) zmalloc (sizeof (zctx_t));
    if (!self)
        return NULL;
    self->sockets = zlist_new ();
    if (!self->sockets) {
        free (self);
        return NULL;
    }
    self->iothreads = 1;    // default 0MQ I/O thread count
    self->main = TRUE;      // this is the main (owning) context
    self->hwm = 1;          // default high-water mark for new sockets
#if defined (__UNIX__)
    // Install signal handler for SIGINT and SIGTERM
    struct sigaction action;
    action.sa_handler = s_signal_handler;
    action.sa_flags = 0;
    sigemptyset (&action.sa_mask);
    sigaction (SIGINT, &action, NULL);
    sigaction (SIGTERM, &action, NULL);
#endif
    return self;
}
zlist_t * get_update (uint64_t from_state) { printf("[ST] GET_UPDATE\n"); zlist_t *filemeta_list = zlist_new (); DIR *dir; struct dirent *ent; if ((dir = opendir ("./syncfolder")) != NULL) { /* print all the files and directories within directory */ while ((ent = readdir (dir)) != NULL) { if (strcmp (ent->d_name, ".") != 0 && strcmp (ent->d_name, "..") != 0) { struct stat st; stat(ent->d_name, &st); zs_fmetadata_t *fmetadata = zs_fmetadata_new (); zs_fmetadata_set_path (fmetadata, "%s", ent->d_name); zs_fmetadata_set_size (fmetadata, st.st_size); zs_fmetadata_set_operation (fmetadata, ZS_FILE_OP_UPD); zs_fmetadata_set_timestamp (fmetadata, st.st_ctime); zs_fmetadata_set_checksum (fmetadata, 0x3312AFFDE12); zlist_append(filemeta_list, fmetadata); } } closedir (dir); } if (zlist_size (filemeta_list) > 0) { return filemeta_list; } else { return NULL; } }
kdt_point_t * kdt_point_new (void) { kdt_point_t *self = (kdt_point_t *) zmalloc (sizeof (kdt_point_t)); assert (self); self->list = zlist_new (); return self; }
// Constructor: create a block over `descriptor`. If the caller supplies
// an applied_transforms list it is adopted as-is; otherwise an empty one
// is created. A fresh queued_transforms list is always allocated.
sprk_block_t *
sprk_block_new (sprk_descriptor_t *descriptor, zlist_t *applied_transforms)
{
    sprk_block_t *block = (sprk_block_t *) zmalloc (sizeof (sprk_block_t));
    assert (block);
    block->descriptor = descriptor;
    block->applied_transforms =
        applied_transforms ? applied_transforms : zlist_new ();
    block->queued_transforms = zlist_new ();
    return block;
}
// Destroy every waiter in q whose stored message matches according to cb
// (cb (msg, arg) returning nonzero). Matches are first collected into a
// temporary list so the queue is not modified while being iterated, then
// removed and destroyed in a second pass.
// Returns the number of matched waiters (the rc = -1 initializer is
// always overwritten before return). Aborts via oom() on allocation failure.
int wait_destroy_match (waitqueue_t *q, wait_compare_f cb, void *arg)
{
    zlist_t *tmp = NULL;
    wait_t *w;
    int rc = -1;
    int count = 0;

    assert (q->magic == WAITQUEUE_MAGIC);
    // Pass 1: collect matches without mutating q->q
    w = zlist_first (q->q);
    while (w) {
        if (w->hand.msg && cb != NULL && cb (w->hand.msg, arg)) {
            if (!tmp && !(tmp = zlist_new ()))  // lazy: only allocate on first match
                oom ();
            if (zlist_append (tmp, w) < 0)
                oom ();
            w->hand.cb = NULL; // prevent wait_runone from restarting handler
            count++;
        }
        w = zlist_next (q->q);
    }
    // Pass 2: remove matches from the queue; destroy when refcount drops to 0
    if (tmp) {
        while ((w = zlist_pop (tmp))) {
            zlist_remove (q->q, w);
            if (--w->usecount == 0)
                wait_destroy (w, NULL);
        }
    }
    rc = count;
    zlist_destroy (&tmp);
    return rc;
}
// Create an iterator over the sorted key names of a KVS directory object.
// NOTE(review): the list stores the jansson object's internal key pointers
// (no copies), so `dir` must outlive the iterator -- confirm against the
// public API contract. Returns NULL with errno set on error.
flux_kvsitr_t *flux_kvsitr_create (const flux_kvsdir_t *dir)
{
    flux_kvsitr_t *itr = NULL;
    const char *key;
    json_t *dirdata, *value;

    if (!dir) {
        errno = EINVAL;
        goto error;
    }
    if (!(itr = calloc (1, sizeof (*itr))))
        goto error;
    if (!(itr->keys = zlist_new ()))
        goto error;
    dirdata = treeobj_get_data (dir->dirobj);
    // Collect all key names from the directory's JSON payload
    json_object_foreach (dirdata, key, value) {
        if (zlist_push (itr->keys, (char *)key) < 0)
            goto error;
    }
    zlist_sort (itr->keys, sort_cmp);
    itr->reset = true;      // first next() call starts from the beginning
    return itr;
error:
    flux_kvsitr_destroy (itr);  // presumably NULL-safe -- verify
    return NULL;
}
// Build subscriber actor state: determine the device spec list (caller's
// argument, else config file, else a localhost default), then create all
// sockets and the device tracker. Takes ownership of `devices` (it may be
// destroyed and replaced if empty). Returns the new state; caller owns it.
static subscriber_state_t* subscriber_state_new(zsock_t *pipe, zconfig_t* config, zlist_t *devices)
{
    // figure out devices specs
    if (devices == NULL)
        devices = zlist_new();
    if (zlist_size(devices) == 0) {
        // nothing passed in: replace the empty list with config-derived specs
        zlist_destroy(&devices);
        devices = extract_devices_from_config(config);
    }
    if (zlist_size(devices) == 0)
        // still none: fall back to a local device on the default sub port
        zlist_append(devices, augment_zmq_connection_spec("localhost", sub_port));
    //create the state
    subscriber_state_t *state = zmalloc(sizeof(*state));
    state->pipe = pipe;
    state->devices = devices;
    state->sub_socket = subscriber_sub_socket_new(config, state->devices);
    // tracker watches device liveness on the sub socket -- TODO confirm
    state->tracker = device_tracker_new(devices, state->sub_socket);
    state->pull_socket = subscriber_pull_socket_new(config);
    state->router_socket = subscriber_router_socket_new(config);
    state->push_socket = subscriber_push_socket_new(config);
    if (PUBLISH_DUPLICATES)
        state->pub_socket = subscriber_pub_socket_new(config);
    return state;
}
// Send n named frames (looked up in `frames` by the varargs keys) to the
// worker socket. All frames are sent with MORE set except the last, which
// is terminal only when type == SEND_FINAL. Frames are sent with REUSE so
// the hash retains ownership. In debug mode, prints the frames in send order.
void send_frames_at_server(zhash_t *frames, void *worker, enum SEND_TYPE type, int n, ...)
{
    char *key;
    va_list valist;
    int i;
    va_start(valist, n);
    zlist_t *names = zlist_new();   // records send order for debug printing
    for(i=0; i < n; i++ ) {
        key = va_arg(valist, char *);
        zframe_t *frame = (zframe_t *)zhash_lookup(frames, key);
        assert(frame!=NULL);        // every key must name a stored frame
        assert(zframe_is(frame));
        zlist_append(names, key);
        if( i == n-1 && type==SEND_FINAL) {
            zframe_send(&frame, worker, ZFRAME_REUSE);
        }
        else
            zframe_send(&frame, worker, ZFRAME_REUSE + ZFRAME_MORE);
    }
    va_end(valist);
    if(DEBUG_MODE)
        print_out_hash_in_order(frames, names);
    // FIX: removed the redundant zlist_purge() call -- zlist_destroy
    // already drops all remaining items before freeing the list.
    zlist_destroy(&names);
}
zctx_t * zctx_new (void) { zctx_t *self = (zctx_t *) zmalloc (sizeof (zctx_t)); if (!self) return NULL; self->sockets = zlist_new (); self->mutex = zmutex_new (); if (!self->sockets || !self->mutex) { zctx_destroy (&self); return NULL; } self->iothreads = 1; self->pipehwm = 1000; self->sndhwm = 1000; self->rcvhwm = 1000; // Catch SIGINT and SIGTERM unless ZSYS_SIGHANDLER=false if ( getenv ("ZSYS_SIGHANDLER") == NULL || strneq (getenv ("ZSYS_SIGHANDLER"), "false")) zsys_catch_interrupts (); return self; }
zlist_t * kvs_jobid_list (plugin_ctx_t *p) { zlist_t *zl = NULL; json_object_iter i; json_object *val = NULL; json_object *reply = NULL; json_object *request = util_json_object_new_object (); json_object_object_add (request, "lwj.id-list", NULL); reply = flux_rpc (p, request, "kvs.get.dir"); if (!reply) { err ("kvs_job_list: plugin request failed!"); goto done; } if (!(val = json_object_object_get (reply, "lwj.ids"))) { errno = ENOENT; err ("kvs_job_list: %s", strerror (errno)); goto done; } if (!(zl = zlist_new ())) { errno = ENOMEM; goto done; } json_object_object_foreachC (val, i) { json_object *co; if ((co = json_object_object_get (i.val, "FILEVAL"))) { zlist_append (zl, strdup (json_object_to_json_string_ext (co, JSON_C_TO_STRING_PLAIN))); } }
// Create a new CZMQ context wrapper with default thread count and HWMs.
// Returns NULL on allocation failure (partial state is released first).
// Installs the library signal handler for clean interruption.
zctx_t *
zctx_new (void)
{
    zctx_t *self;
    self = (zctx_t *) zmalloc (sizeof (zctx_t));
    if (!self)
        return NULL;
    self->sockets = zlist_new ();
    self->mutex = zmutex_new ();    // protects socket list across threads
    if (!self->sockets || !self->mutex) {
        // Back out: destroy whichever of the two succeeded
        zlist_destroy (&self->sockets);
        zmutex_destroy (&self->mutex);
        free (self);
        return NULL;
    }
    self->iothreads = 1;    // default 0MQ I/O thread count
    self->pipehwm = 1000;   // default high-water marks
    self->sndhwm = 1000;
    self->rcvhwm = 1000;
    zsys_handler_set (s_signal_handler);
    return self;
}
/* timer adds zmsgs to zlist, then stops reactor after 100. */
// Exercise the ev_zlist watcher: list_timer_cb (driven by a fast repeating
// timer) appends messages to the list and list_cb consumes them; after the
// callbacks have run the expected number of times the loop is stopped and
// the list must be empty.
void test_ev_zlist (void)
{
    struct ev_loop *loop;
    ev_zlist list_w;
    ev_timer timer_w;
    zlist_t *l;
    zmsg_t *zmsg;

    ok ((loop = ev_loop_new (EVFLAG_AUTO)) != NULL,
        "ev_loop_new works");
    // Seed the list with one message so the list watcher fires immediately
    if (!(l = zlist_new ()) || !(zmsg = zmsg_new ())
                            || zlist_append (l, zmsg) < 0)
        oom ();
    ev_zlist_init (&list_w, list_cb, l, EV_READ);
    // 1ms delay/repeat timer keeps feeding the list
    ev_timer_init (&timer_w, list_timer_cb, 1E-3, 1E-3);
    timer_w.data = l;
    // Watchers must be started before ev_run for the loop to see them
    ev_zlist_start (loop, &list_w);
    ev_timer_start (loop, &timer_w);
    ok (ev_run (loop, 0) != 0 && zlist_size (l) == 0,
        "ev_zlist handler ran 100 times");
    ev_zlist_stop (loop, &list_w);
    ev_timer_stop (loop, &timer_w);
    if (l)
        zlist_destroy (&l);
    ev_loop_destroy (loop);
}
// Create a shadow of an existing context: it shares the parent's 0MQ
// context but keeps a private socket list, so sockets are created, used
// and destroyed within a single thread. Settings (HWMs, linger) are copied
// from the parent. Returns NULL on allocation failure.
zctx_t *
zctx_shadow (zctx_t *ctx)
{
    zctx_t *self = (zctx_t *) zmalloc (sizeof (zctx_t));
    if (self == NULL)
        return NULL;

    self->sockets = zlist_new ();
    self->mutex = zmutex_new ();
    if (self->sockets == NULL || self->mutex == NULL) {
        //  Back out: destroy whichever allocation succeeded
        zlist_destroy (&self->sockets);
        zmutex_destroy (&self->mutex);
        free (self);
        return NULL;
    }
    //  Share the parent's 0MQ context and copy its settings
    self->context = ctx->context;
    self->pipehwm = ctx->pipehwm;
    self->sndhwm = ctx->sndhwm;
    self->rcvhwm = ctx->rcvhwm;
    self->linger = ctx->linger;
    self->shadow = true;    //  This is a shadow context
    return self;
}
// Refresh ctx->names with the attribute name list fetched via an
// "attr.list" RPC, sorted with attr_strcmp.
// Returns 0 on success, -1 with errno set on failure.
static int attr_list_rpc (attr_ctx_t *ctx)
{
    flux_future_t *f;
    json_t *array, *value;
    size_t index;
    int rc = -1;

    if (!(f = flux_rpc (ctx->h, "attr.list", NULL, FLUX_NODEID_ANY, 0)))
        goto done;
    if (flux_rpc_get_unpack (f, "{s:o}", "names", &array) < 0)
        goto done;
    zlist_destroy (&ctx->names);    // drop the stale cache before rebuilding
    if (!(ctx->names = zlist_new ()))
        goto done;
    json_array_foreach (array, index, value) {
        const char *name = json_string_value (value);
        if (!name) {
            errno = EPROTO;         // array element was not a string
            goto done;
        }
        // NOTE(review): strdup result is unchecked (NULL could be appended
        // on OOM), and freeing of the copies presumably happens via a list
        // autofree/teardown elsewhere -- verify against attr_ctx cleanup.
        if (zlist_append (ctx->names, strdup (name)) < 0) {
            errno = ENOMEM;
            goto done;
        }
    }
    zlist_sort (ctx->names, (zlist_compare_fn *)attr_strcmp);
    rc = 0;
done:
    // reached with f == NULL on the first failure path; flux destroy
    // functions are presumably NULL-safe -- verify
    flux_future_destroy (f);
    return rc;
}
// JNI binding for Zlist.__new: create a native zlist and hand its address
// back to Java as a jlong.
JNIEXPORT jlong JNICALL
Java_org_zeromq_czmq_Zlist__1_1new (JNIEnv *env, jclass c)
{
    // Disable CZMQ signal handling; allow Java to deal with it
    zsys_handler_set (NULL);
    return (jlong) (intptr_t) zlist_new ();
}
// Look up the service named service_name, creating and registering it on
// first use. On creation, the hash entry takes ownership of both the name
// copy and the service struct; otherwise the name copy is released here.
static service_t *
s_service_require (server_t *self, const char *service_name)
{
    char *name = strdup (service_name);
    service_t *service = (service_t *) zhash_lookup (self->services, name);
    if (service) {
        zstr_free (&name);      //  Already registered; drop our copy
        return service;
    }
    //  First request for this service: create and register it
    service = (service_t *) zmalloc (sizeof (service_t));
    service->broker = self;
    service->name = name;       //  Service takes ownership of name
    service->requests = zlist_new ();
    service->waiting = zlist_new ();
    zhash_insert (self->services, name, service);
    zhash_freefn (self->services, name, s_service_destroy);
    return service;
}
// Append msg to the deferral list *l, creating the list on first use.
// Returns 0 on success, -1 with errno = ENOMEM on allocation failure.
static int defer_enqueue (zlist_t **l, flux_msg_t *msg)
{
    if (*l == NULL) {
        *l = zlist_new ();
        if (*l == NULL)
            goto nomem;
    }
    if (zlist_append (*l, msg) < 0)
        goto nomem;
    return 0;
nomem:
    errno = ENOMEM;
    return -1;
}
waitqueue_t *wait_queue_create (void) { waitqueue_t *q = xzmalloc (sizeof (*q)); if (!(q->q = zlist_new ())) oom (); q->magic = WAITQUEUE_MAGIC; return q; }
// Least-recently-used queue broker: routes client requests from the
// frontend ROUTER socket to idle workers on the backend ROUTER socket,
// queueing worker identities as they signal readiness.
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "tcp://*:5555");    // For clients
    zsocket_bind (backend, "tcp://*:5556");     // For workers

    // Queue of available workers
    zlist_t *workers = zlist_new ();

    // The body of this example is exactly the same as lruqueue2.
    // .skip
    while (1) {
        zmq_pollitem_t items [] = {
            { backend, 0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        // Poll frontend only if we have available workers
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1);
        if (rc == -1)
            break;              // Interrupted

        // Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            // Use worker address for LRU routing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          // Interrupted
            // Worker is idle again: queue its identity for routing
            zframe_t *address = zmsg_unwrap (msg);
            zlist_append (workers, address);

            // Forward message to client if it's not a READY
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), LRU_READY, 1) == 0)
                zmsg_destroy (&msg);
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            // Get client request, route to first available worker
            zmsg_t *msg = zmsg_recv (frontend);
            if (msg) {
                // Prefix request with the least-recently-used worker identity
                zmsg_wrap (msg, (zframe_t *) zlist_pop (workers));
                zmsg_send (&msg, backend);
            }
        }
    }
    // When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
    // .until
}