Example #1
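/* Drop a reference on the message dispatcher; once the use count reaches
 * zero, free all (already destroyed) message handlers on both lists, the
 * dispatcher's watcher and fast-path tables, and finally the dispatcher.
 */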
static void dispatch_usecount_decr (struct dispatch *d)
{
    flux_msg_handler_t *w;
    if (d && --d->usecount == 0) {
        if (d->handlers) {
            while ((w = zlist_pop (d->handlers))) {
                assert (w->destroyed);
                free_msg_handler (w);
            }
            zlist_destroy (&d->handlers);
        }
        if (d->handlers_new) {
            while ((w = zlist_pop (d->handlers_new))) {
                assert (w->destroyed);
                free_msg_handler (w);
            }
            zlist_destroy (&d->handlers_new);
        }
        flux_watcher_destroy (d->w);
        fastpath_free (&d->norm);
        fastpath_free (&d->group);
        free (d);
    }
}
Example #2
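/* Destroy a publisher: unregister its message handlers, free any queued
 * senders, and free the publisher itself, preserving the caller's errno.
 */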
void publisher_destroy (struct publisher *pub)
{
    if (pub) {
        int saved_errno = errno;
        flux_msg_handler_delvec (pub->handlers);
        if (pub->senders) {
            struct sender *sender;
            while ((sender = zlist_pop (pub->senders)))
                free (sender);
            zlist_destroy (&pub->senders);
        }
        free (pub);
        errno = saved_errno;
    }
}
Example #3
File: zmsg.c  Project: bartuer/bew
void
zmsg_destroy (zmsg_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        zmsg_t *self = *self_p;
        while (zlist_size (self->frames) > 0) {
            zframe_t *frame = (zframe_t *) zlist_pop (self->frames);
            zframe_destroy (&frame);
        }
        zlist_destroy (&self->frames);
        free (self);
        *self_p = NULL;
    }
}
Example #4
File: zmsg.c  Project: PSG-Luna/czmq
char *
zmsg_popstr (zmsg_t *self)
{
    assert (self);
    assert (zmsg_is (self));

    zframe_t *frame = (zframe_t *) zlist_pop (self->frames);
    char *string = NULL;
    if (frame) {
        self->content_size -= zframe_size (frame);
        string = zframe_strdup (frame);
        zframe_destroy (&frame);
    }
    return string;
}
Example #5
File: zloop.c  Project: xekoukou/czmq
void
zloop_destroy (zloop_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        zloop_t *self = *self_p;

        //  Destroy list of pollers
        while (zlist_size (self->pollers))
            free (zlist_pop (self->pollers));
        zlist_destroy (&self->pollers);

        //  Destroy list of timers
        while (zlist_size (self->timers))
            free (zlist_pop (self->timers));
        zlist_destroy (&self->timers);
        zlist_destroy (&self->zombies);

        free (self->pollset);
        free (self->pollact);
        free (self);
        *self_p = NULL;
    }
}
Example #6
File: zmsg.c  Project: dadavita/stalk
int
zmsg_send (zmsg_t **self_p, void *dest)
{
    assert (self_p);
    assert (dest);
    zmsg_t *self = *self_p;

    int rc = 0;
    void *handle = zsock_resolve (dest);
    if (self) {
        assert (zmsg_is (self));
        zframe_t *frame = (zframe_t *) zlist_pop (self->frames);
        while (frame) {
            rc = zframe_send (&frame, handle,
                              zlist_size (self->frames) ? ZFRAME_MORE : 0);
            if (rc != 0)
                break;
            frame = (zframe_t *) zlist_pop (self->frames);
        }
        if (rc == 0)
            zmsg_destroy (self_p);
    }
    return rc;
}
Example #7
File: req.c  Project: cigolabs/flux-core
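/* Module context destructor: destroy the ping-request hash, drain and
 * destroy any queued "clog" requests, then free the context.
 */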
static void freectx (void *arg)
{
    ctx_t *ctx = arg;
    flux_msg_t *msg;

    if (ctx) {
        zhash_destroy (&ctx->ping_requests);
        if (ctx->clog_requests) {
            while ((msg = zlist_pop (ctx->clog_requests)))
                flux_msg_destroy (msg);
            zlist_destroy (&ctx->clog_requests);
        }
        free (ctx);
    }
}
Example #8
File: req.c  Project: cigolabs/flux-core
/* Reply to all queued requests.
 */
void flush_request_cb (flux_t h, flux_msg_handler_t *w,
                       const flux_msg_t *msg, void *arg)
{
    ctx_t *ctx = getctx (h);
    flux_msg_t *req;

    while ((req = zlist_pop (ctx->clog_requests))) {
        /* send clog response */
        if (flux_respond (h, req, 0, NULL) < 0)
            flux_log_error (h, "%s: flux_respond", __FUNCTION__);
    }
    /* send flush response */
    if (flux_respond (h, msg, 0, NULL) < 0)
        flux_log_error (h, "%s: flux_respond", __FUNCTION__);
}
Example #9
// Handle input from client, on frontend
int s_handle_frontend(zloop_t *loop, zmq_pollitem_t *poller, void *arg)
{
	lbbroker_t *self = (lbbroker_t *)arg;
	zmsg_t *msg = zmsg_recv(self->frontend);
	if (msg) {
		zmsg_wrap(msg, (zframe_t *)zlist_pop(self->workers));
		zmsg_send(&msg, self->backend);

		// Cancel reader on frontend if we went from 1 to 0 workers
		if (zlist_size(self->workers) == 0) {
			zmq_pollitem_t poller = { self->frontend, 0, ZMQ_POLLIN };
			zloop_poller_end(loop, &poller);
		}
	}
	return 0;
}
Example #10
File: zmsg.c  Project: dadavita/stalk
void
zmsg_destroy (zmsg_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        zmsg_t *self = *self_p;
        assert (zmsg_is (self));
        zframe_t *frame;
        while ((frame = (zframe_t *) zlist_pop (self->frames)))
            zframe_destroy (&frame);
        zlist_destroy (&self->frames);
        self->tag = 0xDeadBeef;
        free (self);
        *self_p = NULL;
    }
}
Example #11
// Take all of the scheduled job events that were queued up while we
// weren't running and add those jobs to the set of running jobs. This
// also requires switching their state in the kvs (to trigger events
// in the scheduler)
static int handle_queued_events (ctx_t *ctx)
{
    job_t *job = NULL;
    int *jobid = NULL;
    flux_kvsdir_t *kvs_dir;
    flux_t *h = ctx->h;
    zlist_t *queued_events = ctx->queued_events;
    zlist_t *running_jobs = ctx->running_jobs;
    double sim_time = ctx->sim_state->sim_time;

    while (zlist_size (queued_events) > 0) {
        jobid = zlist_pop (queued_events);
        if (!(kvs_dir = job_kvsdir (ctx->h, *jobid)))
            log_err_exit ("job_kvsdir (id=%d)", *jobid);
        job = pull_job_from_kvs (*jobid, kvs_dir);
        if (update_job_state (ctx, *jobid, kvs_dir, J_STARTING, sim_time) < 0) {
            flux_log (h,
                      LOG_ERR,
                      "failed to set job %d's state to starting",
                      *jobid);
            return -1;
        }
        if (update_job_state (ctx, *jobid, kvs_dir, J_RUNNING, sim_time) < 0) {
            flux_log (h,
                      LOG_ERR,
                      "failed to set job %d's state to running",
                      *jobid);
            return -1;
        }
        if (update_job_state (ctx, *jobid, kvs_dir, J_COMPLETING, sim_time) < 0) {
            flux_log (h,
                      LOG_ERR,
                      "failed to set job %d's state to completing",
                      *jobid);
            return -1;
        }
        flux_log (h,
                  LOG_INFO,
                  "job %d's state to starting then running",
                  *jobid);
        job->start_time = ctx->sim_state->sim_time;
        zlist_append (running_jobs, job);
    }

    return 0;
}
Example #12
/* Reply to all queued requests.
 */
static int flush_request_cb (flux_t h, int typemask, zmsg_t **zmsg, void *arg)
{
    ctx_t *ctx = getctx (h);
    zmsg_t *z;

    while ((z = zlist_pop (ctx->clog_requests))) {
        /* send clog response */
        if (flux_err_respond (h, 0, &z) < 0)
            flux_log (h, LOG_ERR, "%s: flux_err_respond: %s", __FUNCTION__,
                          strerror (errno));
    }
    /* send flush response */
    if (flux_err_respond (h, 0, zmsg) < 0)
        flux_log (h, LOG_ERR, "%s: flux_err_respond: %s", __FUNCTION__,
                      strerror (errno));
    return 0;
}
Example #13
File: broker.c  Project: emef/sprk
// Send a message to the next available executor. Will attempt
// to send to all available executors until send succeeds, or
// returns -1 if all fail.
int
broker_send_to_executor (broker_t *self, zmsg_t *msg)
{
    int rc = -1;
    while (rc != 0 && zlist_size (self->executor_lb)) {
        zframe_t *executor_addr = (zframe_t *) zlist_pop (self->executor_lb);
        zmsg_prepend (msg, &executor_addr);

        // [executor] [context] [request]
        rc = zmsg_send (&msg, self->executors);
        if (rc != 0) {
            zframe_t *addr_discard = zmsg_pop (msg);
            zframe_destroy (&addr_discard);
        }
    }

    return rc;
}
Example #14
File: vtx_tcp.c  Project: imatix/vtx
static void
driver_destroy (driver_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        driver_t *self = *self_p;
        if (self->verbose)
            zclock_log ("I: (tcp) shutting down driver");
        while (zlist_size (self->vockets)) {
            vocket_t *vocket = (vocket_t *) zlist_pop (self->vockets);
            vocket_destroy (&vocket);
        }
        zlist_destroy (&self->vockets);
        zloop_destroy (&self->loop);
        free (self);
        *self_p = NULL;
    }
}
Example #15
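/* Try to write the message at the head of the client's output queue without
 * blocking.  If the fd is not ready, start the write watcher and leave the
 * message queued; on a successful send, pop the message and destroy it.
 */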
static int client_send_try (client_t *c)
{
    flux_msg_t *msg = zlist_head (c->outqueue);

    if (msg) {
        if (flux_msg_sendfd (c->fd, msg, &c->outbuf) < 0) {
            if (errno != EWOULDBLOCK && errno != EAGAIN)
                return -1;
            //flux_log (c->ctx->h, LOG_DEBUG, "send: client not ready");
            flux_fd_watcher_start (c->ctx->h, c->outw);
            errno = 0;
        } else {
            msg = zlist_pop (c->outqueue);
            flux_msg_destroy (msg);
        }
    }
    return 0;
}
Example #16
static void
client_destroy (client_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        client_t *self = *self_p;
        fmq_config_destroy (&self->config);
        fmq_msg_destroy (&self->request);
        fmq_msg_destroy (&self->reply);
        //  Destroy subscriptions
        while (zlist_size (self->subs)) {
            sub_t *sub = (sub_t *) zlist_pop (self->subs);
            sub_destroy (&sub);
        }
        zlist_destroy (&self->subs);
        free (self);
        *self_p = NULL;
    }
}
Example #17
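/* Reduction callback (apparently test code): drain every item from the
 * reduce handle into a temporary list, counting calls and items, then push
 * the items back unmodified.
 */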
void reduce (flux_reduce_t *r, int batchnum, void *arg)
{
    void *item;
    zlist_t *tmp = zlist_new ();

    if (!tmp)
        oom ();
    reduce_calls++;
    while ((item = flux_reduce_pop (r))) {
        if (zlist_push (tmp, item) < 0)
            oom ();
        reduce_items++;
    }
    while ((item = zlist_pop (tmp))) {
        if (flux_reduce_push (r, item) < 0)
            oom ();
    }
    zlist_destroy (&tmp);
}
Example #18
static void
client_destroy (client_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        client_t *self = *self_p;
        zframe_destroy (&self->address);
        fmq_msg_destroy (&self->request);
        fmq_msg_destroy (&self->reply);
        free (self->hashkey);
        while (zlist_size (self->patches)) {
            fmq_patch_t *patch = (fmq_patch_t *) zlist_pop (self->patches);
            fmq_patch_destroy (&patch);
        }
        zlist_destroy (&self->patches);
        free (self);
        *self_p = NULL;
    }
}
Example #19
static void
server_destroy (server_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        server_t *self = *self_p;
        zsocket_destroy (self->ctx, self->router);
        zconfig_destroy (&self->config);
        zhash_destroy (&self->clients);
        //  Destroy mount points
        while (zlist_size (self->mounts)) {
            mount_t *mount = (mount_t *) zlist_pop (self->mounts);
            mount_destroy (&mount);
        }
        zlist_destroy (&self->mounts);
        free (self);
        *self_p = NULL;
    }
}
Example #20
static void
mount_destroy (mount_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        mount_t *self = *self_p;
        free (self->location);
        free (self->alias);
        //  Destroy subscriptions
        while (zlist_size (self->subs)) {
            sub_t *sub = (sub_t *) zlist_pop (self->subs);
            sub_destroy (&sub);
        }
        zlist_destroy (&self->subs);
        fmq_dir_destroy (&self->dir);
        free (self);
        *self_p = NULL;
    }
}
Example #21
static void
mount_sub_store (mount_t *self, client_t *client, fmq_msg_t *request)
{
    assert (self);
    assert (self->subs);
    
    //  Store subscription along with any previous ones
    //  Coalesce subscriptions that are on same path
    char *path = fmq_msg_path (request);
    sub_t *sub = (sub_t *) zlist_first (self->subs);
    while (sub) {
        if (client == sub->client) {
            //  If old subscription is superset/same as new, ignore new
            if (strncmp (path, sub->path, strlen (sub->path)) == 0)
                return;
            else
            //  If new subscription is superset of old one, remove old
            if (strncmp (sub->path, path, strlen (path)) == 0) {
                zlist_remove (self->subs, sub);
                sub_destroy (&sub);
                sub = (sub_t *) zlist_first (self->subs);
            }
            else
                sub = (sub_t *) zlist_next (self->subs);
        }
        else
            sub = (sub_t *) zlist_next (self->subs);
    }
    //  New subscription for this client, append to our list
    sub = sub_new (client, path, fmq_msg_cache (request));
    zlist_append (self->subs, sub);

    //  If client requested resync, send full mount contents now
    if (fmq_msg_options_number (client->request, "RESYNC", 0) == 1) {
        zlist_t *patches = zdir_resync (self->dir, self->alias);
        while (zlist_size (patches)) {
            zdir_patch_t *patch = (zdir_patch_t *) zlist_pop (patches);
            sub_patch_add (sub, patch);
            zdir_patch_destroy (&patch);
        }
        zlist_destroy (&patches);
    }
}
Example #22
int main(void)
{
	zctx_t *ctx = zctx_new();
	lbbroker_t *self = (lbbroker_t *)zmalloc(sizeof(lbbroker_t));
	self->frontend = zsocket_new(ctx, ZMQ_ROUTER);
	self->backend = zsocket_new(ctx, ZMQ_ROUTER);

#if (defined (WIN32))
	zsocket_bind(self->frontend, "tcp://*:5672"); // frontend
	zsocket_bind(self->backend, "tcp://*:5673"); // backend
#else
	zsocket_bind(self->frontend, "ipc://frontend.ipc");
	zsocket_bind(self->backend, "ipc://backend.ipc");
#endif

	int client_nbr;
	for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
		zthread_new(client_task, NULL);
	int worker_nbr;
	for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
		zthread_new(worker_task, NULL);

	// Queue of available workers
	self->workers = zlist_new();

	// Prepare reactor and fire it up
	zloop_t *reactor = zloop_new();
	zmq_pollitem_t poller = { self->backend, 0, ZMQ_POLLIN };
	zloop_poller(reactor, &poller, s_handle_backend, self);
	zloop_start(reactor);
	zloop_destroy(&reactor);

	// When we're done, clean up properly
	while (zlist_size(self->workers)) {
		zframe_t *frame = (zframe_t *)zlist_pop(self->workers);
		zframe_destroy(&frame);
	}
	zlist_destroy(&self->workers);
	zctx_destroy(&ctx);
	free(self);
	return 0;
}
Example #23
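//  Destructor for a broker service (typically registered as a zhash free
//  function): discard any queued requests, free the blacklisted commands,
//  then release the service's lists, name, and the service itself.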
static void
s_service_destroy (void *argument)
{
    service_t *service = (service_t *) argument;
    while (zlist_size (service->requests)) {
        zmsg_t *msg = (zmsg_t*)zlist_pop (service->requests);
        zmsg_destroy (&msg);
    }
    //  Free memory holding blacklisted commands.  Re-fetch the list head
    //  each time so the loop terminates once the blacklist is empty.
    char *command = (char *) zlist_first (service->blacklist);
    while (command) {
        zlist_remove (service->blacklist, command);
        free (command);
        command = (char *) zlist_first (service->blacklist);
    }
    zlist_destroy (&service->requests);
    zlist_destroy (&service->waiting);
    zlist_destroy (&service->blacklist);
    free (service->name);
    free (service);
}
Example #24
static void freectx (void *arg)
{
    ctx_t *ctx = arg;
    zlist_t *keys = zhash_keys (ctx->ping_requests);
    char *key = zlist_first (keys);
    while (key) {
        zmsg_t *zmsg = zhash_lookup (ctx->ping_requests, key);
        zmsg_destroy (&zmsg);
        key = zlist_next (keys);
    }
    zlist_destroy (&keys);
    zhash_destroy (&ctx->ping_requests);

    zmsg_t *zmsg;
    while ((zmsg = zlist_pop (ctx->clog_requests)))
        zmsg_destroy (&zmsg);
    zlist_destroy (&ctx->clog_requests);

    free (ctx);
}
Example #25
static int
s_new_master (zloop_t *loop, zmq_pollitem_t *unused, void *args)
{
    clonesrv_t *self = (clonesrv_t *) args;

    self->master = TRUE;
    self->slave = FALSE;
    zmq_pollitem_t poller = { self->subscriber, 0, ZMQ_POLLIN };
    zloop_poller_end (bstar_zloop (self->bstar), &poller);

    //  Apply pending list to own hash table
    while (zlist_size (self->pending)) {
        kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending);
        kvmsg_set_sequence (kvmsg, ++self->sequence);
        kvmsg_send (kvmsg, self->publisher);
        kvmsg_store (&kvmsg, self->kvmap);
        zclock_log ("I: publishing pending=%d", (int) self->sequence);
    }
    return 0;
}
Example #26
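/* Requeue messages parked on the handler's backlog back onto the flux
 * handle.  Messages that cannot be requeued are destroyed; the largest
 * errno seen is restored and -1 returned if any requeue failed.
 */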
static int backlog_flush (flux_msg_handler_t *w)
{
    int errnum = 0;
    int rc = 0;

    if (w->backlog) {
        flux_msg_t *msg;
        while ((msg = zlist_pop (w->backlog))) {
            if (flux_requeue (w->d->h, msg, FLUX_RQ_TAIL) < 0) {
                if (errnum < errno) {
                    errnum = errno;
                    rc = -1;
                }
                flux_msg_destroy (msg);
            }
        }
    }
    if (errnum > 0)
        errno = errnum;
    return rc;
}
Example #27
static void
collect_data_to_send (client_t *self)
{
    zsys_info ("read %d bytes", (int) zpipes_msg_size (self->request));
    //  Do we have enough data to satisfy the read request?
    size_t required = zpipes_msg_size (self->request);
    
    //  If pipe was closed, we'll do a short read with as much
    //  data as we have pending
    if (required > self->pending && self->pipe == NULL)
        required = self->pending;

    if (self->pipe == NULL && self->pending == 0)
        engine_set_exception (self, pipe_shut_event);
    else
    if (self->pending >= required) {
        //  Create a bucket chunk with the required max size
        zchunk_t *bucket = zchunk_new (NULL, required);

        //  Now fill the bucket with chunks from our queue
        while (zchunk_size (bucket) < required) {
            //  Get next chunk and consume as much of it as possible
            zchunk_t *chunk = (zchunk_t *) zlist_pop (self->queue);
            assert (chunk);
            zchunk_consume (bucket, chunk);
            //  If chunk is exhausted, destroy it
            if (zchunk_exhausted (chunk))
                zchunk_destroy (&chunk);
            else {
                //  Push chunk back for next time
                zlist_push (self->queue, chunk);
                assert (zchunk_size (bucket) == required);
            }
        }
        zpipes_msg_set_chunk (self->reply, &bucket);
        self->pending -= required;
    }
    else
        engine_set_exception (self, not_enough_data_event);
}
Example #28
static void
client_destroy (client_t **self_p)
{
    assert (self_p);
    if (*self_p) {
        client_t *self = *self_p;
        fmq_config_destroy (&self->config);
        int server_nbr;
        for (server_nbr = 0; server_nbr < self->nbr_servers; server_nbr++) {
            server_t *server = self->servers [server_nbr];
            server_destroy (&server);
        }
        //  Destroy subscriptions
        while (zlist_size (self->subs)) {
            sub_t *sub = (sub_t *) zlist_pop (self->subs);
            sub_destroy (&sub);
        }
        zlist_destroy (&self->subs);
        free (self);
        *self_p = NULL;
    }
}
Example #29
File: cache.c  Project: tpatki/flux-core
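/* Expire cache entries that are valid, not dirty, and older than 'thresh'
 * epochs (thresh == 0 expires every clean, valid entry).  Returns the
 * number of entries removed.
 */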
int cache_expire_entries (struct cache *cache, int current_epoch, int thresh)
{
    zlist_t *keys;
    char *ref;
    struct cache_entry *hp;
    int count = 0;

    if (!(keys = zhash_keys (cache->zh)))
        oom ();
    while ((ref = zlist_pop (keys))) {
        if ((hp = zhash_lookup (cache->zh, ref))
            && !cache_entry_get_dirty (hp)
            && cache_entry_get_valid (hp)
            && (thresh == 0 || cache_entry_age (hp, current_epoch) > thresh)) {
                zhash_delete (cache->zh, ref);
                count++;
        }
        free (ref);
    }
    zlist_destroy (&keys);
    return count;
}
Example #30
// Remove completed jobs from the list of running jobs
// Update sched timer as necessary (to trigger an event in sched)
// Also change the state of the job in the KVS
static int handle_completed_jobs (ctx_t *ctx)
{
    double curr_progress;
    zlist_t *running_jobs = ctx->running_jobs;
    job_t *job = NULL;
    int num_jobs = zlist_size (running_jobs);
    double sim_time = ctx->sim_state->sim_time;

    // print_next_completing (running_jobs, ctx);

    while (num_jobs > 0) {
        job = zlist_pop (running_jobs);
        if (job->execution_time > 0) {
            curr_progress = calc_curr_progress (job, ctx->sim_state->sim_time);
        } else {
            curr_progress = 1;
            flux_log (ctx->h,
                      LOG_DEBUG,
                      "handle_completed_jobs found a job (%d) with execution "
                      "time <= 0 (%f), setting progress = 1",
                      job->id,
                      job->execution_time);
        }
        if (curr_progress < 1) {
            zlist_append (running_jobs, job);
        } else {
            flux_log (ctx->h,
                      LOG_DEBUG,
                      "handle_completed_jobs found a completed job");
            complete_job (ctx, job, sim_time);
        }
        num_jobs--;
    }

    return 0;
}