void rblf_mtrack(void *mem)
{
    unsigned long i;
    void *newmt;

    if (mtrack == NULL) {
        mtrack = calloc(MT_EXTEND, sizeof(char *));
        if (!mtrack)
            oom();
        mtend = MT_EXTEND;
    }
FIND_EMPTY_SLOT:
    for (i = 0; i < mtend; i++) {
        if (!mtrack[i]) {
            mtrack[i] = mem;
            return;
        }
    }
    /* No more room in array, extend.  Size arithmetic must be in units of
     * sizeof(char *), not bytes, and only the new tail needs zeroing. */
    newmt = realloc(mtrack, (mtend + MT_EXTEND) * sizeof(char *));
    if (!newmt)
        oom();
    memset((char **)newmt + mtend, 0, MT_EXTEND * sizeof(char *));
    mtrack = newmt;
    mtend += MT_EXTEND;
    goto FIND_EMPTY_SLOT;
}
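/* A minimal companion sketch (not from the original source): release every
 * allocation registered with rblf_mtrack() above and reset the tracker.
 * Assumes the same file-scope mtrack/mtend globals the tracker uses. */
void rblf_mtrack_free_all(void)
{
    unsigned long i;

    if (mtrack == NULL)
        return;
    for (i = 0; i < mtend; i++) {
        if (mtrack[i]) {
            free(mtrack[i]);
            mtrack[i] = NULL;
        }
    }
    free(mtrack);
    mtrack = NULL;
    mtend = 0;
}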
int epollAddEvent(epoller *epoll, int fd, int mask, eventProc *proc, void *privdata)
{
    if (fd >= MAX_EVENT_CNT) {
        oom("epollAddEvent() : fd max limited");
        return -1;
    }
    epollEvent *ev = &epoll->events[fd];
    struct epoll_event ee;

    /* If the fd was already monitored for some event, we need a MOD
     * operation.  Otherwise we need an ADD operation. */
    int op = ev->mask == 0 ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;

    ee.events = mask | ev->mask; /* merge old events */
    ee.data.u64 = 0;             /* avoid valgrind warning */
    ee.data.fd = fd;
    //printf("epoll add %d : 0x%x\n", fd, mask);
    if (epoll_ctl(epoll->epfd, op, fd, &ee) == -1) {
        oom("epollAddEvent() : epoll_ctl error");
        return -1;
    }

    ev->mask |= mask;
    if (mask & EPOLLIN) ev->readProc = proc;
    if (mask & EPOLLOUT) ev->writeProc = proc;
    ev->privdata = privdata;
    if (fd > epoll->maxfd) epoll->maxfd = fd;
    return 0;
}
echoServerContext *createTcpServer(epoller *epoll, int port)
{
    int s;
    struct sockaddr_in sa;

    if ((s = createSocket()) == -1) {
        oom("socket creation error : %s", strerror(errno));
        return NULL;
    }

    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_port = htons(port);
    sa.sin_addr.s_addr = htonl(INADDR_ANY);
    if (bind(s, (struct sockaddr *)&sa, sizeof(sa)) == -1) {
        close(s);
        oom("bind error : %s", strerror(errno));
        return NULL;
    }
    if (listen(s, 511) == -1) { /* the magic 511 constant is from nginx */
        close(s);
        oom("listen error : %s", strerror(errno));
        return NULL;
    }

    echoServerContext *server = malloc(sizeof(echoServerContext));
    if (server == NULL) {
        close(s);
        oom("malloc error");
        return NULL;
    }
    memset(server, 0, sizeof(echoServerContext));
    server->port = port;
    server->epoll = epoll;
    server->serverfd = s;
    return server;
}
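/* A hedged usage sketch (not from the original source): create the listening
 * socket and register it for readability with epollAddEvent() above.  The
 * epoller constructor name (epollCreate) and the accept handler (acceptProc)
 * are assumptions for illustration; eventProc is assumed to be a function
 * typedef, as its use as `eventProc *proc` suggests. */
extern eventProc acceptProc;   /* hypothetical accept callback */

int start_echo_server(void)
{
    epoller *epoll = epollCreate();   /* hypothetical constructor */
    echoServerContext *server = createTcpServer(epoll, 7777);

    if (server == NULL)
        return -1;
    /* Watch the listening fd for incoming connections. */
    return epollAddEvent(epoll, server->serverfd, EPOLLIN,
                         acceptProc, server);
}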
void callgraph_add(char *dst, long src_pc)
{
    struct cg_pclist *pcl;
    struct callgraphe *cge;

    /* Create cg_pclist structure. */
    pcl = malloc(sizeof(struct cg_pclist));
    if (pcl == NULL)
        oom();
    pcl->pc = src_pc;

    /* Search for dst's pc_list. */
    cge = callgraph;
    while (cge != NULL) {
        /* Exit if found. */
        if (!strcmp(cge->symbol, dst))
            break;
        cge = cge->next;
    }

    /* If not found, create. */
    if (cge == NULL) {
        cge = malloc(sizeof(struct callgraphe));
        if (cge == NULL)
            oom();
        cge->symbol = dst;
        cge->pclist = NULL;
        cge->next = callgraph;
        callgraph = cge;
    }

    /* Add pc to cge's pclist. */
    pcl->next = cge->pclist;
    cge->pclist = pcl;
}
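/* A minimal sketch (not from the original source) showing how the call
 * graph built by callgraph_add() can be walked: each callgraphe node holds
 * a symbol plus a singly-linked list of caller PCs, using only the fields
 * visible above. */
void callgraph_dump(void)
{
    struct callgraphe *cge;
    struct cg_pclist *pcl;

    for (cge = callgraph; cge != NULL; cge = cge->next) {
        printf("%s:", cge->symbol);
        for (pcl = cge->pclist; pcl != NULL; pcl = pcl->next)
            printf(" 0x%lx", pcl->pc);
        printf("\n");
    }
}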
int wait_destroy_match (waitqueue_t *q, wait_compare_f cb, void *arg)
{
    zlist_t *tmp = NULL;
    wait_t *w;
    int rc = -1;
    int count = 0;

    assert (q->magic == WAITQUEUE_MAGIC);
    w = zlist_first (q->q);
    while (w) {
        if (w->hand.msg && cb != NULL && cb (w->hand.msg, arg)) {
            if (!tmp && !(tmp = zlist_new ()))
                oom ();
            if (zlist_append (tmp, w) < 0)
                oom ();
            w->hand.cb = NULL; // prevent wait_runone from restarting handler
            count++;
        }
        w = zlist_next (q->q);
    }
    if (tmp) {
        while ((w = zlist_pop (tmp))) {
            zlist_remove (q->q, w);
            if (--w->usecount == 0)
                wait_destroy (w, NULL);
        }
    }
    rc = count;
    zlist_destroy (&tmp);
    return rc;
}
void testoom(int mempolicy, int lite, int retcode, int allow_sigkill)
{
    int ksm_run_orig;

    set_global_mempolicy(mempolicy);

    tst_res(TINFO, "start normal OOM testing.");
    oom(NORMAL, lite, retcode, allow_sigkill);

    tst_res(TINFO, "start OOM testing for mlocked pages.");
    oom(MLOCK, lite, retcode, allow_sigkill);

    /*
     * Skip oom(KSM) if lite == 1, since limit_in_bytes may vary from
     * run to run, which isn't reliable for the oom03 cgroup test.
     */
    if (access(PATH_KSM, F_OK) == -1 || lite == 1) {
        tst_res(TINFO, "KSM is not configured or lite == 1, "
                "skip OOM test for KSM pages");
    } else {
        tst_res(TINFO, "start OOM testing for KSM pages.");
        SAFE_FILE_SCANF(PATH_KSM "run", "%d", &ksm_run_orig);
        SAFE_FILE_PRINTF(PATH_KSM "run", "1");
        oom(KSM, lite, retcode, allow_sigkill);
        SAFE_FILE_PRINTF(PATH_KSM "run", "%d", ksm_run_orig);
    }
}
void send_czmq (char *buf, int len)
{
    zctx_t *zctx;
    void *zs;
    zmsg_t *zmsg;

    if (!(zctx = zctx_new ()))
        log_err_exit ("C: zctx_new");
    if (lopt) /* zctx linger default = 0 (flush none) */
        zctx_set_linger (zctx, linger);
    if (!(zs = zsocket_new (zctx, ZMQ_DEALER)))
        log_err_exit ("C: zsocket_new");
    //if (lopt) // doesn't work here
    //    zsocket_set_linger (zs, linger);
    if (iopt)
        zsocket_set_immediate (zs, imm);
    //zsocket_set_sndhwm (zs, 0); /* unlimited */
    if (zsocket_connect (zs, "%s", uri) < 0)
        log_err_exit ("C: zsocket_connect");
    if (!(zmsg = zmsg_new ()))
        oom ();
    if (zmsg_pushmem (zmsg, buf, len) < 0)
        oom ();
    if (zmsg_send (&zmsg, zs) < 0)
        log_err_exit ("C: zmsg_send");
    if (sleep_usec > 0)
        usleep (sleep_usec);
    zctx_destroy (&zctx);
}
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
                             unsigned long address)
{
    pgd_t * pgd;
    pmd_t * pmd;
    pte_t * pte;

    if (page >= high_memory)
        printk("put_dirty_page: trying to put page %08lx at %08lx\n", page, address);
    if (mem_map[MAP_NR(page)].count != 1)
        printk("mem_map disagrees with %08lx at %08lx\n", page, address);
    pgd = pgd_offset(tsk->mm, address);
    pmd = pmd_alloc(pgd, address);
    if (!pmd) {
        free_page(page);
        oom(tsk);
        return 0;
    }
    pte = pte_alloc(pmd, address);
    if (!pte) {
        free_page(page);
        oom(tsk);
        return 0;
    }
    if (!pte_none(*pte)) {
        printk("put_dirty_page: page already exists\n");
        free_page(page);
        return 0;
    }
    flush_page_to_ram(page);
    set_pte(pte, pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY))));
    /* no need for invalidate */
    return page;
}
int main(int argc, char *argv[])
{
    int lc;

    tst_parse_opts(argc, argv, NULL, NULL);

#if __WORDSIZE == 32
    tst_brkm(TCONF, NULL, "test is not designed for 32-bit system.");
#endif

    setup();

    for (lc = 0; TEST_LOOPING(lc); lc++) {
        tst_count = 0;

        /* we expect mmap to fail before OOM is hit */
        set_sys_tune("overcommit_memory", 2, 1);
        oom(OVERCOMMIT, 0, ENOMEM, 0);

        /* with overcommit_memory set to 0 or 1 there's no
         * guarantee that mmap fails before OOM */
        set_sys_tune("overcommit_memory", 0, 1);
        oom(OVERCOMMIT, 0, ENOMEM, 1);

        set_sys_tune("overcommit_memory", 1, 1);
        testoom(0, 0, ENOMEM, 1);
    }

    cleanup();
    tst_exit();
}
/* N.B. services is hardwired to test1,test2,testN, where N is the local
 * broker rank.  This is a specific setup for the flux-module test.  This
 * base component does not perform message routing to its extension modules.
 */
static json_t *module_list (void)
{
    json_t *mods;
    zlist_t *keys;
    module_t *m;
    char *name;
    char rankstr[16];
    int n;

    if (!(mods = json_array ()))
        oom ();
    if (!(keys = zhash_keys (modules)))
        oom ();
    name = zlist_first (keys);
    n = snprintf (rankstr, sizeof (rankstr), "rank%d", (int)rank);
    assert (n < sizeof (rankstr));
    while (name) {
        json_t *o;
        m = zhash_lookup (modules, name);
        if (!(o = json_pack ("{s:s s:i s:s s:i s:i s:[s,s,s]}",
                             "name", m->name,
                             "size", m->size,
                             "digest", m->digest,
                             "idle", m->idle,
                             "status", m->status,
                             "services", "test1", "test2", rankstr)))
            oom ();
        if (json_array_append_new (mods, o) < 0)
            oom ();
        name = zlist_next (keys);
    }
    zlist_destroy (&keys);
    return mods;
}
//! Returns a dictionary of models: methods
int db_load_models(PyObject **py_models)
{
    /*!
     * Returns a dictionary of models and their methods.
     *
     * @py_models Pointer to dictionary
     * @return 0 on success, -1 on error
     */
    struct dirent *dp;
    DIR *dir = opendir(config_dir_models);
    iks *xml;

    if (dir == NULL) {
        log_error("Unable to open models directory: %s\n", config_dir_models);
        return -1;
    }

    *py_models = PyDict_New();

    // Iterate over all files under models directory
    while ((dp = readdir(dir)) != NULL) {
        if (dp->d_name[0] == '.')
            continue;

        // Load XML
        int size = strlen(config_dir_models) + 1 + strlen(dp->d_name) + 1;
        char *fn_xml = malloc(size);
        if (fn_xml == NULL)
            oom();
        snprintf(fn_xml, size, "%s/%s", config_dir_models, dp->d_name);
        fn_xml[size - 1] = 0;
        switch (iks_load(fn_xml, &xml)) {
        case IKS_NOMEM:
            free(fn_xml);
            oom();
        case IKS_FILE_RWERR:
        case IKS_FILE_NOACCESS:
            log_error("Unable to open XML: %s\n", fn_xml);
            closedir(dir);
            free(fn_xml);
            return -1;
        }

        // Validate XML
        if (db_validate_model(xml, fn_xml) != 0) {
            closedir(dir);
            iks_delete(xml);
            free(fn_xml);
            return -1;
        }
        free(fn_xml);

        // Load model
        db_load_model(xml, py_models);
    }
    closedir(dir);

    return 0;
}
static int backlog_append (flux_msg_handler_t *w, flux_msg_t **msg)
{
    if (!w->backlog && !(w->backlog = zlist_new ()))
        oom ();
    if (zlist_append (w->backlog, *msg) < 0)
        oom ();
    *msg = NULL;
    return 0;
}
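/* A minimal companion sketch (not part of the original source): drain the
 * backlog built by backlog_append() above, handing each message to a
 * caller-supplied dispatch function.  The dispatch callback is an
 * assumption for illustration. */
static void backlog_drain (flux_msg_handler_t *w,
                           void (*dispatch) (flux_msg_t *msg))
{
    flux_msg_t *msg;

    if (!w->backlog)
        return;
    while ((msg = zlist_pop (w->backlog)))
        dispatch (msg); /* receiver takes ownership of msg */
}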
/* This test is to make sure that deferred responses are handled in order.
 * Arrange for module to source 10K sequenced responses.  Messages 5000-5499
 * are "put back" on the handle using flux_putmsg().  We ensure that
 * the 10K messages are nonetheless received in order.
 */
void test_putmsg (flux_t *h, uint32_t nodeid)
{
    flux_future_t *f;
    const char *json_str;
    const int count = 10000;
    const int defer_start = 5000;
    const int defer_count = 500;
    json_object *in = Jnew ();
    json_object *out = NULL;
    int seq, myseq = 0;
    zlist_t *defer = zlist_new ();
    bool popped = false;
    flux_msg_t *z;

    if (!defer)
        oom ();
    Jadd_int (in, "count", count);
    if (!(f = flux_rpc (h, "req.nsrc", Jtostr (in), FLUX_NODEID_ANY,
                        FLUX_RPC_NORESPONSE)))
        log_err_exit ("%s", __FUNCTION__);
    flux_future_destroy (f);
    do {
        flux_msg_t *msg = flux_recv (h, FLUX_MATCH_ANY, 0);
        if (!msg)
            log_err_exit ("%s", __FUNCTION__);
        if (flux_response_decode (msg, NULL, &json_str) < 0)
            log_msg_exit ("%s: decode", __FUNCTION__);
        if (!json_str || !(out = Jfromstr (json_str))
                      || !Jget_int (out, "seq", &seq))
            log_msg_exit ("%s: decode - payload", __FUNCTION__);
        Jput (out);
        if (seq >= defer_start && seq < defer_start + defer_count && !popped) {
            if (zlist_append (defer, msg) < 0)
                oom ();
            if (seq == defer_start + defer_count - 1) {
                while ((z = zlist_pop (defer))) {
                    if (flux_requeue (h, z, FLUX_RQ_TAIL) < 0)
                        log_err_exit ("%s: flux_requeue", __FUNCTION__);
                    flux_msg_destroy (z);
                }
                popped = true;
            }
            continue;
        }
        if (seq != myseq)
            log_msg_exit ("%s: expected %d got %d", __FUNCTION__, myseq, seq);
        myseq++;
        flux_msg_destroy (msg);
    } while (myseq < count);
    zlist_destroy (&defer);
    Jput (in);
}
static void newgroup(tarjan *t)
{
    uint32_t *group = malloc(sizeof(*group));
    if (!group)
        oom(t);

    // Push group and empty group leader (we'll fill in leader later).
    if (!upb_inttable_push(&t->groups, upb_value_ptr(group)) ||
        !upb_inttable_push(&t->groups, upb_value_ptr(NULL))) {
        free(group);
        oom(t);
    }

    *group = 0;
}
int open_file(const char* prefix, const char* filename)
{
    static str fullname;

    if (!str_truncate(&fullname, 0)) oom();
    if (prefix != 0) {
        if (!str_copys(&fullname, prefix)) oom();
        if (!str_catc(&fullname, '/')) oom();
    }
    if (!str_cats(&fullname, filename)) oom();
    return open(fullname.s, O_RDONLY);
}
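/* A hedged usage sketch (not from the original source): open a file under
 * an optional prefix, falling back to the bare filename on failure.  The
 * directory path below is purely illustrative. */
int read_config_fd(void)
{
    int fd = open_file("/etc/myapp", "config"); /* hypothetical path */
    if (fd < 0)
        fd = open_file(0, "config");            /* try the bare name */
    return fd;                                  /* caller close()s it */
}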
static bool nodeset_op_string (nodeset_t *n, op_t op, const char *str)
{
    char *cpy;
    int len;
    char *p, *s, *saveptr, *a1;
    uint32_t lo, hi;
    int count = 0;

    len = strlen (str);
    if (str[0] == '[' && str[len - 1] == ']') { /* hostlist compat */
        if (!(cpy = strdup (str + 1)))
            oom ();
        cpy[len - 2] = '\0';
    } else if (!(cpy = strdup (str)))
        oom ();
    a1 = cpy;
    while ((s = strtok_r (a1, ",", &saveptr))) {
        if ((p = strchr (s, '-'))) {
            *p = '\0';
            if (!str2rank (s, &lo) || !str2rank (p + 1, &hi))
                break;
            if (op == OP_DEL) {
                nodeset_delete_range (n, hi, lo);
            } else if (op == OP_ADD) {
                if (!nodeset_add_range (n, hi, lo))
                    break;
            } else if (op == OP_TEST) {
                if (!nodeset_test_range (n, hi, lo))
                    break;
            }
        } else {
            if (!str2rank (s, &lo))
                break;
            if (op == OP_DEL) {
                nodeset_delete_rank (n, lo);
            } else if (op == OP_ADD) {
                if (!nodeset_add_rank (n, lo))
                    break;
            } else if (op == OP_TEST) {
                if (!nodeset_test_rank (n, lo))
                    break;
            }
        }
        a1 = NULL;
        count++;
    }
    free (cpy);
    if (s || (count == 0 && strlen (str) > 0))
        return false;
    return true;
}
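/* A minimal sketch (not from the original source) of the set syntax that
 * nodeset_op_string() accepts, driven against a caller-supplied nodeset so
 * no constructor needs to be assumed. */
static void nodeset_syntax_demo (nodeset_t *n)
{
    nodeset_op_string (n, OP_ADD, "0");        /* single rank             */
    nodeset_op_string (n, OP_ADD, "2-5");      /* inclusive range         */
    nodeset_op_string (n, OP_ADD, "7,9-10");   /* mixed list              */
    nodeset_op_string (n, OP_TEST, "[2-5,7]"); /* hostlist-style brackets */
}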
/* This test is to make sure that deferred responses are handled in order.
 * Arrange for module to source 10K sequenced responses.  Messages 5000-5499
 * are "put back" on the handle using flux_putmsg().  We ensure that
 * the 10K messages are nonetheless received in order.
 */
void test_putmsg (flux_t *h, uint32_t nodeid)
{
    flux_future_t *f;
    const char *json_str;
    const int count = 10000;
    const int defer_start = 5000;
    const int defer_count = 500;
    int seq, myseq = 0;
    zlist_t *defer = zlist_new ();
    bool popped = false;
    flux_msg_t *z;
    json_t *o;

    if (!defer)
        oom ();
    if (!(f = flux_rpc_pack (h, "req.nsrc", FLUX_NODEID_ANY,
                             FLUX_RPC_NORESPONSE, "{s:i}", "count", count)))
        log_err_exit ("%s", __FUNCTION__);
    flux_future_destroy (f);
    do {
        flux_msg_t *msg = flux_recv (h, FLUX_MATCH_ANY, 0);
        if (!msg)
            log_err_exit ("%s", __FUNCTION__);
        if (flux_response_decode (msg, NULL, &json_str) < 0)
            log_msg_exit ("%s: decode", __FUNCTION__);
        if (!json_str || !(o = json_loads (json_str, 0, NULL))
                      || json_unpack (o, "{s:i}", "seq", &seq) < 0)
            log_msg_exit ("%s: decode - payload", __FUNCTION__);
        json_decref (o);
        if (seq >= defer_start && seq < defer_start + defer_count && !popped) {
            if (zlist_append (defer, msg) < 0)
                oom ();
            if (seq == defer_start + defer_count - 1) {
                while ((z = zlist_pop (defer))) {
                    if (flux_requeue (h, z, FLUX_RQ_TAIL) < 0)
                        log_err_exit ("%s: flux_requeue", __FUNCTION__);
                    flux_msg_destroy (z);
                }
                popped = true;
            }
            continue;
        }
        if (seq != myseq)
            log_msg_exit ("%s: expected %d got %d", __FUNCTION__, myseq, seq);
        myseq++;
        flux_msg_destroy (msg);
    } while (myseq < count);
    zlist_destroy (&defer);
}
static ctx_t *getctx (flux_t h)
{
    ctx_t *ctx = (ctx_t *)flux_aux_get (h, "req");

    if (!ctx) {
        ctx = xzmalloc (sizeof (*ctx));
        ctx->h = h;
        if (!(ctx->ping_requests = zhash_new ()))
            oom ();
        if (!(ctx->clog_requests = zlist_new ()))
            oom ();
        flux_aux_set (h, "req", ctx, freectx);
    }
    return ctx;
}
static jscctx_t *getctx (flux_t h)
{
    jscctx_t *ctx = (jscctx_t *)flux_aux_get (h, "jstatctrl");

    if (!ctx) {
        ctx = xzmalloc (sizeof (*ctx));
        if (!(ctx->active_jobs = zhash_new ()))
            oom ();
        if (!(ctx->callbacks = zlist_new ()))
            oom ();
        ctx->first_time = 1;
        ctx->h = h;
        flux_aux_set (h, "jstatctrl", ctx, freectx);
    }
    return ctx;
}
static client_t *client_create (ctx_t *ctx, int fd)
{
    client_t *c;
    socklen_t crlen = sizeof (c->ucred);
    flux_t h = ctx->h;

    c = xzmalloc (sizeof (*c));
    c->fd = fd;
    if (!(c->uuid = zuuid_new ()))
        oom ();
    c->ctx = ctx;
    if (!(c->disconnect_notify = zhash_new ()))
        oom ();
    if (!(c->subscriptions = zlist_new ()))
        oom ();
    if (!(c->outqueue = zlist_new ()))
        oom ();
    if (getsockopt (fd, SOL_SOCKET, SO_PEERCRED, &c->ucred, &crlen) < 0) {
        flux_log (h, LOG_ERR, "getsockopt SO_PEERCRED: %s", strerror (errno));
        goto error;
    }
    assert (crlen == sizeof (c->ucred));
    /* Deny connections by uid other than session owner for now. */
    if (c->ucred.uid != ctx->session_owner) {
        flux_log (h, LOG_ERR, "connect by uid=%d pid=%d denied",
                  c->ucred.uid, (int)c->ucred.pid);
        goto error;
    }
    c->inw = flux_fd_watcher_create (fd, FLUX_POLLIN, client_read_cb, c);
    c->outw = flux_fd_watcher_create (fd, FLUX_POLLOUT, client_write_cb, c);
    if (!c->inw || !c->outw) {
        flux_log (h, LOG_ERR, "flux_fd_watcher_create: %s", strerror (errno));
        goto error;
    }
    flux_fd_watcher_start (h, c->inw);
    flux_msg_iobuf_init (&c->inbuf);
    flux_msg_iobuf_init (&c->outbuf);
    if (set_nonblock (c->fd, true) < 0) {
        flux_log (h, LOG_ERR, "set_nonblock: %s", strerror (errno));
        goto error;
    }
    return (c);
error:
    client_destroy (c);
    return NULL;
}
/* Timer adds zmsgs to zlist, then stops reactor after 100. */
void test_ev_zlist (void)
{
    struct ev_loop *loop;
    ev_zlist list_w;
    ev_timer timer_w;
    zlist_t *l;
    zmsg_t *zmsg;

    ok ((loop = ev_loop_new (EVFLAG_AUTO)) != NULL,
        "ev_loop_new works");
    if (!(l = zlist_new ()) || !(zmsg = zmsg_new ())
                            || zlist_append (l, zmsg) < 0)
        oom ();

    ev_zlist_init (&list_w, list_cb, l, EV_READ);
    ev_timer_init (&timer_w, list_timer_cb, 1E-3, 1E-3);
    timer_w.data = l;
    ev_zlist_start (loop, &list_w);
    ev_timer_start (loop, &timer_w);
    ok (ev_run (loop, 0) != 0 && zlist_size (l) == 0,
        "ev_zlist handler ran 100 times");
    ev_zlist_stop (loop, &list_w);
    ev_timer_stop (loop, &timer_w);

    if (l)
        zlist_destroy (&l);
    ev_loop_destroy (loop);
}
/* Return 'n' sequenced responses. */
static int nsrc_request_cb (flux_t h, int typemask, zmsg_t **zmsg, void *arg)
{
    JSON o = NULL;
    int i, count;

    if (flux_json_request_decode (*zmsg, &o) < 0) {
        if (flux_err_respond (h, errno, zmsg) < 0)
            flux_log (h, LOG_ERR, "%s: flux_err_respond: %s",
                      __FUNCTION__, strerror (errno));
        goto done;
    }
    if (!Jget_int (o, "count", &count)) {
        if (flux_err_respond (h, EPROTO, zmsg) < 0)
            flux_log (h, LOG_ERR, "%s: flux_err_respond: %s",
                      __FUNCTION__, strerror (errno));
        goto done;
    }
    for (i = 0; i < count; i++) {
        zmsg_t *cpy = zmsg_dup (*zmsg);
        if (!cpy)
            oom ();
        Jadd_int (o, "seq", i);
        if (flux_json_respond (h, o, &cpy) < 0)
            flux_log (h, LOG_ERR, "%s: flux_json_respond: %s",
                      __FUNCTION__, strerror (errno));
        zmsg_destroy (&cpy);
    }
    zmsg_destroy (zmsg);
done:
    Jput (o);
    return 0;
}
bool nodeset_resize (nodeset_t *n, uint32_t size)
{
    assert (n->magic == NS_MAGIC);
    uint32_t r;
    Veb T;

    if (size < veb_minsize)   /* don't allow size below minimum */
        size = veb_minsize;

    if (size < NS_SIZE (n)) { /* if shrinking, bump size up to */
        r = NS_FIRST (n);     /*   fit highest rank in set     */
        while (r < NS_SIZE (n)) {
            if (r >= size)
                size = r + 1;
            r = NS_NEXT (n, r);
        }
    }
    if (size != NS_SIZE (n)) {
        T = vebnew (size, 0);
        if (!T.D)
            oom ();
        r = NS_FIRST (n);
        while (r < NS_SIZE (n)) {
            vebput (T, r);
            r = NS_NEXT (n, r);
        }
        free (n->T.D);
        n->T = T;
    }
    return true;
}
void *xrealloc(void *oldp, size_t sz)
{
    void *p = realloc(oldp, sz);

    if (!p)
        oom();
    return p;
}
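/* A minimal usage sketch (not from the original source): grow a dynamic
 * buffer with xrealloc(), which either succeeds or never returns, so the
 * caller needs no NULL check. */
char *grow_buffer(char *buf, size_t *cap, size_t used, size_t extra)
{
    if (used + extra > *cap) {
        *cap = (used + extra) * 2;  /* amortized doubling */
        buf = xrealloc(buf, *cap);
    }
    return buf;
}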
void epollDelEvent(epoller *epoll, int fd, int mask)
{
    if (fd >= MAX_EVENT_CNT) {
        oom("epollDelEvent() : fd max limited");
        return;
    }
    epollEvent *ev = &epoll->events[fd];
    if (ev->mask == 0) {
        printf("event mask is zero : fd = %d\n", fd);
        return;
    }

    ev->mask = ev->mask & (~mask);
    if (fd == epoll->maxfd && ev->mask == 0) {
        /* Update the max fd */
        int j;
        for (j = epoll->maxfd - 1; j >= 0; j--)
            if (epoll->events[j].mask != 0)
                break;
        epoll->maxfd = j;
    }

    struct epoll_event ee;
    int op = (ev->mask == 0) ? EPOLL_CTL_DEL : EPOLL_CTL_MOD;
    ee.events = ev->mask;
    ee.data.u64 = 0; /* avoid valgrind warning */
    ee.data.fd = fd;
    /* Note: kernels < 2.6.9 require a non-NULL event pointer even for
     * EPOLL_CTL_DEL. */
    //printf("epoll delete %d : op = %d, mask = 0x%x\n", fd, op, ee.events);
    epoll_ctl(epoll->epfd, op, fd, &ee);
}
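/* A minimal usage sketch (not from the original source): once a write
 * handler has flushed its buffer, drop only the EPOLLOUT interest; any
 * EPOLLIN interest registered on the fd remains in place because
 * epollDelEvent() clears just the requested bits. */
static void onFlushComplete(epoller *epoll, int fd)
{
    epollDelEvent(epoll, fd, EPOLLOUT);
}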
void list_insert(struct list_cursor *cur, void *item)
{
    assert(cur);
    assert(cur->list);
    struct list_item *node = calloc(1, sizeof(*node));
    if (!node)
        oom();
    node->list = cur->list;
    node->data = item;
    assert(cur->list->length < UINT_MAX);
    ++cur->list->length;
    if (cur->target) {
        /* Insert immediately before the cursor's target node. */
        struct list_item *right = cur->target;
        struct list_item *left = right->prev;
        node->next = right;
        node->prev = left;
        right->prev = node;
        if (left) {
            left->next = node;
        } else {
            cur->list->head = node;
        }
    } else {
        /* Cursor is past the end: append as the new tail. */
        struct list_item *tail = cur->list->tail;
        node->prev = tail;
        cur->list->tail = node;
        if (tail) {
            assert(tail->next == NULL);
            tail->next = node;
        } else {
            assert(!cur->list->head);
            cur->list->head = node;
        }
    }
}
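/* A hedged usage sketch (not from the original source): append two items to
 * a list by inserting at a cursor whose target is NULL, i.e. past the end.
 * Assumes struct list_cursor has only the list/target members used by
 * list_insert() above. */
void list_insert_demo(struct list *lst)
{
    struct list_cursor cur = { .list = lst, .target = NULL };

    list_insert(&cur, "first");  /* becomes both head and tail */
    list_insert(&cur, "second"); /* appended after "first" */
}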
attr_t *attr_create (void)
{
    attr_t *attrs = xzmalloc (sizeof (*attrs));

    if (!(attrs->hash = zhash_new ()))
        oom ();
    return attrs;
}
/* Received response message from broker.
 * Look up the sender uuid in the clients list and deliver.
 * Responses for disconnected clients are silently discarded.
 */
static void response_cb (flux_t h, flux_msg_watcher_t *w,
                         const flux_msg_t *msg, void *arg)
{
    ctx_t *ctx = arg;
    char *uuid = NULL;
    client_t *c;
    flux_msg_t *cpy = flux_msg_copy (msg, true);

    if (!cpy)
        oom ();
    if (flux_msg_pop_route (cpy, &uuid) < 0)
        goto done;
    if (flux_msg_clear_route (cpy) < 0)
        goto done;
    c = zlist_first (ctx->clients);
    while (c) {
        if (!strcmp (uuid, zuuid_str (c->uuid))) {
            if (client_send_nocopy (c, &cpy) < 0) { /* FIXME handle errors */
                flux_log (h, LOG_ERR, "%s: client_send %s: %s", __FUNCTION__,
                          zuuid_str (c->uuid), strerror (errno));
                errno = 0;
            }
            break;
        }
        c = zlist_next (ctx->clients);
    }
    if (uuid)
        free (uuid);
done:
    flux_msg_destroy (cpy);
}
/* Accept a connection from new client. */
static void listener_cb (flux_t h, flux_fd_watcher_t *w, int fd,
                         int revents, void *arg)
{
    ctx_t *ctx = arg;

    if (revents & FLUX_POLLIN) {
        client_t *c;
        int cfd;

        if ((cfd = accept4 (fd, NULL, NULL, SOCK_CLOEXEC)) < 0) {
            flux_log (h, LOG_ERR, "accept: %s", strerror (errno));
            goto done;
        }
        if (!(c = client_create (ctx, cfd))) {
            close (cfd);
            goto done;
        }
        if (zlist_append (ctx->clients, c) < 0)
            oom ();
    }
    if (revents & FLUX_POLLERR) {
        flux_log (h, LOG_ERR, "poll listen fd: %s", strerror (errno));
    }
done:
    return;
}
int kz_close (kz_t *kz)
{
    int rc = -1;
    char *json_str = NULL;
    char *key = NULL;

    if ((kz->flags & KZ_FLAGS_WRITE)) {
        if (!(kz->flags & KZ_FLAGS_RAW)) {
            if (asprintf (&key, "%s.%.6d", kz->name, kz->seq++) < 0)
                oom ();
            if (!(json_str = zio_json_encode (NULL, 0, true))) { /* EOF */
                errno = EPROTO;
                goto done;
            }
            if (kvs_put (kz->h, key, json_str) < 0)
                goto done;
        }
        if (!(kz->flags & KZ_FLAGS_NOCOMMIT_CLOSE)) {
            if (kvs_commit (kz->h) < 0)
                goto done;
        }
        if (kz->nprocs > 0 && kz->grpname) {
            if (kz_fence (kz) < 0)
                goto done;
        }
    }
    rc = 0;
done:
    if (json_str)
        free (json_str);
    if (key)
        free (key);
    kz_destroy (kz);
    return rc;
}