/* ARGSUSED */ void lde_dispatch_imsg(int fd, short event, void *bula) { struct imsgev *iev = bula; struct imsgbuf *ibuf = &iev->ibuf; struct imsg imsg; struct lde_nbr rn, *nbr; struct map map; struct timespec tp; struct in_addr addr; ssize_t n; time_t now; int state, shut = 0, verbose; if (event & EV_READ) { if ((n = imsg_read(ibuf)) == -1) fatal("imsg_read error"); if (n == 0) /* connection closed */ shut = 1; } if (event & EV_WRITE) { if (msgbuf_write(&ibuf->w) == -1) fatal("msgbuf_write"); } clock_gettime(CLOCK_MONOTONIC, &tp); now = tp.tv_sec; for (;;) { if ((n = imsg_get(ibuf, &imsg)) == -1) fatal("lde_dispatch_imsg: imsg_read error"); if (n == 0) break; switch (imsg.hdr.type) { case IMSG_LABEL_MAPPING_FULL: nbr = lde_nbr_find(imsg.hdr.peerid); if (nbr == NULL) { log_debug("lde_dispatch_imsg: cannot find " "lde neighbor"); return; } rt_snap(nbr); lde_imsg_compose_ldpe(IMSG_MAPPING_ADD_END, imsg.hdr.peerid, 0, NULL, 0); break; case IMSG_LABEL_MAPPING: case IMSG_LABEL_REQUEST: case IMSG_LABEL_RELEASE: case IMSG_LABEL_WITHDRAW: case IMSG_LABEL_ABORT: if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(map)) fatalx("invalid size of OE request"); memcpy(&map, imsg.data, sizeof(map)); nbr = lde_nbr_find(imsg.hdr.peerid); if (nbr == NULL) { log_debug("lde_dispatch_imsg: cannot find " "lde neighbor"); return; } switch (imsg.hdr.type) { case IMSG_LABEL_MAPPING: lde_check_mapping(&map, nbr); break; case IMSG_LABEL_REQUEST: lde_check_request(&map, nbr); break; case IMSG_LABEL_RELEASE: lde_check_release(&map, nbr); break; case IMSG_LABEL_WITHDRAW: lde_check_withdraw(&map, nbr); break; default: log_warnx("type %d not yet handled. 
nbr %s", imsg.hdr.type, inet_ntoa(nbr->id)); } break; case IMSG_ADDRESS_ADD: if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(addr)) fatalx("invalid size of OE request"); memcpy(&addr, imsg.data, sizeof(addr)); nbr = lde_nbr_find(imsg.hdr.peerid); if (nbr == NULL) { log_debug("lde_dispatch_imsg: cannot find " "lde neighbor"); return; } if (lde_address_add(nbr, &addr) < 0) { log_debug("lde_dispatch_imsg: cannot add " "address %s, it already exists", inet_ntoa(addr)); } break; case IMSG_ADDRESS_DEL: if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(addr)) fatalx("invalid size of OE request"); memcpy(&addr, imsg.data, sizeof(addr)); nbr = lde_nbr_find(imsg.hdr.peerid); if (nbr == NULL) { log_debug("lde_dispatch_imsg: cannot find " "lde neighbor"); return; } if (lde_address_del(nbr, &addr) < 0) { log_debug("lde_dispatch_imsg: cannot delete " "address %s, it does not exists", inet_ntoa(addr)); } break; case IMSG_NEIGHBOR_UP: if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn)) fatalx("invalid size of OE request"); memcpy(&rn, imsg.data, sizeof(rn)); if (lde_nbr_find(imsg.hdr.peerid)) fatalx("lde_dispatch_imsg: " "neighbor already exists"); lde_nbr_new(imsg.hdr.peerid, &rn); break; case IMSG_NEIGHBOR_DOWN: lde_nbr_del(lde_nbr_find(imsg.hdr.peerid)); break; case IMSG_NEIGHBOR_CHANGE: if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state)) fatalx("invalid size of OE request"); memcpy(&state, imsg.data, sizeof(state)); nbr = lde_nbr_find(imsg.hdr.peerid); if (nbr == NULL) break; nbr->state = state; break; case IMSG_CTL_SHOW_LIB: rt_dump(imsg.hdr.pid); imsg_compose_event(iev_ldpe, IMSG_CTL_END, 0, imsg.hdr.pid, -1, NULL, 0); break; case IMSG_CTL_LOG_VERBOSE: /* already checked by ldpe */ memcpy(&verbose, imsg.data, sizeof(verbose)); log_verbose(verbose); break; default: log_debug("lde_dispatch_imsg: unexpected imsg %d", imsg.hdr.type); break; } imsg_free(&imsg); } if (!shut) imsg_event_add(iev); else { /* this pipe is dead, so remove the event handler */ event_del(&iev->ev); 
event_loopexit(NULL); } }
/*
 * One benchmark iteration: arm a read watcher (and optional timer) on
 * every pipe, fire num_active of them, then spin the event loop until
 * all expected writes have been observed.  Prints "total-us active-us"
 * and returns a pointer to the static end timestamp.
 *
 * Relies on file globals: pipes, num_pipes, num_active, num_writes,
 * native, set_prios, timers, io_blocks, fired, count, writes.
 * NOTE(review): 'te' is static, so the returned pointer stays valid but
 * is overwritten on the next call.
 */
struct timeval *
run_once(void)
{
	int *cp, i, space;
	static struct timeval ta, ts, te, tv;

	gettimeofday(&ta, NULL);
	// Set up event watches:
	for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
		if (native) {
#if NATIVE
			/* libev path: (re)arm the io watcher and restart a
			 * randomized repeating timer */
			//if (ev_is_active (&evio [i]))
			//  ev_io_stop (&evio [i]);
			ev_io_set (&io_blocks[i].io, cp [0], EV_READ);
			ev_io_start (EV_DEFAULT_UC_ &io_blocks[i].io);
			io_blocks[i].timer.repeat = 10. + drand48 ();
			ev_timer_again (EV_DEFAULT_UC_ &io_blocks[i].timer);
#else
			abort ();
#endif
		} else {
			/* libevent path: persistent read event, optionally
			 * with a random priority and a ~10s timeout */
			event_set(&io_blocks[i].event, cp[0],
			    EV_READ | EV_PERSIST, read_cb, &io_blocks[i]);
			if (set_prios) {
				event_priority_set(&io_blocks[i].event,
				    drand48() * EV_MAXPRI);
			}
			if (timers) {
				/* NOTE(review): double literal assigned to
				 * tv_sec — truncates to 10; presumably
				 * intentional, confirm */
				tv.tv_sec = 10.;
				tv.tv_usec = drand48() * 1e6;
			}
			event_add(&io_blocks[i].event, timers ? &tv : 0);
		}
	}

	event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);

	// Make the chosen number of descriptors active:
	fired = 0;
	space = num_pipes / num_active;
	space = space * 2;	/* pipes[] holds fd pairs, so stride doubles */
	for (i = 0; i < num_active; i++, fired++) {
		write(pipes[i * space + 1], "e", 1);
	}

	count = 0;
	writes = num_writes - fired;
	{
		int xcount = 0;

		gettimeofday(&ts, NULL);
		/* pump the loop until the callbacks account for every
		 * expected write */
		do {
			event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
			xcount++;
		} while (count != num_writes);
		gettimeofday(&te, NULL);
		//if (xcount != count) fprintf(stderr, "Xcount: %d, Rcount: %d\n", xcount, count);
	}

	/* ta = total elapsed, ts = time spent in the dispatch loop */
	timersub(&te, &ta, &ta);
	timersub(&te, &ts, &ts);
	fprintf(stdout, "%8ld %8ld\n",
	    ta.tv_sec * 1000000L + ta.tv_usec,
	    ts.tv_sec * 1000000L + ts.tv_usec);

	/* Disarm everything so the next iteration starts clean. */
	cp = pipes;
	for (int j = 0; j < num_pipes; j++, cp += 2) {
		if (native) {
#if NATIVE
			ev_io_stop(EV_DEFAULT_UC_ &io_blocks[j].io);
#endif
		} else {
			event_del(&io_blocks[j].event);
			event_set(&io_blocks[j].event, cp[0],
			    EV_READ | EV_PERSIST, read_cb, &io_blocks[j]);
		}
	}
	return (&te);
}
static int __event_dispatch(void) { fd_set r, w; int nfd; struct event *ev; struct timeval now, timeout, t; FD_ZERO(&r); FD_ZERO(&w); nfd = 0; gettimeofday(&now, NULL); timeout.tv_sec = 10; /* arbitrary */ timeout.tv_usec = 0; TAILQ_INIT(¤t); /* * Build fd_set's */ event_log_debug("%s: building fd set...", __func__); while (!TAILQ_EMPTY(&pending)) { ev = TAILQ_FIRST(&pending); event_del(ev); if (ev->flags & EV_HAS_TIMEOUT) { if (tv_cmp(&now, &ev->expire) >= 0) t.tv_sec = t.tv_usec = 0; else { t = ev->expire; tv_sub(&t, &now); } if (tv_cmp(&t, &timeout) < 0) timeout = t; } if (ev->fd >= 0) { if (ev->flags & EV_READ) { FD_SET(ev->fd, &r); nfd = (nfd > ev->fd) ? nfd : ev->fd; } if (ev->flags & EV_WRITE) { FD_SET(ev->fd, &w); nfd = (nfd > ev->fd) ? nfd : ev->fd; } } __event_add_current(ev); } event_log_debug("%s: waiting for events...", __func__); nfd = select(nfd + 1, &r, &w, NULL, &timeout); if (nfd < 0) return (-1); /* * Process current pending */ event_log_debug("%s: processing events...", __func__); gettimeofday(&now, NULL); while (!TAILQ_EMPTY(¤t)) { ev = TAILQ_FIRST(¤t); __event_del_current(ev); /* check if fd is ready for reading/writing */ if (nfd > 0 && ev->fd >= 0) { if (FD_ISSET(ev->fd, &r) || FD_ISSET(ev->fd, &w)) { if (ev->flags & EV_PERSIST) { if (ev->flags & EV_HAS_TIMEOUT) event_add(ev, &ev->timeout); else event_add(ev, NULL); } nfd --; event_log_debug("%s: calling %p(%d, %p), " \ "ev=%p", __func__, ev->cb, ev->fd, ev->cbarg, ev); (ev->cb)(ev->fd, (ev->flags & (EV_READ|EV_WRITE)), ev->cbarg); continue; } } /* if event has no timeout - just requeue */ if ((ev->flags & EV_HAS_TIMEOUT) == 0) { event_add(ev, NULL); continue; } /* check if event has expired */ if (tv_cmp(&now, &ev->expire) >= 0) { if (ev->flags & EV_PERSIST) event_add(ev, &ev->timeout); event_log_debug("%s: calling %p(%d, %p), ev=%p", __func__, ev->cb, ev->fd, ev->cbarg, ev); (ev->cb)(ev->fd, (ev->flags & (EV_READ|EV_WRITE)), ev->cbarg); continue; } assert((ev->flags & 
(EV_PENDING|EV_CURRENT)) == 0); __event_link(ev); } return (0); }
/*
 * libevent callback for the listening ICB socket: accept one client,
 * tune the socket (low-delay TOS for IPv4, keepalive), allocate a
 * session wrapped in a bufferevent, and hand it to icb_start().
 * On fd exhaustion the listener is paused for one second via l->pause.
 */
void
icbd_accept(int fd, short event __attribute__((__unused__)), void *arg)
{
	struct icbd_listener *l = arg;
	struct sockaddr_storage ss;
	struct timeval p = { 1, 0 };	/* accept back-off interval */
	struct icb_session *is;
	socklen_t ss_len = sizeof ss;
	int s, on = 1, tos = IPTOS_LOWDELAY;

	ss.ss_len = ss_len;
	s = accept(fd, (struct sockaddr *)&ss, &ss_len);
	if (s == -1) {
		switch (errno) {
		case EINTR:
		case EWOULDBLOCK:
		case ECONNABORTED:
			/* transient: try again on the next readiness */
			return;
		case EMFILE:
		case ENFILE:
			/* out of descriptors: stop accepting, resume after
			 * the pause timer fires */
			event_del(&l->ev);
			evtimer_add(&l->pause, &p);
			return;
		default:
			syslog(LOG_ERR, "accept: %m");
			return;
		}
	}

	/* socket tuning failures are logged but not fatal */
	if (ss.ss_family == AF_INET)
		if (setsockopt(s, IPPROTO_IP, IP_TOS, &tos, sizeof tos) < 0)
			syslog(LOG_WARNING, "IP_TOS: %m");
	if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof on) < 0)
		syslog(LOG_WARNING, "SO_KEEPALIVE: %m");
	if ((is = calloc(1, sizeof *is)) == NULL) {
		syslog(LOG_ERR, "calloc: %m");
		(void)close(s);
		return;
	}
	if ((is->bev = bufferevent_new(s, icbd_dispatch, NULL, icbd_ioerr,
	    is)) == NULL) {
		syslog(LOG_ERR, "bufferevent_new: %m");
		(void)close(s);
		free(is);
		return;
	}
	if (bufferevent_enable(is->bev, EV_READ)) {
		syslog(LOG_ERR, "bufferevent_enable: %m");
		(void)close(s);
		bufferevent_free(is->bev);
		free(is);
		return;
	}

	/* save host information */
	getpeerinfo(is);

	/* start icb conversation */
	icb_start(is);
}
// Deregister this queue's event from the event loop so its callback
// will no longer fire.  Does not free any other resources.
void
PendingResponseQueue::close()
{
	event_del(&m_event);
}
/*
 * GDestroyNotify for a PurpleIOClosure: detach its event from the
 * event loop, then release the closure itself.
 */
static void
event_io_destroy(gpointer data)
{
	PurpleIOClosure *closure = data;

	event_del(&closure->evfifo);
	g_free(closure);
}
/* sock stream/TCP handler */
/*
 * Per-connection read/write callback (plain or SSL, chosen by the
 * global is_use_ssl).  Reads into conns[fd].response, then schedules a
 * write of conns[fd].request; any error or EOF tears the connection
 * down.  State is indexed directly by fd in the global conns[] table.
 */
void
ev_handler(int fd, short ev_flags, void *arg)
{
	int n = 0;

	if (ev_flags & E_READ) {
		if (is_use_ssl) {
#ifdef USE_SSL
			n = SSL_read(conns[fd].ssl, conns[fd].response,
			    EV_BUF_SIZE - 1);
#else
			n = read(fd, conns[fd].response, EV_BUF_SIZE - 1);
#endif
		} else {
			n = read(fd, conns[fd].response, EV_BUF_SIZE - 1);
		}
		if (n > 0) {
			SHOW_LOG("Read %d bytes from %d", n, fd);
			/* at most EV_BUF_SIZE-1 bytes read, so this
			 * NUL-terminate is in bounds */
			conns[fd].response[n] = 0;
			SHOW_LOG("Updating event[%p] on %d ",
			    &conns[fd].event, fd);
			event_add(&conns[fd].event, E_WRITE);
		} else {
			/* n == 0 is EOF; n < 0 is an error — both close */
			if (n < 0)
				FATAL_LOG("Reading from %d failed, %s", fd,
				    strerror(errno));
			goto err;
		}
	}
	if (ev_flags & E_WRITE) {
		if (is_use_ssl) {
#ifdef USE_SSL
			n = SSL_write(conns[fd].ssl, conns[fd].request,
			    strlen(conns[fd].request));
#else
			n = write(fd, conns[fd].request,
			    strlen(conns[fd].request));
#endif
		} else {
			n = write(fd, conns[fd].request,
			    strlen(conns[fd].request));
		}
		if (n > 0) {
			SHOW_LOG("Wrote %d bytes via %d", n, fd);
		} else {
			if (n < 0)
				FATAL_LOG("Wrote data via %d failed, %s", fd,
				    strerror(errno));
			goto err;
		}
		/* one-shot write: disarm E_WRITE until more data arrives */
		event_del(&conns[fd].event, E_WRITE);
	}
	return ;
err:
	{
		/* NOTE(review): fd is closed before SSL_shutdown() runs on
		 * the SSL path — the shutdown alert cannot be sent; confirm
		 * whether that is intentional. */
		event_destroy(&conns[fd].event);
		shutdown(fd, SHUT_RDWR);
		close(fd);
#ifdef USE_SSL
		if (conns[fd].ssl) {
			SSL_shutdown(conns[fd].ssl);
			SSL_free(conns[fd].ssl);
			conns[fd].ssl = NULL;
		}
#endif
		SHOW_LOG("Connection %d closed", fd);
	}
}
// Cancel a pending timer by removing its event from the event loop.
// Does not delete 'te'; ownership stays with the caller.
void RemoveTimer(TimerEvent* te)
{
	event_del(&te->m_event);
}
// Cancel a pending timer by removing its event from the event loop.
// The shared_ptr is now taken by const reference: the function only
// reads through the pointer, so copying it (with the attendant atomic
// refcount increment/decrement) was pure overhead.  All existing
// call sites continue to compile unchanged.
void RemoveTimer(const std::shared_ptr<TimerEvent>& te)
{
	event_del(&te->m_event);
}
/** * this is what the child process does continuously. If * the child process dies, then it gets respawned by the * watchdog to maintain continuity. */ static int do_child_process(void) { int server_sockfd; struct sockaddr_in server_address; struct event_base *pbase = NULL; struct event evsignal; /* libdaemon's signal fd */ struct event evaccept; /* server socket */ int retval = 1; /* if(lookup_config.drop_core) { */ /* const struct rlimit rlim = { */ /* RLIM_INFINITY, */ /* RLIM_INFINITY */ /* }; */ /* setrlimit(RLIMIT_CORE, &rlim); */ /* prctl(PR_SET_DUMPABLE, 1); */ /* } */ server_sockfd = socket(AF_INET, SOCK_STREAM, 0); if(server_sockfd == -1) { ERROR("Cannot create server socket: %s", strerror(errno)); goto finish; } server_address.sin_family = AF_INET; server_address.sin_addr.s_addr = INADDR_ANY; server_address.sin_port = htons(config.port); if(bind(server_sockfd, (struct sockaddr*)&server_address, (socklen_t)sizeof(server_address)) < 0) { ERROR("Bind error: %s", strerror(errno)); goto finish; } if(listen(server_sockfd, config.socket_backlog) < 0) { ERROR("Listen error: %s", strerror(errno)); goto finish; } if(setnonblock(server_sockfd) < 0) { ERROR("Could not set server socket to non-blocking: %s", strerror(errno)); goto finish; } /* FIXME: drop privs */ /* set up events */ pbase = event_init(); if(!pbase) { ERROR("Could not get event_base. Failing"); goto finish; } /* set up event for libdaemon's signal fd */ event_set(&evsignal, daemon_signal_fd(), EV_READ | EV_PERSIST, on_signal, &evsignal); event_add(&evsignal, NULL); /* set up events for listening AF_INET socket */ event_set(&evaccept, server_sockfd, EV_READ | EV_PERSIST, on_accept, &evaccept); event_add(&evaccept, NULL); while(!g_quitflag) { event_base_loop(pbase, EVLOOP_ONCE); } retval = 0; finish: if(pbase) { event_del(&evaccept); event_del(&evsignal); } if(server_sockfd != -1) { shutdown(server_sockfd, SHUT_RDWR); close(server_sockfd); } exit(retval); }
// Destructor: make sure the underlying event is removed from the event
// loop before the object's storage goes away, so the loop never fires
// a callback against a destroyed TimerEvent.
TimerEvent::~TimerEvent()
{
	event_del(&this->m_event);
}
/* sock stream/TCP handler */ void ev_handler(int fd, int ev_flags, void *arg) { char *p = NULL, *s = NULL, *ks = "Content-Length:"; int n = 0, x = 0; if(ev_flags & E_READ) { if(is_use_ssl) { #ifdef USE_SSL n = SSL_read(conns[fd].ssl, conns[fd].response+conns[fd].nresp, EV_BUF_SIZE - conns[fd].nresp); #else n = read(fd, conns[fd].response+conns[fd].nresp, EV_BUF_SIZE - conns[fd].nresp); #endif } else { n = read(fd, conns[fd].response+conns[fd].nresp, EV_BUF_SIZE - conns[fd].nresp); } if(n > 0 ) { SHOW_LOG("Read %d bytes from %d", n, fd); conns[fd].response[conns[fd].nresp] = 0; conns[fd].nresp += n; if(keepalive && (s = strstr(conns[fd].response, "\r\n\r\n"))) { fprintf(stdout, "%s::%d n:%d x:%d\n", __FILE__, __LINE__, n, x); s += 4; x = conns[fd].nresp - (s - conns[fd].response); if((p = strcasestr(conns[fd].response, ks))) { p += strlen(ks); while(*p != 0 && *p == 0x20)++p; n = atoi(p); } if(x == n) event_add(&(conns[fd].event), E_WRITE); } } else { if(n < 0 ) FATAL_LOG("Reading from %d failed, %s", fd, strerror(errno)); goto err; } } if(ev_flags & E_WRITE) { if(is_use_ssl) { #ifdef USE_SSL n = SSL_write(conns[fd].ssl, conns[fd].request, conns[fd].nreq); #else n = write(fd, conns[fd].request, conns[fd].nreq); #endif } else { n = write(fd, conns[fd].request, conns[fd].nreq); } if(n == conns[fd].nreq ) { conns[fd].nresp = 0; nrequest++; SHOW_LOG("Wrote %d bytes via %d", n, fd); } else { if(n < 0) FATAL_LOG("Wrote data via %d failed, %s", fd, strerror(errno)); goto err; } event_del(&conns[fd].event, E_WRITE); } return ; err: { event_destroy(&(conns[fd].event)); #ifdef USE_SSL if(conns[fd].ssl) { SSL_shutdown(conns[fd].ssl); SSL_free(conns[fd].ssl); conns[fd].ssl = NULL; } #endif memset(&(conns[fd].event), 0, sizeof(EVENT)); conns[fd].nresp = 0; shutdown(fd, SHUT_RDWR); conns[fd].fd = 0; close(fd); //SHOW_LOG("Connection %d closed", fd); ncompleted++; new_request(); } }
/*
 * Generic privsep imsg dispatcher: pump the ibuf for the peer process,
 * offer each message to the process-specific callback p->p_cb first,
 * and fall back to the generic handlers (only IMSG_CTL_VERBOSE here).
 * An unknown message that the callback declines is fatal.
 */
void
proc_dispatch(int fd, short event, void *arg)
{
	struct privsep_proc	*p = (struct privsep_proc *)arg;
	struct privsep		*ps = p->p_ps;
	struct imsgev		*iev;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	ssize_t			 n;
	int			 verbose;
	const char		*title;

	title = ps->ps_title[privsep_process];
	iev = &ps->ps_ievs[p->p_id];
	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal(title);
		if (n == 0) {
			/* this pipe is dead, so remove the event handler */
			event_del(&iev->ev);
			event_loopexit(NULL);
			return;
		}
	}

	if (event & EV_WRITE) {
		/* EAGAIN just means the pipe is full; retry later */
		if (msgbuf_write(&ibuf->w) <= 0 && errno != EAGAIN)
			fatal(title);
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal(title);
		if (n == 0)
			break;

		/*
		 * Check the message with the program callback
		 */
		if ((p->p_cb)(fd, p, &imsg) == 0) {
			/* Message was handled by the callback, continue */
			imsg_free(&imsg);
			continue;
		}

		/*
		 * Generic message handling
		 */
		switch (imsg.hdr.type) {
		case IMSG_CTL_VERBOSE:
			IMSG_SIZE_CHECK(&imsg, &verbose);
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_warnx("%s: %s got imsg %d", __func__,
			    p->p_title, imsg.hdr.type);
			fatalx(title);
		}
		imsg_free(&imsg);
	}
	imsg_event_add(iev);
}
/* ARGSUSED */
/*
 * Dispatch messages from the parent (main) process: kernel route
 * add/remove notifications and reconfiguration messages.
 */
void
lde_dispatch_parent(int fd, short event, void *bula)
{
	struct imsg		 imsg;
	struct kroute		 kr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	ssize_t			 n;
	int			 shut = 0;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("lde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("lde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));
			lde_kernel_insert(&kr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("lde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));
			lde_kernel_remove(&kr);
			break;
		case IMSG_RECONF_CONF:
			/* NOTE(review): unlike the cases above, this memcpy
			 * has no imsg length check — confirm the sender
			 * always supplies a full ldpd_conf. */
			if ((nconf = malloc(sizeof(struct ldpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ldpd_conf));
			break;
		case IMSG_RECONF_IFACE:
			break;
		case IMSG_RECONF_END:
			break;
		default:
			log_debug("lde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
int _getdns_event_add(struct _getdns_event *ev, struct timeval *tv) { printf( "event_add %p added=%d fd=%d tv=" ARG_LL "d %s%s%s", ev, ev->added, ev->ev_fd, (tv?(long long)tv->tv_sec*1000+(long long)tv->tv_usec/1000:-1), (ev->ev_events&EV_READ)?" EV_READ":"", (ev->ev_events&EV_WRITE)?" EV_WRITE":"", (ev->ev_events&EV_TIMEOUT)?" EV_TIMEOUT":""); if(ev->added) event_del(ev); /* gowri no works if (ev->ev_fd == -1 || find_fd(ev->ev_base, ev->ev_fd) == -1) { printf("event_add failed, bad fd\n"); return 0; } */ ev->is_tcp = 0; ev->is_signal = 0; ev->just_checked = 0; if ((ev->ev_events&(EV_READ | EV_WRITE)) && ev->ev_fd != -1) { BOOL b = 0; int t, l; long events = 0; //gprintf("\getdns_event_add %d %d\n", ev->ev_fd, events); if (ev->ev_base->max == ev->ev_base->cap) return -1; ev->idx = ev->ev_base->max++; ev->ev_base->items[ev->idx] = ev; if ((ev->ev_events&EV_READ)) events |= FD_READ; if ((ev->ev_events&EV_WRITE)) { events |= FD_CONNECT; events |= FD_WRITE; } //printf("\getdns_event_add %d read = %d write = %d %d\n", ev->ev_fd, ev->ev_events&EV_READ, ev->ev_events&EV_WRITE, events); l = sizeof(t); if(getsockopt(ev->ev_fd, SOL_SOCKET, SO_TYPE, (void*)&t, &l) != 0) log_err("getdns: getsockopt(SO_TYPE) failed: %s", wsa_strerror(WSAGetLastError())); if(t == SOCK_STREAM) { /* TCP socket */ ev->is_tcp = 1; events |= FD_CLOSE; if( (ev->ev_events&EV_WRITE) ) events |= FD_CONNECT; l = sizeof(b); if(getsockopt(ev->ev_fd, SOL_SOCKET, SO_ACCEPTCONN, (void*)&b, &l) != 0) log_err("getdns: getsockopt(SO_ACCEPTCONN) failed: %s", wsa_strerror(WSAGetLastError())); if(b) /* TCP accept socket */ events |= FD_ACCEPT; } ev->hEvent = WSACreateEvent(); if(ev->hEvent == WSA_INVALID_EVENT) log_err("getdns: WSACreateEvent failed: %s", wsa_strerror(WSAGetLastError())); /* automatically sets fd to nonblocking mode. 
* nonblocking cannot be disabled, until wsaES(fd, NULL, 0) */ //g printf("\nWSAEventSelect %d events %d hEvent %d\n", ev->ev_fd, events, ev->hEvent); //gg if (WSAEventSelect(ev->ev_fd, ev->hEvent, FD_ACCEPT | FD_CONNECT | FD_READ | FD_CLOSE | FD_WRITE) != 0) { //if (WSAEventSelect(ev->ev_fd, ev->hEvent,FD_READ | FD_WRITE) != 0) { if (WSAEventSelect(ev->ev_fd, ev->hEvent, events) != 0) { log_err("getdns: WSAEventSelect in getdns failed: %s", wsa_strerror(WSAGetLastError())); } if(ev->is_tcp && ev->stick_events && (ev->ev_events & ev->old_events)) { /* go to processing the sticky event right away */ ev->ev_base->tcp_reinvigorated = 1; } } if(tv && (ev->ev_events&EV_TIMEOUT)) { #ifndef S_SPLINT_S struct timeval *now = ev->ev_base->time_tv; ev->ev_timeout.tv_sec = tv->tv_sec + now->tv_sec; ev->ev_timeout.tv_usec = tv->tv_usec + now->tv_usec; while(ev->ev_timeout.tv_usec > 1000000) { ev->ev_timeout.tv_usec -= 1000000; ev->ev_timeout.tv_sec++; } #endif (void)_getdns_rbtree_insert(ev->ev_base->times, &ev->node); } ev->added = 1; return 0; }
/*
 * libevent /dev/poll backend dispatch: flush pending changes, issue
 * DP_POLL with the loop timeout, then activate (and, for non-persistent
 * events, remove) every event whose fd reported readiness.  Signal
 * delivery is interleaved around the blocking ioctl.
 * Returns 0 on success, -1 on unrecoverable error.
 */
int
devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct devpollop *devpollop = arg;
	struct pollfd *events = devpollop->events;
	struct dvpoll dvp;
	struct evdevpoll *evdp;
	int i, res, timeout;

	if (evsignal_deliver(&devpollop->evsigmask) == -1)
		return (-1);

	if (devpollop->nchanges)
		devpoll_commit(devpollop);

	/* round the timeout up to whole milliseconds.
	 * NOTE(review): tv is dereferenced unconditionally — callers must
	 * never pass NULL here; confirm against the loop core. */
	timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	dvp.dp_fds = devpollop->events;
	dvp.dp_nfds = devpollop->nevents;
	dvp.dp_timeout = timeout;

	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);

	if (evsignal_recalc(&devpollop->evsigmask) == -1)
		return (-1);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("ioctl: DP_POLL");
			return (-1);
		}
		/* interrupted by a signal: process it and report success */
		evsignal_process();
		return (0);
	} else if (evsignal_caught)
		evsignal_process();

	event_debug(("%s: devpoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].revents;
		struct event *evread = NULL, *evwrite = NULL;

		assert(events[i].fd < devpollop->nfds);
		evdp = &devpollop->fds[events[i].fd];

		/* hangup/error wake both directions so callbacks can see
		 * the condition via read/write failure */
		if (what & POLLHUP)
			what |= POLLIN | POLLOUT;
		else if (what & POLLERR)
			what |= POLLIN | POLLOUT;

		if (what & POLLIN) {
			evread = evdp->evread;
			which |= EV_READ;
		}

		if (what & POLLOUT) {
			evwrite = evdp->evwrite;
			which |= EV_WRITE;
		}

		if (!which)
			continue;

		/* non-persistent events are removed before activation */
		if (evread != NULL && !(evread->ev_events & EV_PERSIST))
			event_del(evread);
		if (evwrite != NULL && evwrite != evread &&
		    !(evwrite->ev_events & EV_PERSIST))
			event_del(evwrite);

		if (evread != NULL)
			event_active(evread, EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, EV_WRITE, 1);
	}

	return (0);
}
/* ARGSUSED */
/*
 * Accept a connection on the control socket, enforce the per-uid
 * session limit, and register a new ctl_conn keyed by a unique id.
 * When the fd table is nearly exhausted, accepting is paused by
 * removing the listen event (resumed elsewhere).
 */
static void
control_accept(int listenfd, short event, void *arg)
{
	int			 connfd;
	socklen_t		 len;
	struct sockaddr_un	 s_un;
	struct ctl_conn		*c;
	size_t			*count;
	uid_t			 euid;
	gid_t			 egid;

	/* keep a reserve of fds for the daemon's own use */
	if (getdtablesize() - getdtablecount() < CONTROL_FD_RESERVE)
		goto pause;

	len = sizeof(s_un);
	if ((connfd = accept(listenfd, (struct sockaddr *)&s_un,
	    &len)) == -1) {
		if (errno == ENFILE || errno == EMFILE)
			goto pause;
		if (errno == EINTR || errno == EWOULDBLOCK ||
		    errno == ECONNABORTED)
			return;
		fatal("control_accept: accept");
	}

	io_set_nonblocking(connfd);

	/* identify the connecting user for the per-uid limit */
	if (getpeereid(connfd, &euid, &egid) == -1)
		fatal("getpeereid");

	count = tree_get(&ctl_count, euid);
	if (count == NULL) {
		count = xcalloc(1, sizeof *count, "control_accept");
		tree_xset(&ctl_count, euid, count);
	}
	if (*count == CONTROL_MAXCONN_PER_CLIENT) {
		close(connfd);
		log_warnx("warn: too many connections to control socket "
		    "from user with uid %lu", (unsigned long int)euid);
		return;
	}
	(*count)++;

	/* pick the next unused connection id */
	do {
		++connid;
	} while (tree_get(&ctl_conns, connid));

	c = xcalloc(1, sizeof(*c), "control_accept");
	c->euid = euid;
	c->egid = egid;
	c->id = connid;
	c->mproc.proc = PROC_CLIENT;
	c->mproc.handler = control_dispatch_ext;
	c->mproc.data = c;
	mproc_init(&c->mproc, connfd);
	mproc_enable(&c->mproc);
	tree_xset(&ctl_conns, c->id, c);

	stat_backend->increment("control.session", 1);
	return;

pause:
	log_warnx("warn: ctl client limit hit, disabling new connections");
	event_del(&control_state.ev);
}
/*
 * Drop one reference to a bufferevent (which must be locked by the
 * caller) and unlock it.  If this was the last reference, tear the
 * object down completely: destruct hook, both evbuffers, rate-limit
 * state, debug bookkeeping, the lock itself, and finally the memory.
 * Returns 1 if the object was freed, 0 if references remain.
 */
int
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	struct bufferevent *underlying;

	EVUTIL_ASSERT(bufev_private->refcnt > 0);

	if (--bufev_private->refcnt) {
		/* other holders remain: just release the lock */
		BEV_UNLOCK(bufev);
		return 0;
	}

	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group_internal(bufev,0);
		if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
			event_del(&bufev_private->rate_limiting->refill_bucket_event);
		event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	event_debug_unassign(&bufev->ev_read);
	event_debug_unassign(&bufev->ev_write);

	/* unlock before freeing the lock; nobody else can hold a
	 * reference at this point */
	BEV_UNLOCK(bufev);
	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need the
	 * reference to it.  We wait this long mainly in case our lock is
	 * shared with underlying.
	 *
	 * The 'destruct' function will also drop a reference to underlying
	 * if BEV_OPT_CLOSE_ON_FREE is set.
	 *
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches.
	 */
	if (underlying)
		bufferevent_decref(underlying);

	return 1;
}
static void sssp_bfs(void) { avl_node_t *node, *next, *to; edge_t *e; node_t *n; list_t *todo_list; list_node_t *from, *todonext; bool indirect; char *name; char *address, *port; char *envp[7]; int i; todo_list = list_alloc(NULL); /* Clear visited status on nodes */ for(node = node_tree->head; node; node = node->next) { n = node->data; n->status.visited = false; n->status.indirect = true; } /* Begin with myself */ myself->status.visited = true; myself->status.indirect = false; myself->nexthop = myself; myself->prevedge = NULL; myself->via = myself; list_insert_head(todo_list, myself); /* Loop while todo_list is filled */ for(from = todo_list->head; from; from = todonext) { /* "from" is the node from which we start */ n = from->data; for(to = n->edge_tree->head; to; to = to->next) { /* "to" is the edge connected to "from" */ e = to->data; if(!e->reverse) continue; /* Situation: / / ----->(n)---e-->(e->to) \ \ Where e is an edge, (n) and (e->to) are nodes. n->address is set to the e->address of the edge left of n to n. We are currently examining the edge e right of n from n: - If edge e provides for better reachability of e->to, update e->to and (re)add it to the todo_list to (re)examine the reachability of nodes behind it. */ indirect = n->status.indirect || e->options & OPTION_INDIRECT; if(e->to->status.visited && (!e->to->status.indirect || indirect)) continue; e->to->status.visited = true; e->to->status.indirect = indirect; e->to->nexthop = (n->nexthop == myself) ? e->to : n->nexthop; e->to->prevedge = e; e->to->via = indirect ? n->via : e->to; e->to->options = e->options; if(e->to->address.sa.sa_family == AF_UNSPEC && e->address.sa.sa_family != AF_UNKNOWN) update_node_udp(e->to, &e->address); list_insert_tail(todo_list, e->to); } todonext = from->next; list_delete_node(todo_list, from); } list_free(todo_list); /* Check reachability status. 
*/ for(node = node_tree->head; node; node = next) { next = node->next; n = node->data; if(n->status.visited != n->status.reachable) { n->status.reachable = !n->status.reachable; if(n->status.reachable) { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Node %s (%s) became reachable", n->name, n->hostname); } else { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Node %s (%s) became unreachable", n->name, n->hostname); } /* TODO: only clear status.validkey if node is unreachable? */ n->status.validkey = false; n->last_req_key = 0; n->maxmtu = MTU; n->minmtu = 0; n->mtuprobes = 0; if(n->mtuevent) { event_del(n->mtuevent); n->mtuevent = NULL; } xasprintf(&envp[0], "NETNAME=%s", netname ? : ""); xasprintf(&envp[1], "DEVICE=%s", device ? : ""); xasprintf(&envp[2], "INTERFACE=%s", iface ? : ""); xasprintf(&envp[3], "NODE=%s", n->name); sockaddr2str(&n->address, &address, &port); xasprintf(&envp[4], "REMOTEADDRESS=%s", address); xasprintf(&envp[5], "REMOTEPORT=%s", port); envp[6] = NULL; execute_script(n->status.reachable ? "host-up" : "host-down", envp); xasprintf(&name, n->status.reachable ? "hosts/%s-up" : "hosts/%s-down", n->name); execute_script(name, envp); free(name); free(address); free(port); for(i = 0; i < 6; i++) free(envp[i]); subnet_update(n, NULL, n->status.reachable); if(!n->status.reachable) { update_node_udp(n, NULL); memset(&n->status, 0, sizeof n->status); n->options = 0; } else if(n->connection) { send_ans_key(n); } } }
/*
 * zark: called from the active queue when a read event arrives from the
 * epoll layer; starts reading data from the socket.
 */
static void
bufferevent_readcb(int fd, short event, void *arg)
{
	struct bufferevent *bufev = arg;
	int res = 0;
	short what = EVBUFFER_READ;
	size_t len;
	int howmuch = -1;

	/* zark: bufferevent wraps I/O operations, so a timeout here is
	 * reported as an error rather than handled as a normal event. */
	if (event == EV_TIMEOUT) {
		what |= EVBUFFER_TIMEOUT;
		goto error;
	}

	/*
	 * If we have a high watermark configured then we don't want to
	 * read more data than would make us reach the watermark.
	 *
	 * zark: quick primer on libevent's read/write watermarks:
	 *  read low  - the read callback fires once the input buffer holds
	 *              at least this much data.  Defaults to 0, so every
	 *              read invokes the callback.
	 *  read high - once the input buffer reaches this level the
	 *              bufferevent stops reading until enough data has
	 *              been drained to drop below it.  Defaults to
	 *              unlimited, so reading never stops on buffer size.
	 *  write low - the write callback fires when the output buffer
	 *              drains to this level or below.  Defaults to 0, so
	 *              it fires only when the output buffer is empty.
	 *  write high - not used directly by bufferevent; it matters when
	 *              a bufferevent serves as the underlying transport of
	 *              another (filtering) bufferevent.
	 */
	/* zark: read high watermark reached — stop reading. */
	if (bufev->wm_read.high != 0) {
		howmuch = bufev->wm_read.high - EVBUFFER_LENGTH(bufev->input);
		/* we might have lowered the watermark, stop reading */
		if (howmuch <= 0) {
			struct evbuffer *buf = bufev->input;
			event_del(&bufev->ev_read);	/* drop the current read event */
			/*
			 * zark: install a buffer callback
			 * (bufferevent_read_pressure_cb) that re-adds the
			 * normal read event once the input buffer shrinks
			 * below the high watermark again.
			 */
			evbuffer_setcb(buf,
			    bufferevent_read_pressure_cb, bufev);
			return;
		}
	}

	/*
	 * zark: read data from fd into our input buffer.
	 */
	res = evbuffer_read(bufev->input, fd, howmuch);
	if (res == -1) {
		if (errno == EAGAIN || errno == EINTR)
			goto reschedule;
		/* error case */
		what |= EVBUFFER_ERROR;
	} else if (res == 0) {
		/* eof case */
		what |= EVBUFFER_EOF;
	}

	if (res <= 0)
		goto error;

	/*
	 * zark: the event was not added with EV_PERSIST originally, so it
	 * has to be re-added after each firing.
	 */
	bufferevent_add(&bufev->ev_read, bufev->timeout_read);

	/* See if this callbacks meets the water marks */
	len = EVBUFFER_LENGTH(bufev->input);
	/*
	 * zark: read low watermark not reached yet, so the callback must
	 * not run — just return.
	 */
	if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
		return;
	/*
	 * zark: read high watermark reached — stop reading.
	 */
	if (bufev->wm_read.high != 0 && len >= bufev->wm_read.high) {
		struct evbuffer *buf = bufev->input;
		event_del(&bufev->ev_read);

		/* Now schedule a callback for us when the buffer changes */
		evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev);
	}

	/* Invoke the user callback - must always be called last */
	/*
	 * zark: after all those checks, finally invoke the read callback
	 * the user actually registered.
	 */
	if (bufev->readcb != NULL)
		(*bufev->readcb)(bufev, bufev->cbarg);
	return;

 reschedule:
	bufferevent_add(&bufev->ev_read, bufev->timeout_read);
	return;

	/*
	 * zark: invoke the registered error-handling callback.
	 */
 error:
	(*bufev->errorcb)(bufev, what, bufev->cbarg);
}
/*
 * libevent epoll backend dispatch: wait in epoll_wait (with the base
 * mutex released and benchmark sampling paused), then activate — and,
 * for non-persistent events, remove — every event whose fd reported
 * readiness.  Signal delivery is interleaved around the wait.
 * Returns 0 on success, -1 on unrecoverable error.
 */
int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct epollop *epollop = arg;
	struct epoll_event *events = epollop->events;
	struct evepoll *evep;
	int i, res, timeout;

	if (evsignal_deliver(&epollop->evsigmask) == -1)
		return (-1);

	/* round the timeout up to whole milliseconds */
	timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	/* drop the lock and pause sampling while blocked in the kernel */
	benchmark_stop_sample();
	event_mutex_unlock(base);
	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
	event_mutex_lock(base);
	benchmark_start_sample();

	if (evsignal_recalc(&epollop->evsigmask) == -1)
		return (-1);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}
		/* interrupted by a signal: process it and report success */
		evsignal_process();
		return (0);
	} else if (evsignal_caught)
		evsignal_process();

	event_debug(("%s: epoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].events;
		struct event *evread = NULL, *evwrite = NULL;

		evep = (struct evepoll *)events[i].data.ptr;

		/* hangup/error wake both directions so callbacks can see
		 * the condition via read/write failure */
		if (what & EPOLLHUP)
			what |= EPOLLIN | EPOLLOUT;
		else if (what & EPOLLERR)
			what |= EPOLLIN | EPOLLOUT;

		if (what & EPOLLIN) {
			evread = evep->evread;
			which |= EV_READ;
		}

		if (what & EPOLLOUT) {
			evwrite = evep->evwrite;
			which |= EV_WRITE;
		}

		if (!which)
			continue;

		/* non-persistent events are removed before activation */
		if (evread != NULL && !(evread->ev_events & EV_PERSIST))
			event_del(evread);
		if (evwrite != NULL && evwrite != evread &&
		    !(evwrite->ev_events & EV_PERSIST))
			event_del(evwrite);

		if (evread != NULL)
			event_active(evread, EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, EV_WRITE, 1);
	}

	return (0);
}
static void bufferevent_readcb(int fd, short event, void *arg) { struct bufferevent *bufev = arg; int res = 0; short what = EVBUFFER_READ; size_t len; int howmuch = -1; if (event == EV_TIMEOUT) { what |= EVBUFFER_TIMEOUT; goto error; } if (bufev->wm_read.high != 0) { howmuch = bufev->wm_read.high - EVBUFFER_LENGTH(bufev->input); if (howmuch <= 0) { struct evbuffer *buf = bufev->input; event_del(&bufev->ev_read); evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev); return; } } res = evbuffer_read(bufev->input, fd, howmuch); if (res == -1) { if (errno == EAGAIN || errno == EINTR) goto reschedule; what |= EVBUFFER_ERROR; } else if (res == 0) { what |= EVBUFFER_EOF; } if (res <= 0) goto error; bufferevent_add(&bufev->ev_read, bufev->timeout_read); len = EVBUFFER_LENGTH(bufev->input); if (bufev->wm_read.low != 0 && len < bufev->wm_read.low) return; if (bufev->wm_read.high != 0 && len >= bufev->wm_read.high) { struct evbuffer *buf = bufev->input; event_del(&bufev->ev_read); evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev); } if (bufev->readcb != NULL) (*bufev->readcb)(bufev, bufev->cbarg); return; reschedule: bufferevent_add(&bufev->ev_read, bufev->timeout_read); return; error: (*bufev->errorcb)(bufev, what, bufev->cbarg); }
void check_tcp(struct ctl_tcp_event *cte) { int s; socklen_t len; struct timeval tv; struct linger lng; int he = HCE_TCP_SOCKET_OPTION; switch (cte->host->conf.ss.ss_family) { case AF_INET: ((struct sockaddr_in *)&cte->host->conf.ss)->sin_port = cte->table->conf.port; break; case AF_INET6: ((struct sockaddr_in6 *)&cte->host->conf.ss)->sin6_port = cte->table->conf.port; break; } len = ((struct sockaddr *)&cte->host->conf.ss)->sa_len; if ((s = socket(cte->host->conf.ss.ss_family, SOCK_STREAM, 0)) == -1) { if (errno == EMFILE || errno == ENFILE) he = HCE_TCP_SOCKET_LIMIT; else he = HCE_TCP_SOCKET_ERROR; goto bad; } bzero(&lng, sizeof(lng)); if (setsockopt(s, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng)) == -1) goto bad; if (cte->host->conf.ttl > 0) { if (setsockopt(s, IPPROTO_IP, IP_TTL, &cte->host->conf.ttl, sizeof(int)) == -1) goto bad; } if (fcntl(s, F_SETFL, O_NONBLOCK) == -1) goto bad; bcopy(&cte->table->conf.timeout, &tv, sizeof(tv)); if (connect(s, (struct sockaddr *)&cte->host->conf.ss, len) == -1) { if (errno != EINPROGRESS) { he = HCE_TCP_CONNECT_FAIL; goto bad; } } cte->buf = NULL; cte->host->up = HOST_UP; event_del(&cte->ev); event_set(&cte->ev, s, EV_TIMEOUT|EV_WRITE, tcp_write, cte); event_add(&cte->ev, &tv); return; bad: close(s); cte->host->up = HOST_DOWN; hce_notify_done(cte->host, he); }
/*
 * Tear down a client: unregister its read event, then release its
 * pending buffer and finally the client structure itself.
 */
static void
ag_client_free(ag_client_t *c)
{
	/* must precede free(c): the event lives inside the client struct */
	event_del(&c->read_ev);
	free(c->pending);	/* free(NULL) is a harmless no-op */
	free(c);
}
/*
 * Read-ready callback for a bufferevent.  Drains the fd into the input
 * evbuffer (bounded by the high read watermark), reschedules or pauses
 * the read event as appropriate, and invokes the user's read callback
 * once the low watermark is met — or the error callback on
 * timeout/EOF/error.
 */
static void
bufferevent_readcb(int fd, short event, void *arg)
{
	struct bufferevent *bufev = arg;
	int res = 0;
	short what = EVBUFFER_READ;
	size_t len;
	int howmuch = -1;	/* -1: let evbuffer_read pick the amount */

	if (event == EV_TIMEOUT) {
		what |= EVBUFFER_TIMEOUT;
		goto error;
	}

	/*
	 * If we have a high watermark configured then we don't want to
	 * read more data than would make us reach the watermark.
	 */
	if (bufev->wm_read.high != 0) {
		howmuch = bufev->wm_read.high - EVBUFFER_LENGTH(bufev->input);
		/* we might have lowered the watermark, stop reading */
		if (howmuch <= 0) {
			struct evbuffer *buf = bufev->input;
			event_del(&bufev->ev_read);
			/* resume via callback once the buffer drains */
			evbuffer_setcb(buf,
			    bufferevent_read_pressure_cb, bufev);
			return;
		}
	}

	res = evbuffer_read(bufev->input, fd, howmuch);
	if (res == -1) {
		if (errno == EAGAIN || errno == EINTR)
			goto reschedule;
		/* error case */
		what |= EVBUFFER_ERROR;
	} else if (res == 0) {
		/* eof case */
		what |= EVBUFFER_EOF;
	}

	if (res <= 0)
		goto error;

	bufferevent_add(&bufev->ev_read, bufev->timeout_read);

	/* See if this callbacks meets the water marks */
	len = EVBUFFER_LENGTH(bufev->input);
	if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
		return;
	if (bufev->wm_read.high != 0 && len >= bufev->wm_read.high) {
		struct evbuffer *buf = bufev->input;
		event_del(&bufev->ev_read);

		/* Now schedule a callback for us when the buffer changes */
		evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev);
	}

	/* Invoke the user callback - must always be called last */
	if (bufev->readcb != NULL)
		(*bufev->readcb)(bufev, bufev->cbarg);
	return;

reschedule:
	bufferevent_add(&bufev->ev_read, bufev->timeout_read);
	return;

error:
	(*bufev->errorcb)(bufev, what, bufev->cbarg);
}
//## int cevent.event_del(cevent event); static KMETHOD cevent_event_del(KonohaContext *kctx, KonohaStack* sfp) { kcevent *kcev = (kcevent *)sfp[0].asObject; int ret = event_del(kcev->event); KReturnUnboxValue(ret); }
/*
 * Unregister the libevent handler embedded in a unix-domain fable
 * event; the event structure itself is owned (and freed) elsewhere.
 */
void fable_event_del_unixdomain(struct fable_event_unixdomain *evt)
{
	event_del(&evt->event);
}
// Stop monitoring this session's libevent handle.  Only the event
// registration is removed; ev_ itself is not freed here — lifetime is
// managed elsewhere (presumably the destructor; verify against owner).
void Session::close() {
	event_del(ev_);
}
/** Work-alike replacement for event_free() on pre-Libevent-2.0 systems:
 * remove <b>ev</b> from its event base and release its storage.
 * Accepts NULL as a no-op, matching free()-style semantics, so callers
 * need not guard cleanup paths where the event was never created. */
void
tor_event_free(struct event *ev)
{
	if (ev == NULL)
		return;
	event_del(ev);
	tor_free(ev);
}
/*
 * Fork and run the lookup agent (LKA) process.  The parent returns the
 * child's pid immediately; the child drops privileges, wires up its
 * imsg peers and signal handlers, then enters the event loop and only
 * leaves it through lka_shutdown().
 */
pid_t
lka(void)
{
	pid_t pid;
	struct passwd *pw;
	struct event ev_sigint;
	struct event ev_sigterm;
	struct event ev_sigchld;

	/* inter-process channels this process talks on, all via imsg */
	struct peer peers[] = {
		{ PROC_PARENT, imsg_dispatch },
		{ PROC_MFA, imsg_dispatch },
		{ PROC_QUEUE, imsg_dispatch },
		{ PROC_SMTP, imsg_dispatch },
		{ PROC_MTA, imsg_dispatch },
		{ PROC_CONTROL, imsg_dispatch }
	};

	switch (pid = fork()) {
	case -1:
		fatal("lka: cannot fork");
	case 0:
		/* child: fall through and become the LKA */
		break;
	default:
		/* parent: hand the child's pid back to the caller */
		return (pid);
	}

	/* the child does not need the parent's full configuration */
	purge_config(PURGE_EVERYTHING);

	pw = env->sc_pw;

	smtpd_process = PROC_LKA;
	setproctitle("%s", env->sc_title[smtpd_process]);

	/* drop root: group first, then gid/uid, failing closed */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("lka: cannot drop privileges");

	imsg_callback = lka_imsg;
	event_init();

	SPLAY_INIT(&env->lka_sessions);

	signal_set(&ev_sigint, SIGINT, lka_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, lka_sig_handler, NULL);
	signal_set(&ev_sigchld, SIGCHLD, lka_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal_add(&ev_sigchld, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/*
	 * lka opens all kinds of files and sockets, so bump the limit to max.
	 * XXX: need to analyse the exact hard limit.
	 */
	fdlimit(1.0);

	config_pipes(peers, nitems(peers));
	config_peers(peers, nitems(peers));

	/* ignore them until we get our config */
	event_del(&env->sc_ievs[PROC_MTA]->ev);
	event_del(&env->sc_ievs[PROC_MFA]->ev);
	event_del(&env->sc_ievs[PROC_SMTP]->ev);

	if (event_dispatch() < 0)
		fatal("event_dispatch");

	lka_shutdown();

	return (0);
}