/*
 * Process events that bind to the instruction that has just been decoded.
 *
 * Loops until no further event applies.  Each "after" event may unblock
 * further "before" events, so process_events_before() is re-run on every
 * iteration.
 *
 * Returns zero on success, a negative pt error code otherwise.
 */
static int process_events_after(struct pt_insn_decoder *decoder, struct pt_insn *insn)
{
	int pending, processed, errcode;

	if (!decoder || !insn)
		return -pte_internal;

	pending = event_pending(decoder);
	if (pending <= 0)
		/* No event (0) or an error (negative) - either way, done. */
		return pending;

	/* Reset per-instruction binding state before consuming events. */
	decoder->paging_event_bound = 0;
	decoder->vmcs_event_bound = 0;

	for (;;) {
		processed = process_one_event_after(decoder, insn);
		if (processed < 0)
			return processed;

		if (!processed)
			/* The pending event does not bind here; leave it queued. */
			return 0;

		/* The event was consumed; clear the process-event flag. */
		decoder->process_event = 0;

		/* Consuming an "after" event may expose "before" events. */
		errcode = process_events_before(decoder, insn);
		if (errcode < 0)
			return errcode;

		pending = event_pending(decoder);
		if (pending <= 0)
			return pending;
	}
}
/*
 * libcurl CURLMOPT_TIMERFUNCTION callback: translate curl's timer request
 * into the libevent timeout event stored in the context.
 *
 * timeout_ms == -1 : curl wants the timer cancelled
 * timeout_ms ==  0 : curl wants the timeout handled immediately
 * otherwise        : (re)arm the timer for timeout_ms milliseconds
 */
static void php_http_client_curl_event_timer(CURLM *multi, long timeout_ms, void *timer_data)
{
	php_http_client_curl_event_context_t *context = timer_data;
	struct timeval timeout;

#if DBG_EVENTS
	fprintf(stderr, "(%ld)", timeout_ms);
#endif

	switch (timeout_ms) {
	case -1:
		/* Cancel: only delete if the event exists and is scheduled. */
		if (event_initialized(context->timeout) && event_pending(context->timeout, EV_TIMEOUT, NULL)) {
			event_del(context->timeout);
		}
		break;

	case 0:
		/* Fire now: run the curl timeout action directly. */
		php_http_client_curl_event_handler(context, CURL_SOCKET_TIMEOUT, 0);
		break;

	default:
		/* Lazily set up the timeout event on first use. */
		if (!event_initialized(context->timeout)) {
			event_assign(context->timeout, context->evbase, CURL_SOCKET_TIMEOUT, 0, php_http_client_curl_event_timeout_callback, context);
		}
		timeout.tv_sec = timeout_ms / 1000;
		timeout.tv_usec = (timeout_ms % 1000) * 1000;

		/* Only (re)add if not already pending with this timeout. */
		if (!event_pending(context->timeout, EV_TIMEOUT, &timeout)) {
			event_add(context->timeout, &timeout);
		}
		break;
	}
}
/*
 * Set the read and write timeouts (in seconds) for a bufferevent.
 * Events that are already scheduled are re-added so the new timeout
 * takes effect immediately.
 */
void bufferevent_settimeout(struct bufferevent *bufev, int timeout_read, int timeout_write)
{
	/* Record the new values for future scheduling. */
	bufev->timeout_read = timeout_read;
	bufev->timeout_write = timeout_write;

	/* Re-arm any currently pending event with the updated timeout. */
	if (event_pending(&bufev->ev_read, EV_READ, NULL) != 0) {
		bufferevent_add(&bufev->ev_read, timeout_read);
	}
	if (event_pending(&bufev->ev_write, EV_WRITE, NULL) != 0) {
		bufferevent_add(&bufev->ev_write, timeout_write);
	}
}
/*
 * Re-add any scheduled read/write event so that its timeout reflects the
 * bufferevent's current timeout values.  Returns 0 on success, -1 if any
 * re-add failed.
 */
static int be_socket_adj_timeouts(struct bufferevent *bufev)
{
	int result = 0;

	if (event_pending(&bufev->ev_read, EV_READ, NULL)) {
		if (be_socket_add(&bufev->ev_read, &bufev->timeout_read) < 0) {
			result = -1;
		}
	}

	if (event_pending(&bufev->ev_write, EV_WRITE, NULL)) {
		if (be_socket_add(&bufev->ev_write, &bufev->timeout_write) < 0) {
			result = -1;
		}
	}

	return result;
}
/*
 * Map the watch's currently scheduled libevent conditions onto the
 * corresponding AvahiWatchEvent flags.
 */
static AvahiWatchEvent ev_watch_get_events(AvahiWatch *w)
{
	AvahiWatchEvent events = 0;

	if (event_pending(&w->ev, EV_READ, NULL))
		events |= AVAHI_WATCH_IN;

	if (event_pending(&w->ev, EV_WRITE, NULL))
		events |= AVAHI_WATCH_OUT;

	return events;
}
/*
 * Process all events that bind before the next instruction is decoded.
 *
 * Loops while events are pending and each one is actually consumed;
 * stops as soon as the queue is empty or the pending event does not
 * bind at this point.
 *
 * Returns zero on success, a negative pt error code otherwise.
 */
static int process_events_before(struct pt_insn_decoder *decoder, struct pt_insn *insn)
{
	if (!decoder || !insn)
		return -pte_internal;

	for (;;) {
		int pending, processed;

		pending = event_pending(decoder);
		if (pending < 0)
			return pending;

		if (!pending)
			/* Nothing queued - done. */
			break;

		processed = process_one_event_before(decoder, insn);
		if (processed < 0)
			return processed;

		if (!processed)
			/* The event does not bind here; leave it queued. */
			break;

		/* Event consumed; clear the flag and look for the next one. */
		decoder->process_event = 0;
	}

	return 0;
}
/*
 * Destroy the curl/libevent integration context.
 *
 * Detaches all curl multi callbacks first so curl cannot call back into
 * the context while it is being torn down, then cancels and frees the
 * timeout event, the event base, and the context itself.
 */
static void php_http_client_curl_event_dtor(void **context)
{
	php_http_client_curl_event_context_t *ctx = *context;
	php_http_client_curl_t *curl;

#if DBG_EVENTS
	fprintf(stderr, "D");
#endif

	curl = ctx->client->ctx;

	/* Unhook socket and timer callbacks from the curl multi handle. */
	curl_multi_setopt(curl->handle->multi, CURLMOPT_SOCKETDATA, NULL);
	curl_multi_setopt(curl->handle->multi, CURLMOPT_SOCKETFUNCTION, NULL);
	curl_multi_setopt(curl->handle->multi, CURLMOPT_TIMERDATA, NULL);
	curl_multi_setopt(curl->handle->multi, CURLMOPT_TIMERFUNCTION, NULL);

	/* Cancel the timeout only if it was ever assigned and is scheduled. */
	if (event_initialized(ctx->timeout) && event_pending(ctx->timeout, EV_TIMEOUT, NULL)) {
		event_del(ctx->timeout);
	}
	efree(ctx->timeout);
	event_base_free(ctx->evbase);
	efree(ctx);
	*context = NULL;
}
/*
 * Run one iteration of the event loop, making sure a timeout event exists
 * so the loop cannot block forever.
 *
 * Returns SUCCESS, or FAILURE if event setup or the loop itself fails.
 */
static ZEND_RESULT_CODE php_http_client_curl_event_wait(void *context, struct timeval *custom_timeout)
{
	php_http_client_curl_event_context_t *ctx = context;
	struct timeval timeout;

#if DBG_EVENTS
	fprintf(stderr, "W");
#endif

	if (!event_initialized(ctx->timeout)) {
		/* First call: set up the timeout event.
		 * NOTE(review): on this path the event is assigned but never
		 * event_add()ed before entering the loop - confirm whether the
		 * loop can block indefinitely on the very first wait. */
		if (0 > event_assign(ctx->timeout, ctx->evbase, CURL_SOCKET_TIMEOUT, 0, php_http_client_curl_event_timeout_callback, ctx)) {
			return FAILURE;
		}
	} else if (custom_timeout && timerisset(custom_timeout)) {
		/* Caller-supplied timeout takes precedence. */
		if (0 > event_add(ctx->timeout, custom_timeout)) {
			return FAILURE;
		}
	} else if (!event_pending(ctx->timeout, EV_TIMEOUT, NULL)) {
		/* No timeout scheduled: fall back to curl's suggested timeout. */
		php_http_client_curl_get_timeout(ctx->client->ctx, 1000, &timeout);
		if (0 > event_add(ctx->timeout, &timeout)) {
			return FAILURE;
		}
	}

	/* Dispatch exactly one round of events. */
	if (0 > event_base_loop(ctx->evbase, EVLOOP_ONCE)) {
		return FAILURE;
	}

	return SUCCESS;
}
/*
 * Detach the tun device's read event from the event loop.
 * Safe to call when the event was never initialized or is not scheduled.
 */
void remove_from_event_loop(struct tun_info *info)
{
	/* Nothing to clean up if the event was never set up. */
	if (!event_initialized(&info->tun_ev))
		return;

	if (event_pending(&info->tun_ev, EV_READ, NULL))
		event_del(&info->tun_ev);
}
/*
 * (Re)schedule a libcouchbase timer backed by a libevent event.
 *
 * Cancels any pending timeout, re-assigns the event with the persistent
 * timeout flags and new handler, and adds it with the requested delay.
 *
 * Returns 0 on success (or no-op), the event_add() result otherwise.
 */
static int lcb_io_update_timer(struct lcb_io_opt_st *iops, void *timer, lcb_uint32_t usec, void *cb_data, void (*handler)(lcb_socket_t sock, short which, void *cb_data))
{
	short flags = EV_TIMEOUT | EV_PERSIST;
	struct timeval tmo;

	if (flags == event_get_events(timer) && handler == event_get_callback(timer)) {
		/* no change! */
		/* NOTE(review): this shortcut also skips event_add(), so a new
		 * |usec| value is never applied when flags/handler match - confirm
		 * whether callers rely on updating only the delay. */
		return 0;
	}

	/* A pending event must be deleted before it can be re-assigned. */
	if (event_pending(timer, EV_TIMEOUT, 0)) {
		event_del(timer);
	}
	event_assign(timer, ((struct libevent_cookie *)iops->v.v2.cookie)->base, -1, flags, handler, cb_data);

	/* Convert microseconds into the timeval event_add() expects. */
	tmo.tv_sec = usec / 1000000;
	tmo.tv_usec = usec % 1000000;
	return event_add(timer, &tmo);
}
static void control_close(struct ctl_conn *c) { size_t *count; count = tree_xget(&ctl_count, c->euid); (*count)--; if (*count == 0) { tree_xpop(&ctl_count, c->euid); free(count); } tree_xpop(&ctl_conns, c->id); mproc_clear(&c->mproc); free(c); stat_backend->decrement("control.session", 1); if (getdtablesize() - getdtablecount() < CONTROL_FD_RESERVE) return; if (!event_pending(&control_state.ev, EV_READ, NULL)) { log_warnx("warn: re-enabling ctl connections"); event_add(&control_state.ev, NULL); } }
/*
 * Walk this event thread's connection list and, during shutdown, push each
 * connection toward exit by force-activating its pending client/server
 * events so their callbacks run and can observe the exit phase.
 */
static void chassis_event_thread_update_conn_status(chassis_event_thread_t *thread)
{
	network_mysqld_con *conn = NULL;
	GList *gl_conn = NULL;
	network_mysqld_con_lua_t *st = NULL;

	g_assert(thread != NULL);

	gl_conn = thread->connection_list;

	while (gl_conn) {
		conn = gl_conn->data;
		st = conn->plugin_con_state;

		/* On normal shutdown, move connections into the TX-exit phase
		 * and stamp the time the exit began. */
		if (chassis_is_shutdown_normal() && g_atomic_int_get(&conn->conn_status.exit_phase) != CON_EXIT_TX) {
			g_atomic_int_set(&conn->conn_status.exit_begin_time, time(NULL));
			g_atomic_int_set(&conn->conn_status.exit_phase, CON_EXIT_TX);
		}

		if (g_atomic_int_get(&conn->conn_status.exit_phase) == CON_EXIT_KILL ||
				g_atomic_int_get(&conn->conn_status.exit_phase) == CON_EXIT_TX) {
			/*|| (st != NULL && st->backend != NULL && IS_BACKEND_WAITING_EXIT(st->backend)))*/
			struct event *ev = NULL;
			gchar *event_msg = NULL;

			/* Prefer the client event; fall back to the server event.
			 * NOTE(review): the fallback dereferences conn->server without
			 * a NULL check - confirm a server connection always exists in
			 * these exit phases. */
			int pending = event_pending(&conn->client->event, EV_READ|EV_WRITE|EV_TIMEOUT, NULL);
			if (pending) {
				ev = &conn->client->event;
				event_msg = "client";
			} else {
				pending = event_pending(&conn->server->event, EV_READ|EV_WRITE|EV_TIMEOUT, NULL);
				ev = &conn->server->event;
				event_msg = "server";
			}

			if (pending != 0) {
				/*
				 * 1 stands for the times of calling callback function after manual active event,
				 * this parameter has been obsoleted at libevent-2.0.
				 */
				g_log_dbproxy(g_debug, "pending %s's %d event", event_msg, pending);
				event_active(ev, pending, 1);
			}
		}

		gl_conn = g_list_next(gl_conn);
	}
}
static void lcb_io_delete_timer(struct lcb_io_opt_st *iops, void *event) { (void)iops; if (event_pending(event, EV_TIMEOUT, 0) != 0 && event_del(event) == -1) { iops->v.v2.error = EINVAL; } event_assign(event, ((struct libevent_cookie *)iops->v.v2.cookie)->base, -1, 0, NULL, NULL); }
/*
 * Free a libevent event owned by libcouchbase, removing it from the
 * event loop first if it is still scheduled.
 */
static void lcb_io_destroy_event(struct lcb_io_opt_st *iops, void *event)
{
	(void)iops; /* unused */

	/* event_free() on a scheduled event would be unsafe; cancel first. */
	if (event_pending(event, EV_READ | EV_WRITE | EV_TIMEOUT, 0) != 0)
		event_del(event);

	event_free(event);
}
/*
 * Firmware entry point: initialize all subsystems, then run the main
 * superloop - high-priority queued events first, otherwise service the
 * UART command line.
 */
int main(void)
{
	lb_init(&lb);
	event_init();
	motor_init();
	uart_init(); // init USART
	enc_init();
	i2c_init();
	adc_init();
	kalman_init();

	sei(); // enable interrupts

	// Wait a second at startup
	_delay_ms(1000);

	// send initial string
	printf_P(PSTR("Hello world!\n"));

	// IMU is brought up last, after the settling delay.
	imu_init();

	for (;/*ever*/;) {
		// ADCSRA |= (1<<ADSC); // Set start conversion bit and wait for conversion to finish
		// while(ADCSRA&(1<<ADSC));
		// OCR1AL = ADCH; // Set ADC reading to timer 0 compare

		// Queued events take priority over command-line handling.
		if(event_pending())
		{
			event_action();
		}
		else // No pending operation, do low priority tasks
		{
			// dequeue receive buffer if any bytes waiting
			while (uart_avail())
			{
				char c = uart_getc();
				if (lb_append(&lb, c) == LB_BUFFER_FULL)
				{
					lb_init(&lb); // Clear line
					printf_P(PSTR("\nMax line length exceeded\n"));
				}
				// Process command if line buffer is ready ...
				if (lb_line_ready(&lb))
				{
					strcpy(cmd_string,lb_gets(&lb));
					do_cmd(cmd_string);
					lb_init(&lb);
				}
			}
		}
	} // Process command if line buffer is terminated by a line feed or carriage return

	return 0;
}
/*
 * poll() handler: register on the read wait queue and report the device
 * readable whenever at least one touch event is queued.
 */
static unsigned int xts_poll(struct file *filp, poll_table * wait)
{
	struct xts_dev *dev = filp->private_data;

	poll_wait(filp, &dev->read_wait, wait);

	return event_pending(dev) ? (POLLIN | POLLRDNORM) : 0;
}
/* Must be an easier way to figure out when an event is going to fire */
/*
 * Return the number of whole seconds until |ev|'s timeout fires.
 * Returns 0 when no timeout is pending or the timeout has already expired.
 *
 * Fixes: the original ignored event_pending()'s return value, so
 * |event_tv| was read uninitialized when no timeout was scheduled (UB),
 * and a negative remainder wrapped to a huge unsigned value.
 */
uint32_t event_remaining_seconds(struct event *ev)
{
	struct timeval now_tv;
	struct timeval event_tv;
	struct timeval remaining_tv;

	/* event_pending() fills event_tv only when a timeout is scheduled. */
	if (!event_pending(ev, EV_TIMEOUT, &event_tv))
		return 0;

	evutil_gettimeofday(&now_tv, NULL);
	evutil_timersub(&event_tv, &now_tv, &remaining_tv);

	/* Clamp an already-expired timeout instead of wrapping negative. */
	if (remaining_tv.tv_sec < 0)
		return 0;

	return remaining_tv.tv_sec;
}
int spnav_remove_events(int type) { int rm_count = 0; #ifdef USE_X11 if(dpy) { XEvent xev; while(XCheckIfEvent(dpy, &xev, match_events, (char*)&type)) { rm_count++; } return rm_count; } #endif if(sock) { struct event_node *tmplist, *tmptail; if(!(tmplist = tmptail = malloc(sizeof *tmplist))) { return -1; } tmplist->next = 0; /* while there are events in the event queue, or the daemon socket */ while(event_pending(sock)) { spnav_event event; read_event(sock, &event); /* remove next event */ if(event.type != type) { /* We don't want to drop this one, wrong type. Keep the event * in the temporary list, for deferred reinsertion */ enqueue_event(&event, &tmptail); } else { rm_count++; } } /* reinsert any events we removed that we didn't mean to */ while(tmplist->next) { struct event_node *node = tmplist->next; enqueue_event(&node->event, 0); free(tmplist); tmplist = node; } return rm_count; } return 0; }
/*
 * Apply the bufferevent's current timeout values to any scheduled
 * read/write event: re-add with the timeout when one is set, otherwise
 * strip the timer while leaving the I/O event scheduled.
 *
 * Returns 0 on success, -1 if any re-add failed.
 */
static int be_socket_adj_timeouts(struct bufferevent *bufev)
{
	int r = 0;
	if (event_pending(&bufev->ev_read, EV_READ, NULL)) {
		if (evutil_timerisset(&bufev->timeout_read)) {
			if (be_socket_add(&bufev->ev_read, &bufev->timeout_read) < 0)
				r = -1;
		} else {
			/* Timeout cleared: drop only the timer, keep EV_READ armed. */
			event_remove_timer(&bufev->ev_read);
		}
	}
	if (event_pending(&bufev->ev_write, EV_WRITE, NULL)) {
		if (evutil_timerisset(&bufev->timeout_write)) {
			if (be_socket_add(&bufev->ev_write, &bufev->timeout_write) < 0)
				r = -1;
		} else {
			event_remove_timer(&bufev->ev_write);
		}
	}
	return r;
}
/*
 * Read callback: echo whatever arrived on |fd| and demonstrate
 * event_pending()/event_active() on the global write event |evw|.
 *
 * Fixes: the original called read(fd, buf, 1024) into a 1024-byte buffer
 * and then wrote buf[ret] - out of bounds when the buffer was filled
 * completely, and buf[-1] (UB) when read() failed with -1.
 */
void read_cb(evutil_socket_t fd, short evtype, void *arg)
{
	char buf[1024];
	ssize_t ret;

	printf("read_cb\n");

	/* Reserve one byte for the terminator and bail out on error/EOF. */
	ret = read(fd, buf, sizeof(buf) - 1);
	if (ret <= 0)
		return;
	buf[ret] = '\0';
	printf("read == %s\n", buf);

	/* test event_active and event_pending function. */
	if(event_pending(evw, EV_WRITE, NULL) == 0) {
		printf("evw being pending, make it active now.\n");
		event_active(evw, EV_WRITE, 1); /* no matter the event is pending nor non-pending. */
	}
}
static void php_http_client_curl_event_callback(int socket, short action, void *event_data) { php_http_client_curl_event_context_t *ctx = event_data; php_http_client_curl_t *curl = ctx->client->ctx; #if DBG_EVENTS fprintf(stderr, "E"); #endif php_http_client_curl_event_handler(event_data, socket, etoca(action)); /* remove timeout if there are no transfers left */ if (!curl->unfinished && event_initialized(ctx->timeout) && event_pending(ctx->timeout, EV_TIMEOUT, NULL)) { event_del(ctx->timeout); } }
/*
 * Regression test for bufferevent read/write watermarks, runnable over a
 * bufferevent pair or over a socketpair.
 */
static void test_bufferevent_watermarks_impl(int use_pair)
{
	struct bufferevent *bev1 = NULL, *bev2 = NULL;
	char buffer[65000];
	int i;

	test_ok = 0;

	if (use_pair) {
		struct bufferevent *pair[2];
		tt_assert(0 == bufferevent_pair_new(NULL, 0, pair));
		bev1 = pair[0];
		bev2 = pair[1];
		bufferevent_setcb(bev1, NULL, wm_writecb, errorcb, NULL);
		bufferevent_setcb(bev2, wm_readcb, NULL, errorcb, NULL);
	} else {
		/* NOTE(review): this branch reads a different `pair` - presumably a
		 * file-scope socketpair set up by the harness, shadowed by the local
		 * above - confirm against the rest of the test file. */
		bev1 = bufferevent_new(pair[0], NULL, wm_writecb, wm_errorcb, NULL);
		bev2 = bufferevent_new(pair[1], wm_readcb, NULL, wm_errorcb, NULL);
	}
	bufferevent_disable(bev1, EV_READ);
	bufferevent_enable(bev2, EV_READ);

	/* Fill the payload with a repeating byte pattern. */
	for (i = 0; i < sizeof(buffer); i++)
		buffer[i] = (char)i;

	/* limit the reading on the receiving bufferevent */
	bufferevent_setwatermark(bev2, EV_READ, 10, 20);

	/* Tell the sending bufferevent not to notify us till it's down to
	   100 bytes. */
	bufferevent_setwatermark(bev1, EV_WRITE, 100, 2000);

	bufferevent_write(bev1, buffer, sizeof(buffer));

	event_dispatch();

	tt_int_op(test_ok, ==, 2);

	/* The write callback drained all the data from outbuf, so we
	 * should have removed the write event... */
	tt_assert(!event_pending(&bev2->ev_write, EV_WRITE, NULL));

end:
	bufferevent_free(bev1);
	bufferevent_free(bev2);
}
static void control_close(struct ctl_conn *c) { TAILQ_REMOVE(&ctl_conns, c, entry); event_del(&c->iev.ev); close(c->iev.ibuf.fd); imsg_clear(&c->iev.ibuf); free(c); stat_backend->decrement("control.session", 1); if (available_fds(CONTROL_FD_RESERVE)) return; if (!event_pending(&control_state.ev, EV_READ, NULL)) { log_warnx("re-enabling ctl connections"); event_add(&control_state.ev, NULL); } }
/*
 * Output-buffer callback: when data is appended to the outbuf while
 * writing is enabled but no write event is scheduled, schedule one so
 * the data actually gets flushed.
 */
static void bufferevent_socket_outbuf_cb(struct evbuffer *buf, const struct evbuffer_cb_info *cbinfo, void *arg)
{
	struct bufferevent *bufev = arg;
	struct bufferevent_private *bufev_p =
		EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	if (cbinfo->n_added &&
	    (bufev->enabled & EV_WRITE) &&
	    !event_pending(&bufev->ev_write, EV_WRITE, NULL) &&
	    !bufev_p->write_suspended) {
		/* Somebody added data to the buffer, and we would like to
		 * write, and we were not writing.  So, start writing. */
		be_socket_add(&bufev->ev_write, &bufev->timeout_write);
		/* XXXX handle failure from be_socket_add */
	}
}
void control_close(int fd) { struct ctl_conn *c; if ((c = control_connbyfd(fd)) == NULL) { log_warn("control_close: fd %d: not found", fd); return; } TAILQ_REMOVE(&ctl_conns, c, entry); event_del(&c->iev.ev); imsg_clear(&c->iev.ibuf); close(fd); free(c); if (stat_decrement(STATS_CONTROL_SESSION) < env->sc_maxconn && !event_pending(&control_state.ev, EV_READ, NULL)) { log_warnx("re-enabling ctl connections"); event_add(&control_state.ev, NULL); } }
/* TODO(wad) rename to schedule_event */
/*
 * Schedule event |id| to fire in |sec| seconds; a negative |sec| adds the
 * event with no timeout.  Compiled out when WITH_EVENTS is unset.
 */
void trigger_event (struct state *state, enum event_id_t id, int sec)
{
#ifdef WITH_EVENTS
  struct event *e = state->events[id];
  struct timeval delay = { sec, 0 };

  /* Fallthrough to tlsdate if there is no resolver. */
  if (!e && id == E_RESOLVER)
    e = state->events[E_TLSDATE];

  if (!e)
    {
      info ("trigger_event with NULL |e|. I hope this is a test!");
      return;
    }

  /* An event must be removed before it can be rescheduled. */
  if (event_pending (e, EV_READ|EV_WRITE|EV_TIMEOUT|EV_SIGNAL, NULL))
    event_del (e);

  /* Note! A NULL timeout will not fire a TIMEOUT event. */
  event_add (e, sec >= 0 ? &delay : NULL);
#endif
}
/*
 * read() handler: copy queued touchscreen events to userspace, blocking
 * (unless O_NONBLOCK) until at least one event is available.
 *
 * Returns the number of bytes copied, or a negative errno when nothing
 * was copied.
 *
 * Fixes: copy_to_user() returns the number of bytes NOT copied - the
 * original stored that positive residue in |err| and could return it to
 * userspace as if it were a byte count; it now maps to -EFAULT.  The
 * event is also only consumed (event_pull) after a successful copy, so
 * a faulting reader no longer silently drops an event.
 */
static ssize_t xts_read(struct file *filp, char *buffer, size_t count, loff_t * ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	struct xts_dev *dev = filp->private_data;
	char *ptr = buffer;
	int err = 0;

	add_wait_queue(&dev->read_wait, &wait);
	while (count >= sizeof (struct ts_event)) {
		err = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (event_pending(dev)) {
			struct ts_event *evt = event_get(dev);

			if (copy_to_user(ptr, evt, sizeof (struct ts_event))) {
				err = -EFAULT;
				break;
			}
			/* Consume the event only once it reached userspace. */
			event_pull(dev);

			ptr += sizeof (struct ts_event);
			count -= sizeof (struct ts_event);
			continue;
		}

		/* Queue empty: sleep unless the caller asked not to block. */
		set_current_state(TASK_INTERRUPTIBLE);
		err = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&dev->read_wait, &wait);

	/* Partial reads succeed; only a fully empty read reports the error. */
	return ptr == buffer ? err : ptr - buffer;
}
/*
 * (Re)register a persistent read/write event for |sock|.
 *
 * If the requested flags and handler already match the event's current
 * configuration, nothing is done.  Otherwise the event is cancelled,
 * re-assigned, and added without a timeout.
 *
 * Returns 0 on success (or no-op), the event_add() result otherwise.
 */
static int lcb_io_update_event(struct lcb_io_opt_st *iops, lcb_socket_t sock, void *event, short flags, void *cb_data, void (*handler)(lcb_socket_t sock, short which, void *cb_data))
{
	flags |= EV_PERSIST;
	if (flags == event_get_events(event) && handler == event_get_callback(event)) {
		/* no change! */
		return 0;
	}

	/* NOTE(review): the pending check covers only EV_READ|EV_WRITE; an
	 * event pending solely on EV_TIMEOUT would be re-assigned without
	 * event_del() - confirm such events never reach this path. */
	if (event_pending(event, EV_READ | EV_WRITE, 0)) {
		event_del(event);
	}
	event_assign(event, ((struct libevent_cookie *)iops->v.v2.cookie)->base, sock, flags, handler, cb_data);
	return event_add(event, NULL);
}
/*
 * Non-blocking poll for the next spacenav event.
 * Returns the event type when one was retrieved, 0 otherwise.
 */
int spnav_poll_event(spnav_event *event)
{
#ifdef USE_X11
	if(dpy) {
		XEvent xev;

		if(!XPending(dpy))
			return 0;

		XNextEvent(dpy, &xev);
		return spnav_x11_event(&xev, event);
	}
#endif

	/* Daemon-socket path: read only when data is already waiting. */
	if(sock && event_pending(sock) && read_event(sock, event) > 0)
		return event->type;

	return 0;
}
/*
 * Queue |len| bytes on the node's send buffer and make sure its write
 * event is scheduled so the data gets flushed.
 *
 * Fixes: the two error messages were garbled ("Try to an closed node...")
 * and lacked trailing newlines; both are rewritten.
 */
void fs_node_send_data(struct fs_node* node, BYTE* data, size_t len){
	if(fs_node_is_closed(node)){
		fprintf(stderr, "Attempt to send data to a closed node[%d]\n", node->node_id);
		return;
	}

	pthread_mutex_lock(&node->write_mutex);
	if(!node->send_buffer){
		fprintf(stderr, "Attempt to send data to an unreachable node[%d]\n", node->node_id);
		pthread_mutex_unlock(&node->write_mutex);
		return;
	}
	fs_stream_write_data(node->send_buffer, data, len);
	pthread_mutex_unlock(&node->write_mutex);

	/* NOTE(review): this pending-check/add runs outside write_mutex, so a
	 * concurrent caller may race it; event_add on an added event is safe,
	 * but confirm the intended locking discipline. */
	if(!event_pending(node->write_ev, EV_WRITE, NULL)){
		event_add(node->write_ev, NULL);
	}
}