static int
be_async_enable(struct bufferevent *buf, short what)
{
	struct bufferevent_async *bev_async = upcast(buf);

	if (!bev_async->ok)
		return -1;

	if (bev_async->bev.connecting) {
		/* Don't launch anything during connection attempts. */
		return 0;
	}

	if (what & EV_READ)
		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
	if (what & EV_WRITE)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);

	/* If we newly enable reading or writing, and we aren't reading or
	   writing already, consider launching a new read or write. */

	if (what & EV_READ)
		bev_async_consider_reading(bev_async);
	if (what & EV_WRITE)
		bev_async_consider_writing(bev_async);
	return 0;
}
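/*
 * Illustrative sketch, not part of the original source: how application
 * code reaches be_async_enable() through the public bufferevent API when
 * the IOCP backend is active.  Only documented libevent calls are used;
 * the callback and helper names (example_read_cb, example_event_cb,
 * example_enable) are hypothetical.
 */
#include <event2/event.h>
#include <event2/bufferevent.h>

static void
example_read_cb(struct bufferevent *bev, void *ctx)
{
	char tmp[4096];
	size_t n;
	(void)ctx;
	/* Drain whatever the completed overlapped read left in the
	 * input buffer. */
	while ((n = bufferevent_read(bev, tmp, sizeof(tmp))) > 0)
		;
}

static void
example_event_cb(struct bufferevent *bev, short events, void *ctx)
{
	(void)ctx;
	if (events & (BEV_EVENT_ERROR|BEV_EVENT_EOF))
		bufferevent_free(bev);
}

static int
example_enable(struct bufferevent *bev)
{
	bufferevent_setcb(bev, example_read_cb, NULL, example_event_cb, NULL);
	/* bufferevent_enable() dispatches to be_async_enable(), which
	 * resets the generic timeouts and may launch an overlapped
	 * read/write; it deliberately launches nothing while a connect
	 * attempt is still pending. */
	return bufferevent_enable(bev, EV_READ|EV_WRITE);
}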
static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_write(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_WRITING;

	_bufferevent_incref_and_lock(bev);
	EVUTIL_ASSERT(bev_a->ok && bev_a->write_in_progress);

	evbuffer_commit_write(bev->output, nbytes);
	bev_a->write_in_progress = 0;

	if (ok && nbytes) {
		BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
		_bufferevent_decrement_write_buckets(&bev_a->bev, nbytes);
		if (evbuffer_get_length(bev->output) <= bev->wm_write.low)
			_bufferevent_run_writecb(bev);
		bev_async_consider_writing(bev_a);
	} else if (!ok) {
		what |= BEV_EVENT_ERROR;
		bev_a->ok = 0;
		_bufferevent_run_eventcb(bev, what);
	} else if (!nbytes) {
		what |= BEV_EVENT_EOF;
		bev_a->ok = 0;
		_bufferevent_run_eventcb(bev, what);
	}

	_bufferevent_decref_and_unlock(bev);
}
static void
be_async_outbuf_callback(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_async *bev_async = upcast(bev);

	/* If we successfully wrote from the outbuf, or we added data to the
	 * outbuf and were not writing before, we may want to write now. */

	_bufferevent_incref_and_lock(bev);

	if (cbinfo->n_deleted) {
		/* XXXX can't detect 0-length write completion */
		bev_async->write_in_progress = 0;
	}

	if (cbinfo->n_added || cbinfo->n_deleted)
		bev_async_consider_writing(bev_async);

	if (cbinfo->n_deleted) {
		BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);

		if (bev->writecb != NULL &&
		    evbuffer_get_length(bev->output) <= bev->wm_write.low)
			_bufferevent_run_writecb(bev);
	}

	_bufferevent_decref_and_unlock(bev);
}
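/*
 * Illustrative sketch, not part of the original source: the outbuf
 * callback above is driven by evbuffer's generic callback mechanism, and
 * would be attached to the bufferevent's output buffer in the async
 * constructor roughly as shown in the comment below.  The
 * demo_queue_write() helper is an assumption for illustration only.
 */
#include <event2/buffer.h>
#include <event2/bufferevent.h>

/* Registration sketch: hooking the callback onto bev->output so that every
 * add/drain of that buffer is reported through struct evbuffer_cb_info:
 *
 *	evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);
 */

static int
demo_queue_write(struct bufferevent *bev, const void *data, size_t len)
{
	/* bufferevent_write() appends to the output evbuffer; the
	 * resulting nonzero n_added count delivered to
	 * be_async_outbuf_callback() is what makes it consider launching
	 * an overlapped write. */
	return bufferevent_write(bev, data, len);
}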
static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_write(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_WRITING;
	ev_ssize_t amount_unwritten;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->write_in_progress);

	amount_unwritten = bev_a->write_in_progress - nbytes;
	evbuffer_commit_write_(bev->output, nbytes);
	bev_a->write_in_progress = 0;

	if (amount_unwritten)
		bufferevent_decrement_write_buckets_(&bev_a->bev,
		    -amount_unwritten);

	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
			if (evbuffer_get_length(bev->output) <=
			    bev->wm_write.low)
				bufferevent_run_writecb_(bev);
			bev_async_consider_writing(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what);
		} else if (!nbytes) {
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what);
		}
	}

	bufferevent_decref_and_unlock_(bev);
}
static int
be_async_enable(struct bufferevent *buf, short what)
{
	struct bufferevent_async *bev_async = upcast(buf);

	if (!bev_async->ok)
		return -1;

	/* NOTE: This interferes with non-blocking connect */
	if (what & EV_READ)
		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
	if (what & EV_WRITE)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);

	/* If we newly enable reading or writing, and we aren't reading or
	   writing already, consider launching a new read or write. */

	if (what & EV_READ)
		bev_async_consider_reading(bev_async);
	if (what & EV_WRITE)
		bev_async_consider_writing(bev_async);
	return 0;
}