static void
bev_async_consider_writing(struct bufferevent_async *beva)
{
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't write if there's a write in progress, or we do not
	 * want to write, or when there's nothing left to write. */
	if (beva->write_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_WRITE) ||
	    !evbuffer_get_length(bev->output)) {
		bev_async_del_write(beva);
		return;
	}

	at_most = evbuffer_get_length(bev->output);

	/* This is safe so long as bufferevent_get_write_max never returns
	 * more than INT_MAX.  That's true for now. XXXX */
	limit = (int)_bufferevent_get_write_max(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.write_suspended) {
		bev_async_del_write(beva);
		return;
	}

	/* XXXX doesn't respect low-water mark very well. */
	bufferevent_incref(bev);
	if (evbuffer_launch_write(bev->output, at_most,
	    &beva->write_overlapped)) {
		bufferevent_decref(bev);
		beva->ok = 0;
		_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
	} else {
		beva->write_in_progress = at_most;
		_bufferevent_decrement_write_buckets(&beva->bev, at_most);
		bev_async_add_write(beva);
	}
}
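/*
 * Editorial sketch, not part of the original file: how an application
 * typically reaches bev_async_consider_writing() through the public API.
 * On Windows, a base created with EVENT_BASE_FLAG_STARTUP_IOCP should hand
 * out an async (overlapped) bufferevent here; queuing data and enabling
 * EV_WRITE is what eventually lets the backend launch an overlapped write
 * via evbuffer_launch_write().  The example_* names are hypothetical and a
 * connected socket `fd` is assumed.
 */
static void
example_eventcb(struct bufferevent *bev, short events, void *ctx)
{
	if (events & (BEV_EVENT_ERROR|BEV_EVENT_EOF))
		bufferevent_free(bev);
}

static struct bufferevent *
example_start_write(struct event_base *base, evutil_socket_t fd)
{
	struct bufferevent *bev;

	bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
	if (!bev)
		return NULL;
	bufferevent_setcb(bev, NULL, NULL, example_eventcb, NULL);

	/* Data in bev->output plus EV_WRITE enabled is the precondition
	 * checked at the top of bev_async_consider_writing(). */
	bufferevent_write(bev, "hello", 5);
	bufferevent_enable(bev, EV_WRITE);
	return bev;
}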
static int
be_async_disable(struct bufferevent *bev, short what)
{
	struct bufferevent_async *bev_async = upcast(bev);

	/* XXXX If we disable reading or writing, we may want to consider
	 * canceling any in-progress read or write operation, though it might
	 * not work. */

	if (what & EV_READ) {
		BEV_DEL_GENERIC_READ_TIMEOUT(bev);
		bev_async_del_read(bev_async);
	}
	if (what & EV_WRITE) {
		BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
		bev_async_del_write(bev_async);
	}

	return 0;
}
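/*
 * Editorial sketch, not part of the original file: the public-API view of
 * be_async_disable().  bufferevent_disable() dispatches here through the
 * backend's ops table; as the XXXX comment above notes, an overlapped
 * read/write that is already in flight is not canceled, it simply is not
 * re-armed afterwards.  The helper name is hypothetical.
 */
static void
example_apply_backpressure(struct bufferevent *bev, int paused)
{
	if (paused)
		bufferevent_disable(bev, EV_READ); /* -> be_async_disable(bev, EV_READ) */
	else
		bufferevent_enable(bev, EV_READ);  /* re-arm the async read path */
}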
static void
be_async_destruct(struct bufferevent *bev)
{
	struct bufferevent_async *bev_async = upcast(bev);
	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
	evutil_socket_t fd;

	EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
			!upcast(bev)->read_in_progress);

	bev_async_del_read(bev_async);
	bev_async_del_write(bev_async);

	fd = _evbuffer_overlapped_get_fd(bev->input);
	if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
		/* XXXX possible double-close */
		evutil_closesocket(fd);
	}
	/* delete this in case non-blocking connect was used */
	if (event_initialized(&bev->ev_write)) {
		event_del(&bev->ev_write);
		_bufferevent_del_generic_timeout_cbs(bev);
	}
}
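/*
 * Editorial sketch, not part of the original file: what the destructor
 * above means for a caller.  With BEV_OPT_CLOSE_ON_FREE, bufferevent_free()
 * ends up in be_async_destruct() and closes the underlying socket; without
 * the flag the fd survives the free and remains the caller's
 * responsibility.  The function name and both bufferevent arguments are
 * hypothetical.
 */
static void
example_teardown(struct bufferevent *bev_owns_fd,
    struct bufferevent *bev_borrows_fd, evutil_socket_t fd)
{
	/* Created with BEV_OPT_CLOSE_ON_FREE: the socket is closed in
	 * be_async_destruct(), so do not close it again here. */
	bufferevent_free(bev_owns_fd);

	/* Created without BEV_OPT_CLOSE_ON_FREE: the caller still owns
	 * the socket and must close it itself. */
	bufferevent_free(bev_borrows_fd);
	evutil_closesocket(fd);
}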