/* Launch an overlapped read on the input buffer if one is wanted and none
 * is pending.  The read size is bounded by the high watermark (if set) and
 * the rate-limit allowance.  Also maintains the async "read wanted"
 * bookkeeping via bev_async_add_read()/bev_async_del_read(). */
static void
bev_async_consider_reading(struct bufferevent_async *beva)
{
	size_t cur_size;
	size_t read_high;
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't read if there is a read in progress, or we do not
	 * want to read. */
	if (beva->read_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_READ)) {
		bev_async_del_read(beva);
		return;
	}

	/* Don't read if we're full */
	cur_size = evbuffer_get_length(bev->input);
	read_high = bev->wm_read.high;
	if (read_high) {
		if (cur_size >= read_high) {
			/* Already at/over the high watermark: stop reading. */
			bev_async_del_read(beva);
			return;
		}
		at_most = read_high - cur_size;
	} else {
		at_most = 16384; /* FIXME totally magic. */
	}

	/* XXXX This over-commits. */
	/* XXXX see also note above on cast on _bufferevent_get_write_max() */
	/* Clamp to the rate-limit allowance.  When limit is negative the
	 * unsigned comparison against (size_t)limit is huge and the
	 * limit >= 0 test fails, so at_most is left unclamped —
	 * presumably negative means "no limit"; confirm against
	 * _bufferevent_get_read_max(). */
	limit = (int)_bufferevent_get_read_max(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.read_suspended) {
		bev_async_del_read(beva);
		return;
	}

	/* Hold a reference across the overlapped operation; on launch
	 * failure it is dropped below after reporting the error.  NOTE
	 * (review): on success the matching decref is presumably done by
	 * the completion path — confirm in the read-complete callback. */
	bufferevent_incref(bev);
	if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
		beva->ok = 0;
		_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
		bufferevent_decref(bev);
	} else {
		/* Record the outstanding read size and charge the rate
		 * bucket up front for the committed amount. */
		beva->read_in_progress = at_most;
		_bufferevent_decrement_read_buckets(&beva->bev, at_most);
		bev_async_add_read(beva);
	}

	return;
}
void _bufferevent_decref_and_unlock(struct bufferevent *bufev) { struct bufferevent_private *bufev_private = EVUTIL_UPCAST(bufev, struct bufferevent_private, bev); struct bufferevent *underlying; if (--bufev_private->refcnt) { BEV_UNLOCK(bufev); return; } underlying = bufferevent_get_underlying(bufev); /* Clean up the shared info */ if (bufev->be_ops->destruct) bufev->be_ops->destruct(bufev); /* XXX what happens if refcnt for these buffers is > 1? * The buffers can share a lock with this bufferevent object, * but the lock might be destroyed below. */ /* evbuffer will free the callbacks */ evbuffer_free(bufev->input); evbuffer_free(bufev->output); if (bufev_private->rate_limiting) { if (bufev_private->rate_limiting->group) bufferevent_remove_from_rate_limit_group(bufev); if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event)) event_del(&bufev_private->rate_limiting->refill_bucket_event); event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event); mm_free(bufev_private->rate_limiting); bufev_private->rate_limiting = NULL; } event_debug_unassign(&bufev->ev_read); event_debug_unassign(&bufev->ev_write); BEV_UNLOCK(bufev); if (bufev_private->own_lock) EVTHREAD_FREE_LOCK(bufev_private->lock, EVTHREAD_LOCKTYPE_RECURSIVE); /* Free the actual allocated memory. */ mm_free(bufev - bufev->be_ops->mem_offset); /* release the reference to underlying now that we no longer need * the reference to it. This is mainly in case our lock is shared * with underlying. * XXX Should we/can we just refcount evbuffer/bufferevent locks? * It would probably save us some headaches. */ if (underlying) bufferevent_decref(underlying); }
/* Kick off an asynchronous connect on FD toward SA using ConnectEx().
 * Returns 0 if the connect completed or is pending, -1 on failure
 * (including an unsupported address family). */
int
bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
	const struct sockaddr *sa, int socklen)
{
	struct bufferevent_async *bev_async = upcast(bev);
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns();
	struct sockaddr_storage anyaddr;
	BOOL ok;

	EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);

	/* ConnectEx() refuses to operate on an unbound socket, so bind it
	 * to the wildcard address for its family first.  A WSAEINVAL from
	 * bind() means the socket was already bound, which is fine. */
	memset(&anyaddr, 0, sizeof(anyaddr));
	switch (sa->sa_family) {
	case AF_INET: {
		struct sockaddr_in *sin = (struct sockaddr_in *)&anyaddr;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&anyaddr;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = in6addr_any;
		break;
	}
	default:
		/* Well, the user will have to bind() */
		return -1;
	}
	if (bind(fd, (struct sockaddr *)&anyaddr, sizeof(anyaddr)) < 0 &&
	    WSAGetLastError() != WSAEINVAL)
		return -1;

	/* Keep the base alive and the bufferevent referenced while the
	 * overlapped connect is outstanding. */
	event_base_add_virtual(bev->ev_base);
	bufferevent_incref(bev);
	ok = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
	    &bev_async->connect_overlapped.overlapped);
	if (ok || WSAGetLastError() == ERROR_IO_PENDING)
		return 0;

	/* Launch failed outright: undo the virtual event and reference. */
	event_base_del_virtual(bev->ev_base);
	bufferevent_decref(bev);
	return -1;
}
/* Launch an overlapped write of the output buffer if one is wanted and
 * none is pending.  The write size is the whole output buffer, clamped by
 * the rate-limit allowance.  Also maintains the async "write wanted"
 * bookkeeping via bev_async_add_write()/bev_async_del_write(). */
static void
bev_async_consider_writing(struct bufferevent_async *beva)
{
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't write if there's a write in progress, or we do not
	 * want to write, or when there's nothing left to write. */
	if (beva->write_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_WRITE) ||
	    !evbuffer_get_length(bev->output)) {
		bev_async_del_write(beva);
		return;
	}

	at_most = evbuffer_get_length(bev->output);

	/* This is safe so long as bufferevent_get_write_max never returns
	 * more than INT_MAX.  That's true for now.  XXXX */
	/* When limit is negative the unsigned comparison fails and at_most
	 * is left unclamped — presumably negative means "no limit". */
	limit = (int)_bufferevent_get_write_max(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.write_suspended) {
		bev_async_del_write(beva);
		return;
	}

	/* XXXX doesn't respect low-water mark very well. */
	/* Hold a reference across the overlapped operation.  NOTE(review):
	 * on failure the decref here happens BEFORE the error callback,
	 * the reverse of the order used in the read path — confirm this
	 * asymmetry is intentional. */
	bufferevent_incref(bev);
	if (evbuffer_launch_write(bev->output, at_most, &beva->write_overlapped)) {
		bufferevent_decref(bev);
		beva->ok = 0;
		_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
	} else {
		/* Record the outstanding write size and charge the rate
		 * bucket up front for the committed amount. */
		beva->write_in_progress = at_most;
		_bufferevent_decrement_write_buckets(&beva->bev, at_most);
		bev_async_add_write(beva);
	}
}