Example 1
0
static int
be_async_enable(struct bufferevent *buf, short what)
{
	struct bufferevent_async *ba = upcast(buf);

	/* A bufferevent that has hit a fatal error stays disabled. */
	if (!ba->ok)
		return -1;

	/* Don't launch anything during connection attempts. */
	if (ba->bev.connecting)
		return 0;

	/* Re-arm the generic timeout for each direction being enabled. */
	if (what & EV_READ)
		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
	if (what & EV_WRITE)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);

	/* A newly enabled direction with no operation already in flight
	 * may need a fresh read or write launched. */
	if (what & EV_READ)
		bev_async_consider_reading(ba);
	if (what & EV_WRITE)
		bev_async_consider_writing(ba);

	return 0;
}
Example 2
0
static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	/* Completion callback for an overlapped read: commit the newly read
	 * bytes into the input buffer and report progress, EOF, or error.
	 * 'ok' is the success flag of the overlapped operation; 'nbytes' is
	 * the number of bytes actually transferred. */
	struct bufferevent_async *bev_a = upcast_read(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_READING;

	/* Hold a reference and the lock: the user callbacks invoked below
	 * may otherwise free or modify the bufferevent under us. */
	_bufferevent_incref_and_lock(bev);
	EVUTIL_ASSERT(bev_a->ok && bev_a->read_in_progress);

	/* Make the read bytes visible in the input buffer, then mark that
	 * no overlapped read is pending any longer. */
	evbuffer_commit_read(bev->input, nbytes);
	bev_a->read_in_progress = 0;

	if (ok && nbytes) {
		/* Successful non-empty read: re-arm the read timeout,
		 * account the bytes against the read buckets, run the user
		 * read callback once the low watermark is met, and consider
		 * launching the next read. */
		BEV_RESET_GENERIC_READ_TIMEOUT(bev);
		_bufferevent_decrement_read_buckets(&bev_a->bev, nbytes);
		if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
			_bufferevent_run_readcb(bev);
		bev_async_consider_reading(bev_a);
	} else if (!ok) {
		/* The overlapped operation itself failed; mark the
		 * bufferevent broken and report the error. */
		what |= BEV_EVENT_ERROR;
		bev_a->ok = 0;
		_bufferevent_run_eventcb(bev, what);
	} else if (!nbytes) {
		/* A successful zero-byte read is treated as end-of-file. */
		what |= BEV_EVENT_EOF;
		bev_a->ok = 0;
		_bufferevent_run_eventcb(bev, what);
	}

	_bufferevent_decref_and_unlock(bev);
}
Example 3
0
static void
be_async_inbuf_callback(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_async *bev_a = upcast(bev);
	int added = cbinfo->n_added != 0;

	/* A successful read into the inbuf, or data drained from the inbuf
	 * while no read was pending, may mean it is time to read again. */
	_bufferevent_incref_and_lock(bev);

	if (added) {
		/* XXXX can't detect 0-length read completion */
		bev_a->read_in_progress = 0;
	}

	if (added || cbinfo->n_deleted)
		bev_async_consider_reading(bev_a);

	if (added) {
		/* Fresh data arrived: re-arm the timeout and run the user
		 * read callback once the low watermark is satisfied. */
		BEV_RESET_GENERIC_READ_TIMEOUT(bev);
		if (evbuffer_get_length(bev->input) >= bev->wm_read.low &&
		    bev->readcb != NULL)
			_bufferevent_run_readcb(bev);
	}

	_bufferevent_decref_and_unlock(bev);
}
Example 4
0
static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	/* Completion callback for an overlapped read: commit the bytes into
	 * the input buffer, refund rate-limit allowance for any shortfall,
	 * and report progress, EOF, or error.  'ok' is the success flag of
	 * the overlapped operation; 'nbytes' is the count transferred. */
	struct bufferevent_async *bev_a = upcast_read(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_READING;
	ev_ssize_t amount_unread;
	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->read_in_progress);

	/* read_in_progress holds the number of bytes that were requested
	 * when the read was launched; the difference is what the operation
	 * did not deliver. */
	amount_unread = bev_a->read_in_progress - nbytes;
	evbuffer_commit_read_(bev->input, nbytes);
	bev_a->read_in_progress = 0;
	/* Refund the read buckets for bytes requested but never received
	 * (note the negated argument: a negative decrement). */
	if (amount_unread)
		bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);

	/* On failure, record the socket error so the user can query it
	 * (presumably the WSA error code — see bev_async_set_wsa_error). */
	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			/* Successful non-empty read: re-arm the timeout,
			 * run the user read callback once the low watermark
			 * is met, and consider launching the next read. */
			BEV_RESET_GENERIC_READ_TIMEOUT(bev);
			if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
				bufferevent_run_readcb_(bev);
			bev_async_consider_reading(bev_a);
		} else if (!ok) {
			/* The overlapped operation itself failed. */
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what);
		} else if (!nbytes) {
			/* A successful zero-byte read means end-of-file. */
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what);
		}
	}

	bufferevent_decref_and_unlock_(bev);
}
Example 5
0
static int
be_async_enable(struct bufferevent *buf, short what)
{
	struct bufferevent_async *bev_async = upcast(buf);

	/* A bufferevent that has hit a fatal error stays disabled. */
	if (!bev_async->ok)
		return -1;

	if (bev_async->bev.connecting) {
		/* Don't launch anything during connection attempts:
		 * re-arming timeouts or starting overlapped I/O here would
		 * interfere with a pending non-blocking connect. */
		return 0;
	}

	/* Re-arm the generic timeout for each direction being enabled. */
	if (what & EV_READ)
		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
	if (what & EV_WRITE)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);

	/* If we newly enable reading or writing, and we aren't reading or
	   writing already, consider launching a new read or write. */

	if (what & EV_READ)
		bev_async_consider_reading(bev_async);
	if (what & EV_WRITE)
		bev_async_consider_writing(bev_async);
	return 0;
}