Example #1
File: kqueue.c Project: TomCN7/Event
/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* We can set the handler for most signals to SIG_IGN and
	 * still have them reported to us in the queue.  However,
	 * if the handler for SIGCHLD is SIG_IGN, the system reaps
	 * zombie processes for us, and we don't get any notification.
	 * This appears to be the only signal with this quirk. */
	if (evsig_set_handler_(base, nsignal,
	    nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
		return (-1);

	return (0);
}
Example #2
static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}
Example #3
static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}
Example #4
/* Called to write data into the BIO */
static int
bio_bufferevent_write(BIO *b, const char *in, int inlen)
{
	struct bufferevent *bufev = b->ptr;
	struct evbuffer *output;
	size_t outlen;

	BIO_clear_retry_flags(b);

	if (!b->ptr)
		return -1;

	output = bufferevent_get_output(bufev);
	outlen = evbuffer_get_length(output);

	/* Copy only as much data onto the output buffer as can fit under the
	 * high-water mark. */
	if (bufev->wm_write.high && bufev->wm_write.high <= (outlen+inlen)) {
		if (bufev->wm_write.high <= outlen) {
			/* If no data can fit, we'll need to retry later. */
			BIO_set_retry_write(b);
			return -1;
		}
		inlen = bufev->wm_write.high - outlen;
	}

	EVUTIL_ASSERT(inlen > 0);
	evbuffer_add(output, in, inlen);

	return inlen;
}
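A quick standalone illustration of the high-water-mark clamp above, with made-up numbers; clamp_write() below is a hypothetical helper written for this sketch, not part of Libevent:

/* Illustration only: mirror the watermark logic of bio_bufferevent_write(). */
#include <assert.h>
#include <stddef.h>

static int
clamp_write(size_t high, size_t outlen, int inlen)
{
	if (high && high <= outlen + (size_t)inlen) {
		if (high <= outlen)
			return -1;              /* nothing fits: caller must retry */
		inlen = (int)(high - outlen);   /* partial write up to the mark */
	}
	return inlen;
}

int
main(void)
{
	assert(clamp_write(4096, 4000, 500) == 96);  /* truncated to fit */
	assert(clamp_write(4096, 4096, 500) == -1);  /* already full: retry */
	assert(clamp_write(0, 4000, 500) == 500);    /* no watermark configured */
	return 0;
}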
Example #5
static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;

	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_DELETE;

	/* Because we insert signal events
	 * immediately, we need to delete them
	 * immediately, too */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	if (_evsig_restore_handler(base, nsignal) == -1)
		return (-1);

	return (0);
}
Example #6
static void
connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_connect(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	evutil_socket_t sock;

	BEV_LOCK(bev);

	EVUTIL_ASSERT(bev_a->bev.connecting);
	bev_a->bev.connecting = 0;
	sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
	/* XXXX Handle error? */
	setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);

	if (ok)
		bufferevent_async_set_connected(bev);
	else
		bev_async_set_wsa_error(bev, eo);

	_bufferevent_run_eventcb(bev,
			ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);

	event_base_del_virtual(bev->ev_base);

	_bufferevent_decref_and_unlock(bev);
}
Example #7
/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* XXXX The manpage suggests we could use SIG_IGN instead of a
	 * do-nothing handler */
	if (_evsig_set_handler(base, nsignal, kq_sighandler) == -1)
		return (-1);

	return (0);
}
Example #8
File: poll.c Project: 00datman/ompi
static void
poll_check_ok(struct pollop *pop)
{
	int i, idx;
	struct event *ev;

	for (i = 0; i < pop->fd_count; ++i) {
		idx = pop->idxplus1_by_fd[i]-1;
		if (idx < 0)
			continue;
		EVUTIL_ASSERT(pop->event_set[idx].fd == i);
	}
	for (i = 0; i < pop->nfds; ++i) {
		struct pollfd *pfd = &pop->event_set[i];
		EVUTIL_ASSERT(pop->idxplus1_by_fd[pfd->fd] == i+1);
	}
}
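The assertions above rely on an "index plus one" convention: idxplus1_by_fd[fd] holds the fd's slot in event_set plus one, so a zero entry means "not registered". A minimal standalone sketch of that bookkeeping (hypothetical names, not Libevent's structures):

#include <assert.h>
#include <string.h>

#define MAX_FD 16

int
main(void)
{
	int idxplus1_by_fd[MAX_FD];  /* fd -> slot in event_fd, plus one; 0 = absent */
	int event_fd[MAX_FD];        /* dense array, stands in for event_set[i].fd */
	int nfds = 0;

	memset(idxplus1_by_fd, 0, sizeof(idxplus1_by_fd));

	/* Register fd 3 the way poll_add() does. */
	int fd = 3;
	int i = nfds++;
	event_fd[i] = fd;
	idxplus1_by_fd[fd] = i + 1;

	/* The two directions poll_check_ok() verifies. */
	assert(event_fd[idxplus1_by_fd[fd] - 1] == fd);
	assert(idxplus1_by_fd[event_fd[i]] == i + 1);

	/* fd 5 was never added, so its entry is still 0. */
	assert(idxplus1_by_fd[5] == 0);
	return 0;
}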
Example #9
static inline struct bufferevent_async *
upcast_write(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}
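EVUTIL_UPCAST recovers the enclosing structure from a pointer to one of its members, in the spirit of the kernel's container_of. Below is a minimal standalone sketch of the idea; the UPCAST macro and the structures are illustrative stand-ins, not Libevent's exact definitions:

#include <assert.h>
#include <stddef.h>

/* Illustration only: step back from a member pointer to its container. */
#define UPCAST(ptr, type, field) \
	((type *)(((char *)(ptr)) - offsetof(type, field)))

struct overlapped_like { int pending; };

struct bev_async_like {
	int other_state;
	struct overlapped_like write_overlapped;
};

int
main(void)
{
	struct bev_async_like b;
	struct overlapped_like *eo = &b.write_overlapped;

	/* Given only 'eo', recover the enclosing bev_async_like. */
	struct bev_async_like *bp =
	    UPCAST(eo, struct bev_async_like, write_overlapped);
	assert(bp == &b);
	return 0;
}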
Example #10
static inline struct bufferevent_async *
upcast_overlapped(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
	EVUTIL_ASSERT(bev_a->bev.bev.be_ops == &bufferevent_ops_async);
	return bev_a;
}
Example #11
static void
listener_read_cb(evutil_socket_t fd, short what, void *p)
{
	struct evconnlistener *lev = p;
	int err;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	LOCK(lev);
	while (1) {
		struct sockaddr_storage ss;
#ifdef WIN32
		int socklen = sizeof(ss);
#else
		socklen_t socklen = sizeof(ss);
#endif
		evutil_socket_t new_fd = accept(fd, (struct sockaddr*)&ss, &socklen);
		if (new_fd < 0)
			break;

		if (!(lev->flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
			evutil_make_socket_nonblocking(new_fd);

		if (lev->cb == NULL) {
			UNLOCK(lev);
			return;
		}
		++lev->refcnt;
		cb = lev->cb;
		user_data = lev->user_data;
		UNLOCK(lev);
		cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
		    user_data);
		LOCK(lev);
		if (lev->refcnt == 1) {
			int freed = listener_decref_and_unlock(lev);
			EVUTIL_ASSERT(freed);
			return;
		}
		--lev->refcnt;
	}
	err = evutil_socket_geterror(fd);
	if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
		UNLOCK(lev);
		return;
	}
	if (lev->errorcb != NULL) {
		++lev->refcnt;
		errorcb = lev->errorcb;
		user_data = lev->user_data;
		UNLOCK(lev);
		errorcb(lev, user_data);
		LOCK(lev);
		listener_decref_and_unlock(lev);
	} else {
		event_sock_warn(fd, "Error from accept() call");
	}
}
Example #12
static void
listener_read_cb(evutil_socket_t fd, short what, void *p)
{
	struct evconnlistener *lev = (struct evconnlistener *)p;
	int err;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	LOCK(lev);
	while (1) {
		struct sockaddr_storage ss;
		ev_socklen_t socklen = sizeof(ss);
		evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
		if (new_fd < 0)
			break;
		if (socklen == 0) {
			/* This can happen with some older linux kernels in
			 * response to nmap. */
			evutil_closesocket(new_fd);
			continue;
		}

		if (lev->cb == NULL) {
			evutil_closesocket(new_fd);
			UNLOCK(lev);
			return;
		}
		++lev->refcnt;
		cb = lev->cb;
		user_data = lev->user_data;
		UNLOCK(lev);
		cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
		    user_data);
		LOCK(lev);
		if (lev->refcnt == 1) {
			int freed = listener_decref_and_unlock(lev);
			EVUTIL_ASSERT(freed);
			return;
		}
		--lev->refcnt;
	}
	err = evutil_socket_geterror(fd);
	if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
		UNLOCK(lev);
		return;
	}
	if (lev->errorcb != NULL) {
		++lev->refcnt;
		errorcb = lev->errorcb;
		user_data = lev->user_data;
		UNLOCK(lev);
		errorcb(lev, user_data);
		LOCK(lev);
		listener_decref_and_unlock(lev);
	} else {
		event_sock_warn(fd, "Error from accept() call");
	}
}
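Both listener_read_cb() variants follow the same reference-counting discipline around the user callback: take a reference and drop the lock before calling user code, then re-lock and release the reference so the listener can be torn down safely even if the callback disabled it. A simplified, hypothetical sketch of that pattern (not Libevent's actual types or functions):

#include <pthread.h>
#include <stdlib.h>

struct listener {
	pthread_mutex_t lock;
	int refcnt;
	void (*cb)(struct listener *, void *);
	void *user_data;
};

/* Drop one reference; free the object if it was the last one.
 * Returns nonzero if the listener was freed. */
static int
listener_unref_and_unlock(struct listener *lev)
{
	if (--lev->refcnt == 0) {
		pthread_mutex_unlock(&lev->lock);
		pthread_mutex_destroy(&lev->lock);
		free(lev);
		return 1;
	}
	pthread_mutex_unlock(&lev->lock);
	return 0;
}

/* The ref/unlock/callback/relock/unref dance from listener_read_cb(). */
static void
run_user_callback(struct listener *lev)
{
	void (*cb)(struct listener *, void *);
	void *user_data;

	pthread_mutex_lock(&lev->lock);
	++lev->refcnt;                    /* keep lev alive across the callback */
	cb = lev->cb;
	user_data = lev->user_data;
	pthread_mutex_unlock(&lev->lock); /* never hold our lock in user code */

	cb(lev, user_data);               /* may release the caller's reference */

	pthread_mutex_lock(&lev->lock);
	listener_unref_and_unlock(lev);   /* frees lev if ours was the last ref */
}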
Example #13
static int
realloc_fd_sets(struct win32op *op, size_t new_size)
{
	size_t size;

	EVUTIL_ASSERT(new_size >= op->readset_in->fd_count &&
	       new_size >= op->writeset_in->fd_count);
	EVUTIL_ASSERT(new_size >= 1);

	size = FD_SET_ALLOC_SIZE(new_size);
	if (!(op->readset_in = mm_realloc(op->readset_in, size)))
		return (-1);
	if (!(op->writeset_in = mm_realloc(op->writeset_in, size)))
		return (-1);
	op->resize_out_sets = 1;
	op->fd_setsz = new_size;
	return (0);
}
Example #14
File: signal.c Project: andyk/load_gen
static int
evsig_del(struct event_base *base, int evsignal, short old, short events, void *p)
{
	EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG);

	event_debug(("%s: %d: restoring signal handler", __func__, evsignal));

	return (_evsig_restore_handler(base, evsignal));
}
Example #15
static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}
Example #16
static int
grow_fd_sets(struct win32op *op, unsigned new_num_fds)
{
	size_t size;

	EVUTIL_ASSERT(new_num_fds >= op->readset_in->fd_count &&
	       new_num_fds >= op->writeset_in->fd_count);
	EVUTIL_ASSERT(new_num_fds >= 1);

	size = FD_SET_ALLOC_SIZE(new_num_fds);
	if (!(op->readset_in = mm_realloc(op->readset_in, size)))
		return (-1);
	if (!(op->writeset_in = mm_realloc(op->writeset_in, size)))
		return (-1);
	op->resize_out_sets = 1;
	op->num_fds_in_fd_sets = new_num_fds;
	return (0);
}
Example #17
File: poll.c Project: 00datman/ompi
static int
poll_add(struct event_base *base, int fd, short old, short events, void *_idx)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = _idx;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE)))
		return (0);

	poll_check_ok(pop);
	if (pop->nfds + 1 >= pop->event_count) {
		struct pollfd *tmp_event_set;
		int tmp_event_count;

		if (pop->event_count < 32)
			tmp_event_count = 32;
		else
			tmp_event_count = pop->event_count * 2;

		/* We need more file descriptors */
		tmp_event_set = mm_realloc(pop->event_set,
				 tmp_event_count * sizeof(struct pollfd));
		if (tmp_event_set == NULL) {
			event_warn("realloc");
			return (-1);
		}
		pop->event_set = tmp_event_set;

		pop->event_count = tmp_event_count;
		pop->realloc_copy = 1;
	}

	i = idx->idxplus1 - 1;

	if (i >= 0) {
		pfd = &pop->event_set[i];
	} else {
		i = pop->nfds++;
		pfd = &pop->event_set[i];
		pfd->events = 0;
		pfd->fd = fd;
		idx->idxplus1 = i + 1;
	}

	pfd->revents = 0;
	if (events & EV_WRITE)
		pfd->events |= POLLOUT;
	if (events & EV_READ)
		pfd->events |= POLLIN;
	poll_check_ok(pop);

	return (0);
}
Example #18
static inline struct bufferevent_async *
upcast(struct bufferevent *bev)
{
	struct bufferevent_async *bev_a;
	if (bev->be_ops != &bufferevent_ops_async)
		return NULL;
	bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
	EVUTIL_ASSERT(bev_a->bev.bev.be_ops == &bufferevent_ops_async);
	return bev_a;
}
Example #19
File: evport.c Project: TomCN7/Event
/*
 * Verifies very basic integrity of a given port_event.
 */
static void
check_event(port_event_t* pevt)
{
	/*
	 * We've only registered for PORT_SOURCE_FD events. The only
	 * other thing we can legitimately receive is PORT_SOURCE_ALERT,
	 * but since we're not using port_alert either, we can assume
	 * PORT_SOURCE_FD.
	 */
	EVUTIL_ASSERT(pevt->portev_source == PORT_SOURCE_FD);
}
Example #20
File: buffer_iocp.c Project: TomCN7/Event
void evbuffer_commit_write_(struct evbuffer *evbuf, ev_ssize_t nBytes)
{
	struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);

	EVUTIL_ASSERT(buf->write_in_progress && !buf->read_in_progress);
	evbuffer_unfreeze(evbuf, 1);
	evbuffer_drain(evbuf, nBytes);
	pin_release(buf,EVBUFFER_MEM_PINNED_W);
	buf->write_in_progress = 0;
	evbuffer_decref(evbuf);
}
Example #21
File: evthread.c Project: ICLDisco/ompi
static int
debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = _lock;
	EVUTIL_ASSERT(lock);
	EVLOCK_ASSERT_LOCKED(_lock);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}
Example #22
File: buffer_iocp.c Project: TomCN7/Event
/** Unpin all the chains noted as pinned in 'eo'. */
static void pin_release(struct evbuffer_overlapped *eo, unsigned flag)
{
	int i;
	struct evbuffer_chain *next, *chain = eo->first_pinned;

	for (i = 0; i < eo->n_buffers; ++i) {
		EVUTIL_ASSERT(chain);
		next = chain->next;
		evbuffer_chain_unpin_(chain, flag);
		chain = next;
	}
}
Example #23
void
evbuffer_commit_read(struct evbuffer *evbuf, ev_ssize_t nBytes)
{
	struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
	struct evbuffer_chain **chainp;
	size_t remaining, len;
	unsigned i;

	EVBUFFER_LOCK(evbuf);
	EVUTIL_ASSERT(buf->read_in_progress && !buf->write_in_progress);
	EVUTIL_ASSERT(nBytes >= 0); /* XXXX Can this be false? */

	evbuffer_unfreeze(evbuf, 0);

	chainp = evbuf->last_with_datap;
	if (!((*chainp)->flags & EVBUFFER_MEM_PINNED_R))
		chainp = &(*chainp)->next;
	remaining = nBytes;
	for (i = 0; remaining > 0 && i < (unsigned)buf->n_buffers; ++i) {
		EVUTIL_ASSERT(*chainp);
		len = buf->buffers[i].len;
		if (remaining < len)
			len = remaining;
		(*chainp)->off += len;
		evbuf->last_with_datap = chainp;
		remaining -= len;
		chainp = &(*chainp)->next;
	}

	pin_release(buf, EVBUFFER_MEM_PINNED_R);

	buf->read_in_progress = 0;

	evbuf->total_len += nBytes;
	evbuf->n_add_for_cb += nBytes;

	evbuffer_invoke_callbacks(evbuf);

	_evbuffer_decref_and_unlock(evbuf);
}
Example #24
static void
be_socket_destruct(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	evutil_socket_t fd;
	EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);

	fd = event_get_fd(&bufev->ev_read);

	if ((bufev_p->options & BEV_OPT_CLOSE_ON_FREE) && fd >= 0)
		EVUTIL_CLOSESOCKET(fd);
}
Example #25
File: signal.c Project: Breakster/iSoul
static int
evsig_del(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p)
{
	EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG);

	event_debug(("%s: %d: restoring signal handler", __func__, evsignal));

	EVSIGBASE_LOCK();
	--evsig_base_n_signals_added;
	--base->sig.ev_n_signals_added;
	EVSIGBASE_UNLOCK();

	return (evsig_restore_handler_(base, (int)evsignal));
}
Example #26
/* strsep replacement for platforms that lack it.  Only works if
 * del is one character long. */
static char *
strsep(char **s, const char *del)
{
	char *d, *tok;
	EVUTIL_ASSERT(strlen(del) == 1);
	if (!s || !*s)
		return NULL;
	tok = *s;
	d = strstr(tok, del);
	if (d) {
		*d = '\0';
		*s = d + 1;
	} else
		*s = NULL;
	return tok;
}
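Given the single-character-delimiter restriction noted in the comment, here is a minimal usage sketch; the buffer contents are made up, and the loop behaves the same with either the system strsep() or the replacement above:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[] = "key:value:rest";
	char *cursor = buf;
	char *tok;

	/* Each call returns the next token and advances 'cursor';
	 * after the final token, 'cursor' becomes NULL and the loop ends. */
	while ((tok = strsep(&cursor, ":")) != NULL)
		printf("token: %s\n", tok);   /* key, value, rest */
	return 0;
}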
Example #27
static void
be_async_destruct(struct bufferevent *bev)
{
	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
	evutil_socket_t fd;

	EVUTIL_ASSERT(!upcast(bev)->write_in_progress && !upcast(bev)->read_in_progress);

	/* XXX cancel any outstanding I/O operations */
	fd = _evbuffer_overlapped_get_fd(bev->input);
	/* delete this in case non-blocking connect was used */
	event_del(&bev->ev_write);
	if (bev_p->options & BEV_OPT_CLOSE_ON_FREE)
		EVUTIL_CLOSESOCKET(fd);
	_bufferevent_del_generic_timeout_cbs(bev);
}
Example #28
File: evthread.c Project: ICLDisco/ompi
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && _original_lock_fns.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			_original_lock_fns.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			_original_lock_fns.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! ompi__evthread_lock_debugging_enabled) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return ompi__evthread_lock_fns.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		              ompi__evthread_lock_debugging_enabled);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = _original_lock_fns.alloc(
			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}
Example #29
static void
connect_complete(struct event_overlapped *eo, uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_overlapped(eo);
	struct bufferevent *bev = &bev_a->bev.bev; /* XXX locking issue ? */

	_bufferevent_incref_and_lock(bev);

	EVUTIL_ASSERT(bev_a->bev.connecting);
	bev_a->bev.connecting = 0;

	_bufferevent_run_eventcb(bev,
			ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);

	_bufferevent_decref_and_unlock(bev);
}
Example #30
int
bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
	const struct sockaddr *sa, int socklen)
{
	BOOL rc;
	struct bufferevent_async *bev_async = upcast(bev);
	struct sockaddr_storage ss;
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns();

	EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);

	/* ConnectEx() requires that the socket be bound to an address
	 * with bind() before using, otherwise it will fail. We attempt
	 * to issue a bind() here, taking into account that the error
	 * code is set to WSAEINVAL when the socket is already bound. */
	memset(&ss, 0, sizeof(ss));
	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
	} else if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = in6addr_any;
	} else {
		/* Well, the user will have to bind() */
		return -1;
	}
	if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
	    WSAGetLastError() != WSAEINVAL)
		return -1;

	event_base_add_virtual(bev->ev_base);
	bufferevent_incref(bev);
	rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
			    &bev_async->connect_overlapped.overlapped);
	if (rc || WSAGetLastError() == ERROR_IO_PENDING)
		return 0;

	event_base_del_virtual(bev->ev_base);
	bufferevent_decref(bev);

	return -1;
}