/* Enable locking on bufev.  If lock is NULL, share the underlying
 * bufferevent's lock when one exists; otherwise allocate a new recursive
 * lock that bufev owns.  A caller-supplied lock is used but never freed by
 * the bufferevent. */
int
bufferevent_enable_locking(struct bufferevent *bufev, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	struct bufferevent *underlying;

	if (BEV_UPCAST(bufev)->lock)
		return -1;
	underlying = bufferevent_get_underlying(bufev);

	if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
		lock = BEV_UPCAST(underlying)->lock;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	} else if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 1;
	} else {
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	}
	evbuffer_enable_locking(bufev->input, lock);
	evbuffer_enable_locking(bufev->output, lock);

	if (underlying && !BEV_UPCAST(underlying)->lock)
		bufferevent_enable_locking(underlying, lock);

	return 0;
#endif
}
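/*
 * Hedged usage sketch (not part of the original source): the usual public-API
 * way to reach bufferevent_enable_locking() is to pass BEV_OPT_THREADSAFE when
 * creating a bufferevent, after enabling threading support in libevent.
 * Assumes pthreads and linking against event_pthreads.
 */
#include <event2/event.h>
#include <event2/bufferevent.h>
#include <event2/thread.h>

static struct bufferevent *
make_threadsafe_bev(struct event_base *base, evutil_socket_t fd)
{
	/* BEV_OPT_THREADSAFE makes libevent allocate a recursive lock for
	 * the bufferevent internally (the lock == NULL path shown above). */
	return bufferevent_socket_new(base, fd,
	    BEV_OPT_CLOSE_ON_FREE | BEV_OPT_THREADSAFE);
}

/* Somewhere during startup, before creating the event_base:
 *     evthread_use_pthreads();
 */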
static void
thread_basic(void *arg)
{
	THREAD_T threads[NUM_THREADS];
	struct event ev;
	struct timeval tv;
	int i;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	struct event *notification_event = NULL;
	struct event *sigchld_event = NULL;

	EVTHREAD_ALLOC_LOCK(count_lock, 0);
	tt_assert(count_lock);

	tt_assert(base);
	if (evthread_make_base_notifiable(base) < 0) {
		tt_abort_msg("Couldn't make base notifiable!");
	}

#ifndef WIN32
	if (data->setup_data && !strcmp(data->setup_data, "forking")) {
		pid_t pid;
		int status;
		sigchld_event = evsignal_new(base, SIGCHLD, sigchld_cb, base);
		/* This piggybacks on the th_notify_fd weirdly, and looks
		 * inside libevent internals.  Not a good idea in non-testing
		 * code! */
		notification_event = event_new(base,
		    base->th_notify_fd[0], EV_READ|EV_PERSIST, notify_fd_cb,
		    NULL);
		event_add(sigchld_event, NULL);
		event_add(notification_event, NULL);

		if ((pid = fork()) == 0) {
			event_del(notification_event);
			if (event_reinit(base) < 0) {
				TT_FAIL(("reinit"));
				exit(1);
			}
			event_assign(notification_event, base,
			    base->th_notify_fd[0], EV_READ|EV_PERSIST,
			    notify_fd_cb, NULL);
			event_add(notification_event, NULL);
			goto child;
		}

		event_base_dispatch(base);

		if (waitpid(pid, &status, 0) == -1)
			tt_abort_perror("waitpid");
		TT_BLATHER(("Waitpid okay\n"));

		tt_assert(got_sigchld);
		tt_int_op(notification_fd_used, ==, 0);

		goto end;
	}
static void
count_init(void)
{
	EVTHREAD_ALLOC_LOCK(count_lock, 0);
	EVTHREAD_ALLOC_COND(count_cond);

	tt_assert(count_lock);
	tt_assert(count_cond);
end:
	;
}
struct evconnlistener *
evconnlistener_new(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct evconnlistener_event *lev;

#ifdef _WIN32
	if (base && event_base_get_iocp_(base)) {
		const struct win32_extension_fns *ext =
			event_get_win32_extension_fns_();
		if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
			return evconnlistener_new_async(base, cb, ptr, flags,
				backlog, fd);
	}
#endif

	/* backlog > 0: call listen() with that value; backlog < 0: call
	 * listen() with a default of 128; backlog == 0: assume the caller
	 * has already called listen() on fd. */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			return NULL;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			return NULL;
	}

	lev = (struct evconnlistener_event *)
	    mm_calloc(1, sizeof(struct evconnlistener_event));
	if (!lev)
		return NULL;

	lev->base.ops = &evconnlistener_event_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;

	lev->base.accept4_flags = 0;
	if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;

	if (flags & LEV_OPT_THREADSAFE) {
		EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	}

	event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
	    listener_read_cb, lev);

	if (!(flags & LEV_OPT_DISABLED))
		evconnlistener_enable(&lev->base);

	return &lev->base;
}
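/*
 * Hedged usage sketch (an assumption, not from the original file): calling
 * evconnlistener_new() with a socket that is already bound.  LEV_OPT_THREADSAFE
 * triggers the EVTHREAD_ALLOC_LOCK() call above so the listener can be
 * enabled and disabled from other threads.
 */
#include <event2/event.h>
#include <event2/listener.h>
#include <event2/util.h>

static void
accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
    struct sockaddr *addr, int socklen, void *ctx)
{
	(void)listener; (void)addr; (void)socklen; (void)ctx;
	/* fd is the newly accepted connection; hand it to a bufferevent,
	 * or (as here, just for the sketch) close it immediately. */
	evutil_closesocket(fd);
}

static struct evconnlistener *
listen_on(struct event_base *base, evutil_socket_t bound_fd)
{
	return evconnlistener_new(base, accept_cb, NULL,
	    LEV_OPT_CLOSE_ON_FREE | LEV_OPT_THREADSAFE,
	    -1 /* backlog < 0: let evconnlistener_new() call listen()
	        * with its default value */,
	    bound_fd);
}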
int
evutil_secure_rng_init(void)
{
	int val;

	if (!arc4rand_lock) {
		EVTHREAD_ALLOC_LOCK(arc4rand_lock, 0);
	}

	_ARC4_LOCK();
	if (!arc4_seeded_ok)
		arc4_stir();
	val = arc4_seeded_ok ? 0 : -1;
	_ARC4_UNLOCK();
	return val;
}
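/*
 * Hedged usage sketch (an assumption about typical callers): invoke
 * evutil_secure_rng_init() once at startup and then pull random bytes with
 * evutil_secure_rng_get_bytes(); both are declared in <event2/util.h>.
 */
#include <stddef.h>
#include <event2/util.h>

static int
fill_random(unsigned char *buf, size_t len)
{
	if (evutil_secure_rng_init() < 0)
		return -1;	/* no usable entropy source was found */
	evutil_secure_rng_get_bytes(buf, len);
	return 0;
}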
static THREAD_FN
basic_thread(void *arg)
{
	struct cond_wait cw;
	struct event_base *base = arg;
	struct event ev;
	int i = 0;

	EVTHREAD_ALLOC_LOCK(cw.lock, 0);
	EVTHREAD_ALLOC_COND(cw.cond);
	assert(cw.lock);
	assert(cw.cond);

	evtimer_assign(&ev, base, wake_all_timeout, &cw);
	for (i = 0; i < NUM_ITERATIONS; i++) {
		struct timeval tv;
		evutil_timerclear(&tv);
		tv.tv_sec = 0;
		tv.tv_usec = 3000;

		EVLOCK_LOCK(cw.lock, 0);
		/* We need to make sure that the event does not fire before
		 * we get to wait on the condition variable. */
		assert(evtimer_add(&ev, &tv) == 0);

		assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
		EVLOCK_UNLOCK(cw.lock, 0);

		EVLOCK_LOCK(count_lock, 0);
		++count;
		EVLOCK_UNLOCK(count_lock, 0);
	}

	/* Exit the loop only if all threads have fired all timeouts. */
	EVLOCK_LOCK(count_lock, 0);
	if (count >= NUM_THREADS * NUM_ITERATIONS)
		event_base_loopexit(base, NULL);
	EVLOCK_UNLOCK(count_lock, 0);

	EVTHREAD_FREE_LOCK(cw.lock, 0);
	EVTHREAD_FREE_COND(cw.cond);

	THREAD_RETURN();
}
static void
thread_conditions_simple(void *arg)
{
	struct timeval tv_signal, tv_timeout, tv_broadcast;
	struct alerted_record alerted[NUM_THREADS];
	THREAD_T threads[NUM_THREADS];
	struct cond_wait cond;
	int i;
	struct timeval launched_at;
	struct event wake_one;
	struct event wake_all;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int n_timed_out=0, n_signal=0, n_broadcast=0;

	tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
	tv_signal.tv_usec = 30*1000;
	tv_timeout.tv_usec = 150*1000;
	tv_broadcast.tv_usec = 500*1000;

	EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_ALLOC_COND(cond.cond);
	tt_assert(cond.lock);
	tt_assert(cond.cond);
	for (i = 0; i < NUM_THREADS; ++i) {
		memset(&alerted[i], 0, sizeof(struct alerted_record));
		alerted[i].cond = &cond;
	}

	/* Threads 5 and 6 will be allowed to time out */
	memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
	memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));

	evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
	evtimer_assign(&wake_all, base, wake_all_timeout, &cond);

	evutil_gettimeofday(&launched_at, NULL);

	/* Launch the threads... */
	for (i = 0; i < NUM_THREADS; ++i) {
		THREAD_START(threads[i], wait_for_condition, &alerted[i]);
	}

	/* Start the timers... */
	tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
	tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);

	/* And run for a bit... */
	event_base_dispatch(base);

	/* And wait till the threads are done. */
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	/* Now, let's see what happened.  At least one of 5 or 6 should
	 * have timed out. */
	n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
	tt_int_op(n_timed_out, >=, 1);
	tt_int_op(n_timed_out, <=, 2);

	for (i = 0; i < NUM_THREADS; ++i) {
		const struct timeval *target_delay;
		struct timeval target_time, actual_delay;
		if (alerted[i].timed_out) {
			TT_BLATHER(("%d looks like a timeout\n", i));
			target_delay = &tv_timeout;
			tt_assert(i == 5 || i == 6);
		} else if (evutil_timerisset(&alerted[i].alerted_at)) {
			long diff1, diff2;
			evutil_timersub(&alerted[i].alerted_at,
			    &launched_at, &actual_delay);
			diff1 = timeval_msec_diff(&actual_delay, &tv_signal);
			diff2 = timeval_msec_diff(&actual_delay, &tv_broadcast);
			if (abs(diff1) < abs(diff2)) {
				TT_BLATHER(("%d looks like a signal\n", i));
				target_delay = &tv_signal;
				++n_signal;
			} else {
				TT_BLATHER(("%d looks like a broadcast\n", i));
				target_delay = &tv_broadcast;
				++n_broadcast;
			}
		} else {
			TT_FAIL(("Thread %d never got woken", i));
			continue;
		}
		evutil_timeradd(target_delay, &launched_at, &target_time);
		test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
		    0, 50);
	}
	tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
	tt_int_op(n_signal, ==, 1);

end:
	;
}
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);
	struct evconnlistener_iocp *lev;
	int i;

	flags |= LEV_OPT_THREADSAFE;

	if (!base || !event_base_get_iocp(base))
		goto err;

	/* XXXX duplicate code */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			goto err;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			goto err;
	}
	if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
		event_sock_warn(fd, "getsockname");
		goto err;
	}
	lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
	if (!lev) {
		event_warn("calloc");
		goto err;
	}
	lev->base.ops = &evconnlistener_iocp_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;
	lev->base.enabled = 1;

	lev->port = event_base_get_iocp(base);
	lev->fd = fd;
	lev->event_base = base;

	if (event_iocp_port_associate(lev->port, fd, 1) < 0)
		goto err_free_lev;

	EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	lev->n_accepting = N_SOCKETS_PER_LISTENER;
	lev->accepting = mm_calloc(lev->n_accepting,
	    sizeof(struct accepting_socket *));
	if (!lev->accepting) {
		event_warn("calloc");
		goto err_delete_lock;
	}
	for (i = 0; i < lev->n_accepting; ++i) {
		lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
		if (!lev->accepting[i]) {
			event_warnx("Couldn't create accepting socket");
			goto err_free_accepting;
		}
		if (cb && start_accepting(lev->accepting[i]) < 0) {
			event_warnx("Couldn't start accepting on socket");
			EnterCriticalSection(&lev->accepting[i]->lock);
			free_and_unlock_accepting_socket(lev->accepting[i]);
			goto err_free_accepting;
		}
		++lev->base.refcnt;
	}

	iocp_listener_event_add(lev);

	return &lev->base;

err_free_accepting:
	mm_free(lev->accepting);
	/* XXXX free the other elements. */
err_delete_lock:
	EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
err_free_lev:
	mm_free(lev);
err:
	/* Don't close the fd, it is the caller's responsibility. */
	return NULL;
}