void
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	if (--bufev_private->refcnt) {
		BEV_UNLOCK(bufev);
		return;
	}

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	BEV_UNLOCK(bufev);
	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory; mem_offset is a byte offset,
	 * so cast to char* before subtracting. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);
}
static void
evutil_free_secure_rng_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (arc4rand_lock != NULL) {
		EVTHREAD_FREE_LOCK(arc4rand_lock, 0);
		arc4rand_lock = NULL;
	}
#endif
	return;
}
void
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	struct bufferevent *underlying;

	if (--bufev_private->refcnt) {
		BEV_UNLOCK(bufev);
		return;
	}

	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group(bufev);
		if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
			event_del(&bufev_private->rate_limiting->refill_bucket_event);
		event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	event_debug_unassign(&bufev->ev_read);
	event_debug_unassign(&bufev->ev_write);

	BEV_UNLOCK(bufev);
	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory; mem_offset is a byte offset,
	 * so cast to char* before subtracting. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need
	 * the reference to it.  This is mainly in case our lock is shared
	 * with underlying.
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches. */
	if (underlying)
		bufferevent_decref(underlying);
}
static void
evsig_free_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (evsig_base_lock != NULL) {
		EVTHREAD_FREE_LOCK(evsig_base_lock, 0);
		evsig_base_lock = NULL;
	}
#endif
	return;
}
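/*
 * Context sketch (illustrative, not from the original source): the
 * *_free_globals_locks() helpers above only have work to do when locking
 * support was enabled. A minimal lifecycle sketch, assuming a pthreads
 * build linked against event_pthreads; libevent_global_shutdown()
 * (libevent 2.1+) is the call that eventually reaches teardown helpers
 * like these.
 */
#include <event2/event.h>
#include <event2/thread.h>

int
main(void)
{
	struct event_base *base;

	/* Install pthread-based lock/condition callbacks; global locks
	 * such as arc4rand_lock and evsig_base_lock are then allocated
	 * on demand. */
	if (evthread_use_pthreads() < 0)
		return 1;

	base = event_base_new();
	if (!base)
		return 1;
	/* ... set up events and run event_base_dispatch(base) ... */
	event_base_free(base);

	/* Tears down global state, including the locks freed above. */
	libevent_global_shutdown();
	return 0;
}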
static void
bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_)
{
	struct bufferevent *bufev = arg_;
	struct bufferevent *underlying;
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group_internal_(bufev, 0);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	BEV_UNLOCK(bufev);

	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need the
	 * reference to it.  We wait this long mainly in case our lock is
	 * shared with underlying.
	 *
	 * The 'destruct' function will also drop a reference to underlying
	 * if BEV_OPT_CLOSE_ON_FREE is set.
	 *
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches. */
	if (underlying)
		bufferevent_decref_(underlying);
}
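/*
 * Usage sketch (illustrative, not from the original source): the teardown
 * paths above are what eventually run when user code releases its
 * reference through the public API. A minimal sketch, assuming a
 * socket-based bufferevent; the helper name is an assumption.
 */
#include <event2/event.h>
#include <event2/bufferevent.h>

static void
make_and_free(struct event_base *base, evutil_socket_t fd)
{
	/* BEV_OPT_THREADSAFE gives the bufferevent its own recursive lock
	 * (own_lock above); BEV_OPT_CLOSE_ON_FREE makes destruct close
	 * the fd. */
	struct bufferevent *bev = bufferevent_socket_new(base, fd,
	    BEV_OPT_CLOSE_ON_FREE | BEV_OPT_THREADSAFE);
	if (!bev)
		return;
	/* ... set callbacks, enable reading/writing, do I/O ... */

	/* Drops the caller's reference; once the refcount reaches zero
	 * the decref/finalize paths above free buffers, lock, and
	 * memory. */
	bufferevent_free(bev);
}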
static int
listener_decref_and_unlock(struct evconnlistener *listener)
{
	int refcnt = --listener->refcnt;
	if (refcnt == 0) {
		listener->ops->destroy(listener);
		UNLOCK(listener);
		EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		mm_free(listener);
		return 1;
	} else {
		UNLOCK(listener);
		return 0;
	}
}
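/*
 * Usage sketch (illustrative): evconnlistener_free() takes the listener
 * lock and lands in listener_decref_and_unlock() once the last reference
 * is dropped. The port number and trivial callback below are assumptions
 * of the sketch, not part of the original source.
 */
#include <string.h>
#include <netinet/in.h>
#include <event2/event.h>
#include <event2/listener.h>
#include <event2/util.h>

static void
accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
    struct sockaddr *addr, int socklen, void *ctx)
{
	/* ... hand the fd off to a bufferevent, etc. ... */
	evutil_closesocket(fd);
}

static struct evconnlistener *
make_listener(struct event_base *base)
{
	struct sockaddr_in sin;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(9995); /* illustrative port */

	/* LEV_OPT_THREADSAFE allocates the recursive lock freed above. */
	return evconnlistener_new_bind(base, accept_cb, NULL,
	    LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE | LEV_OPT_THREADSAFE,
	    -1, (struct sockaddr *)&sin, sizeof(sin));
	/* Later, evconnlistener_free() releases this reference. */
}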
static THREAD_FN
basic_thread(void *arg)
{
	struct cond_wait cw;
	struct event_base *base = arg;
	struct event ev;
	int i = 0;

	EVTHREAD_ALLOC_LOCK(cw.lock, 0);
	EVTHREAD_ALLOC_COND(cw.cond);
	assert(cw.lock);
	assert(cw.cond);

	evtimer_assign(&ev, base, wake_all_timeout, &cw);
	for (i = 0; i < NUM_ITERATIONS; i++) {
		struct timeval tv;
		evutil_timerclear(&tv);
		tv.tv_sec = 0;
		tv.tv_usec = 3000;

		EVLOCK_LOCK(cw.lock, 0);
		/* Make sure the event cannot fire before we get to wait
		 * on the condition variable. */
		assert(evtimer_add(&ev, &tv) == 0);

		assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
		EVLOCK_UNLOCK(cw.lock, 0);

		EVLOCK_LOCK(count_lock, 0);
		++count;
		EVLOCK_UNLOCK(count_lock, 0);
	}

	/* Exit the loop only once all threads have fired all timeouts. */
	EVLOCK_LOCK(count_lock, 0);
	if (count >= NUM_THREADS * NUM_ITERATIONS)
		event_base_loopexit(base, NULL);
	EVLOCK_UNLOCK(count_lock, 0);

	EVTHREAD_FREE_LOCK(cw.lock, 0);
	EVTHREAD_FREE_COND(cw.cond);

	THREAD_RETURN();
}
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);
	struct evconnlistener_iocp *lev;
	int i;

	flags |= LEV_OPT_THREADSAFE;

	if (!base || !event_base_get_iocp(base))
		goto err;

	/* XXXX duplicate code */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			goto err;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			goto err;
	}
	if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
		event_sock_warn(fd, "getsockname");
		goto err;
	}
	lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
	if (!lev) {
		event_warn("calloc");
		goto err;
	}
	lev->base.ops = &evconnlistener_iocp_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;
	lev->base.enabled = 1;

	lev->port = event_base_get_iocp(base);
	lev->fd = fd;
	lev->event_base = base;

	if (event_iocp_port_associate(lev->port, fd, 1) < 0)
		goto err_free_lev;

	EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	lev->n_accepting = N_SOCKETS_PER_LISTENER;
	lev->accepting = mm_calloc(lev->n_accepting,
	    sizeof(struct accepting_socket *));
	if (!lev->accepting) {
		event_warn("calloc");
		goto err_delete_lock;
	}
	for (i = 0; i < lev->n_accepting; ++i) {
		lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
		if (!lev->accepting[i]) {
			event_warnx("Couldn't create accepting socket");
			goto err_free_accepting;
		}
		if (cb && start_accepting(lev->accepting[i]) < 0) {
			event_warnx("Couldn't start accepting on socket");
			EnterCriticalSection(&lev->accepting[i]->lock);
			free_and_unlock_accepting_socket(lev->accepting[i]);
			goto err_free_accepting;
		}
		++lev->base.refcnt;
	}

	iocp_listener_event_add(lev);

	return &lev->base;

err_free_accepting:
	mm_free(lev->accepting);
	/* XXXX free the other elements. */
err_delete_lock:
	EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
err_free_lev:
	mm_free(lev);
err:
	/* Don't close the fd; that is the caller's responsibility. */
	return NULL;
}
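/*
 * Caller-side sketch (illustrative): the generic fd-taking constructor,
 * which an IOCP-enabled base on Windows can route to
 * evconnlistener_new_async(). It mirrors the err: contract above: on
 * failure the fd still belongs to the caller. The helper name and the
 * POSIX socket setup are assumptions of the sketch.
 */
#include <sys/socket.h>
#include <event2/event.h>
#include <event2/listener.h>
#include <event2/util.h>

static struct evconnlistener *
make_listener_from_fd(struct event_base *base, struct sockaddr *sa,
    int socklen)
{
	struct evconnlistener *lev;
	evutil_socket_t fd = socket(sa->sa_family, SOCK_STREAM, 0);
	if (fd < 0)
		return NULL;
	if (evutil_make_socket_nonblocking(fd) < 0 ||
	    bind(fd, sa, socklen) < 0) {
		evutil_closesocket(fd);
		return NULL;
	}
	/* A NULL callback is allowed; accepting starts once one is
	 * installed with evconnlistener_set_cb(). backlog -1 requests the
	 * default listen() backlog, as in the code above. */
	lev = evconnlistener_new(base, NULL, NULL, LEV_OPT_CLOSE_ON_FREE,
	    -1, fd);
	if (!lev) {
		/* Constructor failed, so the fd is still ours to close. */
		evutil_closesocket(fd);
	}
	return lev;
}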
static void
count_free(void)
{
	EVTHREAD_FREE_LOCK(count_lock, 0);
	EVTHREAD_FREE_COND(count_cond);
}
static void
thread_conditions_simple(void *arg)
{
	struct timeval tv_signal, tv_timeout, tv_broadcast;
	struct alerted_record alerted[NUM_THREADS];
	THREAD_T threads[NUM_THREADS];
	struct cond_wait cond;
	int i;
	struct timeval launched_at;
	struct event wake_one;
	struct event wake_all;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int n_timed_out=0, n_signal=0, n_broadcast=0;

	tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
	tv_signal.tv_usec = 30*1000;
	tv_timeout.tv_usec = 150*1000;
	tv_broadcast.tv_usec = 500*1000;

	EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_ALLOC_COND(cond.cond);
	tt_assert(cond.lock);
	tt_assert(cond.cond);
	for (i = 0; i < NUM_THREADS; ++i) {
		memset(&alerted[i], 0, sizeof(struct alerted_record));
		alerted[i].cond = &cond;
	}

	/* Threads 5 and 6 will be allowed to time out */
	memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
	memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));

	evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
	evtimer_assign(&wake_all, base, wake_all_timeout, &cond);

	evutil_gettimeofday(&launched_at, NULL);

	/* Launch the threads... */
	for (i = 0; i < NUM_THREADS; ++i) {
		THREAD_START(threads[i], wait_for_condition, &alerted[i]);
	}

	/* Start the timers... */
	tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
	tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);

	/* And run for a bit... */
	event_base_dispatch(base);

	/* And wait till the threads are done. */
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	/* Now, let's see what happened. At least one of 5 or 6 should
	 * have timed out. */
	n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
	tt_int_op(n_timed_out, >=, 1);
	tt_int_op(n_timed_out, <=, 2);

	for (i = 0; i < NUM_THREADS; ++i) {
		const struct timeval *target_delay;
		struct timeval target_time, actual_delay;
		if (alerted[i].timed_out) {
			TT_BLATHER(("%d looks like a timeout\n", i));
			target_delay = &tv_timeout;
			tt_assert(i == 5 || i == 6);
		} else if (evutil_timerisset(&alerted[i].alerted_at)) {
			long diff1, diff2;
			evutil_timersub(&alerted[i].alerted_at,
			    &launched_at, &actual_delay);
			diff1 = timeval_msec_diff(&actual_delay, &tv_signal);
			diff2 = timeval_msec_diff(&actual_delay,
			    &tv_broadcast);
			if (labs(diff1) < labs(diff2)) {
				TT_BLATHER(("%d looks like a signal\n", i));
				target_delay = &tv_signal;
				++n_signal;
			} else {
				TT_BLATHER(("%d looks like a broadcast\n",
				    i));
				target_delay = &tv_broadcast;
				++n_broadcast;
			}
		} else {
			TT_FAIL(("Thread %d never got woken", i));
			continue;
		}
		evutil_timeradd(target_delay, &launched_at, &target_time);
		test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
		    0, 50);
	}
	tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
	tt_int_op(n_signal, ==, 1);

end:
	EVTHREAD_FREE_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_FREE_COND(cond.cond);
}
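/*
 * Pattern sketch (illustrative): both tests above rely on the standard
 * condition-variable discipline, which is what the EVTHREAD_* macros map
 * to under evthread_use_pthreads(): hold the lock while arming the wakeup
 * and while waiting, and loop on a predicate to tolerate spurious
 * wakeups. The 'fired' flag below is an assumption of the sketch.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int fired = 0; /* hypothetical predicate */

static void *
waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Holding the lock here closes the window the tests' comments
	 * worry about: the signal cannot arrive between arming the wakeup
	 * and waiting. The loop also handles spurious wakeups. */
	while (!fired)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("woken");
	return NULL;
}

int
main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, waiter, NULL);

	pthread_mutex_lock(&lock);
	fired = 1;
	pthread_cond_signal(&cond); /* pthread_cond_broadcast wakes all */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}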