static void test_fin_many(void *arg) { struct basic_test_data *data = arg; struct event_base *base = data->base; struct event *ev1, *ev2; struct event_callback evcb1, evcb2; int ev1_count = 0, ev2_count = 0; int evcb1_count = 0, evcb2_count = 0; struct event_callback *array[4]; int n; /* First attempt: call finalize_many with no events running */ ev1 = evtimer_new(base, timer_callback, &ev1_count); ev1 = evtimer_new(base, timer_callback, &ev2_count); event_deferred_cb_init_(&evcb1, 0, simple_callback, &evcb1_called); event_deferred_cb_init_(&evcb2, 0, simple_callback, &evcb2_called); array[0] = &ev1->ev_evcallback; array[1] = &ev2->ev_evcallback; array[2] = &evcb1; array[3] = &evcb2; n = event_callback_finalize_many(base, 4, array, callback_finalize_callback_1); }
static struct accepting_socket * new_accepting_socket(struct evconnlistener_iocp *lev, int family) { struct accepting_socket *res; int addrlen; int buflen; if (family == AF_INET) addrlen = sizeof(struct sockaddr_in); else if (family == AF_INET6) addrlen = sizeof(struct sockaddr_in6); else return NULL; buflen = (addrlen+16)*2; res = (struct accepting_socket *)mm_calloc(1,sizeof(struct accepting_socket)-1+buflen); if (!res) return NULL; event_overlapped_init_(&res->overlapped, accepted_socket_cb); res->s = INVALID_SOCKET; res->lev = lev; res->buflen = buflen; res->family = family; event_deferred_cb_init_(&res->deferred, event_base_get_npriorities(lev->event_base) / 2, accepted_socket_invoke_user_cb, res); InitializeCriticalSectionAndSpinCount(&res->lock, 1000); return res; }
/* Worker-thread body: initialize CB_COUNT deferred callbacks and schedule
 * each one on the shared queue, sleeping 1 ms between schedules so the
 * consumer thread gets a chance to interleave. */
static THREAD_FN
load_deferred_queue(void *arg)
{
	struct deferred_test_data *td = arg;
	size_t idx = 0;

	while (idx < CB_COUNT) {
		event_deferred_cb_init_(&td->cbs[idx], 0,
		    deferred_callback, NULL);
		event_deferred_cb_schedule_(td->queue, &td->cbs[idx]);
		SLEEP_MS(1);
		++idx;
	}

	THREAD_RETURN();
}
/* Regression test: exercise finalization of events and callbacks in four
 * stages: (1) finalize idle objects, (2) activate without finalizing,
 * (3) activate and then finalize, (4) finalize a pending-but-inactive
 * event, plus a final add/del-after-finalize sequence.  Judging from the
 * assertions, the finalize callbacks appear to add 100 to their counter
 * while the normal timer/simple callbacks add 1, so a count of exactly
 * 100 means "only the finalizer ran" — confirm against the callback
 * definitions elsewhere in this file. */
static void
test_fin_cb_invoked(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event *ev;
	struct event ev2;
	struct event_callback evcb;
	int cb_called = 0;
	int ev_called = 0;
	const struct timeval ten_sec = {10,0};

	event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called);
	ev = evtimer_new(base, timer_callback, &ev_called);
	/* Just finalize them; don't bother adding. */
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1);
	event_base_dispatch(base);
	tt_int_op(cb_called, ==, 100);
	tt_int_op(ev_called, ==, 100);

	ev_called = cb_called = 0;
	event_base_assert_ok_(base);

	/* Now try it when they're active. (actually, don't finalize: make
	 * sure activation can happen!) */
	ev = evtimer_new(base, timer_callback, &ev_called);
	event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called);
	event_active(ev, EV_TIMEOUT, 1);
	event_callback_activate_(base, &evcb);
	event_base_dispatch(base);
	/* Plain activation runs the user callbacks exactly once each. */
	tt_int_op(cb_called, ==, 1);
	tt_int_op(ev_called, ==, 1);

	ev_called = cb_called = 0;
	event_base_assert_ok_(base);

	/* Great, it worked. Now activate and finalize and make sure only
	 * finalizing happens. */
	event_active(ev, EV_TIMEOUT, 1);
	event_callback_activate_(base, &evcb);
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1);
	event_base_dispatch(base);
	tt_int_op(cb_called, ==, 100);
	tt_int_op(ev_called, ==, 100);

	ev_called = 0;
	event_base_assert_ok_(base);

	/* Okay, now add but don't have it become active, and make sure *that*
	 * works. */
	ev = evtimer_new(base, timer_callback, &ev_called);
	event_add(ev, &ten_sec);
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_base_dispatch(base);
	tt_int_op(ev_called, ==, 100);

	ev_called = 0;
	event_base_assert_ok_(base);

	/* Now try adding and deleting after finalizing. */
	ev = evtimer_new(base, timer_callback, &ev_called);
	evtimer_assign(&ev2, base, timer_callback, &ev_called);
	event_add(ev, &ten_sec);
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_finalize(0, &ev2, event_finalize_callback_1);
	/* add/del/active on an already-finalizing event: the assertion
	 * below (200 == two finalizers only) shows these must not cause
	 * the user callbacks to run. */
	event_add(&ev2, &ten_sec);
	event_del(ev);
	event_active(&ev2, EV_TIMEOUT, 1);
	event_base_dispatch(base);
	tt_int_op(ev_called, ==, 200);

	event_base_assert_ok_(base);

end:
	;
}
int bufferevent_init_common_(struct bufferevent_private *bufev_private, struct event_base *base, const struct bufferevent_ops *ops, enum bufferevent_options options) { struct bufferevent *bufev = &bufev_private->bev; if (!bufev->input) { if ((bufev->input = evbuffer_new()) == NULL) return -1; } if (!bufev->output) { if ((bufev->output = evbuffer_new()) == NULL) { evbuffer_free(bufev->input); return -1; } } bufev_private->refcnt = 1; bufev->ev_base = base; /* Disable timeouts. */ evutil_timerclear(&bufev->timeout_read); evutil_timerclear(&bufev->timeout_write); bufev->be_ops = ops; bufferevent_ratelim_init_(bufev_private); /* * Set to EV_WRITE so that using bufferevent_write is going to * trigger a callback. Reading needs to be explicitly enabled * because otherwise no data will be available. */ bufev->enabled = EV_WRITE; #ifndef EVENT__DISABLE_THREAD_SUPPORT if (options & BEV_OPT_THREADSAFE) { if (bufferevent_enable_locking_(bufev, NULL) < 0) { /* cleanup */ evbuffer_free(bufev->input); evbuffer_free(bufev->output); bufev->input = NULL; bufev->output = NULL; return -1; } } #endif if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS)) == BEV_OPT_UNLOCK_CALLBACKS) { event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS"); return -1; } if (options & BEV_OPT_UNLOCK_CALLBACKS) event_deferred_cb_init_( &bufev_private->deferred, event_base_get_npriorities(base) / 2, bufferevent_run_deferred_callbacks_unlocked, bufev_private); else event_deferred_cb_init_( &bufev_private->deferred, event_base_get_npriorities(base) / 2, bufferevent_run_deferred_callbacks_locked, bufev_private); bufev_private->options = options; evbuffer_set_parent_(bufev->input, bufev); evbuffer_set_parent_(bufev->output, bufev); return 0; }