bool PipedEventWatcher::DoInit() { assert(pipe_[0] == 0); if (evutil_socketpair(AF_LOCAL, SOCK_STREAM, 0, pipe_) < 0) { log_error("evutil_socketpair failed"); goto failed; } if (evutil_make_socket_nonblocking(pipe_[0]) < 0 || evutil_make_socket_nonblocking(pipe_[1]) < 0) { log_error("evutil_make_socket_nonblockingfailed"); goto failed; } event_assign(&event_, event_base_, pipe_[1], EV_READ | EV_PERSIST, PipedEventWatcher::HandlerFn, this); return true; failed: Close(); return false; }
/* Initialize a worker: its own event base, a non-blocking wakeup socket
 * pair, the read event on that pair, and the mutex.
 * Returns 0 on success, -1 on failure (partially-built state is released). */
int neuworker_init(neuworker_t *nw, void *data){
    ASSERT(nw != NULL);

    nw->base = event_base_new();
    if(!nw->base)
        return -1;

    /* BUG FIX: evutil_socketpair() returns 0 on success, so the original
     * `if(!evutil_socketpair(...))` bailed out on SUCCESS and carried on
     * with an uninitialized pair on failure.  Test for non-zero instead. */
    if(evutil_socketpair(AF_LOCAL, SOCK_STREAM, 0, nw->socks) != 0){
        event_base_free(nw->base);
        return -1;
    }
    evutil_make_socket_nonblocking(nw->socks[0]);
    evutil_make_socket_nonblocking(nw->socks[1]);

    /* Wake-up event: fires when another thread writes to socks[1]. */
    nw->evsock = event_new(nw->base, nw->socks[0], EV_READ, neuworker_sockcb, nw);
    if(nw->evsock == NULL){
        event_base_free(nw->base);
        evutil_closesocket(nw->socks[0]);
        evutil_closesocket(nw->socks[1]);
        return -1;
    }
    event_add(nw->evsock, NULL);
    pthread_mutex_init(&nw->lock, NULL);
    nw->data = data;
    return 0;
}
// Thread body: builds an event_base, wires a socketpair-based wakeup event,
// publishes the base to the thread waiting in start-up code, then runs the
// dispatch loop until it is told to exit.
void EventLoopThread::threadFunc() {
    struct event_base* base = event_base_new();
    // NOTE(review): AF_INET socketpair only works via libevent's Windows
    // emulation; POSIX socketpair(AF_INET) typically fails — confirm the
    // target platform (AF_UNIX/AF_LOCAL is the portable choice).  The
    // return value is only printed, not acted upon.
    int iret = evutil_socketpair(AF_INET, SOCK_STREAM, 0, wakeFd_);
    std::cout << "socket pair ret:" << iret << std::endl;
    std::cout << "thread id:" << std::this_thread::get_id() << std::endl;
    // Persistent read event on the second end of the pair wakes the loop.
    iret = event_assign(&ev_, base, wakeFd_[1], EV_READ | EV_PERSIST, WakeupCallback, this);
    if (0 != iret) {
        std::cout << "event assign error" << iret << std::endl;
    }
    iret = event_add(&ev_, nullptr);
    if (0 != iret) {
        std::cout << "event add error:" << iret << std::endl;
    }
    {
        // Publish the base under the lock and wake whoever is waiting for it.
        std::unique_lock<std::mutex> ul(mutex_);
        base_ = base;
        cond_.notify_one();
    }
    // Blocks until the loop is broken/exited elsewhere.
    event_base_dispatch(base);
    std::cout << "event loop exiting...\n";
}
int main(int argc, char **argv) { struct event ev; const char *test = "test string"; int pair[2]; if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) return (1); send(pair[0], test, strlen(test)+1, 0); shutdown(pair[0], SHUT_WR); /* Initalize the event library */ event_init(); /* Initalize one event */ event_set(&ev, pair[1], EV_READ, read_cb, &ev); event_add(&ev, NULL); event_dispatch(); return (test_okay); }
/* Create the per-thread notification socket pair and make both ends
 * non-blocking.  Returns false (after logging) on any failure. */
bool create_notification_pipe(LIBEVENT_THREAD *me) {
    if (evutil_socketpair(SOCKETPAIR_AF, SOCK_STREAM, 0,
                          (void*)me->notify) == SOCKET_ERROR) {
        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                        "Can't create notify pipe: %s",
                                        strerror(errno));
        return false;
    }

    for (int j = 0; j < 2; ++j) {
        int flags = 1;
        /* Best-effort tuning; failures here are harmless on a local pair. */
        setsockopt(me->notify[j], IPPROTO_TCP, TCP_NODELAY,
                   (void *)&flags, sizeof(flags));
        setsockopt(me->notify[j], SOL_SOCKET, SO_REUSEADDR,
                   (void *)&flags, sizeof(flags));

        if (evutil_make_socket_nonblocking(me->notify[j]) == -1) {
            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                            "Failed to enable non-blocking: %s",
                                            strerror(errno));
            /* FIX: the original returned without closing the freshly
             * created pair, leaking both descriptors. */
            evutil_closesocket(me->notify[0]);
            evutil_closesocket(me->notify[1]);
            return false;
        }
    }
    return true;
}
/* Create a notification socket pair for a work queue, make both ends
 * non-blocking, and store them as recv/send fds.
 * Returns false (after logging) on any failure. */
static bool create_notification_pipe(work_queue *me) {
    int j;
    SOCKET notify[2];

    if (evutil_socketpair(SOCKETPAIR_AF, SOCK_STREAM, 0,
                          (void*)notify) == SOCKET_ERROR) {
        moxi_log_write("Failed to create notification pipe");
        return false;
    }

    for (j = 0; j < 2; ++j) {
        int flags = 1;
        /* Best-effort tuning; failures here are harmless on a local pair. */
        setsockopt(notify[j], IPPROTO_TCP, TCP_NODELAY,
                   (void *)&flags, sizeof(flags));
        setsockopt(notify[j], SOL_SOCKET, SO_REUSEADDR,
                   (void *)&flags, sizeof(flags));

        if (evutil_make_socket_nonblocking(notify[j]) == -1) {
            moxi_log_write("Failed to enable non-blocking");
            /* FIX: the original returned without closing the freshly
             * created pair, leaking both descriptors. */
            evutil_closesocket(notify[0]);
            evutil_closesocket(notify[1]);
            return false;
        }
    }

    me->recv_fd = notify[0];
    me->send_fd = notify[1];
    return true;
}
/** * create the event-threads handler * * provides the event-queue that is contains the event_ops from the event-threads * and notifies all the idling event-threads for the new event-ops to process */ chassis_event_threads_t *chassis_event_threads_new() { chassis_event_threads_t *threads; tls_event_base_key = g_private_new(NULL); threads = g_new0(chassis_event_threads_t, 1); /* create the ping-fds * * the event-thread write a byte to the ping-pipe to trigger a fd-event when * something is available in the event-async-queues */ if (0 != evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, threads->event_notify_fds)) { int err; #ifdef WIN32 err = WSAGetLastError(); #else err = errno; #endif g_error("%s: evutil_socketpair() failed: %s (%d)", G_STRLOC, g_strerror(err), err); } threads->event_threads = g_ptr_array_new(); threads->event_queue = g_async_queue_new(); return threads; }
int main(int argc, char **argv) { struct event ev; #ifdef WIN32 WORD wVersionRequested; WSADATA wsaData; wVersionRequested = MAKEWORD(2, 2); (void) WSAStartup(wVersionRequested, &wsaData); #endif #ifndef WIN32 if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) return (1); #endif if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) return (1); /* Initalize the event library */ event_init(); /* Initalize one event */ event_set(&ev, pair[1], EV_WRITE, write_cb, &ev); event_add(&ev, NULL); event_dispatch(); return (test_okay); }
int main(int argc, char **argv) { struct event* ev; struct event* timeout; struct event_base* base; evutil_socket_t pair[2]; struct timeval tv; struct cpu_usage_timer timer; double usage, secPassed, secUsed; #ifdef _WIN32 WORD wVersionRequested; WSADATA wsaData; int err; wVersionRequested = MAKEWORD(2, 2); err = WSAStartup(wVersionRequested, &wsaData); #endif if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) return (1); /* Initalize the event library */ base = event_base_new(); /* Initalize a timeout to terminate the test */ timeout = evtimer_new(base,timeout_cb,&timeout); /* and watch for writability on one end of the pipe */ ev = event_new(base,pair[1],EV_WRITE | EV_PERSIST, write_cb, &ev); tv.tv_sec = 1; tv.tv_usec = 500*1000; evtimer_add(timeout, &tv); event_add(ev, NULL); start_cpu_usage_timer(&timer); event_base_dispatch(base); get_cpu_usage(&timer, &secPassed, &secUsed, &usage); /* attempt to calculate our cpu usage over the test should be virtually nil */ printf("usec used=%d, usec passed=%d, cpu usage=%.2f%%\n", (int)(secUsed*1e6), (int)(secPassed*1e6), usage*100); if (usage > 50.0) /* way too high */ return 1; return 0; }
/* Run one benchmark pass: create num_pipes socket pairs, arm a read event
 * on each, kick off a single write, dispatch, and return the elapsed wall
 * time (pointer into function-local static storage, overwritten per call).
 * Relies on file-scope globals: events, pipes, fired, read_cb. */
static struct timeval *
run_once(int num_pipes)
{
    int i;
    evutil_socket_t *cp;
    static struct timeval ts, te, tv_timeout;

    events = calloc(num_pipes, sizeof(struct event));
    pipes = calloc(num_pipes * 2, sizeof(evutil_socket_t));
    if (events == NULL || pipes == NULL) {
        perror("malloc");
        exit(1);
    }

    for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
        if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) {
            perror("socketpair");
            exit(1);
        }
    }

    /* measurements includes event setup */
    evutil_gettimeofday(&ts, NULL);

    /* provide a default timeout for events */
    evutil_timerclear(&tv_timeout);
    tv_timeout.tv_sec = 60;

    for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
        /* cp[3] is the write end of the NEXT pair: read_cb forwards the
         * byte down the chain; the last event gets -1 (end of chain). */
        evutil_socket_t fd = i < num_pipes - 1 ? cp[3] : -1;
        event_set(&events[i], cp[0], EV_READ, read_cb,
                  (void *)(ev_intptr_t)fd);
        event_add(&events[i], &tv_timeout);
    }

    fired = 0;

    /* kick everything off with a single write */
    if (send(pipes[1], "e", 1, 0) < 0)
        perror("send");

    event_dispatch();

    evutil_gettimeofday(&te, NULL);
    evutil_timersub(&te, &ts, &te);

    /* Tear down.  NOTE(review): close() on socket fds is not portable to
     * Windows (evutil_closesocket() would be) — confirm target platform. */
    for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
        event_del(&events[i]);
        close(cp[0]);
        close(cp[1]);
    }

    free(pipes);
    free(events);

    return (&te);
}
int socketpair(int domain, int type, int protocol, int sv[2]) { if (domain != PF_UNIX || type != SOCK_STREAM || protocol != 0) { return -1; } intptr_t pair[2]; auto r = evutil_socketpair(AF_INET, type, protocol, pair); if (r == -1) { return r; } sv[0] = _open_osfhandle(pair[0], O_RDWR | O_BINARY); sv[1] = _open_osfhandle(pair[1], O_RDWR | O_BINARY); return 0; }
/* Verify edge-triggered (EV_ET) read semantics: with data already queued
 * on pair[1], an ET-capable backend (epoll/kqueue) should fire read_cb
 * exactly once across two non-blocking loop passes; a level-triggered
 * backend fires on both passes.
 * NOTE(review): this snippet is truncated — the level-triggered branch and
 * the cleanup code are not visible here. */
static void test_edgetriggered(void *et) {
    struct event *ev = NULL;
    struct event_base *base = NULL;
    const char *test = "test string";
    evutil_socket_t pair[2] = {-1,-1};
    int supports_et;

    if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair) == -1) {
        tt_abort_perror("socketpair");
    }

    called = was_et = 0;

    /* Pre-load the pair so a read event is pending before the loop runs. */
    send(pair[0], test, (int)strlen(test)+1, 0);
    shutdown(pair[0], SHUT_WR);

    /* Initalize the event library */
    base = event_base_new();

    /* Only these backends implement edge-triggering. */
    if (!strcmp(event_base_get_method(base), "epoll") ||
        !strcmp(event_base_get_method(base), "epoll (with changelist)") ||
        !strcmp(event_base_get_method(base), "kqueue"))
        supports_et = 1;
    else
        supports_et = 0;

    TT_BLATHER(("Checking for edge-triggered events with %s, which should %s"
                "support edge-triggering", event_base_get_method(base),
                supports_et?"":"not "));

    /* Initalize one event */
    ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev);
    event_add(ev, NULL);

    /* We're going to call the dispatch function twice.  The first invocation
     * will read a single byte from pair[1] in either case.  If we're edge
     * triggered, we'll only see the event once (since we only see transitions
     * from no data to data), so the second invocation of event_base_loop will
     * do nothing.  If we're level triggered, the second invocation of
     * event_base_loop will also activate the event (because there's still
     * data to read). */
    event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE);
    event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE);

    if (supports_et) {
        tt_int_op(called, ==, 1);
        tt_assert(was_et);
    } else {
        /* (level-triggered branch truncated in this view) */
bool YYLibEvent::initqueue() { evutil_socket_t sv[2]; // sv[0] push socket, sv[1] pop socket if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) return false; evutil_make_socket_nonblocking(sv[1]); struct event * event_read = event_new(base, sv[1], EV_READ|EV_PERSIST, do_queue_read, this); event_add(event_read, NULL); init(sv[0]); LOG::Info("queue create success, port0:%d, port1:%d", sv[0], sv[1]); return true; }
/* Initialize per-base signal handling state (base->sig).
 * Returns 0 on success, -1 if the wakeup socket pair can't be created. */
int evsignal_init(struct event_base *base)
{
    int i;

    /*
     * Our signal handler is going to write to one end of the socket
     * pair to wake up our event loop.  The event loop then scans for
     * signals that got delivered.
     */
    if (evutil_socketpair(
            AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1) {
#ifdef WIN32
        /* Make this nonfatal on win32, where sometimes people
           have localhost firewalled. */
        event_warn("%s: socketpair", __func__);
#else
        event_err(1, "%s: socketpair", __func__);
#endif
        return -1;
    }

    /* Don't leak the wakeup pair into exec'd children. */
    FD_CLOSEONEXEC(base->sig.ev_signal_pair[0]);
    FD_CLOSEONEXEC(base->sig.ev_signal_pair[1]);
    base->sig.sh_old = NULL;
    base->sig.sh_old_max = 0;
    base->sig.evsignal_caught = 0;
    memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
    /* initialize the queues for all events */
    for (i = 0; i < NSIG; ++i)
        TAILQ_INIT(&base->sig.evsigevents[i]);

    /* NOTE(review): only pair[0] is made non-blocking here; presumably
     * that is the end the signal handler writes to — confirm. */
    evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);

    /* Persistent internal read event on the other end of the pair. */
    event_set(&base->sig.ev_signal, base->sig.ev_signal_pair[1],
        EV_READ | EV_PERSIST, evsignal_cb, &base->sig.ev_signal);
    base->sig.ev_signal.ev_base = base;
    /* Flag the event as internal bookkeeping. */
    base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;

    return 0;
}
int main(int argc, char **argv) { struct event_base *base; struct event_config *cfg; struct event *ev; const char *test = "test string"; evutil_socket_t pair[2]; /* Initialize the library and check if the backend supports EV_FEATURE_EARLY_CLOSE */ cfg = event_config_new(); event_config_require_features(cfg, EV_FEATURE_EARLY_CLOSE); base = event_base_new_with_config(cfg); event_config_free(cfg); if (!base) { /* Backend doesn't support EV_FEATURE_EARLY_CLOSE */ return 0; } /* Create a pair of sockets */ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) return (1); /* Send some data on socket 0 and immediately close it */ if (send(pair[0], test, (int)strlen(test)+1, 0) < 0) return (1); shutdown(pair[0], SHUT_WR); /* Dispatch */ ev = event_new(base, pair[1], EV_CLOSED | EV_TIMEOUT, closed_cb, event_self_cbarg()); event_add(ev, &timeout); event_base_dispatch(base); /* Finalize library */ event_base_free(base); return 0; }
/* Initialize per-base signal handling state (base->sig) and install the
 * signal backend.  Returns 0 on success, -1 if the wakeup socket pair
 * can't be created. */
int evsig_init(struct event_base *base)
{
    /*
     * Our signal handler is going to write to one end of the socket
     * pair to wake up our event loop.  The event loop then scans for
     * signals that got delivered.
     */
    if (evutil_socketpair(
            AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1) {
#ifdef WIN32
        /* Make this nonfatal on win32, where sometimes people
           have localhost firewalled. */
        event_sock_warn(-1, "%s: socketpair", __func__);
#else
        event_sock_err(1, -1, "%s: socketpair", __func__);
#endif
        return -1;
    }

    /* Don't leak the wakeup pair into exec'd children. */
    evutil_make_socket_closeonexec(base->sig.ev_signal_pair[0]);
    evutil_make_socket_closeonexec(base->sig.ev_signal_pair[1]);
    base->sig.sh_old = NULL;
    base->sig.sh_old_max = 0;
    base->sig.evsig_caught = 0;
    memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);

    /* NOTE(review): only pair[0] is made non-blocking here; presumably
     * that is the end the signal handler writes to — confirm. */
    evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);

    /* Persistent internal read event on the other end of the pair. */
    event_assign(&base->sig.ev_signal, base, base->sig.ev_signal_pair[1],
        EV_READ | EV_PERSIST, evsig_cb, &base->sig.ev_signal);

    /* Flag the event as internal bookkeeping. */
    base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;

    /* Install the signal backend for this base. */
    base->evsigsel = &evsigops;
    base->evsigbase = &base->sig;

    return 0;
}
//初始化base->sig成员 int evsig_init(struct event_base *base) { /* * Our signal handler is going to write to one end of the socket * pair to wake up our event loop. The event loop then scans for * signals that got delivered. //这感觉就是上一个函数的功能 */ if (evutil_socketpair( AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1) { #ifdef WIN32 /* Make this nonfatal on win32, where sometimes people have localhost firewalled. */ event_sock_warn(-1, "%s: socketpair", __func__); #else event_sock_err(1, -1, "%s: socketpair", __func__); #endif return -1; } evutil_make_socket_closeonexec(base->sig.ev_signal_pair[0]);//两个都设置执行时关闭 evutil_make_socket_closeonexec(base->sig.ev_signal_pair[1]); base->sig.sh_old = NULL; //? base->sig.sh_old_max = 0; evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);//设置为非阻塞 evutil_make_socket_nonblocking(base->sig.ev_signal_pair[1]); event_assign(&base->sig.ev_signal, base, base->sig.ev_signal_pair[1],//注册了上一个函数evsig_cb为回调函数 EV_READ | EV_PERSIST, evsig_cb, base); base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;//sig为evsig_info类型,ev_signal为event类型。设置为内部,一直没搞懂是啥 event_priority_set(&base->sig.ev_signal, 0);//初始优先级为0 base->evsigsel = &evsigops;//设置信号处理的后端 return 0; }
/* Initialize a worker: event base, non-blocking wakeup socket pair, the
 * read event on that pair, the mutex, and the event map.
 * Returns 0 on success, -1 on failure (all partial state is released). */
int neuworker_init(neuworker_t *nw, void *data){
    ASSERT(nw != NULL);

    nw->base = event_base_new();
    if(!nw->base){
        log_warn("neuworker : event base new fail\n");
        return -1;
    }

    if(evutil_socketpair(AF_LOCAL, SOCK_STREAM, 0, nw->socks) != 0){
        event_base_free(nw->base);
        log_warn("neuworker : socket pair fail\n");
        return -1;
    }
    evutil_make_socket_nonblocking(nw->socks[0]);
    evutil_make_socket_nonblocking(nw->socks[1]);

    /* Task-notification event: fires when another thread writes to socks[1]. */
    nw->evsock = event_new(nw->base, nw->socks[0], EV_READ, neuworker_dotask, nw);
    if(nw->evsock == NULL){
        event_base_free(nw->base);
        evutil_closesocket(nw->socks[0]);
        evutil_closesocket(nw->socks[1]);
        log_warn("neuworker : event new fail\n");
        return -1;
    }
    event_add(nw->evsock, NULL);

    if(pthread_mutex_init(&nw->lock, NULL) != 0){
        /* FIX: the original returned here without releasing the event,
         * the socket pair, or the base — leaking all of them.  Also fixed
         * the "inti" typo in the log message. */
        event_free(nw->evsock);
        event_base_free(nw->base);
        evutil_closesocket(nw->socks[0]);
        evutil_closesocket(nw->socks[1]);
        log_warn("neuworker : init mutex fail\n");
        return -1;
    }

    fixmap_init(&nw->fmevs, neumap_event_compare);
    nw->data = data;
    return 0;
}
int main(int argc, char **argv) { struct event ev; const char *test = "test string"; evutil_socket_t pair[2]; #ifdef WIN32 WORD wVersionRequested; WSADATA wsaData; int err; wVersionRequested = MAKEWORD(2, 2); err = WSAStartup(wVersionRequested, &wsaData); #endif if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) return (1); if (send(pair[0], test, (int)strlen(test)+1, 0) < 0) return (1); shutdown(pair[0], SHUT_WR); /* Initalize the event library */ event_init(); /* Initalize one event */ event_set(&ev, pair[1], EV_READ | EV_TIMEOUT, read_cb, &ev); event_add(&ev, &timeout); event_dispatch(); return (test_okay); }
int main(int argc, char **argv) { struct event ev0, ev1, ev2, ev3; int pair[2]; if (pipe(pair) == -1) { if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) { perror("pipe"); exit(1); } } struct event_base *base = event_init(); event_set(&ev0, pair[0], EV_READ, read_cb, &ev0); event_set(&ev1, pair[1], EV_WRITE, write_cb, &ev1); event_set(&ev2, pair[1], EV_WRITE, write_cb2, &ev2); event_set(&ev3, pair[1], EV_WRITE, write_cb3, &ev3); event_add(&ev0, NULL); event_add(&ev1, NULL); event_add(&ev2, NULL); event_add(&ev3, NULL); // add_event(pair[1], EV_WRITE, write_cb); // add_event(pair[0], EV_READ, read_cb); event_base_dispatch(base); return (0); }
/* Create two connected bufferevents over a local socket pair.
 * On success stores them in *one / *two and returns true; on failure
 * releases everything (BEV_OPT_CLOSE_ON_FREE closes the underlying
 * socket), throws ION\StreamException where a partial setup existed,
 * and returns false. */
zend_bool ion_buffer_pair(ion_buffer ** one, ion_buffer ** two) {
    evutil_socket_t socks[2] = {-1, -1};
    ion_buffer * pair[2];

    if(evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, socks) == FAILURE) {
        return false;
    }

    pair[0] = bufferevent_socket_new(GION(base), socks[0], BEV_OPT_CLOSE_ON_FREE);
    pair[1] = bufferevent_socket_new(GION(base), socks[1], BEV_OPT_CLOSE_ON_FREE);
    /* FIX: bufferevent_socket_new() may return NULL (allocation failure);
     * the original passed NULL straight to bufferevent_enable() and
     * bufferevent_free(), which crashes.  Clean up whichever side exists
     * and close the raw socket of the side that does not. */
    if(pair[0] == NULL || pair[1] == NULL) {
        if(pair[0]) bufferevent_free(pair[0]); else evutil_closesocket(socks[0]);
        if(pair[1]) bufferevent_free(pair[1]); else evutil_closesocket(socks[1]);
        zend_throw_exception(ion_ce_ION_StreamException, "Failed to create stream pair", 0);
        return false;
    }

    if(bufferevent_enable(pair[0], EV_READ | EV_WRITE) == FAILURE
            || bufferevent_enable(pair[1], EV_READ | EV_WRITE) == FAILURE) {
        bufferevent_free(pair[0]);
        bufferevent_free(pair[1]);
        zend_throw_exception(ion_ce_ION_StreamException, "Failed to enable stream", 0);
        return false;
    }

    *one = pair[0];
    *two = pair[1];
    return true;
}
/* Build the win32 directory-watch backend for `handle`: opens the watched
 * directory, starts an overlapped ReadDirectoryChangesW, wires a
 * socketpair + bufferevent as the notification channel, and spawns the
 * worker thread.  Returns the backend, or NULL after releasing everything
 * on any failure. */
tr_watchdir_backend *
tr_watchdir_win32_new (tr_watchdir_t handle)
{
  const char * const path = tr_watchdir_get_path (handle);
  wchar_t * wide_path;
  tr_watchdir_win32 * backend;

  /* Pre-mark resources as "not acquired" so the fail path can free safely. */
  backend = tr_new0 (tr_watchdir_win32, 1);
  backend->base.free_func = &tr_watchdir_win32_free;
  backend->fd = INVALID_HANDLE_VALUE;
  backend->notify_pipe[0] = backend->notify_pipe[1] = TR_BAD_SOCKET;

  if ((wide_path = tr_win32_utf8_to_native (path, -1)) == NULL)
    {
      log_error ("Failed to convert \"%s\" to native path", path);
      goto fail;
    }

  /* Open the directory for change notification (overlapped I/O). */
  if ((backend->fd = CreateFileW (wide_path, FILE_LIST_DIRECTORY,
                                  FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL,
                                  OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED,
                                  NULL)) == INVALID_HANDLE_VALUE)
    {
      log_error ("Failed to open directory \"%s\"", path);
      goto fail;
    }

  /* Done with the converted path; NULL it so the fail path won't double-free. */
  tr_free (wide_path);
  wide_path = NULL;

  backend->overlapped.Pointer = handle;

  /* Kick off the first asynchronous change request. */
  if (!ReadDirectoryChangesW (backend->fd, backend->buffer, sizeof (backend->buffer), FALSE,
                              WIN32_WATCH_MASK, NULL, &backend->overlapped, NULL))
    {
      log_error ("Failed to read directory changes");
      goto fail;
    }

  /* AF_INET pair: evutil's Windows socketpair emulation uses loopback TCP. */
  if (evutil_socketpair (AF_INET, SOCK_STREAM, 0, backend->notify_pipe) == -1)
    {
      log_error ("Failed to create notify pipe: %s", tr_strerror (errno));
      goto fail;
    }

  if ((backend->event = bufferevent_socket_new (tr_watchdir_get_event_base (handle),
                                                backend->notify_pipe[0], 0)) == NULL)
    {
      log_error ("Failed to create event buffer: %s", tr_strerror (errno));
      goto fail;
    }

  /* Don't wake the callback until at least one full record has arrived. */
  bufferevent_setwatermark (backend->event, EV_READ, sizeof (FILE_NOTIFY_INFORMATION), 0);
  bufferevent_setcb (backend->event, &tr_watchdir_win32_on_event, NULL, NULL, handle);
  bufferevent_enable (backend->event, EV_READ);

  /* Worker thread pumps completions into the notify pipe. */
  if ((backend->thread = (HANDLE) _beginthreadex (NULL, 0, &tr_watchdir_win32_thread,
                                                  handle, 0, NULL)) == NULL)
    {
      log_error ("Failed to create thread");
      goto fail;
    }

  /* Perform an initial scan on the directory */
  if (event_base_once (tr_watchdir_get_event_base (handle), -1, EV_TIMEOUT,
                       &tr_watchdir_win32_on_first_scan,
                       handle, NULL) == -1)
    log_error ("Failed to perform initial scan: %s", tr_strerror (errno));

  return BACKEND_DOWNCAST (backend);

fail:
  tr_watchdir_win32_free (BACKEND_DOWNCAST (backend));
  tr_free (wide_path);
  return NULL;
}
/* End-to-end test of zlib filter bufferevents: deflate on the writer,
 * inflate on the reader, over a non-blocking socket pair.  Checks (via
 * file-scope counters set by the callbacks) that both filters ran, the
 * reader and writer completed, and no error callback fired. */
void
test_bufferevent_zlib(void *arg)
{
    struct bufferevent *bev1=NULL, *bev2=NULL;
    char buffer[8333];
    z_stream z_input, z_output;
    int i, pair[2]={-1,-1}, r;
    (void)arg;

    infilter_calls = outfilter_calls = readcb_finished = writecb_finished
        = errorcb_invoked = 0;

    if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
        tt_abort_perror("socketpair");
    }

    evutil_make_socket_nonblocking(pair[0]);
    evutil_make_socket_nonblocking(pair[1]);

    bev1 = bufferevent_socket_new(NULL, pair[0], 0);
    bev2 = bufferevent_socket_new(NULL, pair[1], 0);

    /* Writer side compresses... */
    memset(&z_output, 0, sizeof(z_output));
    r = deflateInit(&z_output, Z_DEFAULT_COMPRESSION);
    tt_int_op(r, ==, Z_OK);
    /* ...reader side decompresses. */
    memset(&z_input, 0, sizeof(z_input));
    r = inflateInit(&z_input);
    tt_int_op(r, ==, Z_OK);

    /* initialize filters: each filter wraps (and now owns, via
     * BEV_OPT_CLOSE_ON_FREE) the underlying socket bufferevent. */
    bev1 = bufferevent_filter_new(bev1, NULL, zlib_output_filter,
        BEV_OPT_CLOSE_ON_FREE, zlib_deflate_free, &z_output);
    bev2 = bufferevent_filter_new(bev2, zlib_input_filter, NULL,
        BEV_OPT_CLOSE_ON_FREE, zlib_inflate_free, &z_input);
    bufferevent_setcb(bev1, readcb, writecb, errorcb, NULL);
    bufferevent_setcb(bev2, readcb, writecb, errorcb, NULL);

    /* bev1 only writes; bev2 only reads. */
    bufferevent_disable(bev1, EV_READ);
    bufferevent_enable(bev1, EV_WRITE);
    bufferevent_enable(bev2, EV_READ);

    for (i = 0; i < (int)sizeof(buffer); i++)
        buffer[i] = i;

    /* break it up into multiple buffer chains */
    bufferevent_write(bev1, buffer, 1800);
    bufferevent_write(bev1, buffer + 1800, sizeof(buffer) - 1800);

    /* we are done writing - we need to flush everything */
    bufferevent_flush(bev1, EV_WRITE, BEV_FINISHED);

    event_dispatch();

    tt_want(infilter_calls);
    tt_want(outfilter_calls);
    tt_want(readcb_finished);
    tt_want(writecb_finished);
    tt_want(!errorcb_invoked);

    test_ok = 1;
end:
    if (bev1)
        bufferevent_free(bev1);
    if (bev2)
        bufferevent_free(bev2);

    if (pair[0] >= 0)
        evutil_closesocket(pair[0]);
    if (pair[1] >= 0)
        evutil_closesocket(pair[1]);
}
/**
 * Create the socket pair other threads use to wake this IO thread, making
 * both ends non-blocking and close-on-exec.
 * Throws TException on any failure (closing both ends first where they
 * were already created).
 */
void TNonblockingIOThread::createNotificationPipe() {
  if(evutil_socketpair(AF_LOCAL, SOCK_STREAM, 0, notificationPipeFDs_) == -1) {
    GlobalOutput.perror("TNonblockingServer::createNotificationPipe ", EVUTIL_SOCKET_ERROR());
    throw TException("can't create notification pipe");
  }
  if(evutil_make_socket_nonblocking(notificationPipeFDs_[0])<0 ||
     evutil_make_socket_nonblocking(notificationPipeFDs_[1])<0) {
    ::THRIFT_CLOSESOCKET(notificationPipeFDs_[0]);
    ::THRIFT_CLOSESOCKET(notificationPipeFDs_[1]);
    throw TException("TNonblockingServer::createNotificationPipe() THRIFT_O_NONBLOCK");
  }
  for (int i = 0; i < 2; ++i) {
    // Older libevent has no evutil_make_socket_closeonexec(); fall back to
    // fcntl(FD_CLOEXEC).  Note BOTH preprocessor branches open the same
    // brace, closed once below.
#if LIBEVENT_VERSION_NUMBER < 0x02000000
    int flags;
    if ((flags = THRIFT_FCNTL(notificationPipeFDs_[i], F_GETFD, 0)) < 0 ||
        THRIFT_FCNTL(notificationPipeFDs_[i], F_SETFD, flags | FD_CLOEXEC) < 0) {
#else
    if (evutil_make_socket_closeonexec(notificationPipeFDs_[i]) < 0) {
#endif
      ::THRIFT_CLOSESOCKET(notificationPipeFDs_[0]);
      ::THRIFT_CLOSESOCKET(notificationPipeFDs_[1]);
      throw TException("TNonblockingServer::createNotificationPipe() "
                       "FD_CLOEXEC");
    }
  }
}

/**
 * Register the core libevent events onto the proper base.
*/ void TNonblockingIOThread::registerEvents() { threadId_ = Thread::get_current(); assert(eventBase_ == 0); eventBase_ = getServer()->getUserEventBase(); if (eventBase_ == NULL) { eventBase_ = event_base_new(); ownEventBase_ = true; } // Print some libevent stats if (number_ == 0) { GlobalOutput.printf("TNonblockingServer: using libevent %s method %s", event_get_version(), event_base_get_method(eventBase_)); } if (listenSocket_ >= 0) { // Register the server event event_set(&serverEvent_, listenSocket_, EV_READ | EV_PERSIST, TNonblockingIOThread::listenHandler, server_); event_base_set(eventBase_, &serverEvent_); // Add the event and start up the server if (-1 == event_add(&serverEvent_, 0)) { throw TException("TNonblockingServer::serve(): " "event_add() failed on server listen event"); } GlobalOutput.printf("TNonblocking: IO thread #%d registered for listen.", number_); } createNotificationPipe(); // Create an event to be notified when a task finishes event_set(¬ificationEvent_, getNotificationRecvFD(), EV_READ | EV_PERSIST, TNonblockingIOThread::notifyHandler, this); // Attach to the base event_base_set(eventBase_, ¬ificationEvent_); // Add the event and start up the server if (-1 == event_add(¬ificationEvent_, 0)) { throw TException("TNonblockingServer::serve(): " "event_add() failed on task-done notification event"); } GlobalOutput.printf("TNonblocking: IO thread #%d registered for notify.", number_); }
/* Per-test fixture builder for the libevent regression suite: inspects the
 * testcase's TT_* flags and prepares threads support, a non-blocking
 * socket pair, an event base (legacy or new-style), IOCP, and/or DNS as
 * requested.  Returns a heap-allocated basic_test_data, NULL on soft
 * failure, or TT_SKIP; hard setup failures exit(1) (tests run forked). */
static void *
basic_test_setup(const struct testcase_t *testcase)
{
    struct event_base *base = NULL;
    evutil_socket_t spair[2] = { -1, -1 };
    struct basic_test_data *data = NULL;

#ifndef _WIN32
    /* IOCP is Windows-only; skip such tests elsewhere. */
    if (testcase->flags & TT_ENABLE_IOCP_FLAG)
        return (void*)TT_SKIP;
#endif

    if (testcase->flags & TT_NEED_THREADS) {
        /* Thread init is process-global, so only safe in forked tests. */
        if (!(testcase->flags & TT_FORK))
            return NULL;
#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED)
        if (evthread_use_pthreads())
            exit(1);
#elif defined(EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED)
        if (evthread_use_windows_threads())
            exit(1);
#else
        return (void*)TT_SKIP;
#endif
    }

    if (testcase->flags & TT_NEED_SOCKETPAIR) {
        if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, spair) == -1) {
            fprintf(stderr, "%s: socketpair\n", __func__);
            exit(1);
        }
        if (evutil_make_socket_nonblocking(spair[0]) == -1) {
            fprintf(stderr, "fcntl(O_NONBLOCK)");
            exit(1);
        }
        if (evutil_make_socket_nonblocking(spair[1]) == -1) {
            fprintf(stderr, "fcntl(O_NONBLOCK)");
            exit(1);
        }
    }

    if (testcase->flags & TT_NEED_BASE) {
        /* TT_LEGACY tests use the old global-base API. */
        if (testcase->flags & TT_LEGACY)
            base = event_init();
        else
            base = event_base_new();
        if (!base)
            exit(1);
    }

    if (testcase->flags & TT_ENABLE_IOCP_FLAG) {
        if (event_base_start_iocp(base, 0)<0) {
            event_base_free(base);
            return (void*)TT_SKIP;
        }
    }

    if (testcase->flags & TT_NEED_DNS) {
        evdns_set_log_fn(dnslogcb);
        if (evdns_init())
            return NULL; /* fast failure */ /*XXX asserts. */
    }

    if (testcase->flags & TT_NO_LOGS)
        event_set_log_callback(ignore_log_cb);

    data = calloc(1, sizeof(*data));
    if (!data)
        exit(1);
    data->base = base;
    data->pair[0] = spair[0];
    data->pair[1] = spair[1];
    data->setup_data = testcase->setup_data;
    return data;
}
/* Verify edge-triggered (EV_ET) read semantics: with data already queued,
 * an ET-capable backend (epoll/kqueue) should fire read_cb exactly once
 * across two non-blocking loop passes; a level-triggered backend fires
 * on both passes.
 * NOTE(review): this snippet is truncated — the level-triggered branch and
 * the cleanup code are not visible here. */
static void
test_edgetriggered(void *et)
{
    struct event *ev = NULL;
    struct event_base *base = NULL;
    const char *test = "test string";
    evutil_socket_t pair[2] = {-1,-1};
    int supports_et;

    /* On Linux 3.2.1 (at least, as patched by Fedora and tested by Nick),
     * doing a "recv" on an AF_UNIX socket resets the readability of the
     * socket, even though there is no state change, so we don't actually
     * get edge-triggered behavior.  Yuck!  Linux 3.1.9 didn't have this
     * problem.
     */
#ifdef __linux__
    if (evutil_ersatz_socketpair_(AF_INET, SOCK_STREAM, 0, pair) == -1) {
        tt_abort_perror("socketpair");
    }
#else
    if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair) == -1) {
        tt_abort_perror("socketpair");
    }
#endif

    called = was_et = 0;

    /* Pre-load the pair so a read event is pending before the loop runs. */
    tt_int_op(send(pair[0], test, (int)strlen(test)+1, 0), >, 0);
    shutdown(pair[0], SHUT_WR);

    /* Initalize the event library */
    base = event_base_new();

    /* Only these backends implement edge-triggering. */
    if (!strcmp(event_base_get_method(base), "epoll") ||
        !strcmp(event_base_get_method(base), "epoll (with changelist)") ||
        !strcmp(event_base_get_method(base), "kqueue"))
        supports_et = 1;
    else
        supports_et = 0;

    TT_BLATHER(("Checking for edge-triggered events with %s, which should %s"
                "support edge-triggering", event_base_get_method(base),
                supports_et?"":"not "));

    /* Initalize one event */
    ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev);
    event_add(ev, NULL);

    /* We're going to call the dispatch function twice.  The first invocation
     * will read a single byte from pair[1] in either case.  If we're edge
     * triggered, we'll only see the event once (since we only see transitions
     * from no data to data), so the second invocation of event_base_loop will
     * do nothing.  If we're level triggered, the second invocation of
     * event_base_loop will also activate the event (because there's still
     * data to read). */
    event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE);
    event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE);

    if (supports_et) {
        tt_int_op(called, ==, 1);
        tt_assert(was_et);
    } else {
        /* (level-triggered branch truncated in this view) */
int main(int argc, char **argv) { #ifndef WIN32 struct rlimit rl; #endif int i, c; struct timeval *tv; int *cp; #ifdef WIN32 WSADATA WSAData; WSAStartup(0x101, &WSAData); #endif num_pipes = 100; num_active = 1; num_writes = num_pipes; while ((c = getopt(argc, argv, "n:a:w:")) != -1) { switch (c) { case 'n': num_pipes = atoi(optarg); break; case 'a': num_active = atoi(optarg); break; case 'w': num_writes = atoi(optarg); break; default: fprintf(stderr, "Illegal argument \"%c\"\n", c); exit(1); } } #ifndef WIN32 rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50; if (setrlimit(RLIMIT_NOFILE, &rl) == -1) { perror("setrlimit"); exit(1); } #endif events = calloc(num_pipes, sizeof(struct event)); pipes = calloc(num_pipes * 2, sizeof(int)); if (events == NULL || pipes == NULL) { perror("malloc"); exit(1); } event_init(); for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) { #ifdef USE_PIPES if (pipe(cp) == -1) { #else if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) { #endif perror("pipe"); exit(1); } } for (i = 0; i < 25; i++) { tv = run_once(); if (tv == NULL) exit(1); fprintf(stdout, "%ld\n", tv->tv_sec * 1000000L + tv->tv_usec); } exit(0); }
/**
 * Create the socket pair other threads use to wake this IO thread, making
 * both ends non-blocking and close-on-exec.
 * Throws TException on any failure (closing both ends first where they
 * were already created).
 * NOTE(review): ::close() on these descriptors is POSIX-only; the newer
 * variant of this code uses THRIFT_CLOSESOCKET — confirm target platform.
 */
void TNonblockingIOThread::createNotificationPipe() {
  if(evutil_socketpair(AF_LOCAL, SOCK_STREAM, 0, notificationPipeFDs_) == -1) {
    GlobalOutput.perror("TNonblockingServer::createNotificationPipe ", EVUTIL_SOCKET_ERROR());
    throw TException("can't create notification pipe");
  }
  if(evutil_make_socket_nonblocking(notificationPipeFDs_[0])<0 ||
     evutil_make_socket_nonblocking(notificationPipeFDs_[1])<0) {
    ::close(notificationPipeFDs_[0]);
    ::close(notificationPipeFDs_[1]);
    throw TException("TNonblockingServer::createNotificationPipe() O_NONBLOCK");
  }
  for (int i = 0; i < 2; ++i) {
    // Older libevent has no evutil_make_socket_closeonexec(); fall back to
    // fcntl(FD_CLOEXEC).  Note BOTH preprocessor branches open the same
    // brace, closed once below.
#if LIBEVENT_VERSION_NUMBER < 0x02000000
    int flags;
    if ((flags = fcntl(notificationPipeFDs_[i], F_GETFD, 0)) < 0 ||
        fcntl(notificationPipeFDs_[i], F_SETFD, flags | FD_CLOEXEC) < 0) {
#else
    if (evutil_make_socket_closeonexec(notificationPipeFDs_[i]) < 0) {
#endif
      ::close(notificationPipeFDs_[0]);
      ::close(notificationPipeFDs_[1]);
      throw TException("TNonblockingServer::createNotificationPipe() "
                       "FD_CLOEXEC");
    }
  }
}

/**
 * Register the core libevent events onto the proper base.
*/ void TNonblockingIOThread::registerEvents() { if (listenSocket_ >= 0) { // Register the server event event_set(&serverEvent_, listenSocket_, EV_READ | EV_PERSIST, TNonblockingIOThread::listenHandler, server_); event_base_set(eventBase_, &serverEvent_); // Add the event and start up the server if (-1 == event_add(&serverEvent_, 0)) { throw TException("TNonblockingServer::serve(): " "event_add() failed on server listen event"); } GlobalOutput.printf("TNonblocking: IO thread #%d registered for listen.", number_); } createNotificationPipe(); // Create an event to be notified when a task finishes event_set(¬ificationEvent_, getNotificationRecvFD(), EV_READ | EV_PERSIST, TNonblockingIOThread::notifyHandler, this); // Attach to the base event_base_set(eventBase_, ¬ificationEvent_); // Add the event and start up the server if (-1 == event_add(¬ificationEvent_, 0)) { throw TException("TNonblockingServer::serve(): " "event_add() failed on task-done notification event"); } GlobalOutput.printf("TNonblocking: IO thread #%d registered for notify.", number_); } bool TNonblockingIOThread::notify(TNonblockingServer::TConnection* conn) { int fd = getNotificationSendFD(); if (fd < 0) { return false; } const int kSize = sizeof(conn); if (send(fd, const_cast_sockopt(&conn), kSize, 0) != kSize) { return false; } return true; }