int Server::Connect(){ if((epfd=epoll_create(MAX_EPOLL_SIZE)) < 0){ perror("epoll create failed"); return -1; } if((sockfd = socket(PF_INET, SOCK_STREAM, 0)) <= 0){ perror("socket creation failed"); return -1; } memset(&dest, 0 , sizeof(dest)); dest.sin_family = AF_INET; dest.sin_port = htons(port); dest.sin_addr.s_addr = INADDR_ANY; int opt = 1; if(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (const char*)&opt, sizeof(int)) < 0){ perror("setsocketopt failed"); return -1; } if(bind(sockfd, (sockaddr*)&dest, sizeof(dest)) < 0){ perror("bind failed"); return -1; } if(listen(sockfd, connection) < 0){ perror("listen failed"); return -1; } set_nonblock(sockfd); set_nonblock(STDIN_FILENO); epoll_add_event(epfd, sockfd, EPOLLIN); epoll_add_event(epfd, STDIN_FILENO, EPOLLIN); return 0; }
int add_connection(int epoll_fd, int conn_sock) { socket_buf sb; struct epoll_event ev; sb = generate_socket_buf(conn_sock); if(sb == NULL) { perror("not enough memory"); return L_HTTP_FAIL; } sb->state = STATE_HTTP_READ; if(sb_array.size() < MAX_CONNECTION && find_socket_buf(conn_sock) == NULL) sb_array[conn_sock] = sb; else { printf("to many connections or using same fd"); return L_HTTP_FAIL; } ev.events = EPOLLIN; set_timer(sb->timer, DEFAULT_TIME_LENGTH); return epoll_add_event(epoll_fd, EPOLLIN, conn_sock); }
/*
 * Master process main loop: create the listening socket and the epoll
 * instance, watch the listen fd, and dispatch events until
 * master_epoll_start() fails with anything other than EINTR.
 */
void master_loop()
{
    int listenfd = tcp_listen();
    int epoll_fd = epoll_init();
    if (listenfd < 0 || epoll_fd < 0)
        exit(1);
    if (epoll_add_event(epoll_fd, EPOLLIN, listenfd) == L_HTTP_FAIL) {
        printf("add listenfd fail\n");
        /* Fix: don't leak the descriptors on the early-error path. */
        close(listenfd);
        close(epoll_fd);
        return;
    }
    while (1) {
        if (master_epoll_start(epoll_fd, MAX_CONNECTION, listenfd) < 0) {
            if (errno == EINTR)
                continue;  /* interrupted by a signal: keep running */
            break;         /* real error: leave the loop */
        }
        //on_timer(epoll_fd);
    }
    close(listenfd);
    close(epoll_fd);
}
/*
 * Worker process main loop: watch the channel fd to the master over
 * epoll and dispatch events until worker_epoll_start() fails with
 * anything other than EINTR; timers are checked every iteration.
 */
void worker_loop(int channel_fd)
{
    int epoll_fd = epoll_init();
    if (channel_fd < 0 || epoll_fd < 0)
        exit(1);
    if (epoll_add_event(epoll_fd, EPOLLIN, channel_fd) == L_HTTP_FAIL) {
        printf("add channel_fd fail\n");
        /* Fix: don't leak the descriptors on the early-error path. */
        close(channel_fd);
        close(epoll_fd);
        return;
    }
    while (1) {
        if (worker_epoll_start(epoll_fd, MAX_CONNECTION, channel_fd) < 0) {
            if (errno == EINTR)
                continue;  /* interrupted by a signal: keep running */
            break;         /* real error: leave the loop */
        }
        on_timer(epoll_fd);
    }
    close(channel_fd);
    close(epoll_fd);
}
/*
 * Add a fd based event.
 * Returns NULL on failure (memory allocation error).
 */
static struct tevent_fd *std_event_add_fd(struct tevent_context *ev,
                                          TALLOC_CTX *mem_ctx,
                                          int fd, uint16_t flags,
                                          tevent_fd_handler_t handler,
                                          void *private_data,
                                          const char *handler_name,
                                          const char *location)
{
    struct std_event_context *std_ev =
        talloc_get_type(ev->additional_data, struct std_event_context);
    struct tevent_fd *fde;

    /* the epoll handle must be recreated if the pid changed (fork) */
    epoll_check_reopen(std_ev);

    fde = tevent_common_add_fd(ev, mem_ctx, fd, flags, handler,
                               private_data, handler_name, location);
    if (fde == NULL) {
        return NULL;
    }

    /* keep the highest-fd bookkeeping current when it is being tracked */
    if (std_ev->maxfd != EVENT_INVALID_MAXFD && fde->fd > std_ev->maxfd) {
        std_ev->maxfd = fde->fd;
    }

    talloc_set_destructor(fde, std_event_fd_destructor);
    epoll_add_event(std_ev, fde);

    return fde;
}
static void epoll_change_event(struct std_event_context *std_ev, struct tevent_fd *fde) { bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR); bool want_read = (fde->flags & TEVENT_FD_READ); bool want_write= (fde->flags & TEVENT_FD_WRITE); if (std_ev->epoll_fd == -1) return; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; /* there's already an event */ if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) { if (want_read || (want_write && !got_error)) { epoll_mod_event(std_ev, fde); return; } /* * if we want to match the select behavior, we need to remove the epoll_event * when the caller isn't interested in events. * * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them */ epoll_del_event(std_ev, fde); return; } /* there's no epoll_event attached to the fde */ if (want_read || (want_write && !got_error)) { epoll_add_event(std_ev, fde); return; } }
/* reopen the epoll handle when our pid changes see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an demonstration of why this is needed */ static void epoll_check_reopen(struct epoll_event_context *epoll_ev) { struct tevent_fd *fde; if (epoll_ev->pid == getpid()) { return; } close(epoll_ev->epoll_fd); epoll_ev->epoll_fd = epoll_create(64); if (epoll_ev->epoll_fd == -1) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL, "Failed to recreate epoll handle after fork\n"); return; } if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING, "Failed to set close-on-exec, file descriptor may be leaked to children.\n"); } epoll_ev->pid = getpid(); for (fde=epoll_ev->ev->fd_events;fde;fde=fde->next) { epoll_add_event(epoll_ev, fde); } }
/*
 * Register an Event with this Reactor.
 *
 * ev      - event to add (its EV_INIT flag must already be set)
 * tv      - optional timeout; non-NULL requires EV_TIMEOUT in ev_events
 * actived - when true, also activate the event in epoll right away
 *
 * Returns 0 for a NULL event, -1 when the event is already active in
 * threaded mode; other outcomes flow through the `done` label, which
 * is NOT visible in this chunk -- the function continues past this
 * excerpt, so this view of it is incomplete.
 */
int Reactor :: add_event(Event * ev, struct timeval * tv, bool actived)
{
    int ret = 0;
    if(ev == NULL)
        return 0;
    /* threaded mode: claim the event under the mutex so no other
       thread can handle it concurrently */
    if(status & RA_THREAD) {
        pthread_mutex_lock(&event_mutex);
        if(!set_io_event_active(ev)) {
            Log :: WARN("Add fail, Event is ACtive, FD %d", ev->ev_fd);
            pthread_mutex_unlock(&event_mutex);
            return -1;
        }
        pthread_mutex_unlock(&event_mutex);
    }
    Log :: DEBUG("ADD EVENT FD: %d events: %s%s%s reactor: %d",
        ev->ev_fd,
        ev->ev_events&EV_READ ? "READ":"",
        ev->ev_events&EV_WRITE ?"WRITE":"",
        ev->ev_events&EV_TIMEOUT?"TIME":"",
        epfd);
    assert((ev->ev_flags & EV_INIT));
    if(tv != NULL) {
        assert(ev->ev_events & EV_TIMEOUT);
        printf("add timeout event\n");
    }
    //if pass, no other can handle this event
    if((ev->ev_events & (EV_READ|EV_WRITE)) && !(ev->ev_flags & (EV_INSERTED))) {
        event_queue_insert(ev, EV_INSERTED);
        Log :: NOTICE("EVENT fd:%d added IN LIST", ev->ev_fd);
        if((ret = epoll_add_event(ev)) < 0)
            goto done;
        if(actived) {
            if((ret = epoll_active_event(ev)) < 0)
                goto done;
        }
        Log :: NOTICE("Reactor Add Event succ fd %d", ev->ev_fd);
        /* NOTE(review): the bitwise `&` below is almost certainly meant
           to be the logical `&&` -- confirm before relying on this branch */
        if((status & RA_THREAD) & (!set_event_unactive(ev))) {
            Log :: WARN("Add SUcc Set fail, set Event UnaCtive Error,\
 status broken FD %d", ev->ev_fd);
        }
/* reopen the epoll handle when our pid changes see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an demonstration of why this is needed */ static void epoll_check_reopen(struct std_event_context *std_ev) { struct tevent_fd *fde; if (std_ev->pid == getpid()) { return; } close(std_ev->epoll_fd); std_ev->epoll_fd = epoll_create(64); if (std_ev->epoll_fd == -1) { tevent_debug(std_ev->ev, TEVENT_DEBUG_FATAL, "Failed to recreate epoll handle after fork\n"); return; } std_ev->pid = getpid(); for (fde=std_ev->ev->fd_events;fde;fde=fde->next) { epoll_add_event(std_ev, fde); } }
/*
 * Bookkeeping for a freshly connected client: allocate a session,
 * notify the logic layer via logic_on_enter, and (on Linux) register
 * the fd with epoll in edge-triggered mode.
 *
 * Returns true on success, false when no session could be allocated.
 */
static bool epoll_handle_newclient(struct epollserver_s* epollserver, void* ud, sock client_fd)
{
    struct session_s* session = epoll_session_malloc(epollserver, client_fd);
    if (session == NULL) {
        return false;
    }

    struct server_s* server = &epollserver->base;
    session->ud = ud;
    session->status = session_status_connect;

    /* tell the upper layer a client has entered */
    (*(server->logic_on_enter))(server, session->ud, session);

#ifdef PLATFORM_LINUX
    epoll_add_event(epollserver->epoll_fd, client_fd, session,
                    EPOLLET | EPOLLIN | EPOLLOUT | EPOLLRDHUP);
#endif

    return true;
}
/*
 * Add a fd based event.
 * Returns NULL on failure (memory allocation error).
 */
static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev,
                                            TALLOC_CTX *mem_ctx,
                                            int fd, uint16_t flags,
                                            tevent_fd_handler_t handler,
                                            void *private_data,
                                            const char *handler_name,
                                            const char *location)
{
    struct epoll_event_context *epoll_ev;
    struct tevent_fd *fde;

    epoll_ev = talloc_get_type(ev->additional_data,
                               struct epoll_event_context);

    /* the epoll handle must be recreated if the pid changed (fork) */
    epoll_check_reopen(epoll_ev);

    fde = tevent_common_add_fd(ev, mem_ctx, fd, flags, handler,
                               private_data, handler_name, location);
    if (fde == NULL) {
        return NULL;
    }

    talloc_set_destructor(fde, epoll_event_fd_destructor);
    epoll_add_event(epoll_ev, fde);

    return fde;
}
/*
 * Run one round of the epoll event loop:
 *   1. try to take the shared accept lock; only the holder registers
 *      the listening sockets with epoll for this round,
 *   2. epoll_wait() for up to `timer` (presumably milliseconds --
 *      confirm against epoll_wait usage elsewhere),
 *   3. the lock holder removes the listening sockets again,
 *   4. every socket that became readable/writable is queued onto
 *      g_rt_ctx.mp_posted_events for later processing.
 *
 * The low bit of each mpp_servers[] pointer is used as an in-band tag
 * for "epoll_add_event failed, skip on removal"; the low bit of
 * epoll_event.data.ptr carries a staleness tag matched against the
 * socket's m_stale field.
 *
 * Returns HIXO_OK, or HIXO_ERROR if epoll_wait failed (EINTR excepted).
 */
int epoll_process_events(int timer)
{
    int rslt = HIXO_OK;
    int nevents = 0;
    int tmp_err = 0;
    hixo_conf_t *p_conf = g_rt_ctx.mp_conf;

    /* only compete for the accept lock while this process still has
       "power" left */
    if ((gp_ps_info->m_power > 0) && spinlock_try(g_rt_ctx.mp_accept_lock)) {
        s_epoll_private.m_hold_lock = TRUE;
    } else {
        s_epoll_private.m_hold_lock = FALSE;
    }

    // register the listening sockets for event watching (lock holder only)
    if (s_epoll_private.m_hold_lock) {
        for (int i = 0; i < g_rt_ctx.m_nservers; ++i) {
            assert(0 == (((uintptr_t)g_rt_ctx.mpp_servers[i]) & 1));
            if (HIXO_ERROR == epoll_add_event(g_rt_ctx.mpp_servers[i])) {
                /* tag the pointer's low bit so the removal pass below
                   knows this one was never added */
                g_rt_ctx.mpp_servers[i] = (hixo_socket_t *)(
                    ((uintptr_t)g_rt_ctx.mpp_servers[i]) | 1
                );
                break;
            }
        }
    }

    assert(NULL == g_rt_ctx.mp_posted_events);

    errno = 0;
    nevents = epoll_wait(s_epoll_private.m_epfd,
                         s_epoll_private.mp_epevs,
                         p_conf->m_max_connections,
                         timer);
    tmp_err = errno;  /* save errno before later calls can clobber it */

    // remove the listening-socket event watches again (lock holder only)
    if (s_epoll_private.m_hold_lock) {
        for (int i = 0; i < g_rt_ctx.m_nservers; ++i) {
            if (((uintptr_t)g_rt_ctx.mpp_servers[i]) & 1) {
                /* tagged above: never added, just clear the tag */
                g_rt_ctx.mpp_servers[i] = (hixo_socket_t *)(
                    ((uintptr_t)g_rt_ctx.mpp_servers[i]) & (~1)
                );
                continue;
            }
            if (HIXO_ERROR == epoll_del_event(g_rt_ctx.mpp_servers[i])) {
                break;
            }
        }
    }

    if (tmp_err) {
        if (EINTR == tmp_err) {
            (void)fprintf(stderr, "[INFO] epoll_wait interupted\n");
            goto EXIT;
        } else {
            rslt = HIXO_ERROR;
            (void)fprintf(stderr, "[ERROR] epoll_wait failed: %d\n", tmp_err);
            goto EXIT;
        }
    }

    if (0 == nevents) { // timeout
        goto EXIT;
    }

    // process the collected events
    for (int i = 0; i < nevents; ++i) {
        struct epoll_event *p_epev = &s_epoll_private.mp_epevs[i];
        /* data.ptr carries a staleness tag in its low bit; a mismatch
           with the socket's current m_stale means the event belongs to
           a recycled connection and must be skipped */
        uintptr_t stale = ((uintptr_t)p_epev->data.ptr) & 1;
        hixo_socket_t *p_sock =
            (hixo_socket_t *)((uintptr_t)p_epev->data.ptr & (~1));

        if ((-1 == p_sock->m_fd) || (stale != p_sock->m_stale)) {
            continue;
        }

        /* on error/hangup, force both directions so the handlers run
           and can observe the failure */
        if ((HIXO_EVENT_ERR | HIXO_EVENT_HUP) & p_epev->events) {
            p_epev->events |= HIXO_EVENT_IN | HIXO_EVENT_OUT;
        }

        p_sock->m_readable = (HIXO_EVENT_IN & p_epev->events) ? 1U : 0U;
        p_sock->m_writable = (HIXO_EVENT_OUT & p_epev->events) ? 1U : 0U;

        if (p_sock->m_writable || p_sock->m_readable) {
            add_node(&g_rt_ctx.mp_posted_events, &p_sock->m_posted_node);
        }
    }

EXIT:
    if (s_epoll_private.m_hold_lock) {
        // only the process holding the lock may unlock it
        (void)spinlock_unlock(g_rt_ctx.mp_accept_lock);
        s_epoll_private.m_hold_lock = FALSE;
    }

    return rslt;
}