/*
 * Remove a file descriptor from an epoll instance.
 *
 * efd: the epoll instance fd; fd: the descriptor to detach.
 * Returns the epoll_ctl() result (0 on success, -1 on error with errno set).
 * Note: the event argument may be NULL for EPOLL_CTL_DEL on Linux >= 2.6.9.
 */
int hammer_epoll_del(int efd, int fd)
{
	int rc = epoll_ctl(efd, EPOLL_CTL_DEL, fd, NULL);

	HAMMER_TRACE("Epoll, removing fd %i from efd %i", fd, efd);
	return rc;
}
int hammer_epoll_change_mode(int efd, int fd, int mode, int behavior) { int ret; struct epoll_event event = {0, {0}}; event.events = EPOLLERR | EPOLLHUP; event.data.fd = fd; switch (mode) { case HAMMER_EPOLL_READ: HAMMER_TRACE("[FD %i] EPoll changing mode to READ", fd); event.events |= EPOLLIN; break; case HAMMER_EPOLL_WRITE: HAMMER_TRACE("[FD %i] EPoll changing mode to WRITE", fd); event.events |= EPOLLOUT; break; case HAMMER_EPOLL_RW: HAMMER_TRACE("[FD %i] Epoll changing mode to READ/WRITE", fd); event.events |= EPOLLIN | EPOLLOUT; break; case HAMMER_EPOLL_SLEEP: HAMMER_TRACE("[FD %i] Epoll changing mode to DISABLE", fd); event.events = 0; printf("epoll sleep? \n"); break; case HAMMER_EPOLL_WAKEUP: printf("epoll wakeup? \n"); break; } if (behavior == HAMMER_EPOLL_EDGE_TRIGGERED) { event.events |= EPOLLET; } /* Update epoll fd events */ ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &event); return ret; }
/*
 * Register fd with an epoll instance.
 *
 * init_mode selects the initial interest set (READ/WRITE/RW/SLEEP);
 * behavior == HAMMER_EPOLL_EDGE_TRIGGERED adds EPOLLET; user_ptr is stored
 * in event.data.ptr and is what epoll_wait() hands back for this fd.
 * Returns the epoll_ctl() result; EEXIST is tolerated silently.
 *
 * FIX: removed the dead store `event.data.fd = fd` — epoll_data_t is a
 * union, so the later `event.data.ptr = user_ptr` clobbered it anyway.
 * FIX: EPOLLET is now applied after the switch, so HAMMER_EPOLL_SLEEP
 * (events = 0) no longer wipes it — consistent with
 * hammer_epoll_change_mode().
 */
int hammer_epoll_add(int efd, int fd, int init_mode, int behavior, void *user_ptr)
{
	int ret;
	struct epoll_event event = {0, {0}};

	event.data.ptr = user_ptr;
	event.events = EPOLLERR | EPOLLHUP | EPOLLRDHUP;

	switch (init_mode) {
	case HAMMER_EPOLL_READ:
		event.events |= EPOLLIN;
		break;
	case HAMMER_EPOLL_WRITE:
		event.events |= EPOLLOUT;
		break;
	case HAMMER_EPOLL_RW:
		event.events |= EPOLLIN | EPOLLOUT;
		break;
	case HAMMER_EPOLL_SLEEP:
		/* Start disabled; a later change_mode() enables notifications. */
		event.events = 0;
		break;
	default:
		break;
	}

	if (behavior == HAMMER_EPOLL_EDGE_TRIGGERED) {
		event.events |= EPOLLET;
	}

	/* Add to epoll queue */
	ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &event);
	if (hammer_unlikely(ret < 0 && errno != EEXIST)) {
		HAMMER_TRACE("[FD %i] epoll_ctl() %s", fd, strerror(errno));
	}
	return ret;
}
/*
 * Worker event loop: wait on efd and dispatch readiness events to the
 * per-connection handlers in `handler`.
 *
 * Each ready event carries a hammer_connection_t* in data.ptr (stored by
 * hammer_epoll_add()).  EPOLLIN/EPOLLOUT dispatch to the client/server
 * read/write callbacks depending on the connection type; HUP/ERR/RDHUP go
 * to the error callback; a negative handler return forces a close.
 * Never returns under normal operation (infinite loop); the trailing
 * return NULL only satisfies the pthread-style signature.
 *
 * FIX: pass `events` (the array) to hammer_epoll_wait(), not `&events` —
 * the loop indexes events[i] directly, so the wrapper must receive the
 * buffer itself (this was flagged by the original FIXME).
 * FIX: the FORCE CLOSE trace referenced an undeclared variable `fd`
 * (compile error with tracing enabled); the message now reports ret only.
 * FIX: fatal error paths now exit(1) instead of exit(0) so the process
 * does not report success; the allocation result is checked.
 */
void *hammer_epoll_start(int efd, hammer_epoll_handlers_t *handler, int max_events)
{
	int i, ret = -1;
	int num_events;
	struct epoll_event *events;
	hammer_connection_t *c;
	// int fds_timeout;

	//fds_timeout = log_current_utime + config->timeout;

	events = hammer_mem_malloc(max_events * sizeof(struct epoll_event));
	if (hammer_unlikely(!events)) {
		hammer_err("failed to allocate epoll event buffer\n");
		return NULL;
	}

	while (1) {
		if (config->gpu) {
			/* Each time, we first check if GPU has gave any indication for
			   1) which buffer is taken, 2) which buffer has been processed */
			if (hammer_batch_if_gpu_processed_new()) {
				hammer_batch_forwarding();
			}
		}

		/* A negative return (error) simply skips the dispatch loop below. */
		num_events = hammer_epoll_wait(efd, events, max_events);

		for (i = 0; i < num_events; i++) {
			c = (hammer_connection_t *) events[i].data.ptr;

			if (events[i].events & EPOLLIN) {
				if (c->type == HAMMER_CONN_CLIENT) {
					ret = (*handler->client_read) (c);
				} else {
					if (c->type != HAMMER_CONN_SERVER) {
						hammer_err("this connection is not a server conn?\n");
						exit(1);
					}
					ret = (*handler->server_read) (c);
				}
			} else if (events[i].events & EPOLLOUT) {
				if (c->type == HAMMER_CONN_CLIENT) {
					ret = (*handler->client_write) (c);
				} else {
					if (c->type != HAMMER_CONN_SERVER) {
						hammer_err("this connection is not a server conn?\n");
						exit(1);
					}
					ret = (*handler->server_write) (c);
				}
			} else if (events[i].events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP)) {
				ret = (*handler->error) (c);
			} else {
				hammer_err("What's up man, error here\n");
				exit(1);
			}

			if (ret < 0) {
				HAMMER_TRACE("Epoll Event FORCE CLOSE | ret = %i", ret);
				(*handler->close) (c);
			}
		}

		// FIXME: enable timeout
		/* Check timeouts and update next one
		if (log_current_utime >= fds_timeout) {
			hammer_sched_check_timeouts(sched);
			fds_timeout = log_current_utime + config->timeout;
		}*/
	}

	/* Unreachable: the loop above never breaks. */
	return NULL;
}