/*
 * Free a connector and everything it owns.
 *
 * Must only be called once no callers can wake on c->connecting.
 * Teardown order matters: the socket/watched fd is dropped first,
 * then the synchronization/err members are finalized, the addrinfo
 * list is released, and finally the struct itself is freed.
 */
static void connector_destroy(struct connector *c)
{
	/* Dispose of any in-progress socket and its watched fd. */
	connector_close(c);
	wait_list_fini(&c->connecting);
	tasklet_fini(&c->tasklet);
	error_fini(&c->err);
	/* c->fai is presumably a freeaddrinfo-style callback that releases
	 * the resolved address list — TODO confirm against where it is set. */
	c->fai(c->addrinfos);
	free(c);
}
/*
 * Try each remaining resolved address in turn until a connect() is
 * in flight (or has completed), or the address list is exhausted.
 *
 * Exits in one of three states:
 *  - immediate connect success: wakes waiters on c->connecting;
 *  - connect in progress (EINPROGRESS/would-block): arms the watched
 *    fd for writeability, which signals connection establishment;
 *  - all addresses failed: c->err holds the last failure and all
 *    waiters on the socket are woken to observe it.
 */
static void start_connecting(struct connector *c)
{
	for (;;) {
		struct addrinfo *ai = c->next_addrinfo;
		if (!ai)
			break;

		/* If we have an existing connecting socket, dispose of it. */
		connector_close(c);

		c->next_addrinfo = ai->ai_next;
		/* Clear any error left over from the previous address attempt. */
		error_reset(&c->err);
		c->fd = make_socket(ai->ai_family, ai->ai_socktype, &c->err);
		if (c->fd < 0)
			continue; /* socket creation failed; try next address */

		c->watched_fd = watched_fd_create(c->fd, connector_handle_events, c);
		if (connect(c->fd, ai->ai_addr, ai->ai_addrlen) >= 0) {
			/* Immediately connected.  Not sure this can
			   actually happen. */
			wait_list_up(&c->connecting, 1);
			return;
		}
		else if (would_block()) {
			/* Writeability will indicate that the connection has
			 * been established. */
			if (!watched_fd_set_interest(c->watched_fd,
						     WATCHED_FD_OUT, &c->err))
				/* Give up and propagate the error */
				break;

			return;
		}
		else {
			/* Hard connect failure; record it and try the
			 * next address on the following iteration. */
			error_errno(&c->err, "connect");
		}
	}

	/* Ran out of addresses to try, so we are done.  We should
	   have an error to report. */
	assert(!error_ok(&c->err));
	simple_socket_wake_all(&c->socket->base);
}
/*
 * Completion handler for an asynchronous connect() to redis.
 *
 * Reads SO_ERROR to learn the outcome of the non-blocking connect:
 *  - success: switch interest from write to read and mark the
 *    connector as running;
 *  - connect failed (SO_ERROR != 0): close the connector and log;
 *  - getsockopt itself failed: previously this was silently ignored,
 *    leaving the connector stuck in its connecting state with write
 *    interest still armed — now it is closed and logged like any
 *    other connect failure.
 */
static void connect_redis_done(connector_t pconredis)
{
	int error = 0;
	socklen_t len = sizeof(int);

	if (getsockopt(pconredis->sockfd, SOL_SOCKET, SO_ERROR,
		       &error, &len) != 0) {
		/* Could not even query the socket state; treat as a
		 * failed connect rather than swallowing the error. */
		connector_close(pconredis);
		print_log(LOG_TYPE_ERROR,
			  "connect redis getsockopt error, ip %s, port %d, errno = %d, file = %s, line = %d",
			  pconredis->ip, pconredis->port, errno,
			  __FILE__, __LINE__);
		return;
	}

	if (error == 0) {
		/* Connection established: stop watching for writeability,
		 * start watching for readable replies. */
		connector_sig_read(pconredis);
		connector_unsig_write(pconredis);
		pconredis->state = CONN_STATE_RUN;
	} else {
		connector_close(pconredis);
		print_log(LOG_TYPE_ERROR,
			  "connect redis error, ip %s, port %d, file = %s, line = %d",
			  pconredis->ip, pconredis->port, __FILE__, __LINE__);
	}
}
/*
 * Release the resources owned by a worker: its redis connector, its
 * connector hash table, and its pending-callback list.
 *
 * NOTE(review): the worker struct itself is not freed here — presumably
 * the caller owns pworker's storage; confirm against the allocation site.
 */
void worker_close(worker_t pworker)
{
	connector_close(pworker->redis);
	ht_destroy(pworker->pht);
	list_free(pworker->plist);
}
//这种异步非阻塞的模式,带来高性能的同时需要开设空间保存还在等待异步返回的数据,如:redis回调的顺序链表,保存connector的哈希表 void * worker_loop(void *param) { worker_t pworker = (worker_t)param; pworker->tid = pthread_self(); int nfds = 0; int timeout = 100; struct epoll_event evs[4096]; connector_t pconn = NULL; int i; while (1) { nfds = epoll_wait(pworker->epfd, evs, 4096, timeout); if (nfds == -1) { if (errno == EINTR) continue; print_log(LOG_TYPE_ERROR, "worker epoll_wait error, epfd = %d, errno = %d", pworker->epfd, errno); break; } for (i = 0; i < nfds; i++) { pconn = (connector_t)evs[i].data.ptr; if (evs[i].events & EPOLLIN) { worker_handle_read(pconn, evs[i].events); } if (evs[i].events & EPOLLOUT) { worker_handle_write(pconn); } if ((evs[i].events & EPOLLERR) || (evs[i].events & EPOLLHUP)) { print_log(LOG_TYPE_DEBUG, "EPOLLERR Or EPOLLHUP Event Occure"); pworker->neterr_count++; connector_close(pconn); } if (evs[i].events & EPOLLRDHUP) { connector_unsig_read(pconn); connector_unsig_rdhup(pconn); //可以在应用层面(写缓冲区)检查数据是否已经完全发出,server发出去,系统层面会在close后根据SO_LINGER的设置处理 print_log(LOG_TYPE_DEBUG, "EPOLLRDHUP Event Occure"); pworker->closed_count++; if (buffer_readable(pconn->pwritebuf) > 0) connector_write(pconn); else connector_close(pconn); } } handle_time_check(pworker); } return NULL; }