/**
 * Close callback installed on an ACL_VSTREAM managed by the IOCP event
 * engine: closes the underlying socket/file handle, releases (or marks
 * dead) the overlapped I/O structures, and unlinks the fd table entry
 * from the engine's fd sets and ready set.
 * @param stream {ACL_VSTREAM*} the stream being closed
 * @param arg {void*} the owning EVENT_KERNEL object
 */
static void stream_on_close(ACL_VSTREAM *stream, void *arg)
{
	const char *myname = "stream_on_close";
	EVENT_KERNEL *ev = (EVENT_KERNEL*) arg;
	ACL_EVENT_FDTABLE *fdp = (ACL_EVENT_FDTABLE*) stream->fdp;
	ACL_SOCKET sockfd = ACL_VSTREAM_SOCK(stream);
	BOOL is_completed;

	if (fdp == NULL)
		acl_msg_fatal("%s(%d): fdp null, sockfd(%d)",
			myname, __LINE__, sockfd);

	if (fdp->h_iocp != NULL) {
		fdp->h_iocp = NULL;
		fdp->flag &= ~EVENT_FDTABLE_FLAG_IOCP;
	}

	/* On Windows XP this macro must be queried BEFORE the socket is
	 * closed, to learn whether the overlapped read I/O is still in
	 * STATUS_PENDING state.
	 * Fix: the original dereferenced fdp->event_read unconditionally;
	 * guard against NULL (the free path below already allows a NULL
	 * event_read, so it can legitimately be absent here).
	 */
	if (fdp->event_read != NULL)
		is_completed = HasOverlappedIoCompleted(
			&fdp->event_read->overlapped);
	else
		is_completed = TRUE;

	/* The handle must be closed before fdp->event_read and
	 * fdp->event_write are released below.
	 */
	if (ACL_VSTREAM_SOCK(stream) != ACL_SOCKET_INVALID
		&& stream->close_fn)
	{
		(void) stream->close_fn(ACL_VSTREAM_SOCK(stream));
	} else if (ACL_VSTREAM_FILE(stream) != ACL_FILE_INVALID
		&& stream->fclose_fn)
	{
		(void) stream->fclose_fn(ACL_VSTREAM_FILE(stream));
	}
	ACL_VSTREAM_SOCK(stream) = ACL_SOCKET_INVALID;
	ACL_VSTREAM_FILE(stream) = ACL_FILE_INVALID;

	if (fdp->event_read) {
		/* If the completion-port I/O is still pending, the
		 * overlapped structure must not be freed here; mark it dead
		 * so the main loop frees it after GetQueuedCompletionStatus
		 * hands it back.
		 */
		if (is_completed)
			acl_myfree(fdp->event_read);
		else {
			fdp->event_read->type = IOCP_EVENT_DEAD;
			fdp->event_read->fdp = NULL;
		}
		fdp->event_read = NULL;
	}

	if (fdp->event_write) {
		/* Same pending-I/O rule as above, for the write side. */
		if (HasOverlappedIoCompleted(&fdp->event_write->overlapped))
			acl_myfree(fdp->event_write);
		else {
			fdp->event_write->type = IOCP_EVENT_DEAD;
			fdp->event_write->fdp = NULL;
		}
		fdp->event_write = NULL;
	}

	if ((fdp->flag & EVENT_FDTABLE_FLAG_DELAY_OPER)) {
		fdp->flag &= ~EVENT_FDTABLE_FLAG_DELAY_OPER;
		acl_ring_detach(&fdp->delay_entry);
	}

	/* Fix: compare against the socket saved on entry -- the stream's
	 * socket member was reset to ACL_SOCKET_INVALID above, so the
	 * original comparison with ACL_VSTREAM_SOCK(fdp->stream) could
	 * never match the real descriptor.
	 */
	if (ev->event.maxfd == sockfd)
		ev->event.maxfd = ACL_SOCKET_INVALID;

	/* Remove fdp from fdtabs by swapping in the last entry. */
	if (fdp->fdidx >= 0 && fdp->fdidx < --ev->event.fdcnt) {
		ev->event.fdtabs[fdp->fdidx] =
			ev->event.fdtabs[ev->event.fdcnt];
		ev->event.fdtabs[fdp->fdidx]->fdidx = fdp->fdidx;
	}
	fdp->fdidx = -1;

	/* Drop any pending ready-set reference to this fd table entry. */
	if (fdp->fdidx_ready >= 0
		&& fdp->fdidx_ready < ev->event.fdcnt_ready
		&& ev->event.fdtabs_ready[fdp->fdidx_ready] == fdp)
	{
		ev->event.fdtabs_ready[fdp->fdidx_ready] = NULL;
	}
	fdp->fdidx_ready = -1;

	event_fdtable_free(fdp);
	stream->fdp = NULL;
}
/**
 * One iteration of the IOCP event engine: compute the poll timeout from
 * the nearest timer, fire expired timers, drain the completion port,
 * and dispatch the collected ready events.
 * @param eventp {ACL_EVENT*} the event engine object (an EVENT_KERNEL)
 */
static void event_loop(ACL_EVENT *eventp)
{
	const char *myname = "event_loop";
	EVENT_KERNEL *ev = (EVENT_KERNEL *) eventp;
	ACL_EVENT_NOTIFY_TIME timer_fn;
	void *timer_arg;
	ACL_EVENT_TIMER *timer;
	int   delay;
	ACL_EVENT_FDTABLE *fdp;

	/* convert the configured delay to milliseconds for GQCS */
	delay = (int) (eventp->delay_sec * 1000 + eventp->delay_usec / 1000);
	if (delay < 0)
		delay = 0; /* 0 milliseconds at least */

	SET_TIME(eventp->present);

	/*
	 * Find out when the next timer would go off. Timer requests are
	 * sorted. If any timer is scheduled, adjust the delay appropriately.
	 */
	if ((timer = ACL_FIRST_TIMER(&eventp->timer_head)) != 0) {
		acl_int64 n = (timer->when - eventp->present) / 1000;

		if (n <= 0)
			delay = 0;
		else if ((int) n < delay)
			delay = (int) n;
	}

	eventp->nested++;

	event_set_all(eventp);

	if (eventp->fdcnt == 0) {
		if (eventp->fdcnt_ready == 0)
			sleep(1);
		goto TAG_DONE;
	}

	/* events are already pending: poll the completion port without
	 * blocking
	 */
	if (eventp->fdcnt_ready > 0)
		delay = 0;

TAG_DONE:

	/*
	 * Deliver timer events. Requests are sorted: we can stop when we
	 * reach the future or the list end. Allow the application to update
	 * the timer queue while it is being called back. To this end, we
	 * repeatedly pop the first request off the timer queue before
	 * delivering the event to the application.
	 */
	SET_TIME(eventp->present);

	while ((timer = ACL_FIRST_TIMER(&eventp->timer_head)) != 0) {
		if (timer->when > eventp->present)
			break;

		timer_fn  = timer->callback;
		timer_arg = timer->context;

		/* If the timer has a positive interval and is allowed to
		 * repeat, re-arm it before invoking the callback.
		 */
		if (timer->delay > 0 && timer->keep) {
			timer->ncount++;
			eventp->timer_request(eventp, timer->callback,
				timer->context, timer->delay, timer->keep);
		} else {
			acl_ring_detach(&timer->ring);  /* first this */
			timer->nrefer--;
			if (timer->nrefer != 0)
				acl_msg_fatal("%s(%d): nrefer(%d) != 0",
					myname, __LINE__, timer->nrefer);
			acl_myfree(timer);
		}
		timer_fn(ACL_EVENT_TIME, eventp, timer_arg);
	}

	for (;;) {
		BOOL  isSuccess = FALSE;
		DWORD bytesTransferred = 0;
		IOCP_EVENT *iocp_event = NULL;

		/* Fix: the completion key must be received through a
		 * ULONG_PTR, not a DWORD -- the original (DWORD*) &fdp cast
		 * writes only 4 bytes of the 8-byte pointer on 64-bit
		 * Windows, leaving fdp truncated/garbage.
		 */
		isSuccess = GetQueuedCompletionStatus(ev->h_iocp,
			&bytesTransferred, (PULONG_PTR) &fdp,
			(OVERLAPPED**) &iocp_event, delay);

		if (!isSuccess) {
			/* NULL overlapped means GQCS timed out: stop
			 * draining
			 */
			if (iocp_event == NULL)
				break;

			if (iocp_event->type == IOCP_EVENT_DEAD)
				acl_myfree(iocp_event);
			else if (iocp_event->fdp == NULL) {
				acl_msg_warn("%s(%d): fdp null",
					myname, __LINE__);
				acl_myfree(iocp_event);
			} else if (iocp_event->fdp != fdp)
				acl_msg_fatal("%s(%d): invalid fdp",
					myname, __LINE__);
			else if (!(fdp->event_type
				& (ACL_EVENT_XCPT | ACL_EVENT_RW_TIMEOUT)))
			{
				fdp->event_type |= ACL_EVENT_XCPT;
				fdp->fdidx_ready = eventp->fdcnt_ready;
				eventp->fdtabs_ready[eventp->fdcnt_ready]
					= fdp;
				eventp->fdcnt_ready++;
			}
			continue;
		}

		acl_assert(fdp == iocp_event->fdp);

		/* skip fds already flagged with an exception or timeout */
		if ((fdp->event_type
			& (ACL_EVENT_XCPT | ACL_EVENT_RW_TIMEOUT)))
		{
			continue;
		}

		if (iocp_event->type == IOCP_EVENT_READ) {
			acl_assert(fdp->event_read == iocp_event);
			iocp_event->type &= ~IOCP_EVENT_READ;
			fdp->stream->sys_read_ready = 1;
			if ((fdp->event_type
				& (ACL_EVENT_READ | ACL_EVENT_WRITE)) == 0)
			{
				fdp->event_type |= ACL_EVENT_READ;
				fdp->fdidx_ready = eventp->fdcnt_ready;
				eventp->fdtabs_ready[eventp->fdcnt_ready]
					= fdp;
				eventp->fdcnt_ready++;
			}
		}

		if (iocp_event->type == IOCP_EVENT_WRITE) {
			acl_assert(fdp->event_write == iocp_event);
			iocp_event->type &= ~IOCP_EVENT_WRITE;
			if ((fdp->event_type
				& (ACL_EVENT_READ | ACL_EVENT_WRITE)) == 0)
			{
				fdp->event_type |= ACL_EVENT_WRITE;
				fdp->fdidx_ready = eventp->fdcnt_ready;
				eventp->fdtabs_ready[eventp->fdcnt_ready]
					= fdp;
				eventp->fdcnt_ready++;
			}
		}

		/* after the first completion, drain the rest of the port
		 * without blocking
		 */
		delay = 0;
	}

	if (eventp->fdcnt_ready > 0)
		event_fire(eventp);

	eventp->nested--;
}
/**
 * One iteration of the thread-safe select() event engine: compute the
 * select timeout from the nearest timer, poll the registered fds,
 * collect the ready ones, then fire expired timers and ready handlers.
 * Timer-queue access is serialized with tm_mutex and fd-table access
 * with tb_mutex; select() itself runs with no lock held.
 * @param eventp {ACL_EVENT*} the event engine object (EVENT_SELECT_THR)
 */
static void event_loop(ACL_EVENT *eventp)
{
	const char *myname = "event_loop";
	EVENT_SELECT_THR *event_thr = (EVENT_SELECT_THR *) eventp;
	ACL_EVENT_NOTIFY_TIME timer_fn;
	void    *timer_arg;
	ACL_SOCKET sockfd;
	ACL_EVENT_TIMER *timer;
	int   select_delay, nready, i;
	ACL_EVENT_FDTABLE *fdp;
	ACL_RING timer_ring, *entry_ptr;
	struct timeval tv, *tvp;
	fd_set rmask;  /* enabled read events */
	fd_set wmask;  /* enabled write events */
	fd_set xmask;  /* for bad news mostly */

	acl_ring_init(&timer_ring);

	SET_TIME(eventp->present);

	THREAD_LOCK(&event_thr->event.tm_mutex);

	/*
	 * Find out when the next timer would go off. Timer requests are
	 * sorted. If any timer is scheduled, adjust the delay appropriately.
	 */
	if ((timer = ACL_FIRST_TIMER(&eventp->timer_head)) != 0) {
		/* round the remaining microseconds up to whole seconds */
		select_delay = (int) ((timer->when - eventp->present
			+ 1000000 - 1) / 1000000);
		if (select_delay < 0)
			select_delay = 0;
		else if (eventp->delay_sec >= 0
			&& select_delay > eventp->delay_sec)
		{
			select_delay = eventp->delay_sec;
		}
	} else
		select_delay = eventp->delay_sec;

	THREAD_UNLOCK(&event_thr->event.tm_mutex);

	THREAD_LOCK(&event_thr->event.tb_mutex);

	eventp->ready_cnt = 0;

	if (event_thr_prepare(eventp) == 0) {
		THREAD_UNLOCK(&event_thr->event.tb_mutex);

		if (eventp->ready_cnt == 0) {
			/* Fix: select_delay is already expressed in seconds
			 * (it is assigned to tv.tv_sec below); the original
			 * divided it by 1000000 a second time here, which
			 * always yielded 0 and forced a fixed one-second
			 * sleep regardless of the requested timeout.
			 */
			if (select_delay <= 0)
				select_delay = 1;
			sleep((int) select_delay);
		}

		nready = 0;
		goto TAG_DONE;
	}

	if (eventp->ready_cnt > 0) {
		/* events already pending: poll without blocking */
		tv.tv_sec  = 0;
		tv.tv_usec = 0;
		tvp = &tv;
	} else if (select_delay < 0) {
		tvp = NULL;  /* block in select() indefinitely */
	} else {
		tv.tv_sec  = select_delay;
		tv.tv_usec = eventp->delay_usec;
		tvp = &tv;
	}

	/* snapshot the masks while the table lock is held; select() itself
	 * runs unlocked on the copies
	 */
	rmask = event_thr->rmask;
	wmask = event_thr->wmask;
	xmask = event_thr->xmask;

	THREAD_UNLOCK(&event_thr->event.tb_mutex);

	event_thr->event.blocked = 1;
	nready = select((int) eventp->maxfd + 1,
			&rmask, &wmask, &xmask, tvp);
	event_thr->event.blocked = 0;

	if (nready < 0) {
		if (acl_last_error() != ACL_EINTR)
			acl_msg_fatal("%s(%d), %s: event_loop: select: %s",
				__FILE__, __LINE__, myname,
				acl_last_serror());
		goto TAG_DONE;
	} else if (nready == 0)
		goto TAG_DONE;

	THREAD_LOCK(&event_thr->event.tb_mutex);

	for (i = 0; i < eventp->fdcnt; i++) {
		fdp = eventp->fdtabs[i];

		/* if fdp has been set in eventp->ready ? */
		if ((fdp->event_type
			& (ACL_EVENT_XCPT | ACL_EVENT_RW_TIMEOUT)))
		{
			continue;
		}

		sockfd = ACL_VSTREAM_SOCK(fdp->stream);

		if (FD_ISSET(sockfd, &xmask)) {
			fdp->event_type |= ACL_EVENT_XCPT;
			fdp->fdidx_ready = eventp->ready_cnt;
			eventp->ready[eventp->ready_cnt++] = fdp;
			continue;
		}

		if (FD_ISSET(sockfd, &rmask)) {
			/* has been set in ready ? */
			if ((fdp->event_type & ACL_EVENT_READ) == 0) {
				fdp->event_type |= ACL_EVENT_READ;
				fdp->fdidx_ready = eventp->ready_cnt;
				eventp->ready[eventp->ready_cnt++] = fdp;
			}

			if (fdp->listener)
				fdp->event_type |= ACL_EVENT_ACCEPT;
			else
				fdp->stream->read_ready = 1;
		} else if (fdp->w_callback && FD_ISSET(sockfd, &wmask)) {
			fdp->event_type |= ACL_EVENT_WRITE;
			fdp->fdidx_ready = eventp->ready_cnt;
			eventp->ready[eventp->ready_cnt++] = fdp;
		}
	}

	THREAD_UNLOCK(&event_thr->event.tb_mutex);

TAG_DONE:

	/*
	 * Deliver timer events. Requests are sorted: we can stop when we
	 * reach the future or the list end. Allow the application to update
	 * the timer queue while it is being called back. To this end, we
	 * repeatedly pop the first request off the timer queue before
	 * delivering the event to the application.
	 */
	SET_TIME(eventp->present);

	THREAD_LOCK(&event_thr->event.tm_mutex);

	while ((timer = ACL_FIRST_TIMER(&eventp->timer_head)) != 0) {
		if (timer->when > eventp->present)
			break;

		acl_ring_detach(&timer->ring);  /* first this */
		acl_ring_prepend(&timer_ring, &timer->ring);
	}

	THREAD_UNLOCK(&event_thr->event.tm_mutex);

	/* fire the expired timers with no lock held so a callback may
	 * register new timers without deadlocking on tm_mutex
	 */
	while (1) {
		entry_ptr = acl_ring_pop_head(&timer_ring);
		if (entry_ptr == NULL)
			break;

		timer     = ACL_RING_TO_TIMER(entry_ptr);
		timer_fn  = timer->callback;
		timer_arg = timer->context;

		timer_fn(ACL_EVENT_TIME, eventp, timer_arg);
		acl_myfree(timer);
	}

	if (eventp->ready_cnt > 0)
		event_thr_fire(eventp);
}