VOID LwRtlCancelTaskGroup(PLW_TASK_GROUP group)
{
    PRING ring = NULL;
    PLW_TASK pTask = NULL;
    ULONG i = 0;

    LOCK_GROUP(group);

    group->bCancelled = TRUE;

    /* Lock every event thread so the trigger bits are set atomically
       with respect to all event loops */
    for (i = 0; i < group->pPool->ulEventThreadCount; i++) {
        LOCK_THREAD(&group->pPool->pEventThreads[i]);
    }

    for (ring = group->Tasks.pNext; ring != &group->Tasks; ring = ring->pNext) {
        pTask = LW_STRUCT_FROM_FIELD(ring, SELECT_TASK, GroupRing);
        pTask->TriggerSet |= LW_TASK_EVENT_EXPLICIT | LW_TASK_EVENT_CANCEL;
    }

    for (i = 0; i < group->pPool->ulEventThreadCount; i++) {
        SignalThread(&group->pPool->pEventThreads[i]);
        UNLOCK_THREAD(&group->pPool->pEventThreads[i]);
    }

    UNLOCK_GROUP(group);
}
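The LOCK_*/UNLOCK_* macros used throughout the LwRtl snippets are not defined in this excerpt. Because the code below pairs pthread_cond_wait() with pThread->Lock and group->Lock, they plausibly expand to plain pthread mutex operations; the memcached-style snippets further down define their own LOCK_THREAD/UNLOCK_THREAD over LIBEVENT_THREAD, which is not shown here. A minimal sketch under that assumption:

/* Hypothetical definitions -- the real ones live in the thread-pool headers. */
#define LOCK_THREAD(pThread)     (pthread_mutex_lock(&(pThread)->Lock))
#define UNLOCK_THREAD(pThread)   (pthread_mutex_unlock(&(pThread)->Lock))
#define LOCK_GROUP(pGroup)       (pthread_mutex_lock(&(pGroup)->Lock))
#define UNLOCK_GROUP(pGroup)     (pthread_mutex_unlock(&(pGroup)->Lock))
#define LOCK_POOL(pPool)         (pthread_mutex_lock(&(pPool)->Lock))
#define UNLOCK_POOL(pPool)       (pthread_mutex_unlock(&(pPool)->Lock))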
void NotifyTaskUnixSignal(PLW_TASK pTask, siginfo_t* pInfo)
{
    LOCK_THREAD(pTask->pThread);

    if (pTask->EventSignal != TASK_COMPLETE_MASK) {
        /* Wait for the previously delivered siginfo to be consumed
           before overwriting the slot */
        while (pTask->pUnixSignal->si_signo) {
            pthread_cond_wait(&pTask->pThread->Event, &pTask->pThread->Lock);

            if (pTask->EventSignal == TASK_COMPLETE_MASK) {
                goto cleanup;
            }
        }

        *pTask->pUnixSignal = *pInfo;
        pTask->EventSignal |= LW_TASK_EVENT_UNIX_SIGNAL;
        RingRemove(&pTask->SignalRing);
        RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing);
        SignalThread(pTask->pThread);
    }

cleanup:

    UNLOCK_THREAD(pTask->pThread);
}
LW_BOOLEAN LwRtlNextTaskUnixSignal(LW_IN PLW_TASK pTask, LW_OUT siginfo_t* pInfo)
{
    BOOLEAN bResult = FALSE;

    LOCK_THREAD(pTask->pThread);

    if (pTask->pUnixSignal == NULL || pTask->pUnixSignal->si_signo == 0) {
        /* No signal is pending for this task */
        bResult = FALSE;
    } else {
        if (pInfo) {
            *pInfo = *pTask->pUnixSignal;
        }

        /* Mark the slot empty and wake any notifier waiting to deposit
           another signal */
        pTask->pUnixSignal->si_signo = 0;
        pthread_cond_broadcast(&pTask->pThread->Event);
        bResult = TRUE;
    }

    UNLOCK_THREAD(pTask->pThread);

    return bResult;
}
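A small, hypothetical fragment of a task body showing how LwRtlNextTaskUnixSignal() is meant to be drained: because the function clears si_signo and broadcasts, looping until it returns FALSE consumes the queued signal and unblocks any NotifyTaskUnixSignal() caller waiting to deliver the next one.

    /* Hypothetical fragment from inside a task function */
    siginfo_t info;

    while (LwRtlNextTaskUnixSignal(pTask, &info)) {
        /* react to info.si_signo here */
    }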
void NotifyTaskUnixSignal(PLW_TASK pTask, siginfo_t* pInfo)
{
    LOCK_THREAD(pTask->pThread);

    if (pTask->TriggerSet != TASK_COMPLETE_MASK) {
        /* Wait for the previously delivered siginfo to be consumed
           before overwriting the slot */
        while (pTask->pUnixSignal->si_signo) {
            pthread_cond_wait(&pTask->pThread->Event, &pTask->pThread->Lock);

            if (pTask->TriggerSet == TASK_COMPLETE_MASK) {
                goto cleanup;
            }
        }

        *pTask->pUnixSignal = *pInfo;
        pTask->TriggerSet |= LW_TASK_EVENT_UNIX_SIGNAL;
        SignalThread(pTask->pThread);
    }

cleanup:

    UNLOCK_THREAD(pTask->pThread);
}
VOID LwRtlReleaseTask(PLW_TASK* ppTask)
{
    PLW_TASK pTask = *ppTask;
    int ulRefCount = 0;

    if (pTask) {
        LOCK_THREAD(pTask->pThread);
        ulRefCount = --pTask->ulRefCount;
        if (ulRefCount == 0) {
            RingRemove(&pTask->SignalRing);
        }
        UNLOCK_THREAD(pTask->pThread);

        /* Delete outside the thread lock once the last reference is gone */
        if (ulRefCount == 0) {
            TaskDelete(pTask);
        }

        *ppTask = NULL;
    }
}
VOID LwRtlWaitTaskGroup(PLW_TASK_GROUP group)
{
    PRING pRing = NULL;
    PLW_TASK pTask = NULL;
    BOOLEAN bStillAlive = TRUE;

    LOCK_GROUP(group);

    while (bStillAlive) {
        bStillAlive = FALSE;

        /* Check whether any task in the group has not yet completed */
        for (pRing = group->Tasks.pNext;
             !bStillAlive && pRing != &group->Tasks;
             pRing = pRing->pNext) {
            pTask = LW_STRUCT_FROM_FIELD(pRing, SELECT_TASK, GroupRing);

            LOCK_THREAD(pTask->pThread);
            if (pTask->TriggerSet != TASK_COMPLETE_MASK) {
                bStillAlive = TRUE;
            }
            UNLOCK_THREAD(pTask->pThread);
        }

        if (bStillAlive) {
            pthread_cond_wait(&group->Event, &group->Lock);
        }
    }

    UNLOCK_GROUP(group);
}
VOID LwRtlWakeTask(PLW_TASK pTask)
{
    LOCK_THREAD(pTask->pThread);
    pTask->TriggerSet |= LW_TASK_EVENT_EXPLICIT;
    SignalThread(pTask->pThread);
    UNLOCK_THREAD(pTask->pThread);
}
/*
 * Processes an incoming "handle a new connection" item. This is called when
 * input arrives on the libevent wakeup pipe.
 */
static void thread_libevent_process(evutil_socket_t fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    conn* pending;

    cb_assert(me->type == GENERAL);
    drain_notification_channel(fd);

    if (memcached_shutdown) {
        event_base_loopbreak(me->base);
        return ;
    }

    while ((item = cq_pop(me->new_conn_queue)) != NULL) {
        conn *c = conn_new(item->sfd, item->parent_port, item->init_state,
                           item->event_flags, item->read_buffer_size, me->base);
        if (c == NULL) {
            if (settings.verbose > 0) {
                settings.extensions.logger->log(EXTENSION_LOG_INFO, NULL,
                                                "Can't listen for events on fd %d\n",
                                                item->sfd);
            }
            closesocket(item->sfd);
        } else {
            cb_assert(c->thread == NULL);
            c->thread = me;
        }
        cqi_free(item);
    }

    LOCK_THREAD(me);
    pending = me->pending_io;
    me->pending_io = NULL;
    while (pending != NULL) {
        conn *c = pending;
        cb_assert(me == c->thread);
        pending = pending->next;
        c->next = NULL;

        if (c->sfd != INVALID_SOCKET && !c->registered_in_libevent) {
            /* The socket may have been shut down while we're looping */
            /* in delayed shutdown */
            register_event(c, 0);
        }

        /*
         * We don't want the thread to keep on serving all of the data
         * from the context of the notification pipe, so just let it
         * run one time to set up the correct mask in libevent
         */
        c->nevents = 1;
        run_event_loop(c);
    }
    UNLOCK_THREAD(me);
}
VOID RetainTask(PLW_TASK pTask)
{
    if (pTask) {
        /* The reference count is protected by the owning thread's lock */
        LOCK_THREAD(pTask->pThread);
        ++pTask->ulRefCount;
        UNLOCK_THREAD(pTask->pThread);
    }
}
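A hedged usage sketch of the reference-counting pair formed by RetainTask() above and LwRtlReleaseTask() earlier in this set; PublishTaskPointer() is hypothetical and only stands in for code that hands the task to another component.

/* Hypothetical helper: keep pTask alive while another component also holds it. */
static VOID ShareTask(PLW_TASK pTask)
{
    RetainTask(pTask);            /* take a reference on behalf of the other holder */
    PublishTaskPointer(pTask);    /* hypothetical hand-off */
}

/* When that holder is done, the matching release drops the reference;
   TaskDelete() only runs once the last reference is gone. */
static VOID UnshareTask(PLW_TASK pTask)
{
    LwRtlReleaseTask(&pTask);     /* also NULLs the local pointer */
}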
VOID LwRtlCancelTask(PLW_TASK pTask)
{
    LOCK_THREAD(pTask->pThread);
    pTask->TriggerSet |= LW_TASK_EVENT_EXPLICIT | LW_TASK_EVENT_CANCEL;
    SignalThread(pTask->pThread);
    UNLOCK_THREAD(pTask->pThread);
}
static VOID LockAllThreads(PLW_THREAD_POOL pPool)
{
    ULONG i = 0;

    for (i = 0; i < pPool->ulEventThreadCount; i++) {
        LOCK_THREAD(&pPool->pEventThreads[i]);
    }
}
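The matching unlock helper is not part of this excerpt; a minimal sketch of the obvious counterpart, assuming the same pool layout:

static VOID UnlockAllThreads(PLW_THREAD_POOL pPool)
{
    ULONG i = 0;

    /* Release every event thread lock taken by LockAllThreads() */
    for (i = 0; i < pPool->ulEventThreadCount; i++) {
        UNLOCK_THREAD(&pPool->pEventThreads[i]);
    }
}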
void notify_io_complete(const void *cookie, ENGINE_ERROR_CODE status)
{
    struct conn *conn = (struct conn *)cookie;

    mc_logger->log(EXTENSION_LOG_DEBUG, NULL,
                   "Got notify from %d, status %x\n", conn->sfd, status);

    /*
    ** There may be a race condition between the engine calling this
    ** function and the core closing the connection.
    ** Let's lock the connection structure (this might not be the
    ** correct one) and re-evaluate.
    */
    LIBEVENT_THREAD *thr = conn->thread;
    if (thr == NULL || conn->state == conn_closing) {
        return;
    }

    int notify = 0;

    LOCK_THREAD(thr);
    if (thr != conn->thread || conn->state == conn_closing || !conn->io_blocked) {
        conn->premature_notify_io_complete = true;
        UNLOCK_THREAD(thr);
        mc_logger->log(EXTENSION_LOG_DEBUG, NULL, "Premature notify_io_complete\n");
        return;
    }

    conn->io_blocked = false;
    conn->aiostat = status;
    if (number_of_pending(conn, thr->pending_io) == 0) {
        if (thr->pending_io == NULL) {
            notify = 1;
        }
        conn->next = thr->pending_io;
        thr->pending_io = conn;
    }
    assert(number_of_pending(conn, thr->pending_io) == 1);
    UNLOCK_THREAD(thr);

    /* kick the thread in the butt */
    if (notify && write(thr->notify_send_fd, "", 1) != 1) {
        mc_logger->log(EXTENSION_LOG_WARNING, NULL,
                       "Writing to thread notify pipe: %s", strerror(errno));
    }
}
VOID LwRtlWaitTask(PLW_TASK pTask)
{
    LOCK_THREAD(pTask->pThread);

    /* Block until the task reports completion */
    while (pTask->EventSignal != TASK_COMPLETE_MASK) {
        pthread_cond_wait(&pTask->pThread->Event, &pTask->pThread->Lock);
    }

    UNLOCK_THREAD(pTask->pThread);
}
static VOID ScheduleSignalled(
    PEPOLL_THREAD pThread,
    PRING pRunnable,
    PBOOLEAN pbShutdown
    )
{
    PRING pRing = NULL;
    PRING pNext = NULL;
    PLW_TASK pTask = NULL;
    char c = 0;
    int res = 0;

    LOCK_THREAD(pThread);

    if (pThread->bSignalled) {
        pThread->bSignalled = FALSE;

        /* Drain the wakeup byte from the thread's self-pipe */
        res = read(pThread->SignalFds[0], &c, sizeof(c));
        assert(res == sizeof(c));

        /* Add all signalled tasks to the runnable list */
        for (pRing = pThread->Tasks.pNext; pRing != &pThread->Tasks; pRing = pNext) {
            pNext = pRing->pNext;
            pTask = LW_STRUCT_FROM_FIELD(pRing, EPOLL_TASK, SignalRing);

            RingRemove(&pTask->SignalRing);
            RingRemove(&pTask->QueueRing);

            if (pTask->EventSignal != TASK_COMPLETE_MASK) {
                RingEnqueue(pRunnable, &pTask->QueueRing);
                /* Transfer the signal bits into the event args */
                pTask->EventArgs |= pTask->EventSignal;
                pTask->EventSignal = 0;
            }
        }

        if (pThread->bShutdown && !*pbShutdown) {
            *pbShutdown = pThread->bShutdown;
        }
    }

    UNLOCK_THREAD(pThread);
}
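SignalThread() is not included in this excerpt. The reader side above drains one byte from pThread->SignalFds[0] and clears bSignalled, so a plausible writer-side sketch, assuming SignalFds[1] is the write end of the same self-pipe, is:

/* Sketch only: wake an event thread by writing one byte to its self-pipe.
   bSignalled de-duplicates wakeups until the thread drains the pipe again.
   The caller is assumed to hold the thread lock, as in the snippets above. */
static VOID SignalThread(PEPOLL_THREAD pThread)
{
    char c = 0;
    int res = 0;

    if (!pThread->bSignalled) {
        res = write(pThread->SignalFds[1], &c, sizeof(c));
        assert(res == sizeof(c));
        pThread->bSignalled = TRUE;
    }
}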
VOID LwRtlFreeThreadPool(PLW_THREAD_POOL* ppPool)
{
    PLW_THREAD_POOL pPool = *ppPool;
    PEPOLL_THREAD pThread = NULL;
    int i = 0;

    if (pPool) {
        LOCK_POOL(pPool);
        pPool->bShutdown = TRUE;
        pthread_cond_broadcast(&pPool->Event);
        UNLOCK_POOL(pPool);

        if (pPool->pEventThreads) {
            /* Ask each event thread to shut down, then join and destroy it */
            for (i = 0; i < pPool->ulEventThreadCount; i++) {
                pThread = &pPool->pEventThreads[i];
                LOCK_THREAD(pThread);
                pThread->bShutdown = TRUE;
                SignalThread(pThread);
                UNLOCK_THREAD(pThread);
                pthread_join(pThread->Thread, NULL);
                DestroyEventThread(pThread);
            }

            RtlMemoryFree(pPool->pEventThreads);
        }

        if (pPool->pDelegate) {
            ReleaseDelegatePool(&pPool->pDelegate);
        }

        pthread_cond_destroy(&pPool->Event);
        pthread_mutex_destroy(&pPool->Lock);

        DestroyWorkThreads(&pPool->WorkThreads);

        RtlMemoryFree(pPool);
        *ppPool = NULL;
    }
}
VOID LwRtlWakeTask(PLW_TASK pTask)
{
    LOCK_THREAD(pTask->pThread);

    if (pTask->EventSignal != TASK_COMPLETE_MASK) {
        pTask->EventSignal |= LW_TASK_EVENT_EXPLICIT;
        /* Move the task onto the thread's signalled list and wake the event loop */
        RingRemove(&pTask->SignalRing);
        RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing);
        SignalThread(pTask->pThread);
    }

    UNLOCK_THREAD(pTask->pThread);
}
void notify_io_complete(const void *cookie, ENGINE_ERROR_CODE status)
{
    struct conn *conn = (struct conn *)cookie;
    assert(conn);
    LIBEVENT_THREAD *thr = conn->thread;
    assert(thr);

    settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                    "Got notify from %d, status %x\n",
                                    conn->sfd, status);

    LOCK_THREAD(thr);
    conn->aiostat = status;
    int notify = add_conn_to_pending_io_list(conn);
    UNLOCK_THREAD(thr);

    /* kick the thread in the butt */
    if (notify) {
        notify_thread(thr);
    }
}
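notify_thread() itself is not shown in this excerpt. Another variant above writes a single byte to thr->notify_send_fd directly, so a plausible sketch, assuming the same per-thread wakeup pipe, is:

/* Sketch only: wake a worker by poking its notification pipe.
   Assumes notify_send_fd is the write end of the thread's wakeup pipe. */
static void notify_thread(LIBEVENT_THREAD *thread) {
    if (write(thread->notify_send_fd, "", 1) != 1) {
        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                        "Writing to thread notify pipe: %s",
                                        strerror(errno));
    }
}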
static void libevent_tap_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    assert(me->type == TAP);

    if (recv(fd, devnull, sizeof(devnull), 0) == -1) {
        if (settings.verbose > 0) {
            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                            "Can't read from libevent pipe: %s\n",
                                            strerror(errno));
        }
    }

    if (memcached_shutdown) {
        event_base_loopbreak(me->base);
        return ;
    }

    // Do we have pending closes?
    const size_t max_items = 256;
    LOCK_THREAD(me);
    conn *pending_close[max_items];
    size_t n_pending_close = 0;

    if (me->pending_close && me->last_checked != current_time) {
        assert(!has_cycle(me->pending_close));
        me->last_checked = current_time;

        n_pending_close = list_to_array(pending_close, max_items,
                                        &me->pending_close);
    }

    // Now copy the pending IO buffer and run them...
    conn *pending_io[max_items];
    size_t n_items = list_to_array(pending_io, max_items, &me->pending_io);

    UNLOCK_THREAD(me);
    for (size_t i = 0; i < n_items; ++i) {
        conn *c = pending_io[i];

        assert(c->thread == me);

        LOCK_THREAD(c->thread);
        assert(me == c->thread);

        settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                        "Processing tap pending_io for %d\n",
                                        c->sfd);

        UNLOCK_THREAD(me);
        if (!c->registered_in_libevent) {
            register_event(c, NULL);
        }

        /*
         * We don't want the thread to keep on serving all of the data
         * from the context of the notification pipe, so just let it
         * run one time to set up the correct mask in libevent
         */
        c->nevents = 1;
        c->which = EV_WRITE;
        while (c->state(c)) {
            /* do task */
        }
    }

    /* Close any connections pending close */
    for (size_t i = 0; i < n_pending_close; ++i) {
        conn *ce = pending_close[i];
        if (ce->refcount == 1) {
            settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                            "OK, time to nuke: %p\n", (void*)ce);
            assert(ce->next == NULL);
            conn_close(ce);
            pending_close[i] = NULL;
        } else {
            LOCK_THREAD(me);
            enlist_conn(ce, &me->pending_close);
            UNLOCK_THREAD(me);
        }
    }

    LOCK_THREAD(me);
    finalize_list(pending_io, n_items);
    finalize_list(pending_close, n_pending_close);
    UNLOCK_THREAD(me);
}
/*
 * Processes an incoming "handle a new connection" item. This is called when
 * input arrives on the libevent wakeup pipe.
 */
static void thread_libevent_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    assert(me->type == GENERAL);
    CQ_ITEM *item;

    if (recv(fd, devnull, sizeof(devnull), 0) == -1) {
        if (settings.verbose > 0) {
            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                            "Can't read from libevent pipe: %s\n",
                                            strerror(errno));
        }
    }

    if (memcached_shutdown) {
        event_base_loopbreak(me->base);
        return ;
    }

    while ((item = cq_pop(me->new_conn_queue)) != NULL) {
        conn *c = conn_new(item->sfd, item->parent_port, item->init_state,
                           item->event_flags, item->read_buffer_size,
                           item->transport, me->base, NULL);
        if (c == NULL) {
            if (IS_UDP(item->transport)) {
                settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                                "Can't listen for events on UDP socket\n");
                exit(1);
            } else {
                if (settings.verbose > 0) {
                    settings.extensions.logger->log(EXTENSION_LOG_INFO, NULL,
                                                    "Can't listen for events on fd %d\n",
                                                    item->sfd);
                }
                closesocket(item->sfd);
            }
        } else {
            assert(c->thread == NULL);
            c->thread = me;
        }
        cqi_free(item);
    }

    LOCK_THREAD(me);
    conn* pending = me->pending_io;
    me->pending_io = NULL;
    while (pending != NULL) {
        conn *c = pending;
        assert(me == c->thread);
        pending = pending->next;
        c->next = NULL;

        if (c->sfd != INVALID_SOCKET && !c->registered_in_libevent) {
            // The socket may have been shut down while we're looping
            // in delayed shutdown
            register_event(c, 0);
        }

        /*
         * We don't want the thread to keep on serving all of the data
         * from the context of the notification pipe, so just let it
         * run one time to set up the correct mask in libevent
         */
        c->nevents = 1;
        do {
            if (settings.verbose) {
                settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
                                                "%d - Running task: (%s)\n",
                                                c->sfd, state_text(c->state));
            }
        } while (c->state(c));
    }
    UNLOCK_THREAD(me);
}
static VOID ProcessRunnable(
    PKQUEUE_THREAD pThread,
    PKQUEUE_COMMANDS pCommands,
    PRING pRunnable,
    PRING pTimed,
    PRING pWaiting,
    LONG64 llNow
    )
{
    ULONG ulTicks = MAX_TICKS;
    PLW_TASK pTask = NULL;
    PLW_TASK_GROUP pGroup = NULL;
    PRING pRing = NULL;
    PRING pNext = NULL;

    /* We are guaranteed to run each task at least once.  If tasks remain on
       the runnable list by yielding, we will continue to run them all in a
       round robin until our ticks are depleted. */
    while (ulTicks && !RingIsEmpty(pRunnable)) {
        for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pNext) {
            pNext = pRing->pNext;

            pTask = LW_STRUCT_FROM_FIELD(pRing, KQUEUE_TASK, QueueRing);

            RunTask(pTask, llNow);

            if (ulTicks) {
                ulTicks--;
            }

            if (pTask->EventWait != LW_TASK_EVENT_COMPLETE) {
                if (pTask->EventWait & LW_TASK_EVENT_YIELD) {
                    /* Task is yielding.  Set the YIELD flag and leave it on
                       the runnable list for the next iteration. */
                    pTask->EventArgs |= LW_TASK_EVENT_YIELD;
                } else {
                    /* Task is still waiting on events, update kqueue */
                    UpdateEventWait(pCommands, pTask);

                    if (pTask->EventWait & LW_TASK_EVENT_TIME) {
                        /* If the task is waiting for a timeout, insert it
                           into the timed queue */
                        RingRemove(&pTask->QueueRing);
                        InsertTimedQueue(pTimed, pTask);
                    } else {
                        /* Otherwise, put it in the generic waiting queue */
                        RingRemove(&pTask->QueueRing);
                        RingEnqueue(pWaiting, &pTask->QueueRing);
                    }
                }
            } else {
                /* Task is complete */
                RingRemove(&pTask->QueueRing);

                /* Remove any associated events from the kqueue */
                if (pTask->Fd >= 0) {
                    (void) LwRtlSetTaskFd(pTask, pTask->Fd, 0);
                }

                /* Unsubscribe task from any UNIX signals */
                if (pTask->pUnixSignal) {
                    RegisterTaskUnixSignal(pTask, 0, FALSE);
                }

                LOCK_POOL(pThread->pPool);
                pThread->ulLoad--;
                UNLOCK_POOL(pThread->pPool);

                pGroup = pTask->pGroup;

                /* If task was in a task group, remove it and notify anyone
                   waiting on the group */
                if (pGroup) {
                    LOCK_GROUP(pGroup);
                    pTask->pGroup = NULL;
                    RingRemove(&pTask->GroupRing);
                    pthread_cond_broadcast(&pGroup->Event);
                    UNLOCK_GROUP(pGroup);
                }

                LOCK_THREAD(pThread);
                if (--pTask->ulRefCount) {
                    /* The task still has a reference, so mark it as completed
                       and notify anyone waiting on it */
                    pTask->EventSignal = TASK_COMPLETE_MASK;
                    pthread_cond_broadcast(&pThread->Event);
                    UNLOCK_THREAD(pThread);
                } else {
                    /* We held the last reference to the task, so delete it */
                    RingRemove(&pTask->SignalRing);
                    UNLOCK_THREAD(pThread);
                    TaskDelete(pTask);
                }
            }
        }
    }

    /* Update kevent commands for yielding tasks */
    for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pRing->pNext) {
        pTask = LW_STRUCT_FROM_FIELD(pRing, KQUEUE_TASK, QueueRing);

        if (pTask->EventArgs & LW_TASK_EVENT_YIELD) {
            UpdateEventWait(pCommands, pTask);
        }
    }
}
static NTSTATUS ProcessRunnable(
    PEPOLL_THREAD pThread,
    PRING pRunnable,
    PRING pTimed,
    PRING pWaiting,
    LONG64 llNow
    )
{
    NTSTATUS status = STATUS_SUCCESS;
    ULONG ulTicks = MAX_TICKS;
    PLW_TASK pTask = NULL;
    PLW_TASK_GROUP pGroup = NULL;
    PRING pRing = NULL;
    PRING pNext = NULL;

    /* We are guaranteed to run each task at least once.  If tasks remain on
       the runnable list by yielding, we will continue to run them all in a
       round robin until our ticks are depleted. */
    while (ulTicks && !RingIsEmpty(pRunnable)) {
        for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pNext) {
            pNext = pRing->pNext;

            pTask = LW_STRUCT_FROM_FIELD(pRing, EPOLL_TASK, QueueRing);

            RunTask(pTask, llNow);

            if (ulTicks) {
                ulTicks--;
            }

            if (pTask->EventWait != LW_TASK_EVENT_COMPLETE) {
                /* Task is still waiting to be runnable, update events in
                   the epoll set */
                status = UpdateEventWait(pTask, pThread->EpollFd);
                GOTO_ERROR_ON_STATUS(status);

                if (pTask->EventWait & LW_TASK_EVENT_YIELD) {
                    /* Task is yielding.  Set YIELD in its trigger arguments
                       and leave it on the runnable list for the next
                       iteration */
                    pTask->EventArgs |= LW_TASK_EVENT_YIELD;
                } else if (pTask->EventWait & LW_TASK_EVENT_TIME) {
                    /* If the task is waiting for a timeout, insert it into
                       the timed queue */
                    RingRemove(&pTask->QueueRing);
                    InsertTimedQueue(pTimed, pTask);
                } else {
                    /* Otherwise, put it in the generic waiting queue */
                    RingRemove(&pTask->QueueRing);
                    RingEnqueue(pWaiting, &pTask->QueueRing);
                }
            } else {
                /* Task is complete */
                RingRemove(&pTask->QueueRing);

                /* Turn off any fd in the epoll set */
                if (pTask->Fd >= 0) {
                    status = LwRtlSetTaskFd(pTask, pTask->Fd, 0);
                    GOTO_ERROR_ON_STATUS(status);
                }

                /* Unsubscribe task from any UNIX signals */
                if (pTask->pUnixSignal) {
                    RegisterTaskUnixSignal(pTask, 0, FALSE);
                }

                LOCK_POOL(pThread->pPool);
                pThread->ulLoad--;
                UNLOCK_POOL(pThread->pPool);

                pGroup = pTask->pGroup;

                /* If task was in a task group, remove it and notify anyone
                   waiting on the group */
                if (pGroup) {
                    LOCK_GROUP(pGroup);
                    pTask->pGroup = NULL;
                    RingRemove(&pTask->GroupRing);
                    pthread_cond_broadcast(&pGroup->Event);
                    UNLOCK_GROUP(pGroup);
                }

                LOCK_THREAD(pThread);
                if (--pTask->ulRefCount) {
                    /* The task still has a reference, so mark it as completed
                       and notify anyone waiting on it */
                    pTask->EventSignal = TASK_COMPLETE_MASK;
                    pthread_cond_broadcast(&pThread->Event);
                    UNLOCK_THREAD(pThread);
                } else {
                    /* We held the last reference to the task, so delete it */
                    RingRemove(&pTask->SignalRing);
                    UNLOCK_THREAD(pThread);
                    TaskDelete(pTask);
                }
            }
        }
    }

error:

    return status;
}
static NTSTATUS EventLoop(PSELECT_THREAD pThread)
{
    NTSTATUS status = STATUS_SUCCESS;
    RING tasks;
    RING runnable;
    PRING pRing = NULL;
    PRING pNext = NULL;
    PSELECT_TASK pTask = NULL;
    CLOCK clock = {0};
    LONG64 llNow;
    LONG64 llNextDeadline;
    fd_set readSet;
    fd_set writeSet;
    fd_set exceptSet;
    int ready = 0;
    int nfds = 0;
    char c = 0;
    int res = 0;
    BOOLEAN bShutdown = FALSE;
    PLW_TASK_GROUP pGroup = NULL;
    BOOLEAN bYielding = FALSE;

    RingInit(&tasks);
    RingInit(&runnable);

    FD_ZERO(&readSet);
    FD_ZERO(&writeSet);
    FD_ZERO(&exceptSet);

    LOCK_THREAD(pThread);

    while (!bShutdown || !RingIsEmpty(&tasks)) {
        /* Reset variables */
        llNextDeadline = 0;
        nfds = 0;
        bYielding = FALSE;

        /* Get current time for this iteration */
        GOTO_ERROR_ON_STATUS(status = ClockGetMonotonicTime(&clock, &llNow));

        /* Figure out which tasks are runnable */
        for (pRing = tasks.pNext; pRing != &tasks; pRing = pNext) {
            pNext = pRing->pNext;
            pTask = LW_STRUCT_FROM_FIELD(pRing, SELECT_TASK, EventRing);

            /* Update trigger set with results from select() */
            UpdateTriggerSet(pTask, &readSet, &writeSet, &exceptSet, llNow);

            /* Schedule tasks to run if they have been triggered or were yielding */
            if ((pTask->TriggerWait & LW_TASK_EVENT_YIELD) ||
                ((pTask->TriggerWait | LW_TASK_EVENT_EXPLICIT) & pTask->TriggerSet)) {
                /* Put task on a separate list to run its trigger function */
                RingRemove(&pTask->EventRing);
                RingInsertBefore(&runnable, &pTask->EventRing);

                /* Update the trigger args with the trigger set */
                pTask->TriggerArgs |= pTask->TriggerSet;

                /* Turn off bits (except cancel) now that we have copied them */
                pTask->TriggerSet &= (LW_TASK_EVENT_CANCEL);
            } else {
                /* Update select parameters to wait for task to trigger */
                UpdateTriggerWait(pTask, &nfds, &readSet, &writeSet,
                                  &exceptSet, &llNextDeadline);
            }
        }

        UNLOCK_THREAD(pThread);

        for (pRing = runnable.pNext; pRing != &runnable; pRing = pNext) {
            pNext = pRing->pNext;
            pTask = LW_STRUCT_FROM_FIELD(pRing, SELECT_TASK, EventRing);

            GOTO_ERROR_ON_STATUS(status = TaskProcessTrigger(pTask, llNow));

            if (pTask->TriggerWait != 0) {
                /* Task is still waiting to be runnable, update select
                   parameters and put it back in the task list */
                UpdateTriggerWait(pTask, &nfds, &readSet, &writeSet,
                                  &exceptSet, &llNextDeadline);

                if (pTask->TriggerWait & LW_TASK_EVENT_YIELD) {
                    /* Task is yielding temporarily.  Set the yield flag on
                       its trigger arguments.  Leave it on the runnable list */
                    pTask->TriggerArgs |= LW_TASK_EVENT_YIELD;
                } else {
                    RingRemove(&pTask->EventRing);
                    RingInsertBefore(&tasks, &pTask->EventRing);
                }
            } else {
                /* Task is complete, notify and remove from task group if it
                   is in one */
                RingRemove(&pTask->EventRing);

                /* Unregister task from global signal loop */
                if (pTask->pUnixSignal) {
                    RegisterTaskUnixSignal(pTask, 0, FALSE);
                }

                pGroup = pTask->pGroup;

                if (pGroup) {
                    LOCK_GROUP(pGroup);
                    pTask->pGroup = NULL;
                    RingRemove(&pTask->GroupRing);
                    pthread_cond_broadcast(&pGroup->Event);
                    UNLOCK_GROUP(pGroup);
                }

                LOCK_THREAD(pThread);
                if (--pTask->ulRefCount) {
                    pTask->TriggerSet = TASK_COMPLETE_MASK;
                    pthread_cond_broadcast(&pThread->Event);
                    UNLOCK_THREAD(pThread);
                } else {
                    UNLOCK_THREAD(pThread);
                    TaskDelete(pTask);
                }
            }
        }

        if (!RingIsEmpty(&runnable)) {
            /* We have runnable tasks that are yielding.  Move them back to
               the event list and note the fact. */
            bYielding = TRUE;
            RingMove(&runnable, &tasks);
        }

        if (!bShutdown) {
            /* Also wait for a poke on the thread's signal fd */
            FD_SET(pThread->SignalFds[0], &readSet);
            if (pThread->SignalFds[0] >= nfds) {
                nfds = pThread->SignalFds[0] + 1;
            }
        }

        if (nfds) {
            /* If there are still runnable tasks due to LW_TASK_EVENT_YIELD,
               set the next deadline to now so we wake immediately.  This
               gives other tasks the chance to become runnable before we
               proceed */
            if (bYielding) {
                llNextDeadline = llNow;
            }

            /* Wait for a task to be runnable */
            GOTO_ERROR_ON_STATUS(status = Sleep(
                                     &clock,
                                     &llNow,
                                     nfds,
                                     &readSet,
                                     &writeSet,
                                     &exceptSet,
                                     llNextDeadline,
                                     &ready));
        }

        LOCK_THREAD(pThread);

        /* Check for a signal to the thread */
        if (FD_ISSET(pThread->SignalFds[0], &readSet)) {
            FD_CLR(pThread->SignalFds[0], &readSet);
            pThread->bSignalled = FALSE;

            res = read(pThread->SignalFds[0], &c, sizeof(c));
            assert(res == sizeof(c));

            /* Move all tasks in queue into local task list */
            RingMove(&pThread->Tasks, &tasks);

            if (pThread->bShutdown && !bShutdown) {
                bShutdown = pThread->bShutdown;

                /* Cancel all outstanding tasks */
                for (pRing = tasks.pNext; pRing != &tasks; pRing = pRing->pNext) {
                    pTask = LW_STRUCT_FROM_FIELD(pRing, SELECT_TASK, EventRing);
                    pTask->TriggerSet |= LW_TASK_EVENT_CANCEL | LW_TASK_EVENT_EXPLICIT;
                }
            }
        }
    }

error:

    UNLOCK_THREAD(pThread);

    return status;
}
void notify_io_complete(const void *cookie, ENGINE_ERROR_CODE status)
{
    if (cookie == NULL) {
        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                        "notify_io_complete called without a valid cookie (status %x)\n",
                                        status);
        return ;
    }

    struct conn *conn = (struct conn *)cookie;

    settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                    "Got notify from %d, status %x\n",
                                    conn->sfd, status);

    /*
    ** TROND:
    **   I changed the logic for the tap connections so that the core
    **   issues the ON_DISCONNECT call to the engine instead of trying
    **   to close the connection. Then it lets the engine have a grace
    **   period to call notify_io_complete; if not, it will go ahead
    **   and kill it.
    */
    if (status == ENGINE_DISCONNECT && conn->thread == tap_thread) {
        LOCK_THREAD(conn->thread);

        /** Remove the connection from both of the lists */
        conn->thread->pending_io = list_remove(conn->thread->pending_io, conn);
        conn->thread->pending_close = list_remove(conn->thread->pending_close, conn);

        if (conn->state == conn_pending_close ||
            conn->state == conn_immediate_close) {
            if (conn->refcount == 1) {
                settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                                "Complete shutdown of %p", conn);
                conn_set_state(conn, conn_immediate_close);
                enlist_conn(conn, &conn->thread->pending_close);
            } else {
                settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                                "Keep on waiting for shutdown of %p", conn);
            }
        } else {
            settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                            "Engine requested shutdown of %p", conn);
            conn_set_state(conn, conn_closing);
            enlist_conn(conn, &conn->thread->pending_io);
        }

        if (!is_thread_me(conn->thread)) {
            /* kick the thread in the butt */
            notify_thread(conn->thread);
        }

        UNLOCK_THREAD(conn->thread);
        return;
    }

    /*
    ** There may be a race condition between the engine calling this
    ** function and the core closing the connection.
    ** Let's lock the connection structure (this might not be the
    ** correct one) and re-evaluate.
    */
    LIBEVENT_THREAD *thr = conn->thread;
    if (thr == NULL || (conn->state == conn_closing ||
                        conn->state == conn_pending_close ||
                        conn->state == conn_immediate_close)) {
        return;
    }

    int notify = 0;

    LOCK_THREAD(thr);
    if (thr != conn->thread || !conn->ewouldblock) {
        // Ignore
        UNLOCK_THREAD(thr);
        return;
    }

    conn->aiostat = status;

    /* Move the connection to the closing state if the engine
     * wants it to be disconnected */
    if (status == ENGINE_DISCONNECT) {
        conn->state = conn_closing;
        notify = 1;
        thr->pending_io = list_remove(thr->pending_io, conn);
        if (number_of_pending(conn, thr->pending_close) == 0) {
            enlist_conn(conn, &thr->pending_close);
        }
    } else {
        if (number_of_pending(conn, thr->pending_io) +
            number_of_pending(conn, thr->pending_close) == 0) {
            if (thr->pending_io == NULL) {
                notify = 1;
            }
            enlist_conn(conn, &thr->pending_io);
        }
    }
    UNLOCK_THREAD(thr);

    /* kick the thread in the butt */
    if (notify) {
        notify_thread(thr);
    }
}
NTSTATUS LwRtlCreateTask(
    PLW_THREAD_POOL pPool,
    PLW_TASK* ppTask,
    PLW_TASK_GROUP pGroup,
    LW_TASK_FUNCTION pfnFunc,
    PVOID pContext
    )
{
    NTSTATUS status = STATUS_SUCCESS;
    PSELECT_TASK pTask = NULL;

    /* If a delegate pool is configured, create the task there instead */
    if (pPool->pDelegate) {
        return LwRtlCreateTask(pPool->pDelegate, ppTask, pGroup, pfnFunc, pContext);
    }

    GOTO_ERROR_ON_STATUS(status = LW_RTL_ALLOCATE_AUTO(&pTask));

    RingInit(&pTask->GroupRing);
    RingInit(&pTask->EventRing);

    pTask->pPool = pPool;
    pTask->pGroup = pGroup;
    pTask->ulRefCount = 2;
    pTask->pfnFunc = pfnFunc;
    pTask->pFuncContext = pContext;
    pTask->Fd = -1;
    pTask->TriggerSet = LW_TASK_EVENT_INIT;
    pTask->TriggerWait = LW_TASK_EVENT_EXPLICIT;
    pTask->llDeadline = 0;

    LOCK_POOL(pPool);

    if (pGroup) {
        LOCK_GROUP(pGroup);
        if (pGroup->bCancelled) {
            UNLOCK_GROUP(pGroup);
            UNLOCK_POOL(pPool);
            status = STATUS_CANCELLED;
            GOTO_ERROR_ON_STATUS(status);
        }
        RingInsertBefore(&pGroup->Tasks, &pTask->GroupRing);
    }

    /* Assign the task to the next event thread, round robin */
    pTask->pThread = &pPool->pEventThreads[pPool->ulNextEventThread];
    pPool->ulNextEventThread = (pPool->ulNextEventThread + 1) % pPool->ulEventThreadCount;

    UNLOCK_POOL(pPool);

    LOCK_THREAD(pTask->pThread);
    RingInsertBefore(&pTask->pThread->Tasks, &pTask->EventRing);
    /* It's not necessary to signal the thread about the new task here
       since it won't be run anyway */
    UNLOCK_THREAD(pTask->pThread);

    if (pGroup) {
        UNLOCK_GROUP(pGroup);
    }

    *ppTask = pTask;

error:

    return status;
}
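A hedged end-to-end sketch built only from functions that appear in this set; MyTaskFunction is a placeholder for an LW_TASK_FUNCTION defined elsewhere, whose signature is not reproduced in this excerpt.

/* Hypothetical driver: create a task, wake it so it runs, wait for completion,
   then drop our reference. */
static NTSTATUS RunOneTask(PLW_THREAD_POOL pPool, LW_TASK_FUNCTION MyTaskFunction, PVOID pContext)
{
    NTSTATUS status = STATUS_SUCCESS;
    PLW_TASK pTask = NULL;

    status = LwRtlCreateTask(pPool, &pTask, NULL, MyTaskFunction, pContext);
    if (status == STATUS_SUCCESS) {
        LwRtlWakeTask(pTask);        /* delivers LW_TASK_EVENT_EXPLICIT */
        LwRtlWaitTask(pTask);        /* blocks until TASK_COMPLETE_MASK is signalled */
        LwRtlReleaseTask(&pTask);    /* pTask becomes NULL; last reference deletes the task */
    }

    return status;
}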