void finalize_list(conn **list, size_t items) {
    for (size_t i = 0; i < items; i++) {
        if (list[i] != NULL) {
            list[i]->list_state &= ~LIST_STATE_PROCESSING;
            if (list[i]->sfd != INVALID_SOCKET) {
                if (list[i]->list_state & LIST_STATE_REQ_PENDING_IO) {
                    enlist_conn(list[i], &list[i]->thread->pending_io);
                } else if (list[i]->list_state & LIST_STATE_REQ_PENDING_CLOSE) {
                    enlist_conn(list[i], &list[i]->thread->pending_close);
                }
            }
            list[i]->list_state = 0;
        }
    }
}
int add_conn_to_pending_io_list(conn *c) {
    int notify = 0;
    if (number_of_pending(c, c->thread->pending_io) == 0) {
        if (c->thread->pending_io == NULL) {
            notify = 1;
        }
        enlist_conn(c, &c->thread->pending_io);
    }
    return notify;
}
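/*
 * The pending_io/pending_close lists used above are manipulated through a
 * small set of helpers (enlist_conn, number_of_pending, list_remove) defined
 * elsewhere in this file. They are not shown in this section, so the sketch
 * below is only an illustration of the intrusive singly-linked-list behaviour
 * the callers rely on; the bodies and the LIST_STATE_* bookkeeping are
 * assumptions inferred from how finalize_list() and
 * add_conn_to_pending_io_list() use them, not the authoritative code. The
 * block is guarded out of the build on purpose.
 */
#if 0   /* illustrative sketch only, not the real implementation */
static void enlist_conn(conn *c, conn **list) {
    LIBEVENT_THREAD *thr = c->thread;
    assert(list == &thr->pending_io || list == &thr->pending_close);
    if ((c->list_state & LIST_STATE_PROCESSING) == 0) {
        /* Not currently part of a snapshot being processed: push the
         * connection onto the head of the intrusive list right away. */
        assert(c->next == NULL);
        c->next = *list;
        *list = c;
    } else {
        /* The connection sits in a snapshot array being walked by the
         * worker; record the request so finalize_list() re-enlists it. */
        c->list_state |= (list == &thr->pending_io)
                         ? LIST_STATE_REQ_PENDING_IO
                         : LIST_STATE_REQ_PENDING_CLOSE;
    }
}

static int number_of_pending(conn *c, conn *list) {
    /* Count how many times c occurs in the list (0 or 1 in practice). */
    int count = 0;
    for (; list != NULL; list = list->next) {
        if (list == c) {
            ++count;
        }
    }
    return count;
}

static conn *list_remove(conn *haystack, conn *needle) {
    /* Unlink needle from the list and return the (possibly new) head. */
    if (haystack == NULL) {
        return NULL;
    }
    if (haystack == needle) {
        conn *rest = haystack->next;
        haystack->next = NULL;
        return rest;
    }
    haystack->next = list_remove(haystack->next, needle);
    return haystack;
}
#endif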
void notify_io_complete(const void *cookie, ENGINE_ERROR_CODE status) {
    if (cookie == NULL) {
        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                        "notify_io_complete called without a valid cookie (status %x)\n",
                                        status);
        return;
    }

    struct conn *conn = (struct conn *)cookie;

    settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                    "Got notify from %d, status %x\n",
                                    conn->sfd, status);

    /*
    ** TROND:
    ** I changed the logic for the tap connections so that the core
    ** issues the ON_DISCONNECT call to the engine instead of trying
    ** to close the connection. The engine then gets a grace period
    ** in which to call notify_io_complete; if it doesn't, the core
    ** goes ahead and kills the connection.
    */
    if (status == ENGINE_DISCONNECT && conn->thread == tap_thread) {
        LOCK_THREAD(conn->thread);

        /* Remove the connection from both of the lists */
        conn->thread->pending_io = list_remove(conn->thread->pending_io,
                                               conn);
        conn->thread->pending_close = list_remove(conn->thread->pending_close,
                                                  conn);

        if (conn->state == conn_pending_close ||
            conn->state == conn_immediate_close) {
            if (conn->refcount == 1) {
                settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                                "Complete shutdown of %p", conn);
                conn_set_state(conn, conn_immediate_close);
                enlist_conn(conn, &conn->thread->pending_close);
            } else {
                settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                                "Keep on waiting for shutdown of %p", conn);
            }
        } else {
            settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                            "Engine requested shutdown of %p", conn);
            conn_set_state(conn, conn_closing);
            enlist_conn(conn, &conn->thread->pending_io);
        }

        if (!is_thread_me(conn->thread)) {
            /* kick the thread in the butt */
            notify_thread(conn->thread);
        }
        UNLOCK_THREAD(conn->thread);
        return;
    }

    /*
    ** There may be a race condition between the engine calling this
    ** function and the core closing the connection.
    ** Let's lock the connection structure (this might not be the
    ** correct one) and re-evaluate.
    */
    LIBEVENT_THREAD *thr = conn->thread;
    if (thr == NULL || (conn->state == conn_closing ||
                        conn->state == conn_pending_close ||
                        conn->state == conn_immediate_close)) {
        return;
    }

    int notify = 0;

    LOCK_THREAD(thr);
    if (thr != conn->thread || !conn->ewouldblock) {
        // Ignore: the connection moved threads or is no longer blocked
        UNLOCK_THREAD(thr);
        return;
    }

    conn->aiostat = status;

    /* Move the connection to the closing state if the engine
     * wants it to be disconnected
     */
    if (status == ENGINE_DISCONNECT) {
        conn->state = conn_closing;
        notify = 1;
        thr->pending_io = list_remove(thr->pending_io, conn);
        if (number_of_pending(conn, thr->pending_close) == 0) {
            enlist_conn(conn, &thr->pending_close);
        }
    } else {
        if (number_of_pending(conn, thr->pending_io) +
            number_of_pending(conn, thr->pending_close) == 0) {
            if (thr->pending_io == NULL) {
                notify = 1;
            }
            enlist_conn(conn, &thr->pending_io);
        }
    }
    UNLOCK_THREAD(thr);

    /* kick the thread in the butt */
    if (notify) {
        notify_thread(thr);
    }
}
static void libevent_tap_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    assert(me->type == TAP);

    if (recv(fd, devnull, sizeof(devnull), 0) == -1) {
        if (settings.verbose > 0) {
            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                            "Can't read from libevent pipe: %s\n",
                                            strerror(errno));
        }
    }

    if (memcached_shutdown) {
        event_base_loopbreak(me->base);
        return;
    }

    // Do we have pending closes?
    const size_t max_items = 256;
    LOCK_THREAD(me);
    conn *pending_close[max_items];
    size_t n_pending_close = 0;

    if (me->pending_close && me->last_checked != current_time) {
        assert(!has_cycle(me->pending_close));
        me->last_checked = current_time;

        n_pending_close = list_to_array(pending_close, max_items,
                                        &me->pending_close);
    }

    // Now copy the pending IO buffer and run them...
    conn *pending_io[max_items];
    size_t n_items = list_to_array(pending_io, max_items, &me->pending_io);
    UNLOCK_THREAD(me);

    for (size_t i = 0; i < n_items; ++i) {
        conn *c = pending_io[i];
        assert(c->thread == me);

        LOCK_THREAD(c->thread);
        assert(me == c->thread);
        settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                        "Processing tap pending_io for %d\n",
                                        c->sfd);
        UNLOCK_THREAD(me);

        if (!c->registered_in_libevent) {
            register_event(c, NULL);
        }

        /*
         * We don't want the thread to keep on serving all of the data
         * from the context of the notification pipe, so just let it
         * run one time to set up the correct mask in libevent
         */
        c->nevents = 1;
        c->which = EV_WRITE;
        while (c->state(c)) {
            /* do task */
        }
    }

    /* Close any connections pending close */
    for (size_t i = 0; i < n_pending_close; ++i) {
        conn *ce = pending_close[i];
        if (ce->refcount == 1) {
            settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
                                            "OK, time to nuke: %p\n",
                                            (void*)ce);
            assert(ce->next == NULL);
            conn_close(ce);
            pending_close[i] = NULL;
        } else {
            LOCK_THREAD(me);
            enlist_conn(ce, &me->pending_close);
            UNLOCK_THREAD(me);
        }
    }

    LOCK_THREAD(me);
    finalize_list(pending_io, n_items);
    finalize_list(pending_close, n_pending_close);
    UNLOCK_THREAD(me);
}
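/*
 * libevent_tap_process() drains the shared lists into local arrays with
 * list_to_array() before dropping the thread lock. That helper is defined
 * elsewhere in this file; the guarded sketch below only illustrates the
 * behaviour assumed above: pop up to max_items connections off the intrusive
 * list, clear their next pointers, and mark them LIST_STATE_PROCESSING so
 * that concurrent enlist_conn() calls defer to finalize_list(). Treat the
 * details as an assumption rather than the real implementation.
 */
#if 0   /* illustrative sketch only, not the real implementation */
static size_t list_to_array(conn **dest, size_t max_items, conn **l) {
    size_t n_items = 0;
    while (*l != NULL && n_items < max_items) {
        conn *c = *l;
        *l = c->next;                       /* pop the head of the list */
        c->next = NULL;
        c->list_state |= LIST_STATE_PROCESSING;
        dest[n_items++] = c;
    }
    return n_items;
}
#endif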