/* Runs on the CFRunLoop thread: drain the loop's queued cross-thread
 * signals and invoke each one's callback.
 *
 * arg is the uv_loop_t* whose cf_signals queue is being serviced.
 * Signals appended by other threads while we are draining stay on
 * loop->cf_signals and are handled on the next wakeup, because the
 * current batch is detached under the mutex before any callback runs.
 */
void uv__cf_loop_cb(void* arg) {
  uv_loop_t* loop;
  QUEUE* item;
  QUEUE split_head;
  uv__cf_loop_signal_t* s;

  loop = arg;

  /* Detach the entire pending list onto split_head while holding
   * cf_mutex, so producers never race the drain loop below. */
  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_INIT(&split_head);
  if (!QUEUE_EMPTY(&loop->cf_signals)) {
    QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals);
    QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head);
  }
  uv_mutex_unlock(&loop->cf_mutex);

  /* Run callbacks without the lock held; each signal node is freed
   * after its callback returns. */
  while (!QUEUE_EMPTY(&split_head)) {
    item = QUEUE_HEAD(&split_head);
    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
    s->cb(s->arg);
    QUEUE_REMOVE(item);
    free(s);
  }
}
/* Final teardown step for a closing UDP handle.
 *
 * Preconditions (asserted): the I/O watcher is fully stopped and its fd
 * already closed.  Every request still sitting on the write queue is
 * failed with -ECANCELED by moving it to the completed queue, then
 * uv__udp_run_completed() fires the send callbacks and releases the
 * request buffers.  Afterwards both send-queue counters must be zero.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  /* Cancel everything that never reached the wire. */
  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);
    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    req->status = -ECANCELED;
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }

  uv__udp_run_completed(handle);

  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
/* Fire the user callbacks for every finished (or flushed-with-error)
 * write request on stream->write_completed_queue.
 *
 * Each request is unregistered from the loop and its buffer storage is
 * released BEFORE the callback runs, so the callback may free or reuse
 * the uv_write_t itself.  req->error carries the per-request status.
 */
static void uv__write_callbacks(uv_stream_t* stream) {
  uv_write_t* req;
  QUEUE* q;

  while (!QUEUE_EMPTY(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = QUEUE_HEAD(&stream->write_completed_queue);
    req = QUEUE_DATA(q, uv_write_t, queue);
    QUEUE_REMOVE(q);
    uv__req_unregister(stream->loop, req);

    if (req->bufs != NULL) {
      /* Account for the bytes this request contributed, then free the
       * heap-allocated buffer array (bufsml is the inline small-array). */
      stream->write_queue_size -= uv__write_req_size(req);
      if (req->bufs != req->bufsml)
        free(req->bufs);
      req->bufs = NULL;
    }

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb)
      req->cb(req, req->error);
  }

  assert(QUEUE_EMPTY(&stream->write_completed_queue));
}
/* Invoke the send callback for every request on the completed queue.
 *
 * Buffer storage is released before the callback runs, so the callback
 * may free or reuse the uv_udp_send_t.  Requests with no callback are
 * cleaned up silently.
 */
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    /* Free the heap buffer array unless the inline small-array was used. */
    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status < 0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }
}
/* Runs on the CFRunLoop thread: drain the loop's queued cross-thread
 * signals.  A signal whose cb is NULL is the termination sentinel and
 * stops the CFRunLoop; any other signal's callback is invoked.
 *
 * The pending list is detached under cf_mutex first, so signals queued
 * concurrently are deferred to the next wakeup.
 */
static void uv__cf_loop_cb(void* arg) {
  uv_loop_t* loop;
  QUEUE* item;
  QUEUE split_head;
  uv__cf_loop_signal_t* s;

  loop = arg;

  /* Detach the whole signal list while holding the mutex. */
  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_INIT(&split_head);
  if (!QUEUE_EMPTY(&loop->cf_signals)) {
    QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals);
    QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head);
  }
  uv_mutex_unlock(&loop->cf_mutex);

  while (!QUEUE_EMPTY(&split_head)) {
    item = QUEUE_HEAD(&split_head);
    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);

    /* This was a termination signal */
    if (s->cb == NULL)
      CFRunLoopStop(loop->cf_loop);
    else
      s->cb(s->arg);

    QUEUE_REMOVE(item);
    free(s);
  }
}
/* Final teardown step for a closing UDP handle.
 *
 * Preconditions (asserted): the I/O watcher is stopped and its fd is
 * closed.  Already-completed sends get their normal callbacks via
 * uv__udp_run_completed(); everything still on the write queue is then
 * cancelled directly with -ECANCELED.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  uv__udp_run_completed(handle);

  /* Cancel pending requests that never reached the wire. */
  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    /* Release the buffer array before the callback, matching the
     * completed-queue path. */
    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb != NULL)
      req->send_cb(req, -ECANCELED);
  }

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
/* Read and dispatch inotify events for this loop.
 *
 * Reads batches of uv__inotify_event records from loop->inotify_fd until
 * the fd would block, maps the inotify mask onto UV_CHANGE/UV_RENAME,
 * and invokes every uv_fs_event_t watching the affected wd.  The
 * |events| parameter is reused as a scratch variable for the per-event
 * flags; |dummy| is unused.
 */
static void uv__inotify_read(uv_loop_t* loop, uv__io_t* dummy, unsigned int events) {
  const struct uv__inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  while (1) {
    /* Retry reads interrupted by signals. */
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct uv__inotify_event*)p;

      events = 0;
      if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* Detach the watcher list so a callback that stops a watcher
       * cannot corrupt this iteration; each node is re-linked onto
       * w->watchers before its callback runs. */
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
    }
  }
}
/* Try to flush the UDP write queue with sendmsg().
 *
 * Each request is sent as one datagram; on EAGAIN/EWOULDBLOCK the loop
 * stops and the remaining requests stay queued for the next writable
 * event.  Sent (or failed) requests record bytes-written or -errno in
 * req->status and move to the completed queue; callbacks fire later in
 * uv__udp_run_completed().
 */
static void uv__udp_run_pending(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;
  struct msghdr h;
  ssize_t size;

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    assert(q != NULL);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    memset(&h, 0, sizeof h);
    h.msg_name = &req->addr;
    /* Pick the sockaddr size from the stored address family. */
    h.msg_namelen = (req->addr.sin6_family == AF_INET6 ?
      sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
    h.msg_iov = (struct iovec*)req->bufs;
    h.msg_iovlen = req->bufcnt;

    /* Retry on signal interruption. */
    do {
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    }
    while (size == -1 && errno == EINTR);

    /* TODO try to write once or twice more in the
     * hope that the socket becomes readable again?
     */
    if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
      break;

    req->status = (size == -1 ? -errno : size);

#ifndef NDEBUG
    /* Sanity check. */
    if (size != -1) {
      ssize_t nbytes;
      int i;

      for (nbytes = i = 0; i < req->bufcnt; i++)
        nbytes += req->bufs[i].len;

      assert(size == nbytes);
    }
#endif

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }
}
/* Drain the loop's immediate-handle queue, running each handle's
 * callback.  Callbacks that re-queue a handle are processed in this
 * same drain, since the queue is consumed until empty. */
static void loop_run_immediate(evLoop *loop) {
    while (!QUEUE_EMPTY(&loop->handle_queue)) {
        QUEUE *node = QUEUE_HEAD(&loop->handle_queue);
        QUEUE_REMOVE(node);

        evHandle *h = QUEUE_DATA(node, evHandle, queue);
        assert(h);
        h->cb(h);
    }
}
/* Find the uv_process_t watching |pid| on this loop.
 *
 * Looks up the hash bucket via uv__process_queue() and scans it for a
 * matching pid.  Returns the handle, or NULL when no handle watches
 * that pid.
 *
 * Fix: the original text ended after the QUEUE_FOREACH with no
 * not-found return and an unclosed function body — a non-void function
 * falling off the end is undefined behavior when the caller uses the
 * result.  The NULL return (and closing brace) restore the standard
 * contract callers expect.
 */
static uv_process_t* uv__process_find(uv_loop_t* loop, int pid) {
  uv_process_t* handle;
  QUEUE* h;
  QUEUE* q;

  h = uv__process_queue(loop, pid);

  QUEUE_FOREACH(q, h) {
    handle = QUEUE_DATA(q, uv_process_t, queue);
    if (handle->pid == pid)
      return handle;
  }

  return NULL;
}
/* Discard every item queued on the channel.  The channel mutex is held
 * for the whole drain.  Only the queue nodes themselves are freed; the
 * payload each item carried is not touched here. */
void uv_chan_clear(uv_chan_t *chan) {
    uv_mutex_lock(&chan->mutex);

    for (;;) {
        if (QUEUE_EMPTY(&chan->q))
            break;

        QUEUE *node = QUEUE_HEAD(&chan->q);
        uv__chan_item_t *entry = QUEUE_DATA(node, uv__chan_item_t, active_queue);
        QUEUE_REMOVE(node);
        free(entry);
    }

    uv_mutex_unlock(&chan->mutex);
}
/* uv_write completion callback for the TLS transport.
 *
 * Translates the libuv status into PC_RC_OK/PC_RC_ERROR, then walks the
 * writing queue under wq_mutex: on success, response-type items move to
 * the resp-pending queue to await their server reply; everything else
 * has its buffer released and its notify/response callback fired, after
 * which the item is either returned to the pre-allocated pool or freed.
 * Finally kicks tls__write_to_tcp() to flush any further buffered data.
 */
void tls__write_done_cb(uv_write_t* w, int status) {
  tr_uv_wi_t* wi = NULL;
  int i;  /* NOTE(review): unused here — kept for byte-compatibility. */
  QUEUE* q;
  GET_TLS(w);  /* presumably declares tt/tls from w — defined elsewhere. */

  tt->is_writing = 0;
  if (status) {
    pc_lib_log(PC_LOG_ERROR, "tcp__write_done_cb - uv_write callback error: %s", uv_strerror(status));
  }
  status = status ? PC_RC_ERROR : PC_RC_OK;

  pc_mutex_lock(&tt->wq_mutex);
  while(!QUEUE_EMPTY(&tt->writing_queue)) {
    q = QUEUE_HEAD(&tt->writing_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    wi = (tr_uv_wi_t* )QUEUE_DATA(q, tr_uv_wi_t, queue);

    /* Successful responses wait for the server reply before their
     * resources are released. */
    if (!status && TR_UV_WI_IS_RESP(wi->type)) {
      pc_lib_log(PC_LOG_DEBUG, "tls__write_to_tcp - move wi from writing queue to resp pending queue,"
          " seq_num: %u, req_id: %u", wi->seq_num, wi->req_id);
      QUEUE_INSERT_TAIL(&tt->resp_pending_queue, q);
      continue;
    };

    pc_lib_free(wi->buf.base);
    wi->buf.base = NULL;
    wi->buf.len = 0;

    if (TR_UV_WI_IS_NOTIFY(wi->type)) {
      pc_trans_sent(tt->client, wi->seq_num, status);
    }

    if (TR_UV_WI_IS_RESP(wi->type)) {
      pc_trans_resp(tt->client, wi->req_id, status, NULL);
    }

    // if internal, do nothing here.

    /* Pool-allocated items are marked idle for reuse; heap items are freed. */
    if (PC_IS_PRE_ALLOC(wi->type)) {
      PC_PRE_ALLOC_SET_IDLE(wi->type);
    } else {
      pc_lib_free(wi);
    }
  }
  pc_mutex_unlock(&tt->wq_mutex);

  tls__write_to_tcp(tls);
}
/* Pop the head entry off |refs| and release it.
 *
 * Assumes the queue is non-empty — QUEUE_HEAD on an empty queue would
 * yield the sentinel and corrupt memory; callers must check first
 * (TODO confirm all call sites do).  For VAR_LIST refs the list's item
 * array is freed before the list object itself.
 */
static void ref_pop(QUEUE *refs) {
  QUEUE *h = QUEUE_HEAD(refs);
  queue_ref_item *item = QUEUE_DATA(h, queue_ref_item, node);
  QUEUE_REMOVE(&item->node);

  /* Grab the payload pointer before the wrapper is freed. */
  void *ref = item->item.ref;
  if (item->item.v_type == VAR_LIST) {
    List *l = ref;
    utarray_free(l->items);
  }
  free(item);
  free(ref);
}
static void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { uv_write_t* req; QUEUE* q; while (!QUEUE_EMPTY(&stream->write_queue)) { q = QUEUE_HEAD(&stream->write_queue); QUEUE_REMOVE(q); req = QUEUE_DATA(q, uv_write_t, queue); req->error = error; QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); } }
/* Invoke the callback of every I/O watcher on the pending queue.
 *
 * Fix: the original drained loop->pending_queue in place, so a callback
 * that re-feeds its watcher (or feeds another) would be serviced in the
 * same drain — a self-re-arming watcher could spin this loop forever.
 * Detaching the current batch onto a local head first (the same pattern
 * as the int-returning uv__run_pending variant elsewhere in this file)
 * defers anything queued during a callback to the next loop iteration.
 * The external interface is unchanged.
 */
static void uv__run_pending(uv_loop_t* loop) {
  QUEUE pq;
  QUEUE* q;
  uv__io_t* w;

  /* Take ownership of the current batch; new entries accumulate on
   * loop->pending_queue for the next iteration. */
  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);  /* reset the node so a later remove on it is harmless */
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, UV__POLLOUT);
  }
}
/* Dispatch queued worker output back into the main duktape context.
 *
 * Holds the worker mutex while draining queueOut; for each item it
 * pushes the worker's JS object and calls it with the item's value.
 * When the worker is being destroyed the duktape call is skipped and
 * the item is only freed (the FREE label).  Once destroy reaches 2 the
 * worker's callback entry is removed from the global stash and both
 * the handle and the worker are torn down.  Always returns 0.
 *
 * NOTE(review): the mutex is held across duk_call — re-entry into
 * worker APIs from JS while dispatching would deadlock; confirm the
 * callbacks never do that.
 */
static int _worker_dispatch_cb (evHandle *handle){
  comoWorker *worker = handle->data;
  duk_context *ctx = worker->Mainctx;
  mtx_lock(&worker->mtx);

  QUEUE *q;
  while ( !QUEUE_EMPTY(&worker->queueOut) ){
    q = QUEUE_HEAD(&(worker)->queueOut);
    QUEUE_REMOVE(q);
    comoQueue *queue = QUEUE_DATA(q, comoQueue, queue);

    /* Worker shutting down: skip the JS callback, just free the item. */
    if (worker->destroy != 0){
      goto FREE;
    }

    duk_push_heapptr(ctx, worker->self);
    if (duk_get_type(ctx, -1) != DUK_TYPE_OBJECT){
      dump_stack(ctx, "DUK");
      assert(0);
    }

    como_push_worker_value(ctx, queue);
    duk_call(ctx, 1);
    duk_pop(ctx);

    FREE :
    /* free except in case of pointers */
    if (queue->data != NULL && queue->type != DUK_TYPE_POINTER){
      free(queue->data);
    }

    free(queue);
  }
  mtx_unlock(&worker->mtx);

  /* Full teardown requested: drop the JS callback registration and
   * release the handle and worker. */
  if (worker->destroy == 2){
    duk_push_global_stash(ctx);
    duk_get_prop_string(ctx, -1, "comoWorkersCallBack");
    duk_push_number(ctx, (double) handle->id);
    duk_del_prop(ctx, -2);
    handle_close(handle);
    free(worker);
  }

  return 0;
}
/* Drain the closing queue, invoking each handle's close callback when
 * one is set.  Handles are NOT freed here — the _free_handle call was
 * deliberately disabled in the original; loop_start() performs the free
 * when it drains this queue itself. */
void loop_run_closing_handles(evLoop *loop){
    while (!QUEUE_EMPTY(&loop->closing_queue)) {
        QUEUE *node = QUEUE_HEAD(&loop->closing_queue);
        QUEUE_REMOVE(node);

        evHandle *h = QUEUE_DATA(node, evHandle, queue);
        assert(h);

        if (h->close != NULL)
            h->close(h);

        /* _free_handle(h); -- intentionally disabled, as in original */
    }
}
/* Wake the first state waiting on |cond|, if any.
 * Returns 1 when a state was made ready, 0 when the queue was empty. */
int ravaL_cond_signal(rava_cond_t* cond) {
  if (QUEUE_EMPTY(cond))
    return 0;

  QUEUE* node = QUEUE_HEAD(cond);
  rava_state_t* state = QUEUE_DATA(node, rava_state_t, cond);
  QUEUE_REMOVE(node);

  TRACE("READY state %p\n", state);
  ravaL_state_ready(state);
  return 1;
}
/* Blocking receive: wait on the channel's condition variable until at
 * least one item is queued, then pop the head and return its payload.
 * The popped queue node is freed here; the payload pointer is handed
 * to the caller. */
void *uv_chan_receive(uv_chan_t *chan) {
    void *payload = NULL;

    uv_mutex_lock(&chan->mutex);

    /* Standard condvar loop: re-check emptiness after every wakeup. */
    while (QUEUE_EMPTY(&chan->q))
        uv_cond_wait(&chan->cond, &chan->mutex);

    QUEUE *node = QUEUE_HEAD(&chan->q);
    uv__chan_item_t *entry = QUEUE_DATA(node, uv__chan_item_t, active_queue);
    payload = entry->data;
    QUEUE_REMOVE(node);
    free(entry);

    uv_mutex_unlock(&chan->mutex);
    return payload;
}
/* I/O callback for the loop's async wakeup fd.
 *
 * First drains the fd (each uv_async_send writes to it) until it would
 * block, then walks every async handle, firing the callback only for
 * handles whose pending flag is atomically claimed via cmpxchgi —
 * this coalesces multiple sends into one callback invocation.
 */
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  char buf[1024];
  ssize_t r;
  QUEUE queue;
  QUEUE* q;
  uv_async_t* h;

  assert(w == &loop->async_io_watcher);

  /* Drain the wakeup fd completely. */
  for (;;) {
    r = read(w->fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;          /* buffer full: more data may remain */

    if (r != -1)
      break;             /* short read: fd is drained */

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;

    if (errno == EINTR)
      continue;

    abort();             /* unexpected read error */
  }

  /* Detach the handle list so callbacks that close/start handles do not
   * invalidate this iteration; each node is re-linked before its
   * callback runs. */
  QUEUE_MOVE(&loop->async_handles, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_async_t, queue);

    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->async_handles, q);

    /* Only fire if we win the race to clear the pending flag. */
    if (cmpxchgi(&h->pending, 1, 0) == 0)
      continue;

    if (h->async_cb == NULL)
      continue;

    h->async_cb(h);
  }
}
/* Run the event loop until no active handles remain.
 *
 * type == 1 means "run once": the timeout is forced to 0 and the outer
 * loop breaks after a single iteration.  Each iteration closes (and
 * frees) handles on the closing queue, updates loop time, runs timers
 * and immediates, then either sleeps (no I/O watchers) or polls I/O.
 * Returns nonzero when active handles remain (only possible for the
 * run-once mode).
 */
int loop_start (evLoop *loop, int type){
  while (loop->active_handles){
    /* closing handles */
    QUEUE *q;
    evHandle *handle;
    while ( !QUEUE_EMPTY(&loop->closing_queue) ){
      q = QUEUE_HEAD(&(loop)->closing_queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, evHandle, queue);
      assert(handle);
      if (handle->close != NULL){
        handle->close(handle);
      }
      /* Unlike loop_run_closing_handles(), this path does free. */
      _free_handle(handle);
    }

    loop_update_time(loop);

    /* run-once polls with zero timeout; otherwise block until the
     * next timer is due. */
    int timeout;
    if (type == 1){
      timeout = 0;
    } else {
      timeout = next_timeout(loop);
    }

    loop_run_timers(loop);
    loop_run_immediate(loop);

    /* With no I/O watchers just sleep out the timeout; Sleep() takes
     * milliseconds, usleep() microseconds. */
    if (QUEUE_EMPTY(&loop->io_queue)) {
#ifdef _WIN32
      Sleep(timeout);
#else
      usleep(1000 * timeout);
#endif
    } else {
      io_poll(loop, timeout);
    }

    /* run once */
    if (type == 1){
      break;
    }
  }

  return loop->active_handles > 0;
}
/* Periodic WAN service routine: tick the driver, then pop at most one
 * queued inbound message per call.
 *
 * NOTE(review): messages are dequeued but never actually processed —
 * the handling is still a TODO.  QUEUE_DATA here is a single-argument
 * variant; presumably it maps a queue_header_t* to the payload that
 * follows it — confirm against its definition.
 */
void wan_tick() {
  wan_driver_tick();

  // check to see if we have a new message
  if(wan_queue.count > 0) {
    queue_header_t *qh;
    qh = wan_queue.head;
    wan_msg_t *msg = (wan_msg_t *) QUEUE_DATA(qh);

    // TODO: Handle Messages
    // do something with the message

    // Dequeue the message
    queue_remove(&wan_queue, (queue_header_t*) msg);
  }
}
/* Flush buffered TLS output (the out-BIO) to the TCP socket.
 *
 * No-op while a uv_write is already in flight.  When the BIO is empty
 * it pokes write_async instead (the waiting queue must also be empty).
 * Otherwise it promotes all items from the "when tcp is writing" queue
 * to the writing queue, starts one uv_write over the BIO contents, and
 * marks the transport as writing; tls__write_done_cb finishes the cycle.
 */
static void tls__write_to_tcp(tr_uv_tls_transport_t* tls) {
  QUEUE* q;
  char* ptr;
  size_t len;
  uv_buf_t buf;
  tr_uv_wi_t* wi = NULL;
  tr_uv_tcp_transport_t* tt = (tr_uv_tcp_transport_t*)tls;

  /* A previous uv_write has not completed yet; done-cb will re-enter. */
  if (tt->is_writing)
    return;

  len = BIO_pending(tls->out);
  if (len == 0) {
    assert(QUEUE_EMPTY(&tls->when_tcp_is_writing_queue));
    uv_async_send(&tt->write_async);
    return ;
  }

  /* Everything queued while TCP was busy now rides this write. */
  while(!QUEUE_EMPTY(&tls->when_tcp_is_writing_queue)) {
    q = QUEUE_HEAD(&tls->when_tcp_is_writing_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    wi = (tr_uv_wi_t* )QUEUE_DATA(q, tr_uv_wi_t, queue);
    pc_lib_log(PC_LOG_DEBUG, "tls__write_to_tcp - move wi from when tcp is writing queue to writing queue,"
        " seq_num: %u, req_id: %u", wi->seq_num, wi->req_id);
    QUEUE_INSERT_TAIL(&tt->writing_queue, q);
  }

  BIO_get_mem_data(tls->out, &ptr);

  buf.base = ptr;
  buf.len = len;

  // TODO: error handling
  tt->write_req.data = tls;
  uv_write(&tt->write_req, (uv_stream_t* )&tt->socket, &buf, 1, tls__write_done_cb);

  /* Drop the BIO contents we just handed to uv_write. */
  BIO_reset(tls->out);
  tt->is_writing = 1;
}
/* Invoke the send callback for every request on the completed queue and
 * maintain the handle's send-queue accounting.
 *
 * The UV_UDP_PROCESSING flag guards against re-entrancy (asserted on
 * entry, set for the duration).  When both queues drain, the POLLOUT
 * watcher is stopped, and the whole handle is stopped if POLLIN is not
 * active either.
 */
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!(handle->flags & UV_UDP_PROCESSING));
  handle->flags |= UV_UDP_PROCESSING;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    /* Deduct this request's bytes and count from the queued totals. */
    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    /* Free the heap buffer array unless the inline small-array was used. */
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status < 0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (QUEUE_EMPTY(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLOUT);
    if (!uv__io_active(&handle->io_watcher, UV__POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_UDP_PROCESSING;
}
/* Invoke the callback of every I/O watcher on the pending queue.
 *
 * The queue is detached onto a local head first, so watchers fed back
 * onto loop->pending_queue from inside a callback are deferred to the
 * next loop iteration rather than processed in this drain.  Returns 1
 * if any callbacks ran, 0 if the queue was already empty.
 */
static int uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);  /* reset so a later remove on this node is harmless */
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }

  return 1;
}
/* Wake every state waiting on |cond|.
 * Returns the number of states made ready (0 when none waited). */
int ravaL_cond_broadcast(rava_cond_t* cond) {
  int count = 0;

  while (!QUEUE_EMPTY(cond)) {
    QUEUE* node = QUEUE_HEAD(cond);
    rava_state_t* state = QUEUE_DATA(node, rava_state_t, cond);
    QUEUE_REMOVE(node);

    TRACE("READY state %p\n", state);
    ravaL_state_ready(state);
    count++;
  }

  return count;
}
/* Tear down the platform (CFRunLoop) half of the loop: signal the CF
 * thread to terminate, join it, destroy the synchronization primitives,
 * then free any signals that were queued but never delivered. */
void uv__platform_loop_delete(uv_loop_t* loop) {
  assert(loop->cf_loop != NULL);

  /* NULL cb/arg is the termination sentinel for the CF thread. */
  uv__cf_loop_signal(loop, NULL, NULL);
  uv_thread_join(&loop->cf_thread);
  uv_sem_destroy(&loop->cf_sem);
  uv_mutex_destroy(&loop->cf_mutex);

  /* Free any remaining data */
  for (;;) {
    if (QUEUE_EMPTY(&loop->cf_signals))
      break;

    QUEUE* node = QUEUE_HEAD(&loop->cf_signals);
    uv__cf_loop_signal_t* sig = QUEUE_DATA(node, uv__cf_loop_signal_t, member);
    QUEUE_REMOVE(node);
    free(sig);
  }
}
/* uv_queue_work completion callback for a serial-port write.
 *
 * Invokes the JS callback with (error, bytesWritten).  If the baton has
 * bytes left and no error occurred, the same work item is re-queued to
 * continue the write and ownership is retained (early return).  Otherwise
 * the item is unlinked from the global write queue, the next queued write
 * (if any) is kicked off, and the baton and its resources are destroyed.
 */
void EIO_AfterWrite(uv_work_t* req) {
  NanScope();
  QueuedWrite* queuedWrite = static_cast<QueuedWrite*>(req->data);
  WriteBaton* data = static_cast<WriteBaton*>(queuedWrite->baton);

  /* Build the (error, result) argument pair for the JS callback. */
  v8::Handle<v8::Value> argv[2];
  if(data->errorString[0]) {
    argv[0] = v8::Exception::Error(NanNew<v8::String>(data->errorString));
    argv[1] = NanUndefined();
  } else {
    argv[0] = NanUndefined();
    argv[1] = NanNew<v8::Int32>(data->result);
  }
  data->callback->Call(2, argv);

  if (data->offset < data->bufferLength && !data->errorString[0]) {
    // We're not done with this baton, so throw it right back onto the queue.
    // Don't re-push the write in the event loop if there was an error; because same error could occur again!
    // TODO: Add a uv_poll here for unix...
    uv_queue_work(uv_default_loop(), req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
    return;
  }

  /* This write is finished: remove it and start the next one, all under
   * the queue mutex. */
  uv_mutex_lock(&write_queue_mutex);
  QUEUE_REMOVE(&queuedWrite->queue);

  if (!QUEUE_EMPTY(&write_queue)) {
    // Always pull the next work item from the head of the queue
    QUEUE* head = QUEUE_HEAD(&write_queue);
    QueuedWrite* nextQueuedWrite = QUEUE_DATA(head, QueuedWrite, queue);
    uv_queue_work(uv_default_loop(), &nextQueuedWrite->req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
  }
  uv_mutex_unlock(&write_queue_mutex);

  /* Release everything owned by this completed write. */
  NanDisposePersistent(data->buffer);
  delete data->callback;
  delete data;
  delete queuedWrite;
}
/* Try to flush the UDP write queue with sendmsg().
 *
 * Each request is one datagram; on EAGAIN/EWOULDBLOCK the loop stops
 * and remaining requests wait for the next writable event.  Finished
 * requests record bytes-written or -errno in req->status, move to the
 * completed queue, and the io watcher is fed so callbacks run soon.
 */
static void uv__udp_sendmsg(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;
  struct msghdr h;
  ssize_t size;

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    assert(q != NULL);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    memset(&h, 0, sizeof h);
    h.msg_name = &req->addr;
    /* Pick the sockaddr size from the stored address family. */
    h.msg_namelen = (req->addr.ss_family == AF_INET6 ?
      sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->nbufs;

    /* Retry on signal interruption. */
    do {
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    } while (size == -1 && errno == EINTR);

    if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
      break;

    req->status = (size == -1 ? -errno : size);

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
    uv__io_feed(handle->loop, &handle->io_watcher);
  }
}
/* Fire the callback of every async handle whose pending flag we can
 * atomically claim.  The handle list is detached first so callbacks
 * that close or start async handles cannot invalidate the iteration;
 * each node is re-linked onto the loop before its callback runs.
 * nevents is unused. */
static void uv__async_event(uv_loop_t* loop, struct uv__async* w, unsigned int nevents) {
  QUEUE pending;
  QUEUE* node;
  uv_async_t* handle;

  QUEUE_MOVE(&loop->async_handles, &pending);

  for (;;) {
    if (QUEUE_EMPTY(&pending))
      break;

    node = QUEUE_HEAD(&pending);
    handle = QUEUE_DATA(node, uv_async_t, queue);

    QUEUE_REMOVE(node);
    QUEUE_INSERT_TAIL(&loop->async_handles, node);

    /* cmpxchgi must run for every handle: it consumes the pending flag.
     * Only a handle we claimed, and that has a callback, fires. */
    if (cmpxchgi(&handle->pending, 1, 0) != 0 && handle->async_cb != NULL)
      handle->async_cb(handle);
  }
}