void tls__reset(tr_uv_tcp_transport_t* tt)
{
    int ret;
    QUEUE* q;
    tr_uv_tls_transport_t* tls = (tr_uv_tls_transport_t* )tt;

    pc_lib_log(PC_LOG_DEBUG, "tls__reset - reset ssl");
    if (!SSL_clear(tls->tls)) {
        pc_lib_log(PC_LOG_WARN, "tls__reset - ssl clear error: %s",
                   ERR_error_string(ERR_get_error(), NULL));
    }

    ret = BIO_reset(tls->in);
    assert(ret == 1);

    ret = BIO_reset(tls->out);
    assert(ret == 1);

    // Move the pending should-retry write item back to the writing queue
    // so that tcp__reset can recycle it.
    if (tls->should_retry) {
        pc_lib_log(PC_LOG_DEBUG, "tls__reset - move should retry wi to writing queue,"
                   " seq_num: %u, req_id: %u",
                   tls->should_retry->seq_num, tls->should_retry->req_id);

        QUEUE_INIT(&tls->should_retry->queue);
        QUEUE_INSERT_TAIL(&tt->writing_queue, &tls->should_retry->queue);
        tls->should_retry = NULL;
    }

    if (tls->retry_wb) {
        pc_lib_free(tls->retry_wb);
        tls->retry_wb = NULL;
        tls->retry_wb_len = 0;
    }

    // tcp__reset will recycle the following write items as well.
    while (!QUEUE_EMPTY(&tls->when_tcp_is_writing_queue)) {
        q = QUEUE_HEAD(&tls->when_tcp_is_writing_queue);
        QUEUE_REMOVE(q);
        QUEUE_INIT(q);
        QUEUE_INSERT_TAIL(&tt->writing_queue, q);
    }

    tcp__reset(tt);
}
/*----------------------------------------------------------------------------*/
P_CMD_INFO_T cmdBufAllocateCmdInfo(IN P_ADAPTER_T prAdapter, IN UINT_32 u4Length)
{
    P_CMD_INFO_T prCmdInfo;
    KAL_SPIN_LOCK_DECLARATION();

    DEBUGFUNC("cmdBufAllocateCmdInfo");

    ASSERT(prAdapter);

    KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_CMD_RESOURCE);
    QUEUE_REMOVE_HEAD(&prAdapter->rFreeCmdList, prCmdInfo, P_CMD_INFO_T);
    KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_CMD_RESOURCE);

    if (prCmdInfo) {
        /* Setup initial value in CMD_INFO_T */
        /* Start address of allocated memory */
        prCmdInfo->pucInfoBuffer = cnmMemAlloc(prAdapter, RAM_TYPE_BUF, u4Length);

        if (prCmdInfo->pucInfoBuffer == NULL) {
            KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_CMD_RESOURCE);
            QUEUE_INSERT_TAIL(&prAdapter->rFreeCmdList, &prCmdInfo->rQueEntry);
            KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_CMD_RESOURCE);

            prCmdInfo = NULL;
        } else {
            prCmdInfo->u2InfoBufLen = 0;
            prCmdInfo->fgIsOid = FALSE;
            prCmdInfo->fgDriverDomainMCR = FALSE;
        }
    }

    return prCmdInfo;
} /* end of cmdBufAllocateCmdInfo() */
/*----------------------------------------------------------------------------*/
VOID cnmMgtPktFree(P_ADAPTER_T prAdapter, P_MSDU_INFO_T prMsduInfo)
{
    P_QUE_T prQueList;
    KAL_SPIN_LOCK_DECLARATION();

    ASSERT(prAdapter);
    ASSERT(prMsduInfo);

    prQueList = &prAdapter->rTxCtrl.rFreeMsduInfoList;

    ASSERT(prMsduInfo->prPacket);
    if (prMsduInfo->prPacket) {
        cnmMemFree(prAdapter, prMsduInfo->prPacket);
        prMsduInfo->prPacket = NULL;
    }

    KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
    //added by zhaoyun.wu for 438323,438318,438313 mtk_patch 2015.7.24 begin
    prMsduInfo->fgIsBasicRate = FALSE;
    //added by zhaoyun.wu for 438323,438318,438313 mtk_patch 2015.7.24 end
    QUEUE_INSERT_TAIL(prQueList, &prMsduInfo->rQueEntry);
    KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
}
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents) {
    if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) {
      QUEUE_REMOVE(&w->watcher_queue);
      QUEUE_INIT(&w->watcher_queue);
    }
    return;
  }
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}
int ravaL_cond_wait(rava_cond_t* cond, rava_state_t* curr) {
  QUEUE_INSERT_TAIL(cond, &curr->cond);
  TRACE("SUSPEND state %p\n", curr);
  return ravaL_state_suspend(curr);
}
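/* For context: a state parked by ravaL_cond_wait() is woken by a matching
 * signal routine that pops the head of the condition queue and reschedules
 * it. The sketch below is illustrative only; it assumes a ravaL_state_ready()
 * helper exists as the counterpart of ravaL_state_suspend(). */
int ravaL_cond_signal(rava_cond_t* cond) {
  QUEUE* q;
  rava_state_t* s;
  if (QUEUE_EMPTY(cond))
    return 0; /* no waiters */
  q = QUEUE_HEAD(cond);
  s = QUEUE_DATA(q, rava_state_t, cond); /* recover the state from its node */
  QUEUE_REMOVE(q);
  ravaL_state_ready(s); /* assumed helper: reschedules the suspended state */
  return 1;
}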
static void post(QUEUE* q) {
  if (initialized == 0)
    return;

  uv_mutex_lock(&mutex);
  QUEUE_INSERT_TAIL(&wq, q);
  uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}
static void post(QUEUE* q) {
  uv_mutex_lock(&mutex);
  QUEUE_INSERT_TAIL(&wq, q);
  if (idle_threads > 0)
    uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}
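/* The consumer side of the two post() variants above, sketched for clarity:
 * worker threads sleep on the condition variable and drain wq in FIFO order.
 * This is a simplified take on libuv's threadpool worker; the real one
 * dispatches struct uv__work items and handles an exit sentinel, both elided
 * here, and process(q) is a hypothetical stand-in for the actual dispatch. */
static void worker(void* arg) {
  QUEUE* q;

  for (;;) {
    uv_mutex_lock(&mutex);

    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1; /* lets post() skip the signal when nobody sleeps */
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q); /* the node may be re-posted later; keep it consistent */

    uv_mutex_unlock(&mutex);

    process(q); /* hypothetical: run the work item linked at q */
  }
}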
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
void queue_push(Queue *queue, Event event)
{
  queue_item *item = malloc(sizeof(queue_item));
  if (item == NULL)
    abort(); /* treat allocation failure as fatal rather than deref NULL */
  item->item = event;
  QUEUE_INIT(&item->node);
  QUEUE_INSERT_TAIL(&queue->headtail, &item->node);
}
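/* A plausible dequeue counterpart to queue_push(), sketched for symmetry:
 * pop the oldest node, recover the owning queue_item via QUEUE_DATA, free
 * the wrapper, and hand the event back. queue_pop is not part of the code
 * above, and the sketch assumes the caller checks for an empty queue first. */
Event queue_pop(Queue *queue)
{
  QUEUE *q = QUEUE_HEAD(&queue->headtail);
  queue_item *item = QUEUE_DATA(q, queue_item, node);
  Event event = item->item;

  QUEUE_REMOVE(q);
  free(item);
  return event;
}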
evt_tls_t *getSSL(evt_ctx_t *d_eng)
{
    evt_tls_t *con = malloc(sizeof(evt_tls_t));
    if ( !con ) {
        return NULL;
    }
    memset( con, 0, sizeof *con);

    SSL *ssl = SSL_new(d_eng->ctx);
    if ( !ssl ) {
        free(con); /* avoid leaking the connection object */
        return NULL;
    }
    con->ssl = ssl;

    //use default buf size for now.
    BIO_new_bio_pair(&(con->ssl_bio_), 0, &(con->app_bio_), 0);

    /* SSL reads and writes through its end of the BIO pair; the application
     * talks to app_bio_. */
    SSL_set_bio(con->ssl, con->ssl_bio_, con->ssl_bio_);

    QUEUE_INIT(&(con->q));
    QUEUE_INSERT_TAIL(&(d_eng->live_con), &(con->q));

    con->writer = d_eng->writer;
    return con;
}
int uv_loop_fork(uv_loop_t* loop) {
  int err;
  unsigned int i;
  uv__io_t* w;

  err = uv__io_fork(loop);
  if (err)
    return err;

  err = uv__async_fork(loop);
  if (err)
    return err;

  err = uv__signal_loop_fork(loop);
  if (err)
    return err;

  /* Rearm all the watchers that aren't re-queued by the above. */
  for (i = 0; i < loop->nwatchers; i++) {
    w = loop->watchers[i];
    if (w == NULL)
      continue;

    if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
      w->events = 0; /* Force re-registration in uv__io_poll. */
      QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    }
  }

  return 0;
}
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    req->status = -ECANCELED;
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }

  uv__udp_run_completed(handle);

  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
static int rava_fiber_join(lua_State* L) {
  rava_fiber_t* self = (rava_fiber_t*)luaL_checkudata(L, 1, RAVA_PROCESS_FIBER);
  rava_state_t* curr = (rava_state_t*)ravaL_state_self(L);
  TRACE("joining fiber[%p], from [%p]\n", self, curr);
  assert((rava_state_t*)self != curr);

  if (self->flags & RAVA_STATE_DEAD) {
    /* seen join after termination */
    TRACE("join after termination\n");
    return ravaL_state_xcopy((rava_state_t*)self, curr);
  }

  QUEUE_INSERT_TAIL(&self->rouse, &curr->join);
  ravaL_fiber_ready(self);

  TRACE("calling ravaL_state_suspend on %p\n", curr);
  if (curr->type == RAVA_STATE_TYPE_FIBER) {
    return ravaL_state_suspend(curr);
  } else {
    ravaL_state_suspend(curr);
    return ravaL_state_xcopy((rava_state_t*)self, curr);
  }
}
/*----------------------------------------------------------------------------*/
VOID cmdBufFreeCmdInfo(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo)
{
    KAL_SPIN_LOCK_DECLARATION();

    DEBUGFUNC("cmdBufFreeCmdInfo");

    ASSERT(prAdapter);
    ASSERT(prCmdInfo);

    if (prCmdInfo) {
        if (prCmdInfo->pucInfoBuffer) {
            cnmMemFree(prAdapter, prCmdInfo->pucInfoBuffer);
            prCmdInfo->pucInfoBuffer = NULL;
        }

        KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_CMD_RESOURCE);
        QUEUE_INSERT_TAIL(&prAdapter->rFreeCmdList, &prCmdInfo->rQueEntry);
        KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_CMD_RESOURCE);
    }

    return;
} /* end of cmdBufFreeCmdInfo() */
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct uv__inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char* p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  while (1) {
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct uv__inotify_event*)p;

      events = 0;
      if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
    }
  }
}
int timer_close(evHandle *handle) {
  handle->flags |= HANDLE_CLOSING;
  timer_stop(handle);
  QUEUE_REMOVE(&handle->queue);
  QUEUE_INSERT_TAIL(&handle->loop->closing_queue, &handle->queue);
  return 0;
}
void uv_chan_send(uv_chan_t *chan, void *data) {
    uv__chan_item_t *item = (uv__chan_item_t *)malloc(sizeof(uv__chan_item_t));
    if (item == NULL)
        abort(); /* treat allocation failure as fatal rather than deref NULL */
    item->data = data;

    uv_mutex_lock(&chan->mutex);
    QUEUE_INSERT_TAIL(&chan->q, &item->active_queue);
    uv_cond_signal(&chan->cond);
    uv_mutex_unlock(&chan->mutex);
}
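/* A plausible blocking receive to pair with uv_chan_send(), sketched under
 * the assumption that the channel's mutex and cond guard the same queue;
 * the name uv_chan_recv and its signature are illustrative, not necessarily
 * the library's actual API. */
void *uv_chan_recv(uv_chan_t *chan) {
    uv__chan_item_t *item;
    QUEUE *q;
    void *data;

    uv_mutex_lock(&chan->mutex);
    while (QUEUE_EMPTY(&chan->q)) /* sleep until a sender signals */
        uv_cond_wait(&chan->cond, &chan->mutex);

    q = QUEUE_HEAD(&chan->q);
    item = QUEUE_DATA(q, uv__chan_item_t, active_queue);
    QUEUE_REMOVE(q);
    data = item->data;
    free(item);
    uv_mutex_unlock(&chan->mutex);
    return data;
}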
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC);
  handle->async_sent = 0;
  handle->async_cb = async_cb;

  QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);

  return 0;
}
static void uv__udp_run_pending(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;
  struct msghdr h;
  ssize_t size;

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    assert(q != NULL);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    memset(&h, 0, sizeof h);
    h.msg_name = &req->addr;
    h.msg_namelen = (req->addr.sin6_family == AF_INET6 ?
                     sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->bufcnt;

    do {
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    } while (size == -1 && errno == EINTR);

    /* TODO try to write once or twice more in the
     * hope that the socket becomes readable again?
     */
    if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
      break;

    req->status = (size == -1 ? -errno : size);

#ifndef NDEBUG
    /* Sanity check. */
    if (size != -1) {
      ssize_t nbytes;
      int i;

      for (nbytes = i = 0; i < req->bufcnt; i++)
        nbytes += req->bufs[i].len;

      assert(size == nbytes);
    }
#endif

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }
}
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  struct watcher_list* w;
  int events;
  int err;
  int wd;

  if (uv__is_active(handle))
    return -EINVAL;

  err = init_inotify(handle->loop);
  if (err)
    return err;

  events = UV__IN_ATTRIB
         | UV__IN_CREATE
         | UV__IN_MODIFY
         | UV__IN_DELETE
         | UV__IN_DELETE_SELF
         | UV__IN_MOVE_SELF
         | UV__IN_MOVED_FROM
         | UV__IN_MOVED_TO;

  wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events);
  if (wd == -1)
    return -errno;

  w = find_watcher(handle->loop, wd);
  if (w)
    goto no_insert;

  w = uv__malloc(sizeof(*w) + strlen(path) + 1);
  if (w == NULL)
    return -ENOMEM;

  w->wd = wd;
  w->path = strcpy((char*)(w + 1), path);
  QUEUE_INIT(&w->watchers);
  w->iterating = 0;
  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);

no_insert:
  uv__handle_start(handle);
  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  handle->path = w->path;
  handle->cb = cb;
  handle->wd = wd;

  return 0;
}
void tls__write_done_cb(uv_write_t* w, int status)
{
    tr_uv_wi_t* wi = NULL;
    QUEUE* q;
    GET_TLS(w);

    tt->is_writing = 0;

    if (status) {
        pc_lib_log(PC_LOG_ERROR, "tls__write_done_cb - uv_write callback error: %s",
                   uv_strerror(status));
    }
    status = status ? PC_RC_ERROR : PC_RC_OK;

    pc_mutex_lock(&tt->wq_mutex);
    while (!QUEUE_EMPTY(&tt->writing_queue)) {
        q = QUEUE_HEAD(&tt->writing_queue);
        QUEUE_REMOVE(q);
        QUEUE_INIT(q);

        wi = (tr_uv_wi_t* )QUEUE_DATA(q, tr_uv_wi_t, queue);

        if (!status && TR_UV_WI_IS_RESP(wi->type)) {
            pc_lib_log(PC_LOG_DEBUG, "tls__write_done_cb - move wi from writing queue"
                       " to resp pending queue, seq_num: %u, req_id: %u",
                       wi->seq_num, wi->req_id);
            QUEUE_INSERT_TAIL(&tt->resp_pending_queue, q);
            continue;
        }

        pc_lib_free(wi->buf.base);
        wi->buf.base = NULL;
        wi->buf.len = 0;

        if (TR_UV_WI_IS_NOTIFY(wi->type)) {
            pc_trans_sent(tt->client, wi->seq_num, status);
        }

        if (TR_UV_WI_IS_RESP(wi->type)) {
            pc_trans_resp(tt->client, wi->req_id, status, NULL);
        }

        // if internal, do nothing here.

        if (PC_IS_PRE_ALLOC(wi->type)) {
            PC_PRE_ALLOC_SET_IDLE(wi->type);
        } else {
            pc_lib_free(wi);
        }
    }
    pc_mutex_unlock(&tt->wq_mutex);

    tls__write_to_tcp(tls);
}
static void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
  uv_write_t* req;
  QUEUE* q;
  while (!QUEUE_EMPTY(&stream->write_queue)) {
    q = QUEUE_HEAD(&stream->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_write_t, queue);
    req->error = error;

    QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
  }
}
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (err)
    return err;

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL)
    return -ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) {
    uv__udp_sendmsg(handle);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  }

  return 0;
}
void ravaL_thread_enqueue(rava_thread_t* self, rava_fiber_t* fiber) {
  int need_async = QUEUE_EMPTY(&self->rouse);
  QUEUE_INSERT_TAIL(&self->rouse, &fiber->queue);
  if (need_async) {
    TRACE("need async\n");
    /* interrupt the event loop (the sequence of these two calls matters) */
    uv_async_send(&self->async);
    /* make sure we loop at least once more */
    uv_ref((uv_handle_t*)&self->async);
  }
}
/*----------------------------------------------------------------------------*/
VOID cmdBufInitialize(IN P_ADAPTER_T prAdapter)
{
    P_CMD_INFO_T prCmdInfo;
    UINT_32 i;

    ASSERT(prAdapter);

    QUEUE_INITIALIZE(&prAdapter->rFreeCmdList);

    for (i = 0; i < CFG_TX_MAX_CMD_PKT_NUM; i++) {
        prCmdInfo = &prAdapter->arHifCmdDesc[i];
        QUEUE_INSERT_TAIL(&prAdapter->rFreeCmdList, &prCmdInfo->rQueEntry);
    }
} /* end of cmdBufInitialize() */
/*----------------------------------------------------------------------------*/
P_MSDU_INFO_T cnmMgtPktAlloc(P_ADAPTER_T prAdapter, UINT_32 u4Length)
{
    P_MSDU_INFO_T prMsduInfo;
    P_QUE_T prQueList;
    KAL_SPIN_LOCK_DECLARATION();

    ASSERT(prAdapter);
    prQueList = &prAdapter->rTxCtrl.rFreeMsduInfoList;

    /* Get a free MSDU_INFO_T */
    KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
    QUEUE_REMOVE_HEAD(prQueList, prMsduInfo, P_MSDU_INFO_T);
    KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);

    if (prMsduInfo) {
        prMsduInfo->prPacket = cnmMemAlloc(prAdapter, RAM_TYPE_BUF, u4Length);
        prMsduInfo->eSrc = TX_PACKET_MGMT;

        if (prMsduInfo->prPacket == NULL) {
            KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
            QUEUE_INSERT_TAIL(prQueList, &prMsduInfo->rQueEntry);
            KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
            prMsduInfo = NULL;
        }
    }

#if DBG
    if (prMsduInfo == NULL) {
        DBGLOG(MEM, WARN, ("\n"));
        DBGLOG(MEM, WARN, ("MgtDesc#=%ld\n", prQueList->u4NumElem));

#if CFG_DBG_MGT_BUF
        DBGLOG(MEM, WARN, ("rMgtBufInfo: alloc#=%ld, free#=%ld, null#=%ld\n",
                           prAdapter->rMgtBufInfo.u4AllocCount,
                           prAdapter->rMgtBufInfo.u4FreeCount,
                           prAdapter->rMgtBufInfo.u4AllocNullCount));
#endif

        DBGLOG(MEM, WARN, ("\n"));
    }
#endif

    return prMsduInfo;
}
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  int err;

  err = uv__async_start(loop);
  if (err)
    return err;

  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
  handle->async_cb = async_cb;
  handle->pending = 0;

  QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);

  return 0;
}
void handle_close(evHandle *h) {
  if ((h->flags & HANDLE_CLOSING) != 0)
    return;

  if (h->type == EV_IO) {
    io_close(h);
  } else if (h->type == EV_TIMER) {
    timer_close(h);
  } else {
    handle_stop(h);
    h->flags |= HANDLE_CLOSING;
    QUEUE_REMOVE(&h->queue);
    QUEUE_INSERT_TAIL(&h->loop->closing_queue, &h->queue);
    /* closing a handle of unknown type */
    //assert(0 && "CLOSING UNKNOWN HANDLE\n");
  }
}
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  char buf[1024];
  ssize_t r;
  QUEUE queue;
  QUEUE* q;
  uv_async_t* h;

  assert(w == &loop->async_io_watcher);

  for (;;) {
    r = read(w->fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;

    if (r != -1)
      break;

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;

    if (errno == EINTR)
      continue;

    abort();
  }

  QUEUE_MOVE(&loop->async_handles, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_async_t, queue);

    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->async_handles, q);

    if (cmpxchgi(&h->pending, 1, 0) == 0)
      continue;

    if (h->async_cb == NULL)
      continue;

    h->async_cb(h);
  }
}
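/* Usage sketch for the async machinery above: uv_async_init() registers the
 * handle on loop->async_handles, uv_async_send() may be called from any
 * thread, and uv__async_io() then runs the callback on the loop thread.
 * Minimal example using only public libuv API; error checks elided. */
#include <uv.h>

static void async_cb(uv_async_t* handle) {
  /* Runs on the loop thread after uv_async_send(); close the handle so
   * uv_run() can return. */
  uv_close((uv_handle_t*) handle, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_async_t async;

  uv_async_init(loop, &async, async_cb);
  uv_async_send(&async); /* thread-safe; multiple sends may coalesce */

  return uv_run(loop, UV_RUN_DEFAULT);
}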
void uv__cf_loop_signal(uv_loop_t* loop, cf_loop_signal_cb cb, void* arg) {
  uv__cf_loop_signal_t* item;

  item = malloc(sizeof(*item));
  /* XXX: Fail */
  if (item == NULL)
    abort();

  item->arg = arg;
  item->cb = cb;

  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
  uv_mutex_unlock(&loop->cf_mutex);

  assert(loop->cf_loop != NULL);
  CFRunLoopSourceSignal(loop->cf_cb);
  CFRunLoopWakeUp(loop->cf_loop);
}