static void uv__cf_loop_cb(void* arg) { uv_loop_t* loop; QUEUE* item; QUEUE split_head; uv__cf_loop_signal_t* s; loop = arg; uv_mutex_lock(&loop->cf_mutex); QUEUE_INIT(&split_head); if (!QUEUE_EMPTY(&loop->cf_signals)) { QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals); QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head); } uv_mutex_unlock(&loop->cf_mutex); while (!QUEUE_EMPTY(&split_head)) { item = QUEUE_HEAD(&split_head); s = QUEUE_DATA(item, uv__cf_loop_signal_t, member); /* This was a termination signal */ if (s->cb == NULL) CFRunLoopStop(loop->cf_loop); else s->cb(s->arg); QUEUE_REMOVE(item); free(s); } }
void uv__cf_loop_cb(void* arg) { uv_loop_t* loop; QUEUE* item; QUEUE split_head; uv__cf_loop_signal_t* s; loop = arg; uv_mutex_lock(&loop->cf_mutex); QUEUE_INIT(&split_head); if (!QUEUE_EMPTY(&loop->cf_signals)) { QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals); QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head); } uv_mutex_unlock(&loop->cf_mutex); while (!QUEUE_EMPTY(&split_head)) { item = QUEUE_HEAD(&split_head); s = QUEUE_DATA(item, uv__cf_loop_signal_t, member); s->cb(s->arg); QUEUE_REMOVE(item); free(s); } }
/* Async callback fired when thread-pool work completes: detaches the
 * finished-work list from loop->wq and invokes each done callback. */
void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  QUEUE* q;
  QUEUE wq;

  loop = container_of(handle, uv_loop_t, wq_async);
  QUEUE_INIT(&wq);

  /* BUG FIX: the lock/unlock pair around loop->wq was commented out.
   * Worker threads append to loop->wq concurrently, so inspecting and
   * splitting the list without wq_mutex is a data race. */
  uv_mutex_lock(&loop->wq_mutex);
  if (!QUEUE_EMPTY(&loop->wq)) {
    q = QUEUE_HEAD(&loop->wq);
    QUEUE_SPLIT(&loop->wq, q, &wq);
  }
  uv_mutex_unlock(&loop->wq_mutex);

  /* Run callbacks on the detached list; no lock needed from here on. */
  while (!QUEUE_EMPTY(&wq)) {
    q = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(q);
    w = container_of(q, struct uv__work, wq);
    w->done(w, 0, NULL, 0);
  }
}
/* Runs on the loop thread when the thread pool signals completion:
 * detaches the finished-work list under wq_mutex and fires each
 * request's done callback, reporting UV_ECANCELED for cancelled jobs. */
void uv__work_done(uv_async_t* handle) {
  struct uv__work* work;
  uv_loop_t* loop;
  QUEUE* node;
  QUEUE batch;
  int status;

  loop = container_of(handle, uv_loop_t, wq_async);
  QUEUE_INIT(&batch);

  uv_mutex_lock(&loop->wq_mutex);
  if (!QUEUE_EMPTY(&loop->wq)) {
    node = QUEUE_HEAD(&loop->wq);
    QUEUE_SPLIT(&loop->wq, node, &batch);
  }
  uv_mutex_unlock(&loop->wq_mutex);

  for (;;) {
    if (QUEUE_EMPTY(&batch))
      break;

    node = QUEUE_HEAD(&batch);
    QUEUE_REMOVE(node);

    work = container_of(node, struct uv__work, wq);
    status = (work->work == uv__cancelled) ? UV_ECANCELED : 0;
    work->done(work, status);
  }
}
/**
 * @brief default NPE event processing callback
 *
 * @param npeID ID of the NPE that generated the event
 * @param msg NPE message (encapsulated event)
 *
 * Creates an event object on the Ethernet event processor queue
 * and signals the new event by incrementing the event queue semaphore.
 * Events are processed by @ref ixEthDBEventProcessorLoop() which runs
 * at user level.
 *
 * Runs in NPE message-handler (interrupt) context, hence the IRQ trace
 * macros and the fixture lock around the queue update.
 *
 * @see ixEthDBEventProcessorLoop()
 *
 * @warning do not call directly
 *
 * @internal
 */
IX_ETH_DB_PUBLIC
void ixEthDBNPEEventCallback(IxNpeMhNpeId npeID, IxNpeMhMessage msg)
{
    PortEvent *local_event;

    IX_ETH_DB_IRQ_EVENTS_TRACE("DB: (Events) new event received by processor callback from port %d, id 0x%X\n",
        IX_ETHNPE_NODE_AND_PORT_TO_PHYSICAL_ID(npeID,0), NPE_MSG_ID(msg), 0, 0, 0, 0);

    if (CAN_ENQUEUE(&eventQueue))
    {
        TEST_FIXTURE_LOCK_EVENT_QUEUE;

        /* QUEUE_HEAD here yields the next free slot to write into
         * (push-style queue), not an element to dequeue. */
        local_event = QUEUE_HEAD(&eventQueue);

        /* create event structure on queue */
        local_event->eventType = NPE_MSG_ID(msg);
        local_event->portID = IX_ETHNPE_NODE_AND_PORT_TO_PHYSICAL_ID(npeID,0);

        /* update queue */
        PUSH_UPDATE_QUEUE(&eventQueue);

        TEST_FIXTURE_UNLOCK_EVENT_QUEUE;

        IX_ETH_DB_IRQ_EVENTS_TRACE("DB: (Events) Waking up main processor loop...\n", 0, 0, 0, 0, 0, 0);

        /* increment event queue semaphore so the user-level processor
         * loop wakes up and consumes the event */
        ixOsalSemaphorePost(&eventQueueSemaphore);
    }
    else
    {
        /* queue full: the event is dropped, only a trace is emitted */
        IX_ETH_DB_IRQ_EVENTS_TRACE("DB: (Events) Warning: could not enqueue event (overflow)\n", 0, 0, 0, 0, 0, 0);
    }
}
/* Fires the completion callback for every finished write request on the
 * stream, releasing per-request buffer bookkeeping first. */
static void uv__write_callbacks(uv_stream_t* stream) {
  QUEUE* node;
  uv_write_t* req;

  for (;;) {
    if (QUEUE_EMPTY(&stream->write_completed_queue))
      break;

    /* Detach the next completed request. */
    node = QUEUE_HEAD(&stream->write_completed_queue);
    req = QUEUE_DATA(node, uv_write_t, queue);
    QUEUE_REMOVE(node);
    uv__req_unregister(stream->loop, req);

    if (req->bufs != NULL) {
      stream->write_queue_size -= uv__write_req_size(req);
      if (req->bufs != req->bufsml)
        free(req->bufs);
      req->bufs = NULL;
    }

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb)
      req->cb(req, req->error);
  }

  assert(QUEUE_EMPTY(&stream->write_completed_queue));
}
/* Completes the close of a UDP handle: every still-pending send is
 * marked cancelled and routed through the normal completion path. */
void uv__udp_finish_close(uv_udp_t* handle) {
  QUEUE* node;
  uv_udp_send_t* sreq;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  /* Move unsent requests onto the completed queue with -ECANCELED so
   * their callbacks fire via uv__udp_run_completed(). */
  for (;;) {
    if (QUEUE_EMPTY(&handle->write_queue))
      break;

    node = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(node);

    sreq = QUEUE_DATA(node, uv_udp_send_t, queue);
    sreq->status = -ECANCELED;
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &sreq->queue);
  }

  uv__udp_run_completed(handle);

  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
/* Final teardown of a UDP handle: first flushes already-completed
 * requests, then cancels (and frees) everything left in the write
 * queue, reporting -ECANCELED to each callback. */
void uv__udp_finish_close(uv_udp_t* handle) {
  QUEUE* node;
  uv_udp_send_t* sreq;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  uv__udp_run_completed(handle);

  for (;;) {
    if (QUEUE_EMPTY(&handle->write_queue))
      break;

    node = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(node);

    sreq = QUEUE_DATA(node, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, sreq);

    /* Release heap-allocated iovecs; small counts live in bufsml. */
    if (sreq->bufs != sreq->bufsml)
      free(sreq->bufs);
    sreq->bufs = NULL;

    if (sreq->send_cb != NULL)
      sreq->send_cb(sreq, -ECANCELED);
  }

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
/* Invokes the user callback for every finished UDP send request and
 * releases the request's buffer storage. */
static void uv__udp_run_completed(uv_udp_t* handle) {
  QUEUE* node;
  uv_udp_send_t* sreq;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    node = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(node);

    sreq = QUEUE_DATA(node, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, sreq);

    if (sreq->bufs != sreq->bufsml)
      free(sreq->bufs);
    sreq->bufs = NULL;

    if (sreq->send_cb == NULL)
      continue;

    /* status >= 0 == bytes written; status < 0 == negative errno.
     * Successful sends report 0, not the byte count. */
    sreq->send_cb(sreq, sreq->status >= 0 ? 0 : sreq->status);
  }
}
/* Drains the loop's inotify fd and dispatches each event to the
 * fs_event watchers registered for that watch descriptor.
 *
 * loop   - owning loop (provides inotify_fd and the watcher lookup)
 * dummy  - io watcher (unused; the fd is read via loop->inotify_fd)
 * events - poll events (unused; reused below as the UV event mask)
 */
static void uv__inotify_read(uv_loop_t* loop, uv__io_t* dummy, unsigned int events) {
  const struct uv__inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  while (1) {
    /* Retry reads interrupted by signals. */
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      /* Nothing left to read; any other errno is a programming error. */
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs.  Each record is a
     * fixed header followed by e->len bytes of (optional) filename. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct uv__inotify_event*)p;

      /* Map inotify mask bits onto libuv's two-event model. */
      events = 0;
      if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* Rotate watchers through a local queue so a callback that stops
       * its watcher (removing it from w->watchers) cannot corrupt the
       * iteration. */
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
    }
  }
}
/* Removes the node at the top of the stack, frees it, and returns the
 * Token it carried. */
static Token stack_pop(QUEUE *stack) {
  QUEUE *head = QUEUE_HEAD(stack);
  queue_item *node = queue_node_data(head);
  Token tok;

  QUEUE_REMOVE(&node->node);
  tok = node->item;
  free(node);
  return tok;
}
/* Flushes as many queued datagrams as the socket accepts right now.
 * Stops at EAGAIN/EWOULDBLOCK; each request that got a definitive
 * result (success or hard error) is moved to the completed queue with
 * its status recorded. */
static void uv__udp_run_pending(uv_udp_t* handle) {
  uv_udp_send_t* sreq;
  QUEUE* node;
  struct msghdr hdr;
  ssize_t nwritten;

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    node = QUEUE_HEAD(&handle->write_queue);
    assert(node != NULL);

    sreq = QUEUE_DATA(node, uv_udp_send_t, queue);
    assert(sreq != NULL);

    memset(&hdr, 0, sizeof hdr);
    hdr.msg_name = &sreq->addr;
    hdr.msg_namelen = (sreq->addr.sin6_family == AF_INET6 ?
        sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
    hdr.msg_iov = (struct iovec*) sreq->bufs;
    hdr.msg_iovlen = sreq->bufcnt;

    do
      nwritten = sendmsg(handle->io_watcher.fd, &hdr, 0);
    while (nwritten == -1 && errno == EINTR);

    /* TODO try to write once or twice more in the
     * hope that the socket becomes readable again?
     */
    if (nwritten == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
      break;

    sreq->status = (nwritten == -1 ? -errno : nwritten);

#ifndef NDEBUG
    /* Sanity check: the kernel must have written every byte. */
    if (nwritten != -1) {
      ssize_t total;
      int i;

      for (total = i = 0; i < sreq->bufcnt; i++)
        total += sreq->bufs[i].len;
      assert(nwritten == total);
    }
#endif

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&sreq->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &sreq->queue);
  }
}
/* Drains the immediate-handle queue, invoking each handle's callback. */
static void loop_run_immediate(evLoop *loop) {
  evHandle *h;
  QUEUE *node;

  for (;;) {
    if (QUEUE_EMPTY(&loop->handle_queue))
      break;

    node = QUEUE_HEAD(&loop->handle_queue);
    QUEUE_REMOVE(node);

    h = QUEUE_DATA(node, evHandle, queue);
    assert(h);
    h->cb(h);
  }
}
/* Dequeues the front event, frees its container node, and returns the
 * event by value. */
static Event queue_pop(Queue *queue) {
  QUEUE *head = QUEUE_HEAD(&queue->headtail);
  queue_item *node = queue_node_data(head);
  Event ev;

  QUEUE_REMOVE(&node->node);
  ev = node->item;
  free(node);
  return ev;
}
/* Discards every item still queued on the channel.
 * NOTE(review): only the queue nodes are freed, not item->data — confirm
 * that callers own the payloads or this drops them without cleanup. */
void uv_chan_clear(uv_chan_t *chan) {
  uv__chan_item_t *item;
  QUEUE *node;

  uv_mutex_lock(&chan->mutex);
  while (!QUEUE_EMPTY(&chan->q)) {
    node = QUEUE_HEAD(&chan->q);
    item = QUEUE_DATA(node, uv__chan_item_t, active_queue);
    QUEUE_REMOVE(node);
    free(item);
  }
  uv_mutex_unlock(&chan->mutex);
}
/* uv_write completion for the TLS transport.  Drains the writing queue:
 * successful response-type items move to the resp-pending queue (they
 * wait for a server reply); everything else is reported to the client
 * and recycled.  Finally kicks the next TLS flush. */
void tls__write_done_cb(uv_write_t* w, int status) {
  tr_uv_wi_t* wi = NULL;
  int i;  /* NOTE(review): unused */
  QUEUE* q;

  /* GET_TLS declares tt/tls from the write request's data pointer. */
  GET_TLS(w);

  tt->is_writing = 0;

  if (status) {
    pc_lib_log(PC_LOG_ERROR, "tcp__write_done_cb - uv_write callback error: %s", uv_strerror(status));
  }
  /* From here on, status is a pc result code, not a uv error. */
  status = status ? PC_RC_ERROR : PC_RC_OK;

  pc_mutex_lock(&tt->wq_mutex);
  while(!QUEUE_EMPTY(&tt->writing_queue)) {
    q = QUEUE_HEAD(&tt->writing_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    wi = (tr_uv_wi_t* )QUEUE_DATA(q, tr_uv_wi_t, queue);

    /* Responses that were written successfully now await the server's
     * reply: park them instead of completing them. */
    if (!status && TR_UV_WI_IS_RESP(wi->type)) {
      pc_lib_log(PC_LOG_DEBUG, "tls__write_to_tcp - move wi from writing queue to resp pending queue,"
          " seq_num: %u, req_id: %u", wi->seq_num, wi->req_id);
      QUEUE_INSERT_TAIL(&tt->resp_pending_queue, q);
      continue;
    };

    pc_lib_free(wi->buf.base);
    wi->buf.base = NULL;
    wi->buf.len = 0;

    if (TR_UV_WI_IS_NOTIFY(wi->type)) {
      pc_trans_sent(tt->client, wi->seq_num, status);
    }

    if (TR_UV_WI_IS_RESP(wi->type)) {
      pc_trans_resp(tt->client, wi->req_id, status, NULL);
    }

    // if internal, do nothing here.

    /* Pre-allocated items are returned to the pool; heap items freed. */
    if (PC_IS_PRE_ALLOC(wi->type)) {
      PC_PRE_ALLOC_SET_IDLE(wi->type);
    } else {
      pc_lib_free(wi);
    }
  }
  pc_mutex_unlock(&tt->wq_mutex);

  tls__write_to_tcp(tls);
}
static void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { uv_write_t* req; QUEUE* q; while (!QUEUE_EMPTY(&stream->write_queue)) { q = QUEUE_HEAD(&stream->write_queue); QUEUE_REMOVE(q); req = QUEUE_DATA(q, uv_write_t, queue); req->error = error; QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); } }
/* Runs the callback of every io watcher on the loop's pending queue. */
static void uv__run_pending(uv_loop_t* loop) {
  uv__io_t* watcher;
  QUEUE* node;

  while (!QUEUE_EMPTY(&loop->pending_queue)) {
    node = QUEUE_HEAD(&loop->pending_queue);
    QUEUE_REMOVE(node);
    QUEUE_INIT(node);  /* mark the watcher as no longer queued */

    watcher = QUEUE_DATA(node, uv__io_t, pending_queue);
    watcher->cb(loop, watcher, UV__POLLOUT);
  }
}
/* Runs callbacks for the watchers that were pending when this call
 * began; returns 1 if any ran, 0 when the queue was empty.  The queue
 * is split off first so a watcher re-queued by its own callback waits
 * for the next iteration instead of looping forever. */
static int uv__run_pending(uv_loop_t* loop) {
  uv__io_t* watcher;
  QUEUE batch;
  QUEUE* node;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_INIT(&batch);
  node = QUEUE_HEAD(&loop->pending_queue);
  QUEUE_SPLIT(&loop->pending_queue, node, &batch);

  for (;;) {
    if (QUEUE_EMPTY(&batch))
      break;

    node = QUEUE_HEAD(&batch);
    QUEUE_REMOVE(node);
    QUEUE_INIT(node);

    watcher = QUEUE_DATA(node, uv__io_t, pending_queue);
    watcher->cb(loop, watcher, UV__POLLOUT);
  }

  return 1;
}
/* Applies func to every thread on every priority level of the
 * scheduler's run queues, highest priority (0) first. */
void prio_sched_for_all_threads(struct sched_t* sched, void(*func)(thread_t*)) {
  int prio;
  node_t* cur;

  for (prio = 0; prio < LOWEST_PRIO; ++prio) {
    for (cur = QUEUE_HEAD(SCHED_QUEUE(sched)[prio]); cur != NULL; cur = NODE_NEXT(cur))
      func(NODE_DATA(cur));
  }
}
/* Pops the head entry off the reference queue and releases both the
 * queue node and the tracked object (including a VAR_LIST's item
 * storage). */
static void ref_pop(QUEUE *refs) {
  QUEUE *head = QUEUE_HEAD(refs);
  queue_ref_item *node = QUEUE_DATA(head, queue_ref_item, node);
  void *tracked;

  QUEUE_REMOVE(&node->node);
  tracked = node->item.ref;

  if (node->item.v_type == VAR_LIST) {
    List *list = tracked;
    utarray_free(list->items);
  }

  free(node);
  free(tracked);
}
/* Dispatch callback for a worker: drains the worker's outbound queue,
 * pushing each value into the main duktape context and invoking the
 * worker's JS callback.  If the worker is being destroyed, queued items
 * are freed without dispatch; when fully destroyed (destroy == 2) the
 * JS-side registration and the worker itself are torn down. */
static int _worker_dispatch_cb (evHandle *handle){
  comoWorker *worker = handle->data;
  duk_context *ctx = worker->Mainctx;

  /* Hold the worker mutex for the whole drain: queueOut is filled from
   * the worker thread. */
  mtx_lock(&worker->mtx);
  QUEUE *q;
  while ( !QUEUE_EMPTY(&worker->queueOut) ){
    q = QUEUE_HEAD(&(worker)->queueOut);
    QUEUE_REMOVE(q);
    comoQueue *queue = QUEUE_DATA(q, comoQueue, queue);

    /* Worker shutting down: skip the JS dispatch, just free the item. */
    if (worker->destroy != 0){
      goto FREE;
    }

    duk_push_heapptr(ctx, worker->self);
    if (duk_get_type(ctx, -1) != DUK_TYPE_OBJECT){
      dump_stack(ctx, "DUK");
      assert(0);
    }

    /* Call worker.self(value) and discard the result. */
    como_push_worker_value(ctx, queue);
    duk_call(ctx, 1);
    duk_pop(ctx);

    FREE :
    /* free except in case of pointers */
    if (queue->data != NULL && queue->type != DUK_TYPE_POINTER){
      free(queue->data);
    }
    free(queue);
  }
  mtx_unlock(&worker->mtx);

  /* Fully destroyed: drop the JS callback registration keyed by handle
   * id, close the handle, and free the worker. */
  if (worker->destroy == 2){
    duk_push_global_stash(ctx);
    duk_get_prop_string(ctx, -1, "comoWorkersCallBack");
    duk_push_number(ctx, (double) handle->id);
    duk_del_prop(ctx, -2);
    handle_close(handle);
    free(worker);
  }

  return 0;
}
/* Write-ready handler for a connection: sends the chunk at the head of
 * the send queue, pops it when fully written, and disables the write
 * event once the queue is empty.  Returns 0 on progress, -1 otherwise;
 * a failed write terminates the connection. */
int conn_write_handler(CONN *conn)
{
    int ret = -1, n = 0;
    CONN_CHECK_RET(conn, ret);
    CHUNK *cp = NULL;

    if(conn && conn->send_queue && QTOTAL(conn->send_queue) > 0)
    {
        DEBUG_LOGGER(conn->logger, "Ready for send data to %s:%d via %d "
                "qtotal:%d qhead:%d qcount:%d", conn->ip, conn->port, conn->fd,
                QTOTAL(conn->send_queue), QHEAD(conn->send_queue), QCOUNT(conn->send_queue));

        /* Peek at the head chunk without removing it. */
        if(QUEUE_HEAD(conn->send_queue, PCHUNK, &cp) == 0)
        {
            DEBUG_LOGGER(conn->logger, "Ready for send data to %s:%d via %d qtotal:%d pcp:%08x",
                    conn->ip, conn->port, conn->fd, QTOTAL(conn->send_queue), cp);
            if((n = CHUNK_WRITE(cp, conn->fd)) > 0)
            {
                conn->sent_data_total += n;
                DEBUG_LOGGER(conn->logger, "Sent %d byte(s) (total sent %lld) "
                        "to %s:%d via %d leave %lld", n, conn->sent_data_total,
                        conn->ip, conn->port, conn->fd, CK_LEFT(cp));
                /* CONN TIMER sample */
                TIMER_SAMPLE(conn->timer);

                /* Chunk fully sent: pop it off the queue and recycle it. */
                if(CHUNK_STATUS(cp) == CHUNK_STATUS_OVER )
                {
                    if(QUEUE_POP(conn->send_queue, PCHUNK, &cp) == 0)
                    {
                        DEBUG_LOGGER(conn->logger, "Completed chunk[%08x] and clean it leave %d",
                                cp, QTOTAL(conn->send_queue));
                        CK_CLEAN(cp);
                    }
                }
                ret = 0;
            }
            else
            {
                FATAL_LOGGER(conn->logger, "Sending data to %s:%d via %d failed, %s",
                        conn->ip, conn->port, conn->fd, strerror(errno));
                /* Terminate connection */
                CONN_TERMINATE(conn);
            }
        }

        /* Nothing left to send: stop watching for writability. */
        if(QTOTAL(conn->send_queue) <= 0)
        {
            conn->event->del(conn->event, E_WRITE);
        }
    }
    return ret;
}
/* Drains the closing-handle queue, invoking each handle's close hook.
 * NOTE(review): unlike the shutdown path in loop_start(), the handle is
 * NOT freed here (a _free_handle call was commented out) — confirm the
 * caller releases closed handles or this leaks them. */
void loop_run_closing_handles(evLoop *loop){
  QUEUE *node;
  evHandle *h;

  while (!QUEUE_EMPTY(&loop->closing_queue)) {
    node = QUEUE_HEAD(&loop->closing_queue);
    QUEUE_REMOVE(node);

    h = QUEUE_DATA(node, evHandle, queue);
    assert(h);

    if (h->close != NULL)
      h->close(h);
  }
}
/* Resets the TLS layer for a reconnect: clears the SSL session state
 * and both memory BIOs, requeues any in-flight write items so
 * tcp__reset() can recycle them, and finally resets the TCP layer. */
void tls__reset(tr_uv_tcp_transport_t* tt)
{
  int ret;
  QUEUE* q;
  tr_uv_tls_transport_t* tls = (tr_uv_tls_transport_t* )tt;

  pc_lib_log(PC_LOG_DEBUG, "tls__reset - reset ssl");
  if (!SSL_clear(tls->tls)) {
    pc_lib_log(PC_LOG_WARN, "tls__reset - ssl clear error: %s",
        ERR_error_string(ERR_get_error(), NULL));
  }

  /* Discard any buffered handshake/application data in both BIOs. */
  ret = BIO_reset(tls->in);
  assert(ret == 1);

  ret = BIO_reset(tls->out);
  assert(ret == 1);

  // write should retry remained, insert it to writing queue
  // then tcp__reset will recycle it.
  if (tls->should_retry) {
    pc_lib_log(PC_LOG_DEBUG, "tls__reset - move should retry wi to writing queue, seq_num: %u, req_id: %u",
        tls->should_retry->seq_num, tls->should_retry->req_id);
    QUEUE_INIT(&tls->should_retry->queue);
    QUEUE_INSERT_TAIL(&tt->writing_queue, &tls->should_retry->queue);
    tls->should_retry = NULL;
  }

  /* Drop the partially-written retry buffer, if any. */
  if (tls->retry_wb) {
    pc_lib_free(tls->retry_wb);
    tls->retry_wb = NULL;
    tls->retry_wb_len = 0;
  }

  // tcp reset will recycle following write item
  while(!QUEUE_EMPTY(&tls->when_tcp_is_writing_queue)) {
    q = QUEUE_HEAD(&tls->when_tcp_is_writing_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    QUEUE_INSERT_TAIL(&tt->writing_queue, q);
  }

  tcp__reset(tt);
}
/* Blocks until the channel holds at least one item, then dequeues the
 * oldest item and returns its payload. */
void *uv_chan_receive(uv_chan_t *chan) {
  uv__chan_item_t *item;
  QUEUE *node;
  void *payload;

  uv_mutex_lock(&chan->mutex);

  /* Condition-wait loop guards against spurious wakeups. */
  while (QUEUE_EMPTY(&chan->q))
    uv_cond_wait(&chan->cond, &chan->mutex);

  node = QUEUE_HEAD(&chan->q);
  item = QUEUE_DATA(node, uv__chan_item_t, active_queue);
  payload = item->data;
  QUEUE_REMOVE(node);
  free(item);

  uv_mutex_unlock(&chan->mutex);
  return payload;
}
/* Wakes the first state waiting on the condition queue.
 * Returns 1 if a waiter was readied, 0 if none were queued. */
int ravaL_cond_signal(rava_cond_t* cond) {
  QUEUE* node;
  rava_state_t* state;

  if (QUEUE_EMPTY(cond))
    return 0;

  node = QUEUE_HEAD(cond);
  state = QUEUE_DATA(node, rava_state_t, cond);
  QUEUE_REMOVE(node);

  TRACE("READY state %p\n", state);
  ravaL_state_ready(state);
  return 1;
}
/* io callback for the loop's internal async wakeup fd: drains the fd,
 * then runs the callback of every async handle whose pending flag was
 * set since the last pass. */
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  char buf[1024];
  ssize_t r;
  QUEUE queue;
  QUEUE* q;
  uv_async_t* h;

  assert(w == &loop->async_io_watcher);

  /* Drain everything written to the wakeup fd.  A full read means there
   * may be more; EAGAIN/EWOULDBLOCK means empty; EINTR retries; any
   * other outcome is fatal. */
  for (;;) {
    r = read(w->fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;

    if (r != -1)
      break;

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;

    if (errno == EINTR)
      continue;

    abort();
  }

  /* Rotate handles through a local queue so a callback that closes its
   * handle (unlinking it from async_handles) cannot break iteration. */
  QUEUE_MOVE(&loop->async_handles, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_async_t, queue);

    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->async_handles, q);

    /* Atomically consume the pending flag; skip handles that were not
     * actually signalled. */
    if (cmpxchgi(&h->pending, 1, 0) == 0)
      continue;

    if (h->async_cb == NULL)
      continue;

    h->async_cb(h);
  }
}
/* Main event loop.  type == 1 runs a single iteration with a zero
 * timeout; otherwise iterates until no active handles remain.  Each
 * pass: closes (and frees) closing handles, updates time, runs timers
 * and immediates, then polls io (or sleeps when no io is watched).
 * Returns nonzero if active handles remain. */
int loop_start (evLoop *loop, int type){
  while (loop->active_handles){
    /* closing handles */
    QUEUE *q;
    evHandle *handle;
    while ( !QUEUE_EMPTY(&loop->closing_queue) ){
      q = QUEUE_HEAD(&(loop)->closing_queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, evHandle, queue);
      assert(handle);
      if (handle->close != NULL){
        handle->close(handle);
      }
      _free_handle(handle);
    }

    loop_update_time(loop);

    /* Run-once mode never blocks; otherwise block until the next timer. */
    int timeout;
    if (type == 1){
      timeout = 0;
    } else {
      timeout = next_timeout(loop);
    }

    loop_run_timers(loop);
    loop_run_immediate(loop);

    /* With no io watchers, just sleep out the timeout (milliseconds). */
    if (QUEUE_EMPTY(&loop->io_queue)) {
#ifdef _WIN32
      Sleep(timeout);
#else
      usleep(1000 * timeout);
#endif
    } else {
      io_poll(loop, timeout);
    }

    /* run once */
    if (type == 1){
      break;
    }
  }
  return loop->active_handles > 0;
}
/* Flushes encrypted bytes buffered in the outgoing BIO to the TCP
 * socket via uv_write.  No-op while a write is already in flight; when
 * the BIO is empty it only signals the write_async to pick up queued
 * plaintext work. */
static void tls__write_to_tcp(tr_uv_tls_transport_t* tls)
{
  QUEUE* q;
  char* ptr;
  size_t len;
  uv_buf_t buf;
  tr_uv_wi_t* wi = NULL;
  tr_uv_tcp_transport_t* tt = (tr_uv_tcp_transport_t*)tls;

  /* Only one uv_write may be outstanding; tls__write_done_cb clears
   * the flag and calls back here. */
  if (tt->is_writing)
    return;

  len = BIO_pending(tls->out);
  if (len == 0) {
    assert(QUEUE_EMPTY(&tls->when_tcp_is_writing_queue));
    uv_async_send(&tt->write_async);
    return ;
  }

  /* The items parked while tcp was busy belong to this flush: move
   * them to the writing queue so the done callback completes them. */
  while(!QUEUE_EMPTY(&tls->when_tcp_is_writing_queue)) {
    q = QUEUE_HEAD(&tls->when_tcp_is_writing_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    wi = (tr_uv_wi_t* )QUEUE_DATA(q, tr_uv_wi_t, queue);
    pc_lib_log(PC_LOG_DEBUG, "tls__write_to_tcp - move wi from when tcp is writing queue to writing queue,"
        " seq_num: %u, req_id: %u", wi->seq_num, wi->req_id);
    QUEUE_INSERT_TAIL(&tt->writing_queue, q);
  }

  /* Borrow the BIO's internal buffer for the write; the BIO is reset
   * right after uv_write queues the data. */
  BIO_get_mem_data(tls->out, &ptr);

  buf.base = ptr;
  buf.len = len;

  // TODO: error handling
  tt->write_req.data = tls;
  uv_write(&tt->write_req, (uv_stream_t* )&tt->socket, &buf, 1, tls__write_done_cb);
  BIO_reset(tls->out);
  tt->is_writing = 1;
}