static void process_req(uv_tcp_t* handle, ssize_t nread, uv_buf_t buf) {
  write_req_t* wr;
  dnshandle* dns = (dnshandle*)handle;
  char hdrbuf[DNSREC_LEN];
  int hdrbuf_remaining = DNSREC_LEN;
  int rec_remaining = 0;
  int readbuf_remaining;
  char* dnsreq;
  char* hdrstart;
  int usingprev = 0;

  wr = (write_req_t*) malloc(sizeof *wr);
  uv_req_init(&wr->req, (uv_handle_t*)handle, after_write);
  wr->buf.base = (char*)malloc(WRITE_BUF_LEN);
  wr->buf.len = 0;

  if (dns->state.prevbuf_ptr != NULL) {
    dnsreq = dns->state.prevbuf_ptr + dns->state.prevbuf_pos;
    readbuf_remaining = dns->state.prevbuf_rem;
    usingprev = 1;
  } else {
    dnsreq = buf.base;
    readbuf_remaining = nread;
  }
  hdrstart = dnsreq;

  while (dnsreq != NULL) {
    /* something to process */
    while (readbuf_remaining > 0) {
      /* something to process in current buffer */
      if (hdrbuf_remaining > 0) {
        /* process len and id */
        if (readbuf_remaining < hdrbuf_remaining) {
          /* too little to get request header. save for next buffer */
          memcpy(&hdrbuf[DNSREC_LEN - hdrbuf_remaining],
                 dnsreq,
                 readbuf_remaining);
          hdrbuf_remaining = DNSREC_LEN - readbuf_remaining;
          break;
        } else {
          short int reclen_n;

          /* save header */
          memcpy(&hdrbuf[DNSREC_LEN - hdrbuf_remaining],
                 dnsreq,
                 hdrbuf_remaining);
          dnsreq += hdrbuf_remaining;
          readbuf_remaining -= hdrbuf_remaining;
          hdrbuf_remaining = 0;

          /* get record length */
          reclen_n = *((short int*)hdrbuf);
          rec_remaining = ntohs(reclen_n) - (DNSREC_LEN - 2);
        }
      }

      if (rec_remaining <= readbuf_remaining) {
        /* prepare reply */
        addrsp(wr, hdrbuf);

        /* move to next record */
        dnsreq += rec_remaining;
        hdrstart = dnsreq;
        readbuf_remaining -= rec_remaining;
        rec_remaining = 0;
        hdrbuf_remaining = DNSREC_LEN;
      } else {
        /* otherwise this buffer is done. */
        rec_remaining -= readbuf_remaining;
        break;
      }
    }

    /* If we had to use bytes from the prev buffer, start processing the current one. */
    if (usingprev == 1) {
      /* free previous buffer */
      free(dns->state.prevbuf_ptr);
      dnsreq = buf.base;
      readbuf_remaining = nread;
      usingprev = 0;
    } else {
      dnsreq = NULL;
    }
  }

  /* send write buffer */
  if (wr->buf.len > 0) {
    if (uv_write(&wr->req, &wr->buf, 1)) {
      FATAL("uv_write failed");
    }
  }

  if (readbuf_remaining > 0) {
    /* save start of record position, so we can continue on next read */
    dns->state.prevbuf_ptr = buf.base;
    dns->state.prevbuf_pos = hdrstart - buf.base;
    dns->state.prevbuf_rem = nread - dns->state.prevbuf_pos;
  } else {
    /* nothing left in this buffer */
    dns->state.prevbuf_ptr = NULL;
    dns->state.prevbuf_pos = 0;
    dns->state.prevbuf_rem = 0;
    free(buf.base);
  }
}
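/*
 * Sketch (not part of the original file): the TCP framing process_req() relies
 * on. Over TCP, every DNS message is preceded by a two-byte, big-endian length
 * field (RFC 1035, section 4.2.2). process_req() captures the first DNSREC_LEN
 * bytes of each record (the length prefix plus the start of the DNS header)
 * into hdrbuf, so the payload bytes still to skip are the advertised length
 * minus the header bytes already consumed beyond the prefix. The helper below
 * is a minimal, assumed illustration of that arithmetic, not code from the
 * benchmark.
 */
#include <arpa/inet.h>
#include <string.h>

/* Given the first hdr_len bytes of a record (length prefix included), return
 * how many record bytes remain to be read after them. */
static int dns_tcp_remaining(const char* hdr, int hdr_len) {
  unsigned short reclen_n;
  memcpy(&reclen_n, hdr, sizeof(reclen_n));    /* two-byte length prefix, network order */
  return (int)ntohs(reclen_n) - (hdr_len - 2); /* the prefix itself is not counted in the length */
}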
static void on_read(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
  int r;
  uv_pipe_t* pipe;
  uv_handle_type pending;
  uv_buf_t outbuf;

  pipe = (uv_pipe_t*) handle;

  if (nread == 0) {
    /* Everything OK, but nothing read. */
    free(buf->base);
    return;
  }

  if (nread < 0) {
    if (nread == UV_EOF) {
      free(buf->base);
      return;
    }

    printf("error recving on channel: %s\n", uv_strerror(nread));
    abort();
  }

  fprintf(stderr, "got %d bytes\n", (int)nread);

  pending = uv_pipe_pending_type(pipe);

  if (!tcp_server_listening) {
    ASSERT(1 == uv_pipe_pending_count(pipe));
    ASSERT(nread > 0 && buf->base && pending != UV_UNKNOWN_HANDLE);
    read_cb_called++;

    /* Accept the pending TCP server, and start listening on it. */
    ASSERT(pending == UV_TCP);
    r = uv_tcp_init(uv_default_loop(), &tcp_server);
    ASSERT(r == 0);

    r = uv_accept((uv_stream_t*)pipe, (uv_stream_t*)&tcp_server);
    ASSERT(r == 0);

    r = uv_listen((uv_stream_t*)&tcp_server, BACKLOG, on_connection);
    ASSERT(r == 0);

    tcp_server_listening = 1;

    /* Make sure that the expected data is correctly multiplexed. */
    ASSERT(memcmp("hello\n", buf->base, nread) == 0);

    outbuf = uv_buf_init("world\n", 6);
    r = uv_write(&write_req, (uv_stream_t*)pipe, &outbuf, 1, NULL);
    ASSERT(r == 0);

    /* Create a bunch of connections to get both servers to accept. */
    make_many_connections();
  } else if (memcmp("accepted_connection\n", buf->base, nread) == 0) {
    /* Remote server has accepted a connection. Close the channel. */
    ASSERT(0 == uv_pipe_pending_count(pipe));
    ASSERT(pending == UV_UNKNOWN_HANDLE);
    remote_conn_accepted = 1;
    uv_close((uv_handle_t*)&channel, NULL);
  }

  free(buf->base);
}
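/*
 * Sketch (not part of this test): roughly what the sending side of the channel
 * does so that a TCP handle shows up as a "pending" handle in on_read() above.
 * uv_write2() attaches a stream handle to a write on an IPC-enabled pipe (one
 * initialized with uv_pipe_init(loop, pipe, 1)); the receiver then sees
 * uv_pipe_pending_count() > 0 and accepts it as shown. The names ipc_pipe,
 * shared_server and handle_req are placeholders, not symbols from the test.
 */
static uv_write_t handle_req;

static void send_server_handle(uv_pipe_t* ipc_pipe, uv_tcp_t* shared_server) {
  uv_buf_t buf = uv_buf_init("hello\n", 6);
  int r = uv_write2(&handle_req,
                    (uv_stream_t*) ipc_pipe,
                    &buf,
                    1,
                    (uv_stream_t*) shared_server, /* the handle travels with the data */
                    NULL);
  ASSERT(r == 0);
}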
/**
 * Handle the runner queues. This performs all deletions of callbacks as well as
 * setting up the uv requests on each cycle. In the normal course of affairs it
 * runs with the lock held, as should all accesses to the queue vectors for the
 * various requests.
 */
void UVEventLoop::HandleRunnerQueues() {
    for (auto bit = resolvers.begin(); bit != resolvers.end();) {
        auto qp = *bit;
        if (qp->disposed) {
            if (qp->bindCB) delete qp->bindCB;
            delete qp;
            bit = resolvers.erase(bit);
        } else if (!qp->resolving) {
            qp->resolving = true;
            qp->dnsHints.ai_family = PF_INET;
            qp->dnsHints.ai_socktype = SOCK_STREAM;
            qp->dnsHints.ai_protocol = IPPROTO_TCP;
            qp->dnsHints.ai_flags = 0;
            qp->resolver.data = qp;
            UVTCPClient* client = const_cast<UVTCPClient*>(qp->client);
            if (client->address == nullptr || client->socket == nullptr) {
                // no-op: nothing is done here; resolution proceeds regardless
            }
            int r = uv_getaddrinfo(loop, &qp->resolver, OnResolved,
                                   qp->host.c_str(), qp->service.c_str(), &qp->dnsHints);
            if (r < 0) {
                if (qp->bindCB) {
                    (*qp->bindCB)(client->address, kUVBindCallError);
                }
                qp->disposed = true;
            }
            ++bit;
        } else {
            ++bit;
        }
    }

    for (auto rit = readers.begin(); rit != readers.end();) {
        auto qp = *rit;
        if (qp->disposed) {
            //! In the main connection queue, readers, the socket (i.e. the client) owns the
            //! read callback (*rit)->cb; it is erased by the close request, which replaces
            //! the read functional callback with a close functional.
            if (qp->connectCB) delete qp->connectCB;
            if (qp->readerCB) delete qp->readerCB;
            if (qp->closerCB) delete qp->closerCB;
            delete qp;
            rit = readers.erase(rit);
        } else if (qp->closing) {
            UVTCPClient* client = const_cast<UVTCPClient*>(qp->client);
            int r = uv_read_stop((uv_stream_t*)client->socket);
            if (r >= 0) {
                uv_close((uv_handle_t*)client->socket, OnClose);
            }
            qp->closing = false; // only try to close once!
            ++rit;
        } else if (!qp->connected) {
            qp->connected = true;
            UVTCPClient* client = const_cast<UVTCPClient*>(qp->client);
            if (client->socket) {
                if (client->socket->data) {
                    // should already be deleted
                }
                uv_tcp_init(loop, client->socket);
                client->socket->data = qp;
                qp->request.data = qp;
                int r = uv_tcp_connect(&qp->request, client->socket, client->address, OnConnect);
                if (r < 0) {
                    if (qp->connectCB) {
                        (*qp->connectCB)(&qp->request, kUVCnxCallError);
                    }
                }
            }
            ++rit;
        } else {
            ++rit;
        }
    }

    for (auto writ = writers.begin(); writ != writers.end();) {
        auto qp = *writ;
        if (qp->disposed) {
            if (qp->buf.base) {
                delete [] qp->buf.base;
            }
            delete qp;
            writ = writers.erase(writ);
        } else {
            UVTCPClient* client = const_cast<UVTCPClient*>(qp->client);
            int r = uv_write(&qp->request, (uv_stream_t*)client->socket, &qp->buf, 1, OnWrite);
            if (r < 0) {
                qp->disposed = true;
            }
            ++writ;
        }
    }

    for (auto cit = closers.begin(); cit != closers.end();) {
        auto qp = *cit;
        if (qp->disposed) {
            if (qp->cb) delete qp->cb;
            delete qp;
            cit = closers.erase(cit);
        } else {
            UVTCPClient* client = const_cast<UVTCPClient*>(qp->client);
            uv_read_stop((uv_stream_t*)client->socket);
            if (client->socket->data) {
                delete static_cast<ReaderCB*>(client->socket->data);
            }
            client->socket->data = qp;
            uv_close((uv_handle_t*)client->socket, OnClose);
            ++cit;
        }
    }

    // workerLock.lock();
    for (auto wit = activeWorkers.begin(); wit != activeWorkers.end();) {
        auto qp = *wit;
        if (qp->disposed) {
            if (qp->queuedCB) delete qp->queuedCB;
            if (qp->apresCB) delete qp->apresCB;
            delete qp;
            wit = activeWorkers.erase(wit);
        } else {
            int r = uv_queue_work(loop, &qp->work, OnWork, OnAfterWork);
            if (r < 0) {
                OnAfterWork(&qp->work, r);
            }
            ++wit;
        }
    }
    // workerLock.unlock();

    for (auto tit = scheduledTimers.begin(); tit != scheduledTimers.end();) {
        auto qp = *tit;
        if (qp->disposed) {
            if (qp->tikCB) delete qp->tikCB;
            delete qp;
        } else {
            DEBUG_OUT("UVEventLoop::HandleRunnerQueues() timer started");
            uv_timer_init(loop, &qp->timer);
            uv_timer_start(&qp->timer, OnTick, qp->delayMS, qp->repeatMs);
            activeTimers.push_back(qp);
        }
        tit = scheduledTimers.erase(tit);
    }

    for (auto tit = activeTimers.begin(); tit != activeTimers.end();) {
        auto qp = *tit;
        if (qp->disposed) {
            DEBUG_OUT("UVEventLoop::HandleRunnerQueues() timer cleaned up");
            if (uv_is_active((uv_handle_t*)&qp->timer)) {
                DEBUG_OUT("UVEventLoop::HandleRunnerQueues() stopping an active timer");
                uv_timer_stop(&qp->timer);
                ++tit;
            } else {
                if (qp->tikCB) delete qp->tikCB;
                delete qp;
                tit = activeTimers.erase(tit);
            }
        } else {
            ++tit;
        }
    }
}
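/*
 * Sketch (not part of the class as listed): one plausible way to drive
 * HandleRunnerQueues() once per loop iteration while honouring the locking
 * note in the comment above. A uv_prepare_t handle runs right before the loop
 * blocks for I/O; its callback takes the queue lock and drains the request
 * vectors. The members queueLock and prepareHandle (and the use of <mutex>)
 * are assumed here, not taken from the listing.
 */
void UVEventLoop::StartRunnerPump() {
    uv_prepare_init(loop, &prepareHandle);
    prepareHandle.data = this;
    uv_prepare_start(&prepareHandle, [](uv_prepare_t* handle) {
        auto* self = static_cast<UVEventLoop*>(handle->data);
        std::lock_guard<std::mutex> guard(self->queueLock); // protect the queue vectors
        self->HandleRunnerQueues();
    });
}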
    }

    req->pipe->queueLen -= req->msg->length;
    Assert_true(req->pipe->queueLen >= 0);
    Allocator_free(req->alloc);
}

static uint8_t sendMessage2(struct Pipe_WriteRequest_pvt* req)
{
    struct Pipe_pvt* pipe = req->pipe;
    struct Message* m = req->msg;

    uv_buf_t buffers[] = {
        { .base = (char*)m->bytes, .len = m->length }
    };

    int ret = uv_write(&req->uvReq, (uv_stream_t*) pipe->out, buffers, 1, sendMessageCallback);
    if (ret) {
        Log_info(pipe->pub.logger, "Failed writing to pipe [%s] [%s]",
                 pipe->pub.fullName, uv_strerror(ret));
        Allocator_free(req->alloc);
        return Error_UNDELIVERABLE;
    }

    pipe->queueLen += m->length;
    return Error_NONE;
}

static uint8_t sendMessage(struct Message* m, struct Interface* iface)
{
    struct Pipe_pvt* pipe = Identity_cast((struct Pipe_pvt*) iface);