/* Allocation callback (pre-1.0 libuv API): instead of allocating, hand the
 * reader the unused tail of the global `output` buffer. `suggested_size`
 * is deliberately ignored. */
static uv_buf_t on_alloc(uv_handle_t* handle, size_t suggested_size) {
  char* tail = output + output_used;
  size_t remaining = OUTPUT_SIZE - output_used;
  return uv_buf_init(tail, remaining);
}
// Copy the string's bytes into a freshly malloc()ed uv buffer (no NUL
// terminator is appended) and forward it to the uv_buf_t overload, which
// takes ownership of the allocation.
void Socket::BeginWrite(const string& s) {
    const size_t n = s.length();
    uv_buf_t buf = uv_buf_init((char*) malloc(n), n);
    s.copy(buf.base, n);
    BeginWrite(buf);
}
/*
 * Queue an overlapped WSARecv() on a TCP handle.
 *
 * If the loop has fewer active TCP streams than
 * uv_active_tcp_streams_threshold, a real user buffer is requested from
 * alloc_cb up front; otherwise a zero-byte read is issued (buf.len == 0,
 * pointing at uv_zero_) so no user memory is pinned while waiting for data.
 *
 * Fix: the overlapped structure was memset twice (once at the top, once
 * right before the WSARecv call); the redundant first clear was removed.
 */
static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
  uv_read_t* req;
  uv_buf_t buf;
  int result;
  DWORD bytes, flags;

  assert(handle->flags & UV_HANDLE_READING);
  assert(!(handle->flags & UV_HANDLE_READ_PENDING));

  req = &handle->read_req;

  /*
   * Preallocate a read buffer if the number of active streams is below
   * the threshold.
   */
  if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
    handle->flags &= ~UV_HANDLE_ZERO_READ;
    handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle,
                     65536,
                     &handle->tcp.conn.read_buffer);
    if (handle->tcp.conn.read_buffer.base == NULL ||
        handle->tcp.conn.read_buffer.len == 0) {
      /* The user did not supply a buffer: surface UV_ENOBUFS and bail. */
      handle->read_cb((uv_stream_t*) handle,
                      UV_ENOBUFS,
                      &handle->tcp.conn.read_buffer);
      return;
    }
    assert(handle->tcp.conn.read_buffer.base != NULL);
    buf = handle->tcp.conn.read_buffer;
  } else {
    handle->flags |= UV_HANDLE_ZERO_READ;
    buf.base = (char*) &uv_zero_;
    buf.len = 0;
  }

  /* Prepare the overlapped structure. */
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    assert(req->event_handle);
    /* Setting the low bit of the event handle tells the kernel not to
     * post a completion packet to the IOCP for this operation. */
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
  }

  flags = 0;
  result = WSARecv(handle->socket,
                   (WSABUF*)&buf,
                   1,
                   &bytes,
                   &flags,
                   &req->u.io.overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Process the req without IOCP. */
    handle->flags |= UV_HANDLE_READ_PENDING;
    req->u.io.overlapped.InternalHigh = bytes;
    handle->reqs_pending++;
    uv_insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* The req will be processed with IOCP. */
    handle->flags |= UV_HANDLE_READ_PENDING;
    handle->reqs_pending++;
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        req->wait_handle == INVALID_HANDLE_VALUE &&
        !RegisterWaitForSingleObject(&req->wait_handle,
                                     req->event_handle,
                                     post_completion,
                                     (void*) req,
                                     INFINITE,
                                     WT_EXECUTEINWAITTHREAD)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*)req);
    }
  } else {
    /* Make this req pending reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
  }
}
/* libuv allocation callback: hand back a malloc()ed buffer of the suggested
 * size. Fix: if malloc fails, report a zero-length buffer so libuv delivers
 * UV_ENOBUFS to the read callback instead of handing it a NULL base with a
 * nonzero length. The read callback owns (and must free) buf->base. */
void alloc_cb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
  char *base = (char*) malloc(suggested_size);
  *buf = uv_buf_init(base, base != NULL ? suggested_size : 0);
}
/* Stream allocation callback (pre-1.0 libuv API): return a heap buffer of
 * exactly the suggested size; ownership passes to the read callback. */
static uv_buf_t forza__on_alloc(uv_handle_t* handle, size_t suggested_size) {
  char* base = (char*) malloc(suggested_size);
  return uv_buf_init(base, suggested_size);
}
#include "main.h"

// Global Steam client wired to libuv: the first lambda writes raw bytes to
// the TCP socket `sock` via the shared `write_buffer`; the second schedules
// a repeating timer that invokes the client's periodic callback.
SteamClient client(
    // write callback
    [](std::size_t length, std::function<void(unsigned char* buffer)> fill) {
        auto write = new uv_write_t;
        // TODO: check if previous write has finished
        write_buffer.resize(length);
        // Let the client serialize its message directly into write_buffer.
        fill(reinterpret_cast<unsigned char*>(&write_buffer[0]));
        auto buf = uv_buf_init(&write_buffer[0], write_buffer.size());
        uv_write(write, (uv_stream_t*)&sock, &buf, 1, [](uv_write_t* req, int status) {
            delete req;
        });
    },
    // set_interval callback
    [](std::function<void()> callback, int timeout) {
        // Move the callback to the heap so it outlives this scope; the timer
        // handle carries it via its data pointer. `timeout` is in seconds.
        auto callback_heap = new std::function<void()>(std::move(callback));
        timer.data = callback_heap;
        uv_timer_start(&timer, [](uv_timer_t* handle) {
            auto callback = reinterpret_cast<std::function<void()>*>(handle->data);
            (*callback)();
            // TODO: delete it somewhere
        }, timeout * 1000, timeout * 1000);
    }
);

void lua_error_fatal(lua_State *L, int status) {
/* Allocation callback: malloc a buffer of the requested size.
 * Fix: the assert only guards debug builds; in release builds a failed
 * malloc previously produced a NULL base with a nonzero length. Now a
 * zero-length buffer is reported so libuv surfaces UV_ENOBUFS instead. */
void alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
  char *base = (char*) malloc(size);
  assert(base != NULL);
  *buf = uv_buf_init(base, base != NULL ? size : 0);
}
/* Allocation callback (pre-1.0 libuv API): return a zero-initialized heap
 * buffer of `size` bytes; the read callback takes ownership. */
uv_buf_t net_alloc(uv_handle_t* handle, size_t size) {
  return uv_buf_init((char *) calloc(size, 1), size);
}
/*
 * Stream read callback (pre-1.0 libuv API, buf passed by value).
 * On error: dispatch to the user's error_cb or log and free the net object.
 * With SSL: feed the received bytes into the TLS BIO, then drive the
 * handshake / decrypt state machine. Without SSL: hand the raw bytes to
 * read_cb.
 */
void net_read(uv_stream_t *handle, ssize_t nread, const uv_buf_t buf) {
  net_t * net = (net_t *) handle->data;
  err_t err;
  if (nread < 0) {
    err = uv_last_error(net->loop);
    if (net->error_cb) {
      net->error_cb(net, err, (char *) uv_strerror(err));
    } else {
      printf("error(%s:%d) %s", net->hostname, net->port, (char *) uv_strerror(err));
      net_free(net);
    }
    return;
  }
  /*
   * BIO Return rule:
   * All these functions return either the amount of data successfully
   * read or written (if the return value is positive) or that no data
   * was successfully read or written if the result is 0 or -1. If the
   * return value is -2 then the operation is not implemented in the specific BIO type.
   */
  if (net->use_ssl) {
    /* NOTE(review): this one-byte allocation is never freed and its purpose
     * is unclear — looks like a leak; confirm against tls_* internals. */
    net->tls->data = malloc(1);
    tls_bio_write(net->tls, buf.base, nread);
    free(buf.base);
    int read = 0;
    int stat = tls_read(net->tls);
    if (stat == 1) {
      /*
       * continue: Say hello
       */
      /* Flush any handshake bytes the TLS layer produced back to the peer.
       * NOTE(review): `req` is stack-allocated but uv_write completes
       * asynchronously — presumably unsafe; verify lifetime. */
      do {
        read = tls_bio_read(net->tls, 0);
        if (read > 0) {
          uv_write_t req;
          uv_buf_t uvbuf = uv_buf_init(net->tls->buf, read);
          uv_write(&req, (uv_stream_t*)net->handle, &uvbuf, 1, NULL);
        }
      } while (read > 0);
    } else if (stat == 0) {
      /*
       * SSL Connection is created
       * Here need to call user-land callback
       */
      uv_read_stop((uv_stream_t*)net->handle);
      if (net->read_cb != NULL) {
        net->read_cb(net, buffer_length(net->tls->buffer), buffer_string(net->tls->buffer));
      }
    } else if (stat == -1) {
      /*
       * Just connection in SSL
       * call `conn_cb`, the ssl connection has been
       * established in user-land.
       */
      if (net->conn_cb != NULL) {
        net->conn_cb(net);
      }
    } else {
      /*
       * TODO(Yorkie): HOWTO
       */
    }
    return;
  }
  /*
   * TCP Part, no SSL, just proxy of uv.
   */
  uv_read_stop(handle);
  /* NOTE(review): NUL-terminating at buf.base[nread] writes one byte past
   * the allocation when nread equals the allocated size — confirm the alloc
   * callback reserves a spare byte. */
  buf.base[nread] = 0;
  if (net->read_cb != NULL) {
    net->read_cb(net, nread, buf.base);
  }
}
/* multipart_parser hook: records the headers-complete event on the form
 * (empty payload buffer) and returns -1 to hand control back to the caller. */
static int on_headers_complete(multipart_parser *const parser) {
	MultipartFormRef const form = multipart_parser_get_data(parser);
	*form->out = uv_buf_init(NULL, 0);
	form->type = MultipartHeadersComplete;
	return -1;
}
/* multipart_parser hook: records the form-end event on the form (empty
 * payload buffer) and returns -1 to hand control back to the caller. */
static int on_form_end(multipart_parser *const parser) {
	MultipartFormRef const form = multipart_parser_get_data(parser);
	*form->out = uv_buf_init(NULL, 0);
	form->type = MultipartFormEnd;
	return -1;
}
/* multipart_parser hook: exposes the header value bytes (borrowed, not
 * copied — `at` must outlive the consumer) and returns -1 to hand control
 * back to the caller. */
static int on_header_value(multipart_parser *const parser, strarg_t const at, size_t const len) {
	MultipartFormRef const form = multipart_parser_get_data(parser);
	*form->out = uv_buf_init((char *)at, len);
	form->type = MultipartHeaderValue;
	return -1;
}
/* multipart_parser hook: records the part-begin event on the form (empty
 * payload buffer) and returns -1 to hand control back to the caller. */
static int on_part_begin(multipart_parser *const parser) {
	MultipartFormRef const form = multipart_parser_get_data(parser);
	*form->out = uv_buf_init(NULL, 0);
	form->type = MultipartPartBegin;
	return -1;
}
/*
 * Read callback driving the websocket handshake state machine.
 * While in WS_CONNECT it accumulates bytes into read_buff until the HTTP
 * header terminator "\r\n\r\n" arrives, then either validates the server's
 * "101 Switching Protocols" response (client role) or generates and sends
 * the handshake reply (server role). Errors are reported via cb_connect.
 */
static void cb_read_over(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
    ws_connect_t *ptr = (ws_connect_t*)(stream->data);
    if(nread >= 0) {
        ptr->read_length += nread;
        //print_info();
        if (WS_CONNECT == ptr->state) {
            char *pHeader = strstr(ptr->read_buff, "\r\n\r\n");
            if (NULL == pHeader) {
                // HTTP header terminator not found yet
                if (ptr->read_length >= ptr->read_len_max) {
                    ptr->read_length = 0;
                    // Buffer full without a complete header: report error
                    /*if (WS_CLIENT == ptr->type) { ptr->cb.cb_connect(ptr, ptr->cb.obj_connect, WS_ERROR_BUF_FULL); } else*/ {
                        // server-side handling
                        ptr->cb.cb_connect(ptr, ptr->cb.obj_connect, WS_ERROR_BUF_FULL);
                    }
                }
                return;
            }
            // Handshake protocol
            if (WS_CLIENT == ptr->type) {
                uv_read_stop((uv_stream_t *)&ptr->connect);
                //
                if(NULL != strstr(ptr->read_buff, "HTTP/1.1 101")) {
                    ptr->state = WS_ONLINE;
                    ptr->cb.cb_connect(ptr, ptr->cb.obj_connect, WS_OK);
                    // Send a heartbeat
                    /*uv_buf_t bufs; ptr->write_buf[0].base = new char[2]; ptr->write_buf[0].len = 2; ptr->write_buf[0].base[0] = 0x89; ptr->write_buf[0].base[1] = 0; uv_write(&ptr->write_req, (uv_stream_t*)&ptr->connect, ptr->write_buf, 1, cb_write);*/
                } else {
                    ptr->cb.cb_connect(ptr, ptr->cb.obj_connect, WS_ERROR_BAD);
                }
                //int len = pHeader - ptr->read_buff + 4;
                //memmove(ptr->read_buff, pHeader + 4, ptr->read_length - len);
                //ptr->read_length = ptr->read_length - len;
                ptr->read_length = 0;
            } else {
                // Server role: build and send the handshake response.
                uv_read_stop((uv_stream_t *)&ptr->connect);
                int reslen = ptr->write_len_max;
                ParseHandShake(ptr->read_buff, ptr->read_length, ptr->write_buff, ptr->write_len_max, reslen);
                ptr->write_buf[0] = uv_buf_init(ptr->write_buff, reslen);
                uv_write(&(ptr->write_req[0]), (uv_stream_t*)&ptr->connect, ptr->write_buf, 1, cb_write);
                //int len = pHeader - ptr->read_buff + 4;
                //memmove(ptr->read_buff, pHeader + 4, ptr->read_length - len);
                //ptr->read_length = ptr->read_length - len;
                ptr->read_length = 0;
            }
        } else {
            // Unexpected state (already online): report re-online error
            ptr->cb.cb_connect(ptr, ptr->cb.obj_connect, WS_ERROR_REONLIN);
        }
    } else if(nread == UV_ENOBUFS) {
        // Invoked when the read buffer is exhausted; ignored here
    } else {
        ptr->cb.cb_connect(ptr, ptr->cb.obj_connect, WS_ERROR_BAD);
        // Connection closed / read error
        printf("->strerror:%s,(%s,%d)\n",uv_strerror(nread), __FILE__, __LINE__);
    }
}
/* Shared allocation callback for udp and stream objects (pre-1.0 libuv
 * API). libuv's suggested size is overridden by the per-object buffer
 * length stored in self->buf.len. */
uv_buf_t luvL_alloc_cb(uv_handle_t* handle, size_t size) {
  luv_object_t* self = container_of(handle, luv_object_t, h);
  size_t want = (size_t)self->buf.len;
  return uv_buf_init((char*)malloc(want), want);
}
/*
 * Completion handler for a queued UDP receive request.
 * Dispatches the received datagram (or the error) to the user's recv_cb,
 * and for zero-reads drains the socket with a nonblocking WSARecvFrom.
 *
 * Fix: after a truncated datagram (WSAEMSGSIZE) the error chain fell
 * through into the "serious error" branch, stopping reception and invoking
 * recv_cb a second time with -1; the branch is now `else if`.
 */
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req) {
  uv_buf_t buf;
  int partial;

  assert(handle->type == UV_UDP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req)) {
    DWORD err = GET_REQ_SOCK_ERROR(req);
    if (err == WSAEMSGSIZE) {
      /* Not a real error, it just indicates that the received packet */
      /* was bigger than the receive buffer. */
    } else if (err == WSAECONNRESET || err == WSAENETRESET) {
      /* A previous sendto operation failed; ignore this error. If */
      /* zero-reading we need to call WSARecv/WSARecvFrom _without_ the */
      /* MSG_PEEK flag to clear out the error queue. For nonzero reads, */
      /* immediately queue a new receive. */
      if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
        goto done;
      }
    } else {
      /* A real error occurred. Report the error to the user only if we're */
      /* currently reading. */
      if (handle->flags & UV_HANDLE_READING) {
        uv__set_sys_error(loop, err);
        uv_udp_recv_stop(handle);
        buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
              uv_buf_init(NULL, 0) : handle->recv_buffer;
        handle->recv_cb(handle, -1, buf, NULL, 0);
      }
      goto done;
    }
  }

  if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
    /* Successful read */
    partial = !REQ_SUCCESS(req);
    handle->recv_cb(handle,
                    req->overlapped.InternalHigh,
                    handle->recv_buffer,
                    (struct sockaddr*) &handle->recv_from,
                    partial ? UV_UDP_PARTIAL : 0);
  } else if (handle->flags & UV_HANDLE_READING) {
    DWORD bytes, err, flags;
    struct sockaddr_storage from;
    int from_len;

    /* Do a nonblocking receive */
    /* TODO: try to read multiple datagrams at once. FIONREAD maybe? */
    buf = handle->alloc_cb((uv_handle_t*) handle, 65536);
    assert(buf.len > 0);

    memset(&from, 0, sizeof from);
    from_len = sizeof from;
    flags = 0;

    if (WSARecvFrom(handle->socket,
                    (WSABUF*)&buf,
                    1,
                    &bytes,
                    &flags,
                    (struct sockaddr*) &from,
                    &from_len,
                    NULL,
                    NULL) != SOCKET_ERROR) {
      /* Message received */
      handle->recv_cb(handle, bytes, buf, (struct sockaddr*) &from, 0);
    } else {
      err = WSAGetLastError();
      if (err == WSAEMSGSIZE) {
        /* Message truncated */
        handle->recv_cb(handle, bytes, buf, (struct sockaddr*) &from,
                        UV_UDP_PARTIAL);
      } else if (err == WSAEWOULDBLOCK) {
        /* Kernel buffer empty */
        uv__set_sys_error(loop, WSAEWOULDBLOCK);
        handle->recv_cb(handle, 0, buf, NULL, 0);
      } else if (err != WSAECONNRESET && err != WSAENETRESET) {
        /* Serious error. WSAECONNRESET/WSANETRESET is ignored because this */
        /* just indicates that a previous sendto operation failed. */
        uv_udp_recv_stop(handle);
        uv__set_sys_error(loop, err);
        handle->recv_cb(handle, -1, buf, NULL, 0);
      }
    }
  }

done:
  /* Post another read if still reading and not closing. */
  if ((handle->flags & UV_HANDLE_READING) &&
      !(handle->flags & UV_HANDLE_READ_PENDING)) {
    uv_udp_queue_recv(loop, handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/*
 * IPC channel read callback for the multi-process server test.
 * First message carries a pending TCP server handle: accept it, start
 * listening, echo "world\n" back, then open many client connections.
 * A later "accepted_connection\n" message means the remote process accepted
 * a connection; the channel is then closed. Buffers from the alloc callback
 * are freed here on every path.
 */
static void on_read(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
  int r;
  uv_pipe_t* pipe;
  uv_handle_type pending;
  uv_buf_t outbuf;

  pipe = (uv_pipe_t*) handle;

  if (nread == 0) {
    /* Everything OK, but nothing read. */
    free(buf->base);
    return;
  }

  if (nread < 0) {
    if (nread == UV_EOF) {
      free(buf->base);
      return;
    }
    printf("error recving on channel: %s\n", uv_strerror(nread));
    abort();
  }

  fprintf(stderr, "got %d bytes\n", (int)nread);

  pending = uv_pipe_pending_type(pipe);
  if (!tcp_server_listening) {
    ASSERT(1 == uv_pipe_pending_count(pipe));
    ASSERT(nread > 0 && buf->base && pending != UV_UNKNOWN_HANDLE);
    read_cb_called++;

    /* Accept the pending TCP server, and start listening on it. */
    ASSERT(pending == UV_TCP);
    r = uv_tcp_init(uv_default_loop(), &tcp_server);
    ASSERT(r == 0);

    r = uv_accept((uv_stream_t*)pipe, (uv_stream_t*)&tcp_server);
    ASSERT(r == 0);

    r = uv_listen((uv_stream_t*)&tcp_server, BACKLOG, on_connection);
    ASSERT(r == 0);

    tcp_server_listening = 1;

    /* Make sure that the expected data is correctly multiplexed. */
    ASSERT(memcmp("hello\n", buf->base, nread) == 0);

    outbuf = uv_buf_init("world\n", 6);
    r = uv_write(&write_req, (uv_stream_t*)pipe, &outbuf, 1, NULL);
    ASSERT(r == 0);

    /* Create a bunch of connections to get both servers to accept. */
    make_many_connections();
  } else if (memcmp("accepted_connection\n", buf->base, nread) == 0) {
    /* Remote server has accepted a connection. Close the channel. */
    ASSERT(0 == uv_pipe_pending_count(pipe));
    ASSERT(pending == UV_UNKNOWN_HANDLE);
    remote_conn_accepted = 1;
    uv_close((uv_handle_t*)&channel, NULL);
  }

  free(buf->base);
}
/*
 * Completion handler for a queued TCP read request (legacy libuv API:
 * global LOOP, error codes in LOOP->last_error, read_cb gets -1 on error).
 * Reports the completed read, then drains the socket with nonblocking
 * WSARecv calls until it would block, hits EOF, or errors out; finally
 * queues the next overlapped read if the handle is still reading.
 */
void uv_process_tcp_read_req(uv_tcp_t* handle, uv_req_t* req) {
  DWORD bytes, flags, err;
  uv_buf_t buf;

  assert(handle->type == UV_TCP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (req->error.code != UV_OK) {
    /* An error occurred doing the read. */
    if ((handle->flags & UV_HANDLE_READING)) {
      handle->flags &= ~UV_HANDLE_READING;
      LOOP->last_error = req->error;
      /* Zero-reads have no user buffer to hand back. */
      buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
            uv_buf_init(NULL, 0) : handle->read_buffer;
      handle->read_cb((uv_stream_t*)handle, -1, buf);
    }
  } else {
    if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
      /* The read was done with a non-zero buffer length. */
      if (req->overlapped.InternalHigh > 0) {
        /* Successful read */
        handle->read_cb((uv_stream_t*)handle,
                        req->overlapped.InternalHigh,
                        handle->read_buffer);
        /* Read again only if bytes == buf.len */
        if (req->overlapped.InternalHigh < handle->read_buffer.len) {
          goto done;
        }
      } else {
        /* Connection closed */
        handle->flags &= ~UV_HANDLE_READING;
        handle->flags |= UV_HANDLE_EOF;
        LOOP->last_error.code = UV_EOF;
        LOOP->last_error.sys_errno_ = ERROR_SUCCESS;
        /* NOTE(review): buf is zeroed here but read_buffer is what gets
         * passed to read_cb — the two assignments below look like dead
         * stores; confirm which buffer the EOF callback should receive. */
        buf.base = 0;
        buf.len = 0;
        handle->read_cb((uv_stream_t*)handle, -1, handle->read_buffer);
        goto done;
      }
    }

    /* Do nonblocking reads until the buffer is empty */
    while (handle->flags & UV_HANDLE_READING) {
      buf = handle->alloc_cb((uv_stream_t*)handle, 65536);
      assert(buf.len > 0);
      flags = 0;
      if (WSARecv(handle->socket,
                  (WSABUF*)&buf,
                  1,
                  &bytes,
                  &flags,
                  NULL,
                  NULL) != SOCKET_ERROR) {
        if (bytes > 0) {
          /* Successful read */
          handle->read_cb((uv_stream_t*)handle, bytes, buf);
          /* Read again only if bytes == buf.len */
          if (bytes < buf.len) {
            break;
          }
        } else {
          /* Connection closed */
          handle->flags &= ~UV_HANDLE_READING;
          handle->flags |= UV_HANDLE_EOF;
          LOOP->last_error.code = UV_EOF;
          LOOP->last_error.sys_errno_ = ERROR_SUCCESS;
          handle->read_cb((uv_stream_t*)handle, -1, buf);
          break;
        }
      } else {
        err = WSAGetLastError();
        if (err == WSAEWOULDBLOCK) {
          /* Read buffer was completely empty, report a 0-byte read. */
          uv_set_sys_error(WSAEWOULDBLOCK);
          handle->read_cb((uv_stream_t*)handle, 0, buf);
        } else {
          /* Ouch! serious error. */
          uv_set_sys_error(err);
          handle->read_cb((uv_stream_t*)handle, -1, buf);
        }
        break;
      }
    }

done:
    /* Post another read if still reading and not closing. */
    if ((handle->flags & UV_HANDLE_READING) &&
        !(handle->flags & UV_HANDLE_READ_PENDING)) {
      uv_tcp_queue_read(handle);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
int main() { uv_tcp_init(uv_default_loop(), &sock); uv_timer_init(uv_default_loop(), &timer); state.doFile("lua/main.lua"); //L = luaL_newstate(); //luaL_openlibs(L); // // //int s = luaL_loadfile(L, "lua/main.lua"); //if (s==0) { // s = lua_pcall(L, 0, 0, 0); //} //lua_error_fatal(L, s); auto &endpoint = servers[rand() % (sizeof(servers) / sizeof(servers[0]))]; auto connect = new uv_connect_t; sockaddr_in addr; uv_ip4_addr(endpoint.host, endpoint.port, &addr); uv_tcp_connect(connect, &sock, (sockaddr*)&addr, [](uv_connect_t* req, int status) { auto length = client.connected(); read_buffer.resize(length); uv_read_start(req->handle, [](uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { *buf = uv_buf_init(&read_buffer[read_offset], read_buffer.size() - read_offset); }, [](uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) { if (nread < 1) { //auto str = uv_strerror(nread); } read_offset += nread; if (read_offset == read_buffer.size()) { auto next_length = client.readable(reinterpret_cast<unsigned char*>(&read_buffer[0])); read_offset = 0; read_buffer.resize(next_length); } }); delete req; }); client.onHandshake = [] { state["onHandshake"](); }; client.onLogOn = [](EResult result, SteamID steamID) { std::string str = std::to_string(steamID.steamID64); state["onLogOn"]((unsigned int)result,str.c_str()); }; client.onLogOff = [](EResult result) { state["onLogOff"]((unsigned int)result); }; client.onUserInfo = []( SteamID user, SteamID* source, const char* name, EPersonaState* pstate, const unsigned char avatar_hash[20], const char* game_name ) { std::string nullstr = ""; std::string str_user = std::to_string(user.steamID64); std::string str_source = source?std::to_string(source->steamID64):nullstr; std::ostringstream ss; if (avatar_hash) { ss << std::hex << std::setfill('0'); for (auto i = 0; i < 20; i++) ss << std::setw(2) << static_cast<unsigned>(avatar_hash[i]); } std::string avatar_hex = avatar_hash?ss.str():nullstr; state["onUserInfo"]( 
str_user.c_str(), str_source.c_str(), name?name:"", pstate?static_cast<unsigned int>(*pstate):0, avatar_hex.c_str(), game_name?game_name:"" ); }; client.onTyping = [](SteamID user,bool relaying) { std::string str_user = std::to_string(user.steamID64); state["onTyping"](str_user.c_str(),relaying); }; client.onPrivateMsg = [](SteamID user, const char* message, bool relaying) { std::string str_user = std::to_string(user.steamID64); state["onPrivateMsg"]( str_user.c_str(),message, relaying ); }; client.onSentry = [](const unsigned char sentryhash[20]) { std::string str((const char *)(&sentryhash),20); //std::cout << "Sentry Hash ("<< str.length(); // //std::cout << "): " << bin2hex(str) << std::endl; state["onSentry"](&str); }; client.onChatMsg = [](SteamID room, SteamID chatter, std::string message) { std::string str_room = std::to_string(room.steamID64); std::string str_chatter = std::to_string(chatter.steamID64); state["onChatMsg"](str_room.c_str(),str_chatter.c_str(),message.c_str()); }; client.onUnhandledMessage = [](EMsg emsg, const unsigned char* data, std::size_t length) { //std::string datastr(reinterpret_cast<const char*>(data),length); //std::cout << "Unhandled message (" << (int)emsg << ", " << length; //std::cout << "): " << bin2hex(datastr) << std::endl; tolua_data = data; state["onUnhandledMessage"]((int)emsg,length); }; int ret = uv_run(uv_default_loop(), uv_run_mode::UV_RUN_DEFAULT); //lua_close(L); return ret; }
/* Alloc callback (pre-1.0 libuv API): hands out the same static 64 KiB
 * buffer on every call; `suggested_size` is ignored. */
static uv_buf_t tcp_alloc_cb(uv_handle_t *handle, size_t suggested_size) {
  static char slab[65536];
  return uv_buf_init(slab, sizeof slab);
}
/*
 * Pipe.write2(data, send_handle[, callback]) — write data over the pipe
 * while also transferring a TCP/Pipe/UDP handle (IPC).
 * Fix: `view` was leaked when PyArg_ParseTuple failed; no Py_buffer was
 * acquired at that point, so only the allocation needs to be undone.
 */
static PyObject *
Pipe_func_write2(Pipe *self, PyObject *args)
{
    uv_buf_t buf;
    Py_buffer *view;
    PyObject *callback, *send_handle;

    callback = Py_None;

    RAISE_IF_HANDLE_NOT_INITIALIZED(self, NULL);
    RAISE_IF_HANDLE_CLOSED(self, PyExc_HandleClosedError, NULL);

    view = PyMem_Malloc(sizeof *view);
    if (!view) {
        PyErr_NoMemory();
        return NULL;
    }

#ifdef PYUV_PYTHON3
    if (!PyArg_ParseTuple(args, "y*O|O:write", view, &send_handle, &callback)) {
#else
    if (!PyArg_ParseTuple(args, "s*O|O:write", view, &send_handle, &callback)) {
#endif
        /* Fix: free the view struct (no buffer was acquired on failure). */
        PyMem_Free(view);
        return NULL;
    }

    /* Only stream (TCP/Pipe) and UDP handles can be sent over a pipe. */
    if (PyObject_IsSubclass((PyObject *)send_handle->ob_type, (PyObject *)&StreamType)) {
        if (UV_HANDLE(send_handle)->type != UV_TCP && UV_HANDLE(send_handle)->type != UV_NAMED_PIPE) {
            PyErr_SetString(PyExc_TypeError, "Only TCP and Pipe objects are supported for write2");
            goto error;
        }
    } else if (PyObject_IsSubclass((PyObject *)send_handle->ob_type, (PyObject *)&UDPType)) {
        /* empty */
    } else {
        PyErr_SetString(PyExc_TypeError, "Only Stream and UDP objects are supported");
        goto error;
    }

    if (callback != Py_None && !PyCallable_Check(callback)) {
        PyErr_SetString(PyExc_TypeError, "a callable or None is required");
        goto error;
    }

    /* pyuv_stream_write takes ownership of view on success. */
    buf = uv_buf_init(view->buf, view->len);
    return pyuv_stream_write((Stream *)self, view, &buf, 1, callback, send_handle);

error:
    PyBuffer_Release(view);
    PyMem_Free(view);
    return NULL;
}


/*
 * Pipe(loop[, ipc]) — initialize the underlying uv_pipe_t on the given
 * loop; `ipc` enables handle passing over the pipe.
 */
static int
Pipe_tp_init(Pipe *self, PyObject *args, PyObject *kwargs)
{
    int r;
    Loop *loop;
    PyObject *ipc = Py_False;

    UNUSED_ARG(kwargs);

    RAISE_IF_HANDLE_INITIALIZED(self, -1);

    if (!PyArg_ParseTuple(args, "O!|O!:__init__", &LoopType, &loop, &PyBool_Type, &ipc)) {
        return -1;
    }

    r = uv_pipe_init(loop->uv_loop, (uv_pipe_t *)UV_HANDLE(self), (ipc == Py_True) ? 1 : 0);
    if (r != 0) {
        RAISE_UV_EXCEPTION(loop->uv_loop, PyExc_PipeError);
        return -1;
    }

    initialize_handle(HANDLE(self), loop);

    return 0;
}
/* Copy `size` bytes from `buf` into a freshly allocated buffer owned by a
 * new write request, then queue it on `dest`; `callback` is responsible for
 * freeing the request and its buffer once the write completes.
 * Fix: both malloc calls were unchecked; on failure we now return without
 * queuing (and without leaking the request). */
void write_data(uv_stream_t *dest, size_t size, uv_buf_t buf, uv_write_cb callback) {
  write_req_t *req = (write_req_t*) malloc(sizeof(write_req_t));
  if (req == NULL) {
    return;
  }
  req->buf = uv_buf_init((char*) malloc(size), size);
  if (req->buf.base == NULL) {
    free(req);
    return;
  }
  memcpy(req->buf.base, buf.base, size);
  uv_write((uv_write_t*) req, (uv_stream_t*)dest, &req->buf, 1, callback);
}
/* Allocation callback (pre-1.0 libuv API): one heap buffer per call, sized
 * exactly as suggested; ownership passes to the read callback. */
static uv_buf_t on_alloc(uv_handle_t* handle, size_t suggested_size) {
  char* mem = malloc(suggested_size);
  return uv_buf_init(mem, suggested_size);
}
/*
 * Completion handler for a queued UDP receive request (variant with the
 * zero-read path compiled out via #if 0: every receive uses the
 * preallocated recv_buffer, and STATUS_RECEIVE_EXPEDITED marks a partial
 * datagram).
 */
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req) {
  uv_buf_t buf;
  int partial;

  assert(handle->type == UV_UDP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req) &&
      GET_REQ_STATUS(req) != STATUS_RECEIVE_EXPEDITED) {
    /* An error occurred doing the read. */
    if ((handle->flags & UV_HANDLE_READING)) {
      uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
      uv_udp_recv_stop(handle);
#if 0
      buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
            uv_buf_init(NULL, 0) : handle->recv_buffer;
#else
      buf = handle->recv_buffer;
#endif
      handle->recv_cb(handle, -1, buf, NULL, 0);
    }
    goto done;
  }

#if 0
  if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
#endif
    /* Successful read */
    partial = (GET_REQ_STATUS(req) == STATUS_RECEIVE_EXPEDITED);
    handle->recv_cb(handle,
                    req->overlapped.InternalHigh,
                    handle->recv_buffer,
                    (struct sockaddr*) &handle->recv_from,
                    partial ? UV_UDP_PARTIAL : 0);
#if 0
  } else {
    DWORD bytes, err, flags;
    struct sockaddr_storage from;
    int from_len;

    /* Do a nonblocking receive */
    /* TODO: try to read multiple datagrams at once. FIONREAD maybe? */
    buf = handle->alloc_cb((uv_handle_t*) handle, 65536);
    assert(buf.len > 0);

    memset(&from, 0, sizeof from);
    from_len = sizeof from;
    flags = MSG_PARTIAL;

    if (WSARecvFrom(handle->socket,
                    (WSABUF*)&buf,
                    1,
                    &bytes,
                    &flags,
                    (struct sockaddr*) &from,
                    &from_len,
                    NULL,
                    NULL) != SOCKET_ERROR) {
      /* Message received */
      handle->recv_cb(handle,
                      bytes,
                      buf,
                      (struct sockaddr*) &from,
                      (flags & MSG_PARTIAL) ? UV_UDP_PARTIAL : 0);
    } else {
      err = WSAGetLastError();
      if (err == WSAEWOULDBLOCK) {
        uv__set_sys_error(loop, WSAEWOULDBLOCK);
        handle->recv_cb(handle, 0, buf, NULL, 0);
      } else {
        /* Ouch! serious error. */
        uv__set_sys_error(loop, err);
        handle->recv_cb(handle, -1, buf, NULL, 0);
      }
    }
  }
#endif

done:
  /* Post another read if still reading and not closing. */
  if ((handle->flags & UV_HANDLE_READING) &&
      !(handle->flags & UV_HANDLE_READ_PENDING)) {
    uv_udp_queue_recv(loop, handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/* Alloc callback (pre-1.0 libuv API): reuses a single static 1 KiB buffer
 * for every read; `size` is ignored. */
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
  static char storage[1024];
  return uv_buf_init(storage, ARRAY_SIZE(storage));
}
/* Trace-logging alloc callback (pre-1.0 libuv API): prints the function
 * name, then returns a malloc()ed buffer of the suggested size. */
static uv_buf_t on_alloc(uv_handle_t* handle, size_t suggested_size) {
  puts(__func__);
  char* mem = malloc(suggested_size);
  return uv_buf_init(mem, suggested_size);
}
// libuv alloc callback (pre-1.0 API): returns a freshly malloc()ed buffer
// of exactly the suggested size.
uv_buf_t Socket::alloc_buffer(uv_handle_t *handle, size_t suggested_size) {
    char* storage = (char*) malloc(suggested_size);
    return uv_buf_init(storage, suggested_size);
}
/*
 * Child-process half of the stdio-over-pipes test (legacy libuv API:
 * uv_run/uv_ref/uv_unref take the loop). Writes "hello world\n" split into
 * seven ordered buffers to stdout, then reads the parent's reply from stdin
 * and verifies the expected callback counts.
 */
static int stdio_over_pipes_helper() {
  /* Write several buffers to test that the write order is preserved. */
  char* buffers[] = {
    "he",
    "ll",
    "o ",
    "wo",
    "rl",
    "d",
    "\n"
  };

  uv_write_t write_req[ARRAY_SIZE(buffers)];
  uv_buf_t buf[ARRAY_SIZE(buffers)];
  int r, i;
  uv_loop_t* loop = uv_default_loop();

  /* stdin/stdout must both have been set up as pipes by the parent. */
  ASSERT(UV_NAMED_PIPE == uv_guess_handle(0));
  ASSERT(UV_NAMED_PIPE == uv_guess_handle(1));

  r = uv_pipe_init(loop, &stdin_pipe, 0);
  ASSERT(r == 0);
  r = uv_pipe_init(loop, &stdout_pipe, 0);
  ASSERT(r == 0);

  uv_pipe_open(&stdin_pipe, 0);
  uv_pipe_open(&stdout_pipe, 1);

  /* Unref both stdio handles to make sure that all writes complete. */
  uv_unref(loop);
  uv_unref(loop);

  for (i = 0; i < ARRAY_SIZE(buffers); i++) {
    buf[i] = uv_buf_init((char*)buffers[i], strlen(buffers[i]));
  }

  for (i = 0; i < ARRAY_SIZE(buffers); i++) {
    r = uv_write(&write_req[i], (uv_stream_t*)&stdout_pipe, &buf[i], 1,
      after_pipe_write);
    ASSERT(r == 0);
  }

  /* First run: drain all the queued writes. */
  uv_run(loop);

  ASSERT(after_write_called == 7);
  ASSERT(on_pipe_read_called == 0);
  ASSERT(close_cb_called == 0);

  /* Re-ref before reading so the loop stays alive for the reply. */
  uv_ref(loop);
  uv_ref(loop);

  r = uv_read_start((uv_stream_t*)&stdin_pipe, on_pipe_read_alloc,
    on_pipe_read);
  ASSERT(r == 0);

  /* Second run: read the parent's response until both pipes close. */
  uv_run(loop);

  ASSERT(after_write_called == 7);
  ASSERT(on_pipe_read_called == 1);
  ASSERT(close_cb_called == 2);

  return 0;
}
/*
 * Completion handler for a queued TCP read request (modern libuv API:
 * negative libuv error codes, alloc_cb fills a caller-provided uv_buf_t).
 * Reports the completed overlapped read, then drains the socket with
 * nonblocking WSARecv calls until it would block, hits EOF, or fails;
 * finally queues the next overlapped read if the handle is still reading.
 */
void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_req_t* req) {
  DWORD bytes, flags, err;
  uv_buf_t buf;

  assert(handle->type == UV_TCP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req)) {
    /* An error occurred doing the read. */
    if ((handle->flags & UV_HANDLE_READING) ||
        !(handle->flags & UV_HANDLE_ZERO_READ)) {
      handle->flags &= ~UV_HANDLE_READING;
      DECREASE_ACTIVE_COUNT(loop, handle);
      buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
            uv_buf_init(NULL, 0) : handle->tcp.conn.read_buffer;

      err = GET_REQ_SOCK_ERROR(req);

      if (err == WSAECONNABORTED) {
        /*
         * Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
         */
        err = WSAECONNRESET;
      }

      handle->read_cb((uv_stream_t*)handle,
                      uv_translate_sys_error(err),
                      &buf);
    }
  } else {
    if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
      /* The read was done with a non-zero buffer length. */
      if (req->u.io.overlapped.InternalHigh > 0) {
        /* Successful read */
        handle->read_cb((uv_stream_t*)handle,
                        req->u.io.overlapped.InternalHigh,
                        &handle->tcp.conn.read_buffer);
        /* Read again only if bytes == buf.len */
        if (req->u.io.overlapped.InternalHigh < handle->tcp.conn.read_buffer.len) {
          goto done;
        }
      } else {
        /* Connection closed */
        if (handle->flags & UV_HANDLE_READING) {
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);
        }
        handle->flags &= ~UV_HANDLE_READABLE;

        buf.base = 0;
        buf.len = 0;
        handle->read_cb((uv_stream_t*)handle, UV_EOF, &handle->tcp.conn.read_buffer);
        goto done;
      }
    }

    /* Do nonblocking reads until the buffer is empty */
    while (handle->flags & UV_HANDLE_READING) {
      buf = uv_buf_init(NULL, 0);
      handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
      if (buf.base == NULL || buf.len == 0) {
        /* No buffer from the user: surface UV_ENOBUFS and stop draining. */
        handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
        break;
      }
      assert(buf.base != NULL);

      flags = 0;
      if (WSARecv(handle->socket,
                  (WSABUF*)&buf,
                  1,
                  &bytes,
                  &flags,
                  NULL,
                  NULL) != SOCKET_ERROR) {
        if (bytes > 0) {
          /* Successful read */
          handle->read_cb((uv_stream_t*)handle, bytes, &buf);
          /* Read again only if bytes == buf.len */
          if (bytes < buf.len) {
            break;
          }
        } else {
          /* Connection closed */
          handle->flags &= ~(UV_HANDLE_READING | UV_HANDLE_READABLE);
          DECREASE_ACTIVE_COUNT(loop, handle);

          handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf);
          break;
        }
      } else {
        err = WSAGetLastError();
        if (err == WSAEWOULDBLOCK) {
          /* Read buffer was completely empty, report a 0-byte read. */
          handle->read_cb((uv_stream_t*)handle, 0, &buf);
        } else {
          /* Ouch! serious error. */
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);

          if (err == WSAECONNABORTED) {
            /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with
             * Unix. */
            err = WSAECONNRESET;
          }

          handle->read_cb((uv_stream_t*)handle,
                          uv_translate_sys_error(err),
                          &buf);
        }
        break;
      }
    }

done:
    /* Post another read if still reading and not closing. */
    if ((handle->flags & UV_HANDLE_READING) &&
        !(handle->flags & UV_HANDLE_READ_PENDING)) {
      uv_tcp_queue_read(loop, handle);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/*
 * UDP.send((ip, port), data[, callback]) — queue a datagram to the given
 * destination; `callback` (if not None) fires when the send completes.
 * Fix: the Py_buffer acquired by PyArg_ParseTuple was leaked when the port
 * range check or the IP family check failed; both paths now release it.
 */
static PyObject *
UDP_func_send(UDP *self, PyObject *args)
{
    int r, dest_port, address_type;
    char *dest_ip;
    uv_buf_t buf;
    Py_buffer pbuf;
    PyObject *callback;
    uv_udp_send_t *wr = NULL;
    udp_send_data_t *req_data = NULL;

    callback = Py_None;

    RAISE_IF_HANDLE_CLOSED(self, PyExc_HandleClosedError, NULL);

    if (!PyArg_ParseTuple(args, "(si)s*|O:send", &dest_ip, &dest_port, &pbuf, &callback)) {
        return NULL;
    }

    if (callback != Py_None && !PyCallable_Check(callback)) {
        PyBuffer_Release(&pbuf);
        PyErr_SetString(PyExc_TypeError, "a callable or None is required");
        return NULL;
    }

    if (dest_port < 0 || dest_port > 65535) {
        /* Fix: release the acquired buffer (was leaked). */
        PyBuffer_Release(&pbuf);
        PyErr_SetString(PyExc_ValueError, "port must be between 0 and 65535");
        return NULL;
    }

    if (pyuv_guess_ip_family(dest_ip, &address_type)) {
        /* Fix: release the acquired buffer (was leaked). */
        PyBuffer_Release(&pbuf);
        PyErr_SetString(PyExc_ValueError, "invalid IP address");
        return NULL;
    }

    Py_INCREF(callback);

    wr = (uv_udp_send_t *)PyMem_Malloc(sizeof(uv_udp_send_t));
    if (!wr) {
        PyErr_NoMemory();
        goto error;
    }

    req_data = (udp_send_data_t*) PyMem_Malloc(sizeof(udp_send_data_t));
    if (!req_data) {
        PyErr_NoMemory();
        goto error;
    }

    buf = uv_buf_init(pbuf.buf, pbuf.len);

    /* The request owns the callback reference and the buffer view until
     * on_udp_send releases them. */
    req_data->callback = callback;
    req_data->buf_count = 1;
    req_data->data.view = pbuf;

    wr->data = (void *)req_data;

    if (address_type == AF_INET) {
        r = uv_udp_send(wr, (uv_udp_t *)UV_HANDLE(self), &buf, 1, uv_ip4_addr(dest_ip, dest_port), (uv_udp_send_cb)on_udp_send);
    } else {
        r = uv_udp_send6(wr, (uv_udp_t *)UV_HANDLE(self), &buf, 1, uv_ip6_addr(dest_ip, dest_port), (uv_udp_send_cb)on_udp_send);
    }
    if (r != 0) {
        RAISE_UV_EXCEPTION(UV_HANDLE_LOOP(self), PyExc_UDPError);
        goto error;
    }

    Py_RETURN_NONE;

error:
    PyBuffer_Release(&pbuf);
    Py_DECREF(callback);
    if (req_data) {
        PyMem_Free(req_data);
    }
    if (wr) {
        PyMem_Free(wr);
    }
    return NULL;
}