/*
 * call-seq:
 *   basicsocket.send(mesg, flags [, dest_sockaddr]) => numbytes_sent
 *
 * send _mesg_ via _basicsocket_.
 *
 * _mesg_ should be a string.
 *
 * _flags_ should be a bitwise OR of Socket::MSG_* constants.
 *
 * _dest_sockaddr_ should be a packed sockaddr string or an addrinfo.
 *
 *   TCPSocket.open("localhost", 80) {|s|
 *     s.send "GET / HTTP/1.0\r\n\r\n", 0
 *     p s.read
 *   }
 */
VALUE rsock_bsock_send(int argc, VALUE *argv, VALUE sock)
{
    struct rsock_send_arg arg;
    VALUE flags, to;
    rb_io_t *fptr;
    int n;
    rb_blocking_function_t *func;

    rb_secure(4);
    /* "21": mesg and flags are mandatory, dest_sockaddr is optional
     * (to is Qnil when omitted). */
    rb_scan_args(argc, argv, "21", &arg.mesg, &flags, &to);

    StringValue(arg.mesg);
    if (!NIL_P(to)) {
        SockAddrStringValue(to);
        /* Take a private (frozen) copy of the sockaddr string so the
         * buffer we point arg.to at cannot be mutated or moved while
         * we block in the send below without the GVL. */
        to = rb_str_new4(to);
        arg.to = (struct sockaddr *)RSTRING_PTR(to);
        arg.tolen = (socklen_t)RSTRING_LENINT(to);
        func = rsock_sendto_blocking;   /* destination given: sendto(2) */
    }
    else {
        func = rsock_send_blocking;     /* connected socket: send(2) */
    }
    GetOpenFile(sock, fptr);
    arg.fd = fptr->fd;
    arg.flags = NUM2INT(flags);
    /* Wait until the fd is writable, then perform the send outside the
     * GVL.  The comma operator runs the writability wait before every
     * attempt.  When the send fails and rb_io_wait_writable() says the
     * condition is retryable, try again; otherwise raise Errno. */
    while (rb_thread_fd_writable(arg.fd),
           (n = (int)BLOCKING_REGION_FD(func, &arg)) < 0) {
        if (rb_io_wait_writable(arg.fd)) {
            continue;
        }
        rb_sys_fail("send(2)");
    }
    return INT2FIX(n);
}
/*
 * Flushes the write buffer of +fptr+.
 *
 * Redefined here (originally a static function in Ruby's io.c) because
 * the io.c version is not visible outside that translation unit.
 *
 * Returns 0 once the buffer is empty (or was empty to begin with) and
 * -1 when waiting for writability fails.
 */
static int io_fflush(rb_io_t *fptr)
{
    rb_io_check_closed(fptr);
    if (fptr->wbuf_len == 0) {
        return 0;
    }
    if (!rb_thread_fd_writable(fptr->fd)) {
        rb_io_check_closed(fptr);
    }
    for (;;) {
        /* Done as soon as the buffer has been drained. */
        if (fptr->wbuf_len <= 0) {
            return 0;
        }
        /* io_flush_buffer() == 0 means the flush completed. */
        if (io_flush_buffer(fptr) == 0) {
            return 0;
        }
        /* Flush did not finish; wait until the fd is writable again. */
        if (!rb_io_wait_writable(fptr->fd)) {
            return -1;
        }
        rb_io_check_closed(fptr);
    }
}
/*
 * call-seq:
 *   udpsocket.send(mesg, flags, host, port)  => numbytes_sent
 *   udpsocket.send(mesg, flags, sockaddr_to) => numbytes_sent
 *   udpsocket.send(mesg, flags)              => numbytes_sent
 *
 * Sends _mesg_ via _udpsocket_.
 *
 * _flags_ should be a bitwise OR of Socket::MSG_* constants.
 *
 *   u1 = UDPSocket.new
 *   u1.bind("127.0.0.1", 4913)
 *
 *   u2 = UDPSocket.new
 *   u2.send "hi", 0, "127.0.0.1", 4913
 *
 *   mesg, addr = u1.recvfrom(10)
 *   u1.send mesg, 0, addr[3], addr[1]
 *
 *   p u2.recv(100) #=> "hi"
 *
 */
static VALUE udp_send(int argc, VALUE *argv, VALUE sock)
{
    VALUE flags, host, port;
    rb_io_t *fptr;
    int n;
    struct addrinfo *res0, *res;
    struct rsock_send_arg arg;

    if (argc == 2 || argc == 3) {
        /* No host/port pair: either connected-socket send or a packed
         * sockaddr destination.  Delegate to BasicSocket#send. */
        return rsock_bsock_send(argc, argv, sock);
    }
    /* Four mandatory arguments: mesg, flags, host, port. */
    rb_scan_args(argc, argv, "4", &arg.mesg, &flags, &host, &port);

    StringValue(arg.mesg);
    res0 = rsock_addrinfo(host, port, SOCK_DGRAM, 0);
    GetOpenFile(sock, fptr);
    arg.fd = fptr->fd;
    arg.flags = NUM2INT(flags);
    /* Try each resolved address in turn until one sendto(2) succeeds. */
    for (res = res0; res; res = res->ai_next) {
      retry:
        arg.to = res->ai_addr;
        arg.tolen = res->ai_addrlen;
        rb_thread_fd_writable(arg.fd);
        /* Perform the sendto(2) outside the GVL. */
        n = (int)BLOCKING_REGION_FD(rsock_sendto_blocking, &arg);
        if (n >= 0) {
            /* Success: free the address list before returning. */
            freeaddrinfo(res0);
            return INT2FIX(n);
        }
        /* When rb_io_wait_writable() reports a retryable condition,
         * resend to the same address; otherwise move on to the next
         * resolved address. */
        if (rb_io_wait_writable(fptr->fd)) {
            goto retry;
        }
    }
    /* Every address failed: release the list and raise Errno with the
     * host/port in the message. */
    freeaddrinfo(res0);
    rsock_sys_fail_host_port("sendto(2)", host, port);

    return INT2FIX(n);  /* not reached; silences compiler warnings */
}
/*
 * call-seq:
 *   unixsocket.send_io(io) => nil
 *
 * Sends _io_ as file descriptor passing.
 *
 *   s1, s2 = UNIXSocket.pair
 *
 *   s1.send_io STDOUT
 *   stdout = s2.recv_io
 *
 *   p STDOUT.fileno #=> 1
 *   p stdout.fileno #=> 6
 *
 *   stdout.puts "hello" # outputs "hello\n" to standard output.
 */
static VALUE unix_send_io(VALUE sock, VALUE val)
{
    int fd;
    rb_io_t *fptr;
    struct iomsg_arg arg;
    struct iovec vec[1];
    char buf[1];
#if FD_PASSING_BY_MSG_CONTROL
    /* Ancillary-data buffer: a cmsghdr followed by enough room for one
     * int (the descriptor) plus alignment slack on either side. */
    struct {
        struct cmsghdr hdr;
        char pad[8+sizeof(int)+8];
    } cmsg;
#endif

    /* Accept either an IO object or a raw integer descriptor. */
    if (rb_obj_is_kind_of(val, rb_cIO)) {
        rb_io_t *valfptr;
        GetOpenFile(val, valfptr);
        fd = valfptr->fd;
    }
    else if (FIXNUM_P(val)) {
        fd = FIX2INT(val);
    }
    else {
        rb_raise(rb_eTypeError, "neither IO nor file descriptor");
    }

    GetOpenFile(sock, fptr);

    arg.msg.msg_name = NULL;
    arg.msg.msg_namelen = 0;

    /* Linux and Solaris doesn't work if msg_iov is NULL,
     * so send one dummy byte alongside the control message. */
    buf[0] = '\0';
    vec[0].iov_base = buf;
    vec[0].iov_len = 1;
    arg.msg.msg_iov = vec;
    arg.msg.msg_iovlen = 1;

#if FD_PASSING_BY_MSG_CONTROL
    /* Modern fd passing: SCM_RIGHTS control message carrying the fd. */
    arg.msg.msg_control = (caddr_t)&cmsg;
    arg.msg.msg_controllen = CMSG_LEN(sizeof(int));
    arg.msg.msg_flags = 0;
    MEMZERO((char*)&cmsg, char, sizeof(cmsg));
    cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(int));
    cmsg.hdr.cmsg_level = SOL_SOCKET;
    cmsg.hdr.cmsg_type = SCM_RIGHTS;
    memcpy(CMSG_DATA(&cmsg.hdr), &fd, sizeof(int));
#else
    /* Legacy BSD-style fd passing via msg_accrights. */
    arg.msg.msg_accrights = (caddr_t)&fd;
    arg.msg.msg_accrightslen = sizeof(fd);
#endif

    arg.fd = fptr->fd;
    /* Wait for writability, then do the sendmsg(2) outside the GVL. */
    rb_thread_fd_writable(arg.fd);
    if ((int)BLOCKING_REGION(sendmsg_blocking, &arg) == -1)
        rb_sys_fail("sendmsg(2)");

    return Qnil;
}
/*
 * Gathers every element of the +count+ Ruby arrays in
 * +array_of_components+ (each element converted to a String) and writes
 * their bytes to the descriptor +fd+ (a Ruby Integer) with writev().
 *
 * Since one writev() call accepts at most IOV_MAX vectors, the strings
 * are distributed over ceil(total_components / IOV_MAX) groups and one
 * writev() is issued per group, retrying on partial writes and on
 * retryable errors (e.g. EAGAIN).
 *
 * Returns the total number of bytes written as a Ruby Integer.
 * Raises NoMemoryError when stack space for the vectors cannot be
 * obtained, ArgumentError when the payload exceeds SSIZE_MAX, and a
 * SystemCallError when writev() fails with a non-retryable error.
 *
 * NOTE: no Ruby code must run between building the iovecs and the
 * writev() calls, because the RSTRING_PTR()s must stay valid.
 */
static VALUE f_generic_writev(VALUE fd, VALUE *array_of_components, unsigned int count)
{
    VALUE components, str;
    unsigned int total_size, total_components, ngroups;
    IOVectorGroup *groups;
    unsigned int i, j, group_offset, vector_offset;
    unsigned long long ssize_max;
    ssize_t ret;
    int done, fd_num, e;
    #ifndef TRAP_BEG
        WritevWrapperData writev_wrapper_data;
    #endif

    /* First determine the number of components that we have. */
    total_components = 0;
    for (i = 0; i < count; i++) {
        Check_Type(array_of_components[i], T_ARRAY);
        total_components += (unsigned int) RARRAY_LEN(array_of_components[i]);
    }
    if (total_components == 0) {
        /* Fixed: this used to be `return NUM2INT(0);`, which treats the
         * C integer 0 as a Ruby VALUE (false on MRI) and converts it to
         * a C int, raising TypeError at runtime instead of returning
         * the Integer 0. */
        return INT2NUM(0);
    }

    /* A single writev() call can only accept IOV_MAX vectors, so we
     * may have to split the components into groups and perform
     * multiple writev() calls, one per group. Determine the number
     * of groups needed, how big each group should be and allocate
     * memory for them.
     *
     * The vectors live on the stack (alloca); the NULL checks are
     * purely defensive, since alloca normally overflows the stack
     * rather than returning NULL. */
    if (total_components % IOV_MAX == 0) {
        /* Evenly divisible: every group is exactly IOV_MAX vectors. */
        ngroups = total_components / IOV_MAX;
        groups = alloca(ngroups * sizeof(IOVectorGroup));
        if (groups == NULL) {
            rb_raise(rb_eNoMemError, "Insufficient stack space.");
        }
        memset(groups, 0, ngroups * sizeof(IOVectorGroup));
        for (i = 0; i < ngroups; i++) {
            groups[i].io_vectors = alloca(IOV_MAX * sizeof(struct iovec));
            if (groups[i].io_vectors == NULL) {
                rb_raise(rb_eNoMemError, "Insufficient stack space.");
            }
            groups[i].count = IOV_MAX;
        }
    } else {
        /* Not evenly divisible: the last group holds the remainder. */
        ngroups = total_components / IOV_MAX + 1;
        groups = alloca(ngroups * sizeof(IOVectorGroup));
        if (groups == NULL) {
            rb_raise(rb_eNoMemError, "Insufficient stack space.");
        }
        memset(groups, 0, ngroups * sizeof(IOVectorGroup));
        for (i = 0; i < ngroups - 1; i++) {
            groups[i].io_vectors = alloca(IOV_MAX * sizeof(struct iovec));
            if (groups[i].io_vectors == NULL) {
                rb_raise(rb_eNoMemError, "Insufficient stack space.");
            }
            groups[i].count = IOV_MAX;
        }
        groups[ngroups - 1].io_vectors = alloca((total_components % IOV_MAX) * sizeof(struct iovec));
        if (groups[ngroups - 1].io_vectors == NULL) {
            rb_raise(rb_eNoMemError, "Insufficient stack space.");
        }
        groups[ngroups - 1].count = total_components % IOV_MAX;
    }

    /* Now distribute the components among the groups, filling the iovec
     * array in each group. Also calculate the total data size while
     * we're at it.
     *
     * NOTE(review): total_size is an unsigned int and may wrap for
     * payloads of 4 GiB or more on LP64 platforms before the SSIZE_MAX
     * check below can catch them -- consider widening it. */
    total_size = 0;
    group_offset = 0;
    vector_offset = 0;
    for (i = 0; i < count; i++) {
        components = array_of_components[i];
        for (j = 0; j < (unsigned int) RARRAY_LEN(components); j++) {
            str = rb_ary_entry(components, j);
            str = rb_obj_as_string(str);
            total_size += (unsigned int) RSTRING_LEN(str);
            /* I know writev() doesn't write to iov_base, but on some
             * platforms it's still defined as non-const char *
             * :-(
             */
            groups[group_offset].io_vectors[vector_offset].iov_base = (char *) RSTRING_PTR(str);
            groups[group_offset].io_vectors[vector_offset].iov_len  = RSTRING_LEN(str);
            groups[group_offset].total_size += RSTRING_LEN(str);
            vector_offset++;
            if (vector_offset == groups[group_offset].count) {
                group_offset++;
                vector_offset = 0;
            }
        }
    }

    /* We don't compare to SSIZE_MAX directly in order to shut up a
     * compiler warning on OS X Snow Leopard. */
    ssize_max = SSIZE_MAX;
    if (total_size > ssize_max) {
        rb_raise(rb_eArgError,
            "The total size of the components may not be larger than SSIZE_MAX.");
    }

    /* Write the data. */
    fd_num = NUM2INT(fd);
    for (i = 0; i < ngroups; i++) {
        /* Wait until the file descriptor becomes writable before
         * writing things. */
        rb_thread_fd_writable(fd_num);

        done = 0;
        while (!done) {
            #ifdef TRAP_BEG
                /* Old (1.8-style) interpreters: release control around
                 * the blocking syscall with TRAP_BEG/TRAP_END. */
                TRAP_BEG;
                ret = writev(fd_num, groups[i].io_vectors, groups[i].count);
                TRAP_END;
            #else
                /* Newer interpreters: run writev() without the GVL. */
                writev_wrapper_data.filedes = fd_num;
                writev_wrapper_data.iov    = groups[i].io_vectors;
                writev_wrapper_data.iovcnt = groups[i].count;
                ret = (int) rb_thread_blocking_region(writev_wrapper,
                    &writev_wrapper_data, RUBY_UBF_IO, 0);
            #endif
            if (ret == -1) {
                /* If the error is something like EAGAIN, yield to
                 * another thread until the file descriptor becomes
                 * writable again. In case of other errors, raise an
                 * exception. */
                if (!rb_io_wait_writable(fd_num)) {
                    rb_sys_fail("writev()");
                }
            } else if (ret < groups[i].total_size) {
                /* Not everything in this group has been written. Retry
                 * without writing the bytes that have been successfully
                 * written.  Preserve writev()'s errno across the
                 * bookkeeping call so rb_io_wait_writable() sees it. */
                e = errno;
                update_group_written_info(&groups[i], ret);
                errno = e;
                rb_io_wait_writable(fd_num);
            } else {
                done = 1;
            }
        }
    }
    return INT2NUM(total_size);
}