ssize_t uwsgi_proto_zeromq_writev_header(struct wsgi_request *wsgi_req, struct iovec *iovec, size_t iov_len) {
	int i;
	ssize_t len;
	ssize_t ret = 0;

	for (i = 0; i < (int) iov_len; i++) {
		len = uwsgi_proto_zeromq_write(wsgi_req, iovec[i].iov_base, iovec[i].iov_len);
		if (len <= 0) {
			return len;
		}
		ret += len;
	}

	return ret;
}
void uwsgi_proto_zeromq_close(struct wsgi_request *wsgi_req) {
	zmq_msg_t reply;

	// check for already freed wsgi_req->proto_parser_buf/wsgi_req->proto_parser_pos
	if (!wsgi_req->proto_parser_pos)
		return;

	zmq_msg_init_data(&reply, wsgi_req->proto_parser_buf, wsgi_req->proto_parser_pos, uwsgi_proto_zeromq_free, NULL);
	if (uwsgi.threads > 1)
		pthread_mutex_lock(&wsgi_req->socket->lock);
#if ZMQ_VERSION >= ZMQ_MAKE_VERSION(3,0,0)
	if (zmq_sendmsg(wsgi_req->socket->pub, &reply, 0)) {
#else
	if (zmq_send(wsgi_req->socket->pub, &reply, 0)) {
#endif
		uwsgi_error("zmq_send()");
	}
	if (uwsgi.threads > 1)
		pthread_mutex_unlock(&wsgi_req->socket->lock);
	zmq_msg_close(&reply);

	if (wsgi_req->async_post && wsgi_req->body_as_file) {
		fclose(wsgi_req->async_post);
	}
}

ssize_t uwsgi_proto_zeromq_writev_header(struct wsgi_request *wsgi_req, struct iovec *iovec, size_t iov_len) {
	int i;
	ssize_t len;
	ssize_t ret = 0;

	for (i = 0; i < (int) iov_len; i++) {
		len = uwsgi_proto_zeromq_write(wsgi_req, iovec[i].iov_base, iovec[i].iov_len);
		if (len <= 0) {
			wsgi_req->write_errors++;
			return 0;
		}
		ret += len;
	}

	return ret;
}
int uwsgi_proto_zeromq_accept(struct wsgi_request *wsgi_req, int fd) {
	zmq_msg_t message;
	char *req_uuid = NULL;
	size_t req_uuid_len = 0;
	char *req_id = NULL;
	size_t req_id_len = 0;
	char *req_path = NULL;
	size_t req_path_len = 0;
#ifdef UWSGI_JSON
	json_t *root;
	json_error_t error;
#endif
	char *mongrel2_req = NULL;
	size_t mongrel2_req_size = 0;
	int resp_id_len;
	uint32_t events = 0;
	char *message_ptr;
	size_t message_size = 0;
	char *post_data;

#ifdef ZMQ_EVENTS
	size_t events_len = sizeof(uint32_t);
	if (zmq_getsockopt(pthread_getspecific(wsgi_req->socket->key), ZMQ_EVENTS, &events, &events_len) < 0) {
		uwsgi_error("zmq_getsockopt()");
		goto retry;
	}
#endif

	if (events & ZMQ_POLLIN || (wsgi_req->socket->retry && wsgi_req->socket->retry[wsgi_req->async_id])) {
		wsgi_req->do_not_add_to_async_queue = 1;
		wsgi_req->proto_parser_status = 0;
		zmq_msg_init(&message);
#if ZMQ_VERSION >= ZMQ_MAKE_VERSION(3,0,0)
		if (zmq_recvmsg(pthread_getspecific(wsgi_req->socket->key), &message, wsgi_req->socket->recv_flag) < 0) {
#else
		if (zmq_recv(pthread_getspecific(wsgi_req->socket->key), &message, wsgi_req->socket->recv_flag) < 0) {
#endif
			if (errno == EAGAIN) {
				zmq_msg_close(&message);
				goto repoll;
			}
			uwsgi_error("zmq_recv()");
			zmq_msg_close(&message);
			goto retry;
		}

		message_size = zmq_msg_size(&message);
		//uwsgi_log("%.*s\n", (int) wsgi_req->proto_parser_pos, zmq_msg_data(&message));
		if (message_size > 0xffff) {
			uwsgi_log("message too big: %llu bytes\n", (unsigned long long) message_size);
			zmq_msg_close(&message);
			goto retry;
		}

		message_ptr = zmq_msg_data(&message);

		// warning: mongrel2_req_size will contain a bogus value here, but that is not a problem...
		post_data = uwsgi_split4(message_ptr, message_size, ' ', &req_uuid, &req_uuid_len, &req_id, &req_id_len, &req_path, &req_path_len, &mongrel2_req, &mongrel2_req_size);
		if (post_data == NULL) {
			uwsgi_log("cannot parse message (split4 phase)\n");
			zmq_msg_close(&message);
			goto retry;
		}

		// fix post_data, mongrel2_req and mongrel2_req_size
		post_data = uwsgi_netstring(mongrel2_req, message_size - (mongrel2_req - message_ptr), &mongrel2_req, &mongrel2_req_size);
		if (post_data == NULL) {
			uwsgi_log("cannot parse message (body netstring phase)\n");
			zmq_msg_close(&message);
			goto retry;
		}

		// ok, ready to parse the tnetstring/json data and build the uwsgi request
		if (mongrel2_req[mongrel2_req_size] == '}') {
			if (uwsgi_mongrel2_tnetstring_parse(wsgi_req, mongrel2_req, mongrel2_req_size)) {
				zmq_msg_close(&message);
				goto retry;
			}
		}
		else {
#ifdef UWSGI_JSON
#ifdef UWSGI_DEBUG
			uwsgi_log("JSON %d: %.*s\n", (int) mongrel2_req_size, (int) mongrel2_req_size, mongrel2_req);
#endif
			// add a zero to the end of the buffer
			mongrel2_req[mongrel2_req_size] = 0;
			root = json_loads(mongrel2_req, 0, &error);
			if (!root) {
				uwsgi_log("error parsing JSON data: line %d %s\n", error.line, error.text);
				zmq_msg_close(&message);
				goto retry;
			}
			if (uwsgi_mongrel2_json_parse(root, wsgi_req)) {
				json_decref(root);
				zmq_msg_close(&message);
				goto retry;
			}
			json_decref(root);
#else
			uwsgi_log("JSON support not enabled (recompile uWSGI with libjansson support, or re-configure mongrel2 with \"protocol='tnetstring'\"), skipping request\n");
#endif
		}

		// pre-build the mongrel2 response header: "UUID SIZE:ID, "
		wsgi_req->proto_parser_buf = uwsgi_malloc(req_uuid_len + 1 + 11 + 1 + req_id_len + 1 + 1);
		memcpy(wsgi_req->proto_parser_buf, req_uuid, req_uuid_len);
		((char *) wsgi_req->proto_parser_buf)[req_uuid_len] = ' ';
		resp_id_len = uwsgi_num2str2(req_id_len, wsgi_req->proto_parser_buf + req_uuid_len + 1);
		((char *) wsgi_req->proto_parser_buf)[req_uuid_len + 1 + resp_id_len] = ':';
		memcpy((char *) wsgi_req->proto_parser_buf + req_uuid_len + 1 + resp_id_len + 1, req_id, req_id_len);
		memcpy((char *) wsgi_req->proto_parser_buf + req_uuid_len + 1 + resp_id_len + 1 + req_id_len, ", ", 2);
		wsgi_req->proto_parser_pos = (uint64_t) req_uuid_len + 1 + resp_id_len + 1 + req_id_len + 1 + 1;

		// handle post data (in memory)
		if (wsgi_req->post_cl > 0 && !wsgi_req->post_file) {
			if (uwsgi_netstring(post_data, message_size - (post_data - message_ptr), &message_ptr, &wsgi_req->post_cl)) {
#ifdef UWSGI_DEBUG
				uwsgi_log("post_size: %llu\n", (unsigned long long) wsgi_req->post_cl);
#endif
				wsgi_req->post_read_buf = uwsgi_malloc(wsgi_req->post_cl);
				memcpy(wsgi_req->post_read_buf, message_ptr, wsgi_req->post_cl);
			}
		}

		zmq_msg_close(&message);

		// retry by default
		wsgi_req->socket->retry[wsgi_req->async_id] = 1;
		return 0;
	}

repoll:
	// force polling of the socket
	wsgi_req->socket->retry[wsgi_req->async_id] = 0;
	return -1;

retry:
	// retry until EAGAIN
	wsgi_req->do_not_log = 1;
	wsgi_req->socket->retry[wsgi_req->async_id] = 1;
	return -1;
}

void uwsgi_proto_zeromq_close(struct wsgi_request *wsgi_req) {
	zmq_msg_t reply;

	// check for already freed wsgi_req->proto_parser_buf/wsgi_req->proto_parser_pos
	if (!wsgi_req->proto_parser_pos)
		return;

	// no need to pass a free function (the buffer will be freed during close_request)
	zmq_msg_init_data(&reply, wsgi_req->proto_parser_buf, wsgi_req->proto_parser_pos, NULL, NULL);
	if (uwsgi.threads > 1)
		pthread_mutex_lock(&wsgi_req->socket->lock);
#if ZMQ_VERSION >= ZMQ_MAKE_VERSION(3,0,0)
	if (zmq_sendmsg(wsgi_req->socket->pub, &reply, 0)) {
		uwsgi_error("uwsgi_proto_zeromq_close()/zmq_sendmsg()");
#else
	if (zmq_send(wsgi_req->socket->pub, &reply, 0)) {
		uwsgi_error("uwsgi_proto_zeromq_close()/zmq_send()");
#endif
	}
	if (uwsgi.threads > 1)
		pthread_mutex_unlock(&wsgi_req->socket->lock);
	zmq_msg_close(&reply);
}

int uwsgi_proto_zeromq_write(struct wsgi_request *wsgi_req, char *buf, size_t len) {
	zmq_msg_t reply;

	if (zmq_msg_init_size(&reply, wsgi_req->proto_parser_pos + len)) {
		uwsgi_error("uwsgi_proto_zeromq_write()/zmq_msg_init_size()");
		return -1;
	}

	// prepend the pre-built mongrel2 response header to the body
	char *zmq_body = zmq_msg_data(&reply);
	memcpy(zmq_body, wsgi_req->proto_parser_buf, wsgi_req->proto_parser_pos);
	memcpy(zmq_body + wsgi_req->proto_parser_pos, buf, len);

	if (uwsgi.threads > 1)
		pthread_mutex_lock(&wsgi_req->socket->lock);
#if ZMQ_VERSION >= ZMQ_MAKE_VERSION(3,0,0)
	if (zmq_sendmsg(wsgi_req->socket->pub, &reply, 0)) {
#else
	if (zmq_send(wsgi_req->socket->pub, &reply, 0)) {
#endif
		if (uwsgi.threads > 1)
			pthread_mutex_unlock(&wsgi_req->socket->lock);
		zmq_msg_close(&reply);
		return -1;
	}
	if (uwsgi.threads > 1)
		pthread_mutex_unlock(&wsgi_req->socket->lock);
	zmq_msg_close(&reply);
	return UWSGI_OK;
}

/*
	We have a problem: recent Mongrel2 releases introduced a ring buffer that
	limits the number of messages we can send (more precisely, the number of
	messages mongrel2 is able to manage). If we send a big static file we can
	fill that buffer immediately. How do we deal with this? We know that the
	message ring holds a fixed number of messages.
	We can split the file into chunks (up to the maximum number of messages
	supported by the specific mongrel2 instance). This is suboptimal, but there
	is no better solution for now; sending a single message carrying a 2GB file
	would not be a reasonable approach either. So, while waiting for something
	better, we use a 2MB buffer. This should handle files up to 32MB without
	being rejected by mongrel2. For bigger files you can raise this value (or
	increase the mongrel2 ring buffer).
*/
#define UWSGI_MONGREL2_MAX_MSGSIZE 2*1024*1024

int uwsgi_proto_zeromq_sendfile(struct wsgi_request *wsgi_req, int fd, size_t pos, size_t len) {
	// send at most one ring-buffer-friendly chunk per call
	size_t chunk_size = UMIN(len - wsgi_req->write_pos, UWSGI_MONGREL2_MAX_MSGSIZE);
	char *tmp_buf = uwsgi_malloc(chunk_size);
	ssize_t rlen = read(fd, tmp_buf, chunk_size);
	if (rlen <= 0) {
		free(tmp_buf);
		return -1;
	}
	wsgi_req->write_pos += rlen;
	if (uwsgi_proto_zeromq_write(wsgi_req, tmp_buf, rlen) < 0) {
		free(tmp_buf);
		return -1;
	}
	free(tmp_buf);
	if (wsgi_req->write_pos == len) {
		return UWSGI_OK;
	}
	return UWSGI_AGAIN;
}
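/*
	Illustration only (not part of the original plugin and not used by it):
	a minimal sketch of the sizing logic described in the comment above.
	`ring_slots` is a hypothetical stand-in for the mongrel2 ring buffer
	capacity; with the 2MB chunk size above, the "up to 32MB" figure quoted
	in the comment corresponds to roughly 16 outstanding messages (16 * 2MB).
*/
static inline size_t uwsgi_mongrel2_estimated_max_filesize(size_t ring_slots) {
	// each queued message carries at most one UWSGI_MONGREL2_MAX_MSGSIZE chunk
	return ring_slots * (size_t) UWSGI_MONGREL2_MAX_MSGSIZE;
}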
ssize_t uwsgi_proto_zeromq_write(struct wsgi_request *wsgi_req, char *buf, size_t len) {
	zmq_msg_t reply;
	char *zmq_body;

	if (len == 0)
		return 0;

	zmq_body = uwsgi_concat2n(wsgi_req->proto_parser_buf, (int) wsgi_req->proto_parser_pos, buf, (int) len);
	//uwsgi_log("|%.*s|\n", (int)wsgi_req->proto_parser_pos+len, zmq_body);
	zmq_msg_init_data(&reply, zmq_body, wsgi_req->proto_parser_pos + len, uwsgi_proto_zeromq_free, NULL);
	if (uwsgi.threads > 1)
		pthread_mutex_lock(&wsgi_req->socket->lock);
#if ZMQ_VERSION >= ZMQ_MAKE_VERSION(3,0,0)
	if (zmq_sendmsg(wsgi_req->socket->pub, &reply, 0)) {
#else
	if (zmq_send(wsgi_req->socket->pub, &reply, 0)) {
#endif
		if (!uwsgi.ignore_write_errors) {
			uwsgi_error("zmq_send()");
		}
		wsgi_req->write_errors++;
		if (uwsgi.threads > 1)
			pthread_mutex_unlock(&wsgi_req->socket->lock);
		zmq_msg_close(&reply);
		return 0;
	}
	if (uwsgi.threads > 1)
		pthread_mutex_unlock(&wsgi_req->socket->lock);
	zmq_msg_close(&reply);
	return len;
}

ssize_t uwsgi_proto_zeromq_write_header(struct wsgi_request *wsgi_req, char *buf, size_t len) {
	return uwsgi_proto_zeromq_write(wsgi_req, buf, len);
}

ssize_t uwsgi_proto_zeromq_sendfile(struct wsgi_request *wsgi_req) {
	ssize_t len;
	char buf[65536];
	size_t remains = wsgi_req->sendfile_fd_size - wsgi_req->sendfile_fd_pos;

	wsgi_req->sendfile_fd_chunk = 65536;

	if (uwsgi.async > 1) {
		len = read(wsgi_req->sendfile_fd, buf, UMIN(remains, wsgi_req->sendfile_fd_chunk));
		if (len != (int) UMIN(remains, wsgi_req->sendfile_fd_chunk)) {
			uwsgi_error("read()");
			return -1;
		}
		wsgi_req->sendfile_fd_pos += len;
		return uwsgi_proto_zeromq_write(wsgi_req, buf, len);
	}

	while (remains) {
		len = read(wsgi_req->sendfile_fd, buf, UMIN(remains, wsgi_req->sendfile_fd_chunk));
		if (len != (int) UMIN(remains, wsgi_req->sendfile_fd_chunk)) {
			uwsgi_error("read()");
			return -1;
		}
		wsgi_req->sendfile_fd_pos += len;
		len = uwsgi_proto_zeromq_write(wsgi_req, buf, len);
		remains = wsgi_req->sendfile_fd_size - wsgi_req->sendfile_fd_pos;
	}

	return wsgi_req->sendfile_fd_pos;
}
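/*
	A minimal usage sketch (an assumption, not taken from this file): these
	callbacks are normally wired into a mongrel2/zeromq socket through the
	proto_* hook fields of struct uwsgi_socket. The field names below are
	assumed to follow that convention; check the socket setup code in the
	uWSGI core for the real assignments.
*/
static void uwsgi_proto_zeromq_setup_sketch(struct uwsgi_socket *uwsgi_sock) {
	uwsgi_sock->proto_accept = uwsgi_proto_zeromq_accept;
	uwsgi_sock->proto_write = uwsgi_proto_zeromq_write;
	uwsgi_sock->proto_write_header = uwsgi_proto_zeromq_write_header;
	uwsgi_sock->proto_writev_header = uwsgi_proto_zeromq_writev_header;
	uwsgi_sock->proto_sendfile = uwsgi_proto_zeromq_sendfile;
	uwsgi_sock->proto_close = uwsgi_proto_zeromq_close;
}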