/* Drive one write step for a non-sendfile response.
 *
 * Called whenever the socket is writable. Returns:
 *   not_yet_done - more data is queued in request->current_chunk; come back later
 *   done         - the whole response has been sent
 *   aborted      - unrecoverable error (exception raised by the iterator)
 */
static write_state on_write_chunk(struct ev_loop* mainloop, Request* request) {
  if (do_send_chunk(request))
    // data left to send in the current chunk
    return not_yet_done;

  if(request->iterator) {
    /* Reached the end of a chunk in the response iterator. Get next chunk. */
    PyObject* next_chunk = wsgi_iterable_get_next_chunk(request);
    if(next_chunk) {
      /* We found another chunk to send. */
      if(request->state.chunked_response) {
        /* Wrap the payload in Transfer-Encoding: chunked framing;
         * the wrapper returns a new object, so drop our ref to the raw chunk. */
        request->current_chunk = wrap_http_chunk_cruft_around(next_chunk);
        Py_DECREF(next_chunk);
      } else {
        request->current_chunk = next_chunk;
      }
      /* current_chunk_p (send offset) must have been reset by do_send_chunk. */
      assert(request->current_chunk_p == 0);
      return not_yet_done;
    } else {
      if(PyErr_Occurred()) {
        /* Trying to get the next chunk raised an exception. */
        PyErr_Print();
        DBG_REQ(request, "Exception in iterator, can not recover");
        return aborted;
      } else {
        /* This was the last chunk; cleanup. */
        Py_CLEAR(request->iterator);
        goto send_terminator_chunk;
      }
    }
  } else {
    /* We have no iterator to get more chunks from, so we're done.
     * Reasons we might end up in this place:
     * A) A parse or server error occurred
     * B) We just finished a chunked response with the call to 'do_send_chunk'
     *    above and now maybe have to send the terminating empty chunk.
     * C) We used chunked responses earlier in the response and
     *    are now sending the terminating empty chunk.
     */
    goto send_terminator_chunk;
  }

  assert(0); // unreachable

send_terminator_chunk:
  if(request->state.chunked_response) {
    /* We have to send a terminating empty chunk + \r\n */
    request->current_chunk = PyString_FromString("0\r\n\r\n");
    assert(request->current_chunk_p == 0);
    // Next time we get here, don't send the terminating empty chunk again.
    // XXX This is kind of a hack and should be refactored for easier understanding.
    request->state.chunked_response = false;
    return not_yet_done;
  } else {
    return done;
  }
}
/* XXX too many gotos */
/* libev EV_WRITE callback: send as much of the response as the socket accepts.
 *
 * Handles both the sendfile() path (static file responses) and the generic
 * iterable path. Takes the GIL for the whole callback because it touches
 * Python objects (chunks, iterators). When the response is complete, either
 * re-arms the watcher for keep-alive reads or closes the connection.
 */
static void ev_io_on_write(struct ev_loop* mainloop, ev_io* watcher, const int events) {
  Request* request = REQUEST_FROM_WATCHER(watcher);

  GIL_LOCK(0);

  if(request->state.use_sendfile) {
    /* sendfile */
    if(request->current_chunk) {
      /* current_chunk contains the HTTP headers */
      if(send_chunk(request))
        goto out;  /* headers not fully flushed yet; wait for next writable event */
      assert(!request->current_chunk_p);
      /* abuse current_chunk_p to store the file fd */
      request->current_chunk_p = PyObject_AsFileDescriptor(request->iterable);
      goto out;
    }
    if(do_sendfile(request))
      goto out;  /* file data remaining; come back on next writable event */
  } else {
    /* iterable */
    if(send_chunk(request))
      goto out;

    if(request->iterator) {
      PyObject* next_chunk;
      next_chunk = wsgi_iterable_get_next_chunk(request);
      if(next_chunk) {
        if(request->state.chunked_response) {
          /* Wrap in Transfer-Encoding: chunked framing; drop raw-chunk ref. */
          request->current_chunk = wrap_http_chunk_cruft_around(next_chunk);
          Py_DECREF(next_chunk);
        } else {
          request->current_chunk = next_chunk;
        }
        assert(request->current_chunk_p == 0);
        goto out;
      } else {
        if(PyErr_Occurred()) {
          PyErr_Print();
          /* We can't do anything graceful here because at least one
           * chunk is already sent... just close the connection */
          DBG_REQ(request, "Exception in iterator, can not recover");
          ev_io_stop(mainloop, &request->ev_watcher);
          close(request->client_fd);
          Request_free(request);
          goto out;
        }
        /* Iterator exhausted cleanly. */
        Py_CLEAR(request->iterator);
      }
    }

    if(request->state.chunked_response) {
      /* We have to send a terminating empty chunk + \r\n */
      request->current_chunk = PyString_FromString("0\r\n\r\n");
      assert(request->current_chunk_p == 0);
      /* Clear the flag so we don't send the terminator twice. */
      request->state.chunked_response = false;
      goto out;
    }
  }

  /* Response fully sent. */
  ev_io_stop(mainloop, &request->ev_watcher);
  if(request->state.keep_alive) {
    DBG_REQ(request, "done, keep-alive");
    /* Recycle the Request object and re-arm the watcher for reading. */
    Request_clean(request);
    Request_reset(request);
    ev_io_init(&request->ev_watcher, &ev_io_on_read,
               request->client_fd, EV_READ);
    ev_io_start(mainloop, &request->ev_watcher);
  } else {
    DBG_REQ(request, "done, close");
    close(request->client_fd);
    Request_free(request);
  }

out:
  GIL_UNLOCK(0);
}
/* Invoke the WSGI application for `request` and prepare the response.
 *
 * On success returns true with:
 *   - request->current_chunk   = HTTP headers (+ first body chunk, if any)
 *   - request->current_chunk_p = 0
 *   - request->state flags set (keep_alive, chunked_response, use_sendfile,
 *     wsgi_call_done, ...)
 * Returns false on failure (a Python exception is set or has been printed).
 *
 * Fixes vs. previous revision:
 *   - The chunked-response size assert read `first_chunk` *after*
 *     Py_DECREF(first_chunk) — a use-after-free in debug builds. The assert
 *     now runs before the reference is released.
 *   - PyObject_NEW result is checked before use.
 */
bool wsgi_call_application(Request* request) {
  StartResponse* start_response = PyObject_NEW(StartResponse, &StartResponse_Type);
  if(start_response == NULL)
    return false;  /* allocation failed; exception is set */
  start_response->request = request;

  /* From now on, `headers` stores the _response_ headers
   * (passed by the WSGI app) rather than the _request_ headers */
  PyObject* request_headers = request->headers;
  request->headers = NULL;

  /* application(environ, start_response) call */
  PyObject* retval = PyObject_CallFunctionObjArgs(
    request->server_info->wsgi_app,
    request_headers,
    start_response,
    NULL /* sentinel */
  );

  Py_DECREF(request_headers);
  Py_DECREF(start_response);

  if(retval == NULL)
    return false;

  /* The following code is somewhat magic, so worth an explanation.
   *
   * If the application we called was a generator, we have to call .next() on
   * it before we do anything else because that may execute code that
   * invokes `start_response` (which might not have been invoked yet).
   * Think of the following scenario:
   *
   *   def app(environ, start_response):
   *     start_response('200 Ok', ...)
   *     yield 'Hello World'
   *
   * That would make `app` return an iterator (more precisely, a generator).
   * Unfortunately, `start_response` wouldn't be called until the first item
   * of that iterator is requested; `start_response` however has to be called
   * _before_ the wsgi body is sent, because it passes the HTTP headers.
   *
   * If the application returned a list this would not be required of course,
   * but special-handling is painful - especially in C - so here's one generic
   * way to solve the problem:
   *
   * Look into the returned iterator in any case. This allows us to do other
   * optimizations, for example if the returned value is a list with exactly
   * one string in it, we can pick the string and throw away the list so bjoern
   * does not have to come back again and look into the iterator a second time.
   */
  PyObject* first_chunk;

  if(PyList_Check(retval) && PyList_GET_SIZE(retval) == 1 &&
     PyString_Check(PyList_GET_ITEM(retval, 0))) {
    /* Optimize the most common case, a single string in a list: */
    PyObject* tmp = PyList_GET_ITEM(retval, 0);
    Py_INCREF(tmp);
    Py_DECREF(retval);
    retval = tmp;
    goto string; /* eeevil */
  } else if(PyString_Check(retval)) {
    /* According to PEP 333 strings should be handled like any other iterable,
     * i.e. sending the response item for item. "item for item" means
     * "char for char" if you have a string. -- I'm not that stupid. */
string:
    if(PyString_GET_SIZE(retval)) {
      first_chunk = retval;
    } else {
      /* Empty string: no body. */
      Py_DECREF(retval);
      first_chunk = NULL;
    }
  } else if(FileWrapper_CheckExact(retval)) {
    /* wsgi.file_wrapper: serve via sendfile(). */
    request->state.use_sendfile = true;
    request->iterable = ((FileWrapper*)retval)->file;
    Py_INCREF(request->iterable);
    Py_DECREF(retval);
    request->iterator = NULL;
    first_chunk = NULL;
  } else {
    /* Generic iterable (list of length != 1, generator, ...) */
    request->iterable = retval;
    request->iterator = PyObject_GetIter(retval);
    if(request->iterator == NULL)
      return false;
    first_chunk = wsgi_iterable_get_next_chunk(request);
    if(first_chunk == NULL && PyErr_Occurred())
      return false;
  }

  if(request->headers == NULL) {
    /* It is important that this check comes *after* the call to
     * wsgi_iterable_get_next_chunk(), because in case the WSGI application
     * was an iterator, there's no chance start_response could be called
     * before. See above if you don't understand what I say. */
    PyErr_SetString(
      PyExc_RuntimeError,
      "wsgi application returned before start_response was called"
    );
    Py_XDECREF(first_chunk);
    return false;
  }

  /* keep-alive cruft */
  if(http_should_keep_alive(&request->parser.parser)) {
    if(request->state.response_length_unknown) {
      if(request->parser.parser.http_major > 0 && request->parser.parser.http_minor > 0) {
        /* On HTTP 1.1, we can use Transfer-Encoding: chunked. */
        request->state.chunked_response = true;
        request->state.keep_alive = true;
      } else {
        /* On HTTP 1.0, we can only resort to closing the connection. */
        request->state.keep_alive = false;
      }
    } else {
      /* We know the content-length. Can always keep-alive. */
      request->state.keep_alive = true;
    }
  } else {
    /* Explicit "Connection: close" (HTTP 1.1) or missing "Connection: keep-alive" (HTTP 1.0) */
    request->state.keep_alive = false;
  }

  /* Get the headers and concatenate the first body chunk.
   * In the first place this makes the code more simple because afterwards
   * we can throw away the first chunk PyObject; but it also is an optimization:
   * At least for small responses, the complete response could be sent with
   * one send() call (in server.c:ev_io_on_write) which is a (tiny) performance
   * booster because less kernel calls means less kernel call overhead. */
  PyObject* buf = PyString_FromStringAndSize(NULL, 1024);
  Py_ssize_t length = wsgi_getheaders(request, buf);

  if(first_chunk == NULL) {
    /* Headers only; trim the buffer to the actual header length. */
    _PyString_Resize(&buf, length);
    goto out;
  }

  if(request->state.chunked_response) {
    /* Wrap the chunk in "<hexsize>\r\n...\r\n" chunked framing. */
    PyObject* new_chunk = wrap_http_chunk_cruft_around(first_chunk);
    /* Sanity-check sizes *before* dropping our reference to first_chunk
     * (the old code asserted after Py_DECREF — use-after-free). */
    assert(PyString_GET_SIZE(new_chunk) >= PyString_GET_SIZE(first_chunk) + 5);
    Py_DECREF(first_chunk);
    first_chunk = new_chunk;
  }

  assert(buf);
  _PyString_Resize(&buf, length + PyString_GET_SIZE(first_chunk));
  memcpy(PyString_AS_STRING(buf)+length, PyString_AS_STRING(first_chunk),
         PyString_GET_SIZE(first_chunk));

  Py_DECREF(first_chunk);

out:
  request->state.wsgi_call_done = true;
  request->current_chunk = buf;
  request->current_chunk_p = 0;
  return true;
}
/* libuv port of the write step: flush response data for `request`.
 *
 * Mirrors the libev ev_io_on_write logic: sendfile path vs. generic
 * iterable path, chunked-response terminator, keep-alive vs. close.
 * NOTE(review): unlike the libev variant, ev_watcher is cast to
 * uv_handle_t* without taking its address — presumably a pointer field in
 * this port; confirm against the Request definition.
 * NOTE(review): the dprint() literals are Chinese debug messages (e.g.
 * "send file to client", "send chars", "iterator error"); they are runtime
 * strings and left untouched here.
 */
static void io_write(Request* request) {
  //GIL_LOCK(0);
  if(request->state.use_sendfile) {
    dprint("发送文件给客户端");
    /* sendfile */
    if(request->current_chunk && send_chunk(request))
      goto out;  /* headers still being flushed; try again later */
    /* abuse current_chunk_p to store the file fd */
    request->current_chunk_p = PyObject_AsFileDescriptor(request->iterable);
    if(do_sendfile(request))
      goto out;  /* file data remaining */
  } else {
    dprint("发送字符");
    /* iterable */
    if(send_chunk(request)){
      dprint("一次发送即完成");
      //uv_close((uv_handle_t*) &request->ev_watcher, _http_uv__on_close__cb);
      goto out;
    }
    if(request->iterator) {
      PyObject* next_chunk;
      dprint("request迭代");
      next_chunk = wsgi_iterable_get_next_chunk(request);
      if(next_chunk) {
        dprint("下一块chunk发送");
        if(request->state.chunked_response) {
          /* Wrap in Transfer-Encoding: chunked framing; drop raw-chunk ref. */
          request->current_chunk = wrap_http_chunk_cruft_around(next_chunk);
          Py_DECREF(next_chunk);
        } else {
          request->current_chunk = next_chunk;
        }
        assert(request->current_chunk_p == 0);
        //io_write(request);
        goto out;
      } else {
        if(PyErr_Occurred()) {
          uv_err_t err;
          dprint("迭代出错");
          PyErr_Print();
          DBG_REQ(request, "Exception in iterator, can not recover");
          /* Close the connection; nothing graceful is possible since part
           * of the response may already have been sent. */
          uv_close((uv_handle_t*) request->ev_watcher, on_close);
          Request_free(request);
          err = uv_last_error(loop);
          UVERR(err, "uv_write error on next chunk");
          /* NOTE(review): ASSERT(0) aborts the whole process on an iterator
           * exception — looks intentional-for-debugging; verify for production. */
          ASSERT(0);
          goto out;
        }
        dprint("没有下一块chunk");
        /* Iterator exhausted cleanly. */
        Py_CLEAR(request->iterator);
      }
    }
    if(request->state.chunked_response) {
      dprint("如果是chunked_response 发送收尾数据,并置空chunked_response");
      /* We have to send a terminating empty chunk + \r\n */
      request->current_chunk = PyString_FromString("0\r\n\r\n");
      assert(request->current_chunk_p == 0);
      //io_write(request);
      /* Clear the flag so the terminator is not sent twice. */
      request->state.chunked_response = false;
      goto out;
    }
  }
  dprint("响应完成");
  /* Response fully sent: recycle for keep-alive or close the handle. */
  if(request->state.keep_alive) {
    DBG_REQ(request, "done, keep-alive");
    Request_clean(request);
    Request_reset(request);
  } else {
    dprint("done not keep alive");
    uv_close((uv_handle_t*) request->ev_watcher, on_close);
    Request_free(request);
  }
out:
  dprint("本次字符发送结束");
  //GIL_UNLOCK(0);
  return;
}