// Queue the EOM (end-of-message) response exactly once; any call after
// the first is a no-op.
void ProxygenTransport::onSendEndImpl() {
  if (m_sendEnded) {
    return;
  }
  VLOG(4) << "onSendEndImpl called";
  m_server->putResponseMessage(
    ResponseMessage(shared_from_this(), ResponseMessage::Type::EOM));
  m_sendEnded = true;
}
// Forward one chunk of pushed-resource body data to the server thread.
// Dropped silently when the push id is invalid (0) or when there is
// neither payload nor an EOM to deliver.
void ProxygenTransport::pushResourceBody(int64_t id, const void *data,
                                         int size, bool eom) {
  const bool nothingToSend = (size <= 0 && !eom);
  if (id == 0 || nothingToSend) {
    return;
  }
  m_server->putResponseMessage(
    ResponseMessage(shared_from_this(), ResponseMessage::Type::BODY,
                    id, false, data, size, eom));
}
int64_t ProxygenTransport::pushResource(const char *host, const char *path, uint8_t priority, const Array &headers, const void *data, int size, bool eom) { if (!supportsServerPush()) { return 0; } int64_t pushId = m_nextPushId++; PushTxnHandler *handler = new PushTxnHandler(pushId, shared_from_this()); HTTPMessage& pushMsg = handler->getPushMessage(); pushMsg.setURL(path); pushMsg.setIsChunked(true); // implicitly chunked pushMsg.setSecure(true); // should we allow setting scheme? pushMsg.setPriority(priority); for (ArrayIter iter(headers); iter; ++iter) { Variant key = iter.first(); String header = iter.second(); if (key.isString() && !key.toString().empty()) { pushMsg.getHeaders().add(key.toString().data(), header.data()); } else { int pos = header.find(": "); if (pos >= 0) { std::string name = header.substr(0, pos).data(); std::string value = header.substr(pos + 2).data(); pushMsg.getHeaders().add(name, value); } else { Logger::Error("throwing away bad header: %s", header.data()); } } } pushMsg.getHeaders().set(HTTP_HEADER_HOST, host); { Lock lock(this); m_pushHandlers[pushId] = handler; } m_server->putResponseMessage( ResponseMessage(shared_from_this(), ResponseMessage::Type::HEADERS, pushId, false, data, size, eom)); return pushId; }
// Hand back the next chunk of buffered POST body data, blocking until a
// chunk arrives or the client finishes. Sets `size` and returns a pointer
// into the chunk now held by m_currentBodyBuf; returns nullptr/0 when
// there is no more data.
const void *ProxygenTransport::getMorePostData(int &size) {
  // When fully buffering the request, the entire body was consumed up
  // front, so there is never more to hand out here.
  if (bufferRequest()) {
    CHECK(m_clientComplete);
    size = 0;
    return nullptr;
  }

  // proxygen will send onTimeout if we don't receive data in this much time
  long maxWait = RuntimeOption::ConnectionTimeoutSeconds;
  if (maxWait <= 0) {
    maxWait = 50; // this was the default read timeout in LibEventServer
  }

  // Block (under the transport lock) until a body chunk arrives or the
  // client signals completion.
  Lock lock(this);
  while (m_bodyData.empty() && !m_clientComplete) {
    VLOG(4) << "waiting for POST data for maxWait=" << maxWait;
    wait(maxWait);
  }

  // Remember how much was buffered before popping, so we can tell whether
  // this read crossed back under the ingress flow-control limit below.
  uint32_t oldLength = m_bodyData.chainLength();

  // For chunk encodings, we may receive an EOM with no data, such that
  // hasMorePostData returns true (because client is not yet complete),
  // client sends EOM, getMorePostData should return 0/nullptr
  size = 0;
  const void *data = nullptr;
  while (!m_bodyData.empty()) {
    // this is the first body if it wasn't set and buf is unset
    m_firstBody = !(m_firstBody && m_currentBodyBuf);
    m_currentBodyBuf = m_bodyData.pop_front();
    CHECK(m_currentBodyBuf && m_currentBodyBuf->length() > 0);
    size = m_currentBodyBuf->length();
    data = m_currentBodyBuf->data();
    break; // only one chunk per call; loop form just skips when empty
  }

  // If draining this chunk dropped the buffered amount below the read
  // limit, ask the server thread to let proxygen resume ingress.
  if (oldLength >= RuntimeOption::RequestBodyReadLimit &&
      m_bodyData.chainLength() < RuntimeOption::RequestBodyReadLimit) {
    VLOG(4) << "resuming ingress";
    m_server->putResponseMessage(ResponseMessage(
        shared_from_this(), ResponseMessage::Type::RESUME_INGRESS));
  }
  VLOG(4) << "returning POST body chunk size=" << size;
  return data;
}
// Queue response data for the server thread. The first call also builds
// and queues the response headers (status line, Content-Length or
// Transfer-Encoding); subsequent calls (chunked only) queue BODY messages.
void ProxygenTransport::sendImpl(const void *data, int size, int code,
                                 bool chunked, bool eom) {
  assert(data);
  // Only chunked responses may call sendImpl more than once.
  assert(!m_sendStarted || chunked);
  if (m_sendEnded) {
    // This should never happen, but when it does we have to bail out,
    // since there's no sensible way to send data at this point and
    // trying to do so will horribly corrupt memory.
    // TODO #2821803: Figure out whether this is caused by another bug
    // somewhere.
    return;
  }

  VLOG(4) << "sendImpl called with data size=" << size << ", code=" << code
          << ", chunked=" << chunked << ", eom=" << eom;
  // A non-chunked response goes out in one shot, so it is implicitly the
  // end of the message.
  eom |= !chunked;
  if (!m_sendStarted) {
    if (!chunked) {
      // Single-shot response: advertise the exact body length unless the
      // caller already set one.
      if (!m_response.getHeaders().exists(HTTP_HEADER_CONTENT_LENGTH)) {
        m_response.getHeaders().add(HTTP_HEADER_CONTENT_LENGTH,
                                    folly::to<std::string>(size));
      }
    } else {
      // Explicitly add Transfer-Encoding: chunked here. libproxygen will only
      // add it for keep-alive connections
      m_response.getHeaders().set(HTTP_HEADER_TRANSFER_ENCODING, "chunked");
    }
    m_response.setStatusCode(code);
    // Prefer the reason phrase supplied on the transport; fall back to the
    // standard reason text for this status code.
    auto const& reasonStr = getResponseInfo();
    const char* reason = reasonStr.empty()
      ? HTTPMessage::getDefaultReason(code)
      : reasonStr.c_str();
    m_response.setStatusMessage(reason);
    m_response.setHTTPVersion(1, 1);
    m_response.setIsChunked(chunked);
    m_response.dumpMessage(4);
    m_server->putResponseMessage(
      ResponseMessage(shared_from_this(), ResponseMessage::Type::HEADERS,
                      0, chunked, data, size, eom));
    m_sendStarted = true;
  } else {
    m_server->putResponseMessage(
      ResponseMessage(shared_from_this(), ResponseMessage::Type::BODY,
                      0, chunked, data, size, eom));
  }

  if (eom) {
    m_sendEnded = true;
  }

  if (chunked) {
    assert(m_method != Method::HEAD);
    /*
     * Chunked replies are sent async, so there is no way to know the
     * time it took to flush the response, but tracking the bytes sent is
     * very useful.
     */
    onChunkedProgress(size);
  }
}
// Hand this transport over to the server thread as its final response
// message; ownership of the shared_ptr moves with the message.
void ProxygenTransport::finish(shared_ptr<ProxygenTransport> &&transport) {
  ResponseMessage finalMessage(std::move(transport));
  m_server->putResponseMessage(std::move(finalMessage));
}
// Queue `message` to be sent over mConnection from the main event-loop
// thread. The explicit function-pointer cast selects the
// Connection::send(Message&&) overload for std::bind; the placeholder is
// filled with the ResponseMessage when the event loop invokes the callback.
// NOTE(review): "writeToConnetion" looks like a typo for
// "writeToConnection" — renaming would break existing callers, so it is
// left as-is here.
void DumpThread::writeToConnetion(const String &message) {
  EventLoop::mainEventLoop()->callLaterMove(
    std::bind((bool(Connection::*)(Message&&))&Connection::send,
              mConnection, std::placeholders::_1),
    ResponseMessage(message));
}