void HttpRequest::readBody() { LogSpam << "HttpRequest::readBody()"; std::string const *cl = header("Content-Length"); if (!cl) { throw std::runtime_error("Can't read body without content-length"); } size_t len = boost::lexical_cast<size_t>(*cl); bodyData_.resize(len); bodyRead_ = 0; if (headerSize_ < headerData_.size() && len > 0) { size_t toCopy = headerData_.size() - headerSize_; if (toCopy > len) { toCopy = len; } if (debugHttp.enabled()) { LogNotice << "http toCopy" << toCopy; } std::copy(&headerData_[headerSize_], &headerData_[headerSize_] + toCopy, &bodyData_[0]); bodyRead_ = toCopy; } if (debugHttp.enabled()) { LogNotice << "http readBody" << len << "bytes"; } try { size_t toRead = bodyData_.size() - bodyRead_; if (toRead == 0) { if (debugHttp.enabled()) { LogNotice << "http toRead complete"; } on_body(boost::system::errc::make_error_code(boost::system::errc::success), toRead); return; } if (!socket_.is_open()) { throw std::runtime_error("Socket is closed inside readBody()"); } if (debugHttp.enabled()) { LogNotice << "http queue read" << toRead; } boost::asio::async_read(socket_, boost::asio::buffer(&bodyData_[bodyRead_], toRead), boost::asio::transfer_at_least(toRead), boost::bind(&HttpRequest::on_body, HttpRequestHolder(shared_from_this()), placeholders::error, placeholders::bytes_transferred)); } catch (std::exception const &x) { LogWarning << "exception calling async_read() in readBody():" << x.what(); throw x; } }
//  Periodic audit tick: publish mmap/file counters into their gauges and
//  drain each retention level's hit/miss statistics, then re-arm the timer.
void auditFunc(boost::system::system_error const &err) {
    if (debugAudit.enabled()) {
        LogDebug << "AuditTimer::auditFunc() thread" << boost::this_thread::get_id();
    }
    else {
        LogSpam << "auditFunc() " << getpid();
    }
    if (mm_) {
        int64_t nMap = 0;
        int64_t nUnmap = 0;
        int64_t nOpen = 0;
        int64_t nClose = 0;
        mm_->counters(&nMap, &nUnmap, &nOpen, &nClose);
        mmOpens.value(nOpen);
        mmCloses.value(nClose);
        mmMaps.value(nMap);
        mmUnmaps.value(nUnmap);
    }
    size_t count = retentionCounters.size();
    for (size_t ix = 0; ix != count; ++ix) {
        RetentionCounters *rc = retentionCounters[ix];
        //  Grab-and-reset: read the current value, then atomically subtract
        //  what was read so concurrent increments are not lost.
        int64_t hits = rc->ri_->stats.nHits.stat_;
        istat::atomic_add(&rc->ri_->stats.nHits.stat_, -hits);
        rc->pHits_.value(hits);
        int64_t misses = rc->ri_->stats.nMisses.stat_;
        istat::atomic_add(&rc->ri_->stats.nMisses.stat_, -misses);
        rc->pMisses_.value(misses);
    }
    scheduleNext();
}
//  Cancel any pending audit timer wait.
void cancel() {
    if (debugAudit.enabled()) {
        LogDebug << "AuditTimer::cancel() thread" << boost::this_thread::get_id();
    }
    timer_.cancel();
}
void ReplicaOf::on_connection() { if (!debugReplicaOf.enabled()) { LogDebug << "ReplicaOf::on_connection() from" << conn_->endpointName(); } char buf[1024]; size_t sz = 0; UniqueId uid; ss_->getUniqueId(uid); if (debugReplicaOf.enabled()) { LogNotice << "replica connection from" << conn_->endpointName() << "my uid" << uid.str(); } PduConnect::make(buf, 1024, sz, ReplicateProtocolVersion1, uid); emit(buf, sz); }
//  Cancel the outstanding timer wait before the object goes away.
~AuditTimer() {
    if (debugAudit.enabled()) {
        LogNotice << "~AuditTimer() thread" << boost::this_thread::get_id();
    }
    timer_.cancel();
}
void HttpRequest::doReply(int code, std::string const &ctype, std::string const &xheaders) { if (debugHttp.enabled()) { LogNotice << "http reply" << code << ctype; } else { LogDebug << "HttpRequest::doReply()"; } if (code >= 400) { ++hs_->sInfo_.httpErrors; } std::string headers; headers += "HTTP/1.1 "; headers += boost::lexical_cast<std::string>(code); headers += " (that's a status code)"; headers += "\r\n"; headers += "Content-Type: "; headers += ctype; headers += "\r\n"; headers += "Content-Length: "; headers += boost::lexical_cast<std::string>(reply_.size()); headers += "\r\n"; headers += "Connection: close"; headers += "\r\n"; headers += xheaders; if (debugHttp.enabled() && xheaders.size()) { LogNotice << "http xheaders" << xheaders; } headers += "\r\n"; reply_.insert(reply_.begin(), headers.begin(), headers.end()); boost::asio::async_write(socket_, boost::asio::buffer(&reply_[0], reply_.size()), boost::asio::transfer_all(), boost::bind(&HttpRequest::on_reply, HttpRequestHolder(shared_from_this()), placeholders::error, placeholders::bytes_transferred)); onHeader_.disconnect_all_slots(); onBody_.disconnect_all_slots(); }
//  Wire up a freshly-accepted request: both header-complete and error
//  signals route to printRequest for dispatch.
static void onHttpRequest(StatServer *ss, HttpRequestHolder const &req) {
    LogSpam << "onHttpRequest()";
    if (debugHttp.enabled()) {
        LogDebug << "http onHttpRequest binding handlers";
    }
    req->onHeader_.connect(boost::bind(printRequest, req, ss));
    req->onError_.connect(boost::bind(printRequest, req, ss));
}
//  Dispatch a parsed request: hand it to a RequestInFlight and run it on
//  the stat server's io_service.
static void printRequest(HttpRequestHolder const &req, StatServer *ss) {
    if (debugHttp.enabled()) {
        LogNotice << "http serving" << req->url();
    }
    LogDebug << "Serving:" << req->method() << req->url();
    boost::shared_ptr<RequestInFlight> inFlight(new RequestInFlight(req.p_, ss, filesDir.get()));
    ss->service().post(boost::bind(&RequestInFlight::go, inFlight));
}
//  Log receipt of a replication PDU at the appropriate verbosity.
void ReplicaOf::on_pdu() {
    if (!debugReplicaOf.enabled()) {
        LogDebug << "ReplicaOf::on_pdu() from" << conn_->endpointName();
    }
    else {
        LogNotice << "replica pdu from" << conn_->endpointName();
    }
}
//  Parse the request line "METHOD URL VERSION" into method_, url_ and
//  version_ members.
void HttpRequest::parseMethod(std::string const &data) {
    std::string rest;
    split(data, ' ', method_, rest);
    trim(rest);
    split(rest, ' ', url_, version_);
    if (debugHttp.enabled()) {
        LogNotice << "http method" << method_ << "url" << url_ << "version" << version_;
    }
}
//  Timer callback: roll the log files over and schedule the next rollover.
void timerFunc(boost::system::system_error const &err) {
    if (debugAudit.enabled()) {
        LogDebug << "LogRolloverTimer::threadFunc() thread" << boost::this_thread::get_id();
    }
    else {
        LogSpam << "threadFunc() calling rollOver()" << getpid();
    }
    istat::LogConfig::rollOver();
    scheduleNext();
}
//  Construct the audit timer and arm the first tick.  The PRNG is seeded
//  from the object's address so multiple instances jitter differently.
AuditTimer(boost::asio::io_service &svc, Mmap *mm = 0, StatServer *ss = 0)
    : timer_(svc)
    , svc_(svc)
    , mm_(mm)
    , ss_(ss)
{
    if (debugAudit.enabled()) {
        LogDebug << "AuditTimer() thread" << boost::this_thread::get_id();
    }
    srand((int)((size_t)this & 0xffff));
    scheduleNext();
}
void KeyMatch::extract(std::string const &pat, std::list<std::pair<std::string, CounterResponse> > &oList) { for (HashMap::iterator ptr(ctrs.begin()), end(ctrs.end()); ptr != end; ++ptr) { if (istat::str_pat_match((*ptr).first, pat)) { if (allKeys.enabled()) { LogDebug << "allKeys match " << (*ptr).first << "as" << ((*ptr).second.isLeaf ? "leaf" : "branch"); } oList.push_back(*ptr); } else { if (allKeys.enabled()) { LogDebug << "allKeys no match " << (*ptr).first; } } } }
//  Completion handler for the response write: update request statistics and
//  tear the connection down (responses always use "Connection: close").
void HttpRequest::on_reply(boost::system::error_code const &err, size_t xfer) {
    if (debugHttp.enabled()) {
        LogNotice << "http on_reply() complete" << err;
    }
    LogDebug << "HttpRequest::on_reply()";
    if (err) {
        ++hs_->sInfo_.numErrors;
    }
    assert(hs_->sInfo_.current > 0);
    --hs_->sInfo_.current;
    hs_->sInfo_.currentGauge.value((int32_t)hs_->sInfo_.current);
    // this should soon go away, as the stack reference will go away!
    socket_.close();
    onError_.disconnect_all_slots();
}
//  Flush the collation window and slide it forward by one interval.
//  Returns false when nothing has been collated yet (collations_[0].time is
//  still zero), true after a successful shift.  The order of operations
//  below is load-bearing: write the window, aggregate the oldest bucket,
//  shift, then open a fresh top slot.
bool StatCounter::shiftCollated(time_t t) {
    if(collations_[0].time == 0) {
        // Nothing to flush yet!
        return false;
    }
    else {
        if (debugRecord.enabled()) {
            LogDebug << "shiftCollated (time " << collations_[0].time << " shifted out)";
        }
        // Write collated buckets to the statfile.
        //  NOTE(review): every bucket in the window is (re)written to the
        //  first (finest-grained) statfile here, not only the one being
        //  shifted out -- confirm this re-write is intended.
        for(size_t i = 0; i != BUCKETS_PER_COLLATION_WINDOW; ++i) {
            istat::Bucket &bucket(collations_[i].bucket);
            counters_.begin()->file->updateBucket(bucket);
        }
        // Aggregate oldest bucket into coarser statfiles.
        //  (skips counters_.begin(), which already got it above)
        for(std::vector<OneCounter>::iterator ptr(counters_.begin()), end(counters_.end()); ptr != end; ++ptr) {
            if(ptr != counters_.begin()) {
                ptr->file->updateBucket(collations_[0].bucket);
            }
        }
        // Shift collations by one interval.
        for(size_t i = 0; i != BUCKETS_PER_COLLATION_WINDOW - 1; ++i) {
            collations_[i] = collations_[i + 1];
        }
        // Start a fresh collation entry on top.
        //  The new top slot's time is one interval past the (now shifted)
        //  previous top slot.
        CollationInfo &top(collations_[BUCKETS_PER_COLLATION_WINDOW - 1]);
        CollationInfo &oldTop(collations_[BUCKETS_PER_COLLATION_WINDOW - 2]);
        top = CollationInfo(oldTop.time + collationInterval_);
        return true;
    }
}
//  Persist this settings group to "<path>/<name>.set" if it has unsaved
//  changes.  The data is written to a ".tmp" sibling first, the stream is
//  closed, and the temp file is then renamed into place, so readers never
//  observe a partially-written settings file.
void RealSettings::save(boost::shared_ptr<ISettings> const &me) {
    (debugSettings ? LogNotice : LogDebug) << "RealSettings::save(" << name_ << ")";
    grab aholdof(lock_);
    if (dirty_) {
        // Only re-write if dirty. This allows a settings file to be edited on disk and re-loaded
        // with a flush on the admin interface.
        std::string tmpName;
        std::string fileName;
        fileName = fac_->path_ + "/" + name_ + ".set";
        tmpName = fileName + ".tmp";
        {
            //  Scope the stream so it is flushed and closed before the rename.
            std::ofstream ostr(tmpName.c_str(), std::ios::binary | std::ios::out | std::ios::trunc);
            ostr << "# istatd settings 1" << std::endl;
            for (std::map<std::string, std::string>::iterator ptr(settings_.begin()), end(settings_.end()); ptr != end; ++ptr) {
                if ((*ptr).second.size()) {
                    // empty string values are not saved -- same as "delete"
                    ostr << (*ptr).first << "=" << istat::sql_quote((*ptr).second) << std::endl;
                }
            }
        }
        // Now, move the new file in place of the old.
        // First, remove the old file, to generate an error if we don't have permission.
        if (boost::filesystem::exists(fileName)) {
            boost::filesystem::remove(fileName);
        }
        boost::filesystem::rename(tmpName, fileName);
    }
    else {
        if (debugSettings.enabled()) {
            LogNotice << "settings not dirty";
        }
    }
    //  Flags are cleared even on the not-dirty path, consuming any queued save.
    saveQueued_ = false;
    dirty_ = false;
    ++countSaves;
}
//  Record a newly-seen key by pushing a Rec onto the lock-free singly
//  linked list rooted at head_, using a compare-and-swap retry loop.
void AllKeys::add(std::string const &str, bool isCollated) {
    dirty_ = true;
    if (allKeys.enabled()) {
        LogSpam << "AllKeys::add(" << str << ")";
    }
    Rec *r = new Rec(str, isCollated);
    // Spin, in case we race with some other winner.
    // Note that it will never livelock, because someone
    // will write to the head_ each cycle and thus make
    // progress.
    while (true) {
        //  Link to the current head, then publish r only if head_ is still
        //  the value we read into r->next.
        r->next = head_;
        if (istat::atomic_compare_exchange((void * volatile *)&head_, (void *)r->next, (void *)r)) {
            return;
        }
    }
}
//  Parse and dispatch one admin command line: the first word selects the
//  command, the remainder is a comma-separated argument list.
void AdminConnection::doCmd(std::string const &cmd) {
    (debugAdmin.enabled() ? LogNotice : LogSpam) << "AdminConnection::doCmd(" << istat::sql_quote(cmd) << ")";
    ++as_->numAdminCommands_;
    std::string left, right;
    int nParts = istat::split(cmd, ' ', left, right);
    if (nParts == 0) {
        huh();
    }
    else if (nParts == 1 || nParts == 2) {
        std::vector<std::string> args;
        istat::trim(left);
        istat::trim(right);
        istat::explode(right, ',', args);
        cmdArgs(left, args);
    }
}
//  Accept-completion handler.  On success: start processing the request and
//  immediately queue the next accept.  On failure: count the error and back
//  off one second before accepting again.
void HttpServer::handleAccept(boost::system::error_code const &e, HttpRequestHolder const &req) {
    LogDebug << "HttpService::handleAccept()";
    ++sInfo_.numRequests;
    if (e) {
        ++sInfo_.numErrors;
        LogWarning << "Error accepting a HTTP request: " << e;
        timer_.expires_from_now(boost::posix_time::seconds(1));
        timer_.async_wait(boost::bind(&HttpServer::acceptOne, this));
        return;
    }
    if (debugHttp.enabled()) {
        LogNotice << "http request";
    }
    ++sInfo_.current;
    sInfo_.currentGauge.value((int32_t)sInfo_.current);
    onRequest_(req);
    req->readHeaders();
    acceptOne();
}
//  Completion handler for the body read queued by readBody().  Fires the
//  onBody_ signal unless the transfer failed with a hard (non-EOF) error.
void HttpRequest::on_body(boost::system::error_code const &err, size_t xfer) {
    LogSpam << "HttpRequest::on_body()";
    if (!!err) {
        LogWarning << "HttpRequest::on_body(): " << err;
        //  Abort only for a non-EOF error that arrived after some bytes were
        //  transferred.  NOTE(review): an error with xfer == 0 falls through
        //  and fires onBody_ anyway -- confirm this is intended and the
        //  xfer > 0 condition is not reversed.
        if ((xfer > 0) && (err.category() != boost::asio::error::get_misc_category() || err.value() != boost::asio::error::eof)) {
            error();
            return;
        }
    }
    if (debugHttp.enabled()) {
        LogNotice << "http on_body" << xfer << "bytes";
    }
    // got the body!
    onBody_();
    onBody_.disconnect_all_slots();
    onError_.disconnect_all_slots();
}
//  Record one sample into this counter.
//  Collated counters: the sample is folded into the in-memory collation
//  window slot for its (interval-aligned) time; the statfile is only
//  updated on power-of-two write counts, and samples outside the window
//  (too old, or more than one interval in the future) are rejected.
//  Uncollated counters: a Bucket built from the aggregates is written to
//  every retention statfile after sanity-checking min <= avg <= max
//  (within a small float/double conversion epsilon).
void StatCounter::record(time_t time, double value, double valueSq, double min, double max, size_t cnt) {
    if(debugRecord.enabled()) {
        LogDebug << "record" << value << valueSq << min << max << cnt << time;
    }
    else {
        LogSpam << "StatCounter::record thread_id " << boost::this_thread::get_id();
    }
    ++dequeueRecords_;
    --queueLenRecords_;
    time_t nowTime;
    if(isCollated_) {
        //  Reject samples further ahead than one collation interval.
        if(time > istat::istattime(&nowTime) + collationInterval_) {
            ++recordsFromTheFuture_;
            if (debugRejectedCounters) {
                LogWarning << "StatCounter::record rejected counter from the future: " << time << " > " << nowTime << ": " << counters_[0].file->header().name;
            }
            return;
        }
        //  Reject samples older than the oldest open collation slot.
        if(time < collations_[0].time) {
            ++recordsFromThePast_;
            if (debugRejectedCounters) {
                LogWarning << "StatCounter::record rejected counter from the past: " << time << " < " << collations_[0].time << " < " << collations_[1].time << " < " << collations_[2].time << ": " << counters_[0].file->header().name;
            }
            return;
        }
    }
    else {
        //  Uncollated counters allow up to 60 seconds of future clock skew.
        if(time > istat::istattime(&nowTime) + time_t(60)) {
            if (debugRejectedCounters) {
                LogWarning << "StatCounter::record rejected counter from the future: " << time << " > " << nowTime << ": " << counters_[0].file->header().name;
            }
            ++recordsFromTheFuture_;
            return;
        }
    }
    if(isCollated_) {
        //  Align the sample time to its collation interval slot.
        time -= time % collationInterval_;
        size_t i = findCollationIndex(time);
        // No matching bucket candidate in time range. Shift ahead!
        if(i == BUCKETS_PER_COLLATION_WINDOW) {
            if(collations_[0].time == 0) {
                //  Very first sample: initialize the whole window so that it
                //  ends at 'time'.
                for(size_t i = 0; i != BUCKETS_PER_COLLATION_WINDOW; ++i) {
                    collations_[(BUCKETS_PER_COLLATION_WINDOW - 1) - i] = CollationInfo(time - (i * collationInterval_));
                }
            }
            else {
                maybeShiftCollated(time);
            }
            i = findCollationIndex(time);
        }
        CollationInfo &collation(collations_[i]);
        istat::Bucket &bucket(collation.bucket);
        bucket.collatedUpdate(value / double(collationInterval_), time);
        ++collation.writes;
        // Only update the statfile on power-of-two updates.
        if((collation.writes & (collation.writes - 1)) == 0) {
            counters_.begin()->file->updateBucket(bucket);
        }
    }
    else {
        // Don't record zero-sample buckets.
        if(cnt == 0) {
            ++recordsRejected_;
            if (debugRejectedCounters) {
                LogWarning << "StatCounter::record rejected counter with 0 count: " << counters_[0].file->header().name;
            }
            return;
        }
        double avg = value / double(cnt);
        // Ensure the value ranges are sensible, or reject them.
        // We allow a small amount of epsilon (0.01%) here before rejecting counters due to double vs. float
        // conversion in Buckets transferred from istatd agents to the master
        if (min > (max + fabs(max) * 0.0001) || (avg + fabs(avg) * 0.0001) < min || avg > (max + fabs(max) * 0.0001)) {
            if (debugRejectedCounters) {
                LogWarning << "StatCounter::record rejected counter with bad min/avg/max: " << counters_[0].file->header().name << min << avg << "(" << value << "/" << cnt << ")" << max;
            }
            ++recordsRejected_;
            return;
        }
        istat::Bucket b(value, float(valueSq), float(min), float(max), int(cnt), time);
        //  Write the bucket to every retention level's statfile.
        for(std::vector<OneCounter>::iterator ptr(counters_.begin()), end(counters_.end()); ptr != end; ++ptr) {
            ptr->file->updateBucket(b);
        }
    }
}