// . convert a user-supplied date string of the exact form
//   "MM/DD/YYYY HH:MM" (16 chars, local time) into a time_t
// . date    - the raw string from the request (need not be NUL-terminated)
// . dateLen - must be exactly 16; anything else is rejected
// . returns (time_t)-1 if date is NULL, dateLen is wrong, or the date
//   cannot be represented (mktime() failure)
time_t genDate ( char *date , long dateLen ) {
	// the date string should always be the same length
	if ( ! date || dateLen != 16 ) return (time_t)-1;
	// work on a private copy so we can split it into fields in place
	// (16 data bytes + 1 for the final NUL)
	char tmp[17];
	memcpy ( tmp , date , 16 );
	// NUL-terminate each numeric field of "MM/DD/YYYY HH:MM"
	tmp[ 2] = '\0'; // after MM
	tmp[ 5] = '\0'; // after DD
	tmp[10] = '\0'; // after YYYY
	tmp[13] = '\0'; // after HH
	tmp[16] = '\0'; // after MM (minutes)
	struct tm tmBuild;
	memset ( &tmBuild , 0 , sizeof(tmBuild) );
	tmBuild.tm_mon  = atoi ( tmp      ) - 1;    // struct tm months are 0-11
	tmBuild.tm_mday = atoi ( tmp +  3 );
	tmBuild.tm_year = atoi ( tmp +  6 ) - 1900; // struct tm years since 1900
	tmBuild.tm_hour = atoi ( tmp + 11 );
	tmBuild.tm_min  = atoi ( tmp + 14 );
	// . let mktime() work out whether DST is in effect for the
	//   REQUESTED date itself
	// . the old code copied the dst flag of the CURRENT time, which
	//   shifted the result by an hour whenever the requested date was
	//   on the other side of a DST transition (the old commented-out
	//   adjustment block was a workaround for that and even read
	//   'result' before it was ever assigned)
	tmBuild.tm_isdst = -1;
	return mktime ( &tmBuild );
}
// hostId is the remote hostid sending us the lock request void removeExpiredLocks ( int32_t hostId ) { // when we last cleaned them out static time_t s_lastTime = 0; int32_t nowGlobal = getTimeGlobalNoCore(); // only do this once per second at the most if ( nowGlobal <= s_lastTime ) return; // shortcut HashTableX *ht = &g_spiderLoop.m_lockTable; restart: // scan the slots int32_t ns = ht->m_numSlots; // . clean out expired locks... // . if lock was there and m_expired is up, then nuke it! // . when Rdb.cpp receives the "fake" title rec it removes the // lock, only it just sets the m_expired to a few seconds in the // future to give the negative doledb key time to be absorbed. // that way we don't repeat the same url we just got done spidering. // . this happens when we launch our lock request on a url that we // or a twin is spidering or has just finished spidering, and // we get the lock, but we avoided the negative doledb key. for ( int32_t i = 0 ; i < ns ; i++ ) { // breathe QUICKPOLL(MAX_NICENESS); // skip if empty if ( ! ht->m_flags[i] ) continue; // cast lock UrlLock *lock = (UrlLock *)ht->getValueFromSlot(i); int64_t lockKey = *(int64_t *)ht->getKeyFromSlot(i); // if collnum got deleted or reset collnum_t collnum = lock->m_collnum; if ( collnum >= g_collectiondb.m_numRecs || ! g_collectiondb.m_recs[collnum] ) { log("spider: removing lock from missing collnum " "%" PRId32,(int32_t)collnum); goto nuke; } // skip if not yet expired if ( lock->m_expires == 0 ) continue; if ( lock->m_expires >= nowGlobal ) continue; // note it for now if ( g_conf.m_logDebugSpider ) log("spider: removing lock after waiting. elapsed=%" PRId32"." 
" lockKey=%" PRIu64" hid=%" PRId32" expires=%" PRIu32" " "nowGlobal=%" PRIu32, (nowGlobal - lock->m_timestamp), lockKey,hostId, (uint32_t)lock->m_expires, (uint32_t)nowGlobal); nuke: // nuke the slot and possibly re-chain ht->removeSlot ( i ); // gotta restart from the top since table may have shrunk goto restart; } // store it s_lastTime = nowGlobal; }
// . serve the statsdb graph page for an incoming HTTP request
// . returns false if blocked (makeGIF will invoke sendReply later),
//   otherwise true
// . sets g_errno on error
// . ownership of 'st' is transferred to the makeGIF/sendReply chain;
//   presumably sendReply frees it -- verify in its definition
bool sendPageStatsdb ( TcpSocket *s, HttpRequest *r ) {
	char *cgi;
	long  cgiLen;
	// allocate the per-request state; new can throw, so trap it and
	// report the page error via g_errno
	StateStatsdb *st;
	try { st = new StateStatsdb; }
	catch ( ... ) {
		g_errno = ENOMEM;
		log(LOG_INFO, "PageStatsdb: failed to allocate state memory.");
		return true;
	}
	mnew( st, sizeof(StateStatsdb), "PageStatsdb" );

	st->m_niceness = MAX_NICENESS;

	st->m_socket   = s;
	// copy the request since 'r' may not outlive an async callback
	st->m_request  = *r;

	// hostId must be one of the following:
	//  0-n - a valid hostId
	//  -1  - a sample (subset) of the hosts
	//  -2  - all hosts
	//  -3  - this host
	st->m_hostId = r->getLong( "host", -3 );
	if ( st->m_hostId == -3 ) st->m_hostId = g_hostdb.getMyHostId();

	// If we are pulling from multiple hosts, are we merging
	// the data into a single graph?
	// TODO:
	// - Make sure this always happens. Now our only concern
	//   is how many stats we will be drawing.
	//st->m_mergeResults = (bool )r->getLong( "merge_results" , 1 );

	// get session parameters
	st->m_cacti      = (bool )r->getLong( "cacti"     , 0 );

	// get date parameters; genDate() rejects NULL/short strings and
	// returns -1, so missing cgi parms are tolerated here
	cgi = r->getString( "sdate" , &cgiLen , NULL );
	st->m_startDate  = genDate( cgi, cgiLen );

	cgi = r->getString( "edate" , &cgiLen , NULL );
	st->m_endDate    = genDate( cgi, cgiLen );

	st->m_dateCustom = (bool)r->getLong( "custom", 0 );

	// default to 10 hours, i would do 1 day except that there are
	// some bugs that mess up the display a lot when i do that
	st->m_datePeriod = r->getLong( "date_period" , 36000 );
	st->m_dateUnits  = r->getLong( "date_units"  , 1 );//SECS_PER_MIN
	st->m_now        = (bool)r->getLong( "date_now" , 1 );
	st->m_autoUpdate = (bool)r->getLong( "auto_update" , 0 );

	// # samples in moving average
	st->m_samples    = r->getLong( "samples" , 300 );

	//if ( st->m_columns < MIN_COLUMNS || st->m_columns > MAX_COLUMNS )
	//	st->m_columns = DEF_COLUMNS;

	// "date_now" overrides any user-supplied sdate with the current
	// global time -- this must come AFTER the genDate() call above
	if ( st->m_now ) st->m_startDate = (time_t)getTimeGlobalNoCore();

	// resolved date range actually handed to makeGIF; note that
	// m_endDateR ends up EARLIER than m_startDateR in the non-custom
	// case (start minus the period) -- that appears to be the
	// convention makeGIF expects, given the argument order below
	st->m_startDateR = st->m_startDate;
	st->m_endDateR   = st->m_endDate;
	if ( ! st->m_dateCustom ) {
		st->m_endDateR = st->m_startDateR -
				 ( st->m_datePeriod * st->m_dateUnits );
		st->m_endDate  = st->m_endDateR;
	}

	// kick off GIF generation; if it blocks, sendReply will be
	// called back with 'st' when done
	if ( ! g_statsdb.makeGIF ( st->m_endDateR   ,
				   st->m_startDateR ,
				   st->m_samples    ,
				   &st->m_sb2       ,
				   st               ,
				   sendReply        ) )
		return false;

	// if we didn't block call it ourselves directly
	sendReply ( st );
	return true;
}