Example #1
/// Renders the current contents of the cache to a displayable string.
std::string DnsCachedResolver::display_cache()
{
  std::ostringstream oss;
  pthread_mutex_lock(&_cache_lock);
  expire_cache();
  time_t now = time(NULL);
  for (DnsCache::const_iterator i = _cache.begin();
       i != _cache.end();
       ++i)
  {
    DnsCacheEntryPtr ce = i->second;
    oss << "Cache entry " << ce->domain
        << " type=" << DnsRRecord::rrtype_to_string(ce->dnstype)
        << " expires=" << ce->expires-now << std::endl;

    for (std::vector<DnsRRecord*>::const_iterator j = ce->records.begin();
         j != ce->records.end();
         ++j)
    {
      oss << (*j)->to_string() << std::endl;
    }
  }
  pthread_mutex_unlock(&_cache_lock);
  return oss.str();
}
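Both display_cache() above and the dns_query() variants later in this listing call expire_cache() with _cache_lock already held before reading the cache. The body of expire_cache() is not part of this listing; the following is only a minimal sketch of what such a routine could look like, assuming DnsCache is a std::map of DnsCacheEntryPtr values and that each entry stores an absolute expiry time in seconds.

// Hypothetical sketch -- the real expire_cache() is not shown in this listing.
// Assumes _cache maps a key to DnsCacheEntryPtr and each entry carries an
// absolute expiry timestamp in its 'expires' field.
void DnsCachedResolver::expire_cache()
{
  time_t now = time(NULL);

  for (DnsCache::iterator i = _cache.begin(); i != _cache.end(); )
  {
    if (i->second->expires <= now)
    {
      // Entry has passed its TTL, so remove it.  The post-increment erase
      // idiom keeps the iterator valid on pre-C++11 std::map.
      _cache.erase(i++);
    }
    else
    {
      ++i;
    }
  }
}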
Example #2
bool cookie_auth::handle_request(http::request_ptr& http_request_ptr, tcp::connection_ptr& tcp_conn)
{
    if (process_login(http_request_ptr, tcp_conn)) {
        return false; // we processed a login/logout request; no further processing of this request is permitted
    }

    if (!need_authentication(http_request_ptr)) {
        return true; // this request does not require authentication
    }

    // Check whether this is the redirection page; if so, do not test its credentials (it is used for login)
    if (!m_redirect.empty() && m_redirect == http_request_ptr->get_resource()) {
        return true; // this request does not require authentication
    }
    
    // check cache for expiration
    boost::posix_time::ptime time_now(boost::posix_time::second_clock::universal_time());
    expire_cache(time_now);

    // if we are here, we need to check if access authorized...
    const std::string auth_cookie(http_request_ptr->get_cookie(AUTH_COOKIE_NAME));
    if (! auth_cookie.empty()) {
        // check if this cookie is in user cache
        boost::mutex::scoped_lock cache_lock(m_cache_mutex);
        user_cache_type::iterator user_cache_itr = m_user_cache.find(auth_cookie);
        if (user_cache_itr != m_user_cache.end()) {
            // we found these credentials in our cache...
            // we can approve authorization now!
            http_request_ptr->set_user(user_cache_itr->second.second);
            // and update cache timeout
            user_cache_itr->second.first = time_now;
            return true;
        }
    }

    // user not found
    handle_unauthorized(http_request_ptr, tcp_conn);
    return false;
}
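handle_request() above calls expire_cache(time_now) before consulting m_user_cache, so a stale cookie can never authorize a request. The real body of expire_cache() is not shown here; the sketch below is only an illustration, assuming that each cache entry pairs a last-seen timestamp with the user object and that CACHE_EXPIRATION is an assumed idle timeout in seconds.

// Hypothetical sketch -- the real expire_cache() is not shown in this listing,
// and CACHE_EXPIRATION is an assumed constant (idle timeout in seconds).
void cookie_auth::expire_cache(const boost::posix_time::ptime& time_now)
{
    boost::mutex::scoped_lock cache_lock(m_cache_mutex);
    user_cache_type::iterator itr = m_user_cache.begin();
    while (itr != m_user_cache.end()) {
        if (time_now - itr->second.first >
            boost::posix_time::seconds(CACHE_EXPIRATION)) {
            // The cookie has been idle longer than allowed; drop the cached
            // credentials so the client has to log in again.
            m_user_cache.erase(itr++);
        } else {
            ++itr;
        }
    }
}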
Example #3
void io_loop()
{
   char        to_send[200];
   time_t      lasttime = 0;
   long        lastrecvK = 0;
   int         lrv = 0;
   time_t      lasttimeofday;
   int         delay = 0;

 while(1)
 {
   lasttimeofday = timeofday;

   if ((timeofday = time(NULL)) == -1) 
   {
#ifdef USE_SYSLOG
      syslog(LOG_WARNING, "Clock Failure (%d), TS can be corrupted", errno);
#endif
      sendto_ops("Clock Failure (%d), TS can be corrupted", errno);
   }

   if (timeofday < lasttimeofday) 
   {
      ircsprintf(to_send, "System clock is running backwards - (%d < %d)",
		 (int) timeofday, (int) lasttimeofday);
      report_error(to_send, &me);
   }

   NOW = timeofday;

   /*
    * This chunk of code determines whether or not "life sucks", that
    * is to say if the traffic level is so high that standard server
    * commands should be restricted
    * 
    * Changed by Taner so that it tells you what's going on as well as
    * allows forced on (long LCF), etc...
    */

   if ((timeofday - lasttime) >= LCF) 
   {
      lrv = LRV * LCF;
      lasttime = timeofday;
      currlife = (me.receiveK - lastrecvK) / LCF;
      if ((me.receiveK - lrv) > lastrecvK || HTMLOCK == YES) 
      {
	 if (!lifesux) 
 	 {
	    /*
	     * In the original +th code Taner had
	     * 
	     * LCF << 1;  / * add hysteresis * /
	     * 
	     * which does nothing... so, the hybrid team changed it to
	     * 
	     * LCF <<= 1;  / * add hysteresis * /
	     * 
	     * suddenly, there were reports of clients mysteriously just
	     * dropping off... Neither rodder or I can see why it makes
	     * a difference, but lets try it this way...
	     * 
	     * The original dog3 code, does not have an LCF variable
	     * 
	     * -Dianora
	     * 
	     */
	    lifesux = 1;

	    if (noisy_htm) 
	       sendto_ops("Entering high-traffic mode - (%dk/s > %dk/s)", currlife, LRV);
	 }
	 else 
	 {
	    lifesux++;		/* Ok, life really sucks! */
	    LCF += 2;		/* Wait even longer */
	    if (noisy_htm) 
	       sendto_ops("Still high-traffic mode %d%s (%d delay): %dk/s",
			  lifesux, (lifesux > 9) ? " (TURBO)" : "", (int) LCF, currlife);

	   /* Reset HTM here, because it's been on a little too long.
	    * Bad Things(tm) tend to happen with HTM on too long -epi */

	    if (lifesux>15) 
	    {
	       if (noisy_htm) 
		  sendto_ops("Resetting HTM and raising limit to: %dk/s\n", LRV + 5);
	       LCF=LOADCFREQ;
	       lifesux=0;
	       LRV+=5;
	    }
	 }
      }
      else 
      {
	 LCF = LOADCFREQ;
	 if (lifesux) 
	 {
	    lifesux = 0;
	    if (noisy_htm)
	       sendto_ops("Resuming standard operation . . . .");
	 }
      }
      lastrecvK = me.receiveK;
   }
   /*
    * We only want to connect if a connection is due, not every
    * time through.  Note, if there are no active C lines, this call
    * to Tryconnections is made once only; it will return 0. - avalon
    */

   if (nextconnect && timeofday >= nextconnect)
      nextconnect = try_connections(timeofday);

   /* DNS checks. One to timeout queries, one for cache expiries.*/

   if (timeofday >= nextdnscheck)
      nextdnscheck = timeout_query_list(timeofday);
   if (timeofday >= nextexpire)
      nextexpire = expire_cache(timeofday);

   /*
    * Take the smaller of the two 'timed' event times as the time
    * of next event (stops us being late :) - avalon
    * WARNING - nextconnect can return 0!
    */

   if (nextconnect)
      delay = MIN(nextping, nextconnect);
   else
      delay = nextping;
   delay = MIN(nextdnscheck, delay);
   delay = MIN(nextexpire, delay);
   delay -= timeofday;

   /*
    * Adjust delay to something reasonable [ad hoc values] (one
    * might think something more clever here... --msa) 
    * We don't really need to check that often and as long 
    * as we don't delay too long, everything should be ok. 
    * waiting too long can cause things to timeout... 
    * i.e. PINGS -> a disconnection :( 
    * - avalon
    */
   if (delay < 1)
      delay = 1;
   else
      delay = MIN(delay, TIMESEC);
   /*
    * We want to read servers on every io_loop, as well as "busy"
    * clients (which again, includes servers. If "lifesux", then we
    * read servers AGAIN, and then flush any data to servers. -Taner
    */

#ifndef NO_PRIORITY
   read_message(0, &serv_fdlist);
   read_message(1, &busycli_fdlist);
   if (lifesux) 
   {
      (void) read_message(1, &serv_fdlist);
      if (lifesux > 9) 		/* life really sucks */
      {
	 (void) read_message(1, &busycli_fdlist);
	 (void) read_message(1, &serv_fdlist);
      }
      flush_fdlist_connections(&serv_fdlist);
   }

   if ((timeofday = time(NULL)) == -1) 
   {
	#ifdef USE_SYSLOG
           syslog(LOG_WARNING, "Clock Failure (%d), TS can be corrupted", errno);
	#endif
      sendto_ops("Clock Failure (%d), TS can be corrupted", errno);
   }
   /*
    * CLIENT_SERVER = TRUE: If we're in normal mode, or if "lifesux"
    * and a few seconds have passed, then read everything.
    * CLIENT_SERVER = FALSE: If it's been more than lifesux*2 seconds
    * (that is, at most 1 second, or at least 2s when lifesux is != 0)
    * check everything. -Taner
    */
   { 
      static time_t lasttime = 0;

# ifdef CLIENT_SERVER
      if (!lifesux || (lasttime + lifesux) < timeofday) {
# else
      if ((lasttime + (lifesux + 1)) < timeofday) {
# endif
	 (void) read_message(delay ? delay : 1, NULL);	/* check everything! */
	 lasttime = timeofday;
      }
   }
#else
   (void) read_message(delay, NULL);	/* check everything! */
#endif
   /*
    * ...perhaps should not do these loops every time, but only if
    * there is some chance of something happening (but, note that
    * conf->hold times may be changed elsewhere--so precomputed next
    * event time might be too far away... (similarly with ping
    * times) --msa
    */

   if (timeofday >= nextping)
      nextping = check_pings(timeofday);

   if (dorehash && !lifesux) 
   {
      (void) rehash(&me, &me, 1);
      dorehash = 0;
   }
   /*
    * 
    * Flush output buffers on all connections now if they 
    * have data in them (or at least try to flush)  -avalon
    *
    * flush_connections(me.fd);
    *
    * avalon, what kind of crack have you been smoking? why
    * on earth would we flush_connections blindly when
    * we already check to see if we can write (and do)
    * in read_message? There is no point, as this causes
    * lots and lots of unnecessary sendto's which 
    * 99% of the time will fail because if we couldn't
    * empty them in read_message we can't empty them here.
    * one effect: during htm, output to normal lusers
    * will lag.
    */

    /* Now we've made this call a bit smarter. */
    /* Only flush non-blocked sockets. */

    flush_connections(me.fd);

#ifndef NO_PRIORITY
   check_fdlists();
#endif

#ifdef	LOCKFILE
   /*
    * If we have pending klines and CHECK_PENDING_KLINES minutes
    * have passed, try writing them out.  -ThemBones
    */

   if ((pending_klines) && ((timeofday - pending_kline_time)
			    >= (CHECK_PENDING_KLINES * 60)))
      do_pending_klines();
#endif
 }
}
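The block near the top of io_loop() decides whether to enter high-traffic mode: every LCF seconds it compares the kilobytes received since the last check against a budget of LRV kilobytes per second. The snippet below is only a standalone restatement of that threshold test with illustrative names, not code from ircd.

/* Illustrative sketch of the HTM threshold test in io_loop() -- not part of
 * ircd.  Returns 1 when traffic over the last window exceeded the allowed
 * rate, which is when the server enters (or deepens) high-traffic mode. */
static int over_traffic_budget(long recv_k, long last_recv_k,
                               int window_secs, int allowed_k_per_sec)
{
   long budget_k = (long) allowed_k_per_sec * window_secs;

   /* Equivalent to "(me.receiveK - lrv) > lastrecvK" above. */
   return (recv_k - last_recv_k) > budget_k;
}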
/*
 * open_debugfile
 * 
 * If the -t option is not given on the command line when the server is
 * started, all debugging output is sent to the file set by LPATH in
 * config.h.  Here we just open that file and make sure it is opened on
 * fd 2 so that any fprintf's to stderr also go to the logfile.  If the
 * debuglevel is not set from the command line by -x, use /dev/null as
 * the dummy logfile as long as DEBUGMODE has been defined, else don't
 * waste the fd.
 */
static void
open_debugfile()
{
#ifdef	DEBUGMODE
   int         fd;
   aClient    *cptr;

   if (debuglevel >= 0) 
   {
      cptr = make_client(NULL, NULL);
      cptr->fd = 2;
      SetLog(cptr);
      cptr->port = debuglevel;
      cptr->flags = 0;
      cptr->acpt = cptr;
      local[2] = cptr;
      (void) strcpy(cptr->sockhost, me.sockhost);

      (void) printf("isatty = %d ttyname = %s\n",
		    isatty(2), ttyname(2) ? ttyname(2) : "(none)");
      if (!(bootopt & BOOT_TTY)) 	/* leave debugging output on fd 2 */
      {
	 (void) truncate(LOGFILE, 0);
	 if ((fd = open(LOGFILE, O_WRONLY | O_CREAT, 0600)) < 0)
	    if ((fd = open("/dev/null", O_WRONLY)) < 0)
	       exit(-1);
	 if (fd != 2) 
    	 {
	    (void) dup2(fd, 2);
	    (void) close(fd);
	 }
	 strncpyzt(cptr->name, LOGFILE, sizeof(cptr->name));
      }
      else if (isatty(2) && ttyname(2))
	 strncpyzt(cptr->name, ttyname(2), sizeof(cptr->name));
      else
	 (void) strcpy(cptr->name, "FD2-Pipe");
      Debug((DEBUG_FATAL, "Debug: File <%s> Level: %d at %s",
	     cptr->name, cptr->port, myctime(time(NULL))));
   }
   else
      local[2] = NULL;
#endif
   return;
}
Example #4
void DnsCachedResolver::dns_query(const std::vector<std::string>& domains,
                                  int dnstype,
                                  std::vector<DnsResult>& results)
{
  DnsChannel* channel = NULL;

  pthread_mutex_lock(&_cache_lock);

  // Expire any cache entries that have passed their TTL.
  expire_cache();

  // First see if any of the domains need to be queried.
  for (std::vector<std::string>::const_iterator i = domains.begin();
       i != domains.end();
       ++i)
  {
    LOG_VERBOSE("Check cache for %s type %d", (*i).c_str(), dnstype);
    if (get_cache_entry(*i, dnstype) == NULL)
    {
      LOG_DEBUG("No entry found in cache");

      // Create an empty record for this cache entry.
      LOG_DEBUG("Create cache entry pending query");
      DnsCacheEntryPtr ce = create_cache_entry(*i, dnstype);

      if (channel == NULL)
      {
        // Get a DNS channel to issue any queries.
        channel = get_dns_channel();
      }

      if (channel != NULL)
      {
        // DNS server is configured, so create a Transaction for the query
        // and execute it.  Mark the entry as pending and take the lock on
        // it before doing this to prevent any other threads sending the
        // same query.
        LOG_DEBUG("Create and execute DNS query transaction");
        ce->pending_query = true;
        pthread_mutex_lock(&ce->lock);
        DnsTsx* tsx = new DnsTsx(channel, *i, dnstype);
        tsx->execute();
      }
    }
  }

  if (channel != NULL)
  {
    // Issued some queries, so wait for the replies before processing the
    // request further.
    LOG_DEBUG("Wait for query responses");
    pthread_mutex_unlock(&_cache_lock);
    wait_for_replies(channel);
    pthread_mutex_lock(&_cache_lock);
    LOG_DEBUG("Received all query responses");
  }

  // We should now have responses for everything (unless another thread was
  // already doing a query), so loop collecting the responses.
  for (std::vector<std::string>::const_iterator i = domains.begin();
       i != domains.end();
       ++i)
  {
    DnsCacheEntryPtr ce = get_cache_entry(*i, dnstype);

    if (ce != NULL)
    {
      // Found the cache entry, so check whether it is still pending a query.
      if (ce->pending_query)
      {
        // We must release the global lock and let the other thread finish
        // the query.
        // @TODO - may need to do something with reference counting of the
        // DnsCacheEntry to make this watertight.
        pthread_mutex_unlock(&_cache_lock);
        pthread_mutex_lock(&ce->lock);
        pthread_mutex_unlock(&ce->lock);
        pthread_mutex_lock(&_cache_lock);
      }

      // Can now pull the information from the cache entry into the results.
      LOG_DEBUG("Pulling %d records from cache for %s %s",
                (int)ce->records.size(),
                ce->domain.c_str(),
                DnsRRecord::rrtype_to_string(ce->dnstype).c_str());

      results.push_back(DnsResult(ce->domain,
                                  ce->dnstype,
                                  ce->records,
                                  ce->expires - time(NULL)));
    }
    else
    {
      // This shouldn't happen, but if it does, return an empty result set.
      LOG_DEBUG("Return empty result set");
      results.push_back(DnsResult(*i, dnstype, 0));
    }
  }

  pthread_mutex_unlock(&_cache_lock);
}
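dns_query() above (and the single-domain variant that follows) waits for a query issued by another thread with a simple lock hand-off: release the global cache lock, block on the per-entry mutex held by the querying thread until the response arrives, then reacquire the global lock. The snippet below is only a stripped-down sketch of that pattern; Entry and the function name are hypothetical stand-ins, not names from the listing.

#include <pthread.h>

// Illustrative sketch of the wait pattern only; Entry and
// wait_for_pending_query are hypothetical stand-ins for DnsCacheEntry and
// the resolver's own code.
struct Entry
{
  pthread_mutex_t lock;   // held by the thread performing the query
  bool pending_query;     // true while that query is outstanding
};

void wait_for_pending_query(pthread_mutex_t* global_lock, Entry* ce)
{
  // Caller holds global_lock and has observed ce->pending_query == true.
  pthread_mutex_unlock(global_lock);  // let the querying thread make progress
  pthread_mutex_lock(&ce->lock);      // blocks until the query completes
  pthread_mutex_unlock(&ce->lock);    // we only needed to wait, not to hold it
  pthread_mutex_lock(global_lock);    // reacquire before reading the cache
}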
Example #5
DnsResult DnsCachedResolver::dns_query(const std::string& domain,
                                       int dnstype)
{
  DnsChannel* channel = NULL;

  pthread_mutex_lock(&_cache_lock);

  // Expire any cache entries that have passed their TTL.
  expire_cache();

  DnsCacheEntryPtr ce = get_cache_entry(domain, dnstype);

  if (ce == NULL)
  {
    // Create an empty record for this cache entry.
    LOG_DEBUG("Create cache entry pending query");
    ce = create_cache_entry(domain, dnstype);

    // Get a DNS channel to issue any queries.
    channel = get_dns_channel();

    if (channel != NULL)
    {
      // DNS server is configured, so create a Transaction for the query and
      // execute it.  Mark the entry as pending and take the lock on it
      // before doing this to prevent any other threads sending the same
      // query.
      LOG_DEBUG("Create and execute DNS query transaction");
      ce->pending_query = true;
      pthread_mutex_lock(&ce->lock);
      DnsTsx* tsx = new DnsTsx(channel, domain, dnstype);
      tsx->execute();

      LOG_DEBUG("Wait for query responses");
      pthread_mutex_unlock(&_cache_lock);
      wait_for_replies(channel);
      pthread_mutex_lock(&_cache_lock);
      LOG_DEBUG("Received all query responses");
    }
  }

  // We should now have responses for everything (unless another thread was
  // already doing a query), so get the response.
  if (ce->pending_query)
  {
    // We must release the global lock and let the other thread finish
    // the query.
    // @TODO - may need to do something with reference counting of the
    // DnsCacheEntry to make this watertight.
    pthread_mutex_unlock(&_cache_lock);
    pthread_mutex_lock(&ce->lock);
    pthread_mutex_unlock(&ce->lock);
    pthread_mutex_lock(&_cache_lock);
  }

  LOG_DEBUG("Pulling %d records from cache for %s %s",
            (int)ce->records.size(),
            ce->domain.c_str(),
            DnsRRecord::rrtype_to_string(ce->dnstype).c_str());

  DnsResult result(ce->domain,
                   ce->dnstype,
                   ce->records,
                   ce->expires - time(NULL));

  pthread_mutex_unlock(&_cache_lock);

  return result;
}