void LatencyAwarePolicy::init(const SharedRefPtr<Host>& connected_host, const HostMap& hosts) { copy_hosts(hosts, hosts_); for (HostMap::const_iterator i = hosts.begin(), end = hosts.end(); i != end; ++i) { i->second->enable_latency_tracking(settings_.scale_ns, settings_.min_measured); } ChainedLoadBalancingPolicy::init(connected_host, hosts); }
/**
 * Clears the connections kept by this pool (ie, not including the global pool)
 *
 * Deletes each host entry's `avail` connection object and then removes all
 * entries from the pool's host map.
 */
void clearPool() {
    for (HostMap::iterator iter = _hosts.begin(); iter != _hosts.end(); ++iter) {
        // `delete` on a null pointer is a no-op, so the previous explicit
        // NULL guard was redundant and has been dropped.
        delete iter->second->avail;
    }
    _hosts.clear();
}
int DNS::connect( const std::string& domain, const LogSink& logInstance ) { HostMap hosts = resolve( domain ); if( hosts.size() == 0 ) return -DNS_NO_HOSTS_FOUND; struct protoent* prot; if( ( prot = getprotobyname( "tcp" ) ) == 0) return -DNS_COULD_NOT_RESOLVE; int fd; if( ( fd = socket( PF_INET, SOCK_STREAM, prot->p_proto ) ) == -1 ) return -DNS_COULD_NOT_RESOLVE; struct hostent *h; struct sockaddr_in target; target.sin_family = AF_INET; int ret = 0; HostMap::const_iterator it = hosts.begin(); for( ; it != hosts.end(); ++it ) { int port; if( (*it).second == 0 ) port = XMPP_PORT; else port = (*it).second; target.sin_port = htons( port ); if( ( h = gethostbyname( (*it).first.c_str() ) ) == 0 ) { ret = -DNS_COULD_NOT_RESOLVE; continue; } in_addr *addr = (in_addr*)malloc( sizeof( in_addr ) ); memcpy( addr, h->h_addr, sizeof( in_addr ) ); char *tmp = inet_ntoa( *addr ); free( addr ); std::ostringstream oss; oss << "resolved " << (*it).first.c_str() << " to: " << tmp << ":" << port; logInstance.log( LogLevelDebug, LogAreaClassDns, oss.str() ); if( inet_aton( tmp, &(target.sin_addr) ) == 0 ) continue; memset( target.sin_zero, '\0', 8 ); if( ::connect( fd, (struct sockaddr *)&target, sizeof( struct sockaddr ) ) == 0 ) return fd; close( fd ); } if( ret ) return ret; return -DNS_COULD_NOT_CONNECT; }
void LatencyAwarePolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) {
  // Snapshot every host into the policy's own (copy-on-write) host list.
  hosts_->reserve(hosts.size());
  GetHost get_host;
  for (HostMap::const_iterator it = hosts.begin(), last = hosts.end(); it != last; ++it) {
    hosts_->push_back(get_host(*it));
  }

  // Enable latency measurement on each host so it can be ranked later.
  for (HostMap::const_iterator it = hosts.begin(), last = hosts.end(); it != last; ++it) {
    it->second->enable_latency_tracking(settings_.scale_ns, settings_.min_measured);
  }

  // Delegate the rest of the initialization to the wrapped child policy.
  ChainedLoadBalancingPolicy::init(connected_host, hosts, random);
}
void DCAwarePolicy::init(const SharedRefPtr<Host>& connected_host, const HostMap& hosts) { if (local_dc_.empty() && !connected_host->dc().empty()) { LOG_INFO("Using '%s' for the local data center " "(if this is incorrect, please provide the correct data center)", connected_host->dc().c_str()); local_dc_ = connected_host->dc(); } for (HostMap::const_iterator i = hosts.begin(), end = hosts.end(); i != end; ++i) { on_add(i->second); } }
int DNS::connect( const std::string& host, const LogSink& logInstance ) { HostMap hosts = resolve( host, logInstance ); if( hosts.size() == 0 ) return -ConnDnsError; HostMap::const_iterator it = hosts.begin(); for( ; it != hosts.end(); ++it ) { int fd = DNS::connect( (*it).first, (*it).second, logInstance ); if( fd >= 0 ) return fd; } return -ConnConnectionRefused; }
void TokenAwarePolicy::init(const SharedRefPtr<Host>& connected_host, const HostMap& hosts, Random* random) {
  if (random != NULL) {
    // Start from a random position so that different clients don't all hit
    // the same replica first. Clamp to at least 1 so next() is well-defined
    // even when the host map is empty.
    size_t bound = hosts.size();
    if (bound < static_cast<size_t>(1)) {
      bound = static_cast<size_t>(1);
    }
    index_ = random->next(bound);
  }
  // Let the wrapped child policy initialize itself.
  ChainedLoadBalancingPolicy::init(connected_host, hosts, random);
}
void ListPolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) {
  // Keep only the hosts that pass this policy's filter.
  HostMap filtered;
  HostMap::const_iterator it = hosts.begin();
  const HostMap::const_iterator last = hosts.end();
  for (; it != last; ++it) {
    if (is_valid_host(it->second)) {
      filtered.insert(HostPair(it->first, it->second));
    }
  }

  if (filtered.empty()) {
    LOG_ERROR("No valid hosts available for list policy");
  }

  // Initialize the wrapped child policy with the filtered set only.
  ChainedLoadBalancingPolicy::init(connected_host, filtered, random);
}
/**
 * Performs a DNS SRV lookup for "_<service>._<proto>.<domain>" and returns
 * the resolved target/port pairs.
 *
 * @param service     The service to look up (e.g. "xmpp-client").
 * @param proto       The protocol (e.g. "tcp").
 * @param domain      The domain to query; may be empty.
 * @param logInstance Sink for debug log output.
 * @return A map of server name to port. Falls back to defaultHostMap() if
 *         the query fails or yields no usable answers.
 */
DNS::HostMap DNS::resolve( const std::string& service, const std::string& proto,
                           const std::string& domain, const LogSink& logInstance )
{
  buffer srvbuf;
  const std::string dname = "_" + service + "._" + proto;

  if( !domain.empty() )
    srvbuf.len = res_querydomain( dname.c_str(), const_cast<char*>( domain.c_str() ),
                                  C_IN, T_SRV, srvbuf.buf, NS_PACKETSZ );
  else
    srvbuf.len = res_query( dname.c_str(), C_IN, T_SRV, srvbuf.buf, NS_PACKETSZ );

  if( srvbuf.len < 0 )
    return defaultHostMap( domain, logInstance );

  HEADER* hdr = (HEADER*)srvbuf.buf;

  // FIX: validate the reply *before* walking it. The old code parsed the
  // packet first and only checked these conditions afterwards, so a short
  // or malformed reply could cause out-of-bounds reads during parsing.
  if( srvbuf.len < NS_HFIXEDSZ
      || ( hdr->rcode >= 1 && hdr->rcode <= 5 )
      || ntohs( hdr->ancount ) == 0
      || ntohs( hdr->ancount ) > NS_PACKETSZ )
    return defaultHostMap( domain, logInstance );

  unsigned char* here = srvbuf.buf + NS_HFIXEDSZ;
  int cnt;

  // Skip over the question section.
  for( cnt = ntohs( hdr->qdcount ); cnt > 0; --cnt )
  {
    int strlen = dn_skipname( here, srvbuf.buf + srvbuf.len );
    here += strlen + NS_QFIXEDSZ;
  }

  // Remember where each answer's SRV RDATA starts.
  unsigned char* srv[NS_PACKETSZ];
  int srvnum = 0;
  for( cnt = ntohs( hdr->ancount ); cnt > 0; --cnt )
  {
    int strlen = dn_skipname( here, srvbuf.buf + srvbuf.len );
    here += strlen;
    srv[srvnum++] = here;
    here += SRV_FIXEDSZ;
    here += dn_skipname( here, srvbuf.buf + srvbuf.len );
  }

  // (q)sort here

  HostMap servers;
  for( cnt = 0; cnt < srvnum; ++cnt )
  {
    char srvname[NS_MAXDNAME];
    srvname[0] = '\0';

    // FIX: bound dn_expand by the actual reply length (srvbuf.len), not the
    // full buffer size NS_PACKETSZ, so it cannot chase compression pointers
    // into uninitialized bytes past the end of the reply.
    if( dn_expand( srvbuf.buf, srvbuf.buf + srvbuf.len, srv[cnt] + SRV_SERVER,
                   srvname, NS_MAXDNAME ) < 0 || !(*srvname) )
      continue;

    // FIX: the port is transmitted big-endian; assemble it byte-wise so the
    // value is correct regardless of host endianness. The old
    // ntohs( c[1] << 8 | c[0] ) double-swapped and was only correct on
    // little-endian hosts (same value there, fixed on big-endian).
    unsigned char* c = srv[cnt] + SRV_PORT;
    servers.insert( std::make_pair( (char*)srvname, ( c[0] << 8 ) | c[1] ) );
  }

  if( servers.empty() )
    return defaultHostMap( domain, logInstance );

  return servers;
}
/*
 * Register all workers, create hosts, create slots. Assign a host-centric
 * rank to each of the workers. The worker with the lowest global rank on
 * each host is given host rank 0, the next lowest is given host rank 1,
 * and so on. The master is not given a host rank.
 */
void Master::register_workers() {
    typedef map<string, Host *> HostMap;
    HostMap hostmap;            // host name -> Host object (one per distinct host)

    typedef map<int, string> HostnameMap;
    HostnameMap hostnames;      // worker rank -> name of the host it runs on

    // Collect host names from all workers, create host objects.
    // Registration messages may arrive in any rank order; keying by
    // msg->source makes the second pass below order-independent.
    for (int i=0; i<numworkers; i++) {
        RegistrationMessage *msg = dynamic_cast<RegistrationMessage *>(comm->recv_message());
        if (msg == NULL) {
            myfailure("Expected registration message");
        }
        int rank = msg->source;
        string hostname = msg->hostname;
        unsigned int memory = msg->memory;
        unsigned int threads = msg->threads;
        unsigned int cores = msg->cores;
        unsigned int sockets = msg->sockets;
        delete msg;     // message ownership transfers to us; free it once copied out

        hostnames[rank] = hostname;

        if (hostmap.find(hostname) == hostmap.end()) {
            // If the host is not found, create a new one
            log_debug("Got new host: name=%s, mem=%u, threads/cpus=%u, cores=%u, sockets=%u",
                hostname.c_str(), memory, threads, cores, sockets);
            Host *newhost = new Host(hostname, memory, threads, cores, sockets);
            hosts.push_back(newhost);
            hostmap[hostname] = newhost;
        } else {
            // Otherwise, increment the number of slots available
            Host *host = hostmap[hostname];
            host->add_slot();
        }

        log_debug("Slot %d on host %s", rank, hostname.c_str());
    }

    typedef map<string, int> RankMap;
    RankMap ranks;              // host name -> next host rank to hand out

    // Create slots, assign a host rank to each worker.
    // Iterating ranks 1..numworkers in order means lower global ranks on a
    // host receive lower host ranks, as promised in the header comment.
    for (int rank=1; rank<=numworkers; rank++) {
        string hostname = hostnames.find(rank)->second;

        // Find host
        Host *host = hostmap.find(hostname)->second;

        // Create new slot
        Slot *slot = new Slot(rank, host);
        slots.push_back(slot);
        free_slots.push_back(slot);

        // Compute hostrank for this slot
        RankMap::iterator nextrank = ranks.find(hostname);
        int hostrank = 0;
        if (nextrank != ranks.end()) {
            hostrank = nextrank->second;
        }
        ranks[hostname] = hostrank + 1;

        // Tell the worker which host rank it was assigned.
        HostrankMessage hrmsg(hostrank);
        comm->send_message(&hrmsg, rank);

        log_debug("Host rank of worker %d is %d", rank, hostrank);
    }

    // Log the initial resource freeability
    for (vector<Host *>::iterator i = hosts.begin(); i!=hosts.end(); i++) {
        Host *host = *i;
        host->log_resources(resource_log);
    }
}