size_t ConsistentHashingLoadBalancer::AddServersInBatch( const std::vector<ServerId> &servers) { std::vector<Node> add_nodes; add_nodes.reserve(servers.size() * _num_replicas); for (size_t i = 0; i < servers.size(); ++i) { SocketUniquePtr ptr; if (Socket::AddressFailedAsWell(servers[i].id, &ptr) == -1) { continue; } for (size_t rep = 0; rep < _num_replicas; ++rep) { char host[32]; // To be compatible with libmemcached, we formulate the key of // a virtual node as `|address|-|replica_index|', see // http://fe.baidu.com/-1bszwnf at line 297. int len = snprintf(host, sizeof(host), "%s-%lu", endpoint2str(ptr->remote_side()).c_str(), rep); Node node; node.hash = _hash(host, len); node.server_sock = servers[i]; node.server_addr = ptr->remote_side(); add_nodes.push_back(node); } } std::sort(add_nodes.begin(), add_nodes.end()); bool executed = false; const size_t ret = _db_hash_ring.ModifyWithForeground(AddBatch, add_nodes, &executed); CHECK(ret % _num_replicas == 0); const size_t n = ret / _num_replicas; LOG_IF(ERROR, n != servers.size()) << "Fail to AddServersInBatch, expected " << servers.size() << " actually " << n; return n; }
// Resolve `stream_id' to its socket and delegate the waiting to
// Stream::Wait(). Returns EINVAL when the id no longer addresses a
// live socket.
int StreamWait(StreamId stream_id, const timespec* due_time) {
    SocketUniquePtr sock;
    const int rc = Socket::Address(stream_id, &sock);
    if (rc != 0) {
        return EINVAL;
    }
    Stream* stream = (Stream*)sock->conn();
    return stream->Wait(due_time);
}
// Fail the stream identified by `id' by closing it. Streams whose socket
// has already been recycled are silently ignored. Always returns 0.
int Stream::SetFailed(StreamId id) {
    SocketUniquePtr sock;
    const int rc = Socket::AddressFailedAsWell(id, &sock);
    if (rc == -1) {
        // Don't care recycled stream
        return 0;
    }
    Stream* stream = (Stream*)sock->conn();
    stream->Close();
    return 0;
}
// Append `message' to the stream's outgoing buffer. Returns 0 on success,
// EINVAL when the stream is gone, EAGAIN when the buffer is full, and
// errno for any other failure of AppendIfNotFull.
int StreamWrite(StreamId stream_id, const butil::IOBuf &message) {
    SocketUniquePtr sock;
    if (0 != Socket::Address(stream_id, &sock)) {
        return EINVAL;
    }
    Stream* stream = (Stream*)sock->conn();
    const int append_rc = stream->AppendIfNotFull(message);
    if (append_rc == 0) {
        return 0;
    }
    if (append_rc == 1) {
        return EAGAIN;
    }
    return errno;
}
// Binds this stream to the socket that carries its traffic. Must be
// called at most once; on success the stream holds one reference on the
// host socket via `_host_socket' (an owning raw pointer).
int Stream::SetHostSocket(Socket *host_socket) {
    if (_host_socket != NULL) {
        CHECK(false) << "SetHostSocket has already been called";
        return -1;
    }
    SocketUniquePtr ptr;
    // Take an extra reference on the host socket.
    host_socket->ReAddress(&ptr);
    // TODO: add *this to the host socket
    if (ptr->AddStream(id()) != 0) {
        // NOTE(review): `ptr' is destroyed here, dropping the reference
        // taken above -- presumably the intended cleanup; confirm.
        return -1;
    }
    // Transfer ownership of the reference into `_host_socket'.
    _host_socket = ptr.release();
    return 0;
}
// Removes the sub channel identified by `handle' from the balancer and
// releases the references held on its backing fake socket so the socket
// can eventually be recycled.
void ChannelBalancer::RemoveAndDestroyChannel(SelectiveChannel::ChannelHandle handle) {
    if (!RemoveServer(ServerId(handle))) {
        return;  // Not registered (or already removed); nothing to do.
    }
    SocketUniquePtr ptr;
    // NOTE(review): inferred from the branches below -- rc == 0 means the
    // socket is still alive, rc >= 0 means it is still addressable;
    // confirm against Socket::AddressFailedAsWell.
    const int rc = Socket::AddressFailedAsWell(handle, &ptr);
    if (rc >= 0) {
        SubChannel* sub = static_cast<SubChannel*>(ptr->user());
        {
            BAIDU_SCOPED_LOCK(_mutex);
            CHECK_EQ(1UL, _chan_map.erase(sub->chan));
        }
        {
            // Drop the reference that AddChannel stored in `_chan_map':
            // ptr2 aliases the socket and its destructor dereferences it.
            SocketUniquePtr ptr2(ptr.get());
            // Dereference.
        }
        if (rc == 0) {
            // Socket still alive: also release the additional reference
            // so the socket can be recycled.
            ptr->ReleaseAdditionalReference();
        }
    }
}
// Info callback installed on SSL objects. Its job here is to reject
// insecure renegotiation (CVE-2009-3555): a handshake starting while the
// socket already considers the SSL session established is treated as a
// renegotiation attempt and the socket is failed.
static void SSLInfoCallback(const SSL* ssl, int where, int ret) {
    (void)ret;
    SocketUniquePtr s;
    // The SocketId was stored as the SSL app data when the SSL object was
    // set up -- TODO confirm against the setup site.
    SocketId id = (SocketId)SSL_get_app_data((SSL*)ssl);
    if (Socket::Address(id, &s) != 0) {
        // Already failed
        return;
    }
    if (where & SSL_CB_HANDSHAKE_START) {
        if (s->ssl_state() == SSL_CONNECTING) {
            // First handshake on this socket. NOTE(review): flipping the
            // state to CONNECTED at handshake *start* looks early; it does
            // make any later HANDSHAKE_START detectable as renegotiation
            // below, but confirm this is intentional.
            s->set_ssl_state(SSL_CONNECTED);
        } else if (s->ssl_state() == SSL_CONNECTED) {
            // Disable renegotiation (CVE-2009-3555)
            LOG(ERROR) << "Close " << *s << " due to insecure "
                       << "renegotiation detected (CVE-2009-3555)";
            s->SetFailed();
        }
    }
}
// Registers `sub_channel' into this balancer. A fake socket is created to
// represent the sub channel inside the load-balancing structures; its
// `user' payload is a heap-allocated SubChannel wrapper that is reclaimed
// when the socket is recycled. On success, *handle (if non-NULL) receives
// the id of the fake socket. Returns 0 on success, -1 otherwise.
int ChannelBalancer::AddChannel(ChannelBase* sub_channel,
                                SelectiveChannel::ChannelHandle* handle) {
    if (NULL == sub_channel) {
        LOG(ERROR) << "Parameter[sub_channel] is NULL";
        return -1;
    }
    BAIDU_SCOPED_LOCK(_mutex);
    if (_chan_map.find(sub_channel) != _chan_map.end()) {
        LOG(ERROR) << "Duplicated sub_channel=" << sub_channel;
        return -1;
    }
    SubChannel* sub_chan = new (std::nothrow) SubChannel;
    if (sub_chan == NULL) {
        // Fixed log typo: was "Fail to to new SubChannel".
        LOG(FATAL) << "Fail to new SubChannel";
        return -1;
    }
    sub_chan->chan = sub_channel;
    SocketId sock_id;
    SocketOptions options;
    options.user = sub_chan;
    options.health_check_interval_s = FLAGS_channel_check_interval;
    if (Socket::Create(options, &sock_id) != 0) {
        delete sub_chan;
        LOG(ERROR) << "Fail to create fake socket for sub channel";
        return -1;
    }
    SocketUniquePtr ptr;
    // The socket was just created, addressing it must succeed.
    CHECK_EQ(0, Socket::Address(sock_id, &ptr));
    if (!AddServer(ServerId(sock_id))) {
        LOG(ERROR) << "Duplicated sub_channel=" << sub_channel;
        // sub_chan will be deleted when the socket is recycled.
        ptr->SetFailed();
        return -1;
    }
    _chan_map[sub_channel] = ptr.release();  // Add reference.
    if (handle) {
        *handle = sock_id;
    }
    return 0;
}
// Asynchronous wait: when the stream is alive, delegate to Stream::Wait
// with the callback. When it is gone, the callback is still delivered
// (with EINVAL) from a background bthread so the caller never misses the
// notification.
void StreamWait(StreamId stream_id, const timespec *due_time,
                void (*on_writable)(StreamId, void*, int), void *arg) {
    SocketUniquePtr sock;
    if (Socket::Address(stream_id, &sock) == 0) {
        // Live stream: it manages the waiting itself.
        Stream* stream = (Stream*)sock->conn();
        stream->Wait(on_writable, arg, due_time);
        return;
    }
    // Dead stream: build the callback context and hand it to a bthread.
    // RunOnWritable takes ownership of `meta'.
    Stream::WritableMeta* meta = new Stream::WritableMeta;
    meta->id = stream_id;
    meta->arg = arg;
    meta->has_timer = false;
    meta->on_writable = on_writable;
    meta->error_code = EINVAL;
    const bthread_attr_t* attr = FLAGS_usercode_in_pthread
            ? &BTHREAD_ATTR_PTHREAD : &BTHREAD_ATTR_NORMAL;
    bthread_t tid;
    if (bthread_start_background(&tid, attr, Stream::RunOnWritable, meta) != 0) {
        PLOG(FATAL) << "Fail to start bthread";
        // Degrade to invoking the callback synchronously.
        Stream::RunOnWritable(meta);
    }
}
bool ConsistentHashingLoadBalancer::AddServer(const ServerId& server) { std::vector<Node> add_nodes; add_nodes.reserve(_num_replicas); SocketUniquePtr ptr; if (Socket::AddressFailedAsWell(server.id, &ptr) == -1) { return false; } for (size_t i = 0; i < _num_replicas; ++i) { char host[32]; int len = snprintf(host, sizeof(host), "%s-%lu", endpoint2str(ptr->remote_side()).c_str(), i); Node node; node.hash = _hash(host, len); node.server_sock = server; node.server_addr = ptr->remote_side(); add_nodes.push_back(node); } std::sort(add_nodes.begin(), add_nodes.end()); bool executed = false; const size_t ret = _db_hash_ring.ModifyWithForeground( AddBatch, add_nodes, &executed); CHECK(ret == 0 || ret == _num_replicas) << ret; return ret != 0; }
// Update global stuff periodically.
// Runs forever (about once per second) in its own bthread: exposes iobuf
// bvars, starts the dummy server on demand, refreshes per-socket stats,
// and periodically returns free memory to the system.
static void* GlobalUpdate(void*) {
    // Expose variables.
    bvar::PassiveStatus<int64_t> var_iobuf_block_count(
        "iobuf_block_count", GetIOBufBlockCount, NULL);
    bvar::PassiveStatus<int64_t> var_iobuf_block_count_hit_tls_threshold(
        "iobuf_block_count_hit_tls_threshold",
        GetIOBufBlockCountHitTLSThreshold, NULL);
    bvar::PassiveStatus<int64_t> var_iobuf_new_bigview_count(
        GetIOBufNewBigViewCount, NULL);
    bvar::PerSecond<bvar::PassiveStatus<int64_t> > var_iobuf_new_bigview_second(
        "iobuf_newbigview_second", &var_iobuf_new_bigview_count);
    bvar::PassiveStatus<int64_t> var_iobuf_block_memory(
        "iobuf_block_memory", GetIOBufBlockMemory, NULL);
    bvar::PassiveStatus<int> var_running_server_count(
        "rpc_server_count", GetRunningServerCount, NULL);

    // Watch the port file so a dummy server can be started on request.
    butil::FileWatcher fw;
    if (fw.init_from_not_exist(DUMMY_SERVER_PORT_FILE) < 0) {
        LOG(FATAL) << "Fail to init FileWatcher on `"
                   << DUMMY_SERVER_PORT_FILE << "'";
        return NULL;
    }

    std::vector<SocketId> conns;
    const int64_t start_time_us = butil::gettimeofday_us();
    const int WARN_NOSLEEP_THRESHOLD = 2;
    int64_t last_time_us = start_time_us;
    int consecutive_nosleep = 0;
    int64_t last_return_free_memory_time = start_time_us;
    while (1) {
        // Aim at one iteration per second: sleep for the remainder of the
        // current second, measured from the previous iteration.
        const int64_t sleep_us =
            1000000L + last_time_us - butil::gettimeofday_us();
        if (sleep_us > 0) {
            if (bthread_usleep(sleep_us) < 0) {
                // ESTOP means the bthread is being stopped; anything else
                // is unexpected.
                PLOG_IF(FATAL, errno != ESTOP) << "Fail to sleep";
                break;
            }
            consecutive_nosleep = 0;
        } else {
            // One iteration took longer than a second; warn when this
            // happens repeatedly.
            if (++consecutive_nosleep >= WARN_NOSLEEP_THRESHOLD) {
                consecutive_nosleep = 0;
                LOG(WARNING) << __FUNCTION__ << " is too busy!";
            }
        }
        last_time_us = butil::gettimeofday_us();
        TrackMe();

        // Start a dummy server when no real server is running and the
        // port file was (re)written.
        if (!IsDummyServerRunning()
            && g_running_server_count.load(butil::memory_order_relaxed) == 0
            && fw.check_and_consume() > 0) {
            long port = ReadPortOfDummyServer(DUMMY_SERVER_PORT_FILE);
            if (port >= 0) {
                StartDummyServerAt(port);
            }
        }

        // Refresh per-socket statistics for every known connection.
        SocketMapList(&conns);
        const int64_t now_ms = butil::cpuwide_time_ms();
        for (size_t i = 0; i < conns.size(); ++i) {
            SocketUniquePtr ptr;
            if (Socket::Address(conns[i], &ptr) == 0) {
                ptr->UpdateStatsEverySecond(now_ms);
            }
        }

        // Periodically give free memory back to the OS.
        const int return_mem_interval =
            FLAGS_free_memory_to_system_interval/*reloadable*/;
        if (return_mem_interval > 0 &&
            last_time_us >= last_return_free_memory_time +
                return_mem_interval * 1000000L) {
            last_return_free_memory_time = last_time_us;
            // TODO: Calling MallocExtension::instance()->ReleaseFreeMemory may
            // crash the program in later calls to malloc, verified on tcmalloc
            // 1.7 and 2.5, which means making the static member function weak
            // in details/tcmalloc_extension.cpp is probably not correct, however
            // it does work for heap profilers.
            if (MallocExtension_ReleaseFreeMemory != NULL) {
                MallocExtension_ReleaseFreeMemory();
            } else {
#if defined(OS_LINUX)
                // GNU specific.
                malloc_trim(10 * 1024 * 1024/*leave 10M pad*/);
#endif
            }
        }
    }
    return NULL;
}