// Per-connection echo worker: reads from `sck` until shutdown, peer close, or
// an exception, mirroring each received chunk to stdout and echoing it back to
// the peer. Always stops `thr` before returning.
DWORD _handle(network::socket_handle_ptr sck, std::shared_ptr<multi_thread::thread_impl_ex> thr)
{
    try
    {
        char data[MaxLenth] = {0};
        iocp::auto_buffer_ptr buffer(iocp::make_buffer(data));

        while (!is_shutdown_)
        {
            // Blocking read; 0 bytes signals the peer closed the connection.
            size_t received = sck->read(iocp::mutable_buffer(buffer->data(), buffer->size()), 0);
            // NOTE(review): resize(received) shrinks the buffer, so later reads
            // use the reduced size — confirm auto_buffer semantics are intended.
            buffer->resize(received);
            if (received == 0)
                break;

            // Mirror the payload to stdout, then echo it back to the sender.
            std::cout.write(buffer->data(), buffer->size()) << std::endl;
            sck->write(iocp::const_buffer(buffer->data(), buffer->size()), 0);
        }
    }
    catch (std::exception &e)
    {
        std::cerr << e.what() << std::endl;
        stop();
    }

    thr->stop();
    return 0;
}
/**
 * Logging demo / benchmark driver: starts the backend logger thread, emits a
 * few formatted records, runs bench(), then waits for the backend to drain and
 * stops it.
 *
 * Passing any command-line argument enables the alternate bench() mode.
 */
int main(int argc, char**argv)
{
    using namespace std::placeholders;

    // Route all log output through a dedicated backend thread writing ./game.log.
    bkLogger.reset(new BackendLogger("./game.log"));
    bkLogger->start();
    gLogger.setAppendCallback(std::bind(dumpfunc, _1, _2));

    LOG_DEBUG("int={some int},str={your name}", 25, "hello");
    LOG_DEBUG("hello logging");

    int a{32};
    float f{33.3f}; // 'f' suffix: avoids double->float narrowing in brace-init
    stFoo foo;
    LOG_TRACE("int={},float={}, {foo data}", a, f, foo);

    // Fixed: 'argc > 1' is already a bool — the '? true : false' ternary was
    // redundant. (Also dropped an unused local std::string.)
    const bool flag = argc > 1;
    bench(flag);
    /*
    char buffer[64*1024];
    g_file = fopen("/dev/null", "w");
    setbuffer(g_file, buffer, sizeof buffer);
    bench();
    fclose(g_file);
    g_file = NULL;
    */

    // Give the asynchronous backend time to flush before shutting it down.
    sleep(8);
    bkLogger->stop();
}
/**
 * Re-enqueue this fetch job onto its dedicated serial queue, or tear the
 * serial queue down when the fetch has been cancelled.
 *
 * @param serialQueue   queue owning the serialized execution of this job;
 *                      reset (and stopped) on cancellation
 * @param initialQueue  the originating work queue whose reservation is
 *                      released on cancellation
 * @param error         nullptr on the normal path; a CancelChunkFetchException
 *                      triggers teardown, any other error is recorded on the
 *                      query before the job is re-enqueued
 */
void ClientMessageHandleJob::executeSerially(std::shared_ptr<WorkQueue>& serialQueue, std::weak_ptr<WorkQueue>& initialQueue, const scidb::Exception* error)
{
    // NOTE(review): funcName says "handleReschedule" while the method is
    // executeSerially — presumably a leftover from a rename; confirm.
    static const char *funcName="ClientMessageHandleJob::handleReschedule: ";
    if (dynamic_cast<const scidb::ClientMessageHandleJob::CancelChunkFetchException*>(error))
    {
        // Cancellation: stop and drop the serial queue, and release the
        // reservation taken on the initial queue (if it is still alive).
        serialQueue->stop();
        LOG4CXX_TRACE(logger, funcName << "Serial queue "<<serialQueue.get()<<" is stopped");
        serialQueue.reset();
        if (std::shared_ptr<WorkQueue> q = initialQueue.lock())
        {
            q->unreserve();
        }
        return;
    }
    if (error)
    {
        // NOTE(review): this streams the Exception pointer itself; if a textual
        // message is intended, confirm an operator<< exists for this type.
        LOG4CXX_ERROR(logger, funcName << "Error: "<<error);
        // Record a copy of the error on the owning query.
        getQuery()->handleError(error->copy());
    }
    // Keep the job alive for the duration of the queued work item.
    std::shared_ptr<Job> fetchJob(shared_from_this());
    WorkQueue::WorkItem work = boost::bind(&Job::executeOnQueue, fetchJob, _1, _2);
    assert(work);
    try
    {
        serialQueue->enqueue(work);
    }
    catch (const WorkQueue::OverflowException& e)
    {
        // as long as there is at least one item in the queue, we are OK
        LOG4CXX_TRACE(logger, funcName << "Serial queue is full, dropping request");
    }
}
/**
 * Shuts the reporter down: raises the abort flag, stops the shared
 * io_service, then blocks until every worker thread has exited.
 */
void reporter_uninit()
{
    abort_request = true;
    working_ioservice->stop();
    // Range-for replaces the manual iterator loop (same traversal order).
    for (auto & worker : working_threads)
    {
        worker->join();
    }
}
// Shuts the producer down: stop the grabber, then detach the cloud callback
// and (when images were being produced) the image callback.
void stop()
{
    grabber_->stop();
    cloud_connection_.disconnect();
    if (provides_images_)
    {
        image_connection_.disconnect();
    }
    Logger::log(Logger::INFO, "Producer done.\n");
}
// Removes a wallet from the registry, stopping it first. A null pointer is a
// no-op.
void wallet_manager::unregister_wallet(const std::shared_ptr<wallet> & val)
{
    // Serialize access to m_wallets.
    std::lock_guard<std::mutex> l1(mutex_);

    if (!val)
    {
        return;
    }

    val->stop();
    m_wallets.erase(val);
}
// Builds the callback used to reschedule this job serially: allocates a
// dedicated single-worker queue (returned through `serialQueue`), binds
// executeSerially to this job, and reserves a slot on the current work queue.
ClientMessageHandleJob::RescheduleCallback
ClientMessageHandleJob::getSerializeCallback(std::shared_ptr<WorkQueue>& serialQueue)
{
    // The job must still be attached to a live work queue.
    std::shared_ptr<WorkQueue> thisQ(_wq.lock());
    ASSERT_EXCEPTION(thisQ.get()!=nullptr, "ClientMessageHandleJob::getSerializeCallback: current work queue is deallocated");

    std::shared_ptr<ClientMessageHandleJob> thisJob(
        std::dynamic_pointer_cast<ClientMessageHandleJob>(shared_from_this()));

    // One worker, at most two pending items: enforces serial execution.
    const uint32_t concurrency = 1;
    const uint32_t depth = 2;
    serialQueue = NetworkManager::getInstance()->createWorkQueue(concurrency, depth);
    // The queue starts out stopped — presumably restarted by the reschedule
    // machinery; confirm against the caller.
    serialQueue->stop();

    ClientMessageHandleJob::RescheduleCallback func =
        boost::bind(&ClientMessageHandleJob::executeSerially, thisJob, serialQueue, _wq, _1);
    thisQ->reserve(thisQ);
    return func;
}
void do_accept2() { session_.reset(new asio_session(io_service_pool_.get_io_service(), buffer_size_, packet_size_, g_test_mode)); acceptor_.async_accept(session_->socket(), [this](const boost::system::error_code & ec) { if (!ec) { session_->start(); } else { // Accept error std::cout << "async_asio_echo_serv_ex::handle_accept2() - Error: (code = " << ec.value() << ") " << ec.message().c_str() << std::endl; session_->stop(); session_.reset(); } do_accept2(); }); }
// Tears down the io_service machinery. The statement order matters:
void SimpleMultiThreadedReceiverApp::cleanup()
{
    // 1. Release the work guard so the io_service can run out of handlers.
    mWork.reset();
    // 2. Ask the service to stop dispatching.
    mIoService->stop();
    // 3. Wait for the worker thread to exit.
    // NOTE(review): join() throws if the thread was never started — confirm
    // cleanup() is only reached after a successful startup.
    mThread.join();
}
/** * Clean shutdown signal handler * * @param error * @param signal * @param p_server_instance */ void shut_me_down(const boost::system::error_code& error, int signal, std::shared_ptr<server> p_server_instance) { if (!error) p_server_instance->stop(); }
// Shutdown hook: stops the context. The io_service reference is accepted for
// signature compatibility but not used here.
void terminator(boost::asio::io_service& io, std::shared_ptr<tianya_context>& obj)
{
    obj->stop();
}
/// Stops the owned service (if any) and waits for the worker thread.
/// Fixed: calling join() on a non-joinable thread (never started, or already
/// joined) throws std::system_error — and a throwing destructor terminates
/// the program. Guard with joinable() first.
virtual ~Service()
{
    if (service)
    {
        service->stop();
    }
    if (thread.joinable())
    {
        thread.join();
    }
}
// Stops the server by stopping its listener; all other teardown is delegated
// to the listener itself.
void Server::stop() { listener_->stop(); }
// Callback invoked when a game is popped: stops the game instance.
void on_pop(std::shared_ptr<Game> game) { game->stop(); }
// Stops the underlying mediator when the command is destroyed.
~threaded_command() override { m_med->stop(); }
/** * Clean shutdown signal handler * * @param error * @param signal * @param server */ void shut_me_down(const boost::system::error_code& error, int signal, std::shared_ptr<server_data> server) { if (!error) server->stop(); }
/**
 * Handles a freshly accepted RPC transport: drops it when the connection
 * limit is reached or the peer address is not RPC-whitelisted; otherwise
 * wraps it in an rpc_connection, retains it, and starts it.
 *
 * @param transport  the accepted transport (shared ownership)
 */
void rpc_manager::handle_accept( std::shared_ptr<rpc_transport> transport )
{
    // All access to m_tcp_connections is serialized through this mutex.
    std::lock_guard<std::recursive_mutex> l1(mutex_tcp_connections_);
    if (m_tcp_connections.size() >= max_connections)
    {
        log_error( "RPC manager is dropping connection from " << transport->socket().remote_endpoint() << ", limit reached." );
        /**
         * Stop the transport.
         */
        transport->stop();
    }
    else
    {
        // Only addresses on the RPC allow-list may connect.
        if ( network::instance().is_address_rpc_allowed( transport->socket().remote_endpoint().address().to_string()) )
        {
            log_debug( "RPC manager accepted new tcp connection from " << transport->socket().remote_endpoint() << ", " << m_tcp_connections.size() << " connected peers." );
            /**
             * Allocate the tcp_connection.
             */
            auto connection = std::make_shared<rpc_connection> ( io_service_, strand_, stack_impl_, transport );
            /**
             * Set the read timeout (60 — presumably seconds; confirm against
             * rpc_transport).
             */
            transport->set_read_timeout(60);
            /**
             * Set the write timeout.
             */
            transport->set_write_timeout(60);
            /**
             * Retain the connection, keyed by the remote endpoint.
             */
            m_tcp_connections[ transport->socket().remote_endpoint()] = connection ;
            /**
             * Start the tcp_connection.
             */
            connection->start();
        }
        else
        {
            log_info( "RPC manager is dropping non-whitelisted connection from " << transport->socket().remote_endpoint() << "." );
            /**
             * Stop the transport.
             */
            transport->stop();
        }
    }
}
/**
 * @brief Establish an ephemeral session between PSE and CSE if not established yet.
 *
 * Flow: validate the long-term pairing blob read from persistent storage,
 * load the pse-op enclave, then run the StartSession/M2/M3/M4 exchange with
 * CSE (retrying on lost PSDA sessions or lost enclaves), and finally
 * initialize the VMC database.
 *
 * @param is_new_pairing  forwarded to the VMC database initialization ecall
 * @param redo            true forces the existing session to be invalidated
 *                        and rebuilt
 *
 * @return AE_SUCCESS for success. Other values indicate an error.
 */
ae_error_t CPSEClass::create_ephemeral_session_pse_cse(bool is_new_pairing, bool redo)
{
    ae_error_t ret = AE_FAILURE;
    pse_cse_msg2_t msg2;
    pse_cse_msg3_t msg3;
    pse_cse_msg4_t msg4;
    uint32_t blob_size;
    uint8_t* p_sealed_buffer = NULL;
    sgx_status_t status = SGX_SUCCESS;
    sgx_status_t stat_initdb = SGX_SUCCESS;
    int retry = 0;

    if (m_status == PSE_STATUS_INIT || m_status == PSE_STATUS_UNAVAILABLE)
    {
        // CSE provisioning failed during initialization.
        PlatformServiceStatus::instance().set_platform_service_status(PLATFORM_SERVICE_NOT_AVAILABLE);
        return AE_FAILURE;
    }

    if (!redo)
    {
        // An already-established session is reused as-is.
        if (m_status == PSE_STATUS_SERVICE_READY)
        {
            PlatformServiceStatus::instance().set_platform_service_status(PLATFORM_SERVICE_READY);
            return AE_SUCCESS;
        }
    }
    else
    {
        // invalidate current session
        m_status = PSE_STATUS_CSE_PROVISIONED;
    }

    // Set to NOT_READY at the beginning.
    PlatformServiceStatus::instance().set_platform_service_status(PLATFORM_SERVICE_NOT_READY);

    // Single-pass do/while(0): `break` jumps to the common cleanup at the end.
    do
    {
        AESM_DBG_INFO("PSDA started");

        // Check LT pairing blob first
        blob_size = sizeof(pairing_blob_t);
        p_sealed_buffer = (uint8_t*)malloc(blob_size);
        if (p_sealed_buffer == NULL)
        {
            // Nothing allocated yet, so returning directly leaks nothing.
            return AE_FAILURE;
        }

        PROFILE_START("aesm_read_data");
        // read sealed blob from persistent storage
        ret = aesm_read_data(FT_PERSISTENT_STORAGE, PSE_PR_LT_PAIRING_FID, p_sealed_buffer, &blob_size);
        PROFILE_END("aesm_read_data");
        if (ret != AE_SUCCESS||blob_size!=sizeof(pairing_blob_t))
        {
            // Failed to load LT sealed blob
            ret = PSE_PAIRING_BLOB_INVALID_ERROR;
            // unload pse_op enclave
            unload_enclave();
            // load pse_pr enclave
            // NOTE(review): the comment says "load" but the code stops the
            // psepr service when it exists — confirm intent.
            if(!g_psepr_service){
                AESM_DBG_ERROR("failed to load psepr service");
                ret = AE_FAILURE;
                break;
            }
            else
                g_psepr_service->stop();
            break;
        }
        AESM_DBG_INFO("LT Paring Blob read");

        // load pse-op enclave if it's not loaded yet
        if ((ret = load_enclave()) != AE_SUCCESS)
            break;

        // Retry loop for the PSE<->CSE message exchange; `continue` retries
        // after a lost PSDA session or lost enclave, `break` exits.
        do
        {
            // create PSDA session if not available
            if (!PSDAService::instance().start_service())
            {
                PlatformServiceStatus::instance().set_platform_service_status(PLATFORM_SERVICE_NOT_AVAILABLE);
                ret = AE_FAILURE;
                break;
            }

            // `status` holds the ecall result from the previous retry
            // iteration; a lost enclave is reloaded before retrying.
            if(status == SGX_ERROR_ENCLAVE_LOST)
            {
                unload_enclave();
                // Reload an AE will not fail because of out of EPC, so AESM_AE_OUT_OF_EPC is not checked here
                if(AE_SUCCESS != load_enclave())
                {
                    ret = AE_FAILURE;
                    break;
                }
            }

            AESM_DBG_INFO("PSDA Start Ephemral Session");
            // PSE --- M1:StartSession ---> CSE
            memset(&msg2, 0, sizeof(msg2));
            PROFILE_START("psda_start_ephemeral_session");
            ret = psda_start_ephemeral_session(((pairing_blob_t*)p_sealed_buffer)->plaintext.pse_instance_id, &msg2);
            PROFILE_END("psda_start_ephemeral_session");
            if (ret != AE_SUCCESS)
                break;

            AESM_DBG_INFO("Ephemral Session M2/M3");
            // PSE <--- M2 --- CSE
            memset(&msg3, 0, sizeof(msg3));
            PROFILE_START("ephemeral_session_m2m3_wrapper");
            status = ephemeral_session_m2m3_wrapper(m_enclave_id, &ret, (pairing_blob_t*)p_sealed_buffer, &msg2, &msg3);
            PROFILE_END("ephemeral_session_m2m3_wrapper");
            CHECK_ECALL_RET(status, ret)

            AESM_DBG_INFO("PSDA Finalize Session");
            // PSE --- M3 ---> CSE
            memset(&msg4, 0, sizeof(msg4));
            PROFILE_START("psda_finalize_session");
            ret = psda_finalize_session(((pairing_blob_t*)p_sealed_buffer)->plaintext.pse_instance_id, &msg3, &msg4);
            PROFILE_END("psda_finalize_session");
            if (ret == AESM_PSDA_SESSION_LOST)
            {
                // Session dropped mid-exchange: count and retry from the top.
                retry++;
                continue;
            }
            BREAK_IF_FAILED(ret);

            AESM_DBG_INFO("Ephemeral Session M4");
            // PSE <--- M4 --- CSE
            PROFILE_START("ephemeral_session_m4_wrapper");
            status = ephemeral_session_m4_wrapper(m_enclave_id, &ret, &msg4);
            PROFILE_END("ephemeral_session_m4_wrapper");
            CHECK_ECALL_RET(status, ret)

            /* the return value of initialize_sqlite_database_file_wrapper is ignored unless it's SGX_ERROR_ENCLAVE_LOST */
            AESM_DBG_INFO("initialize vmc database");
            PROFILE_START("initialize_sqlite_database_file_wrapper");
            ae_error_t ret2;
            stat_initdb = initialize_sqlite_database_file_wrapper(m_enclave_id, &ret2, is_new_pairing);
            PROFILE_END("initialize_sqlite_database_file_wrapper");
            if (stat_initdb == SGX_ERROR_ENCLAVE_LOST)
            {
                retry++;
                continue;
            }
            else
                break;
        }while(retry < AESM_RETRY_COUNT);

        if (status == SGX_ERROR_ENCLAVE_LOST)
        {
            AESM_DBG_INFO("Enclave Lost");
            // maximum retry times reached
            ret = AE_FAILURE;
            break;
        }
        else if(ret == AE_SUCCESS)
        {
            // Set status to READY
            m_status = PSE_STATUS_SERVICE_READY;
            // Successfully build the ephemeral session
            PlatformServiceStatus::instance().set_platform_service_status(PLATFORM_SERVICE_READY);
        }
    } while(0);

    // Common cleanup: free(NULL) is a no-op, so all break paths are safe.
    free(p_sealed_buffer);
    return ret;
}
void tcp_connection_manager::handle_accept( std::shared_ptr<tcp_transport> transport ) { std::lock_guard<std::recursive_mutex> l1(mutex_tcp_connections_); /** * Only peers accept incoming connections. */ if (globals::instance().operation_mode() == protocol::operation_mode_peer) { /** * We allow this many incoming connections per same IP address. */ enum { maximum_per_same_ip = 8 }; auto connections = 0; for (auto & i : m_tcp_connections) { try { if (auto t = i.second.lock()) { if ( t->is_transport_valid() && i.first.address() == transport->socket().remote_endpoint().address() ) { if (++connections == maximum_per_same_ip) { break; } } } } catch (...) { // ... } } if (connections > maximum_per_same_ip) { log_error( "TCP connection manager is dropping duplicate IP connection " "from " << transport->socket().remote_endpoint() << "." ); /** * Stop the transport. */ transport->stop(); } else if ( network::instance().is_address_banned( transport->socket().remote_endpoint().address().to_string()) ) { log_info( "TCP connection manager is dropping banned connection from " << transport->socket().remote_endpoint() << ", limit reached." ); /** * Stop the transport. */ transport->stop(); } else if ( is_ip_banned( transport->socket().remote_endpoint().address().to_string()) ) { log_debug( "TCP connection manager is dropping bad connection from " << transport->socket().remote_endpoint() << ", limit reached." ); /** * Stop the transport. */ transport->stop(); } else if ( m_tcp_connections.size() >= stack_impl_.get_configuration().network_tcp_inbound_maximum() ) { log_error( "TCP connection manager is dropping connection from " << transport->socket().remote_endpoint() << ", limit reached." ); /** * Stop the transport. */ transport->stop(); } else { log_debug( "TCP connection manager accepted new tcp connection from " << transport->socket().remote_endpoint() << ", " << m_tcp_connections.size() << " connected peers." ); /** * Allocate the tcp_connection. 
*/ auto connection = std::make_shared<tcp_connection> ( io_service_, stack_impl_, tcp_connection::direction_incoming, transport ); /** * Retain the connection. */ m_tcp_connections[transport->socket().remote_endpoint()] = connection ; /** * Start the tcp_connection. */ connection->start(); } } }