Code Example #1
File: Foreman.hpp  Project: cramja/quickstep
  /**
   * @brief Constructor.
   *
   * @param bus A pointer to the TMB.
   * @param catalog_database The catalog database where this query is executed.
   * @param storage_manager The StorageManager to use.
   * @param cpu_id The ID of the CPU to which the Foreman thread can be pinned.
   * @param num_numa_nodes The number of NUMA nodes in the system.
   *
   * @note If cpu_id is not specified, the Foreman thread may be moved
   *       across CPUs by the OS.
  **/
  Foreman(tmb::MessageBus *bus,
          CatalogDatabaseLite *catalog_database,
          StorageManager *storage_manager,
          const int cpu_id = -1,
          const int num_numa_nodes = 1)
      : ForemanLite(bus, cpu_id),
        catalog_database_(DCHECK_NOTNULL(catalog_database)),
        storage_manager_(DCHECK_NOTNULL(storage_manager)),
        max_msgs_per_worker_(1),
        num_numa_nodes_(num_numa_nodes) {
    bus_->RegisterClientAsSender(foreman_client_id_, kWorkOrderMessage);
    bus_->RegisterClientAsSender(foreman_client_id_, kRebuildWorkOrderMessage);
    // NOTE : Foreman thread sends poison messages in the optimizer's
    // ExecutionGeneratorTest.
    bus_->RegisterClientAsSender(foreman_client_id_, kPoisonMessage);

    bus_->RegisterClientAsReceiver(foreman_client_id_,
                                   kWorkOrderCompleteMessage);
    bus_->RegisterClientAsReceiver(foreman_client_id_,
                                   kRebuildWorkOrderCompleteMessage);
    bus_->RegisterClientAsReceiver(foreman_client_id_, kCatalogRelationNewBlockMessage);
    bus_->RegisterClientAsReceiver(foreman_client_id_, kDataPipelineMessage);
    bus_->RegisterClientAsReceiver(foreman_client_id_,
                                   kWorkOrdersAvailableMessage);
    bus_->RegisterClientAsReceiver(foreman_client_id_,
                                   kWorkOrderFeedbackMessage);
  }
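
Most of the constructors collected here (the Quickstep work orders, operators, and managers) wrap their pointer arguments in DCHECK_NOTNULL directly inside the member initializer list. Below is a minimal sketch of that pattern, assuming the usual glog semantics where the macro returns its argument unchanged and a debug build fails fatally on a null pointer; the Example class and its StorageManager parameter are illustrative only and not taken from any of the projects above.

#include <glog/logging.h>

class StorageManager;  // forward declaration, for illustration only

class Example {
 public:
  explicit Example(StorageManager *storage_manager)
      // Debug builds log a fatal error here if the pointer is null;
      // release builds pass the pointer through unchanged.
      : storage_manager_(DCHECK_NOTNULL(storage_manager)) {}

 private:
  StorageManager *storage_manager_;  // non-null after construction
};
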
Code Example #2
  /**
   * @brief Constructor. Does not take ownership of \p query_handle.
   *
   * @param catalog_database The catalog database where this query is executed.
   * @param query_handle The pointer to the output query handle.
   */
  ExecutionGenerator(CatalogDatabase *catalog_database,
                     QueryHandle *query_handle)
      : catalog_database_(DCHECK_NOTNULL(catalog_database)),
        query_handle_(DCHECK_NOTNULL(query_handle)),
        execution_plan_(DCHECK_NOTNULL(query_handle->getQueryPlanMutable())),
        query_context_proto_(DCHECK_NOTNULL(query_handle->getQueryContextProtoMutable())) {
    query_context_proto_->set_query_id(query_handle_->query_id());
#ifdef QUICKSTEP_DISTRIBUTED
    catalog_database_cache_proto_ = DCHECK_NOTNULL(query_handle->getCatalogDatabaseCacheProtoMutable());
#endif
  }
Code Example #3
 /**
  * @brief Constructor.
  *
  * @param input_relation The relation to build the hash table on.
  * @param join_key_attributes The IDs of the equijoin attributes in
  *        input_relation.
  * @param any_join_key_attributes_nullable Whether any of the join key
  *        attributes is nullable.
  * @param build_block_id The ID of the block to build the hash table from.
  * @param hash_table The JoinHashTable to use.
  * @param storage_manager The StorageManager to use.
  **/
 BuildHashWorkOrder(const CatalogRelationSchema &input_relation,
                    const std::vector<attribute_id> &join_key_attributes,
                    const bool any_join_key_attributes_nullable,
                    const block_id build_block_id,
                    JoinHashTable *hash_table,
                    StorageManager *storage_manager)
     : input_relation_(input_relation),
       join_key_attributes_(join_key_attributes),
       any_join_key_attributes_nullable_(any_join_key_attributes_nullable),
       build_block_id_(build_block_id),
       hash_table_(DCHECK_NOTNULL(hash_table)),
       storage_manager_(DCHECK_NOTNULL(storage_manager)) {}
Code Example #4
 /**
  * @brief Constructor.
  *
  * @param query_id The ID of the query to which this WorkOrder belongs.
  * @param input_relation The relation to perform sampling over.
  * @param input_block_id The block to sample.
  * @param is_block_sample Flag indicating whether the sample type is block or tuple.
  * @param percentage The percentage of data to be sampled.
  * @param output_destination The InsertDestination to insert the sample results.
  * @param storage_manager The StorageManager to use.
  **/
 SampleWorkOrder(const std::size_t query_id,
                 const CatalogRelationSchema &input_relation,
                 const block_id input_block_id,
                 const bool is_block_sample,
                 const int percentage,
                 InsertDestination *output_destination,
                 StorageManager *storage_manager)
     : WorkOrder(query_id),
       input_relation_(input_relation),
       input_block_id_(input_block_id),
       is_block_sample_(is_block_sample),
       percentage_(percentage),
       output_destination_(DCHECK_NOTNULL(output_destination)),
       storage_manager_(DCHECK_NOTNULL(storage_manager)) {}
Code Example #5
File: IOBuf.cpp  Project: chenbiaolong/folly
IOBuf IOBuf::cloneCoalescedAsValue() const {
  if (!isChained()) {
    return cloneOneAsValue();
  }
  // Coalesce into newBuf
  const uint64_t newLength = computeChainDataLength();
  const uint64_t newHeadroom = headroom();
  const uint64_t newTailroom = prev()->tailroom();
  const uint64_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}
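
cloneCoalescedAsValue() also uses the statement forms of the same glog macros (DCHECK_NOTNULL, DCHECK_LE, DCHECK_EQ) to assert invariants while copying each chain segment; assuming standard glog behaviour, these checks compile away in release builds. A small sketch of that usage follows; copy_range and its parameters are hypothetical names used only to illustrate the pattern.

#include <cstring>

#include <glog/logging.h>

// Copy len bytes from src to dst, asserting the same kinds of invariants
// that cloneCoalescedAsValue() checks before each memcpy.
void copy_range(char *dst, const char *src, std::size_t len, std::size_t room) {
  DCHECK_NOTNULL(src);   // fatal in debug builds if src is null
  DCHECK_LE(len, room);  // fatal in debug builds if the destination is too small
  std::memcpy(dst, src, len);
}
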
Code Example #6
QueryManagerBase::QueryManagerBase(QueryHandle *query_handle)
    : query_handle_(DCHECK_NOTNULL(query_handle)),
      query_id_(query_handle->query_id()),
      query_dag_(DCHECK_NOTNULL(
          DCHECK_NOTNULL(query_handle->getQueryPlanMutable())->getQueryPlanDAGMutable())),
      num_operators_in_dag_(query_dag_->size()),
      output_consumers_(num_operators_in_dag_),
      blocking_dependencies_(num_operators_in_dag_),
      query_exec_state_(new QueryExecutionState(num_operators_in_dag_)),
      blocking_dependents_(num_operators_in_dag_),
      non_blocking_dependencies_(num_operators_in_dag_) {
  if (FLAGS_visualize_execution_dag) {
    dag_visualizer_ =
        std::make_unique<quickstep::ExecutionDAGVisualizer>(query_handle_->getQueryPlan());
  }

  for (dag_node_index node_index = 0;
       node_index < num_operators_in_dag_;
       ++node_index) {
    const QueryContext::insert_destination_id insert_destination_index =
        query_dag_->getNodePayload(node_index).getInsertDestinationID();
    if (insert_destination_index != QueryContext::kInvalidInsertDestinationId) {
      // Rebuild is necessary whenever InsertDestination is present.
      query_exec_state_->setRebuildRequired(node_index);
    }

    if (query_dag_->getDependencies(node_index).empty()) {
      non_dependent_operators_.push_back(node_index);
    }

    for (const pair<dag_node_index, bool> &dependent_link :
         query_dag_->getDependents(node_index)) {
      const dag_node_index dependent_op_index = dependent_link.first;
      if (query_dag_->getLinkMetadata(node_index, dependent_op_index)) {
        // The link is a pipeline-breaker. Streaming of blocks is not possible
        // between these two operators.
        blocking_dependencies_[dependent_op_index].insert(node_index);
        blocking_dependents_[node_index].push_back(dependent_op_index);
      } else {
        // The link is not a pipeline-breaker. Streaming of blocks is possible
        // between these two operators.
        non_blocking_dependencies_[dependent_op_index].insert(node_index);
        output_consumers_[node_index].push_back(dependent_op_index);
      }
    }
  }
}
Code Example #7
 /**
  * @brief Constructor.
  *
  * @param query_id The ID of the query to which this operator belongs.
  * @param relation The relation to create the index on.
  * @param index_name The name of the index to create.
  * @param index_description The IndexSubBlockDescription for this index.
  **/
 CreateIndexOperator(const std::size_t query_id,
                     CatalogRelation *relation,
                     const std::string &index_name,
                     IndexSubBlockDescription &&index_description)  // NOLINT(whitespace/operators)
     : RelationalOperator(query_id),
       relation_(DCHECK_NOTNULL(relation)),
       index_name_(index_name),
       index_description_(index_description) {}
Code Example #8
 /**
  * @brief Constructor.
  *
  * @note This constructor is relevant for HashTables specialized for
  *       aggregation.
  *
  * @param estimated_num_entries The maximum number of entries in a hash table.
  * @param hash_table_impl_type The type of hash table implementation.
  * @param group_by_types A vector of pointers to the Types that form the
  *        group-by key.
  * @param handles The AggregationHandles in this query.
  * @param storage_manager A pointer to the storage manager.
  **/
 HashTablePool(const std::size_t estimated_num_entries,
               const HashTableImplType hash_table_impl_type,
               const std::vector<const Type *> &group_by_types,
               const std::vector<AggregationHandle *> &handles,
               StorageManager *storage_manager)
     : estimated_num_entries_(reduceEstimatedCardinality(estimated_num_entries)),
       hash_table_impl_type_(hash_table_impl_type),
       group_by_types_(group_by_types),
       handles_(handles),
       storage_manager_(DCHECK_NOTNULL(storage_manager)) {}
Code Example #9
 /**
  * @brief Constructor.
  *
  * @param sort_config The Sort configuration.
  * @param run_relation The relation to which the run blocks belong.
  * @param input_runs Input runs to merge.
  * @param top_k If non-zero, only the top \c top_k tuples are merged.
  * @param merge_level Merge level in the merge tree.
  * @param output_destination The InsertDestination to create new blocks.
  * @param storage_manager The StorageManager to use.
  * @param operator_index Merge-run operator index to send feedback messages
  *                       to.
  * @param scheduler_client_id The TMB client ID of the scheduler thread.
  * @param bus TMB to send the feedback message on.
  **/
 SortMergeRunWorkOrder(
     const SortConfiguration &sort_config,
     const CatalogRelationSchema &run_relation,
     std::vector<merge_run_operator::Run> &&input_runs,
     const std::size_t top_k,
     const std::size_t merge_level,
     InsertDestination *output_destination,
     StorageManager *storage_manager,
     const std::size_t operator_index,
     const tmb::client_id scheduler_client_id,
     MessageBus *bus)
     : sort_config_(sort_config),
       run_relation_(run_relation),
       input_runs_(std::move(input_runs)),
       top_k_(top_k),
       merge_level_(merge_level),
       output_destination_(DCHECK_NOTNULL(output_destination)),
       storage_manager_(DCHECK_NOTNULL(storage_manager)),
       operator_index_(operator_index),
       scheduler_client_id_(scheduler_client_id),
       bus_(DCHECK_NOTNULL(bus)) {
   DCHECK(sort_config_.isValid());
 }
Code Example #10
File: ThriftServer.cpp  Project: Jasonudoo/fbthrift
void ThriftServer::setup() {
  DCHECK_NOTNULL(cpp2Pfac_.get());
  DCHECK_GT(nWorkers_, 0);

  uint32_t threadsStarted = 0;
  bool eventBaseAttached = false;

  // Make sure EBM exists if we haven't set one explicitly
  getEventBaseManager();

  // Initialize event base for this thread, ensure event_init() is called
  serveEventBase_ = eventBaseManager_->getEventBase();
  // Print some libevent stats
  LOG(INFO) << "libevent " <<
    TEventBase::getLibeventVersion() << " method " <<
    TEventBase::getLibeventMethod();

  try {
    // We check for write success so we don't need or want SIGPIPEs.
    signal(SIGPIPE, SIG_IGN);

    if (!observer_ && apache::thrift::observerFactory_) {
      observer_ = apache::thrift::observerFactory_->getObserver();
    }

    // bind to the socket
    if (!serverChannel_) {
      if (socket_ == nullptr) {
        socket_.reset(new TAsyncServerSocket());
        socket_->setShutdownSocketSet(shutdownSocketSet_.get());
        if (port_ != -1) {
          socket_->bind(port_);
        } else {
          DCHECK(address_.isInitialized());
          socket_->bind(address_);
        }
      }

      socket_->listen(listenBacklog_);
      socket_->setMaxNumMessagesInQueue(maxNumMsgsInQueue_);
      socket_->setAcceptRateAdjustSpeed(acceptRateAdjustSpeed_);
    }

    // We always need a threadmanager for cpp2.
    if (!threadFactory_) {
      setThreadFactory(
        std::make_shared<apache::thrift::concurrency::NumaThreadFactory>());
    }

    if (FLAGS_sasl_policy == "required" || FLAGS_sasl_policy == "permitted") {
      if (!saslThreadManager_) {
        saslThreadManager_ = ThreadManager::newSimpleThreadManager(
          nPoolThreads_ > 0 ? nPoolThreads_ : nWorkers_, /* count */
          0, /* pendingTaskCountMax -- no limit */
          false, /* enableTaskStats */
          0 /* maxQueueLen -- large default */);
        saslThreadManager_->setNamePrefix("thrift-sasl");
        saslThreadManager_->threadFactory(threadFactory_);
        saslThreadManager_->start();
      }
      auto saslThreadManager = saslThreadManager_;

      if (getSaslServerFactory()) {
        // If the factory is already set, don't override it with the default
      } else if (FLAGS_kerberos_service_name.empty()) {
        // If the service name is not specified, there is no need to pin the principal.
        // Allow the server to accept anything in the keytab.
        setSaslServerFactory([=] (TEventBase* evb) {
          return std::unique_ptr<SaslServer>(
            new GssSaslServer(evb, saslThreadManager));
        });
      } else {
        char hostname[256];
        if (gethostname(hostname, 255)) {
          LOG(FATAL) << "Failed getting hostname";
        }
        setSaslServerFactory([=] (TEventBase* evb) {
          auto saslServer = std::unique_ptr<SaslServer>(
            new GssSaslServer(evb, saslThreadManager));
          saslServer->setServiceIdentity(
            FLAGS_kerberos_service_name + "/" + hostname);
          return std::move(saslServer);
        });
      }
    }

    if (!threadManager_) {
      std::shared_ptr<apache::thrift::concurrency::ThreadManager>
        threadManager(new apache::thrift::concurrency::NumaThreadManager(
                        nPoolThreads_ > 0 ? nPoolThreads_ : nWorkers_,
                        true /*stats*/,
                        getMaxRequests() /*maxQueueLen*/));
      threadManager->enableCodel(getEnableCodel());
      if (!poolThreadName_.empty()) {
        threadManager->setNamePrefix(poolThreadName_);
      }
      threadManager->start();
      setThreadManager(threadManager);
    }
    threadManager_->setExpireCallback([&](std::shared_ptr<Runnable> r) {
        EventTask* task = dynamic_cast<EventTask*>(r.get());
        if (task) {
          task->expired();
        }
    });
    threadManager_->setCodelCallback([&](std::shared_ptr<Runnable> r) {
        auto observer = getObserver();
        if (observer) {
          observer->queueTimeout();
        }
    });

    if (!serverChannel_) {
      // regular server
      auto b = std::make_shared<boost::barrier>(nWorkers_ + 1);

      // Create the worker threads.
      workers_.reserve(nWorkers_);
      for (uint32_t n = 0; n < nWorkers_; ++n) {
        addWorker();
        workers_[n].worker->getEventBase()->runInLoop([b](){
          b->wait();
        });
      }

      // Update address_ with the address that we are actually bound to.
      // (This is needed if we were supplied a pre-bound socket, or if
      // address_'s port was set to 0, so an ephemeral port was chosen by
      // the kernel.)
      if (socket_) {
        socket_->getAddress(&address_);
      }

      // Notify handler of the preServe event
      if (eventHandler_ != nullptr) {
        eventHandler_->preServe(&address_);
      }

      for (auto& worker: workers_) {
        worker.thread->start();
        ++threadsStarted;
        worker.thread->setName(folly::to<std::string>(cpp2WorkerThreadName_,
                                                      threadsStarted));
      }

      // Wait for all workers to start
      b->wait();

      if (socket_) {
        socket_->attachEventBase(eventBaseManager_->getEventBase());
      }
      eventBaseAttached = true;
      if (socket_) {
        socket_->startAccepting();
      }
    } else {
      // duplex server
      // Create the Cpp2Worker
      DCHECK(workers_.empty());
      WorkerInfo info;
      uint32_t workerID = 0;
      info.worker.reset(new Cpp2Worker(this, workerID, serverChannel_));
      // no thread, use current one (shared with client)
      info.thread = nullptr;

      workers_.push_back(info);
    }
  } catch (...) {
    // XXX: Cpp2Worker::acceptStopped() calls
    //      eventBase_.terminateLoopSoon().  Normally this stops the
    //      worker from processing more work and stops the event loop.
    //      However, if startConsuming() and eventBase_.loop() haven't
    //      run yet this won't do the right thing. The worker thread
    //      will still continue to call startConsuming() later, and
    //      will then start the event loop.
    if (!serverChannel_) {
      for (uint32_t i = 0; i < threadsStarted; ++i) {
        workers_[i].worker->acceptStopped();
        workers_[i].thread->join();
      }
    }
    workers_.clear();

    if (socket_) {
      if (eventBaseAttached) {
        socket_->detachEventBase();
      }

      socket_.reset();
    }

    // avoid crash on stop()
    serveEventBase_ = nullptr;

    throw;
  }
}
Code Example #11
 /**
  * @brief Constructor.
  *
  * @param input_block_id The block id.
  * @param state The AggregationState to use.
  **/
 AggregationWorkOrder(const block_id input_block_id,
                      AggregationOperationState *state)
     : input_block_id_(input_block_id),
       state_(DCHECK_NOTNULL(state)) {}
Code Example #12
File: ThriftServer.cpp  Project: facebook/fbthrift
void ThriftServer::setup() {
  DCHECK_NOTNULL(getProcessorFactory().get());
  auto nWorkers = getNumIOWorkerThreads();
  DCHECK_GT(nWorkers, 0);

  uint32_t threadsStarted = 0;

  // Initialize event base for this thread, ensure event_init() is called
  serveEventBase_ = eventBaseManager_->getEventBase();
  if (idleServerTimeout_.count() > 0) {
    idleServer_.emplace(
        *this, serveEventBase_.load()->timer(), idleServerTimeout_);
  }
  // Print some libevent stats
  VLOG(1) << "libevent " << folly::EventBase::getLibeventVersion() << " method "
          << folly::EventBase::getLibeventMethod();

  try {
#ifndef _WIN32
    // OpenSSL might try to write to a closed socket if the peer disconnects
    // abruptly, raising a SIGPIPE signal. By default this will terminate the
    // process, which we don't want. Hence we need to handle SIGPIPE specially.
    //
    // We don't use SIG_IGN here as child processes will inherit that handler.
    // Instead, we swallow the signal to enable SIGPIPE in children to behave
    // normally.
    // Furthermore, setting flags to 0 and using sigaction prevents SA_RESTART
    // from restarting syscalls after the handler completed. This is important
    // for code using SIGPIPE to interrupt syscalls in other threads.
    struct sigaction sa = {};
    sa.sa_handler = [](int) {};
    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGPIPE, &sa, nullptr);
#endif

    if (!getObserver() && server::observerFactory_) {
      setObserver(server::observerFactory_->getObserver());
    }

    // We always need a threadmanager for cpp2.
    setupThreadManager();
    threadManager_->setExpireCallback([&](std::shared_ptr<Runnable> r) {
      EventTask* task = dynamic_cast<EventTask*>(r.get());
      if (task) {
        task->expired();
      }
    });
    threadManager_->setCodelCallback([&](std::shared_ptr<Runnable>) {
      auto observer = getObserver();
      if (observer) {
        if (getEnableCodel()) {
          observer->queueTimeout();
        } else {
          observer->shadowQueueTimeout();
        }
      }
    });
    if (thriftProcessor_) {
      thriftProcessor_->setThreadManager(threadManager_.get());
      thriftProcessor_->setCpp2Processor(getCpp2Processor());
    }

    if (!serverChannel_) {
      ServerBootstrap::socketConfig.acceptBacklog = getListenBacklog();
      ServerBootstrap::socketConfig.maxNumPendingConnectionsPerWorker =
          getMaxNumPendingConnectionsPerWorker();
      if (reusePort_) {
        ServerBootstrap::setReusePort(true);
      }
      if (enableTFO_) {
        ServerBootstrap::socketConfig.enableTCPFastOpen = *enableTFO_;
        ServerBootstrap::socketConfig.fastOpenQueueSize = fastOpenQueueSize_;
      }

      // Resize the IO pool
      ioThreadPool_->setNumThreads(nWorkers);
      if (!acceptPool_) {
        acceptPool_ = std::make_shared<folly::IOThreadPoolExecutor>(
            nAcceptors_,
            std::make_shared<folly::NamedThreadFactory>("Acceptor Thread"));
      }

      // Resize the SSL handshake pool
      size_t nSSLHandshakeWorkers = getNumSSLHandshakeWorkerThreads();
      VLOG(1) << "Using " << nSSLHandshakeWorkers << " SSL handshake threads";
      sslHandshakePool_->setNumThreads(nSSLHandshakeWorkers);

      ServerBootstrap::childHandler(
          acceptorFactory_ ? acceptorFactory_
                           : std::make_shared<ThriftAcceptorFactory>(this));

      {
        std::lock_guard<std::mutex> lock(ioGroupMutex_);
        ServerBootstrap::group(acceptPool_, ioThreadPool_);
      }
      if (socket_) {
        ServerBootstrap::bind(std::move(socket_));
      } else if (port_ != -1) {
        ServerBootstrap::bind(port_);
      } else {
        ServerBootstrap::bind(address_);
      }
      // Update address_ with the address that we are actually bound to.
      // (This is needed if we were supplied a pre-bound socket, or if
      // address_'s port was set to 0, so an ephemeral port was chosen by
      // the kernel.)
      ServerBootstrap::getSockets()[0]->getAddress(&address_);

      for (auto& socket : getSockets()) {
        socket->setShutdownSocketSet(wShutdownSocketSet_);
        socket->setAcceptRateAdjustSpeed(acceptRateAdjustSpeed_);
        try {
          socket->setTosReflect(tosReflect_);
        } catch (std::exception const& ex) {
          LOG(ERROR) << "Got exception setting up TOS reflect: "
                     << folly::exceptionStr(ex);
        }
      }

      // Notify handler of the preServe event
      if (eventHandler_ != nullptr) {
        eventHandler_->preServe(&address_);
      }

    } else {
      startDuplex();
    }

    // Do not allow setters to be called past this point until the IO worker
    // threads have been joined in stopWorkers().
    configMutable_ = false;
  } catch (std::exception& ex) {
    // This block allows us to investigate the exception using gdb
    LOG(ERROR) << "Got an exception while setting up the server: " << ex.what();
    handleSetupFailure();
    throw;
  } catch (...) {
    handleSetupFailure();
    throw;
  }
}
Code Example #13
File: ThriftServer.cpp  Project: disigma/fbthrift
void ThriftServer::setup() {
  DCHECK_NOTNULL(cpp2Pfac_.get());
  DCHECK_GT(nWorkers_, 0);

  uint32_t threadsStarted = 0;

  // Initialize event base for this thread, ensure event_init() is called
  serveEventBase_ = eventBaseManager_->getEventBase();
  // Print some libevent stats
  VLOG(1) << "libevent " <<
    folly::EventBase::getLibeventVersion() << " method " <<
    folly::EventBase::getLibeventMethod();

  try {
    // We check for write success so we don't need or want SIGPIPEs.
    signal(SIGPIPE, SIG_IGN);

    if (!observer_ && apache::thrift::observerFactory_) {
      observer_ = apache::thrift::observerFactory_->getObserver();
    }

    // We always need a threadmanager for cpp2.
    if (!threadFactory_) {
      setThreadFactory(
        std::make_shared<apache::thrift::concurrency::PosixThreadFactory>(
          apache::thrift::concurrency::PosixThreadFactory::kDefaultPolicy,
          apache::thrift::concurrency::PosixThreadFactory::kDefaultPriority,
          threadStackSizeMB_
        )
      );
    }

    if (saslPolicy_ == "required" || saslPolicy_ == "permitted") {
      if (!saslThreadManager_) {
        auto numThreads = nSaslPoolThreads_ > 0
                              ? nSaslPoolThreads_
                              : (nPoolThreads_ > 0 ? nPoolThreads_ : nWorkers_);
        saslThreadManager_ = ThreadManager::newSimpleThreadManager(
            numThreads,
            0, /* pendingTaskCountMax -- no limit */
            false, /* enableTaskStats */
            0 /* maxQueueLen -- large default */);
        saslThreadManager_->setNamePrefix("thrift-sasl");
        saslThreadManager_->threadFactory(threadFactory_);
        saslThreadManager_->start();
      }
      auto saslThreadManager = saslThreadManager_;

      if (getSaslServerFactory()) {
        // If the factory is already set, don't override it with the default
      } else if (FLAGS_pin_service_identity &&
                 !FLAGS_service_identity.empty()) {
        // If the pin_service_identity flag is set and service_identity is
        // specified, force the server to use the corresponding principal
        // from the keytab.
        char hostname[256];
        if (gethostname(hostname, 255)) {
          LOG(FATAL) << "Failed getting hostname";
        }
        setSaslServerFactory([=] (folly::EventBase* evb) {
          auto saslServer = std::unique_ptr<SaslServer>(
            new GssSaslServer(evb, saslThreadManager));
          saslServer->setServiceIdentity(
            FLAGS_service_identity + "/" + hostname);
          return saslServer;
        });
      } else {
        // Allow the server to accept anything in the keytab.
        setSaslServerFactory([=] (folly::EventBase* evb) {
          return std::unique_ptr<SaslServer>(
            new GssSaslServer(evb, saslThreadManager));
        });
      }
    }

    if (!threadManager_) {
      int numThreads = nPoolThreads_ > 0 ? nPoolThreads_ : nWorkers_;
      std::shared_ptr<apache::thrift::concurrency::ThreadManager>
        threadManager(PriorityThreadManager::newPriorityThreadManager(
                        numThreads,
                        true /*stats*/,
                        getMaxRequests() + numThreads /*maxQueueLen*/));
      threadManager->enableCodel(getEnableCodel());
      if (!poolThreadName_.empty()) {
        threadManager->setNamePrefix(poolThreadName_);
      }
      threadManager->start();
      setThreadManager(threadManager);
    }
    threadManager_->setExpireCallback([&](std::shared_ptr<Runnable> r) {
        EventTask* task = dynamic_cast<EventTask*>(r.get());
        if (task) {
          task->expired();
        }
    });
    threadManager_->setCodelCallback([&](std::shared_ptr<Runnable> r) {
        auto observer = getObserver();
        if (observer) {
          observer->queueTimeout();
        }
    });

    if (!serverChannel_) {

      ServerBootstrap::socketConfig.acceptBacklog = listenBacklog_;

      // Resize the IO pool
      ioThreadPool_->setNumThreads(nWorkers_);

      ServerBootstrap::childHandler(
          acceptorFactory_ ? acceptorFactory_
                           : std::make_shared<ThriftAcceptorFactory>(this));

      {
        std::lock_guard<std::mutex> lock(ioGroupMutex_);
        ServerBootstrap::group(acceptPool_, ioThreadPool_);
      }
      if (socket_) {
        ServerBootstrap::bind(std::move(socket_));
      } else if (port_ != -1) {
        ServerBootstrap::bind(port_);
      } else {
        ServerBootstrap::bind(address_);
      }
      // Update address_ with the address that we are actually bound to.
      // (This is needed if we were supplied a pre-bound socket, or if
      // address_'s port was set to 0, so an ephemeral port was chosen by
      // the kernel.)
      ServerBootstrap::getSockets()[0]->getAddress(&address_);

      for (auto& socket : getSockets()) {
        socket->setShutdownSocketSet(shutdownSocketSet_.get());
        socket->setMaxNumMessagesInQueue(maxNumPendingConnectionsPerWorker_);
        socket->setAcceptRateAdjustSpeed(acceptRateAdjustSpeed_);
      }

      // Notify handler of the preServe event
      if (eventHandler_ != nullptr) {
        eventHandler_->preServe(&address_);
      }

    } else {
      CHECK(configMutable());
      duplexWorker_ = folly::make_unique<Cpp2Worker>(this, serverChannel_);
      // we don't control the EventBase for the duplexWorker, so when we shut
      // it down, we need to ensure there's no delay
      duplexWorker_->setGracefulShutdownTimeout(std::chrono::milliseconds(0));
    }

    // Do not allow setters to be called past this point until the IO worker
    // threads have been joined in stopWorkers().
    configMutable_ = false;
  } catch (std::exception& ex) {
    // This block allows us to investigate the exception using gdb
    LOG(ERROR) << "Got an exception while setting up the server: "
      << ex.what();
    handleSetupFailure();
    throw;
  } catch (...) {
    handleSetupFailure();
    throw;
  }
}
Code Example #14
 /**
  * @brief Constructor.
  *
  * @param blocks The blocks to drop.
  * @param storage_manager The StorageManager to use.
  **/
 DropTableWorkOrder(std::vector<block_id> &&blocks,
                    StorageManager *storage_manager)
     : blocks_(std::move(blocks)),
       storage_manager_(DCHECK_NOTNULL(storage_manager)) {}
Code Example #15
File: StopWatch.cpp  Project: madrenegade/PGameStudio
 StopWatch::StopWatch(const char* const name)
     : name(DCHECK_NOTNULL(name)), start(std::chrono::system_clock::now())
 {
     checkType<StopWatch>();
 }
Code Example #16
 /**
  * @brief Constructor.
  *
  * @param hash_table_index The index of the JoinHashTable in QueryContext.
  * @param query_context The QueryContext to use.
  **/
 DestroyHashWorkOrder(const QueryContext::join_hash_table_id hash_table_index,
                      QueryContext *query_context)
     : hash_table_index_(hash_table_index),
       query_context_(DCHECK_NOTNULL(query_context)) {}
Code Example #17
File: QueryManager.cpp  Project: cramja/quickstep
QueryManager::QueryManager(const tmb::client_id foreman_client_id,
                           const std::size_t num_numa_nodes,
                           QueryHandle *query_handle,
                           CatalogDatabaseLite *catalog_database,
                           StorageManager *storage_manager,
                           tmb::MessageBus *bus)
      : foreman_client_id_(foreman_client_id),
        query_id_(DCHECK_NOTNULL(query_handle)->query_id()),
        catalog_database_(DCHECK_NOTNULL(catalog_database)),
        storage_manager_(DCHECK_NOTNULL(storage_manager)),
        bus_(DCHECK_NOTNULL(bus)) {
  DCHECK(query_handle->getQueryPlanMutable() != nullptr);
  query_dag_ = query_handle->getQueryPlanMutable()->getQueryPlanDAGMutable();
  DCHECK(query_dag_ != nullptr);

  const dag_node_index num_operators_in_dag = query_dag_->size();

  output_consumers_.resize(num_operators_in_dag);
  blocking_dependencies_.resize(num_operators_in_dag);

  query_exec_state_.reset(new QueryExecutionState(num_operators_in_dag));
  workorders_container_.reset(
      new WorkOrdersContainer(num_operators_in_dag, num_numa_nodes));

  query_context_.reset(new QueryContext(query_handle->getQueryContextProto(),
                                        *catalog_database_,
                                        storage_manager_,
                                        foreman_client_id_,
                                        bus_));

  for (dag_node_index node_index = 0;
       node_index < num_operators_in_dag;
       ++node_index) {
    const QueryContext::insert_destination_id insert_destination_index =
        query_dag_->getNodePayload(node_index).getInsertDestinationID();
    if (insert_destination_index != QueryContext::kInvalidInsertDestinationId) {
      // Rebuild is necessary whenever InsertDestination is present.
      query_exec_state_->setRebuildRequired(node_index);
      query_exec_state_->setRebuildStatus(node_index, 0, false);
    }

    for (const pair<dag_node_index, bool> &dependent_link :
         query_dag_->getDependents(node_index)) {
      const dag_node_index dependent_op_index = dependent_link.first;
      if (!query_dag_->getLinkMetadata(node_index, dependent_op_index)) {
        // The link is not a pipeline-breaker. Streaming of blocks is possible
        // between these two operators.
        output_consumers_[node_index].push_back(dependent_op_index);
      } else {
        // The link is a pipeline-breaker. Streaming of blocks is not possible
        // between these two operators.
        blocking_dependencies_[dependent_op_index].push_back(node_index);
      }
    }
  }

  // Collect all the workorders from all the relational operators in the DAG.
  for (dag_node_index index = 0; index < num_operators_in_dag; ++index) {
    if (checkAllBlockingDependenciesMet(index)) {
      query_dag_->getNodePayloadMutable(index)->informAllBlockingDependenciesMet();
      processOperator(index, false);
    }
  }
}
Code Example #18
 /**
  * @brief Get the parent catalog.
  *
  * @return Parent catalog.
  **/
 const Catalog& getParent() const {
   return *DCHECK_NOTNULL(parent_);
 }
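
Since DCHECK_NOTNULL evaluates to its argument, it can also be dereferenced in place to turn a checked pointer into a reference, as getParent() does above. A self-contained sketch under the same glog assumption; Node and parent_ are illustrative names, not taken from the Quickstep catalog code.

#include <glog/logging.h>

class Node {
 public:
  explicit Node(const Node *parent) : parent_(parent) {}

  // Debug builds abort if parent_ is null; release builds simply dereference.
  const Node& getParent() const { return *DCHECK_NOTNULL(parent_); }

 private:
  const Node *parent_;
};
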
Code Example #19
 /**
  * @brief Constructor.
  *
  * @note FinalizeAggregationWorkOrder takes ownership of \c state.
  *
  * @param state The AggregationState to use.
  * @param output_destination The InsertDestination to insert aggregation
  *        results.
  */
 FinalizeAggregationWorkOrder(AggregationOperationState *state,
                              InsertDestination *output_destination)
     : state_(DCHECK_NOTNULL(state)),
       output_destination_(DCHECK_NOTNULL(output_destination)) {}
Code Example #20
 /**
  * @brief Constructor.
  *
  * @param save_block_id The id of the block to save.
  * @param force If true, force writing of all blocks to disk, otherwise only
  *        write dirty blocks.
  * @param storage_manager The StorageManager to use.
  **/
 SaveBlocksWorkOrder(const block_id save_block_id,
                     const bool force,
                     StorageManager *storage_manager)
     : save_block_id_(save_block_id),
       force_(force),
       storage_manager_(DCHECK_NOTNULL(storage_manager)) {}
Code Example #21
 /**
  * @brief Constructor.
  *
  * @param query_id The ID of the query to which this WorkOrder belongs.
  * @param hash_table_index The index of the JoinHashTable in QueryContext.
  * @param query_context The QueryContext to use.
  **/
 DestroyHashWorkOrder(const std::size_t query_id,
                      const QueryContext::join_hash_table_id hash_table_index,
                      QueryContext *query_context)
     : WorkOrder(query_id),
       hash_table_index_(hash_table_index),
       query_context_(DCHECK_NOTNULL(query_context)) {}
Code Example #22
 /**
  * @brief Constructor.
  *
  * @param function_handle The GeneratorFunctionHandle to use.
  * @param output_destination The InsertDestination to insert the generated
  *        output.
  **/
 TableGeneratorWorkOrder(const GeneratorFunctionHandle &function_handle,
                         InsertDestination *output_destination)
     : function_handle_(function_handle),
       output_destination_(DCHECK_NOTNULL(output_destination)) {}
Code Example #23
File: ThriftServer.cpp  Project: UIKit0/fbthrift
void ThriftServer::setup() {
  DCHECK_NOTNULL(cpp2Pfac_.get());
  DCHECK_GT(nWorkers_, 0);

  uint32_t threadsStarted = 0;
  bool eventBaseAttached = false;

  // Make sure EBM exists if we haven't set one explicitly
  getEventBaseManager();

  // Initialize event base for this thread, ensure event_init() is called
  serveEventBase_ = eventBaseManager_->getEventBase();
  // Print some libevent stats
  LOG(INFO) << "libevent " <<
    TEventBase::getLibeventVersion() << " method " <<
    TEventBase::getLibeventMethod();

  try {
    // We check for write success so we don't need or want SIGPIPEs.
    signal(SIGPIPE, SIG_IGN);

    if (!observer_ && apache::thrift::observerFactory_) {
      observer_ = apache::thrift::observerFactory_->getObserver();
    }

    // bind to the socket
    if (socket_ == nullptr) {
      socket_.reset(new TAsyncServerSocket());
      socket_->setShutdownSocketSet(shutdownSocketSet_.get());
      if (port_ != -1) {
        socket_->bind(port_);
      } else {
        DCHECK(address_.isInitialized());
        socket_->bind(address_);
      }
    }

    socket_->listen(listenBacklog_);
    socket_->setMaxNumMessagesInQueue(maxNumMsgsInQueue_);
    socket_->setAcceptRateAdjustSpeed(acceptRateAdjustSpeed_);

    // We always need a threadmanager for cpp2.
    if (!threadFactory_) {
      setThreadFactory(std::shared_ptr<ThreadFactory>(
        new PosixThreadFactory));
    }

    if (!threadManager_) {
      std::shared_ptr<apache::thrift::concurrency::ThreadManager>
        threadManager(PriorityThreadManager::newPriorityThreadManager(
                        nPoolThreads_ > 0 ? nPoolThreads_ : nWorkers_,
                        true /*stats*/));
      threadManager->enableCodel(getEnableCodel());
      threadManager->start();
      setThreadManager(threadManager);
    }
    threadManager_->setExpireCallback([&](std::shared_ptr<Runnable> r) {
        EventTask* task = dynamic_cast<EventTask*>(r.get());
        if (task) {
          task->expired();
        }
    });
    threadManager_->setCodelCallback([&](std::shared_ptr<Runnable> r) {
        auto observer = getObserver();
        if (observer) {
          observer->queueTimeout();
        }
    });

    auto b = std::make_shared<boost::barrier>(nWorkers_ + 1);

    // Create the worker threads.
    workers_.reserve(nWorkers_);
    for (uint32_t n = 0; n < nWorkers_; ++n) {
      addWorker();
      workers_[n].worker->getEventBase()->runInLoop([b](){
        b->wait();
      });
    }

    // Update address_ with the address that we are actually bound to.
    // (This is needed if we were supplied a pre-bound socket, or if address_'s
    // port was set to 0, so an ephemeral port was chosen by the kernel.)
    socket_->getAddress(&address_);

    // Notify handler of the preServe event
    if (eventHandler_ != nullptr) {
      eventHandler_->preServe(&address_);
    }

    for (auto& worker: workers_) {
      worker.thread->start();
      ++threadsStarted;
      worker.thread->setName(folly::to<std::string>("Cpp2Worker",
                                                    threadsStarted));
    }

    // Wait for all workers to start
    b->wait();

    socket_->attachEventBase(eventBaseManager_->getEventBase());
    eventBaseAttached = true;
    socket_->startAccepting();
  } catch (...) {
    // XXX: Cpp2Worker::acceptStopped() calls
    //      eventBase_.terminateLoopSoon().  Normally this stops the
    //      worker from processing more work and stops the event loop.
    //      However, if startConsuming() and eventBase_.loop() haven't
    //      run yet this won't do the right thing. The worker thread
    //      will still continue to call startConsuming() later, and
    //      will then start the event loop.
    for (uint32_t i = 0; i < threadsStarted; ++i) {
      workers_[i].worker->acceptStopped();
      workers_[i].thread->join();
    }
    workers_.clear();

    if (socket_) {
      if (eventBaseAttached) {
        socket_->detachEventBase();
      }

      socket_.reset();
    }

    throw;
  }
}