template <typename ProtocolReader, typename ProtocolWriter>
void helper<ProtocolReader, ProtocolWriter>::process_exn(
    const char* func,
    const string& msg,
    unique_ptr<ResponseChannel::Request> req,
    Cpp2RequestContext* ctx,
    EventBase* eb,
    int32_t protoSeqId) {
  ProtocolWriter oprot;
  if (req) {
    LOG(ERROR) << msg << " in function " << func;
    TApplicationException x(msg);
    IOBufQueue queue = helper_w<ProtocolWriter>::write_exn(
        func, &oprot, protoSeqId, nullptr, x);
    queue.append(THeader::transform(
          queue.move(),
          ctx->getHeader()->getWriteTransforms(),
          ctx->getHeader()->getMinCompressBytes()));
    auto queue_mw = makeMoveWrapper(move(queue));
    auto req_mw = makeMoveWrapper(move(req));
    eb->runInEventBaseThread([=]() mutable {
      (*req_mw)->sendReply(queue_mw->move());
    });
  } else {
    LOG(ERROR) << msg << " in oneway function " << func;
  }
}
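
The wrapping of queue and req above is needed because a C++11 lambda can only capture by copy or by reference, so a move-only value such as the unique_ptr request cannot be moved into the closure directly. A minimal sketch of that idiom, assuming folly's MoveWrapper header; makeTask is a hypothetical name used only for illustration:

#include <folly/MoveWrapper.h>

#include <functional>
#include <memory>

// Return a copyable callback that owns a unique_ptr. "Copying" a MoveWrapper
// actually moves its contents, so the wrapper can ride along in a by-copy
// lambda capture and be stored in a std::function.
std::function<void()> makeTask(std::unique_ptr<int> p) {
  auto pw = folly::makeMoveWrapper(std::move(p));
  return [pw]() mutable {
    std::unique_ptr<int> mine = pw.move();  // take ownership back inside the lambda
    // ... use *mine ...
  };
}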
Example #2
TEST(makeMoveWrapper, NonEmpty) {
  auto u = std::unique_ptr<int>(new int(5));
  EXPECT_EQ(*u, 5);
  auto p = makeMoveWrapper(std::move(u));
  EXPECT_TRUE(!u);
  EXPECT_EQ(**p, 5);
}
Example #3
TEST(makeMoveWrapper, lvalue_copyable) {
  std::shared_ptr<int> p;
  makeMoveWrapper(p);
}
Example #4
TEST(makeMoveWrapper, lvalue) {
  std::unique_ptr<int> p;
  makeMoveWrapper(p);
}
Example #5
TEST(makeMoveWrapper, rvalue) {
  std::unique_ptr<int> p;
  makeMoveWrapper(std::move(p));
}
Example #6
TEST(makeMoveWrapper, Empty) {
  // checks for crashes
  auto p = makeMoveWrapper(std::unique_ptr<int>());
}
/*
 * Distributed sorting.
 *
 * Performs the sorting by breaking the list into chunks, sending requests out
 * to each backend server to sort 1 chunk, then merging the results.
 *
 * Sorting a list of size N normally requires O(N log N) time.
 * Distributing the sorting operation over M servers requires
 * O(N/M log N/M) time on each server (performed in parallel), plus
 * O(N log M) time to merge the sorted lists back together.
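 *
 * (As a rough worked example: with N = 1,000,000 and M = 10, each server
 * sorts 100,000 values in parallel, about 10^5 * log2(10^5), i.e. roughly
 * 1.7M comparisons, versus roughly 20M for a single-machine sort, and the
 * final merge adds about 10^6 * log2(10), i.e. roughly 3.3M comparisons.)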
 *
 * (In reality, the extra I/O overhead of copying the data and sending it out
 * to the servers probably makes it not worthwhile for most use cases.
 * However, it provides a relatively easy-to-understand example.)
 */
Future<vector<int32_t>> SortDistributorHandler::future_sort(
    const vector<int32_t>& values) {
  // If there's just one value, go ahead and return it now.
  // (This avoids infinite recursion if we happen to be pointing at ourselves as
  // one of the backend servers.)
  if (values.size() <= 1) {
    return makeFuture(values);
  }

  // Perform the sort by breaking the list into pieces,
  // and farming them out to the servers we know about.
  size_t chunk_size = values.size() / backends_.size();

  // Round up the chunk size when the list doesn't divide evenly.
  if (values.size() % backends_.size() != 0) {
    chunk_size += 1;
  }
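  // (For example, 10 values across 3 backends gives chunk_size == 4, so the
  // chunks cover indices [0,4), [4,8), and [8,10).)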

  auto tm = getThreadManager();
  auto eb = getEventBase();

  // Create futures for all the requests to the backends, and continue once
  // they have all completed.
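  // collectAll() yields a Future<vector<Try<...>>>, so a failure from one
  // backend is recorded in its Try rather than short-circuiting the others.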
  return collectAll(
    gen::range<size_t>(0, backends_.size())
    | gen::map([&](size_t idx) {
        // Chunk it.
        auto a = std::min(values.size(), chunk_size * idx);
        auto b = std::min(values.size(), chunk_size * (idx + 1));
        vector<int32_t> chunk(
            values.begin() + a,
            values.begin() + b);
        // Issue a request from the IO thread for each chunk.
        auto chunkm = makeMoveWrapper(move(chunk));
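        // via(eb, fn) runs fn on the EventBase thread and returns a Future
        // for its result; the MoveWrapper lets the chunk be moved into the
        // by-copy ([=]) capture rather than copied.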
        return via(eb, [=]() mutable {
            auto chunk = chunkm.move();
            auto client = make_unique<SorterAsyncClient>(
                HeaderClientChannel::newChannel(
                  async::TAsyncSocket::newSocket(
                    eb, backends_.at(idx))));
            return client->future_sort(chunk);
        });
      })
    | gen::as<vector>())
    // Back in a CPU thread, when all the results are in.
    .via(tm)
    .then([=](vector<Try<vector<int32_t>>> xresults) {
        // Throw if any of the backends threw.
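        // (Try::value() rethrows any exception stored in the Try, which
        // propagates out of this callback and fails the returned Future.)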
        auto results = gen::from(xresults)
          | gen::map([](Try<vector<int32_t>> xresult) {
              return move(xresult.value());
            })
          | gen::as<vector>();

        // Build a heap containing one Range for each of the response vectors.
        // The Range starting with the smallest value is kept on top.
        using it = vector<int32_t>::const_iterator;
        struct it_range_cmp {
          bool operator()(Range<it> a, Range<it> b) {
            return a.front() > b.front();
          }
        };
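        // std::priority_queue is a max-heap, so comparing with '>' keeps the
        // Range whose front() is smallest on top.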
        priority_queue<Range<it>, vector<Range<it>>, it_range_cmp> heap;
        for (auto& result : results) {
          if (result.empty()) {
            continue;
          }
          heap.push(Range<it>(result.begin(), result.end()));
        }

        // Cycle through the heap, merging the sorted chunks back into one list.
        // On each iteration:
        // * Pull out the Range with the least first element (each Range is pre-
        //   sorted).
        // * Pull out that first element and add it to the merged result.
        // * Put the Range back without that first element, if it is non-empty.
        vector<int32_t> merged;
        while (!heap.empty()) {
          auto v = heap.top();
          heap.pop();
          merged.push_back(v.front());
          v.advance(1);
          if (!v.empty()) {
            heap.push(v);
          }
        }

        return merged;
    });
}