/// @brief getSome AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) { DEBUG_BEGIN_BLOCK(); traceGetSomeBegin(); if (_done) { traceGetSomeEnd(nullptr); return nullptr; } // the simple case . . . if (_isSimple) { auto res = _dependencies.at(_atDep)->getSome(atLeast, atMost); while (res == nullptr && _atDep < _dependencies.size() - 1) { _atDep++; res = _dependencies.at(_atDep)->getSome(atLeast, atMost); } if (res == nullptr) { _done = true; } traceGetSomeEnd(res); return res; } // the non-simple case . . . size_t available = 0; // nr of available rows size_t index = 0; // an index of a non-empty buffer // pull more blocks from dependencies . . . for (size_t i = 0; i < _dependencies.size(); i++) { if (_gatherBlockBuffer.at(i).empty()) { if (getBlock(i, atLeast, atMost)) { index = i; _gatherBlockPos.at(i) = std::make_pair(i, 0); } } else { index = i; } auto cur = _gatherBlockBuffer.at(i); if (!cur.empty()) { available += cur.at(0)->size() - _gatherBlockPos.at(i).second; for (size_t j = 1; j < cur.size(); j++) { available += cur.at(j)->size(); } } } if (available == 0) { _done = true; traceGetSomeEnd(nullptr); return nullptr; } size_t toSend = (std::min)(available, atMost); // nr rows in outgoing block // the following is similar to AqlItemBlock's slice method . . . std::unordered_map<AqlValue, AqlValue> cache; // comparison function OurLessThan ourLessThan(_trx, _gatherBlockBuffer, _sortRegisters); AqlItemBlock* example = _gatherBlockBuffer.at(index).front(); size_t nrRegs = example->getNrRegs(); auto res = std::make_unique<AqlItemBlock>( toSend, static_cast<arangodb::aql::RegisterId>(nrRegs)); // automatically deleted if things go wrong for (size_t i = 0; i < toSend; i++) { // get the next smallest row from the buffer . . . std::pair<size_t, size_t> val = *(std::min_element( _gatherBlockPos.begin(), _gatherBlockPos.end(), ourLessThan)); // copy the row in to the outgoing block . . . 
for (RegisterId col = 0; col < nrRegs; col++) { AqlValue const& x( _gatherBlockBuffer.at(val.first).front()->getValue(val.second, col)); if (!x.isEmpty()) { auto it = cache.find(x); if (it == cache.end()) { AqlValue y = x.clone(); try { res->setValue(i, col, y); } catch (...) { y.destroy(); throw; } cache.emplace(x, y); } else { res->setValue(i, col, it->second); } } } // renew the _gatherBlockPos and clean up the buffer if necessary _gatherBlockPos.at(val.first).second++; if (_gatherBlockPos.at(val.first).second == _gatherBlockBuffer.at(val.first).front()->size()) { AqlItemBlock* cur = _gatherBlockBuffer.at(val.first).front(); delete cur; _gatherBlockBuffer.at(val.first).pop_front(); _gatherBlockPos.at(val.first) = std::make_pair(val.first, 0); } } traceGetSomeEnd(res.get()); return res.release(); // cppcheck-suppress style DEBUG_END_BLOCK(); }
/// @brief getSome
/// Produce up to `atMost` rows of traversal output. Input rows come from
/// _buffer (refilled from the dependency via ExecutionBlock::getBlock);
/// result batches come from the traverser via morePaths() and are held in
/// _vertices/_edges/_paths, read at position _posInPaths.
/// Returns nullptr (and sets _done) when input is exhausted.
AqlItemBlock* TraversalBlock::getSome(size_t,  // atLeast,
                                      size_t atMost) {
  DEBUG_BEGIN_BLOCK();
  traceGetSomeBegin();
  if (_done) {
    traceGetSomeEnd(nullptr);
    return nullptr;
  }

  if (_buffer.empty()) {
    size_t toFetch = (std::min)(DefaultBatchSize(), atMost);
    if (!ExecutionBlock::getBlock(toFetch, toFetch)) {
      // dependency exhausted and buffer empty -> we are done
      _done = true;
      traceGetSomeEnd(nullptr);
      return nullptr;
    }
    _pos = 0;  // this is in the first block
  }

  // If we get here, we do have _buffer.front()
  AqlItemBlock* cur = _buffer.front();
  size_t const curRegs = cur->getNrRegs();

  if (_pos == 0) {
    // Initial initialization: seed the traverser from the first input row
    initializePaths(cur, _pos);
  }

  // Iterate more paths:
  if (_posInPaths >= _vertices.size()) {
    if (!morePaths(atMost)) {
      // This input does not have any more paths. maybe the next one has.
      // we can only return nullptr iff the buffer is empty.
      if (++_pos >= cur->size()) {
        // front input block fully consumed; drop it and start over
        _buffer.pop_front();  // does not throw
        // returnBlock(cur);
        delete cur;
        _pos = 0;
      } else {
        // move to the next input row in the same block
        initializePaths(cur, _pos);
      }
      // recurse to try the next input row / block; trace the final result
      auto r = getSome(atMost, atMost);
      traceGetSomeEnd(r);
      return r;
    }
  }

  size_t available = _vertices.size() - _posInPaths;
  size_t toSend = (std::min)(atMost, available);

  RegisterId nrRegs =
      getPlanNode()->getRegisterPlan()->nrRegs[getPlanNode()->getDepth()];

  std::unique_ptr<AqlItemBlock> res(requestBlock(toSend, nrRegs));
  // automatically freed if we throw
  TRI_ASSERT(curRegs <= res->getNrRegs());

  // only copy 1st row of registers inherited from previous frame(s)
  inheritRegisters(cur, res.get(), _pos);

  for (size_t j = 0; j < toSend; j++) {
    // write the configured outputs for this path position
    if (usesVertexOutput()) {
      res->setValue(j, _vertexReg, _vertices[_posInPaths].clone());
    }
    if (usesEdgeOutput()) {
      res->setValue(j, _edgeReg, _edges[_posInPaths].clone());
    }
    if (usesPathOutput()) {
      res->setValue(j, _pathReg, _paths[_posInPaths].clone());
    }
    if (j > 0) {
      // re-use already copied AqlValues
      res->copyValuesFromFirstRow(j, static_cast<RegisterId>(curRegs));
    }
    ++_posInPaths;
  }

  // Advance read position:
  if (_posInPaths >= _vertices.size()) {
    // we have exhausted our local paths buffer
    // fetch more paths into our buffer
    if (!morePaths(atMost)) {
      // nothing more to read, re-initialize fetching of paths
      if (++_pos >= cur->size()) {
        _buffer.pop_front();  // does not throw
        // returnBlock(cur);
        delete cur;
        _pos = 0;
      } else {
        initializePaths(cur, _pos);
      }
    }
  }

  // Clear out registers no longer needed later:
  clearRegisters(res.get());
  traceGetSomeEnd(res.get());
  return res.release();

  // cppcheck-suppress style
  DEBUG_END_BLOCK();
}
/// @brief getSome
/// Emit up to `atMost` rows of the current shortest path (one row per path
/// step), pulling input blocks from the dependency and computing the next
/// path via nextPath() when the current one is exhausted.
/// Returns nullptr (and sets _done) when input is exhausted.
AqlItemBlock* ShortestPathBlock::getSome(size_t, size_t atMost) {
  DEBUG_BEGIN_BLOCK();

  if (_done) {
    return nullptr;
  }

  if (_buffer.empty()) {
    size_t const fetchCount = (std::min)(DefaultBatchSize(), atMost);
    if (!ExecutionBlock::getBlock(fetchCount, fetchCount)) {
      // dependency exhausted and buffer empty -> we are done
      _done = true;
      return nullptr;
    }
    _pos = 0;  // this is in the first block
  }

  // If we get here, we do have _buffer.front()
  AqlItemBlock* current = _buffer.front();
  size_t const inputRegs = current->getNrRegs();

  // Collect the next path:
  if (_posInPath >= _pathLength) {
    if (!nextPath(current)) {
      // This input does not have any path. maybe the next one has.
      // we can only return nullptr iff the buffer is empty.
      if (++_pos >= current->size()) {
        // front input block fully consumed; drop it and start over
        _buffer.pop_front();  // does not throw
        delete current;
        _pos = 0;
      }
      return getSome(atMost, atMost);
    }
  }

  size_t const remaining = _pathLength - _posInPath;
  size_t const rowsToWrite = (std::min)(atMost, remaining);

  RegisterId outputRegs =
      getPlanNode()->getRegisterPlan()->nrRegs[getPlanNode()->getDepth()];

  // automatically freed if we throw
  std::unique_ptr<AqlItemBlock> result(requestBlock(rowsToWrite, outputRegs));

  TRI_ASSERT(inputRegs <= result->getNrRegs());

  // only copy 1st row of registers inherited from previous frame(s)
  inheritRegisters(current, result.get(), _pos);

  // TODO this might be optimized in favor of direct mptr.
  VPackBuilder builder;
  for (size_t row = 0; row < rowsToWrite; row++) {
    if (usesVertexOutput()) {
      // TODO this might be optimized in favor of direct mptr.
      builder.clear();
      _path->vertexToVelocyPack(_trx, _posInPath, builder);
      result->setValue(row, _vertexReg, AqlValue(builder.slice()));
    }
    if (usesEdgeOutput()) {
      // TODO this might be optimized in favor of direct mptr.
      builder.clear();
      _path->edgeToVelocyPack(_trx, _posInPath, builder);
      result->setValue(row, _edgeReg, AqlValue(builder.slice()));
    }
    if (row > 0) {
      // re-use already copied aqlvalues
      result->copyValuesFromFirstRow(row, static_cast<RegisterId>(inputRegs));
    }
    ++_posInPath;
  }

  if (_posInPath >= _pathLength) {
    // Advance read position for next call
    if (++_pos >= current->size()) {
      _buffer.pop_front();  // does not throw
      delete current;
      _pos = 0;
    }
  }

  // Clear out registers no longer needed later:
  clearRegisters(result.get());
  return result.release();

  DEBUG_END_BLOCK();
}