/**
 * Private function to compute the position in the chunk from coordinates
 */
inline uint64_t WindowChunk::coord2pos(Coordinates const& coord) const
{
    SCIDB_ASSERT(_materialized);
    position_t pos = _mapper->coord2pos(coord);
    SCIDB_ASSERT(pos >= 0);
    return pos;
}
/* Allocate more space into the data store to handle the requested chunk */
void
DataStore::makeMoreSpace(size_t request)
{
    SCIDB_ASSERT(request > _largestFreeChunk);
    SCIDB_ASSERT(_allocatedSize >= _largestFreeChunk);

    while (request > _largestFreeChunk)
    {
        _freelists[_allocatedSize].insert(_allocatedSize);
        _largestFreeChunk = _allocatedSize;
        _allocatedSize *= 2;
    }
}
/* Add block to free list and try to consolidate buddy blocks */
void
DataStore::addToFreelist(size_t bucket, off_t off)
{
    SCIDB_ASSERT(roundUpPowerOf2(bucket) == bucket);
    SCIDB_ASSERT(off % bucket == 0);

    /* Calc the buddy block */
    size_t parent = bucket * 2;
    off_t buddy;

    if (off % parent == 0)
    {
        buddy = off + bucket;
    }
    else
    {
        buddy = off - bucket;
    }

    /* Check if the buddy is free */
    DataStoreFreelists::iterator it = _freelists.find(bucket);
    if (it != _freelists.end())
    {
        std::set<off_t>::iterator bucket_it;

        bucket_it = (it->second).find(buddy);
        if (bucket_it != (it->second).end())
        {
            /* Merge with the buddy */
            off_t merged = (off < buddy) ? off : buddy;

            (it->second).erase(bucket_it);
            if ((it->second).size() == 0)
            {
                _freelists.erase(it);
            }
            addToFreelist(parent, merged);
            return;
        }
    }

    /* Buddy is not free, just insert into free list */
    _freelists[bucket].insert(off);
}
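/*
 * Editor's note: the buddy rule above is compact, so a minimal standalone sketch may
 * help. The program below is hypothetical and not part of the DataStore API (names
 * such as buddyOf are illustrative); it only demonstrates how the buddy of a
 * power-of-two block is located and how two free buddies coalesce into their parent.
 */
#include <algorithm>
#include <cstdint>
#include <iostream>

// Hypothetical illustration of the buddy rule: for a block of size 'bucket' at
// offset 'off', the buddy is the other half of the parent block of size 2*bucket.
static int64_t buddyOf(int64_t off, int64_t bucket)
{
    return (off % (2 * bucket) == 0) ? off + bucket : off - bucket;
}

int main()
{
    // Consider freeing the two 4 KiB halves of an 8 KiB parent starting at offset 8192.
    int64_t bucket = 4096;
    int64_t a = 8192;
    int64_t b = buddyOf(a, bucket);   // 12288: the upper half of the parent

    std::cout << "buddy of " << a << " is " << b << "\n";
    std::cout << "they merge into a block of size " << 2 * bucket
              << " at offset " << std::min(a, b) << "\n";
    return 0;
}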
/* Flush all DataStore objects */
void
DataStores::flushAllDataStores()
{
    DataStoreMap::iterator it;
    shared_ptr<DataStore> current;
    DataStore::Guid lastGuid = 0;

    while (true)
    {
        {
            ScopedMutexLock sm(_dataStoreLock);

            SCIDB_ASSERT(_theDataStores);
            it = _theDataStores->upper_bound(lastGuid);
            if (it == _theDataStores->end())
            {
                break;
            }
            current = it->second;
            lastGuid = it->first;
        }
        current->flush();
        current.reset();
    }
}
/* Get a reference to a specific DataStore */
shared_ptr<DataStore>
DataStores::getDataStore(DataStore::Guid guid)
{
    DataStoreMap::iterator it;
    shared_ptr<DataStore> retval;
    ScopedMutexLock sm(_dataStoreLock);

    SCIDB_ASSERT(_theDataStores);

    /* Check the map */
    it = _theDataStores->find(guid);
    if (it != _theDataStores->end())
    {
        return it->second;
    }

    /* Not found, construct the object */
    stringstream filepath;
    filepath << _basePath << guid << ".data";
    retval = boost::make_shared<DataStore>(filepath.str().c_str(), guid, boost::ref(*this));
    (*_theDataStores)[guid] = retval;
    return retval;
}
/* Iterate the free lists and find a free chunk of the requested size
   @pre caller has locked the DataStore
 */
off_t
DataStore::searchFreelist(size_t request)
{
    off_t ret = 0;

    SCIDB_ASSERT(request <= _largestFreeChunk);

    /* Base case: the target bucket contains a free chunk */
    DataStoreFreelists::iterator it = _freelists.find(request);
    if (it != _freelists.end())
    {
        assert(it->second.size() > 0);
        ret = *(it->second.begin());
        it->second.erase(ret);
        if (it->second.size() == 0)
        {
            _freelists.erase(it);
        }
    }
    /* Recursive case: we have to get a free chunk by breaking
       up a larger free chunk
     */
    else
    {
        ret = searchFreelist(request * 2);
        _freelists[request].insert(ret + request);
    }
    return ret;
}
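/*
 * Editor's note: complementing the coalescing sketch above, here is a hypothetical,
 * self-contained version of the recursive split. The Freelists alias and splitAlloc
 * name are illustrative only. Requesting a small power-of-two block out of a larger
 * free one repeatedly halves it, returning the lower half and putting each upper
 * half back on the free list.
 */
#include <cstdint>
#include <iostream>
#include <map>
#include <set>

using Freelists = std::map<int64_t, std::set<int64_t>>;   // block size -> free offsets

// Sketch of the recursive split: assumes some bucket of size >= request is free,
// as the caller of searchFreelist() guarantees in the snippet above.
static int64_t splitAlloc(Freelists& fl, int64_t request)
{
    auto it = fl.find(request);
    if (it != fl.end())
    {
        int64_t off = *it->second.begin();
        it->second.erase(it->second.begin());
        if (it->second.empty())
            fl.erase(it);
        return off;
    }
    int64_t off = splitAlloc(fl, request * 2);   // split a larger block
    fl[request].insert(off + request);           // upper half stays free
    return off;                                  // lower half is allocated
}

int main()
{
    Freelists fl;
    fl[16384].insert(0);                         // one free 16 KiB block at offset 0
    std::cout << "allocated 4 KiB at offset " << splitAlloc(fl, 4096) << "\n";
    // The free lists now hold a 4 KiB block at 4096 and an 8 KiB block at 8192.
    return 0;
}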
/**
 * @see ConstIterator::setPosition()
 */
bool MaterializedWindowChunkIterator::setPosition(Coordinates const& pos)
{
    uint64_t Pos = _chunk.coord2pos(pos);
    _iter = _stateMap.find(Pos);

    if (end())
    {
        return false;
    }

    SCIDB_ASSERT((_iter != _stateMap.end()));

    calculateNextValue();
    if (_iterationMode & IGNORE_NULL_VALUES && _nextValue.isNull())
    {
        return false;
    }
    else if (_iterationMode & IGNORE_DEFAULT_VALUES && _nextValue == _defaultValue)
    {
        return false;
    }

    return true;
}
inline uint64_t WindowChunk::getStep() const
{
    if (false == isMaterialized())
    {
        throw USER_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_OP_WINDOW_ERROR6);
    }
    SCIDB_ASSERT(_mapper);
    return _mapper->getChunkInterval(_nDims - 1);
}
bool
PhysicalQueryPlanNode::getRedimensionIsStrict(const PhysicalOperator::Parameters& redimParameters)
{
    bool isStrict = true;
    if (redimParameters.size() == 2 &&
        redimParameters[1]->getParamType() == scidb::PARAM_PHYSICAL_EXPRESSION)
    {
        OperatorParamPhysicalExpression* paramExpr =
            static_cast<OperatorParamPhysicalExpression*>(redimParameters[1].get());
        SCIDB_ASSERT(paramExpr->isConstant());
        isStrict = paramExpr->getExpression()->evaluate().getBool();
    }
    return isStrict;
}
void CoordMetrics::accumulate(CoordMetrics::PositionPair const& pp)
{
    SCIDB_ASSERT(_ppValue.size() == sizeof(PositionPair));
    ::memcpy(_ppValue.data(), &pp, sizeof(PositionPair));

    AggState* asp = &_aggregates[indexOf(0, AI_ODC)];
    asp->agg->accumulateIfNeeded(asp->state, _ppValue);

    if (_wantRepeats)
    {
        asp = &_aggregates[indexOf(0, AI_COL)];
        asp->agg->accumulateIfNeeded(asp->state, _ppValue);
    }
}
void inferArrayAccess(std::shared_ptr<Query>& query)
{
    LogicalOperator::inferArrayAccess(query);
    SCIDB_ASSERT(_parameters.size() > 1);

    // from
    SCIDB_ASSERT(_parameters[0]->getParamType() == PARAM_ARRAY_REF);
    const string& oldArrayNameOrg =
        ((std::shared_ptr<OperatorParamReference>&)_parameters[0])->getObjectName();
    SCIDB_ASSERT(oldArrayNameOrg.find('@') == std::string::npos);

    std::string oldArrayName;
    std::string oldNamespaceName;
    query->getNamespaceArrayNames(oldArrayNameOrg, oldNamespaceName, oldArrayName);

    std::shared_ptr<SystemCatalog::LockDesc> lock(
        new SystemCatalog::LockDesc(oldNamespaceName,
                                    oldArrayName,
                                    query->getQueryID(),
                                    Cluster::getInstance()->getLocalInstanceId(),
                                    SystemCatalog::LockDesc::COORD,
                                    SystemCatalog::LockDesc::RNF));
    std::shared_ptr<SystemCatalog::LockDesc> resLock = query->requestLock(lock);
    SCIDB_ASSERT(resLock);
    SCIDB_ASSERT(resLock->getLockMode() >= SystemCatalog::LockDesc::RNF);

    // to
    SCIDB_ASSERT(_parameters[1]->getParamType() == PARAM_ARRAY_REF);
    const string& newArrayNameOrg =
        ((std::shared_ptr<OperatorParamReference>&)_parameters[1])->getObjectName();
    SCIDB_ASSERT(newArrayNameOrg.find('@') == std::string::npos);

    std::string newArrayName;
    std::string newNamespaceName;
    query->getNamespaceArrayNames(newArrayNameOrg, newNamespaceName, newArrayName);

    lock.reset(new SystemCatalog::LockDesc(newArrayName,
                                           query->getQueryID(),
                                           Cluster::getInstance()->getLocalInstanceId(),
                                           SystemCatalog::LockDesc::COORD,
                                           SystemCatalog::LockDesc::XCL));
    resLock = query->requestLock(lock);
    SCIDB_ASSERT(resLock);
    SCIDB_ASSERT(resLock->getLockMode() >= SystemCatalog::LockDesc::XCL);
}
bool
PhysicalQueryPlanNode::getInputIsStrict(const PhysicalOperator::Parameters& inputParameters)
{
    bool isStrict = true;
    if (inputParameters.size() == 6 &&
        inputParameters[5]->getParamType() == scidb::PARAM_PHYSICAL_EXPRESSION)
    {
        OperatorParamPhysicalExpression* paramExpr =
            static_cast<OperatorParamPhysicalExpression*>(inputParameters[5].get());
        SCIDB_ASSERT(paramExpr->isConstant());
        isStrict = paramExpr->getExpression()->evaluate().getBool();
    }
    else if (inputParameters.size() == 7)
    {
        ASSERT_EXCEPTION((inputParameters[6]->getParamType() == scidb::PARAM_PHYSICAL_EXPRESSION),
                         "Invalid input() parameters 6");

        OperatorParamPhysicalExpression* paramExpr =
            static_cast<OperatorParamPhysicalExpression*>(inputParameters[6].get());
        SCIDB_ASSERT(paramExpr->isConstant());
        isStrict = paramExpr->getExpression()->evaluate().getBool();
    }
    return isStrict;
}
/**
 * @see ConstChunk::getConstIterator()
 */
std::shared_ptr<ConstChunkIterator> WindowChunk::getConstIterator(int iterationMode) const
{
    SCIDB_ASSERT((NULL != _arrayIterator));
    ConstChunk const& inputChunk = _arrayIterator->iterator->getChunk();

    if (_array.getArrayDesc().getEmptyBitmapAttribute() &&
        _attrID == _array.getArrayDesc().getEmptyBitmapAttribute()->getId())
    {
        return inputChunk.getConstIterator((iterationMode & ~ChunkIterator::INTENDED_TILE_MODE) |
                                           ChunkIterator::IGNORE_OVERLAPS);
    }

    if (isMaterialized())
    {
        return std::shared_ptr<ConstChunkIterator>(
            new MaterializedWindowChunkIterator(*_arrayIterator, *this, iterationMode));
    }

    return std::shared_ptr<ConstChunkIterator>(
        new WindowChunkIterator(*_arrayIterator, *this, iterationMode));
}
/* Remove a data store from memory and disk */
void
DataStores::closeDataStore(DataStore::Guid guid, bool remove)
{
    DataStoreMap::iterator it;
    ScopedMutexLock sm(_dataStoreLock);

    SCIDB_ASSERT(_theDataStores);

    /* Check the map */
    it = _theDataStores->find(guid);
    if (it == _theDataStores->end())
    {
        /* It isn't in the map... maybe it hasn't been opened this time.
           If remove is specified we need to open it so we can remove it
           from disk.
         */
        if (remove)
        {
            stringstream filepath;
            filepath << _basePath << guid << ".data";
            it = _theDataStores->insert(
                make_pair(guid,
                          boost::make_shared<DataStore>(filepath.str().c_str(),
                                                        guid,
                                                        boost::ref(*this)))).first;
        }
        else
        {
            return;
        }
    }

    /* Remove it from the map */
    if (remove)
    {
        it->second->removeOnClose();
        it->second->removeFreelistFile();
    }
    _theDataStores->erase(it);
}
void RowCollection<Group,Hash>::appendItem(size_t& rowId, const Group& group, const vector<Value>& item)
{
    assert(_mode == RowCollectionModeAppend);

    // Prepare to append, if rowId is not known:
    // Get the rowId for the group.
    // If the group did not exist in the map, create it, and add an entry to _counts.
    //
    if (rowId == UNKNOWN_ROW_ID) {
        GroupToRowIdIterator it = _groupToRowId.find(group);
        if (it == _groupToRowId.end()) {
            rowId = _counts.size();
            assert(rowId == _groupToRowId.size());
            std::pair<typename GroupToRowId::iterator, bool> resultPair =
                _groupToRowId.insert(std::pair<Group, size_t>(group, rowId));
            SCIDB_ASSERT(resultPair.second); // insertion should succeed
            _counts.push_back(0);
        } else {
            rowId = it->second;
        }
    }

    // Append to the buffer.
    MapRowIdToItems::iterator it = _appendBuffer.find(rowId);
    if (it == _appendBuffer.end()) {
        std::pair<typename MapRowIdToItems::iterator, bool> resultPair =
            _appendBuffer.insert(std::pair<size_t, Items>(rowId, Items()));
        assert(resultPair.second); // insertion should succeed
        it = resultPair.first;
    }
    it->second.push_back(item);

    // If the size of the buffered data is too large, flush it.
    BOOST_FOREACH(Value const& v, item) {
        _sizeBuffered += v.size();
    }
    if (_sizeBuffered > _maxSizeBuffered) {
        flushBuffer();
    } else if ((_sizeBuffered % _chunkSize) == 0) {
        _query->validate();
    }
}
/* Find space for the chunk of indicated size in the DataStore. */
off_t
DataStore::allocateSpace(size_t requestedSize, size_t& allocatedSize)
{
    ScopedMutexLock sm(_dslock);
    off_t ret = 0;

    LOG4CXX_TRACE(logger, "datastore: allocate space " << requestedSize
                  << " for " << _file->getPath());

    invalidateFreelistFile();

    /* Round up required size to next power-of-two */
    size_t requiredSize = requestedSize + sizeof(DiskChunkHeader);
    if (requiredSize < _dsm->getMinAllocSize())
        requiredSize = _dsm->getMinAllocSize();
    requiredSize = roundUpPowerOf2(requiredSize);

    /* Check if the free lists have a chunk of the proper size */
    if (requiredSize > _largestFreeChunk)
    {
        makeMoreSpace(requiredSize);
    }
    SCIDB_ASSERT(requiredSize <= _largestFreeChunk);

    /* Look in the freelist to find a chunk to allocate. */
    ret = searchFreelist(requiredSize);
    allocatedSize = requiredSize;

    /* Update the largest free chunk */
    calcLargestFreeChunk();

    LOG4CXX_TRACE(logger, "datastore: allocate space " << requestedSize
                  << " for " << _file->getPath() << " returned " << ret);

    return ret;
}
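/*
 * Editor's note: the size actually allocated is the request plus a header, clamped
 * to a minimum and rounded up to a power of two. The stand-in roundUpPowerOf2()
 * below and the header/minimum constants are assumptions chosen only to make the
 * arithmetic concrete; the real helper and values may differ.
 */
#include <cstddef>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for roundUpPowerOf2(): smallest power of two >= sz.
static size_t roundUpPowerOf2(size_t sz)
{
    size_t p = 1;
    while (p < sz)
        p <<= 1;
    return p;
}

int main()
{
    const size_t headerSize = 64;      // assumed DiskChunkHeader size, for illustration
    const size_t minAllocSize = 512;   // assumed minimum allocation size

    size_t requestedSize = 3000;
    size_t requiredSize = requestedSize + headerSize;    // 3064
    if (requiredSize < minAllocSize)
        requiredSize = minAllocSize;
    requiredSize = roundUpPowerOf2(requiredSize);        // 4096

    std::cout << "request " << requestedSize
              << " -> allocate " << requiredSize << " bytes\n";
    return 0;
}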
/**
 * Calculate next value using materialized input chunk
 *
 * Private function used when the input chunk's contents
 * have been materialized. As we scan the cells in the input
 * chunk, this method computes the window aggregate(s) for each
 * non-empty cell in the input.
 */
void MaterializedWindowChunkIterator::calculateNextValue()
{
    Coordinates const& currPos = getPosition();
    Coordinates windowStart(_nDims);
    Coordinates windowEnd(_nDims);

    //
    // We need to check that we're not stepping out over the limit of the
    // array's dimensional boundaries when the chunk is at the array edge.
    for (size_t i = 0; i < _nDims; i++)
    {
        windowStart[i] = std::max(currPos[i] - _chunk._array._window[i]._boundaries.first,
                                  _chunk._array._dimensions[i].getStartMin());
        windowEnd[i] = std::min(currPos[i] + _chunk._array._window[i]._boundaries.second,
                                _chunk._array._dimensions[i].getEndMax());
    }

    uint64_t windowStartPos = _chunk.coord2pos(windowStart);
    uint64_t windowEndPos = _chunk.coord2pos(windowEnd);

    Value state;
    state.setNull(0);
    Coordinates probePos(_nDims);

    //
    // The _inputMap contains an entry for every non-NULL cell in the input.
    // So set markers at the start and the end of the window.
    map<uint64_t, Value>::const_iterator windowIteratorCurr = _inputMap.lower_bound(windowStartPos);
    map<uint64_t, Value>::const_iterator windowIteratorEnd = _inputMap.upper_bound(windowEndPos);

    while (windowIteratorCurr != windowIteratorEnd)
    {
        uint64_t pos = windowIteratorCurr->first;
        _chunk.pos2coord(pos, probePos);

        //
        // Sanity check. We should never go beyond the end of
        // the window as defined by the value of windowEndPos.
        SCIDB_ASSERT((windowStartPos <= windowEndPos));

        //
        // Check to see if this cell is outside the window's box.
        for (size_t i = 0; i < _nDims; i++)
        {
            if (probePos[i] < windowStart[i] || probePos[i] > windowEnd[i])
            {
                //
                // We're now out of the window box. So calculate
                // next probe position, reset windowIteratorCurr, and bounce
                // along.
                //
                // NOTE: This code is optimized for the 2D case.
                //       I could calculate, depending on the
                //       dimension that passed the disjunction
                //       above, precisely by how much I should
                //       step the probe. But to do so would
                //       complicate this logic, and probably
                //       won't help performance much.
                SCIDB_ASSERT((_nDims == _chunk._array._dimensions.size()));
                SCIDB_ASSERT((_nDims > 0));

                do
                {
                    windowStartPos += _chunk.getStep();
                } while (windowStartPos <= pos);

                windowIteratorCurr = _chunk._inputMap.lower_bound(windowStartPos);
                goto nextIter;
            }
        }

        _aggregate->accumulateIfNeeded(state, windowIteratorCurr->second);
        windowIteratorCurr++;

nextIter:;
    }

    _aggregate->finalResult(_nextValue, state);
}
/**
 * Private function to compute the coordinates from the position in the chunk
 */
inline void WindowChunk::pos2coord(uint64_t pos, Coordinates& coord) const
{
    SCIDB_ASSERT(_materialized);
    _mapper->pos2coord(pos, coord);
}
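/*
 * Editor's note: coord2pos()/pos2coord() assume a row-major linearization of the
 * chunk, and getStep() returns the interval of the last (fastest-varying) dimension.
 * The Mapper struct below is an illustrative stand-in, not SciDB's CoordinatesMapper;
 * it only sketches that mapping for a hypothetical 2-D chunk with chunk-relative
 * coordinates starting at 0.
 */
#include <cstdint>
#include <iostream>
#include <vector>

struct Mapper
{
    std::vector<int64_t> intervals;   // size of each dimension

    uint64_t coord2pos(const std::vector<int64_t>& c) const
    {
        uint64_t pos = 0;
        for (size_t i = 0; i < intervals.size(); ++i)
            pos = pos * intervals[i] + c[i];
        return pos;
    }

    void pos2coord(uint64_t pos, std::vector<int64_t>& c) const
    {
        c.assign(intervals.size(), 0);
        for (size_t i = intervals.size(); i-- > 0; )
        {
            c[i] = pos % intervals[i];
            pos /= intervals[i];
        }
    }
};

int main()
{
    Mapper m{{10, 10}};                    // a 10x10 chunk
    std::vector<int64_t> c{3, 7};
    uint64_t pos = m.coord2pos(c);         // 3*10 + 7 = 37

    std::vector<int64_t> back;
    m.pos2coord(pos, back);
    std::cout << "pos=" << pos << " coord=(" << back[0] << "," << back[1] << ")\n";

    // In this 2-D case, stepping the position by the last dimension's interval (10)
    // moves one whole row, which is how getStep() is used in calculateNextValue().
    return 0;
}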
bool CsvChunkLoader::loadChunk(boost::shared_ptr<Query>& query, size_t chunkIndex)
{
    // Must do EOF check *before* nextImplicitChunkPosition() call, or
    // we risk stepping out of bounds.
    if (_csvParser.empty()) {
        int ch = ::getc(fp());
        if (ch == EOF) {
            return false;
        }
        ::ungetc(ch, fp());
    }

    // Reposition and make sure all is cool.
    nextImplicitChunkPosition(MY_CHUNK);
    enforceChunkOrder("csv loader");

    // Initialize a chunk and chunk iterator for each attribute.
    Attributes const& attrs = schema().getAttributes();
    size_t nAttrs = attrs.size();
    vector< boost::shared_ptr<ChunkIterator> > chunkIterators(nAttrs);
    for (size_t i = 0; i < nAttrs; i++) {
        Address addr(i, _chunkPos);
        MemChunk& chunk = getLookaheadChunk(i, chunkIndex);
        chunk.initialize(array(), &schema(), addr, attrs[i].getDefaultCompressionMethod());
        chunkIterators[i] = chunk.getIterator(query,
                                              ChunkIterator::NO_EMPTY_CHECK |
                                              ConstChunkIterator::SEQUENTIAL_WRITE);
    }

    char const *field = 0;
    int rc = 0;
    bool sawData = false;
    bool sawEof = false;

    while (!chunkIterators[0]->end()) {

        _column = 0;
        array()->countCell();

        // Parse and write out a line's worth of fields.  NB if you
        // have to 'continue;' after a writeItem() call, make sure the
        // iterator (and possibly the _column) gets incremented.
        //
        for (size_t i = 0; i < nAttrs; ++i) {
            try {
                // Handle empty tag...
                if (i == emptyTagAttrId()) {
                    attrVal(i).setBool(true);
                    chunkIterators[i]->writeItem(attrVal(i));
                    ++(*chunkIterators[i]); // ...but don't increment _column.
                    continue;
                }

                // Parse out next input field.
                rc = _csvParser.getField(field);
                if (rc == CsvParser::END_OF_FILE) {
                    sawEof = true;
                    break;
                }
                if (rc == CsvParser::END_OF_RECORD) {
                    // Got record terminator, but we have more attributes!
                    throw USER_EXCEPTION(SCIDB_SE_IMPORT_ERROR, SCIDB_LE_OP_INPUT_TOO_FEW_FIELDS)
                        << _csvParser.getFileOffset()
                        << _csvParser.getRecordNumber()
                        << _column;
                }
                if (rc > 0) {
                    // So long as we never call _csvParser.setStrict(true), we should never see this.
                    throw USER_EXCEPTION(SCIDB_SE_IMPORT_ERROR, SCIDB_LE_CSV_PARSE_ERROR)
                        << _csvParser.getFileOffset()
                        << _csvParser.getRecordNumber()
                        << _column
                        << csv_strerror(rc);
                }
                SCIDB_ASSERT(rc == CsvParser::OK);
                SCIDB_ASSERT(field);
                sawData = true;

                // Process input field.
                if (mightBeNull(field) && attrs[i].isNullable()) {
                    int8_t missingReason = parseNullField(field);
                    if (missingReason >= 0) {
                        attrVal(i).setNull(missingReason);
                        chunkIterators[i]->writeItem(attrVal(i));
                        ++(*chunkIterators[i]);
                        _column += 1;
                        continue;
                    }
                }
                if (converter(i)) {
                    Value v;
                    v.setString(field);
                    const Value* vp = &v;
                    (*converter(i))(&vp, &attrVal(i), NULL);
                    chunkIterators[i]->writeItem(attrVal(i));
                }
                else {
                    TypeId const &tid = typeIdOfAttr(i);
                    if (attrs[i].isNullable() &&
                        (*field == '\0' || (iswhitespace(field) && IS_NUMERIC(tid)))) {
                        // [csv2scidb compat] With csv2scidb, empty strings (or for numeric
                        // fields, whitespace) became nulls if the target attribute was
                        // nullable.  We keep the same behavior.  (We should *not* do this
                        // for TSV, that format requires explicit nulls!)
                        attrVal(i).setNull();
                    } else {
                        StringToValue(tid, field, attrVal(i));
                    }
                    chunkIterators[i]->writeItem(attrVal(i));
                }
            }
            catch (Exception& ex) {
                _badField = field;
                _fileOffset = _csvParser.getFileOffset();
                array()->handleError(ex, chunkIterators[i], i);
            }

            _column += 1;
            ++(*chunkIterators[i]);
        }

        if (sawEof) {
            break;
        }

        // We should be at EOL now, otherwise there are too many fields on this line.
        // Post a warning: it seems useful not to complain too loudly about this or to
        // abort the load, but we do want to mention it.
        //
        rc = _csvParser.getField(field);
        if (!_tooManyWarning && (rc != CsvParser::END_OF_RECORD)) {
            _tooManyWarning = true;
            query->postWarning(SCIDB_WARNING(SCIDB_LE_OP_INPUT_TOO_MANY_FIELDS)
                               << _csvParser.getFileOffset()
                               << _csvParser.getRecordNumber()
                               << _column);
        }

        array()->completeShadowArrayRow(); // done with cell/record
    }

    for (size_t i = 0; i < nAttrs; i++) {
        if (chunkIterators[i]) {
            chunkIterators[i]->flush();
        }
    }

    return sawData;
}
/**
 * Calculate next value using materialized input chunk
 *
 * Private function used when the input chunk's contents
 * have been materialized. As we scan the cells in the input
 * chunk, this method computes the window aggregate(s) for each
 * non-empty cell in the input.
 *
 * NOTE: This implementation replaces the original, which is
 *       in the calculateNextValueOLD() function. If we don't
 *       encounter any problems with the NEW algorithm in the
 *       field, we'll get rid of the OLD completely and use
 *       the NEW all the time.
 */
void MaterializedWindowChunkIterator::calculateNextValueNEW()
{
    SCIDB_ASSERT((_nDims == _chunk._array._dimensions.size()));
    SCIDB_ASSERT((_nDims > 0));

    //
    // Where are we?
    Coordinates const& currPos = getPosition();

    //
    // Figure out the start and end positions of the window.
    //
    // We need to check that we're not stepping out over the limit of the
    // entire array's dimensional boundaries when the chunk is at the array edge.
    for (size_t i = 0; i < _nDims; i++)
    {
        _windowStartCoords[i] = std::max(currPos[i] - _chunk._array._window[i]._boundaries.first,
                                         _chunk._array._dimensions[i].getStartMin());
        _windowEndCoords[i] = std::min(currPos[i] + _chunk._array._window[i]._boundaries.second,
                                       _chunk._array._dimensions[i].getEndMax());
    }

    //
    // Set up the object we'll use to track the several start and
    // end stripes of the window.
    RegionCoordinatesIterator regionCoordinatesIterator(_windowStartCoords, _windowEndCoords);

    //
    // The map<> we're using to hold the data is keyed on the logical
    // position within the chunk, not on the coordinates. So we need to
    // convert the Coordinates for each stripe to the logical position.
    uint64_t windowStartPos = _chunk.coord2pos(regionCoordinatesIterator.getPosition());
    uint64_t windowEndPos = _chunk.coord2pos(_windowEndCoords);

    //
    // Data object used to hold the state of the aggregate as
    // we process the values in the window.
    _state.setNull(0);

    //
    // The _inputMap contains an entry for every non-NULL cell in the
    // input. So set iterators at the start and the end of the window
    // to ensure we're not going outside its bounds.
    map<uint64_t, Value>::const_iterator windowIteratorCurr = _inputMap.lower_bound(windowStartPos);
    map<uint64_t, Value>::const_iterator windowIteratorEnd = _inputMap.upper_bound(windowEndPos);

    //
    // We process the data a stripe at a time. So we need another iterator
    // to point to the end of the current "stripe".
    map<uint64_t, Value>::const_iterator endOfStripeIter;
    uint64_t stripeStartPos = 0, stripeEndPos = 0;

    while (windowIteratorCurr != windowIteratorEnd)
    {
        //
        // Where are we in coordinates space?
        _chunk.pos2coord(windowIteratorCurr->first, _stripeStartCoords);

        //
        // Find the "next" cell that's inside the window in coordinate
        // space ...
        regionCoordinatesIterator.advanceToAtLeast(_stripeStartCoords);

        //
        // We're inside a window. Let's find the map<> position at the 'start'
        // of the stripe ...
        _stripeStartCoords = regionCoordinatesIterator.getPosition();
        stripeStartPos = _chunk.coord2pos(_stripeStartCoords);
        windowIteratorCurr = _chunk._inputMap.lower_bound(stripeStartPos);

        //
        // Now find the coordinate at the 'end' of the stripe ...
        _stripeEndCoords = _stripeStartCoords;
        _stripeEndCoords[_nDims - 1] = _windowEndCoords[_nDims - 1];
        stripeEndPos = _chunk.coord2pos(_stripeEndCoords);
        endOfStripeIter = _inputMap.upper_bound(stripeEndPos);

        //
        // Check whether we found anything from this stripe in the map<> at all,
        // and if not proceed to the next stripe...
        if (windowIteratorCurr == endOfStripeIter)
            continue;

        //
        // Now, just zip through the stripe of values in the map<>,
        // accumulating data into the aggregate as we go.
        while (windowIteratorCurr != endOfStripeIter)
        {
            _aggregate->accumulateIfNeeded(_state, windowIteratorCurr->second);
            windowIteratorCurr++;
        }
    }

    //
    // Done. Finalize the aggregate and compute the result ...
    _aggregate->finalResult(_nextValue, _state);
}
void PhysicalQueryPlanNode::supplantChild(const PhysNodePtr& targetChild, const PhysNodePtr& newChild)
{
    assert(newChild);
    assert(targetChild);
    assert(newChild.get() != this);
    int removed = 0;
    std::vector<PhysNodePtr> newChildren;

    if (logger->isTraceEnabled()) {
        std::ostringstream os;
        os << "Supplanting targetChild Node:\n";
        targetChild->toString(os, 0 /*indent*/, false /*children*/);
        os << "\nwith\n";
        newChild->toString(os, 0 /*indent*/, false /*children*/);
        LOG4CXX_TRACE(logger, os.str());
    }

    for (auto &child : _childNodes) {
        if (child != targetChild) {
            newChildren.push_back(child);
        }
        else {
            // Set the parent of the newChild to this node.
            newChild->_parent = shared_from_this();

            // NOTE: Any existing children of the newChild are removed from the
            // Query Plan.
            if ((newChild->_childNodes).size() > 0) {
                LOG4CXX_INFO(logger,
                             "Child nodes of supplanting node are being removed from the tree.");
            }

            // Re-parent the children of the targetChild to the newChild
            newChild->_childNodes.swap(targetChild->_childNodes);
            for (auto grandchild : newChild->_childNodes) {
                assert(grandchild != newChild);
                grandchild->_parent = newChild;
            }

            // Remove any references to the children from the targetChild
            targetChild->_childNodes.clear();
            targetChild->resetParent();

            // Add the newChild to this node
            newChildren.push_back(newChild);
            ++removed;
        }
    }
    _childNodes.swap(newChildren);

    if (logger->isTraceEnabled()) {
        std::ostringstream os;
        newChild->toString(os);
        LOG4CXX_TRACE(logger, "New Node subplan:\n" << os.str());
    }

    SCIDB_ASSERT(removed == 1);
}