size_t Command::alloc_( NodePtr node, LocalNodePtr localNode,
                        const uint64_t size )
{
    EQ_TS_THREAD( _writeThread );
    EQASSERT( _refCount == 0 );
    EQASSERTINFO( !_func.isValid(), *this );

    size_t allocated = 0;
    if( !_data )
    {
        _dataSize = EQ_MAX( Packet::minSize, size );
        _data = static_cast< Packet* >( malloc( _dataSize ));
        allocated = _dataSize;
    }
    else if( size > _dataSize )
    {
        allocated = size - _dataSize;
        _dataSize = EQ_MAX( Packet::minSize, size );
        free( _data );
        _data = static_cast< Packet* >( malloc( _dataSize ));
    }

    _node = node;
    _localNode = localNode;
    _refCountMaster = 0;
    _func.clear();

    _packet = _data;
    _packet->size = size;
    return allocated;
}
void TreeEqualizer::_notifyLoadData( Node* node, Channel* channel,
                                     const uint32_t nStatistics,
                                     const Statistic* statistics )
{
    if( !node )
        return;

    _notifyLoadData( node->left, channel, nStatistics, statistics );
    _notifyLoadData( node->right, channel, nStatistics, statistics );

    if( !node->compound || node->compound->getChannel() != channel )
        return;

    // gather relevant load data
    const uint32_t taskID = node->compound->getTaskID();
    int64_t startTime = std::numeric_limits< int64_t >::max();
    int64_t endTime = 0;
    bool loadSet = false;
    int64_t timeTransmit = 0;
    for( uint32_t i = 0; i < nStatistics && !loadSet; ++i )
    {
        const Statistic& stat = statistics[ i ];
        if( stat.task != taskID ) // from different compound
            continue;

        switch( stat.type )
        {
            case Statistic::CHANNEL_CLEAR:
            case Statistic::CHANNEL_DRAW:
            case Statistic::CHANNEL_READBACK:
                startTime = EQ_MIN( startTime, stat.startTime );
                endTime = EQ_MAX( endTime, stat.endTime );
                break;

            case Statistic::CHANNEL_FRAME_TRANSMIT:
                timeTransmit += stat.endTime - stat.startTime;
                break;

            // assemble blocks on input frames, stop using subsequent data
            case Statistic::CHANNEL_ASSEMBLE:
                loadSet = true;
                break;

            default:
                break;
        }
    }

    if( startTime == std::numeric_limits< int64_t >::max( ))
        return;

    node->time = endTime - startTime;
    node->time = EQ_MAX( node->time, 1 );
    node->time = EQ_MAX( node->time, timeTransmit );
}
void FramerateEqualizer::_init()
{
    const Compound* compound = getCompound();

    if( _nSamples > 0 || !compound )
        return;

    _nSamples = 1;

    // Subscribe to child channel load events
    const Compounds& children = compound->getChildren();

    EQASSERT( _loadListeners.empty( ));
    _loadListeners.resize( children.size( ));

    for( size_t i = 0; i < children.size(); ++i )
    {
        Compound* child = children[i];
        const uint32_t period = child->getInheritPeriod();
        LoadListener& loadListener = _loadListeners[i];

        loadListener.parent = this;
        loadListener.period = period;

        LoadSubscriber subscriber( &loadListener );
        child->accept( subscriber );

        _nSamples = EQ_MAX( _nSamples, period );
    }

    _nSamples = EQ_MIN( _nSamples, 100 );
}
void FramerateEqualizer::LoadListener::notifyLoadData(
    Channel* channel, const uint32_t frameNumber, const uint32_t nStatistics,
    const eq::Statistic* statistics )
{
    // gather required load data
    int64_t startTime = std::numeric_limits< int64_t >::max();
    int64_t endTime = 0;
    for( uint32_t i = 0; i < nStatistics; ++i )
    {
        const eq::Statistic& data = statistics[i];
        switch( data.type )
        {
            case eq::Statistic::CHANNEL_CLEAR:
            case eq::Statistic::CHANNEL_DRAW:
            case eq::Statistic::CHANNEL_ASSEMBLE:
            case eq::Statistic::CHANNEL_READBACK:
                startTime = EQ_MIN( startTime, data.startTime );
                endTime = EQ_MAX( endTime, data.endTime );
                break;

            default:
                break;
        }
    }

    if( startTime == std::numeric_limits< int64_t >::max( ))
        return;

    if( startTime == endTime ) // very fast draws might report 0 times
        ++endTime;

    for( std::deque< FrameTime >::iterator i = parent->_times.begin();
         i != parent->_times.end(); ++i )
    {
        FrameTime& frameTime = *i;
        if( frameTime.first != frameNumber )
            continue;

        const float time = static_cast< float >( endTime - startTime ) / period;
        frameTime.second = EQ_MAX( frameTime.second, time );

        EQLOG( LOG_LB2 ) << "Frame " << frameNumber << " channel "
                         << channel->getName() << " time " << time
                         << " period " << period << std::endl;
    }
}
void Channel::_updateNearFar( const mesh::BoundingSphere& boundingSphere )
{
    // compute dynamic near/far plane of whole model
    const FrameData& frameData = _getFrameData();

    const eq::Matrix4f& rotation = frameData.getCameraRotation();
    const eq::Matrix4f headTransform = getHeadTransform() * rotation;

    eq::Matrix4f modelInv;
    compute_inverse( headTransform, modelInv );

    const eq::Vector3f zero = modelInv * eq::Vector3f::ZERO;
    eq::Vector3f front = modelInv * eq::Vector3f( 0.0f, 0.0f, -1.0f );

    front -= zero;
    front.normalize();
    front *= boundingSphere.w();

    const eq::Vector3f center =
        frameData.getCameraPosition().get_sub_vector< 3 >() -
        boundingSphere.get_sub_vector< 3 >();
    const eq::Vector3f nearPoint = headTransform * ( center - front );
    const eq::Vector3f farPoint  = headTransform * ( center + front );

    if( useOrtho( ))
    {
        EQASSERTINFO( fabs( farPoint.z() - nearPoint.z() ) >
                      std::numeric_limits< float >::epsilon(),
                      nearPoint << " == " << farPoint );
        setNearFar( -nearPoint.z(), -farPoint.z() );
    }
    else
    {
        // estimate minimal value of near plane based on frustum size
        const eq::Frustumf& frustum = getFrustum();
        const float width  = fabs( frustum.right() - frustum.left() );
        const float height = fabs( frustum.top() - frustum.bottom() );
        const float size = EQ_MIN( width, height );
        const float minNear = frustum.near_plane() / size * .001f;

        const float zNear = EQ_MAX( minNear, -nearPoint.z() );
        const float zFar  = EQ_MAX( zNear * 2.f, -farPoint.z() );

        setNearFar( zNear, zFar );
    }
}
void FrameData::adjustQuality( const float delta )
{
    _quality += delta;
    _quality = EQ_MAX( _quality, 0.1f );
    _quality = EQ_MIN( _quality, 1.0f );
    setDirty( DIRTY_FLAGS );
    EQINFO << "Set non-idle image quality to " << _quality << std::endl;
}
void Channel::frameAssemble( const eq::uint128_t& frameID )
{
    if( stopRendering( ))
        return;

    if( _isDone( ))
        return;

    Accum& accum = _accum[ co::base::getIndexOfLastBit( getEye()) ];

    if( getPixelViewport() != _currentPVP )
    {
        accum.transfer = true;

        if( accum.buffer && !accum.buffer->usesFBO( ))
        {
            EQWARN << "Current viewport different from view viewport, ";
            EQWARN << "idle anti-aliasing not implemented." << std::endl;
            accum.step = 0;
        }

        eq::Channel::frameAssemble( frameID );
        return;
    }
    // else

    accum.transfer = true;
    const eq::Frames& frames = getInputFrames();

    for( eq::Frames::const_iterator i = frames.begin(); i != frames.end(); ++i )
    {
        eq::Frame* frame = *i;
        const eq::SubPixel& curSubPixel = frame->getSubPixel();

        if( curSubPixel != eq::SubPixel::ALL )
            accum.transfer = false;

        accum.stepsDone = EQ_MAX( accum.stepsDone,
                                  frame->getSubPixel().size *
                                  frame->getPeriod( ));
    }

    applyBuffer();
    applyViewport();
    setupAssemblyState();

    try
    {
        eq::Compositor::assembleFrames( getInputFrames(), this, accum.buffer );
    }
    catch( const co::Exception& e )
    {
        EQWARN << e.what() << std::endl;
    }

    resetAssemblyState();
}
void LoadEqualizer::_updateNode( Node* node )
{
    Node* left = node->left;
    Node* right = node->right;

    EQASSERT( left );
    EQASSERT( right );

    _update( left );
    _update( right );

    node->resources = left->resources + right->resources;

    if( left->resources == 0.f )
    {
        node->maxSize = right->maxSize;
        node->boundary2i = right->boundary2i;
        node->boundaryf = right->boundaryf;
    }
    else if( right->resources == 0.f )
    {
        node->maxSize = left->maxSize;
        node->boundary2i = left->boundary2i;
        node->boundaryf = left->boundaryf;
    }
    else
    {
        switch( node->mode )
        {
            case MODE_VERTICAL:
                node->maxSize.x() = left->maxSize.x() + right->maxSize.x();
                node->maxSize.y() = EQ_MIN( left->maxSize.y(),
                                            right->maxSize.y());
                node->boundary2i.x() = left->boundary2i.x() +
                                       right->boundary2i.x();
                node->boundary2i.y() = EQ_MAX( left->boundary2i.y(),
                                               right->boundary2i.y());
                node->boundaryf = EQ_MAX( left->boundaryf, right->boundaryf );
                break;
            case MODE_HORIZONTAL:
                node->maxSize.x() = EQ_MIN( left->maxSize.x(),
                                            right->maxSize.x());
                node->maxSize.y() = left->maxSize.y() + right->maxSize.y();
                node->boundary2i.x() = EQ_MAX( left->boundary2i.x(),
                                               right->boundary2i.x() );
                node->boundary2i.y() = left->boundary2i.y() +
                                       right->boundary2i.y();
                node->boundaryf = EQ_MAX( left->boundaryf, right->boundaryf );
                break;
            case MODE_DB:
                node->boundary2i.x() = EQ_MAX( left->boundary2i.x(),
                                               right->boundary2i.x() );
                node->boundary2i.y() = EQ_MAX( left->boundary2i.y(),
                                               right->boundary2i.y() );
                node->boundaryf = left->boundaryf + right->boundaryf;
                break;
            default:
                EQUNIMPLEMENTED;
        }
    }
}
bool InstanceCache::add( const ObjectVersion& rev, const uint32_t instanceID,
                         Command& command, const uint32_t usage )
{
#ifdef EQ_INSTRUMENT_CACHE
    ++nWrite;
#endif

    const NodeID nodeID = command.getNode()->getNodeID();

    base::ScopedMutex<> mutex( _items );
    ItemHash::const_iterator i = _items->find( rev.identifier );
    if( i == _items->end( ))
    {
        Item& item = _items.data[ rev.identifier ];
        item.data.masterInstanceID = instanceID;
        item.from = nodeID;
    }

    Item& item = _items.data[ rev.identifier ];
    if( item.data.masterInstanceID != instanceID || item.from != nodeID )
    {
        EQASSERT( !item.access ); // same master with different instance ID?!
        if( item.access != 0 ) // are accessed - don't add
            return false;

        // trash data from different master mapping
        _releaseStreams( item );
        item.data.masterInstanceID = instanceID;
        item.from = nodeID;
        item.used = usage;
    }
    else
        item.used = EQ_MAX( item.used, usage );

    if( item.data.versions.empty( ))
    {
        item.data.versions.push_back( new ObjectDataIStream );
        item.times.push_back( _clock.getTime64( ));
    }
    else if( item.data.versions.back()->getPendingVersion() == rev.version )
    {
        if( item.data.versions.back()->isReady( ))
        {
#ifdef EQ_INSTRUMENT_CACHE
            ++nWriteReady;
#endif
            return false; // Already have stream
        }
        // else append data to stream
    }
    else
    {
        const ObjectDataIStream* previous = item.data.versions.back();
        EQASSERT( previous->isReady( ));

        const uint128_t previousVersion = previous->getPendingVersion();
        if( previousVersion > rev.version )
        {
#ifdef EQ_INSTRUMENT_CACHE
            ++nWriteOld;
#endif
            return false;
        }
        if( ( previousVersion + 1 ) != rev.version ) // hole
        {
            EQASSERT( previousVersion < rev.version );

            if( item.access != 0 ) // are accessed - don't add
                return false;

            _releaseStreams( item );
        }
        else
        {
            EQASSERT( previous->isReady( ));
        }
        item.data.versions.push_back( new ObjectDataIStream );
        item.times.push_back( _clock.getTime64( ));
    }

    EQASSERT( !item.data.versions.empty( ));
    ObjectDataIStream* stream = item.data.versions.back();

    stream->addDataPacket( command );

    if( stream->isReady( ))
        _size += stream->getDataSize();

    _releaseItems( 1 );
    _releaseItems( 0 );

#ifdef EQ_INSTRUMENT_CACHE
    if( _items->find( rev.identifier ) != _items->end( ))
        ++nWriteHit;
    else
        ++nWriteMiss;
#endif
    return true;
}
void FramerateEqualizer::notifyUpdatePre( Compound* compound,
                                          const uint32_t frameNumber )
{
    _init();

    // find starting point of contiguous block
    const ssize_t size = static_cast< ssize_t >( _times.size( ));
    ssize_t from = 0;
    if( size > 0 )
    {
        for( ssize_t i = size - 1; i >= 0; --i )
        {
            if( _times[i].second != 0.f )
                continue;

            from = i;
            break;
        }
    }

    // find max / avg time in block
    size_t nSamples = 0;
#ifdef USE_AVERAGE
    float sumTime = 0.f;
#else
    float maxTime = 0.f;
#endif

    for( ++from; from < size && nSamples < _nSamples; ++from )
    {
        const FrameTime& time = _times[from];
        EQASSERT( time.first > 0 );
        EQASSERT( time.second != 0.f );

        ++nSamples;
#ifdef USE_AVERAGE
        sumTime += time.second;
#else
        maxTime = EQ_MAX( maxTime, time.second );
#endif
        EQLOG( LOG_LB2 ) << "Using " << time.first << ", " << time.second
                         << "ms" << std::endl;
    }

    if( nSamples == _nSamples )       // If we have a full set
        while( from < static_cast< ssize_t >( _times.size( )))
            _times.pop_back();        // delete all older samples

    if( isFrozen() || !compound->isRunning( ))
    {
        // always execute code above to not leak memory
        compound->setMaxFPS( std::numeric_limits< float >::max( ));
        return;
    }

    if( nSamples > 0 )
    {
        //TODO: totalTime *= 1.f - damping;
#ifdef USE_AVERAGE
        const float time = (sumTime / nSamples) * SLOWDOWN;
#else
        const float time = maxTime * SLOWDOWN;
#endif

        const float fps = 1000.f / time;
#ifdef VSYNC_CAP
        if( fps > VSYNC_CAP )
            compound->setMaxFPS( std::numeric_limits< float >::max( ));
        else
#endif
            compound->setMaxFPS( fps );

        EQLOG( LOG_LB2 ) << fps << " Hz from " << nSamples << "/"
                         << _times.size() << " samples, " << time << "ms"
                         << std::endl;
    }

    _times.push_front( FrameTime( frameNumber, 0.f ));
    EQASSERT( _times.size() < 210 );
}
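// Worked example (illustrative values, not taken from the sources): with a
// slowest sampled frame time of 25 ms and a SLOWDOWN factor of 1.05, the cap
// becomes 1000 / (25 * 1.05) ~= 38 Hz. With VSYNC_CAP defined, a computed rate
// above that cap removes the limit entirely and leaves throttling to vsync.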
Command& CommandCache::_newCommand( const Cache which )
{
    EQ_TS_THREAD( _thread );

    Data& cache = _cache[ which ];
    const uint32_t cacheSize = uint32_t( cache.size( ));
    base::a_int32_t& freeCounter = _free[ which ];
    EQASSERTINFO( size_t( freeCounter ) <= cacheSize,
                  freeCounter << " > " << cacheSize );

    if( freeCounter > 0 )
    {
        EQASSERT( cacheSize > 0 );

        const DataCIter end = _position[ which ];
        DataCIter& i = _position[ which ];

        for( ++i; i != end; ++i )
        {
            if( i == cache.end( ))
                i = cache.begin();
#ifdef PROFILE
            ++_lookups;
#endif
            Command* command = *i;
            if( command->isFree( ))
            {
#ifdef PROFILE
                const long hits = ++_hits;
                if( (hits%1000) == 0 )
                {
                    for( size_t j = 0; j < CACHE_ALL; ++j )
                    {
                        size_t size = 0;
                        const Data& cmds = _cache[ j ];
                        for( DataCIter k = cmds.begin(); k != cmds.end(); ++k )
                            size += (*k)->getAllocationSize();

                        EQINFO << _hits << "/" << _hits + _misses << " hits, "
                               << _lookups << " lookups, " << _free[j]
                               << " of " << cmds.size() << " packets free, "
                               << _allocs << " allocs, " << _frees
                               << " frees, " << size / 1024 << "KB"
                               << std::endl;
                    }
                }
#endif
                return *command;
            }
        }
    }
#ifdef PROFILE
    ++_misses;
#endif

    const uint32_t add = (cacheSize >> 3) + 1;
    for( size_t j = 0; j < add; ++j )
        cache.push_back( new Command( freeCounter ));

    freeCounter += add;
    const int32_t num = int32_t( cache.size() >> _freeShift );
    _maxFree[ which ] = EQ_MAX( _minFree[ which ], num );
    _position[ which ] = cache.begin();

#ifdef PROFILE
    _allocs += add;
#endif

    return *( cache.back( ));
}
void Channel::frameViewFinish( const eq::uint128_t& frameID )
{
    if( stopRendering( ))
        return;

    applyBuffer();

    const FrameData& frameData = _getFrameData();
    Accum& accum = _accum[ co::base::getIndexOfLastBit( getEye()) ];

    if( accum.buffer )
    {
        const eq::PixelViewport& pvp = getPixelViewport();
        const bool isResized = accum.buffer->resize( pvp.w, pvp.h );

        if( isResized )
        {
            const View* view = static_cast< const View* >( getView( ));
            accum.buffer->clear();
            accum.step = view->getIdleSteps();
            accum.stepsDone = 0;
        }
        else if( frameData.isIdle( ))
        {
            setupAssemblyState();

            if( !_isDone() && accum.transfer )
                accum.buffer->accum();
            accum.buffer->display();

            resetAssemblyState();
        }
    }

    applyViewport();
    _drawOverlay();
    _drawHelp();

    if( frameData.useStatistics())
        drawStatistics();

    ConfigEvent event;
    event.data.originator = getID();
    event.data.type = ConfigEvent::IDLE_AA_LEFT;

    if( frameData.isIdle( ))
    {
        event.steps = 0;
        for( size_t i = 0; i < eq::NUM_EYES; ++i )
            event.steps = EQ_MAX( event.steps, _accum[i].step );
    }
    else
    {
        const View* view = static_cast< const View* >( getView( ));
        event.steps = view ? view->getIdleSteps() : 0;
    }

    // if _jitterStep == 0 and no user redraw event happened, the app will exit
    // FSAA idle mode and block on the next redraw event.
    eq::Config* config = getConfig();
    config->sendEvent( event );
}
void FullMasterCM::addSlave( Command& command, NodeMapObjectReplyPacket& reply )
{
    EQ_TS_THREAD( _cmdThread );
    EQASSERT( command->type == PACKETTYPE_CO_NODE );
    EQASSERT( command->command == CMD_NODE_MAP_OBJECT );

    NodePtr node = command.getNode();
    const NodeMapObjectPacket* packet = command.get< NodeMapObjectPacket >();
    const uint128_t requested = packet->requestedVersion;
    const uint32_t instanceID = packet->instanceID;

    Mutex mutex( _slaves );
    EQASSERT( _version != VERSION_NONE );
    _checkConsistency();

    // add to subscribers
    ++_slavesCount[ node->getNodeID() ];
    _slaves->push_back( node );
    stde::usort( *_slaves );

    if( requested == VERSION_NONE ) // no data to send
    {
        _sendEmptyVersion( node, instanceID, _version );
        reply.version = _version;
        return;
    }

    const uint128_t oldest = _instanceDatas.front()->os.getVersion();
    uint128_t start = (requested == VERSION_OLDEST || requested < oldest ) ?
                          oldest : requested;
    uint128_t end = _version;
    const bool useCache = packet->masterInstanceID == _object->getInstanceID();

#ifndef NDEBUG
    if( requested != VERSION_OLDEST && requested < start )
        EQINFO << "Mapping version " << start
               << " instead of requested version " << requested << std::endl;
#endif

    reply.version = start;
    reply.useCache = packet->useCache && useCache;

    if( reply.useCache )
    {
        if( packet->minCachedVersion <= start &&
            packet->maxCachedVersion >= start )
        {
#ifdef EQ_INSTRUMENT_MULTICAST
            _hit += packet->maxCachedVersion + 1 - start;
#endif
            start = packet->maxCachedVersion + 1;
        }
        else if( packet->maxCachedVersion == end )
        {
            end = EQ_MAX( start, packet->minCachedVersion - 1 );
#ifdef EQ_INSTRUMENT_MULTICAST
            _hit += _version - end;
#endif
        }
        // TODO else cached block in the middle, send head and tail elements
    }

#if 0
    EQLOG( LOG_OBJECTS ) << *_object << ", instantiate on "
                         << node->getNodeID() << " with v"
                         << ((requested == VERSION_OLDEST) ? oldest : requested)
                         << " (" << requested << ") sending " << start << ".."
                         << end << " have " << _version - _nVersions << ".."
                         << _version << " " << _instanceDatas.size()
                         << std::endl;
#endif

    EQASSERT( start >= oldest );

    // send all instance datas from start..end
    InstanceDataDeque::iterator i = _instanceDatas.begin();
    while( i != _instanceDatas.end() && (*i)->os.getVersion() < start )
        ++i;

    for( ; i != _instanceDatas.end() && (*i)->os.getVersion() <= end; ++i )
    {
        InstanceData* data = *i;
        EQASSERT( data );

        data->os.sendMapData( node, instanceID );
#ifdef EQ_INSTRUMENT_MULTICAST
        ++_miss;
#endif
    }

#ifdef EQ_INSTRUMENT_MULTICAST
    if( _miss % 100 == 0 )
        EQINFO << "Cached " << _hit << "/" << _hit + _miss
               << " instance data transmissions" << std::endl;
#endif
}
void LoadEqualizer::_computeSplit( Node* node, const float time,
                                   LBDatas* datas, const Viewport& vp,
                                   const Range& range )
{
    EQLOG( LOG_LB2 ) << "_computeSplit " << vp << ", " << range << " time "
                     << time << std::endl;
    EQASSERTINFO( vp.isValid(), vp );
    EQASSERTINFO( range.isValid(), range );
    EQASSERTINFO( node->resources > 0.f || !vp.hasArea() || !range.hasData(),
                  "Assigning " << node->resources << " work to viewport "
                               << vp << ", " << range );

    Compound* compound = node->compound;
    if( compound )
    {
        _assign( compound, vp, range );
        return;
    }

    EQASSERT( node->left && node->right );

    LBDatas workingSet = datas[ node->mode ];
    const float leftTime = node->resources > 0 ?
        time * node->left->resources / node->resources : 0.f;
    float timeLeft = EQ_MIN( leftTime, time ); // correct for fp rounding error

    switch( node->mode )
    {
        case MODE_VERTICAL:
        {
            EQASSERT( range == Range::ALL );

            float splitPos = vp.x;
            const float end = vp.getXEnd();

            while( timeLeft > std::numeric_limits< float >::epsilon() &&
                   splitPos < end )
            {
                EQLOG( LOG_LB2 ) << timeLeft << "ms left using "
                                 << workingSet.size() << " tiles" << std::endl;

                // remove all irrelevant items from working set
                for( LBDatas::iterator i = workingSet.begin();
                     i != workingSet.end(); )
                {
                    const Data& data = *i;
                    if( data.vp.getXEnd() > splitPos )
                        ++i;
                    else
                        i = workingSet.erase( i );
                }
                if( workingSet.empty( ))
                    break;

                // find next 'discontinuity' in loads
                float currentPos = 1.0f;
                for( LBDatas::const_iterator i = workingSet.begin();
                     i != workingSet.end(); ++i )
                {
                    const Data& data = *i;
                    if( data.vp.x > splitPos && data.vp.x < currentPos )
                        currentPos = data.vp.x;
                    const float xEnd = data.vp.getXEnd();
                    if( xEnd > splitPos && xEnd < currentPos )
                        currentPos = xEnd;
                }

                const float width = currentPos - splitPos;
                EQASSERTINFO( width > 0.f, currentPos << "<=" << splitPos );
                EQASSERT( currentPos <= 1.0f );

                // accumulate normalized load in splitPos...currentPos
                EQLOG( LOG_LB2 ) << "Computing load in X " << splitPos
                                 << "..." << currentPos << std::endl;
                float currentTime = 0.f;
                for( LBDatas::const_iterator i = workingSet.begin();
                     i != workingSet.end(); ++i )
                {
                    const Data& data = *i;

                    if( data.vp.x >= currentPos ) // not yet needed data sets
                        break;

                    float yContrib = data.vp.h;
                    if( data.vp.y < vp.y )
                        yContrib -= (vp.y - data.vp.y);

                    const float dataEnd = data.vp.getYEnd();
                    const float vpEnd = vp.getYEnd();
                    if( dataEnd > vpEnd )
                        yContrib -= (dataEnd - vpEnd);

                    if( yContrib > 0.f )
                    {
                        const float percentage = ( width / data.vp.w ) *
                                                 ( yContrib / data.vp.h );
                        currentTime += ( data.time * percentage );

                        EQLOG( LOG_LB2 )
                            << data.vp << " contributes " << yContrib
                            << " in " << vp.h << " (" << percentage
                            << ") with " << data.time << ": "
                            << ( data.time * percentage ) << " vp.y " << vp.y
                            << " dataEnd " << dataEnd << " vpEnd " << vpEnd
                            << std::endl;
                        EQASSERT( percentage < 1.01f );
                    }
                }

                EQLOG( LOG_LB2 ) << splitPos << "..." << currentPos << ": t="
                                 << currentTime << " of " << timeLeft
                                 << std::endl;

                if( currentTime >= timeLeft ) // found last region
                {
                    splitPos += ( width * timeLeft / currentTime );
                    timeLeft = 0.0f;
                }
                else
                {
                    timeLeft -= currentTime;
                    splitPos = currentPos;
                }
            }

            EQLOG( LOG_LB2 ) << "Should split at X " << splitPos << std::endl;
            splitPos = (1.f - _damping) * splitPos + _damping * node->split;
            EQLOG( LOG_LB2 ) << "Dampened split at X " << splitPos
                             << std::endl;

            // There might be more time left due to MIN_PIXEL rounding by parent
            // EQASSERTINFO( timeLeft <= .001f, timeLeft );

            // Ensure minimum size
            const Compound* root = getCompound();
            const float pvpW =
                static_cast< float >( root->getInheritPixelViewport().w );
            const float boundary =
                static_cast< float >( node->boundary2i.x()) / pvpW;

            if( node->left->resources == 0.f )
                splitPos = vp.x;
            else if( node->right->resources == 0.f )
                splitPos = end;
            else if( boundary > 0 )
            {
                const float lengthRight = vp.getXEnd() - splitPos;
                const float lengthLeft = splitPos - vp.x;
                const float maxRight =
                    static_cast< float >( node->right->maxSize.x( )) / pvpW;
                const float maxLeft =
                    static_cast< float >( node->left->maxSize.x( )) / pvpW;

                if( lengthRight > maxRight )
                    splitPos = end - maxRight;
                else if( lengthLeft > maxLeft )
                    splitPos = vp.x + maxLeft;

                if( (splitPos - vp.x) < boundary )
                    splitPos = vp.x + boundary;
                if( (end - splitPos) < boundary )
                    splitPos = end - boundary;

                const uint32_t ratio =
                    static_cast< uint32_t >( splitPos / boundary + .5f );
                splitPos = ratio * boundary;
            }

            splitPos = EQ_MAX( splitPos, vp.x );
            splitPos = EQ_MIN( splitPos, end);

            EQLOG( LOG_LB2 ) << "Constrained split " << vp << " at X "
                             << splitPos << std::endl;
            node->split = splitPos;

            // balance children
            Viewport childVP = vp;
            childVP.w = (splitPos - vp.x);
            _computeSplit( node->left, leftTime, datas, childVP, range );

            childVP.x = childVP.getXEnd();
            childVP.w = end - childVP.x;

            // Fix 2994111: Rounding errors with 2D LB and 16 sources
            // Floating point rounding may create a width for the 'right'
            // child which is slightly below the parent width. Correct it.
            while( childVP.getXEnd() < end )
                childVP.w += std::numeric_limits< float >::epsilon();

            _computeSplit( node->right, time-leftTime, datas, childVP, range );
            break;
        }

        case MODE_HORIZONTAL:
        {
            EQASSERT( range == Range::ALL );

            float splitPos = vp.y;
            const float end = vp.getYEnd();

            while( timeLeft > std::numeric_limits< float >::epsilon() &&
                   splitPos < end )
            {
                EQLOG( LOG_LB2 ) << timeLeft << "ms left using "
                                 << workingSet.size() << " tiles" << std::endl;

                // remove all irrelevant items from working set
                for( LBDatas::iterator i = workingSet.begin();
                     i != workingSet.end(); )
                {
                    const Data& data = *i;
                    if( data.vp.getYEnd() > splitPos )
                        ++i;
                    else
                        i = workingSet.erase( i );
                }
                if( workingSet.empty( ))
                    break;

                // find next 'discontinuity' in loads
                float currentPos = 1.0f;
                for( LBDatas::const_iterator i = workingSet.begin();
                     i != workingSet.end(); ++i )
                {
                    const Data& data = *i;
                    if( data.vp.y > splitPos && data.vp.y < currentPos )
                        currentPos = data.vp.y;
                    const float yEnd = data.vp.getYEnd();
                    if( yEnd > splitPos && yEnd < currentPos )
                        currentPos = yEnd;
                }

                const float height = currentPos - splitPos;
                EQASSERTINFO( height > 0.f, currentPos << "<=" << splitPos );
                EQASSERT( currentPos <= 1.0f );

                // accumulate normalized load in splitPos...currentPos
                EQLOG( LOG_LB2 ) << "Computing load in Y " << splitPos
                                 << "..." << currentPos << std::endl;
                float currentTime = 0.f;
                for( LBDatas::const_iterator i = workingSet.begin();
                     i != workingSet.end(); ++i )
                {
                    const Data& data = *i;

                    if( data.vp.y >= currentPos ) // not yet needed data sets
                        break;

                    float xContrib = data.vp.w;
                    if( data.vp.x < vp.x )
                        xContrib -= (vp.x - data.vp.x);

                    const float dataEnd = data.vp.getXEnd();
                    const float vpEnd = vp.getXEnd();
                    if( dataEnd > vpEnd )
                        xContrib -= (dataEnd - vpEnd);

                    if( xContrib > 0.f )
                    {
                        const float percentage = ( height / data.vp.h ) *
                                                 ( xContrib / data.vp.w );
                        currentTime += ( data.time * percentage );

                        EQLOG( LOG_LB2 )
                            << data.vp << " contributes " << xContrib
                            << " in " << vp.w << " (" << percentage
                            << ") with " << data.time << ": "
                            << ( data.time * percentage ) << " total "
                            << currentTime << " vp.x " << vp.x << " dataEnd "
                            << dataEnd << " vpEnd " << vpEnd << std::endl;
                        EQASSERT( percentage < 1.01f );
                    }
                }

                EQLOG( LOG_LB2 ) << splitPos << "..." << currentPos << ": t="
                                 << currentTime << " of " << timeLeft
                                 << std::endl;

                if( currentTime >= timeLeft ) // found last region
                {
                    splitPos += (height * timeLeft / currentTime );
                    timeLeft = 0.0f;
                }
                else
                {
                    timeLeft -= currentTime;
                    splitPos = currentPos;
                }
            }

            EQLOG( LOG_LB2 ) << "Should split at Y " << splitPos << std::endl;
            splitPos = (1.f - _damping) * splitPos + _damping * node->split;
            EQLOG( LOG_LB2 ) << "Dampened split at Y " << splitPos
                             << std::endl;

            const Compound* root = getCompound();
            const float pvpH =
                static_cast< float >( root->getInheritPixelViewport().h );
            const float boundary =
                static_cast< float >(node->boundary2i.y( )) / pvpH;

            if( node->left->resources == 0.f )
                splitPos = vp.y;
            else if( node->right->resources == 0.f )
                splitPos = end;
            else if ( boundary > 0 )
            {
                const float lengthRight = vp.getYEnd() - splitPos;
                const float lengthLeft = splitPos - vp.y;
                const float maxRight =
                    static_cast< float >( node->right->maxSize.y( )) / pvpH;
                const float maxLeft =
                    static_cast< float >( node->left->maxSize.y( )) / pvpH;

                if( lengthRight > maxRight )
                    splitPos = end - maxRight;
                else if( lengthLeft > maxLeft )
                    splitPos = vp.y + maxLeft;

                if( (splitPos - vp.y) < boundary )
                    splitPos = vp.y + boundary;
                if( (end - splitPos) < boundary )
                    splitPos = end - boundary;

                const uint32_t ratio =
                    static_cast< uint32_t >( splitPos / boundary + .5f );
                splitPos = ratio * boundary;
            }

            splitPos = EQ_MAX( splitPos, vp.y );
            splitPos = EQ_MIN( splitPos, end );

            EQLOG( LOG_LB2 ) << "Constrained split " << vp << " at Y "
                             << splitPos << std::endl;
            node->split = splitPos;

            Viewport childVP = vp;
            childVP.h = (splitPos - vp.y);
            _computeSplit( node->left, leftTime, datas, childVP, range );

            childVP.y = childVP.getYEnd();
            childVP.h = end - childVP.y;

            while( childVP.getYEnd() < end )
                childVP.h += std::numeric_limits< float >::epsilon();

            _computeSplit( node->right, time - leftTime, datas, childVP,
                           range );
            break;
        }

        case MODE_DB:
        {
            EQASSERT( vp == Viewport::FULL );

            float splitPos = range.start;
            const float end = range.end;

            while( timeLeft > std::numeric_limits< float >::epsilon() &&
                   splitPos < end )
            {
                EQLOG( LOG_LB2 ) << timeLeft << "ms left using "
                                 << workingSet.size() << " tiles" << std::endl;

                // remove all irrelevant items from working set
                for( LBDatas::iterator i = workingSet.begin();
                     i != workingSet.end(); )
                {
                    const Data& data = *i;
                    if( data.range.end > splitPos )
                        ++i;
                    else
                        i = workingSet.erase( i );
                }
                if( workingSet.empty( ))
                    break;

                // find next 'discontinuity' in loads
                float currentPos = 1.0f;
                for( LBDatas::const_iterator i = workingSet.begin();
                     i != workingSet.end(); ++i )
                {
                    const Data& data = *i;
                    currentPos = EQ_MIN( currentPos, data.range.end );
                }

                const float size = currentPos - splitPos;
                EQASSERTINFO( size > 0.f, currentPos << "<=" << splitPos );
                EQASSERT( currentPos <= 1.0f );

                // accumulate normalized load in splitPos...currentPos
                EQLOG( LOG_LB2 ) << "Computing load in range " << splitPos
                                 << "..." << currentPos << std::endl;
                float currentTime = 0.f;
                for( LBDatas::const_iterator i = workingSet.begin();
                     i != workingSet.end(); ++i )
                {
                    const Data& data = *i;

                    if( data.range.start >= currentPos ) // not yet needed data
                        break;
#if 0
                    // make sure we cover full area
                    EQASSERTINFO( data.range.start <= splitPos,
                                  data.range.start << " > " << splitPos );
                    EQASSERTINFO( data.range.end >= currentPos,
                                  data.range.end << " < " << currentPos);
#endif
                    currentTime += data.time * size / data.range.getSize();
                }

                EQLOG( LOG_LB2 ) << splitPos << "..." << currentPos << ": t="
                                 << currentTime << " of " << timeLeft
                                 << std::endl;

                if( currentTime >= timeLeft ) // found last region
                {
                    const float width = currentPos - splitPos;
                    splitPos += (width * timeLeft / currentTime );
                    timeLeft = 0.0f;
                }
                else
                {
                    timeLeft -= currentTime;
                    splitPos = currentPos;
                }
            }

            EQLOG( LOG_LB2 ) << "Should split at " << splitPos << std::endl;
            splitPos = (1.f - _damping) * splitPos + _damping * node->split;
            EQLOG( LOG_LB2 ) << "Dampened split at " << splitPos << std::endl;

            const float boundary( node->boundaryf );
            if( node->left->resources == 0.f )
                splitPos = range.start;
            else if( node->right->resources == 0.f )
                splitPos = end;

            const uint32_t ratio =
                static_cast< uint32_t >( splitPos / boundary + .5f );
            splitPos = ratio * boundary;
            if( (splitPos - range.start) < boundary )
                splitPos = range.start;
            if( (end - splitPos) < boundary )
                splitPos = end;

            EQLOG( LOG_LB2 ) << "Constrained split " << range << " at pos "
                             << splitPos << std::endl;
            node->split = splitPos;

            Range childRange = range;
            childRange.end = splitPos;
            _computeSplit( node->left, leftTime, datas, vp, childRange );

            childRange.start = childRange.end;
            childRange.end = range.end;
            _computeSplit( node->right, time - leftTime, datas, vp,
                           childRange );
            break;
        }

        default:
            EQUNIMPLEMENTED;
    }
}
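// The per-mode loops above share one idea: walk along the split axis, sum up
// the normalized load of the tiles covering each interval, and stop once the
// accumulated load reaches the left child's time budget. Below is a minimal
// 1-D sketch of that search, assuming non-overlapping tiles sorted by start
// position; the Tile type and findSplit name are illustrative, not
// LoadEqualizer's actual types.
#include <vector>

struct Tile { float start, end, time; }; // extent on one axis, measured load

static float findSplit( const std::vector< Tile >& tiles, float targetTime )
{
    float pos = tiles.empty() ? 0.f : tiles.front().start;
    if( targetTime <= 0.f )
        return pos;

    for( size_t i = 0; i < tiles.size(); ++i )
    {
        const Tile& tile = tiles[i];
        const float width = tile.end - tile.start;
        if( width <= 0.f || tile.time <= 0.f )
            continue;

        if( tile.time >= targetTime )     // budget exhausted inside this tile:
            return tile.start +           // split proportionally to its load
                   width * targetTime / tile.time;

        targetTime -= tile.time;          // consume the whole tile and advance
        pos = tile.end;
    }
    return pos; // all load fits into the budget
}
// E.g. tiles {0, .5, 10 ms} and {.5, 1, 30 ms} with a 20 ms budget yield
// .5 + .5 * 10 / 30 ~= .67, analogous to the proportional split above.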
void LoadEqualizer::notifyLoadData( Channel* channel,
                                    const uint32_t frameNumber,
                                    const uint32_t nStatistics,
                                    const Statistic* statistics,
                                    const Viewport& region )
{
    EQLOG( LOG_LB2 ) << nStatistics << " samples from " << channel->getName()
                     << " @ " << frameNumber << std::endl;
    for( std::deque< LBFrameData >::iterator i = _history.begin();
         i != _history.end(); ++i )
    {
        LBFrameData& frameData = *i;
        if( frameData.first != frameNumber )
            continue;

        // Found corresponding historical data set
        LBDatas& items = frameData.second;
        for( LBDatas::iterator j = items.begin(); j != items.end(); ++j )
        {
            Data& data = *j;
            if( data.channel != channel )
                continue;

            // Found corresponding historical data item
            const uint32_t taskID = data.taskID;
            EQASSERTINFO( taskID > 0, channel->getName( ));

            // gather relevant load data
            int64_t startTime = std::numeric_limits< int64_t >::max();
            int64_t endTime = 0;
            bool loadSet = false;
            int64_t transmitTime = 0;

            for( uint32_t k = 0; k < nStatistics; ++k )
            {
                const Statistic& stat = statistics[k];
                if( stat.task == data.destTaskID )
                    _updateAssembleTime( data, stat );

                // from different compound
                if( stat.task != taskID || loadSet )
                    continue;

                switch( stat.type )
                {
                    case Statistic::CHANNEL_CLEAR:
                    case Statistic::CHANNEL_DRAW:
                    case Statistic::CHANNEL_READBACK:
                        startTime = EQ_MIN( startTime, stat.startTime );
                        endTime = EQ_MAX( endTime, stat.endTime );
                        break;

                    case Statistic::CHANNEL_FRAME_TRANSMIT:
                        transmitTime += stat.endTime - stat.startTime;
                        break;
                    case Statistic::CHANNEL_FRAME_WAIT_SENDTOKEN:
                        transmitTime -= stat.endTime - stat.startTime;
                        break;

                    // assemble blocks on input frames, stop using subsequent
                    // data
                    case Statistic::CHANNEL_ASSEMBLE:
                        loadSet = true;
                        break;

                    default:
                        break;
                }
            }

            if( startTime == std::numeric_limits< int64_t >::max( ))
                return;

            data.vp.apply( region ); // Update ROI
            data.time = endTime - startTime;
            data.time = EQ_MAX( data.time, 1 );
            data.time = EQ_MAX( data.time, transmitTime );
            data.assembleTime = EQ_MAX( data.assembleTime, 0 );
            EQLOG( LOG_LB2 ) << "Added time " << data.time << " (+"
                             << data.assembleTime << ") for "
                             << channel->getName() << " " << data.vp << ", "
                             << data.range << " @ " << frameNumber
                             << std::endl;
            return;

            // Note: if the same channel is used twice as a child, the
            // load-compound association does not work.
        }
    }
}
void Channel::frameDraw( const eq::uint128_t& frameID )
{
    if( stopRendering( ))
        return;

    _initJitter();
    if( _isDone( ))
        return;

    Window* window = static_cast< Window* >( getWindow( ));
    VertexBufferState& state = window->getState();
    const Model* oldModel = _model;
    const Model* model = _getModel();

    if( oldModel != model )
        state.setFrustumCulling( false ); // create all display lists/VBOs

    if( model )
        _updateNearFar( model->getBoundingSphere( ));

    // Setup OpenGL state
    eq::Channel::frameDraw( frameID );

    glLightfv( GL_LIGHT0, GL_POSITION, lightPosition );
    glLightfv( GL_LIGHT0, GL_AMBIENT,  lightAmbient  );
    glLightfv( GL_LIGHT0, GL_DIFFUSE,  lightDiffuse  );
    glLightfv( GL_LIGHT0, GL_SPECULAR, lightSpecular );

    glMaterialfv( GL_FRONT, GL_AMBIENT,   materialAmbient   );
    glMaterialfv( GL_FRONT, GL_DIFFUSE,   materialDiffuse   );
    glMaterialfv( GL_FRONT, GL_SPECULAR,  materialSpecular  );
    glMateriali(  GL_FRONT, GL_SHININESS, materialShininess );

    const FrameData& frameData = _getFrameData();
    glPolygonMode( GL_FRONT_AND_BACK,
                   frameData.useWireframe() ? GL_LINE : GL_FILL );

    const eq::Vector3f& position = frameData.getCameraPosition();

    glMultMatrixf( frameData.getCameraRotation().array );
    glTranslatef( position.x(), position.y(), position.z() );
    glMultMatrixf( frameData.getModelRotation().array );

    if( frameData.getColorMode() == COLOR_DEMO )
    {
        const eq::Vector3ub color = getUniqueColor();
        glColor3ub( color.r(), color.g(), color.b() );
    }
    else
        glColor3f( .75f, .75f, .75f );

    if( model )
        _drawModel( model );
    else
    {
        glNormal3f( 0.f, -1.f, 0.f );
        glBegin( GL_TRIANGLE_STRIP );
            glVertex3f(  .25f, 0.f,  .25f );
            glVertex3f( -.25f, 0.f,  .25f );
            glVertex3f(  .25f, 0.f, -.25f );
            glVertex3f( -.25f, 0.f, -.25f );
        glEnd();
    }

    state.setFrustumCulling( true );
    Accum& accum = _accum[ co::base::getIndexOfLastBit( getEye()) ];
    accum.stepsDone = EQ_MAX( accum.stepsDone,
                              getSubPixel().size * getPeriod( ));
    accum.transfer = true;
}
/* Compute the bounding sphere of the leaf's indexed vertices. */
const BoundingSphere& VertexBufferLeaf::updateBoundingSphere()
{
    // We determine a bounding sphere by:
    // 1) Using the inner sphere of the dominant axis of the bounding box as an
    //    estimate
    // 2) Test all points to be in that sphere
    // 3) Expand the sphere to contain all points outside.

    // 1a) initialize and compute a bounding box
    BoundingBox boundingBox;
    boundingBox[0] =
        _globalData.vertices[ _vertexStart + _globalData.indices[_indexStart] ];
    boundingBox[1] =
        _globalData.vertices[ _vertexStart + _globalData.indices[_indexStart] ];

    for( Index offset = 1; offset < _indexLength; ++offset )
    {
        const Vertex& vertex =
            _globalData.vertices[ _vertexStart +
                                  _globalData.indices[_indexStart + offset] ];

        boundingBox[0][0] = min( boundingBox[0][0], vertex[0] );
        boundingBox[1][0] = max( boundingBox[1][0], vertex[0] );
        boundingBox[0][1] = min( boundingBox[0][1], vertex[1] );
        boundingBox[1][1] = max( boundingBox[1][1], vertex[1] );
        boundingBox[0][2] = min( boundingBox[0][2], vertex[2] );
        boundingBox[1][2] = max( boundingBox[1][2], vertex[2] );
    }

    // 1b) get inner sphere of bounding box as an initial estimate
    _boundingSphere.x() = ( boundingBox[0].x() + boundingBox[1].x() ) * 0.5f;
    _boundingSphere.y() = ( boundingBox[0].y() + boundingBox[1].y() ) * 0.5f;
    _boundingSphere.z() = ( boundingBox[0].z() + boundingBox[1].z() ) * 0.5f;

    _boundingSphere.w() = EQ_MAX( boundingBox[1].x() - boundingBox[0].x(),
                                  boundingBox[1].y() - boundingBox[0].y() );
    _boundingSphere.w() = EQ_MAX( boundingBox[1].z() - boundingBox[0].z(),
                                  _boundingSphere.w() );
    _boundingSphere.w() *= .5f;

    float radius = _boundingSphere.w();
    float radiusSquared = radius * radius;
    Vertex center( _boundingSphere.array );

    // 2) test all points to be in the estimated bounding sphere
    for( Index offset = 0; offset < _indexLength; ++offset )
    {
        const Vertex& vertex =
            _globalData.vertices[ _vertexStart +
                                  _globalData.indices[_indexStart + offset] ];

        const Vertex centerToPoint = vertex - center;
        const float distanceSquared = centerToPoint.squared_length();
        if( distanceSquared <= radiusSquared ) // point is inside existing BS
            continue;

        // 3) expand sphere to contain 'outside' points
        const float distance = sqrtf( distanceSquared );
        const float delta = distance - radius;

        radius = ( radius + distance ) * .5f;
        radiusSquared = radius * radius;
        const Vertex normdelta = normalize( centerToPoint ) * ( 0.5f * delta );

        center += normdelta;

        EQASSERTINFO( Vertex( vertex-center ).squared_length() <=
                      ( radiusSquared +
                        2.f * numeric_limits<float>::epsilon( )),
                      vertex << " c " << center << " r " << radius << " ("
                             << Vertex( vertex-center ).length() << ")" );
    }

#ifndef NDEBUG
    // 2a) re-test all points to be in the estimated bounding sphere
    for( Index offset = 0; offset < _indexLength; ++offset )
    {
        const Vertex& vertex =
            _globalData.vertices[ _vertexStart +
                                  _globalData.indices[_indexStart + offset] ];

        const Vertex centerToPoint = vertex - center;
        const float distanceSquared = centerToPoint.squared_length();
        EQASSERTINFO( distanceSquared <=
                      ( radiusSquared +
                        2.f * numeric_limits<float>::epsilon( )),
                      vertex << " c " << center << " r " << radius << " ("
                             << Vertex( vertex-center ).length() << ")" );
    }
#endif

    // store optimal bounding sphere
    _boundingSphere.x() = center.x();
    _boundingSphere.y() = center.y();
    _boundingSphere.z() = center.z();
    _boundingSphere.w() = radius;

#ifndef NDEBUG
    MESHINFO << "updateBoundingSphere" << "( " << _boundingSphere << " )."
             << endl;
#endif

    return _boundingSphere;
}
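// The expansion in step 3) above is the classic Ritter-style enlargement: for
// a point outside the current sphere, the new sphere touches the old far side
// and the point, so the radius becomes (radius + distance) / 2 and the center
// moves toward the point by (distance - radius) / 2. Below is a minimal,
// self-contained sketch of that single step; the Vec3 type and expandSphere
// name are illustrative assumptions, not part of the original sources.
#include <cmath>

struct Vec3 { float x, y, z; };

// Grow the sphere (center, radius) so that it also contains 'point'.
static void expandSphere( Vec3& center, float& radius, const Vec3& point )
{
    const Vec3 d = { point.x - center.x, point.y - center.y,
                     point.z - center.z };
    const float distance = std::sqrt( d.x*d.x + d.y*d.y + d.z*d.z );
    if( distance <= radius )
        return;                                       // point already inside

    const float shift = ( distance - radius ) * .5f / distance;
    center.x += d.x * shift;      // move center toward the point by
    center.y += d.y * shift;      // (distance - radius) / 2
    center.z += d.z * shift;
    radius = ( radius + distance ) * .5f; // now touches point and old far side
}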
void TreeEqualizer::_assign( Node* node, const Viewport& vp,
                             const Range& range )
{
    EQLOG( LOG_LB2 ) << "assign " << vp << ", " << range << " time "
                     << node->time << " split " << node->split << std::endl;
    EQASSERTINFO( vp.isValid(), vp );
    EQASSERTINFO( range.isValid(), range );
    EQASSERTINFO( node->resources > 0.f || !vp.hasArea() || !range.hasData(),
                  "Assigning work to unused compound: " << vp << ", "
                                                        << range );

    Compound* compound = node->compound;
    if( compound )
    {
        EQASSERTINFO( vp == Viewport::FULL || range == Range::ALL,
                      "Mixed 2D/DB load-balancing not implemented" );
        compound->setViewport( vp );
        compound->setRange( range );
        EQLOG( LOG_LB2 ) << compound->getChannel()->getName() << " set " << vp
                         << ", " << range << std::endl;
        return;
    }

    switch( node->mode )
    {
        case MODE_VERTICAL:
        {
            // Ensure minimum size
            const Compound* root = getCompound();
            const float pvpW = float( root->getInheritPixelViewport().w );
            const float end = vp.getXEnd();
            const float boundary = float( node->boundary2i.x( )) / pvpW;
            float absoluteSplit = vp.x + vp.w * node->split;

            if( node->left->resources == 0.f )
                absoluteSplit = vp.x;
            else if( node->right->resources == 0.f )
                absoluteSplit = end;
            else if( boundary > 0 )
            {
                const float right = vp.getXEnd() - absoluteSplit;
                const float left = absoluteSplit - vp.x;
                const float maxRight = float( node->right->maxSize.x( )) / pvpW;
                const float maxLeft = float( node->left->maxSize.x( )) / pvpW;

                if( right > maxRight )
                    absoluteSplit = end - maxRight;
                else if( left > maxLeft )
                    absoluteSplit = vp.x + maxLeft;

                if( (absoluteSplit - vp.x) < boundary )
                    absoluteSplit = vp.x + boundary;
                if( (end - absoluteSplit) < boundary )
                    absoluteSplit = end - boundary;

                const uint32_t ratio =
                    uint32_t( absoluteSplit / boundary + .5f );
                absoluteSplit = ratio * boundary;
            }

            absoluteSplit = EQ_MAX( absoluteSplit, vp.x );
            absoluteSplit = EQ_MIN( absoluteSplit, end);

            node->split = (absoluteSplit - vp.x ) / vp.w;
            EQLOG( LOG_LB2 ) << "Constrained split " << vp << " at X "
                             << node->split << std::endl;

            // traverse children
            Viewport childVP = vp;
            childVP.w = (absoluteSplit - vp.x);
            _assign( node->left, childVP, range );

            childVP.x = childVP.getXEnd();
            childVP.w = end - childVP.x;

            // Fix 2994111: Rounding errors with 2D LB and 16 sources
            // Floating point rounding may create a width for the 'right'
            // child which is slightly below the parent width. Correct it.
            while( childVP.getXEnd() < end )
                childVP.w += std::numeric_limits< float >::epsilon();

            _assign( node->right, childVP, range );
            break;
        }

        case MODE_HORIZONTAL:
        {
            // Ensure minimum size
            const Compound* root = getCompound();
            const float pvpH = float( root->getInheritPixelViewport().h );
            const float end = vp.getYEnd();
            const float boundary = float( node->boundary2i.y( )) / pvpH;
            float absoluteSplit = vp.y + vp.h * node->split;

            if( node->left->resources == 0.f )
                absoluteSplit = vp.y;
            else if( node->right->resources == 0.f )
                absoluteSplit = end;
            else if( boundary > 0 )
            {
                const float right = vp.getYEnd() - absoluteSplit;
                const float left = absoluteSplit - vp.y;
                const float maxRight = float( node->right->maxSize.y( )) / pvpH;
                const float maxLeft = float( node->left->maxSize.y( )) / pvpH;

                if( right > maxRight )
                    absoluteSplit = end - maxRight;
                else if( left > maxLeft )
                    absoluteSplit = vp.y + maxLeft;

                if( (absoluteSplit - vp.y) < boundary )
                    absoluteSplit = vp.y + boundary;
                if( (end - absoluteSplit) < boundary )
                    absoluteSplit = end - boundary;

                const uint32_t ratio =
                    uint32_t( absoluteSplit / boundary + .5f );
                absoluteSplit = ratio * boundary;
            }

            absoluteSplit = EQ_MAX( absoluteSplit, vp.y );
            absoluteSplit = EQ_MIN( absoluteSplit, end);

            node->split = (absoluteSplit - vp.y ) / vp.h;
            EQLOG( LOG_LB2 ) << "Constrained split " << vp << " at Y "
                             << node->split << std::endl;

            // traverse children
            Viewport childVP = vp;
            childVP.h = (absoluteSplit - vp.y);
            _assign( node->left, childVP, range );

            childVP.y = childVP.getYEnd();
            childVP.h = end - childVP.y;

            // Fix 2994111: Rounding errors with 2D LB and 16 sources
            // Floating point rounding may create a height for the 'right'
            // child which is slightly below the parent height. Correct it.
            while( childVP.getYEnd() < end )
                childVP.h += std::numeric_limits< float >::epsilon();

            _assign( node->right, childVP, range );
            break;
        }

        case MODE_DB:
        {
            EQASSERT( vp == Viewport::FULL );
            const float end = range.end;
            float absoluteSplit = range.start +
                                  (range.end - range.start) * node->split;

            const float boundary( node->boundaryf );
            if( node->left->resources == 0.f )
                absoluteSplit = range.start;
            else if( node->right->resources == 0.f )
                absoluteSplit = end;

            const uint32_t ratio = uint32_t( absoluteSplit / boundary + .5f );
            absoluteSplit = ratio * boundary;
            if( (absoluteSplit - range.start) < boundary )
                absoluteSplit = range.start;
            if( (end - absoluteSplit) < boundary )
                absoluteSplit = end;

            node->split = (absoluteSplit-range.start) /
                          (range.end-range.start);
            EQLOG( LOG_LB2 ) << "Constrained split " << range << " at pos "
                             << node->split << std::endl;

            Range childRange = range;
            childRange.end = absoluteSplit;
            _assign( node->left, vp, childRange );

            childRange.start = childRange.end;
            childRange.end = range.end;
            _assign( node->right, vp, childRange);
            break;
        }

        default:
            EQUNIMPLEMENTED;
    }
}
void TreeEqualizer::_update( Node* node )
{
    if( !node )
        return;

    const Compound* compound = node->compound;
    if( compound )
    {
        const Channel* channel = compound->getChannel();
        EQASSERT( channel );
        const PixelViewport& pvp = channel->getPixelViewport();

        node->resources = compound->isRunning() ? compound->getUsage() : 0.f;
        node->maxSize.x() = pvp.w;
        node->maxSize.y() = pvp.h;
        node->boundaryf = _boundaryf;
        node->boundary2i = _boundary2i;
        return;
    }
    // else

    EQASSERT( node->left );
    EQASSERT( node->right );

    _update( node->left );
    _update( node->right );

    node->resources = node->left->resources + node->right->resources;

    if( node->left->resources == 0.f )
    {
        node->maxSize = node->right->maxSize;
        node->boundary2i = node->right->boundary2i;
        node->boundaryf = node->right->boundaryf;
        node->time = node->right->time;
    }
    else if( node->right->resources == 0.f )
    {
        node->maxSize = node->left->maxSize;
        node->boundary2i = node->left->boundary2i;
        node->boundaryf = node->left->boundaryf;
        node->time = node->left->time;
    }
    else
    {
        switch( node->mode )
        {
            case MODE_VERTICAL:
                node->maxSize.x() = node->left->maxSize.x() +
                                    node->right->maxSize.x();
                node->maxSize.y() = EQ_MIN( node->left->maxSize.y(),
                                            node->right->maxSize.y() );
                node->boundary2i.x() = node->left->boundary2i.x() +
                                       node->right->boundary2i.x();
                node->boundary2i.y() = EQ_MAX( node->left->boundary2i.y(),
                                               node->right->boundary2i.y());
                node->boundaryf = EQ_MAX( node->left->boundaryf,
                                          node->right->boundaryf );
                break;
            case MODE_HORIZONTAL:
                node->maxSize.x() = EQ_MIN( node->left->maxSize.x(),
                                            node->right->maxSize.x() );
                node->maxSize.y() = node->left->maxSize.y() +
                                    node->right->maxSize.y();
                node->boundary2i.x() = EQ_MAX( node->left->boundary2i.x(),
                                               node->right->boundary2i.x() );
                node->boundary2i.y() = node->left->boundary2i.y() +
                                       node->right->boundary2i.y();
                node->boundaryf = EQ_MAX( node->left->boundaryf,
                                          node->right->boundaryf );
                break;
            case MODE_DB:
                node->boundary2i.x() = EQ_MAX( node->left->boundary2i.x(),
                                               node->right->boundary2i.x() );
                node->boundary2i.y() = EQ_MAX( node->left->boundary2i.y(),
                                               node->right->boundary2i.y() );
                node->boundaryf = node->left->boundaryf +
                                  node->right->boundaryf;
                break;
            default:
                EQUNIMPLEMENTED;
        }
        node->time = node->left->time + node->right->time;
    }
}