void Channel::frameReadback( const eq::uint128_t& frameID,
                             const eq::Frames& frames )
{
    if( stopRendering() || _isDone( ))
        return;

    const FrameData& frameData = _getFrameData();
    for( eq::FramesCIter i = frames.begin(); i != frames.end(); ++i )
    {
        eq::Frame* frame = *i;

        // OPT: Drop alpha channel from all frames during network transport
        frame->setAlphaUsage( false );

        if( frameData.isIdle( ))
            frame->setQuality( eq::Frame::BUFFER_COLOR, 1.f );
        else
            frame->setQuality( eq::Frame::BUFFER_COLOR, frameData.getQuality( ));

        if( frameData.useCompression( ))
            frame->useCompressor( eq::Frame::BUFFER_COLOR, EQ_COMPRESSOR_AUTO );
        else
            frame->useCompressor( eq::Frame::BUFFER_COLOR, EQ_COMPRESSOR_NONE );
    }

    eq::Channel::frameReadback( frameID, frames );
}
void Channel::frameAssemble( const eq::uint128_t& frameID,
                             const eq::Frames& frames )
{
    if( stopRendering( ))
        return;

    if( _isDone( ))
        return;

    Accum& accum = _accum[ lunchbox::getIndexOfLastBit( getEye()) ];

    if( getPixelViewport() != _currentPVP )
    {
        accum.transfer = true;

        if( accum.buffer && !accum.buffer->usesFBO( ))
        {
            LBWARN << "Current viewport different from view viewport, "
                   << "idle anti-aliasing not implemented." << std::endl;
            accum.step = 0;
        }

        eq::Channel::frameAssemble( frameID, frames );
        return;
    }
    // else

    accum.transfer = true;
    for( eq::Frames::const_iterator i = frames.begin(); i != frames.end(); ++i )
    {
        eq::Frame* frame = *i;
        const eq::SubPixel& curSubPixel = frame->getSubPixel();

        if( curSubPixel != eq::SubPixel::ALL )
            accum.transfer = false;

        accum.stepsDone = LB_MAX( accum.stepsDone,
                                  frame->getSubPixel().size *
                                      frame->getPeriod( ));
    }

    applyBuffer();
    applyViewport();
    setupAssemblyState();

    try
    {
        eq::Compositor::assembleFrames( frames, this, accum.buffer );
    }
    catch( const co::Exception& e )
    {
        LBWARN << e.what() << std::endl;
    }

    resetAssemblyState();
}
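// The idle anti-aliasing bookkeeping above relies on a per-eye Accum helper
// (accum.buffer, accum.step, accum.stepsDone, accum.transfer) that is not part
// of this listing. A minimal sketch of such a struct, assuming the buffer is an
// eq::util::Accum accumulation buffer (see <eq/util/accum.h>); the field widths
// and defaults are illustrative, not the definitive declaration:
struct Accum
{
    Accum() : buffer( 0 ), step( 0 ), stepsDone( 0 ), transfer( false ) {}

    eq::util::Accum* buffer; // accumulation buffer used by assembleFrames()
    int32_t step;            // remaining idle anti-aliasing steps for this eye
    uint32_t stepsDone;      // sub-pixel steps already accumulated
    bool transfer;           // true when the accumulated result may be shown
};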
void orderFrames( eq::Frames& frames, const Matrix4f& modelView )
{
    LBASSERT( !_channel->useOrtho( ));

    // compute the inverse transpose of the modelview matrix
    Matrix3f modelviewITM;
    Matrix4f modelviewIM;
    modelView.inverse( modelviewIM );
    Matrix3f( modelviewIM ).transpose_to( modelviewITM );

    Vector3f norm = modelviewITM * Vector3f( 0.0f, 0.0f, 1.0f );
    norm.normalize();

    std::sort( frames.begin(), frames.end(), cmpRangesInc );

    // cosine of the angle between the view normal and the vector from the
    // center of projection to the middle of each slice boundary
    std::vector< double > dotVals;

    for( const eq::Frame* frame : frames )
    {
        const double px = -1.0 + frame->getRange().end * 2.0;

        const Vector4f pS = modelView * Vector4f( 0.0f, 0.0f, px, 1.0f );
        Vector3f pSsub( pS[ 0 ], pS[ 1 ], pS[ 2 ] );
        pSsub.normalize();
        dotVals.push_back( norm.dot( pSsub ));
    }

    const Vector4f pS = modelView * Vector4f( 0.0f, 0.0f, -1.0f, 1.0f );
    Vector3f pSsub( pS[ 0 ], pS[ 1 ], pS[ 2 ] );
    pSsub.normalize();
    dotVals.push_back( norm.dot( pSsub ));

    // check if any slices need to be rendered in reverse order
    size_t minPos = std::numeric_limits< size_t >::max();
    for( size_t i = 0; i < dotVals.size() - 1; i++ )
        if( dotVals[ i ] > 0 && dotVals[ i + 1 ] > 0 )
            minPos = i;

    const size_t nFrames = frames.size();
    minPos++;
    if( minPos < nFrames - 1 )
    {
        eq::Frames framesTmp = frames;

        // copy slices that should be rendered first
        memcpy( &frames[ nFrames - minPos - 1 ], &framesTmp[ 0 ],
                ( minPos + 1 ) * sizeof( eq::Frame* ));

        // copy slices that should be rendered last, in reverse order
        for( size_t i = 0; i < nFrames - minPos - 1; i++ )
            frames[ i ] = framesTmp[ nFrames - i - 1 ];
    }
}
void Channel::frameReadback( const eq::uint128_t& frameID,
                             const eq::Frames& frames )
{
    // Drop depth buffer flag from all output frames
    const FrameData& frameData = _getFrameData();
    for( eq::FramesCIter i = frames.begin(); i != frames.end(); ++i )
    {
        eq::Frame* frame = *i;
        frame->setQuality( eq::Frame::BUFFER_COLOR, frameData.getQuality( ));
        frame->disableBuffer( eq::Frame::BUFFER_DEPTH );
    }

    eq::Channel::frameReadback( frameID, frames );
}
void orderFrames( eq::Frames& frames, const eq::Matrix4d& modelviewM,
                  const eq::Matrix3d& modelviewITM,
                  const eq::Matrix4f& rotation, const bool orthographic )
{
    if( orthographic ) // orthographic projection
    {
        const bool orientation = rotation.array[ 10 ] < 0;
        std::sort( frames.begin(), frames.end(),
                   orientation ? cmpRangesInc : cmpRangesDec );
        return;
    }
    // else perspective projection

    eq::Vector3d norm = modelviewITM * eq::Vector3d( 0.0, 0.0, 1.0 );
    norm.normalize();

    std::sort( frames.begin(), frames.end(), cmpRangesInc );

    // cosine of the angle between the view normal and the vector from the
    // center of projection to the middle of each slice boundary
    std::vector< double > dotVals;

    for( eq::Frames::const_iterator i = frames.begin(); i != frames.end(); ++i )
    {
        const eq::Frame* frame = *i;
        const double px = -1.0 + frame->getRange().end * 2.0;

        const eq::Vector4d pS = modelviewM * eq::Vector4d( 0.0, 0.0, px, 1.0 );
        eq::Vector3d pSsub( pS[ 0 ], pS[ 1 ], pS[ 2 ] );
        pSsub.normalize();
        dotVals.push_back( norm.dot( pSsub ));
    }

    const eq::Vector4d pS = modelviewM * eq::Vector4d( 0.0, 0.0, -1.0, 1.0 );
    eq::Vector3d pSsub( pS[ 0 ], pS[ 1 ], pS[ 2 ] );
    pSsub.normalize();
    dotVals.push_back( norm.dot( pSsub ));

    // check if any slices need to be rendered in reverse order
    size_t minPos = std::numeric_limits< size_t >::max();
    for( size_t i = 0; i < dotVals.size() - 1; i++ )
        if( dotVals[ i ] > 0 && dotVals[ i + 1 ] > 0 )
            minPos = i;

    const size_t nFrames = frames.size();
    minPos++;
    if( minPos < nFrames - 1 )
    {
        eq::Frames framesTmp = frames;

        // copy slices that should be rendered first
        memcpy( &frames[ nFrames - minPos - 1 ], &framesTmp[ 0 ],
                ( minPos + 1 ) * sizeof( eq::Frame* ));

        // copy slices that should be rendered last, in reverse order
        for( size_t i = 0; i < nFrames - minPos - 1; i++ )
            frames[ i ] = framesTmp[ nFrames - i - 1 ];
    }
}
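// Both orderFrames() variants above rely on the cmpRangesInc/cmpRangesDec
// comparators, which are not shown in this listing. A minimal sketch under the
// assumption that they order frames by the start of their database (DB) range,
// ascending respectively descending:
static bool cmpRangesInc( const eq::Frame* frame1, const eq::Frame* frame2 )
{
    return frame1->getRange().start < frame2->getRange().start;
}

static bool cmpRangesDec( const eq::Frame* frame1, const eq::Frame* frame2 )
{
    return frame1->getRange().start > frame2->getRange().start;
}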
void prepareFramesAndSetPvp( const eq::Frames& frames, eq::Frames& dbFrames,
                             eq::PixelViewport& coveredPVP )
{
    for( eq::Frame* frame : frames )
    {
        {
            eq::ChannelStatistics event(
                eq::Statistic::CHANNEL_FRAME_WAIT_READY, _channel );
            frame->waitReady();
        }

        const eq::Range& range = frame->getRange();
        if( range == eq::Range::ALL ) // 2D frame, assemble directly
        {
            eq::Compositor::assembleFrame( frame, _channel );
            continue;
        }

        dbFrames.push_back( frame );

        // extend the covered viewport by all images of this DB frame
        for( const eq::Image* image : frame->getImages( ))
        {
            const eq::PixelViewport imagePVP = image->getPixelViewport() +
                                               frame->getOffset();
            coveredPVP.merge( imagePVP );
        }
    }
}
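// A hypothetical call site for prepareFramesAndSetPvp(), sketching how the
// collected DB frames and covered viewport could feed the sorting and
// compositing steps above; 'frames' and 'modelView' stand in for the input
// frames and the current modelview matrix and are assumptions, not part of
// this listing:
eq::Frames dbFrames;
eq::PixelViewport coveredPVP; // starts empty; merge() grows it per image
prepareFramesAndSetPvp( frames, dbFrames, coveredPVP );
if( !dbFrames.empty() && coveredPVP.hasArea( ))
    orderFrames( dbFrames, modelView ); // back-to-front order for compositing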