/**
 * @brief Applies the filter to every frame of the AVFrame-based source
 *        (source1_) and appends each converted result to target_.
 *
 * Iterates while isRunning_ stays true so an external stop request can
 * abort the loop early; isRunning_ is cleared again on exit.
 */
void Utility::FilterApplier::applyToAVVideoP() {
    isRunning_ = true;
    for (std::size_t i = 0; i < source1_->getNumberOfFrames() && isRunning_; i++) {
        auto filteredImage = applyToFrame(*source1_->getFrame(i));
        target_->appendFrame(VideoConverter::convertAVFrameToQImage(*filteredImage));
        // av_frame_free() already unrefs the frame's buffers before freeing the
        // frame itself, so a preceding av_frame_unref() call is redundant.
        av_frame_free(&filteredImage);
    }
    isRunning_ = false;
}
/**
 * @brief Runs a binary (two-label) expansion graph cut inside every labeled
 *        patch of the current frame's mesh.
 *
 * For each label value 1..max present in labels_[current_frame_], the
 * vertex indices carrying that label are gathered, the corresponding
 * sub-graph/sub-mesh is extracted, a two-label graph cut is solved on that
 * patch, and the per-patch answer is merged back via applyToFrame().
 * Progress/info text is forwarded to listeners through the message signal.
 */
void InPatchGraphCut::for_each_frame(void) {
    Segmentation::GraphCut gc;
    MeshBundle<DefaultMesh>& mesh = *meshes_[current_frame_];
    arma::uvec& label = labels_[current_frame_];
    arma::uword label_max = arma::max(label);
    current_label_ = 1;
    while( current_label_ <= label_max ) {
        // Indices of the vertices belonging to the patch being re-segmented.
        arma::uvec label_indices = arma::find( label == current_label_ );
        if(label_indices.is_empty()){
            // Label ids need not be contiguous; skip values with no members.
            ++ current_label_;
            continue;
        }
        // std::cerr<<"extract patch graph"<<std::endl;
        current_patch_mesh_.clear();
        // Extract the sub-graph (and its mesh) restricted to this patch.
        current_patch_graph_ = VoxelGraph<DefaultMesh>::getSubGraphPtr(mesh.graph_,label_indices,current_patch_mesh_);
        // std::cerr<<"done extract patch graph"<<std::endl;
        // Binary cut: each patch is split into exactly two labels.
        label_number_ = 2;
        pix_number_ = current_patch_graph_->voxel_centers.n_cols;
        gc.setLabelNumber( label_number_ );
        gc.setPixelNumber( pix_number_ );
        // std::cerr<<"prepareDataTerm()"<<std::endl;
        // Term-preparation failures are reported but not fatal: the cut is
        // still attempted with whatever state gc holds.
        if(!prepareDataTerm(gc))
        {
            std::cerr<<"Failed to prepare data term"<<std::endl;
        }
        // std::cerr<<"prepareSmoothTerm()"<<std::endl;
        if(!prepareSmoothTerm(gc))
        {
            std::cerr<<"Failed to prepare smooth term"<<std::endl;
        }
        gc.init(Segmentation::GraphCut::EXPANSION);
        // std::cerr<<"prepareNeighbors()"<<std::endl;
        // Neighbors are wired after init(), matching the solver's expected
        // call order in this file.
        if(!prepareNeighbors(gc))
        {
            std::cerr<<"Failed to prepare graph"<<std::endl;
        }
        // std::cerr<<"gc.updateInfo()"<<std::endl;
        gc.updateInfo();
        // NOTE(review): t looks like an out-param of optimize() (elapsed time
        // or final energy?); it is never read here — confirm against the
        // GraphCut API before removing.
        float t;
        emit message(QString::fromStdString(gc.info()),0);
        // Iteration count comes from the "GC_iter_num" config entry.
        gc.optimize(config_->getInt("GC_iter_num"),t);
        emit message(QString::fromStdString(gc.info()),0);
        arma::uvec gc_label;
        gc.getAnswer(gc_label);
        // std::cerr<<"get answer"<<std::endl;
        // Merge the two-way split for this patch back into the frame labels.
        applyToFrame(gc_label,label_indices);
        // std::cerr<<"done apply to frame"<<std::endl;
        ++current_label_;
    }
}
void Utility::FilterApplier::applyToVideoP() { isRunning_=true; std::size_t i=0; do { for(; i<source_->getNumberOfFrames()&&isRunning_; i++) { auto image=VideoConverter::convertQImageToAVFrame(*source_->getFrame(i)); auto filteredImage=applyToFrame(*image); target_->appendFrame(VideoConverter::convertAVFrameToQImage(*filteredImage)); av_frame_unref(image); av_frame_unref(filteredImage); av_frame_free(&image); av_frame_free(&filteredImage); } if(source_->isComplete()) { break; } } while(isRunning_); target_->setIsComplete(true); bool buffer=isRunning_; isRunning_=false; emit applyComplete(buffer); }