// Opens a media file for decoding: probes the container, selects the video
// (and optionally audio) stream, opens the decoders and allocates the decode
// frames. On any failure it emits openFailed() and tears everything down via
// close(); on success it emits opened(...) with the stream's characteristics.
// NOTE(review): uses the legacy AVStream::codec context (pre-FFmpeg-3.x API);
// presumably the rest of the project targets that FFmpeg generation — confirm
// before modernizing to avcodec_parameters_to_context().
void PhVideoDecoder::open(QString fileName) {
	// Release any previously opened media before reusing this decoder.
	close();
	PHDEBUG << fileName;
	_currentFrame = PHFRAMEMIN;
	// Open the container. On failure avformat_open_input leaves
	// _formatContext unset; close() is still called to reset decoder state.
	if(avformat_open_input(&_formatContext, fileName.toStdString().c_str(), NULL, NULL) < 0) {
		emit openFailed();
		close();
		return;
	}
	PHDEBUG << "Retrieve stream information";
	if (avformat_find_stream_info(_formatContext, NULL) < 0) {
		emit openFailed();
		close();
		return; // Couldn't find stream information
	}
	// Disable dump for specs
	// (the format dump is only printed when debug logging is enabled)
	if(PhDebug::logMask() & 1)
		av_dump_format(_formatContext, 0, fileName.toStdString().c_str(), 0);
	// Find video stream :
	for(int i = 0; i < (int)_formatContext->nb_streams; i++) {
		AVMediaType streamType = _formatContext->streams[i]->codec->codec_type;
		PHDEBUG << i << ":" << streamType;
		switch(streamType) {
		case AVMEDIA_TYPE_VIDEO:
			// Some containers are advertised with several video streams.
			// For example, one is the main stream and the other one is just a cover picture (single frame).
			// Here we choose the one that has the largest number of frames.
			if (!_videoStream || _videoStream->nb_frames < _formatContext->streams[i]->nb_frames) {
				_videoStream = _formatContext->streams[i];
			}
			PHDEBUG << "\t=> video";
			break;
		case AVMEDIA_TYPE_AUDIO:
			// Keep only the first audio stream, and only when audio was requested.
			if(_useAudio && (_audioStream == NULL))
				_audioStream = _formatContext->streams[i];
			PHDEBUG << "\t=> audio";
			break;
		default:
			PHDEBUG << "\t=> unknown";
			break;
		}
	}
	// A video stream is mandatory; audio is best-effort (see below).
	if(_videoStream == NULL) {
		emit openFailed();
		close();
		return;
	}
	// Looking for timecode type
	_tcType = PhTimeCode::computeTimeCodeType(this->framePerSecond());
	PHDEBUG << "size : " << _videoStream->codec->width << "x" << _videoStream->codec->height;
	// Locate and open the video decoder; both steps are fatal on failure.
	AVCodec * videoCodec = avcodec_find_decoder(_videoStream->codec->codec_id);
	if(videoCodec == NULL) {
		PHDEBUG << "Unable to find the codec:" << _videoStream->codec->codec_id;
		emit openFailed();
		close();
		return;
	}
	if (avcodec_open2(_videoStream->codec, videoCodec, NULL) < 0) {
		PHDEBUG << "Unable to open the codec:" << _videoStream->codec;
		emit openFailed();
		close();
		return;
	}
	// Frame buffer the decode loop will fill (freed in close(), presumably —
	// TODO confirm close() releases _videoFrame/_audioFrame).
	_videoFrame = av_frame_alloc();
	if(_audioStream) {
		// Audio is optional: if its codec cannot be found or opened we simply
		// drop the audio stream instead of failing the whole open().
		AVCodec* audioCodec = avcodec_find_decoder(_audioStream->codec->codec_id);
		if(audioCodec) {
			if(avcodec_open2(_audioStream->codec, audioCodec, NULL) < 0) {
				PHDEBUG << "Unable to open audio codec.";
				_audioStream = NULL;
			}
			else {
				_audioFrame = av_frame_alloc();
				PHDEBUG << "Audio OK.";
			}
		}
		else {
			PHDEBUG << "Unable to find codec for audio.";
			_audioStream = NULL;
		}
	}
	_fileName = fileName;
	// Notify listeners that the media is ready, with its timecode type,
	// frame range, dimensions and codec name.
	emit opened(_tcType, frameIn(), frameLength(), width(), height(), codecName());
}
// Runs one step of the face/object detector. Two modes alternate:
//  - Full Haar-cascade detection on the first frame and every 30th frame
//    thereafter (or whenever no objects are currently tracked).
//  - Lucas-Kanade optical-flow tracking of the previous detections' centers
//    on all other frames, which is cheaper than re-running the cascade.
// Side effects: updates objects_/prevObjects_, draws annotated rectangles into
// outputFrame_, shows it via IMSHOW, and publishes a RectangleMessage when a
// downstream consumer exists.
void HaarDetectorBody::Process(void) {
	const Mat& frameIn = imageMessageIn_->Normalized();
	// Draw annotations on a copy so the incoming RGB frame stays untouched.
	outputFrame_ = imageMessageIn_->Rgb().clone();
	// Remember last frame's detections before this frame overwrites objects_.
	prevObjects_ = objects_;
	if(objects_.empty() || imageMessageIn_->GetMetaData().GetFrameNumber() % 30 == 0) {
		// --- Detection mode: run the Haar cascade on a (down)scaled ROI. ---
		const vector<Rect>& rectangles = rectangleMessageIn_->GetRectangles();
		// Default ROI is the whole frame; narrow it to the first upstream
		// rectangle when one is available (presumably a coarse face region —
		// see PartitionateFace; confirm against its implementation).
		Rect firstRect(0, 0, frameIn.cols, frameIn.rows);
		if(rectangles.size() > 0)
			PartitionateFace(rectangles[0], &firstRect);
		Mat frameInRect = frameIn(firstRect);
		// Rescale the ROI by imgScaleFactor to speed up detectMultiScale.
		Mat normalizedRes(cvRound(frameInRect.rows * param_->imgScaleFactor), cvRound(frameInRect.cols * param_->imgScaleFactor), CV_8UC1);
		resize(frameInRect, normalizedRes, normalizedRes.size());
		objects_.clear();
		cascade_.detectMultiScale( normalizedRes, objects_, param_->scaleFactor, param_->minNeighbors, param_->flags, param_->minSize, param_->maxSize );
		if(!objects_.empty()) {
			for(vector<Rect>::iterator r = objects_.begin(); r != objects_.end(); r++) {
				// Map each hit back from the scaled ROI into full-frame
				// coordinates (undo the scale, then offset by the ROI origin).
				r->x = cvRound(r->x * param_->invImgScaleFactor) + firstRect.x;
				r->y = cvRound(r->y * param_->invImgScaleFactor) + firstRect.y;
				r->width = cvRound(r->width * param_->invImgScaleFactor);
				r->height = cvRound(r->height * param_->invImgScaleFactor);
				// Blue rectangles mark fresh cascade detections.
				rectangle(outputFrame_, *r, Scalar(255.0, 0.0, 0.0), 2);
				putText(outputFrame_, GetFullName(), r->tl(), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255.0, 0.0, 0.0));
			}
		}
	}
	else if(!prevObjects_.empty() && prevImageMessageIn_) {
		// --- Tracking mode: follow previous detections with optical flow. ---
		const Mat& prevFrame = prevImageMessageIn_->Normalized();
		param_->ClearLKVectors();
		// Track each previous rectangle by its center point; keep its size so
		// the rectangle can be rebuilt around the tracked center.
		for(vector<Rect>::const_iterator r = prevObjects_.begin(); r != prevObjects_.end(); r++) {
			param_->prevPoints.push_back(Point(cvRound(r->x + r->width * 0.5), cvRound(r->y + r->height * 0.5)));
			param_->objSizes.push_back(r->size());
		}
		calcOpticalFlowPyrLK( prevFrame, frameIn, param_->prevPoints, param_->nextPoints, param_->status, param_->error, param_->winSize, param_->maxLevel, param_->criteria, param_->LKflags, param_->minEigThreshold );
		// Fall back to the untracked previous centers if the flow output size
		// is inconsistent. NOTE(review): per-point status flags are not
		// checked, so a lost point silently keeps its flow result — confirm
		// this is intended.
		const vector<Point2f>& actPoints = (param_->prevPoints.size() == param_->nextPoints.size() ? param_->nextPoints : param_->prevPoints);
		for(size_t i = 0; i < actPoints.size(); i++) {
			// Rebuild the rectangle of the original size, centered on the
			// tracked point. objects_ still holds last frame's entries here,
			// so indexing by i matches actPoints one-to-one.
			Point tl(cvRound(actPoints[i].x - param_->objSizes[i].width * 0.5), cvRound(actPoints[i].y - param_->objSizes[i].height * 0.5));
			Point br(cvRound(actPoints[i].x + param_->objSizes[i].width * 0.5), cvRound(actPoints[i].y + param_->objSizes[i].height * 0.5));
			objects_[i] = Rect(tl, br);
			// Red rectangles mark tracked (not re-detected) objects.
			rectangle(outputFrame_, objects_[i], Scalar(0.0, 0.0, 255.0), 2);
			putText(outputFrame_, GetFullName(), tl, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0.0, 0.0, 255.0));
		}
	}
	IMSHOW(GetFullName(), outputFrame_);
	// Only allocate an output message when someone downstream will consume it.
	output_ = HasSuccessor() ? new RectangleMessage(objects_) : NULL;
}