static int readFunction(void* opaque, uint8_t* buffer, int bufferSize) { VideoDecoder* decoder = (VideoDecoder*) opaque; //Call implemented function return decoder->getFillFileBufferFunc()(decoder->getCustomFileBufferFuncData(), buffer, bufferSize); }
/*
 * vdec_flush
 * Flush all pending frames from the decoder core.
 * Returns VDEC_SUCCESS on success, VDEC_EFAILED when the wrapper or its
 * core decoder is missing.
 */
Vdec_ReturnType vdec_flush(struct VDecoder* dec, int *nFlushedFrames)
{
   QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: flush \n");
   // Guard the wrapper itself before touching dec->core (the original
   // dereferenced dec unconditionally; siblings here check NULL == dec).
   if(!dec)
      return VDEC_EFAILED;
   VideoDecoder* pDec = (VideoDecoder*)(dec->core);
   if(!pDec)
      return VDEC_EFAILED;
   pDec->Flush( nFlushedFrames );
   return VDEC_SUCCESS;
}
// FFmpeg get_format callback: picks a pixel format for the codec context.
// Falls back to YUV420P on bad arguments, and to the default negotiator
// when the context has no owning VideoDecoder attached via opaque.
static AVPixelFormat GetFormat(AVCodecContext *Context, const AVPixelFormat *Formats)
{
    if (!Context || !Formats)
        return AV_PIX_FMT_YUV420P;

    VideoDecoder* owner = static_cast<VideoDecoder*>(Context->opaque);
    if (!owner)
        return GetFormatDefault(Context, Formats);

    // Let the owning decoder negotiate the format.
    return owner->AgreePixelFormat(Context, Formats);
}
// Returns TRUE when a GStreamer decoder can be built for codec_type.
// A probe decoder is created only to test availability and destroyed
// immediately afterwards.
G_GNUC_INTERNAL gboolean gstvideo_has_codec(int codec_type)
{
    VideoDecoder *probe = create_gstreamer_decoder(codec_type, NULL);
    if (!probe)
        return FALSE;
    probe->destroy(probe);
    return TRUE;
}
// FFmpeg release_buffer callback: returns a frame's storage either to
// libavcodec (internally-allocated frames) or to the owning VideoDecoder.
static void ReleaseBuffer(AVCodecContext *Context, AVFrame *Frame)
{
    // Internally allocated frames go back through the default path.
    if (Frame->type == FF_BUFFER_TYPE_INTERNAL)
    {
        avcodec_default_release_buffer(Context, Frame);
        return;
    }

    VideoDecoder *owner = static_cast<VideoDecoder*>(Context->opaque);
    if (!owner)
    {
        LOG(VB_GENERAL, LOG_ERR, "Invalid context");
        return;
    }
    owner->ReleaseAVBuffer(Context, Frame);
}
static int GetBuffer(struct AVCodecContext *Context, AVFrame *Frame) { if (!Context->codec) return -1; if (!(Context->codec->capabilities & CODEC_CAP_DR1)) return avcodec_default_get_buffer(Context, Frame); VideoDecoder *parent = static_cast<VideoDecoder*>(Context->opaque); if (parent) return parent->GetAVBuffer(Context, Frame); LOG(VB_GENERAL, LOG_ERR, "Invalid context"); return -1; }
int main(int argc, char *argv[]) { QCoreApplication a(argc, argv); QString file = "test.avi"; int idx = a.arguments().indexOf("-f"); if (idx > 0) file = a.arguments().at(idx + 1); QString decName("FFmpeg"); idx = a.arguments().indexOf("-vc"); if (idx > 0) decName = a.arguments().at(idx + 1); VideoDecoderId cid = VideoDecoderFactory::id(decName.toStdString()); if (cid <= 0) { qWarning("Can not find decoder: %s", decName.toUtf8().constData()); return 1; } VideoDecoder *dec = VideoDecoderFactory::create(cid); AVDemuxer demux; if (!demux.loadFile(file)) { qWarning("Failed to load file: %s", file.toUtf8().constData()); return 1; } dec->setCodecContext(demux.videoCodecContext()); dec->prepare(); dec->open(); QElapsedTimer timer; timer.start(); int count = 0; VideoFrame frame; while (!demux.atEnd()) { if (!demux.readFrame()) continue; if (dec->decode(demux.packet()->data)) { /* * TODO: may contains more than 1 frames * map from gpu or not? */ //frame = dec->frame().clone(); count++; } } qint64 elapsed = timer.elapsed(); int msec = elapsed/1000LL; qDebug("decoded frames: %d, time: %d, average speed: %d", count, msec, count/msec); return 0; }
// Open the playback pipeline for the given file: the stream is opened
// first, the audio decoder is attached to its handle, then the remaining
// decoder and renderer components are opened in turn.
// NOTE(review): no error handling — assumes each component's Open() succeeds.
void Open(std::string filename) {
  stream_->Open(filename);
  // The audio decoder is driven directly off the stream handle.
  audio_decoder_->Open(stream_->getHandle());
  video_decoder_->Open();
  audio_renderer_->Open();
  video_renderer_->Open();
}
/*
 * vdec_release_frame
 * Return an output frame buffer to the decoder core for reuse.
 * Returns VDEC_SUCCESS on success, VDEC_EFAILED on NULL arguments or a
 * missing core decoder.
 */
Vdec_ReturnType vdec_release_frame(struct VDecoder *dec, struct vdec_frame *frame)
{
   VDEC_FRAME vdecFrame;

   QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: release_frame %p\n", frame);

   // Validate arguments BEFORE touching dec->core (the original read
   // dec->core prior to the NULL check, crashing on a NULL dec).
   if (NULL == dec || NULL == frame)
   {
      QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: error: encountered NULL parameter vdec: 0x%x frame: 0x%x", (unsigned int)dec, (unsigned int)frame);
      return VDEC_EFAILED;
   }

   VideoDecoder *pDec = (VideoDecoder*)(dec->core);
   // Guard the core pointer before calling through it (original did not).
   if (NULL == pDec)
      return VDEC_EFAILED;

   //vdecFrame.pBuf = (VDEC_BYTE*)frame->phys;
   vdecFrame.pBuf = (VDEC_BYTE*)frame->base;
   vdecFrame.timestamp = (unsigned long long)frame->timestamp;
   pDec->ReuseFrameBuffer(&vdecFrame);
   QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: released_frame with ptr: 0x%x", (unsigned int)vdecFrame.pBuf);
   return VDEC_SUCCESS;
}
// Tear the playback pipeline down: stop playback first, then close the
// stream and each decoder/renderer component.
void Close() {
  Stop();
  stream_->Close();
  audio_decoder_->Close();
  video_decoder_->Close();
  audio_renderer_->Close();
  video_renderer_->Close();
}
// Display next frame bool next() { static bool first = true; if(_decoder.getNextFrame(&_frame)) { ++_currentFrame; if(first) { _fh = _frame.height(); _fw = _frame.width(); createMask(); getBestFrames((folder+"/BgCandidates/_GraphcutInfo.txt").toStdString()); first = false; _width = 2000; _height = 1500; screen = new unsigned char[_width*_height*4]; } bool important = false; for(int i = 0; i < _bestFrames.size(); ++i) { if(_bestFrames[i].id == _currentFrame) important = true; } if(!important) return true; _framePos1 = _homographies[_currentFrame] * vec3(-_fw/2, _fh/2, 1); _framePos2 = _homographies[_currentFrame] * vec3(-_fw/2, -_fh/2, 1); _framePos3 = _homographies[_currentFrame] * vec3(_fw/2, -_fh/2, 1); _framePos4 = _homographies[_currentFrame] * vec3(_fw/2, _fh/2, 1); memset(screen, 0, _width*_height*3); flipV(screen, _width, _height, 3); SoftwareRenderer::render(RX::vec3(255, 0, 255), _framePos1, _framePos2, _framePos3, _framePos4, screen, _width, _height, 3); SoftwareRenderer::renderNoColorAB(_frame, RX::vec3(255, 0, 255), RX::vec3(0, 255, 0), _framePos1, _framePos2, _framePos3, _framePos4, screen, _width, _height, 4, 3); for(int i = 0; i < _width*_height; ++i) { char swap = screen[i*3]; screen[i*3] = screen[i*3+2]; screen[i*3+2] = swap; } char buf[50]; sprintf(buf, "/BgCandidates/mask%d.png", _currentFrame); QImage maskImg(screen, _width, _height, QImage::Format_RGB888); maskImg.save(folder+buf); return true; } else return false; }
// Construct the player pipeline: demuxer stream, audio/video decoders,
// renderers and the scheduler, wiring the decode-finished and timer
// callbacks back to this player instance.
FFPlayer() {
  stream_ = new FFStream();
  audio_decoder_ = new AudioDecoder();
  video_decoder_ = new VideoDecoder();
  // Forward decoded-frame notifications to this player.
  video_decoder_->setOnDecodeFinished(
    bind(&FFPlayer::video_decoder_OnDecodeFinished, this, placeholders::_1) );
  audio_renderer_ = new AudioRenderer();
  video_renderer_ = new VideoRenderer();
  scheduler_ = new Scheduler();
  // Periodic scheduler tick drives playback timing.
  scheduler_->setOnTime( bind(&FFPlayer::scheduler_OnTimer, this) );
}
virtual void onData(LiveTrack* track, uint8_t* p_buffer, int i_size, int i_truncated_bytes, int64_t pts, int64_t dts) { //std::cout << "Got Data. size = " << i_size << "; truncated bytes = " << i_truncated_bytes << "; pts = " << pts << "; dts = " << dts << std::endl; //std::cout << "Got Data. size = " << i_size << "; pts = " << pts << std::endl; int consumed; if (track->getFormat().i_codec != VLC_CODEC_H264) return; //std::cout << "Got H264 Data. size = " << i_size << "; truncated bytes = " << i_truncated_bytes << "; NAL type = " << (int)(p_buffer[4] & 0x1f) << std::endl; if (!decoder) { decoder = new VideoDecoder(); decoder->openCodec(0); if (track->getFormat().p_extra) { decoder->decode(track->getFormat().p_extra, track->getFormat().i_extra, consumed); } } uint8_t* tmp = p_buffer; int left = i_size; while (left) { AVFrame* ret = decoder->decode(tmp, left, consumed); if (ret) { av_frame_unref(ret); #ifdef TEST_MULTI_CLIENT std::cout << "client " << this << " got frame!!!\n"; #endif } tmp += consumed; left -= consumed; } }
// Prompt the user for a video folder, load its board/homography metadata
// and the decoder, then display the first frame.
void load()
{
    // Prompt a video to load
    folder = QFileDialog::getExistingDirectory(NULL, "Load Video");
    if(!folder.isNull())
    {
        // NOTE(review): paths are built with Windows-style "\\" separators;
        // confirm this tool is Windows-only.
        loadInfo((folder+"\\_LocalBoards.txt").toStdString());
        loadHomographies((folder+"\\_LocalHoms.txt").toStdString());
        _decoder.load(folder.toStdString(), _numFrames);
        _currentFrame = -1;
        _folder = folder.toStdString();
        // Convert per-frame (relative) homographies into cumulative ones
        // by chaining each onto its predecessor.
        for(int i = 1; i < _homographies.size(); ++i)
            _homographies[i] = _homographies[i] * _homographies[i-1];
        //ui->widget->setHomographies(&_homographies);
        // Show the first frame immediately.
        next();
    }
}
/**************************************** * RecVideo * Obtiene los packetes y los muestra *****************************************/ int MediaBridgeSession::RecVideo() { //Coders VideoDecoder* decoder = NULL; VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON); //Create new video frame RTMPVideoFrame frame(0,262143); //Set codec frame.SetVideoCodec(RTMPVideoFrame::FLV1); int width=0; int height=0; DWORD numpixels=0; Log(">RecVideo\n"); //Mientras tengamos que capturar while(receivingVideo) { ///Obtenemos el paquete RTPPacket* packet = rtpVideo.GetPacket(); //Check if (!packet) //Next continue; //Get type VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec(); if ((decoder==NULL) || (type!=decoder->type)) { //Si habia uno nos lo cargamos if (decoder!=NULL) delete decoder; //Creamos uno dependiendo del tipo decoder = VideoCodecFactory::CreateDecoder(type); //Check if (!decoder) { delete(packet); continue; } } //Lo decodificamos if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark())) { delete(packet); continue; } //Get mark bool mark = packet->GetMark(); //Delete packet delete(packet); //Check if it is last one if(!mark) continue; //Check size if (decoder->GetWidth()!=width || decoder->GetHeight()!=height) { //Get dimension width = decoder->GetWidth(); height = decoder->GetHeight(); //Set size numpixels = width*height*3/2; //Set also frame rate and bps encoder->SetFrameRate(25,300,500); //Set them in the encoder encoder->SetSize(width,height); } //Encode next frame VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels); //Check if (!encoded) break; //Check size if (frame.GetMaxMediaSize()<encoded->GetLength()) //Not enougth space return Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength()); //Get full frame frame.SetVideoFrame(encoded->GetData(),encoded->GetLength()); //Set buffer size 
frame.SetMediaSize(encoded->GetLength()); //Check type if (encoded->IsIntra()) //Set type frame.SetFrameType(RTMPVideoFrame::INTRA); else //Set type frame.SetFrameType(RTMPVideoFrame::INTER); //Let the connection set the timestamp frame.SetTimestamp(getDifTime(&first)/1000); //Send it SendMediaFrame(&frame); } //Check if (decoder) //Delete delete(decoder); //Check if (encoder) //Delete delete(encoder); Log("<RecVideo\n"); }
int main(int argc, char *argv[]) { #if 0 QCoreApplication a(argc, argv); return a.exec(); #endif VideoDecoder* videoDecoder = new VideoDecoder; VideoEncoder* videoEncoder = 0; AdaboostClassifier* openClassifier = new AdaboostClassifier; AdaboostClassifier* closedClassifier = new AdaboostClassifier; HandyTracker tracker; if ( argc != 5 ) { printf("Usage: %s <open classifier> <closed classifier> <input video> <output video>\n", argv[0]); return 0; } if ( !openClassifier->Load(argv[1]) ) { fprintf(stderr, "Failed loading open classifier\n", argv[1]); return 1; } if ( !tracker.SetOpenClassifier(openClassifier) ) { fprintf(stderr, "Failed setting open classifier\n"); return 1; } if ( !closedClassifier->Load(argv[2]) ) { fprintf(stderr, "Failed loading closed classifier\n", argv[2]); return 1; } if ( !tracker.SetClosedClassifier(closedClassifier) ) { fprintf(stderr, "Failed setting closed classifier\n"); return 1; } videoDecoder->SetFilename(argv[3]); if ( !videoDecoder->Load() ) { fprintf(stderr, "Failed loading video <%s>\n", argv[3]); return 1; } if ( !videoDecoder->UpdateFrame() ) { fprintf(stderr, "Failed updating frame\n"); return 1; } int frameNumber = 0; bool trackingInitialized = false; Image* img = videoDecoder->GetFrame(); while ( img ) { if ( !videoEncoder ) { videoEncoder = new VideoEncoder; if ( !videoEncoder->Open(argv[4], img->GetWidth(), img->GetHeight(), 25) ) { fprintf(stderr, "Failed opening output video <%s>\n", argv[4]); return 1; } } ProcessFrame(img, &tracker, trackingInitialized, frameNumber); if ( trackingInitialized ) DrawResults(img, &tracker, frameNumber); videoEncoder->AddFrame(img); if ( frameNumber > 1 ) tracker.PurgeRegion(frameNumber - 2); frameNumber++; videoDecoder->UpdateFrame(); img = videoDecoder->GetFrame(); } videoEncoder->Close(); return 0; }
Vdec_ReturnType vdec_post_input_buffer(struct VDecoder *dec, video_input_frame_info *frame, void *cookie) { QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: post_input data=%p len=%d cookie=%p\n", frame->data, frame->len, cookie); #ifdef LOG_INPUT_BUFFERS static int take_input = 1; #endif int fatal_err = 0; /*checkBufAvail flag is needed since we do not need to checkout * YUV/Slice buffer incase the NAL corresponds to same frame. * This is required for multiple NALs in one input buffer */ bool checkBufAvail = true; VDEC_INPUT_BUFFER input; VideoDecoder *pDec = (VideoDecoder*)(dec->core); VDEC_ERROR err = VDEC_ERR_EVERYTHING_FINE; if (NULL == dec || NULL == frame || NULL == frame->data ) { QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: error: encountered NULL parameter dec: 0x%x frame: 0x%x data: 0x%x\n", (unsigned int)dec, (unsigned int)frame, (unsigned int)frame->data); return VDEC_EFAILED; } input.buffer[0] = (unsigned char*)frame->data; input.timestamp[0] = (long long)frame->timestamp; input.buffer_size[0] = (unsigned long int)frame->len; input.buffer_pos[0] = 0; input.layers = 1; input.eOSIndicator[0]= false; QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: received ts: %lld", frame->timestamp); if (frame->timestamp < timestamp ) { QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: error: out of order stamp! %d < %d\n", (int)(frame->timestamp&0xFFFFFFFF), timestamp); } timestamp = (int)frame->timestamp; QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: vdec_core_post_input. 
buffer_size[0]: %ld frame->flags: 0x%x\n", input.buffer_size[0], frame->flags); if (input.buffer_size[0] == 0 && frame->flags & FRAME_FLAG_EOS) { QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Zero-length buffer with EOS bit set\n"); input.eOSIndicator[0] = true; if(pDec) err = pDec->EOS( ); else err = VDEC_ERR_NULL_STREAM_ID; if(VDEC_ERR_OUT_OF_BUFFERS == err) return VDEC_EOUTOFBUFFERS; vdec_decoder_info->ctxt->buffer_done(vdec_decoder_info->ctxt, cookie); if (VDEC_ERR_EVERYTHING_FINE == err) return VDEC_SUCCESS; return VDEC_EFAILED; } QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_core_post_input\n"); #ifdef LOG_INPUT_BUFFERS if (take_input) { fwritex((unsigned char*)frame->data, frame->len, pInputFile); QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: frame %d frame->len %d\n", counter++, frame->len); } #endif do { QPERF_TIME(arm_decode, err = pDec->Decode( &input, checkBufAvail )); if (VDEC_ERR_EVERYTHING_FINE != err) { QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: vdec_decoder error: %d\n", (int)err); if(VDEC_ERR_UNSUPPORTED_DIMENSIONS == err) { fatal_err = 1; break; } } checkBufAvail = false; } while( ( VDEC_ERR_EVERYTHING_FINE == err ) && ( 0 != input.buffer_size[0] ) ); #ifdef LOG_INPUT_BUFFERS take_input = (err==14?0:1); #endif if(VDEC_ERR_OUT_OF_BUFFERS == err) return VDEC_EOUTOFBUFFERS; vdec_input_buffer_release_cb_handler(pDec,&input,cookie); if(VDEC_ERR_EVERYTHING_FINE == err) return VDEC_SUCCESS; if(fatal_err) { static struct vdec_frame frame; memset(&frame, 0, sizeof(frame)); frame.flags |= FRAME_FLAG_FATAL_ERROR; QPERF_END(frame_data); vdec_decoder_info->ctxt->frame_done(vdec_decoder_info->ctxt, &frame); } return VDEC_EFAILED; }
/*
 * vdec_open
 * Allocate and initialize a decoder wrapper for the codec named in
 * ctxt->kind (H.264, MPEG4/H.263 or VC-1): commit its pmem buffers,
 * create the codec-specific core and initialize it with the frame
 * callbacks and slice buffers.
 * Returns the wrapper on success, NULL/0 on failure.
 * NOTE(review): the early "return 0" paths below leak `dec` after a
 * successful calloc — confirm and fix separately.
 */
struct VDecoder *vdec_open(struct vdec_context *ctxt)
{
   struct VDecoder *dec = NULL;
   VDEC_ERROR err = 0;
   const VDEC_CONCURRENCY_CONFIG concurrencyConfig = VDEC_CONCURRENT_NONE;
   VideoDecoder* pDec = NULL;

   dec = (VDecoder*)calloc(1, sizeof(struct VDecoder));
   if (!dec)
   {
      return 0;
   }

   // Cache the context and requested dimensions on the wrapper.
   dec->ctxt = ctxt;
   dec->width = ctxt->width;
   dec->height = ctxt->height;
   // Effectively a self-assignment through the shared ctxt pointer.
   dec->ctxt->outputBuffer.numBuffers = ctxt->outputBuffer.numBuffers;

   // Reserve the pmem arena for input/output buffers.
   if(VDEC_SUCCESS != vdec_commit_memory(dec))
   {
      return 0;
   }

   // Reset profiling state and global frame counters.
   QPERF_RESET(arm_decode);
   QPERF_RESET(frame_data);
   nFrameDoneCnt = 0;
   nGoodFrameCnt = 0;
#ifdef PROFILE_DECODER
   qperf_total_frame_cnt = 0;
#endif
   vdec_output_frame_index = 0;
   timestamp = 0;
   int i;

   VDEC_PARAMETER_DATA codeDetectionEnable;
   codeDetectionEnable.startCodeDetection.bStartCodeDetectionEnable = false; // by default set to false; MPEG4 doesnt require it

   QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_open(). width: %d, height: %d\n", dec->width, dec->height);
   vdec_decoder_info = dec;
   QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_open(). width: %d, height: %d kind[%s]\n", vdec_decoder_info->ctxt->width, vdec_decoder_info->ctxt->height, vdec_decoder_info->ctxt->kind);

   // Instantiate the codec-specific core based on the OMX component name.
   if(!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.avc"))
   {
      dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnH264(&err));
      QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating H264 Decoder [%p]\n",dec->core);
      VDEC_PARAMETER_DATA sizeOfNalLengthField;
      sizeOfNalLengthField.sizeOfNalLengthField.sizeOfNalLengthField = dec->ctxt->size_of_nal_length_field;
      QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: NAL lenght [%d]\n",dec->ctxt->size_of_nal_length_field);
      pDec = (VideoDecoder*)(dec->core);
      // A zero NAL-length field means the stream uses start codes.
      if (0 == dec->ctxt->size_of_nal_length_field)
      {
         QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: START CODE....\n");
         codeDetectionEnable.startCodeDetection.bStartCodeDetectionEnable = true;
         if(!pDec)
            err = VDEC_ERR_NULL_STREAM_ID;
         else
            err = pDec->SetParameter(VDEC_PARM_START_CODE_DETECTION,&codeDetectionEnable);
         if (VDEC_ERR_EVERYTHING_FINE != err)
         {
            // TBD- printx("[vdec_core] set start code detection parameter failed: %d", (int)err);
            QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"[vdec_core] set start code detection parameter failed: %d", (int)err);
            goto fail_initialize;
         }
      }
      else if(dec->ctxt->size_of_nal_length_field > 0 && dec->ctxt->size_of_nal_length_field <= 4)
      {
         QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: NALU LENGTH[%d]\n",dec->ctxt->size_of_nal_length_field);
         // test size of NAL length field decoder support
         if(!pDec)
            err = VDEC_ERR_NULL_STREAM_ID;
         else
            err = pDec->SetParameter( VDEC_PARM_SIZE_OF_NAL_LENGTH_FIELD, &sizeOfNalLengthField );
         if (VDEC_ERR_EVERYTHING_FINE != err)
         {
            // TBD- printx("[vdec_core] set start code detection parameter failed: %d", (int)err);
            goto fail_initialize;
         }
      }
      else
      {
         QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: Invalid size of nal length field: %d\n", dec->ctxt->size_of_nal_length_field);
         goto fail_core;
      }
   }
   else if ((!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.mpeg4")) || (!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.h263")))
   {
      dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnMpeg4(&err));
      pDec = (VideoDecoder*)(dec->core);
      QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating MP4 Decoder [%p]\n",dec->core);
   }
   else if (!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.vc1"))
   {
      dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnWmv(&err));
      pDec = (VideoDecoder*)(dec->core);
      QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating WMV Decoder [%p]\n",dec->core);
   }
   else
   {
      QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"Incorrect codec kind\n");
      goto fail_core;
   }

   if (VDEC_ERR_EVERYTHING_FINE != err || NULL == dec->core)
   {
      QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: create failed\n");
      goto fail_core;
   }

   // Expose the committed input buffers to the core as slice buffers.
   for(i=0; i<VDEC_NFRAMES; i++)
   {
      slice_buffer_info[i].base = (char*)dec->input[i].base;
      //slice_buffer_info[i].phys = dec->input[i].phys;
      slice_buffer_info[i].fd = dec->input[i].pmem_id;
   }

   VDEC_DIMENSIONS frameSize;
   frameSize.height = dec->height;
   frameSize.width = dec->width;
   QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: Init width: %d height %d\n", frameSize.width, frameSize.height);
   /* VDEC_PARAMETER_DATA deblockerInfo; deblockerInfo.deblockerEnable.bDeblockerEnable = true; QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"[vdec_core] SetParameter\n"); err = vdec_set_parameter(dec->core, VDEC_PARM_DEBLOCKER_ENABLE, &deblockerInfo); QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"[vdec_core] after SetParameter\n",err); if (VDEC_ERR_EVERYTHING_FINE != err) { //TBD - printx("[vdec_core] set deblocker enable parameter failed: %d", (int)err); goto fail_initialize; } */
   QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: vdec_initialize\n");

   // Hand the frame-buffer callbacks, slice buffers and geometry to the core.
   if(!pDec)
      err = VDEC_ERR_NULL_STREAM_ID;
   else
   {
      err = pDec->Initialize(1, vdec_frame_buffer_malloc_cb_handler, vdec_frame_buffer_free_cb_handler, vdec_frame_cb_handler, NULL, frameSize, VDEC_CONCURRENT_NONE, slice_buffer_info, dec->ctxt->outputBuffer.numBuffers, ctxt->adsp_fd);
   }
   QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: vdec_initialize\n",err);
   if (err != VDEC_ERR_EVERYTHING_FINE)
   {
      QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: initialization failed: %d\n", (int)err);
      goto fail_initialize;
   }

#if NEED_VDEC_LP
   // Open the low-power ADSP node and enable it.
#ifdef _ANDROID_
   dec->fake = adsp_open("/dev/adsp/VDEC_LP_MODE", dec->core, core_msg_func);
#else
   dec->fake = adsp_open("/dev/VDEC_LP_MODE", dec->core, core_msg_func);
#endif //_ANDROID_
   if (!dec->fake)
   {
      QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: adsp_open() failed error: %s\n", strerror(errno));
      goto fail_initialize;
   }
   if (adsp_enable((adsp_module*)dec->fake))
      goto fail_enable;
#endif

#ifdef LOG_YUV_FRAMES
#ifdef T_WINNT
   pYUVFile = fopen ( "../debug/yuvframes.yuv" , "wb" );
#elif _ANDROID_
   pYUVFile = fopen ( "/data/yuvframes.yuv" , "wb" );
#else
   pYUVFile = fopen ( "yuvframes.yuv" , "wb" );
#endif
#endif
#ifdef LOG_INPUT_BUFFERS
#ifdef T_WINNT
   pInputFile = fopen ( "../debug/inputbuffers.bin" , "wb" );
#elif _ANDROID_
   pInputFile = fopen ( "/data/inputbuffers.bin" , "wb" );
#else
   pInputFile = fopen ( "inputbuffers.bin" , "wb" );
#endif
#endif

   return dec;

fail_enable:
#if NEED_VDEC_LP
   adsp_close((adsp_module*)dec->fake);
#endif
fail_initialize:
   // Suspend and destroy the core before tearing down the wrapper.
   if(pDec)
   {
      pDec->Suspend( );
      QTV_Delete( pDec );
   }
fail_core:
   if (dec)
   {
      dec->dead = 1;
      free(dec);
   }
   return NULL;
}
/*
 * vdec_close
 * Suspend and destroy the decoder core, dump profiling statistics, close
 * any log files, release the pmem arena and free the wrapper.
 * NOTE(review): the NEED_VDEC_LP branch references `vdec->fake` while the
 * parameter is named `dec` — likely a typo unless `vdec` is a global;
 * confirm before enabling that configuration.
 */
Vdec_ReturnType vdec_close(struct VDecoder *dec)
{
   VDEC_ERROR err;
   VideoDecoder* pDec = (VideoDecoder*)(dec->core);
   QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_close()\n");
#ifdef PROFILE_DECODER
   // Dump ARM-side decode timing and frame-done throughput statistics.
   usecs_t time_taken_by_arm = QPERF_TERMINATE(arm_decode);
   float avArmTime = (float)time_taken_by_arm/(qperf_total_frame_cnt*1000);
   usecs_t frame_data_time = QPERF_TERMINATE(frame_data);
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL," Arm Statistics \n");
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
   QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Total number of frames decoded = %ld\n",qperf_total_frame_cnt);
   QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Average Arm time/frame(ms) = %f\n",avArmTime);
   QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frames Arm Decoded/sec = %f\n",1000/avArmTime);
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL," Frame Done Statistics \n");
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
   QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frame done cumulative time = %lld\n",frame_data_time);
   QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frames Done per second = %f\n",(float)(qperf_total_frame_cnt-1)*1000000/frame_data_time);
   QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
#endif
#ifdef LOG_YUV_FRAMES
   if (pYUVFile)
   {
      fclose (pYUVFile);
      pYUVFile = NULL;
   }
#endif
#ifdef LOG_INPUT_BUFFERS
   if (pInputFile)
   {
      fclose (pInputFile);
   }
#endif
   vdec_output_frame_index = 0;
#if NEED_VDEC_LP
   if (vdec->fake)
   {
      //jlk - adsp_close() calls adsp_disable right now. Calling adsp_disable() twice causes problems
      //Renable this line when we fix the kernel driver
      //adsp_disable((adsp_module*)vdec->fake);
      adsp_close((adsp_module*)vdec->fake);
   }
   else
   {
      QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: adsp modules is NULL\n");
   }
#endif
   nFrameDoneCnt = 0;
   nGoodFrameCnt = 0;
   if (dec->core)
   {
      // Suspend the core before destroying it.
      QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: calling Suspend");
      err = pDec->Suspend( );
      if (VDEC_ERR_EVERYTHING_FINE != err)
      {
         QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: Suspend returned error: %d\n", (int)err);
      }
      QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: calling vdec_destroy");
      QTV_Delete( (VideoDecoder*)(dec->core) );
   }
   else
   {
      QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: core is NULL");
   }
   // Release the pmem arena and the wrapper itself.
   pmem_free(&dec->arena);
   free(dec);
   QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: closed\n");
   return VDEC_SUCCESS;
}
/*
 * Hand-posture detection driver: for every frame of the input video,
 * finds flesh-colored regions, extracts hand-candidate features, runs a
 * posture classifier on each candidate and writes an annotated video.
 * Usage: prog <flesh classifier file> <hand classifier file> <input file> <output file>
 */
int main(int argc, char* argv[])
{
  int i, j, k;
  int width, height;
  int numFleshRegions, numHands, xScale, yScale;
  int left, right, top, bottom;
  Image* image;
  Image outlineImage;
  FleshDetector* fleshDetector;
  vector<ConnectedRegion*>* fleshRegionVector;
  vector<Hand*> hands;
  Hand* hand;
  vector<HandCandidate*> handCandidates;
  HandCandidate* candidate;
  // Annotation colors (RGB triples).
  unsigned char angledBoxColor[] = {255, 255, 0};
  unsigned char longColor[] = {0, 255, 0};
  unsigned char shortColor[] = {0, 0, 255};
  unsigned char offsetColor[] = {0, 255, 255};
  unsigned char pointColor[] = {255, 0, 0};
  unsigned char farPointColor[] = {255, 0, 255};
  int numLargeRegions;
  string basename;
  DoublePoint centroid, center, nearEdge, farEdge;
  LineSegment shortLine, longLine, offsetLine;
  Rect angledBox;
  double edgeAngle, offsetAngle;
  CompositeClassifier postureDetector;
  string features;
  Matrix input;
  int classIndex;
  SubImage handImage;
  vector<Point> farPoints;
  int numFarPoints;
  string inputFilename, outputFilename;
  VideoDecoder decoder;
  VideoEncoder encoder;
  bool needInit = true;

  if ( argc < 5 )
  {
    printf("Usage: %s <flesh classifier file> <hand classifier file> <input file> <output file>\n", argv[0]);
    return 1;
  }

  // Either loads a real detector or gets a dummy detector if arg is "DUMMY"
  fleshDetector = FleshDetector::Get(argv[1]);
  if ( !fleshDetector )
  {
    fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
    return 1;
  }
  if ( !postureDetector.Load(argv[2]) )
  {
    fprintf(stderr, "Error loading hand detector %s\n", argv[2]);
    return 1;
  }
  features = postureDetector.GetFeatureString();

  inputFilename = argv[3];
  outputFilename = argv[4];
  decoder.SetFilename(inputFilename);
  if ( !decoder.Load() )
  {
    fprintf(stderr, "Error loading video %s\n", inputFilename.c_str());
    return 1;
  }

  while ( decoder.UpdateFrame() )
  {
    image = decoder.GetFrame();
    // Open the encoder lazily once the first frame's size is known.
    if ( needInit )
    {
      needInit = false;
      width = image->GetWidth();
      height = image->GetHeight();
      if ( !encoder.Open(outputFilename.c_str(), width, height, 10) )
      {
        fprintf(stderr, "Failed opening %s\n", outputFilename.c_str());
        return 1;
      }
    }
    hands.clear();
    outlineImage = *image;
    // Flesh regions come back in a scaled-down coordinate space.
    fleshRegionVector = fleshDetector->GetFleshRegions(image, xScale, yScale);
    if ( fleshRegionVector )
    {
      numFleshRegions = fleshRegionVector->size();
      numLargeRegions = 0;
      for (i = 0; i < numFleshRegions; i++)
      {
        if ( !(*fleshRegionVector)[i]->GetBounds(left, right, top, bottom) )
        {
          fprintf(stderr, "Error getting flesh block %d bounds\n", i);
          return 1;
        }
        // Map the scaled bounds back to full-resolution coordinates.
        left *= xScale;
        right = (right + 1) * xScale - 1;
        top *= yScale;
        bottom = (bottom + 1) * yScale - 1;
        // Skip regions too small to be a hand.
        if ( (right - left + 1 < FLESH_REGION_MIN_DIMENSION) || (bottom - top + 1 < FLESH_REGION_MIN_DIMENSION) )
          continue;
        numLargeRegions++;
        // Re-run flesh detection at full resolution within the region.
        handImage.CreateFromParent(image, left, right, top, bottom);
        vector<ConnectedRegion*>* fullResRegions;
        fullResRegions = fleshDetector->GetFleshRegions(&handImage);
        int numFullResRegions = 0;
        if ( fullResRegions )
          numFullResRegions = fullResRegions->size();
        if ( !numFullResRegions )
        {
          fprintf(stderr, "Failed getting full resolution hand candidate\n");
          return 1;
        }
        // Keep only the largest full-resolution region as the candidate.
        int regionIndex = 0;
        if ( numFullResRegions > 1 )
        {
          for (k = 1; k < numFullResRegions; k++)
            if ( (*fullResRegions)[k]->HasMorePixels( *((*fullResRegions)[regionIndex]) ) )
              regionIndex = k;
        }
        candidate = new HandCandidate( (*fullResRegions)[regionIndex] );
        if ( !candidate->GetScaledFeatures(1, 1, centroid, center, nearEdge, farEdge, shortLine, longLine, offsetLine, edgeAngle, offsetAngle) )
        {
          fprintf(stderr, "Error getting hand candidate features for flesh block %d\n", i);
          return 1;
        }
        angledBox = candidate->GetAngledBoundingBox(longLine);
        farPoints.clear();
        if ( !candidate->GetFarPoints(farPoints) )
          fprintf(stderr, "Error getting far points for flesh block %d\n", i);
        numFarPoints = farPoints.size();
        // Translate all candidate geometry into top-level image coordinates.
        centroid = handImage.GetTopLevelCoords(centroid);
        center = handImage.GetTopLevelCoords(center);
        nearEdge = handImage.GetTopLevelCoords(nearEdge);
        farEdge = handImage.GetTopLevelCoords(farEdge);
        shortLine.Translate(left, top);
        longLine.Translate(left, top);
        offsetLine.Translate(left, top);
        angledBox.Translate(left, top);
        for (k = 0; k < numFarPoints; k++)
          farPoints[k] = handImage.GetTopLevelCoords(farPoints[k]);
        if ( !candidate->GetFeatureVector(features, input) )
        {
          fprintf(stderr, "Error getting hand candidate features for flesh block %d\n", i);
          return 1;
        }
        // -1 means no posture class matched; otherwise record the hand.
        classIndex = postureDetector.Classify(input);
        if ( classIndex != -1 )
        {
          hand = new Hand;
          hand->SetBounds(left, right, top, bottom);
          hand->SetPostureString(postureDetector.GetClassName(classIndex));
          hands.push_back(hand);
        }
        delete candidate;
        // Draw the candidate geometry onto the output frame.
        outlineImage.DrawLine(longColor, 1, longLine);
        outlineImage.DrawLine(shortColor, 1, shortLine);
        outlineImage.DrawLine(offsetColor, 1, offsetLine);
        outlineImage.DrawLine(pointColor, 1, centroid, centroid);
        outlineImage.DrawLine(pointColor, 1, center, center);
        outlineImage.DrawLine(pointColor, 1, nearEdge, nearEdge);
        outlineImage.DrawLine(pointColor, 1, farEdge, farEdge);
        outlineImage.DrawRect(angledBoxColor, 1, angledBox);
        for (k = 0; k < numFarPoints; k++)
          outlineImage.DrawLine(farPointColor, 1, centroid, farPoints[k]);
      }
      // Draw a posture-colored box around every classified hand.
      numHands = hands.size();
      for (j = 0; j < numHands; j++)
      {
        hands[j]->GetBounds(left, right, top, bottom);
        outlineImage.DrawBox(hands[j]->GetPostureColor(0), hands[j]->GetPostureColor(1), hands[j]->GetPostureColor(2), hands[j]->GetPostureColor(3), 3, left, top, right, bottom);
        delete hands[j];
      }
      hands.clear();
    }
    if ( !encoder.AddFrame(&outlineImage) )
    {
      fprintf(stderr, "Error inserting video frame\n");
      return 1;
    }
  }
  encoder.Close();

  return 0;
}
int MediaBridgeSession::SendVideo() { VideoDecoder *decoder = VideoCodecFactory::CreateDecoder(VideoCodec::SORENSON); VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(rtpVideoCodec); DWORD width = 0; DWORD height = 0; DWORD numpixels = 0; QWORD lastVideoTs = 0; Log(">SendVideo\n"); //Set video format if (!rtpVideo.SetSendingCodec(rtpVideoCodec)) //Error return Error("Peer do not support [%d,%s]\n",rtpVideoCodec,VideoCodec::GetNameFor(rtpVideoCodec)); //While sending video while (sendingVideo) { //Wait for next video if (!videoFrames.Wait(0)) //Check again continue; //Get audio grame RTMPVideoFrame* video = videoFrames.Pop(); //check if (!video) //Again continue; //Get time difference DWORD diff = 0; //Get timestam QWORD ts = video->GetTimestamp(); //If it is not the first frame if (lastVideoTs) //Calculate it diff = ts - lastVideoTs; //Set the last audio timestamp lastVideoTs = ts; //Check if (video->GetVideoCodec()!=RTMPVideoFrame::FLV1) //Error continue; //Decode frame if (!decoder->Decode(video->GetMediaData(),video->GetMediaSize())) { Error("decode packet error"); //Next continue; } //Check size if (decoder->GetWidth()!=width || decoder->GetHeight()!=height) { //Get dimension width = decoder->GetWidth(); height = decoder->GetHeight(); //Set size numpixels = width*height*3/2; //Set also frame rate and bps encoder->SetFrameRate(25,300,500); //Set them in the encoder encoder->SetSize(width,height); } //Check size if (!numpixels) { Error("numpixels equals 0"); //Next continue; } //Check fpu if (sendFPU) { //Send it encoder->FastPictureUpdate(); //Reset sendFPU = false; } //Encode it VideoFrame *videoFrame = encoder->EncodeFrame(decoder->GetFrame(),numpixels); //If was failed if (!videoFrame) { Log("No video frame\n"); //Next continue; } //Set frame time videoFrame->SetTimestamp(diff); //Send it smoothly smoother.SendFrame(videoFrame,diff); //Delete video frame delete(video); } Log("<SendVideo\n"); return 1; }
/**************************************** * RecVideo * Obtiene los packetes y los muestra *****************************************/ int VideoStream::RecVideo() { VideoDecoder* videoDecoder = NULL; VideoCodec::Type type; timeval before; timeval lastFPURequest; DWORD lostCount=0; DWORD frameTime = (DWORD)-1; DWORD lastSeq = RTPPacket::MaxExtSeqNum; bool waitIntra = false; Log(">RecVideo\n"); //Get now gettimeofday(&before,NULL); //Not sent FPU yet setZeroTime(&lastFPURequest); //Mientras tengamos que capturar while(receivingVideo) { //Get RTP packet RTPPacket* packet = rtp.GetPacket(); //Check if (!packet) //Next continue; //Get extended sequence number and timestamp DWORD seq = packet->GetExtSeqNum(); DWORD ts = packet->GetTimestamp(); //Get packet data BYTE* buffer = packet->GetMediaData(); DWORD size = packet->GetMediaLength(); //Get type type = (VideoCodec::Type)packet->GetCodec(); //Lost packets since last DWORD lost = 0; //If not first if (lastSeq!=RTPPacket::MaxExtSeqNum) //Calculate losts lost = seq-lastSeq-1; //Increase total lost count lostCount += lost; //Update last sequence number lastSeq = seq; //If lost some packets or still have not got an iframe if(lostCount || waitIntra) { //Check if we got listener and more than 1/2 second have elapsed from last request if (listener && getDifTime(&lastFPURequest)>minFPUPeriod) { //Debug Debug("-Requesting FPU lost %d\n",lostCount); //Reset count lostCount = 0; //Request it listener->onRequestFPU(); //Request also over rtp rtp.RequestFPU(); //Update time getUpdDifTime(&lastFPURequest); //Waiting for refresh waitIntra = true; } } //Check if it is a redundant packet if (type==VideoCodec::RED) { //Get redundant packet RTPRedundantPacket* red = (RTPRedundantPacket*)packet; //Get primary codec type = (VideoCodec::Type)red->GetPrimaryCodec(); //Check it is not ULPFEC redundant packet if (type==VideoCodec::ULPFEC) { //Delete packet delete(packet); //Skip continue; } //Update primary redundant payload buffer = 
red->GetPrimaryPayloadData(); size = red->GetPrimaryPayloadSize(); } //Check codecs if ((videoDecoder==NULL) || (type!=videoDecoder->type)) { //If we already got one if (videoDecoder!=NULL) //Delete it delete videoDecoder; //Create video decorder for codec videoDecoder = VideoCodecFactory::CreateDecoder(type); //Check if (videoDecoder==NULL) { Error("Error creando nuevo decodificador de video [%d]\n",type); //Delete packet delete(packet); //Next continue; } } //Check if we have lost the last packet from the previous frame by comparing both timestamps if (ts>frameTime) { Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime); //Try to decode what is in the buffer videoDecoder->DecodePacket(NULL,0,1,1); //Get picture BYTE *frame = videoDecoder->GetFrame(); DWORD width = videoDecoder->GetWidth(); DWORD height = videoDecoder->GetHeight(); //Check values if (frame && width && height) { //Set frame size videoOutput->SetVideoSize(width,height); //Check if muted if (!muted) //Send it videoOutput->NextFrame(frame); } } //Update frame time frameTime = ts; //Decode packet if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark())) { //Check if we got listener and more than 1/2 seconds have elapsed from last request if (listener && getDifTime(&lastFPURequest)>minFPUPeriod) { //Debug Log("-Requesting FPU decoder error\n"); //Reset count lostCount = 0; //Request it listener->onRequestFPU(); //Request also over rtp rtp.RequestFPU(); //Update time getUpdDifTime(&lastFPURequest); //Waiting for refresh waitIntra = true; } } //Check if it is the last packet of a frame if(packet->GetMark()) { if (videoDecoder->IsKeyFrame()) Debug("-Got Intra\n"); //No frame time yet for next frame frameTime = (DWORD)-1; //Get picture BYTE *frame = videoDecoder->GetFrame(); DWORD width = videoDecoder->GetWidth(); DWORD height = videoDecoder->GetHeight(); //Check values if (frame && width && height) { //Set frame size videoOutput->SetVideoSize(width,height); //Check if muted if (!muted) 
//Send it videoOutput->NextFrame(frame); } //Check if we got the waiting refresh if (waitIntra && videoDecoder->IsKeyFrame()) //Do not wait anymore waitIntra = false; } //Delete packet delete(packet); } //Delete encoder delete videoDecoder; Log("<RecVideo\n"); }
/// Build a transcoding pipeline for one input stream.
///
/// Wires up decoder -> transform -> encoder -> output stream for the
/// stream's media type, plus a silence/black "generator" decoder used
/// when the real input is unavailable (e.g. during an initial offset).
///
/// @param inputStream    stream to transcode (kept by pointer, not owned copy semantics changed)
/// @param outputFile     file that receives the newly added output stream
/// @param profile        encoding profile (must contain constants::avProfileCodec)
/// @param subStreamIndex audio sub-stream to extract, or -1 for all channels
/// @param offset         start offset in seconds; non-zero begins with the generator
/// @throws std::runtime_error for media types other than video/audio
StreamTranscoder::StreamTranscoder(
	IInputStream& inputStream,
	IOutputFile& outputFile,
	const ProfileLoader::Profile& profile,
	const int subStreamIndex,
	const double offset
	)
	: _inputStream( &inputStream )
	, _outputStream( NULL )
	, _sourceBuffer( NULL )
	, _frameBuffer( NULL )
	, _inputDecoder( NULL )
	, _generator( NULL )
	, _currentDecoder( NULL )
	, _outputEncoder( NULL )
	, _transform( NULL )
	, _subStreamIndex( subStreamIndex )
	, _offset( offset )
	, _canSwitchToGenerator( false )
{
	// create a transcode case
	switch( _inputStream->getStreamType() )
	{
		case AVMEDIA_TYPE_VIDEO :
		{
			// input decoder
			VideoDecoder* inputVideo = new VideoDecoder( *static_cast<InputStream*>( _inputStream ) );
			// set decoder options with empty profile to set some key options to specific values (example: threads to auto)
			inputVideo->setProfile( ProfileLoader::Profile() );
			inputVideo->setup();
			_inputDecoder = inputVideo;
			_currentDecoder = _inputDecoder;

			// output encoder
			VideoEncoder* outputVideo = new VideoEncoder( profile.at( constants::avProfileCodec ) );
			_outputEncoder = outputVideo;

			VideoFrameDesc outputFrameDesc = _inputStream->getVideoCodec().getVideoFrameDesc();
			outputFrameDesc.setParameters( profile );
			outputVideo->setProfile( profile, outputFrameDesc );

			// output stream
			_outputStream = &outputFile.addVideoStream( outputVideo->getVideoCodec() );

			// buffers to process
			_sourceBuffer = new VideoFrame( _inputStream->getVideoCodec().getVideoFrameDesc() );
			_frameBuffer = new VideoFrame( outputVideo->getVideoCodec().getVideoFrameDesc() );

			// transform
			_transform = new VideoTransform();

			// generator decoder
			VideoGenerator* generatorVideo = new VideoGenerator();
			generatorVideo->setVideoFrameDesc( outputVideo->getVideoCodec().getVideoFrameDesc() );
			_generator = generatorVideo;

			break;
		}
		case AVMEDIA_TYPE_AUDIO :
		{
			// input decoder
			AudioDecoder* inputAudio = new AudioDecoder( *static_cast<InputStream*>( _inputStream ) );
			// set decoder options with empty profile to set some key options to specific values (example: threads to auto)
			inputAudio->setProfile( ProfileLoader::Profile() );
			inputAudio->setup();
			_inputDecoder = inputAudio;
			_currentDecoder = _inputDecoder;

			// output encoder
			AudioEncoder* outputAudio = new AudioEncoder( profile.at( constants::avProfileCodec ) );
			_outputEncoder = outputAudio;

			AudioFrameDesc outputFrameDesc( _inputStream->getAudioCodec().getAudioFrameDesc() );
			outputFrameDesc.setParameters( profile );
			if( subStreamIndex > -1 )
			{
				// @todo manage downmix ?
				outputFrameDesc.setChannels( 1 );
			}
			outputAudio->setProfile( profile, outputFrameDesc );

			// output stream
			_outputStream = &outputFile.addAudioStream( outputAudio->getAudioCodec() );

			// buffers to process
			AudioFrameDesc inputFrameDesc( _inputStream->getAudioCodec().getAudioFrameDesc() );
			// extracting a single sub-stream: decode mono
			if( subStreamIndex > -1 )
				inputFrameDesc.setChannels( 1 );

			_sourceBuffer = new AudioFrame( inputFrameDesc );
			_frameBuffer = new AudioFrame( outputAudio->getAudioCodec().getAudioFrameDesc() );

			// transform
			_transform = new AudioTransform();

			// generator decoder
			AudioGenerator* generatorAudio = new AudioGenerator();
			generatorAudio->setAudioFrameDesc( outputAudio->getAudioCodec().getAudioFrameDesc() );
			_generator = generatorAudio;

			break;
		}
		default:
		{
			// fix: error message previously read "unupported"
			throw std::runtime_error( "unsupported stream type" );
		}
	}

	// a non-zero offset starts on the generator until real data is due
	if( offset )
		switchToGeneratorDecoder();
}
int main(int argc, char* argv[]) { VideoDecoder decoder; FleshDetector fleshDetector; Image* inputImage; Image* fleshImage; Image* outlineImage; Image* confidenceImage; int frameNumber = 0; string vidFilename; char outputFilename[1024]; if ( argc < 4 ) { printf("Usage: %s <classifier file> <video file> <output directory>\n", argv[0]); return 1; } if ( !fleshDetector.Load(argv[1]) ) { fprintf(stderr, "Error loading flesh detector %s\n", argv[1]); return 1; } vidFilename = argv[2]; decoder.SetFilename(vidFilename); if ( !decoder.Load() ) { fprintf(stderr, "Error loading video %s\n", argv[2]); return 1; } while ( decoder.UpdateFrame() ) { inputImage = decoder.GetFrame(); TimingAnalyzer_Start(0); if ( fleshDetector.Process(inputImage, &outlineImage, &fleshImage, &confidenceImage) ) { TimingAnalyzer_Stop(0); sprintf(outputFilename, "%s/flesh%05d.ppm", argv[3], frameNumber); fleshImage->Save(outputFilename); sprintf(outputFilename, "%s/frame%05d.ppm", argv[3], frameNumber); outlineImage->Save(outputFilename); sprintf(outputFilename, "%s/confidence%05d.ppm", argv[3], frameNumber); confidenceImage->Save(outputFilename); } frameNumber++; } printf("FleshDetector Process Time Min: %d\tMax: %d\tMean: %d\n", TimingAnalyzer_Min(0), TimingAnalyzer_Max(0), TimingAnalyzer_Mean(0)); printf("FleshDetector GetFleshImage Time Min: %d\tMax: %d\tMean: %d\n", TimingAnalyzer_Min(1), TimingAnalyzer_Max(1), TimingAnalyzer_Mean(1)); printf("FleshDetector GetOutlineImage Time Min: %d\tMax: %d\tMean: %d\n", TimingAnalyzer_Min(2), TimingAnalyzer_Max(2), TimingAnalyzer_Mean(2)); printf("FleshDetector GetFleshConfidenceImage Time Min: %d\tMax: %d\tMean: %d\n", TimingAnalyzer_Min(3), TimingAnalyzer_Max(3), TimingAnalyzer_Mean(3)); printf("FleshDetector CalcConfidence Time Min: %d\tMax: %d\tMean: %d\n", TimingAnalyzer_Min(4), TimingAnalyzer_Max(4), TimingAnalyzer_Mean(4)); return 0; }