FFPP::~FFPP() {
    if (PPMode)
        pp_free_mode(PPMode);
    if (PPContext)
        pp_free_context(PPContext);
    if (SWSTo422P)
        sws_freeContext(SWSTo422P);
    if (SWSFrom422P)
        sws_freeContext(SWSFrom422P);
    avpicture_free(&InputPicture);
    avpicture_free(&OutputPicture);
}
int CTool::ConvertFormat(/*[in]*/  const std::shared_ptr<CVideoFrame> &inFrame,
                         /*[out]*/ std::shared_ptr<CVideoFrame> &outFrame, /** converted frame */
                         /*[in]*/  int nOutWidth,   /** width of the converted frame */
                         /*[in]*/  int nOutHeight,  /** height of the converted frame */
                         /*[in]*/  VideoFormat format)
{
    int nRet = 0;
    if (!inFrame) {
        LOG_MODEL_ERROR("CTool", "frame is null");
        return -1;
    }

    AVPixelFormat inFormat, outFormat;
    inFormat = VideoFormatToFFMpegPixFormat(inFrame->m_VideoInfo.Format);
    outFormat = VideoFormatToFFMpegPixFormat(format);

    AVPicture pic;
    // avpicture_fill() only wraps the buffer owned by inFrame; it allocates nothing.
    nRet = avpicture_fill(&pic, (uint8_t*)inFrame->GetData(), inFormat,
                          inFrame->m_VideoInfo.nWidth, inFrame->m_VideoInfo.nHeight);
    if (nRet < 0) {
        LOG_MODEL_ERROR("CTool", "avpicture_fill fail:%x", nRet);
        return nRet;
    }

    AVPicture outPic;
    nRet = ConvertFormat(pic, inFrame->m_VideoInfo.nWidth, inFrame->m_VideoInfo.nHeight,
                         inFormat, outPic, nOutWidth, nOutHeight, outFormat);
    if (nRet)
        return nRet;

    int nLen = avpicture_get_size(outFormat, nOutWidth, nOutHeight);
    VideoInfo vi;
    vi.Format = format;
    vi.nHeight = nOutHeight;
    vi.nWidth = nOutWidth;
    vi.nRatio = inFrame->m_VideoInfo.nRatio;
    std::shared_ptr<CVideoFrame> frame(
        new CVideoFrame(outPic.data[0], nLen, vi, inFrame->m_Timestamp));
    outFrame = frame;

    // Do NOT call avpicture_free(&pic): pic was filled over inFrame's buffer,
    // which this function does not own. Only outPic, allocated by the inner
    // ConvertFormat(), must be released here.
    avpicture_free(&outPic);
    return nRet;
}
//--------------------------------------------------------------------
ofUCUtils::~ofUCUtils() {
    unicap_stop_capture(handle);
    if (buffer.data != NULL) {
        free(buffer.data);
    }
    if (src != NULL) {
        avpicture_free(src);
        delete src;
    }
    if (dst != NULL) {
        avpicture_free(dst);
        delete dst;
    }
}
LibavStreamer::~LibavStreamer()
{
    if (codec_context_)
        avcodec_close(codec_context_);
    if (frame_)
    {
#if (LIBAVCODEC_VERSION_MAJOR < 54)
        av_free(frame_);
        frame_ = NULL;
#else
        avcodec_free_frame(&frame_);
#endif
    }
    if (format_context_)
        avformat_free_context(format_context_);
    if (picture_)
    {
        avpicture_free(picture_);
        delete picture_;
        picture_ = NULL;
    }
    if (tmp_picture_)
    {
        delete tmp_picture_;
        tmp_picture_ = NULL;
    }
    if (sws_context_)
        sws_freeContext(sws_context_);
}
void video_decoder_destroy(video_decoder_t *vd)
{
    sws_freeContext(vd->vd_sws);
    avpicture_free(&vd->vd_convert);
    free(vd);
}
//----------------------------------------------------------------------------------------------------
EEVideo::~EEVideo()
{
    for (auto& packet : m_packets)
    {
        avpicture_free((AVPicture*)packet);
        av_free_packet(packet);
        delete packet;
        packet = nullptr;
    }
    m_packets.clear();
    if (m_frameRGBA)
    {
        av_frame_free(&m_frameRGBA);
    }
    if (m_codecContext)
    {
        avcodec_close(m_codecContext);
    }
    if (m_formatContext)
    {
        avformat_close_input(&m_formatContext);
    }
}
int FFmpegEncoder::convertPixFmt(const uint8_t *src, int srclen, int srcw, int srch, PixelFormat srcfmt,
        uint8_t *dst, int dstlen, int dstw, int dsth, PixelFormat dstfmt)
{
    LOGI("[FFmpegEncoder::%s] begin", __FUNCTION__);
    if (!src || !dst)
    {
        LOGE("[FFmpegEncoder::%s] src or dst is NULL", __FUNCTION__);
        return -1;
    }

    // src input frame
    AVPicture srcPic;
    FFmpegVideoParam srcParam(srcw, srch, srcfmt, 0, 0, "");
    if (avpicture_fill(&srcPic, (uint8_t *)src, srcParam.pixelFormat, srcParam.width, srcParam.height) == -1)
    {
        LOGE("[FFmpegEncoder::%s] fail to avpicture_fill for src picture", __FUNCTION__);
        return -1;
    }

    // dst output frame
    AVPicture dstPic;
    FFmpegVideoParam dstParam(dstw, dsth, dstfmt, 0, 0, "");
    if (avpicture_alloc(&dstPic, dstParam.pixelFormat, dstParam.width, dstParam.height) == -1)
    {
        LOGE("[FFmpegEncoder::%s] fail to avpicture_alloc for dst picture", __FUNCTION__);
        return -1;
    }

    int ret = -1;
    if (convertPixFmt(&srcPic, &dstPic, &srcParam, &dstParam) < 0)
    {
        LOGE("[FFmpegEncoder::%s] fail to convertPixFmt", __FUNCTION__);
    }
    else
    {
        ret = avpicture_layout(&dstPic, dstParam.pixelFormat, dstParam.width, dstParam.height, dst, dstlen);
    }
    avpicture_free(&dstPic);
    return ret;
}
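The FFmpegEncoder::convertPixFmt() example above leans on a buffer-ownership rule that is easy to get wrong: avpicture_fill() only points an AVPicture at memory the caller already owns, while avpicture_alloc() allocates the plane buffers itself, so only the latter is paired with avpicture_free(). A minimal, hedged sketch of that contract follows; the function name and pixel formats are placeholders, and the AV_PIX_FMT_* spellings assume a reasonably recent build (older trees use PIX_FMT_* as in several examples here).

#include <libavcodec/avcodec.h>   /* AVPicture and the deprecated avpicture_* API */

/* Hypothetical example: wrap an existing buffer as the source, allocate the
 * destination, and free only what avpicture_alloc() allocated. */
static int ownership_sketch(const uint8_t *src_buf, int w, int h)
{
    AVPicture src, dst;

    /* avpicture_fill() does not allocate; src.data[] points into src_buf. */
    if (avpicture_fill(&src, (uint8_t *)src_buf, AV_PIX_FMT_YUV420P, w, h) < 0)
        return -1;

    /* avpicture_alloc() allocates plane buffers owned by dst. */
    if (avpicture_alloc(&dst, AV_PIX_FMT_RGB24, w, h) < 0)
        return -1;

    /* ... convert src into dst here, e.g. with sws_scale() ... */

    avpicture_free(&dst);  /* releases the planes allocated above */
    /* no avpicture_free(&src): its data still belongs to the caller */
    return 0;
}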
void ofUCUtils::close_unicap() {
    if (!deviceReady)
        return;
    unicap_stop_capture(handle);
    bUCFrameNew = false;
    if (src_pix_fmt != PIX_FMT_RGB24) {
        if (dst != NULL) {
            avpicture_free(dst);
            delete dst;
        }
        if (pixels != NULL) {
            delete[] pixels;
        }
        if (src != NULL) {
            //avpicture_free(src);
            delete src;
        }
    }
    deviceReady = false;
}
AVCodecEncoder::~AVCodecEncoder()
{
    TRACE("AVCodecEncoder::~AVCodecEncoder()\n");

    _CloseCodecIfNeeded();

    if (fSwsContext != NULL)
        sws_freeContext(fSwsContext);

    av_fifo_free(fAudioFifo);

    avpicture_free(&fDstFrame);
    // NOTE: Do not use avpicture_free() on fSrcFrame!! We fill the picture
    // data on the fly with the media buffer data passed to Encode().

    if (fFrame != NULL) {
        fFrame->data[0] = NULL;
        fFrame->data[1] = NULL;
        fFrame->data[2] = NULL;
        fFrame->data[3] = NULL;

        fFrame->linesize[0] = 0;
        fFrame->linesize[1] = 0;
        fFrame->linesize[2] = 0;
        fFrame->linesize[3] = 0;
        free(fFrame);
    }

    free(fOwnContext);

    delete[] fChunkBuffer;
}
void FFmpegVideoDecoder::close()
{
    // Free the YUV frame
    if ( m_pFrame )
        av_free( m_pFrame );

    // Free the RGB frame
    if ( m_pFrameRGB )
    {
        avpicture_free( m_pFrameRGB );
        av_free( m_pFrameRGB );
    }

    // Close the codec
    if ( m_pCodecCtx )
        avcodec_close( m_pCodecCtx );

    // Close the video file
    if ( m_pFormatCtx )
        avformat_close_input( &m_pFormatCtx );

    m_pFormatCtx = 0;
    m_pCodecCtx = 0;
    m_pCodec = 0;
    m_pFrame = 0;
    m_pFrameRGB = 0;
}
void MediaRecorder::Stop()
{
    if (oc) {
        if (in_audio_buf2)
            AddFrame((u16 *)0);

        av_write_trailer(oc);
        avformat_free_context(oc);
        oc = NULL;
    }
    if (audio_buf) {
        free(audio_buf);
        audio_buf = NULL;
    }
    if (video_buf) {
        free(video_buf);
        video_buf = NULL;
    }
    if (audio_buf2) {
        free(audio_buf2);
        audio_buf2 = NULL;
    }
    if (convpic) {
        avpicture_free((AVPicture *)convpic);
        av_free(convpic);
        convpic = NULL;
    }
    if (converter) {
        sws_freeContext(converter);
        converter = NULL;
    }
}
int InputSource::save_pic(MSPicture* pic)
{
    ost::MutexLock al(cs_);
    if (idle_) return 0;
    if (want_width_ == 0 || want_height_ == 0 || pic->w == 0 || pic->h == 0)
        return 0;

    valid_ = true;

    if (want_width_ != width_ || want_height_ != height_ ||
        pic->w != last_width_ || pic->h != last_height_) {
        // The requested or source size changed: rebuild the sws context
        // and reallocate the target picture.
        if (sws_) sws_freeContext(sws_);
        avpicture_free(&pic_);

        avpicture_alloc(&pic_, PIX_FMT_YUV420P, want_width_, want_height_);
        sws_ = sws_getContext(pic->w, pic->h, PIX_FMT_YUV420P,
                want_width_, want_height_, PIX_FMT_YUV420P,
                SWS_FAST_BILINEAR, 0, 0, 0);

        last_width_ = pic->w;
        last_height_ = pic->h;
        width_ = want_width_;
        height_ = want_height_;
    }

    x_ = want_x_, y_ = want_y_;
    alpha_ = want_alpha_;

    sws_scale(sws_, pic->planes, pic->strides, 0, pic->h, pic_.data, pic_.linesize);
    return 0;
}
MediaThread::Picture::~Picture()
{
    if (sws_) {
        sws_freeContext(sws_);
    }
    delete image_;
    avpicture_free(&pic_);
}
void free_Picture(void* data)
{
    if (data) {
        Picture* pic = (Picture*) data;
        avpicture_free(&pic->picture);
        av_free(data);
    }
}
InputSource::~InputSource(void)
{
    if (sws_) {
        sws_freeContext(sws_);
        sws_ = 0;
    }
    avpicture_free(&pic_);
}
void VideoLayer::free_picture(AVPicture *picture)
{
    if (picture != NULL) {
        if (picture->data[0])
            avpicture_free(picture);
        free(picture);
    }
    if (audio_float_buf)
        free(audio_float_buf);
    if (audio_resampled_buf)
        free(audio_resampled_buf);
}
FFMS_VideoSource::~FFMS_VideoSource() {
    if (SWS)
        sws_freeContext(SWS);

    avpicture_free(&SWSFrame);
    av_freep(&DecodeFrame);
    av_freep(&LastDecodedFrame);

    Index.Release();
}
FFMS_VideoSource::~FFMS_VideoSource() {
#ifdef FFMS_USE_POSTPROC
    if (PPMode)
        pp_free_mode(PPMode);
    if (PPContext)
        pp_free_context(PPContext);
    avpicture_free(&PPFrame);
#endif // FFMS_USE_POSTPROC

    if (SWS)
        sws_freeContext(SWS);

    avpicture_free(&SWSFrame);
    av_freep(&DecodeFrame);

    Index.Release();
}
static void FreeSchroFrame(SchroFrame *frame, void *priv)
{
    AVPicture *p_pic = priv;

    if (!p_pic)
        return;

    avpicture_free(p_pic);
    av_freep(&p_pic);
}
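Several of the cleanup paths in this collection (FreeSchroFrame() above, free_Picture(), video_frame_free()) use the same two-step teardown, because avpicture_free() only releases the plane buffers referenced by the AVPicture and never frees the struct itself. A small sketch of that idiom, assuming a heap-allocated AVPicture; the helper name is illustrative, not from any of these projects:

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Illustrative helper: release both levels of a heap-allocated AVPicture. */
static void picture_destroy(AVPicture **pic)
{
    if (!pic || !*pic)
        return;
    avpicture_free(*pic);  /* step 1: free the data[0..3] plane buffers    */
    av_freep(pic);         /* step 2: free the struct and NULL the pointer */
}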
static av_cold int qtrle_encode_end(AVCodecContext *avctx)
{
    QtrleEncContext *s = avctx->priv_data;

    avpicture_free(&s->previous_frame);
    av_free(s->rlecode_table);
    av_free(s->length_table);
    av_free(s->skip_table);
    return 0;
}
int vs_show (void *ctx, const uint8_t *const data[4], int stride[4])
{
    // First check whether the sws context is still valid for the current window size.
    Ctx *c = (Ctx*)ctx;
    Window root;
    int x, y;
    unsigned int cx, cy, border, depth;
    XGetGeometry(c->display, c->window, &root, &x, &y, &cx, &cy, &border, &depth);
    if (cx != c->curr_width || cy != c->curr_height) {
        // The window was resized: rebuild the target picture, the sws context
        // and the shared-memory XImage.
        avpicture_free(&c->pic_target);
        sws_freeContext(c->sws);

        c->sws = sws_getContext(c->v_width, c->v_height, PIX_FMT_YUV420P,
                cx, cy, c->target_pixfmt, SWS_FAST_BILINEAR, 0, 0, 0);
        avpicture_alloc(&c->pic_target, c->target_pixfmt, cx, cy);
        c->curr_width = cx;
        c->curr_height = cy;

        // re create image
        XShmDetach(c->display, &c->segment);
        shmdt(c->segment.shmaddr);
        shmctl(c->segment.shmid, IPC_RMID, 0);
        XDestroyImage(c->image);

        c->image = XShmCreateImage(c->display, c->vinfo.visual, depth, ZPixmap, 0,
                &c->segment, cx, cy);
        c->segment.shmid = shmget(IPC_PRIVATE,
                c->image->bytes_per_line * c->image->height,
                IPC_CREAT | 0777);
        c->segment.shmaddr = (char*)shmat(c->segment.shmid, 0, 0);
        c->image->data = c->segment.shmaddr;
        c->segment.readOnly = 0;
        XShmAttach(c->display, &c->segment);
    }

    // Convert the decoded frame into the window-sized target picture.
    sws_scale(c->sws, data, stride, 0, c->v_height, c->pic_target.data, c->pic_target.linesize);

    // Copy the converted picture into the XImage, one row at a time.
    unsigned char *p = c->pic_target.data[0], *q = (unsigned char*)c->image->data;
    int xx = MIN(c->image->bytes_per_line, c->pic_target.linesize[0]);
    for (int i = 0; i < c->curr_height; i++) {
        memcpy(q, p, xx);
        p += c->pic_target.linesize[0];   // advance in the source picture
        q += c->image->bytes_per_line;    // advance in the destination image
    }

    // Blit to the X window.
    XShmPutImage(c->display, c->window, c->gc, c->image, 0, 0, 0, 0,
            c->curr_width, c->curr_height, 1);

    return 1;
}
void IoAVCodec_freeContextIfNeeded(IoAVCodec *self)
{
    //printf("IoAVCodec_freeContextIfNeeded\n");

    DATA(self)->audioContext = NULL;
    DATA(self)->videoContext = NULL;

    if (DATA(self)->audioContext)
    {
        //avcodec_close(DATA(self)->audioContext);
        //av_free(DATA(self)->audioContext);
        DATA(self)->audioContext = NULL;
    }

    if (DATA(self)->videoContext)
    {
        //avcodec_close(DATA(self)->videoContext);
        //av_free(DATA(self)->videoContext);
        DATA(self)->videoContext = NULL;
    }

    if (DATA(self)->formatContext)
    {
        av_close_input_file(DATA(self)->formatContext);
        //av_free(DATA(self)->formatContext);
        DATA(self)->formatContext = NULL;
    }

    if (DATA(self)->packet)
    {
        //free(DATA(self)->packet);
        DATA(self)->packet = NULL;
    }

    if (DATA(self)->audioOutBuffer)
    {
        //free(DATA(self)->audioOutBuffer);
        DATA(self)->audioOutBuffer = NULL;
    }

    if (DATA(self)->decodedFrame)
    {
        //av_free(DATA(self)->decodedFrame);
        DATA(self)->decodedFrame = NULL;
    }

    if (DATA(self)->rgbPicture)
    {
        avpicture_free(DATA(self)->rgbPicture);
        //free(DATA(self)->rgbPicture);
        DATA(self)->rgbPicture = NULL;
    }

    //printf("IoAVCodec_freeContextIfNeeded done\n");
}
int video_thread(void *arg)
{
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    AVFrame *pFrame;
    double pts;

    pFrame = avcodec_alloc_frame();

    is->rgbaFrame = avcodec_alloc_frame();
    avpicture_alloc((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA,
                    is->video_st->codec->width, is->video_st->codec->height);

    for (;;) {
        if (packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        pts = 0;

        // Save global pts to be stored in pFrame
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet);
        if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque &&
            *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            pts = (double)(*(uint64_t *)pFrame->opaque);
        } else if (packet->dts != AV_NOPTS_VALUE) {
            pts = (double)packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);

        // Did we get a video frame?
        if (frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if (queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }

    SDL_CloseAudio();

    av_free(pFrame);

    avpicture_free((AVPicture *)is->rgbaFrame);
    av_free(is->rgbaFrame);
    return 0;
}
// Free object
void video_frame_free(void * opaque)
{
    VideoFrameInternal * internal = (VideoFrameInternal *)opaque;
    if (internal) {
        if (internal->picture) {
            if (internal->owner)
                avpicture_free(internal->picture);
            av_free(internal->picture);
        }
        av_free(internal);
    }
}
void H264Encoder::deinit()
{
    avpicture_free(&mAvOutputPic);
    sws_freeContext(mSwsCt);
    avcodec_close(mEncoderContext);
    av_free(mEncoderContext);
    av_free(mEncoderFrame);

    mEncoderContext = NULL;
    mEncoderFrame = NULL;
    mEncoderCodec = NULL;
    mInit = false;
}
static av_cold int libschroedinger_decode_close(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;

    /* Free the decoder. */
    schro_decoder_free(p_schro_params->decoder);
    av_freep(&p_schro_params->format);

    avpicture_free(&p_schro_params->dec_pic);

    /* Free data in the output frame queue. */
    ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
                              libschroedinger_decode_frame_free);

    return 0;
}
void video_decode()
{
    if (-1 == build_avpkt(&avpkt)) {
        usleep(10000);
        return;
    }
    if (avpkt.size == 0)
        return;

    while (avpkt.size > 0) {
        // Decode one frame from the packet.
        len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
        if (len < 0) {
            printf("Error while decoding frame %d\n", frame);
            break;
        }
        if (got_picture) {
            /* the decoded picture is allocated by the decoder, no need to free it */
            IplImage *showImage = cvCreateImage(cvSize(picture->width, picture->height), 8, 3);
            avpicture_alloc((AVPicture *)&frameRGB, PIX_FMT_BGR24, picture->width, picture->height);

            // Conversion context needed to turn the YUV420 image into BGR24.
            struct SwsContext *scxt = (struct SwsContext *)sws_getContext(
                    picture->width, picture->height, PIX_FMT_YUV420P,
                    picture->width, picture->height, PIX_FMT_BGR24,
                    2, NULL, NULL, NULL);
            if (scxt != NULL) {
                // Convert the pixel format.
                sws_scale(scxt, picture->data, picture->linesize, 0, c->height,
                          frameRGB.data, frameRGB.linesize);
                // Point the OpenCV image at the converted data and display it.
                showImage->imageSize = frameRGB.linesize[0];
                showImage->imageData = (char *)frameRGB.data[0];
                cvShowImage("decode", showImage);
                // Wait briefly so the frame is actually visible; otherwise the
                // loop redraws too fast to see anything.
                cvWaitKey(30);
            }

            avpicture_free((AVPicture *)&frameRGB);
            cvReleaseImage(&showImage);
            sws_freeContext(scxt);
            frame++;
        }
        avpkt.size -= len;
        avpkt.data += len;
    }
}
static void rtp_output_context_free(RtpOutputContext *rtpContext)
{
    if (rtpContext->urlContext != NULL) {
        url_close(rtpContext->urlContext);
    }
    if (rtpContext->avContext != NULL) {
        free_av_format_context(rtpContext->avContext);
    }
    if (rtpContext->tempFrame != NULL) {
        avpicture_free((AVPicture *)rtpContext->tempFrame);
        av_free(rtpContext->tempFrame);
    }
    if (rtpContext->imgConvert != NULL) {
        sws_freeContext(rtpContext->imgConvert);
    }
    av_free(rtpContext);
}
//static
void VideoPlayer::doUpdatePicture(void *args)
{
    VideoPlayer *player = static_cast<VideoPlayer*>(args);
    if (!player->mStop && !player->mPause) {
        pthread_mutex_lock(&(player->mRenderMutex));
        if (player->mPicture) {
            avpicture_free(player->mPicture);
            delete player->mPicture;
        }
        player->mPictureRingBuffer.dequeue((DataType **)&player->mPicture);
        pthread_mutex_unlock(&(player->mRenderMutex));
    }
}
void FFMS_VideoSource::ReAdjustOutputFormat() {
    if (SWS) {
        sws_freeContext(SWS);
        SWS = NULL;
    }

    DetectInputFormat();

    OutputFormat = FindBestPixelFormat(TargetPixelFormats, InputFormat);
    if (OutputFormat == PIX_FMT_NONE) {
        ResetOutputFormat();
        throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
            "No suitable output format found");
    }

    OutputColorRange = handle_jpeg(&OutputFormat);
    if (OutputColorRange == AVCOL_RANGE_UNSPECIFIED)
        OutputColorRange = CodecContext->color_range;
    if (OutputColorRange == AVCOL_RANGE_UNSPECIFIED)
        OutputColorRange = InputColorRange;

    OutputColorSpace = CodecContext->colorspace;
    if (OutputColorSpace == AVCOL_SPC_UNSPECIFIED)
        OutputColorSpace = InputColorSpace;

    if (InputFormat != OutputFormat ||
        TargetWidth != CodecContext->width ||
        TargetHeight != CodecContext->height ||
        InputColorSpace != OutputColorSpace ||
        InputColorRange != OutputColorRange) {
        SWS = GetSwsContext(
            CodecContext->width, CodecContext->height, InputFormat, InputColorSpace, InputColorRange,
            TargetWidth, TargetHeight, OutputFormat, OutputColorSpace, OutputColorRange,
            TargetResizer);
        if (!SWS) {
            ResetOutputFormat();
            throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
                "Failed to allocate SWScale context");
        }
    }

    avpicture_free(&SWSFrame);
    avpicture_alloc(&SWSFrame, OutputFormat, TargetWidth, TargetHeight);
}
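Every example in this collection uses the AVPicture helpers, which FFmpeg later deprecated and eventually removed. As a rough, hedged sketch of what the same allocate/free pair looks like with the libavutil image utilities that replaced them; the function name, size, and pixel format below are placeholders:

#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Placeholder sketch: allocate and release an image without AVPicture. */
static int image_sketch(int w, int h)
{
    uint8_t *data[4];
    int linesize[4];

    /* roughly what avpicture_alloc() used to do */
    int ret = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_YUV420P, 32);
    if (ret < 0)
        return ret;

    /* ... use data[]/linesize[] much like AVPicture.data/linesize ... */

    /* roughly what avpicture_free() used to do: one buffer backs all planes */
    av_freep(&data[0]);
    return 0;
}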