Example #1
static int
put_image (struct vf_instance *vf, mp_image_t *mpi, double pts)
{
  struct vf_priv_s *priv = vf->priv;
  mp_image_t* dmpi;
  AVPicture pic;
  AVPicture lavc_picture;

  lavc_picture.data[0]     = mpi->planes[0];
  lavc_picture.data[1]     = mpi->planes[1];
  lavc_picture.data[2]     = mpi->planes[2];
  lavc_picture.linesize[0] = mpi->stride[0];
  lavc_picture.linesize[1] = mpi->stride[1];
  lavc_picture.linesize[2] = mpi->stride[2];

  dmpi = vf_get_image(vf->next, mpi->imgfmt,
		      MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
		      priv->width, priv->height);

  pic.data[0]     = dmpi->planes[0];
  pic.data[1]     = dmpi->planes[1];
  pic.data[2]     = dmpi->planes[2];
  pic.linesize[0] = dmpi->stride[0];
  pic.linesize[1] = dmpi->stride[1];
  pic.linesize[2] = dmpi->stride[2];

  if (avpicture_deinterlace(&pic, &lavc_picture,
			    priv->pix_fmt, priv->width, priv->height) < 0)
    {
      /* This should not happen -- see config() */
      return 0;
    }

  return vf_next_put_image(vf, dmpi, pts);
}
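Example #1 above (and #15 further down) copies an AVFrame's plane pointers and strides into an AVPicture field by field. A small helper keeps that boilerplate in one place; this is only a sketch of ours (the name wrap_avpicture does not come from any of the projects shown here), assuming at most four planes:

#include <libavcodec/avcodec.h>

/* Copy an AVFrame's plane pointers and strides into an AVPicture so it
 * can be passed to avpicture_deinterlace(). No pixel data is copied. */
static void wrap_avpicture(AVPicture *dst, const AVFrame *src)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst->data[i]     = src->data[i];
        dst->linesize[i] = src->linesize[i];
    }
}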
Example #2
/** ffmpeg_deinterlace
 *      Make the image suitable for deinterlacing using ffmpeg, then deinterlace the picture.
 * 
 * Parameters
 *      img     image in YUV420P format
 *      width   image width in pixels
 *      height  image height in pixels
 *
 * Returns
 *      Function returns nothing.
 *      img     contains deinterlaced image
 */
void ffmpeg_deinterlace(unsigned char *img, int width, int height)
{
    AVFrame *picture;
    int width2 = width / 2;
    
    picture = avcodec_alloc_frame();
    if (!picture) {
        motion_log(LOG_ERR, 1, "Could not alloc frame");
        return;
    }
    
    picture->data[0] = img;
    picture->data[1] = img+width*height;
    picture->data[2] = picture->data[1]+(width*height)/4;
    picture->linesize[0] = width;
    picture->linesize[1] = width2;
    picture->linesize[2] = width2;
    
    /* We assume the input is always PIX_FMT_YUV420P */
    avpicture_deinterlace((AVPicture *)picture, (AVPicture *)picture, PIX_FMT_YUV420P, width, height);
    
    av_free(picture);
    
    return;
}
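A minimal caller for the function above might look like the following sketch; the only requirement the function imposes is a contiguous YUV420P buffer (a width*height Y plane followed by two quarter-size chroma planes), and the names here are illustrative:

#include <stdlib.h>

void example(void)
{
    int width = 640, height = 480;
    unsigned char *img = malloc(width * height * 3 / 2); /* YUV420P: Y + U/4 + V/4 */

    /* ... fill img with one decoded frame ... */
    ffmpeg_deinterlace(img, width, height);              /* deinterlaces in place */
    free(img);
}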
Example #3
void LiveStreamFrameFormatter::deinterlaceFrame(AVFrame* avFrame)
{
    int ret = avpicture_deinterlace((AVPicture*)avFrame, (AVPicture*)avFrame,
                                    m_stream->codec->pix_fmt, m_stream->codec->width, m_stream->codec->height);
    if (ret < 0)
        qDebug("deinterlacing failed");
}
Example #4
static GstFlowReturn
gst_ffmpegdeinterlace_chain (GstPad * pad, GstBuffer * inbuf)
{
  GstFFMpegDeinterlace *deinterlace =
      GST_FFMPEGDEINTERLACE (gst_pad_get_parent (pad));
  GstBuffer *outbuf = NULL;
  GstFlowReturn result;

  result =
      gst_pad_alloc_buffer (deinterlace->srcpad, GST_BUFFER_OFFSET_NONE,
      deinterlace->to_size, GST_PAD_CAPS (deinterlace->srcpad), &outbuf);
  if (result == GST_FLOW_OK) {
    gst_ffmpeg_avpicture_fill (&deinterlace->from_frame,
        GST_BUFFER_DATA (inbuf), deinterlace->pixfmt, deinterlace->width,
        deinterlace->height);

    gst_ffmpeg_avpicture_fill (&deinterlace->to_frame, GST_BUFFER_DATA (outbuf),
        deinterlace->pixfmt, deinterlace->width, deinterlace->height);

    avpicture_deinterlace (&deinterlace->to_frame, &deinterlace->from_frame,
        deinterlace->pixfmt, deinterlace->width, deinterlace->height);

    gst_buffer_copy_metadata (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS);

    result = gst_pad_push (deinterlace->srcpad, outbuf);
  }

  gst_buffer_unref (inbuf);

  return result;
}
Example #5
static GstFlowReturn
gst_ffmpegdeinterlace_chain (GstPad * pad, GstObject * parent,
    GstBuffer * inbuf)
{
  GstFFMpegDeinterlace *deinterlace = GST_FFMPEGDEINTERLACE (parent);
  GstBuffer *outbuf = NULL;
  GstFlowReturn result;
  GstMapInfo from_map, to_map;

  GST_OBJECT_LOCK (deinterlace);
  if (deinterlace->reconfigure) {
    if (deinterlace->new_mode != -1)
      deinterlace->mode = deinterlace->new_mode;
    deinterlace->new_mode = -1;

    deinterlace->reconfigure = FALSE;
    GST_OBJECT_UNLOCK (deinterlace);
    if (gst_pad_has_current_caps (deinterlace->srcpad)) {
      GstCaps *caps;

      caps = gst_pad_get_current_caps (deinterlace->sinkpad);
      gst_ffmpegdeinterlace_sink_setcaps (deinterlace->sinkpad, caps);
      gst_caps_unref (caps);
    }
  } else {
    GST_OBJECT_UNLOCK (deinterlace);
  }

  if (deinterlace->passthrough)
    return gst_pad_push (deinterlace->srcpad, inbuf);

  outbuf = gst_buffer_new_and_alloc (deinterlace->to_size);

  gst_buffer_map (inbuf, &from_map, GST_MAP_READ);
  gst_ffmpeg_avpicture_fill (&deinterlace->from_frame, from_map.data,
      deinterlace->pixfmt, deinterlace->width, deinterlace->height);

  gst_buffer_map (outbuf, &to_map, GST_MAP_WRITE);
  gst_ffmpeg_avpicture_fill (&deinterlace->to_frame, to_map.data,
      deinterlace->pixfmt, deinterlace->width, deinterlace->height);

  avpicture_deinterlace (&deinterlace->to_frame, &deinterlace->from_frame,
      deinterlace->pixfmt, deinterlace->width, deinterlace->height);
  gst_buffer_unmap (outbuf, &to_map);
  gst_buffer_unmap (inbuf, &from_map);

  gst_buffer_copy_into (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS, 0, -1);

  result = gst_pad_push (deinterlace->srcpad, outbuf);

  gst_buffer_unref (inbuf);

  return result;
}
Example #6
// ---------------------------------------------------------------------------------
// Preprocess frame.
// Only does deinterlacing for now.
static void Preprocess_Frame(PyCodecObject* cObj, AVPicture *picture, void **bufp)
{
	AVCodecContext *dec;
	AVPicture *picture2;
	AVPicture picture_tmp;
	uint8_t *buf = 0;

	dec = cObj->cCodec;
	/* deinterlace : must be done before any resize */
	if ((cObj->iVcodecFlags & VCODEC_DEINTERLACE_FL) ||
			(cObj->iVcodecFlags & VCODEC_POSTPROC_FL)) {
		int size;
		/* create temporary picture */
		size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
		buf = (uint8_t*)av_malloc(size);
		if (!buf)
			return;

		picture2 = &picture_tmp;
		avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);

		if (cObj->iVcodecFlags & VCODEC_DEINTERLACE_FL) {
			if(avpicture_deinterlace(picture2,
						picture,
						dec->pix_fmt,
						dec->width,
						dec->height) < 0) {
				/* if error, do not deinterlace */
				av_free(buf);
				buf = NULL;
				picture2 = picture;
			}
		} else {
			if (img_convert(picture2, dec->pix_fmt,
					picture, dec->pix_fmt,
					dec->width,
					dec->height) < 0) {
				/* if error, do not copy */
				av_free(buf);
				buf = NULL;
				picture2 = picture;
			}
		}
	} else {
		picture2 = picture;
	}

	//frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);

	if (picture != picture2)
		*picture = *picture2;
	*bufp = buf;
}
Example #7
static GstFlowReturn
gst_ffmpegdeinterlace_chain (GstPad * pad, GstBuffer * inbuf)
{
  GstFFMpegDeinterlace *deinterlace =
      GST_FFMPEGDEINTERLACE (gst_pad_get_parent (pad));
  GstBuffer *outbuf = NULL;
  GstFlowReturn result;

  GST_OBJECT_LOCK (deinterlace);
  if (deinterlace->reconfigure) {
    if (deinterlace->new_mode != -1)
      deinterlace->mode = deinterlace->new_mode;
    deinterlace->new_mode = -1;

    deinterlace->reconfigure = FALSE;
    GST_OBJECT_UNLOCK (deinterlace);
    if (GST_PAD_CAPS (deinterlace->srcpad))
      gst_ffmpegdeinterlace_sink_setcaps (deinterlace->sinkpad,
          GST_PAD_CAPS (deinterlace->sinkpad));
  } else {
    GST_OBJECT_UNLOCK (deinterlace);
  }

  if (deinterlace->passthrough)
    return gst_pad_push (deinterlace->srcpad, inbuf);

  result =
      gst_pad_alloc_buffer (deinterlace->srcpad, GST_BUFFER_OFFSET_NONE,
      deinterlace->to_size, GST_PAD_CAPS (deinterlace->srcpad), &outbuf);
  if (result == GST_FLOW_OK) {
    gst_ffmpeg_avpicture_fill (&deinterlace->from_frame,
        GST_BUFFER_DATA (inbuf), deinterlace->pixfmt, deinterlace->width,
        deinterlace->height);

    gst_ffmpeg_avpicture_fill (&deinterlace->to_frame, GST_BUFFER_DATA (outbuf),
        deinterlace->pixfmt, deinterlace->width, deinterlace->height);

    avpicture_deinterlace (&deinterlace->to_frame, &deinterlace->from_frame,
        deinterlace->pixfmt, deinterlace->width, deinterlace->height);

    gst_buffer_copy_metadata (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS);

    result = gst_pad_push (deinterlace->srcpad, outbuf);
  }

  gst_buffer_unref (inbuf);

  return result;
}
Example #8
void DecodeVideo::_Deinterlace(AVFrame * inFrame, AVFrame * outFrame)
{
    bool deinterlaced = false;

    if (avpicture_deinterlace((AVPicture *)outFrame, (AVPicture *)inFrame,
                              _codecCtx->pix_fmt,
                              _codecCtx->width, _codecCtx->height) >= 0) {
        deinterlaced = true;
    }

    if (!deinterlaced) {
        av_picture_copy((AVPicture *)outFrame, (AVPicture *)inFrame,
                        _codecCtx->pix_fmt, _codecCtx->width, _codecCtx->height);
    }
}
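Note that _Deinterlace above writes into outFrame, so the caller must allocate the destination picture first. A hedged sketch of the calling side, assumed to live inside another DecodeVideo method with _codecCtx being the opened decoder context (all other names are illustrative):

/* Inside another DecodeVideo method; decodedFrame is a frame
 * returned by the decoder (illustrative name). */
AVFrame *out = avcodec_alloc_frame();      /* av_frame_alloc() in newer FFmpeg */
avpicture_alloc((AVPicture *)out, _codecCtx->pix_fmt,
                _codecCtx->width, _codecCtx->height);

_Deinterlace(decodedFrame, out);           /* falls back to av_picture_copy() on error */

/* ... consume out ... */
avpicture_free((AVPicture *)out);
av_free(out);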
Example #9
void MovieDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, VideoFrame& videoFrame)
{
    if (m_pFrame->interlaced_frame)
    {
        avpicture_deinterlace((AVPicture*) m_pFrame, (AVPicture*) m_pFrame, m_pVideoCodecContext->pix_fmt,
                              m_pVideoCodecContext->width, m_pVideoCodecContext->height);
    }

    int scaledWidth, scaledHeight;
    convertAndScaleFrame(PIX_FMT_RGB24, scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);

    videoFrame.width = scaledWidth;
    videoFrame.height = scaledHeight;
    videoFrame.lineSize = m_pFrame->linesize[0];

    videoFrame.frameData.clear();
    videoFrame.frameData.resize(videoFrame.lineSize * videoFrame.height);
    memcpy((&(videoFrame.frameData.front())), m_pFrame->data[0], videoFrame.lineSize * videoFrame.height);
}
Example #10
/*****************************************************************************
 * Do the processing here
 *****************************************************************************/
static picture_t *Deinterlace( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    AVPicture src_pic, dest_pic;
    picture_t *p_pic_dst;
    int i, i_res = -1;

    /* Request output picture */
    p_pic_dst = filter_NewPicture( p_filter );
    if( !p_pic_dst )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Prepare the AVPictures for the conversion */
    for( i = 0; i < p_pic->i_planes; i++ )
    {
        src_pic.data[i] = p_pic->p[i].p_pixels;
        src_pic.linesize[i] = p_pic->p[i].i_pitch;
    }
    for( i = 0; i < p_pic_dst->i_planes; i++ )
    {
        dest_pic.data[i] = p_pic_dst->p[i].p_pixels;
        dest_pic.linesize[i] = p_pic_dst->p[i].i_pitch;
    }

    i_res = avpicture_deinterlace( &dest_pic, &src_pic, p_sys->i_src_ffmpeg_chroma,
                                   p_filter->fmt_in.video.i_width,
                                   p_filter->fmt_in.video.i_height );
    if( i_res == -1 )
    {
        msg_Err( p_filter, "deinterlacing picture failed" );
        filter_DeletePicture( p_filter, p_pic_dst );
        picture_Release( p_pic );
        return NULL;
    }

    picture_CopyProperties( p_pic_dst, p_pic );
    p_pic_dst->b_progressive = true;
    picture_Release( p_pic );
    return p_pic_dst;
}
Example #11
void MovieDecoder::getScaledVideoFrame(VideoFrame& videoFrame)
{
    if (pFrame_->interlaced_frame)
    {
        avpicture_deinterlace((AVPicture*) pFrame_, (AVPicture*) pFrame_, pVideoCodecContext_->pix_fmt,
                              pVideoCodecContext_->width, pVideoCodecContext_->height);
    }

    int scaledWidth, scaledHeight;
    convertAndScaleFrame(PIX_FMT_RGB24, scaledWidth, scaledHeight);

    videoFrame.width = scaledWidth;
    videoFrame.height = scaledHeight;
    videoFrame.lineSize = pFrame_->linesize[0];

    videoFrame.frameData.clear();
    videoFrame.frameData.resize(videoFrame.lineSize * videoFrame.height);
    memcpy((&(videoFrame.frameData.front())), pFrame_->data[0], videoFrame.lineSize * videoFrame.height);
}
Example #12
/**
 * ffmpeg_deinterlace
 *      Make the image suitable for deinterlacing using ffmpeg, then deinterlace the picture.
 *
 * Parameters
 *      img     image in YUV420P format
 *      width   image width in pixels
 *      height  image height in pixels
 *
 * Returns
 *      Function returns nothing.
 *      img     contains deinterlaced image
 */
void ffmpeg_deinterlace(unsigned char *img, int width, int height)
{
    AVPicture picture;
    int width2 = width / 2;

    picture.data[0] = img;
    picture.data[1] = img + width * height;
    picture.data[2] = picture.data[1] + (width * height) / 4;
    picture.linesize[0] = width;
    picture.linesize[1] = width2;
    picture.linesize[2] = width2;

    /* We assume the input is always PIX_FMT_YUV420P */
    avpicture_deinterlace(&picture, &picture, PIX_FMT_YUV420P, width, height);

#ifndef __SSE_MATH__
    __asm__ __volatile__ ( "emms");
#endif

    return;
}
Example #13
void MovieDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, VideoFrame& videoFrame)
{
	
    if (m_pFrame->interlaced_frame)
    {
        avpicture_deinterlace((AVPicture*) m_pFrame, (AVPicture*) m_pFrame, m_pVideoCodecContext->pix_fmt,
                              GetVideoWidth(), GetVideoHeigth());
    }

    int scaledWidth, scaledHeight;
    convertAndScaleFrame(PIX_FMT_RGBA, scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);

    videoFrame.width = scaledWidth;
    videoFrame.height = scaledHeight;
    videoFrame.lineSize = m_pFrame->linesize[0];

	if(videoFrame.frameData != nullptr)
		delete[] videoFrame.frameData;

	videoFrame.frameData = new uint8_t[videoFrame.lineSize * videoFrame.height];
    memcpy(videoFrame.frameData, m_pFrame->data[0], videoFrame.lineSize * videoFrame.height);

}
Example #14
void VideoLayer::deinterlace(AVPicture *picture) {
  int size;
  AVPicture *picture2;
  AVPicture picture_tmp;
  
  /* create temporary picture */
  size = avpicture_get_size(video_codec_ctx->pix_fmt,
			    video_codec_ctx->width,
			    video_codec_ctx->height);
  
  /* allocate only first time */
  if(deinterlace_buffer==NULL)
    deinterlace_buffer = (uint8_t *)av_malloc(size);
  if (!deinterlace_buffer)
    return ;
  
  picture2 = &picture_tmp;
  avpicture_fill(picture2, deinterlace_buffer,
		 video_codec_ctx->pix_fmt,
		 video_codec_ctx->width,
		 video_codec_ctx->height);
  
  if(avpicture_deinterlace(picture2, picture,
			   video_codec_ctx->pix_fmt,
			   video_codec_ctx->width,
			   video_codec_ctx->height) < 0) {
    /* if error, do not deinterlace */
    //	av_free(deinterlace_buffer);
    //	deinterlace_buffer = NULL;
    picture2 = picture;
  }
  if (picture != picture2)
    *picture = *picture2;
  //    av_free(deinterlace_buffer);
  return;
}
Example #15
/*! \brief This function applies deinterlacing (only if needed) and color
	conversion to the video frame in fRawDecodedPicture.

	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
	converted yet (otherwise this function behaves in an undefined manner).

	You should only call this function when you got a new picture decoded by
	the video decoder and the fHeader variable was updated accordingly (\see
	_UpdateMediaHeaderForVideoFrame()).

	When this function finishes the postprocessed video frame will be available
	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
	will be set accordingly).
*/
void
AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
{
	int displayWidth = fHeader.u.raw_video.display_line_width;
	int displayHeight = fHeader.u.raw_video.display_line_count;
	AVPicture deinterlacedPicture;
	bool useDeinterlacedPicture = false;

	if (fRawDecodedPicture->interlaced_frame) {
		AVPicture rawPicture;
		rawPicture.data[0] = fRawDecodedPicture->data[0];
		rawPicture.data[1] = fRawDecodedPicture->data[1];
		rawPicture.data[2] = fRawDecodedPicture->data[2];
		rawPicture.data[3] = fRawDecodedPicture->data[3];
		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];

		avpicture_alloc(&deinterlacedPicture, fContext->pix_fmt, displayWidth,
			displayHeight);

		if (avpicture_deinterlace(&deinterlacedPicture, &rawPicture,
				fContext->pix_fmt, displayWidth, displayHeight) < 0) {
			TRACE("[v] avpicture_deinterlace() - error\n");
		} else
			useDeinterlacedPicture = true;
	}

	// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		fSwsContext = sws_getContext(displayWidth, displayHeight,
			fContext->pix_fmt, displayWidth, displayHeight,
			colorspace_to_pixfmt(fOutputColorSpace),
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	}
#else
	if (fFormatConversionFunc == NULL) {
		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
			fContext->pix_fmt, displayWidth, displayHeight);
	}
#endif

	fDecodedDataSizeInBytes = avpicture_get_size(
		colorspace_to_pixfmt(fOutputColorSpace), displayWidth, displayHeight);

	if (fDecodedData == NULL)
		fDecodedData
			= static_cast<uint8_t*>(malloc(fDecodedDataSizeInBytes));

	fPostProcessedDecodedPicture->data[0] = fDecodedData;
	fPostProcessedDecodedPicture->linesize[0]
		= fHeader.u.raw_video.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL) {
#else
	if (fFormatConversionFunc != NULL) {
#endif
		if (useDeinterlacedPicture) {
			AVFrame deinterlacedFrame;
			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
			deinterlacedFrame.linesize[0]
				= deinterlacedPicture.linesize[0];
			deinterlacedFrame.linesize[1]
				= deinterlacedPicture.linesize[1];
			deinterlacedFrame.linesize[2]
				= deinterlacedPicture.linesize[2];
			deinterlacedFrame.linesize[3]
				= deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, deinterlacedFrame.data,
				deinterlacedFrame.linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(&deinterlacedFrame,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, fRawDecodedPicture->data,
				fRawDecodedPicture->linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(fRawDecodedPicture,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		}
	}

	if (fRawDecodedPicture->interlaced_frame)
		avpicture_free(&deinterlacedPicture);
}
Example #16
bool ImageConverterFF::convert(const quint8 *const srcSlice[], const int srcStride[])
{
    DPTR_D(ImageConverterFF);
    // Check the output dimensions: they default to the input dimensions if not set. TODO: move to another common func
    if (d.w_out == 0 || d.h_out == 0) {
        if (d.w_in == 0 || d.h_in == 0)
            return false;
        setOutSize(d.w_in, d.h_in);
    }
// TODO: move this code to prepare()
    d.sws_ctx = sws_getCachedContext(d.sws_ctx
            , d.w_in, d.h_in, (AVPixelFormat)d.fmt_in
            , d.w_out, d.h_out, (AVPixelFormat)d.fmt_out
            , (d.w_in == d.w_out && d.h_in == d.h_out) ? SWS_POINT : SWS_FAST_BILINEAR //SWS_BICUBIC
            , NULL, NULL, NULL
            );
    //int64_t flags = SWS_CPU_CAPS_SSE2 | SWS_CPU_CAPS_MMX | SWS_CPU_CAPS_MMX2;
    //av_opt_set_int(d.sws_ctx, "sws_flags", flags, 0);
    if (!d.sws_ctx)
        return false;
    setupColorspaceDetails();
#if PREPAREDATA_NO_PICTURE //for YUV420 <=> RGB
#if 0
    struct
    {
        uint8_t *data[4]; //AV_NUM_DATA_POINTERS
        int linesize[4];  //AV_NUM_DATA_POINTERS
    }
#else
    AVPicture
#endif
            pic_in, pic_out;

    if ((AVPixelFormat)fmt_in == PIX_FMT_YUV420P) {
        pic_in.data[0] = (uint8_t*)in;
        pic_in.data[2] = (uint8_t*)pic_in.data[0] + (w_in * h_in);
        pic_in.data[1] = (uint8_t*)pic_in.data[2] + (w_in * h_in) / 4;
        pic_in.linesize[0] = w_in;
        pic_in.linesize[1] = w_in / 2;
        pic_in.linesize[2] = w_in / 2;
        //pic_in.linesize[3] = 0; //not used
    } else {
        pic_in.data[0] = (uint8_t*)in;
        pic_in.linesize[0] = w_in * 4; //TODO: not 0
    }
    if ((AVPixelFormat)fmt_out == PIX_FMT_YUV420P) {
        pic_out.data[0] = (uint8_t*)out;
        pic_out.data[2] = (uint8_t*)pic_out.data[0] + (w_out * h_in);
        pic_out.data[1] = (uint8_t*)pic_out.data[2] + (w_out * h_in) / 4;
        //pic_out.data[3] = (uint8_t*)pic_out.data[0] - 1;
        pic_out.linesize[0] = w_out;
        pic_out.linesize[1] = w_out / 2;
        pic_out.linesize[2] = w_out / 2;
        //3 not used
    } else {
        pic_out.data[0] = (uint8_t*)out;
        pic_out.linesize[0] = w_out * 4;
    }
#endif //PREPAREDATA_NO_PICTURE
    int result_h = sws_scale(d.sws_ctx, srcSlice, srcStride, 0, d.h_in, d.picture.data, d.picture.linesize);
    if (result_h != d.h_out) {
        qDebug("convert failed: %d, %d", result_h, d.h_out);
        return false;
    }
#if 0
    if (isInterlaced()) {
        //deprecated
        avpicture_deinterlace(&d.picture, &d.picture, (AVPixelFormat)d.fmt_out, d.w_out, d.h_out);
    }
#endif //0
    Q_UNUSED(result_h);
    return true;
}
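The //deprecated remark in the disabled block above is accurate: avpicture_deinterlace() was deprecated in the FFmpeg 2.x series and removed in FFmpeg 3.0, and the usual replacement is the yadif filter driven through libavfilter. The following is a minimal sketch of such a graph, with error handling and a fixed time base assumed for brevity; the function and context names are ours, not from any project above:

#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

/* Build a yadif graph and run one frame through it.
 * In real code the graph would be built once and reused. */
static int deinterlace_with_yadif(AVFrame *in_frame, AVFrame *out_frame,
                                  int width, int height)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *src_ctx, *sink_ctx;
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterInOut *outputs = avfilter_inout_alloc();
    char args[128];

    /* Describe the frames we will feed in (size, format, time base). */
    snprintf(args, sizeof(args), "video_size=%dx%d:pix_fmt=%d:time_base=1/25",
             width, height, AV_PIX_FMT_YUV420P);
    avfilter_graph_create_filter(&src_ctx, avfilter_get_by_name("buffer"),
                                 "in", args, NULL, graph);
    avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("buffersink"),
                                 "out", NULL, NULL, graph);

    /* Standard libavfilter parse idiom: our source feeds the graph's
     * open input, our sink drains its open output. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = src_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = sink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    avfilter_graph_parse_ptr(graph, "yadif", &inputs, &outputs, NULL);
    avfilter_graph_config(graph, NULL);

    /* Push the interlaced frame, pull the deinterlaced result. */
    av_buffersrc_add_frame(src_ctx, in_frame);
    av_buffersink_get_frame(sink_ctx, out_frame);

    avfilter_graph_free(&graph);
    return 0;
}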
Example #17
// position the pointer in the file; position is in seconds
AVFrame *VideoFFmpeg::grabFrame(long position)
{
	AVPacket packet;
	int frameFinished;
	int posFound = 1;
	bool frameLoaded = false;
	int64_t targetTs = 0;
	CacheFrame *frame;
	int64_t dts = 0;

	if (m_cacheStarted)
	{
		// when cache is active, we must not read the file directly
		do {
			pthread_mutex_lock(&m_cacheMutex);
			frame = (CacheFrame *)m_frameCacheBase.first;
			pthread_mutex_unlock(&m_cacheMutex);
			// no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
			if (frame == NULL)
			{
				// no frame in cache, in case of file it is an abnormal situation
				if (m_isFile)
				{
					// go back to no threaded reading
					stopCache();
					break;
				}
				return NULL;
			}
			if (frame->framePosition == -1)
			{
				// this frame marks the end of the file (only used for file)
				// leave in cache to make sure we don't miss it
				m_eof = true;
				return NULL;
			}
			// for streaming, always return the next frame,
			// that's what grabFrame does in non cache mode anyway.
			if (m_isStreaming || frame->framePosition == position)
			{
				return frame->frame;
			}
			// for cam, skip old frames to keep image realtime.
			// There should be no risk of clock drift since it all happens on the same CPU
			if (frame->framePosition > position)
			{
				// this can happen after rewind if the seek didn't find the first frame
				// the frame in the buffer is ahead of time, just leave it there
				return NULL;
			}
			// this frame is not useful, release it
			pthread_mutex_lock(&m_cacheMutex);
			BLI_remlink(&m_frameCacheBase, frame);
			BLI_addtail(&m_frameCacheFree, frame);
			pthread_mutex_unlock(&m_cacheMutex);
		} while (true);
	}
	double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
	int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	// come here when there is no cache or cache has been stopped
	// locate the frame, by seeking if necessary (seeking is only possible for files)
	if (m_isFile)
	{
		// first check if the position that we are looking for is in the preseek range
		// if so, just read the frame until we get there
		if (position > m_curPosition + 1
			&& m_preseek
			&& position - (m_curPosition + 1) < m_preseek)
		{
			while (av_read_frame(m_formatCtx, &packet)>=0)
			{
				if (packet.stream_index == m_videoStream)
				{
					avcodec_decode_video2(
						m_codecCtx,
						m_frame, &frameFinished,
						&packet);
					if (frameFinished)
					{
						m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
					}
				}
				av_free_packet(&packet);
				if (position == m_curPosition+1)
					break;
			}
		}
		// if the position is not in preseek, do a direct jump
		if (position != m_curPosition + 1)
		{
			int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

			if (pos < 0)
				pos = 0;

			pos += startTs;

			if (position <= m_curPosition || !m_eof)
			{
#if 0
				// Tried to make this work but couldn't: seeking on byte is ignored by the
				// format plugin and it will generally continue to read from last timestamp.
				// Too bad because frame seek is not always able to get the first frame
				// of the file.
				if (position <= m_preseek)
				{
					// we can safely go the beginning of the file
					if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
					{
						// binary seek does not reset the timestamp, must do it now
						av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
						m_curPosition = 0;
					}
				}
				else
#endif
				{
					if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
					{
						// current position is now lost, guess a value.
						// It's not important because it will be set at the end of this function
						m_curPosition = position - m_preseek - 1;
					}
				}
			}
			// this is the timestamp of the frame we're looking for
			targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

			posFound = 0;
			avcodec_flush_buffers(m_codecCtx);
		}
	} else if (m_isThreaded)
	{
		// cache is not started but threading is possible
		// better not to read the stream directly => it may take some time, better to start caching
		if (startCache())
			return NULL;
		// Abnormal!!! could not start cache, fall back on direct read
		m_isThreaded = false;
	}

	// find the correct frame, in case of streaming and no cache, it means just
	// return the next frame. This is not quite correct, may need more work
	while (av_read_frame(m_formatCtx, &packet) >= 0)
	{
		if (packet.stream_index == m_videoStream)
		{
			AVFrame *input = m_frame;
			short counter = 0;

			/* If m_isImage, loop while the data is not read properly (png, tiff, etc. formats may need several passes); otherwise the while loop is not needed */
			do {
				avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &packet);
				counter++;
			} while ((input->data[0] == 0 && input->data[1] == 0 && input->data[2] == 0 && input->data[3] == 0) && counter < 10 && m_isImage);

			// remember dts to compute exact frame number
			dts = packet.dts;
			if (frameFinished && !posFound)
			{
				if (dts >= targetTs)
				{
					posFound = 1;
				}
			}

			if (frameFinished && posFound == 1)
			{
				AVFrame * input = m_frame;

				/* This means the data wasn't read properly,
				 * this check stops crashing */
				if (   input->data[0]==0 && input->data[1]==0
					&& input->data[2]==0 && input->data[3]==0)
				{
					av_free_packet(&packet);
					break;
				}

				if (m_deinterlace)
				{
					if (avpicture_deinterlace(
						(AVPicture*) m_frameDeinterlaced,
						(const AVPicture*) m_frame,
						m_codecCtx->pix_fmt,
						m_codecCtx->width,
						m_codecCtx->height) >= 0)
					{
						input = m_frameDeinterlaced;
					}
				}
				// convert to RGB24
				sws_scale(m_imgConvertCtx,
					input->data,
					input->linesize,
					0,
					m_codecCtx->height,
					m_frameRGB->data,
					m_frameRGB->linesize);
				av_free_packet(&packet);
				frameLoaded = true;
				break;
			}
		}
		av_free_packet(&packet);
	}
	m_eof = m_isFile && !frameLoaded;
	if (frameLoaded)
	{
		m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
		if (m_isThreaded)
		{
			// normal case for file: first locate, then start cache
			if (!startCache())
			{
				// Abnormal!! could not start cache, return to non-cache mode
				m_isThreaded = false;
			}
		}
		return m_frameRGB;
	}
	return NULL;
}
Example #18
/*
 * This thread is used to load video frame asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU low 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and wait for confirmation), then
 * change the position in the stream and restarts the cache thread.
 */
void *VideoFFmpeg::cacheThread(void *data)
{
	VideoFFmpeg* video = (VideoFFmpeg*)data;
	// holds the frame that is being decoded
	CacheFrame *currentFrame = NULL;
	CachePacket *cachePacket;
	bool endOfFile = false;
	int frameFinished = 0;
	double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
	int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	while (!video->m_stopThread)
	{
		// packet cache is used solely by this thread, no need to lock
		// In case the stream/file contains streams other than the one we are looking for,
		// allow a bit of cycling to get rid quickly of those frames
		frameFinished = 0;
		while (	   !endOfFile
				&& (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL
				&& frameFinished < 25)
		{
			// free packet => packet cache is not full yet, just read more
			if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0)
			{
				if (cachePacket->packet.stream_index == video->m_videoStream)
				{
					// make sure fresh memory is allocated for the packet and move it to queue
					av_dup_packet(&cachePacket->packet);
					BLI_remlink(&video->m_packetCacheFree, cachePacket);
					BLI_addtail(&video->m_packetCacheBase, cachePacket);
					break;
				} else {
					// this is not a good packet for us, just leave it on free queue
					// Note: here we could handle sound packet
					av_free_packet(&cachePacket->packet);
					frameFinished++;
				}

			} else {
				if (video->m_isFile)
					// this marks the end of the file
					endOfFile = true;
				// if we cannot read a packet, no need to continue
				break;
			}
		}
		// frame cache is also used by main thread, lock
		if (currentFrame == NULL)
		{
			// no current frame being decoded, take free one
			pthread_mutex_lock(&video->m_cacheMutex);
			if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
				BLI_remlink(&video->m_frameCacheFree, currentFrame);
			pthread_mutex_unlock(&video->m_cacheMutex);
		}
		if (currentFrame != NULL)
		{
			// this frame is out of free and busy queue, we can manipulate it without locking
			frameFinished = 0;
			while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
			{
				BLI_remlink(&video->m_packetCacheBase, cachePacket);
				// use m_frame because when caching, it is not used in main thread
				// we can't use currentFrame directly because we need to convert to RGB first
				avcodec_decode_video2(video->m_codecCtx,
					video->m_frame, &frameFinished,
					&cachePacket->packet);
				if (frameFinished)
				{
					AVFrame * input = video->m_frame;

					/* This means the data wasn't read properly, this check stops crashing */
					if (   input->data[0]!=0 || input->data[1]!=0
						|| input->data[2]!=0 || input->data[3]!=0)
					{
						if (video->m_deinterlace)
						{
							if (avpicture_deinterlace(
								(AVPicture*) video->m_frameDeinterlaced,
								(const AVPicture*) video->m_frame,
								video->m_codecCtx->pix_fmt,
								video->m_codecCtx->width,
								video->m_codecCtx->height) >= 0)
							{
								input = video->m_frameDeinterlaced;
							}
						}
						// convert to RGB24
						sws_scale(video->m_imgConvertCtx,
							input->data,
							input->linesize,
							0,
							video->m_codecCtx->height,
							currentFrame->frame->data,
							currentFrame->frame->linesize);
						// move frame to queue, this frame is necessarily the next one
						video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
						currentFrame->framePosition = video->m_curPosition;
						pthread_mutex_lock(&video->m_cacheMutex);
						BLI_addtail(&video->m_frameCacheBase, currentFrame);
						pthread_mutex_unlock(&video->m_cacheMutex);
						currentFrame = NULL;
					}
				}
				av_free_packet(&cachePacket->packet);
				BLI_addtail(&video->m_packetCacheFree, cachePacket);
			}
			if (currentFrame && endOfFile)
			{
				// no more packet and end of file => put a special frame that indicates that
				currentFrame->framePosition = -1;
				pthread_mutex_lock(&video->m_cacheMutex);
				BLI_addtail(&video->m_frameCacheBase, currentFrame);
				pthread_mutex_unlock(&video->m_cacheMutex);
				currentFrame = NULL;
				// no need to stay any longer in this thread
				break;
			}
		}
		// small sleep to avoid unnecessary looping
		PIL_sleep_ms(10);
	}
	// before quitting, put back the current frame to queue to allow freeing
	if (currentFrame)
	{
		pthread_mutex_lock(&video->m_cacheMutex);
		BLI_addtail(&video->m_frameCacheFree, currentFrame);
		pthread_mutex_unlock(&video->m_cacheMutex);
	}
	return 0;
}
Example #19
static void ffmpeg_postprocess(struct anim * anim)
{
	AVFrame * input = anim->pFrame;
	ImBuf * ibuf = anim->last_frame;
	int filter_y = 0;

	ibuf->profile = IB_PROFILE_SRGB;

	/* This means the data wasn't read properly,
	   this check stops crashing */
	if (input->data[0]==0 && input->data[1]==0 
	    && input->data[2]==0 && input->data[3]==0){
		fprintf(stderr, "ffmpeg_fetchibuf: "
			"data not read properly...\n");
		return;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		if (avpicture_deinterlace(
			    (AVPicture*) 
			    anim->pFrameDeinterlaced,
			    (const AVPicture*)
			    anim->pFrame,
			    anim->pCodecCtx->pix_fmt,
			    anim->pCodecCtx->width,
			    anim->pCodecCtx->height)
		    < 0) {
			filter_y = TRUE;
		} else {
			input = anim->pFrameDeinterlaced;
		}
	}
	
	avpicture_fill((AVPicture*) anim->pFrameRGB, 
		       (unsigned char*) ibuf->rect, 
		       PIX_FMT_RGBA, anim->x, anim->y);

	if (ENDIAN_ORDER == B_ENDIAN) {
		int * dstStride   = anim->pFrameRGB->linesize;
		uint8_t** dst     = anim->pFrameRGB->data;
		int dstStride2[4] = { dstStride[0], 0, 0, 0 };
		uint8_t* dst2[4]  = { dst[0], 0, 0, 0 };
		int x,y,h,w;
		unsigned char* bottom;
		unsigned char* top;
		
		sws_scale(anim->img_convert_ctx,
			  (const uint8_t * const *)input->data,
			  input->linesize,
			  0,
			  anim->pCodecCtx->height,
			  dst2,
			  dstStride2);
		
		/* workaround: sws_scale bug sets alpha = 0;
		   compensate for that, for altivec bugs, and for the y-flip... */
		
		bottom = (unsigned char*) ibuf->rect;
		top = bottom + ibuf->x * (ibuf->y-1) * 4;
		
		h = (ibuf->y + 1) / 2;
		w = ibuf->x;
		
		for (y = 0; y < h; y++) {
			unsigned char tmp[4];
			unsigned int * tmp_l =
				(unsigned int*) tmp;
			tmp[3] = 0xff;
			
			for (x = 0; x < w; x++) {
				tmp[0] = bottom[0];
				tmp[1] = bottom[1];
				tmp[2] = bottom[2];
				
				bottom[0] = top[0];
				bottom[1] = top[1];
				bottom[2] = top[2];
				bottom[3] = 0xff;
				
				*(unsigned int*) top = *tmp_l;
				
				bottom +=4;
				top += 4;
			}
			top -= 8 * w;
		}
	} else {
		int * dstStride   = anim->pFrameRGB->linesize;
		uint8_t** dst     = anim->pFrameRGB->data;
		int dstStride2[4] = { -dstStride[0], 0, 0, 0 };
		uint8_t* dst2[4]  = { dst[0] + (anim->y - 1)*dstStride[0],
				      0, 0, 0 };
		int i;
		unsigned char* r;
		
		sws_scale(anim->img_convert_ctx,
			  (const uint8_t * const *)input->data,
			  input->linesize,
			  0,
			  anim->pCodecCtx->height,
			  dst2,
			  dstStride2);
		
		r = (unsigned char*) ibuf->rect;
		
		/* workaround for a sws_scale bug: older versions
		   of sws_scale set alpha = 0... */
		if (r[3] == 0) {
			for (i = 0; i < ibuf->x * ibuf->y; i++) {
				r[3] = 0xff;
				r += 4;
			}
		}
	}

	if (filter_y) {
		IMB_filtery(ibuf);
	}
}
Example #20
bool ThumbFinder::getFrameImage(bool needKeyFrame, int64_t requiredPTS)
{
    AVPacket pkt;
    AVPicture orig;
    AVPicture retbuf;
    bzero(&orig, sizeof(AVPicture));
    bzero(&retbuf, sizeof(AVPicture));

    av_init_packet(&pkt);

    int frameFinished = 0;
    int keyFrame;
    int frameCount = 0;
    bool gotKeyFrame = false;

    while (av_read_frame(m_inputFC, &pkt) >= 0 && !frameFinished)
    {
        if (pkt.stream_index == m_videostream)
        {
            frameCount++;

            keyFrame = pkt.flags & PKT_FLAG_KEY;

            if (m_startPTS == -1 && pkt.dts != (int64_t)AV_NOPTS_VALUE)
            {
                m_startPTS = pkt.dts;
                m_frameTime = pkt.duration;
            }

            if (keyFrame)
                gotKeyFrame = true;

            if (!gotKeyFrame && needKeyFrame)
            {
                av_free_packet(&pkt);
                continue;
            }

            if (m_firstIFramePTS == -1)
                m_firstIFramePTS = pkt.dts;

            avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &pkt);

            if (requiredPTS != -1 && pkt.dts != (int64_t)AV_NOPTS_VALUE && pkt.dts < requiredPTS)
                frameFinished = false;

            m_currentPTS = pkt.dts;
        }

        av_free_packet(&pkt);
    }

    if (frameFinished)
    {
        avpicture_fill(&retbuf, m_outputbuf, PIX_FMT_RGB32, m_frameWidth, m_frameHeight);

        avpicture_deinterlace((AVPicture*)m_frame, (AVPicture*)m_frame,
                              m_codecCtx->pix_fmt, m_frameWidth, m_frameHeight);

        myth_sws_img_convert(
            &retbuf, PIX_FMT_RGB32,
            (AVPicture*) m_frame, m_codecCtx->pix_fmt, m_frameWidth, m_frameHeight);

        QImage img(m_outputbuf, m_frameWidth, m_frameHeight,
                   QImage::Format_RGB32);

        QByteArray ffile = m_frameFile.toLocal8Bit();
        if (!img.save(ffile.constData(), "JPEG"))
        {
            VERBOSE(VB_IMPORTANT, "Failed to save thumb: " + m_frameFile);
        }

        if (m_updateFrame)
        {
            if (m_image)
            {
                m_image->DownRef();
                m_image = NULL;
            }

            m_image = GetMythMainWindow()->GetCurrentPainter()->GetFormatImage();
            m_image->Assign(img);
            m_image->UpRef();

            m_frameImage->SetImage(m_image);
        }

        updateCurrentPos();
    }

    return true;
}
Example #21
static void ffmpeg_postprocess(struct anim *anim)
{
	AVFrame *input = anim->pFrame;
	ImBuf *ibuf = anim->last_frame;
	int filter_y = 0;

	if (!anim->pFrameComplete) {
		return;
	}

	/* This means the data wasn't read properly,
	 * this check stops crashing */
	if (input->data[0] == 0 && input->data[1] == 0 &&
	    input->data[2] == 0 && input->data[3] == 0)
	{
		fprintf(stderr, "ffmpeg_fetchibuf: "
		        "data not read properly...\n");
		return;
	}

	av_log(anim->pFormatCtx, AV_LOG_DEBUG, 
	       "  POSTPROC: anim->pFrame planes: %p %p %p %p\n",
	       input->data[0], input->data[1], input->data[2],
	       input->data[3]);


	if (anim->ib_flags & IB_animdeinterlace) {
		if (avpicture_deinterlace(
		        (AVPicture *)
		        anim->pFrameDeinterlaced,
		        (const AVPicture *)
		        anim->pFrame,
		        anim->pCodecCtx->pix_fmt,
		        anim->pCodecCtx->width,
		        anim->pCodecCtx->height) < 0)
		{
			filter_y = TRUE;
		}
		else {
			input = anim->pFrameDeinterlaced;
		}
	}
	
	avpicture_fill((AVPicture *) anim->pFrameRGB,
	               (unsigned char *) ibuf->rect,
	               PIX_FMT_RGBA, anim->x, anim->y);

	if (ENDIAN_ORDER == B_ENDIAN) {
		int *dstStride   = anim->pFrameRGB->linesize;
		uint8_t **dst     = anim->pFrameRGB->data;
		int dstStride2[4] = { dstStride[0], 0, 0, 0 };
		uint8_t *dst2[4]  = { dst[0], 0, 0, 0 };
		int x, y, h, w;
		unsigned char *bottom;
		unsigned char *top;
		
		sws_scale(anim->img_convert_ctx,
		          (const uint8_t *const *)input->data,
		          input->linesize,
		          0,
		          anim->y,
		          dst2,
		          dstStride2);
		
		bottom = (unsigned char *) ibuf->rect;
		top = bottom + ibuf->x * (ibuf->y - 1) * 4;
		
		h = (ibuf->y + 1) / 2;
		w = ibuf->x;
		
		for (y = 0; y < h; y++) {
			unsigned char tmp[4];
			unsigned int *tmp_l =
			    (unsigned int *) tmp;
			
			for (x = 0; x < w; x++) {
				tmp[0] = bottom[0];
				tmp[1] = bottom[1];
				tmp[2] = bottom[2];
				tmp[3] = bottom[3];
				
				bottom[0] = top[0];
				bottom[1] = top[1];
				bottom[2] = top[2];
				bottom[3] = top[3];
				
				*(unsigned int *) top = *tmp_l;
				
				bottom += 4;
				top += 4;
			}
			top -= 8 * w;
		}
	}
	else {
		int *dstStride   = anim->pFrameRGB->linesize;
		uint8_t **dst     = anim->pFrameRGB->data;
		int dstStride2[4] = { -dstStride[0], 0, 0, 0 };
		uint8_t *dst2[4]  = { dst[0] + (anim->y - 1) * dstStride[0],
			                  0, 0, 0 };
		
		sws_scale(anim->img_convert_ctx,
		          (const uint8_t *const *)input->data,
		          input->linesize,
		          0,
		          anim->y,
		          dst2,
		          dstStride2);
	}

	if (filter_y) {
		IMB_filtery(ibuf);
	}
}