Example #1
JNIEXPORT jint JNICALL Java_com_jazzros_ffmpegtest_AVThread_nativeUpdateBitmap(JNIEnv* env, jobject thiz)
{
    //ros: gSwsContext = sws_getCachedContext(gSwsContext, gVideoCodecCtx->width, gVideoCodecCtx->height, gVideoCodecCtx->pix_fmt, gAbi.width, gAbi.height, AV_PIX_FMT_RGB565LE, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    gSwsContext = sws_getCachedContext(gSwsContext, gVideoCodecCtx->width, gVideoCodecCtx->height, gVideoCodecCtx->pix_fmt, gAbi.width, gAbi.height, AV_PIX_FMT_RGB565LE, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (gSwsContext == 0)
    {
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "sws_getCachedContext() failed");
        return -1;
    }

    AVPicture pict;
    //ros: int size = avpicture_fill(&pict, gBitmapRefPixelBuffer, AV_PIX_FMT_RGB565LE, gAbi.width, gAbi.height);
    int size = avpicture_fill(&pict, gBitmapRefPixelBuffer, AV_PIX_FMT_RGB565LE, gAbi.width, gAbi.height);
    if (size != gAbi.stride * gAbi.height)
    {
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "size != gAbi.stride * gAbi.height");
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "size = %d", size);
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "gAbi.stride * gAbi.height = %d", gAbi.stride * gAbi.height);
        return -2;
    }

    int height = sws_scale(gSwsContext, (const uint8_t* const*)gVideoFrame->data, gVideoFrame->linesize, 0, gVideoCodecCtx->height, pict.data, pict.linesize);
    if (height != gAbi.height)
    {
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "height != gAbi.height");
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "height = %d", height);
        __android_log_print(ANDROID_LOG_ERROR, "com.jazzros.ffmpegtest", "gAbi.height = %d", gAbi.height);
        return -3;
    }

    return 0;
}
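A matching teardown is not shown in this example. As a minimal sketch (the function name is hypothetical; gSwsContext is the same global as above), the context returned by sws_getCachedContext() should eventually be released with sws_freeContext():

JNIEXPORT void JNICALL Java_com_jazzros_ffmpegtest_AVThread_nativeReleaseSws(JNIEnv* env, jobject thiz)
{
    /* sws_freeContext() accepts NULL, so this is safe even if no context
       was ever created. */
    sws_freeContext(gSwsContext);
    gSwsContext = NULL;
}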
Example #2
void MediaEngine::updateSwsFormat(int videoPixelMode) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	AVPixelFormat swsDesired = getSwsFormat(videoPixelMode);
	if (swsDesired != m_sws_fmt && m_pCodecCtx != 0) {
		m_sws_fmt = swsDesired;
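		// sws_getCachedContext() reuses m_sws_ctx when it already matches
		// these parameters; otherwise the old context is freed and a new
		// one is allocated.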
		m_sws_ctx = sws_getCachedContext
			(
				m_sws_ctx,
				m_pCodecCtx->width,
				m_pCodecCtx->height,
				m_pCodecCtx->pix_fmt,
				m_desWidth,
				m_desHeight,
				(AVPixelFormat)m_sws_fmt,
				SWS_BILINEAR,
				NULL,
				NULL,
				NULL
			);
	}
#endif
}
Example #3
void RasterRenderPrivate::drawData(QPainter *painter, QRect rect)
{
	if (!srcFrame->isValid()){
		return;
	}
	QRect dest = fitRect(ARender::instance()->getPreferSize(), rect);
	QSize dstSize = dest.size()*painter->device()->devicePixelRatio();
	if (!dstFrame || dstFrame->size != dstSize){
		delete dstFrame;
		dstFrame = new Buffer(AV_PIX_FMT_RGB32, dstSize, 4);
		frame = QImage(*dstFrame->data, dstSize.width(), dstSize.height(), QImage::Format_RGB32);
		dirty = true;
	}
	if (dirty){
		swsctx = sws_getCachedContext(swsctx,
			srcFrame->size.width(), srcFrame->size.height(), srcFrame->format,
			dstFrame->size.width(), dstFrame->size.height(), dstFrame->format,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
		dataLock.lock();
		sws_scale(swsctx,
			srcFrame->data, srcFrame->width,
			0, srcFrame->size.height(),
			dstFrame->data, dstFrame->width);
		dirty = false;
		dataLock.unlock();
	}
	painter->drawImage(dest, frame);
}
Example #4
int32_t avbin_decode_video(AVbinStream *stream,
                       uint8_t *data_in, size_t size_in,
                       uint8_t *data_out)
{
    AVPicture picture_rgb;
    int got_picture;
    int width = stream->codec_context->width;
    int height = stream->codec_context->height;
    int used;

    if (stream->type != AVMEDIA_TYPE_VIDEO)
        return AVBIN_RESULT_ERROR;

    AVPacket packet;
    av_init_packet(&packet);
    packet.data = data_in;
    packet.size = size_in;

    used = avcodec_decode_video2(stream->codec_context,
                                stream->frame, &got_picture,
                                &packet);

    if (!got_picture)
        return AVBIN_RESULT_ERROR;


    avpicture_fill(&picture_rgb, data_out, PIX_FMT_RGB24, width, height);
    static struct SwsContext *img_convert_ctx = NULL;
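    /* The static pointer lets sws_getCachedContext() reuse one context
       across calls instead of reallocating it for every frame. */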
    img_convert_ctx = sws_getCachedContext(img_convert_ctx,width, height,stream->codec_context->pix_fmt,width, height,PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    sws_scale(img_convert_ctx, (const uint8_t* const*)stream->frame->data, stream->frame->linesize,0, height, picture_rgb.data, picture_rgb.linesize);

    return used;
}
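avpicture_fill() is deprecated in current FFmpeg. A minimal sketch of the same fill step using av_image_fill_arrays(), reusing data_out, width and height from the example above (the rest of the function would be unchanged):

#include <libavutil/imgutils.h>

uint8_t *rgb_data[4];
int rgb_linesize[4];
/* Wrap the caller-provided buffer exactly as avpicture_fill() did;
   align = 1 keeps the RGB24 plane tightly packed. */
av_image_fill_arrays(rgb_data, rgb_linesize, data_out,
                     AV_PIX_FMT_RGB24, width, height, 1);
/* rgb_data / rgb_linesize then stand in for picture_rgb.data /
   picture_rgb.linesize in the sws_scale() call. */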
Example #5
bool FFDecSW::open( StreamInfo *streamInfo, Writer * )
{
	AVCodec *codec = FFDec::init( streamInfo );
	if ( !codec )
		return false;
	if ( codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO )
	{
		if ( codec_ctx->pix_fmt == AV_PIX_FMT_NONE || streamInfo->W <= 0 || streamInfo->H <= 0 )
			return false;
		if ( codec->capabilities & CODEC_CAP_DR1 )
			codec_ctx->flags |= CODEC_FLAG_EMU_EDGE;
		if ( ( codec_ctx->thread_count = threads ) > 1 )
		{
			if ( !thread_type_slice )
				codec_ctx->thread_type = FF_THREAD_FRAME;
			else
				codec_ctx->thread_type = FF_THREAD_SLICE;
		}
		if ( codec_ctx->codec_id != CODEC_ID_H264 && codec_ctx->codec_id != CODEC_ID_VP8 )
			codec_ctx->lowres = lowres;
	}
	if ( !FFDec::openCodec( codec ) )
		return false;
	if ( codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO )
	{
		if ( codec_ctx->lowres )
		{
			streamInfo->W = codec_ctx->width;
			streamInfo->H = codec_ctx->height;
		}
		sws_ctx = sws_getCachedContext( NULL, streamInfo->W, streamInfo->H, codec_ctx->pix_fmt, streamInfo->W, streamInfo->H, AV_PIX_FMT_YUV420P, SWS_POINT, NULL, NULL, NULL );
	}
	return true;
}
Example #6
int getThumbnail(AVFrame* pInputFrame, AVFrame* pOutputFrame, int desW, int desH)
{
    if (pInputFrame == NULL || pOutputFrame == NULL)
    {
        return -1;
    }
    SwsContext* pSwsContext = NULL;
    pSwsContext = sws_getCachedContext(pSwsContext, pInputFrame->width, pInputFrame->height, (AVPixelFormat)pInputFrame->format,
                                       desW, desH, AV_PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);

    if (pSwsContext == NULL)
    {
        return -1;
    }

    m_pThumbFrame->width = desW;
    m_pThumbFrame->height = desH;
    m_pThumbFrame->format = AV_PIX_FMT_RGB565;

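    // NOTE: av_frame_get_buffer() allocates fresh buffers for the frame; its
    // return value should be checked, and any buffer from a previous call
    // released first with av_frame_unref().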
    av_frame_get_buffer(m_pThumbFrame, 16);

    sws_scale(pSwsContext, pInputFrame->data, pInputFrame->linesize, 0, pInputFrame->height, m_pThumbFrame->data, m_pThumbFrame->linesize);

    sws_freeContext(pSwsContext);

    return 0;
}
Example #7
void RtspStreamFrameFormatter::updateSWSContext()
{
    AVPixelFormat pixFormat;

    // Convert deprecated pixel formats in the incoming stream
    // in order to suppress the swscaler warning.
    switch (m_stream->codecpar->format)
    {
    case AV_PIX_FMT_YUVJ420P:
        pixFormat = AV_PIX_FMT_YUV420P;
        break;
    case AV_PIX_FMT_YUVJ422P:
        pixFormat = AV_PIX_FMT_YUV422P;
        break;
    case AV_PIX_FMT_YUVJ444P:
        pixFormat = AV_PIX_FMT_YUV444P;
        break;
    case AV_PIX_FMT_YUVJ440P:
        pixFormat = AV_PIX_FMT_YUV440P;
        break;
    default:
        pixFormat = (AVPixelFormat) m_stream->codecpar->format;
        break;
    }

    m_sws_context = sws_getCachedContext(m_sws_context,
                                         m_width, m_height,
                                         pixFormat,
                                         m_width, m_height,
                                         m_pixelFormat,
                                         SWS_BICUBIC, NULL, NULL, NULL);
}
Example #8
int decodeFrame()
{
	int frameFinished = 0;
	AVPacket packet;

	while(av_read_frame(gFormatCtx, &packet)>=0) {
		if(packet.stream_index == gVideoStreamIdx){
			avcodec_decode_video2(gVideoCodecCtx, gFrame, &frameFinished, &packet);

			if(frameFinished){
				gImgConvertCtx = sws_getCachedContext(gImgConvertCtx, gVideoCodecCtx->width, gVideoCodecCtx->height,
						gVideoCodecCtx->pix_fmt, gVideoCodecCtx->width, gVideoCodecCtx->height,
						PIX_FMT_RGB565LE, SWS_BICUBIC, NULL, NULL, NULL);

				sws_scale(gImgConvertCtx, gFrame->data, gFrame->linesize, 0, gVideoCodecCtx->height,
						gFrameRGB->data,gFrameRGB->linesize);

				av_free_packet(&packet);

				return 0;
			}
		}

		av_free_packet(&packet);
	}
	return -1;
}
Example #9
static bool mp_media_init_scaling(mp_media_t *m)
{
	int space = get_sws_colorspace(m->v.decoder->colorspace);
	int range = get_sws_range(m->v.decoder->color_range);
	const int *coeff = sws_getCoefficients(space);

	m->swscale = sws_getCachedContext(NULL,
			m->v.decoder->width, m->v.decoder->height,
			m->v.decoder->pix_fmt,
			m->v.decoder->width, m->v.decoder->height,
			m->scale_format,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	if (!m->swscale) {
		blog(LOG_WARNING, "MP: Failed to initialize scaler");
		return false;
	}

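	/* The arguments after the tables/ranges are brightness, contrast and
	   saturation: 0, 1.0 and 1.0 here, in 16.16 fixed point. */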
	sws_setColorspaceDetails(m->swscale, coeff, range, coeff, range, 0,
			FIXED_1_0, FIXED_1_0);

	int ret = av_image_alloc(m->scale_pic, m->scale_linesizes,
			m->v.decoder->width, m->v.decoder->height,
			m->scale_format, 1);
	if (ret < 0) {
		blog(LOG_WARNING, "MP: Failed to create scale pic data");
		return false;
	}

	return true;
}
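A hypothetical counterpart that releases what mp_media_init_scaling() allocates (the function name is an assumption; m->scale_pic and m->swscale are the fields used above):

static void mp_media_free_scaling(mp_media_t *m)
{
	/* av_image_alloc() makes one allocation anchored at the first data
	   pointer, so freeing scale_pic[0] releases the whole image. */
	av_freep(&m->scale_pic[0]);
	sws_freeContext(m->swscale);
	m->swscale = NULL;
}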
Example #10
static bool swscale(unsigned char *src, unsigned char *dst, int sw, int sh, int dw, int dh)
{
	bool ret = false;
	struct SwsContext *scale = NULL;
	AVFrame *sframe, *dframe;
	scale = sws_getCachedContext(scale, sw, sh, PIX_FMT_RGB32, dw, dh, PIX_FMT_RGB32, SWS_BICUBIC, 0, 0, 0);
	if (!scale) {
		lt_info_c("%s: ERROR setting up SWS context\n", __func__);
		return false;
	}
	sframe = av_frame_alloc();
	dframe = av_frame_alloc();
	if (!sframe || !dframe) {
		lt_info_c("%s: could not alloc sframe (%p) or dframe (%p)\n", __func__, sframe, dframe);
		goto out;
	}
	avpicture_fill((AVPicture *)sframe, &(src[0]), PIX_FMT_RGB32, sw, sh);
	avpicture_fill((AVPicture *)dframe, &(dst[0]), PIX_FMT_RGB32, dw, dh);
	sws_scale(scale, sframe->data, sframe->linesize, 0, sh, dframe->data, dframe->linesize);
	ret = true; /* mark success so the caller does not always see false */
 out:
	av_frame_free(&sframe);
	av_frame_free(&dframe);
	sws_freeContext(scale);
	return ret;
}
Example #11
static void ffmpeg_encoder_scale(void) {
    sws_context = sws_getCachedContext(sws_context,
            frame->width, frame->height, AV_PIX_FMT_YUV420P,
            frame2->width, frame2->height, AV_PIX_FMT_YUV420P,
            SWS_BICUBIC, NULL, NULL, NULL);
    sws_scale(sws_context, (const uint8_t * const *)frame->data, frame->linesize, 0,
            frame->height, frame2->data, frame2->linesize);
}
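ffmpeg_encoder_scale() assumes frame2 already has a YUV420P buffer attached. A minimal allocation sketch (the helper name is hypothetical):

static AVFrame *alloc_yuv420p_frame(int width, int height) {
    AVFrame *f = av_frame_alloc();
    if (!f)
        return NULL;
    f->format = AV_PIX_FMT_YUV420P;
    f->width  = width;
    f->height = height;
    /* 32-byte alignment keeps sws_scale()'s SIMD paths efficient. */
    if (av_frame_get_buffer(f, 32) < 0) {
        av_frame_free(&f);
        return NULL;
    }
    return f;
}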
Example #12
int DecoderHelper::DecodeVideo(char *inBuff, int inBuffSize, void *yuvBuff, int width, int height)
{
	if (NULL == inBuff)			return -1;
	if (1 > inBuffSize)			return -1;
	if (NULL == yuvBuff)		return -1;
	if (NULL == _videoCodecContext)		return -2;

	_videoAVPacket.size = inBuffSize;
	_videoAVPacket.data	= (uint8_t*)inBuff;

	int frameFinished = 0;
	int nDecode = avcodec_decode_video2(_videoCodecContext, _videoFrame420, &frameFinished, &_videoAVPacket);//(uint8_t*)pInBuffer, inputSize);
	if (nDecode < 0)	return -3;
	if (!frameFinished)	return -4;

	if  (width != _width || height != _height)
	{
		if (NULL != _avframeYUV)
		{
			av_frame_free(&_avframeYUV);
			_avframeYUV = NULL;
		}

		if (NULL != _swsContext)
		{
			sws_freeContext(_swsContext);
			_swsContext = NULL;
		}

		_width = width;
		_height = height;
	}

	if (NULL == _avframeYUV)
	{
		int numBytes = avpicture_get_size((AVPixelFormat)_outputFormat, width, height);
		_avframeYUV = av_frame_alloc();
	}
	if (NULL == _avframeYUV)		return -5;

	if (avpicture_fill((AVPicture *)_avframeYUV, (uint8_t*)yuvBuff, (AVPixelFormat)_outputFormat,
		width, height) < 0)
	{
		return -1;
	}

	if (NULL == _swsContext)
	{
		_swsContext = sws_getCachedContext(_swsContext, _videoCodecContext->width, _videoCodecContext->height, (AVPixelFormat)AV_PIX_FMT_YUV420P, 
			width, height, (AVPixelFormat)_outputFormat, SWS_BICUBIC, NULL, NULL, NULL);
	}
	if (NULL == _swsContext)		return -1;

	int ret = sws_scale(_swsContext, _videoFrame420->data, _videoFrame420->linesize, 0, _videoCodecContext->height, 
		_avframeYUV->data, _avframeYUV->linesize);

	return 0;
}
Example #13
void LiveStreamFrameFormatter::updateSWSContext()
{
    m_sws_context = sws_getCachedContext(m_sws_context,
                                         m_stream->codec->width, m_stream->codec->height,
                                         m_stream->codec->pix_fmt,
                                         m_stream->codec->width, m_stream->codec->height,
                                         m_pixelFormat,
                                         SWS_BICUBIC, NULL, NULL, NULL);
}
Example #14
/*
Convert RGB24 array to YUV. Save directly to the `frame`,
modifying its `data` and `linesize` fields
*/
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };
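    /* RGB24 is a single packed plane, so one linesize: 3 bytes per pixel. */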
    sws_context = sws_getCachedContext(sws_context,
            c->width, c->height, AV_PIX_FMT_RGB24,
            c->width, c->height, AV_PIX_FMT_YUV420P,
            0, NULL, NULL, NULL);
    sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
            c->height, frame->data, frame->linesize);
}
Example #15
bool video_recording_state_t::init_sws_context(void)
{
	AVCodecContext *c = video_stream->codec;
	sws_context = sws_getCachedContext(sws_context, c->width, c->height, (enum AVPixelFormat)video_frame_raw->format,
									   c->width, c->height, AV_PIX_FMT_YUV420P,
									   SWS_BICUBIC, NULL, NULL, NULL);
	if (!sws_context) return false;
	return true;
}
Example #16
/***********************************************************************
**
** Process one frame with ffmpeg-libavfilter
**
***********************************************************************/
static int convert_picture(vf_wrapper_t * wrapper, dt_av_frame_t * src)
{
    uint8_t *buffer;
    int buffer_size;

    vf_ffmpeg_ctx_t *vf_ctx = (vf_ffmpeg_ctx_t *)(wrapper->vf_priv);
    dtvideo_para_t *para = &wrapper->para;
    int sw = para->s_width;
    int dw = para->d_width;
    int sh = para->s_height;
    int dh = para->d_height;
    int sf = para->s_pixfmt;
    int df = para->d_pixfmt;

    if (!vf_ctx->swap_frame) {
        vf_ctx->swap_frame = (dt_av_frame_t *)malloc(sizeof(dt_av_frame_t));
    }

    dt_av_frame_t *pict = vf_ctx->swap_frame;
    if (!pict) {
        dt_error(TAG, "[%s:%d] err: swap frame malloc failed \n", __FUNCTION__,
                 __LINE__);
        return -1;
    }
    memset(pict, 0, sizeof(dt_av_frame_t));

    AVPicture *dst = (AVPicture *)pict;
    buffer_size = avpicture_get_size(df, dw, dh);
    if (buffer_size > vf_ctx->swap_buf_size) {
        if (vf_ctx->swapbuf) {
            free(vf_ctx->swapbuf);
        }
        vf_ctx->swap_buf_size = buffer_size;
        vf_ctx->swapbuf = (uint8_t *) malloc(buffer_size * sizeof(uint8_t));
    }
    buffer = vf_ctx->swapbuf;
    avpicture_fill((AVPicture *) dst, buffer, df, dw, dh);

    vf_ctx->pSwsCtx = sws_getCachedContext(vf_ctx->pSwsCtx, sw, sh, sf, dw, dh, df,
                                           SWS_BICUBIC, NULL, NULL, NULL);
    sws_scale(vf_ctx->pSwsCtx, src->data, src->linesize, 0, sh, dst->data,
              dst->linesize);

    pict->pts = src->pts;
    pict->width = dw;
    pict->height = dh;
    pict->pixfmt = df;
    if (src->data) {
        free(src->data[0]);
    }
    memcpy(src, pict, sizeof(dt_av_frame_t));

    vf_ctx->swapbuf = NULL;
    vf_ctx->swap_buf_size = 0;
    return 0;
}
Example #17
int FfmpegCamera::PrimeCapture()
{
    Info( "Priming capture from %s", mPath.c_str() );

    // Open the input, not necessarily a file
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );

    // Locate stream info from input
    if ( av_find_stream_info( mFormatContext ) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    // Find first video stream present
    mVideoStreamId = -1;
    for ( int i=0; i < mFormatContext->nb_streams; i++ )
    {
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    // Open the codec
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height );
    mBuffer.size( pictureSize );
    
    avpicture_fill( (AVPicture *)mFrame, (unsigned char *)mBuffer, PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);

#if HAVE_LIBSWSCALE
    if ( (mConvertContext = sws_getCachedContext( mConvertContext, mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL )) == NULL )
        Fatal( "Unable to create conversion context for %s", mPath.c_str() );
#else // HAVE_LIBSWSCALE
    Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE

    return( 0 );
}
Example #18
static int vo_x11_vfmt2rgb (AVPicture * dst, AVPicture * src)
{
    static struct SwsContext *img_convert_ctx;

    img_convert_ctx = sws_getCachedContext (img_convert_ctx, dlpctxp->pwidth, dlpctxp->pheight, src_pic_fmt, dw, dh, my_pic_fmt, SWS_BICUBIC, NULL, NULL, NULL);

    sws_scale (img_convert_ctx, src->data, src->linesize, 0, dh, dst->data, dst->linesize);

    return 0;
}
Example #19
void CFFmpegPlayer::setOutputSize(int width, int height) {
    if (m_outputWidth == width && m_outputHeight == height)
        return;
    m_outputWidth = width;
    m_outputHeight = height;
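    // SWS_POINT (nearest neighbour) is the cheapest scaler, trading
    // quality for speed.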
    m_swsCtx = sws_getCachedContext(
        m_swsCtx, m_codecCtx->width, m_codecCtx->height,
        m_codecCtx->pix_fmt, width, height,
        AV_PIX_FMT_BGRA, SWS_POINT, nullptr, nullptr, nullptr);
    m_pixelSize = GetGLPixelSize(GL_BGRA);
}
Example #20
void PhVideoDecoder::frameToRgb(AVFrame *avFrame, PhVideoBuffer *buffer)
{
	int frameHeight = avFrame->height;
	if(_deinterlace)
		frameHeight = avFrame->height / 2;

	// As the following formats are deprecated (see https://libav.org/doxygen/master/pixfmt_8h.html#a9a8e335cf3be472042bc9f0cf80cd4c5)
	// we replace them with the new ones recommended by LibAv
	// in order to get rid of the warnings
	AVPixelFormat pixFormat;
	switch (_videoStream->codec->pix_fmt) {
	case AV_PIX_FMT_YUVJ420P:
		pixFormat = AV_PIX_FMT_YUV420P;
		break;
	case AV_PIX_FMT_YUVJ422P:
		pixFormat = AV_PIX_FMT_YUV422P;
		break;
	case AV_PIX_FMT_YUVJ444P:
		pixFormat = AV_PIX_FMT_YUV444P;
		break;
	case AV_PIX_FMT_YUVJ440P:
		pixFormat = AV_PIX_FMT_YUV440P;
		break;
	default:
		pixFormat = _videoStream->codec->pix_fmt;
		break;
	}

	/* Note: we output the frames in AV_PIX_FMT_BGRA rather than AV_PIX_FMT_RGB24,
	 * because this format is native to most video cards and will avoid a conversion
	 * in the video driver */
	/* sws_getCachedContext will check if the context is valid for the given parameters. If the context is not valid,
	 * it will be freed and a new one will be allocated. */
	_swsContext = sws_getCachedContext(_swsContext, avFrame->width, _videoStream->codec->height, pixFormat,
	                                   _videoStream->codec->width, frameHeight, AV_PIX_FMT_BGRA,
	                                   SWS_POINT, NULL, NULL, NULL);


	int linesize = avFrame->width * 4;
	uint8_t *rgb = buffer->rgb();
	if (0 <= sws_scale(_swsContext, (const uint8_t * const *) avFrame->data,
	                   avFrame->linesize, 0, _videoStream->codec->height, &rgb,
	                   &linesize)) {

		PhFrame frame = AVTimestamp_to_PhFrame(av_frame_get_best_effort_timestamp(avFrame));

		buffer->setFrame(frame);
		buffer->setWidth(avFrame->width);
		buffer->setHeight(frameHeight);

		// tell the video engine that we have finished decoding!
		emit frameAvailable(buffer);
	}
}
Example #21
int THMoviePictureBuffer::write(AVFrame* pFrame, double dPts)
{
    THMoviePicture* pMoviePicture = nullptr;
    SDL_LockMutex(m_pMutex);
    while(full() && !m_fAborting)
    {
        SDL_CondWait(m_pCond, m_pMutex);
    }
    SDL_UnlockMutex(m_pMutex);
    if(m_fAborting) { return -1; }

    pMoviePicture = &m_aPictureQueue[m_iWriteIndex];
    SDL_LockMutex(pMoviePicture->m_pMutex);

    if(pMoviePicture->m_pTexture)
    {
        m_pSwsContext = sws_getCachedContext(m_pSwsContext, pFrame->width, pFrame->height, (PixelFormat)pFrame->format, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight, pMoviePicture->m_pixelFormat, SWS_BICUBIC, nullptr, nullptr, nullptr);
        if(m_pSwsContext == nullptr)
        {
            SDL_UnlockMutex(m_aPictureQueue[m_iWriteIndex].m_pMutex);
            std::cerr << "Failed to initialize SwsContext\n";
            return 1;
        }

        /* Allocate a new frame and buffer for the destination RGB24 data. */
        AVFrame *pFrameRGB = av_frame_alloc();
        int numBytes = avpicture_get_size(pMoviePicture->m_pixelFormat, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight);
        uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
        avpicture_fill((AVPicture *)pFrameRGB, buffer, pMoviePicture->m_pixelFormat, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight);

        /* Rescale the frame data and convert it to RGB24. */
        sws_scale(m_pSwsContext, pFrame->data, pFrame->linesize, 0, pMoviePicture->m_iHeight, pFrameRGB->data, pFrameRGB->linesize);

        /* Upload it to the texture we render from - note that this works because our OpenGL context shares its texture namespace with the main thread's context. */
        SDL_UpdateTexture(pMoviePicture->m_pTexture, nullptr, buffer, pMoviePicture->m_iWidth * 3);

        av_free(buffer);
        av_frame_free(&pFrameRGB);

        pMoviePicture->m_dPts = dPts;

        SDL_UnlockMutex(m_aPictureQueue[m_iWriteIndex].m_pMutex);
        m_iWriteIndex++;
        if(m_iWriteIndex == PICTURE_BUFFER_SIZE)
        {
            m_iWriteIndex = 0;
        }
        SDL_LockMutex(m_pMutex);
        m_iCount++;
        SDL_UnlockMutex(m_pMutex);
    }

    return 0;
}
Example #22
int THMoviePictureBuffer::write(AVFrame* pFrame, double dPts)
{
    THMoviePicture* pMoviePicture = nullptr;
    SDL_LockMutex(m_pMutex);
    while(full() && !m_fAborting)
    {
        SDL_CondWait(m_pCond, m_pMutex);
    }
    SDL_UnlockMutex(m_pMutex);
    if(m_fAborting) { return -1; }

    pMoviePicture = &m_aPictureQueue[m_iWriteIndex];
    SDL_LockMutex(pMoviePicture->m_pMutex);

    if(pMoviePicture->m_pBuffer)
    {
        m_pSwsContext = sws_getCachedContext(m_pSwsContext, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight, pMoviePicture->m_pixelFormat, SWS_BICUBIC, nullptr, nullptr, nullptr);
        if(m_pSwsContext == nullptr)
        {
            SDL_UnlockMutex(m_aPictureQueue[m_iWriteIndex].m_pMutex);
            std::cerr << "Failed to initialize SwsContext\n";
            return 1;
        }

        /* Allocate a new frame and buffer for the destination RGB24 data. */
        AVFrame *pFrameRGB = av_frame_alloc();
#if (defined(CORSIX_TH_USE_LIBAV) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54, 6, 0)) || \
    (defined(CORSIX_TH_USE_FFMPEG) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 63, 100))
        av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, pMoviePicture->m_pBuffer, pMoviePicture->m_pixelFormat, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight, 1);
#else
        avpicture_fill((AVPicture *)pFrameRGB, pMoviePicture->m_pBuffer, pMoviePicture->m_pixelFormat, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight);
#endif

        /* Rescale the frame data and convert it to RGB24. */
        sws_scale(m_pSwsContext, pFrame->data, pFrame->linesize, 0, pFrame->height, pFrameRGB->data, pFrameRGB->linesize);

        av_frame_free(&pFrameRGB);

        pMoviePicture->m_dPts = dPts;

        SDL_UnlockMutex(m_aPictureQueue[m_iWriteIndex].m_pMutex);
        m_iWriteIndex++;
        if(m_iWriteIndex == ms_pictureBufferSize)
        {
            m_iWriteIndex = 0;
        }
        SDL_LockMutex(m_pMutex);
        m_iCount++;
        SDL_UnlockMutex(m_pMutex);
    }

    return 0;
}
Example #23
// LibAvW_PlayGetFrameImage
DLL_EXPORT int LibAvW_PlayGetFrameImage(void *stream, int pixel_format, void *imagedata, int imagewidth, int imageheight, int scaler)
{
	avwstream_t *s;
	PixelFormat avpixelformat;
	int avscaler;

	// check
	if (!libav_initialized)
		return 0;
	s = (avwstream_t *)stream;
	if (!s)
		return 0;

	// get pixel format
	if (pixel_format == LIBAVW_PIXEL_FORMAT_BGR)
		avpixelformat = PIX_FMT_BGR24;
	else if (pixel_format == LIBAVW_PIXEL_FORMAT_BGRA)
		avpixelformat = PIX_FMT_BGRA;
	else
	{
		s->lasterror = LIBAVW_ERROR_BAD_PIXEL_FORMAT;
		return 0;
	}

	// get scaler
	if (scaler >= LIBAVW_SCALER_BILINEAR && scaler <= LIBAVW_SCALER_SPLINE)
		avscaler = libav_scalers[scaler];
	else
	{
		s->lasterror = LIBAVW_ERROR_CREATE_SCALE_CONTEXT;
		return 0;
	}

	// get AV_InputFrame
	avpicture_fill((AVPicture *)s->AV_OutputFrame, (uint8_t *)imagedata, avpixelformat, imagewidth, imageheight);
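	// Passing NULL as the first argument allocates a fresh context on every
	// call; it is freed below, so nothing is actually cached here.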
	SwsContext *scale_context = sws_getCachedContext(NULL, s->AV_InputFrame->width, s->AV_InputFrame->height, (PixelFormat)s->AV_InputFrame->format, s->framewidth, s->frameheight, avpixelformat, avscaler, NULL, NULL, NULL); 
	if (!scale_context)
	{
		s->lasterror = LIBAVW_ERROR_BAD_SCALER;
		return 0;
	}
	if (!sws_scale(scale_context, s->AV_InputFrame->data, s->AV_InputFrame->linesize, 0, s->AV_InputFrame->height, s->AV_OutputFrame->data, s->AV_OutputFrame->linesize))
	{
		s->lasterror = LIBAVW_ERROR_APPLYING_SCALE;
		sws_freeContext(scale_context); 
		return 0;
	}

	// allright
	s->lasterror = LIBAVW_ERROR_NONE;
	sws_freeContext(scale_context); 
	return 1;
}
Example #24
void Java_com_richitec_imeeting_video_ECVideoEncoder_processRawFrame(
		JNIEnv* env, jobject thiz, jbyteArray buffer, jint width, jint height,
		jint rotateDegree) {
	if (!qvo || !is_video_encode_ready) {
		return;
	}
//	D("process raw frame - width: %d height: %d", width, height);

	jint rotateWidth, rotateHeight;

	AVCodecContext *c = qvo->video_stream->codec;

	jbyte *p_buffer_array = (*env)->GetByteArrayElements(env, buffer, 0);

//	D("process raw frame - rotate degree: %d", rotateDegree);

	unsigned char * p_rotated_buffer = rotateYUV420SP(p_buffer_array, width,
			height, rotateDegree, &rotateWidth, &rotateHeight);
	if (!p_rotated_buffer) {
		(*env)->ReleaseByteArrayElements(env, buffer, p_buffer_array,
				JNI_ABORT);
		return;
	}

	avpicture_fill((AVPicture *) tmp_picture, p_rotated_buffer, src_pix_fmt,
			rotateWidth, rotateHeight);
//	D("avpicture fill ok");
	(*env)->ReleaseByteArrayElements(env, buffer, p_buffer_array, JNI_ABORT);

	img_convert_ctx = sws_getCachedContext(img_convert_ctx, rotateWidth,
			rotateHeight, src_pix_fmt, qvo->width, qvo->height, c->pix_fmt,
			SWS_BILINEAR, NULL, NULL, NULL);
	sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize, 0,
			rotateHeight, raw_picture->data, raw_picture->linesize);

	int out_size = write_video_frame(qvo, raw_picture);

//	D(
//			"stream pts val: %lld time base: %d / %d", qvo->video_stream->pts.val, qvo->video_stream->time_base.num, qvo->video_stream->time_base.den);
//	double video_pts = (double) qvo->video_stream->pts.val
//			* qvo->video_stream->time_base.num
//			/ qvo->video_stream->time_base.den;
//	D("write video frame - size: %d video pts: %f", out_size, video_pts);

	raw_picture->pts++;

	free(p_rotated_buffer);

	if (out_size == -2) {
		// network interrupted
		call_void_method(env, thiz, "onVideoLiveDisconnected");
	}
}
Example #25
int movie_picture_buffer::write(AVFrame* pFrame, double dPts)
{
    movie_picture* pMoviePicture = nullptr;
    std::unique_lock<std::mutex> picBufLock(mutex);
    while(unsafe_full() && !aborting)
    {
        cond.wait(picBufLock);
    }
    picBufLock.unlock();

    if(aborting) { return -1; }

    pMoviePicture = &picture_queue[write_index];
    std::unique_lock<std::mutex> pictureLock(pMoviePicture->mutex);

    if(pMoviePicture->buffer)
    {
        sws_context = sws_getCachedContext(sws_context, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pMoviePicture->width, pMoviePicture->height, pMoviePicture->pixel_format, SWS_BICUBIC, nullptr, nullptr, nullptr);
        if(sws_context == nullptr)
        {
            std::cerr << "Failed to initialize SwsContext\n";
            return 1;
        }

        /* Allocate a new frame and buffer for the destination RGB24 data. */
        AVFrame *pFrameRGB = av_frame_alloc();
#if (defined(CORSIX_TH_USE_LIBAV) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54, 6, 0)) || \
    (defined(CORSIX_TH_USE_FFMPEG) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 63, 100))
        av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, pMoviePicture->buffer, pMoviePicture->pixel_format, pMoviePicture->width, pMoviePicture->height, 1);
#else
        avpicture_fill((AVPicture *)pFrameRGB, pMoviePicture->buffer, pMoviePicture->pixel_format, pMoviePicture->width, pMoviePicture->height);
#endif

        /* Rescale the frame data and convert it to RGB24. */
        sws_scale(sws_context, pFrame->data, pFrame->linesize, 0, pFrame->height, pFrameRGB->data, pFrameRGB->linesize);

        av_frame_free(&pFrameRGB);

        pMoviePicture->pts = dPts;

        pictureLock.unlock();
        write_index++;
        if(write_index == picture_buffer_size)
        {
            write_index = 0;
        }
        picBufLock.lock();
        picture_count++;
        picBufLock.unlock();
    }

    return 0;
}
Example #26
static int vo_dx_vfmt2rgb(AVPicture * dst, AVPicture * src)
{
    static struct SwsContext *img_convert_ctx;

    img_convert_ctx = sws_getCachedContext(img_convert_ctx, dlpctxp->pwidth,
                                           dlpctxp->pheight, dlpctxp->pixfmt, dlpctxp->pwidth, dlpctxp->pheight,
                                           g_image_format, 0, NULL, NULL, NULL);

    sws_scale(img_convert_ctx, src->data, src->linesize, 0, dlpctxp->pheight,
              dst->data, dst->linesize);

    return 0;
}
Example #27
int video_scaler_create(video_scaler_t **scaler_out,
		const struct video_scale_info *dst,
		const struct video_scale_info *src,
		enum video_scale_type type)
{
	enum AVPixelFormat format_src = get_ffmpeg_video_format(src->format);
	enum AVPixelFormat format_dst = get_ffmpeg_video_format(dst->format);
	int                scale_type = get_ffmpeg_scale_type(type);
	const int          *coeff_src = get_ffmpeg_coeffs(src->colorspace);
	const int          *coeff_dst = get_ffmpeg_coeffs(dst->colorspace);
	int                range_src  = get_ffmpeg_range_type(src->range);
	int                range_dst  = get_ffmpeg_range_type(dst->range);
	struct video_scaler *scaler;
	int ret;

	if (!scaler_out)
		return VIDEO_SCALER_FAILED;

	if (format_src == AV_PIX_FMT_NONE ||
	    format_dst == AV_PIX_FMT_NONE)
		return VIDEO_SCALER_BAD_CONVERSION;

	scaler = bzalloc(sizeof(struct video_scaler));
	scaler->src_height = src->height;

	scaler->swscale = sws_getCachedContext(NULL,
			src->width, src->height, format_src,
			dst->width, dst->height, format_dst,
			scale_type, NULL, NULL, NULL);
	if (!scaler->swscale) {
		blog(LOG_ERROR, "video_scaler_create: Could not create "
		                "swscale");
		goto fail;
	}

	ret = sws_setColorspaceDetails(scaler->swscale,
			coeff_src, range_src,
			coeff_dst, range_dst,
			0, FIXED_1_0, FIXED_1_0);
	if (ret < 0) {
		blog(LOG_DEBUG, "video_scaler_create: "
		                "sws_setColorspaceDetails failed, ignoring");
	}

	*scaler_out = scaler;
	return VIDEO_SCALER_SUCCESS;

fail:
	video_scaler_destroy(scaler);
	return VIDEO_SCALER_FAILED;
}
Example #28
bool FFMpegVideoDecoderPriv::readFrame( int frame )
{
	AVPacket packet;
	int frameFinished;

	while ( m_currentFrameNumber < frame )
	{
		// Read a frame
		if ( av_read_frame( pFormatCtx, &packet ) < 0 )
			return false;  // Frame read failed (e.g. end of stream)

		if ( packet.stream_index == videoStream )
		{
			// Is this a packet from the video stream -> decode video frame
			avcodec_decode_video2( pCodecCtx, pFrame, &frameFinished, &packet );

			// Did we get a video frame?
			if ( frameFinished )
			{
				m_currentFrameNumber++;

				if ( m_currentFrameNumber >= frame )
				{
					int w = pCodecCtx->width;
					int h = pCodecCtx->height;

					img_convert_ctx = sws_getCachedContext(img_convert_ctx,w, h, pCodecCtx->pix_fmt, w, h, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

					if ( img_convert_ctx == NULL )
					{
						printf("Cannot initialize the conversion context!\n");
						return false;
					}

					sws_scale( img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize );

					// Convert the frame to QImage
					m_currentFrameImage = QImage( w, h, QImage::Format_RGB888 );

					for ( int y = 0; y < h; y++ )
						memcpy( m_currentFrameImage.scanLine(y), pFrameRGB->data[0] + y * pFrameRGB->linesize[0], w*3 );
				}
			}
		}

		av_free_packet( &packet );
	}

	return true;
}
Example #29
static int DecodeFrame(const char* lpszOutFileName, AVCodecContext* pDecCtx, 
                       AVFrame* pFrame,  AVFrame* pFrameRGB, int* pnFrameCount, AVPacket* pAVPacket, int bLastFrame) 
{ 
    int nGotFrame = 0; 
    int nLen = avcodec_decode_video2(pDecCtx, pFrame, &nGotFrame, pAVPacket); 
    if (nLen < 0) { 
        fprintf(stderr, "Error while decoding frame %d\n", *pnFrameCount); 
        return nLen; 
    } 

    if (nGotFrame) { 
        printf("Saving %sframe %3d\n", bLastFrame ? "last " : "", *pnFrameCount); 
        fflush(stdout); 

#if 0
        char buf[1024];
        /* the picture is allocated by the decoder, no need to free it */ 
        snprintf(buf, sizeof(buf), outfilename, *pnFrameCount); 
        SavePGM(pFrame->data[0], pFrame->linesize[0], pDecCtx->width, pDecCtx->height, buf); 
#else
        //yuv420p to rgb
        if (pFrameBuffer == NULL) {
            int numBytes = avpicture_get_size(PIX_FMT_RGB24, pDecCtx->width, pDecCtx->height);

            pFrameBuffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

            avpicture_fill((AVPicture *)pFrameRGB, pFrameBuffer, PIX_FMT_RGB24, pDecCtx->width, pDecCtx->height);
        }

        SwsContext* pImgConvertCtx = NULL;
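        // With a local pointer initialized to NULL this allocates a new
        // context on every call and never frees it; keeping the pointer
        // static (or calling sws_freeContext()) would avoid the leak.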
        pImgConvertCtx = sws_getCachedContext(pImgConvertCtx, pDecCtx->width, pDecCtx->height, pDecCtx->pix_fmt,
            pDecCtx->width, pDecCtx->height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

        sws_scale(pImgConvertCtx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, 
            pDecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

        //save to file
        SavePPM(pFrameRGB, pDecCtx->width, pDecCtx->height, *pnFrameCount);
        SaveYUV420P(pFrame, pDecCtx->width, pDecCtx->height, lpszOutFileName);
#endif

        (*pnFrameCount)++; 
    }

    if (pAVPacket->data) { 
        pAVPacket->size -= nLen; 
        pAVPacket->data += nLen; 
    } 
    return 0; 
} 
Example #30
JNIEXPORT jlong JNICALL
Java_org_jitsi_impl_neomedia_codec_FFmpeg_sws_1getCachedContext
    (JNIEnv *env, jclass clazz, jlong ctx, jint srcW, jint srcH,
        jint srcFormat, jint dstW, jint dstH, jint dstFormat, jint flags)
{
    return
        (jlong) (intptr_t)
            sws_getCachedContext(
                (struct SwsContext *) (intptr_t) ctx,
                (int) srcW, (int) srcH, (enum PixelFormat) srcFormat,
                (int) dstW, (int) dstH, (enum PixelFormat) dstFormat,
                (int) flags,
                NULL, NULL, NULL);
}