Example #1
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    int ret;
    AVFrame *out_frame;

    Bs2bContext     *bs2b = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy(out_frame, frame);
        ret = av_frame_copy_props(out_frame, frame);
        if (ret < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return ret;
        }
    }

    bs2b->filter(bs2b->bs2bp, out_frame->extended_data[0], out_frame->nb_samples);

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(outlink, out_frame);
}
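
The writable-or-copy dance above is exactly what the public helper av_frame_make_writable() packages into one call. A minimal sketch of the same filter step using it instead (process_in_place is an illustrative name, not FFmpeg API):

#include <libavutil/frame.h>

/* Sketch: let libavutil handle the writable check plus the data and
 * props copy that Example #1 spells out by hand. */
static int process_in_place(AVFrame *frame)
{
    int ret = av_frame_make_writable(frame); /* no-op if we are the sole owner */
    if (ret < 0)
        return ret;
    /* frame->extended_data[0] may now be modified safely */
    return 0;
}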
Example #2
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    if (av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    av_frame_copy(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
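
ff_reget_buffer() is internal to libavcodec, but its move-aside/copy-back trick can be reproduced with public calls only. A sketch for video frames (refresh_buffers is an illustrative name; error handling is simplified and, unlike the internal version, drops the original data if allocation fails):

#include <libavutil/frame.h>

/* Sketch: stash the current data, allocate fresh buffers, copy the old
 * pixels back. av_frame_move_ref() resets the source frame entirely,
 * so the geometry has to be restored before av_frame_get_buffer(). */
static int refresh_buffers(AVFrame *frame)
{
    AVFrame *tmp = av_frame_alloc();
    int ret;

    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);   /* frame is now blank */
    frame->format = tmp->format;     /* restore geometry */
    frame->width  = tmp->width;
    frame->height = tmp->height;

    ret = av_frame_get_buffer(frame, 0); /* align 0: chosen automatically */
    if (ret >= 0)
        ret = av_frame_copy(frame, tmp);
    av_frame_free(&tmp);
    return ret;
}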
Example #3
JNIEXPORT jint JNICALL Java_bits_jav_codec_JavFrame_nCopy
( JNIEnv *env, jclass clazz, jlong pointer, jlong srcPointer )
{
    AVFrame *frame = *(AVFrame**)&pointer;
    const AVFrame *source = *(AVFrame**)&srcPointer;
    return av_frame_copy( frame, source );
}
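
The *(AVFrame**)&pointer idiom reinterprets the jlong's storage as a pointer. A cast through (intptr_t) is the more conventional way to round-trip a native handle through a Java long; a sketch (frame_from_handle is an illustrative name):

#include <stdint.h>
#include <libavutil/frame.h>

/* Sketch: integer-to-pointer round trip without aliasing the jlong. */
static AVFrame *frame_from_handle(int64_t handle)
{
    return (AVFrame *)(intptr_t)handle;
}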
Example #4
void VideoDecoder::convertAndPushPicture(const AVFrame* frame) {
	// Allocate a picture to hold the YUV data
	AVFrame* yuvFrame = av_frame_alloc();
	// Copy metadata only; av_frame_copy() would fail here because
	// yuvFrame has no buffers allocated yet.
	av_frame_copy_props(yuvFrame, frame);
	yuvFrame->format = DESTINATION_FORMAT;
	yuvFrame->width = m_status->videoCodecPars.width;
	yuvFrame->height = m_status->videoCodecPars.height;

	av_image_alloc(yuvFrame->data,
				   yuvFrame->linesize,
				   m_status->videoCodecPars.width,
				   m_status->videoCodecPars.height,
				   DESTINATION_FORMAT,
				   1);

	std::unique_ptr<FFMPEGVideoFrame> videoFramePtr(new FFMPEGVideoFrame());

	if (m_status->videoCodecPars.pixel_format == DESTINATION_FORMAT) {
		av_image_copy(yuvFrame->data,
					  yuvFrame->linesize,
					  (const uint8_t**) (frame->data),
					  frame->linesize,
					  DESTINATION_FORMAT,
					  m_status->videoCodecPars.width,
					  m_status->videoCodecPars.height);
	} else {
		// Convert frame to YUV
		sws_scale(
			m_swsCtx,
			(uint8_t const* const*) frame->data,
			frame->linesize,
			0,
			m_status->videoCodecPars.height,
			yuvFrame->data,
			yuvFrame->linesize
		);
	}

	videoFramePtr->id = ++m_frameId;
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(58, 3, 102)
	videoFramePtr->frameTime = getFrameTime(frame->best_effort_timestamp, m_status->videoStream->time_base);
#else
	videoFramePtr->frameTime = getFrameTime(av_frame_get_best_effort_timestamp(frame), m_status->videoStream->time_base);
#endif
	videoFramePtr->frame = yuvFrame;

	videoFramePtr->ySize.height = static_cast<size_t>(m_status->videoCodecPars.height);
	videoFramePtr->ySize.width = static_cast<size_t>(m_status->videoCodecPars.width);
	videoFramePtr->ySize.stride = static_cast<size_t>(yuvFrame->linesize[0]);

	// 420P means that the UV channels have half the width and height
	videoFramePtr->uvSize.height = static_cast<size_t>(m_status->videoCodecPars.height / 2);
	videoFramePtr->uvSize.width = static_cast<size_t>(m_status->videoCodecPars.width / 2);
	videoFramePtr->uvSize.stride = static_cast<size_t>(yuvFrame->linesize[1]);

	pushFrame(VideoFramePtr(videoFramePtr.release()));
}
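
Worth keeping in mind with this example: av_frame_copy() copies data only, requires the destination to already own buffers of matching format and size, and leaves metadata to av_frame_copy_props(). A minimal deep-copy sketch under those rules (deep_copy is an illustrative name):

#include <libavutil/frame.h>

/* Sketch: a full deep copy is get_buffer + copy + copy_props, in that order. */
static AVFrame *deep_copy(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return NULL;
    dst->format = src->format;
    dst->width  = src->width;
    dst->height = src->height;
    if (av_frame_get_buffer(dst, 0)   < 0 ||
        av_frame_copy(dst, src)       < 0 ||
        av_frame_copy_props(dst, src) < 0)
        av_frame_free(&dst);   /* also sets dst to NULL */
    return dst;
}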
Example #5
int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
Example #6
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    MonoPartsContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    int part = s->current_part++;
    if (part < s->part1) // stereo passthru
	return ff_filter_frame(outlink, frame);

    if (!av_frame_is_writable(frame)) {
	AVFrame *copy = ff_get_audio_buffer(inlink, frame->nb_samples);
	if (!copy) {
	    av_frame_free(&frame);
	    return AVERROR(ENOMEM);
	}
	av_frame_copy_props(copy, frame);
	av_frame_copy(copy, frame);
	av_frame_free(&frame);
	frame = copy;
    }

    if (part == s->part1) {
	if (part == 0)
	    s->full_mono(frame);
	else
	    s->stereo2mono(frame);
    }
    else if (part < s->part2)
	s->full_mono(frame);
    else if (part == s->part2) {
	bool smallframe = frame->nb_samples < inlink->min_samples;
	bool lastpart = *s->parts == '\0';
	if (smallframe && lastpart)
	    s->full_mono(frame);
	else
	    s->mono2stereo(frame);
	if (lastpart)
	    s->part1 = s->part2 = INT_MAX;
	else if (!scan_part(ctx)) {
	    av_frame_free(&frame);
	    return AVERROR(EINVAL);
	}
    }

    return ff_filter_frame(outlink, frame);
}
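
The full_mono/stereo2mono callbacks themselves are not part of this example. For planar float audio, a stereo-to-mono averager might look like this (a sketch under that assumed sample format; the name stereo2mono_fltp is illustrative):

#include <libavutil/frame.h>

/* Sketch: average L and R into both channels, AV_SAMPLE_FMT_FLTP assumed,
 * so each channel lives in its own extended_data[] plane. */
static void stereo2mono_fltp(AVFrame *frame)
{
    float *l = (float *)frame->extended_data[0];
    float *r = (float *)frame->extended_data[1];
    for (int i = 0; i < frame->nb_samples; i++)
        l[i] = r[i] = 0.5f * (l[i] + r[i]);
}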
Example #7
static int shuffleplanes_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext          *ctx = inlink->dst;
    ShufflePlanesContext       *s = ctx->priv;
    uint8_t *shuffled_data[4]     = { NULL };
    int      shuffled_linesize[4] = { 0 };
    int i, ret;

    for (i = 0; i < s->planes; i++) {
        shuffled_data[i]     = frame->data[s->map[i]];
        shuffled_linesize[i] = frame->linesize[s->map[i]];
    }
    memcpy(frame->data,     shuffled_data,     sizeof(shuffled_data));
    memcpy(frame->linesize, shuffled_linesize, sizeof(shuffled_linesize));

    if (s->copy) {
        AVFrame *copy = ff_get_video_buffer(ctx->outputs[0], frame->width, frame->height);

        if (!copy) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_frame_copy(copy, frame);
        if (ret < 0) {
            av_frame_free(&copy);
            goto fail;
        }

        ret = av_frame_copy_props(copy, frame);
        if (ret < 0) {
            av_frame_free(&copy);
            goto fail;
        }

        av_frame_free(&frame);
        frame = copy;
    }

    return ff_filter_frame(ctx->outputs[0], frame);
fail:
    av_frame_free(&frame);
    return ret;
}
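
Since data[] and linesize[] are just views into the refcounted buffers, a plane shuffle needs no copying by itself. For instance, swapping U and V on a planar YUV frame (a sketch; swap_chroma_planes is an illustrative name):

#include <libavutil/frame.h>

/* Sketch: exchange the chroma plane pointers; the owning AVBufferRefs
 * are untouched, so this is O(1) and copy-free. */
static void swap_chroma_planes(AVFrame *frame)
{
    uint8_t *data   = frame->data[1];
    int      stride = frame->linesize[1];

    frame->data[1]     = frame->data[2];
    frame->linesize[1] = frame->linesize[2];
    frame->data[2]     = data;
    frame->linesize[2] = stride;
}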
Example #8
static int submit_frame(QSVEncContext *q, const AVFrame *frame,
                        mfxFrameSurface1 **surface)
{
    QSVFrame *qf;
    int ret;

    ret = get_free_frame(q, &qf);
    if (ret < 0)
        return ret;

    if (frame->format == AV_PIX_FMT_QSV) {
        ret = av_frame_ref(qf->frame, frame);
        if (ret < 0)
            return ret;

        qf->surface = (mfxFrameSurface1*)qf->frame->data[3];
        *surface = qf->surface;
        return 0;
    }

    /* make a copy if the input is not padded as libmfx requires */
    if (     frame->height & (q->height_align - 1) ||
        frame->linesize[0] & (q->width_align - 1)) {
        qf->frame->height = FFALIGN(frame->height, q->height_align);
        qf->frame->width  = FFALIGN(frame->width, q->width_align);

        ret = ff_get_buffer(q->avctx, qf->frame, AV_GET_BUFFER_FLAG_REF);
        if (ret < 0)
            return ret;

        qf->frame->height = frame->height;
        qf->frame->width  = frame->width;
        ret = av_frame_copy(qf->frame, frame);
        if (ret < 0) {
            av_frame_unref(qf->frame);
            return ret;
        }
    } else {
        ret = av_frame_ref(qf->frame, frame);
        if (ret < 0)
            return ret;
    }

    qf->surface_internal.Info = q->param.mfx.FrameInfo;

    qf->surface_internal.Info.PicStruct =
        !frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
        frame->top_field_first   ? MFX_PICSTRUCT_FIELD_TFF :
                                   MFX_PICSTRUCT_FIELD_BFF;
    if (frame->repeat_pict == 1)
        qf->surface_internal.Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
    else if (frame->repeat_pict == 2)
        qf->surface_internal.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
    else if (frame->repeat_pict == 4)
        qf->surface_internal.Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;

    qf->surface_internal.Data.PitchLow  = qf->frame->linesize[0];
    qf->surface_internal.Data.Y         = qf->frame->data[0];
    qf->surface_internal.Data.UV        = qf->frame->data[1];
    qf->surface_internal.Data.TimeStamp = av_rescale_q(frame->pts, q->avctx->time_base, (AVRational){1, 90000});

    qf->surface = &qf->surface_internal;

    *surface = qf->surface;

    return 0;
}
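
The TimeStamp line converts from the encoder's time base to libmfx's fixed 90 kHz clock. A worked call with illustrative values:

#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Sketch: pts 100 in a 1/25 s time base is 4 s, i.e. 360000 at 90 kHz. */
static int64_t to_mfx_timestamp(void)
{
    return av_rescale_q(100, (AVRational){1, 25}, (AVRational){1, 90000});
}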
Example #9
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
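
A short lifecycle sketch of the refcounting that av_frame_ref() sets up (share_then_release is an illustrative name):

#include <libavutil/frame.h>

/* Sketch: two frames share one buffer until the last reference drops. */
static int share_then_release(AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);  /* dst now shares src's buffers */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    av_frame_unref(src);           /* src lets go; the data lives on in dst */
    av_frame_free(&dst);           /* last reference gone: buffers freed */
    return 0;
}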
Example #10
int Capture::decodePacket(int * gotVideoPtr)
{
	int ret = 0;
	int decoded = mPkt.size;
	bool packageOk = false;

	*gotVideoPtr = 0;
	
	if (mPkt.stream_index == mVideo_stream_idx && mVideoCodecContext)
	{
		/* decode video frame */
		ret = avcodec_decode_video2(mVideoCodecContext, mFrame, gotVideoPtr, &mPkt);

		if (ret < 0)
		{
			sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Video decoding error: %s!\n", av_err2str(ret));
			return ret;
		}

		decoded = FFMIN(ret, mPkt.size);
		if (*gotVideoPtr)
		{
			packageOk = true;
			
			if (mVideoCodecContext->pix_fmt != mDstPixFmt)
			{
				//convert to destination pixel format
				ret = sws_scale(mVideoScaleContext, mFrame->data, mFrame->linesize, 0, mHeight, mTempFrame->data, mTempFrame->linesize);
				if (ret < 0)
				{
					sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Failed to convert decoded frame to %s!\n", mVideoDstFormat.c_str());
					return ret;
				}
			}
			else
			{
				ret = av_frame_copy(mTempFrame, mFrame);
				if (ret < 0)
				{
					sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Failed to copy frame!\n");
					return ret;
				}
			}

			//store mTempFrame;
			if (mVideoDecoderCallback != nullptr)
				mVideoDecoderCallback(mTempFrame->data, mWidth, mHeight);

			mDecodedVideoFrames++;
			//std::cout << "Decoded frames: " << mDecodedVideoFrames << std::endl;
		}
	}

#if USE_REF_COUNTER
	/* If we use the new API with reference counting, we own the data and need
	* to de-reference it when we don't use it anymore */
	if (packageOk)
	{
		av_frame_unref(mFrame);
	}
#endif

	return decoded;
}
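
avcodec_decode_video2() used above is the deprecated pre-3.1 API. Its send/receive replacement looks roughly like this (a sketch of the modern pattern, not this project's code; decode_one is an illustrative name):

#include <libavcodec/avcodec.h>

/* Sketch: one packet in, at most one frame out. EAGAIN/EOF from
 * avcodec_receive_frame() mean "no frame this time", not failure. */
static int decode_one(AVCodecContext *ctx, const AVPacket *pkt,
                      AVFrame *frame, int *got_frame)
{
    int ret;

    *got_frame = 0;
    ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;
    ret = avcodec_receive_frame(ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    if (ret >= 0)
        *got_frame = 1;
    return ret;
}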
Example #11
/* get the input surface */
static QSVFrame *submit_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
{
    QSVFrame        *qsv_frame;
    AVFilterContext *ctx = inlink->dst;

    clear_unused_frames(s->in_frame_list);

    qsv_frame = get_free_frame(&s->in_frame_list);
    if (!qsv_frame)
        return NULL;

    /* Turn AVFrame into mfxFrameSurface1.
     * For video/opaque memory mode, pix_fmt is AV_PIX_FMT_QSV, and
     * mfxFrameSurface1 is stored in AVFrame->data[3];
     * for system memory mode, raw video data is stored in
     * AVFrame, we should map it into mfxFrameSurface1.
     */
    if (!IS_SYSTEM_MEMORY(s->in_mem_mode)) {
        if (picref->format != AV_PIX_FMT_QSV) {
            av_log(ctx, AV_LOG_ERROR, "QSVVPP gets a wrong frame.\n");
            return NULL;
        }
        qsv_frame->frame   = av_frame_clone(picref);
        if (!qsv_frame->frame)
            return NULL;
        qsv_frame->surface = (mfxFrameSurface1 *)qsv_frame->frame->data[3];
    } else {
        /* make a copy if the input is not padded as libmfx requires */
        if (picref->height & 31 || picref->linesize[0] & 31) {
            qsv_frame->frame = ff_get_video_buffer(inlink,
                                                   FFALIGN(inlink->w, 32),
                                                   FFALIGN(inlink->h, 32));
            if (!qsv_frame->frame)
                return NULL;

            qsv_frame->frame->width   = picref->width;
            qsv_frame->frame->height  = picref->height;

            if (av_frame_copy(qsv_frame->frame, picref) < 0) {
                av_frame_free(&qsv_frame->frame);
                return NULL;
            }

            av_frame_copy_props(qsv_frame->frame, picref);
            av_frame_free(&picref);
        } else {
            qsv_frame->frame = av_frame_clone(picref);
            if (!qsv_frame->frame)
                return NULL;
        }

        if (map_frame_to_surface(qsv_frame->frame,
                                &qsv_frame->surface_internal) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported frame.\n");
            return NULL;
        }
        qsv_frame->surface = &qsv_frame->surface_internal;
    }

    qsv_frame->surface->Info           = s->frame_infos[FF_INLINK_IDX(inlink)];
    qsv_frame->surface->Data.TimeStamp = av_rescale_q(qsv_frame->frame->pts,
                                                      inlink->time_base, default_tb);

    qsv_frame->surface->Info.PicStruct =
            !qsv_frame->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
            (qsv_frame->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
                                                 MFX_PICSTRUCT_FIELD_BFF);
    if (qsv_frame->frame->repeat_pict == 1)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
    else if (qsv_frame->frame->repeat_pict == 2)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
    else if (qsv_frame->frame->repeat_pict == 4)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;

    return qsv_frame;
}
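
The "& 31" tests above are power-of-two remainder checks: libmfx wants 32-aligned system-memory surfaces, and FFALIGN rounds a size up to the next multiple. A quick sketch (needs_padding is an illustrative name):

#include <libavutil/macros.h>

/* Sketch: (x & 31) != 0  is equivalent to  x % 32 != 0  here.
 * FFALIGN(1080, 32) == 1088, FFALIGN(1920, 32) == 1920. */
static int needs_padding(int height, int linesize)
{
    return (height & 31) || (linesize & 31);
}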