Example 1
static int filter_units_filter(AVBSFContext *bsf, AVPacket *out)
{
    FilterUnitsContext      *ctx = bsf->priv_data;
    CodedBitstreamFragment *frag = &ctx->fragment;
    AVPacket *in = NULL;
    int err, i, j;

    while (1) {
        err = ff_bsf_get_packet(bsf, &in);
        if (err < 0)
            return err;

        if (ctx->mode == NOOP) {
            av_packet_move_ref(out, in);
            av_packet_free(&in);
            return 0;
        }

        err = ff_cbs_read_packet(ctx->cbc, frag, in);
        if (err < 0) {
            av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
            goto fail;
        }

        for (i = 0; i < frag->nb_units; i++) {
            for (j = 0; j < ctx->nb_types; j++) {
                if (frag->units[i].type == ctx->type_list[j])
                    break;
            }
            if (ctx->mode == REMOVE ? j <  ctx->nb_types
                                    : j >= ctx->nb_types) {
                ff_cbs_delete_unit(ctx->cbc, frag, i);
                --i;
            }
        }

        if (frag->nb_units > 0)
            break;

        // Don't return packets with nothing in them.
        av_packet_free(&in);
        ff_cbs_fragment_uninit(ctx->cbc, frag);
    }

    err = ff_cbs_write_packet(ctx->cbc, out, frag);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to write packet.\n");
        goto fail;
    }

    err = av_packet_copy_props(out, in);
    if (err < 0)
        goto fail;

fail:
    ff_cbs_fragment_uninit(ctx->cbc, frag);
    av_packet_free(&in);

    return err;
}
Example 2
static int remove_extradata(AVBSFContext *ctx, AVPacket *out)
{
    RemoveExtradataContext *s = ctx->priv_data;

    AVPacket *in;
    int ret;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    if (s->parser && s->parser->parser->split) {
        if (s->freq == REMOVE_FREQ_ALL ||
            (s->freq == REMOVE_FREQ_NONKEYFRAME && !(in->flags & AV_PKT_FLAG_KEY)) ||
            (s->freq == REMOVE_FREQ_KEYFRAME && in->flags & AV_PKT_FLAG_KEY)) {
            int i = s->parser->parser->split(s->avctx, in->data, in->size);
            in->data += i;
            in->size -= i;
        }
    }

    av_packet_move_ref(out, in);
    av_packet_free(&in);

    return 0;
}
Example 3
static void ffmpeg_close_context(FREERDP_DSP_CONTEXT* context)
{
	if (context)
	{
		if (context->context)
			avcodec_free_context(&context->context);

		if (context->frame)
			av_frame_free(&context->frame);

		if (context->resampled)
			av_frame_free(&context->resampled);

		if (context->buffered)
			av_frame_free(&context->buffered);

		if (context->packet)
			av_packet_free(&context->packet);

		if (context->rcontext)
			avresample_free(&context->rcontext);

		context->id = AV_CODEC_ID_NONE;
		context->codec = NULL;
		context->isOpen = FALSE;
		context->context = NULL;
		context->frame = NULL;
		context->resampled = NULL;
		context->packet = NULL;
		context->rcontext = NULL;
	}
}
Example 4
static int extract_extradata_filter(AVBSFContext *ctx, AVPacket *out)
{
    ExtractExtradataContext *s = ctx->priv_data;
    AVPacket *in;
    uint8_t *extradata = NULL;
    int extradata_size;
    int ret = 0;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    ret = s->extract(ctx, in, &extradata, &extradata_size);
    if (ret < 0)
        goto fail;

    if (extradata) {
        ret = av_packet_add_side_data(in, AV_PKT_DATA_NEW_EXTRADATA,
                                      extradata, extradata_size);
        if (ret < 0) {
            av_freep(&extradata);
            goto fail;
        }
    }

    av_packet_move_ref(out, in);

fail:
    av_packet_free(&in);
    return ret;
}
Example 5
void StreamReceiver::ReceiveFrame(uint8_t *data)
{
	AVPacket *pkt = av_packet_alloc();
	if (!pkt)
		return;
	// free the packet on every exit path so it is not leaked
	if (av_read_frame(avfmt, pkt) < 0) {
		av_packet_free(&pkt);
		return;
	}
	int got_picture;
	int decode_ret = avcodec_decode_video2(avctx, avframe, &got_picture, pkt);
	av_packet_free(&pkt); // also unreferences the packet, no separate av_packet_unref() needed
	if (decode_ret < 0 || got_picture == 0)
		return;

	int linesize_align[AV_NUM_DATA_POINTERS];
	avcodec_align_dimensions2(
			avctx, &avframe->width, &avframe->height, linesize_align);

	uint8_t *const dstSlice[] = { data };
	int dstStride[] = { width * 4 };
	if (data != nullptr)
		sws_scale(
				swctx,
				avframe->data, avframe->linesize,
				0, STREAM_HEIGHT,
				dstSlice, dstStride);
	av_frame_unref(avframe);
}
Example 6
FFMPEGWriter::~FFMPEGWriter()
{
	// write any remaining buffered data
	if(m_input_samples)
		encode();

	close();

	av_write_trailer(m_formatCtx);

	if(m_frame)
		av_frame_free(&m_frame);

	if(m_packet)
	{
#ifdef FFMPEG_OLD_CODE
		delete m_packet;
#else
		av_packet_free(&m_packet);
#endif
	}

#ifdef FFMPEG_OLD_CODE
	avcodec_close(m_codecCtx);
#else
	if(m_codecCtx)
		avcodec_free_context(&m_codecCtx);
#endif

	avio_closep(&m_formatCtx->pb);
	avformat_free_context(m_formatCtx);
}
Example 7
static int h264_redundant_pps_filter(AVBSFContext *bsf, AVPacket *out)
{
    H264RedundantPPSContext *ctx = bsf->priv_data;
    AVPacket *in;
    CodedBitstreamFragment *au = &ctx->access_unit;
    int au_has_sps;
    int err, i;

    err = ff_bsf_get_packet(bsf, &in);
    if (err < 0)
        return err;

    err = ff_cbs_read_packet(ctx->input, au, in);
    if (err < 0)
        goto fail;

    au_has_sps = 0;
    for (i = 0; i < au->nb_units; i++) {
        CodedBitstreamUnit *nal = &au->units[i];

        if (nal->type == H264_NAL_SPS)
            au_has_sps = 1;
        if (nal->type == H264_NAL_PPS) {
            err = h264_redundant_pps_fixup_pps(ctx, nal->content);
            if (err < 0)
                goto fail;
            if (!au_has_sps) {
                av_log(bsf, AV_LOG_VERBOSE, "Deleting redundant PPS "
                       "at %"PRId64".\n", in->pts);
                err = ff_cbs_delete_unit(ctx->input, au, i);
                if (err < 0)
                    goto fail;
            }
        }
        if (nal->type == H264_NAL_SLICE ||
            nal->type == H264_NAL_IDR_SLICE) {
            H264RawSlice *slice = nal->content;
            h264_redundant_pps_fixup_slice(ctx, &slice->header);
        }
    }

    err = ff_cbs_write_packet(ctx->output, out, au);
    if (err < 0)
        goto fail;


    err = av_packet_copy_props(out, in);
    if (err < 0)
        goto fail;

    err = 0;
fail:
    ff_cbs_fragment_uninit(ctx->output, au);
    av_packet_free(&in);
    if (err < 0)
        av_packet_unref(out);

    return err;
}
Example 8
MediaPacket::~MediaPacket()
{
    if (shouldFreeAvCodecParameters) {
        if (pAvCodecPar_)
            avcodec_parameters_free(&pAvCodecPar_);
    }
    av_packet_free(&pAvPacket_);
}
Example 9
FFmpegAudioReader::~FFmpegAudioReader() {
#if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(57, 24, 255)
	if (_currentPacket != nullptr) {
		av_packet_unref(_currentPacket);
		av_packet_free(&_currentPacket);
	}
#endif
}
Example 10
void ffaml_queue_clear(AVCodecContext *avctx, PacketQueue *queue)
{
  AVPacket *pkt;
  while ((pkt = ffaml_dequeue_packet(avctx, queue)))
  {
    av_packet_free(&pkt);
  }

}
Example 11
AVPacket *av_packet_clone(AVPacket *src)
{
    AVPacket *ret = av_packet_alloc();

    if (!ret)
        return ret;

    if (av_packet_ref(ret, src))
        av_packet_free(&ret);

    return ret;
}
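A note on Example 11: av_packet_clone() combines av_packet_alloc() with av_packet_ref(), so the caller owns the returned packet and must release it with av_packet_free(). A minimal usage sketch (the function name is illustrative, not taken from any example above):

static int duplicate_packet_demo(AVPacket *src)
{
    AVPacket *copy = av_packet_clone(src);   /* NULL on allocation or ref failure */
    if (!copy)
        return AVERROR(ENOMEM);

    /* ... use copy independently of src ... */

    av_packet_free(&copy);                   /* unreferences and frees the clone */
    return 0;
}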
Example 12
static int mjpeg2jpeg_filter(AVBSFContext *ctx, AVPacket *out)
{
    AVPacket *in;
    int ret = 0;
    int input_skip, output_size;
    uint8_t *output;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    if (in->size < 12) {
        av_log(ctx, AV_LOG_ERROR, "input is truncated\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    if (AV_RB16(in->data) != 0xffd8) {
        av_log(ctx, AV_LOG_ERROR, "input is not MJPEG\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    if (in->data[2] == 0xff && in->data[3] == APP0) {
        input_skip = (in->data[4] << 8) + in->data[5] + 4;
    } else {
        input_skip = 2;
    }
    if (in->size < input_skip) {
        av_log(ctx, AV_LOG_ERROR, "input is truncated\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    output_size = in->size - input_skip +
                  sizeof(jpeg_header) + dht_segment_size;
    ret = av_new_packet(out, output_size);
    if (ret < 0)
        goto fail;

    output = out->data;

    output = append(output, jpeg_header, sizeof(jpeg_header));
    output = append_dht_segment(output);
    output = append(output, in->data + input_skip, in->size - input_skip);

    ret = av_packet_copy_props(out, in);
    if (ret < 0)
        goto fail;

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);
    return ret;
}
Example 13
static int chomp_filter(AVBSFContext *ctx, AVPacket *out)
{
    AVPacket *in;
    int ret;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    while (in->size > 0 && !in->data[in->size - 1])
        in->size--;

    av_packet_move_ref(out, in);
    av_packet_free(&in);

    return 0;
}
Example 14
FormatContext::~FormatContext()
{
    if (formatCtx)
    {
        for (AVStream *stream : asConst(streams))
        {
            if (stream->codecpar && !streamNotValid(stream))
            {
                //Data is allocated in QByteArray, so FFmpeg mustn't free it!
                stream->codecpar->extradata = nullptr;
                stream->codecpar->extradata_size = 0;
            }
        }
        avformat_close_input(&formatCtx);
        av_packet_free(&packet);
    }
    delete oggHelper;
}
Example 15
static int trace_headers(AVBSFContext *bsf, AVPacket *out)
{
    TraceHeadersContext *ctx = bsf->priv_data;
    CodedBitstreamFragment au;
    AVPacket *in;
    char tmp[256] = { 0 };
    int err;

    err = ff_bsf_get_packet(bsf, &in);
    if (err < 0)
        return err;

    if (in->flags & AV_PKT_FLAG_KEY)
        av_strlcat(tmp, ", key frame", sizeof(tmp));
    if (in->flags & AV_PKT_FLAG_CORRUPT)
        av_strlcat(tmp, ", corrupt", sizeof(tmp));

    if (in->pts != AV_NOPTS_VALUE)
        av_strlcatf(tmp, sizeof(tmp), ", pts %"PRId64, in->pts);
    else
        av_strlcat(tmp, ", no pts", sizeof(tmp));
    if (in->dts != AV_NOPTS_VALUE)
        av_strlcatf(tmp, sizeof(tmp), ", dts %"PRId64, in->dts);
    else
        av_strlcat(tmp, ", no dts", sizeof(tmp));
    if (in->duration > 0)
        av_strlcatf(tmp, sizeof(tmp), ", duration %"PRId64, in->duration);

    av_log(bsf, AV_LOG_INFO, "Packet: %d bytes%s.\n", in->size, tmp);

    err = ff_cbs_read_packet(&ctx->cbc, &au, in);
    if (err < 0) {
        av_packet_free(&in);
        return err;
    }

    ff_cbs_fragment_uninit(&ctx->cbc, &au);

    av_packet_move_ref(out, in);
    av_packet_free(&in);

    return 0;
}
Example 16
struct mp_image *load_image_png_buf(void *buffer, size_t buffer_size, int imgfmt)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_PNG);
    if (!codec)
        return NULL;

    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;

    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }

    AVPacket *pkt = av_packet_alloc();
    if (pkt) {
        if (av_new_packet(pkt, buffer_size) >= 0)
            memcpy(pkt->data, buffer, buffer_size);
    }

    // (There is only 1 outcome: either it takes it and decodes it, or not.)
    avcodec_send_packet(avctx, pkt);
    avcodec_send_packet(avctx, NULL);

    av_packet_free(&pkt);

    struct mp_image *res = NULL;
    AVFrame *frame = av_frame_alloc();
    if (frame && avcodec_receive_frame(avctx, frame) >= 0) {
        struct mp_image *r = mp_image_from_av_frame(frame);
        if (r)
            res = convert_image(r, imgfmt, mp_null_log);
        talloc_free(r);
    }
    av_frame_free(&frame);

    avcodec_free_context(&avctx);
    return res;
}
Example 17
static int dump_extradata(AVBSFContext *ctx, AVPacket *out)
{
    DumpExtradataContext *s = ctx->priv_data;
    AVPacket *in;
    int ret = 0;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    if (ctx->par_in->extradata &&
        (s->freq == DUMP_FREQ_ALL ||
         (s->freq == DUMP_FREQ_KEYFRAME && in->flags & AV_PKT_FLAG_KEY))) {
        if (in->size >= INT_MAX - ctx->par_in->extradata_size) {
            ret = AVERROR(ERANGE);
            goto fail;
        }

        ret = av_new_packet(out, in->size + ctx->par_in->extradata_size);
        if (ret < 0)
            goto fail;

        ret = av_packet_copy_props(out, in);
        if (ret < 0) {
            av_packet_unref(out);
            goto fail;
        }

        memcpy(out->data, ctx->par_in->extradata, ctx->par_in->extradata_size);
        memcpy(out->data + ctx->par_in->extradata_size, in->data, in->size);
    } else {
        av_packet_move_ref(out, in);
    }

fail:
    av_packet_free(&in);

    return ret;
}
Example 18
void av_bsf_free(AVBSFContext **pctx)
{
    AVBSFContext *ctx;

    if (!pctx || !*pctx)
        return;
    ctx = *pctx;

    if (ctx->filter->close)
        ctx->filter->close(ctx);
    if (ctx->filter->priv_class && ctx->priv_data)
        av_opt_free(ctx->priv_data);

    av_opt_free(ctx);

    av_packet_free(&ctx->internal->buffer_pkt);
    av_freep(&ctx->internal);
    av_freep(&ctx->priv_data);

    avcodec_parameters_free(&ctx->par_in);
    avcodec_parameters_free(&ctx->par_out);

    av_freep(pctx);
}
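Example 18 shows the teardown side of the bitstream-filter API whose filter callbacks appear throughout this listing (e.g. Examples 1, 13 and 17). From the caller's side the public entry points are the push/pull pair av_bsf_send_packet() and av_bsf_receive_packet(). A minimal caller-side sketch, assuming the AVBSFContext has already been created and initialized; the helper and its sink callback are illustrative names, not part of the API:

static int push_through_bsf(AVBSFContext *bsf, AVPacket *pkt,
                            int (*sink)(AVPacket *pkt, void *opaque), void *opaque)
{
    int ret = av_bsf_send_packet(bsf, pkt);  /* the filter takes the packet's reference */
    if (ret < 0)
        return ret;

    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
        ret = sink(pkt, opaque);             /* consume the filtered packet */
        av_packet_unref(pkt);                /* reuse the same AVPacket for the next one */
        if (ret < 0)
            return ret;
    }
    /* AVERROR(EAGAIN) means feed more input; AVERROR_EOF means the filter is drained. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}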
Example 19
static int mpeg4_unpack_bframes_filter(AVBSFContext *ctx, AVPacket *out)
{
    UnpackBFramesBSFContext *s = ctx->priv_data;
    int pos_p = -1, nb_vop = 0, pos_vop2 = -1, ret = 0;
    AVPacket *in;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    scan_buffer(in->data, in->size, &pos_p, &nb_vop, &pos_vop2);
    av_log(ctx, AV_LOG_DEBUG, "Found %d VOP startcode(s) in this packet.\n", nb_vop);

    if (pos_vop2 >= 0) {
        if (s->b_frame_buf) {
            av_log(ctx, AV_LOG_WARNING,
                   "Missing one N-VOP packet, discarding one B-frame.\n");
            av_freep(&s->b_frame_buf);
            s->b_frame_buf_size = 0;
        }
        /* store the packed B-frame in the BSFContext */
        s->b_frame_buf_size = in->size - pos_vop2;
        s->b_frame_buf      = create_new_buffer(in->data + pos_vop2, s->b_frame_buf_size);
        if (!s->b_frame_buf) {
            s->b_frame_buf_size = 0;
            av_packet_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (nb_vop > 2) {
        av_log(ctx, AV_LOG_WARNING,
               "Found %d VOP headers in one packet, only unpacking one.\n", nb_vop);
    }

    if (nb_vop == 1 && s->b_frame_buf) {
        /* use frame from BSFContext */
        ret = av_packet_copy_props(out, in);
        if (ret < 0) {
            av_packet_free(&in);
            return ret;
        }

        ret = av_packet_from_data(out, s->b_frame_buf, s->b_frame_buf_size);
        if (ret < 0) {
            av_packet_free(&in);
            return ret;
        }
        if (in->size <= MAX_NVOP_SIZE) {
            /* N-VOP */
            av_log(ctx, AV_LOG_DEBUG, "Skipping N-VOP.\n");
            s->b_frame_buf      = NULL;
            s->b_frame_buf_size = 0;
        } else {
            /* copy packet into BSFContext */
            s->b_frame_buf_size = in->size;
            s->b_frame_buf      = create_new_buffer(in->data, in->size);
            if (!s->b_frame_buf) {
                s->b_frame_buf_size = 0;
                av_packet_unref(out);
                av_packet_free(&in);
                return AVERROR(ENOMEM);
            }
        }
    } else if (nb_vop >= 2) {
        /* use first frame of the packet */
        av_packet_move_ref(out, in);
        out->size = pos_vop2;
    } else if (pos_p >= 0) {
        av_log(ctx, AV_LOG_DEBUG, "Updating DivX userdata (remove trailing 'p').\n");
        av_packet_move_ref(out, in);
        /* remove 'p' (packed) from the end of the (DivX) userdata string */
        out->data[pos_p] = '\0';
    } else {
        /* copy packet */
        av_packet_move_ref(out, in);
    }

    av_packet_free(&in);

    return 0;
}
Example 20
std::shared_ptr<AVPacket> alloc_packet()
{
    const auto packet = std::shared_ptr<AVPacket>(av_packet_alloc(), [](AVPacket* ptr) { av_packet_free(&ptr); });
    if (!packet)
        FF_RET(AVERROR(ENOMEM), "av_packet_alloc");
    return packet;
}
Example 21
static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *out)
{
    H265MetadataContext *ctx = bsf->priv_data;
    AVPacket *in = NULL;
    CodedBitstreamFragment *au = &ctx->access_unit;
    int err, i;

    err = ff_bsf_get_packet(bsf, &in);
    if (err < 0)
        goto fail;

    err = ff_cbs_read_packet(ctx->cbc, au, in);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
        goto fail;
    }

    if (au->nb_units == 0) {
        av_log(bsf, AV_LOG_ERROR, "No NAL units in packet.\n");
        err = AVERROR_INVALIDDATA;
        goto fail;
    }

    // If an AUD is present, it must be the first NAL unit.
    if (au->units[0].type == HEVC_NAL_AUD) {
        if (ctx->aud == REMOVE)
            ff_cbs_delete_unit(ctx->cbc, au, 0);
    } else {
        if (ctx->aud == INSERT) {
            H265RawAUD *aud = &ctx->aud_nal;
            int pic_type = 0, temporal_id = 8, layer_id = 0;

            for (i = 0; i < au->nb_units; i++) {
                const H265RawNALUnitHeader *nal = au->units[i].content;
                if (!nal)
                    continue;
                if (nal->nuh_temporal_id_plus1 < temporal_id + 1)
                    temporal_id = nal->nuh_temporal_id_plus1 - 1;

                if (au->units[i].type <= HEVC_NAL_RSV_VCL31) {
                    const H265RawSlice *slice = au->units[i].content;
                    layer_id = nal->nuh_layer_id;
                    if (slice->header.slice_type == HEVC_SLICE_B &&
                        pic_type < 2)
                        pic_type = 2;
                    if (slice->header.slice_type == HEVC_SLICE_P &&
                        pic_type < 1)
                        pic_type = 1;
                }
            }

            aud->nal_unit_header = (H265RawNALUnitHeader) {
                .nal_unit_type         = HEVC_NAL_AUD,
                .nuh_layer_id          = layer_id,
                .nuh_temporal_id_plus1 = temporal_id + 1,
            };
            aud->pic_type = pic_type;

            err = ff_cbs_insert_unit_content(ctx->cbc, au,
                                             0, HEVC_NAL_AUD, aud, NULL);
            if (err) {
                av_log(bsf, AV_LOG_ERROR, "Failed to insert AUD.\n");
                goto fail;
            }
        }
    }

    for (i = 0; i < au->nb_units; i++) {
        if (au->units[i].type == HEVC_NAL_VPS) {
            err = h265_metadata_update_vps(bsf, au->units[i].content);
            if (err < 0)
                goto fail;
        }
        if (au->units[i].type == HEVC_NAL_SPS) {
            err = h265_metadata_update_sps(bsf, au->units[i].content);
            if (err < 0)
                goto fail;
        }
    }

    err = ff_cbs_write_packet(ctx->cbc, out, au);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to write packet.\n");
        goto fail;
    }

    err = av_packet_copy_props(out, in);
    if (err < 0)
        goto fail;

    err = 0;
fail:
    ff_cbs_fragment_uninit(ctx->cbc, au);

    av_packet_free(&in);

    return err;
}
Example 22
int main(int argc, char **argv)
{
    const char *filename;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket *pkt;
    int i, j, k, ret;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    if (argc <= 1) {
        fprintf(stderr, "Usage: %s <output file>\n", argv[0]);
        return 0;
    }
    filename = argv[1];

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* packet for holding encoded output */
    pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "could not allocate the packet\n");
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* allocate the data buffers */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate audio data buffers\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        /* make sure the frame is writable -- makes a copy if the encoder
         * kept a reference internally */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);
        samples = (uint16_t*)frame->data[0];

        for (j = 0; j < c->frame_size; j++) {
            samples[2 * j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2 * j + k] = samples[2 * j];
            t += tincr;
        }
        encode(c, frame, pkt, f);
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);

    fclose(f);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&c);

    return 0;
}
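Example 22 relies on an encode() helper that the snippet does not include. A sketch of what such a helper typically looks like with the send/receive API (the original helper may differ in its error handling and output format):

static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, FILE *output)
{
    int ret = avcodec_send_frame(ctx, frame);   /* frame == NULL flushes the encoder */
    if (ret < 0)
        exit(1);

    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;                             /* need more input / fully drained */
        else if (ret < 0)
            exit(1);                            /* genuine encoding error */

        fwrite(pkt->data, 1, pkt->size, output);
        av_packet_unref(pkt);                   /* pkt is reused on the next iteration */
    }
}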
Example 23
Packet::~Packet()
{
  DeleteMediaType(pmt);
  av_packet_free(&m_Packet);
}
Example 24
static void vp9_raw_reorder_frame_free(VP9RawReorderFrame **frame)
{
    if (*frame)
        av_packet_free(&(*frame)->packet);
    av_freep(frame);
}
Example 25
static int vp9_raw_reorder_filter(AVBSFContext *bsf, AVPacket *out)
{
    VP9RawReorderContext *ctx = bsf->priv_data;
    VP9RawReorderFrame *frame;
    AVPacket *in;
    int err, s;

    if (ctx->next_frame) {
        frame = ctx->next_frame;

    } else {
        err = ff_bsf_get_packet(bsf, &in);
        if (err < 0) {
            if (err == AVERROR_EOF)
                return vp9_raw_reorder_make_output(bsf, out, NULL);
            return err;
        }

        if ((in->data[in->size - 1] & 0xe0) == 0xc0) { /* superframe marker in the last byte */
            av_log(bsf, AV_LOG_ERROR, "Input in superframes is not "
                   "supported.\n");
            av_packet_free(&in);
            return AVERROR(ENOSYS);
        }

        frame = av_mallocz(sizeof(*frame));
        if (!frame) {
            av_packet_free(&in);
            return AVERROR(ENOMEM);
        }

        frame->packet   = in;
        frame->pts      = in->pts;
        frame->sequence = ++ctx->sequence;
        err = vp9_raw_reorder_frame_parse(bsf, frame);
        if (err) {
            av_log(bsf, AV_LOG_ERROR, "Failed to parse input "
                   "frame: %d.\n", err);
            goto fail;
        }

        frame->needs_output  = 1;
        frame->needs_display = frame->pts != AV_NOPTS_VALUE;

        if (frame->show_existing_frame)
            av_log(bsf, AV_LOG_DEBUG, "Show frame %"PRId64" "
                   "(%"PRId64"): show %u.\n", frame->sequence,
                   frame->pts, frame->frame_to_show);
        else
            av_log(bsf, AV_LOG_DEBUG, "New frame %"PRId64" "
                   "(%"PRId64"): type %u show %u refresh %02x.\n",
                   frame->sequence, frame->pts, frame->frame_type,
                   frame->show_frame, frame->refresh_frame_flags);

        ctx->next_frame = frame;
    }

    for (s = 0; s < FRAME_SLOTS; s++) {
        if (!(frame->refresh_frame_flags & (1 << s)))
            continue;
        if (ctx->slot[s] && ctx->slot[s]->needs_display &&
            ctx->slot[s]->slots == (1 << s)) {
            // We are overwriting this slot, which holds the last reference
            // to the frame previously present in it.  In order to be
            // a valid stream, that frame must already have been
            // displayed before the pts of the current frame.
            err = vp9_raw_reorder_make_output(bsf, out, ctx->slot[s]);
            if (err < 0) {
                av_log(bsf, AV_LOG_ERROR, "Failed to create "
                       "output overwriting slot %d: %d.\n",
                       s, err);
                // Clear the slot anyway, so we don't end up
                // in an infinite loop.
                vp9_raw_reorder_clear_slot(ctx, s);
                return AVERROR_INVALIDDATA;
            }
            return 0;
        }
        vp9_raw_reorder_clear_slot(ctx, s);
    }

    for (s = 0; s < FRAME_SLOTS; s++) {
        if (!(frame->refresh_frame_flags & (1 << s)))
            continue;
        ctx->slot[s] = frame;
    }
    frame->slots = frame->refresh_frame_flags;

    if (!frame->refresh_frame_flags) {
        err = vp9_raw_reorder_make_output(bsf, out, frame);
        if (err < 0) {
            av_log(bsf, AV_LOG_ERROR, "Failed to create output "
                   "for transient frame.\n");
            ctx->next_frame = NULL;
            return AVERROR_INVALIDDATA;
        }
        if (!frame->needs_display) {
            vp9_raw_reorder_frame_free(&frame);
            ctx->next_frame = NULL;
        }
        return 0;
    }

    ctx->next_frame = NULL;
    return AVERROR(EAGAIN);

fail:
    vp9_raw_reorder_frame_free(&frame);
    return err;
}
Example 26
int h265_encode(struct videnc_state *st, bool update,
		const struct vidframe *frame, uint64_t timestamp)
{
	AVFrame *pict = NULL;
	AVPacket *pkt = NULL;
	uint64_t rtp_ts;
	int i, ret, got_packet = 0, err = 0;

	if (!st || !frame)
		return EINVAL;

	if (!st->ctx || !vidsz_cmp(&st->size, &frame->size) ||
	    st->fmt != frame->fmt) {

		enum AVPixelFormat pix_fmt;

		pix_fmt = vidfmt_to_avpixfmt(frame->fmt);
		if (pix_fmt == AV_PIX_FMT_NONE) {
			warning("h265: encode: pixel format not supported"
				" (%s)\n", vidfmt_name(frame->fmt));
			return ENOTSUP;
		}

		debug("h265: encoder: reset %u x %u (%s)\n",
		      frame->size.w, frame->size.h, vidfmt_name(frame->fmt));

		err = open_encoder(st, &frame->size, pix_fmt);
		if (err)
			return err;

		st->size = frame->size;
		st->fmt = frame->fmt;
	}

	pict = av_frame_alloc();
	if (!pict) {
		err = ENOMEM;
		goto out;
	}

	pict->format = st->ctx->pix_fmt;
	pict->width = frame->size.w;
	pict->height = frame->size.h;
	pict->pts = timestamp;

	for (i=0; i<4; i++) {
		pict->data[i]     = frame->data[i];
		pict->linesize[i] = frame->linesize[i];
	}

	if (update) {
		debug("h265: encoder picture update\n");
		pict->key_frame = 1;
		pict->pict_type = AV_PICTURE_TYPE_I;
	}

#if LIBAVUTIL_VERSION_MAJOR >= 55
	pict->color_range = AVCOL_RANGE_MPEG;
#endif

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 37, 100)

	pkt = av_packet_alloc();
	if (!pkt) {
		err = ENOMEM;
		goto out;
	}

	ret = avcodec_send_frame(st->ctx, pict);
	if (ret < 0) {
		err = EBADMSG;
		goto out;
	}

	/* NOTE: packet contains 4-byte startcode */
	ret = avcodec_receive_packet(st->ctx, pkt);
	if (ret < 0) {
		info("h265: no packet yet ..\n");
		err = 0;
		goto out;
	}

	got_packet = 1;
#else
	pkt = av_malloc(sizeof(*pkt));
	if (!pkt) {
		err = ENOMEM;
		goto out;
	}

	av_init_packet(pkt);
	av_new_packet(pkt, 65536);

	ret = avcodec_encode_video2(st->ctx, pkt, pict, &got_packet);
	if (ret < 0) {
		err = EBADMSG;
		goto out;
	}
#endif

	if (!got_packet)
		goto out;

	rtp_ts = video_calc_rtp_timestamp_fix(pkt->dts);

	err = packetize_annexb(rtp_ts, pkt->data, pkt->size,
			       st->pktsize, st->pkth, st->arg);
	if (err)
		goto out;

 out:
	/* av_frame_free() and av_packet_free() are NULL-safe and release any references */
	av_frame_free(&pict);
	av_packet_free(&pkt);

	return err;
}
Example 27
static int h264_mp4toannexb_filter(AVBSFContext *ctx, AVPacket *out)
{
    H264BSFContext *s = ctx->priv_data;

    AVPacket *in;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size    = 0;
    const uint8_t *buf;
    const uint8_t *buf_end;
    int            buf_size;
    int ret = 0, i;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    /* nothing to filter */
    if (!s->extradata_parsed) {
        av_packet_move_ref(out, in);
        av_packet_free(&in);
        return 0;
    }

    buf      = in->data;
    buf_size = in->size;
    buf_end  = in->data + in->size;

    do {
        ret= AVERROR(EINVAL);
        if (buf + s->length_size > buf_end)
            goto fail;

        for (nal_size = 0, i = 0; i<s->length_size; i++)
            nal_size = (nal_size << 8) | buf[i];

        buf += s->length_size;
        unit_type = *buf & 0x1f;

        if (nal_size > buf_end - buf || nal_size < 0)
            goto fail;

        if (unit_type == 7)
            s->idr_sps_seen = s->new_idr = 1;
        else if (unit_type == 8) {
            s->idr_pps_seen = s->new_idr = 1;
            /* if SPS has not been seen yet, prepend the AVCC one to PPS */
            if (!s->idr_sps_seen) {
                if (s->sps_offset == -1)
                    av_log(ctx, AV_LOG_WARNING, "SPS not present in the stream, nor in AVCC, stream may be unreadable\n");
                else {
                    if ((ret = alloc_and_copy(out,
                                         ctx->par_out->extradata + s->sps_offset,
                                         s->pps_offset != -1 ? s->pps_offset : ctx->par_out->extradata_size - s->sps_offset,
                                         buf, nal_size)) < 0)
                        goto fail;
                    s->idr_sps_seen = 1;
                    goto next_nal;
                }
            }
        }

        /* if this is a new IDR picture following an IDR picture, reset the idr flag.
         * Just check first_mb_in_slice to be 0 as this is the simplest solution.
         * This could be checking idr_pic_id instead, but would complexify the parsing. */
        if (!s->new_idr && unit_type == 5 && (buf[1] & 0x80))
            s->new_idr = 1;

        /* prepend only to the first type 5 NAL unit of an IDR picture, if no sps/pps are already present */
        if (s->new_idr && unit_type == 5 && !s->idr_sps_seen && !s->idr_pps_seen) {
            if ((ret=alloc_and_copy(out,
                               ctx->par_out->extradata, ctx->par_out->extradata_size,
                               buf, nal_size)) < 0)
                goto fail;
            s->new_idr = 0;
        /* if only SPS has been seen, also insert PPS */
        } else if (s->new_idr && unit_type == 5 && s->idr_sps_seen && !s->idr_pps_seen) {
            if (s->pps_offset == -1) {
                av_log(ctx, AV_LOG_WARNING, "PPS not present in the stream, nor in AVCC, stream may be unreadable\n");
                if ((ret = alloc_and_copy(out, NULL, 0, buf, nal_size)) < 0)
                    goto fail;
            } else if ((ret = alloc_and_copy(out,
                                        ctx->par_out->extradata + s->pps_offset, ctx->par_out->extradata_size - s->pps_offset,
                                        buf, nal_size)) < 0)
                goto fail;
        } else {
            if ((ret=alloc_and_copy(out, NULL, 0, buf, nal_size)) < 0)
                goto fail;
            if (!s->new_idr && unit_type == 1) {
                s->new_idr = 1;
                s->idr_sps_seen = 0;
                s->idr_pps_seen = 0;
            }
        }

next_nal:
        buf        += nal_size;
        cumul_size += nal_size + s->length_size;
    } while (cumul_size < buf_size);

    ret = av_packet_copy_props(out, in);
    if (ret < 0)
        goto fail;

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);

    return ret;
}
Example 28
int main(int argc, char **argv)
{
    const char *filename, *outfilename;
    const AVCodec *codec;
    AVCodecParserContext *parser;
    AVCodecContext *c= NULL;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t   data_size;
    int ret;
    AVPacket *pkt;

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    filename    = argv[1];
    outfilename = argv[2];

    avcodec_register_all();

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    /* find the MPEG-1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture = av_frame_alloc();

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    while (!feof(f)) {
        /* read raw data from the input file */
        data_size = fread(inbuf, 1, INBUF_SIZE, f);
        if (!data_size)
            break;

        /* use the parser to split the data into frames */
        data = inbuf;
        while (data_size > 0) {
            ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                fprintf(stderr, "Error while parsing\n");
                exit(1);
            }
            data      += ret;
            data_size -= ret;

            if (pkt->size)
                decode(c, picture, pkt, outfilename);
        }
    }

    /* flush the decoder */
    decode(c, picture, NULL, outfilename);

    fclose(f);

    av_parser_close(parser);
    avcodec_free_context(&c);
    av_frame_free(&picture);
    av_packet_free(&pkt);

    return 0;
}
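Likewise, Example 28 calls a decode() helper that is not shown. A sketch of what it might look like with avcodec_send_packet()/avcodec_receive_frame() (the original helper presumably also writes each decoded picture to the output file; that part is elided here):

static void decode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
                   const char *outfilename)
{
    int ret = avcodec_send_packet(ctx, pkt);    /* pkt == NULL flushes the decoder */
    if (ret < 0)
        exit(1);

    while (ret >= 0) {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0)
            exit(1);

        /* ... write frame->data[]/frame->linesize[] to outfilename, e.g. as PGM ... */
        (void)outfilename;
    }
}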
Example 29
bool FFmpegAudioReader::readFrame(AVFrame* decode_frame) {
	// FFmpeg 3.1 and onwards has a new, better packet handling API but that version is relatively new so we still need
	// to support the earlier API. The compatibility code can be removed once the new version is supported by most platforms

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Process pending frames
	auto pending_res = avcodec_receive_frame(_codec_ctx, decode_frame);

	if (pending_res == 0) {
		// Got a frame
		return true;
	}
	if (pending_res == AVERROR_EOF) {
		// No more frames available
		return false;
	}

	if (pending_res != AVERROR(EAGAIN)) {
		// Unknown error.
		return false;
	}
#else
	if (_currentPacket != nullptr) {
		// Got some data left
		int finishedFrame = 0;
		auto bytes_read = avcodec_decode_audio4(_codec_ctx, decode_frame, &finishedFrame, _currentPacket);

		if (bytes_read < 0) {
			// Error!
			return false;
		}

		_currentPacket->data += bytes_read;
		_currentPacket->size -= bytes_read;

		if (_currentPacket->size <= 0) {
			// Done with this packet
			av_packet_free(&_currentPacket);
		}

		if (finishedFrame) {
			return true;
		}
	}
#endif

	AVPacket packet;
	while (av_read_frame(_format_ctx, &packet) >= 0) {
		AVPacketScope packet_scope(&packet);
		if (packet.stream_index == _stream_idx) {
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
			auto res = avcodec_send_packet(_codec_ctx, &packet);

			if (res != 0) {
				// Error or EOF
				return false;
			}

			res = avcodec_receive_frame(_codec_ctx, decode_frame);

			if (res == 0) {
				// Got a frame
				return true;
			}
			if (res == AVERROR_EOF) {
				// No more frames available
				return false;
			}

			if (res != AVERROR(EAGAIN)) {
				// Unknown error.
				return false;
			}
			// EAGAIN was returned, send new input
#else
			int finishedFrame = 0;
			auto bytes_read = avcodec_decode_audio4(_codec_ctx, decode_frame, &finishedFrame, &packet);

			if (bytes_read < packet.size) {
				// Not all data was read
				packet.data += bytes_read;
				packet.size -= bytes_read;

				_currentPacket = av_packet_clone(&packet);
			}

			if (finishedFrame) {
				return true;
			}

			if (bytes_read < 0) {
				// Error
				return false;
			}
#endif
		}
	}

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Flush decoder
	if (avcodec_send_packet(_codec_ctx, nullptr) != 0) {
		return false;
	}

	auto flush_res = avcodec_receive_frame(_codec_ctx, decode_frame);

	if (flush_res == 0) {
		// Got a frame
		return true;
	}
#else
	AVPacket nullPacket;
	memset(&nullPacket, 0, sizeof(nullPacket));
	nullPacket.data = nullptr;
	nullPacket.size = 0;

	int finishedFrame = 1;
	auto err = avcodec_decode_audio4(_codec_ctx, decode_frame, &finishedFrame, &nullPacket);

	if (finishedFrame && err >= 0) {
		return true;
	}
#endif

	// If we are here then read_frame reached the end or returned an error
	return false;
}
Example 30
static int h264_mp4toannexb_filter(AVBSFContext *ctx, AVPacket *out)
{
    H264BSFContext *s = ctx->priv_data;

    AVPacket *in;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size    = 0;
    const uint8_t *buf;
    const uint8_t *buf_end;
    int            buf_size;
    int ret = 0;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    /* nothing to filter */
    if (!s->extradata_parsed) {
        av_packet_move_ref(out, in);
        av_packet_free(&in);
        return 0;
    }

    buf      = in->data;
    buf_size = in->size;
    buf_end  = in->data + in->size;

    do {
        if (buf + s->length_size > buf_end)
            goto fail;

        if (s->length_size == 1) {
            nal_size = buf[0];
        } else if (s->length_size == 2) {
            nal_size = AV_RB16(buf);
        } else
            nal_size = AV_RB32(buf);

        buf += s->length_size;
        unit_type = *buf & 0x1f;

        if (buf + nal_size > buf_end || nal_size < 0)
            goto fail;

        /* prepend only to the first type 5 NAL unit of an IDR picture */
        if (s->first_idr && unit_type == 5) {
            if (alloc_and_copy(out,
                               ctx->par_out->extradata, ctx->par_out->extradata_size,
                               buf, nal_size) < 0)
                goto fail;
            s->first_idr = 0;
        } else {
            if (alloc_and_copy(out,
                               NULL, 0, buf, nal_size) < 0)
                goto fail;
            if (!s->first_idr && unit_type == 1)
                s->first_idr = 1;
        }

        buf        += nal_size;
        cumul_size += nal_size + s->length_size;
    } while (cumul_size < buf_size);

    ret = av_packet_copy_props(out, in);
    if (ret < 0)
        goto fail;

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);

    return ret;
}