Example #1
int av_packet_ref(AVPacket *dst, const AVPacket *src)
{
    int ret;

    ret = av_packet_copy_props(dst, src);
    if (ret < 0)
        return ret;

    if (!src->buf) {
        ret = packet_alloc(&dst->buf, src->size);
        if (ret < 0)
            goto fail;
        if (src->size)
            memcpy(dst->buf->data, src->data, src->size);

        dst->data = dst->buf->data;
    } else {
        dst->buf = av_buffer_ref(src->buf);
        if (!dst->buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->data = src->data;
    }

    dst->size = src->size;

    return 0;
fail:
    av_packet_free_side_data(dst);
    return ret;
}
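For context, a minimal caller-side sketch (not taken from any of the projects listed here) of how av_packet_ref() and av_packet_free() are typically paired; the helper name duplicate_and_release is made up for illustration.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: take a reference to `src`, use it, release it. */
static int duplicate_and_release(const AVPacket *src)
{
    AVPacket *copy = av_packet_alloc();
    int ret;

    if (!copy)
        return AVERROR(ENOMEM);

    /* av_packet_ref() copies the metadata via av_packet_copy_props() and
     * either references src->buf or allocates a new buffer and copies the
     * payload, as shown in Example #1 above. */
    ret = av_packet_ref(copy, src);
    if (ret < 0) {
        av_packet_free(&copy);
        return ret;
    }

    /* ... consume `copy` here ... */

    av_packet_free(&copy);  /* unreferences the data and frees the packet */
    return 0;
}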
Example #2
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
{
    av_packet_unref(avci->last_pkt_props);
    if (pkt)
        return av_packet_copy_props(avci->last_pkt_props, pkt);
    return 0;
}
Example #3
static int filter_units_filter(AVBSFContext *bsf, AVPacket *out)
{
    FilterUnitsContext      *ctx = bsf->priv_data;
    CodedBitstreamFragment *frag = &ctx->fragment;
    AVPacket *in = NULL;
    int err, i, j;

    while (1) {
        err = ff_bsf_get_packet(bsf, &in);
        if (err < 0)
            return err;

        if (ctx->mode == NOOP) {
            av_packet_move_ref(out, in);
            av_packet_free(&in);
            return 0;
        }

        err = ff_cbs_read_packet(ctx->cbc, frag, in);
        if (err < 0) {
            av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
            goto fail;
        }

        for (i = 0; i < frag->nb_units; i++) {
            for (j = 0; j < ctx->nb_types; j++) {
                if (frag->units[i].type == ctx->type_list[j])
                    break;
            }
            if (ctx->mode == REMOVE ? j <  ctx->nb_types
                                    : j >= ctx->nb_types) {
                ff_cbs_delete_unit(ctx->cbc, frag, i);
                --i;
            }
        }

        if (frag->nb_units > 0)
            break;

        // Don't return packets with nothing in them.
        av_packet_free(&in);
        ff_cbs_fragment_uninit(ctx->cbc, frag);
    }

    err = ff_cbs_write_packet(ctx->cbc, out, frag);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to write packet.\n");
        goto fail;
    }

    err = av_packet_copy_props(out, in);
    if (err < 0)
        goto fail;

fail:
    ff_cbs_fragment_uninit(ctx->cbc, frag);
    av_packet_free(&in);

    return err;
}
Example #4
static int h264_redundant_pps_filter(AVBSFContext *bsf, AVPacket *out)
{
    H264RedundantPPSContext *ctx = bsf->priv_data;
    AVPacket *in;
    CodedBitstreamFragment *au = &ctx->access_unit;
    int au_has_sps;
    int err, i;

    err = ff_bsf_get_packet(bsf, &in);
    if (err < 0)
        return err;

    err = ff_cbs_read_packet(ctx->input, au, in);
    if (err < 0)
        goto fail;

    au_has_sps = 0;
    for (i = 0; i < au->nb_units; i++) {
        CodedBitstreamUnit *nal = &au->units[i];

        if (nal->type == H264_NAL_SPS)
            au_has_sps = 1;
        if (nal->type == H264_NAL_PPS) {
            err = h264_redundant_pps_fixup_pps(ctx, nal->content);
            if (err < 0)
                goto fail;
            if (!au_has_sps) {
                av_log(bsf, AV_LOG_VERBOSE, "Deleting redundant PPS "
                       "at %"PRId64".\n", in->pts);
                err = ff_cbs_delete_unit(ctx->input, au, i);
                if (err < 0)
                    goto fail;
            }
        }
        if (nal->type == H264_NAL_SLICE ||
            nal->type == H264_NAL_IDR_SLICE) {
            H264RawSlice *slice = nal->content;
            h264_redundant_pps_fixup_slice(ctx, &slice->header);
        }
    }

    err = ff_cbs_write_packet(ctx->output, out, au);
    if (err < 0)
        goto fail;


    err = av_packet_copy_props(out, in);
    if (err < 0)
        goto fail;

    err = 0;
fail:
    ff_cbs_fragment_uninit(ctx->output, au);
    av_packet_free(&in);
    if (err < 0)
        av_packet_unref(out);

    return err;
}
Example #5
static int mjpeg2jpeg_filter(AVBSFContext *ctx, AVPacket *out)
{
    AVPacket *in;
    int ret = 0;
    int input_skip, output_size;
    uint8_t *output;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    if (in->size < 12) {
        av_log(ctx, AV_LOG_ERROR, "input is truncated\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    if (AV_RB16(in->data) != 0xffd8) {
        av_log(ctx, AV_LOG_ERROR, "input is not MJPEG\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    if (in->data[2] == 0xff && in->data[3] == APP0) {
        input_skip = (in->data[4] << 8) + in->data[5] + 4;
    } else {
        input_skip = 2;
    }
    if (in->size < input_skip) {
        av_log(ctx, AV_LOG_ERROR, "input is truncated\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    output_size = in->size - input_skip +
                  sizeof(jpeg_header) + dht_segment_size;
    ret = av_new_packet(out, output_size);
    if (ret < 0)
        goto fail;

    output = out->data;

    output = append(output, jpeg_header, sizeof(jpeg_header));
    output = append_dht_segment(output);
    output = append(output, in->data + input_skip, in->size - input_skip);

    ret = av_packet_copy_props(out, in);
    if (ret < 0)
        goto fail;

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);
    return ret;
}
Example #6
int lw_copy_av_packet
(
    AVPacket *dst,
    AVPacket *src
)
{
#if LIBAVUTIL_VERSION_MICRO >= 100
    return av_copy_packet( dst, src );
#else
    int ret;
    if( (ret = av_new_packet( dst, src->size )) != 0
     || (ret = av_packet_copy_props( dst, src )) != 0 )
    {
        av_packet_unref( dst );
        return ret;
    }
    memcpy( dst->data, src->data, src->size );
    return 0;
#endif
}
Example #7
static int dump_extradata(AVBSFContext *ctx, AVPacket *out)
{
    DumpExtradataContext *s = ctx->priv_data;
    AVPacket *in;
    int ret = 0;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    if (ctx->par_in->extradata &&
        (s->freq == DUMP_FREQ_ALL ||
         (s->freq == DUMP_FREQ_KEYFRAME && in->flags & AV_PKT_FLAG_KEY))) {
        if (in->size >= INT_MAX - ctx->par_in->extradata_size) {
            ret = AVERROR(ERANGE);
            goto fail;
        }

        ret = av_new_packet(out, in->size + ctx->par_in->extradata_size);
        if (ret < 0)
            goto fail;

        ret = av_packet_copy_props(out, in);
        if (ret < 0) {
            av_packet_unref(out);
            goto fail;
        }

        memcpy(out->data, ctx->par_in->extradata, ctx->par_in->extradata_size);
        memcpy(out->data + ctx->par_in->extradata_size, in->data, in->size);
    } else {
        av_packet_move_ref(out, in);
    }

fail:
    av_packet_free(&in);

    return ret;
}
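The bitstream filters in these examples implement the internal filter callback and pull their input with ff_bsf_get_packet(). As a hedged sketch (not code from FFmpeg itself), this is roughly how such a filter can be driven once through the public API; the filter name "dump_extra" is only an example, and a real caller must also handle AVERROR(EAGAIN)/AVERROR_EOF from av_bsf_receive_packet().

#include <libavcodec/avcodec.h>   /* newer FFmpeg releases also provide libavcodec/bsf.h */

static int filter_one_packet(const AVCodecParameters *par,
                             AVPacket *in, AVPacket *out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("dump_extra");
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;

    /* copy the input stream parameters, initialize, then push one packet in
     * and pull one packet out */
    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0 ||
        (ret = av_bsf_init(bsf)) < 0 ||
        (ret = av_bsf_send_packet(bsf, in)) < 0 ||
        (ret = av_bsf_receive_packet(bsf, out)) < 0) {
        av_bsf_free(&bsf);
        return ret;
    }

    av_bsf_free(&bsf);
    return 0;
}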
Example #8
int av_packet_ref(AVPacket *dst, AVPacket *src)
{
    int ret;

    ret = av_packet_copy_props(dst, src);
    if (ret < 0)
        return ret;

    if (!src->buf) {
        ret = packet_alloc(&dst->buf, src->size);
        if (ret < 0)
            goto fail;
        memcpy(dst->buf->data, src->data, src->size);
    } else {
        dst->buf = av_buffer_ref(src->buf);
        if (!dst->buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    dst->size = src->size;
    dst->data = dst->buf->data;
    return 0;
fail:
    av_packet_free_side_data(dst);
    return ret;
}
Example #9
static int h265_metadata_filter(AVBSFContext *bsf, AVPacket *out)
{
    H265MetadataContext *ctx = bsf->priv_data;
    AVPacket *in = NULL;
    CodedBitstreamFragment *au = &ctx->access_unit;
    int err, i;

    err = ff_bsf_get_packet(bsf, &in);
    if (err < 0)
        goto fail;

    err = ff_cbs_read_packet(ctx->cbc, au, in);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
        goto fail;
    }

    if (au->nb_units == 0) {
        av_log(bsf, AV_LOG_ERROR, "No NAL units in packet.\n");
        err = AVERROR_INVALIDDATA;
        goto fail;
    }

    // If an AUD is present, it must be the first NAL unit.
    if (au->units[0].type == HEVC_NAL_AUD) {
        if (ctx->aud == REMOVE)
            ff_cbs_delete_unit(ctx->cbc, au, 0);
    } else {
        if (ctx->aud == INSERT) {
            H265RawAUD *aud = &ctx->aud_nal;
            int pic_type = 0, temporal_id = 8, layer_id = 0;

            for (i = 0; i < au->nb_units; i++) {
                const H265RawNALUnitHeader *nal = au->units[i].content;
                if (!nal)
                    continue;
                if (nal->nuh_temporal_id_plus1 < temporal_id + 1)
                    temporal_id = nal->nuh_temporal_id_plus1 - 1;

                if (au->units[i].type <= HEVC_NAL_RSV_VCL31) {
                    const H265RawSlice *slice = au->units[i].content;
                    layer_id = nal->nuh_layer_id;
                    if (slice->header.slice_type == HEVC_SLICE_B &&
                        pic_type < 2)
                        pic_type = 2;
                    if (slice->header.slice_type == HEVC_SLICE_P &&
                        pic_type < 1)
                        pic_type = 1;
                }
            }

            aud->nal_unit_header = (H265RawNALUnitHeader) {
                .nal_unit_type         = HEVC_NAL_AUD,
                .nuh_layer_id          = layer_id,
                .nuh_temporal_id_plus1 = temporal_id + 1,
            };
            aud->pic_type = pic_type;

            err = ff_cbs_insert_unit_content(ctx->cbc, au,
                                             0, HEVC_NAL_AUD, aud, NULL);
            if (err) {
                av_log(bsf, AV_LOG_ERROR, "Failed to insert AUD.\n");
                goto fail;
            }
        }
    }

    for (i = 0; i < au->nb_units; i++) {
        if (au->units[i].type == HEVC_NAL_VPS) {
            err = h265_metadata_update_vps(bsf, au->units[i].content);
            if (err < 0)
                goto fail;
        }
        if (au->units[i].type == HEVC_NAL_SPS) {
            err = h265_metadata_update_sps(bsf, au->units[i].content);
            if (err < 0)
                goto fail;
        }
    }

    err = ff_cbs_write_packet(ctx->cbc, out, au);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to write packet.\n");
        goto fail;
    }

    err = av_packet_copy_props(out, in);
    if (err < 0)
        goto fail;

    err = 0;
fail:
    ff_cbs_fragment_uninit(ctx->cbc, au);

    av_packet_free(&in);

    return err;
}
Example #10
File: qsvenc.c Project: randydom/FFmpeg
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q,
                  AVPacket *pkt, const AVFrame *frame, int *got_packet)
{
    AVPacket new_pkt = { 0 };
    mfxBitstream *bs;

    mfxFrameSurface1 *surf = NULL;
    mfxSyncPoint sync      = NULL;
    int ret;

    if (frame) {
        ret = submit_frame(q, frame, &surf);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error submitting the frame for encoding.\n");
            return ret;
        }
    }

    ret = av_new_packet(&new_pkt, q->packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating the output packet\n");
        return ret;
    }

    bs = av_mallocz(sizeof(*bs));
    if (!bs) {
        av_packet_unref(&new_pkt);
        return AVERROR(ENOMEM);
    }
    bs->Data      = new_pkt.data;
    bs->MaxLength = new_pkt.size;

    do {
        ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, bs, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY) {
            av_usleep(500);
            continue;
        }
        break;
    } while ( 1 );

    if (ret < 0) {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
        if (ret == MFX_ERR_MORE_DATA)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "EncodeFrameAsync returned %d\n", ret);
        return ff_qsv_error(ret);
    }

    if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM) {
        if (frame->interlaced_frame)
            print_interlace_msg(avctx, q);
        else
            av_log(avctx, AV_LOG_WARNING,
                   "EncodeFrameAsync returned 'incompatible param' code\n");
    }
    if (sync) {
        av_fifo_generic_write(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_write(q->async_fifo, &bs,      sizeof(bs),    NULL);
    } else {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!frame && av_fifo_size(q->async_fifo))) {
        av_fifo_generic_read(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_read(q->async_fifo, &bs,      sizeof(bs),      NULL);

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        new_pkt.dts  = av_rescale_q(bs->DecodeTimeStamp, (AVRational){1, 90000}, avctx->time_base);
        new_pkt.pts  = av_rescale_q(bs->TimeStamp,       (AVRational){1, 90000}, avctx->time_base);
        new_pkt.size = bs->DataLength;

        if (bs->FrameType & MFX_FRAMETYPE_IDR ||
            bs->FrameType & MFX_FRAMETYPE_xIDR)
            new_pkt.flags |= AV_PKT_FLAG_KEY;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        if (bs->FrameType & MFX_FRAMETYPE_I || bs->FrameType & MFX_FRAMETYPE_xI)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        else if (bs->FrameType & MFX_FRAMETYPE_P || bs->FrameType & MFX_FRAMETYPE_xP)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        else if (bs->FrameType & MFX_FRAMETYPE_B || bs->FrameType & MFX_FRAMETYPE_xB)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        av_freep(&bs);

        if (pkt->data) {
            if (pkt->size < new_pkt.size) {
                av_log(avctx, AV_LOG_ERROR, "Submitted buffer not large enough: %d < %d\n",
                       pkt->size, new_pkt.size);
                av_packet_unref(&new_pkt);
                return AVERROR(EINVAL);
            }

            memcpy(pkt->data, new_pkt.data, new_pkt.size);
            pkt->size = new_pkt.size;

            ret = av_packet_copy_props(pkt, &new_pkt);
            av_packet_unref(&new_pkt);
            if (ret < 0)
                return ret;
        } else
            *pkt = new_pkt;

        *got_packet = 1;
    }

    return 0;
}
Example #11
static int mpeg4_unpack_bframes_filter(AVBSFContext *ctx, AVPacket *out)
{
    UnpackBFramesBSFContext *s = ctx->priv_data;
    int pos_p = -1, nb_vop = 0, pos_vop2 = -1, ret = 0;
    AVPacket *in;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    scan_buffer(in->data, in->size, &pos_p, &nb_vop, &pos_vop2);
    av_log(ctx, AV_LOG_DEBUG, "Found %d VOP startcode(s) in this packet.\n", nb_vop);

    if (pos_vop2 >= 0) {
        if (s->b_frame_buf) {
            av_log(ctx, AV_LOG_WARNING,
                   "Missing one N-VOP packet, discarding one B-frame.\n");
            av_freep(&s->b_frame_buf);
            s->b_frame_buf_size = 0;
        }
        /* store the packed B-frame in the BSFContext */
        s->b_frame_buf_size = in->size - pos_vop2;
        s->b_frame_buf      = create_new_buffer(in->data + pos_vop2, s->b_frame_buf_size);
        if (!s->b_frame_buf) {
            s->b_frame_buf_size = 0;
            av_packet_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (nb_vop > 2) {
        av_log(ctx, AV_LOG_WARNING,
       "Found %d VOP headers in one packet, only unpacking one.\n", nb_vop);
    }

    if (nb_vop == 1 && s->b_frame_buf) {
        /* use frame from BSFContext */
        ret = av_packet_copy_props(out, in);
        if (ret < 0) {
            av_packet_free(&in);
            return ret;
        }

        ret = av_packet_from_data(out, s->b_frame_buf, s->b_frame_buf_size);
        if (ret < 0) {
            av_packet_free(&in);
            return ret;
        }
        if (in->size <= MAX_NVOP_SIZE) {
            /* N-VOP */
            av_log(ctx, AV_LOG_DEBUG, "Skipping N-VOP.\n");
            s->b_frame_buf      = NULL;
            s->b_frame_buf_size = 0;
        } else {
            /* copy packet into BSFContext */
            s->b_frame_buf_size = in->size;
            s->b_frame_buf      = create_new_buffer(in->data, in->size);
            if (!s->b_frame_buf) {
                s->b_frame_buf_size = 0;
                av_packet_unref(out);
                av_packet_free(&in);
                return AVERROR(ENOMEM);
            }
        }
    } else if (nb_vop >= 2) {
        /* use first frame of the packet */
        av_packet_move_ref(out, in);
        out->size = pos_vop2;
    } else if (pos_p >= 0) {
        av_log(ctx, AV_LOG_DEBUG, "Updating DivX userdata (remove trailing 'p').\n");
        av_packet_move_ref(out, in);
        /* remove 'p' (packed) from the end of the (DivX) userdata string */
        out->data[pos_p] = '\0';
    } else {
        /* copy packet */
        av_packet_move_ref(out, in);
    }

    av_packet_free(&in);

    return 0;
}
Example #12
static int h264_mp4toannexb_filter(AVBSFContext *ctx, AVPacket *out)
{
    H264BSFContext *s = ctx->priv_data;

    AVPacket *in;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size    = 0;
    const uint8_t *buf;
    const uint8_t *buf_end;
    int            buf_size;
    int ret = 0, i;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    /* nothing to filter */
    if (!s->extradata_parsed) {
        av_packet_move_ref(out, in);
        av_packet_free(&in);
        return 0;
    }

    buf      = in->data;
    buf_size = in->size;
    buf_end  = in->data + in->size;

    do {
        ret= AVERROR(EINVAL);
        if (buf + s->length_size > buf_end)
            goto fail;

        for (nal_size = 0, i = 0; i<s->length_size; i++)
            nal_size = (nal_size << 8) | buf[i];

        buf += s->length_size;
        unit_type = *buf & 0x1f;

        if (nal_size > buf_end - buf || nal_size < 0)
            goto fail;

        if (unit_type == 7)
            s->idr_sps_seen = s->new_idr = 1;
        else if (unit_type == 8) {
            s->idr_pps_seen = s->new_idr = 1;
            /* if SPS has not been seen yet, prepend the AVCC one to PPS */
            if (!s->idr_sps_seen) {
                if (s->sps_offset == -1)
                    av_log(ctx, AV_LOG_WARNING, "SPS not present in the stream, nor in AVCC, stream may be unreadable\n");
                else {
                    if ((ret = alloc_and_copy(out,
                                         ctx->par_out->extradata + s->sps_offset,
                                         s->pps_offset != -1 ? s->pps_offset : ctx->par_out->extradata_size - s->sps_offset,
                                         buf, nal_size)) < 0)
                        goto fail;
                    s->idr_sps_seen = 1;
                    goto next_nal;
                }
            }
        }

        /* if this is a new IDR picture following an IDR picture, reset the idr flag.
         * Just check first_mb_in_slice to be 0 as this is the simplest solution.
         * This could be checking idr_pic_id instead, but would complexify the parsing. */
        if (!s->new_idr && unit_type == 5 && (buf[1] & 0x80))
            s->new_idr = 1;

        /* prepend only to the first type 5 NAL unit of an IDR picture, if no sps/pps are already present */
        if (s->new_idr && unit_type == 5 && !s->idr_sps_seen && !s->idr_pps_seen) {
            if ((ret=alloc_and_copy(out,
                               ctx->par_out->extradata, ctx->par_out->extradata_size,
                               buf, nal_size)) < 0)
                goto fail;
            s->new_idr = 0;
        /* if only SPS has been seen, also insert PPS */
        } else if (s->new_idr && unit_type == 5 && s->idr_sps_seen && !s->idr_pps_seen) {
            if (s->pps_offset == -1) {
                av_log(ctx, AV_LOG_WARNING, "PPS not present in the stream, nor in AVCC, stream may be unreadable\n");
                if ((ret = alloc_and_copy(out, NULL, 0, buf, nal_size)) < 0)
                    goto fail;
            } else if ((ret = alloc_and_copy(out,
                                        ctx->par_out->extradata + s->pps_offset, ctx->par_out->extradata_size - s->pps_offset,
                                        buf, nal_size)) < 0)
                goto fail;
        } else {
            if ((ret=alloc_and_copy(out, NULL, 0, buf, nal_size)) < 0)
                goto fail;
            if (!s->new_idr && unit_type == 1) {
                s->new_idr = 1;
                s->idr_sps_seen = 0;
                s->idr_pps_seen = 0;
            }
        }

next_nal:
        buf        += nal_size;
        cumul_size += nal_size + s->length_size;
    } while (cumul_size < buf_size);

    ret = av_packet_copy_props(out, in);
    if (ret < 0)
        goto fail;

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);

    return ret;
}
Example #13
File: Packet.cpp Project: karvaporo/QtAV
// time_base: av_q2d(format_context->streams[stream_idx]->time_base)
bool Packet::fromAVPacket(Packet* pkt, const AVPacket *avpkt, double time_base)
{
    if (!pkt || !avpkt)
        return false;

    pkt->position = avpkt->pos;
    pkt->hasKeyFrame = !!(avpkt->flags & AV_PKT_FLAG_KEY);
    // what about marking avpkt as invalid and do not use isCorrupt?
    pkt->isCorrupt = !!(avpkt->flags & AV_PKT_FLAG_CORRUPT);
    if (pkt->isCorrupt)
        qDebug("currupt packet. pts: %f", pkt->pts);

    // from av_read_frame: pkt->pts can be AV_NOPTS_VALUE if the video format has B-frames, so it is better to rely on pkt->dts if you do not decompress the payload.
    // old code set pts as dts is valid
    if (avpkt->pts != (qint64)AV_NOPTS_VALUE)
        pkt->pts = avpkt->pts * time_base;
    else if (avpkt->dts != (qint64)AV_NOPTS_VALUE) // is it ok?
        pkt->pts = avpkt->dts * time_base;
    else
        pkt->pts = 0; // TODO: init value
    if (avpkt->dts != (qint64)AV_NOPTS_VALUE) //has B-frames
        pkt->dts = avpkt->dts * time_base;
    else
        pkt->dts = pkt->pts;
    //qDebug("avpacket pts %lld, dts: %lld ", avpkt->pts, avpkt->dts);
    //TODO: pts must >= 0? look at ffplay
    pkt->pts = qMax<qreal>(0, pkt->pts);
    pkt->dts = qMax<qreal>(0, pkt->dts);

    // subtitle always has a key frame? convergence_duration may be 0
    if (avpkt->convergence_duration > 0  // mpv demux_lavf only check this
            && pkt->hasKeyFrame
#if 0
            && codec->codec_type == AVMEDIA_TYPE_SUBTITLE
#endif
            )
        pkt->duration = avpkt->convergence_duration * time_base;
    else if (avpkt->duration > 0)
        pkt->duration = avpkt->duration * time_base;
    else
        pkt->duration = 0;

    //qDebug("AVPacket.pts=%f, duration=%f, dts=%lld", pkt->pts, pkt->duration, packet.dts);
    pkt->data.clear();
    // TODO: pkt->avpkt. data is not necessary now. see mpv new_demux_packet_from_avpacket
    // copy properties and side data. does not touch data, size and ref
    pkt->d = QSharedDataPointer<PacketPrivate>(new PacketPrivate());
    pkt->d->initialized = true;
    AVPacket *p = &pkt->d->avpkt;
#if AVPACKET_REF
    av_packet_ref(p, (AVPacket*)avpkt);  //properties are copied internally
    // add ref without copy; QByteArray does not copy either, and operations like remove() are safe. omit FF_INPUT_BUFFER_PADDING_SIZE
    pkt->data = QByteArray::fromRawData((const char*)p->data, p->size);
#else
    if (avpkt->data) {
        // copy packet data. packet will be reset after AVDemuxer.readFrame() and in next av_read_frame
#if NO_PADDING_DATA
        pkt->data = QByteArray((const char*)avpkt->data, avpkt->size);
#else
    /*!
      larger than the actual read bytes because some optimized bitstream readers read 32 or 64 bits at once and could read over the end.
      The end of the input buffer avpkt->data should be set to 0 to ensure that no overreading happens for damaged MPEG streams
     */
        pkt->data.reserve(avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        pkt->data.resize(avpkt->size);
        // code in FFmpeg & mpv copies avpkt->size bytes and sets the padding data to 0
        memcpy(pkt->data.data(), avpkt->data, avpkt->size);
        memset((char*)pkt->data.constData() + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
#endif //NO_PADDING_DATA
    }
    av_packet_copy_props(p, avpkt);
    if (!pkt->data.isEmpty()) {
        p->data = (uint8_t*)pkt->data.constData();
        p->size = pkt->data.size();
    }
#endif //AVPACKET_REF
    // QtAV always use ms (1/1000s) and s. As a result no time_base is required in Packet
    p->pts = pkt->pts * 1000.0;
    p->dts = pkt->dts * 1000.0;
    p->duration = pkt->duration * 1000.0;
    return true;
}
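A hedged side note (not part of QtAV): the pts/dts conversion above is done in double-precision seconds; the same rescaling can be expressed in integer arithmetic with av_rescale_q(), e.g. converting a stream timestamp to milliseconds. stream_ts_to_ms is an invented name for illustration.

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Hypothetical helper: rescale a timestamp given in stream_time_base units
 * to milliseconds, falling back to 0 for AV_NOPTS_VALUE as the example does. */
static int64_t stream_ts_to_ms(int64_t ts, AVRational stream_time_base)
{
    if (ts == AV_NOPTS_VALUE)
        return 0;
    return av_rescale_q(ts, stream_time_base, (AVRational){ 1, 1000 });
}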
Example #14
static int h264_mp4toannexb_filter(AVBSFContext *ctx, AVPacket *out)
{
    H264BSFContext *s = ctx->priv_data;

    AVPacket *in;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size    = 0;
    const uint8_t *buf;
    const uint8_t *buf_end;
    int            buf_size;
    int ret = 0;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    /* nothing to filter */
    if (!s->extradata_parsed) {
        av_packet_move_ref(out, in);
        av_packet_free(&in);
        return 0;
    }

    buf      = in->data;
    buf_size = in->size;
    buf_end  = in->data + in->size;

    do {
        if (buf + s->length_size > buf_end)
            goto fail;

        if (s->length_size == 1) {
            nal_size = buf[0];
        } else if (s->length_size == 2) {
            nal_size = AV_RB16(buf);
        } else
            nal_size = AV_RB32(buf);

        buf += s->length_size;
        unit_type = *buf & 0x1f;

        if (buf + nal_size > buf_end || nal_size < 0)
            goto fail;

        /* prepend only to the first type 5 NAL unit of an IDR picture */
        if (s->first_idr && unit_type == 5) {
            if (alloc_and_copy(out,
                               ctx->par_out->extradata, ctx->par_out->extradata_size,
                               buf, nal_size) < 0)
                goto fail;
            s->first_idr = 0;
        } else {
            if (alloc_and_copy(out,
                               NULL, 0, buf, nal_size) < 0)
                goto fail;
            if (!s->first_idr && unit_type == 1)
                s->first_idr = 1;
        }

        buf        += nal_size;
        cumul_size += nal_size + s->length_size;
    } while (cumul_size < buf_size);

    ret = av_packet_copy_props(out, in);
    if (ret < 0)
        goto fail;

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);

    return ret;
}
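alloc_and_copy() used by the two h264_mp4toannexb examples is an internal helper whose body is not shown on this page. The following is only a rough, hedged guess at what such a helper does (FFmpeg's actual implementation differs, e.g. in choosing 3- or 4-byte start codes): grow the output packet and append optional extradata, an Annex B start code and the NAL unit.

#include <string.h>
#include <libavcodec/avcodec.h>

/* Hypothetical, simplified stand-in for alloc_and_copy(): append extradata
 * (may be NULL/0), a 4-byte start code and the NAL payload to `out`. */
static int append_nal_with_startcode(AVPacket *out,
                                     const uint8_t *extradata, int extradata_size,
                                     const uint8_t *nal, int nal_size)
{
    static const uint8_t startcode[4] = { 0, 0, 0, 1 };
    int offset = out->size;
    int ret = av_grow_packet(out, extradata_size + (int)sizeof(startcode) + nal_size);

    if (ret < 0)
        return ret;
    if (extradata_size > 0)
        memcpy(out->data + offset, extradata, extradata_size);
    memcpy(out->data + offset + extradata_size, startcode, sizeof(startcode));
    memcpy(out->data + offset + extradata_size + sizeof(startcode), nal, nal_size);
    return 0;
}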