Example #1
static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    FlacMuxerContext *c = s->priv_data;
    uint8_t *streaminfo;
    int streaminfo_size;

    /* check for updated streaminfo */
    streaminfo = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
                                         &streaminfo_size);
    if (streaminfo && streaminfo_size == FLAC_STREAMINFO_SIZE) {
        av_freep(&c->streaminfo);

        c->streaminfo = av_malloc(FLAC_STREAMINFO_SIZE);
        if (!c->streaminfo)
            return AVERROR(ENOMEM);
        memcpy(c->streaminfo, streaminfo, FLAC_STREAMINFO_SIZE);
    }

    if (pkt->size)
        avio_write(s->pb, pkt->data, pkt->size);
    return 0;
}
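Example #1 only shows the consuming side of AV_PKT_DATA_NEW_EXTRADATA. For context, here is a minimal sketch of the producing side, assuming an updated 34-byte STREAMINFO block (FLAC_STREAMINFO_SIZE is an internal libavcodec constant equal to 34); attach_new_streaminfo() is a hypothetical helper, not FFmpeg API:

#include <string.h>
#include <libavcodec/avcodec.h>

/* Hypothetical helper: attach an updated FLAC STREAMINFO block to a
 * packet so a muxer like flac_write_packet() above picks it up. The
 * buffer returned by av_packet_new_side_data() is owned by the packet. */
static int attach_new_streaminfo(AVPacket *pkt, const uint8_t streaminfo[34])
{
    uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, 34);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd, streaminfo, 34);
    return 0;
}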
Example #2
static int srt_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_sub_ptr, AVPacket *avpkt)
{
    AVSubtitle *sub = data;
    AVBPrint buffer;
    int ts_start, ts_end, x1 = -1, y1 = -1, x2 = -1, y2 = -1;
    int size, ret;
    const uint8_t *p = av_packet_get_side_data(avpkt, AV_PKT_DATA_SUBTITLE_POSITION, &size);

    if (p && size == 16) {
        x1 = AV_RL32(p     );
        y1 = AV_RL32(p +  4);
        x2 = AV_RL32(p +  8);
        y2 = AV_RL32(p + 12);
    }

    if (avpkt->size <= 0)
        return avpkt->size;

    av_bprint_init(&buffer, 0, AV_BPRINT_SIZE_UNLIMITED);

    // Do final divide-by-10 outside rescale to force rounding down.
    ts_start = av_rescale_q(avpkt->pts,
                            avctx->time_base,
                            (AVRational){1,100});
    ts_end   = av_rescale_q(avpkt->pts + avpkt->duration,
                            avctx->time_base,
                            (AVRational){1,100});

    srt_to_ass(avctx, &buffer, avpkt->data, x1, y1, x2, y2);
    ret = ff_ass_add_rect_bprint(sub, &buffer, ts_start, ts_end-ts_start);
    av_bprint_finalize(&buffer, NULL);
    if (ret < 0)
        return ret;

    *got_sub_ptr = sub->num_rects > 0;
    return avpkt->size;
}
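The decoder above unpacks AV_PKT_DATA_SUBTITLE_POSITION as four little-endian 32-bit integers (x1, y1, x2, y2) in a fixed 16-byte payload. A sketch of the producing side under that layout; set_subtitle_position() is a hypothetical helper:

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

/* Hypothetical helper: pack the coordinates in the 16-byte layout that
 * srt_decode_frame() above reads back with AV_RL32(). */
static int set_subtitle_position(AVPacket *pkt, int x1, int y1, int x2, int y2)
{
    uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_SUBTITLE_POSITION, 16);
    if (!sd)
        return AVERROR(ENOMEM);
    AV_WL32(sd,      x1);
    AV_WL32(sd +  4, y1);
    AV_WL32(sd +  8, x2);
    AV_WL32(sd + 12, y2);
    return 0;
}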
Example #3
static int idcin_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    IdcinContext *s = avctx->priv_data;
    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    if ((ret = ff_get_buffer(avctx, &s->frame))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    if (idcin_decode_vlcs(s))
        return AVERROR_INVALIDDATA;

    if (pal) {
        s->frame.palette_has_changed = 1;
        memcpy(s->pal, pal, AVPALETTE_SIZE);
    }
    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);

    *got_frame = 1;
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
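The palette travels as a fixed AVPALETTE_SIZE (1024-byte, 256 x 32-bit) blob. A sketch of how the demuxing side would attach it, assuming the int-sized side-data API used throughout these examples; attach_palette() is hypothetical:

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/pixfmt.h>   /* AVPALETTE_SIZE */

/* Hypothetical helper: ship a changed 256-entry palette with a packet;
 * decoders such as idcin_decode_frame() above copy it out. */
static int attach_palette(AVPacket *pkt, const uint32_t pal[256])
{
    uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd, pal, AVPALETTE_SIZE);
    return 0;
}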
Example #4
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
{
    uint8_t *side_data;
    int side_data_size;
    int i;

    side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, &side_data_size);
    if (!side_data) {
        side_data_size = 4+4+8*error_count;
        side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                            side_data_size);
    }

    if (!side_data || side_data_size < 4+4+8*error_count)
        return AVERROR(ENOMEM);

    AV_WL32(side_data   , quality  );
    side_data[4] = pict_type;
    side_data[5] = error_count;
    for (i = 0; i<error_count; i++)
        AV_WL64(side_data+8 + 8*i , error[i]);

    return 0;
}
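Example #4 implicitly defines the byte layout of AV_PKT_DATA_QUALITY_STATS: an LE32 quality value, one byte each for picture type and error count, two unused bytes, then error_count LE64 error values starting at offset 8. A sketch of a matching reader under that layout; read_encoder_stats() is hypothetical:

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

/* Hypothetical helper: parse the quality-stats payload written by
 * ff_side_data_set_encoder_stats() above. Returns the error count,
 * or a negative error code. */
static int read_encoder_stats(AVPacket *pkt, int *quality, int *pict_type,
                              int64_t error[], int max_errors)
{
    int size = 0, i, n;
    const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, &size);

    if (!sd || size < 8)
        return AVERROR(EINVAL);
    *quality   = AV_RL32(sd);
    *pict_type = sd[4];
    n = sd[5];
    if (size < 8 + 8 * n)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < n && i < max_errors; i++)
        error[i] = AV_RL64(sd + 8 + 8 * i);
    return n;
}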
Example #5
File: 8bps.c Project: Arcen/FFmpeg
/*
 *
 * Decode a frame
 *
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
        const uint8_t *buf = avpkt->data;
        int buf_size = avpkt->size;
        EightBpsContext * const c = avctx->priv_data;
        const unsigned char *encoded = buf;
        unsigned char *pixptr, *pixptr_end;
        unsigned int height = avctx->height; // Real image height
        unsigned int dlen, p, row;
        const unsigned char *lp, *dp;
        unsigned char count;
        unsigned int px_inc;
        unsigned int planes = c->planes;
        unsigned char *planemap = c->planemap;

        if(c->pic.data[0])
                avctx->release_buffer(avctx, &c->pic);

        c->pic.reference = 0;
        c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
        if(avctx->get_buffer(avctx, &c->pic) < 0){
                av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                return -1;
        }

        /* Set data pointer after line lengths */
        dp = encoded + planes * (height << 1);

        /* Ignore alpha plane, don't know what to do with it */
        if (planes == 4)
                planes--;

        px_inc = planes + (avctx->pix_fmt == PIX_FMT_RGB32);

        for (p = 0; p < planes; p++) {
                /* Lines length pointer for this plane */
                lp = encoded + p * (height << 1);

                /* Decode a plane */
                for(row = 0; row < height; row++) {
                        pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
                        pixptr_end = pixptr + c->pic.linesize[0];
                        dlen = av_be2ne16(*(const unsigned short *)(lp+row*2));
                        /* Decode a row of this plane */
                        while(dlen > 0) {
                                if(dp + 1 >= buf+buf_size) return -1;
                                if ((count = *dp++) <= 127) {
                                        count++;
                                        dlen -= count + 1;
                                        if (pixptr + count * px_inc > pixptr_end)
                                            break;
                                        if(dp + count > buf+buf_size) return -1;
                                        while(count--) {
                                                *pixptr = *dp++;
                                                pixptr += px_inc;
                                        }
                                } else {
                                        count = 257 - count;
                                        if (pixptr + count * px_inc > pixptr_end)
                                            break;
                                        while(count--) {
                                                *pixptr = *dp;
                                                pixptr += px_inc;
                                        }
                                        dp++;
                                        dlen -= 2;
                                }
                        }
                }
        }

        if (avctx->bits_per_coded_sample <= 8) {
                const uint8_t *pal = av_packet_get_side_data(avpkt,
                                                             AV_PKT_DATA_PALETTE,
                                                             NULL);
                if (pal) {
                        c->pic.palette_has_changed = 1;
                        memcpy(c->pal, pal, AVPALETTE_SIZE);
                }

                memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
        }

        *data_size = sizeof(AVFrame);
        *(AVFrame*)data = c->pic;

        /* always report that the buffer was completely consumed */
        return buf_size;
}
Example #6
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;

    is  = ifile->ctx;
    // ****<<  Capture a frame: audio/video/subtitle
    ret = get_input_packet(ifile, &pkt); // ****<<  Call stack: av_read_frame() --> read_frame_internal() --> ff_read_packet() --> s->iformat->read_packet(s, pkt); --> dshow_read_frame();

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }
        ifile->eof_reached = 1;

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed)
                output_packet(ist, NULL);

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                        (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    close_output_stream(ost);
            }
        }

        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in the stream: we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64) {
        int64_t stime, stime2;
        // Correcting the start time based on the enabled streams.
        // FIXME: ideally this should be done before the first use of the start time, but we
        //        do not know which streams are enabled at that point, so we do it here as
        //        part of discontinuity handling instead.
        if (   ist->next_dts == AV_NOPTS_VALUE
                && ifile->ts_offset == -is->start_time
                && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1)
        if (ist->st->nb_side_data)
            av_packet_split_side_data(&pkt);
    for (i = 0; i < ist->st->nb_side_data; i++) {
        AVPacketSideData *src_sd = &ist->st->side_data[i];
        uint8_t *dst_data;

        if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
            continue;

        dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
        if (!dst_data)
            exit_program(1);

        memcpy(dst_data, src_sd->data, src_sd->size);
    }

    // ****<<  pkt.pts:    capture timestamp (in milliseconds; usually the system time)
    // ****<<  ts_offset:  start timestamp (in milliseconds; usually the negation of the first frame's capture timestamp)
    // ****<<  final pts:  relative timestamp (in milliseconds; time elapsed since the first frame)
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
            && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ifile->last_ts;
        if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                 ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
            !copy_ts) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                     ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
                    pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE) {
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                        (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    ret = output_packet(ist, &pkt); // ****<<  see output_packet.c
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
               ist->file_index, ist->st->index, av_err2str(ret));
        if (exit_on_error)
            exit_program(1);
    }

discard_packet:
    av_free_packet(&pkt);

    return 0;
}
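The loop over ist->st->side_data above implements a general rule: stream-global side data is copied onto the first packet, but never overwrites side data the packet already carries. The same pattern as a standalone sketch; merge_stream_side_data() is hypothetical and assumes the AVStream.side_data API of this FFmpeg era:

#include <string.h>
#include <libavformat/avformat.h>

/* Hypothetical helper: copy each stream-level side data entry onto the
 * packet unless one of that type is already present (packet-level data
 * wins, as in process_input() above). */
static int merge_stream_side_data(AVPacket *pkt, const AVStream *st)
{
    int i;

    for (i = 0; i < st->nb_side_data; i++) {
        const AVPacketSideData *src = &st->side_data[i];
        uint8_t *dst;

        if (av_packet_get_side_data(pkt, src->type, NULL))
            continue;
        dst = av_packet_new_side_data(pkt, src->type, src->size);
        if (!dst)
            return AVERROR(ENOMEM);
        memcpy(dst, src->data, src->size);
    }
    return 0;
}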
Example #7
static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
{
    int size = 0, ret;
    const uint8_t *data;
    uint32_t flags;

    data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    if (!data)
        return 0;

    if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
        av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
               "changes, but PARAM_CHANGE side data was sent to it.\n");
        ret = AVERROR(EINVAL);
        goto fail2;
    }

    if (size < 4)
        goto fail;

    flags = bytestream_get_le32(&data);
    size -= 4;

    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
        if (size < 4)
            goto fail;
        avctx->channels = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
        if (size < 8)
            goto fail;
        avctx->channel_layout = bytestream_get_le64(&data);
        size -= 8;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        if (size < 4)
            goto fail;
        avctx->sample_rate = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        if (size < 8)
            goto fail;
        avctx->width  = bytestream_get_le32(&data);
        avctx->height = bytestream_get_le32(&data);
        size -= 8;
        ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
        if (ret < 0)
            goto fail2;
    }

    return 0;
fail:
    av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
    ret = AVERROR_INVALIDDATA;
fail2:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }
    return 0;
}
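apply_param_change() documents the AV_PKT_DATA_PARAM_CHANGE wire format: an LE32 flags word followed by one LE32/LE64 field per flag set, in flag order. A sketch of the producing side for the simplest case, a sample-rate change; signal_sample_rate_change() is hypothetical:

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

/* Hypothetical helper: build the 8-byte payload that the reader above
 * consumes for AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE. */
static int signal_sample_rate_change(AVPacket *pkt, int sample_rate)
{
    uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 8);
    if (!sd)
        return AVERROR(ENOMEM);
    AV_WL32(sd,     AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE);
    AV_WL32(sd + 4, sample_rate);
    return 0;
}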
Example #8
File: 8bps.c Project: bavison/libav
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    EightBpsContext * const c = avctx->priv_data;
    const unsigned char *encoded = buf;
    unsigned char *pixptr, *pixptr_end;
    unsigned int height = avctx->height; // Real image height
    unsigned int dlen, p, row;
    const unsigned char *lp, *dp, *ep;
    unsigned char count;
    unsigned int px_inc;
    unsigned int planes     = c->planes;
    unsigned char *planemap = c->planemap;
    int ret;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    ep = encoded + buf_size;

    /* Set data pointer after line lengths */
    dp = encoded + planes * (height << 1);

    /* Ignore alpha plane, don't know what to do with it */
    if (planes == 4)
        planes--;

    px_inc = planes + (avctx->pix_fmt == AV_PIX_FMT_RGB32);

    for (p = 0; p < planes; p++) {
        /* Lines length pointer for this plane */
        lp = encoded + p * (height << 1);

        /* Decode a plane */
        for (row = 0; row < height; row++) {
            pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
            pixptr_end = pixptr + frame->linesize[0];
            if (ep - lp < row * 2 + 2)
                return AVERROR_INVALIDDATA;
            dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
            /* Decode a row of this plane */
            while (dlen > 0) {
                if (ep - dp <= 1)
                    return AVERROR_INVALIDDATA;
                if ((count = *dp++) <= 127) {
                    count++;
                    dlen -= count + 1;
                    if (pixptr_end - pixptr < count * px_inc)
                        break;
                    if (ep - dp < count)
                        return AVERROR_INVALIDDATA;
                    while (count--) {
                        *pixptr = *dp++;
                        pixptr += px_inc;
                    }
                } else {
                    count = 257 - count;
                    if (pixptr_end - pixptr < count * px_inc)
                        break;
                    while (count--) {
                        *pixptr = *dp;
                        pixptr += px_inc;
                    }
                    dp++;
                    dlen -= 2;
                }
            }
        }
    }

    if (avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt,
                                                     AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            frame->palette_has_changed = 1;
            memcpy(c->pal, pal, AVPALETTE_SIZE);
        }

        memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
    }

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
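All of the examples here use the historical int-sized side-data API. In newer FFmpeg releases (libavcodec 59, FFmpeg 5.0 onward) the size out-parameter of av_packet_get_side_data() is a size_t pointer, so the palette lookup above would read as in this sketch:

#include <stddef.h>
#include <libavcodec/avcodec.h>
#include <libavutil/pixfmt.h>

/* Sketch against the size_t-based API: return the palette only when
 * the payload has the expected fixed size. */
static const uint8_t *get_palette(const AVPacket *avpkt)
{
    size_t pal_size = 0;
    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                 &pal_size);
    return (pal && pal_size == AVPALETTE_SIZE) ? pal : NULL;
}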
Example #9
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb      = s->pb;
    AVCodecParameters *par = s->streams[pkt->stream_index]->codecpar;
    FLVContext *flv      = s->priv_data;
    FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data;
    unsigned ts;
    int size = pkt->size;
    uint8_t *data = NULL;
    int flags = -1, flags_size, ret;
    int64_t cur_offset = avio_tell(pb);

    if (par->codec_id == AV_CODEC_ID_VP6F || par->codec_id == AV_CODEC_ID_VP6A ||
        par->codec_id == AV_CODEC_ID_VP6  || par->codec_id == AV_CODEC_ID_AAC)
        flags_size = 2;
    else if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4)
        flags_size = 5;
    else
        flags_size = 1;

    if (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_H264
            || par->codec_id == AV_CODEC_ID_MPEG4) {
        int side_size = 0;
        uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
        if (side && side_size > 0 && (side_size != par->extradata_size || memcmp(side, par->extradata, side_size))) {
            av_free(par->extradata);
            par->extradata = av_mallocz(side_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!par->extradata) {
                par->extradata_size = 0;
                return AVERROR(ENOMEM);
            }
            memcpy(par->extradata, side, side_size);
            par->extradata_size = side_size;
            flv_write_codec_header(s, par);
        }
    }

    if (flv->delay == AV_NOPTS_VALUE)
        flv->delay = -pkt->dts;

    if (pkt->dts < -flv->delay) {
        av_log(s, AV_LOG_WARNING,
               "Packets are not in the proper order with respect to DTS\n");
        return AVERROR(EINVAL);
    }

    ts = pkt->dts;

    if (s->event_flags & AVSTREAM_EVENT_FLAG_METADATA_UPDATED) {
        write_metadata(s, ts);
        s->event_flags &= ~AVSTREAM_EVENT_FLAG_METADATA_UPDATED;
    }

    avio_write_marker(pb, av_rescale(ts, AV_TIME_BASE, 1000),
                      pkt->flags & AV_PKT_FLAG_KEY && (flv->video_par ? par->codec_type == AVMEDIA_TYPE_VIDEO : 1) ? AVIO_DATA_MARKER_SYNC_POINT : AVIO_DATA_MARKER_BOUNDARY_POINT);

    switch (par->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        avio_w8(pb, FLV_TAG_TYPE_VIDEO);

        flags = ff_codec_get_tag(flv_video_codec_ids, par->codec_id);

        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
        break;
    case AVMEDIA_TYPE_AUDIO:
        flags = get_audio_flags(s, par);

        av_assert0(size);

        avio_w8(pb, FLV_TAG_TYPE_AUDIO);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
    case AVMEDIA_TYPE_DATA:
        avio_w8(pb, FLV_TAG_TYPE_META);
        break;
    default:
        return AVERROR(EINVAL);
    }

    if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) {
        /* check if extradata looks like mp4 formatted */
        if (par->extradata_size > 0 && *(uint8_t*)par->extradata != 1)
            if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0)
                return ret;
    } else if (par->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 &&
               (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
        if (!s->streams[pkt->stream_index]->nb_frames) {
            av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: "
                   "use the audio bitstream filter 'aac_adtstoasc' to fix it "
                   "('-bsf:a aac_adtstoasc' option with ffmpeg)\n");
            return AVERROR_INVALIDDATA;
        }
        av_log(s, AV_LOG_WARNING, "aac bitstream error\n");
    }

    /* check Speex packet duration */
    if (par->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160)
        av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than "
                                  "8 frames per packet. Adobe Flash "
                                  "Player cannot handle this!\n");

    if (sc->last_ts < ts)
        sc->last_ts = ts;

    if (size + flags_size >= 1<<24) {
        av_log(s, AV_LOG_ERROR, "Too large packet with size %u >= %u\n",
               size + flags_size, 1<<24);
        return AVERROR(EINVAL);
    }

    avio_wb24(pb, size + flags_size);
    avio_wb24(pb, ts & 0xFFFFFF);
    avio_w8(pb, (ts >> 24) & 0x7F); // timestamps are 32 bits _signed_
    avio_wb24(pb, flv->reserved);

    if (par->codec_type == AVMEDIA_TYPE_DATA ||
        par->codec_type == AVMEDIA_TYPE_SUBTITLE ) {
        int data_size;
        int64_t metadata_size_pos = avio_tell(pb);
        if (par->codec_id == AV_CODEC_ID_TEXT) {
            // legacy FFmpeg magic?
            avio_w8(pb, AMF_DATA_TYPE_STRING);
            put_amf_string(pb, "onTextData");
            avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
            avio_wb32(pb, 2);
            put_amf_string(pb, "type");
            avio_w8(pb, AMF_DATA_TYPE_STRING);
            put_amf_string(pb, "Text");
            put_amf_string(pb, "text");
            avio_w8(pb, AMF_DATA_TYPE_STRING);
            put_amf_string(pb, pkt->data);
            put_amf_string(pb, "");
            avio_w8(pb, AMF_END_OF_OBJECT);
        } else {
            // just pass the metadata through
            avio_write(pb, data ? data : pkt->data, size);
        }
        /* write total size of tag */
        data_size = avio_tell(pb) - metadata_size_pos;
        avio_seek(pb, metadata_size_pos - 10, SEEK_SET);
        avio_wb24(pb, data_size);
        avio_seek(pb, data_size + 10 - 3, SEEK_CUR);
        avio_wb32(pb, data_size + 11);
    } else {
        av_assert1(flags>=0);
        avio_w8(pb,flags);
        if (par->codec_id == AV_CODEC_ID_VP6)
            avio_w8(pb,0);
        if (par->codec_id == AV_CODEC_ID_VP6F || par->codec_id == AV_CODEC_ID_VP6A) {
            if (par->extradata_size)
                avio_w8(pb, par->extradata[0]);
            else
                avio_w8(pb, ((FFALIGN(par->width,  16) - par->width) << 4) |
                             (FFALIGN(par->height, 16) - par->height));
        } else if (par->codec_id == AV_CODEC_ID_AAC)
            avio_w8(pb, 1); // AAC raw
        else if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) {
            avio_w8(pb, 1); // AVC NALU
            avio_wb24(pb, pkt->pts - pkt->dts);
        }

        avio_write(pb, data ? data : pkt->data, size);

        avio_wb32(pb, size + flags_size + 11); // previous tag size
        flv->duration = FFMAX(flv->duration,
                              pkt->pts + flv->delay + pkt->duration);
    }

    if (flv->flags & FLV_ADD_KEYFRAME_INDEX) {
        switch (par->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                flv->videosize += (avio_tell(pb) - cur_offset);
                flv->lasttimestamp = flv->acurframeindex / flv->framerate;
                if (pkt->flags & AV_PKT_FLAG_KEY) {
                    double ts = flv->acurframeindex / flv->framerate;
                    int64_t pos = cur_offset;

                    flv->lastkeyframetimestamp = flv->acurframeindex / flv->framerate;
                    flv->lastkeyframelocation = pos;
                    flv_append_keyframe_info(s, flv, ts, pos);
                }
                flv->acurframeindex++;
                break;

            case AVMEDIA_TYPE_AUDIO:
                flv->audiosize += (avio_tell(pb) - cur_offset);
                break;

            default:
                av_log(s, AV_LOG_WARNING, "par->codec_type is type = [%d]\n", par->codec_type);
                break;
        }
    }

    av_free(data);

    return pb->error;
}
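flv_write_packet() reacts to AV_PKT_DATA_NEW_EXTRADATA by rewriting the codec header. On the producing side, av_packet_new_side_data() always copies; when the caller already owns an av_malloc()ed buffer, av_packet_add_side_data() can hand it over without a second copy, as in this sketch (send_new_extradata() is hypothetical):

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Hypothetical helper: duplicate the extradata once and transfer
 * ownership to the packet; on failure the buffer is still ours. */
static int send_new_extradata(AVPacket *pkt, const uint8_t *extradata, size_t size)
{
    uint8_t *buf = av_memdup(extradata, size);
    int ret;

    if (!buf)
        return AVERROR(ENOMEM);
    ret = av_packet_add_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, buf, size);
    if (ret < 0)
        av_free(buf);   /* ownership was not transferred */
    return ret;
}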
Example #10
static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *out)
{
    H264MetadataContext *ctx = bsf->priv_data;
    AVPacket *in = NULL;
    CodedBitstreamFragment *au = &ctx->access_unit;
    int err, i, j, has_sps;
    H264RawAUD aud;
    uint8_t *displaymatrix_side_data = NULL;
    size_t displaymatrix_side_data_size = 0;

    err = ff_bsf_get_packet(bsf, &in);
    if (err < 0)
        return err;

    err = ff_cbs_read_packet(ctx->cbc, au, in);
    if (err < 0) {
        av_log(bsf, AV_LOG_ERROR, "Failed to read packet.\n");
        goto fail;
    }

    if (au->nb_units == 0) {
        av_log(bsf, AV_LOG_ERROR, "No NAL units in packet.\n");
        err = AVERROR_INVALIDDATA;
        goto fail;
    }

    // If an AUD is present, it must be the first NAL unit.
    if (au->units[0].type == H264_NAL_AUD) {
        if (ctx->aud == REMOVE)
            ff_cbs_delete_unit(ctx->cbc, au, 0);
    } else {
        if (ctx->aud == INSERT) {
            static const int primary_pic_type_table[] = {
                0x084, // 2, 7
                0x0a5, // 0, 2, 5, 7
                0x0e7, // 0, 1, 2, 5, 6, 7
                0x210, // 4, 9
                0x318, // 3, 4, 8, 9
                0x294, // 2, 4, 7, 9
                0x3bd, // 0, 2, 3, 4, 5, 7, 8, 9
                0x3ff, // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
            };
            int primary_pic_type_mask = 0xff;

            for (i = 0; i < au->nb_units; i++) {
                if (au->units[i].type == H264_NAL_SLICE ||
                    au->units[i].type == H264_NAL_IDR_SLICE) {
                    H264RawSlice *slice = au->units[i].content;
                    for (j = 0; j < FF_ARRAY_ELEMS(primary_pic_type_table); j++) {
                         if (!(primary_pic_type_table[j] &
                               (1 << slice->header.slice_type)))
                             primary_pic_type_mask &= ~(1 << j);
                    }
                }
            }
            for (j = 0; j < FF_ARRAY_ELEMS(primary_pic_type_table); j++)
                if (primary_pic_type_mask & (1 << j))
                    break;
            if (j >= FF_ARRAY_ELEMS(primary_pic_type_table)) {
                av_log(bsf, AV_LOG_ERROR, "No usable primary_pic_type: "
                       "invalid slice types?\n");
                err = AVERROR_INVALIDDATA;
                goto fail;
            }

            aud = (H264RawAUD) {
                .nal_unit_header.nal_unit_type = H264_NAL_AUD,
                .primary_pic_type = j,
            };

            err = ff_cbs_insert_unit_content(ctx->cbc, au,
                                             0, H264_NAL_AUD, &aud, NULL);
            if (err < 0) {
                av_log(bsf, AV_LOG_ERROR, "Failed to insert AUD.\n");
                goto fail;
            }
        }
    }

    has_sps = 0;
    for (i = 0; i < au->nb_units; i++) {
        if (au->units[i].type == H264_NAL_SPS) {
            err = h264_metadata_update_sps(bsf, au->units[i].content);
            if (err < 0)
                goto fail;
            has_sps = 1;
        }
    }

    // Only insert the SEI in access units containing SPSs, and also
    // unconditionally in the first access unit we ever see.
    if (ctx->sei_user_data && (has_sps || !ctx->done_first_au)) {
        H264RawSEIPayload payload = {
            .payload_type = H264_SEI_TYPE_USER_DATA_UNREGISTERED,
        };
        H264RawSEIUserDataUnregistered *udu =
            &payload.payload.user_data_unregistered;

        for (i = j = 0; j < 32 && ctx->sei_user_data[i]; i++) {
            int c, v;
            c = ctx->sei_user_data[i];
            if (c == '-') {
                continue;
            } else if (av_isxdigit(c)) {
                c = av_tolower(c);
                v = (c <= '9' ? c - '0' : c - 'a' + 10);
            } else {
                goto invalid_user_data;
            }
            if (i & 1)
                udu->uuid_iso_iec_11578[j / 2] |= v;
            else
                udu->uuid_iso_iec_11578[j / 2] = v << 4;
            ++j;
        }
        if (j == 32 && ctx->sei_user_data[i] == '+') {
            size_t len = strlen(ctx->sei_user_data + i + 1);

            udu->data_ref = av_buffer_alloc(len + 1);
            if (!udu->data_ref) {
                err = AVERROR(ENOMEM);
                goto fail;
            }

            udu->data        = udu->data_ref->data;
            udu->data_length = len + 1;
            memcpy(udu->data, ctx->sei_user_data + i + 1, len + 1);

            err = ff_cbs_h264_add_sei_message(ctx->cbc, au, &payload);
            if (err < 0) {
                av_log(bsf, AV_LOG_ERROR, "Failed to add user data SEI "
                       "message to access unit.\n");
                goto fail;
            }

        } else {
        invalid_user_data:
            av_log(bsf, AV_LOG_ERROR, "Invalid user data: "
                   "must be \"UUID+string\".\n");
            err = AVERROR(EINVAL);
            goto fail;
        }
    }

    if (ctx->delete_filler) {
        for (i = 0; i < au->nb_units; i++) {
            if (au->units[i].type == H264_NAL_FILLER_DATA) {
                // Filler NAL units.
                err = ff_cbs_delete_unit(ctx->cbc, au, i);
                if (err < 0) {
                    av_log(bsf, AV_LOG_ERROR, "Failed to delete "
                           "filler NAL.\n");
                    goto fail;
                }
                --i;
                continue;
            }

            if (au->units[i].type == H264_NAL_SEI) {
                // Filler SEI messages.
                H264RawSEI *sei = au->units[i].content;

                for (j = 0; j < sei->payload_count; j++) {
                    if (sei->payload[j].payload_type ==
                        H264_SEI_TYPE_FILLER_PAYLOAD) {
                        err = ff_cbs_h264_delete_sei_message(ctx->cbc, au,
                                                             &au->units[i], j);
                        if (err < 0) {
                            av_log(bsf, AV_LOG_ERROR, "Failed to delete "
                                   "filler SEI message.\n");
                            goto fail;
                        }
                        // Renumbering might have happened, start again at
                        // the same NAL unit position.
                        --i;
                        break;
                    }
                }
            }
        }
    }

    if (ctx->display_orientation != PASS) {
        for (i = 0; i < au->nb_units; i++) {
            H264RawSEI *sei;
            if (au->units[i].type != H264_NAL_SEI)
                continue;
            sei = au->units[i].content;

            for (j = 0; j < sei->payload_count; j++) {
                H264RawSEIDisplayOrientation *disp;
                int32_t *matrix;

                if (sei->payload[j].payload_type !=
                    H264_SEI_TYPE_DISPLAY_ORIENTATION)
                    continue;
                disp = &sei->payload[j].payload.display_orientation;

                if (ctx->display_orientation == REMOVE ||
                    ctx->display_orientation == INSERT) {
                    err = ff_cbs_h264_delete_sei_message(ctx->cbc, au,
                                                         &au->units[i], j);
                    if (err < 0) {
                        av_log(bsf, AV_LOG_ERROR, "Failed to delete "
                               "display orientation SEI message.\n");
                        goto fail;
                    }
                    --i;
                    break;
                }

                matrix = av_mallocz(9 * sizeof(int32_t));
                if (!matrix) {
                    err = AVERROR(ENOMEM);
                    goto fail;
                }

                av_display_rotation_set(matrix,
                                        disp->anticlockwise_rotation *
                                        180.0 / 65536.0);
                av_display_matrix_flip(matrix, disp->hor_flip, disp->ver_flip);

                // If there are multiple display orientation messages in an
                // access unit then ignore all but the last one.
                av_freep(&displaymatrix_side_data);

                displaymatrix_side_data      = (uint8_t*)matrix;
                displaymatrix_side_data_size = 9 * sizeof(int32_t);
            }
        }
    }
    if (ctx->display_orientation == INSERT) {
        H264RawSEIPayload payload = {
            .payload_type = H264_SEI_TYPE_DISPLAY_ORIENTATION,
        };
        H264RawSEIDisplayOrientation *disp =
            &payload.payload.display_orientation;
        uint8_t *data;
        int size;
        int write = 0;

        data = av_packet_get_side_data(in, AV_PKT_DATA_DISPLAYMATRIX, &size);
        if (data && size >= 9 * sizeof(int32_t)) {
            int32_t matrix[9];
            int hflip, vflip;
            double angle;

            memcpy(matrix, data, sizeof(matrix));

            hflip = vflip = 0;
            if (matrix[0] < 0 && matrix[4] > 0)
                hflip = 1;
            else if (matrix[0] > 0 && matrix[4] < 0)
                vflip = 1;
            av_display_matrix_flip(matrix, hflip, vflip);

            angle = av_display_rotation_get(matrix);

            if (!(angle >= -180.0 && angle <= 180.0 /* also excludes NaN */) ||
                matrix[2] != 0 || matrix[5] != 0 ||
                matrix[6] != 0 || matrix[7] != 0) {
                av_log(bsf, AV_LOG_WARNING, "Input display matrix is not "
                       "representable in H.264 parameters.\n");
            } else {
                disp->hor_flip = hflip;
                disp->ver_flip = vflip;
                disp->anticlockwise_rotation =
                    (uint16_t)rint((angle >= 0.0 ? angle
                                                 : angle + 360.0) *
                                   65536.0 / 360.0);
                write = 1;
            }
        }

        if (has_sps || !ctx->done_first_au) {
            if (!isnan(ctx->rotate)) {
                disp->anticlockwise_rotation =
                    (uint16_t)rint((ctx->rotate >= 0.0 ? ctx->rotate
                                                       : ctx->rotate + 360.0) *
                                   65536.0 / 360.0);
                write = 1;
            }
            if (ctx->flip) {
                disp->hor_flip = !!(ctx->flip & FLIP_HORIZONTAL);
                disp->ver_flip = !!(ctx->flip & FLIP_VERTICAL);
                write = 1;
            }
        }

        if (write) {
            disp->display_orientation_repetition_period = 1;

            err = ff_cbs_h264_add_sei_message(ctx->cbc, au, &payload);
            if (err < 0) {
                av_log(bsf, AV_LOG_ERROR, "Failed to add display orientation "
                       "SEI message to access unit.\n");
                goto fail;
            }
        }
    }
Example #11
bool FormatContext::read(Packet &encoded, int &idx)
{
    if (abortCtx->isAborted)
    {
        isError = true;
        return false;
    }

    if (isPaused)
    {
        isPaused = false;
        av_read_play(formatCtx);
    }

    AVPacketRAII avPacketRAII(packet);

    int ret;
    if (!maybeHasFrame)
        ret = av_read_frame(formatCtx, packet);
    else
    {
        maybeHasFrame = false;
        ret = errFromSeek;
        errFromSeek = 0;
    }

    if (ret == AVERROR_INVALIDDATA)
    {
        if (invalErrCount < 1000)
        {
            ++invalErrCount;
            return true;
        }
        isError = true;
        return false;
    }
    else
        invalErrCount = 0;
    if (ret == AVERROR(EAGAIN))
        return true;
    else if (ret)
    {
        isError = true;
        return false;
    }

    const int ff_idx = packet->stream_index;
    if (ff_idx >= streams.count())
    {
        QMPlay2Core.log("Stream index out of range: " + QString::number(ff_idx), ErrorLog | LogOnce | DontShowInGUI);
        return true;
    }

    const auto stream = streams.at(ff_idx);

    if (stream->event_flags & AVSTREAM_EVENT_FLAG_METADATA_UPDATED)
    {
        stream->event_flags = 0;
        isMetadataChanged = true;
    }
    if (fixMkvAss && stream->codecpar->codec_id == AV_CODEC_ID_ASS)
        matroska_fix_ass_packet(stream->time_base, packet);

    if (!packet->buf || forceCopy) // Buffer isn't reference-counted, so copy the data
    {
        encoded.assign(packet->data, packet->size, packet->size + AV_INPUT_BUFFER_PADDING_SIZE);
    }
    else
    {
        encoded.assign(packet->buf, packet->size, packet->data - packet->buf->data);
        packet->buf = nullptr;
    }

    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && stream->codecpar->format == AV_PIX_FMT_PAL8)
    {
        int size = 0;
        const auto data = av_packet_get_side_data(packet, AV_PKT_DATA_PALETTE, &size);
        if (size > 0 && data)
            encoded.palette = QByteArray((const char *)data, size);
    }

    const double time_base = av_q2d(stream->time_base);

    encoded.ts.setInvalid();
    if (packet->dts != QMPLAY2_NOPTS_VALUE)
        encoded.ts.setDts(packet->dts * time_base, startTime);
    if (packet->pts != QMPLAY2_NOPTS_VALUE)
        encoded.ts.setPts(packet->pts * time_base, startTime);

    if (packet->duration > 0)
        encoded.duration = packet->duration * time_base;
    else if (!encoded.ts || (encoded.duration = encoded.ts - streamsTS.at(ff_idx)) < 0.0 /* Calculate the packet duration if it doesn't exist */)
        encoded.duration = 0.0;
    streamsTS[ff_idx] = encoded.ts;

    if (isStreamed)
    {
        if (!isOneStreamOgg)
            encoded.ts += streamsOffset.at(ff_idx);
        else
        {
            encoded.ts = lastTime;
            lastTime += encoded.duration;
        }
    }
    else if (lengthToPlay > 0.0 && encoded.ts > lengthToPlay)
    {
        isError = true;
        return false;
    }

    encoded.hasKeyFrame = packet->flags & AV_PKT_FLAG_KEY;
    if (stream->sample_aspect_ratio.num)
        encoded.sampleAspectRatio = av_q2d(stream->sample_aspect_ratio);

    // Generate DTS for key frames if DTS doesn't exist (workaround for some M3U8 seekable streams)
    if (encoded.hasKeyFrame && !encoded.ts.hasDts())
        encoded.ts.setDts(nextDts.at(ff_idx));
    nextDts[ff_idx] = encoded.ts + encoded.duration;

    currPos = encoded.ts;

    idx = index_map.at(ff_idx);

    return true;
}