Example #1
static av_cold int dvbsub_init_decoder(AVCodecContext *avctx)
{
    int i, r, g, b, a = 0;
    DVBSubContext *ctx = avctx->priv_data;

    if (!avctx->extradata || (avctx->extradata_size < 4) || ((avctx->extradata_size % 5 != 0) && (avctx->extradata_size != 4))) {
        av_log(avctx, AV_LOG_WARNING, "Invalid DVB subtitles stream extradata!\n");
        ctx->composition_id = -1;
        ctx->ancillary_id   = -1;
    } else {
        if (avctx->extradata_size > 5) {
            av_log(avctx, AV_LOG_WARNING, "Decoding first DVB subtitles sub-stream\n");
        }

        ctx->composition_id = AV_RB16(avctx->extradata);
        ctx->ancillary_id   = AV_RB16(avctx->extradata + 2);
    }

    ctx->version = -1;

    default_clut.id = -1;
    default_clut.next = NULL;

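    /* default 2-bit (4-entry) CLUT: transparent, white, black and grey */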
    default_clut.clut4[0] = RGBA(  0,   0,   0,   0);
    default_clut.clut4[1] = RGBA(255, 255, 255, 255);
    default_clut.clut4[2] = RGBA(  0,   0,   0, 255);
    default_clut.clut4[3] = RGBA(127, 127, 127, 255);

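    /* default 4-bit (16-entry) CLUT: entry 0 transparent, entries 1-7
     * full-intensity RGB combinations, entries 8-15 half-intensity */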
    default_clut.clut16[0] = RGBA(  0,   0,   0,   0);
    for (i = 1; i < 16; i++) {
        if (i < 8) {
            r = (i & 1) ? 255 : 0;
            g = (i & 2) ? 255 : 0;
            b = (i & 4) ? 255 : 0;
        } else {
            r = (i & 1) ? 127 : 0;
            g = (i & 2) ? 127 : 0;
            b = (i & 4) ? 127 : 0;
        }
        default_clut.clut16[i] = RGBA(r, g, b, 255);
    }

    default_clut.clut256[0] = RGBA(  0,   0,   0,   0);
    for (i = 1; i < 256; i++) {
        if (i < 8) {
            r = (i & 1) ? 255 : 0;
            g = (i & 2) ? 255 : 0;
            b = (i & 4) ? 255 : 0;
            a = 63;
        } else {
            switch (i & 0x88) {
            case 0x00:
                r = ((i & 1) ? 85 : 0) + ((i & 0x10) ? 170 : 0);
                g = ((i & 2) ? 85 : 0) + ((i & 0x20) ? 170 : 0);
                b = ((i & 4) ? 85 : 0) + ((i & 0x40) ? 170 : 0);
                a = 255;
                break;
            case 0x08:
                r = ((i & 1) ? 85 : 0) + ((i & 0x10) ? 170 : 0);
                g = ((i & 2) ? 85 : 0) + ((i & 0x20) ? 170 : 0);
                b = ((i & 4) ? 85 : 0) + ((i & 0x40) ? 170 : 0);
                a = 127;
                break;
            case 0x80:
                r = 127 + ((i & 1) ? 43 : 0) + ((i & 0x10) ? 85 : 0);
                g = 127 + ((i & 2) ? 43 : 0) + ((i & 0x20) ? 85 : 0);
                b = 127 + ((i & 4) ? 43 : 0) + ((i & 0x40) ? 85 : 0);
                a = 255;
                break;
            case 0x88:
                r = ((i & 1) ? 43 : 0) + ((i & 0x10) ? 85 : 0);
                g = ((i & 2) ? 43 : 0) + ((i & 0x20) ? 85 : 0);
                b = ((i & 4) ? 43 : 0) + ((i & 0x40) ? 85 : 0);
                a = 255;
                break;
            }
        }
        default_clut.clut256[i] = RGBA(r, g, b, a);
    }

    return 0;
}
Example #2
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = avpkt->data + avpkt->size;
    int buf_size = avpkt->size;
    QdrawContext * const a = avctx->priv_data;
    AVFrame * const p= (AVFrame*)&a->pic;
    uint8_t* outdata;
    int colors;
    int i;
    uint32_t *pal;
    int r, g, b;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    outdata = a->pic.data[0];

    if (buf_end - buf < 0x68 + 4)
        return AVERROR_INVALIDDATA;
    buf += 0x68; /* jump to palette */
    colors = AV_RB32(buf);
    buf += 4;

    if(colors < 0 || colors > 256) {
        av_log(avctx, AV_LOG_ERROR, "Error color count - %i(0x%X)\n", colors, colors);
        return -1;
    }
    if (buf_end - buf < (colors + 1) * 8)
        return AVERROR_INVALIDDATA;

    pal = (uint32_t*)p->data[1];
    for (i = 0; i <= colors; i++) {
        unsigned int idx;
        idx = AV_RB16(buf); /* color index */
        buf += 2;

        if (idx > 255) {
            av_log(avctx, AV_LOG_ERROR, "Palette index out of range: %u\n", idx);
            buf += 6;
            continue;
        }
        r = *buf++;
        buf++;
        g = *buf++;
        buf++;
        b = *buf++;
        buf++;
        pal[idx] = 0xFF << 24 | r << 16 | g << 8 | b;
    }
    p->palette_has_changed = 1;

    if (buf_end - buf < 18)
        return AVERROR_INVALIDDATA;
    buf += 18; /* skip unneeded data */
    for (i = 0; i < avctx->height; i++) {
        int size, left, code, pix;
        const uint8_t *next;
        uint8_t *out;
        int tsize = 0;

        /* decode line */
        out = outdata;
        size = AV_RB16(buf); /* size of packed line */
        buf += 2;
        if (buf_end - buf < size)
            return AVERROR_INVALIDDATA;

        left = size;
        next = buf + size;
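        /* QuickDraw PackBits-style RLE: a code byte with the high bit set
         * encodes a run of (257 - code) pixels, otherwise (code + 1)
         * literal pixels follow */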
        while (left > 0) {
            code = *buf++;
            if (code & 0x80 ) { /* run */
                pix = *buf++;
                if ((out + (257 - code)) > (outdata +  a->pic.linesize[0]))
                    break;
                memset(out, pix, 257 - code);
                out += 257 - code;
                tsize += 257 - code;
                left -= 2;
            } else { /* copy */
                if ((out + code) > (outdata +  a->pic.linesize[0]))
                    break;
                if (buf_end - buf < code + 1)
                    return AVERROR_INVALIDDATA;
                memcpy(out, buf, code + 1);
                out += code + 1;
                buf += code + 1;
                left -= 2 + code;
                tsize += code + 1;
            }
        }
        buf = next;
        outdata += a->pic.linesize[0];
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = a->pic;

    return buf_size;
}
Example #3
// return 0 on packet, no more left, 1 on packet, 1 on partial packet
static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{
    uint8_t nal;
    uint8_t type;
    int result = 0;

    if (!len) {
        av_log(ctx, AV_LOG_ERROR, "Empty H264 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }
    nal  = buf[0];
    type = nal & 0x1f;
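    /* the low five bits of the NAL header byte give the NAL unit type;
     * types 24-29 are the RTP aggregation/fragmentation packets defined
     * in RFC 3984/6184 (STAP, MTAP, FU) */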

    assert(data);
    assert(buf);

    /* Simplify the case (these are all the nal types used internally by
     * the h264 codec). */
    if (type >= 1 && type <= 23)
        type = 1;
    switch (type) {
    case 0:                    // undefined, but pass them through
    case 1:
        av_new_packet(pkt, len + sizeof(start_sequence));
        memcpy(pkt->data, start_sequence, sizeof(start_sequence));
        memcpy(pkt->data + sizeof(start_sequence), buf, len);
        COUNT_NAL_TYPE(data, nal);
        break;

    case 24:                   // STAP-A (one packet, multiple nals)
        // consume the STAP-A NAL
        buf++;
        len--;
        // first we are going to figure out the total size
        {
            int pass         = 0;
            int total_length = 0;
            uint8_t *dst     = NULL;

            for (pass = 0; pass < 2; pass++) {
                const uint8_t *src = buf;
                int src_len        = len;

                while (src_len > 2) {
                    uint16_t nal_size = AV_RB16(src);

                    // consume the length of the aggregate
                    src     += 2;
                    src_len -= 2;

                    if (nal_size <= src_len) {
                        if (pass == 0) {
                            // counting
                            total_length += sizeof(start_sequence) + nal_size;
                        } else {
                            // copying
                            assert(dst);
                            memcpy(dst, start_sequence, sizeof(start_sequence));
                            dst += sizeof(start_sequence);
                            memcpy(dst, src, nal_size);
                            COUNT_NAL_TYPE(data, *src);
                            dst += nal_size;
                        }
                    } else {
                        av_log(ctx, AV_LOG_ERROR,
                               "nal size exceeds length: %d %d\n", nal_size, src_len);
                    }

                    // eat what we handled
                    src     += nal_size;
                    src_len -= nal_size;

                    if (src_len < 0)
                        av_log(ctx, AV_LOG_ERROR,
                               "Consumed more bytes than we got! (%d)\n", src_len);
                }

                if (pass == 0) {
                    /* now we know the total size of the packet (with the
                     * start sequences added) */
                    av_new_packet(pkt, total_length);
                    dst = pkt->data;
                } else {
                    assert(dst - pkt->data == total_length);
                }
            }
        }
        break;

    case 25:                   // STAP-B
    case 26:                   // MTAP-16
    case 27:                   // MTAP-24
    case 29:                   // FU-B
        av_log(ctx, AV_LOG_ERROR,
               "Unhandled type (%d) (See RFC for implementation details\n",
               type);
        result = AVERROR(ENOSYS);
        break;

    case 28:                   // FU-A (fragmented nal)
        buf++;
        len--;                 // skip the fu_indicator
        if (len > 1) {
            // these are the same as above, we just redo them here for clarity
            uint8_t fu_indicator      = nal;
            uint8_t fu_header         = *buf;
            uint8_t start_bit         = fu_header >> 7;
            uint8_t av_unused end_bit = (fu_header & 0x40) >> 6;
            uint8_t nal_type          = fu_header & 0x1f;
            uint8_t reconstructed_nal;

            // Reconstruct this packet's true nal; only the data follows.
            /* The original nal forbidden bit and NRI are stored in this
             * packet's nal. */
            reconstructed_nal  = fu_indicator & 0xe0;
            reconstructed_nal |= nal_type;

            // skip the fu_header
            buf++;
            len--;

            if (start_bit)
                COUNT_NAL_TYPE(data, nal_type);
            if (start_bit) {
                /* copy in the start sequence, and the reconstructed nal */
                av_new_packet(pkt, sizeof(start_sequence) + sizeof(nal) + len);
                memcpy(pkt->data, start_sequence, sizeof(start_sequence));
                pkt->data[sizeof(start_sequence)] = reconstructed_nal;
                memcpy(pkt->data + sizeof(start_sequence) + sizeof(nal), buf, len);
            } else {
                av_new_packet(pkt, len);
                memcpy(pkt->data, buf, len);
            }
        } else {
Example #4
static int vp8_handle_packet(AVFormatContext *ctx,
                             PayloadContext *vp8,
                             AVStream *st,
                             AVPacket *pkt,
                             uint32_t *timestamp,
                             const uint8_t *buf,
                             int len, int flags)
{
    int start_packet, end_packet, has_au, ret = AVERROR(EAGAIN);

    if (!buf) {
        // only called when vp8_handle_packet returns 1
        if (!vp8->data) {
            av_log(ctx, AV_LOG_ERROR, "Invalid VP8 data passed\n");
            return AVERROR_INVALIDDATA;
        }
        prepare_packet(pkt, vp8, st->index);
        *timestamp = vp8->timestamp;
        return 0;
    }

    start_packet = *buf & 1;
    end_packet   = flags & RTP_FLAG_MARKER;
    has_au       = *buf & 2;
    buf++;
    len--;

    if (start_packet) {
        int res;
        uint32_t ts = *timestamp;
        if (vp8->data) {
            // missing end marker; return old frame anyway. untested
            prepare_packet(pkt, vp8, st->index);
            *timestamp = vp8->timestamp; // reset timestamp from old frame

            // if current frame fits into one rtp packet, need to hold
            // that for the next av_get_packet call
            ret = end_packet ? 1 : 0;
        }
        if ((res = avio_open_dyn_buf(&vp8->data)) < 0)
            return res;
        vp8->is_keyframe = *buf & 1;
        vp8->timestamp   = ts;
    }

    if (!vp8->data || vp8->timestamp != *timestamp && ret == AVERROR(EAGAIN)) {
        av_log(ctx, AV_LOG_WARNING,
               "Received no start marker; dropping frame\n");
        return AVERROR(EAGAIN);
    }

    // cycle through VP8AU headers if needed
    // not tested with actual VP8AUs
    while (len) {
        int au_len = len;
        if (has_au && len > 2) {
            au_len = AV_RB16(buf);
            buf += 2;
            len -= 2;
            if (buf + au_len > buf + len) {
                av_log(ctx, AV_LOG_ERROR, "Invalid VP8AU length\n");
                return AVERROR_INVALIDDATA;
            }
        }

        avio_write(vp8->data, buf, au_len);
        buf += au_len;
        len -= au_len;
    }

    if (ret != AVERROR(EAGAIN)) // did we miss an end marker?
        return ret;

    if (end_packet) {
        prepare_packet(pkt, vp8, st->index);
        return 0;
    }

    return AVERROR(EAGAIN);
}
Example #5
static int probe(AVProbeData *p)
{
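    /* a CAF file starts with the 'caff' magic followed by a 16-bit
     * file version, which is currently always 1 */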
    if (AV_RB32(p->buf) == MKBETAG('c','a','f','f') && AV_RB16(&p->buf[4]) == 1)
        return AVPROBE_SCORE_MAX;
    return 0;
}
Example #6
File: rdt.c Project: AWilco/xbmc
int
ff_rdt_parse_header(const uint8_t *buf, int len,
                    int *pset_id, int *pseq_no, int *pstream_id,
                    int *pis_keyframe, uint32_t *ptimestamp)
{
    GetBitContext gb;
    int consumed = 0, set_id, seq_no, stream_id, is_keyframe,
        len_included, need_reliable;
    uint32_t timestamp;

    /* skip status packets */
    while (len >= 5 && buf[1] == 0xFF /* status packet */) {
        int pkt_len;

        if (!(buf[0] & 0x80))
            return -1; /* not followed by a data packet */

        pkt_len = AV_RB16(buf+3);
        buf += pkt_len;
        len -= pkt_len;
        consumed += pkt_len;
    }
    if (len < 16)
        return -1;
    /**
     * Layout of the header (in bits):
     * 1:  len_included
     *     Flag indicating whether this header includes a length field;
     *     this can be used to concatenate multiple RDT packets in a
     *     single UDP/TCP data frame and is used to precede RDT data
     *     by stream status packets
     * 1:  need_reliable
     *     Flag indicating whether this header includes a "reliable
     *     sequence number"; these are apparently sequence numbers of
     *     data packets alone. For data packets, this flag is always
     *     set, according to the Real documentation [1]
     * 5:  set_id
     *     ID of a set of streams of identical content, possibly with
     *     different codecs or bitrates
     * 1:  is_reliable
     *     Flag set for certain streams deemed less tolerable for packet
     *     loss
     * 16: seq_no
     *     Packet sequence number; if >=0xFF00, this is a non-data packet
     *     containing stream status info, the second byte indicates the
     *     type of status packet (see wireshark docs / source code [2])
     * if (len_included) {
     *     16: packet_len
     * } else {
     *     packet_len = remainder of UDP/TCP frame
     * }
     * 1:  is_back_to_back
     *     Back-to-Back flag; used for timing, set for one in every 10
     *     packets, according to the Real documentation [1]
     * 1:  is_slow_data
     *     Slow-data flag; currently unused, according to Real docs [1]
     * 5:  stream_id
     *     ID of the stream within this particular set of streams
     * 1:  is_no_keyframe
     *     Non-keyframe flag (unset if packet belongs to a keyframe)
     * 32: timestamp (PTS)
     * if (set_id == 0x1F) {
     *     16: set_id (extended set-of-streams ID; see set_id)
     * }
     * if (need_reliable) {
     *     16: reliable_seq_no
     *         Reliable sequence number (see need_reliable)
     * }
     * if (stream_id == 0x1F) {
     *     16: stream_id (extended stream ID; see stream_id)
     * }
     * [1] https://protocol.helixcommunity.org/files/2005/devdocs/RDT_Feature_Level_20.txt
     * [2] http://www.wireshark.org/docs/dfref/r/rdt.html and
     *     http://anonsvn.wireshark.org/viewvc/trunk/epan/dissectors/packet-rdt.c
     */
    init_get_bits(&gb, buf, len << 3);
    len_included  = get_bits1(&gb);
    need_reliable = get_bits1(&gb);
    set_id        = get_bits(&gb, 5);
    skip_bits(&gb, 1);
    seq_no        = get_bits(&gb, 16);
    if (len_included)
        skip_bits(&gb, 16);
    skip_bits(&gb, 2);
    stream_id     = get_bits(&gb, 5);
    is_keyframe   = !get_bits1(&gb);
    timestamp     = get_bits_long(&gb, 32);
    if (set_id == 0x1f)
        set_id    = get_bits(&gb, 16);
    if (need_reliable)
        skip_bits(&gb, 16);
    if (stream_id == 0x1f)
        stream_id = get_bits(&gb, 16);

    if (pset_id)      *pset_id      = set_id;
    if (pseq_no)      *pseq_no      = seq_no;
    if (pstream_id)   *pstream_id   = stream_id;
    if (pis_keyframe) *pis_keyframe = is_keyframe;
    if (ptimestamp)   *ptimestamp   = timestamp;

    return consumed + (get_bits_count(&gb) >> 3);
}
Example #7
static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt,
                                     const uint8_t *buf, int len)
{
    unsigned int ssrc, h;
    int payload_type, seq, ret, flags = 0;
    int ext;
    AVStream *st;
    uint32_t timestamp;
    int rv= 0;

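    /* parse the fixed 12-byte RTP header (RFC 3550):
     * V/P/X/CC, M/PT, sequence number, timestamp and SSRC */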
    ext = buf[0] & 0x10;
    payload_type = buf[1] & 0x7f;
    if (buf[1] & 0x80)
        flags |= RTP_FLAG_MARKER;
    seq  = AV_RB16(buf + 2);
    timestamp = AV_RB32(buf + 4);
    ssrc = AV_RB32(buf + 8);
    /* store the ssrc in the RTPDemuxContext */
    s->ssrc = ssrc;

    /* NOTE: we can handle only one payload type */
    if (s->payload_type != payload_type)
        return -1;

    st = s->st;
    // only do something with this if all the rtp checks pass...
    if(!rtp_valid_packet_in_sequence(&s->statistics, seq))
    {
        av_log(st?st->codec:NULL, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
               payload_type, seq, ((s->seq + 1) & 0xffff));
        return -1;
    }

    if (buf[0] & 0x20) {
        int padding = buf[len - 1];
        if (len >= 12 + padding)
            len -= padding;
    }

    s->seq = seq;
    len -= 12;
    buf += 12;

    /* RFC 3550 Section 5.3.1 RTP Header Extension handling */
    if (ext) {
        if (len < 4)
            return -1;
        /* calculate the header extension length (stored as number
         * of 32-bit words) */
        ext = (AV_RB16(buf + 2) + 1) << 2;

        if (len < ext)
            return -1;
        // skip past RTP header extension
        len -= ext;
        buf += ext;
    }

    if (!st) {
        /* specific MPEG2TS demux support */
        ret = ff_mpegts_parse_packet(s->ts, pkt, buf, len);
        /* The only error that can be returned from ff_mpegts_parse_packet
         * is "no more data to return from the provided buffer", so return
         * AVERROR(EAGAIN) for all errors */
        if (ret < 0)
            return AVERROR(EAGAIN);
        if (ret < len) {
            s->read_buf_size = len - ret;
            memcpy(s->buf, buf + ret, s->read_buf_size);
            s->read_buf_index = 0;
            return 1;
        }
        return 0;
    } else if (s->parse_packet) {
        rv = s->parse_packet(s->ic, s->dynamic_protocol_context,
                             s->st, pkt, &timestamp, buf, len, flags);
    } else {
        // at this point, the RTP header has been stripped; this is assuming that there is only 1 CSRC, which isn't wise.
        switch(st->codec->codec_id) {
        case CODEC_ID_MP2:
        case CODEC_ID_MP3:
            /* better than nothing: skip mpeg audio RTP header */
            if (len <= 4)
                return -1;
            h = AV_RB32(buf);
            len -= 4;
            buf += 4;
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        case CODEC_ID_MPEG1VIDEO:
        case CODEC_ID_MPEG2VIDEO:
            /* better than nothing: skip mpeg video RTP header */
            if (len <= 4)
                return -1;
            h = AV_RB32(buf);
            buf += 4;
            len -= 4;
            if (h & (1 << 26)) {
                /* mpeg2 */
                if (len <= 4)
                    return -1;
                buf += 4;
                len -= 4;
            }
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        default:
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        }

        pkt->stream_index = st->index;
    }

    // now perform timestamp things....
    finalize_packet(s, pkt, timestamp);

    return rv;
}
Example #8
static int
demux_mkv_open_audio (FileInfo *finfo, mkv_track_t *track)
{
  WAVEFORMATEX *finfowf = &finfo->wf;

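  /* ms_compat (A_MS/ACM) tracks carry a little-endian WAVEFORMATEX in
   * CodecPrivate; all other tracks are mapped from the Matroska CodecID
   * to a format tag by hand below */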
  if (track->ms_compat && (track->private_size >= sizeof(WAVEFORMATEX)))
    {
      WAVEFORMATEX *wf = (WAVEFORMATEX *)track->private_data;
      finfowf->wFormatTag = le2me_16 (wf->wFormatTag);
      finfowf->nChannels = le2me_16 (wf->nChannels);
      finfowf->nSamplesPerSec = le2me_32 (wf->nSamplesPerSec);
      finfowf->nAvgBytesPerSec = le2me_32 (wf->nAvgBytesPerSec);
      finfowf->nBlockAlign = le2me_16 (wf->nBlockAlign);
      finfowf->wBitsPerSample = le2me_16 (wf->wBitsPerSample);
      if (track->a_sfreq == 0.0)
        track->a_sfreq = finfowf->nSamplesPerSec;
      if (track->a_channels == 0)
        track->a_channels = finfowf->nChannels;
      if (track->a_bps == 0)
        track->a_bps = finfowf->wBitsPerSample;
      track->a_formattag = finfowf->wFormatTag;
    }
  else
    {
      memset(finfowf, 0, sizeof (WAVEFORMATEX));
      if (!strcmp(track->codec_id, MKV_A_MP3) ||
          !strcmp(track->codec_id, MKV_A_MP2))
        track->a_formattag = 0x0055;
      else if (!strncmp(track->codec_id, MKV_A_AC3, strlen(MKV_A_AC3)))
        track->a_formattag = 0x2000;
      else if (!strncmp(track->codec_id, MKV_A_EAC3, strlen(MKV_A_EAC3)))
        track->a_formattag = AUDIO_EAC3;
      else if (!strcmp(track->codec_id, MKV_A_DTS))
        {
          track->a_formattag = 0x2001;
          //dts_packet = 1;
        }
      else if (!strcmp(track->codec_id, MKV_A_PCM) ||
               !strcmp(track->codec_id, MKV_A_PCM_BE))
        track->a_formattag = 0x0001;
      else if (!strcmp(track->codec_id, MKV_A_AAC_2MAIN) ||
               !strncmp(track->codec_id, MKV_A_AAC_2LC,
                        strlen(MKV_A_AAC_2LC)) ||
               !strcmp(track->codec_id, MKV_A_AAC_2SSR) ||
               !strcmp(track->codec_id, MKV_A_AAC_4MAIN) ||
               !strncmp(track->codec_id, MKV_A_AAC_4LC,
                        strlen(MKV_A_AAC_4LC)) ||
               !strcmp(track->codec_id, MKV_A_AAC_4SSR) ||
               !strcmp(track->codec_id, MKV_A_AAC_4LTP) ||
               !strcmp(track->codec_id, MKV_A_AAC))
        track->a_formattag = mmioFOURCC('M', 'P', '4', 'A');
      else if (!strcmp(track->codec_id, MKV_A_VORBIS))
        {
          if (track->private_data == NULL)
            return 1;
          track->a_formattag = mmioFOURCC('v', 'r', 'b', 's');
        }
      else if (!strcmp(track->codec_id, MKV_A_QDMC))
        track->a_formattag = mmioFOURCC('Q', 'D', 'M', 'C');
      else if (!strcmp(track->codec_id, MKV_A_QDMC2))
        track->a_formattag = mmioFOURCC('Q', 'D', 'M', '2');
      else if (!strcmp(track->codec_id, MKV_A_WAVPACK))
        track->a_formattag = mmioFOURCC('W', 'V', 'P', 'K');
      else if (!strcmp(track->codec_id, MKV_A_TRUEHD))
        track->a_formattag = mmioFOURCC('T', 'R', 'H', 'D');
      else if (!strcmp(track->codec_id, MKV_A_FLAC))
        {
          if (track->private_data == NULL || track->private_size == 0)
            {
              mp_msg (MSGT_DEMUX, MSGL_WARN,
                      MSGTR_MPDEMUX_MKV_FlacTrackDoesNotContainValidHeaders);
              return 1;
            }
          track->a_formattag = mmioFOURCC ('f', 'L', 'a', 'C');
        }
      else if (track->private_size >= RAPROPERTIES4_SIZE)
        {
          if (!strcmp(track->codec_id, MKV_A_REAL28))
            track->a_formattag = mmioFOURCC('2', '8', '_', '8');
          else if (!strcmp(track->codec_id, MKV_A_REALATRC))
            track->a_formattag = mmioFOURCC('a', 't', 'r', 'c');
          else if (!strcmp(track->codec_id, MKV_A_REALCOOK))
            track->a_formattag = mmioFOURCC('c', 'o', 'o', 'k');
          else if (!strcmp(track->codec_id, MKV_A_REALDNET))
            track->a_formattag = mmioFOURCC('d', 'n', 'e', 't');
          else if (!strcmp(track->codec_id, MKV_A_REALSIPR))
            track->a_formattag = mmioFOURCC('s', 'i', 'p', 'r');
        }
      else
        {
          mp_msg (MSGT_DEMUX, MSGL_WARN, MSGTR_MPDEMUX_MKV_UnknownAudioCodec,
                  track->codec_id, track->tnum);
          return 1;
        }
    }

  finfowf->wFormatTag = track->a_formattag;
  finfowf->nChannels = track->a_channels;
  finfowf->nSamplesPerSec = (uint32_t) track->a_sfreq;
  if (track->a_bps == 0)
    {
      finfowf->wBitsPerSample = 16;
    }
  else
    {
      finfowf->wBitsPerSample = track->a_bps;
    }
  if (track->a_formattag == 0x0055)  /* MP3 || MP2 */
    {
      finfowf->nAvgBytesPerSec = 16000;
      finfowf->nBlockAlign = 1152;
    }
  else if (track->a_formattag == 0x2000 )/* AC3 */
    {
    }
  else if (track->a_formattag == AUDIO_EAC3 )/* EAC3 */
    {
    }
  else if (track->a_formattag == 0x2001) /* DTS */
    {
      //dts_packet = 1;
    }
  else if (track->a_formattag == 0x0001)  /* PCM || PCM_BE */
    {
      finfowf->nAvgBytesPerSec = track->a_channels * track->a_sfreq * 2;
      finfowf->nBlockAlign = finfowf->nAvgBytesPerSec;
      /*
      if (!strcmp(track->codec_id, MKV_A_PCM_BE))
        sh_a->format = mmioFOURCC('t', 'w', 'o', 's');
      */
    }
  else if (!strcmp(track->codec_id, MKV_A_QDMC) ||
           !strcmp(track->codec_id, MKV_A_QDMC2))
    {
      finfowf->nAvgBytesPerSec = 16000;
      finfowf->nBlockAlign = 1486;
      track->fix_i_bps = 1;
      track->qt_last_a_pts = 0.0;
    }
  else if (track->a_formattag == mmioFOURCC('M', 'P', '4', 'A'))
    {
      int profile, srate_idx;

      finfowf->nAvgBytesPerSec = 16000;
      finfowf->nBlockAlign = 1024;

      if (!strcmp (track->codec_id, MKV_A_AAC) &&
          (NULL != track->private_data))
        {
          return 0;
        }

      /* Recreate the 'private data' */
      /* which faad2 uses in its initialization */
      srate_idx = aac_get_sample_rate_index (track->a_sfreq);
      if (!strncmp (&track->codec_id[12], "MAIN", 4))
        profile = 0;
      else if (!strncmp (&track->codec_id[12], "LC", 2))
        profile = 1;
      else if (!strncmp (&track->codec_id[12], "SSR", 3))
        profile = 2;
      else
        profile = 3;

      if (strstr(track->codec_id, "SBR") != NULL)
        {
          /* HE-AAC (aka SBR AAC) */
          track->default_duration = 1024.0 / finfowf->nSamplesPerSec;
          finfowf->nSamplesPerSec *= 2;
        }
      else
        {
          track->default_duration = 1024.0 / (float)finfowf->nSamplesPerSec;
        }
    }
  else if (track->a_formattag == mmioFOURCC('v', 'r', 'b', 's'))  /* VORBIS */
    {
      //finfowf->cbSize = track->private_size;
    }
  else if (track->private_size >= RAPROPERTIES4_SIZE
           && !strncmp (track->codec_id, MKV_A_REALATRC, 7))
    {
      /* Common initialization for all RealAudio codecs */
      unsigned char *src = (unsigned char *)track->private_data;
      int codecdata_length, version;
      int flavor;

      finfowf->nAvgBytesPerSec = 0;  /* FIXME !? */

      version = AV_RB16(src + 4);
      flavor = AV_RB16(src + 22);
      track->coded_framesize = AV_RB32(src + 24);
      track->sub_packet_h = AV_RB16(src + 40);
      finfowf->nBlockAlign =
      track->audiopk_size = AV_RB16(src + 42);
      track->sub_packet_size = AV_RB16(src + 44);
      if (version == 4)
        {
          src += RAPROPERTIES4_SIZE;
          src += src[0] + 1;
          src += src[0] + 1;
        }
      else
        src += RAPROPERTIES5_SIZE;

      src += 3;
      if (version == 5)
        src++;
      codecdata_length = AV_RB32(src);
      src += 4;

      switch (track->a_formattag) {
        case mmioFOURCC('a', 't', 'r', 'c'):
          finfowf->nAvgBytesPerSec = atrc_fl2bps[flavor];
          finfowf->nBlockAlign = track->sub_packet_size;
          break;
        case mmioFOURCC('c', 'o', 'o', 'k'):
          finfowf->nAvgBytesPerSec = cook_fl2bps[flavor];
          finfowf->nBlockAlign = track->sub_packet_size;
          break;
        case mmioFOURCC('s', 'i', 'p', 'r'):
          finfowf->nAvgBytesPerSec = sipr_fl2bps[flavor];
          finfowf->nBlockAlign = track->coded_framesize;
          break;
        case mmioFOURCC('2', '8', '_', '8'):
          finfowf->nAvgBytesPerSec = 3600;
          finfowf->nBlockAlign = track->coded_framesize;
          break;
      }

      track->realmedia = 1;
    }
  else if (!strcmp(track->codec_id, MKV_A_FLAC) ||
           (track->a_formattag == 0xf1ac))
    {
      unsigned char *ptr;
      int size;
#if 1 //Fuchun 2010.06.23
      if (track->a_formattag == mmioFOURCC('f', 'L', 'a', 'C'))
        {
          ptr = (unsigned char *)track->private_data;
          size = track->private_size;
        }
      else
        {
          //sh_a->format = mmioFOURCC('f', 'L', 'a', 'C');
          ptr = (unsigned char *) track->private_data + sizeof (WAVEFORMATEX);
          size = track->private_size - sizeof (WAVEFORMATEX);
        }
      if (size < 4 || ptr[0] != 'f' || ptr[1] != 'L' || ptr[2] != 'a' || ptr[3] != 'C')
        {
          //finfowf->cbSize = 4;
        }
      else
        {
          //finfowf->cbSize = size;
        }
#else
      free(finfowf);
      finfowf = NULL;

      if (track->a_formattag == mmioFOURCC('f', 'L', 'a', 'C'))
        {
          ptr = (unsigned char *)track->private_data;
          size = track->private_size;
        }
      else
        {
          sh_a->format = mmioFOURCC('f', 'L', 'a', 'C');
          ptr = (unsigned char *) track->private_data
            + sizeof (WAVEFORMATEX);
          size = track->private_size - sizeof (WAVEFORMATEX);
        }
      if (size < 4 || ptr[0] != 'f' || ptr[1] != 'L' ||
          ptr[2] != 'a' || ptr[3] != 'C')
        {
          dp = new_demux_packet (4);
          memcpy (dp->buffer, "fLaC", 4);
        }
      else
        {
          dp = new_demux_packet (size);
          memcpy (dp->buffer, ptr, size);
        }
      dp->pts = 0;
      dp->flags = 0;
      ds_add_packet (demuxer->audio, dp);
#endif
    }
  else if (track->a_formattag == mmioFOURCC('W', 'V', 'P', 'K') ||
           track->a_formattag == mmioFOURCC('T', 'R', 'H', 'D'))
    {  /* do nothing, still works */  }
  else if (!track->ms_compat || (track->private_size < sizeof(WAVEFORMATEX)))
    {
      return 1;
    }

  return 0;
}
Example #9
static int h264_extradata_to_annexb(AVCodecContext *avctx, const int padding)
{
    uint16_t unit_size;
    uint64_t total_size                 = 0;
    uint8_t *out                        = NULL, unit_nb, sps_done = 0,
             sps_seen                   = 0, pps_seen = 0;
    const uint8_t *extradata            = avctx->extradata + 4;
    static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
    int length_size = (*extradata++ & 0x3) + 1; // retrieve length coded size
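    /* extradata holds an AVCDecoderConfigurationRecord: byte 0 is the
     * version, bytes 1-3 profile/compat/level (skipped via the +4 above);
     * the next byte's low 2 bits give lengthSizeMinusOne, and the byte
     * after that carries the SPS count in its low 5 bits */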

    /* retrieve sps and pps unit(s) */
    unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
    if (!unit_nb) {
        goto pps;
    } else {
        sps_seen = 1;
    }

    while (unit_nb--) {
        void *tmp;

        unit_size   = AV_RB16(extradata);
        total_size += unit_size + 4;
        if (total_size > INT_MAX - padding) {
            av_log(avctx, AV_LOG_ERROR,
                   "Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
            av_free(out);
            return AVERROR(EINVAL);
        }
        if (extradata + 2 + unit_size > avctx->extradata + avctx->extradata_size) {
            av_log(avctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
                   "corrupted stream or invalid MP4/AVCC bitstream\n");
            av_free(out);
            return AVERROR(EINVAL);
        }
        tmp = av_realloc(out, total_size + padding);
        if (!tmp) {
            av_free(out);
            return AVERROR(ENOMEM);
        }
        out = tmp;
        memcpy(out + total_size - unit_size - 4, nalu_header, 4);
        memcpy(out + total_size - unit_size, extradata + 2, unit_size);
        extradata += 2 + unit_size;
pps:
        if (!unit_nb && !sps_done++) {
            unit_nb = *extradata++; /* number of pps unit(s) */
            if (unit_nb)
                pps_seen = 1;
        }
    }

    if (out)
        memset(out + total_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    if (!sps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: SPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    if (!pps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: PPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    av_free(avctx->extradata);
    avctx->extradata      = out;
    avctx->extradata_size = total_size;

    return length_size;
}
Example #10
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb      = s->pb;
    AVCodecContext *enc  = s->streams[pkt->stream_index]->codec;
    FLVContext *flv      = s->priv_data;
    FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data;
    unsigned ts;
    int size = pkt->size;
    uint8_t *data = NULL;
    int flags = -1, flags_size, ret;

    if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A ||
        enc->codec_id == AV_CODEC_ID_VP6  || enc->codec_id == AV_CODEC_ID_AAC)
        flags_size = 2;
    else if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4)
        flags_size = 5;
    else
        flags_size = 1;

    switch (enc->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        avio_w8(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if (flags == 0) {
            av_log(s, AV_LOG_ERROR,
                   "Video codec '%s' is not compatible with FLV\n",
                   avcodec_get_name(enc->codec_id));
            return AVERROR(EINVAL);
        }

        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
        break;
    case AVMEDIA_TYPE_AUDIO:
        flags = get_audio_flags(s, enc);

        av_assert0(size);

        avio_w8(pb, FLV_TAG_TYPE_AUDIO);
        break;
    case AVMEDIA_TYPE_DATA:
        avio_w8(pb, FLV_TAG_TYPE_META);
        break;
    default:
        return AVERROR(EINVAL);
    }

    if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
        /* check if the extradata looks MP4-formatted (AVCC); if not, convert
         * the packet from Annex B to length-prefixed NAL units */
        if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1)
            if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0)
                return ret;
    } else if (enc->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 &&
               (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
        if (!s->streams[pkt->stream_index]->nb_frames) {
            av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: "
                   "use audio bitstream filter 'aac_adtstoasc' to fix it "
                   "('-bsf:a aac_adtstoasc' option with ffmpeg)\n");
            return AVERROR_INVALIDDATA;
        }
        av_log(s, AV_LOG_WARNING, "aac bitstream error\n");
    }

    if (flv->delay == AV_NOPTS_VALUE)
        flv->delay = -pkt->dts;

    if (pkt->dts < -flv->delay) {
        av_log(s, AV_LOG_WARNING,
               "Packets are not in the proper order with respect to DTS\n");
        return AVERROR(EINVAL);
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts

    /* check Speex packet duration */
    if (enc->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160)
        av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than "
                                  "8 frames per packet. Adobe Flash "
                                  "Player cannot handle this!\n");

    if (sc->last_ts < ts)
        sc->last_ts = ts;

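    /* FLV tag body header (the tag type byte was written above): 24-bit
     * data size, 24-bit timestamp plus the 8-bit extended byte, and the
     * 24-bit stream ID, which is always 0 in FLV */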
    avio_wb24(pb, size + flags_size);
    avio_wb24(pb, ts);
    avio_w8(pb, (ts >> 24) & 0x7F); // timestamps are 32 bits _signed_
    avio_wb24(pb, flv->reserved);

    if (enc->codec_type == AVMEDIA_TYPE_DATA) {
        int data_size;
        int metadata_size_pos = avio_tell(pb);
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, "onTextData");
        avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
        avio_wb32(pb, 2);
        put_amf_string(pb, "type");
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, "Text");
        put_amf_string(pb, "text");
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, pkt->data);
        put_amf_string(pb, "");
        avio_w8(pb, AMF_END_OF_OBJECT);
        /* write total size of tag */
        data_size = avio_tell(pb) - metadata_size_pos;
        avio_seek(pb, metadata_size_pos - 10, SEEK_SET);
        avio_wb24(pb, data_size);
        avio_seek(pb, data_size + 10 - 3, SEEK_CUR);
        avio_wb32(pb, data_size + 11);
    } else {
        av_assert1(flags>=0);
        avio_w8(pb,flags);
        if (enc->codec_id == AV_CODEC_ID_VP6)
            avio_w8(pb,0);
        if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A) {
            if (enc->extradata_size)
                avio_w8(pb, enc->extradata[0]);
            else
                avio_w8(pb, ((FFALIGN(enc->width,  16) - enc->width) << 4) |
                             (FFALIGN(enc->height, 16) - enc->height));
        } else if (enc->codec_id == AV_CODEC_ID_AAC)
            avio_w8(pb, 1); // AAC raw
        else if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            avio_w8(pb, 1); // AVC NALU
            avio_wb24(pb, pkt->pts - pkt->dts);
        }

        avio_write(pb, data ? data : pkt->data, size);

        avio_wb32(pb, size + flags_size + 11); // previous tag size
        flv->duration = FFMAX(flv->duration,
                              pkt->pts + flv->delay + pkt->duration);
    }

    av_free(data);

    return pb->error;
}
Example #11
static int film_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FilmDemuxContext *film = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (get_buffer(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (get_buffer(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (film->audio_bits == 8)
            film->audio_type = CODEC_ID_PCM_S8;
        else if (film->audio_bits == 16)
            film->audio_type = CODEC_ID_PCM_S16BE;
        else
            film->audio_type = CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = CODEC_ID_CINEPAK;
    } else
        film->video_type = CODEC_ID_NONE;

    /* initialize the decoder streams */
    if (film->video_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);
    }

    if (film->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->bits_per_coded_sample = film->audio_bits;
        st->codec->sample_rate = film->audio_samplerate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_coded_sample / 8;
    }

    /* load the sample table */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample));

    for(i=0; i<s->nb_streams; i++)
        av_set_pts_info(s->streams[i], 33, 1, film->base_clock);

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (get_buffer(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR(EIO);
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
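        /* a pts field of 0xFFFFFFFF marks an audio chunk; video chunks
         * carry the pts in the low 31 bits, with the top bit set for
         * non-keyframes */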
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;
            film->sample_table[i].pts *= film->base_clock;
            film->sample_table[i].pts /= film->audio_samplerate;

            audio_frame_counter += (film->sample_table[i].sample_size /
                (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
Example #12
static int mov_text_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_sub_ptr, AVPacket *avpkt)
{
    AVSubtitle *sub = data;
    MovTextContext *m = avctx->priv_data;
    int ret, ts_start, ts_end;
    AVBPrint buf;
    char *ptr = avpkt->data;
    char *end;
    int text_length, tsmb_type, ret_tsmb;
    uint64_t tsmb_size;
    const uint8_t *tsmb;

    if (!ptr || avpkt->size < 2)
        return AVERROR_INVALIDDATA;

    /*
     * A packet of size two with value zero is an empty subtitle
     * used to mark the end of the previous non-empty subtitle.
     * We can just drop them here as we have duration information
     * already. If the value is non-zero, then it's technically a
     * bad packet.
     */
    if (avpkt->size == 2)
        return AV_RB16(ptr) == 0 ? 0 : AVERROR_INVALIDDATA;

    /*
     * The first two bytes of the packet are the length of the text string
     * In complex cases, there are style descriptors appended to the string
     * so we can't just assume the packet size is the string size.
     */
    text_length = AV_RB16(ptr);
    end = ptr + FFMIN(2 + text_length, avpkt->size);
    ptr += 2;

    ts_start = av_rescale_q(avpkt->pts,
                            avctx->time_base,
                            (AVRational){1,100});
    ts_end   = av_rescale_q(avpkt->pts + avpkt->duration,
                            avctx->time_base,
                            (AVRational){1,100});

    tsmb_size = 0;
    m->tracksize = 2 + text_length;
    m->style_entries = 0;
    m->box_flags = 0;
    m->count_s = 0;
    // Note that the spec recommends lines be no longer than 2048 characters.
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
    if (text_length + 2 != avpkt->size) {
        while (m->tracksize + 8 <= avpkt->size) {
            // A box is a minimum of 8 bytes.
            tsmb = ptr + m->tracksize - 2;
            tsmb_size = AV_RB32(tsmb);
            tsmb += 4;
            tsmb_type = AV_RB32(tsmb);
            tsmb += 4;

            if (tsmb_size == 1) {
                if (m->tracksize + 16 > avpkt->size)
                    break;
                tsmb_size = AV_RB64(tsmb);
                tsmb += 8;
                m->size_var = 16;
            } else
                m->size_var = 8;
            //size_var is equal to 8 or 16 depending on the size of box

            if (tsmb_size == 0) {
                av_log(avctx, AV_LOG_ERROR, "tsmb_size is 0\n");
                return AVERROR_INVALIDDATA;
            }

            if (tsmb_size > avpkt->size - m->tracksize)
                break;

            for (size_t i = 0; i < box_count; i++) {
                if (tsmb_type == box_types[i].type) {
                    if (m->tracksize + m->size_var + box_types[i].base_size > avpkt->size)
                        break;
                    ret_tsmb = box_types[i].decode(tsmb, m, avpkt);
                    if (ret_tsmb == -1)
                        break;
                }
            }
            m->tracksize = m->tracksize + tsmb_size;
        }
        text_to_ass(&buf, ptr, end, m);
        mov_text_cleanup(m);
    } else
        text_to_ass(&buf, ptr, end, m);

    ret = ff_ass_add_rect_bprint(sub, &buf, ts_start, ts_end - ts_start);
    av_bprint_finalize(&buf, NULL);
    if (ret < 0)
        return ret;
    *got_sub_ptr = sub->num_rects > 0;
    return avpkt->size;
}
Example #13
static int mov_text_tx3g(AVCodecContext *avctx, MovTextContext *m)
{
    uint8_t *tx3g_ptr = avctx->extradata;
    int i, box_size, font_length;
    int8_t v_align, h_align;
    int style_fontID;
    StyleBox s_default;

    m->count_f = 0;
    m->ftab_entries = 0;
    box_size = BOX_SIZE_INITIAL; /* Size till ftab_entries */
    if (avctx->extradata_size < box_size)
        return -1;

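    /* parse the tx3g sample entry (3GPP Timed Text) from extradata:
     * display flags, justification, background color, a default text box,
     * a default style record and finally the font table */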
    // Display Flags
    tx3g_ptr += 4;
    // Alignment
    h_align = *tx3g_ptr++;
    v_align = *tx3g_ptr++;
    if (h_align == 0) {
        if (v_align == 0)
            m->d.alignment = TOP_LEFT;
        if (v_align == 1)
            m->d.alignment = MIDDLE_LEFT;
        if (v_align == -1)
            m->d.alignment = BOTTOM_LEFT;
    }
    if (h_align == 1) {
        if (v_align == 0)
            m->d.alignment = TOP_CENTER;
        if (v_align == 1)
            m->d.alignment = MIDDLE_CENTER;
        if (v_align == -1)
            m->d.alignment = BOTTOM_CENTER;
    }
    if (h_align == -1) {
        if (v_align == 0)
            m->d.alignment = TOP_RIGHT;
        if (v_align == 1)
            m->d.alignment = MIDDLE_RIGHT;
        if (v_align == -1)
            m->d.alignment = BOTTOM_RIGHT;
    }
    // Background Color
    m->d.back_color = AV_RB24(tx3g_ptr);
    tx3g_ptr += 4;
    // BoxRecord
    tx3g_ptr += 8;
    // StyleRecord
    tx3g_ptr += 4;
    // fontID
    style_fontID = AV_RB16(tx3g_ptr);
    tx3g_ptr += 2;
    // face-style-flags
    s_default.style_flag = *tx3g_ptr++;
    m->d.bold = s_default.style_flag & STYLE_FLAG_BOLD;
    m->d.italic = s_default.style_flag & STYLE_FLAG_ITALIC;
    m->d.underline = s_default.style_flag & STYLE_FLAG_UNDERLINE;
    // fontsize
    m->d.fontsize = *tx3g_ptr++;
    // Primary color
    m->d.color = AV_RB24(tx3g_ptr);
    tx3g_ptr += 4;
    // FontRecord
    // FontRecord Size
    tx3g_ptr += 4;
    // ftab
    tx3g_ptr += 4;

    m->ftab_entries = AV_RB16(tx3g_ptr);
    tx3g_ptr += 2;

    for (i = 0; i < m->ftab_entries; i++) {

        box_size += 3;
        if (avctx->extradata_size < box_size) {
            mov_text_cleanup_ftab(m);
            m->ftab_entries = 0;
            return -1;
        }
        m->ftab_temp = av_mallocz(sizeof(*m->ftab_temp));
        if (!m->ftab_temp) {
            mov_text_cleanup_ftab(m);
            return AVERROR(ENOMEM);
        }
        m->ftab_temp->fontID = AV_RB16(tx3g_ptr);
        tx3g_ptr += 2;
        font_length = *tx3g_ptr++;

        box_size = box_size + font_length;
        if (avctx->extradata_size < box_size) {
            mov_text_cleanup_ftab(m);
            m->ftab_entries = 0;
            return -1;
        }
        m->ftab_temp->font = av_malloc(font_length + 1);
        if (!m->ftab_temp->font) {
            mov_text_cleanup_ftab(m);
            return AVERROR(ENOMEM);
        }
        memcpy(m->ftab_temp->font, tx3g_ptr, font_length);
        m->ftab_temp->font[font_length] = '\0';
        av_dynarray_add(&m->ftab, &m->count_f, m->ftab_temp);
        if (!m->ftab) {
            mov_text_cleanup_ftab(m);
            return AVERROR(ENOMEM);
        }
        m->ftab_temp = NULL;
        tx3g_ptr = tx3g_ptr + font_length;
    }
    for (i = 0; i < m->ftab_entries; i++) {
        if (style_fontID == m->ftab[i]->fontID)
            m->d.font = m->ftab[i]->font;
    }
    return 0;
}
Example #14
File: vmnc.c Project: komh/kmp
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmncContext * const c = avctx->priv_data;
    uint8_t *outptr;
    const uint8_t *src = buf;
    int dx, dy, w, h, depth, enc, chunks, res, size_left;

    c->pic.reference = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, &c->pic) < 0){
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    c->pic.key_frame = 0;
    c->pic.pict_type = AV_PICTURE_TYPE_P;

    //restore screen after cursor
    if(c->screendta) {
        int i;
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
        }
    }
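    /* rectangle list, VNC FramebufferUpdate style: skip the two-byte
     * message header, then read the 16-bit count of rectangles (chunks) */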
    src += 2;
    chunks = AV_RB16(src); src += 2;
    while(chunks--) {
        dx = AV_RB16(src); src += 2;
        dy = AV_RB16(src); src += 2;
        w  = AV_RB16(src); src += 2;
        h  = AV_RB16(src); src += 2;
        enc = AV_RB32(src); src += 4;
        outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
        size_left = buf_size - (src - buf);
        switch(enc) {
        case MAGIC_WMVd: // cursor
            if(size_left < 2 + w * h * c->bpp2 * 2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", 2 + w * h * c->bpp2 * 2, size_left);
                return -1;
            }
            src += 2;
            c->cur_w = w;
            c->cur_h = h;
            c->cur_hx = dx;
            c->cur_hy = dy;
            if((c->cur_hx > c->cur_w) || (c->cur_hy > c->cur_h)) {
                av_log(avctx, AV_LOG_ERROR, "Cursor hot spot is not in image: %ix%i of %ix%i cursor size\n", c->cur_hx, c->cur_hy, c->cur_w, c->cur_h);
                c->cur_hx = c->cur_hy = 0;
            }
            c->curbits = av_realloc(c->curbits, c->cur_w * c->cur_h * c->bpp2);
            c->curmask = av_realloc(c->curmask, c->cur_w * c->cur_h * c->bpp2);
            c->screendta = av_realloc(c->screendta, c->cur_w * c->cur_h * c->bpp2);
            load_cursor(c, src);
            src += w * h * c->bpp2 * 2;
            break;
        case MAGIC_WMVe: // unknown
            src += 2;
            break;
        case MAGIC_WMVf: // update cursor position
            c->cur_x = dx - c->cur_hx;
            c->cur_y = dy - c->cur_hy;
            break;
        case MAGIC_WMVg: // unknown
            src += 10;
            break;
        case MAGIC_WMVh: // unknown
            src += 4;
            break;
        case MAGIC_WMVi: // ServerInitialization struct
            c->pic.key_frame = 1;
            c->pic.pict_type = AV_PICTURE_TYPE_I;
            depth = *src++;
            if(depth != c->bpp) {
                av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
            }
            src++;
            c->bigendian = *src++;
            if(c->bigendian & (~1)) {
                av_log(avctx, AV_LOG_INFO, "Invalid header: bigendian flag = %i\n", c->bigendian);
                return -1;
            }
            //skip the rest of pixel format data
            src += 13;
            break;
        case MAGIC_WMVj: // unknown
            src += 2;
            break;
        case 0x00000000: // raw rectangle data
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            if(size_left < w * h * c->bpp2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", w * h * c->bpp2, size_left);
                return -1;
            }
            paint_raw(outptr, w, h, src, c->bpp2, c->bigendian, c->pic.linesize[0]);
            src += w * h * c->bpp2;
            break;
        case 0x00000005: // HexTile encoded rectangle
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            res = decode_hextile(c, outptr, src, size_left, w, h, c->pic.linesize[0]);
            if(res < 0)
                return -1;
            src += res;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported block type 0x%08X\n", enc);
            chunks = 0; // leave chunks decoding loop
        }
    }
    if(c->screendta){
        int i;
        //save screen data before painting cursor
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
            outptr = c->pic.data[0];
            put_cursor(outptr, c->pic.linesize[0], c, c->cur_x, c->cur_y);
        }
    }
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
Example #15
static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt,
                     uint8_t **bufptr, int len)
{
    uint8_t* buf = bufptr ? *bufptr : NULL;
    int ret, flags = 0;
    uint32_t timestamp;
    int rv= 0;

    if (!buf) {
        /* If parsing of the previous packet actually returned 0 or an error,
         * there's nothing more to be parsed from that packet, but we may have
         * indicated that we can return the next enqueued packet. */
        if (s->prev_ret <= 0)
            return rtp_parse_queued_packet(s, pkt);
        /* return the next packets, if any */
        if(s->st && s->parse_packet) {
            /* timestamp should be overwritten by parse_packet, if not,
             * the packet is left with pts == AV_NOPTS_VALUE */
            timestamp = RTP_NOTS_VALUE;
            rv= s->parse_packet(s->ic, s->dynamic_protocol_context,
                                s->st, pkt, &timestamp, NULL, 0, flags);
            finalize_packet(s, pkt, timestamp);
            return rv;
        } else {
            // TODO: Move to a dynamic packet handler (like above)
            if (s->read_buf_index >= s->read_buf_size)
                return AVERROR(EAGAIN);
            ret = ff_mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
                                      s->read_buf_size - s->read_buf_index);
            if (ret < 0)
                return AVERROR(EAGAIN);
            s->read_buf_index += ret;
            if (s->read_buf_index < s->read_buf_size)
                return 1;
            else
                return 0;
        }
    }

    if (len < 12)
        return -1;

    if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
        return -1;
    if (RTP_PT_IS_RTCP(buf[1])) {
        return rtcp_parse_packet(s, buf, len);
    }

    if ((s->seq == 0 && !s->queue) || s->queue_size <= 1) {
        /* First packet, or no reordering */
        return rtp_parse_packet_internal(s, pkt, buf, len);
    } else {
        uint16_t seq = AV_RB16(buf + 2);
        int16_t diff = seq - s->seq;
        if (diff < 0) {
            /* Packet older than the previously emitted one, drop */
            av_log(s->st ? s->st->codec : NULL, AV_LOG_WARNING,
                   "RTP: dropping old packet received too late\n");
            return -1;
        } else if (diff <= 1) {
            /* Correct packet */
            rv = rtp_parse_packet_internal(s, pkt, buf, len);
            return rv;
        } else {
            /* Still missing some packet, enqueue this one. */
            enqueue_packet(s, buf, len);
            *bufptr = NULL;
            /* Return the first enqueued packet if the queue is full,
             * even if we're missing something */
            if (s->queue_len >= s->queue_size)
                return rtp_parse_queued_packet(s, pkt);
            return -1;
        }
    }
}
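A note on the reordering branch above: the expression int16_t diff = seq - s->seq works across the 16-bit sequence-number wrap because the subtraction is truncated to 16 bits and then reinterpreted as signed, so 0x0001 arriving after 0xFFFE still reads as a small positive step. A minimal standalone sketch of the idea (plain C, no FFmpeg headers):

#include <stdint.h>
#include <stdio.h>

/* Signed 16-bit difference between two RTP sequence numbers.
 * The values are reduced mod 2^16, so a wrap from 0xFFFE to 0x0001
 * still shows up as a small positive step. */
static int16_t seq_diff(uint16_t newer, uint16_t older)
{
    return (int16_t)(newer - older);
}

int main(void)
{
    printf("%d\n", seq_diff(0x0001, 0xFFFE)); /*  3: in order, across the wrap */
    printf("%d\n", seq_diff(0xFFFE, 0x0001)); /* -3: old packet, would be dropped */
    printf("%d\n", seq_diff(0x0005, 0x0001)); /*  4: gap, packet would be queued */
    return 0;
}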
Example #16
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data;
    unsigned ts;
    int size= pkt->size;
    uint8_t *data= NULL;
    int flags, flags_size;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if(enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F ||
       enc->codec_id == CODEC_ID_AAC)
        flags_size= 2;
    else if(enc->codec_id == CODEC_ID_H264 || enc->codec_id == CODEC_ID_MPEG4)
        flags_size= 5;
    else
        flags_size= 1;

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        avio_w8(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if(flags == 0) {
            av_log(enc, AV_LOG_ERROR, "video codec %s not compatible with flv\n", avcodec_get_name(enc->codec_id));
            return -1;
        }

        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    } else if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
        flags = get_audio_flags(enc);

        assert(size);

        avio_w8(pb, FLV_TAG_TYPE_AUDIO);
    } else {
        // In-band flv metadata ("scriptdata")
        assert(enc->codec_type == AVMEDIA_TYPE_DATA);
        avio_w8(pb, FLV_TAG_TYPE_META);
        flags_size = 0;
        flags = 0;
    }

    if (enc->codec_id == CODEC_ID_H264 || enc->codec_id == CODEC_ID_MPEG4) {
        /* check if extradata looks like MP4-formatted */
        if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) {
            if (ff_avc_parse_nal_units_buf(pkt->data, &data, &size) < 0)
                return -1;
        }
    } else if (enc->codec_id == CODEC_ID_AAC && pkt->size > 2 &&
               (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
        av_log(s, AV_LOG_ERROR, "malformated aac bitstream, use -absf aac_adtstoasc\n");
        return -1;
    }
    if (flv->delay == AV_NOPTS_VALUE)
        flv->delay = -pkt->dts;
    if (pkt->dts < -flv->delay) {
        av_log(s, AV_LOG_WARNING, "Packets are not in the proper order with "
                                  "respect to DTS\n");
        return AVERROR(EINVAL);
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts

    /* check Speex packet duration */
    if (enc->codec_id == CODEC_ID_SPEEX && ts - sc->last_ts > 160) {
        av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than "
                                  "8 frames per packet. Adobe Flash "
                                  "Player cannot handle this!\n");
    }

    if (sc->last_ts < ts)
        sc->last_ts = ts;

    avio_wb24(pb,size + flags_size);
    avio_wb24(pb,ts);
    avio_w8(pb,(ts >> 24) & 0x7F); // timestamps are 32bits _signed_
    avio_wb24(pb,flv->reserved);

    if(flags_size)
        avio_w8(pb,flags);

    if (enc->codec_id == CODEC_ID_VP6)
        avio_w8(pb,0);
    if (enc->codec_id == CODEC_ID_VP6F)
        avio_w8(pb, enc->extradata_size ? enc->extradata[0] : 0);
    else if (enc->codec_id == CODEC_ID_AAC)
        avio_w8(pb,1); // AAC raw
    else if (enc->codec_id == CODEC_ID_H264 || enc->codec_id == CODEC_ID_MPEG4) {
        avio_w8(pb,1); // AVC NALU
        avio_wb24(pb,pkt->pts - pkt->dts);
    }

    avio_write(pb, data ? data : pkt->data, size);

    avio_wb32(pb,size+flags_size+11); // previous tag size
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);

    avio_flush(pb);

    av_free(data);

    return pb->error;
}
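For reference, the avio_w8()/avio_wb24() sequence above emits the standard 11-byte FLV tag header (TagType, DataSize, Timestamp, TimestampExtended, StreamID which is always zero), followed by the payload and a 32-bit PreviousTagSize equal to DataSize + 11. A buffer-based sketch of that layout, independent of libavformat (the helper name is illustrative):

#include <stdint.h>

/* Serialize the 11-byte FLV tag header into buf (caller provides 11 bytes).
 * type: 8 = audio, 9 = video, 18 = script data.
 * data_size: payload size in bytes (flag byte(s) + body).
 * ts: timestamp in milliseconds; low 24 bits first, bits 24..31 after. */
static void write_flv_tag_header(uint8_t *buf, uint8_t type,
                                 uint32_t data_size, uint32_t ts)
{
    buf[0]  = type;
    buf[1]  = (data_size >> 16) & 0xFF;  /* DataSize, big-endian 24 bit */
    buf[2]  = (data_size >>  8) & 0xFF;
    buf[3]  =  data_size        & 0xFF;
    buf[4]  = (ts >> 16) & 0xFF;         /* Timestamp, low 24 bits */
    buf[5]  = (ts >>  8) & 0xFF;
    buf[6]  =  ts        & 0xFF;
    buf[7]  = (ts >> 24) & 0x7F;         /* TimestampExtended, masked as above */
    buf[8]  = 0;                         /* StreamID, always 0 */
    buf[9]  = 0;
    buf[10] = 0;
}
/* The PreviousTagSize field written after the payload is data_size + 11. */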
Example #17
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *qtables = NULL;
    int nb_qtables = 0;
    uint8_t type;
    uint8_t w, h;
    uint8_t *p;
    int off = 0; /* fragment offset of the current JPEG frame */
    int len;
    int i;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* convert video pixel dimensions from pixels to blocks */
    w = s1->streams[0]->codec->width  >> 3;
    h = s1->streams[0]->codec->height >> 3;

    /* get the pixel format type or fail */
    if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ422P ||
        (s1->streams[0]->codec->color_range == AVCOL_RANGE_JPEG &&
         s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUV422P)) {
        type = 0;
    } else if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ420P ||
               (s1->streams[0]->codec->color_range == AVCOL_RANGE_JPEG &&
                s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUV420P)) {
        type = 1;
    } else {
        av_log(s1, AV_LOG_ERROR, "Unsupported pixel format\n");
        return;
    }

    /* preparse the header for getting some infos */
    for (i = 0; i < size; i++) {
        if (buf[i] != 0xff)
            continue;

        if (buf[i + 1] == DQT) {
            if (buf[i + 4])
                av_log(s1, AV_LOG_WARNING,
                       "Only 8-bit precision is supported.\n");

            /* a quantization table is 64 bytes long */
            nb_qtables = AV_RB16(&buf[i + 2]) / 65;
            if (i + 4 + nb_qtables * 65 > size) {
                av_log(s1, AV_LOG_ERROR, "Too short JPEG header. Aborted!\n");
                return;
            }

            qtables = &buf[i + 4];
        } else if (buf[i + 1] == SOF0) {
            if (buf[i + 14] != 17 || buf[i + 17] != 17) {
                av_log(s1, AV_LOG_ERROR,
                       "Only 1x1 chroma blocks are supported. Aborted!\n");
                return;
            }
        } else if (buf[i + 1] == SOS) {
            /* SOS is last marker in the header */
            i += AV_RB16(&buf[i + 2]) + 2;
            break;
        }
    }

    /* skip JPEG header */
    buf  += i;
    size -= i;

    for (i = size - 2; i >= 0; i--) {
        if (buf[i] == 0xff && buf[i + 1] == EOI) {
            /* Remove the EOI marker */
            size = i;
            break;
        }
    }

    p = s->buf_ptr;
    while (size > 0) {
        int hdr_size = 8;

        if (off == 0 && nb_qtables)
            hdr_size += 4 + 64 * nb_qtables;

        /* payload max in one packet */
        len = FFMIN(size, s->max_payload_size - hdr_size);

        /* set main header */
        bytestream_put_byte(&p, 0);
        bytestream_put_be24(&p, off);
        bytestream_put_byte(&p, type);
        bytestream_put_byte(&p, 255);
        bytestream_put_byte(&p, w);
        bytestream_put_byte(&p, h);

        if (off == 0 && nb_qtables) {
            /* set quantization tables header */
            bytestream_put_byte(&p, 0);
            bytestream_put_byte(&p, 0);
            bytestream_put_be16(&p, 64 * nb_qtables);

            for (i = 0; i < nb_qtables; i++)
                bytestream_put_buffer(&p, &qtables[65 * i + 1], 64);
        }

        /* copy payload data */
        memcpy(p, buf, len);

        /* marker bit is last packet in frame */
        ff_rtp_send_data(s1, s->buf, len + hdr_size, size == len);

        buf  += len;
        size -= len;
        off  += len;
        p     = s->buf;
    }
}
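The six bytestream_put_* calls above build the 8-byte JPEG/RTP main header from RFC 2435: a type-specific byte, the 24-bit fragment offset, the type (0 for 4:2:2, 1 for 4:2:0 as selected above), Q = 255 meaning in-band quantization tables, and width/height in 8-pixel blocks; the first fragment additionally carries a 4-byte quantization-table header. A sketch of just the main header, with no libavformat dependencies (the helper name is illustrative):

#include <stdint.h>

/* RFC 2435 main JPEG/RTP header, 8 bytes. Returns the advanced pointer. */
static uint8_t *put_jpeg_rtp_main_header(uint8_t *p, uint32_t off,
                                         uint8_t type, uint8_t q,
                                         uint8_t w, uint8_t h)
{
    *p++ = 0;                  /* type-specific */
    *p++ = (off >> 16) & 0xFF; /* fragment offset, big-endian 24 bit */
    *p++ = (off >>  8) & 0xFF;
    *p++ =  off        & 0xFF;
    *p++ = type;               /* 0 = 4:2:2, 1 = 4:2:0 */
    *p++ = q;                  /* 255: tables follow in the first fragment */
    *p++ = w;                  /* width  / 8 */
    *p++ = h;                  /* height / 8 */
    return p;
}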
Example #18
static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int parse_filter_info = 0;
    int coeff_offset = 0;
    int vrt_shift = 0;
    int sub_version;
    int rows, cols;
    int res = 0;
    int ret;
    int separated_coeff = buf[0] & 1;

    s->frames[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
    ff_vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);

    if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
        sub_version = buf[1] >> 3;
        if (sub_version > 8)
            return AVERROR_INVALIDDATA;
        s->filter_header = buf[1] & 0x06;
        if (buf[1] & 1) {
            avpriv_report_missing_feature(s->avctx, "Interlacing");
            return AVERROR_PATCHWELCOME;
        }
        if (separated_coeff || !s->filter_header) {
            coeff_offset = AV_RB16(buf+2) - 2;
            buf += 2;
            buf_size -= 2;
        }

        rows = buf[2];  /* number of stored macroblock rows */
        cols = buf[3];  /* number of stored macroblock cols */
        /* buf[4] is number of displayed macroblock rows */
        /* buf[5] is number of displayed macroblock cols */
        if (!rows || !cols) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4);
            return AVERROR_INVALIDDATA;
        }

        if (!s->macroblocks || /* first frame */
            16*cols != s->avctx->coded_width ||
            16*rows != s->avctx->coded_height) {
            if (s->avctx->extradata_size == 0 &&
                FFALIGN(s->avctx->width,  16) == 16 * cols &&
                FFALIGN(s->avctx->height, 16) == 16 * rows) {
                // We assume this is properly signalled container cropping,
                // in an F4V file. Just set the coded_width/height, don't
                // touch the cropped ones.
                s->avctx->coded_width  = 16 * cols;
                s->avctx->coded_height = 16 * rows;
            } else {
                ret = ff_set_dimensions(s->avctx, 16 * cols, 16 * rows);
                if (ret < 0)
                    return ret;

                if (s->avctx->extradata_size == 1) {
                    s->avctx->width  -= s->avctx->extradata[0] >> 4;
                    s->avctx->height -= s->avctx->extradata[0] & 0x0F;
                }
            }
            res = VP56_SIZE_CHANGE;
        }
Example #19
int main(int argc, char *argv[]) {
  av_register_all();

  media_stream_params_t params;
  params.height_video = height;
  params.width_video = width;
  params.video_fps = fps;
  params.bit_stream = bit_rate;
  params.codec_id = AV_CODEC_ID_H264;

  media_stream_t* ostream = alloc_video_stream(outfilename, &params, 0);
  if(!ostream){
    return EXIT_FAILURE;
  }

  char errbuf[PCAP_ERRBUF_SIZE];
  pcap_t* pcap = pcap_open_offline_with_tstamp_precision(infilename, PCAP_TSTAMP_PRECISION_NANO, errbuf);
  if (!pcap) {
    fprintf(stderr, "error reading pcap file: %s\n", errbuf);
    free_video_stream(ostream);
    return EXIT_FAILURE;
  }

  struct pcap_pkthdr header;
  const u_char *packet;
  while ((packet = pcap_next(pcap, &header)) != NULL) {
    packet += sizeof(struct vgem_hdr);
    bpf_u_int32 packet_len = header.caplen - sizeof(struct vgem_hdr);
    if (packet_len < sizeof(struct ether_header)) {
      pcap_close(pcap);
      free_video_stream(ostream);
      return EXIT_FAILURE;
    }

    struct ether_header* ethernet_header = (struct ether_header*)packet;
    uint16_t ht = ntohs(ethernet_header->ether_type);
    if (ht != ETHERTYPE_IP) {
      continue;
    }

    /* Skip over the Ethernet header. (14)*/
    packet += sizeof(struct ether_header);
    packet_len -= sizeof(struct ether_header);

    if (packet_len < sizeof(struct ip)) {
      pcap_close(pcap);
      free_video_stream(ostream);
      return EXIT_FAILURE;
    }

    struct iphdr* ip = (struct iphdr*) packet;
    if (ip->protocol != IPPROTO_UDP && ip->protocol != IPPROTO_TCP) {
      continue;
    }

    unsigned int IP_header_length = ip->ihl * 4;  /* ip_hl is in 4-byte words */
    if (packet_len < IP_header_length) { /* didn't capture the full IP header including options */
      pcap_close(pcap);
      free_video_stream(ostream);
      return EXIT_FAILURE;
    }

    packet += IP_header_length;
    packet_len -= IP_header_length;

    if (ip->protocol == IPPROTO_UDP) {
        if (packet_len < sizeof(struct udphdr)) {
          pcap_close(pcap);
          free_video_stream(ostream);
          return EXIT_FAILURE;
        }

        packet += sizeof(struct udphdr);
        packet_len -= sizeof(struct udphdr);

        struct rtp_hdr* rtp = (struct rtp_hdr*)packet;
        if (packet_len < sizeof(struct rtp_hdr)) {
          pcap_close(pcap);
          free_video_stream(ostream);
          return EXIT_FAILURE;
        }

        // rtp payload
        packet += sizeof(struct rtp_hdr);
        packet_len -= sizeof(struct rtp_hdr);
        if (packet_len <= 2) {
          continue;
        }

        uint8_t nal = packet[0];
        uint8_t fragment_type = (nal & 0x1F);

        if (fragment_type >= 1 && fragment_type <= 23) {
          uint8_t* nal_data = NULL;
          size_t size_nal = make_nal_frame_header(packet, packet_len, nal_header, sizeof(nal_header), &nal_data);
          media_stream_write_video_frame(ostream, nal_data, size_nal);
          free(nal_data);
        } else if(fragment_type == 24) {
            packet++;
            packet_len--;
            // first we are going to figure out the total size....
            {
              int total_length= 0;
              uint8_t* dst = NULL;

              int pass;
              for(pass = 0; pass < 2; pass++) {
                  const uint8_t* src = packet;
                  int src_len = packet_len;

                  do {
                      uint16_t nal_size = AV_RB16(src); // this is going to be a problem if unaligned (can it be?)

                      // consume the length of the aggregate...
                      src += 2;
                      src_len -= 2;

                      if (nal_size <= src_len) {
                          if(pass==0) {
                              // counting...
                              total_length+= sizeof(nal_header)+nal_size;
                          } else {
                              // copying
                              assert(dst);
                              memcpy(dst, nal_header, sizeof(nal_header));
                              dst += sizeof(nal_header);
                              memcpy(dst, src, nal_size);
                              dst += nal_size;
                          }
                      } else {
                          av_log(NULL, AV_LOG_ERROR,
                                 "nal size exceeds length: %d %d\n", nal_size, src_len);
                      }

                      // eat what we handled...
                      src += nal_size;
                      src_len -= nal_size;

                      if (src_len < 0)
                          av_log(NULL, AV_LOG_ERROR,
                                 "Consumed more bytes than we got! (%d)\n", src_len);
                  } while (src_len > 2);      // because there could be rtp padding..

                  if (pass == 0) {
                    dst = (uint8_t*)calloc(total_length, sizeof(uint8_t));
                  }
              }

              // The original example stops short here; presumably the
              // aggregated access units are then emitted and the temporary
              // buffer is released, e.g.:
              if (dst) {
                  media_stream_write_video_frame(ostream, dst, total_length);
                  free(dst);
              }
          }
        } else if (fragment_type == 28 || fragment_type == 29) {
          packet++;
          packet_len--;

          uint8_t fu_indicator = nal;
          uint8_t fu_header = *packet;   // read the fu_header.
          uint8_t start_bit = fu_header >> 7;
          uint8_t end_bit = (fu_header & 0x40) >> 6;
          uint8_t nal_type = (fu_header & 0x1f);
          uint8_t reconstructed_nal = fu_indicator & (0xe0);  // the original nal forbidden bit and NRI are stored in this packet's nal;
          reconstructed_nal |= nal_type;

          packet++;
          packet_len--;

          if (fragment_type == 29) {
            packet = packet + 2;
            packet_len -= 2;
          }

          CHECK(packet_len > 0);

          if (start_bit) {
            size_t size_nal = sizeof(nal_header) + sizeof(nal) + packet_len;
            uint8_t* nal_data = (uint8_t*)calloc(size_nal, sizeof(uint8_t));
            memcpy(nal_data, nal_header, sizeof(nal_header));
            nal_data[sizeof(nal_header)]= reconstructed_nal;
            memcpy(nal_data + sizeof(nal_header) + sizeof(nal), packet, packet_len);
            media_stream_write_video_frame(ostream, nal_data, size_nal);
            free(nal_data);
          } else {
            media_stream_write_video_frame(ostream, packet, packet_len);
          }
        } else {
          NOTREACHED();
        }

        // http://stackoverflow.com/questions/3493742/problem-to-decode-h264-video-over-rtp-with-ffmpeg-libavcodec
        // http://stackoverflow.com/questions/1957427/detect-mpeg4-h264-i-frame-idr-in-rtp-stream
    } else if (ip->protocol == IPPROTO_TCP) {
Example #20
static int h263_handle_packet(AVFormatContext *ctx,
                              PayloadContext *data,
                              AVStream *st,
                              AVPacket * pkt,
                              uint32_t * timestamp,
                              const uint8_t * buf,
                              int len, int flags)
{
    uint8_t *ptr;
    uint16_t header;
    int startcode, vrc, picture_header;

    if (len < 2) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }

    /* Decode the 16 bit H.263+ payload header, as described in section
     * 5.1 of RFC 4629. The fields of this header are:
     * - 5 reserved bits, should be ignored.
     * - One bit (P, startcode), indicating a picture start, picture segment
     *   start or video sequence end. If set, two zero bytes should be
     *   prepended to the payload.
     * - One bit (V, vrc), indicating the presence of an 8 bit Video
     *   Redundancy Coding field after this 16 bit header.
     * - 6 bits (PLEN, picture_header), the length (in bytes) of an extra
     *   picture header, following the VRC field.
     * - 3 bits (PEBIT), the number of bits to ignore of the last byte
     *   of the extra picture header. (Not used at the moment.)
     */
    header = AV_RB16(buf);
    startcode      = (header & 0x0400) >> 9;
    vrc            =  header & 0x0200;
    picture_header = (header & 0x01f8) >> 3;
    buf += 2;
    len -= 2;

    if (vrc) {
        /* Skip VRC header if present, not used at the moment. */
        buf += 1;
        len -= 1;
    }
    if (picture_header) {
        /* Skip extra picture header if present, not used at the moment. */
        buf += picture_header;
        len -= picture_header;
    }

    if (len < 0) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }

    if (av_new_packet(pkt, len + startcode)) {
        av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }
    pkt->stream_index = st->index;
    ptr = pkt->data;

    if (startcode) {
        *ptr++ = 0;
        *ptr++ = 0;
    }
    memcpy(ptr, buf, len);

    return 0;
}
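To make the bit layout described in the comment above concrete: P sits at bit 10 of the big-endian 16-bit header, V at bit 9, PLEN in bits 3..8 and PEBIT in the low 3 bits; the function shifts P right by 9 rather than 10 so that startcode directly equals the number of zero bytes to prepend. A standalone sketch decoding one sample header value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t header = 0x0448;          /* example: P set, PLEN = 9 */

    int p     = (header >> 10) & 1;    /* picture start: prepend 00 00 */
    int v     = (header >>  9) & 1;    /* an 8-bit VRC byte follows */
    int plen  = (header >>  3) & 0x3F; /* extra picture header length */
    int pebit =  header        & 0x7;  /* ignored bits in its last byte */

    printf("P=%d V=%d PLEN=%d PEBIT=%d\n", p, v, plen, pebit);
    return 0;
}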
Example #21
static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *pkt)
{
    CDXLVideoContext *c = avctx->priv_data;
    AVFrame * const p = data;
    int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
    const uint8_t *buf = pkt->data;

    if (buf_size < 32)
        return AVERROR_INVALIDDATA;
    encoding        = buf[1] & 7;
    c->format       = buf[1] & 0xE0;
    w               = AV_RB16(&buf[14]);
    h               = AV_RB16(&buf[16]);
    c->bpp          = buf[19];
    c->palette_size = AV_RB16(&buf[20]);
    c->palette      = buf + 32;
    c->video        = c->palette + c->palette_size;
    c->video_size   = buf_size - c->palette_size - 32;

    if (c->palette_size > 512)
        return AVERROR_INVALIDDATA;
    if (buf_size < c->palette_size + 32)
        return AVERROR_INVALIDDATA;
    if (c->bpp < 1)
        return AVERROR_INVALIDDATA;
    if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
        avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
        return ret;

    aligned_width = FFALIGN(c->avctx->width, 16);
    c->padded_bits  = aligned_width - c->avctx->width;
    if (c->video_size < aligned_width * avctx->height * c->bpp / 8)
        return AVERROR_INVALIDDATA;
    if (!encoding && c->palette_size && c->bpp <= 8) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
    } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
        if (c->palette_size != (1 << (c->bpp - 1)))
            return AVERROR_INVALIDDATA;
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
    } else {
        avpriv_request_sample(avctx, "Encoding %d and bpp %d",
                              encoding, c->bpp);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;
    p->pict_type = AV_PICTURE_TYPE_I;

    if (encoding) {
        av_fast_padded_malloc(&c->new_video, &c->new_video_size,
                              h * w + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!c->new_video)
            return AVERROR(ENOMEM);
        if (c->bpp == 8)
            cdxl_decode_ham8(c, p);
        else
            cdxl_decode_ham6(c, p);
    } else {
        cdxl_decode_rgb(c, p);
    }
    *got_frame = 1;

    return buf_size;
}
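The fixed offsets read at the top of the function come from the 32-byte CDXL chunk header. A small sketch collecting only the fields this decoder uses, with the big-endian reads spelled out (struct and helper names are illustrative):

#include <stdint.h>

/* CDXL chunk header fields used above; all multi-byte values are big-endian.
 * Palette data starts at byte 32, video data right after the palette. */
typedef struct CDXLHeaderInfo {
    uint8_t  encoding;      /* byte 1, low 3 bits: 0 = raw/palette, 1 = HAM */
    uint8_t  format;        /* byte 1, high bits: bit-planar or bit-line    */
    uint16_t width;         /* bytes 14..15 */
    uint16_t height;        /* bytes 16..17 */
    uint8_t  bpp;           /* byte 19 */
    uint16_t palette_size;  /* bytes 20..21 */
} CDXLHeaderInfo;

static CDXLHeaderInfo parse_cdxl_header(const uint8_t *b)
{
    CDXLHeaderInfo h;
    h.encoding     =  b[1] & 7;
    h.format       =  b[1] & 0xE0;
    h.width        = (b[14] << 8) | b[15];
    h.height       = (b[16] << 8) | b[17];
    h.bpp          =  b[19];
    h.palette_size = (b[20] << 8) | b[21];
    return h;
}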
Example #22
static int film_read_header(AVFormatContext *s)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i, ret;
    unsigned int data_offset;
    unsigned int audio_frame_counter;
    unsigned int video_frame_counter;

    film->sample_table = NULL;

    /* load the main FILM header */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (avio_read(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = AV_CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (avio_read(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (scratch[23] == 2 && film->audio_channels > 0)
            film->audio_type = AV_CODEC_ID_ADPCM_ADX;
        else if (film->audio_channels > 0) {
            if (film->audio_bits == 8)
                film->audio_type = AV_CODEC_ID_PCM_S8_PLANAR;
            else if (film->audio_bits == 16)
                film->audio_type = AV_CODEC_ID_PCM_S16BE_PLANAR;
            else
                film->audio_type = AV_CODEC_ID_NONE;
        } else
            film->audio_type = AV_CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = AV_CODEC_ID_CINEPAK;
    } else if (AV_RB32(&scratch[8]) == RAW_TAG) {
        film->video_type = AV_CODEC_ID_RAWVIDEO;
    } else {
        film->video_type = AV_CODEC_ID_NONE;
    }

    /* initialize the decoder streams */
    if (film->video_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codecpar->codec_id = film->video_type;
        st->codecpar->codec_tag = 0;  /* no fourcc */
        st->codecpar->width = AV_RB32(&scratch[16]);
        st->codecpar->height = AV_RB32(&scratch[12]);

        if (film->video_type == AV_CODEC_ID_RAWVIDEO) {
            if (scratch[20] == 24) {
                st->codecpar->format = AV_PIX_FMT_RGB24;
            } else {
                av_log(s, AV_LOG_ERROR, "raw video is using unhandled %dbpp\n", scratch[20]);
                return -1;
            }
        }
    }

    if (film->audio_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codecpar->codec_id = film->audio_type;
        st->codecpar->codec_tag = 1;
        st->codecpar->channels = film->audio_channels;
        st->codecpar->sample_rate = film->audio_samplerate;

        if (film->audio_type == AV_CODEC_ID_ADPCM_ADX) {
            st->codecpar->bits_per_coded_sample = 18 * 8 / 32;
            st->codecpar->block_align = st->codecpar->channels * 18;
            st->need_parsing = AVSTREAM_PARSE_FULL;
        } else {
            st->codecpar->bits_per_coded_sample = film->audio_bits;
            st->codecpar->block_align = st->codecpar->channels *
                st->codecpar->bits_per_coded_sample / 8;
        }

        st->codecpar->bit_rate = st->codecpar->channels * st->codecpar->sample_rate *
            st->codecpar->bits_per_coded_sample;
    }

    /* load the sample table */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc_array(film->sample_count, sizeof(film_sample));
    if (!film->sample_table)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            avpriv_set_pts_info(st, 33, 1, film->base_clock);
        else
            avpriv_set_pts_info(st, 64, 1, film->audio_samplerate);
    }

    audio_frame_counter = video_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (avio_read(pb, scratch, 16) != 16) {
            ret = AVERROR(EIO);
            goto fail;
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (film->sample_table[i].sample_size > INT_MAX / 4) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;

            if (film->audio_type == AV_CODEC_ID_ADPCM_ADX)
                audio_frame_counter += (film->sample_table[i].sample_size * 32 /
                    (18 * film->audio_channels));
            else if (film->audio_type != AV_CODEC_ID_NONE)
                audio_frame_counter += (film->sample_table[i].sample_size /
                    (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : AVINDEX_KEYFRAME;
            video_frame_counter++;
            if (film->video_type)
                av_add_index_entry(s->streams[film->video_stream_index],
                                   film->sample_table[i].sample_offset,
                                   film->sample_table[i].pts,
                                   film->sample_table[i].sample_size, 0,
                                   film->sample_table[i].keyframe);
        }
    }

    if (film->audio_type)
        s->streams[film->audio_stream_index]->duration = audio_frame_counter;

    if (film->video_type)
        s->streams[film->video_stream_index]->duration = video_frame_counter;

    film->current_sample = 0;

    return 0;
fail:
    film_read_close(s);
    return ret;
}
Example #23
static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
                                   AVCodecContext *avctx, const char *args,
                                   uint8_t  **poutbuf, int *poutbuf_size,
                                   const uint8_t *buf, int      buf_size,
                                   int keyframe) {
    H264BSFContext *ctx = bsfc->priv_data;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size = 0;
    const uint8_t *buf_end = buf + buf_size;

    /* nothing to filter */
    if (!avctx->extradata || avctx->extradata_size < 6) {
        *poutbuf = (uint8_t*) buf;
        *poutbuf_size = buf_size;
        return 0;
    }

    /* retrieve sps and pps NAL units from extradata */
    if (!ctx->extradata_parsed) {
        uint16_t unit_size;
        uint64_t total_size = 0;
        uint8_t *out = NULL, unit_nb, sps_done = 0, sps_seen = 0, pps_seen = 0;
        const uint8_t *extradata = avctx->extradata+4;
        static const uint8_t nalu_header[4] = {0, 0, 0, 1};

        /* retrieve length coded size */
        ctx->length_size = (*extradata++ & 0x3) + 1;
        if (ctx->length_size == 3)
            return AVERROR(EINVAL);

        /* retrieve sps and pps unit(s) */
        unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
        if (!unit_nb) {
            unit_nb = *extradata++; /* number of pps unit(s) */
            sps_done++;

            if (unit_nb)
                pps_seen = 1;
        } else {
            sps_seen = 1;
        }

        while (unit_nb--) {
            void *tmp;

            unit_size = AV_RB16(extradata);
            total_size += unit_size+4;
            if (total_size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE ||
                extradata+2+unit_size > avctx->extradata+avctx->extradata_size) {
                av_free(out);
                return AVERROR(EINVAL);
            }
            tmp = av_realloc(out, total_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!tmp) {
                av_free(out);
                return AVERROR(ENOMEM);
            }
            out = tmp;
            memcpy(out+total_size-unit_size-4, nalu_header, 4);
            memcpy(out+total_size-unit_size,   extradata+2, unit_size);
            extradata += 2+unit_size;

            if (!unit_nb && !sps_done++) {
                unit_nb = *extradata++; /* number of pps unit(s) */
                if (unit_nb)
                    pps_seen = 1;
            }
        }

        if(out)
            memset(out + total_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

        if (!sps_seen)
            av_log(avctx, AV_LOG_WARNING, "Warning: SPS NALU missing or invalid. The resulting stream may not play.\n");
        if (!pps_seen)
            av_log(avctx, AV_LOG_WARNING, "Warning: PPS NALU missing or invalid. The resulting stream may not play.\n");

        av_free(avctx->extradata);
        avctx->extradata      = out;
        avctx->extradata_size = total_size;
        ctx->first_idr        = 1;
        ctx->extradata_parsed = 1;
    }

    *poutbuf_size = 0;
    *poutbuf = NULL;
    do {
        if (buf + ctx->length_size > buf_end)
            goto fail;

        if (ctx->length_size == 1) {
            nal_size = buf[0];
        } else if (ctx->length_size == 2) {
            nal_size = AV_RB16(buf);
        } else
            nal_size = AV_RB32(buf);

        buf += ctx->length_size;
        unit_type = *buf & 0x1f;

        if (buf + nal_size > buf_end || nal_size < 0)
            goto fail;

        /* prepend only to the first type 5 NAL unit of an IDR picture */
        if (ctx->first_idr && unit_type == 5) {
            if (alloc_and_copy(poutbuf, poutbuf_size,
                               avctx->extradata, avctx->extradata_size,
                               buf, nal_size) < 0)
                goto fail;
            ctx->first_idr = 0;
        } else {
            if (alloc_and_copy(poutbuf, poutbuf_size,
                               NULL, 0,
                               buf, nal_size) < 0)
                goto fail;
            if (!ctx->first_idr && unit_type == 1)
                ctx->first_idr = 1;
        }

        buf += nal_size;
        cumul_size += nal_size + ctx->length_size;
    } while (cumul_size < buf_size);

    return 1;

fail:
    av_freep(poutbuf);
    *poutbuf_size = 0;
    return AVERROR(EINVAL);
}
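The extradata walked above is an ISO/IEC 14496-15 AVCDecoderConfigurationRecord (avcC): five fixed bytes, lengthSizeMinusOne in the low 2 bits of byte 4, the SPS count in the low 5 bits of byte 5, and each SPS/PPS prefixed by a 16-bit size. A standalone sketch that walks the same layout and prints the parameter-set sizes, using plain C instead of the AV_RB16/av_realloc machinery (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

static void dump_avcc(const uint8_t *d, int size)
{
    if (size < 7)
        return;
    int length_size = (d[4] & 0x3) + 1;  /* size of the per-NAL length field */
    int num_sps     =  d[5] & 0x1F;
    int pos         = 6;

    printf("NAL length field: %d bytes, %d SPS\n", length_size, num_sps);
    for (int i = 0; i < num_sps && pos + 2 <= size; i++) {
        int len = (d[pos] << 8) | d[pos + 1];
        printf("  SPS %d: %d bytes\n", i, len);
        pos += 2 + len;
    }
    if (pos < size) {
        int num_pps = d[pos++];
        printf("%d PPS\n", num_pps);
        for (int i = 0; i < num_pps && pos + 2 <= size; i++) {
            int len = (d[pos] << 8) | d[pos + 1];
            printf("  PPS %d: %d bytes\n", i, len);
            pos += 2 + len;
        }
    }
}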
Example #24
static int mxg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    unsigned int size;
    uint8_t *startmarker_ptr, *end, *search_end, marker;
    MXGContext *mxg = s->priv_data;

    while (!url_feof(s->pb) && !s->pb->error) {
        if (mxg->cache_size <= OVERREAD_SIZE) {
            /* update internal buffer */
            ret = mxg_update_cache(s, DEFAULT_PACKET_SIZE + OVERREAD_SIZE);
            if (ret < 0)
                return ret;
        }
        end = mxg->buffer_ptr + mxg->cache_size;

        /* find start marker - 0xff */
        if (mxg->cache_size > OVERREAD_SIZE) {
            search_end = end - OVERREAD_SIZE;
            startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
        } else {
            search_end = end;
            startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
            if (startmarker_ptr >= search_end - 1 ||
                    *(startmarker_ptr + 1) != EOI) break;
        }

        if (startmarker_ptr != search_end) { /* start marker found */
            marker = *(startmarker_ptr + 1);
            mxg->buffer_ptr = startmarker_ptr + 2;
            mxg->cache_size = end - mxg->buffer_ptr;

            if (marker == SOI) {
                mxg->soi_ptr = startmarker_ptr;
            } else if (marker == EOI) {
                if (!mxg->soi_ptr) {
                    av_log(s, AV_LOG_WARNING, "Found EOI before SOI, skipping\n");
                    continue;
                }

                pkt->pts = pkt->dts = mxg->dts;
                pkt->stream_index = VIDEO_STREAM_INDEX;
                pkt->destruct = NULL;
                pkt->size = mxg->buffer_ptr - mxg->soi_ptr;
                pkt->data = mxg->soi_ptr;

                if (mxg->soi_ptr - mxg->buffer > mxg->cache_size) {
                    if (mxg->cache_size > 0) {
                        memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
                    }

                    mxg->buffer_ptr = mxg->buffer;
                }
                mxg->soi_ptr = 0;

                return pkt->size;
            } else if ( (SOF0 <= marker && marker <= SOF15) ||
                        (SOS  <= marker && marker <= COM) ) {
                /* all other markers that start marker segment also contain
                   length value (see specification for JPEG Annex B.1) */
                size = AV_RB16(mxg->buffer_ptr);
                if (size < 2)
                    return AVERROR(EINVAL);

                if (mxg->cache_size < size) {
                    ret = mxg_update_cache(s, size);
                    if (ret < 0)
                        return ret;
                    startmarker_ptr = mxg->buffer_ptr - 2;
                    mxg->cache_size = 0;
                } else {
                    mxg->cache_size -= size;
                }

                mxg->buffer_ptr += size;

                if (marker == APP13 && size >= 16) { /* audio data */
                    /* time (GMT) of first sample in usec since 1970, little-endian */
                    pkt->pts = pkt->dts = AV_RL64(startmarker_ptr + 8);
                    pkt->stream_index = AUDIO_STREAM_INDEX;
                    pkt->destruct = NULL;
                    pkt->size = size - 14;
                    pkt->data = startmarker_ptr + 16;

                    if (startmarker_ptr - mxg->buffer > mxg->cache_size) {
                        if (mxg->cache_size > 0) {
                            memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
                        }
                        mxg->buffer_ptr = mxg->buffer;
                    }

                    return pkt->size;
                } else if (marker == COM && size >= 18 &&
                           !strncmp(startmarker_ptr + 4, "MXF", 3)) {
                    /* time (GMT) of video frame in usec since 1970, little-endian */
                    mxg->dts = AV_RL64(startmarker_ptr + 12);
                }
            }
        } else {
            /* start marker not found */
            mxg->buffer_ptr = search_end;
            mxg->cache_size = OVERREAD_SIZE;
        }
    }

    return AVERROR_EOF;
}
Example #25
static int sap_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[1500];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_READ,
                     &s->interrupt_callback, NULL);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                  NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb        = &sap->sdp_pb;
    sap->sdp_ctx->interrupt_callback = s->interrupt_callback;
    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->id = i;
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
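The parsing above follows the SAP packet layout from RFC 2974: a flags byte (3-bit version, address type, reserved, message type, encryption and compression bits), an authentication length counted in 32-bit words, a 16-bit message-id hash, a 4- or 16-byte originating address, the authentication data, and then the optional MIME payload type and the SDP text. A small sketch computing where that payload starts, mirroring the arithmetic above (no networking; the helper name is illustrative):

#include <stdint.h>

/* Offset of the payload-type/SDP area inside a SAP packet, or -1 if the
 * header does not fit in len bytes. */
static int sap_payload_offset(const uint8_t *pkt, int len)
{
    if (len < 4)
        return -1;
    if ((pkt[0] & 0xE0) != 0x20)   /* SAP version must be 1 */
        return -1;
    int ipv6     = pkt[0] & 0x10;  /* A bit: origin address is IPv6 */
    int auth_len = pkt[1];         /* authentication data, in 32-bit words */
    int pos      = 4;              /* flags + auth length + 16-bit hash */

    pos += ipv6 ? 16 : 4;          /* originating source address */
    pos += auth_len * 4;           /* authentication data */
    return pos < len ? pos : -1;
}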
Example #26
static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                             int *data_size, AVPacket *pkt)
{
    CDXLVideoContext *c = avctx->priv_data;
    AVFrame * const p = &c->frame;
    int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
    const uint8_t *buf = pkt->data;

    if (buf_size < 32)
        return AVERROR_INVALIDDATA;
    encoding        = buf[1] & 7;
    c->format       = buf[1] & 0xE0;
    w               = AV_RB16(&buf[14]);
    h               = AV_RB16(&buf[16]);
    c->bpp          = buf[19];
    c->palette_size = AV_RB16(&buf[20]);
    c->palette      = buf + 32;
    c->video        = c->palette + c->palette_size;
    c->video_size   = buf_size - c->palette_size - 32;

    if (c->palette_size > 512)
        return AVERROR_INVALIDDATA;
    if (buf_size < c->palette_size + 32)
        return AVERROR_INVALIDDATA;
    if (c->bpp < 1)
        return AVERROR_INVALIDDATA;
    if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
        av_log_ask_for_sample(avctx, "unsupported pixel format: 0x%0x\n", c->format);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
        return ret;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);

    aligned_width = FFALIGN(c->avctx->width, 16);
    c->padded_bits  = aligned_width - c->avctx->width;
    if (c->video_size < aligned_width * avctx->height * c->bpp / 8)
        return AVERROR_INVALIDDATA;
    if (!encoding && c->palette_size && c->bpp <= 8) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
    } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
        if (c->palette_size != (1 << (c->bpp - 1)))
            return AVERROR_INVALIDDATA;
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
    } else {
        av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n",
                              encoding, c->bpp);
        return AVERROR_PATCHWELCOME;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if ((ret = avctx->get_buffer(avctx, p)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    p->pict_type = AV_PICTURE_TYPE_I;

    if (encoding) {
        av_fast_padded_malloc(&c->new_video, &c->new_video_size,
                              h * w + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!c->new_video)
            return AVERROR(ENOMEM);
        if (c->bpp == 8)
            cdxl_decode_ham8(c);
        else
            cdxl_decode_ham6(c);
    } else {
        cdxl_decode_rgb(c);
    }
    *data_size      = sizeof(AVFrame);
    *(AVFrame*)data = c->frame;

    return buf_size;
}
Example #27
File: ralf.c  Project: kx/FFmpeg
static av_cold int decode_init(AVCodecContext *avctx)
{
    RALFContext *ctx = avctx->priv_data;
    int i, j, k;
    int ret;

    if (avctx->extradata_size < 24 || memcmp(avctx->extradata, "LSD:", 4)) {
        av_log(avctx, AV_LOG_ERROR, "Extradata is not groovy, dude\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->version = AV_RB16(avctx->extradata + 4);
    if (ctx->version != 0x103) {
        av_log_ask_for_sample(avctx, "unknown version %X\n", ctx->version);
        return AVERROR_PATCHWELCOME;
    }

    avctx->channels    = AV_RB16(avctx->extradata + 8);
    avctx->sample_rate = AV_RB32(avctx->extradata + 12);
    if (avctx->channels < 1 || avctx->channels > 2
        || avctx->sample_rate < 8000 || avctx->sample_rate > 96000) {
        av_log(avctx, AV_LOG_ERROR, "Invalid coding parameters %d Hz %d ch\n",
               avctx->sample_rate, avctx->channels);
        return AVERROR_INVALIDDATA;
    }
    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
                                                   : AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&ctx->frame);
    avctx->coded_frame = &ctx->frame;

    ctx->max_frame_size = AV_RB32(avctx->extradata + 16);
    if (ctx->max_frame_size > (1 << 20) || !ctx->max_frame_size) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame size %d\n",
               ctx->max_frame_size);
    }
    ctx->max_frame_size = FFMAX(ctx->max_frame_size, avctx->sample_rate);

    for (i = 0; i < 3; i++) {
        ret = init_ralf_vlc(&ctx->sets[i].filter_params, filter_param_def[i],
                            FILTERPARAM_ELEMENTS);
        if (ret < 0) {
            decode_close(avctx);
            return ret;
        }
        ret = init_ralf_vlc(&ctx->sets[i].bias, bias_def[i], BIAS_ELEMENTS);
        if (ret < 0) {
            decode_close(avctx);
            return ret;
        }
        ret = init_ralf_vlc(&ctx->sets[i].coding_mode, coding_mode_def[i],
                            CODING_MODE_ELEMENTS);
        if (ret < 0) {
            decode_close(avctx);
            return ret;
        }
        for (j = 0; j < 10; j++) {
            for (k = 0; k < 11; k++) {
                ret = init_ralf_vlc(&ctx->sets[i].filter_coeffs[j][k],
                                    filter_coeffs_def[i][j][k],
                                    FILTER_COEFFS_ELEMENTS);
                if (ret < 0) {
                    decode_close(avctx);
                    return ret;
                }
            }
        }
        for (j = 0; j < 15; j++) {
            ret = init_ralf_vlc(&ctx->sets[i].short_codes[j],
                                short_codes_def[i][j], SHORT_CODES_ELEMENTS);
            if (ret < 0) {
                decode_close(avctx);
                return ret;
            }
        }
        for (j = 0; j < 125; j++) {
            ret = init_ralf_vlc(&ctx->sets[i].long_codes[j],
                                long_codes_def[i][j], LONG_CODES_ELEMENTS);
            if (ret < 0) {
                decode_close(avctx);
                return ret;
            }
        }
    }

    return 0;
}
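The reads in decode_init() imply the following layout for the 24-byte-or-larger "LSD:" extradata block; a tiny standalone reader mirroring those offsets (struct and helper names are illustrative):

#include <stdint.h>

static unsigned rb16(const uint8_t *p) { return (p[0] << 8) | p[1]; }
static unsigned rb32(const uint8_t *p)
{
    return ((unsigned)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

/* Fields read from the "LSD:" extradata by decode_init() above. */
typedef struct RALFExtradata {
    unsigned version;        /* offset 4,  expected to be 0x103 */
    unsigned channels;       /* offset 8,  1 or 2               */
    unsigned sample_rate;    /* offset 12, 8000..96000          */
    unsigned max_frame_size; /* offset 16                       */
} RALFExtradata;

static RALFExtradata parse_ralf_extradata(const uint8_t *ed)
{
    RALFExtradata e;
    e.version        = rb16(ed + 4);
    e.channels       = rb16(ed + 8);
    e.sample_rate    = rb32(ed + 12);
    e.max_frame_size = rb32(ed + 16);
    return e;
}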