Code example #1
File: dxv.c  Project: Rodeo314/tim-libav
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    const char *msgcomp, *msgtext;
    uint32_t tag;
    int version_major, version_minor = 0;
    int size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        msgcomp = "DXTR1";
        msgtext = "DXT1";
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        msgcomp = "DXTR5";
        msgtext = "DXT5";
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08"PRIX32"", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        version_major = (old_type & 0x0F) - 1;

        if (old_type & 0x80) {
            msgcomp = "RAW";
            decompress_tex = dxv_decompress_raw;
        } else {
            msgcomp = "LZF";
            decompress_tex = dxv_decompress_lzf;
        }

        if (old_type & 0x40) {
            msgtext = "DXT5";

            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20 || version_major == 1) {
            msgtext = "DXT1";

            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR,
                   "Unsupported header (0x%08"PRIX32")\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        version_major = bytestream2_get_byte(gbc) - 1;
        version_minor = bytestream2_get_byte(gbc);

        /* Encoder copies texture data when compression is not advantageous. */
        if (bytestream2_get_byte(gbc)) {
            msgcomp = "RAW";
            ctx->tex_rat = 1;
            decompress_tex = dxv_decompress_raw;
        }

        bytestream2_skip(gbc, 1); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG,
           "%s compression with %s texture (version %d.%d)\n",
           msgcomp, msgtext, version_major, version_minor);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR,
               "Incomplete or invalid file (header %d, left %u).\n",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
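
The pattern shared by every example on this page is the av_reallocp() contract: unlike av_realloc(), it works through a pointer-to-pointer and, on failure, frees the old block and sets the pointer to NULL, so the caller only has to propagate the error code. A minimal sketch of that usage (the helper name is hypothetical):

#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Grow a scratch buffer before decoding.  On failure av_reallocp() has
 * already freed *buf and set it to NULL, so nothing is leaked and the
 * caller only needs to forward the AVERROR code. */
static int ensure_buffer(uint8_t **buf, size_t needed)
{
    int ret = av_reallocp(buf, needed);
    if (ret < 0)
        return ret; /* typically AVERROR(ENOMEM) */
    return 0;
}
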
Code example #2
/**
 * @return 0 when a packet was written into \p pkt, and no more data is left;
 *         1 when a packet was written into \p pkt, and more packets might be left;
 *        <0 when not enough data was provided to return a full packet, or on error.
 */
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, uint16_t seq,
                               int flags)
{
    AVIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len = 0;

        if (len < 4)
            return -1;

        av_freep(&asf->buf);

        ffio_init_context(pb, (uint8_t *)buf, len, 0, NULL, NULL, NULL, NULL);

        while (avio_tell(pb) + 4 < len) {
            int start_off = avio_tell(pb);

            mflags = avio_r8(pb);
            len_off = avio_rb24(pb);
            if (mflags & 0x20)   /**< relative timestamp */
                avio_skip(pb, 4);
            if (mflags & 0x10)   /**< has duration */
                avio_skip(pb, 4);
            if (mflags & 0x8)    /**< has location ID */
                avio_skip(pb, 4);
            off = avio_tell(pb);

            if (!(mflags & 0x40)) {
                /**
                 * If 0x40 is not set, the len_off field specifies an offset
                 * of this packet's payload data in the complete (reassembled)
                 * ASF packet. This is used to spread one ASF packet over
                 * multiple RTP packets.
                 */
                if (asf->pktbuf && len_off != avio_tell(asf->pktbuf)) {
                    ffio_free_dyn_buf(&asf->pktbuf);
                }
                if (!len_off && !asf->pktbuf &&
                    (res = avio_open_dyn_buf(&asf->pktbuf)) < 0)
                    return res;
                if (!asf->pktbuf)
                    return AVERROR(EIO);

                avio_write(asf->pktbuf, buf + off, len - off);
                avio_skip(pb, len - off);
                if (!(flags & RTP_FLAG_MARKER))
                    return -1;
                out_len     = avio_close_dyn_buf(asf->pktbuf, &asf->buf);
                asf->pktbuf = NULL;
            } else {
                /**
                 * If 0x40 is set, the len_off field specifies the length of
                 * the next ASF packet that can be read from this payload
                 * data alone. This is commonly the same as the payload size,
                 * but could be less in case of packet splitting (i.e.
                 * multiple ASF packets in one RTP packet).
                 */

                int cur_len = start_off + len_off - off;
                int prev_len = out_len;
                out_len += cur_len;
                if (FFMIN(cur_len, len - off) < 0)
                    return -1;
                if ((res = av_reallocp(&asf->buf, out_len)) < 0)
                    return res;
                memcpy(asf->buf + prev_len, buf + off,
                       FFMIN(cur_len, len - off));
                avio_skip(pb, cur_len);
            }
        }

        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    for (;;) {
        int i;

        res = ff_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = avio_tell(pb);
        if (res != 0)
            break;
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        av_packet_unref(pkt);
    }

    return res == 1 ? -1 : res;
}
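
This example reassembles one ASF packet from several RTP payloads by repeatedly growing asf->buf and appending each fragment at the previous end. A standalone sketch of that grow-and-append step (helper name hypothetical); the size counter is reset when the reallocation fails because av_reallocp() has already released the buffer:

#include <stdint.h>
#include <string.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Append src_len bytes to a growing buffer; *size tracks bytes written so far. */
static int append_fragment(uint8_t **dst, int *size,
                           const uint8_t *src, int src_len)
{
    int old_size = *size;
    int ret      = av_reallocp(dst, old_size + src_len);

    if (ret < 0) {
        *size = 0; /* buffer already freed by av_reallocp(); keep bookkeeping consistent */
        return ret;
    }
    memcpy(*dst + old_size, src, src_len);
    *size = old_size + src_len;
    return 0;
}
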
Code example #3
File: concat.c  Project: r-type/vice-libretro
static av_cold int concat_open(URLContext *h, const char *uri, int flags)
{
    char *node_uri = NULL;
    int err = 0;
    int64_t size;
    size_t  len, i;
    URLContext *uc;
    struct concat_data  *data = h->priv_data;
    struct concat_nodes *nodes;

    av_strstart(uri, "concat:", &uri);

    for (i = 0, len = 1; uri[i]; i++)
        if (uri[i] == *AV_CAT_SEPARATOR)
            /* guard against integer overflow of the allocation size */
            if (++len == UINT_MAX / sizeof(*nodes)) {
                av_freep(&h->priv_data);
                return AVERROR(ENAMETOOLONG);
            }

    if (!(nodes = av_realloc(NULL, sizeof(*nodes) * len))) {
        return AVERROR(ENOMEM);
    } else
        data->nodes = nodes;

    /* handle input */
    if (!*uri)
        err = AVERROR(ENOENT);
    for (i = 0; *uri; i++) {
        /* parsing uri */
        len = strcspn(uri, AV_CAT_SEPARATOR);
        if ((err = av_reallocp(&node_uri, len + 1)) < 0)
            break;
        av_strlcpy(node_uri, uri, len+1);
        uri += len + strspn(uri+len, AV_CAT_SEPARATOR);

        /* creating URLContext */
        if ((err = ffurl_open(&uc, node_uri, flags,
                              &h->interrupt_callback, NULL)) < 0)
            break;

        /* determining the node size */
        if ((size = ffurl_size(uc)) < 0) {
            ffurl_close(uc);
            err = AVERROR(ENOSYS);
            break;
        }

        /* assembling */
        nodes[i].uc   = uc;
        nodes[i].size = size;
    }
    av_free(node_uri);
    data->length = i;

    if (err < 0)
        concat_close(h);
    else if (!(nodes = av_realloc(nodes, data->length * sizeof(*nodes)))) {
        concat_close(h);
        err = AVERROR(ENOMEM);
    } else
        data->nodes = nodes;
    return err;
}
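
concat_open() uses both allocators: av_reallocp() to grow the per-node URI string, and plain av_realloc() to shrink the nodes array down to the number of entries actually filled. The difference matters on failure: av_realloc() leaves the old block valid (here it is still reachable through data->nodes and released by concat_close()), whereas av_reallocp() would have freed it. A small sketch of the shrink-to-fit step, assuming used > 0 (helper name hypothetical):

#include <stddef.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Shrink an array to its used length without losing it on failure. */
static int shrink_to_fit(void **array, size_t used, size_t elem_size)
{
    void *p = av_realloc(*array, used * elem_size);

    if (!p)
        return AVERROR(ENOMEM); /* *array is untouched and still owned by the caller */
    *array = p;
    return 0;
}
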
Code example #4
File: hapdec.c  Project: hai046/MediaPlayer
/* Prepare the texture to be decompressed */
static int setup_texture(AVCodecContext *avctx, size_t length)
{
    HapContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    int64_t snappy_size;
    const char *texture_name;
    const char *compressorstr;
    int ret;

    if ((avctx->codec_tag == MKTAG('H','a','p','1') && (ctx->section_type & 0x0F) != HAP_FMT_RGBDXT1)
        || (avctx->codec_tag == MKTAG('H','a','p','5') && (ctx->section_type & 0x0F) != HAP_FMT_RGBADXT5)
        || (avctx->codec_tag == MKTAG('H','a','p','Y') && (ctx->section_type & 0x0F) != HAP_FMT_YCOCGDXT5))
        return AVERROR_INVALIDDATA;

    switch (ctx->section_type & 0x0F) {
    case HAP_FMT_RGBDXT1:
        texture_name = "DXT1";
        break;
    case HAP_FMT_RGBADXT5:
        texture_name = "DXT5";
        break;
    case HAP_FMT_YCOCGDXT5:
        texture_name = "DXT5-YCoCg-scaled";
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Invalid format mode %02X.\n", ctx->section_type);
        return AVERROR_INVALIDDATA;
    }

    switch (ctx->section_type & 0xF0) {
    case HAP_COMP_NONE:
        /* Only DXTC texture compression */
        ctx->tex_data = gbc->buffer;
        ctx->tex_size = length;
        compressorstr = "none";
        break;
    case HAP_COMP_SNAPPY:
        snappy_size = ff_snappy_peek_uncompressed_length(gbc);
        ret = av_reallocp(&ctx->snappied, snappy_size);
        if (ret < 0) {
            return ret;
        }
        /* Uncompress the frame */
        ret = ff_snappy_uncompress(gbc, ctx->snappied, &snappy_size);
        if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "Snappy uncompress error\n");
             return ret;
        }

        ctx->tex_data = ctx->snappied;
        ctx->tex_size = snappy_size;
        compressorstr = "snappy";
        break;
    case HAP_COMP_COMPLEX:
        compressorstr = "complex";
        avpriv_request_sample(avctx, "Complex Hap compressor");
        return AVERROR_PATCHWELCOME;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Invalid compressor mode %02X.\n", ctx->section_type);
        return AVERROR_INVALIDDATA;
    }

    av_log(avctx, AV_LOG_DEBUG, "%s texture with %s compressor\n",
           texture_name, compressorstr);

    return 0;
}
Code example #5
File: smacker.c  Project: 0day-ci/FFmpeg
static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SmackerContext *smk = s->priv_data;
    int flags;
    int ret;
    int i;
    int frame_size = 0;
    int palchange = 0;

    if (avio_feof(s->pb) || smk->cur_frame >= smk->frames)
        return AVERROR_EOF;

    /* if we demuxed all streams, pass another frame */
    if(smk->curstream < 0) {
        avio_seek(s->pb, smk->nextpos, 0);
        frame_size = smk->frm_size[smk->cur_frame] & (~3);
        flags = smk->frm_flags[smk->cur_frame];
        /* handle palette change event */
        if(flags & SMACKER_PAL){
            int size, sz, t, off, j, pos;
            uint8_t *pal = smk->pal;
            uint8_t oldpal[768];

            memcpy(oldpal, pal, 768);
            size = avio_r8(s->pb);
            size = size * 4 - 1;
            if(size + 1 > frame_size)
                return AVERROR_INVALIDDATA;
            frame_size -= size;
            frame_size--;
            sz = 0;
            pos = avio_tell(s->pb) + size;
            while(sz < 256){
                t = avio_r8(s->pb);
                if(t & 0x80){ /* skip palette entries */
                    sz += (t & 0x7F) + 1;
                    pal += ((t & 0x7F) + 1) * 3;
                } else if(t & 0x40){ /* copy with offset */
                    off = avio_r8(s->pb);
                    j = (t & 0x3F) + 1;
                    if (off + j > 0x100) {
                        av_log(s, AV_LOG_ERROR,
                               "Invalid palette update, offset=%d length=%d extends beyond palette size\n",
                               off, j);
                        return AVERROR_INVALIDDATA;
                    }
                    off *= 3;
                    while(j-- && sz < 256) {
                        *pal++ = oldpal[off + 0];
                        *pal++ = oldpal[off + 1];
                        *pal++ = oldpal[off + 2];
                        sz++;
                        off += 3;
                    }
                } else { /* new entries */
                    *pal++ = smk_pal[t];
                    *pal++ = smk_pal[avio_r8(s->pb) & 0x3F];
                    *pal++ = smk_pal[avio_r8(s->pb) & 0x3F];
                    sz++;
                }
            }
            avio_seek(s->pb, pos, 0);
            palchange |= 1;
        }
        flags >>= 1;
        smk->curstream = -1;
        /* if audio chunks are present, push them onto a stack and retrieve them later */
        for(i = 0; i < 7; i++) {
            if(flags & 1) {
                uint32_t size;
                int err;

                size = avio_rl32(s->pb) - 4;
                if (!size || size + 4LL > frame_size) {
                    av_log(s, AV_LOG_ERROR, "Invalid audio part size\n");
                    return AVERROR_INVALIDDATA;
                }
                frame_size -= size;
                frame_size -= 4;
                smk->curstream++;
                if ((err = av_reallocp(&smk->bufs[smk->curstream], size)) < 0) {
                    smk->buf_sizes[smk->curstream] = 0;
                    return err;
                }
                smk->buf_sizes[smk->curstream] = size;
                ret = avio_read(s->pb, smk->bufs[smk->curstream], size);
                if(ret != size)
                    return AVERROR(EIO);
                smk->stream_id[smk->curstream] = smk->indexes[i];
            }
            flags >>= 1;
        }
        if (frame_size < 0 || frame_size >= INT_MAX/2)
            return AVERROR_INVALIDDATA;
        if (av_new_packet(pkt, frame_size + 769))
            return AVERROR(ENOMEM);
        if(smk->frm_size[smk->cur_frame] & 1)
            palchange |= 2;
        pkt->data[0] = palchange;
        memcpy(pkt->data + 1, smk->pal, 768);
        ret = avio_read(s->pb, pkt->data + 769, frame_size);
        if(ret != frame_size)
            return AVERROR(EIO);
        pkt->stream_index = smk->videoindex;
        pkt->pts          = smk->cur_frame;
        pkt->size = ret + 769;
        smk->cur_frame++;
        smk->nextpos = avio_tell(s->pb);
    } else {
Code example #6
static int h264_extradata_to_annexb(H264BSFContext *ctx, AVCodecContext *avctx, const int padding)
{
    uint16_t unit_size;
    uint64_t total_size                 = 0;
    uint8_t *out                        = NULL, unit_nb, sps_done = 0,
             sps_seen                   = 0, pps_seen = 0;
    const uint8_t *extradata            = avctx->extradata + 4;
    static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
    int length_size = (*extradata++ & 0x3) + 1; // retrieve length coded size

    ctx->sps_offset = ctx->pps_offset = -1;

    /* retrieve sps and pps unit(s) */
    unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
    if (!unit_nb) {
        goto pps;
    } else {
        ctx->sps_offset = 0;
        sps_seen = 1;
    }

    while (unit_nb--) {
        int err;

        unit_size   = AV_RB16(extradata);
        total_size += unit_size + 4;
        if (total_size > INT_MAX - padding) {
            av_log(avctx, AV_LOG_ERROR,
                   "Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
            av_free(out);
            return AVERROR(EINVAL);
        }
        if (extradata + 2 + unit_size > avctx->extradata + avctx->extradata_size) {
            av_log(avctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
                   "corrupted stream or invalid MP4/AVCC bitstream\n");
            av_free(out);
            return AVERROR(EINVAL);
        }
        if ((err = av_reallocp(&out, total_size + padding)) < 0)
            return err;
        memcpy(out + total_size - unit_size - 4, nalu_header, 4);
        memcpy(out + total_size - unit_size, extradata + 2, unit_size);
        extradata += 2 + unit_size;
pps:
        if (!unit_nb && !sps_done++) {
            unit_nb = *extradata++; /* number of pps unit(s) */
            if (unit_nb) {
                ctx->pps_offset = (extradata - 1) - (avctx->extradata + 4);
                pps_seen = 1;
            }
        }
    }

    if (out)
        memset(out + total_size, 0, padding);

    if (!sps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: SPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    if (!pps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: PPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    if (!ctx->private_spspps) {
        av_free(avctx->extradata);
        avctx->extradata      = out;
        avctx->extradata_size = total_size;
    }
    ctx->spspps_buf  = out;
    ctx->spspps_size = total_size;

    return length_size;
}
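
The conversion walks the AVCC extradata, in which each parameter set is stored as a 16-bit big-endian length followed by the NAL payload, and rewrites it as Annex B by substituting a 00 00 00 01 start code for every length field. A self-contained sketch of that inner loop (function name hypothetical, simplified from the code above):

#include <stdint.h>
#include <string.h>
#include "libavutil/error.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

static int copy_units_annexb(uint8_t **out, uint64_t *out_size,
                             const uint8_t **in, const uint8_t *in_end,
                             int unit_nb)
{
    static const uint8_t start_code[4] = { 0, 0, 0, 1 };

    while (unit_nb--) {
        uint16_t unit_size = AV_RB16(*in);
        int err;

        /* The length field and payload must both lie inside the extradata. */
        if (*in + 2 + unit_size > in_end)
            return AVERROR_INVALIDDATA;
        if ((err = av_reallocp(out, *out_size + 4 + unit_size)) < 0)
            return err;
        memcpy(*out + *out_size,     start_code, 4);
        memcpy(*out + *out_size + 4, *in + 2,    unit_size);
        *out_size += 4 + unit_size;
        *in       += 2 + unit_size;
    }
    return 0;
}
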
Code example #7
File: mmst.c  Project: 309746069/FFmpeg
/** Read incoming MMST media, header or command packet. */
static MMSSCPacketType get_tcp_server_response(MMSTContext *mmst)
{
    int read_result;
    MMSSCPacketType packet_type= -1;
    MMSContext *mms = &mmst->mms;
    for(;;) {
        read_result = ffurl_read_complete(mms->mms_hd, mms->in_buffer, 8);
        if (read_result != 8) {
            if(read_result < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Error reading packet header: %d (%s)\n",
                       read_result, strerror(AVUNERROR(read_result)));
                packet_type = SC_PKT_CANCEL;
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "The server closed the connection\n");
                packet_type = SC_PKT_NO_DATA;
            }
            return packet_type;
        }

        // handle command packet.
        if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
            int length_remaining, hr;

            mmst->incoming_flags= mms->in_buffer[3];
            read_result= ffurl_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
            if(read_result != 4) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading command packet length failed: %d (%s)\n",
                       read_result,
                       read_result < 0 ? strerror(AVUNERROR(read_result)) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR(EIO);
            }

            length_remaining= AV_RL32(mms->in_buffer+8) + 4;
            av_log(NULL, AV_LOG_TRACE, "Length remaining is %d\n", length_remaining);
            // read the rest of the packet.
            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 12) {
                av_log(NULL, AV_LOG_ERROR,
                       "Incoming packet length %d exceeds bufsize %"SIZE_SPECIFIER"\n",
                       length_remaining, sizeof(mms->in_buffer) - 12);
                return AVERROR_INVALIDDATA;
            }
            read_result = ffurl_read_complete(mms->mms_hd, mms->in_buffer + 12,
                                            length_remaining) ;
            if (read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading pkt data (length=%d) failed: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(AVUNERROR(read_result)) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR(EIO);
            }
            packet_type= AV_RL16(mms->in_buffer+36);
            if (read_result >= 44 && (hr = AV_RL32(mms->in_buffer + 40))) {
                av_log(NULL, AV_LOG_ERROR,
                       "Server sent a message with packet type 0x%x and error status code 0x%08x\n", packet_type, hr);
                return AVERROR(EINVAL);
            }
        } else {
            int length_remaining;
            int packet_id_type;
            int tmp;

            // note we cache the first 8 bytes,
            // then fill up the buffer with the others
            tmp                       = AV_RL16(mms->in_buffer + 6);
            length_remaining          = (tmp - 8) & 0xffff;
            mmst->incoming_packet_seq = AV_RL32(mms->in_buffer);
            packet_id_type            = mms->in_buffer[4];
            mmst->incoming_flags      = mms->in_buffer[5];

            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 8) {
                av_log(NULL, AV_LOG_ERROR,
                       "Data length %d is invalid or too large (max=%"SIZE_SPECIFIER")\n",
                       length_remaining, sizeof(mms->in_buffer));
                return AVERROR_INVALIDDATA;
            }
            mms->remaining_in_len    = length_remaining;
            mms->read_in_ptr         = mms->in_buffer;
            read_result= ffurl_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
            if(read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to read packet data of size %d: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(AVUNERROR(read_result)) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR(EIO);
            }

            // if we successfully read everything.
            if(packet_id_type == mmst->header_packet_id) {
                int err;
                packet_type = SC_PKT_ASF_HEADER;
                // Store the asf header
                if(!mms->header_parsed) {
                    if ((err = av_reallocp(&mms->asf_header,
                                           mms->asf_header_size +
                                           mms->remaining_in_len)) < 0) {
                        mms->asf_header_size = 0;
                        return err;
                    }
                    memcpy(mms->asf_header + mms->asf_header_size,
                           mms->read_in_ptr, mms->remaining_in_len);
                    mms->asf_header_size += mms->remaining_in_len;
                }
                // 0x04 means asf header is sent in multiple packets.
                if (mmst->incoming_flags == 0x04)
                    continue;
            } else if(packet_id_type == mmst->packet_id) {
                packet_type = SC_PKT_ASF_MEDIA;
            } else {
                av_log(NULL, AV_LOG_TRACE, "packet id type %d is old.", packet_id_type);
                continue;
            }
        }

        // preprocess some packet type
        if(packet_type == SC_PKT_KEEPALIVE) {
            send_keepalive_packet(mmst);
            continue;
        } else if(packet_type == SC_PKT_STREAM_CHANGING) {
            handle_packet_stream_changing_type(mmst);
        } else if(packet_type == SC_PKT_ASF_MEDIA) {
            pad_media_packet(mms);
        }
        return packet_type;
    }
}
Code example #8
File: avienc.c  Project: Acidburn0zzz/libav
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    unsigned char tag[5];
    unsigned int flags = 0;
    const int stream_index = pkt->stream_index;
    int size               = pkt->size;
    AVIContext *avi     = s->priv_data;
    AVIOContext *pb     = s->pb;
    AVIStream *avist    = s->streams[stream_index]->priv_data;
    AVCodecContext *enc = s->streams[stream_index]->codec;

    while (enc->block_align == 0 && pkt->dts != AV_NOPTS_VALUE &&
           pkt->dts > avist->packet_count) {
        AVPacket empty_packet;

        av_init_packet(&empty_packet);
        empty_packet.size         = 0;
        empty_packet.data         = NULL;
        empty_packet.stream_index = stream_index;
        avi_write_packet(s, &empty_packet);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {
        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if (pkt->flags & AV_PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
        avist->audio_strm_length += size;

    if (s->pb->seekable) {
        int err;
        AVIIndex *idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            if ((err = av_reallocp(&idx->cluster,
                                   (cl + 1) * sizeof(*idx->cluster))) < 0) {
                idx->ents_allocated = 0;
                idx->entry          = 0;
                return err;
            }
            idx->cluster[cl] =
                av_malloc(AVI_INDEX_CLUSTER_SIZE * sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return -1;
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos   = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len   = size;
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)
        avio_w8(pb, 0);

    return 0;
}
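
The AVI muxer keeps its index as a two-level structure: a reallocated array of cluster pointers, with each cluster being a fixed-size block of entries, so existing entries never move as the index grows. A sketch of that growth step under assumed names (IndexEntry and CLUSTER_SIZE are placeholders, not the real AVIIentry layout or AVI_INDEX_CLUSTER_SIZE definition):

#include "libavutil/error.h"
#include "libavutil/mem.h"

#define CLUSTER_SIZE 16384 /* assumed; stands in for AVI_INDEX_CLUSTER_SIZE */

typedef struct IndexEntry {
    unsigned flags, pos, len;
} IndexEntry;

/* Store entry number 'entry', growing the cluster table when needed.
 * Entries are assumed to be added sequentially, as in the muxer above. */
static int index_add(IndexEntry ***clusters, int *allocated, int entry,
                     IndexEntry e)
{
    int cl = entry / CLUSTER_SIZE;
    int id = entry % CLUSTER_SIZE;
    int err;

    if (*allocated <= entry) {
        /* Grow the array of cluster pointers; on failure it is freed by av_reallocp(). */
        if ((err = av_reallocp(clusters, (cl + 1) * sizeof(**clusters))) < 0) {
            *allocated = 0;
            return err;
        }
        (*clusters)[cl] = av_malloc(CLUSTER_SIZE * sizeof(IndexEntry));
        if (!(*clusters)[cl])
            return AVERROR(ENOMEM);
        *allocated += CLUSTER_SIZE;
    }
    (*clusters)[cl][id] = e;
    return 0;
}
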
Code example #9
File: format.c  Project: RAvenGEr/opencvr
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
                           const char *filename, void *logctx,
                           unsigned int offset, unsigned int max_probe_size)
{
    AVProbeData pd = { filename ? filename : "" };
    uint8_t *buf = NULL;
    int ret = 0, probe_size, buf_offset = 0;
    int score = 0;
    int ret2;

    if (!max_probe_size)
        max_probe_size = PROBE_BUF_MAX;
    else if (max_probe_size < PROBE_BUF_MIN) {
        av_log(logctx, AV_LOG_ERROR,
               "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
        return AVERROR(EINVAL);
    }

    if (offset >= max_probe_size)
        return AVERROR(EINVAL);

    if (pb->av_class) {
        uint8_t *mime_type_opt = NULL;
        char *semi;
        av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt);
        pd.mime_type = (const char *)mime_type_opt;
        semi = pd.mime_type ? strchr(pd.mime_type, ';') : NULL;
        if (semi) {
            *semi = '\0';
        }
    }
#if 0
    if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
        if (!av_strcasecmp(mime_type, "audio/aacp")) {
            *fmt = av_find_input_format("aac");
        }
        av_freep(&mime_type);
    }
#endif

    for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
            probe_size = FFMIN(probe_size << 1,
                               FFMAX(max_probe_size, probe_size + 1))) {
        score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;

        /* Read probe data. */
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
            goto fail;
        if ((ret = avio_read(pb, buf + buf_offset,
                             probe_size - buf_offset)) < 0) {
            /* Fail if error was not end of file, otherwise, lower score. */
            if (ret != AVERROR_EOF)
                goto fail;

            score = 0;
            ret   = 0;          /* error was end of file, nothing read */
        }
        buf_offset += ret;
        if (buf_offset < offset)
            continue;
        pd.buf_size = buf_offset - offset;
        pd.buf = &buf[offset];

        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* Guess file format. */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if (*fmt) {
            /* This can only be true in the last iteration. */
            if (score <= AVPROBE_SCORE_RETRY) {
                av_log(logctx, AV_LOG_WARNING,
                       "Format %s detected only with low score of %d, "
                       "misdetection possible!\n", (*fmt)->name, score);
            } else
                av_log(logctx, AV_LOG_DEBUG,
                       "Format %s probed with size=%d and score=%d\n",
                       (*fmt)->name, probe_size, score);
#if 0
            FILE *f = fopen("probestat.tmp", "ab");
            fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename);
            fclose(f);
#endif
        }
    }

    if (!*fmt)
        ret = AVERROR_INVALIDDATA;

fail:
    /* Rewind. Reuse probe buffer to avoid seeking. */
    ret2 = ffio_rewind_with_probe_data(pb, &buf, buf_offset);
    if (ret >= 0)
        ret = ret2;

    av_freep(&pd.mime_type);
    return ret < 0 ? ret : score;
}
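
The probing loop grows the read size exponentially but never past the cap: FFMIN(probe_size << 1, FFMAX(max_probe_size, probe_size + 1)) doubles the size while it is below max_probe_size, clamps the last pass to exactly max_probe_size, and then yields max_probe_size + 1 so the loop terminates (it also stops early once a format is detected). A standalone illustration of that schedule; the constants are assumptions standing in for libavformat's PROBE_BUF_MIN/PROBE_BUF_MAX:

#include <stdio.h>

#define PROBE_BUF_MIN 2048      /* assumed */
#define PROBE_BUF_MAX (1 << 20) /* assumed */
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))
#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int max_probe_size = PROBE_BUF_MAX;
    int probe_size;

    /* Prints 2048, 4096, ..., 524288, 1048576 and then stops. */
    for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size;
         probe_size = FFMIN(probe_size << 1,
                            FFMAX(max_probe_size, probe_size + 1)))
        printf("probe pass of %d bytes\n", probe_size);
    return 0;
}
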
Code example #10
File: format.c  Project: AVLeo/libav
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
{
    AVProbeData pd = { filename ? filename : "" };
    uint8_t *buf = NULL;
    int ret = 0, probe_size;
    uint8_t *mime_type_opt = NULL;

    if (!max_probe_size)
        max_probe_size = PROBE_BUF_MAX;
    else if (max_probe_size > PROBE_BUF_MAX)
        max_probe_size = PROBE_BUF_MAX;
    else if (max_probe_size < PROBE_BUF_MIN)
        return AVERROR(EINVAL);

    if (offset >= max_probe_size)
        return AVERROR(EINVAL);
    avio_skip(pb, offset);
    max_probe_size -= offset;
    if (pb->av_class) {
        av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt);
        pd.mime_type = (const char *)mime_type_opt;
        mime_type_opt = NULL;
    }
    for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
         probe_size = FFMIN(probe_size << 1,
                            FFMAX(max_probe_size, probe_size + 1))) {
        int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX / 4 : 0;

        /* Read probe data. */
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
            goto fail;
        if ((ret = avio_read(pb, buf + pd.buf_size,
                             probe_size - pd.buf_size)) < 0) {
            /* Fail if error was not end of file, otherwise, lower score. */
            if (ret != AVERROR_EOF)
                goto fail;

            score = 0;
            ret   = 0;          /* error was end of file, nothing read */
        }
        pd.buf_size += ret;
        pd.buf       = buf;

        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* Guess file format. */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if (*fmt) {
            /* This can only be true in the last iteration. */
            if (score <= AVPROBE_SCORE_MAX / 4) {
                av_log(logctx, AV_LOG_WARNING,
                       "Format detected only with low score of %d, "
                       "misdetection possible!\n", score);
            } else
                av_log(logctx, AV_LOG_DEBUG,
                       "Probed with size=%d and score=%d\n", probe_size, score);
        }
    }

    if (!*fmt)
        ret = AVERROR_INVALIDDATA;

fail:
    /* Rewind. Reuse probe buffer to avoid seeking. */
    if (ret < 0 ||
        (ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0) {
        av_free(buf);
    }
    av_freep(&pd.mime_type);
    return ret;
}
Code example #11
File: screenpresso.c  Project: gbeauchesne/FFmpeg
static int screenpresso_decode_frame(AVCodecContext *avctx, void *data,
                                     int *got_frame, AVPacket *avpkt)
{
    ScreenpressoContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    int keyframe;
    int ret;

    /* Size check */
    if (avpkt->size < 3) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    /* Basic sanity check, but not really harmful */
    if ((avpkt->data[0] != 0x73 && avpkt->data[0] != 0x72) ||
        avpkt->data[1] != 8) { // bpp probably
        av_log(avctx, AV_LOG_WARNING, "Unknown header 0x%02X%02X\n",
               avpkt->data[0], avpkt->data[1]);
    }
    keyframe = (avpkt->data[0] == 0x73);

    /* Resize deflate buffer and frame on resolution change */
    if (ctx->inflated_size != avctx->width * avctx->height * 3) {
        av_frame_unref(ctx->current);
        ret = ff_get_buffer(avctx, ctx->current, AV_GET_BUFFER_FLAG_REF);
        if (ret < 0)
            return ret;

        /* If malloc fails, reset len to avoid preserving an invalid value */
        ctx->inflated_size = avctx->width * avctx->height * 3;
        ret = av_reallocp(&ctx->inflated_buf, ctx->inflated_size);
        if (ret < 0) {
            ctx->inflated_size = 0;
            return ret;
        }
    }

    /* Inflate the frame after the 2 byte header */
    ret = uncompress(ctx->inflated_buf, &ctx->inflated_size,
                     avpkt->data + 2, avpkt->size - 2);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "Deflate error %d.\n", ret);
        return AVERROR_UNKNOWN;
    }

    /* When a keyframe is found, copy it (flipped) */
    if (keyframe)
        av_image_copy_plane(ctx->current->data[0] +
                            ctx->current->linesize[0] * (avctx->height - 1),
                            -1 * ctx->current->linesize[0],
                            ctx->inflated_buf, avctx->width * 3,
                            avctx->width * 3, avctx->height);
    /* Otherwise sum the delta on top of the current frame */
    else
        sum_delta_flipped(ctx->current->data[0], ctx->current->linesize[0],
                          ctx->inflated_buf, avctx->width * 3,
                          avctx->width * 3, avctx->height);

    /* Frame is ready to be output */
    ret = av_frame_ref(frame, ctx->current);
    if (ret < 0)
        return ret;

    /* Usual properties */
    if (keyframe) {
        frame->pict_type = AV_PICTURE_TYPE_I;
        frame->key_frame = 1;
    } else {
        frame->pict_type = AV_PICTURE_TYPE_P;
    }
    *got_frame = 1;

    return 0;
}
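
Two details of this decoder are worth noting: the inflate buffer's size field is zeroed when av_reallocp() fails (matching the comment in the code, since the buffer itself is already gone), and keyframes are copied upside down by pointing the destination at its last row and passing a negative linesize. A sketch of that flipped copy for packed RGB24 (function name hypothetical):

#include <stdint.h>
#include "libavutil/imgutils.h"

/* Copy a tightly packed RGB24 source into a frame vertically flipped:
 * the destination is walked bottom-up by starting at its last row and
 * using a negative linesize. */
static void copy_flipped_rgb24(uint8_t *dst, int dst_linesize,
                               const uint8_t *src, int width, int height)
{
    av_image_copy_plane(dst + dst_linesize * (height - 1), -dst_linesize,
                        src, width * 3,
                        width * 3, height);
}
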
Code example #12
static int h264_extradata_to_annexb_sps_pps(AVCodecContext *avctx,
        uint8_t **extradata_annexb, int *extradata_annexb_size,
        int *sps_offset, int *sps_size,
        int *pps_offset, int *pps_size)
{
    uint16_t unit_size;
    uint64_t total_size = 0;

    uint8_t i, j, unit_nb;
    uint8_t sps_seen = 0;
    uint8_t pps_seen = 0;

    const uint8_t *extradata;
    static const uint8_t nalu_header[4] = { 0x00, 0x00, 0x00, 0x01 };

    if (avctx->extradata_size < 8) {
        av_log(avctx, AV_LOG_ERROR,
            "Too small extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
        return AVERROR(EINVAL);
    }

    *extradata_annexb = NULL;
    *extradata_annexb_size = 0;

    *sps_offset = *sps_size = 0;
    *pps_offset = *pps_size = 0;

    extradata = avctx->extradata + 4;

    /* skip length size */
    extradata++;

    for (j = 0; j < 2; j ++) {

        if (j == 0) {
            /* number of sps unit(s) */
            unit_nb = *extradata++ & 0x1f;
        } else {
            /* number of pps unit(s) */
            unit_nb = *extradata++;
        }

        for (i = 0; i < unit_nb; i++) {
            int err;

            unit_size   = AV_RB16(extradata);
            total_size += unit_size + 4;

            if (total_size > INT_MAX) {
                av_log(avctx, AV_LOG_ERROR,
                    "Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
                av_freep(extradata_annexb);
                return AVERROR(EINVAL);
            }

            if (extradata + 2 + unit_size > avctx->extradata + avctx->extradata_size) {
                av_log(avctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
                    "corrupted stream or invalid MP4/AVCC bitstream\n");
                av_freep(extradata_annexb);
                return AVERROR(EINVAL);
            }

            if ((err = av_reallocp(extradata_annexb, total_size)) < 0) {
                return err;
            }

            memcpy(*extradata_annexb + total_size - unit_size - 4, nalu_header, 4);
            memcpy(*extradata_annexb + total_size - unit_size, extradata + 2, unit_size);
            extradata += 2 + unit_size;
        }

        if (unit_nb) {
            if (j == 0) {
                sps_seen = 1;
                *sps_size = total_size;
            } else {
                pps_seen = 1;
                *pps_size = total_size - *sps_size;
                *pps_offset = *sps_size;
            }
        }
    }

    *extradata_annexb_size = total_size;

    if (!sps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: SPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    if (!pps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: PPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    return 0;
}
Code example #13
File: libschroedingerenc.c  Project: 309746069/FFmpeg
static int libschroedinger_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                        const AVFrame *frame, int *got_packet)
{
    int enc_size = 0;
    SchroEncoderParams *p_schro_params = avctx->priv_data;
    SchroEncoder *encoder = p_schro_params->encoder;
    struct FFSchroEncodedFrame *p_frame_output = NULL;
    int go = 1;
    SchroBuffer *enc_buf;
    int presentation_frame;
    int parse_code;
    int last_frame_in_sequence = 0;
    int pkt_size, ret;

    if (!frame) {
        /* Push end of sequence if not already signalled. */
        if (!p_schro_params->eos_signalled) {
            schro_encoder_end_of_stream(encoder);
            p_schro_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avctx, frame);
        if (!in_frame)
            return AVERROR(ENOMEM);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

    if (p_schro_params->eos_pulled)
        go = 0;

    /* Now check to see if we have any output from the encoder. */
    while (go) {
        int err;
        SchroStateEnum state;
        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            if (enc_buf->length <= 0)
                return AVERROR_BUG;
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            if ((err = av_reallocp(&p_schro_params->enc_buf,
                                   p_schro_params->enc_buf_size +
                                   enc_buf->length)) < 0) {
                p_schro_params->enc_buf_size = 0;
                return err;
            }

            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);
            p_schro_params->enc_buf_size += enc_buf->length;


            if (state == SCHRO_STATE_END_OF_STREAM) {
                p_schro_params->eos_pulled = 1;
                go = 0;
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

            /* Create output frame. */
            p_frame_output = av_mallocz(sizeof(FFSchroEncodedFrame));
            if (!p_frame_output)
                return AVERROR(ENOMEM);
            /* Set output data. */
            p_frame_output->size     = p_schro_params->enc_buf_size;
            p_frame_output->p_encbuf = p_schro_params->enc_buf;
            if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) &&
                SCHRO_PARSE_CODE_IS_REFERENCE(parse_code))
                p_frame_output->key_frame = 1;

            /* Parse the coded frame number from the bitstream. Bytes 14
             * through 17 represent the frame number. */
            p_frame_output->frame_num = AV_RB32(enc_buf->data + 13);

            ff_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                     p_frame_output);
            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf      = NULL;

            schro_buffer_unref(enc_buf);

            break;

        case SCHRO_STATE_NEED_FRAME:
            go = 0;
            break;

        case SCHRO_STATE_AGAIN:
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Schro Encoder state\n");
            return -1;
        }
    }

    /* Copy 'next' frame in queue. */

    if (p_schro_params->enc_frame_queue.size == 1 &&
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_schro_queue_pop(&p_schro_params->enc_frame_queue);

    if (!p_frame_output)
        return 0;

    pkt_size = p_frame_output->size;
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
        pkt_size += p_schro_params->enc_buf_size;
    if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
        goto error;

    memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size);
    avctx->coded_frame->key_frame = p_frame_output->key_frame;
    /* Use the frame number of the encoded frame as the pts. It is OK to
     * do so since Dirac is a constant frame rate codec. It expects input
     * to be of constant frame rate. */
    pkt->pts =
    avctx->coded_frame->pts = p_frame_output->frame_num;
    pkt->dts = p_schro_params->dts++;
    enc_size = p_frame_output->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(pkt->data + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);
        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    if (p_frame_output->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

error:
    /* free frame */
    libschroedinger_free_frame(p_frame_output);
    return ret;
}
Code example #14
File: dxv.c  Project: vriera/libav
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    uint32_t tag;
    int channels, size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        av_log(avctx, AV_LOG_DEBUG, "DXTR1 compression and DXT1 texture ");
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        av_log(avctx, AV_LOG_DEBUG, "DXTR5 compression and DXT5 texture ");
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        channels = old_type & 0x0F;
        if (old_type & 0x40) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT5 texture ");
            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT1 texture ");
            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        decompress_tex = dxv_decompress_lzf;
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        channels = bytestream2_get_byte(gbc);
        bytestream2_skip(gbc, 3); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG, "(%d channels)\n", channels);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR, "Incomplete or invalid file (%u > %u)\n.",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Code example #15
static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding)
{
    uint16_t unit_size;
    uint64_t total_size                 = 0;
    uint8_t *out                        = NULL, unit_nb, sps_done = 0,
             sps_seen                   = 0, pps_seen = 0;
    const uint8_t *extradata            = ctx->par_in->extradata + 4;
    static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
    int length_size = (*extradata++ & 0x3) + 1; // retrieve length coded size

    if (length_size == 3)
        return AVERROR(EINVAL);

    /* retrieve sps and pps unit(s) */
    unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
    if (!unit_nb) {
        unit_nb = *extradata++; /* number of pps unit(s) */
        sps_done++;

        if (unit_nb)
            pps_seen = 1;
    } else {
        sps_seen = 1;
    }

    while (unit_nb--) {
        int err;

        unit_size   = AV_RB16(extradata);
        total_size += unit_size + 4;
        if (total_size > INT_MAX - padding ||
            extradata + 2 + unit_size > ctx->par_in->extradata +
            ctx->par_in->extradata_size) {
            av_free(out);
            return AVERROR(EINVAL);
        }
        if ((err = av_reallocp(&out, total_size + padding)) < 0)
            return err;
        memcpy(out + total_size - unit_size - 4, nalu_header, 4);
        memcpy(out + total_size - unit_size, extradata + 2, unit_size);
        extradata += 2 + unit_size;

        if (!unit_nb && !sps_done++) {
            unit_nb = *extradata++; /* number of pps unit(s) */
            if (unit_nb)
                pps_seen = 1;
        }
    }

    if (out)
        memset(out + total_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    if (!sps_seen)
        av_log(ctx, AV_LOG_WARNING,
               "Warning: SPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    if (!pps_seen)
        av_log(ctx, AV_LOG_WARNING,
               "Warning: PPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    av_freep(&ctx->par_out->extradata);
    ctx->par_out->extradata      = out;
    ctx->par_out->extradata_size = total_size;

    return length_size;
}
Code example #16
File: hapdec.c  Project: 411697643/FFmpeg
static int hap_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    HapContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    int ret, i;
    int tex_size;

    bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);

    /* Check for section header */
    ret = hap_parse_frame_header(avctx);
    if (ret < 0)
        return ret;

    /* Get the output frame ready to receive data */
    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    if (avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);

    /* Unpack the DXT texture */
    if (hap_can_use_tex_in_place(ctx)) {
        /* Only DXTC texture compression in a contiguous block */
        ctx->tex_data = ctx->gbc.buffer;
        tex_size = bytestream2_get_bytes_left(&ctx->gbc);
    } else {
        /* Perform the second-stage decompression */
        ret = av_reallocp(&ctx->tex_buf, ctx->tex_size);
        if (ret < 0)
            return ret;

        avctx->execute2(avctx, decompress_chunks_thread, NULL,
                        ctx->chunk_results, ctx->chunk_count);

        for (i = 0; i < ctx->chunk_count; i++) {
            if (ctx->chunk_results[i] < 0)
                return ctx->chunk_results[i];
        }

        ctx->tex_data = ctx->tex_buf;
        tex_size = ctx->tex_size;
    }

    if (tex_size < (avctx->coded_width  / TEXTURE_BLOCK_W)
                  *(avctx->coded_height / TEXTURE_BLOCK_H)
                  *ctx->tex_rat) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient data\n");
        return AVERROR_INVALIDDATA;
    }

    /* Use the decompress function on the texture, one block per thread */
    avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}