Example #1
static int ogg_build_flac_headers(const uint8_t *extradata, int extradata_size,
                                  OGGStreamContext *oggstream, int bitexact)
{
    const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    uint8_t *p;
    if (extradata_size != 34)
        return -1;
    oggstream->header_len[0] = 51;
    oggstream->header[0] = av_mallocz(51); // per ogg flac specs
    p = oggstream->header[0];
    bytestream_put_byte(&p, 0x7F);
    bytestream_put_buffer(&p, "FLAC", 4);
    bytestream_put_byte(&p, 1); // major version
    bytestream_put_byte(&p, 0); // minor version
    bytestream_put_be16(&p, 1); // headers packets without this one
    bytestream_put_buffer(&p, "fLaC", 4);
    bytestream_put_byte(&p, 0x00); // streaminfo
    bytestream_put_be24(&p, 34);
    bytestream_put_buffer(&p, extradata, 34);
    oggstream->header_len[1] = 1+3+4+strlen(vendor)+4;
    oggstream->header[1] = av_mallocz(oggstream->header_len[1]);
    p = oggstream->header[1];
    bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment
    bytestream_put_be24(&p, oggstream->header_len[1] - 4);
    bytestream_put_le32(&p, strlen(vendor));
    bytestream_put_buffer(&p, vendor, strlen(vendor));
    bytestream_put_le32(&p, 0); // user comment list length
    return 0;
}
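All of the examples on this page follow the same calling pattern: allocate a destination buffer large enough for everything that will be written, then advance a write pointer through it with the bytestream_put_* helpers. Below is a minimal sketch of that pattern; the "BLOB" tag, the write_tagged_blob name and the buffer-size contract are illustrative assumptions, not taken from any of the FFmpeg excerpts here.

#include "libavcodec/bytestream.h"

/* Illustrative sketch only: write a 4-byte tag, a big-endian length and a
 * payload into a caller-provided buffer, returning the number of bytes used.
 * The caller must ensure 'out' holds at least 8 + payload_size bytes. */
static int write_tagged_blob(uint8_t *out, const uint8_t *payload, int payload_size)
{
    uint8_t *p = out;                         /* moving write pointer */
    bytestream_put_buffer(&p, "BLOB", 4);     /* fixed 4-byte tag */
    bytestream_put_be32(&p, payload_size);    /* payload length, big endian */
    bytestream_put_buffer(&p, payload, payload_size);
    return p - out;                           /* bytes written */
}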
Example #2
int av_packet_merge_side_data(AVPacket *pkt){
    if(pkt->side_data_elems){
        AVBufferRef *buf;
        int i;
        uint8_t *p;
        uint64_t size= pkt->size + 8LL + AV_INPUT_BUFFER_PADDING_SIZE;
        AVPacket old= *pkt;
        for (i=0; i<old.side_data_elems; i++) {
            size += old.side_data[i].size + 5LL;
        }
        if (size > INT_MAX)
            return AVERROR(EINVAL);
        buf = av_buffer_alloc(size);
        if (!buf)
            return AVERROR(ENOMEM);
        pkt->buf = buf;
        pkt->data = p = buf->data;
        pkt->size = size - AV_INPUT_BUFFER_PADDING_SIZE;
        bytestream_put_buffer(&p, old.data, old.size);
        for (i=old.side_data_elems-1; i>=0; i--) {
            bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
            bytestream_put_be32(&p, old.side_data[i].size);
            *p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
        }
        bytestream_put_be64(&p, FF_MERGE_MARKER);
        av_assert0(p-pkt->data == pkt->size);
        memset(p, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        av_packet_unref(&old);
        pkt->side_data_elems = 0;
        pkt->side_data = NULL;
        return 1;
    }
    return 0;
}
Example #3
static int mjpega_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
                              uint8_t **poutbuf, int *poutbuf_size,
                              const uint8_t *buf, int buf_size, int keyframe)
{
    uint8_t *poutbufp;
    int i;

    if (avctx->codec_id != CODEC_ID_MJPEG) {
        av_log(avctx, AV_LOG_ERROR, "mjpega bitstream filter only applies to mjpeg codec\n");
        return 0;
    }

    *poutbuf_size = 0;
    *poutbuf = av_malloc(buf_size + 44 + FF_INPUT_BUFFER_PADDING_SIZE);
    poutbufp = *poutbuf;
    bytestream_put_byte(&poutbufp, 0xff);
    bytestream_put_byte(&poutbufp, SOI);
    bytestream_put_byte(&poutbufp, 0xff);
    bytestream_put_byte(&poutbufp, APP1);
    bytestream_put_be16(&poutbufp, 42); /* size */
    bytestream_put_be32(&poutbufp, 0);
#ifdef __CW32__
    bytestream_put_buffer(&poutbufp, (const unsigned char*)"mjpg", 4);
#else
    bytestream_put_buffer(&poutbufp, "mjpg", 4);
#endif
    bytestream_put_be32(&poutbufp, buf_size + 44); /* field size */
    bytestream_put_be32(&poutbufp, buf_size + 44); /* pad field size */
    bytestream_put_be32(&poutbufp, 0);             /* next ptr */

    for (i = 0; i < buf_size - 1; i++) {
        if (buf[i] == 0xff) {
            switch (buf[i + 1]) {
            case DQT:  /* quant off */
            case DHT:  /* huff  off */
            case SOF0: /* image off */
                bytestream_put_be32(&poutbufp, i + 46);
                break;
            case SOS:
                bytestream_put_be32(&poutbufp, i + 46); /* scan off */
                bytestream_put_be32(&poutbufp, i + 46 + AV_RB16(buf + i + 2)); /* data off */
                bytestream_put_buffer(&poutbufp, buf + 2, buf_size - 2); /* skip already written SOI */
                *poutbuf_size = poutbufp - *poutbuf;
                return 1;
            case APP1:
                if (i + 8 < buf_size && AV_RL32(buf + i + 8) == ff_get_fourcc("mjpg")) {
                    av_log(avctx, AV_LOG_ERROR, "bitstream already formatted\n");
                    memcpy(*poutbuf, buf, buf_size);
                    *poutbuf_size = buf_size;
                    return 1;
                }
            }
        }
    }
    av_freep(poutbuf);
    av_log(avctx, AV_LOG_ERROR, "could not find SOS marker in bitstream\n");
    return 0;
}
Example #4
void ff_amf_write_string2(uint8_t **dst, const char *str1, const char *str2)
{
    int len1 = 0, len2 = 0;
    if (str1)
        len1 = strlen(str1);
    if (str2)
        len2 = strlen(str2);
    bytestream_put_byte(dst, AMF_DATA_TYPE_STRING);
    bytestream_put_be16(dst, len1 + len2);
    bytestream_put_buffer(dst, str1, len1);
    bytestream_put_buffer(dst, str2, len2);
}
Example #5
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size;
    int ret;

    if (avio_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        size    = b->last_block_used_bytes;
        samples = size / (8 * codec->channels) * 14;
    } else if (b->current_block < b->block_count) {
        size    = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if ((codec->codec_id == AV_CODEC_ID_ADPCM_THP ||
         codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) && !codec->extradata) {
        uint8_t *dst;

        if (av_new_packet(pkt, 8 + (32 + 4) * codec->channels + size) < 0)
            return AVERROR(ENOMEM);
        dst = pkt->data;
        bytestream_put_be32(&dst, size);
        bytestream_put_be32(&dst, samples);
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        ret = avio_read(s->pb, dst, size);
        if (ret != size)
            av_free_packet(pkt);
        pkt->duration = samples;
    } else {
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret != size)
        ret = AVERROR(EIO);

    return ret;
}
Example #6
static int ogg_build_speex_headers(AVCodecContext *avctx,
                                   OGGStreamContext *oggstream, int bitexact)
{
    uint8_t *p;

    if (avctx->extradata_size < SPEEX_HEADER_SIZE)
        return -1;

    // first packet: Speex header
    p = av_mallocz(SPEEX_HEADER_SIZE);
    if (!p)
        return AVERROR_NOMEM;
    oggstream->header[0] = p;
    oggstream->header_len[0] = SPEEX_HEADER_SIZE;
    bytestream_put_buffer(&p, avctx->extradata, SPEEX_HEADER_SIZE);
    AV_WL32(&oggstream->header[0][68], 0);  // set extra_headers to 0

    // second packet: VorbisComment
    p = ogg_write_vorbiscomment(0, bitexact, &oggstream->header_len[1]);
    if (!p)
        return AVERROR_NOMEM;
    oggstream->header[1] = p;

    return 0;
}
Example #7
static int gif_image_write_image(uint8_t **bytestream,
                                 int x1, int y1, int width, int height,
                                 const uint8_t *buf, int linesize, int pix_fmt)
{
    PutBitContext p;
    uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
    int i, left, w;
    const uint8_t *ptr;
    /* image block */

    bytestream_put_byte(bytestream, 0x2c);
    bytestream_put_le16(bytestream, x1);
    bytestream_put_le16(bytestream, y1);
    bytestream_put_le16(bytestream, width);
    bytestream_put_le16(bytestream, height);
    bytestream_put_byte(bytestream, 0x00); /* flags */
    /* no local clut */

    bytestream_put_byte(bytestream, 0x08);

    left= width * height;

    init_put_bits(&p, buffer, 130);

/*
 * the thing here is the bitstream is written as little packets, with a size byte before
 * but it's still the same bitstream between packets (no flush !)
 */
    ptr = buf;
    w = width;
    while(left>0) {

        put_bits(&p, 9, 0x0100); /* clear code */

        for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) {
            put_bits(&p, 9, *ptr++);
            if (--w == 0) {
                w = width;
                buf += linesize;
                ptr = buf;
            }
        }

        if(left<=GIF_CHUNKS) {
            put_bits(&p, 9, 0x101); /* end of stream */
            flush_put_bits(&p);
        }
        if(pbBufPtr(&p) - p.buf > 0) {
            bytestream_put_byte(bytestream, pbBufPtr(&p) - p.buf); /* byte count of the packet */
            bytestream_put_buffer(bytestream, p.buf, pbBufPtr(&p) - p.buf); /* the actual buffer */
            p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */
        }
        left-=GIF_CHUNKS;
    }
    bytestream_put_byte(bytestream, 0x00); /* end of image block */
    bytestream_put_byte(bytestream, 0x3b);
    return 0;
}
Example #8
int ff_vorbiscomment_write(uint8_t **p, AVDictionary **m,
                           const char *vendor_string, const unsigned count)
{
    bytestream_put_le32(p, strlen(vendor_string));
    bytestream_put_buffer(p, vendor_string, strlen(vendor_string));
    if (*m) {
        AVDictionaryEntry *tag = NULL;
        bytestream_put_le32(p, count);
        while ((tag = av_dict_get(*m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            unsigned int len1 = strlen(tag->key);
            unsigned int len2 = strlen(tag->value);
            bytestream_put_le32(p, len1+1+len2);
            bytestream_put_buffer(p, tag->key, len1);
            bytestream_put_byte(p, '=');
            bytestream_put_buffer(p, tag->value, len2);
        }
    } else
        bytestream_put_le32(p, 0);
    return 0;
}
Example #9
int av_packet_merge_side_data(AVPacket *pkt){
    if(pkt->side_data_elems){
        AVBufferRef *buf;
        int i;
        uint8_t *p;
        uint64_t size= pkt->size + 8LL + FF_INPUT_BUFFER_PADDING_SIZE;
        AVPacket old= *pkt;
        for (i=0; i<old.side_data_elems; i++) {
            size += old.side_data[i].size + 5LL;
        }
        if (size > INT_MAX)
            return AVERROR(EINVAL);
        buf = av_buffer_alloc(size);
        if (!buf)
            return AVERROR(ENOMEM);
        pkt->buf = buf;
        pkt->data = p = buf->data;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        pkt->destruct = dummy_destruct_packet;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        pkt->size = size - FF_INPUT_BUFFER_PADDING_SIZE;
        bytestream_put_buffer(&p, old.data, old.size);
        for (i=old.side_data_elems-1; i>=0; i--) {
            bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
            bytestream_put_be32(&p, old.side_data[i].size);
            *p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
        }
        bytestream_put_be64(&p, FF_MERGE_MARKER);
        av_assert0(p-pkt->data == pkt->size);
        memset(p, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        av_free_packet(&old);
        pkt->side_data_elems = 0;
        pkt->side_data = NULL;
        return 1;
    }
    return 0;
}
Example #10
/* NOTE: Typecodes must be spooled AFTER arguments!! */
static void write_typecode(CodingSpool *s, uint8_t type)
{
    s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
    s->typeSpoolLength += 2;
    if (s->typeSpoolLength == 16) {
        bytestream_put_le16(s->pout, s->typeSpool);
        bytestream_put_buffer(s->pout, s->argumentSpool,
                              s->args - s->argumentSpool);
        s->typeSpoolLength = 0;
        s->typeSpool = 0;
        s->args = s->argumentSpool;
    }
}
Example #11
/* GIF header */
static int gif_image_write_header(AVCodecContext *avctx,
                                  uint8_t **bytestream, uint32_t *palette)
{
    int i;
    unsigned int v;

    bytestream_put_buffer(bytestream, "GIF", 3);
    bytestream_put_buffer(bytestream, "89a", 3);
    bytestream_put_le16(bytestream, avctx->width);
    bytestream_put_le16(bytestream, avctx->height);

    bytestream_put_byte(bytestream, 0xf7); /* flags: global clut, 256 entries */
    bytestream_put_byte(bytestream, 0x1f); /* background color index */
    bytestream_put_byte(bytestream, 0); /* aspect ratio */

    /* the global palette */
    for(i=0;i<256;i++) {
        v = palette[i];
        bytestream_put_be24(bytestream, v);
    }

    return 0;
}
Example #12
static int imx_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
                           uint8_t **poutbuf, int *poutbuf_size,
                           const uint8_t *buf, int buf_size, int keyframe)
{
    /* MXF essence element key */
    static const uint8_t imx_header[16] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x05,0x01,0x01,0x00 };
    uint8_t *poutbufp;

    if (avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "imx bitstream filter only applies to mpeg2video codec\n");
        return 0;
    }

    *poutbuf = av_malloc(buf_size + 20 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!*poutbuf)
        return AVERROR(ENOMEM);
    poutbufp = *poutbuf;
    bytestream_put_buffer(&poutbufp, imx_header, 16);
    bytestream_put_byte(&poutbufp, 0x83); /* KLV BER long form */
    bytestream_put_be24(&poutbufp, buf_size);
    bytestream_put_buffer(&poutbufp, buf, buf_size);
    *poutbuf_size = poutbufp - *poutbuf;
    return 1;
}
Example #13
static int ogg_build_flac_headers(AVCodecContext *avctx,
                                  OGGStreamContext *oggstream, int bitexact)
{
    enum FLACExtradataFormat format;
    uint8_t *streaminfo;
    uint8_t *p;

    if (!ff_flac_is_extradata_valid(avctx, &format, &streaminfo))
        return -1;

    // first packet: STREAMINFO
    oggstream->header_len[0] = 51;
    oggstream->header[0] = av_mallocz(51); // per ogg flac specs
    p = oggstream->header[0];
    if (!p)
        return AVERROR_NOMEM;
    bytestream_put_byte(&p, 0x7F);
    bytestream_put_buffer(&p, "FLAC", 4);
    bytestream_put_byte(&p, 1); // major version
    bytestream_put_byte(&p, 0); // minor version
    bytestream_put_be16(&p, 1); // headers packets without this one
    bytestream_put_buffer(&p, "fLaC", 4);
    bytestream_put_byte(&p, 0x00); // streaminfo
    bytestream_put_be24(&p, 34);
    bytestream_put_buffer(&p, streaminfo, FLAC_STREAMINFO_SIZE);

    // second packet: VorbisComment
    p = ogg_write_vorbiscomment(4, bitexact, &oggstream->header_len[1]);
    if (!p)
        return AVERROR_NOMEM;
    oggstream->header[1] = p;
    bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment
    bytestream_put_be24(&p, oggstream->header_len[1] - 4);

    return 0;
}
Example #14
static int gif_image_write_image(AVCodecContext *avctx,
                                 uint8_t **bytestream, uint8_t *end,
                                 const uint8_t *buf, int linesize)
{
    GIFContext *s = avctx->priv_data;
    int len = 0, height;
    const uint8_t *ptr;
    /* image block */

    bytestream_put_byte(bytestream, 0x2c);
    bytestream_put_le16(bytestream, 0);
    bytestream_put_le16(bytestream, 0);
    bytestream_put_le16(bytestream, avctx->width);
    bytestream_put_le16(bytestream, avctx->height);
    bytestream_put_byte(bytestream, 0x00); /* flags */
    /* no local clut */

    bytestream_put_byte(bytestream, 0x08);

    ff_lzw_encode_init(s->lzw, s->buf, avctx->width*avctx->height,
                       12, FF_LZW_GIF, put_bits);

    ptr = buf;
    for (height = avctx->height; height--;) {
        len += ff_lzw_encode(s->lzw, ptr, avctx->width);
        ptr += linesize;
    }
    len += ff_lzw_encode_flush(s->lzw, flush_put_bits);

    ptr = s->buf;
    while (len > 0) {
        int size = FFMIN(255, len);
        bytestream_put_byte(bytestream, size);
        if (end - *bytestream < size)
            return -1;
        bytestream_put_buffer(bytestream, ptr, size);
        ptr += size;
        len -= size;
    }
    bytestream_put_byte(bytestream, 0x00); /* end of image block */
    bytestream_put_byte(bytestream, 0x3b);
    return 0;
}
Example #15
static uint8_t *ogg_write_vorbiscomment(int offset, int bitexact,
                                        int *header_len)
{
    const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    int size;
    uint8_t *p, *p0;

    size = offset + 4 + strlen(vendor) + 4;
    p = av_mallocz(size);
    if (!p)
        return NULL;
    p0 = p;

    p += offset;
    bytestream_put_le32(&p, strlen(vendor));
    bytestream_put_buffer(&p, vendor, strlen(vendor));
    bytestream_put_le32(&p, 0); // user comment list length

    *header_len = size;
    return p0;
}
Example #16
static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize)
{
    ADXContext *c = avctx->priv_data;

    bytestream_put_be16(&buf, 0x8000);              /* header signature */
    bytestream_put_be16(&buf, HEADER_SIZE - 4);     /* copyright offset */
    bytestream_put_byte(&buf, 3);                   /* encoding */
    bytestream_put_byte(&buf, BLOCK_SIZE);          /* block size */
    bytestream_put_byte(&buf, 4);                   /* sample size */
    bytestream_put_byte(&buf, avctx->channels);     /* channels */
    bytestream_put_be32(&buf, avctx->sample_rate);  /* sample rate */
    bytestream_put_be32(&buf, 0);                   /* total sample count */
    bytestream_put_be16(&buf, c->cutoff);           /* cutoff frequency */
    bytestream_put_byte(&buf, 3);                   /* version */
    bytestream_put_byte(&buf, 0);                   /* flags */
    bytestream_put_be32(&buf, 0);                   /* unknown */
    bytestream_put_be32(&buf, 0);                   /* loop enabled */
    bytestream_put_be16(&buf, 0);                   /* padding */
    bytestream_put_buffer(&buf, "(c)CRI", 6);       /* copyright signature */

    return HEADER_SIZE;
}
Example #17
/**
 * Write codebook chunk
 */
static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j;
    uint8_t **outp= &enc->out_buf;

    if (tempData->numCB2) {
        bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
        bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
        bytestream_put_byte(outp, tempData->numCB4);
        bytestream_put_byte(outp, tempData->numCB2);

        for (i=0; i<tempData->numCB2; i++) {
            bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
        }

        for (i=0; i<tempData->numCB4; i++)
            for (j=0; j<4; j++)
                bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);

    }
}
Example #18
static int iff_read_header(AVFormatContext *s)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *buf;
    uint32_t chunk_id;
    uint64_t data_size;
    uint32_t screenmode = 0, num, den;
    unsigned transparency = 0;
    unsigned masking = 0; // no mask
    uint8_t fmt[16];
    int fmt_size;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    iff->is_64bit = avio_rl32(pb) == ID_FRM8;
    avio_skip(pb, iff->is_64bit ? 8 : 4);
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = avio_rl32(pb);
    if (st->codec->codec_tag == ID_ANIM) {
        avio_skip(pb, 8);
        st->codec->codec_tag = avio_rl32(pb);
    }
    iff->bitmap_compression = -1;
    iff->svx8_compression = -1;
    iff->maud_bits = -1;
    iff->maud_compression = -1;

    while(!avio_feof(pb)) {
        uint64_t orig_pos;
        int res;
        const char *metadata_tag = NULL;
        int version, nb_comments, i;
        chunk_id = avio_rl32(pb);
        data_size = iff->is_64bit ? avio_rb64(pb) : avio_rb32(pb);
        orig_pos = avio_tell(pb);

        switch(chunk_id) {
        case ID_VHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 14)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 12);
            st->codec->sample_rate = avio_rb16(pb);
            if (data_size >= 16) {
                avio_skip(pb, 1);
                iff->svx8_compression = avio_r8(pb);
            }
            break;

        case ID_MHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 4);
            iff->maud_bits = avio_rb16(pb);
            avio_skip(pb, 2);
            num = avio_rb32(pb);
            den = avio_rb16(pb);
            if (!den)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 2);
            st->codec->sample_rate = num / den;
            st->codec->channels = avio_rb16(pb);
            iff->maud_compression = avio_rb16(pb);
            if (st->codec->channels == 1)
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            else if (st->codec->channels == 2)
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            break;

        case ID_ABIT:
        case ID_BODY:
        case ID_DBOD:
        case ID_DSD:
        case ID_MDAT:
            iff->body_pos = avio_tell(pb);
            iff->body_end = iff->body_pos + data_size;
            iff->body_size = data_size;
            break;

        case ID_CHAN:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rb32(pb) < 6) {
                st->codec->channels       = 1;
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            } else {
                st->codec->channels       = 2;
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            }
            break;

        case ID_CAMG:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            screenmode                = avio_rb32(pb);
            break;

        case ID_CMAP:
            if (data_size < 3 || data_size > 768 || data_size % 3) {
                 av_log(s, AV_LOG_ERROR, "Invalid CMAP chunk size %"PRIu64"\n",
                        data_size);
                 return AVERROR_INVALIDDATA;
            }
            st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size <= 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            avio_skip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = avio_r8(pb);
            if (data_size >= 10)
                masking                      = avio_r8(pb);
            if (data_size >= 11)
                iff->bitmap_compression      = avio_r8(pb);
            if (data_size >= 14) {
                avio_skip(pb, 1); // padding
                transparency                 = avio_rb16(pb);
            }
            if (data_size >= 16) {
                st->sample_aspect_ratio.num  = avio_r8(pb);
                st->sample_aspect_ratio.den  = avio_r8(pb);
            }
            break;

        case ID_DPEL:
            if (data_size < 4 || (data_size & 3))
                return AVERROR_INVALIDDATA;
            if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0)
                return fmt_size;
            if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24)))
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba)))
                st->codec->pix_fmt = AV_PIX_FMT_RGBA;
            else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra)))
                st->codec->pix_fmt = AV_PIX_FMT_BGRA;
            else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb)))
                st->codec->pix_fmt = AV_PIX_FMT_ARGB;
            else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr)))
                st->codec->pix_fmt = AV_PIX_FMT_ABGR;
            else {
                avpriv_request_sample(s, "color format %.16s", fmt);
                return AVERROR_PATCHWELCOME;
            }
            break;

        case ID_DGBL:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size < 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            iff->bitmap_compression          = avio_rb16(pb);
            st->sample_aspect_ratio.num      = avio_r8(pb);
            st->sample_aspect_ratio.den      = avio_r8(pb);
            st->codec->bits_per_coded_sample = 24;
            break;

        case ID_DLOC:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            st->codec->width  = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            break;

        case ID_TVDC:
            if (data_size < sizeof(iff->tvdc))
                return AVERROR_INVALIDDATA;
            res = avio_read(pb, iff->tvdc, sizeof(iff->tvdc));
            if (res < 0)
                return res;
            break;

        case ID_ANNO:
        case ID_TEXT:      metadata_tag = "comment";   break;
        case ID_AUTH:      metadata_tag = "artist";    break;
        case ID_COPYRIGHT: metadata_tag = "copyright"; break;
        case ID_NAME:      metadata_tag = "title";     break;

        /* DSD tags */

        case MKTAG('F','V','E','R'):
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            version = avio_rb32(pb);
            av_log(s, AV_LOG_DEBUG, "DSIFF v%d.%d.%d.%d\n",version >> 24, (version >> 16) & 0xFF, (version >> 8) & 0xFF, version & 0xFF);
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            break;

        case MKTAG('D','I','I','N'):
            res = parse_dsd_diin(s, st, orig_pos + data_size);
            if (res < 0)
                return res;
            break;

        case MKTAG('P','R','O','P'):
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rl32(pb) != MKTAG('S','N','D',' ')) {
                avpriv_request_sample(s, "unknown property type");
                break;
            }
            res = parse_dsd_prop(s, st, orig_pos + data_size);
            if (res < 0)
                return res;
            break;

        case MKTAG('C','O','M','T'):
            if (data_size < 2)
                return AVERROR_INVALIDDATA;
            nb_comments = avio_rb16(pb);
            for (i = 0; i < nb_comments; i++) {
                int year, mon, day, hour, min, type, ref;
                char tmp[24];
                const char *tag;
                int metadata_size;

                year = avio_rb16(pb);
                mon  = avio_r8(pb);
                day  = avio_r8(pb);
                hour = avio_r8(pb);
                min  = avio_r8(pb);
                snprintf(tmp, sizeof(tmp), "%04d-%02d-%02d %02d:%02d", year, mon, day, hour, min);
                av_dict_set(&st->metadata, "comment_time", tmp, 0);

                type = avio_rb16(pb);
                ref  = avio_rb16(pb);
                switch (type) {
                case 1:
                    if (!i)
                        tag = "channel_comment";
                    else {
                        snprintf(tmp, sizeof(tmp), "channel%d_comment", ref);
                        tag = tmp;
                    }
                    break;
                case 2:
                    tag = ref < FF_ARRAY_ELEMS(dsd_source_comment) ? dsd_source_comment[ref] : "source_comment";
                    break;
                case 3:
                    tag = ref < FF_ARRAY_ELEMS(dsd_history_comment) ? dsd_history_comment[ref] : "file_history";
                    break;
                default:
                    tag = "comment";
                }

                metadata_size  = avio_rb32(pb);
                if ((res = get_metadata(s, tag, metadata_size)) < 0) {
                    av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", tag);
                    return res;
                }

                if (metadata_size & 1)
                    avio_skip(pb, 1);
            }
            break;
        }

        if (metadata_tag) {
            if ((res = get_metadata(s, metadata_tag, data_size)) < 0) {
                av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag);
                return res;
            }
        }
        avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1));
    }

    avio_seek(pb, iff->body_pos, SEEK_SET);

    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);

        if (st->codec->codec_tag == ID_16SV)
            st->codec->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR;
        else if (st->codec->codec_tag == ID_MAUD) {
            if (iff->maud_bits == 8 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_U8;
            } else if (iff->maud_bits == 16 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
            } else if (iff->maud_bits ==  8 && iff->maud_compression == 2) {
                st->codec->codec_id = AV_CODEC_ID_PCM_ALAW;
            } else if (iff->maud_bits ==  8 && iff->maud_compression == 3) {
                st->codec->codec_id = AV_CODEC_ID_PCM_MULAW;
            } else {
                avpriv_request_sample(s, "compression %d and bit depth %d", iff->maud_compression, iff->maud_bits);
                return AVERROR_PATCHWELCOME;
            }
        } else if (st->codec->codec_tag != ID_DSD) {
            switch (iff->svx8_compression) {
            case COMP_NONE:
                st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR;
                break;
            case COMP_FIB:
                st->codec->codec_id = AV_CODEC_ID_8SVX_FIB;
                break;
            case COMP_EXP:
                st->codec->codec_id = AV_CODEC_ID_8SVX_EXP;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Unknown SVX8 compression method '%d'\n", iff->svx8_compression);
                return -1;
            }
        }

        st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case AVMEDIA_TYPE_VIDEO:
        iff->bpp          = st->codec->bits_per_coded_sample;
        if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
            iff->ham      = iff->bpp > 6 ? 6 : 4;
            st->codec->bits_per_coded_sample = 24;
        }
        iff->flags        = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking      = masking;
        iff->transparency = transparency;

        if (!st->codec->extradata) {
            st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
        }
        av_assert0(st->codec->extradata_size >= IFF_EXTRA_VIDEO_SIZE);
        buf = st->codec->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);
        bytestream_put_buffer(&buf, iff->tvdc, sizeof(iff->tvdc));
        st->codec->codec_id = AV_CODEC_ID_IFF_ILBM;
        break;
    default:
        return -1;
    }

    return 0;
}
Example #19
/* GIF header */
static int gif_image_write_header(uint8_t **bytestream,
                                  int width, int height, int loop_count,
                                  uint32_t *palette)
{
    int i;
    unsigned int v;

    bytestream_put_buffer(bytestream, "GIF", 3);
    bytestream_put_buffer(bytestream, "89a", 3);
    bytestream_put_le16(bytestream, width);
    bytestream_put_le16(bytestream, height);

    bytestream_put_byte(bytestream, 0xf7); /* flags: global clut, 256 entries */
    bytestream_put_byte(bytestream, 0x1f); /* background color index */
    bytestream_put_byte(bytestream, 0); /* aspect ratio */

    /* the global palette */
    if (!palette) {
        bytestream_put_buffer(bytestream, (const unsigned char *)gif_clut, 216*3);
        for(i=0;i<((256-216)*3);i++)
            bytestream_put_byte(bytestream, 0);
    } else {
        for(i=0;i<256;i++) {
            v = palette[i];
            bytestream_put_be24(bytestream, v);
        }
    }

    /* update: this is the 'NETSCAPE EXTENSION' that allows for looped animated GIFs,
       see http://members.aol.com/royalef/gifabout.htm#net-extension

       byte   1       : 33 (hex 0x21) GIF Extension code
       byte   2       : 255 (hex 0xFF) Application Extension Label
       byte   3       : 11 (hex 0x0B) Length of Application Block
                        (eleven bytes of data to follow)
       bytes  4 to 11 : "NETSCAPE"
       bytes 12 to 14 : "2.0"
       byte  15       : 3 (hex 0x03) Length of Data Sub-Block
                        (three bytes of data to follow)
       byte  16       : 1 (hex 0x01)
       bytes 17 to 18 : 0 to 65535, an unsigned integer in
                        lo-hi byte format, indicating the number
                        of iterations the loop should be executed
       byte  19       : 0 (hex 0x00) a Data Sub-Block Terminator
    */

    /* application extension header */
#ifdef GIF_ADD_APP_HEADER
    if (loop_count >= 0 && loop_count <= 65535) {
        bytestream_put_byte(bytestream, 0x21);
        bytestream_put_byte(bytestream, 0xff);
        bytestream_put_byte(bytestream, 0x0b);
        bytestream_put_buffer(bytestream, "NETSCAPE2.0", 11);  // bytes 4 to 14
        bytestream_put_byte(bytestream, 0x03); // byte 15
        bytestream_put_byte(bytestream, 0x01); // byte 16
        bytestream_put_le16(bytestream, (uint16_t)loop_count);
        bytestream_put_byte(bytestream, 0x00); // byte 19
    }
#endif
    return 0;
}
Example #20
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size, skip = 0;
    int ret, i;

    if (avio_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        size    = b->last_block_used_bytes;
        samples = b->last_block_samples;
        skip    = b->last_block_size - b->last_block_used_bytes;

        if (samples < size * 14 / 8) {
            uint32_t adjusted_size = samples / 14 * 8;
            if (samples % 14)
                adjusted_size += (samples % 14 + 1) / 2 + 1;

            skip += size - adjusted_size;
            size = adjusted_size;
        }
    } else if (b->current_block < b->block_count) {
        size    = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if (codec->codec_id == AV_CODEC_ID_ADPCM_THP ||
        codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
        uint8_t *dst;

        if (!b->adpc) {
            av_log(s, AV_LOG_ERROR, "adpcm_thp requires ADPC chunk, but none was found.\n");
            return AVERROR_INVALIDDATA;
        }
        if (!b->table) {
            b->table = av_mallocz(32 * codec->channels);
            if (!b->table)
                return AVERROR(ENOMEM);
        }

        if (size > (INT_MAX - 32 - 4) ||
            (32 + 4 + size) > (INT_MAX / codec->channels) ||
            (32 + 4 + size) * codec->channels > INT_MAX - 8)
            return AVERROR_INVALIDDATA;
        if (av_new_packet(pkt, 8 + (32 + 4 + size) * codec->channels) < 0)
            return AVERROR(ENOMEM);
        dst = pkt->data;
        if (codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
            bytestream_put_le32(&dst, size * codec->channels);
            bytestream_put_le32(&dst, samples);
        } else {
            bytestream_put_be32(&dst, size * codec->channels);
            bytestream_put_be32(&dst, samples);
        }
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        for (i = 0; i < codec->channels; i++) {
            ret = avio_read(s->pb, dst, size);
            dst += size;
            avio_skip(s->pb, skip);
            if (ret != size) {
                av_free_packet(pkt);
                break;
            }
        }
        pkt->duration = samples;
    } else {
        size *= codec->channels;
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret != size)
        ret = AVERROR(EIO);

    return ret;
}
Example #21
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *qtables = NULL;
    int nb_qtables = 0;
    uint8_t type;
    uint8_t w, h;
    uint8_t *p;
    int off = 0; /* fragment offset of the current JPEG frame */
    int len;
    int i;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* convert video pixel dimensions from pixels to blocks */
    w = (s1->streams[0]->codec->width  + 7) >> 3;
    h = (s1->streams[0]->codec->height + 7) >> 3;

    /* get the pixel format type or fail */
    if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ422P ||
        (s1->streams[0]->codec->color_range == AVCOL_RANGE_JPEG &&
         s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUV422P)) {
        type = 0;
    } else if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ420P ||
               (s1->streams[0]->codec->color_range == AVCOL_RANGE_JPEG &&
                s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUV420P)) {
        type = 1;
    } else {
        av_log(s1, AV_LOG_ERROR, "Unsupported pixel format\n");
        return;
    }

    /* preparse the header for getting some infos */
    for (i = 0; i < size; i++) {
        if (buf[i] != 0xff)
            continue;

        if (buf[i + 1] == DQT) {
            if (buf[i + 4])
                av_log(s1, AV_LOG_WARNING,
                       "Only 8-bit precision is supported.\n");

            /* a quantization table is 64 bytes long */
            nb_qtables = AV_RB16(&buf[i + 2]) / 65;
            if (i + 4 + nb_qtables * 65 > size) {
                av_log(s1, AV_LOG_ERROR, "Too short JPEG header. Aborted!\n");
                return;
            }

            qtables = &buf[i + 4];
        } else if (buf[i + 1] == SOF0) {
            if (buf[i + 14] != 17 || buf[i + 17] != 17) {
                av_log(s1, AV_LOG_ERROR,
                       "Only 1x1 chroma blocks are supported. Aborted!\n");
                return;
            }
        } else if (buf[i + 1] == SOS) {
            /* SOS is last marker in the header */
            i += AV_RB16(&buf[i + 2]) + 2;
            break;
        }
    }

    /* skip JPEG header */
    buf  += i;
    size -= i;

    for (i = size - 2; i >= 0; i--) {
        if (buf[i] == 0xff && buf[i + 1] == EOI) {
            /* Remove the EOI marker */
            size = i;
            break;
        }
    }

    p = s->buf_ptr;
    while (size > 0) {
        int hdr_size = 8;

        if (off == 0 && nb_qtables)
            hdr_size += 4 + 64 * nb_qtables;

        /* payload max in one packet */
        len = FFMIN(size, s->max_payload_size - hdr_size);

        /* set main header */
        bytestream_put_byte(&p, 0);
        bytestream_put_be24(&p, off);
        bytestream_put_byte(&p, type);
        bytestream_put_byte(&p, 255);
        bytestream_put_byte(&p, w);
        bytestream_put_byte(&p, h);

        if (off == 0 && nb_qtables) {
            /* set quantization tables header */
            bytestream_put_byte(&p, 0);
            bytestream_put_byte(&p, 0);
            bytestream_put_be16(&p, 64 * nb_qtables);

            for (i = 0; i < nb_qtables; i++)
                bytestream_put_buffer(&p, &qtables[65 * i + 1], 64);
        }

        /* copy payload data */
        memcpy(p, buf, len);

        /* marker bit is last packet in frame */
        ff_rtp_send_data(s1, s->buf, len + hdr_size, size == len);

        buf  += len;
        size -= len;
        off  += len;
        p     = s->buf;
    }
}
Example #22
/**
 * Compute the best RLE sequence for a line
 */
static void qtrle_encode_line(QtrleEncContext *s, const AVFrame *p, int line, uint8_t **buf)
{
    int width=s->logical_width;
    int i;
    signed char rlecode;

    /* This will be the number of pixels equal to the previous frame one's
     * starting from the ith pixel */
    unsigned int skipcount;
    /* This will be the number of consecutive equal pixels in the current
     * frame, starting from the ith one also */
    unsigned int av_uninit(repeatcount);

    /* The cost of the three different possibilities */
    int total_skip_cost;
    int total_repeat_cost;

    int base_bulk_cost;
    int lowest_bulk_cost;
    int lowest_bulk_cost_index;
    int sec_lowest_bulk_cost;
    int sec_lowest_bulk_cost_index;

    uint8_t *this_line = p->               data[0] + line*p->               linesize[0] +
        (width - 1)*s->pixel_size;
    uint8_t *prev_line = s->previous_frame->data[0] + line * s->previous_frame->linesize[0] +
        (width - 1)*s->pixel_size;

    s->length_table[width] = 0;
    skipcount = 0;

    /* Initial values */
    lowest_bulk_cost = INT_MAX / 2;
    lowest_bulk_cost_index = width;
    sec_lowest_bulk_cost = INT_MAX / 2;
    sec_lowest_bulk_cost_index = width;

    base_bulk_cost = 1 + s->pixel_size;

    for (i = width - 1; i >= 0; i--) {

        int prev_bulk_cost;

        /* If our lowest bulk cost index is too far away, replace it
         * with the next lowest bulk cost */
        if (FFMIN(width, i + MAX_RLE_BULK) < lowest_bulk_cost_index) {
            lowest_bulk_cost = sec_lowest_bulk_cost;
            lowest_bulk_cost_index = sec_lowest_bulk_cost_index;

            sec_lowest_bulk_cost = INT_MAX / 2;
            sec_lowest_bulk_cost_index = width;
        }

        /* Deal with the first pixel's bulk cost */
        if (!i) {
            base_bulk_cost++;
            lowest_bulk_cost++;
            sec_lowest_bulk_cost++;
        }

        /* Look at the bulk cost of the previous loop and see if it is
         * a new lower bulk cost */
        prev_bulk_cost = s->length_table[i + 1] + base_bulk_cost;
        if (prev_bulk_cost <= sec_lowest_bulk_cost) {
            /* If it's lower than the 2nd lowest, then it may be lower
             * than the lowest */
            if (prev_bulk_cost <= lowest_bulk_cost) {

                /* If we have found a new lowest bulk cost,
                 * then the 2nd lowest bulk cost is now farther than the
                 * lowest bulk cost, and will never be used */
                sec_lowest_bulk_cost = INT_MAX / 2;

                lowest_bulk_cost = prev_bulk_cost;
                lowest_bulk_cost_index = i + 1;
            } else {
                /* Then it must be the 2nd lowest bulk cost */
                sec_lowest_bulk_cost = prev_bulk_cost;
                sec_lowest_bulk_cost_index = i + 1;
            }
        }

        if (!s->key_frame && !memcmp(this_line, prev_line, s->pixel_size))
            skipcount = FFMIN(skipcount + 1, MAX_RLE_SKIP);
        else
            skipcount = 0;

        total_skip_cost  = s->length_table[i + skipcount] + 2;
        s->skip_table[i] = skipcount;


        if (i < width - 1 && !memcmp(this_line, this_line + s->pixel_size, s->pixel_size))
            repeatcount = FFMIN(repeatcount + 1, MAX_RLE_REPEAT);
        else
            repeatcount = 1;

        total_repeat_cost = s->length_table[i + repeatcount] + 1 + s->pixel_size;

        /* the skip code is free for the first pixel but costs one byte for repeat
         * and bulk copy, so adjust the costs accordingly */
        if (i == 0) {
            total_skip_cost--;
            total_repeat_cost++;
        }

        if (repeatcount > 1 && (skipcount == 0 || total_repeat_cost < total_skip_cost)) {
            /* repeat is the best */
            s->length_table[i]  = total_repeat_cost;
            s->rlecode_table[i] = -repeatcount;
        }
        else if (skipcount > 0) {
            /* skip is the best choice here */
            s->length_table[i]  = total_skip_cost;
            s->rlecode_table[i] = 0;
        }
        else {
            /* We can do neither skip nor repeat,
             * so we use the best bulk copy */

            s->length_table[i]  = lowest_bulk_cost;
            s->rlecode_table[i] = lowest_bulk_cost_index - i;

        }

        /* These bulk costs increase every iteration */
        lowest_bulk_cost += s->pixel_size;
        sec_lowest_bulk_cost += s->pixel_size;

        this_line -= s->pixel_size;
        prev_line -= s->pixel_size;
    }

    /* Good! Now we have the best sequence for this line, let's output it. */

    /* We do a special case for the first pixel so that we avoid testing it in
     * the whole loop */

    i=0;
    this_line = p->               data[0] + line*p->linesize[0];

    if (s->rlecode_table[0] == 0) {
        bytestream_put_byte(buf, s->skip_table[0] + 1);
        i += s->skip_table[0];
    }
    else bytestream_put_byte(buf, 1);


    while (i < width) {
        rlecode = s->rlecode_table[i];
        bytestream_put_byte(buf, rlecode);
        if (rlecode == 0) {
            /* Write a skip sequence */
            bytestream_put_byte(buf, s->skip_table[i] + 1);
            i += s->skip_table[i];
        }
        else if (rlecode > 0) {
            /* bulk copy */
            if (s->avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                int j;
                // QT grayscale colorspace has 0=white and 255=black, we will
                // ignore the palette that is included in the AVFrame because
                // AV_PIX_FMT_GRAY8 has defined color mapping
                for (j = 0; j < rlecode*s->pixel_size; ++j)
                    bytestream_put_byte(buf, *(this_line + i*s->pixel_size + j) ^ 0xff);
            } else {
                bytestream_put_buffer(buf, this_line + i*s->pixel_size, rlecode*s->pixel_size);
            }
            i += rlecode;
        }
        else {
            /* repeat the bits */
            if (s->avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                int j;
                // QT grayscale colorspace has 0=white and 255=black, ...
                for (j = 0; j < s->pixel_size; ++j)
                    bytestream_put_byte(buf, *(this_line + i*s->pixel_size + j) ^ 0xff);
            } else {
                bytestream_put_buffer(buf, this_line + i*s->pixel_size, s->pixel_size);
            }
            i -= rlecode;
        }
    }
    bytestream_put_byte(buf, -1); // end RLE line
}
Example #23
void ff_amf_write_string(uint8_t **dst, const char *str)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_STRING);
    bytestream_put_be16(dst, strlen(str));
    bytestream_put_buffer(dst, (uint8_t *)str, strlen(str));
}
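A short usage sketch for the helper above, assuming its declaration in libavformat/rtmppkt.h; the scratch-buffer size and the "connect" command name are illustrative assumptions, not FFmpeg code.

#include "libavformat/rtmppkt.h"

/* Hedged usage sketch: serialize one AMF string into a scratch buffer and
 * measure how many bytes were written by how far the pointer advanced. */
static int amf_string_demo(void)
{
    uint8_t scratch[64];                 /* plenty for the 10-byte result */
    uint8_t *p = scratch;
    ff_amf_write_string(&p, "connect");  /* 1 type byte + 2 length bytes + 7 chars */
    return p - scratch;                  /* 10 */
}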
Example #24
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *qtables[4] = { NULL };
    int nb_qtables = 0;
    uint8_t type;
    uint8_t w, h;
    uint8_t *p;
    int off = 0; /* fragment offset of the current JPEG frame */
    int len;
    int i;
    int default_huffman_tables = 0;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* convert video pixel dimensions from pixels to blocks */
    w = AV_CEIL_RSHIFT(s1->streams[0]->codecpar->width, 3);
    h = AV_CEIL_RSHIFT(s1->streams[0]->codecpar->height, 3);

    /* get the pixel format type or fail */
    if (s1->streams[0]->codecpar->format == AV_PIX_FMT_YUVJ422P ||
        (s1->streams[0]->codecpar->color_range == AVCOL_RANGE_JPEG &&
         s1->streams[0]->codecpar->format == AV_PIX_FMT_YUV422P)) {
        type = 0;
    } else if (s1->streams[0]->codecpar->format == AV_PIX_FMT_YUVJ420P ||
               (s1->streams[0]->codecpar->color_range == AVCOL_RANGE_JPEG &&
                s1->streams[0]->codecpar->format == AV_PIX_FMT_YUV420P)) {
        type = 1;
    } else {
        av_log(s1, AV_LOG_ERROR, "Unsupported pixel format\n");
        return;
    }

    /* preparse the header for getting some infos */
    for (i = 0; i < size; i++) {
        if (buf[i] != 0xff)
            continue;

        if (buf[i + 1] == DQT) {
            int tables, j;
            if (buf[i + 4] & 0xF0)
                av_log(s1, AV_LOG_WARNING,
                       "Only 8-bit precision is supported.\n");

            /* a quantization table is 64 bytes long */
            tables = AV_RB16(&buf[i + 2]) / 65;
            if (i + 5 + tables * 65 > size) {
                av_log(s1, AV_LOG_ERROR, "Too short JPEG header. Aborted!\n");
                return;
            }
            if (nb_qtables + tables > 4) {
                av_log(s1, AV_LOG_ERROR, "Invalid number of quantisation tables\n");
                return;
            }

            for (j = 0; j < tables; j++)
                qtables[nb_qtables + j] = buf + i + 5 + j * 65;
            nb_qtables += tables;
        } else if (buf[i + 1] == SOF0) {
            if (buf[i + 14] != 17 || buf[i + 17] != 17) {
                av_log(s1, AV_LOG_ERROR,
                       "Only 1x1 chroma blocks are supported. Aborted!\n");
                return;
            }
        } else if (buf[i + 1] == DHT) {
            int dht_size = AV_RB16(&buf[i + 2]);
            default_huffman_tables |= 1 << 4;
            i += 3;
            dht_size -= 2;
            if (i + dht_size >= size)
                continue;
            while (dht_size > 0)
                switch (buf[i + 1]) {
                case 0x00:
                    if (   dht_size >= 29
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_dc_luminance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_dc, 12)) {
                        default_huffman_tables |= 1;
                        i += 29;
                        dht_size -= 29;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                case 0x01:
                    if (   dht_size >= 29
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_dc_chrominance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_dc, 12)) {
                        default_huffman_tables |= 1 << 1;
                        i += 29;
                        dht_size -= 29;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                case 0x10:
                    if (   dht_size >= 179
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_ac_luminance   + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_ac_luminance, 162)) {
                        default_huffman_tables |= 1 << 2;
                        i += 179;
                        dht_size -= 179;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                case 0x11:
                    if (   dht_size >= 179
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_ac_chrominance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_ac_chrominance, 162)) {
                        default_huffman_tables |= 1 << 3;
                        i += 179;
                        dht_size -= 179;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                default:
                    i += dht_size;
                    dht_size = 0;
                    continue;
            }
        } else if (buf[i + 1] == SOS) {
            /* SOS is last marker in the header */
            i += AV_RB16(&buf[i + 2]) + 2;
            if (i > size) {
                av_log(s1, AV_LOG_ERROR,
                       "Insufficient data. Aborted!\n");
                return;
            }
            break;
        }
    }
    if (default_huffman_tables && default_huffman_tables != 31) {
        av_log(s1, AV_LOG_ERROR,
               "RFC 2435 requires standard Huffman tables for jpeg\n");
        return;
    }
    if (nb_qtables && nb_qtables != 2)
        av_log(s1, AV_LOG_WARNING,
               "RFC 2435 suggests two quantization tables, %d provided\n",
               nb_qtables);

    /* skip JPEG header */
    buf  += i;
    size -= i;

    for (i = size - 2; i >= 0; i--) {
        if (buf[i] == 0xff && buf[i + 1] == EOI) {
            /* Remove the EOI marker */
            size = i;
            break;
        }
    }

    p = s->buf_ptr;
    while (size > 0) {
        int hdr_size = 8;

        if (off == 0 && nb_qtables)
            hdr_size += 4 + 64 * nb_qtables;

        /* payload max in one packet */
        len = FFMIN(size, s->max_payload_size - hdr_size);

        /* set main header */
        bytestream_put_byte(&p, 0);
        bytestream_put_be24(&p, off);
        bytestream_put_byte(&p, type);
        bytestream_put_byte(&p, 255);
        bytestream_put_byte(&p, w);
        bytestream_put_byte(&p, h);

        if (off == 0 && nb_qtables) {
            /* set quantization tables header */
            bytestream_put_byte(&p, 0);
            bytestream_put_byte(&p, 0);
            bytestream_put_be16(&p, 64 * nb_qtables);

            for (i = 0; i < nb_qtables; i++)
                bytestream_put_buffer(&p, qtables[i], 64);
        }

        /* copy payload data */
        memcpy(p, buf, len);

        /* marker bit is last packet in frame */
        ff_rtp_send_data(s1, s->buf, len + hdr_size, size == len);

        buf  += len;
        size -= len;
        off  += len;
        p     = s->buf;
    }
}
Example #25
static int iff_read_header(AVFormatContext *s)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *buf;
    uint32_t chunk_id, data_size;
    uint32_t screenmode = 0, num, den;
    unsigned transparency = 0;
    unsigned masking = 0; // no mask
    uint8_t fmt[16];
    int fmt_size;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    avio_skip(pb, 8);
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = avio_rl32(pb);
    iff->bitmap_compression = -1;
    iff->svx8_compression = -1;
    iff->maud_bits = -1;
    iff->maud_compression = -1;

    while(!url_feof(pb)) {
        uint64_t orig_pos;
        int res;
        const char *metadata_tag = NULL;
        chunk_id = avio_rl32(pb);
        data_size = avio_rb32(pb);
        orig_pos = avio_tell(pb);

        switch(chunk_id) {
        case ID_VHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 14)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 12);
            st->codec->sample_rate = avio_rb16(pb);
            if (data_size >= 16) {
                avio_skip(pb, 1);
                iff->svx8_compression = avio_r8(pb);
            }
            break;

        case ID_MHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 4);
            iff->maud_bits = avio_rb16(pb);
            avio_skip(pb, 2);
            num = avio_rb32(pb);
            den = avio_rb16(pb);
            if (!den)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 2);
            st->codec->sample_rate = num / den;
            st->codec->channels = avio_rb16(pb);
            iff->maud_compression = avio_rb16(pb);
            if (st->codec->channels == 1)
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            else if (st->codec->channels == 2)
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            break;

        case ID_ABIT:
        case ID_BODY:
        case ID_DBOD:
        case ID_MDAT:
            iff->body_pos = avio_tell(pb);
            iff->body_size = data_size;
            break;

        case ID_CHAN:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rb32(pb) < 6) {
                st->codec->channels       = 1;
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            } else {
                st->codec->channels       = 2;
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            }
            break;

        case ID_CAMG:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            screenmode                = avio_rb32(pb);
            break;

        case ID_CMAP:
            if (data_size > INT_MAX - IFF_EXTRA_VIDEO_SIZE - FF_INPUT_BUFFER_PADDING_SIZE)
                return AVERROR_INVALIDDATA;
            st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size <= 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            avio_skip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = avio_r8(pb);
            if (data_size >= 10)
                masking                      = avio_r8(pb);
            if (data_size >= 11)
                iff->bitmap_compression      = avio_r8(pb);
            if (data_size >= 14) {
                avio_skip(pb, 1); // padding
                transparency                 = avio_rb16(pb);
            }
            if (data_size >= 16) {
                st->sample_aspect_ratio.num  = avio_r8(pb);
                st->sample_aspect_ratio.den  = avio_r8(pb);
            }
            break;

        case ID_DPEL:
            if (data_size < 4 || (data_size & 3))
                return AVERROR_INVALIDDATA;
            if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0)
                return fmt_size;
            if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24)))
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba)))
                st->codec->pix_fmt = AV_PIX_FMT_RGBA;
            else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra)))
                st->codec->pix_fmt = AV_PIX_FMT_BGRA;
            else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb)))
                st->codec->pix_fmt = AV_PIX_FMT_ARGB;
            else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr)))
                st->codec->pix_fmt = AV_PIX_FMT_ABGR;
            else {
                av_log_ask_for_sample(s, "unsupported color format\n");
                return AVERROR_PATCHWELCOME;
            }
            break;

        case ID_DGBL:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size < 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            iff->bitmap_compression          = avio_rb16(pb);
            st->sample_aspect_ratio.num      = avio_r8(pb);
            st->sample_aspect_ratio.den      = avio_r8(pb);
            st->codec->bits_per_coded_sample = 24;
            break;

        case ID_DLOC:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            st->codec->width  = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            break;

        case ID_TVDC:
            if (data_size < sizeof(iff->tvdc))
                return AVERROR_INVALIDDATA;
            res = avio_read(pb, iff->tvdc, sizeof(iff->tvdc));
            if (res < 0)
                return res;
            break;

        case ID_ANNO:
        case ID_TEXT:      metadata_tag = "comment";   break;
        case ID_AUTH:      metadata_tag = "artist";    break;
        case ID_COPYRIGHT: metadata_tag = "copyright"; break;
        case ID_NAME:      metadata_tag = "title";     break;
        }

        if (metadata_tag) {
            if ((res = get_metadata(s, metadata_tag, data_size)) < 0) {
                av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag);
                return res;
            }
        }
        avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1));
    }

    avio_seek(pb, iff->body_pos, SEEK_SET);

    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);

        if (st->codec->codec_tag == ID_16SV)
            st->codec->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR;
        else if (st->codec->codec_tag == ID_MAUD) {
            if (iff->maud_bits == 8 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_U8;
            } else if (iff->maud_bits == 16 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
            } else if (iff->maud_bits ==  8 && iff->maud_compression == 2) {
                st->codec->codec_id = AV_CODEC_ID_PCM_ALAW;
            } else if (iff->maud_bits ==  8 && iff->maud_compression == 3) {
                st->codec->codec_id = AV_CODEC_ID_PCM_MULAW;
            } else {
                av_log_ask_for_sample(s, "unsupported compression %d and bit depth %d\n", iff->maud_compression, iff->maud_bits);
                return AVERROR_PATCHWELCOME;
            }

            st->codec->bits_per_coded_sample =
                av_get_bits_per_sample(st->codec->codec_id);

            st->codec->block_align =
                st->codec->bits_per_coded_sample * st->codec->channels / 8;
        } else {
            switch (iff->svx8_compression) {
            case COMP_NONE:
                st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR;
                break;
            case COMP_FIB:
                st->codec->codec_id = AV_CODEC_ID_8SVX_FIB;
                break;
            case COMP_EXP:
                st->codec->codec_id = AV_CODEC_ID_8SVX_EXP;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Unknown SVX8 compression method '%d'\n",
                       iff->svx8_compression);
                return -1;
            }
        }

        st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case AVMEDIA_TYPE_VIDEO:
        iff->bpp          = st->codec->bits_per_coded_sample;
        if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
            iff->ham      = iff->bpp > 6 ? 6 : 4;
            st->codec->bits_per_coded_sample = 24;
        }
        iff->flags        = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking      = masking;
        iff->transparency = transparency;

        if (!st->codec->extradata) {
            st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
        }
        av_assert0(st->codec->extradata_size >= IFF_EXTRA_VIDEO_SIZE);
        buf = st->codec->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);
        bytestream_put_buffer(&buf, iff->tvdc, sizeof(iff->tvdc));
        st->codec->codec_id = AV_CODEC_ID_IFF_ILBM;
        break;
    default:
        return -1;
    }

    return 0;
}
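The while loop in iff_read_header follows the generic EA-IFF 85 chunk layout: a 4-byte chunk ID, a big-endian 32-bit payload size, the payload itself, and a pad byte whenever the size is odd. A minimal stand-alone sketch of that walk over an in-memory buffer (the function name and the absence of error handling are this example's own, not part of the demuxer):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void walk_iff_chunks(const uint8_t *p, size_t len)
{
    size_t pos = 12;                        /* skip "FORM", its size and the form type */

    while (pos + 8 <= len) {
        char     id[5] = { 0 };
        uint32_t size;

        memcpy(id, p + pos, 4);             /* chunk ID, e.g. "BMHD", "CMAP", "BODY" */
        size = (uint32_t)p[pos + 4] << 24 | p[pos + 5] << 16 |
               p[pos + 6] << 8 | p[pos + 7];

        /* ... dispatch on id and parse up to size bytes of payload here ... */

        pos += 8 + size + (size & 1);       /* chunks are padded to an even length */
    }
}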
Exemple #26
0
/**
 * Computes the best RLE sequence for a line
 */
static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t **buf)
{
    int width=s->avctx->width;
    int i;
    signed char rlecode;

    /* Number of pixels in the best bulk copy sequence */
    unsigned int bulkcount;
    /* This will be the number of pixels equal to the previous frame's,
     * starting from the i-th pixel */
    unsigned int skipcount;
    /* This will be the number of consecutive equal pixels in the current
     * frame, also starting from the i-th one */
    unsigned int av_uninit(repeatcount);

    /* The cost of the three different possibilities */
    int total_bulk_cost;
    int total_skip_cost;
    int total_repeat_cost;

    int temp_cost;
    int j;

    uint8_t *this_line = p->               data[0] + line*p->               linesize[0] +
        (width - 1)*s->pixel_size;
    uint8_t *prev_line = s->previous_frame.data[0] + line*s->previous_frame.linesize[0] +
        (width - 1)*s->pixel_size;

    s->length_table[width] = 0;
    skipcount = 0;

    for (i = width - 1; i >= 0; i--) {

        if (!s->frame.key_frame && !memcmp(this_line, prev_line, s->pixel_size))
            skipcount = FFMIN(skipcount + 1, MAX_RLE_SKIP);
        else
            skipcount = 0;

        total_skip_cost  = s->length_table[i + skipcount] + 2;
        s->skip_table[i] = skipcount;


        if (i < width - 1 && !memcmp(this_line, this_line + s->pixel_size, s->pixel_size))
            repeatcount = FFMIN(repeatcount + 1, MAX_RLE_REPEAT);
        else
            repeatcount = 1;

        total_repeat_cost = s->length_table[i + repeatcount] + 1 + s->pixel_size;

        /* the skip code is free for the first pixel, while repeat and bulk copy
         * cost one extra byte, so adjust the costs accordingly */
        if (i == 0) {
            total_skip_cost--;
            total_repeat_cost++;
        }

        if (repeatcount > 1 && (skipcount == 0 || total_repeat_cost < total_skip_cost)) {
            /* repeat is the best */
            s->length_table[i]  = total_repeat_cost;
            s->rlecode_table[i] = -repeatcount;
        }
        else if (skipcount > 0) {
            /* skip is the best choice here */
            s->length_table[i]  = total_skip_cost;
            s->rlecode_table[i] = 0;
        }
        else {
            /* We can do neither skip nor repeat,
             * so we search for the best bulk copy to do */

            int limit = FFMIN(width - i, MAX_RLE_BULK);

            temp_cost = 1 + s->pixel_size + !i;
            total_bulk_cost = INT_MAX;

            for (j = 1; j <= limit; j++) {
                if (s->length_table[i + j] + temp_cost < total_bulk_cost) {
                    /* We have found a better bulk copy ... */
                    total_bulk_cost = s->length_table[i + j] + temp_cost;
                    bulkcount = j;
                }
                temp_cost += s->pixel_size;
            }

            s->length_table[i]  = total_bulk_cost;
            s->rlecode_table[i] = bulkcount;
        }

        this_line -= s->pixel_size;
        prev_line -= s->pixel_size;
    }

    /* Good! Now we have the best sequence for this line, let's output it */

    /* Special-case the first pixel so that we avoid testing it in
     * the whole loop */

    i=0;
    this_line = p->data[0] + line*p->linesize[0];

    if (s->rlecode_table[0] == 0) {
        bytestream_put_byte(buf, s->skip_table[0] + 1);
        i += s->skip_table[0];
    }
    else bytestream_put_byte(buf, 1);


    while (i < width) {
        rlecode = s->rlecode_table[i];
        bytestream_put_byte(buf, rlecode);
        if (rlecode == 0) {
            /* Write a skip sequence */
            bytestream_put_byte(buf, s->skip_table[i] + 1);
            i += s->skip_table[i];
        }
        else if (rlecode > 0) {
            /* bulk copy */
            bytestream_put_buffer(buf, this_line + i*s->pixel_size, rlecode*s->pixel_size);
            i += rlecode;
        }
        else {
            /* repeat a single pixel -rlecode times */
            bytestream_put_buffer(buf, this_line + i*s->pixel_size, s->pixel_size);
            i -= rlecode;
        }
    }
    bytestream_put_byte(buf, -1); // end RLE line
}
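The opcodes emitted above form the QuickTime RLE line grammar: a 1-based initial skip count, then one signed code byte per run (0 = skip run, positive = literal copy of that many pixels, negative = repeat one pixel), terminated by -1. A rough decoder-side sketch of that grammar, assuming a single line with pixel_size bytes per pixel and enough room in dst (this helper is illustrative and not part of the encoder above):

#include <stdint.h>
#include <string.h>

static const uint8_t *qtrle_decode_line_sketch(const uint8_t *src, uint8_t *dst,
                                               int pixel_size)
{
    int x = *src++ - 1;                      /* initial skip, stored as count + 1 */

    for (;;) {
        signed char code = (signed char)*src++;

        if (code == -1)                      /* end-of-line marker */
            break;
        if (code == 0) {                     /* skip run, again stored as count + 1 */
            x += *src++ - 1;
        } else if (code > 0) {               /* bulk copy of 'code' pixels */
            memcpy(dst + x * pixel_size, src, code * pixel_size);
            src += code * pixel_size;
            x   += code;
        } else {                             /* repeat one pixel -code times */
            for (int k = 0; k < -code; k++)
                memcpy(dst + (x + k) * pixel_size, src, pixel_size);
            src += pixel_size;
            x   += -code;
        }
    }
    return src;
}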
void ff_amf_write_field_name(uint8_t **dst, const char *str)
{
    bytestream_put_be16(dst, strlen(str));
    bytestream_put_buffer(dst, (const uint8_t*)str, strlen(str));
}
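A brief usage sketch of ff_amf_write_field_name, showing the AMF0 field-name encoding it produces (the buffer and the "duration" field name are this example's own):

#include <stdint.h>

static void amf_field_name_example(void)
{
    uint8_t  buf[64];
    uint8_t *dst = buf;

    ff_amf_write_field_name(&dst, "duration");
    /* buf now holds the big-endian 16-bit length 0x00 0x08 followed by the
     * ASCII bytes of "duration"; an AMF0-encoded value would normally be
     * appended after the name. */
}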