Example #1
static int mjpega_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
                              uint8_t **poutbuf, int *poutbuf_size,
                              const uint8_t *buf, int buf_size, int keyframe)
{
    uint8_t *poutbufp;
    unsigned dqt = 0, dht = 0, sof0 = 0;
    int i;

    if (avctx->codec_id != AV_CODEC_ID_MJPEG) {
        av_log(avctx, AV_LOG_ERROR, "mjpega bitstream filter only applies to mjpeg codec\n");
        return 0;
    }

    *poutbuf_size = 0;
    *poutbuf = av_malloc(buf_size + 44 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!*poutbuf)
        return AVERROR(ENOMEM);
    poutbufp = *poutbuf;
    bytestream_put_byte(&poutbufp, 0xff);
    bytestream_put_byte(&poutbufp, SOI);
    bytestream_put_byte(&poutbufp, 0xff);
    bytestream_put_byte(&poutbufp, APP1);
    bytestream_put_be16(&poutbufp, 42); /* size */
    bytestream_put_be32(&poutbufp, 0);
    bytestream_put_buffer(&poutbufp, "mjpg", 4);
    bytestream_put_be32(&poutbufp, buf_size + 44); /* field size */
    bytestream_put_be32(&poutbufp, buf_size + 44); /* pad field size */
    bytestream_put_be32(&poutbufp, 0);             /* next ptr */

    for (i = 0; i < buf_size - 1; i++) {
        if (buf[i] == 0xff) {
            switch (buf[i + 1]) {
            case DQT:  dqt  = i + 46; break;
            case DHT:  dht  = i + 46; break;
            case SOF0: sof0 = i + 46; break;
            case SOS:
                bytestream_put_be32(&poutbufp, dqt); /* quant off */
                bytestream_put_be32(&poutbufp, dht); /* huff off */
                bytestream_put_be32(&poutbufp, sof0); /* image off */
                bytestream_put_be32(&poutbufp, i + 46); /* scan off */
                bytestream_put_be32(&poutbufp, i + 46 + AV_RB16(buf + i + 2)); /* data off */
                bytestream_put_buffer(&poutbufp, buf + 2, buf_size - 2); /* skip already written SOI */
                *poutbuf_size = poutbufp - *poutbuf;
                return 1;
            case APP1:
                if (i + 8 < buf_size && AV_RL32(buf + i + 8) == AV_RL32("mjpg")) {
                    av_log(avctx, AV_LOG_ERROR, "bitstream already formatted\n");
                    memcpy(*poutbuf, buf, buf_size);
                    *poutbuf_size = buf_size;
                    return 1;
                }
            }
        }
    }
    av_freep(poutbuf);
    av_log(avctx, AV_LOG_ERROR, "could not find SOS marker in bitstream\n");
    return 0;
}
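All of these snippets use the writer helpers from FFmpeg/Libav's libavcodec/bytestream.h: each bytestream_put_* call stores a value at the current position and advances the pointer that is passed by address, so successive calls lay out a header without manual offset arithmetic. A minimal sketch of that pattern is shown below; the function name, buffer size, and field values are hypothetical, only the bytestream_put_* helpers themselves come from the examples.

#include <stdint.h>
#include "libavcodec/bytestream.h"

/* Hypothetical header writer: every bytestream_put_* call advances "p",
 * so the return value is simply how many bytes were written. */
static int write_toy_header(uint8_t *out, int out_size)
{
    uint8_t *p = out;

    if (out_size < 12)               /* 2 + 2 + 4 + 4 bytes are written below */
        return -1;

    bytestream_put_be16(&p, 0x1234);                       /* magic, big-endian */
    bytestream_put_be16(&p, 1);                            /* version */
    bytestream_put_be32(&p, 0);                            /* payload size, filled in later */
    bytestream_put_buffer(&p, (const uint8_t *)"TOY!", 4); /* tag */

    return p - out;
}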
Example #2
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size;
    int ret;

    if (avio_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        size    = b->last_block_used_bytes;
        samples = size / (8 * codec->channels) * 14;
    } else if (b->current_block < b->block_count) {
        size    = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if ((codec->codec_id == AV_CODEC_ID_ADPCM_THP ||
         codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) && !codec->extradata) {
        uint8_t *dst;

        if (av_new_packet(pkt, 8 + (32 + 4) * codec->channels + size) < 0)
            return AVERROR(ENOMEM);
        dst = pkt->data;
        bytestream_put_be32(&dst, size);
        bytestream_put_be32(&dst, samples);
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        ret = avio_read(s->pb, dst, size);
        if (ret != size)
            av_free_packet(pkt);
        pkt->duration = samples;
    } else {
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret != size)
        ret = AVERROR(EIO);

    return ret;
}
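For THP ADPCM streams without extradata, the packet is assembled by hand: a 32-bit block size, a 32-bit sample count, 32 bytes of table data per channel and 4 bytes per channel of adpc data for the current block are written with bytestream_put_*, and only then is the raw block read from the input.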
Example #3
int av_packet_merge_side_data(AVPacket *pkt){
    if(pkt->side_data_elems){
        AVBufferRef *buf;
        int i;
        uint8_t *p;
        uint64_t size= pkt->size + 8LL + AV_INPUT_BUFFER_PADDING_SIZE;
        AVPacket old= *pkt;
        for (i=0; i<old.side_data_elems; i++) {
            size += old.side_data[i].size + 5LL;
        }
        if (size > INT_MAX)
            return AVERROR(EINVAL);
        buf = av_buffer_alloc(size);
        if (!buf)
            return AVERROR(ENOMEM);
        pkt->buf = buf;
        pkt->data = p = buf->data;
        pkt->size = size - AV_INPUT_BUFFER_PADDING_SIZE;
        bytestream_put_buffer(&p, old.data, old.size);
        for (i=old.side_data_elems-1; i>=0; i--) {
            bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
            bytestream_put_be32(&p, old.side_data[i].size);
            *p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
        }
        bytestream_put_be64(&p, FF_MERGE_MARKER);
        av_assert0(p-pkt->data == pkt->size);
        memset(p, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        av_packet_unref(&old);
        pkt->side_data_elems = 0;
        pkt->side_data = NULL;
        return 1;
    }
    return 0;
}
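The merged layout is: the original payload first, then for each side-data element (walked in reverse) its data, its size as a big-endian 32-bit value and a type byte whose top bit marks the last element, followed by the 64-bit FF_MERGE_MARKER and zeroed padding.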
Example #4
static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize)
{
    ADXContext *c = avctx->priv_data;

    bytestream_put_be16(&buf, 0x8000);              /* header signature */
    bytestream_put_be16(&buf, HEADER_SIZE - 4);     /* copyright offset */
    bytestream_put_byte(&buf, 3);                   /* encoding */
    bytestream_put_byte(&buf, BLOCK_SIZE);          /* block size */
    bytestream_put_byte(&buf, 4);                   /* sample size */
    bytestream_put_byte(&buf, avctx->channels);     /* channels */
    bytestream_put_be32(&buf, avctx->sample_rate);  /* sample rate */
    bytestream_put_be32(&buf, 0);                   /* total sample count */
    bytestream_put_be16(&buf, c->cutoff);           /* cutoff frequency */
    bytestream_put_byte(&buf, 3);                   /* version */
    bytestream_put_byte(&buf, 0);                   /* flags */
    bytestream_put_be32(&buf, 0);                   /* unknown */
    bytestream_put_be32(&buf, 0);                   /* loop enabled */
    bytestream_put_be16(&buf, 0);                   /* padding */
    bytestream_put_buffer(&buf, "(c)CRI", 6);       /* copyright signature */

    return HEADER_SIZE;
}
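Note that bufsize is accepted but never checked; the fixed sequence of puts is what defines the header, and the function simply reports HEADER_SIZE, so the caller must provide at least that many bytes.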
Example #5
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pic, int *got_packet)
{
    int i, j, ret;
    int aligned_width = FFALIGN(avctx->width,
                                avctx->codec_id == AV_CODEC_ID_R10K ? 1 : 64);
    int pad = (aligned_width - avctx->width) * 4;
    uint8_t *src_line;
    uint8_t *dst;

    if ((ret = ff_alloc_packet2(avctx, pkt, 4 * aligned_width * avctx->height)) < 0)
        return ret;

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    src_line = pic->data[0];
    dst = pkt->data;

    for (i = 0; i < avctx->height; i++) {
        uint16_t *src = (uint16_t *)src_line;
        for (j = 0; j < avctx->width; j++) {
            uint32_t pixel;
            uint16_t r = *src++ >> 6;
            uint16_t g = *src++ >> 6;
            uint16_t b = *src++ >> 6;
            if (avctx->codec_id == AV_CODEC_ID_R210)
                pixel = (r << 20) | (g << 10) | b;
            else
                pixel = (r << 22) | (g << 12) | (b << 2);
            if (avctx->codec_id == AV_CODEC_ID_AVRP)
                bytestream_put_le32(&dst, pixel);
            else
                bytestream_put_be32(&dst, pixel);
        }
        memset(dst, 0, pad);
        dst += pad;
        src_line += pic->linesize[0];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
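Each pixel is packed into a 32-bit word whose bit layout depends on the codec (r210 versus r10k/AVrp) and which is stored little-endian for AVRP and big-endian otherwise; every row is then zero-padded out to the aligned width.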
Example #6
/** Encode frame including header */
static int encode_frame(QtrleEncContext *s, AVFrame *p, uint8_t *buf)
{
	int i;
	int start_line = 0;
	int end_line = s->avctx->height;
	uint8_t *orig_buf = buf;

	if (!s->frame.key_frame)
	{
		unsigned line_size = s->avctx->width * s->pixel_size;
		for (start_line = 0; start_line < s->avctx->height; start_line++)
			if (memcmp(p->data[0] + start_line*p->linesize[0],
			           s->previous_frame.data[0] + start_line*s->previous_frame.linesize[0],
			           line_size))
				break;

		for (end_line=s->avctx->height; end_line > start_line; end_line--)
			if (memcmp(p->data[0] + (end_line - 1)*p->linesize[0],
			           s->previous_frame.data[0] + (end_line - 1)*s->previous_frame.linesize[0],
			           line_size))
				break;
	}

	bytestream_put_be32(&buf, 0);                         // CHUNK SIZE, patched later

	if ((start_line == 0 && end_line == s->avctx->height) || start_line == s->avctx->height)
		bytestream_put_be16(&buf, 0);                     // header
	else
	{
		bytestream_put_be16(&buf, 8);                     // header
		bytestream_put_be16(&buf, start_line);            // starting line
		bytestream_put_be16(&buf, 0);                     // unknown
		bytestream_put_be16(&buf, end_line - start_line); // lines to update
		bytestream_put_be16(&buf, 0);                     // unknown
	}
	for (i = start_line; i < end_line; i++)
		qtrle_encode_line(s, p, i, &buf);

	bytestream_put_byte(&buf, 0);                         // zero skip code = frame finished
	AV_WB32(orig_buf, buf - orig_buf);                    // patch the chunk size
	return buf - orig_buf;
}
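The chunk-size handling above is a common "reserve now, patch later" idiom: a placeholder is written with bytestream_put_be32(), the body follows, and AV_WB32() patches the final length back into the saved start position. A self-contained sketch of just that idiom follows; write_chunk() and its arguments are hypothetical, and it assumes the caller provides at least payload_size + 5 bytes of room (AV_WB32 comes from libavutil/intreadwrite.h).

#include <stdint.h>
#include "libavcodec/bytestream.h"
#include "libavutil/intreadwrite.h"

/* Hypothetical helper: reserve a 32-bit size field, write the body,
 * then patch the real chunk size back into the reserved slot. */
static int write_chunk(uint8_t *buf, const uint8_t *payload, int payload_size)
{
    uint8_t *start = buf;

    bytestream_put_be32(&buf, 0);                       /* chunk size, patched below */
    bytestream_put_buffer(&buf, payload, payload_size);
    bytestream_put_byte(&buf, 0);                       /* terminator, as in the QTRLE chunk */

    AV_WB32(start, buf - start);                        /* patch the real size in place */
    return buf - start;
}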
Example #7
int av_packet_merge_side_data(AVPacket *pkt){
    if(pkt->side_data_elems){
        AVBufferRef *buf;
        int i;
        uint8_t *p;
        uint64_t size= pkt->size + 8LL + FF_INPUT_BUFFER_PADDING_SIZE;
        AVPacket old= *pkt;
        for (i=0; i<old.side_data_elems; i++) {
            size += old.side_data[i].size + 5LL;
        }
        if (size > INT_MAX)
            return AVERROR(EINVAL);
        buf = av_buffer_alloc(size);
        if (!buf)
            return AVERROR(ENOMEM);
        pkt->buf = buf;
        pkt->data = p = buf->data;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        pkt->destruct = dummy_destruct_packet;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        pkt->size = size - FF_INPUT_BUFFER_PADDING_SIZE;
        bytestream_put_buffer(&p, old.data, old.size);
        for (i=old.side_data_elems-1; i>=0; i--) {
            bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
            bytestream_put_be32(&p, old.side_data[i].size);
            *p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
        }
        bytestream_put_be64(&p, FF_MERGE_MARKER);
        av_assert0(p-pkt->data == pkt->size);
        memset(p, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        av_free_packet(&old);
        pkt->side_data_elems = 0;
        pkt->side_data = NULL;
        return 1;
    }
    return 0;
}
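This is the same function as Example #3 from an older tree: it uses FF_INPUT_BUFFER_PADDING_SIZE rather than AV_INPUT_BUFFER_PADDING_SIZE, still sets the deprecated destruct callback, and releases the old packet with av_free_packet() instead of av_packet_unref().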
Example #8
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    int width, height, bits_pixel, i, j, length, ret;
    uint8_t *in_buf, *buf;

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    if (width > 65535 || height > 65535 ||
        width * height >= INT_MAX / 4 - ALIAS_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", width, height);
        return AVERROR_INVALIDDATA;
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        bits_pixel = 8;
        break;
    case AV_PIX_FMT_BGR24:
        bits_pixel = 24;
        break;
    default:
        return AVERROR(EINVAL);
    }

    length = ALIAS_HEADER_SIZE + 4 * width * height; // max possible
    if ((ret = ff_alloc_packet(pkt, length)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length);
        return ret;
    }

    buf = pkt->data;

    /* Encode header. */
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be32(&buf, 0); /* X, Y offset */
    bytestream_put_be16(&buf, bits_pixel);

    for (j = 0; j < height; j++) {
        in_buf = frame->data[0] + frame->linesize[0] * j;
        for (i = 0; i < width; ) {
            int count = 0;
            int pixel;

            if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                pixel = *in_buf;
                while (count < 255 && count + i < width && pixel == *in_buf) {
                    count++;
                    in_buf++;
                }
                bytestream_put_byte(&buf, count);
                bytestream_put_byte(&buf, pixel);
            } else { /* AV_PIX_FMT_BGR24 */
                pixel = AV_RB24(in_buf);
                while (count < 255 && count + i < width &&
                       pixel == AV_RB24(in_buf)) {
                    count++;
                    in_buf += 3;
                }
                bytestream_put_byte(&buf, count);
                bytestream_put_be24(&buf, pixel);
            }
            i += count;
        }
    }

    /* Total length */
    av_shrink_packet(pkt, buf - pkt->data);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
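Each run is emitted as a count byte followed by the repeated pixel value (one byte for GRAY8, three big-endian bytes for BGR24), and av_shrink_packet() trims the worst-case allocation down to the bytes actually written.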
Example #9
static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
                        int buf_size, void *data)
{
    SgiContext *s = avctx->priv_data;
    AVFrame * const p = &s->picture;
    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf;
    int x, y, z, length, tablesize;
    unsigned int width, height, depth, dimension;
    unsigned char *orig_buf = buf, *end_buf = buf + buf_size;

    *p = *(AVFrame*)data;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    switch (avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        dimension = SGI_SINGLE_CHAN;
        depth     = SGI_GRAYSCALE;
        break;
    case PIX_FMT_RGB24:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGB;
        break;
    case PIX_FMT_RGBA:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGBA;
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    tablesize = depth * height * 4;
    length = tablesize * 2 + SGI_HEADER_SIZE;

    if (buf_size < length) {
        av_log(avctx, AV_LOG_ERROR, "buf_size too small(need %d, got %d)\n", length, buf_size);
        return -1;
    }

    /* Encode header. */
    bytestream_put_be16(&buf, SGI_MAGIC);
    bytestream_put_byte(&buf, avctx->coder_type != FF_CODER_TYPE_RAW); /* RLE 1 - VERBATIM 0*/
    bytestream_put_byte(&buf, 1); /* bytes_per_channel */
    bytestream_put_be16(&buf, dimension);
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be16(&buf, depth);

    /* The rest are constant in this implementation. */
    bytestream_put_be32(&buf, 0L); /* pixmin */
    bytestream_put_be32(&buf, 255L); /* pixmax */
    bytestream_put_be32(&buf, 0L); /* dummy */

    /* name */
    memset(buf, 0, SGI_HEADER_SIZE);
    buf += 80;

    /* colormap */
    bytestream_put_be32(&buf, 0L);

    /* The rest of the 512 byte header is unused. */
    buf += 404;
    offsettab = buf;

    if (avctx->coder_type  != FF_CODER_TYPE_RAW) {
        /* Skip RLE offset table. */
        buf += tablesize;
        lengthtab = buf;

        /* Skip RLE length table. */
        buf += tablesize;

        /* Make an intermediate consecutive buffer. */
        if (!(encode_buf = av_malloc(width)))
            return -1;

        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                bytestream_put_be32(&offsettab, buf - orig_buf);

                for (x = 0; x < width; x++)
                    encode_buf[x] = in_buf[depth * x];

                if ((length = ff_rle_encode(buf, end_buf - buf - 1, encode_buf, 1, width, 0, 0, 0x80, 0)) < 1) {
                    av_free(encode_buf);
                    return -1;
                }

                buf += length;
                bytestream_put_byte(&buf, 0);
                bytestream_put_be32(&lengthtab, length + 1);
                in_buf -= p->linesize[0];
            }
        }

        av_free(encode_buf);
    } else {
        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                for (x = 0; x < width * depth; x += depth)
                    bytestream_put_byte(&buf, in_buf[x]);

                in_buf -= p->linesize[0];
            }
        }
    }

    /* total length */
    return buf - orig_buf;
}
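In the RLE path, room for the per-channel, per-line offset and length tables is reserved right after the 512-byte header and the entries are filled in through offsettab and lengthtab as each line is compressed; the raw path simply de-interleaves one channel per pass, starting from the last row of the frame.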
Example #10
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket *prev_pkt)
{
	uint8_t pkt_hdr[16], *p = pkt_hdr;
	int mode = RTMP_PS_TWELVEBYTES;
	int off = 0;
	int size = 0;

	pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;

	//if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
	if (prev_pkt[pkt->channel_id].channel_id &&
	        pkt->extra == prev_pkt[pkt->channel_id].extra)
	{
		if (pkt->type == prev_pkt[pkt->channel_id].type &&
		        pkt->data_size == prev_pkt[pkt->channel_id].data_size)
		{
			mode = RTMP_PS_FOURBYTES;
			if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
				mode = RTMP_PS_ONEBYTE;
		}
		else
		{
			mode = RTMP_PS_EIGHTBYTES;
		}
	}

	if (pkt->channel_id < 64)
	{
		bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
	}
	else if (pkt->channel_id < 64 + 256)
	{
		bytestream_put_byte(&p, 0               | (mode << 6));
		bytestream_put_byte(&p, pkt->channel_id - 64);
	}
	else
	{
		bytestream_put_byte(&p, 1               | (mode << 6));
		bytestream_put_le16(&p, pkt->channel_id - 64);
	}
	if (mode != RTMP_PS_ONEBYTE)
	{
		uint32_t timestamp = pkt->timestamp;
		if (mode != RTMP_PS_TWELVEBYTES)
			timestamp = pkt->ts_delta;
		bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
		if (mode != RTMP_PS_FOURBYTES)
		{
			bytestream_put_be24(&p, pkt->data_size);
			bytestream_put_byte(&p, pkt->type);
			if (mode == RTMP_PS_TWELVEBYTES)
				bytestream_put_le32(&p, pkt->extra);
		}
		if (timestamp >= 0xFFFFFF)
			bytestream_put_be32(&p, timestamp);
	}
	// save history
	prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
	prev_pkt[pkt->channel_id].type       = pkt->type;
	prev_pkt[pkt->channel_id].data_size  = pkt->data_size;
	prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
	if (mode != RTMP_PS_TWELVEBYTES)
	{
		prev_pkt[pkt->channel_id].ts_delta   = pkt->ts_delta;
	}
	else
	{
		prev_pkt[pkt->channel_id].ts_delta   = pkt->timestamp;
	}
	prev_pkt[pkt->channel_id].extra      = pkt->extra;

	ffurl_write(h, pkt_hdr, p-pkt_hdr);
	size = p - pkt_hdr + pkt->data_size;
	while (off < pkt->data_size)
	{
		int towrite = FFMIN(chunk_size, pkt->data_size - off);
		ffurl_write(h, pkt->data + off, towrite);
		off += towrite;
		if (off < pkt->data_size)
		{
			uint8_t marker = 0xC0 | pkt->channel_id;
			ffurl_write(h, &marker, 1);
			size++;
		}
	}
	return size;
}
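After the header, the payload is written in chunk_size pieces; each piece after the first is preceded by a one-byte continuation marker, 0xC0 | channel_id, which is an RTMP type-3 ("header only") chunk header.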
Example #11
File: sgienc.c Project: Arcen/libav
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    SgiContext *s = avctx->priv_data;
    AVFrame * const p = &s->picture;
    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf, *buf;
    int x, y, z, length, tablesize, ret;
    unsigned int width, height, depth, dimension;
    unsigned char *end_buf;

    *p = *frame;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    switch (avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        dimension = SGI_SINGLE_CHAN;
        depth     = SGI_GRAYSCALE;
        break;
    case PIX_FMT_RGB24:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGB;
        break;
    case PIX_FMT_RGBA:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGBA;
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    tablesize = depth * height * 4;
    length = SGI_HEADER_SIZE;
    if (avctx->coder_type == FF_CODER_TYPE_RAW)
        length += depth * height * width;
    else // assume ff_rl_encode() produces at most 2x size of input
        length += tablesize * 2 + depth * height * (2 * width + 1);

    if ((ret = ff_alloc_packet(pkt, length)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length);
        return ret;
    }
    buf     = pkt->data;
    end_buf = pkt->data + pkt->size;

    /* Encode header. */
    bytestream_put_be16(&buf, SGI_MAGIC);
    bytestream_put_byte(&buf, avctx->coder_type != FF_CODER_TYPE_RAW); /* RLE 1 - VERBATIM 0*/
    bytestream_put_byte(&buf, 1); /* bytes_per_channel */
    bytestream_put_be16(&buf, dimension);
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be16(&buf, depth);

    /* The rest are constant in this implementation. */
    bytestream_put_be32(&buf, 0L); /* pixmin */
    bytestream_put_be32(&buf, 255L); /* pixmax */
    bytestream_put_be32(&buf, 0L); /* dummy */

    /* name */
    memset(buf, 0, SGI_HEADER_SIZE);
    buf += 80;

     /* colormap */
    bytestream_put_be32(&buf, 0L);

    /* The rest of the 512 byte header is unused. */
    buf += 404;
    offsettab = buf;

    if (avctx->coder_type  != FF_CODER_TYPE_RAW) {
        /* Skip RLE offset table. */
        buf += tablesize;
        lengthtab = buf;

        /* Skip RLE length table. */
        buf += tablesize;

        /* Make an intermediate consecutive buffer. */
        if (!(encode_buf = av_malloc(width)))
            return -1;

        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                bytestream_put_be32(&offsettab, buf - pkt->data);

                for (x = 0; x < width; x++)
                    encode_buf[x] = in_buf[depth * x];

                if ((length = ff_rle_encode(buf, end_buf - buf - 1, encode_buf, 1, width, 0, 0, 0x80, 0)) < 1) {
                    av_free(encode_buf);
                    return -1;
                }

                buf += length;
                bytestream_put_byte(&buf, 0);
                bytestream_put_be32(&lengthtab, length + 1);
                in_buf -= p->linesize[0];
            }
        }

        av_free(encode_buf);
    } else {
        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                for (x = 0; x < width * depth; x += depth)
                    bytestream_put_byte(&buf, in_buf[x]);

                in_buf -= p->linesize[0];
            }
        }
    }

    /* total length */
    pkt->size = buf - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
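This libav variant of Example #9 sizes the output with ff_alloc_packet(), using either the raw size or the tables plus an assumed 2x RLE expansion as the worst case, and reports the result through pkt->size and *got_packet instead of a return value.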
Example #12
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size, skip = 0;
    int ret, i;

    if (avio_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        size    = b->last_block_used_bytes;
        samples = b->last_block_samples;
        skip    = b->last_block_size - b->last_block_used_bytes;

        if (samples < size * 14 / 8) {
            uint32_t adjusted_size = samples / 14 * 8;
            if (samples % 14)
                adjusted_size += (samples % 14 + 1) / 2 + 1;

            skip += size - adjusted_size;
            size = adjusted_size;
        }
    } else if (b->current_block < b->block_count) {
        size    = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if (codec->codec_id == AV_CODEC_ID_ADPCM_THP ||
        codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
        uint8_t *dst;

        if (!b->adpc) {
            av_log(s, AV_LOG_ERROR, "adpcm_thp requires ADPC chunk, but none was found.\n");
            return AVERROR_INVALIDDATA;
        }
        if (!b->table) {
            b->table = av_mallocz(32 * codec->channels);
            if (!b->table)
                return AVERROR(ENOMEM);
        }

        if (size > (INT_MAX - 32 - 4) ||
            (32 + 4 + size) > (INT_MAX / codec->channels) ||
            (32 + 4 + size) * codec->channels > INT_MAX - 8)
            return AVERROR_INVALIDDATA;
        if (av_new_packet(pkt, 8 + (32 + 4 + size) * codec->channels) < 0)
            return AVERROR(ENOMEM);
        dst = pkt->data;
        if (codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
            bytestream_put_le32(&dst, size * codec->channels);
            bytestream_put_le32(&dst, samples);
        } else {
            bytestream_put_be32(&dst, size * codec->channels);
            bytestream_put_be32(&dst, samples);
        }
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        for (i = 0; i < codec->channels; i++) {
            ret = avio_read(s->pb, dst, size);
            dst += size;
            avio_skip(s->pb, skip);
            if (ret != size) {
                av_free_packet(pkt);
                break;
            }
        }
        pkt->duration = samples;
    } else {
        size *= codec->channels;
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret != size)
        ret = AVERROR(EIO);

    return ret;
}
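Compared with Example #2, this revision validates the sizes against INT_MAX before allocating, writes the size field (size * channels) little-endian for ADPCM_THP_LE, allocates a zeroed table when none was parsed, and reads each channel separately, skipping skip bytes of padding after every read.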
Example #13
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket **prev_pkt_ptr,
                         int *nb_prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;
    int written = 0;
    int ret;
    RTMPPacket *prev_pkt;
    int use_delta; // flag if using timestamp delta, not RTMP_PS_TWELVEBYTES
    uint32_t timestamp; // full 32-bit timestamp or delta value

    if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt,
                                         pkt->channel_id)) < 0)
        return ret;
    prev_pkt = *prev_pkt_ptr;

    //if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
    use_delta = prev_pkt[pkt->channel_id].channel_id &&
        pkt->extra == prev_pkt[pkt->channel_id].extra &&
        pkt->timestamp >= prev_pkt[pkt->channel_id].timestamp;

    timestamp = pkt->timestamp;
    if (use_delta) {
        timestamp -= prev_pkt[pkt->channel_id].timestamp;
    }
    if (timestamp >= 0xFFFFFF) {
        pkt->ts_field = 0xFFFFFF;
    } else {
        pkt->ts_field = timestamp;
    }

    if (use_delta) {
        if (pkt->type == prev_pkt[pkt->channel_id].type &&
            pkt->size == prev_pkt[pkt->channel_id].size) {
            mode = RTMP_PS_FOURBYTES;
            if (pkt->ts_field == prev_pkt[pkt->channel_id].ts_field)
                mode = RTMP_PS_ONEBYTE;
        } else {
            mode = RTMP_PS_EIGHTBYTES;
        }
    }

    if (pkt->channel_id < 64) {
        bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    } else if (pkt->channel_id < 64 + 256) {
        bytestream_put_byte(&p, 0               | (mode << 6));
        bytestream_put_byte(&p, pkt->channel_id - 64);
    } else {
        bytestream_put_byte(&p, 1               | (mode << 6));
        bytestream_put_le16(&p, pkt->channel_id - 64);
    }
    if (mode != RTMP_PS_ONEBYTE) {
        bytestream_put_be24(&p, pkt->ts_field);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
    }
    if (pkt->ts_field == 0xFFFFFF)
        bytestream_put_be32(&p, timestamp);
    // save history
    prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
    prev_pkt[pkt->channel_id].type       = pkt->type;
    prev_pkt[pkt->channel_id].size       = pkt->size;
    prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
    prev_pkt[pkt->channel_id].ts_field   = pkt->ts_field;
    prev_pkt[pkt->channel_id].extra      = pkt->extra;

    if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0)
        return ret;
    written = p - pkt_hdr + pkt->size;
    while (off < pkt->size) {
        int towrite = FFMIN(chunk_size, pkt->size - off);
        if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0)
            return ret;
        off += towrite;
        if (off < pkt->size) {
            uint8_t marker = 0xC0 | pkt->channel_id;
            if ((ret = ffurl_write(h, &marker, 1)) < 0)
                return ret;
            written++;
        }
    }
    return written;
}
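This revision grows the prev_pkt history on demand via ff_rtmp_check_alloc_array(), only uses a timestamp delta when the timestamp has not gone backwards, keeps the 24-bit ts_field separate so the extended-timestamp escape also applies to deltas, and checks every ffurl_write() return value.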
Example #14
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    const AVFrame * const p = frame;
    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf, *buf;
    int x, y, z, length, tablesize, ret;
    unsigned int width, height, depth, dimension, bytes_per_channel, pixmax, put_be;
    unsigned char *end_buf;

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;
    bytes_per_channel = 1;
    pixmax = 0xFF;
    put_be = HAVE_BIGENDIAN;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        dimension = SGI_SINGLE_CHAN;
        depth     = SGI_GRAYSCALE;
        break;
    case AV_PIX_FMT_RGB24:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGB;
        break;
    case AV_PIX_FMT_RGBA:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGBA;
        break;
    case AV_PIX_FMT_GRAY16LE:
        put_be = !HAVE_BIGENDIAN;
    case AV_PIX_FMT_GRAY16BE:
        avctx->coder_type = FF_CODER_TYPE_RAW;
        bytes_per_channel = 2;
        pixmax = 0xFFFF;
        dimension = SGI_SINGLE_CHAN;
        depth     = SGI_GRAYSCALE;
        break;
    case AV_PIX_FMT_RGB48LE:
        put_be = !HAVE_BIGENDIAN;
    case AV_PIX_FMT_RGB48BE:
        avctx->coder_type = FF_CODER_TYPE_RAW;
        bytes_per_channel = 2;
        pixmax = 0xFFFF;
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGB;
        break;
    case AV_PIX_FMT_RGBA64LE:
        put_be = !HAVE_BIGENDIAN;
    case AV_PIX_FMT_RGBA64BE:
        avctx->coder_type = FF_CODER_TYPE_RAW;
        bytes_per_channel = 2;
        pixmax = 0xFFFF;
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGBA;
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    tablesize = depth * height * 4;
    length = SGI_HEADER_SIZE;
    if (avctx->coder_type == FF_CODER_TYPE_RAW)
        length += depth * height * width;
    else // assume ff_rl_encode() produces at most 2x size of input
        length += tablesize * 2 + depth * height * (2 * width + 1);

    if ((ret = ff_alloc_packet2(avctx, pkt, bytes_per_channel * length)) < 0)
        return ret;
    buf     = pkt->data;
    end_buf = pkt->data + pkt->size;

    /* Encode header. */
    bytestream_put_be16(&buf, SGI_MAGIC);
    bytestream_put_byte(&buf, avctx->coder_type != FF_CODER_TYPE_RAW); /* RLE 1 - VERBATIM 0*/
    bytestream_put_byte(&buf, bytes_per_channel);
    bytestream_put_be16(&buf, dimension);
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be16(&buf, depth);

    bytestream_put_be32(&buf, 0L); /* pixmin */
    bytestream_put_be32(&buf, pixmax);
    bytestream_put_be32(&buf, 0L); /* dummy */

    /* name */
    memset(buf, 0, SGI_HEADER_SIZE);
    buf += 80;

     /* colormap */
    bytestream_put_be32(&buf, 0L);

    /* The rest of the 512 byte header is unused. */
    buf += 404;
    offsettab = buf;

    if (avctx->coder_type  != FF_CODER_TYPE_RAW) {
        /* Skip RLE offset table. */
        buf += tablesize;
        lengthtab = buf;

        /* Skip RLE length table. */
        buf += tablesize;

        /* Make an intermediate consecutive buffer. */
        if (!(encode_buf = av_malloc(width)))
            return -1;

        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                bytestream_put_be32(&offsettab, buf - pkt->data);

                for (x = 0; x < width; x++)
                    encode_buf[x] = in_buf[depth * x];

                if ((length = ff_rle_encode(buf, end_buf - buf - 1, encode_buf, 1, width, 0, 0, 0x80, 0)) < 1) {
                    av_free(encode_buf);
                    return -1;
                }

                buf += length;
                bytestream_put_byte(&buf, 0);
                bytestream_put_be32(&lengthtab, length + 1);
                in_buf -= p->linesize[0];
            }
        }

        av_free(encode_buf);
    } else {
        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z * bytes_per_channel;

            for (y = 0; y < height; y++) {
                for (x = 0; x < width * depth; x += depth) {
                    if (bytes_per_channel == 1) {
                        bytestream_put_byte(&buf, in_buf[x]);
                    } else {
                        if (put_be) {
                            bytestream_put_be16(&buf, ((uint16_t *)in_buf)[x]);
                        } else {
                            bytestream_put_le16(&buf, ((uint16_t *)in_buf)[x]);
                        }
                    }
                }

                in_buf -= p->linesize[0];
            }
        }
    }

    /* total length */
    pkt->size = buf - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
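This version adds the 16-bit formats: for GRAY16/RGB48/RGBA64 input it forces raw coding, sets bytes_per_channel to 2 and pixmax to 0xFFFF, and uses put_be to choose between bytestream_put_be16() and bytestream_put_le16() according to the source format and host endianness.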