Example #1
0
static int SkipFile(struct stream *s, int *count, rar_file_t ***file,
                    const rar_block_t *hdr, const char *volume_mrl)
{
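    /* Smallest block we can parse: the 7-byte block header plus the fixed
     * file-header fields up to and including the 2-byte name size
     * (8 more bytes when the 64-bit size fields are present). */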
    int min_size = 7+21;
    if (hdr->flags & RAR_BLOCK_FILE_HAS_HIGH)
        min_size += 8;
    if (hdr->size < (unsigned)min_size)
        return -1;

    bstr data = stream_peek(s, min_size);
    if (data.len < min_size)
        return -1;
    const uint8_t *peek = (uint8_t *)data.start;

    /* Parse the fixed-size fields of the file header */
    uint32_t file_size_low = AV_RL32(&peek[7+4]);
    uint8_t  method = peek[7+18];
    uint16_t name_size = AV_RL16(&peek[7+19]);
    uint32_t file_size_high = 0;
    if (hdr->flags & RAR_BLOCK_FILE_HAS_HIGH)
        file_size_high = AV_RL32(&peek[7+29]);
    const uint64_t file_size = ((uint64_t)file_size_high << 32) | file_size_low;

    char *name = calloc(1, name_size + 1);
    if (!name)
        return -1;

    const int name_offset = (hdr->flags & RAR_BLOCK_FILE_HAS_HIGH) ? (7+33) : (7+25);
    if (name_offset + name_size <= hdr->size) {
        const int max_size = name_offset + name_size;
        bstr namedata = stream_peek(s, max_size);
        if (namedata.len < max_size) {
            free(name);
            return -1;
        }
        memcpy(name, &namedata.start[name_offset], name_size);
    }

    rar_file_t *current = NULL;
    if (method != 0x30) {
        MP_WARN(s, "Ignoring compressed file %s (method=0x%2.2x)\n", name, method);
        goto exit;
    }

    /* Does this block continue the file started in a previous block? */
    if (*count > 0)
        current = (*file)[*count - 1];

    if (current &&
        (current->is_complete ||
          strcmp(current->name, name) ||
          (hdr->flags & RAR_BLOCK_FILE_HAS_PREVIOUS) == 0))
        current = NULL;

    if (!current) {
        if (hdr->flags & RAR_BLOCK_FILE_HAS_PREVIOUS)
            goto exit;
        current = calloc(1, sizeof(*current));
        if (!current)
            goto exit;
        MP_TARRAY_APPEND(NULL, *file, *count, current);

        current->name = name;
        current->size = file_size;
        current->is_complete = false;
        current->real_size = 0;
        current->chunk_count = 0;
        current->chunk = NULL;

        name = NULL;
    }

    /* Append chunks */
    rar_file_chunk_t *chunk = malloc(sizeof(*chunk));
    if (chunk) {
        chunk->mrl = strdup(volume_mrl);
        chunk->offset = stream_tell(s) + hdr->size;
        chunk->size = hdr->add_size;
        chunk->cummulated_size = 0;
        if (current->chunk_count > 0) {
            rar_file_chunk_t *previous = current->chunk[current->chunk_count-1];

            chunk->cummulated_size += previous->cummulated_size +
                                      previous->size;
        }

        MP_TARRAY_APPEND(NULL, current->chunk, current->chunk_count, chunk);

        current->real_size += hdr->add_size;
    }
    if ((hdr->flags & RAR_BLOCK_FILE_HAS_NEXT) == 0)
        current->is_complete = true;

exit:
    /* name is NULL here if its ownership was transferred to a new entry */
    free(name);

    /* We stop at the first non-empty file if we cannot seek */
    if (current) {
        bool can_seek = s->end_pos > 0;
        if (!can_seek && current->size > 0)
            return -1;
    }

    if (SkipBlock(s, hdr))
        return -1;
    return 0;
}
Example #2
0
static int str_read_packet(AVFormatContext *s,
                           AVPacket *ret_pkt)
{
    AVIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    AVPacket *pkt;
    AVStream *st;

    while (1) {

        if (avio_read(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR(EIO);

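        /* byte 0x11 of the CD-XA subheader is the channel number,
         * byte 0x12 the submode (sector type) */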
        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            {

                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size = AV_RL32(&sector[0x24]);

                if(!(   frame_size>=0
                     && current_sector < sector_count
                     && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
                    av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n", current_sector, sector_count, frame_size);
                    break;
                }

                if(str->channels[channel].video_stream_index < 0){
                    /* allocate a new AVStream */
                    st = avformat_new_stream(s, NULL);
                    if (!st)
                        return AVERROR(ENOMEM);
                    avpriv_set_pts_info(st, 64, 1, 15);

                    str->channels[channel].video_stream_index = st->index;

                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_id   = CODEC_ID_MDEC;
                    st->codec->codec_tag  = 0;  /* no fourcc */
                    st->codec->width      = AV_RL16(&sector[0x28]);
                    st->codec->height     = AV_RL16(&sector[0x2A]);
                }

                /* use this channel's pending packet; (re)allocate it when a new frame starts */
                pkt = &str->channels[channel].tmp_pkt;

                if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){
                    if(pkt->data)
                        av_log(s, AV_LOG_ERROR, "missmatching sector_count\n");
                    av_free_packet(pkt);
                    if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE))
                        return AVERROR(EIO);

                    pkt->pos= avio_tell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
                }

                memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
                       sector + VIDEO_DATA_HEADER_SIZE,
                       VIDEO_DATA_CHUNK_SIZE);

                if (current_sector == sector_count-1) {
                    pkt->size= frame_size;
                    *ret_pkt = *pkt;
                    pkt->data= NULL;
                    pkt->size= -1;
                    return 0;
                }

            }
            break;

        case CDXA_TYPE_AUDIO:
            if(str->channels[channel].audio_stream_index < 0){
                int fmt = sector[0x13];
                /* allocate a new AVStream */
                st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);

                str->channels[channel].audio_stream_index = st->index;

                st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id    = CODEC_ID_ADPCM_XA;
                st->codec->codec_tag   = 0;  /* no fourcc */
                st->codec->channels    = (fmt&1)?2:1;
                st->codec->sample_rate = (fmt&4)?18900:37800;
            //    st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;

                avpriv_set_pts_info(st, 64, 18 * 224 / st->codec->channels,
                                    st->codec->sample_rate);
                st->start_time = 0;
            }
            pkt = ret_pkt;
            if (av_new_packet(pkt, 2304))
                return AVERROR(EIO);
            memcpy(pkt->data,sector+24,2304);

            pkt->stream_index =
                str->channels[channel].audio_stream_index;
            pkt->duration = 1;
            return 0;
        default:
            av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]);
            /* drop the sector and move on */
            break;
        }

        if (url_feof(pb))
            return AVERROR(EIO);
    }
}
Example #3
0
static int decode_rle(CamtasiaContext *c, unsigned int srcsize)
{
    unsigned char *src = c->decomp_buf;
    unsigned char *output, *output_end;
    int p1, p2, line=c->height, pos=0, i;
    uint16_t pix16;
    uint32_t pix32;

    output = c->pic.data[0] + (c->height - 1) * c->pic.linesize[0];
    output_end = c->pic.data[0] + (c->height) * c->pic.linesize[0];
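    /* the RLE bitmap is stored bottom-up: start at the last line and move
     * one line up on each end-of-line code */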
    while(src < c->decomp_buf + srcsize) {
        p1 = *src++;
        if(p1 == 0) { //Escape code
            p2 = *src++;
            if(p2 == 0) { //End-of-line
                output = c->pic.data[0] + (--line) * c->pic.linesize[0];
                if (line < 0)
                    return -1;
                pos = 0;
                continue;
            } else if(p2 == 1) { //End-of-picture
                return 0;
            } else if(p2 == 2) { //Skip
                p1 = *src++;
                p2 = *src++;
                line -= p2;
                if (line < 0)
                    return -1;
                pos += p1;
                output = c->pic.data[0] + line * c->pic.linesize[0] + pos * (c->bpp / 8);
                continue;
            }
            // Copy data
            if (output + p2 * (c->bpp / 8) > output_end) {
                src += p2 * (c->bpp / 8);
                continue;
            }
            if ((c->bpp == 8) || (c->bpp == 24)) {
                for(i = 0; i < p2 * (c->bpp / 8); i++) {
                    *output++ = *src++;
                }
                // RLE8 copy is actually padded - and runs are not!
                if(c->bpp == 8 && (p2 & 1)) {
                    src++;
                }
            } else if (c->bpp == 16) {
                for(i = 0; i < p2; i++) {
                    pix16 = AV_RL16(src);
                    src += 2;
                    *(uint16_t*)output = pix16;
                    output += 2;
                }
            } else if (c->bpp == 32) {
                for(i = 0; i < p2; i++) {
                    pix32 = AV_RL32(src);
                    src += 4;
                    *(uint32_t*)output = pix32;
                    output += 4;
                }
            }
            pos += p2;
        } else { //Run of pixels
            int pix[4]; //original pixel
            switch(c->bpp){
            case  8: pix[0] = *src++;
                     break;
            case 16: pix16 = AV_RL16(src);
                     src += 2;
                     *(uint16_t*)pix = pix16;
                     break;
            case 24: pix[0] = *src++;
                     pix[1] = *src++;
                     pix[2] = *src++;
                     break;
            case 32: pix32 = AV_RL32(src);
                     src += 4;
                     *(uint32_t*)pix = pix32;
                     break;
            }
            if (output + p1 * (c->bpp / 8) > output_end)
                continue;
            for(i = 0; i < p1; i++) {
                switch(c->bpp){
                case  8: *output++ = pix[0];
                         break;
                case 16: *(uint16_t*)output = pix16;
                         output += 2;
                         break;
                case 24: *output++ = pix[0];
                         *output++ = pix[1];
                         *output++ = pix[2];
                         break;
                case 32: *(uint32_t*)output = pix32;
                         output += 4;
                         break;
                }
            }
            pos += p1;
        }
    }

    av_log(c->avctx, AV_LOG_ERROR, "Camtasia warning: no End-of-picture code\n");
    return 1;
}
Example #4
0
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
                                      void *data, int *data_size,
                                      const uint8_t *buf, int buf_size)
{
    /* Note: the only difference between the 15Bpp and 16Bpp formats is the
     * pixel format; the packets are processed the same. */
    FlicDecodeContext *s = avctx->priv_data;

    int stream_ptr = 0;
    int pixel_ptr;
    unsigned char palette_idx1;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j;

    int lines;
    int compressed_lines;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    int pixel;
    unsigned int pixel_limit;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    pixels = s->frame.data[0];
    pixel_limit = s->avctx->height * s->frame.linesize[0];

    frame_size = AV_RL32(&buf[stream_ptr]);
    stream_ptr += 6;  /* skip the magic number */
    num_chunks = AV_RL16(&buf[stream_ptr]);
    stream_ptr += 10;  /* skip padding */

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0)) {
        chunk_size = AV_RL32(&buf[stream_ptr]);
        stream_ptr += 4;
        chunk_type = AV_RL16(&buf[stream_ptr]);
        stream_ptr += 2;

        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* For some reason, it seems that non-palettized flics do
             * include one of these chunks in their first frame.
             * Why I do not know, it seems rather extraneous. */
/*            av_log(avctx, AV_LOG_ERROR, "Unexpected Palette chunk %d in non-paletised FLC\n",chunk_type);*/
            stream_ptr = stream_ptr + chunk_size - 6;
            break;

        case FLI_DELTA:
        case FLI_DTA_LC:
            y_ptr = 0;
            compressed_lines = AV_RL16(&buf[stream_ptr]);
            stream_ptr += 2;
            while (compressed_lines > 0) {
                line_packets = AV_RL16(&buf[stream_ptr]);
                stream_ptr += 2;
                if (line_packets < 0) {
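                    /* a negative count means: skip that many output lines */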
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame.linesize[0];
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = buf[stream_ptr++];
                        pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */
                        pixel_countdown -= pixel_skip;
                        byte_run = (signed char)(buf[stream_ptr++]);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            pixel    = AV_RL16(&buf[stream_ptr]);
                            stream_ptr += 2;
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                *((signed short*)(&pixels[pixel_ptr])) = pixel;
                                pixel_ptr += 2;
                            }
                        } else {
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[stream_ptr]);
                                stream_ptr += 2;
                                pixel_ptr += 2;
                            }
                        }
                    }

                    y_ptr += s->frame.linesize[0];
                }
            }
            break;

        case FLI_LC:
            av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-paletised FLC\n");
            stream_ptr = stream_ptr + chunk_size - 6;
            break;

        case FLI_BLACK:
            /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
            memset(pixels, 0x0000,
                   s->frame.linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                stream_ptr++;
                pixel_countdown = (s->avctx->width * 2);

                while (pixel_countdown > 0) {
                    byte_run = (signed char)(buf[stream_ptr++]);
                    if (byte_run > 0) {
                        palette_idx1 = buf[stream_ptr++];
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = buf[stream_ptr++];
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed.
                 * This does not give us any good opportunity to perform word endian conversion
                 * during decompression. So if it is required (i.e., this is not a LE target), we do
                 * a second pass over the line here, swapping the bytes.
                 */
#if HAVE_BIGENDIAN
                pixel_ptr = y_ptr;
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[pixel_ptr]);
                    pixel_ptr += 2;
                    pixel_countdown--;
                }
#endif
                y_ptr += s->frame.linesize[0];
            }
            break;

        case FLI_DTA_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                stream_ptr++;
                pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */

                while (pixel_countdown > 0) {
                    byte_run = (signed char)(buf[stream_ptr++]);
                    if (byte_run > 0) {
                        pixel    = AV_RL16(&buf[stream_ptr]);
                        stream_ptr += 2;
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = pixel;
                            pixel_ptr += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    } else {  /* copy pixels if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[stream_ptr]);
                            stream_ptr += 2;
                            pixel_ptr  += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    }
                }

                y_ptr += s->frame.linesize[0];
            }
            break;

        case FLI_COPY:
        case FLI_DTA_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                stream_ptr += chunk_size - 6;
            } else {

                for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
                     y_ptr += s->frame.linesize[0]) {

                    pixel_countdown = s->avctx->width;
                    pixel_ptr = 0;
                    while (pixel_countdown > 0) {
                      *((signed short*)(&pixels[y_ptr + pixel_ptr])) = AV_RL16(&buf[stream_ptr+pixel_ptr]);
                      pixel_ptr += 2;
                      pixel_countdown--;
                    }
                    stream_ptr += s->avctx->width*2;
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            stream_ptr += chunk_size - 6;
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, stream_ptr);


    *data_size=sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    return buf_size;
}
Example #5
0
/**
 * Decode Smacker audio data
 */
static int smka_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    SmackerAudioContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    GetBitContext gb;
    HuffContext h[4];
    VLC vlc[4];
    int16_t *samples;
    uint8_t *samples8;
    int val;
    int i, res, ret;
    int unp_size;
    int bits, stereo;
    int pred[2] = {0, 0};

    if (buf_size <= 4) {
        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
        return AVERROR(EINVAL);
    }

    unp_size = AV_RL32(buf);

    init_get_bits(&gb, buf + 4, (buf_size - 4) * 8);

    if(!get_bits1(&gb)){
        av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
        *got_frame_ptr = 0;
        return 1;
    }
    stereo = get_bits1(&gb);
    bits = get_bits1(&gb);
    if (stereo ^ (avctx->channels != 1)) {
        av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
        return AVERROR(EINVAL);
    }
    if (bits && avctx->sample_fmt == AV_SAMPLE_FMT_U8) {
        av_log(avctx, AV_LOG_ERROR, "sample format mismatch\n");
        return AVERROR(EINVAL);
    }

    /* get output buffer */
    s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1));
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    samples  = (int16_t *)s->frame.data[0];
    samples8 =            s->frame.data[0];

    memset(vlc, 0, sizeof(VLC) * 4);
    memset(h, 0, sizeof(HuffContext) * 4);
    // Initialize
    for(i = 0; i < (1 << (bits + stereo)); i++) {
        h[i].length = 256;
        h[i].maxlength = 0;
        h[i].current = 0;
        h[i].bits = av_mallocz(256 * 4);
        h[i].lengths = av_mallocz(256 * sizeof(int));
        h[i].values = av_mallocz(256 * sizeof(int));
        skip_bits1(&gb);
        smacker_decode_tree(&gb, &h[i], 0, 0);
        skip_bits1(&gb);
        if(h[i].current > 1) {
            res = init_vlc(&vlc[i], SMKTREE_BITS, h[i].length,
                    h[i].lengths, sizeof(int), sizeof(int),
                    h[i].bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
            if(res < 0) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
                return -1;
            }
        }
    }
    if(bits) { //decode 16-bit data
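        /* each 16-bit delta is assembled from two Huffman trees (low byte,
         * high byte); trees 0/1 feed the first channel and trees 2/3 the
         * second, with stereo samples interleaved */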
        for(i = stereo; i >= 0; i--)
            pred[i] = av_bswap16(get_bits(&gb, 16));
        for(i = 0; i <= stereo; i++)
            *samples++ = pred[i];
        for(; i < unp_size / 2; i++) {
            if(get_bits_left(&gb)<0)
                return -1;
            if(i & stereo) {
                if(vlc[2].table)
                    res = get_vlc2(&gb, vlc[2].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val  = h[2].values[res];
                if(vlc[3].table)
                    res = get_vlc2(&gb, vlc[3].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val |= h[3].values[res] << 8;
                pred[1] += sign_extend(val, 16);
                *samples++ = av_clip_int16(pred[1]);
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val  = h[0].values[res];
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val |= h[1].values[res] << 8;
                pred[0] += sign_extend(val, 16);
                *samples++ = av_clip_int16(pred[0]);
            }
        }
    } else { //8-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = get_bits(&gb, 8);
        for(i = 0; i <= stereo; i++)
            *samples8++ = pred[i];
        for(; i < unp_size; i++) {
            if(get_bits_left(&gb)<0)
                return -1;
            if(i & stereo){
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                pred[1] += sign_extend(h[1].values[res], 8);
                *samples8++ = av_clip_uint8(pred[1]);
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                pred[0] += sign_extend(h[0].values[res], 8);
                *samples8++ = av_clip_uint8(pred[0]);
            }
        }
    }

    for(i = 0; i < 4; i++) {
        if(vlc[i].table)
            free_vlc(&vlc[i]);
        av_free(h[i].bits);
        av_free(h[i].lengths);
        av_free(h[i].values);
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;

    return buf_size;
}
Example #6
0
static int decode_frame(AVCodecContext *avctx,
                        void *data,
                        int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    DPXContext *const s = avctx->priv_data;
    AVFrame *picture  = data;
    AVFrame *const p = &s->picture;
    uint8_t *ptr;

    int magic_num, offset, endian;
    int x, y;
    int w, h, stride, bits_per_color, descriptor, elements, target_packet_size, source_packet_size;

    unsigned int rgbBuffer;

    magic_num = AV_RB32(buf);
    buf += 4;

    /* Check if the file's magic number is "SDPX", which means it is big-endian,
     * or "XPDS", which indicates a little-endian file */
    if (magic_num == AV_RL32("SDPX")) {
        endian = 0;
    } else if (magic_num == AV_RB32("SDPX")) {
        endian = 1;
    } else {
        av_log(avctx, AV_LOG_ERROR, "DPX marker not found\n");
        return -1;
    }

    offset = read32(&buf, endian);
    // Need to end up at offset 0x304 from the start of the file
    buf = avpkt->data + 0x304;
    w = read32(&buf, endian);
    h = read32(&buf, endian);

    // Need to end up at offset 0x320 to read the descriptor
    buf += 20;
    descriptor = buf[0];

    // Need to end up at offset 0x323 to read the bits per color
    buf += 3;
    avctx->bits_per_raw_sample =
    bits_per_color = buf[0];

    switch (descriptor) {
        case 51: // RGBA
            elements = 4;
            break;
        case 50: // RGB
            elements = 3;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported descriptor %d\n", descriptor);
            return -1;
    }

    switch (bits_per_color) {
        case 8:
            if (elements == 4) {
                avctx->pix_fmt = PIX_FMT_RGBA;
            } else {
                avctx->pix_fmt = PIX_FMT_RGB24;
            }
            source_packet_size = elements;
            target_packet_size = elements;
            break;
        case 10:
            avctx->pix_fmt = PIX_FMT_RGB48;
            target_packet_size = 6;
            source_packet_size = elements * 2;
            break;
        case 12:
        case 16:
            if (endian) {
                avctx->pix_fmt = PIX_FMT_RGB48BE;
            } else {
                avctx->pix_fmt = PIX_FMT_RGB48LE;
            }
            target_packet_size = 6;
            source_packet_size = elements * 2;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported color depth : %d\n", bits_per_color);
            return -1;
    }

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);
    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    // Move pointer to the image data offset given in the header
    buf =  avpkt->data + offset;

    ptr    = p->data[0];
    stride = p->linesize[0];

    switch (bits_per_color) {
        case 10:
            for (x = 0; x < avctx->height; x++) {
               uint16_t *dst = (uint16_t*)ptr;
               for (y = 0; y < avctx->width; y++) {
                   rgbBuffer = read32(&buf, endian);
                   // Read out the 10-bit colors and convert to 16-bit
                   *dst++ = make_16bit(rgbBuffer >> 16);
                   *dst++ = make_16bit(rgbBuffer >>  6);
                   *dst++ = make_16bit(rgbBuffer <<  4);
               }
               ptr += stride;
            }
            break;
        case 8:
        case 12: // Treat 12-bit as 16-bit
        case 16:
            if (source_packet_size == target_packet_size) {
                for (x = 0; x < avctx->height; x++) {
                    memcpy(ptr, buf, target_packet_size*avctx->width);
                    ptr += stride;
                    buf += source_packet_size*avctx->width;
                }
            } else {
                for (x = 0; x < avctx->height; x++) {
                    uint8_t *dst = ptr;
                    for (y = 0; y < avctx->width; y++) {
                        memcpy(dst, buf, target_packet_size);
                        dst += target_packet_size;
                        buf += source_packet_size;
                    }
                    ptr += stride;
                }
            }
            break;
    }

    *picture   = s->picture;
    *data_size = sizeof(AVPicture);

    return buf_size;
}
Example #7
0
static int fourxm_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->fps = 1.0;

    /* skip the first 3 32-bit numbers */
    avio_skip(pb, 12);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (avio_read(pb, header, header_size) != header_size){
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size = AV_RL32(&header[i + 4]);
        if (size > header_size - i - 8 && (fourcc_tag == vtrk_TAG || fourcc_tag == strk_TAG)) {
            av_log(s, AV_LOG_ERROR, "chunk larger than array %d>%d\n", size, header_size - i - 8);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2float(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            fourxm->width  = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }
            avpriv_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = AV_CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width  = fourxm->width;
            st->codec->height = fourxm->height;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            int current_track;
            /* check that there is enough data */
            if (size != strk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            current_track = AV_RL32(&header[i + 8]);
            if((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1){
                av_log(s, AV_LOG_ERROR, "current_track too large\n");
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            if (current_track + 1 > fourxm->track_count) {
                fourxm->tracks = av_realloc_f(fourxm->tracks,
                                              sizeof(AudioTrack),
                                              current_track + 1);
                if (!fourxm->tracks) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                memset(&fourxm->tracks[fourxm->track_count], 0,
                       sizeof(AudioTrack) * (current_track + 1 - fourxm->track_count));
                fourxm->track_count = current_track + 1;
            }
            fourxm->tracks[current_track].adpcm       = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels    = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits        = AV_RL32(&header[i + 44]);
            fourxm->tracks[current_track].audio_pts   = 0;
            if(   fourxm->tracks[current_track].channels    <= 0
               || fourxm->tracks[current_track].sample_rate <= 0
               || fourxm->tracks[current_track].bits        <  0){
                av_log(s, AV_LOG_ERROR, "audio header invalid\n");
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            if(!fourxm->tracks[current_track].adpcm && fourxm->tracks[current_track].bits<8){
                av_log(s, AV_LOG_ERROR, "bits unspecified for non ADPCM\n");
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            i += 8 + size;

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }

            st->id = current_track;
            avpriv_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag = 0;
            st->codec->channels              = fourxm->tracks[current_track].channels;
            st->codec->sample_rate           = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate              = st->codec->channels * st->codec->sample_rate *
                st->codec->bits_per_coded_sample;
            st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            if (fourxm->tracks[current_track].adpcm){
                st->codec->codec_id = AV_CODEC_ID_ADPCM_4XM;
            }else if (st->codec->bits_per_coded_sample == 8){
                st->codec->codec_id = AV_CODEC_ID_PCM_U8;
            }else
                st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
        }
    }

    /* skip over the LIST-MOVI chunk (which is where the stream data should be) */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG){
        ret= AVERROR_INVALIDDATA;
        goto fail;
    }

    av_free(header);
    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */

    return 0;
fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
Example #8
0
static int flic_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FlicDemuxContext *flic = s->priv_data;
    ByteIOContext *pb = s->pb;
    int packet_read = 0;
    unsigned int size;
    int magic;
    int ret = 0;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    while (!packet_read) {

        if ((ret = get_buffer(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
            FLIC_PREAMBLE_SIZE) {
            ret = AVERROR(EIO);
            break;
        }

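        /* each chunk preamble is a 32-bit little-endian size followed by a
         * 16-bit chunk type */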
        size = AV_RL32(&preamble[0]);
        magic = AV_RL16(&preamble[4]);

        if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }
            pkt->stream_index = flic->video_stream_index;
            pkt->pts = flic->frame_number++;
            pkt->pos = url_ftell(pb);
            memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
            ret = get_buffer(pb, pkt->data + FLIC_PREAMBLE_SIZE,
                size - FLIC_PREAMBLE_SIZE);
            if (ret != size - FLIC_PREAMBLE_SIZE) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }
            packet_read = 1;
        } else if (magic == FLIC_TFTD_CHUNK_AUDIO) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }

            /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
            url_fseek(pb, 10, SEEK_CUR);

            pkt->stream_index = flic->audio_stream_index;
            pkt->pos = url_ftell(pb);
            ret = get_buffer(pb, pkt->data, size);

            if (ret != size) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }

            packet_read = 1;
        } else {
            /* not interested in this chunk */
            url_fseek(pb, size - 6, SEEK_CUR);
        }
    }

    return ret;
}
Example #9
0
static int flic_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FlicDemuxContext *flic = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_FLIC;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = AV_RL16(&header[0x08]);
    st->codec->height = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
    if (get_buffer(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }

    url_fseek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10, assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file.
     */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = av_new_stream(s, 1);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type = CODEC_TYPE_AUDIO;
        ast->codec->codec_id = CODEC_ID_PCM_U8;
        ast->codec->codec_tag = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels = 1;
        ast->codec->sample_fmt = SAMPLE_FMT_U8;
        ast->codec->bit_rate = ast->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
        av_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
        av_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        url_fseek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata_size = 12;
        st->codec->extradata = av_malloc(12);
        memcpy(st->codec->extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        av_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        av_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
Example #10
0
/** Read incoming MMST media, header or command packet. */
static MMSSCPacketType get_tcp_server_response(MMSContext *mms)
{
    int read_result;
    MMSSCPacketType packet_type= -1;

    for(;;) {
        read_result = url_read_complete(mms->mms_hd, mms->in_buffer, 8);
        if (read_result != 8) {
            if(read_result < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Error reading packet header: %d (%s)\n",
                       read_result, strerror(read_result));
                packet_type = SC_PKT_CANCEL;
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "The server closed the connection\n");
                packet_type = SC_PKT_NO_DATA;
            }
            return packet_type;
        }

        // handle command packet.
        if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
            int length_remaining, hr;

            mms->incoming_flags= mms->in_buffer[3];
            read_result= url_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
            if(read_result != 4) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading command packet length failed: %d (%s)\n",
                       read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }

            length_remaining= AV_RL32(mms->in_buffer+8) + 4;
            dprintf(NULL, "Length remaining is %d\n", length_remaining);
            // read the rest of the packet.
            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 12) {
                av_log(NULL, AV_LOG_ERROR,
                       "Incoming packet length %d exceeds bufsize %zu\n",
                       length_remaining, sizeof(mms->in_buffer) - 12);
                return AVERROR_INVALIDDATA;
            }
            read_result = url_read_complete(mms->mms_hd, mms->in_buffer + 12,
                                            length_remaining) ;
            if (read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading pkt data (length=%d) failed: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }
            packet_type= AV_RL16(mms->in_buffer+36);
            hr = AV_RL32(mms->in_buffer + 40);
            if (hr) {
                av_log(NULL, AV_LOG_ERROR,
                       "Server sent an error status code: 0x%08x\n", hr);
                return AVERROR_UNKNOWN;
            }
        } else {
            int length_remaining;
            int packet_id_type;
            int tmp;

            // note we cache the first 8 bytes,
            // then fill up the buffer with the others
            tmp                       = AV_RL16(mms->in_buffer + 6);
            length_remaining          = (tmp - 8) & 0xffff;
            mms->incoming_packet_seq  = AV_RL32(mms->in_buffer);
            packet_id_type            = mms->in_buffer[4];
            mms->incoming_flags       = mms->in_buffer[5];

            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 8) {
                av_log(NULL, AV_LOG_ERROR,
                       "Data length %d is invalid or too large (max=%zu)\n",
                       length_remaining, sizeof(mms->in_buffer));
                return AVERROR_INVALIDDATA;
            }
            mms->remaining_in_len    = length_remaining;
            mms->read_in_ptr         = mms->in_buffer;
            read_result= url_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
            if(read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to read packet data of size %d: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }

            // if we successfully read everything.
            if(packet_id_type == mms->header_packet_id) {
                packet_type = SC_PKT_ASF_HEADER;
                // Store the asf header
                if(!mms->header_parsed) {
                    void *p = av_realloc(mms->asf_header,
                                  mms->asf_header_size + mms->remaining_in_len);
                    if (!p) {
                        av_freep(&mms->asf_header);
                        return AVERROR(ENOMEM);
                    }
                    mms->asf_header = p;
                    memcpy(mms->asf_header + mms->asf_header_size,
                           mms->read_in_ptr, mms->remaining_in_len);
                    mms->asf_header_size += mms->remaining_in_len;
                }
                // 0x04 means asf header is sent in multiple packets.
                if (mms->incoming_flags == 0x04)
                    continue;
            } else if(packet_id_type == mms->packet_id) {
                packet_type = SC_PKT_ASF_MEDIA;
            } else {
                dprintf(NULL, "packet id type %d is old.", packet_id_type);
                continue;
            }
        }

        // preprocess some packet type
        if(packet_type == SC_PKT_KEEPALIVE) {
            send_keepalive_packet(mms);
            continue;
        } else if(packet_type == SC_PKT_STREAM_CHANGING) {
            handle_packet_stream_changing_type(mms);
        } else if(packet_type == SC_PKT_ASF_MEDIA) {
            pad_media_packet(mms);
        }
        return packet_type;
    }
}
Example #11
0
static int wv_read_block_header(AVFormatContext *ctx, ByteIOContext *pb)
{
    WVContext *wc = ctx->priv_data;
    uint32_t tag, ver;
    int size;
    int rate, bpp, chan;

    wc->pos = url_ftell(pb);
    tag = get_le32(pb);
    if (tag != MKTAG('w', 'v', 'p', 'k'))
        return -1;
    size = get_le32(pb);
    if(size < 24 || size > WV_BLOCK_LIMIT){
        av_log(ctx, AV_LOG_ERROR, "Incorrect block size %i\n", size);
        return -1;
    }
    wc->blksize = size;
    ver = get_le16(pb);
    if(ver < 0x402 || ver > 0x410){
        av_log(ctx, AV_LOG_ERROR, "Unsupported version %03X\n", ver);
        return -1;
    }
    get_byte(pb); // track no
    get_byte(pb); // track sub index
    wc->samples = get_le32(pb); // total samples in file
    wc->soff = get_le32(pb); // offset in samples of current block
    get_buffer(pb, wc->extra, WV_EXTRA_SIZE);
    wc->flags = AV_RL32(wc->extra + 4);
    //parse flags
    if(wc->flags & WV_FLOAT){
        av_log(ctx, AV_LOG_ERROR, "Floating point data is not supported\n");
        return -1;
    }
    if(wc->flags & WV_HYBRID){
        av_log(ctx, AV_LOG_ERROR, "Hybrid coding mode is not supported\n");
        return -1;
    }

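    /* the low two flag bits give bytes per sample, WV_MONO the channel
     * count and the 4-bit field at bit 23 indexes the sample-rate table */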
    bpp = ((wc->flags & 3) + 1) << 3;
    chan = 1 + !(wc->flags & WV_MONO);
    rate = wv_rates[(wc->flags >> 23) & 0xF];
    if(rate == -1){
        av_log(ctx, AV_LOG_ERROR, "Unknown sampling rate\n");
        return -1;
    }
    if(!wc->bpp) wc->bpp = bpp;
    if(!wc->chan) wc->chan = chan;
    if(!wc->rate) wc->rate = rate;

    if(wc->flags && bpp != wc->bpp){
        av_log(ctx, AV_LOG_ERROR, "Bits per sample differ, this block: %i, header block: %i\n", bpp, wc->bpp);
        return -1;
    }
    if(wc->flags && chan != wc->chan){
        av_log(ctx, AV_LOG_ERROR, "Channels differ, this block: %i, header block: %i\n", chan, wc->chan);
        return -1;
    }
    if(wc->flags && rate != wc->rate){
        av_log(ctx, AV_LOG_ERROR, "Sampling rate differ, this block: %i, header block: %i\n", rate, wc->rate);
        return -1;
    }
    wc->blksize = size - 24;
    return 0;
}
Example #12
0
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;

    if (bink->current_track < 0) {
        int index_entry;
        AVStream *st = s->streams[0]; // stream 0 is video stream with index

        if (bink->video_pts >= st->duration)
            return AVERROR(EIO);

        index_entry = av_index_search_timestamp(st, bink->video_pts,
                                                AVSEEK_FLAG_ANY);
        if (index_entry < 0) {
            av_log(s, AV_LOG_ERROR,
                   "could not find index entry for frame %"PRId64"\n",
                   bink->video_pts);
            return AVERROR(EIO);
        }

        bink->remain_packet_size = st->index_entries[index_entry].size;
        bink->current_track = 0;
    }

    while (bink->current_track < bink->num_audio_tracks) {
        uint32_t audio_size = avio_rl32(pb);
        if (audio_size > bink->remain_packet_size - 4) {
            av_log(s, AV_LOG_ERROR,
                   "frame %"PRId64": audio size in header (%u) > size of packet left (%u)\n",
                   bink->video_pts, audio_size, bink->remain_packet_size);
            return AVERROR(EIO);
        }
        bink->remain_packet_size -= 4 + audio_size;
        bink->current_track++;
        if (audio_size >= 4) {
            /* get one audio packet per track */
            if ((ret = av_get_packet(pb, pkt, audio_size)) < 0)
                return ret;
            pkt->stream_index = bink->current_track;
            pkt->pts = bink->audio_pts[bink->current_track - 1];

            /* Each audio packet reports the number of decompressed samples
               (in bytes). We use this value to calculate the audio PTS */
            if (pkt->size >= 4)
                bink->audio_pts[bink->current_track -1] +=
                    AV_RL32(pkt->data) / (2 * s->streams[bink->current_track]->codec->channels);
            return 0;
        } else {
            avio_skip(pb, audio_size);
        }
    }

    /* get video packet */
    if ((ret = av_get_packet(pb, pkt, bink->remain_packet_size)) < 0)
        return ret;
    pkt->stream_index = 0;
    pkt->pts = bink->video_pts++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    /* -1 instructs the next call to read_packet() to read the next frame */
    bink->current_track = -1;

    return 0;
}
Example #13
0
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    JvContext *s = avctx->priv_data;
    int buf_size = avpkt->size;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = buf + buf_size;
    int video_size, video_type, i, j, ret;

    if (avpkt->size < 5)
        return AVERROR_INVALIDDATA;

    video_size = AV_RL32(buf);
    video_type = buf[4];
    buf += 5;

    if (video_size) {
        if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        if (video_type == 0 || video_type == 1) {
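            /* frame types 0 and 1 are coded as a grid of 8x8 blocks */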
            GetBitContext gb;
            init_get_bits(&gb, buf, 8 * FFMIN(video_size, buf_end - buf));

            for (j = 0; j < avctx->height; j += 8)
                for (i = 0; i < avctx->width; i += 8)
                    decode8x8(&gb,
                              s->frame->data[0] + j * s->frame->linesize[0] + i,
                              s->frame->linesize[0], &s->bdsp);

            buf += video_size;
        } else if (video_type == 2) {
            if (buf + 1 <= buf_end) {
                int v = *buf++;
                for (j = 0; j < avctx->height; j++)
                    memset(s->frame->data[0] + j * s->frame->linesize[0],
                           v, avctx->width);
            }
        } else {
            av_log(avctx, AV_LOG_WARNING,
                   "unsupported frame type %i\n", video_type);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf < buf_end) {
        for (i = 0; i < AVPALETTE_COUNT && buf + 3 <= buf_end; i++) {
            s->palette[i] = AV_RB24(buf) << 2;
            buf += 3;
        }
        s->palette_has_changed = 1;
    }

    if (video_size) {
        s->frame->key_frame           = 1;
        s->frame->pict_type           = AV_PICTURE_TYPE_I;
        s->frame->palette_has_changed = s->palette_has_changed;
        s->palette_has_changed        = 0;
        memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);

        if ((ret = av_frame_ref(data, s->frame)) < 0)
            return ret;
        *got_frame = 1;
    }

    return buf_size;
}
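A standalone sketch (hypothetical palette entry) of the palette expansion above: the JV palette stores 6-bit VGA components, and because each component is at most 0x3F, shifting the packed big-endian 0xRRGGBB value left by two scales every component towards 8 bits without spilling into its neighbour.

#include <stdint.h>
#include <stdio.h>

static uint32_t rb24(const uint8_t *p)           /* big-endian 24-bit read */
{
    return ((uint32_t)p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
    const uint8_t pal_entry[3] = { 0x3F, 0x20, 0x01 };   /* hypothetical R,G,B */
    uint32_t rgb = rb24(pal_entry) << 2;                 /* 6-bit -> 8-bit */

    printf("R=%02X G=%02X B=%02X\n",
           (rgb >> 16) & 0xFF, (rgb >> 8) & 0xFF, rgb & 0xFF);  /* FC 80 04 */
    return 0;
}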
Example #14
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    AVIOContext *pb[3];
    char filename[1024];
    AVCodecContext *codec = s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe)
    {
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 && img->img_number > 1)
        {
            av_log(s, AV_LOG_ERROR,
                   "Could not get frame filename number %d from pattern '%s'\n",
                   img->img_number, img->path);
            return AVERROR(EIO);
        }
        for(i = 0; i < 3; i++)
        {
            if (avio_open(&pb[i], filename, AVIO_WRONLY) < 0)
            {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n", filename);
                return AVERROR(EIO);
            }

            if(!img->split_planes)
                break;
            filename[ strlen(filename) - 1 ] = 'U' + i;
        }
    }
    else
    {
        pb[0] = s->pb;
    }

    if(img->split_planes)
    {
        int ysize = codec->width * codec->height;
        avio_write(pb[0], pkt->data        , ysize);
        avio_write(pb[1], pkt->data + ysize, (pkt->size - ysize) / 2);
        avio_write(pb[2], pkt->data + ysize + (pkt->size - ysize) / 2, (pkt->size - ysize) / 2);
        avio_flush(pb[1]);
        avio_flush(pb[2]);
        avio_close(pb[1]);
        avio_close(pb[2]);
    }
    else
    {
        if(av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000)
        {
            AVStream *st = s->streams[0];
            if(st->codec->extradata_size > 8 &&
                    AV_RL32(st->codec->extradata + 4) == MKTAG('j', 'p', '2', 'h'))
            {
                if(pkt->size < 8 || AV_RL32(pkt->data + 4) != MKTAG('j', 'p', '2', 'c'))
                    goto error;
                avio_wb32(pb[0], 12);
                ffio_wfourcc(pb[0], "jP  ");
                avio_wb32(pb[0], 0x0D0A870A); // signature
                avio_wb32(pb[0], 20);
                ffio_wfourcc(pb[0], "ftyp");
                ffio_wfourcc(pb[0], "jp2 ");
                avio_wb32(pb[0], 0);
                ffio_wfourcc(pb[0], "jp2 ");
                avio_write(pb[0], st->codec->extradata, st->codec->extradata_size);
            }
            else if(pkt->size < 8 ||
                    (!st->codec->extradata_size &&
                     AV_RL32(pkt->data + 4) != MKTAG('j', 'P', ' ', ' '))) // signature
            {
error:
                av_log(s, AV_LOG_ERROR, "malformated jpeg2000 codestream\n");
                return -1;
            }
        }
        avio_write(pb[0], pkt->data, pkt->size);
    }
    avio_flush(pb[0]);
    if (!img->is_pipe)
    {
        avio_close(pb[0]);
    }

    img->img_number++;
    return 0;
}
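A rough sketch (hypothetical dimensions) of the split_planes size math above, assuming a planar YUV 4:2:0 payload: the luma plane is width*height bytes and the two chroma planes split the remainder evenly.

#include <stdio.h>

int main(void)
{
    int width = 320, height = 240;                 /* hypothetical frame size */
    int pkt_size = width * height * 3 / 2;         /* full YUV420P frame */

    int ysize = width * height;
    int csize = (pkt_size - ysize) / 2;            /* one chroma plane */

    printf("Y: %d bytes, U: %d bytes, V: %d bytes\n", ysize, csize, csize);
    return 0;
}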
Example #15
static int roq_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    int64_t codebook_offset;

    while (!packet_read) {

        if (avio_feof(s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        chunk_size = ffio_limit(pb, chunk_size);

        switch (chunk_type) {

        case RoQ_INFO:
            if (roq->video_stream_index == -1) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 63, 1, roq->frame_rate);
                roq->video_stream_index = st->index;
                st->codec->codec_type   = AVMEDIA_TYPE_VIDEO;
                st->codec->codec_id     = AV_CODEC_ID_ROQ;
                st->codec->codec_tag    = 0;  /* no fourcc */

                if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
                    return AVERROR(EIO);
                st->codec->width  = roq->width  = AV_RL16(preamble);
                st->codec->height = roq->height = AV_RL16(preamble + 2);
                break;
            }
            /* don't care about this chunk anymore */
            avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE);
            break;

        case RoQ_QUAD_CODEBOOK:
            if (roq->video_stream_index < 0)
                return AVERROR_INVALIDDATA;
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            avio_skip(pb, codebook_size);
            if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                codebook_size;

            /* rewind */
            avio_seek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->pts = roq->video_pts++;

            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
            if (roq->audio_stream_index == -1) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
                roq->audio_stream_index = st->index;
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id = AV_CODEC_ID_ROQ_DPCM;
                st->codec->codec_tag = 0;  /* no tag */
                if (chunk_type == RoQ_SOUND_STEREO) {
                    st->codec->channels       = 2;
                    st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
                } else {
                    st->codec->channels       = 1;
                    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
                }
                roq->audio_channels    = st->codec->channels;
                st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
                st->codec->bits_per_coded_sample = 16;
                st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                    st->codec->bits_per_coded_sample;
                st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            }
        case RoQ_QUAD_VQ:
            if (chunk_type == RoQ_QUAD_VQ) {
                if (roq->video_stream_index < 0)
                    return AVERROR_INVALIDDATA;
            }

            /* load up the packet */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->pts = roq->video_pts++;
            } else {
                pkt->stream_index = roq->audio_stream_index;
                pkt->pts = roq->audio_frame_count;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= avio_tell(pb);
            ret = avio_read(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
        }
    }

    return ret;
}
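A standalone sketch of the 8-byte RoQ chunk preamble parsed above: a little-endian 16-bit chunk id followed by a little-endian 32-bit payload size (the last two bytes are a chunk argument that stays inside the packet). The preamble bytes below are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical preamble: chunk id 0x1002, payload size 0x600 */
    const uint8_t preamble[8] = { 0x02, 0x10, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00 };

    unsigned chunk_type = preamble[0] | (preamble[1] << 8);
    unsigned chunk_size = preamble[2] | (preamble[3] << 8) |
                          (preamble[4] << 16) | ((unsigned)preamble[5] << 24);

    printf("chunk 0x%04X, %u payload bytes\n", chunk_type, chunk_size);
    return 0;
}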
Example #16
static int ea_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb    = s->pb;
    int partial_packet = 0;
    unsigned int chunk_type, chunk_size;
    int ret = 0, packet_read = 0, key = 0;
    int av_uninit(num_samples);

    while (!packet_read || partial_packet) {
        chunk_type = avio_rl32(pb);
        chunk_size = ea->big_endian ? avio_rb32(pb) : avio_rl32(pb);
        if (chunk_size <= 8)
            return AVERROR_INVALIDDATA;
        chunk_size -= 8;

        switch (chunk_type) {
        /* audio data */
        case ISNh_TAG:
            /* header chunk also contains data; skip over the header portion */
            if (chunk_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 32);
            chunk_size -= 32;
        case ISNd_TAG:
        case SCDl_TAG:
        case SNDC_TAG:
        case SDEN_TAG:
            if (!ea->audio_codec) {
                avio_skip(pb, chunk_size);
                break;
            } else if (ea->audio_codec == AV_CODEC_ID_PCM_S16LE_PLANAR ||
                       ea->audio_codec == AV_CODEC_ID_MP3) {
                num_samples = avio_rl32(pb);
                avio_skip(pb, 8);
                chunk_size -= 12;
            }
            if (partial_packet) {
                avpriv_request_sample(s, "video header followed by audio packet");
                av_free_packet(pkt);
                partial_packet = 0;
            }
            ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0)
                return ret;
            pkt->stream_index = ea->audio_stream_index;

            switch (ea->audio_codec) {
            case AV_CODEC_ID_ADPCM_EA:
            case AV_CODEC_ID_ADPCM_EA_R1:
            case AV_CODEC_ID_ADPCM_EA_R2:
            case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
            case AV_CODEC_ID_ADPCM_EA_R3:
                if (pkt->size < 4) {
                    av_log(s, AV_LOG_ERROR, "Packet is too short\n");
                    av_free_packet(pkt);
                    return AVERROR_INVALIDDATA;
                }
                if (ea->audio_codec == AV_CODEC_ID_ADPCM_EA_R3)
                    pkt->duration = AV_RB32(pkt->data);
                else
                    pkt->duration = AV_RL32(pkt->data);
                break;
            case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
                pkt->duration = ret * 2 / ea->num_channels;
                break;
            case AV_CODEC_ID_PCM_S16LE_PLANAR:
            case AV_CODEC_ID_MP3:
                pkt->duration = num_samples;
                break;
            default:
                pkt->duration = chunk_size / (ea->bytes * ea->num_channels);
            }

            packet_read = 1;
            break;

        /* ending tag */
        case 0:
        case ISNe_TAG:
        case SCEl_TAG:
        case SEND_TAG:
        case SEEN_TAG:
            ret         = AVERROR(EIO);
            packet_read = 1;
            break;

        case MVIh_TAG:
        case kVGT_TAG:
        case pQGT_TAG:
        case TGQs_TAG:
        case MADk_TAG:
            key = AV_PKT_FLAG_KEY;
        case MVIf_TAG:
        case fVGT_TAG:
        case MADm_TAG:
        case MADe_TAG:
            avio_seek(pb, -8, SEEK_CUR);    // include chunk preamble
            chunk_size += 8;
            goto get_video_packet;

        case mTCD_TAG:
            avio_skip(pb, 8);               // skip ea DCT header
            chunk_size -= 8;
            goto get_video_packet;

        case MV0K_TAG:
        case MPCh_TAG:
        case pIQT_TAG:
            key = AV_PKT_FLAG_KEY;
        case MV0F_TAG:
get_video_packet:
            if (partial_packet) {
                ret = av_append_packet(pb, pkt, chunk_size);
            } else
                ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0) {
                packet_read = 1;
                break;
            }
            partial_packet = chunk_type == MVIh_TAG;
            pkt->stream_index = ea->video_stream_index;
            pkt->flags       |= key;
            packet_read       = 1;
            break;

        default:
            avio_skip(pb, chunk_size);
            break;
        }
    }

    if (ret < 0 && partial_packet)
        av_free_packet(pkt);
    return ret;
}
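A standalone sketch of the EA ADPCM duration handling above: for these ADPCM variants the first four bytes of the packet carry the sample count, big-endian for the R3 flavour and little-endian otherwise. The packet bytes below are hypothetical.

#include <stdint.h>
#include <stdio.h>

static uint32_t rd32(const uint8_t *p, int big_endian)
{
    return big_endian
        ? ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]
        :  p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    const uint8_t pkt[4] = { 0x00, 0x00, 0x04, 0x00 };

    printf("duration (R3, big-endian):       %u samples\n", rd32(pkt, 1));  /* 1024 */
    printf("duration (R1/R2, little-endian): %u samples\n", rd32(pkt, 0));
    return 0;
}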
Example #17
static av_cold int flic_decode_init(AVCodecContext *avctx)
{
    FlicDecodeContext *s = avctx->priv_data;
    unsigned char *fli_header = (unsigned char *)avctx->extradata;
    int depth;

    if (avctx->extradata_size != 0 &&
        avctx->extradata_size != 12 &&
        avctx->extradata_size != 128 &&
        avctx->extradata_size != 256 &&
        avctx->extradata_size != 904 &&
        avctx->extradata_size != 1024) {
        av_log(avctx, AV_LOG_ERROR, "Unexpected extradata size %d\n", avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    s->avctx = avctx;

    if (s->avctx->extradata_size == 12) {
        /* special case for magic carpet FLIs */
        s->fli_type = FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE;
        depth = 8;
    } else if (avctx->extradata_size == 1024) {
        uint8_t *ptr = avctx->extradata;
        int i;

        for (i = 0; i < 256; i++) {
            s->palette[i] = AV_RL32(ptr);
            ptr += 4;
        }
        depth = 8;
        /* FLI in MOV, see e.g. FFmpeg trac issue #626 */
    } else if (avctx->extradata_size == 0 ||
               avctx->extradata_size == 256 ||
        /* see FFmpeg ticket #1234 */
               avctx->extradata_size == 904) {
        s->fli_type = FLI_TYPE_CODE;
        depth = 8;
    } else {
        s->fli_type = AV_RL16(&fli_header[4]);
        depth = AV_RL16(&fli_header[12]);
    }

    if (depth == 0) {
        depth = 8; /* Some FLC generators set depth to zero, when they mean 8Bpp. Fix up here */
    }

    if ((s->fli_type == FLC_FLX_TYPE_CODE) && (depth == 16)) {
        depth = 15; /* Original Autodesk FLX's say the depth is 16Bpp when it is really 15Bpp */
    }

    switch (depth) {
        case 8  : avctx->pix_fmt = AV_PIX_FMT_PAL8; break;
        case 15 : avctx->pix_fmt = AV_PIX_FMT_RGB555; break;
        case 16 : avctx->pix_fmt = AV_PIX_FMT_RGB565; break;
        case 24 : avctx->pix_fmt = AV_PIX_FMT_BGR24; /* Supposedly BGR, but we haven't any files to test with */
                  avpriv_request_sample(avctx, "24Bpp FLC/FLX");
                  return AVERROR_PATCHWELCOME;
        default :
                  av_log(avctx, AV_LOG_ERROR, "Unknown FLC/FLX depth of %d Bpp is unsupported.\n",depth);
                  return AVERROR_INVALIDDATA;
    }

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    s->new_palette = 0;

    return 0;
}
Example #18
static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;
    int ret;

    /* ra type header */
    version = avio_rb16(pb); /* version */
    if (version == 3) {
        unsigned bytes_per_minute;
        int header_size = avio_rb16(pb);
        int64_t startpos = avio_tell(pb);
        avio_skip(pb, 8);
        bytes_per_minute = avio_rb16(pb);
        avio_skip(pb, 4);
        rm_read_metadata(s, pb, 0);
        if ((startpos + header_size) >= avio_tell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            avio_r8(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + header_size) > avio_tell(pb))
            avio_skip(pb, header_size + startpos - avio_tell(pb));
        if (bytes_per_minute)
            st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_RA_144;
        ast->deint_id = DEINT_ID_INT0;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        int codecdata_length;
        unsigned bytes_per_minute;
        /* old version (4) */
        avio_skip(pb, 2); /* unused */
        avio_rb32(pb); /* .ra4 */
        avio_rb32(pb); /* data size */
        avio_rb16(pb); /* version2 */
        avio_rb32(pb); /* header size */
        flavor= avio_rb16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */
        avio_rb32(pb); /* ??? */
        bytes_per_minute = avio_rb32(pb);
        if (version == 4) {
            if (bytes_per_minute)
                st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        }
        avio_rb32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */
        st->codec->block_align= avio_rb16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */
        avio_rb16(pb); /* ??? */
        if (version == 5) {
            avio_rb16(pb); avio_rb16(pb); avio_rb16(pb);
        }
        st->codec->sample_rate = avio_rb16(pb);
        avio_rb32(pb);
        st->codec->channels = avio_rb16(pb);
        if (version == 5) {
            ast->deint_id = avio_rl32(pb);
            avio_read(pb, buf, 4);
            buf[4] = 0;
        } else {
            AV_WL32(buf, 0);
            get_str8(pb, buf, sizeof(buf)); /* desc */
            ast->deint_id = AV_RL32(buf);
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(buf);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);

        switch (st->codec->codec_id) {
        case AV_CODEC_ID_AC3:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        case AV_CODEC_ID_RA_288:
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            break;
        case AV_CODEC_ID_COOK:
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
        case AV_CODEC_ID_ATRAC3:
        case AV_CODEC_ID_SIPR:
            if (read_all) {
                codecdata_length = 0;
            } else {
                avio_rb16(pb); avio_r8(pb);
                if (version == 5)
                    avio_r8(pb);
                codecdata_length = avio_rb32(pb);
                if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                    av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                    return -1;
                }
            }

            ast->audio_framesize = st->codec->block_align;
            if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
                if (flavor > 3) {
                    av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
                           flavor);
                    return -1;
                }
                st->codec->block_align = ff_sipr_subpk_size[flavor];
            } else {
                if(sub_packet_size <= 0){
                    av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                    return -1;
                }
                st->codec->block_align = ast->sub_packet_size;
            }
            if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
                return ret;

            break;
        case AV_CODEC_ID_AAC:
            avio_rb16(pb); avio_r8(pb);
            if (version == 5)
                avio_r8(pb);
            codecdata_length = avio_rb32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                avio_r8(pb);
                if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
                    return ret;
            }
            break;
        }
        switch (ast->deint_id) {
        case DEINT_ID_INT4:
            if (ast->coded_framesize > ast->audio_framesize ||
                sub_packet_h <= 1 ||
                ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            if (ast->coded_framesize * sub_packet_h != 2*ast->audio_framesize) {
                avpriv_request_sample(s, "mismatching interleaver parameters");
                return AVERROR_INVALIDDATA;
            }
            break;
        case DEINT_ID_GENR:
            if (ast->sub_packet_size <= 0 ||
                ast->sub_packet_size > ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            if (ast->audio_framesize % ast->sub_packet_size)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_SIPR:
        case DEINT_ID_INT0:
        case DEINT_ID_VBRS:
        case DEINT_ID_VBRF:
            break;
        default:
            av_log(s, AV_LOG_ERROR ,"Unknown interleaver %"PRIX32"\n", ast->deint_id);
            return AVERROR_INVALIDDATA;
        }
        if (ast->deint_id == DEINT_ID_INT4 ||
            ast->deint_id == DEINT_ID_GENR ||
            ast->deint_id == DEINT_ID_SIPR) {
            if (st->codec->block_align <= 0 ||
                ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX ||
                ast->audio_framesize * sub_packet_h < st->codec->block_align)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0)
                return AVERROR(ENOMEM);
        }

        if (read_all) {
            avio_r8(pb);
            avio_r8(pb);
            avio_r8(pb);
            rm_read_metadata(s, pb, 0);
        }
    }
    return 0;
}
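A small sketch (hypothetical header value) of the bit-rate conversion used above: the RealAudio header stores bytes per minute, so bits per second is 8 * bytes_per_minute / 60.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned bytes_per_minute = 480000;          /* hypothetical header value */
    int64_t bit_rate = 8LL * bytes_per_minute / 60;

    printf("bit rate: %lld bps\n", (long long)bit_rate);   /* 64000 bps */
    return 0;
}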
Example #19
static int fourxm_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int ret = 0;
    unsigned int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;

    while (!packet_read) {

        if ((ret = avio_read(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size = AV_RL32(&header[4]);
        if (url_feof(pb))
            return AVERROR(EIO);
        switch (fourcc_tag) {

        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts ++;

            /* skip the LIST-* tag and move on to the next fourcc */
            avio_rl32(pb);
            break;

        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts = fourxm->video_pts;
            pkt->pos = avio_tell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = avio_read(s->pb, &pkt->data[8], size);

            if (ret < 0){
                av_free_packet(pkt);
            }else
                packet_read = 1;
            break;

        case snd__TAG:
            track_number = avio_rl32(pb);
            avio_skip(pb, 4);
            size-=8;

            if (track_number < fourxm->track_count && fourxm->tracks[track_number].channels>0) {
                ret= av_get_packet(s->pb, pkt, size);
                if(ret<0)
                    return AVERROR(EIO);
                pkt->stream_index =
                    fourxm->tracks[track_number].stream_index;
                pkt->pts = fourxm->tracks[track_number].audio_pts;
                packet_read = 1;

                /* pts accounting */
                audio_frame_count = size;
                if (fourxm->tracks[track_number].adpcm)
                    audio_frame_count -=
                        2 * (fourxm->tracks[track_number].channels);
                audio_frame_count /=
                      fourxm->tracks[track_number].channels;
                if (fourxm->tracks[track_number].adpcm){
                    audio_frame_count *= 2;
                }else
                    audio_frame_count /=
                    (fourxm->tracks[track_number].bits / 8);
                fourxm->tracks[track_number].audio_pts += audio_frame_count;

            } else {
                avio_skip(pb, size);
            }
            break;

        default:
            avio_skip(pb, size);
            break;
        }
    }
    return ret;
}
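A standalone sketch (hypothetical chunk sizes) of the 4xm audio PTS accounting above: for ADPCM tracks the first two bytes per channel appear to be predictor state and the remaining bytes pack two samples each, while for PCM the byte count is simply divided by the channel count and sample size.

#include <stdio.h>

static int samples_in_chunk(int size, int channels, int bits, int adpcm)
{
    int count = size;
    if (adpcm)
        count -= 2 * channels;      /* per-channel predictor bytes */
    count /= channels;
    if (adpcm)
        count *= 2;                 /* two 4-bit samples per byte */
    else
        count /= bits / 8;
    return count;
}

int main(void)
{
    /* hypothetical chunks: 1024-byte stereo ADPCM vs. 16-bit stereo PCM */
    printf("adpcm: %d samples\n", samples_in_chunk(1024, 2, 4, 1));
    printf("pcm  : %d samples\n", samples_in_chunk(1024, 2, 16, 0));
    return 0;
}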
Example #20
int ff_rm_read_mdpr_codecdata(AVFormatContext *s, AVIOContext *pb,
                              AVStream *st, RMStream *rst,
                              unsigned int codec_data_size, const uint8_t *mime)
{
    unsigned int v;
    int size;
    int64_t codec_pos;
    int ret;

    if (codec_data_size > INT_MAX)
        return AVERROR_INVALIDDATA;

    avpriv_set_pts_info(st, 64, 1, 1000);
    codec_pos = avio_tell(pb);
    v = avio_rb32(pb);

    if (v == MKBETAG('M', 'L', 'T', 'I')) {
        int number_of_streams = avio_rb16(pb);
        int number_of_mdpr;
        int i;
        for (i = 0; i<number_of_streams; i++)
            avio_rb16(pb);
        number_of_mdpr = avio_rb16(pb);
        if (number_of_mdpr != 1) {
            avpriv_request_sample(s, "MLTI with multiple MDPR");
        }
        avio_rb32(pb);
        v = avio_rb32(pb);
    }

    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else if (v == MKBETAG('L', 'S', 'D', ':')) {
        avio_seek(pb, -4, SEEK_CUR);
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0)
            return ret;

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(st->codec->extradata);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);
    } else if(mime && !strcmp(mime, "logical-fileinfo")){
        int stream_count, rule_count, property_count, i;
        ff_free_stream(s, st);
        if (avio_rb16(pb) != 0) {
            av_log(s, AV_LOG_WARNING, "Unsupported version\n");
            goto skip;
        }
        stream_count = avio_rb16(pb);
        avio_skip(pb, 6*stream_count);
        rule_count = avio_rb16(pb);
        avio_skip(pb, 2*rule_count);
        property_count = avio_rb16(pb);
        for(i=0; i<property_count; i++){
            uint8_t name[128], val[128];
            avio_rb32(pb);
            if (avio_rb16(pb) != 0) {
                av_log(s, AV_LOG_WARNING, "Unsupported Name value property version\n");
                goto skip; //FIXME skip just this one
            }
            get_str8(pb, name, sizeof(name));
            switch(avio_rb32(pb)) {
            case 2: get_strl(pb, val, sizeof(val), avio_rb16(pb));
                av_dict_set(&s->metadata, name, val, 0);
                break;
            default: avio_skip(pb, avio_rb16(pb));
            }
        }
    } else {
        int fps;
        if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(s, AV_LOG_WARNING, "Unsupported stream type %08x\n", v);
            goto skip;
        }
        st->codec->codec_tag = avio_rl32(pb);
        st->codec->codec_id  = ff_codec_get_id(ff_rm_codec_tags,
                                               st->codec->codec_tag);
        av_dlog(s, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            goto fail1;
        st->codec->width  = avio_rb16(pb);
        st->codec->height = avio_rb16(pb);
        avio_skip(pb, 2); // looks like bits per sample
        avio_skip(pb, 4); // always zero?
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
        fps = avio_rb32(pb);

        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
            return ret;

        if (fps > 0) {
            av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num,
                      0x10000, fps, (1 << 30) - 1);
#if FF_API_R_FRAME_RATE
            st->r_frame_rate = st->avg_frame_rate;
#endif
        } else if (s->error_recognition & AV_EF_EXPLODE) {
            av_log(s, AV_LOG_ERROR, "Invalid framerate\n");
            return AVERROR_INVALIDDATA;
        }
    }

skip:
    /* skip codec info */
    size = avio_tell(pb) - codec_pos;
    if (codec_data_size >= size) {
        avio_skip(pb, codec_data_size - size);
    } else {
        av_log(s, AV_LOG_WARNING, "codec_data_size %u < size %d\n", codec_data_size, size);
    }

    return 0;
}
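A sketch of the frame-rate handling above, assuming the RM header stores fps as a 16.16 fixed-point value (the value below is hypothetical): reducing fps/0x10000 with a gcd gives the rational frame rate that ends up in avg_frame_rate.

#include <stdint.h>
#include <stdio.h>

static int64_t gcd64(int64_t a, int64_t b)
{
    while (b) { int64_t t = a % b; a = b; b = t; }
    return a;
}

int main(void)
{
    uint32_t fps_fixed = 0x000F0000 + 0x8000;    /* hypothetical: 15.5 fps in 16.16 */
    int64_t num = fps_fixed, den = 0x10000;
    int64_t g = gcd64(num, den);

    printf("frame rate: %lld/%lld\n", (long long)(num / g), (long long)(den / g));
    return 0;
}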
Example #21
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                                  void *data, int *data_size,
                                  const uint8_t *buf, int buf_size)
{
    FlicDecodeContext *s = avctx->priv_data;

    int stream_ptr = 0;
    int stream_ptr_after_color_chunk;
    int pixel_ptr;
    int palette_ptr;
    unsigned char palette_idx1;
    unsigned char palette_idx2;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j;

    int color_packets;
    int color_changes;
    int color_shift;
    unsigned char r, g, b;

    int lines;
    int compressed_lines;
    int starting_line;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    unsigned int pixel_limit;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    pixels = s->frame.data[0];
    pixel_limit = s->avctx->height * s->frame.linesize[0];

    frame_size = AV_RL32(&buf[stream_ptr]);
    stream_ptr += 6;  /* skip the magic number */
    num_chunks = AV_RL16(&buf[stream_ptr]);
    stream_ptr += 10;  /* skip padding */

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0)) {
        chunk_size = AV_RL32(&buf[stream_ptr]);
        if (chunk_size > frame_size) {
            av_log(avctx, AV_LOG_WARNING,
                   "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
            chunk_size = frame_size;
        }
        stream_ptr += 4;
        chunk_type = AV_RL16(&buf[stream_ptr]);
        stream_ptr += 2;

        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6;

            /* check special case: If this file is from the Magic Carpet
             * game and uses 6-bit colors even though it reports 256-color
             * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
             * initialization) */
            if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE))
                color_shift = 0;
            else
                color_shift = 2;
            /* set up the palette */
            color_packets = AV_RL16(&buf[stream_ptr]);
            stream_ptr += 2;
            palette_ptr = 0;
            for (i = 0; i < color_packets; i++) {
                /* first byte is how many colors to skip */
                palette_ptr += buf[stream_ptr++];

                /* next byte indicates how many entries to change */
                color_changes = buf[stream_ptr++];

                /* if there are 0 color changes, there are actually 256 */
                if (color_changes == 0)
                    color_changes = 256;

                for (j = 0; j < color_changes; j++) {
                    unsigned int entry;

                    /* wrap around, for good measure */
                    if ((unsigned)palette_ptr >= 256)
                        palette_ptr = 0;

                    r = buf[stream_ptr++] << color_shift;
                    g = buf[stream_ptr++] << color_shift;
                    b = buf[stream_ptr++] << color_shift;
                    entry = (r << 16) | (g << 8) | b;
                    if (s->palette[palette_ptr] != entry)
                        s->new_palette = 1;
                    s->palette[palette_ptr++] = entry;
                }
            }

            /* color chunks sometimes have weird 16-bit alignment issues;
             * therefore, take the hardline approach and set the stream_ptr
             * to the value calculated w.r.t. the size specified by the color
             * chunk header */
            stream_ptr = stream_ptr_after_color_chunk;

            break;

        case FLI_DELTA:
            y_ptr = 0;
            compressed_lines = AV_RL16(&buf[stream_ptr]);
            stream_ptr += 2;
            while (compressed_lines > 0) {
                line_packets = AV_RL16(&buf[stream_ptr]);
                stream_ptr += 2;
                if ((line_packets & 0xC000) == 0xC000) {
                    // line skip opcode
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame.linesize[0];
                } else if ((line_packets & 0xC000) == 0x4000) {
                    av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets);
                } else if ((line_packets & 0xC000) == 0x8000) {
                    // "last byte" opcode
                    pixel_ptr= y_ptr + s->frame.linesize[0] - 1;
                    CHECK_PIXEL_PTR(0);
                    pixels[pixel_ptr] = line_packets & 0xff;
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = buf[stream_ptr++];
                        pixel_ptr += pixel_skip;
                        pixel_countdown -= pixel_skip;
                        byte_run = (signed char)(buf[stream_ptr++]);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            palette_idx1 = buf[stream_ptr++];
                            palette_idx2 = buf[stream_ptr++];
                            CHECK_PIXEL_PTR(byte_run * 2);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                pixels[pixel_ptr++] = palette_idx1;
                                pixels[pixel_ptr++] = palette_idx2;
                            }
                        } else {
                            CHECK_PIXEL_PTR(byte_run * 2);
                            for (j = 0; j < byte_run * 2; j++, pixel_countdown--) {
                                palette_idx1 = buf[stream_ptr++];
                                pixels[pixel_ptr++] = palette_idx1;
                            }
                        }
                    }

                    y_ptr += s->frame.linesize[0];
                }
            }
            break;

        case FLI_LC:
            /* line compressed */
            starting_line = AV_RL16(&buf[stream_ptr]);
            stream_ptr += 2;
            y_ptr = 0;
            y_ptr += starting_line * s->frame.linesize[0];

            compressed_lines = AV_RL16(&buf[stream_ptr]);
            stream_ptr += 2;
            while (compressed_lines > 0) {
                pixel_ptr = y_ptr;
                CHECK_PIXEL_PTR(0);
                pixel_countdown = s->avctx->width;
                line_packets = buf[stream_ptr++];
                if (line_packets > 0) {
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = buf[stream_ptr++];
                        pixel_ptr += pixel_skip;
                        pixel_countdown -= pixel_skip;
                        byte_run = (signed char)(buf[stream_ptr++]);
                        if (byte_run > 0) {
                            CHECK_PIXEL_PTR(byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                palette_idx1 = buf[stream_ptr++];
                                pixels[pixel_ptr++] = palette_idx1;
                            }
                        } else if (byte_run < 0) {
                            byte_run = -byte_run;
                            palette_idx1 = buf[stream_ptr++];
                            CHECK_PIXEL_PTR(byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = palette_idx1;
                            }
                        }
                    }
                }

                y_ptr += s->frame.linesize[0];
                compressed_lines--;
            }
            break;

        case FLI_BLACK:
            /* set the whole frame to color 0 (which is usually black) */
            memset(pixels, 0,
                s->frame.linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            /* Byte run compression: This chunk type only occurs in the first
             * FLI frame and it will update the entire frame. */
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                stream_ptr++;
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    byte_run = (signed char)(buf[stream_ptr++]);
                    if (byte_run > 0) {
                        palette_idx1 = buf[stream_ptr++];
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = buf[stream_ptr++];
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                y_ptr += s->frame.linesize[0];
            }
            break;

        case FLI_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > s->avctx->width * s->avctx->height) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                stream_ptr += chunk_size - 6;
            } else {
                for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
                     y_ptr += s->frame.linesize[0]) {
                    memcpy(&pixels[y_ptr], &buf[stream_ptr],
                        s->avctx->width);
                    stream_ptr += s->avctx->width;
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            stream_ptr += chunk_size - 6;
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, stream_ptr);

    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
    if (s->new_palette) {
        s->frame.palette_has_changed = 1;
        s->new_palette = 0;
    }

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    return buf_size;
}
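A standalone sketch of the FLI_BRUN byte-run coding decoded above: a signed count byte greater than zero repeats the following palette index, and a negative count copies that many literal bytes. Bounds checks (the CHECK_PIXEL_PTR of the real decoder) are omitted here, and the input row is hypothetical.

#include <stdio.h>
#include <string.h>

static int brun_decode_row(const unsigned char *src, int src_size,
                           unsigned char *dst, int width)
{
    int in = 0, out = 0;
    while (out < width && in < src_size) {
        signed char run = (signed char)src[in++];
        if (run > 0) {                       /* replicate one palette index */
            memset(dst + out, src[in++], run);
            out += run;
        } else {                             /* copy -run literal bytes */
            memcpy(dst + out, src + in, -run);
            in  += -run;
            out += -run;
        }
    }
    return out;                              /* pixels produced */
}

int main(void)
{
    /* hypothetical row: 5x index 7, then 3 literal pixels 1, 2, 3 */
    const unsigned char row[] = { 5, 7, (unsigned char)-3, 1, 2, 3 };
    unsigned char pixels[8];

    int n = brun_decode_row(row, sizeof(row), pixels, sizeof(pixels));
    for (int i = 0; i < n; i++)
        printf("%d ", pixels[i]);
    printf("\n");                            /* 7 7 7 7 7 1 2 3 */
    return 0;
}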
Example #22
static int sox_probe(AVProbeData *p)
{
    if (AV_RL32(p->buf) == SOX_TAG || AV_RB32(p->buf) == SOX_TAG)
        return AVPROBE_SCORE_MAX;
    return 0;
}
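A sketch of the dual-endian magic check above, assuming SOX_TAG is MKTAG('.', 'S', 'o', 'X') as in FFmpeg's sox.h: comparing both the little-endian and the big-endian read of the first four bytes accepts ".SoX" (little-endian files) as well as "XoS." (big-endian files).

#include <stdint.h>
#include <stdio.h>

#define TAG(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((uint32_t)(d) << 24))

static int looks_like_sox(const uint8_t *buf)
{
    uint32_t le = buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((uint32_t)buf[3] << 24);
    uint32_t be = ((uint32_t)buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
    uint32_t sox_tag = TAG('.', 'S', 'o', 'X');
    return le == sox_tag || be == sox_tag;
}

int main(void)
{
    printf("%d %d %d\n",
           looks_like_sox((const uint8_t *)".SoX"),   /* 1: little-endian file */
           looks_like_sox((const uint8_t *)"XoS."),   /* 1: big-endian file    */
           looks_like_sox((const uint8_t *)"RIFF"));  /* 0 */
    return 0;
}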
Example #23
static int nuv_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int ret, size;

    while (!avio_feof(pb)) {
        int copyhdrsize = ctx->rtjpg_video ? HDRSIZE : 0;
        uint64_t pos    = avio_tell(pb);

        ret = avio_read(pb, hdr, HDRSIZE);
        if (ret < HDRSIZE)
            return ret < 0 ? ret : AVERROR(EIO);

        frametype = hdr[0];
        size      = PKTSIZE(AV_RL32(&hdr[8]));

        switch (frametype) {
        case NUV_EXTRADATA:
            if (!ctx->rtjpg_video) {
                avio_skip(pb, size);
                break;
            }
        case NUV_VIDEO:
            if (ctx->v_id < 0) {
                av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
                avio_skip(pb, size);
                break;
            }
            ret = av_new_packet(pkt, copyhdrsize + size);
            if (ret < 0)
                return ret;

            pkt->pos          = pos;
            pkt->flags       |= hdr[2] == 0 ? AV_PKT_FLAG_KEY : 0;
            pkt->pts          = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->v_id;
            memcpy(pkt->data, hdr, copyhdrsize);
            ret = avio_read(pb, pkt->data + copyhdrsize, size);
            if (ret < 0) {
                av_free_packet(pkt);
                return ret;
            }
            if (ret < size)
                av_shrink_packet(pkt, copyhdrsize + ret);
            return 0;
        case NUV_AUDIO:
            if (ctx->a_id < 0) {
                av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
                avio_skip(pb, size);
                break;
            }
            ret               = av_get_packet(pb, pkt, size);
            pkt->flags       |= AV_PKT_FLAG_KEY;
            pkt->pos          = pos;
            pkt->pts          = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->a_id;
            if (ret < 0)
                return ret;
            return 0;
        case NUV_SEEKP:
            // contains no data, size value is invalid
            break;
        default:
            avio_skip(pb, size);
            break;
        }
    }

    return AVERROR(EIO);
}
Example #24
static int aasc_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    AascContext *s     = avctx->priv_data;
    int compr, i, stride, psize, ret;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "frame too short\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    compr     = AV_RL32(buf);
    buf      += 4;
    buf_size -= 4;
    psize = avctx->bits_per_coded_sample / 8;
    switch (avctx->codec_tag) {
    case MKTAG('A', 'A', 'S', '4'):
        bytestream2_init(&s->gb, buf - 4, buf_size + 4);
        ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
        break;
    case MKTAG('A', 'A', 'S', 'C'):
        switch (compr) {
        case 0:
            stride = (avctx->width * psize + psize) & ~psize;
            if (buf_size < stride * avctx->height)
                return AVERROR_INVALIDDATA;
            for (i = avctx->height - 1; i >= 0; i--) {
                memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * psize);
                buf += stride;
                buf_size -= stride;
            }
            break;
        case 1:
            bytestream2_init(&s->gb, buf, buf_size);
            ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
            return AVERROR_INVALIDDATA;
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown FourCC: %X\n", avctx->codec_tag);
        return -1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(s->frame->data[1], s->palette, s->palette_size);

    *got_frame = 1;
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    /* report that the buffer was completely consumed */
    return buf_size;
}
Example #25
STDMETHODIMP CDecAvcodec::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtStartIn, REFERENCE_TIME rtStopIn, BOOL bSyncPoint, BOOL bDiscontinuity)
{
  int     got_picture = 0;
  int     used_bytes  = 0;
  BOOL    bParserFrame = FALSE;
  BOOL    bFlush = (buffer == NULL);
  BOOL    bEndOfSequence = FALSE;

  AVPacket avpkt;
  av_init_packet(&avpkt);

  if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
    if (!m_bFFReordering) {
      m_tcThreadBuffer[m_CurrentThread].rtStart = rtStartIn;
      m_tcThreadBuffer[m_CurrentThread].rtStop  = rtStopIn;
    }

    m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
  } else if (m_bBFrameDelay) {
    m_tcBFrameDelay[m_nBFramePos].rtStart = rtStartIn;
    m_tcBFrameDelay[m_nBFramePos].rtStop = rtStopIn;
    m_nBFramePos = !m_nBFramePos;
  }

  uint8_t *pDataBuffer = NULL;
  if (!bFlush && buflen > 0) {
    if (!m_bInputPadded && (!(m_pAVCtx->active_thread_type & FF_THREAD_FRAME) || m_pParser)) {
      // Copy bitstream into temporary buffer to ensure overread protection
      // Verify buffer size
      if (buflen > m_nFFBufferSize) {
        m_nFFBufferSize	= buflen;
        m_pFFBuffer = (BYTE *)av_realloc_f(m_pFFBuffer, m_nFFBufferSize + FF_INPUT_BUFFER_PADDING_SIZE, 1);
        if (!m_pFFBuffer) {
          m_nFFBufferSize = 0;
          return E_OUTOFMEMORY;
        }
      }
      
      memcpy(m_pFFBuffer, buffer, buflen);
      memset(m_pFFBuffer+buflen, 0, FF_INPUT_BUFFER_PADDING_SIZE);
      pDataBuffer = m_pFFBuffer;
    } else {
      pDataBuffer = (uint8_t *)buffer;
    }

    if (m_nCodecId == AV_CODEC_ID_H264) {
      BOOL bRecovered = m_h264RandomAccess.searchRecoveryPoint(pDataBuffer, buflen);
      if (!bRecovered) {
        return S_OK;
      }
    } else if (m_nCodecId == AV_CODEC_ID_VP8 && m_bWaitingForKeyFrame) {
      if (!(pDataBuffer[0] & 1)) {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found VP8 key-frame, resuming decoding"));
        m_bWaitingForKeyFrame = FALSE;
      } else {
        return S_OK;
      }
    }
  }

  while (buflen > 0 || bFlush) {
    REFERENCE_TIME rtStart = rtStartIn, rtStop = rtStopIn;

    if (!bFlush) {
      avpkt.data = pDataBuffer;
      avpkt.size = buflen;
      avpkt.pts = rtStartIn;
      if (rtStartIn != AV_NOPTS_VALUE && rtStopIn != AV_NOPTS_VALUE)
        avpkt.duration = (int)(rtStopIn - rtStartIn);
      else
        avpkt.duration = 0;
      avpkt.flags = AV_PKT_FLAG_KEY;

      if (m_bHasPalette) {
        m_bHasPalette = FALSE;
        uint32_t *pal = (uint32_t *)av_packet_new_side_data(&avpkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
        int pal_size = FFMIN((1 << m_pAVCtx->bits_per_coded_sample) << 2, m_pAVCtx->extradata_size);
        uint8_t *pal_src = m_pAVCtx->extradata + m_pAVCtx->extradata_size - pal_size;

        for (int i = 0; i < pal_size/4; i++)
          pal[i] = 0xFF<<24 | AV_RL32(pal_src+4*i);
      }
    } else {
      avpkt.data = NULL;
      avpkt.size = 0;
    }

    // Parse the data if a parser is present
    // This is mandatory for MPEG-1/2
    if (m_pParser) {
      BYTE *pOut = NULL;
      int pOut_size = 0;

      used_bytes = av_parser_parse2(m_pParser, m_pAVCtx, &pOut, &pOut_size, avpkt.data, avpkt.size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);

      if (used_bytes == 0 && pOut_size == 0 && !bFlush) {
        DbgLog((LOG_TRACE, 50, L"::Decode() - could not process buffer, starving?"));
        break;
      }

      // Update start time cache
      // If more data was read then output, update the cache (incomplete frame)
      // If output is bigger, a frame was completed, update the actual rtStart with the cached value, and then overwrite the cache
      if (used_bytes > pOut_size) {
        if (rtStartIn != AV_NOPTS_VALUE)
          m_rtStartCache = rtStartIn;
      } else if (used_bytes == pOut_size || ((used_bytes + 9) == pOut_size)) {
        // Why +9 above?
        // Apparently some broken MKV muxers like to mux the MPEG-2 PICTURE_START_CODE block (which is 9 bytes) into the packet of the previous frame.
        // This would delay the frame timestamps by exactly one frame and cause timestamp reordering to go wrong.
        // So instead of failing on those samples, let's just assume that 9 extra bytes mean exactly that case.
        m_rtStartCache = rtStartIn = AV_NOPTS_VALUE;
      } else if (pOut_size > used_bytes) {
        rtStart = m_rtStartCache;
        m_rtStartCache = rtStartIn;
        // The value was used once, don't use it for multiple frames, that ends up in weird timings
        rtStartIn = AV_NOPTS_VALUE;
      }

       bParserFrame = (pOut_size > 0);

      if (pOut_size > 0 || bFlush) {

        if (pOut && pOut_size > 0) {
          if (pOut_size > m_nFFBufferSize2) {
            m_nFFBufferSize2	= pOut_size;
            m_pFFBuffer2 = (BYTE *)av_realloc_f(m_pFFBuffer2, m_nFFBufferSize2 + FF_INPUT_BUFFER_PADDING_SIZE, 1);
            if (!m_pFFBuffer2) {
              m_nFFBufferSize2 = 0;
              return E_OUTOFMEMORY;
            }
          }
          memcpy(m_pFFBuffer2, pOut, pOut_size);
          memset(m_pFFBuffer2+pOut_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

          avpkt.data = m_pFFBuffer2;
          avpkt.size = pOut_size;
          avpkt.pts = rtStart;
          avpkt.duration = 0;

          const uint8_t *eosmarker = CheckForEndOfSequence(m_nCodecId, avpkt.data, avpkt.size, &m_MpegParserState);
          if (eosmarker) {
            bEndOfSequence = TRUE;
          }
        } else {
          avpkt.data = NULL;
          avpkt.size = 0;
        }

        int ret2 = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
        if (ret2 < 0) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - decoding failed despite successfull parsing"));
          got_picture = 0;
        }
      } else {
        got_picture = 0;
      }
    } else {
      used_bytes = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
    }

    if (FAILED(PostDecode())) {
      av_frame_unref(m_pFrame);
      return E_FAIL;
    }

    // Decoding of this frame failed ... oh well!
    if (used_bytes < 0) {
      av_frame_unref(m_pFrame);
      return S_OK;
    }

    // When frame threading, we won't know how much data has been consumed, so by default it eats everything.
    // In addition, if no data got consumed and no picture was extracted, the frame probably isn't all that useful.
    // The MJPEG decoder is somewhat buggy and doesn't really let us know how much data was consumed...
    if ((!m_pParser && (m_pAVCtx->active_thread_type & FF_THREAD_FRAME || (!got_picture && used_bytes == 0))) || m_bNoBufferConsumption || bFlush) {
      buflen = 0;
    } else {
      buflen -= used_bytes;
      pDataBuffer += used_bytes;
    }

    // Judge frame usability
    // This determines if a frame is artifact free and can be delivered
    // For H264 this does some wicked magic hidden away in the H264RandomAccess class
    // MPEG-2 and VC-1 just wait for a keyframe..
    if (m_nCodecId == AV_CODEC_ID_H264 && (bParserFrame || !m_pParser || got_picture)) {
      m_h264RandomAccess.judgeFrameUsability(m_pFrame, &got_picture);
    } else if (m_bResumeAtKeyFrame) {
      if (m_bWaitingForKeyFrame && got_picture) {
        if (m_pFrame->key_frame) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - Found Key-Frame, resuming decoding at %I64d", m_pFrame->pkt_pts));
          m_bWaitingForKeyFrame = FALSE;
        } else {
          got_picture = 0;
        }
      }
    }

    // Handle B-frame delay for frame threading codecs
    if ((m_pAVCtx->active_thread_type & FF_THREAD_FRAME) && m_bBFrameDelay) {
      m_tcBFrameDelay[m_nBFramePos] = m_tcThreadBuffer[m_CurrentThread];
      m_nBFramePos = !m_nBFramePos;
    }

    if (!got_picture || !m_pFrame->data[0]) {
      if (!avpkt.size)
        bFlush = FALSE; // End flushing, no more frames
      av_frame_unref(m_pFrame);
      continue;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Determine the proper timestamps for the frame, based on different possible flags.
    ///////////////////////////////////////////////////////////////////////////////////////////////
    if (m_bFFReordering) {
      rtStart = m_pFrame->pkt_pts;
      if (m_pFrame->pkt_duration)
        rtStop = m_pFrame->pkt_pts + m_pFrame->pkt_duration;
      else
        rtStop = AV_NOPTS_VALUE;
    } else if (m_bBFrameDelay && m_pAVCtx->has_b_frames) {
      rtStart = m_tcBFrameDelay[m_nBFramePos].rtStart;
      rtStop  = m_tcBFrameDelay[m_nBFramePos].rtStop;
    } else if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
      unsigned index = m_CurrentThread;
      rtStart = m_tcThreadBuffer[index].rtStart;
      rtStop  = m_tcThreadBuffer[index].rtStop;
    }

    if (m_bRVDropBFrameTimings && m_pFrame->pict_type == AV_PICTURE_TYPE_B) {
      rtStart = AV_NOPTS_VALUE;
    }

    if (m_bCalculateStopTime)
      rtStop = AV_NOPTS_VALUE;

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // All required values collected, deliver the frame
    ///////////////////////////////////////////////////////////////////////////////////////////////
    LAVFrame *pOutFrame = NULL;
    AllocateFrame(&pOutFrame);

    AVRational display_aspect_ratio;
    int64_t num = (int64_t)m_pFrame->sample_aspect_ratio.num * m_pFrame->width;
    int64_t den = (int64_t)m_pFrame->sample_aspect_ratio.den * m_pFrame->height;
    av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, num, den, 1 << 30);

    pOutFrame->width        = m_pFrame->width;
    pOutFrame->height       = m_pFrame->height;
    pOutFrame->aspect_ratio = display_aspect_ratio;
    pOutFrame->repeat       = m_pFrame->repeat_pict;
    pOutFrame->key_frame    = m_pFrame->key_frame;
    pOutFrame->frame_type   = av_get_picture_type_char(m_pFrame->pict_type);
    pOutFrame->ext_format   = GetDXVA2ExtendedFlags(m_pAVCtx, m_pFrame);

    if (m_pFrame->interlaced_frame || (!m_pAVCtx->progressive_sequence && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)))
      m_iInterlaced = 1;
    else if (m_pAVCtx->progressive_sequence)
      m_iInterlaced = 0;

    pOutFrame->interlaced   = (m_pFrame->interlaced_frame || (m_iInterlaced == 1 && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

    LAVDeintFieldOrder fo   = m_pSettings->GetDeintFieldOrder();
    pOutFrame->tff          = (fo == DeintFieldOrder_Auto) ? m_pFrame->top_field_first : (fo == DeintFieldOrder_TopFieldFirst);

    pOutFrame->rtStart      = rtStart;
    pOutFrame->rtStop       = rtStop;

    PixelFormatMapping map  = getPixFmtMapping((AVPixelFormat)m_pFrame->format);
    pOutFrame->format       = map.lavpixfmt;
    pOutFrame->bpp          = map.bpp;

    if (m_nCodecId == AV_CODEC_ID_MPEG2VIDEO || m_nCodecId == AV_CODEC_ID_MPEG1VIDEO)
      pOutFrame->avgFrameDuration = GetFrameDuration();

    if (map.conversion) {
      ConvertPixFmt(m_pFrame, pOutFrame);
    } else {
      for (int i = 0; i < 4; i++) {
        pOutFrame->data[i]   = m_pFrame->data[i];
        pOutFrame->stride[i] = m_pFrame->linesize[i];
      }

      pOutFrame->priv_data = av_frame_alloc();
      av_frame_ref((AVFrame *)pOutFrame->priv_data, m_pFrame);
      pOutFrame->destruct  = lav_avframe_free;
    }

    if (bEndOfSequence)
      pOutFrame->flags |= LAV_FRAME_FLAG_END_OF_SEQUENCE;

    if (pOutFrame->format == LAVPixFmt_DXVA2) {
      pOutFrame->data[0] = m_pFrame->data[4];
      HandleDXVA2Frame(pOutFrame);
    } else {
      Deliver(pOutFrame);
    }

    if (bEndOfSequence) {
      bEndOfSequence = FALSE;
      if (pOutFrame->format == LAVPixFmt_DXVA2) {
        HandleDXVA2Frame(m_pCallback->GetFlushFrame());
      } else {
        Deliver(m_pCallback->GetFlushFrame());
      }
    }

    if (bFlush) {
      m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
    }
    av_frame_unref(m_pFrame);
  }

  return S_OK;
}
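The two-slot buffer above (m_tcBFrameDelay / m_nBFramePos) delays packet timestamps by one frame, so a codec with B-frame reordering hands out the timing that belongs to the picture actually being delivered. A minimal stand-alone sketch of that ping-pong technique; the names timing_t and push_timing are illustrative only and not part of the decoder above.

#include <stdio.h>
#include <stdint.h>

typedef struct { int64_t rtStart, rtStop; } timing_t;

static timing_t delay[2];   /* the two slots of the ping-pong buffer */
static int      pos = 0;    /* flips between 0 and 1 on every frame  */

/* store this frame's packet timestamps and return the pair stored on the
 * previous call, i.e. the timing of the picture delayed by one frame */
static timing_t push_timing(timing_t in)
{
    delay[pos] = in;         /* remember the current packet's timestamps   */
    pos = !pos;              /* flip to the other slot                     */
    return delay[pos];       /* that slot was filled one iteration earlier */
}

int main(void)
{
    for (int64_t i = 1; i <= 4; i++) {
        timing_t in  = { i * 400000, (i + 1) * 400000 };  /* 40 ms frames */
        timing_t out = push_timing(in);                   /* first call returns the zeroed slot */
        printf("packet start %lld -> delivered start %lld\n",
               (long long)in.rtStart, (long long)out.rtStart);
    }
    return 0;
}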
Exemple #26
0
static int ea_read_packet(AVFormatContext *s,
                          AVPacket *pkt)
{
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    int packet_read = 0;
    unsigned int chunk_type, chunk_size;
    int key = 0;
    int num_samples = 0;

    while (!packet_read) {
        chunk_type = avio_rl32(pb);
        chunk_size = (ea->big_endian ? avio_rb32(pb) : avio_rl32(pb)) - 8;

        switch (chunk_type) {
        /* audio data */
        case ISNh_TAG:
            /* header chunk also contains data; skip over the header portion*/
            avio_skip(pb, 32);
            chunk_size -= 32;
            /* fall through to the common audio handling below */
        case ISNd_TAG:
        case SCDl_TAG:
        case SNDC_TAG:
        case SDEN_TAG:
            if (!ea->audio_codec) {
                avio_skip(pb, chunk_size);
                break;
            } else if (ea->audio_codec == CODEC_ID_PCM_S16LE_PLANAR ||
                       ea->audio_codec == CODEC_ID_MP3) {
                num_samples = avio_rl32(pb);
                avio_skip(pb, 8);
                chunk_size -= 12;
            }

            ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0)
                return ret;

            if (ea->audio_codec == CODEC_ID_ADPCM_EA_R1 ||
                ea->audio_codec == CODEC_ID_ADPCM_EA_R2 ||
                ea->audio_codec == CODEC_ID_ADPCM_EA_R3) {
                num_samples = ea->audio_codec == CODEC_ID_ADPCM_EA_R3 ?
                    AV_RB32(pkt->data) : AV_RL32(pkt->data);
            }

            pkt->stream_index = ea->audio_stream_index;
            pkt->pts = ea->audio_frame_counter;

            switch (ea->audio_codec) {
            case CODEC_ID_ADPCM_EA:
                /* 2 samples/byte, 1 or 2 samples per frame depending
                 * on stereo; chunk also has 12-byte header */
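                /* e.g. a 1036-byte stereo chunk advances the counter by
                 * (1036 - 12) * 2 / 2 = 1024 */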
                ea->audio_frame_counter += ((chunk_size - 12) * 2) /
                    ea->num_channels;
                break;
            default:
                if (num_samples > 0)
                    ea->audio_frame_counter += num_samples;
                else
                    ea->audio_frame_counter += chunk_size /
                                               (ea->bytes * ea->num_channels);
            }

            packet_read = 1;
            break;

        /* ending tag */
        case 0:
        case ISNe_TAG:
        case SCEl_TAG:
        case SEND_TAG:
        case SEEN_TAG:
            ret = AVERROR(EIO);
            packet_read = 1;
            break;

        case MVIh_TAG:
        case kVGT_TAG:
        case pQGT_TAG:
        case TGQs_TAG:
        case MADk_TAG:
            key = AV_PKT_FLAG_KEY;
            /* fall through */
        case MVIf_TAG:
        case fVGT_TAG:
        case MADm_TAG:
        case MADe_TAG:
            avio_seek(pb, -8, SEEK_CUR);     // include chunk preamble
            chunk_size += 8;
            goto get_video_packet;

        case mTCD_TAG:
            avio_skip(pb, 8);  // skip ea dct header
            chunk_size -= 8;
            goto get_video_packet;

        case MV0K_TAG:
        case MPCh_TAG:
        case pIQT_TAG:
            key = AV_PKT_FLAG_KEY;
            /* fall through */
        case MV0F_TAG:
get_video_packet:
            ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0)
                return ret;
            pkt->stream_index = ea->video_stream_index;
            pkt->flags |= key;
            packet_read = 1;
            break;

        default:
            avio_skip(pb, chunk_size);
            break;
        }
    }

    return ret;
}
Exemple #27
0
static int str_probe(AVProbeData *p)
{
    uint8_t *sector= p->buf;
    uint8_t *end= sector + p->buf_size;
    int aud=0, vid=0;

    if (p->buf_size < RAW_CD_SECTOR_SIZE)
        return 0;

    if ((AV_RL32(&p->buf[0]) == RIFF_TAG) &&
        (AV_RL32(&p->buf[8]) == CDXA_TAG)) {

        /* RIFF header seen; skip 0x2C bytes */
        sector += RIFF_HEADER_SIZE;
    }

    while (end - sector >= RAW_CD_SECTOR_SIZE) {
        /* look for CD sync header (00, 0xFF x 10, 00) */
        if (memcmp(sector,sync_header,sizeof(sync_header)))
            return 0;

        if (sector[0x11] >= 32)
            return 0;
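        /* sector byte 0x11 is the CD-XA channel number; byte 0x12 is the
         * submode byte whose type bits are switched on below */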

        switch (sector[0x12] & CDXA_TYPE_MASK) {
        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO: {
                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size = AV_RL32(&sector[0x24]);

                if (!(frame_size >= 0 &&
                      current_sector < sector_count &&
                      sector_count * VIDEO_DATA_CHUNK_SIZE >= frame_size)) {
                    return 0;
                }

                /* bytes 0x28/0x2A of the sector hold the video width/height */
                vid++;
            }
            break;
        case CDXA_TYPE_AUDIO:
            if(sector[0x13]&0x2A)
                return 0;
            aud++;
            break;
        default:
            if(sector[0x12] & CDXA_TYPE_MASK)
                return 0;
        }
        sector += RAW_CD_SECTOR_SIZE;
    }
    /* MPEG files (like those ripped from VCDs) can also look like this;
     * only return half certainty */
    if(vid+aud > 3)  return 50;
    else if(vid+aud) return 1;
    else             return 0;
}
Exemple #28
0
static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    TTAContext *c = s->priv_data;
    AVStream *st;
    int i, channels, bps, samplerate, datalen, framelen;
    uint64_t framepos, start_offset;

    if (!av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
        ff_id3v1_read(s);

    start_offset = avio_tell(s->pb);
    if (avio_rl32(s->pb) != AV_RL32("TTA1"))
        return -1; // not tta file

    avio_skip(s->pb, 2); // FIXME: flags
    channels = avio_rl16(s->pb);
    bps = avio_rl16(s->pb);
    samplerate = avio_rl32(s->pb);
    if(samplerate <= 0 || samplerate > 1000000){
        av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
        return -1;
    }

    datalen = avio_rl32(s->pb);
    if(datalen < 0){
        av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
        return -1;
    }

    avio_skip(s->pb, 4); // header crc

    framelen = samplerate*256/245;
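    /* frame length is samplerate * 256 / 245 samples, e.g. 44100 Hz gives
     * 44100 * 256 / 245 = 46080 samples (about 1.045 s) per frame */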
    c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
    c->currentframe = 0;

    if(c->totalframes >= UINT_MAX/sizeof(uint32_t)){
        av_log(s, AV_LOG_ERROR, "totalframes too large\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 64, 1, samplerate);
    st->start_time = 0;
    st->duration = datalen;

    framepos = avio_tell(s->pb) + 4*c->totalframes + 4;

    for (i = 0; i < c->totalframes; i++) {
        uint32_t size = avio_rl32(s->pb);
        av_add_index_entry(st, framepos, i*framelen, size, 0, AVINDEX_KEYFRAME);
        framepos += size;
    }
    avio_skip(s->pb, 4); // seektable crc

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_TTA;
    st->codec->channels = channels;
    st->codec->sample_rate = samplerate;
    st->codec->bits_per_coded_sample = bps;

    st->codec->extradata_size = avio_tell(s->pb) - start_offset;
    if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
        // overflow check for the padded allocation below (avio_read would also fail on such sizes)
        av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
        return -1;
    }
    st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    avio_seek(s->pb, start_offset, SEEK_SET);
    avio_read(s->pb, st->codec->extradata, st->codec->extradata_size);

    return 0;
}
Exemple #29
0
/* This function loads and processes a single chunk in an IP movie file.
 * It returns the type of chunk that was processed. */
static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
    AVPacket *pkt)
{
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    int chunk_size;
    unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
    unsigned char opcode_type;
    unsigned char opcode_version;
    int opcode_size;
    unsigned char scratch[1024];
    int i, j;
    int first_color, last_color;
    int audio_flags;
    unsigned char r, g, b;

    /* see if there are any pending packets */
    chunk_type = load_ipmovie_packet(s, pb, pkt);
    if (chunk_type != CHUNK_DONE)
        return chunk_type;

    /* read the next chunk, wherever the file happens to be pointing */
    if (url_feof(pb))
        return CHUNK_EOF;
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return CHUNK_BAD;
    chunk_size = AV_RL16(&chunk_preamble[0]);
    chunk_type = AV_RL16(&chunk_preamble[2]);

    debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);

    switch (chunk_type) {

    case CHUNK_INIT_AUDIO:
        debug_ipmovie("initialize audio\n");
        break;

    case CHUNK_AUDIO_ONLY:
        debug_ipmovie("audio only\n");
        break;

    case CHUNK_INIT_VIDEO:
        debug_ipmovie("initialize video\n");
        break;

    case CHUNK_VIDEO:
        debug_ipmovie("video (and audio)\n");
        break;

    case CHUNK_SHUTDOWN:
        debug_ipmovie("shutdown\n");
        break;

    case CHUNK_END:
        debug_ipmovie("end\n");
        break;

    default:
        debug_ipmovie("invalid chunk\n");
        chunk_type = CHUNK_BAD;
        break;

    }

    while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {

        /* read the next chunk, wherever the file happens to be pointing */
        if (url_feof(pb)) {
            chunk_type = CHUNK_EOF;
            break;
        }
        if (get_buffer(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE) {
            chunk_type = CHUNK_BAD;
            break;
        }

        opcode_size = AV_RL16(&opcode_preamble[0]);
        opcode_type = opcode_preamble[2];
        opcode_version = opcode_preamble[3];

        chunk_size -= OPCODE_PREAMBLE_SIZE;
        chunk_size -= opcode_size;
        if (chunk_size < 0) {
            debug_ipmovie("chunk_size countdown just went negative\n");
            chunk_type = CHUNK_BAD;
            break;
        }

        debug_ipmovie("  opcode type %02X, version %d, 0x%04X bytes: ",
            opcode_type, opcode_version, opcode_size);
        switch (opcode_type) {

        case OPCODE_END_OF_STREAM:
            debug_ipmovie("end of stream\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_END_OF_CHUNK:
            debug_ipmovie("end of chunk\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_CREATE_TIMER:
            debug_ipmovie("create timer\n");
            if ((opcode_version > 0) || (opcode_size > 6)) {
                debug_ipmovie("bad create_timer opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->fps = 1000000.0 / (AV_RL32(&scratch[0]) * AV_RL16(&scratch[4]));
            s->frame_pts_inc = 90000 / s->fps;
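            /* e.g. a timer value of 100000 with subdivision 1 gives
             * 1000000 / 100000 = 10 fps, so frame_pts_inc = 90000 / 10 = 9000
             * ticks of the 90 kHz clock */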
            debug_ipmovie("  %.2f frames/second (timer div = %d, subdiv = %d)\n",
                s->fps, AV_RL32(&scratch[0]), AV_RL16(&scratch[4]));
            break;

        case OPCODE_INIT_AUDIO_BUFFERS:
            debug_ipmovie("initialize audio buffers\n");
            if ((opcode_version > 1) || (opcode_size > 10)) {
                debug_ipmovie("bad init_audio_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->audio_sample_rate = AV_RL16(&scratch[4]);
            audio_flags = AV_RL16(&scratch[2]);
            /* bit 0 of the flags: 0 = mono, 1 = stereo */
            s->audio_channels = (audio_flags & 1) + 1;
            /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
            s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
            /* bit 2 indicates compressed audio in version 1 opcode */
            if ((opcode_version == 1) && (audio_flags & 0x4))
                s->audio_type = CODEC_ID_INTERPLAY_DPCM;
            else if (s->audio_bits == 16)
                s->audio_type = CODEC_ID_PCM_S16LE;
            else
                s->audio_type = CODEC_ID_PCM_U8;
            debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n",
                s->audio_bits,
                s->audio_sample_rate,
                (s->audio_channels == 2) ? "stereo" : "mono",
                (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
                "Interplay audio" : "PCM");
            break;

        case OPCODE_START_STOP_AUDIO:
            debug_ipmovie("start/stop audio\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_INIT_VIDEO_BUFFERS:
            debug_ipmovie("initialize video buffers\n");
            if ((opcode_version > 2) || (opcode_size > 8)) {
                debug_ipmovie("bad init_video_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->video_width = AV_RL16(&scratch[0]) * 8;
            s->video_height = AV_RL16(&scratch[2]) * 8;
            debug_ipmovie("video resolution: %d x %d\n",
                s->video_width, s->video_height);
            break;

        case OPCODE_UNKNOWN_06:
        case OPCODE_UNKNOWN_0E:
        case OPCODE_UNKNOWN_10:
        case OPCODE_UNKNOWN_12:
        case OPCODE_UNKNOWN_13:
        case OPCODE_UNKNOWN_14:
        case OPCODE_UNKNOWN_15:
            debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type);
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SEND_BUFFER:
            debug_ipmovie("send buffer\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_AUDIO_FRAME:
            debug_ipmovie("audio frame\n");

            /* log position and move on for now */
            s->audio_chunk_offset = url_ftell(pb);
            s->audio_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SILENCE_FRAME:
            debug_ipmovie("silence frame\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_INIT_VIDEO_MODE:
            debug_ipmovie("initialize video mode\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_CREATE_GRADIENT:
            debug_ipmovie("create gradient\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SET_PALETTE:
            debug_ipmovie("set palette\n");
            /* check for the logical maximum palette size
             * (3 * 256 + 4 bytes) */
            if (opcode_size > 0x304) {
                debug_ipmovie("demux_ipmovie: set_palette opcode too large\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) != opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }

            /* load the palette into internal data structure */
            first_color = AV_RL16(&scratch[0]);
            last_color = first_color + AV_RL16(&scratch[2]) - 1;
            /* sanity check (since they are 16 bit values) */
            if ((first_color > 0xFF) || (last_color > 0xFF)) {
                debug_ipmovie("demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
                    first_color, last_color);
                chunk_type = CHUNK_BAD;
                break;
            }
            j = 4;  /* offset of first palette data */
            for (i = first_color; i <= last_color; i++) {
                /* the palette is stored as a 6-bit VGA palette, thus each
                 * component is shifted up to a 8-bit range */
                r = scratch[j++] * 4;
                g = scratch[j++] * 4;
                b = scratch[j++] * 4;
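                /* e.g. the 6-bit maximum 0x3F scales to 0x3F * 4 = 252 */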
                s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
            }
            /* indicate a palette change */
            s->palette_control.palette_changed = 1;
            break;

        case OPCODE_SET_PALETTE_COMPRESSED:
            debug_ipmovie("set palette compressed\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SET_DECODING_MAP:
            debug_ipmovie("set decoding map\n");

            /* log position and move on for now */
            s->decode_map_chunk_offset = url_ftell(pb);
            s->decode_map_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_VIDEO_DATA:
            debug_ipmovie("set video data\n");

            /* log position and move on for now */
            s->video_chunk_offset = url_ftell(pb);
            s->video_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        default:
            debug_ipmovie("*** unknown opcode type\n");
            chunk_type = CHUNK_BAD;
            break;

        }
    }

    /* make a note of where the stream is sitting */
    s->next_chunk_offset = url_ftell(pb);

    /* dispatch the first of any pending packets */
    if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
        chunk_type = load_ipmovie_packet(s, pb, pkt);

    return chunk_type;
}
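The chunk walker above layers two levels of little-endian framing: a 4-byte chunk preamble (16-bit size, 16-bit type) followed by a sequence of 4-byte opcode preambles (16-bit size, type byte, version byte) whose sizes are subtracted from the remaining chunk size. A minimal sketch of that framing over an in-memory buffer; rl16(), walk_chunk() and the sample bytes are illustrative only, not part of the demuxer.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static unsigned rl16(const uint8_t *p) { return p[0] | (p[1] << 8); }

static void walk_chunk(const uint8_t *buf, size_t len)
{
    if (len < 4)
        return;
    int chunk_size = rl16(buf);          /* payload size, excludes the preamble */
    int chunk_type = rl16(buf + 2);
    const uint8_t *p = buf + 4;
    printf("chunk type 0x%04X, %d bytes\n", chunk_type, chunk_size);

    while (chunk_size >= 4 && (size_t)(p - buf) + 4 <= len) {
        int opcode_size    = rl16(p);
        int opcode_type    = p[2];
        int opcode_version = p[3];
        chunk_size -= 4 + opcode_size;   /* opcode preamble plus payload */
        if (chunk_size < 0 || (size_t)(p - buf) + 4 + opcode_size > len)
            break;                       /* truncated or corrupt stream */
        printf("  opcode %02X v%d, %d bytes\n",
               opcode_type, opcode_version, opcode_size);
        p += 4 + opcode_size;            /* skip to the next opcode */
    }
}

int main(void)
{
    /* one chunk containing a single 2-byte opcode payload */
    const uint8_t sample[] = { 0x06, 0x00, 0x01, 0x00,   /* chunk: 6 bytes, type 1  */
                               0x02, 0x00, 0x04, 0x00,   /* opcode: 2 bytes, type 4 */
                               0xAA, 0xBB };             /* opcode payload          */
    walk_chunk(sample, sizeof(sample));
    return 0;
}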
Exemple #30
0
static int wv_read_block_header(AVFormatContext *ctx, AVIOContext *pb, int append)
{
    WVContext *wc = ctx->priv_data;
    uint32_t tag, ver;
    int size;
    int rate, bpp, chan;
    uint32_t chmask;

    wc->pos = avio_tell(pb);
    if(!append){
        tag = avio_rl32(pb);
        if (tag != MKTAG('w', 'v', 'p', 'k'))
            return -1;
        size = avio_rl32(pb);
        if(size < 24 || size > WV_BLOCK_LIMIT){
            av_log(ctx, AV_LOG_ERROR, "Incorrect block size %i\n", size);
            return -1;
        }
        wc->blksize = size;
        ver = avio_rl16(pb);
        if(ver < 0x402 || ver > 0x410){
            av_log(ctx, AV_LOG_ERROR, "Unsupported version %03X\n", ver);
            return -1;
        }
        avio_r8(pb); // track no
        avio_r8(pb); // track sub index
        wc->samples = avio_rl32(pb); // total samples in file
        wc->soff = avio_rl32(pb); // offset in samples of current block
        avio_read(pb, wc->extra, WV_EXTRA_SIZE);
    }else{
        size = wc->blksize;
    }
    wc->flags = AV_RL32(wc->extra + 4);
    //parse flags
    bpp = ((wc->flags & 3) + 1) << 3;
    chan = 1 + !(wc->flags & WV_MONO);
    chmask = wc->flags & WV_MONO ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
    rate = wv_rates[(wc->flags >> 23) & 0xF];
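    /* (wc->flags & 3) + 1 is the bytes per sample, so e.g. a value of 1 gives
     * bpp = (1 + 1) << 3 = 16; a wv_rates[] entry of -1 marks a custom rate,
     * which is recovered from the metadata sub-blocks below */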
    wc->multichannel = (wc->flags & WV_SINGLE_BLOCK) != WV_SINGLE_BLOCK;
    if(wc->multichannel){
        chan = wc->chan;
        chmask = wc->chmask;
    }
    if((rate == -1 || !chan) && !wc->block_parsed){
        int64_t block_end = avio_tell(pb) + wc->blksize - 24;
        if(!pb->seekable){
            av_log(ctx, AV_LOG_ERROR, "Cannot determine additional parameters\n");
            return -1;
        }
        while(avio_tell(pb) < block_end){
            int id, size;
            id = avio_r8(pb);
            size = (id & 0x80) ? avio_rl24(pb) : avio_r8(pb);
            size <<= 1;
            if(id&0x40)
                size--;
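            /* sub-block sizes are stored in 16-bit words: bit 0x80 of the id
             * selects a 24-bit size field, bit 0x40 flags an odd byte count */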
            switch(id&0x3F){
            case 0xD:
                if(size <= 1){
                    av_log(ctx, AV_LOG_ERROR, "Insufficient channel information\n");
                    return -1;
                }
                chan = avio_r8(pb);
                switch(size - 2){
                case 0:
                    chmask = avio_r8(pb);
                    break;
                case 1:
                    chmask = avio_rl16(pb);
                    break;
                case 2:
                    chmask = avio_rl24(pb);
                    break;
                case 3:
                    chmask = avio_rl32(pb);
                    break;
                case 5:
                    avio_skip(pb, 1);
                    chan |= (avio_r8(pb) & 0xF) << 8;
                    chmask = avio_rl24(pb);
                    break;
                default:
                    av_log(ctx, AV_LOG_ERROR, "Invalid channel info size %d\n", size);
                    return -1;
                }
                break;
            case 0x27:
                rate = avio_rl24(pb);
                break;
            default:
                avio_skip(pb, size);
            }
            if(id&0x40)
                avio_skip(pb, 1);
        }
        if(rate == -1){
            av_log(ctx, AV_LOG_ERROR, "Cannot determine custom sampling rate\n");
            return -1;
        }
        avio_seek(pb, block_end - wc->blksize + 24, SEEK_SET);
    }
    if(!wc->bpp) wc->bpp = bpp;
    if(!wc->chan) wc->chan = chan;
    if(!wc->chmask) wc->chmask = chmask;
    if(!wc->rate) wc->rate = rate;

    if(wc->flags && bpp != wc->bpp){
        av_log(ctx, AV_LOG_ERROR, "Bits per sample differ, this block: %i, header block: %i\n", bpp, wc->bpp);
        return -1;
    }
    if(wc->flags && !wc->multichannel && chan != wc->chan){
        av_log(ctx, AV_LOG_ERROR, "Channels differ, this block: %i, header block: %i\n", chan, wc->chan);
        return -1;
    }
    if(wc->flags && rate != -1 && rate != wc->rate){
        av_log(ctx, AV_LOG_ERROR, "Sampling rate differ, this block: %i, header block: %i\n", rate, wc->rate);
        return -1;
    }
    wc->blksize = size - 24;
    return 0;
}