Example #1
File: segafilm.c Project: Haaaaaank/avbin
static int film_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FilmDemuxContext *film = s->priv_data;
    ByteIOContext *pb = s->pb;
    film_sample_t *sample;
    int ret = 0;
    int i;
    int left, right;

    if (film->current_sample >= film->sample_count)
        return AVERROR(EIO);

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    url_fseek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == CODEC_ID_CINEPAK)) {
        pkt->pos= url_ftell(pb);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);
        get_buffer(pb, pkt->data, sample->sample_size);
    } else if ((sample->stream == film->audio_stream_index) &&
        (film->audio_channels == 2)) {
        /* stereo PCM needs to be interleaved */

        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);

        /* make sure the interleave buffer is large enough */
        if (sample->sample_size > film->stereo_buffer_size) {
            av_free(film->stereo_buffer);
            film->stereo_buffer_size = sample->sample_size;
            film->stereo_buffer = av_malloc(film->stereo_buffer_size);
        }

        pkt->pos= url_ftell(pb);
        ret = get_buffer(pb, film->stereo_buffer, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);

        left = 0;
        right = sample->sample_size / 2;
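        /* the source chunk stores all left-channel bytes in its first half and
         * all right-channel bytes in its second half; interleave them below */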
        for (i = 0; i < sample->sample_size; ) {
            if (film->audio_bits == 8) {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
            } else {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
                pkt->data[i++] = film->stereo_buffer[right++];
            }
        }
    } else {
        ret= av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}
Example #2
static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SmackerContext *smk = s->priv_data;
    int flags;
    int ret;
    int i;
    int frame_size = 0;
    int palchange = 0;
    int pos;

    if (url_feof(&s->pb) || smk->cur_frame >= smk->frames)
        return AVERROR(EIO);

    /* if we have demuxed all streams of the current frame, read the next frame */
    if(smk->curstream < 0) {
        url_fseek(&s->pb, smk->nextpos, 0);
        frame_size = smk->frm_size[smk->cur_frame] & (~3);
        flags = smk->frm_flags[smk->cur_frame];
        /* handle palette change event */
        pos = url_ftell(&s->pb);
        if(flags & SMACKER_PAL){
            int size, sz, t, off, j, pos;
            uint8_t *pal = smk->pal;
            uint8_t oldpal[768];

            memcpy(oldpal, pal, 768);
            size = get_byte(&s->pb);
            size = size * 4 - 1;
            frame_size -= size;
            frame_size--;
            sz = 0;
            pos = url_ftell(&s->pb) + size;
            while(sz < 256){
                t = get_byte(&s->pb);
                if(t & 0x80){ /* skip palette entries */
                    sz += (t & 0x7F) + 1;
                    pal += ((t & 0x7F) + 1) * 3;
                } else if(t & 0x40){ /* copy with offset */
                    off = get_byte(&s->pb) * 3;
                    j = (t & 0x3F) + 1;
                    while(j-- && sz < 256) {
                        *pal++ = oldpal[off + 0];
                        *pal++ = oldpal[off + 1];
                        *pal++ = oldpal[off + 2];
                        sz++;
                        off += 3;
                    }
                } else { /* new entries */
                    *pal++ = smk_pal[t];
                    *pal++ = smk_pal[get_byte(&s->pb) & 0x3F];
                    *pal++ = smk_pal[get_byte(&s->pb) & 0x3F];
                    sz++;
                }
            }
            url_fseek(&s->pb, pos, 0);
            palchange |= 1;
        }
        flags >>= 1;
        smk->curstream = -1;
        /* if audio chunks are present, put them on the stack and retrieve them later */
        for(i = 0; i < 7; i++) {
            if(flags & 1) {
                int size;
                size = get_le32(&s->pb) - 4;
                frame_size -= size;
                frame_size -= 4;
                smk->curstream++;
                smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size);
                smk->buf_sizes[smk->curstream] = size;
                ret = get_buffer(&s->pb, smk->bufs[smk->curstream], size);
                if(ret != size)
                    return AVERROR(EIO);
                smk->stream_id[smk->curstream] = smk->indexes[i];
            }
            flags >>= 1;
        }
        if (av_new_packet(pkt, frame_size + 768))
            return AVERROR(ENOMEM);
        if(smk->frm_size[smk->cur_frame] & 1)
            palchange |= 2;
        pkt->data[0] = palchange;
        memcpy(pkt->data + 1, smk->pal, 768);
        ret = get_buffer(&s->pb, pkt->data + 769, frame_size);
        if(ret != frame_size)
            return AVERROR(EIO);
        pkt->stream_index = smk->videoindex;
        pkt->size = ret + 769;
        smk->cur_frame++;
        smk->nextpos = url_ftell(&s->pb);
    } else {
Example #3
File: aviobuf.c Project: RJVB/FFusion
int64_t url_ftell(ByteIOContext *s)
{
    return url_fseek(s, 0, SEEK_CUR);
}
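url_ftell() is implemented on top of url_fseek(), and together they are what the muxer examples below use to patch placeholder fields: remember the current position, write the payload, then seek back and fill in the real value. A minimal sketch of that idiom, assuming the same old ByteIOContext API used throughout these examples (the function name is illustrative):

static void write_patched_be16_size(ByteIOContext *pb)
{
    int64_t size_pos, end_pos;

    size_pos = url_ftell(pb);             /* remember where the placeholder lives */
    put_be16(pb, 0);                      /* 16-bit size placeholder */

    /* ... write a variable-length payload here ... */

    end_pos = url_ftell(pb);              /* end of the payload */
    url_fseek(pb, size_pos, SEEK_SET);    /* jump back to the placeholder */
    put_be16(pb, end_pos - size_pos - 2); /* real size, excluding the field itself */
    url_fseek(pb, end_pos, SEEK_SET);     /* restore the write position */
}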
Example #4
static int flic_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FlicDemuxContext *flic = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_FLIC;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = AV_RL16(&header[0x08]);
    st->codec->height = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
    if (get_buffer(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }

    url_fseek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10 assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file.
     */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = av_new_stream(s, 1);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type = CODEC_TYPE_AUDIO;
        ast->codec->codec_id = CODEC_ID_PCM_U8;
        ast->codec->codec_tag = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels = 1;
        ast->codec->sample_fmt = SAMPLE_FMT_U8;
        ast->codec->bit_rate = st->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
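        /* e.g. 22050 Hz / 2205 samples per block = 10 fps; 22050 / 1470 = 15 fps */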
        av_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
        av_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        url_fseek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata_size = 12;
        st->codec->extradata = av_malloc(12);
        memcpy(st->codec->extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        av_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        av_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
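The preamble handling above (get_buffer() followed by a negative url_fseek() with SEEK_CUR) is the usual way to peek at upcoming bytes with this API. A hedged standalone sketch of the same idiom, with a made-up helper name:

static int peek_bytes(ByteIOContext *pb, unsigned char *buf, int len)
{
    /* read len bytes, then seek back so the next read sees them again */
    if (get_buffer(pb, buf, len) != len)
        return AVERROR(EIO);
    url_fseek(pb, -len, SEEK_CUR);
    return 0;
}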
Example #5
static int roq_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    RoqDemuxContext *roq = s->priv_data;
    ByteIOContext *pb = &s->pb;
    AVStream *st;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int i;
    unsigned int chunk_size;
    unsigned int chunk_type;

    /* get the main header */
    if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
        RoQ_CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    roq->framerate = AV_RL16(&preamble[6]);
    roq->frame_pts_inc = 90000 / roq->framerate;

    /* init private context parameters */
    roq->width = roq->height = roq->audio_channels = roq->video_pts =
    roq->audio_frame_count = 0;

    /* scan the first n chunks searching for A/V parameters */
    for (i = 0; i < RoQ_CHUNKS_TO_SCAN; i++) {
        if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);

        switch (chunk_type) {

        case RoQ_INFO:
            /* fetch the width and height; reuse the preamble bytes */
            if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            roq->width = AV_RL16(&preamble[0]);
            roq->height = AV_RL16(&preamble[2]);
            break;

        case RoQ_QUAD_CODEBOOK:
        case RoQ_QUAD_VQ:
            /* ignore during this scan */
            url_fseek(pb, chunk_size, SEEK_CUR);
            break;

        case RoQ_SOUND_MONO:
            roq->audio_channels = 1;
            url_fseek(pb, chunk_size, SEEK_CUR);
            break;

        case RoQ_SOUND_STEREO:
            roq->audio_channels = 2;
            url_fseek(pb, chunk_size, SEEK_CUR);
            break;

        default:
            av_log(s, AV_LOG_ERROR, " unknown RoQ chunk type (%04X)\n", AV_RL16(&preamble[0]));
            return AVERROR_INVALIDDATA;
            break;
        }

        /* if all necessary parameters have been gathered, exit early */
        if ((roq->width && roq->height) && roq->audio_channels)
            break;
    }

    /* seek back to the first chunk */
    url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_SET);

    /* initialize the decoders */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    /* set the pts reference (1 pts = 1/90000) */
    av_set_pts_info(st, 33, 1, 90000);
    roq->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_ROQ;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = roq->width;
    st->codec->height = roq->height;

    if (roq->audio_channels) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, 90000);
        roq->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_ROQ_DPCM;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = roq->audio_channels;
        st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
        st->codec->bits_per_sample = 16;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
    }

    return 0;
}
Example #6
/* This function loads and processes a single chunk in an IP movie file.
 * It returns the type of chunk that was processed. */
static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
    AVPacket *pkt)
{
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    int chunk_size;
    unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
    unsigned char opcode_type;
    unsigned char opcode_version;
    int opcode_size;
    unsigned char scratch[1024];
    int i, j;
    int first_color, last_color;
    int audio_flags;
    unsigned char r, g, b;

    /* see if there are any pending packets */
    chunk_type = load_ipmovie_packet(s, pb, pkt);
    if (chunk_type != CHUNK_DONE)
        return chunk_type;

    /* read the next chunk, wherever the file happens to be pointing */
    if (url_feof(pb))
        return CHUNK_EOF;
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return CHUNK_BAD;
    chunk_size = AV_RL16(&chunk_preamble[0]);
    chunk_type = AV_RL16(&chunk_preamble[2]);

    debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);

    switch (chunk_type) {

    case CHUNK_INIT_AUDIO:
        debug_ipmovie("initialize audio\n");
        break;

    case CHUNK_AUDIO_ONLY:
        debug_ipmovie("audio only\n");
        break;

    case CHUNK_INIT_VIDEO:
        debug_ipmovie("initialize video\n");
        break;

    case CHUNK_VIDEO:
        debug_ipmovie("video (and audio)\n");
        break;

    case CHUNK_SHUTDOWN:
        debug_ipmovie("shutdown\n");
        break;

    case CHUNK_END:
        debug_ipmovie("end\n");
        break;

    default:
        debug_ipmovie("invalid chunk\n");
        chunk_type = CHUNK_BAD;
        break;

    }

    while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {

        /* read the next chunk, wherever the file happens to be pointing */
        if (url_feof(pb)) {
            chunk_type = CHUNK_EOF;
            break;
        }
        if (get_buffer(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE) {
            chunk_type = CHUNK_BAD;
            break;
        }

        opcode_size = AV_RL16(&opcode_preamble[0]);
        opcode_type = opcode_preamble[2];
        opcode_version = opcode_preamble[3];

        chunk_size -= OPCODE_PREAMBLE_SIZE;
        chunk_size -= opcode_size;
        if (chunk_size < 0) {
            debug_ipmovie("chunk_size countdown just went negative\n");
            chunk_type = CHUNK_BAD;
            break;
        }

        debug_ipmovie("  opcode type %02X, version %d, 0x%04X bytes: ",
            opcode_type, opcode_version, opcode_size);
        switch (opcode_type) {

        case OPCODE_END_OF_STREAM:
            debug_ipmovie("end of stream\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_END_OF_CHUNK:
            debug_ipmovie("end of chunk\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_CREATE_TIMER:
            debug_ipmovie("create timer\n");
            if ((opcode_version > 0) || (opcode_size > 6)) {
                debug_ipmovie("bad create_timer opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->fps = 1000000.0 / (AV_RL32(&scratch[0]) * AV_RL16(&scratch[4]));
            s->frame_pts_inc = 90000 / s->fps;
            debug_ipmovie("  %.2f frames/second (timer div = %d, subdiv = %d)\n",
                s->fps, AV_RL32(&scratch[0]), AV_RL16(&scratch[4]));
            break;

        case OPCODE_INIT_AUDIO_BUFFERS:
            debug_ipmovie("initialize audio buffers\n");
            if ((opcode_version > 1) || (opcode_size > 10)) {
                debug_ipmovie("bad init_audio_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->audio_sample_rate = AV_RL16(&scratch[4]);
            audio_flags = AV_RL16(&scratch[2]);
            /* bit 0 of the flags: 0 = mono, 1 = stereo */
            s->audio_channels = (audio_flags & 1) + 1;
            /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
            s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
            /* bit 2 indicates compressed audio in version 1 opcode */
            if ((opcode_version == 1) && (audio_flags & 0x4))
                s->audio_type = CODEC_ID_INTERPLAY_DPCM;
            else if (s->audio_bits == 16)
                s->audio_type = CODEC_ID_PCM_S16LE;
            else
                s->audio_type = CODEC_ID_PCM_U8;
            debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n",
                s->audio_bits,
                s->audio_sample_rate,
                (s->audio_channels == 2) ? "stereo" : "mono",
                (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
                "Interplay audio" : "PCM");
            break;

        case OPCODE_START_STOP_AUDIO:
            debug_ipmovie("start/stop audio\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_INIT_VIDEO_BUFFERS:
            debug_ipmovie("initialize video buffers\n");
            if ((opcode_version > 2) || (opcode_size > 8)) {
                debug_ipmovie("bad init_video_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->video_width = AV_RL16(&scratch[0]) * 8;
            s->video_height = AV_RL16(&scratch[2]) * 8;
            debug_ipmovie("video resolution: %d x %d\n",
                s->video_width, s->video_height);
            break;

        case OPCODE_UNKNOWN_06:
        case OPCODE_UNKNOWN_0E:
        case OPCODE_UNKNOWN_10:
        case OPCODE_UNKNOWN_12:
        case OPCODE_UNKNOWN_13:
        case OPCODE_UNKNOWN_14:
        case OPCODE_UNKNOWN_15:
            debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type);
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SEND_BUFFER:
            debug_ipmovie("send buffer\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_AUDIO_FRAME:
            debug_ipmovie("audio frame\n");

            /* log position and move on for now */
            s->audio_chunk_offset = url_ftell(pb);
            s->audio_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SILENCE_FRAME:
            debug_ipmovie("silence frame\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_INIT_VIDEO_MODE:
            debug_ipmovie("initialize video mode\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_CREATE_GRADIENT:
            debug_ipmovie("create gradient\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SET_PALETTE:
            debug_ipmovie("set palette\n");
            /* check for the logical maximum palette size
             * (3 * 256 + 4 bytes) */
            if (opcode_size > 0x304) {
                debug_ipmovie("demux_ipmovie: set_palette opcode too large\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) != opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }

            /* load the palette into internal data structure */
            first_color = AV_RL16(&scratch[0]);
            last_color = first_color + AV_RL16(&scratch[2]) - 1;
            /* sanity check (since they are 16 bit values) */
            if ((first_color > 0xFF) || (last_color > 0xFF)) {
                debug_ipmovie("demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
                    first_color, last_color);
                chunk_type = CHUNK_BAD;
                break;
            }
            j = 4;  /* offset of first palette data */
            for (i = first_color; i <= last_color; i++) {
                /* the palette is stored as a 6-bit VGA palette, thus each
                 * component is shifted up to a 8-bit range */
                r = scratch[j++] * 4;
                g = scratch[j++] * 4;
                b = scratch[j++] * 4;
                s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
            }
            /* indicate a palette change */
            s->palette_control.palette_changed = 1;
            break;

        case OPCODE_SET_PALETTE_COMPRESSED:
            debug_ipmovie("set palette compressed\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SET_DECODING_MAP:
            debug_ipmovie("set decoding map\n");

            /* log position and move on for now */
            s->decode_map_chunk_offset = url_ftell(pb);
            s->decode_map_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_VIDEO_DATA:
            debug_ipmovie("set video data\n");

            /* log position and move on for now */
            s->video_chunk_offset = url_ftell(pb);
            s->video_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        default:
            debug_ipmovie("*** unknown opcode type\n");
            chunk_type = CHUNK_BAD;
            break;

        }
    }

    /* make a note of where the stream is sitting */
    s->next_chunk_offset = url_ftell(pb);

    /* dispatch the first of any pending packets */
    if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
        chunk_type = load_ipmovie_packet(s, pb, pkt);

    return chunk_type;
}
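For reference, the loop above assumes a 4-byte opcode preamble laid out as a little-endian 16-bit size, an opcode type byte and a version byte, matching the AV_RL16()/byte reads at the top of the loop. The struct and helper below are only an illustration, not types from the demuxer:

typedef struct {
    int           size;     /* little-endian 16-bit size at offset 0 */
    unsigned char type;     /* opcode type at offset 2 */
    unsigned char version;  /* opcode version at offset 3 */
} OpcodePreambleExample;

static void parse_opcode_preamble_example(const unsigned char *p,
                                          OpcodePreambleExample *op)
{
    /* mirrors the reads performed at the top of the opcode loop above */
    op->size    = AV_RL16(&p[0]);
    op->type    = p[2];
    op->version = p[3];
}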
Example #7
/**
 * Write an RTP hint (that may contain one or more RTP packets)
 * for the packets in data. data contains one or more packets with a
 * BE32 size header.
 *
 * @param out buffer where the hints are written
 * @param data buffer containing RTP packets
 * @param size the size of the data buffer
 * @param trk the MOVTrack for the hint track
 * @param pts pointer where the timestamp for the written RTP hint is stored
 * @return the number of RTP packets in the written hint
 */
static int write_hint_packets(ByteIOContext *out, const uint8_t *data,
                              int size, MOVTrack *trk, int64_t *pts)
{
    int64_t curpos;
    int64_t count_pos, entries_pos;
    int count = 0, entries;

    count_pos = url_ftell(out);
    /* RTPsample header */
    put_be16(out, 0); /* packet count */
    put_be16(out, 0); /* reserved */

    while (size > 4) {
        uint32_t packet_len = AV_RB32(data);
        uint16_t seq;
        uint32_t ts;

        data += 4;
        size -= 4;
        if (packet_len > size || packet_len <= 12)
            break;
        if (data[1] >= 200 && data[1] <= 204) {
            /* RTCP packet, just skip */
            data += packet_len;
            size -= packet_len;
            continue;
        }

        if (packet_len > trk->max_packet_size)
            trk->max_packet_size = packet_len;

        seq = AV_RB16(&data[2]);
        ts = AV_RB32(&data[4]);

        if (trk->prev_rtp_ts == 0)
            trk->prev_rtp_ts = ts;
        /* Unwrap the 32-bit RTP timestamp that wraps around often
         * into a not (as often) wrapping 64-bit timestamp. */
        trk->cur_rtp_ts_unwrapped += (int32_t) (ts - trk->prev_rtp_ts);
        trk->prev_rtp_ts = ts;
        if (*pts == AV_NOPTS_VALUE)
            *pts = trk->cur_rtp_ts_unwrapped;

        count++;
        /* RTPpacket header */
        put_be32(out, 0); /* relative_time */
        put_buffer(out, data, 2); /* RTP header */
        put_be16(out, seq); /* RTPsequenceseed */
        put_be16(out, 0); /* reserved + flags */
        entries_pos = url_ftell(out);
        put_be16(out, 0); /* entry count */

        data += 12;
        size -= 12;
        packet_len -= 12;

        entries = 0;
        /* Write one or more constructors describing the payload data */
        describe_payload(data, packet_len, out, &entries, &trk->sample_queue);
        data += packet_len;
        size -= packet_len;

        curpos = url_ftell(out);
        url_fseek(out, entries_pos, SEEK_SET);
        put_be16(out, entries);
        url_fseek(out, curpos, SEEK_SET);
    }

    curpos = url_ftell(out);
    url_fseek(out, count_pos, SEEK_SET);
    put_be16(out, count);
    url_fseek(out, curpos, SEEK_SET);
    return count;
}
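The data buffer this function walks is a plain concatenation of packets, each prefixed by the big-endian 32-bit length mentioned in the comment above. A hedged sketch of appending one packet to such a buffer, assuming AV_WB32() from libavutil's intreadwrite macros and <string.h> for memcpy(); the helper name is illustrative:

static int append_size_prefixed_packet(uint8_t *buf, int buf_size, int pos,
                                       const uint8_t *pkt, int pkt_len)
{
    /* need room for the 4-byte length header plus the packet itself */
    if (pos + 4 + pkt_len > buf_size)
        return -1;
    AV_WB32(buf + pos, pkt_len);          /* big-endian length header */
    memcpy(buf + pos + 4, pkt, pkt_len);
    return pos + 4 + pkt_len;             /* write offset for the next packet */
}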
Example #8
File: 4xm.c Project: apakian/rtmp-cpp
static int fourxm_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size, out_size;
    int ret = 0;
    int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;

    while (!packet_read) {

        if ((ret = get_buffer(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size = AV_RL32(&header[4]);
        if (url_feof(pb))
            return AVERROR(EIO);
        switch (fourcc_tag) {

        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts ++;

            /* skip the LIST-* tag and move on to the next fourcc */
            get_le32(pb);
            break;

        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
        {

            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts = fourxm->video_pts;
            pkt->pos = url_ftell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = get_buffer(s->pb, &pkt->data[8], size);

            if (ret < 0)
                av_free_packet(pkt);
            else
                packet_read = 1;
            break;
        }

        case snd__TAG:
            track_number = get_le32(pb);
            out_size= get_le32(pb);
            size-=8;

            if (track_number == fourxm->selected_track) {
                ret= av_get_packet(s->pb, pkt, size);
                if(ret<0)
                    return AVERROR(EIO);
                pkt->stream_index =
                    fourxm->tracks[fourxm->selected_track].stream_index;
                pkt->pts = fourxm->audio_pts;
                packet_read = 1;

                /* pts accounting */
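                /* convert the chunk size in bytes into a per-channel sample count:
                 * 4XM ADPCM packs two samples per byte (after 2 header bytes per
                 * channel), while PCM uses bits/8 bytes per sample */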
                audio_frame_count = size;
                if (fourxm->tracks[fourxm->selected_track].adpcm)
                    audio_frame_count -=
                        2 * (fourxm->tracks[fourxm->selected_track].channels);
                audio_frame_count /=
                      fourxm->tracks[fourxm->selected_track].channels;
                if (fourxm->tracks[fourxm->selected_track].adpcm)
                    audio_frame_count *= 2;
                else
                    audio_frame_count /=
                    (fourxm->tracks[fourxm->selected_track].bits / 8);
                fourxm->audio_pts += audio_frame_count;

            } else {
                url_fseek(pb, size, SEEK_CUR);
            }
            break;

        default:
            url_fseek(pb, size, SEEK_CUR);
            break;
        }
    }
    return ret;
}
Example #9
File: 4xm.c Project: apakian/rtmp-cpp
static int fourxm_read_header(AVFormatContext *s,
                              AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i;
    int current_track = -1;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->selected_track = 0;
    fourxm->fps = 1.0;

    /* skip the first 3 32-bit numbers */
    url_fseek(pb, 12, SEEK_CUR);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (get_buffer(pb, header, header_size) != header_size)
        return AVERROR(EIO);

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size = AV_RL32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                av_free(header);
                return AVERROR_INVALIDDATA;
            }
            fourxm->width = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);
            av_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = CODEC_TYPE_VIDEO;
            st->codec->codec_id = CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width = fourxm->width;
            st->codec->height = fourxm->height;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            /* check that there is enough data */
            if (size != strk_SIZE) {
                av_free(header);
                return AVERROR_INVALIDDATA;
            }
            current_track = AV_RL32(&header[i + 8]);
            if (current_track + 1 > fourxm->track_count) {
                fourxm->track_count = current_track + 1;
                if((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack))
                    return -1;
                fourxm->tracks = av_realloc(fourxm->tracks,
                    fourxm->track_count * sizeof(AudioTrack));
                if (!fourxm->tracks) {
                    av_free(header);
                    return AVERROR(ENOMEM);
                }
            }
            fourxm->tracks[current_track].adpcm = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits = AV_RL32(&header[i + 44]);
            i += 8 + size;

            /* allocate a new AVStream */
            st = av_new_stream(s, current_track);
            if (!st)
                return AVERROR(ENOMEM);

            av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type = CODEC_TYPE_AUDIO;
            st->codec->codec_tag = 0;
            st->codec->channels = fourxm->tracks[current_track].channels;
            st->codec->sample_rate = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                st->codec->bits_per_sample;
            st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
            if (fourxm->tracks[current_track].adpcm)
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            else if (st->codec->bits_per_sample == 8)
                st->codec->codec_id = CODEC_ID_PCM_U8;
            else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    av_free(header);

    /* skip over the LIST-MOVI chunk (which is where the stream should be) */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG)
        return AVERROR_INVALIDDATA;

    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */
    fourxm->audio_pts = 0;

    return 0;
}
Example #10
static int vmd_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    VmdDemuxContext *vmd = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st = NULL, *vst;
    unsigned int toc_offset;
    unsigned char *raw_frame_table;
    int raw_frame_table_size;
    offset_t current_offset;
    int i, j;
    unsigned int total_frames;
    int64_t pts_inc = 1;
    int64_t current_video_pts = 0, current_audio_pts = 0;
    unsigned char chunk[BYTES_PER_FRAME_RECORD];
    int num, den;
    int sound_buffers;

    /* fetch the main header, including the 2 header length bytes */
    url_fseek(pb, 0, SEEK_SET);
    if (get_buffer(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE)
        return AVERROR(EIO);

    /* start up the decoders */
    vst = av_new_stream(s, 0);
    if (!vst)
        return AVERROR(ENOMEM);
    av_set_pts_info(vst, 33, 1, 10);
    vmd->video_stream_index = vst->index;
    vst->codec->codec_type = CODEC_TYPE_VIDEO;
    vst->codec->codec_id = CODEC_ID_VMDVIDEO;
    vst->codec->codec_tag = 0;  /* no fourcc */
    vst->codec->width = AV_RL16(&vmd->vmd_header[12]);
    vst->codec->height = AV_RL16(&vmd->vmd_header[14]);
    vst->codec->extradata_size = VMD_HEADER_SIZE;
    vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);

    /* if sample rate is 0, assume no audio */
    vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]);
    if (vmd->sample_rate) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        vmd->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_VMDAUDIO;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
        st->codec->sample_rate = vmd->sample_rate;
        st->codec->block_align = AV_RL16(&vmd->vmd_header[806]);
        if (st->codec->block_align & 0x8000) {
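            /* a block_align stored as a negative 16-bit value flags 16-bit samples; recover its magnitude */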
            st->codec->bits_per_sample = 16;
            st->codec->block_align = -(st->codec->block_align - 0x10000);
        } else {
            st->codec->bits_per_sample = 8;
        }
        st->codec->bit_rate = st->codec->sample_rate *
            st->codec->bits_per_sample * st->codec->channels;

        /* calculate pts */
        num = st->codec->block_align;
        den = st->codec->sample_rate * st->codec->channels;
        av_reduce(&den, &num, den, num, (1UL<<31)-1);
        av_set_pts_info(vst, 33, num, den);
        av_set_pts_info(st, 33, num, den);
        pts_inc = num;
    }

    toc_offset = AV_RL32(&vmd->vmd_header[812]);
    vmd->frame_count = AV_RL16(&vmd->vmd_header[6]);
    vmd->frames_per_block = AV_RL16(&vmd->vmd_header[18]);
    url_fseek(pb, toc_offset, SEEK_SET);

    raw_frame_table = NULL;
    vmd->frame_table = NULL;
    sound_buffers = AV_RL16(&vmd->vmd_header[808]);
    raw_frame_table_size = vmd->frame_count * 6;
    if(vmd->frame_count * vmd->frames_per_block  >= UINT_MAX / sizeof(vmd_frame_t)){
        av_log(s, AV_LOG_ERROR, "vmd->frame_count * vmd->frames_per_block too large\n");
        return -1;
    }
    raw_frame_table = av_malloc(raw_frame_table_size);
    vmd->frame_table = av_malloc((vmd->frame_count * vmd->frames_per_block + sound_buffers) * sizeof(vmd_frame_t));
    if (!raw_frame_table || !vmd->frame_table) {
        av_free(raw_frame_table);
        av_free(vmd->frame_table);
        return AVERROR(ENOMEM);
    }
    if (get_buffer(pb, raw_frame_table, raw_frame_table_size) !=
        raw_frame_table_size) {
        av_free(raw_frame_table);
        av_free(vmd->frame_table);
        return AVERROR(EIO);
    }

    total_frames = 0;
    for (i = 0; i < vmd->frame_count; i++) {

        current_offset = AV_RL32(&raw_frame_table[6 * i + 2]);

        /* handle each entry in index block */
        for (j = 0; j < vmd->frames_per_block; j++) {
            int type;
            uint32_t size;

            get_buffer(pb, chunk, BYTES_PER_FRAME_RECORD);
            type = chunk[0];
            size = AV_RL32(&chunk[2]);
            if(!size)
                continue;
            switch(type) {
            case 1: /* Audio Chunk */
                if (!st) break;
                /* first audio chunk contains several audio buffers */
                if(current_audio_pts){
                    vmd->frame_table[total_frames].frame_offset = current_offset;
                    vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
                    vmd->frame_table[total_frames].frame_size = size;
                    memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                    vmd->frame_table[total_frames].pts = current_audio_pts;
                    total_frames++;
                    current_audio_pts += pts_inc;
                }else{
                    uint32_t flags;
                    int k;
                    int noff;
                    int64_t pos;

                    pos = url_ftell(pb);
                    url_fseek(pb, current_offset, SEEK_SET);
                    flags = get_le32(pb);
                    noff = 4;
                    url_fseek(pb, pos, SEEK_SET);
                    av_log(s, AV_LOG_DEBUG, "Sound mapping = %08X (%i bufs)\n", flags, sound_buffers);
                    for(k = 0; k < sound_buffers - 1; k++){
                        if(flags & 1) { /* silent block */
                            vmd->frame_table[total_frames].frame_size = 0;
                        }else{
                            vmd->frame_table[total_frames].frame_size = st->codec->block_align + (st->codec->block_align & 1);
                        }
                        noff += vmd->frame_table[total_frames].frame_size;
                        vmd->frame_table[total_frames].frame_offset = current_offset + noff;
                        vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
                        memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                        vmd->frame_table[total_frames].pts = current_audio_pts;
                        total_frames++;
                        current_audio_pts += pts_inc;
                        flags >>= 1;
                    }
                }
                break;
            case 2: /* Video Chunk */
                vmd->frame_table[total_frames].frame_offset = current_offset;
                vmd->frame_table[total_frames].stream_index = vmd->video_stream_index;
                vmd->frame_table[total_frames].frame_size = size;
                memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                vmd->frame_table[total_frames].pts = current_video_pts;
                total_frames++;
                break;
            }
            current_offset += size;
        }
        current_video_pts += pts_inc;
    }

    av_free(raw_frame_table);

    vmd->current_frame = 0;
    vmd->frame_count = total_frames;

    return 0;
}
Example #11
/* write the header (written twice if not streamed) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
{
    ASFContext *asf = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
    int has_title;
    AVCodecContext *enc;
    int64_t header_offset, cur_pos, hpos;
    int bit_rate;
    int64_t duration;

    duration = asf->duration + preroll_time * 10000;
    has_title = (s->title[0] || s->author[0] || s->copyright[0] || s->comment[0]);

    bit_rate = 0;
    for(n=0;n<s->nb_streams;n++) {
        enc = s->streams[n]->codec;

        av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */

        bit_rate += enc->bit_rate;
    }

    if (asf->is_streamed) {
        put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
    }

    put_guid(pb, &asf_header);
    put_le64(pb, -1); /* header length, will be patched after */
    put_le32(pb, 3 + has_title + s->nb_streams); /* number of chunks in header */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 2); /* ??? */

    /* file header */
    header_offset = url_ftell(pb);
    hpos = put_header(pb, &file_header);
    put_guid(pb, &my_guid);
    put_le64(pb, file_size);
    file_time = 0;
    put_le64(pb, unix_to_file_time(file_time));
    put_le64(pb, asf->nb_packets); /* number of packets */
    put_le64(pb, duration); /* end time stamp (in 100ns units) */
    put_le64(pb, duration); /* duration (in 100ns units) */
    put_le32(pb, preroll_time); /* start time stamp */
    put_le32(pb, 0); /* ??? */
    put_le32(pb, asf->is_streamed ? 1 : 0); /* ??? */
    put_le32(pb, asf->packet_size); /* packet size */
    put_le32(pb, asf->packet_size); /* packet size */
    put_le32(pb, bit_rate); /* Nominal data rate in bps */
    end_header(pb, hpos);

    /* unknown headers */
    hpos = put_header(pb, &head1_guid);
    put_guid(pb, &head2_guid);
    put_le32(pb, 6);
    put_le16(pb, 0);
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        hpos = put_header(pb, &comment_header);
        if ( s->title[0]     ) { put_le16(pb, 2 * (strlen(s->title    ) + 1)); } else { put_le16(pb, 0); }
        if ( s->author[0]    ) { put_le16(pb, 2 * (strlen(s->author   ) + 1)); } else { put_le16(pb, 0); }
        if ( s->copyright[0] ) { put_le16(pb, 2 * (strlen(s->copyright) + 1)); } else { put_le16(pb, 0); }
        if ( s->comment[0]   ) { put_le16(pb, 2 * (strlen(s->comment  ) + 1)); } else { put_le16(pb, 0); }
        put_le16(pb, 0);
        if ( s->title[0]     ) put_str16_nolen(pb, s->title);
        if ( s->author[0]    ) put_str16_nolen(pb, s->author);
        if ( s->copyright[0] ) put_str16_nolen(pb, s->copyright);
        if ( s->comment[0]   ) put_str16_nolen(pb, s->comment);
        end_header(pb, hpos);
    }

    /* stream headers */
    for(n=0;n<s->nb_streams;n++) {
        int64_t es_pos;
        const uint8_t *er_spr = NULL;
        int er_spr_len = 0;
        //        ASFStream *stream = &asf->streams[n];

        enc = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
        asf->streams[n].seq = 0;


        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            if (enc->codec_id == CODEC_ID_ADPCM_G726) {
                er_spr     = error_spread_ADPCM_G726;
                er_spr_len = sizeof(error_spread_ADPCM_G726);
            }
        }

        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            wav_extra_size = 0;
            extra_size = 18 + wav_extra_size;
            extra_size2 = er_spr_len;
            break;
        default:
        case CODEC_TYPE_VIDEO:
            wav_extra_size = enc->extradata_size;
            extra_size = 0x33 + wav_extra_size;
            extra_size2 = 0;
            break;
        }

        hpos = put_header(pb, &stream_header);
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            put_guid(pb, &audio_stream);
            if ((er_spr != NULL) && (er_spr_len != 0)) {
                put_guid(pb, &audio_conceal_spread);
            } else {
                put_guid(pb, &video_conceal_none);
            }
        } else {
            put_guid(pb, &video_stream);
            put_guid(pb, &video_conceal_none);
        }
        put_le64(pb, 0); /* ??? */
        es_pos = url_ftell(pb);
        put_le32(pb, extra_size); /* wav header len */
        put_le32(pb, extra_size2); /* additional data len */
        put_le16(pb, n + 1); /* stream number */
        put_le32(pb, 0); /* ??? */

        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            /* WAVEFORMATEX header */
            int wavsize = put_wav_header(pb, enc);
            if ((enc->codec_id != CODEC_ID_MP3) && (enc->codec_id != CODEC_ID_MP2) && (enc->codec_id != CODEC_ID_ADPCM_IMA_WAV) && (enc->extradata_size==0)) {
                wavsize += 2;
                put_le16(pb, 0);
            }

            if (wavsize < 0)
                return -1;
            if (wavsize != extra_size) {
                cur_pos = url_ftell(pb);
                url_fseek(pb, es_pos, SEEK_SET);
                put_le32(pb, wavsize); /* wav header len */
                url_fseek(pb, cur_pos, SEEK_SET);
            }
            /* ERROR Correction */
            if ((er_spr != NULL) && (er_spr_len != 0))
                put_buffer(pb, er_spr, er_spr_len);
        } else {
            put_le32(pb, enc->width);
            put_le32(pb, enc->height);
            put_byte(pb, 2); /* ??? */
            put_le16(pb, 40 + enc->extradata_size); /* size */

            /* BITMAPINFOHEADER header */
            put_bmp_header(pb, enc, codec_bmp_tags, 1);
        }
        end_header(pb, hpos);
    }

    /* media comments */

    hpos = put_header(pb, &codec_comment_header);
    put_guid(pb, &codec_comment1_header);
    put_le32(pb, s->nb_streams);
    for(n=0;n<s->nb_streams;n++) {
        AVCodec *p;

        enc = s->streams[n]->codec;
        p = avcodec_find_encoder(enc->codec_id);

        put_le16(pb, asf->streams[n].num);
        put_str16(pb, p ? p->name : enc->codec_name);
        put_le16(pb, 0); /* no parameters */


        /* id */
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            put_le16(pb, 2);
            if(!enc->codec_tag)
                enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
            if(!enc->codec_tag)
                return -1;
            put_le16(pb, enc->codec_tag);
        } else {
            put_le16(pb, 4);
            if(!enc->codec_tag)
                enc->codec_tag = codec_get_tag(codec_bmp_tags, enc->codec_id);
            if(!enc->codec_tag)
                return -1;
            put_le32(pb, enc->codec_tag);
        }
    }
    end_header(pb, hpos);

    /* patch the header size fields */

    cur_pos = url_ftell(pb);
    header_size = cur_pos - header_offset;
    if (asf->is_streamed) {
        header_size += 8 + 30 + 50;

        url_fseek(pb, header_offset - 10 - 30, SEEK_SET);
        put_le16(pb, header_size);
        url_fseek(pb, header_offset - 2 - 30, SEEK_SET);
        put_le16(pb, header_size);

        header_size -= 8 + 30 + 50;
    }
    header_size += 24 + 6;
    url_fseek(pb, header_offset - 14, SEEK_SET);
    put_le64(pb, header_size);
    url_fseek(pb, cur_pos, SEEK_SET);

    /* movie chunk, followed by packets of packet_size */
    asf->data_offset = cur_pos;
    put_guid(pb, &data_header);
    put_le64(pb, data_chunk_size);
    put_guid(pb, &my_guid);
    put_le64(pb, asf->nb_packets); /* nb packets */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 1); /* ??? */
    return 0;
}
Example #12
static int flv_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL;
    int i;
    double framerate = 0.0;
    int metadata_size_pos, data_size;

    for(i=0; i<s->nb_streams; i++){
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            } else {
                framerate = 1/av_q2d(s->streams[i]->codec->time_base);
            }
            video_enc = enc;
            if(enc->codec_tag == 0) {
                av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
                return -1;
            }
        } else {
            audio_enc = enc;
            if(get_audio_flags(enc)<0)
                return -1;
        }
        av_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
    }
    put_tag(pb,"FLV");
    put_byte(pb,1);
    put_byte(pb,   FLV_HEADER_FLAG_HASAUDIO * !!audio_enc
                 + FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    put_be32(pb,9);
    put_be32(pb,0);

    for(i=0; i<s->nb_streams; i++){
        if(s->streams[i]->codec->codec_tag == 5){
            put_byte(pb,8); // message type
            put_be24(pb,0); // include flags
            put_be24(pb,0); // time stamp
            put_be32(pb,0); // reserved
            put_be32(pb,11); // size
            flv->reserved=5;
        }
    }

    /* write meta_tag */
    put_byte(pb, 18);         // tag type META
    metadata_size_pos= url_ftell(pb);
    put_be24(pb, 0);          // size of data part (sum of all parts below)
    put_be24(pb, 0);          // time stamp
    put_be32(pb, 0);          // reserved

    /* now data of data_size size */

    /* first event name as a string */
    put_byte(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
    put_be32(pb, 5*!!video_enc + 5*!!audio_enc + 2); // +2 for duration and file size

    put_amf_string(pb, "duration");
    flv->duration_offset= url_ftell(pb);
    put_amf_double(pb, s->duration / AV_TIME_BASE); // fill in the guessed duration, it'll be corrected later if incorrect

    if(video_enc){
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if(audio_enc){
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    put_byte(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
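    /* FLV tag header: type(1) + data size(3) + timestamp(3) + ts ext(1) + stream id(3) = 11 bytes;
     * metadata_size_pos points just past the type byte, so the payload starts 10 bytes after it */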
    data_size= url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264) {
            int64_t pos;
            put_byte(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                     FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            put_be24(pb, 0); // size patched later
            put_be24(pb, 0); // ts
            put_byte(pb, 0); // ts ext
            put_be24(pb, 0); // streamid
            pos = url_ftell(pb);
            if (enc->codec_id == CODEC_ID_AAC) {
                put_byte(pb, get_audio_flags(enc));
                put_byte(pb, 0); // AAC sequence header
                put_buffer(pb, enc->extradata, enc->extradata_size);
            } else {
                put_byte(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                put_byte(pb, 0); // AVC sequence header
                put_be24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = url_ftell(pb) - pos;
            url_fseek(pb, -data_size - 10, SEEK_CUR);
            put_be24(pb, data_size);
            url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
            put_be32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
Example #13
static int read_header(AVFormatContext *s,
                       AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    CaffContext *caf  = s->priv_data;
    AVStream *st;
    uint32_t tag = 0;
    int found_data, ret;
    int64_t size;

    url_fskip(pb, 8); /* magic, version, file flags */

    /* audio description chunk */
    if (get_be32(pb) != MKBETAG('d','e','s','c')) {
        av_log(s, AV_LOG_ERROR, "desc chunk not present\n");
        return AVERROR_INVALIDDATA;
    }
    size = get_be64(pb);
    if (size != 32)
        return AVERROR_INVALIDDATA;

    ret = read_desc_chunk(s);
    if (ret)
        return ret;
    st = s->streams[0];

    /* parse each chunk */
    found_data = 0;
    while (!url_feof(pb)) {

        /* stop at data chunk if seeking is not supported or
           data chunk size is unknown */
        if (found_data && (caf->data_size < 0 || url_is_streamed(pb)))
            break;

        tag  = get_be32(pb);
        size = get_be64(pb);
        if (url_feof(pb))
            break;

        switch (tag) {
        case MKBETAG('d','a','t','a'):
            url_fskip(pb, 4); /* edit count */
            caf->data_start = url_ftell(pb);
            caf->data_size  = size < 0 ? -1 : size - 4;
            if (caf->data_size > 0 && !url_is_streamed(pb))
                url_fskip(pb, caf->data_size);
            found_data = 1;
            break;

        /* magic cookie chunk */
        case MKBETAG('k','u','k','i'):
            if (read_kuki_chunk(s, size))
                return AVERROR_INVALIDDATA;
            break;

        /* packet table chunk */
        case MKBETAG('p','a','k','t'):
            if (read_pakt_chunk(s, size))
                return AVERROR_INVALIDDATA;
            break;

        case MKBETAG('i','n','f','o'):
            read_info_chunk(s, size);
            break;

        default:
#define _(x) ((x) >= ' ' ? (x) : ' ')
            av_log(s, AV_LOG_WARNING, "skipping CAF chunk: %08X (%c%c%c%c)\n",
                tag, _(tag>>24), _((tag>>16)&0xFF), _((tag>>8)&0xFF), _(tag&0xFF));
#undef _
        case MKBETAG('f','r','e','e'):
            if (size < 0)
                return AVERROR_INVALIDDATA;
            url_fskip(pb, size);
            break;
        }
    }

    if (!found_data)
        return AVERROR_INVALIDDATA;

    if (caf->bytes_per_packet > 0 && caf->frames_per_packet > 0) {
        if (caf->data_size > 0)
            st->nb_frames = (caf->data_size / caf->bytes_per_packet) * caf->frames_per_packet;
    } else if (st->nb_index_entries) {
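        /* st->duration is in samples here (the time base is 1/sample_rate,
           set below), so this works out to data bytes * 8 per second */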
        st->codec->bit_rate = st->codec->sample_rate * caf->data_size * 8 /
                              st->duration;
    } else {
        av_log(s, AV_LOG_ERROR, "Missing packet table. It is required when "
                                "block size or frame size are variable.\n");
        return AVERROR_INVALIDDATA;
    }
    s->file_size = url_fsize(pb);
    s->file_size = FFMAX(0, s->file_size);

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;

    /* position the stream at the start of data */
    if (caf->data_size >= 0)
        url_fseek(pb, caf->data_start, SEEK_SET);

    return 0;
}
Example #14
static int iff_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    IffDemuxContext *iff = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    uint32_t chunk_id, data_size;
    int padding, done = 0;
    int compression = -1;
    char *buf;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    url_fskip(pb, 8);
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = get_le32(pb);

    while(!done && !url_feof(pb)) {
        chunk_id = get_le32(pb);
        data_size = get_be32(pb);
        padding = data_size & 1;

        switch(chunk_id) {
        case ID_VHDR:
            st->codec->codec_type = CODEC_TYPE_AUDIO;
            url_fskip(pb, 12);
            st->codec->sample_rate = get_be16(pb);
            url_fskip(pb, 1);
            compression            = get_byte(pb);
            url_fskip(pb, 4);
            break;

        case ID_BODY:
            iff->body_size = data_size;
            done = 1;
            break;

        case ID_CHAN:
            st->codec->channels = (get_be32(pb) < 6) ? 1 : 2;
            break;

        case ID_CMAP:
            st->codec->extradata_size = data_size;
            st->codec->extradata      = av_malloc(data_size);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (get_buffer(pb, st->codec->extradata, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD:
            st->codec->codec_type            = CODEC_TYPE_VIDEO;
            st->codec->width                 = get_be16(pb);
            st->codec->height                = get_be16(pb);
            url_fskip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = get_byte(pb);
            url_fskip(pb, 1); // masking
            compression                      = get_byte(pb);
            url_fskip(pb, 3); // padding, transparent
            st->sample_aspect_ratio.num      = get_byte(pb);
            st->sample_aspect_ratio.den      = get_byte(pb);
            url_fskip(pb, 4); // source page width, height
            break;

        case ID_ANNO:
            buf = av_malloc(data_size + 1);
            if (!buf)
                break;
            get_buffer(pb, buf, data_size);
            buf[data_size] = 0;
            av_metadata_set2(&s->metadata, "comment", buf, AV_METADATA_DONT_STRDUP_VAL);
            break;

        default:
            url_fseek(pb, data_size + padding, SEEK_CUR);
            break;
        }
    }

    switch(st->codec->codec_type) {
    case CODEC_TYPE_AUDIO:
        av_set_pts_info(st, 32, 1, st->codec->sample_rate);

        switch(compression) {
        case COMP_NONE:
            st->codec->codec_id = CODEC_ID_PCM_S8;
            break;
        case COMP_FIB:
            st->codec->codec_id = CODEC_ID_8SVX_FIB;
            break;
        case COMP_EXP:
            st->codec->codec_id = CODEC_ID_8SVX_EXP;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "iff: unknown compression method\n");
            return -1;
        }

        st->codec->bits_per_coded_sample = 8;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case CODEC_TYPE_VIDEO:
        switch (compression) {
        case BITMAP_RAW:
            if (st->codec->codec_tag == ID_ILBM) {
                st->codec->codec_id = CODEC_ID_IFF_ILBM;
            } else {
                st->codec->codec_id = CODEC_ID_RAWVIDEO;
                st->codec->pix_fmt  = PIX_FMT_PAL8;
                st->codec->codec_tag = 0;
            }
            break;
        case BITMAP_BYTERUN1:
            st->codec->codec_id = CODEC_ID_IFF_BYTERUN1;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "unknown compression method\n");
            return AVERROR_INVALIDDATA;
        }
        break;
    default:
        return -1;
    }

    return 0;
}
Example #15
File: thp.c Project: WangCrystal/FFplayer
static int thp_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    ThpDemuxContext *thp = s->priv_data;
    AVStream *st;
    ByteIOContext *pb = s->pb;
    int i;

    /* Read the file header.  */
                           get_be32(pb); /* Skip Magic.  */
    thp->version         = get_be32(pb);

                           get_be32(pb); /* Max buf size.  */
                           get_be32(pb); /* Max samples.  */

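    /* the frame rate is stored as a 32-bit big-endian IEEE float;
       convert it to a rational so it can be used as a time base below */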
    thp->fps             = av_d2q(av_int2flt(get_be32(pb)), INT_MAX);
    thp->framecnt        = get_be32(pb);
    thp->first_framesz   = get_be32(pb);
                           get_be32(pb); /* Data size.  */

    thp->compoff         = get_be32(pb);
                           get_be32(pb); /* offsetDataOffset.  */
    thp->first_frame     = get_be32(pb);
    thp->last_frame      = get_be32(pb);

    thp->next_framesz    = thp->first_framesz;
    thp->next_frame      = thp->first_frame;

    /* Read the component structure.  */
    url_fseek (pb, thp->compoff, SEEK_SET);
    thp->compcount       = get_be32(pb);

    /* Read the list of component types.  */
    get_buffer(pb, thp->components, 16);

    for (i = 0; i < thp->compcount; i++) {
        if (thp->components[i] == 0) {
            if (thp->vst != 0)
                break;

            /* Video component.  */
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);

            /* The denominator and numerator are switched because 1/fps
               is required.  */
            av_set_pts_info(st, 64, thp->fps.den, thp->fps.num);
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = CODEC_ID_THP;
            st->codec->codec_tag = 0;  /* no fourcc */
            st->codec->width = get_be32(pb);
            st->codec->height = get_be32(pb);
            st->codec->sample_rate = av_q2d(thp->fps);
            thp->vst = st;
            thp->video_stream_index = st->index;

            if (thp->version == 0x11000)
                get_be32(pb); /* Unknown.  */
        } else if (thp->components[i] == 1) {
            if (thp->has_audio != 0)
                break;

            /* Audio component.  */
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id = CODEC_ID_ADPCM_THP;
            st->codec->codec_tag = 0;  /* no fourcc */
            st->codec->channels    = get_be32(pb); /* numChannels.  */
            st->codec->sample_rate = get_be32(pb); /* Frequency.  */

            av_set_pts_info(st, 64, 1, st->codec->sample_rate);

            thp->audio_stream_index = st->index;
            thp->has_audio = 1;
        }
    }

    return 0;
}
Example #16
File: tta.c Project: AllardJ/Tomato
static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    TTAContext *c = s->priv_data;
    AVStream *st;
    int i, channels, bps, samplerate, datalen, framelen;
    uint64_t framepos, start_offset;

    ff_id3v2_read(s);
    if (!av_metadata_get(s->metadata, "", NULL, AV_METADATA_IGNORE_SUFFIX))
        ff_id3v1_read(s);

    start_offset = url_ftell(s->pb);
    if (get_le32(s->pb) != AV_RL32("TTA1"))
        return -1; // not tta file

    url_fskip(s->pb, 2); // FIXME: flags
    channels = get_le16(s->pb);
    bps = get_le16(s->pb);
    samplerate = get_le32(s->pb);
    if(samplerate <= 0 || samplerate > 1000000){
        av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
        return -1;
    }

    datalen = get_le32(s->pb);
    if(datalen < 0){
        av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
        return -1;
    }

    url_fskip(s->pb, 4); // header crc

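    /* a TTA1 frame holds samplerate * 256 / 245 samples, i.e. roughly 1.045 s of audio */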
    framelen = samplerate*256/245;
    c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
    c->currentframe = 0;

    if(c->totalframes >= UINT_MAX/sizeof(uint32_t)){
        av_log(s, AV_LOG_ERROR, "totalframes too large\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 64, 1, samplerate);
    st->start_time = 0;
    st->duration = datalen;

    framepos = url_ftell(s->pb) + 4*c->totalframes + 4;

    for (i = 0; i < c->totalframes; i++) {
        uint32_t size = get_le32(s->pb);
        av_add_index_entry(st, framepos, i*framelen, size, 0, AVINDEX_KEYFRAME);
        framepos += size;
    }
    url_fskip(s->pb, 4); // seektable crc

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_TTA;
    st->codec->channels = channels;
    st->codec->sample_rate = samplerate;
    st->codec->bits_per_coded_sample = bps;

    st->codec->extradata_size = url_ftell(s->pb) - start_offset;
    if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
        //this check is redundant as get_buffer should fail
        av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
        return -1;
    }
    st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    url_fseek(s->pb, start_offset, SEEK_SET);
    get_buffer(s->pb, st->codec->extradata, st->codec->extradata_size);

    return 0;
}
Example #17
static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
    AVPacket *pkt) {

    int chunk_type;
    int64_t audio_pts = 0;

    if (s->audio_chunk_offset) {

        /* adjust for PCM audio by skipping chunk header */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) {
            s->audio_chunk_offset += 6;
            s->audio_chunk_size -= 6;
        }

        url_fseek(pb, s->audio_chunk_offset, SEEK_SET);
        s->audio_chunk_offset = 0;

        /* figure out the audio pts (the streams use a 90 kHz time base,
           see ipmovie_read_header in example #19) */
        audio_pts = 90000;
        audio_pts *= s->audio_frame_count;
        audio_pts /= s->audio_sample_rate;

        if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
            return CHUNK_EOF;

        pkt->stream_index = s->audio_stream_index;
        pkt->pts = audio_pts;

        /* audio frame maintenance */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM)
            s->audio_frame_count +=
            (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
        else
            s->audio_frame_count +=
                (s->audio_chunk_size - 6) / s->audio_channels;

        debug_ipmovie("sending audio frame with pts %"PRId64" (%d audio frames)\n",
            audio_pts, s->audio_frame_count);

        chunk_type = CHUNK_VIDEO;

    } else if (s->decode_map_chunk_offset) {

        /* send both the decode map and the video data together */

        if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
            return CHUNK_NOMEM;

        pkt->pos= s->decode_map_chunk_offset;
        url_fseek(pb, s->decode_map_chunk_offset, SEEK_SET);
        s->decode_map_chunk_offset = 0;

        if (get_buffer(pb, pkt->data, s->decode_map_chunk_size) !=
            s->decode_map_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        url_fseek(pb, s->video_chunk_offset, SEEK_SET);
        s->video_chunk_offset = 0;

        if (get_buffer(pb, pkt->data + s->decode_map_chunk_size,
            s->video_chunk_size) != s->video_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        pkt->stream_index = s->video_stream_index;
        pkt->pts = s->video_pts;

        debug_ipmovie("sending video frame with pts %"PRId64"\n",
            pkt->pts);

        s->video_pts += s->frame_pts_inc;

        chunk_type = CHUNK_VIDEO;

    } else {

        url_fseek(pb, s->next_chunk_offset, SEEK_SET);
        chunk_type = CHUNK_DONE;

    }

    return chunk_type;
}
Example #18
File: bink.c Project: AWilco/xbmc
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BinkDemuxContext *bink = s->priv_data;
    ByteIOContext *pb = s->pb;
    int ret;

    if (bink->current_track < 0) {
        int index_entry;
        AVStream *st = s->streams[0]; // stream 0 is video stream with index

        if (bink->video_pts >= st->duration)
            return AVERROR(EIO);

        index_entry = av_index_search_timestamp(st, bink->video_pts,
                                                AVSEEK_FLAG_ANY);
        if (index_entry < 0) {
            av_log(s, AV_LOG_ERROR,
                   "could not find index entry for frame %"PRId64"\n",
                   bink->video_pts);
            return AVERROR(EIO);
        }

        bink->remain_packet_size = st->index_entries[index_entry].size;
        bink->current_track = 0;
    }

    while (bink->current_track < bink->num_audio_tracks) {
        uint32_t audio_size = get_le32(pb);
        if (audio_size > bink->remain_packet_size - 4) {
            av_log(s, AV_LOG_ERROR,
                   "frame %"PRId64": audio size in header (%u) > size of packet left (%u)\n",
                   bink->video_pts, audio_size, bink->remain_packet_size);
            return AVERROR(EIO);
        }
        bink->remain_packet_size -= 4 + audio_size;
        bink->current_track++;
        if (audio_size >= 4) {
            /* get one audio packet per track */
            if ((ret = av_get_packet(pb, pkt, audio_size)) < 0)
                return ret;
            pkt->stream_index = bink->current_track;
            pkt->pts = bink->audio_pts[bink->current_track - 1];

            /* Each audio packet reports its decompressed size in bytes;
               divide by 2 bytes per 16-bit sample and by the channel count
               to advance the audio PTS in samples. */
            if (pkt->size >= 4)
                bink->audio_pts[bink->current_track - 1] +=
                    AV_RL32(pkt->data) / (2 * s->streams[bink->current_track]->codec->channels);
            return 0;
        } else {
            url_fseek(pb, audio_size, SEEK_CUR);
        }
    }

    /* get video packet */
    if ((ret = av_get_packet(pb, pkt, bink->remain_packet_size)) < 0)
        return ret;
    pkt->stream_index = 0;
    pkt->pts = bink->video_pts++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    /* -1 instructs the next call to read_packet() to read the next frame */
    bink->current_track = -1;

    return 0;
}
Example #19
static int ipmovie_read_header(AVFormatContext *s,
                               AVFormatParameters *ap)
{
    IPMVEContext *ipmovie = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVPacket pkt;
    AVStream *st;
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;

    /* initialize private context members */
    ipmovie->video_pts = ipmovie->audio_frame_count = 0;
    ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
    ipmovie->decode_map_chunk_offset = 0;

    /* on the first read, this will position the stream at the first chunk */
    ipmovie->next_chunk_offset = IPMOVIE_SIGNATURE_SIZE + 6;

    /* process the first chunk which should be CHUNK_INIT_VIDEO */
    if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
        return AVERROR_INVALIDDATA;

    /* peek ahead to the next chunk-- if it is an init audio chunk, process
     * it; if it is the first video chunk, this is a silent file */
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    chunk_type = AV_RL16(&chunk_preamble[2]);
    url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);

    if (chunk_type == CHUNK_VIDEO)
        ipmovie->audio_type = 0;  /* no audio */
    else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
        return AVERROR_INVALIDDATA;

    /* initialize the stream decoders */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, 90000);
    ipmovie->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_INTERPLAY_VIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = ipmovie->video_width;
    st->codec->height = ipmovie->video_height;

    /* palette considerations */
    st->codec->palctrl = &ipmovie->palette_control;

    if (ipmovie->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, 90000);
        ipmovie->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = ipmovie->audio_type;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = ipmovie->audio_channels;
        st->codec->sample_rate = ipmovie->audio_sample_rate;
        st->codec->bits_per_sample = ipmovie->audio_bits;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_sample;
        if (st->codec->codec_id == CODEC_ID_INTERPLAY_DPCM)
            st->codec->bit_rate /= 2;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
    }

    return 0;
}
Example #20
static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    int ret, first_index, last_index;
    char buf[1024];
    ByteIOContext pb1, *f = &pb1;
    AVStream *st;

    st = av_new_stream(s1, 0);
    if (!st) {
        return -ENOMEM;
    }

    if (ap->image_format)
        s->img_fmt = ap->image_format;

    pstrcpy(s->path, sizeof(s->path), s1->filename);
    s->img_number = 0;
    s->img_count = 0;

    /* find format */
    if (s1->iformat->flags & AVFMT_NOFILE)
        s->is_pipe = 0;
    else
        s->is_pipe = 1;

    if (!ap->time_base.num) {
        st->codec->time_base= (AVRational){1,25};
    } else {
        st->codec->time_base= ap->time_base;
    }

    if (!s->is_pipe) {
        if (find_image_range(&first_index, &last_index, s->path) < 0)
            goto fail;
        s->img_first = first_index;
        s->img_last = last_index;
        s->img_number = first_index;
        /* compute duration */
        st->start_time = 0;
        st->duration = last_index - first_index + 1;
        if (get_frame_filename(buf, sizeof(buf), s->path, s->img_number) < 0)
            goto fail;
        if (url_fopen(f, buf, URL_RDONLY) < 0)
            goto fail;
    } else {
        f = &s1->pb;
    }

    ret = av_read_image(f, s1->filename, s->img_fmt, read_header_alloc_cb, s);
    if (ret < 0)
        goto fail1;

    if (!s->is_pipe) {
        url_fclose(f);
    } else {
        url_fseek(f, 0, SEEK_SET);
    }

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = s->width;
    st->codec->height = s->height;
    st->codec->pix_fmt = s->pix_fmt;
    s->img_size = avpicture_get_size(s->pix_fmt, (s->width+15)&(~15), (s->height+15)&(~15));

    return 0;
 fail1:
    if (!s->is_pipe)
        url_fclose(f);
 fail:
    return AVERROR_IO;
}
Example #21
static int flic_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FlicDemuxContext *flic = s->priv_data;
    ByteIOContext *pb = s->pb;
    int packet_read = 0;
    unsigned int size;
    int magic;
    int ret = 0;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    while (!packet_read) {

        if ((ret = get_buffer(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
            FLIC_PREAMBLE_SIZE) {
            ret = AVERROR(EIO);
            break;
        }

        size = AV_RL32(&preamble[0]);
        magic = AV_RL16(&preamble[4]);

        if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }
            pkt->stream_index = flic->video_stream_index;
            pkt->pts = flic->frame_number++;
            pkt->pos = url_ftell(pb);
            memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
            ret = get_buffer(pb, pkt->data + FLIC_PREAMBLE_SIZE,
                size - FLIC_PREAMBLE_SIZE);
            if (ret != size - FLIC_PREAMBLE_SIZE) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }
            packet_read = 1;
        } else if (magic == FLIC_TFTD_CHUNK_AUDIO) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }

            /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
            url_fseek(pb, 10, SEEK_CUR);

            pkt->stream_index = flic->audio_stream_index;
            pkt->pos = url_ftell(pb);
            ret = get_buffer(pb, pkt->data, size);

            if (ret != size) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }

            packet_read = 1;
        } else {
            /* not interested in this chunk */
            url_fseek(pb, size - 6, SEEK_CUR);
        }
    }

    return ret;
}
Example #22
File: mmf.c Project: apeliom/tikitv
/* mmf input */
static int mmf_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    MMFContext *mmf = s->priv_data;
    unsigned int tag;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    offset_t file_size, size;
    int rate, params;

    tag = get_le32(pb);
    if (tag != MKTAG('M', 'M', 'M', 'D'))
        return -1;
    file_size = get_be32(pb);

    /* Skip some unused chunks that may or may not be present */
    for(;; url_fseek(pb, size, SEEK_CUR)) {
        tag = get_le32(pb);
        size = get_be32(pb);
        if(tag == MKTAG('C','N','T','I')) continue;
        if(tag == MKTAG('O','P','D','A')) continue;
        break;
    }

    /* Tag = "ATRx", where "x" = track number */
    if ((tag & 0xffffff) == MKTAG('M', 'T', 'R', 0)) {
        av_log(s, AV_LOG_ERROR, "MIDI like format found, unsupported\n");
        return -1;
    }
    if ((tag & 0xffffff) != MKTAG('A', 'T', 'R', 0)) {
        av_log(s, AV_LOG_ERROR, "Unsupported SMAF chunk %08x\n", tag);
        return -1;
    }

    get_byte(pb); /* format type */
    get_byte(pb); /* sequence type */
    params = get_byte(pb); /* (channel << 7) | (format << 4) | rate */
    rate = mmf_rate(params & 0x0f);
    if(rate  < 0) {
        av_log(s, AV_LOG_ERROR, "Invalid sample rate\n");
        return -1;
    }
    get_byte(pb); /* wave base bit */
    get_byte(pb); /* time base d */
    get_byte(pb); /* time base g */

    /* Skip some unused chunks that may or may not be present */
    for(;; url_fseek(pb, size, SEEK_CUR)) {
        tag = get_le32(pb);
        size = get_be32(pb);
        if(tag == MKTAG('A','t','s','q')) continue;
        if(tag == MKTAG('A','s','p','I')) continue;
        break;
    }

    /* Make sure it's followed by an Awa chunk, aka wave data */
    if ((tag & 0xffffff) != MKTAG('A', 'w', 'a', 0)) {
        av_log(s, AV_LOG_ERROR, "Unexpected SMAF chunk %08x\n", tag);
        return -1;
    }
    mmf->data_size = size;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = CODEC_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_ADPCM_YAMAHA;
    st->codec->sample_rate = rate;
    st->codec->channels = 1;
    st->codec->bits_per_sample = 4;
    st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample;

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
Example #23
static int roq_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    offset_t codebook_offset;

    while (!packet_read) {

        if (url_feof(&s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        switch (chunk_type) {

        case RoQ_INFO:
            /* don't care about this chunk anymore */
            url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_CUR);
            break;

        case RoQ_QUAD_CODEBOOK:
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = url_ftell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            url_fseek(pb, codebook_size, SEEK_CUR);
            if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                codebook_size;

            /* rewind */
            url_fseek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->pts = roq->video_pts;

            roq->video_pts += roq->frame_pts_inc;
            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
        case RoQ_QUAD_VQ:
            /* load up the packet */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->pts = roq->video_pts;
                roq->video_pts += roq->frame_pts_inc;
            } else {
                pkt->stream_index = roq->audio_stream_index;
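                /* express the running sample count on a 90 kHz clock
                   (samples * 90000 / sample rate) */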
                pkt->pts = roq->audio_frame_count;
                pkt->pts *= 90000;
                pkt->pts /= RoQ_AUDIO_SAMPLE_RATE;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= url_ftell(pb);
            ret = get_buffer(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
            break;
        }
    }

    return ret;
}
Example #24
static int read_frame(BVID_DemuxContext *vid, ByteIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s, int npixels)
{
    uint8_t * vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position;
    unsigned int vidbuf_capacity;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if(!vidbuf_start)
        return AVERROR(ENOMEM);

    // save the file position for the packet, include block type
    position = url_ftell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    // get the video delay (next int16), and set the presentation time
    vid->video_pts += vid->bethsoft_global_delay + get_le16(pb);

    // set the y offset if it exists (decoder header data should be in data section)
    if(block_type == VIDEO_YOFF_P_FRAME) {
        if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2)
            goto fail;
        vidbuf_nbytes += 2;
    }

    do {
        uint8_t *tmp = av_fast_realloc(vidbuf_start, &vidbuf_capacity,
                                       vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if(!tmp) {
            av_free(vidbuf_start); // av_fast_realloc() does not free the old buffer on failure
            return AVERROR(ENOMEM);
        }
        vidbuf_start = tmp;

        code = get_byte(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if(code >= 0x80) { // rle sequence
            if(block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = get_byte(pb);
        } else if(code) { // plain sequence
            if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], code) != code)
                goto fail;
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if(bytes_copied == npixels) { // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if read all pixels
            if(get_byte(pb))
                url_fseek(pb, -1, SEEK_CUR);
            break;
        }
        if(bytes_copied > npixels)
            goto fail;
    } while(code);

    // copy data into packet
    if(av_new_packet(pkt, vidbuf_nbytes) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);

    pkt->pos = position;
    pkt->stream_index = 0;  // use the video decoder, which was initialized as the first stream
    pkt->pts = vid->video_pts;

    vid->nframes--;  // used to check if all the frames were read
    return vidbuf_nbytes;
fail:
    av_free(vidbuf_start);
    return -1;
}
Example #25
File: flic.c Project: pcercuei/dcplaya
static int flic_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st;
    int speed;
    int magic_number;

    flic->pts = 0;

    /* load the whole header and pull out the width and height */
    if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR_IO;

    magic_number = LE_16(&header[4]);
    speed = LE_32(&header[0x10]);

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;
    flic->video_stream_index = st->index;
    st->codec.codec_type = CODEC_TYPE_VIDEO;
    st->codec.codec_id = CODEC_ID_FLIC;
    st->codec.codec_tag = 0;  /* no fourcc */
    st->codec.width = LE_16(&header[0x08]);
    st->codec.height = LE_16(&header[0x0A]);

    if (!st->codec.width || !st->codec.height)
        return AVERROR_INVALIDDATA;

    /* send over the whole 128-byte FLIC header */
    st->codec.extradata_size = FLIC_HEADER_SIZE;
    st->codec.extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec.extradata, header, FLIC_HEADER_SIZE);

    av_set_pts_info(st, 33, 1, 90000);

    /* Time to figure out the framerate: If there is a FLIC chunk magic
     * number at offset 0x10, assume this is from the Bullfrog game,
     * Magic Carpet. */
    if (LE_16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {

        flic->frame_pts_inc = FLIC_MC_PTS_INC;

        /* rewind the stream since the first chunk is at offset 12 */
        url_fseek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec.extradata);
        st->codec.extradata_size = 12;
        st->codec.extradata = av_malloc(12);
        memcpy(st->codec.extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        /*
         * in this case, the speed (n) is number of 1/70s ticks between frames:
         *
         *    pts        n * frame #
         *  --------  =  -----------  => pts = n * (90000/70) * frame #
         *   90000           70
         *
         *  therefore, the frame pts increment = n * 1285.7
         */
        flic->frame_pts_inc = speed * 1285.7;
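        /* e.g. a speed of 5 ticks gives an increment of 5 * 90000 / 70 ~= 6428.6 */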
    } else if (magic_number == FLIC_FILE_MAGIC_2) {
        /*
         * in this case, the speed (n) is number of milliseconds between frames:
         *
         *    pts        n * frame #
         *  --------  =  -----------  => pts = n * 90 * frame #
         *   90000          1000
         *
         *  therefore, the frame pts increment = n * 90
         */
        flic->frame_pts_inc = speed * 90;
    } else
        return AVERROR_INVALIDDATA;

    if (flic->frame_pts_inc == 0)
        flic->frame_pts_inc = FLIC_DEFAULT_PTS_INC;

    return 0;
}
Example #26
static int flic_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FlicDemuxContext *flic = s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st;
    int speed;
    int magic_number;

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_FLIC;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = AV_RL16(&header[0x08]);
    st->codec->height = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* Time to figure out the framerate: If there is a FLIC chunk magic
     * number at offset 0x10, assume this is from the Bullfrog game,
     * Magic Carpet. */
    if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {

        av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        url_fseek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata_size = 12;
        st->codec->extradata = av_malloc(12);
        memcpy(st->codec->extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        av_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        av_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
Example #27
File: aviobuf.c Project: RJVB/FFusion
int url_fskip(ByteIOContext *s, int64_t offset)
{
    int64_t ret = url_fseek(s, offset, SEEK_CUR);
    return ret < 0 ? ret : 0;
}
Example #28
File: idroqdec.c Project: AndyA/ffmbc
static int roq_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    ByteIOContext *pb = s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    int64_t codebook_offset;

    while (!packet_read) {

        if (url_feof(s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        switch (chunk_type) {

        case RoQ_INFO:
            if (!roq->width || !roq->height) {
                AVStream *st = s->streams[roq->video_stream_index];
                if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
                    return AVERROR(EIO);
                st->codec->width  = roq->width  = AV_RL16(preamble);
                st->codec->height = roq->height = AV_RL16(preamble + 2);
                break;
            }
            /* don't care about this chunk anymore */
            url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_CUR);
            break;

        case RoQ_QUAD_CODEBOOK:
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = url_ftell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            url_fseek(pb, codebook_size, SEEK_CUR);
            if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                codebook_size;

            /* rewind */
            url_fseek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->dts = pkt->pts = roq->video_pts++;

            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
            if (roq->audio_stream_index == -1) {
                AVStream *st = av_new_stream(s, 1);
                if (!st)
                    return AVERROR(ENOMEM);
                av_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
                roq->audio_stream_index = st->index;
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id = CODEC_ID_ROQ_DPCM;
                st->codec->codec_tag = 0;  /* no tag */
                st->codec->channels = roq->audio_channels = chunk_type == RoQ_SOUND_STEREO ? 2 : 1;
                st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
                st->codec->bits_per_coded_sample = 16;
                st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                    st->codec->bits_per_coded_sample;
                st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            }
        case RoQ_QUAD_VQ:
            /* load up the packet */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->dts = pkt->pts = roq->video_pts++;
            } else {
                pkt->stream_index = roq->audio_stream_index;
                pkt->dts = pkt->pts = roq->audio_frame_count;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= url_ftell(pb);
            ret = get_buffer(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
            break;
        }
    }

    return ret;
}
Example #29
void url_fskip(ByteIOContext *s, int64_t offset)
{
    url_fseek(s, offset, SEEK_CUR);
}
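
The two url_fskip() variants in examples #27 and #29 differ only in error handling: the first forwards the result of the underlying url_fseek() (0 on success, a negative error code on failure), while this older one discards it. With the int-returning variant a caller can abort on a failed skip; a minimal sketch:

    if (url_fskip(pb, size) < 0)   /* only meaningful with the variant from example #27 */
        return AVERROR(EIO);
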
Example #30
File: iff.c Project: AnthonyNystrom/MobiVU
static int iff_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    IffDemuxContext *iff = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    uint32_t chunk_id, data_size;
    int padding, done = 0;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    url_fskip(pb, 12);

    while(!done && !url_feof(pb)) {
        chunk_id = get_le32(pb);
        data_size = get_be32(pb);
        padding = data_size & 1;

        switch(chunk_id) {
        case ID_VHDR:
            url_fskip(pb, 12);
            st->codec->sample_rate = get_be16(pb);
            url_fskip(pb, 1);
            st->codec->codec_tag = get_byte(pb);
            url_fskip(pb, 4);
            break;

        case ID_BODY:
            iff->body_size = data_size;
            done = 1;
            break;

        case ID_CHAN:
            st->codec->channels = (get_be32(pb) < 6) ? 1 : 2;
            break;

        default:
            url_fseek(pb, data_size + padding, SEEK_CUR);
            break;
        }
    }

    if(!st->codec->sample_rate)
        return AVERROR_INVALIDDATA;

    av_set_pts_info(st, 32, 1, st->codec->sample_rate);
    st->codec->codec_type = CODEC_TYPE_AUDIO;

    switch(st->codec->codec_tag) {
    case COMP_NONE:
        st->codec->codec_id = CODEC_ID_PCM_S8;
        break;
    case COMP_FIB:
        st->codec->codec_id = CODEC_ID_8SVX_FIB;
        break;
    case COMP_EXP:
        st->codec->codec_id = CODEC_ID_8SVX_EXP;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "iff: unknown compression method\n");
        return -1;
    }

    st->codec->bits_per_sample = 8;
    st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_sample;
    st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;

    return 0;
}