Example #1
File: siff.c  Project: Brhett/FFmpeg
static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SIFFContext *c = s->priv_data;
    int size;

    if (c->has_video){
        if (c->cur_frame >= c->frames)
            return AVERROR(EIO);
        if (c->curstrm == -1){
            c->pktsize = avio_rl32(s->pb) - 4;
            c->flags = avio_rl16(s->pb);
            c->gmcsize = (c->flags & VB_HAS_GMC) ? 4 : 0;
            if (c->gmcsize)
                avio_read(s->pb, c->gmc, c->gmcsize);
            c->sndsize = (c->flags & VB_HAS_AUDIO) ? avio_rl32(s->pb): 0;
            c->curstrm = !!(c->flags & VB_HAS_AUDIO);
        }

        if (!c->curstrm){
            size = c->pktsize - c->sndsize - c->gmcsize - 2;
            size = ffio_limit(s->pb, size);
            if(size < 0 || c->pktsize < c->sndsize)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(pkt, size + c->gmcsize + 2) < 0)
                return AVERROR(ENOMEM);
            AV_WL16(pkt->data, c->flags);
            if (c->gmcsize)
                memcpy(pkt->data + 2, c->gmc, c->gmcsize);
            avio_read(s->pb, pkt->data + 2 + c->gmcsize, size);
            pkt->stream_index = 0;
            c->curstrm = -1;
        }else{
            if ((size = av_get_packet(s->pb, pkt, c->sndsize - 4)) < 0)
                return AVERROR(EIO);
            pkt->stream_index = 1;
            pkt->duration     = size;
            c->curstrm = 0;
        }
        if(!c->cur_frame || c->curstrm)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (c->curstrm == -1)
            c->cur_frame++;
    }else{
        size = av_get_packet(s->pb, pkt, c->block_align);
        if(size <= 0)
            return AVERROR(EIO);
        pkt->duration = size;
    }
    return pkt->size;
}
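
Every example on this page guards an allocation with ffio_limit(), libavformat's internal helper (declared in avio_internal.h) that clamps a requested byte count to what is actually left in the input, so a corrupt size field cannot trigger an oversized allocation or a short read. The following minimal sketch of that pattern is not taken from any of the projects above; the callback name and the fixed stream index are hypothetical.

/* Minimal sketch, assuming a single-stream demuxer whose packets are
 * prefixed by a little-endian 32-bit size field. "my_read_packet" is a
 * hypothetical name, not code from the examples above. */
static int my_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    int size, ret;

    size = avio_rl32(pb);          /* untrusted size from the container */
    size = ffio_limit(pb, size);   /* clamp to the bytes actually remaining */
    if (size < 0)
        return AVERROR_INVALIDDATA;

    ret = av_get_packet(pb, pkt, size); /* allocates pkt and reads size bytes */
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;         /* hypothetical: only one stream */
    return ret;
}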
Example #2
File: dsicin.c  Project: 1c0n/xbmc
static int cin_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    CinDemuxContext *cin = s->priv_data;
    AVIOContext *pb = s->pb;
    CinFrameHeader *hdr = &cin->frame_header;
    int rc, palette_type, pkt_size;
    int ret;

    if (cin->audio_buffer_size == 0) {
        rc = cin_read_frame_header(cin, pb);
        if (rc)
            return rc;

        if ((int16_t)hdr->pal_colors_count < 0) {
            hdr->pal_colors_count = -(int16_t)hdr->pal_colors_count;
            palette_type = 1;
        } else {
            palette_type = 0;
        }

        /* palette and video packet */
        pkt_size = (palette_type + 3) * hdr->pal_colors_count + hdr->video_frame_size;

        pkt_size = ffio_limit(pb, pkt_size);

        ret = av_new_packet(pkt, 4 + pkt_size);
        if (ret < 0)
            return ret;

        pkt->stream_index = cin->video_stream_index;
        pkt->pts = cin->video_stream_pts++;

        pkt->data[0] = palette_type;
        pkt->data[1] = hdr->pal_colors_count & 0xFF;
        pkt->data[2] = hdr->pal_colors_count >> 8;
        pkt->data[3] = hdr->video_frame_type;

        ret = avio_read(pb, &pkt->data[4], pkt_size);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        if (ret < pkt_size)
            av_shrink_packet(pkt, 4 + ret);

        /* sound buffer will be processed on next read_packet() call */
        cin->audio_buffer_size = hdr->audio_frame_size;
        return 0;
    }
Example #3
static int append_packet_chunked_with_pts(AVIOContext *s, AVPacket *pkt, int explicit_pts, int64_t *pts, int size)
{
    int64_t orig_pos   = pkt->pos; // av_grow_packet might reset pos
    int orig_size      = pkt->size;
    int ret;

    do {
        int prev_size = pkt->size;
        int read_size;

        /* When the caller requests a lot of data, limit it to the amount
         * left in file or SANE_CHUNK_SIZE when it is not known. */
        read_size = size;
        if (read_size > SANE_CHUNK_SIZE/10) {
            read_size = ffio_limit(s, read_size);
            // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
            if (s->maxsize < 0)
                read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
        }

        ret = av_grow_packet(pkt, read_size);
        if (ret < 0)
            break;

        ret = avio_read(s, (unsigned char*)pts, sizeof(int64_t));
        if (ret != sizeof(int64_t)) {
            av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
            break;
        }

        ret = avio_read(s, pkt->data + prev_size, read_size);
        if (ret != read_size) {
            av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
            break;
        }

        size -= read_size;
    } while (size > 0);
    if (size > 0)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;

    pkt->pos = orig_pos;
    if (!pkt->size)
        av_free_packet(pkt);
    return pkt->size > orig_size ? pkt->size - orig_size : ret;
}
Example #4
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq,
                                   int64_t *timestamp)
{
    int hdr;
    int seq = 0, pic_num = 0, len2 = 0, pos = 0; // init to silence compiler warning
    int type;
    int ret;

    hdr = avio_r8(pb); len--;
    type = hdr >> 6;

    if(type != 3){  // not frame as a part of packet
        seq = avio_r8(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = avio_r8(pb); len--;
    }
    if(len<0) {
        av_log(s, AV_LOG_ERROR, "Insufficient data\n");
        return -1;
    }
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3){  // frame as a part of packet
            len= len2;
            *timestamp = pos;
        }
        if(rm->remaining_len < len) {
            av_log(s, AV_LOG_ERROR, "Insufficient remaining len\n");
            return -1;
        }
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        if ((ret = avio_read(pb, pkt->data + 9, len)) != len) {
            av_free_packet(pkt);
            av_log(s, AV_LOG_ERROR, "Failed to read %d bytes\n", len);
            return ret < 0 ? ret : AVERROR(EIO);
        }
        return 0;
    }
    //now we have to deal with single slice

    *pseq = seq;
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        if (len2 > ffio_limit(pb, len2)) {
            av_log(s, AV_LOG_ERROR, "Impossibly sized packet\n");
            return AVERROR_INVALIDDATA;
        }
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        memset(vst->pkt.data, 0, vst->pkt.size);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = avio_tell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices) {
        av_log(s, AV_LOG_ERROR, "cur slice %d, too large\n", vst->cur_slice);
        return 1;
    }
    if(!vst->pkt.data)
        return AVERROR(ENOMEM);
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize) {
        av_log(s, AV_LOG_ERROR, "outside videobufsize\n");
        return 1;
    }
    if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    if (type == 2 || vst->videobufpos == vst->videobufsize) {
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        vst->pkt.destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        if(vst->slices != vst->cur_slice) // FIXME: find out how to set slices correctly from the beginning
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }

    return 1;
}
Example #5
static int roq_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    int64_t codebook_offset;

    while (!packet_read) {

        if (avio_feof(s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        chunk_size = ffio_limit(pb, chunk_size);

        switch (chunk_type) {

        case RoQ_INFO:
            if (roq->video_stream_index == -1) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 63, 1, roq->frame_rate);
                roq->video_stream_index = st->index;
                st->codec->codec_type   = AVMEDIA_TYPE_VIDEO;
                st->codec->codec_id     = AV_CODEC_ID_ROQ;
                st->codec->codec_tag    = 0;  /* no fourcc */

                if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
                    return AVERROR(EIO);
                st->codec->width  = roq->width  = AV_RL16(preamble);
                st->codec->height = roq->height = AV_RL16(preamble + 2);
                break;
            }
            /* don't care about this chunk anymore */
            avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE);
            break;

        case RoQ_QUAD_CODEBOOK:
            if (roq->video_stream_index < 0)
                return AVERROR_INVALIDDATA;
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            avio_skip(pb, codebook_size);
            if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                codebook_size;

            /* rewind */
            avio_seek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->pts = roq->video_pts++;

            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
            if (roq->audio_stream_index == -1) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
                roq->audio_stream_index = st->index;
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id = AV_CODEC_ID_ROQ_DPCM;
                st->codec->codec_tag = 0;  /* no tag */
                if (chunk_type == RoQ_SOUND_STEREO) {
                    st->codec->channels       = 2;
                    st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
                } else {
                    st->codec->channels       = 1;
                    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
                }
                roq->audio_channels    = st->codec->channels;
                st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
                st->codec->bits_per_coded_sample = 16;
                st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                    st->codec->bits_per_coded_sample;
                st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            }
        case RoQ_QUAD_VQ:
            if (chunk_type == RoQ_QUAD_VQ) {
                if (roq->video_stream_index < 0)
                    return AVERROR_INVALIDDATA;
            }

            /* load up the packet */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->pts = roq->video_pts++;
            } else {
                pkt->stream_index = roq->audio_stream_index;
                pkt->pts = roq->audio_frame_count;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= avio_tell(pb);
            ret = avio_read(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
        }
    }

    return ret;
}
Example #6
static int film_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    film_sample *sample;
    int ret = 0;
    int i;
    int left, right;

    if (film->current_sample >= film->sample_count)
        return AVERROR(EIO);

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    avio_seek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == CODEC_ID_CINEPAK)) {
        pkt->pos= avio_tell(pb);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);
        avio_read(pb, pkt->data, sample->sample_size);
    } else if ((sample->stream == film->audio_stream_index) &&
        (film->audio_channels == 2) &&
        (film->audio_type != CODEC_ID_ADPCM_ADX)) {
        /* stereo PCM needs to be interleaved */

        if (ffio_limit(pb, sample->sample_size) != sample->sample_size)
            return AVERROR(EIO);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);

        /* make sure the interleave buffer is large enough */
        if (sample->sample_size > film->stereo_buffer_size) {
            av_free(film->stereo_buffer);
            film->stereo_buffer_size = sample->sample_size;
            film->stereo_buffer = av_malloc(film->stereo_buffer_size);
            if (!film->stereo_buffer) {
                film->stereo_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }

        pkt->pos= avio_tell(pb);
        ret = avio_read(pb, film->stereo_buffer, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);

        left = 0;
        right = sample->sample_size / 2;
        for (i = 0; i < sample->sample_size; ) {
            if (film->audio_bits == 8) {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
            } else {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
                pkt->data[i++] = film->stereo_buffer[right++];
            }
        }
    } else {
        ret= av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}