Example #1
static int skeleton_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    uint8_t *buf = os->buf + os->pstart;
    int version_major, version_minor;
    int64_t start_num, start_den;
    uint64_t start_granule;
    int target_idx, start_time;

    strcpy(st->codec->codec_name, "skeleton");
    st->codec->codec_type = AVMEDIA_TYPE_DATA;

    if ((os->flags & OGG_FLAG_EOS) && os->psize == 0)
        return 1;

    if (os->psize < 8)
        return -1;

    if (!strncmp(buf, "fishead", 8)) {
        if (os->psize < 64)
            return -1;

        version_major = AV_RL16(buf+8);
        version_minor = AV_RL16(buf+10);

        if (version_major != 3 && version_major != 4) {
            av_log(s, AV_LOG_WARNING, "Unknown skeleton version %d.%d\n",
                   version_major, version_minor);
            return -1;
        }

        // This is the overall start time. We use it for the start time of
        // the skeleton stream, since lavf assumes 0 if it is left unset,
        // which we don't want because skeleton is timeless.
        // FIXME: the real meaning of this field is "start playback at
        // this time", which can be in the middle of a packet
        start_num = AV_RL64(buf+12);
        start_den = AV_RL64(buf+20);

        if (start_den > 0 && start_num > 0) {
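            // reduce the fraction so the denominator fits in an int; the
            // reduced denominator becomes the stream time base and the
            // reduced numerator the start time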
            int base_den;
            av_reduce(&start_time, &base_den, start_num, start_den, INT_MAX);
            avpriv_set_pts_info(st, 64, 1, base_den);
            os->lastpts =
            st->start_time = start_time;
        }
    } else if (!strncmp(buf, "fisbone", 8)) {
        if (os->psize < 52)
            return -1;

        target_idx = ogg_find_stream(ogg, AV_RL32(buf+12));
        start_granule = AV_RL64(buf+36);
        if (target_idx < 0) {
            av_log(s, AV_LOG_WARNING, "Serial number in fisbone doesn't match any stream\n");
            return 1;
        }
        os = ogg->streams + target_idx;
        if (os->start_granule != OGG_NOGRANULE_VALUE) {
            av_log(s, AV_LOG_WARNING, "Multiple fisbone for the same stream\n");
            return 1;
        }
        if (start_granule != OGG_NOGRANULE_VALUE) {
            os->start_granule = start_granule;
        }
    }

    return 1;
}
Example #2
static int ipmovie_read_header(AVFormatContext *s)
{
    IPMVEContext *ipmovie = s->priv_data;
    AVIOContext *pb = s->pb;
    AVPacket pkt;
    AVStream *st;
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type, i;
    uint8_t signature_buffer[sizeof(signature)];

    avio_read(pb, signature_buffer, sizeof(signature_buffer));
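    /* slide a one-byte window over the stream until the MVE signature matches */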
    while (memcmp(signature_buffer, signature, sizeof(signature))) {
        memmove(signature_buffer, signature_buffer + 1, sizeof(signature_buffer) - 1);
        signature_buffer[sizeof(signature_buffer) - 1] = avio_r8(pb);
        if (avio_feof(pb))
            return AVERROR_EOF;
    }
    /* initialize private context members */
    ipmovie->video_pts = ipmovie->audio_frame_count = 0;
    ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
    ipmovie->decode_map_chunk_offset = 0;

    /* on the first read, this will position the stream at the first chunk */
    ipmovie->next_chunk_offset = avio_tell(pb) + 4;

    for (i = 0; i < 256; i++)
        ipmovie->palette[i] = 0xFFU << 24;

    /* process the first chunk which should be CHUNK_INIT_VIDEO */
    if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
        return AVERROR_INVALIDDATA;

    /* peek ahead to the next chunk-- if it is an init audio chunk, process
     * it; if it is the first video chunk, this is a silent file */
    if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    chunk_type = AV_RL16(&chunk_preamble[2]);
    avio_seek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);

    if (chunk_type == CHUNK_VIDEO)
        ipmovie->audio_type = AV_CODEC_ID_NONE;  /* no audio */
    else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
        return AVERROR_INVALIDDATA;

    /* initialize the stream decoders */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 63, 1, 1000000);
    ipmovie->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_INTERPLAY_VIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = ipmovie->video_width;
    st->codec->height = ipmovie->video_height;
    st->codec->bits_per_coded_sample = ipmovie->video_bpp;

    if (ipmovie->audio_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(st, 32, 1, ipmovie->audio_sample_rate);
        ipmovie->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = ipmovie->audio_type;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = ipmovie->audio_channels;
        st->codec->channel_layout = st->codec->channels == 1 ? AV_CH_LAYOUT_MONO :
                                                               AV_CH_LAYOUT_STEREO;
        st->codec->sample_rate = ipmovie->audio_sample_rate;
        st->codec->bits_per_coded_sample = ipmovie->audio_bits;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        if (st->codec->codec_id == AV_CODEC_ID_INTERPLAY_DPCM)
            st->codec->bit_rate /= 2;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
    }

    return 0;
}
Example #3
/** Read incoming MMST media, header or command packet. */
static MMSSCPacketType get_tcp_server_response(MMSTContext *mmst)
{
    int read_result;
    MMSSCPacketType packet_type= -1;
    MMSContext *mms = &mmst->mms;
    for(;;) {
        read_result = url_read_complete(mms->mms_hd, mms->in_buffer, 8);
        if (read_result != 8) {
            if(read_result < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Error reading packet header: %d (%s)\n",
                       read_result, strerror(read_result));
                packet_type = SC_PKT_CANCEL;
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "The server closed the connection\n");
                packet_type = SC_PKT_NO_DATA;
            }
            return packet_type;
        }

        // handle command packet.
        if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
            int length_remaining, hr;

            mmst->incoming_flags= mms->in_buffer[3];
            read_result= url_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
            if(read_result != 4) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading command packet length failed: %d (%s)\n",
                       read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }

            length_remaining= AV_RL32(mms->in_buffer+8) + 4;
            dprintf(NULL, "Length remaining is %d\n", length_remaining);
            // read the rest of the packet.
            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 12) {
                av_log(NULL, AV_LOG_ERROR,
                       "Incoming packet length %d exceeds bufsize %zu\n",
                       length_remaining, sizeof(mms->in_buffer) - 12);
                return AVERROR_INVALIDDATA;
            }
            read_result = url_read_complete(mms->mms_hd, mms->in_buffer + 12,
                                            length_remaining) ;
            if (read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading pkt data (length=%d) failed: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }
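            // the command id is stored at offset 36, the HRESULT status at offset 40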
            packet_type= AV_RL16(mms->in_buffer+36);
            hr = AV_RL32(mms->in_buffer + 40);
            if (hr) {
                av_log(NULL, AV_LOG_ERROR,
                       "Server sent an error status code: 0x%08x\n", hr);
                return AVERROR_UNKNOWN;
            }
        } else {
            int length_remaining;
            int packet_id_type;
            int tmp;

            // note we cache the first 8 bytes,
            // then fill up the buffer with the others
            tmp                       = AV_RL16(mms->in_buffer + 6);
            length_remaining          = (tmp - 8) & 0xffff;
            mmst->incoming_packet_seq = AV_RL32(mms->in_buffer);
            packet_id_type            = mms->in_buffer[4];
            mmst->incoming_flags      = mms->in_buffer[5];

            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 8) {
                av_log(NULL, AV_LOG_ERROR,
                       "Data length %d is invalid or too large (max=%zu)\n",
                       length_remaining, sizeof(mms->in_buffer));
                return AVERROR_INVALIDDATA;
            }
            mms->remaining_in_len    = length_remaining;
            mms->read_in_ptr         = mms->in_buffer;
            read_result= url_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
            if(read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to read packet data of size %d: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }

            // if we successfully read everything.
            if(packet_id_type == mmst->header_packet_id) {
                packet_type = SC_PKT_ASF_HEADER;
                // Store the asf header
                if(!mms->header_parsed) {
                    void *p = av_realloc(mms->asf_header,
                                  mms->asf_header_size + mms->remaining_in_len);
                    if (!p) {
                        av_freep(&mms->asf_header);
                        return AVERROR(ENOMEM);
                    }
                    mms->asf_header = p;
                    memcpy(mms->asf_header + mms->asf_header_size,
                           mms->read_in_ptr, mms->remaining_in_len);
                    mms->asf_header_size += mms->remaining_in_len;
                }
                // 0x04 means asf header is sent in multiple packets.
                if (mmst->incoming_flags == 0x04)
                    continue;
            } else if(packet_id_type == mmst->packet_id) {
                packet_type = SC_PKT_ASF_MEDIA;
            } else {
                dprintf(NULL, "packet id type %d is old.", packet_id_type);
                continue;
            }
        }

        // preprocess some packet type
        if(packet_type == SC_PKT_KEEPALIVE) {
            send_keepalive_packet(mmst);
            continue;
        } else if(packet_type == SC_PKT_STREAM_CHANGING) {
            handle_packet_stream_changing_type(mmst);
        } else if(packet_type == SC_PKT_ASF_MEDIA) {
            pad_media_packet(mms);
        }
        return packet_type;
    }
}
Example #4
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = buf + avpkt->size;
    KgvContext * const c = avctx->priv_data;
    int offsets[8];
    uint8_t *out, *prev;
    int outcnt = 0, maxcnt;
    int w, h, i, res;

    if (avpkt->size < 2)
        return AVERROR_INVALIDDATA;

    w = (buf[0] + 1) * 8;
    h = (buf[1] + 1) * 8;
    buf += 2;

    if (w != avctx->width || h != avctx->height) {
        av_freep(&c->frame_buffer);
        av_freep(&c->last_frame_buffer);
        if ((res = ff_set_dimensions(avctx, w, h)) < 0)
            return res;
    }

    if (!c->frame_buffer) {
        c->frame_buffer      = av_mallocz(avctx->width * avctx->height * 2);
        c->last_frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
        if (!c->frame_buffer || !c->last_frame_buffer) {
            decode_flush(avctx);
            return AVERROR(ENOMEM);
        }
    }

    maxcnt = w * h;

    if ((res = ff_get_buffer(avctx, frame, 0)) < 0)
        return res;
    out  = (uint8_t*)c->frame_buffer;
    prev = (uint8_t*)c->last_frame_buffer;

    for (i = 0; i < 8; i++)
        offsets[i] = -1;

    while (outcnt < maxcnt && buf_end - 2 >= buf) {
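        // each 16-bit code is either a literal RGB555 pixel (top bit clear)
        // or an LZ-style copy from this frame or the previous one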
        int code = AV_RL16(buf);
        buf += 2;

        if (!(code & 0x8000)) {
            AV_WN16A(&out[2 * outcnt], code); // rgb555 pixel coded directly
            outcnt++;
        } else {
            int count;

            if ((code & 0x6000) == 0x6000) {
                // copy from previous frame
                int oidx = (code >> 10) & 7;
                int start;

                count = (code & 0x3FF) + 3;

                if (offsets[oidx] < 0) {
                    if (buf_end - 3 < buf)
                        break;
                    offsets[oidx] = AV_RL24(buf);
                    buf += 3;
                }

                start = (outcnt + offsets[oidx]) % maxcnt;

                if (maxcnt - start < count || maxcnt - outcnt < count)
                    break;

                if (!prev) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Frame reference does not exist\n");
                    break;
                }

                memcpy(out + 2 * outcnt, prev + 2 * start, 2 * count);
            } else {
                // copy from earlier in this frame
                int offset = (code & 0x1FFF) + 1;

                if (!(code & 0x6000)) {
                    count = 2;
                } else if ((code & 0x6000) == 0x2000) {
                    count = 3;
                } else {
                    if (buf_end - 1 < buf)
                        break;
                    count = 4 + *buf++;
                }

                if (outcnt < offset || maxcnt - outcnt < count)
                    break;

                av_memcpy_backptr(out + 2 * outcnt, 2 * offset, 2 * count);
            }
            outcnt += count;
        }
Example #5
/* Returns the number of bytes consumed from the bytestream. Returns -1 if
 * there was an error while decoding the header */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
    int i;
    int width_shift = 0;
    int new_pix_fmt;
    struct frame_header header;
    uint8_t header_buffer[128] = { 0 };  /* logical maximum size of the header */
    const uint8_t *sel_vector_table;

    header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
    if (s->buf[0] < 0x10 || header.header_size >= s->size)
    {
        av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
        return -1;
    }

    /* unscramble the header bytes with a XOR operation */
    for (i = 1; i < header.header_size; i++)
        header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];

    header.compression = header_buffer[0];
    header.deltaset = header_buffer[1];
    header.vectable = header_buffer[2];
    header.ysize = AV_RL16(&header_buffer[3]);
    header.xsize = AV_RL16(&header_buffer[5]);
    header.checksum = AV_RL16(&header_buffer[7]);
    header.version = header_buffer[9];
    header.header_type = header_buffer[10];
    header.flags = header_buffer[11];
    header.control = header_buffer[12];

    /* Version 2 */
    if (header.version >= 2)
    {
        if (header.header_type > 3)
        {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
            return -1;
        } else if ((header.header_type == 2) || (header.header_type == 3)) {
            s->flags = header.flags;
            if (!(s->flags & FLAG_INTERFRAME))
                s->flags |= FLAG_KEYFRAME;
        } else
            s->flags = FLAG_KEYFRAME;
    } else /* Version 1 */
        s->flags = FLAG_KEYFRAME;

    if (s->flags & FLAG_SPRITE) {
        av_log_ask_for_sample(s->avctx, "SPRITE frame found.\n");
        /* FIXME header.width, height, xoffset and yoffset aren't initialized */
#if 0
        s->w = header.width;
        s->h = header.height;
        s->x = header.xoffset;
        s->y = header.yoffset;
#else
        return -1;
#endif
    } else {
        s->w = header.xsize;
        s->h = header.ysize;
        if (header.header_type < 2) {
            if ((s->w < 213) && (s->h >= 176))
            {
                s->flags |= FLAG_INTERPOLATED;
                av_log_ask_for_sample(s->avctx, "INTERPOLATION selected.\n");
            }
        }
    }

    if (header.compression >= 17) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
        return -1;
    }

    if ((header.deltaset != s->last_deltaset) ||
        (header.vectable != s->last_vectable))
        select_delta_tables(s, header.deltaset);

    if ((header.compression & 1) && header.header_type)
        sel_vector_table = pc_tbl2;
    else {
        if (header.vectable > 0 && header.vectable < 4)
            sel_vector_table = tables[header.vectable - 1];
        else {
            av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
            return -1;
        }
    }

    if (compression_types[header.compression].algorithm == ALGO_RGB24H) {
        new_pix_fmt = PIX_FMT_RGB32;
        width_shift = 1;
    } else
        new_pix_fmt = PIX_FMT_RGB555; // RGB565 is supported as well

    s->w >>= width_shift;
    if (av_image_check_size(s->w, s->h, 0, s->avctx) < 0)
        return -1;

    if (s->w != s->avctx->width || s->h != s->avctx->height ||
        new_pix_fmt != s->avctx->pix_fmt) {
        if (s->frame.data[0])
            s->avctx->release_buffer(s->avctx, &s->frame);
        s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 };
        s->avctx->pix_fmt = new_pix_fmt;
        avcodec_set_dimensions(s->avctx, s->w, s->h);
        av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
    }

    /* There is 1 change bit per 4 pixels, so each change byte represents
     * 32 pixels; divide width by 4 to obtain the number of change bits and
     * then round up to the nearest byte. */
    s->mb_change_bits_row_size = ((s->avctx->width >> (2 - width_shift)) + 7) >> 3;

    if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
    {
        if (compression_types[header.compression].algorithm == ALGO_RGB24H)
            gen_vector_table24(s, sel_vector_table);
        else
        if (s->avctx->pix_fmt == PIX_FMT_RGB555)
            gen_vector_table15(s, sel_vector_table);
        else
            gen_vector_table16(s, sel_vector_table);
    }

    /* set up pointers to the other key data chunks */
    s->mb_change_bits = s->buf + header.header_size;
    if (s->flags & FLAG_KEYFRAME) {
        /* no change bits specified for a keyframe; only index bytes */
        s->index_stream = s->mb_change_bits;
    } else {
        /* one change bit per 4x4 block */
        s->index_stream = s->mb_change_bits +
            (s->mb_change_bits_row_size * (s->avctx->height >> 2));
    }
    s->index_stream_size = s->size - (s->index_stream - s->buf);

    s->last_deltaset = header.deltaset;
    s->last_vectable = header.vectable;
    s->compression = header.compression;
    s->block_width = compression_types[header.compression].block_width;
    s->block_height = compression_types[header.compression].block_height;
    s->block_type = compression_types[header.compression].block_type;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
            s->last_deltaset, s->last_vectable, s->compression, s->block_width,
            s->block_height, s->block_type,
            s->flags & FLAG_KEYFRAME ? " KEY" : "",
            s->flags & FLAG_INTERFRAME ? " INTER" : "",
            s->flags & FLAG_SPRITE ? " SPRITE" : "",
            s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");

    return header.header_size;
}
Example #6
static int str_probe(AVProbeData *p)
{
    uint8_t *sector= p->buf;
    uint8_t *end= sector + p->buf_size;
    int aud=0, vid=0;

    if (p->buf_size < RAW_CD_SECTOR_SIZE)
        return 0;

    if ((AV_RL32(&p->buf[0]) == RIFF_TAG) &&
        (AV_RL32(&p->buf[8]) == CDXA_TAG)) {

        /* RIFF header seen; skip 0x2C bytes */
        sector += RIFF_HEADER_SIZE;
    }

    while (end - sector >= RAW_CD_SECTOR_SIZE) {
        /* look for CD sync header (00, 0xFF x 10, 00) */
        if (memcmp(sector,sync_header,sizeof(sync_header)))
            return 0;

        if (sector[0x11] >= 32)
            return 0;

        switch (sector[0x12] & CDXA_TYPE_MASK) {
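        /* the CD-XA submode byte at offset 0x12 distinguishes data, video and audio sectors */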
        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO: {
                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size = AV_RL32(&sector[0x24]);

                if(!(   frame_size>=0
                     && current_sector < sector_count
                     && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
                    return 0;
                }

                /*st->codec->width      = AV_RL16(&sector[0x28]);
                st->codec->height     = AV_RL16(&sector[0x2A]);*/

//                 if (current_sector == sector_count-1) {
                    vid++;
//                 }

            }
            break;
        case CDXA_TYPE_AUDIO:
            if(sector[0x13]&0x2A)
                return 0;
            aud++;
            break;
        default:
            if(sector[0x12] & CDXA_TYPE_MASK)
                return 0;
        }
        sector += RAW_CD_SECTOR_SIZE;
    }
    /* MPEG files (like those ripped from VCDs) can also look like this;
     * only return half certainty */
    if(vid+aud > 3)  return 50;
    else if(vid+aud) return 1;
    else             return 0;
}
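The probe above compares each sector against a sync_header table that is defined elsewhere in the demuxer. Going by the probe's own comment ("00, 0xFF x 10, 00"), a minimal sketch of what that constant presumably looks like:

/* assumed definition, reconstructed from the comment in str_probe;
 * the authoritative table lives in the demuxer source */
static const uint8_t sync_header[12] = {
    0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00
};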
Example #7
static int get_ext_stream_properties(char *buf, int buf_len, int stream_num, struct asf_priv* asf, int is_video)
{
  int pos=0;
  uint8_t *buffer = &buf[0];
  uint64_t avg_ft;
  unsigned bitrate;

  while ((pos = find_asf_guid(buf, asf_ext_stream_header, pos, buf_len)) >= 0) {
    int this_stream_num, stnamect, payct, i;
    int buf_max_index=pos+50;
    if (buf_max_index > buf_len) return 0;
    buffer = &buf[pos];

    // the following info is available
    // some of it may be useful but we're skipping it for now
    // starttime(8 bytes), endtime(8),
    // leak-datarate(4), bucket-datasize(4), init-bucket-fullness(4),
    // alt-leak-datarate(4), alt-bucket-datasize(4), alt-init-bucket-fullness(4),
    // max-object-size(4),
    // flags(4) (reliable,seekable,no_cleanpoints?,resend-live-cleanpoints, rest of bits reserved)

    buffer += 8+8;
    bitrate = AV_RL32(buffer);
    buffer += 8*4;
    this_stream_num=AV_RL16(buffer);buffer+=2;

    if (this_stream_num == stream_num) {
      buf_max_index+=14;
      if (buf_max_index > buf_len) return 0;
      buffer+=2; //skip stream-language-id-index
      avg_ft = AV_RL64(buffer); // provided in 100ns units
      buffer+=8;
      asf->bps = bitrate / 8;

      // after this are values for stream-name-count and
      // payload-extension-system-count
      // followed by associated info for each
      stnamect = AV_RL16(buffer);buffer+=2;
      payct = AV_RL16(buffer);buffer+=2;

      // need to read stream names if present in order
      // to get lengths - values are ignored for now
      for (i=0; i<stnamect; i++) {
        int stream_name_len;
        buf_max_index+=4;
        if (buf_max_index > buf_len) return 0;
        buffer+=2; //language_id_index
        stream_name_len = AV_RL16(buffer);buffer+=2;
        buffer+=stream_name_len; //stream_name
        buf_max_index+=stream_name_len;
        if (buf_max_index > buf_len) return 0;
      }

      if (is_video) {
        asf->vid_repdata_count = payct;
        asf->vid_repdata_sizes = malloc(payct*sizeof(int));
      } else {
        asf->aud_repdata_count = payct;
        asf->aud_repdata_sizes = malloc(payct*sizeof(int));
      }

      for (i=0; i<payct; i++) {
        int payload_len;
        buf_max_index+=22;
        if (buf_max_index > buf_len) return 0;
        // Each payload extension definition starts with a GUID.
        // In dvr-ms files one of these indicates the presence of an
        // extension that contains pts values and this is always present
        // in the video and audio streams.
        // Another GUID indicates the presence of an extension
        // that contains useful video frame demuxing information.
        // Note that the extension data in each packet does not contain
        // these GUIDs and that this header section defines the order the data
        // will appear in.
        if (memcmp(buffer, asf_dvr_ms_timing_rep_data, 16) == 0) {
          if (is_video)
            asf->vid_ext_timing_index = i;
          else
            asf->aud_ext_timing_index = i;
        } else if (is_video && memcmp(buffer, asf_dvr_ms_vid_frame_rep_data, 16) == 0)
          asf->vid_ext_frame_index = i;
        buffer+=16;

        payload_len = AV_RL16(buffer);buffer+=2;

        if (is_video)
          asf->vid_repdata_sizes[i] = payload_len;
        else
          asf->aud_repdata_sizes[i] = payload_len;
        buffer+=4;//sys_len
      }

      return 1;
    }
  }
  return 1;
}
Example #8
static int wsvqa_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned char *header;
    unsigned char scratch[VQA_PREAMBLE_SIZE];
    unsigned int chunk_tag;
    unsigned int chunk_size;

    /* initialize the video decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
    wsvqa->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_WS_VQA;
    st->codec->codec_tag = 0;  /* no fourcc */

    /* skip to the start of the VQA header */
    url_fseek(pb, 20, SEEK_SET);

    /* the VQA header needs to go to the decoder */
    st->codec->extradata_size = VQA_HEADER_SIZE;
    st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    header = (unsigned char *)st->codec->extradata;
    if (get_buffer(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
        VQA_HEADER_SIZE) {
        av_free(st->codec->extradata);
        return AVERROR(EIO);
    }
    st->codec->width = AV_RL16(&header[6]);
    st->codec->height = AV_RL16(&header[8]);

    /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
    if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        if (AV_RL16(&header[0]) == 1)
            st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
        else
            st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->sample_rate = AV_RL16(&header[24]);
        if (!st->codec->sample_rate)
            st->codec->sample_rate = 22050;
        st->codec->channels = header[26];
        if (!st->codec->channels)
            st->codec->channels = 1;
        st->codec->bits_per_coded_sample = 16;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample / 4;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;

        wsvqa->audio_stream_index = st->index;
        wsvqa->audio_samplerate = st->codec->sample_rate;
        wsvqa->audio_channels = st->codec->channels;
        wsvqa->audio_frame_counter = 0;
    }

    /* there are 0 or more chunks before the FINF chunk; iterate until
     * FINF has been skipped and the file will be ready to be demuxed */
    do {
        if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
            av_free(st->codec->extradata);
            return AVERROR(EIO);
        }
        chunk_tag = AV_RB32(&scratch[0]);
        chunk_size = AV_RB32(&scratch[4]);

        /* catch any unknown header tags, for curiosity */
        switch (chunk_tag) {
        case CINF_TAG:
        case CINH_TAG:
        case CIND_TAG:
        case PINF_TAG:
        case PINH_TAG:
        case PIND_TAG:
        case FINF_TAG:
        case CMDS_TAG:
            break;

        default:
            av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
                scratch[0], scratch[1],
                scratch[2], scratch[3]);
            break;
        }

        url_fseek(pb, chunk_size, SEEK_CUR);
    } while (chunk_tag != FINF_TAG);

    wsvqa->video_pts = wsvqa->audio_frame_counter = 0;

    return 0;
}
Example #9
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    PicContext *s = avctx->priv_data;
    int buf_size = avpkt->size;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = avpkt->data + buf_size;
    uint32_t *palette;
    int bits_per_plane, bpp, etype, esize, npal;
    int i, x, y, plane;

    if (buf_size < 11)
        return AVERROR_INVALIDDATA;

    if (bytestream_get_le16(&buf) != 0x1234)
        return AVERROR_INVALIDDATA;
    s->width  = bytestream_get_le16(&buf);
    s->height = bytestream_get_le16(&buf);
    buf += 4;
    bits_per_plane    = *buf & 0xF;
    s->nb_planes      = (*buf++ >> 4) + 1;
    bpp               = s->nb_planes ? bits_per_plane*s->nb_planes : bits_per_plane;
    if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
        av_log_ask_for_sample(avctx, "unsupported bit depth\n");
        return AVERROR_INVALIDDATA;
    }

    if (*buf == 0xFF) {
        buf += 2;
        etype  = bytestream_get_le16(&buf);
        esize  = bytestream_get_le16(&buf);
        if (buf_end - buf < esize)
            return AVERROR_INVALIDDATA;
    } else {
        etype = -1;
        esize = 0;
    }

    avctx->pix_fmt = PIX_FMT_PAL8;

    if (s->width != avctx->width || s->height != avctx->height) {
        if (av_image_check_size(s->width, s->height, 0, avctx) < 0)
            return -1;
        avcodec_set_dimensions(avctx, s->width, s->height);
        if (s->frame.data[0])
            avctx->release_buffer(avctx, &s->frame);
    }

    if (avctx->get_buffer(avctx, &s->frame) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
    s->frame.pict_type           = AV_PICTURE_TYPE_I;
    s->frame.palette_has_changed = 1;

    palette = (uint32_t*)s->frame.data[1];
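    /* the extension type selects the palette source: CGA mode 4/5 index (1),
     * CGA entries (2), EGA entries (3) or raw RGB triplets (4/5);
     * anything else falls back to a default palette for the bit depth */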
    if (etype == 1 && esize > 1 && *buf < 6) {
        int idx = *buf;
        npal = 4;
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
    } else if (etype == 2) {
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[ FFMIN(buf[i], 16)];
    } else if (etype == 3) {
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++)
            palette[i] = ff_ega_palette[ FFMIN(buf[i], 63)];
    } else if (etype == 4 || etype == 5) {
        npal = FFMIN(esize / 3, 256);
        for (i = 0; i < npal; i++)
            palette[i] = AV_RB24(buf + i*3) << 2;
    } else {
        if (bpp == 1) {
            npal = 2;
            palette[0] = 0x000000;
            palette[1] = 0xFFFFFF;
        } else if (bpp == 2) {
            npal = 4;
            for (i = 0; i < npal; i++)
                palette[i] = ff_cga_palette[ cga_mode45_index[0][i] ];
        } else {
            npal = 16;
            memcpy(palette, ff_cga_palette, npal * 4);
        }
    }
    // fill remaining palette entries
    memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
    buf += esize;


    x = 0;
    y = s->height - 1;
    plane = 0;
    if (bytestream_get_le16(&buf)) {
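        /* a nonzero value here (presumably the block count) means the image data is RLE-compressed */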
        while (buf_end - buf >= 6) {
            const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf);
            //ignore uncompressed block size reported at buf[2]
            int marker = buf[4];
            buf += 5;

            while (plane < s->nb_planes && buf_pend - buf >= 1) {
                int run = 1;
                int val = *buf++;
                if (val == marker) {
                    run = *buf++;
                    if (run == 0)
                        run = bytestream_get_le16(&buf);
                    val = *buf++;
                }
                if (buf > buf_end)
                    break;

                if (bits_per_plane == 8) {
                    picmemset_8bpp(s, val, run, &x, &y);
                    if (y < 0)
                        break;
                } else {
                    picmemset(s, val, run, &x, &y, &plane, bits_per_plane);
                }
            }
        }
    } else {
        av_log_ask_for_sample(avctx, "uncompressed image\n");
        return buf_size;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
Example #10
static int ws_snd_decode_frame(AVCodecContext *avctx,
                void *data, int *data_size,
                AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
//    WSSNDContext *c = avctx->priv_data;

    int in_size, out_size;
    int sample = 0;
    int i;
    short *samples = data;

    if (!buf_size)
        return 0;

    out_size = AV_RL16(&buf[0]);
    *data_size = out_size * 2;
    in_size = AV_RL16(&buf[2]);
    buf += 4;

    if (out_size > *data_size) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
        return -1;
    }
    if (in_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
        return -1;
    }
    if (in_size == out_size) {
        for (i = 0; i < out_size; i++)
            *samples++ = (*buf++ - 0x80) << 8;
        return buf_size;
    }

    while (out_size > 0) {
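        /* each block starts with a control byte: the top two bits select the
         * coding mode, the low six bits carry a count; CLIP8() is assumed to
         * clamp the running sample to the signed 8-bit range */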
        int code;
        uint8_t count;
        code = (*buf) >> 6;
        count = (*buf) & 0x3F;
        buf++;
        switch(code) {
        case 0: /* ADPCM 2-bit */
            for (count++; count > 0; count--) {
                code = *buf++;
                sample += ws_adpcm_2bit[code & 0x3];
                CLIP8(sample);
                *samples++ = sample << 8;
                sample += ws_adpcm_2bit[(code >> 2) & 0x3];
                CLIP8(sample);
                *samples++ = sample << 8;
                sample += ws_adpcm_2bit[(code >> 4) & 0x3];
                CLIP8(sample);
                *samples++ = sample << 8;
                sample += ws_adpcm_2bit[(code >> 6) & 0x3];
                CLIP8(sample);
                *samples++ = sample << 8;
                out_size -= 4;
            }
            break;
        case 1: /* ADPCM 4-bit */
            for (count++; count > 0; count--) {
                code = *buf++;
                sample += ws_adpcm_4bit[code & 0xF];
                CLIP8(sample);
                *samples++ = sample << 8;
                sample += ws_adpcm_4bit[code >> 4];
                CLIP8(sample);
                *samples++ = sample << 8;
                out_size -= 2;
            }
            break;
        case 2: /* no compression */
            if (count & 0x20) { /* big delta */
                int8_t t;
                t = count;
                t <<= 3;
                sample += t >> 3;
                *samples++ = sample << 8;
                out_size--;
            } else { /* copy */
                for (count++; count > 0; count--) {
                    *samples++ = (*buf++ - 0x80) << 8;
                    out_size--;
                }
                sample = buf[-1] - 0x80;
            }
            break;
        default: /* run */
            for(count++; count > 0; count--) {
                *samples++ = sample << 8;
                out_size--;
            }
        }
Example #11
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id = vmd->is_indeo3 ? CODEC_ID_INDEO3 : CODEC_ID_VMDVIDEO;
    vst->codec->codec_tag = 0;  /* no fourcc */
    vst->codec->width = AV_RL16(&vmd->vmd_header[12]);
    vst->codec->height = AV_RL16(&vmd->vmd_header[14]);
    if(vmd->is_indeo3 && vst->codec->width > 320)
    {
        vst->codec->width >>= 1;
        vst->codec->height >>= 1;
    }
    vst->codec->extradata_size = VMD_HEADER_SIZE;
    vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);

    /* if sample rate is 0, assume no audio */
    vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]);
    if (vmd->sample_rate)
    {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        vmd->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_VMDAUDIO;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
        st->codec->sample_rate = vmd->sample_rate;
        st->codec->block_align = AV_RL16(&vmd->vmd_header[806]);
        if (st->codec->block_align & 0x8000)
        {
            st->codec->bits_per_coded_sample = 16;
Example #12
static int flic_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (avio_read(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_FLIC;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = AV_RL16(&header[0x08]);
    st->codec->height = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
    if (avio_read(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }

    avio_seek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10 assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file.
     */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = av_new_stream(s, 1);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id = CODEC_ID_PCM_U8;
        ast->codec->codec_tag = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels = 1;
        ast->codec->sample_fmt = AV_SAMPLE_FMT_U8;
        ast->codec->bit_rate = ast->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
        av_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
        av_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        avio_seek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata_size = 12;
        st->codec->extradata = av_malloc(12);
        memcpy(st->codec->extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        av_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        av_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
Example #13
static int flic_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    int packet_read = 0;
    unsigned int size;
    int magic;
    int ret = 0;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    while (!packet_read) {

        if ((ret = avio_read(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
            FLIC_PREAMBLE_SIZE) {
            ret = AVERROR(EIO);
            break;
        }

        size = AV_RL32(&preamble[0]);
        magic = AV_RL16(&preamble[4]);

        if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }
            pkt->stream_index = flic->video_stream_index;
            pkt->pts = flic->frame_number++;
            pkt->pos = avio_tell(pb);
            memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
            ret = avio_read(pb, pkt->data + FLIC_PREAMBLE_SIZE,
                size - FLIC_PREAMBLE_SIZE);
            if (ret != size - FLIC_PREAMBLE_SIZE) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }
            packet_read = 1;
        } else if (magic == FLIC_TFTD_CHUNK_AUDIO) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }

            /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
            avio_skip(pb, 10);

            pkt->stream_index = flic->audio_stream_index;
            pkt->pos = avio_tell(pb);
            ret = avio_read(pb, pkt->data, size);

            if (ret != size) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }

            packet_read = 1;
        } else {
            /* not interested in this chunk */
            avio_skip(pb, size - 6);
        }
    }

    return ret;
}
Example #14
static int
avs_decode_frame(AVCodecContext * avctx,
                 void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = avpkt->data + avpkt->size;
    int buf_size = avpkt->size;
    AvsContext *const avs = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame *const p = (AVFrame *) & avs->picture;
    const uint8_t *table, *vect;
    uint8_t *out;
    int i, j, x, y, stride, vect_w = 3, vect_h = 3;
    AvsVideoSubType sub_type;
    AvsBlockType type;
    GetBitContext change_map;

    if (avctx->reget_buffer(avctx, p)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }
    p->reference = 1;
    p->pict_type = AV_PICTURE_TYPE_P;
    p->key_frame = 0;

    out = avs->picture.data[0];
    stride = avs->picture.linesize[0];

    if (buf_end - buf < 4)
        return AVERROR_INVALIDDATA;
    sub_type = buf[0];
    type = buf[1];
    buf += 4;

    if (type == AVS_PALETTE) {
        int first, last;
        uint32_t *pal = (uint32_t *) avs->picture.data[1];

        first = AV_RL16(buf);
        last = first + AV_RL16(buf + 2);
        if (first >= 256 || last > 256 || buf_end - buf < 4 + 4 + 3 * (last - first))
            return AVERROR_INVALIDDATA;
        buf += 4;
        for (i=first; i<last; i++, buf+=3)
            pal[i] = (buf[0] << 18) | (buf[1] << 10) | (buf[2] << 2);

        sub_type = buf[0];
        type = buf[1];
        buf += 4;
    }

    if (type != AVS_VIDEO)
        return -1;

    switch (sub_type) {
    case AVS_I_FRAME:
        p->pict_type = AV_PICTURE_TYPE_I;
        p->key_frame = 1;
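        /* fall through: I-frames also use 3x3 vectors */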
    case AVS_P_FRAME_3X3:
        vect_w = 3;
        vect_h = 3;
        break;

    case AVS_P_FRAME_2X2:
        vect_w = 2;
        vect_h = 2;
        break;

    case AVS_P_FRAME_2X3:
        vect_w = 2;
        vect_h = 3;
        break;

    default:
      return -1;
    }

    if (buf_end - buf < 256 * vect_w * vect_h)
        return AVERROR_INVALIDDATA;
    table = buf + (256 * vect_w * vect_h);
    if (sub_type != AVS_I_FRAME) {
        int map_size = ((318 / vect_w + 7) / 8) * (198 / vect_h);
        if (buf_end - table < map_size)
            return AVERROR_INVALIDDATA;
        init_get_bits(&change_map, table, map_size * 8);
        table += map_size;
    }

    for (y=0; y<198; y+=vect_h) {
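        /* walk the fixed 318x198 frame in vect_w x vect_h blocks; for P-frames
         * a change-map bit flags each block that gets new data */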
        for (x=0; x<318; x+=vect_w) {
            if (sub_type == AVS_I_FRAME || get_bits1(&change_map)) {
                if (buf_end - table < 1)
                    return AVERROR_INVALIDDATA;
                vect = &buf[*table++ * (vect_w * vect_h)];
                for (j=0; j<vect_w; j++) {
                    out[(y + 0) * stride + x + j] = vect[(0 * vect_w) + j];
                    out[(y + 1) * stride + x + j] = vect[(1 * vect_w) + j];
                    if (vect_h == 3)
                        out[(y + 2) * stride + x + j] =
                            vect[(2 * vect_w) + j];
                }
            }
        }
        if (sub_type != AVS_I_FRAME)
            align_get_bits(&change_map);
    }

    *picture = *(AVFrame *) & avs->picture;
    *data_size = sizeof(AVPicture);

    return buf_size;
}
Example #15
static int roq_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    int64_t codebook_offset;

    while (!packet_read) {

        if (avio_feof(s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        chunk_size = ffio_limit(pb, chunk_size);

        switch (chunk_type) {

        case RoQ_INFO:
            if (roq->video_stream_index == -1) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 63, 1, roq->frame_rate);
                roq->video_stream_index = st->index;
                st->codecpar->codec_type   = AVMEDIA_TYPE_VIDEO;
                st->codecpar->codec_id     = AV_CODEC_ID_ROQ;
                st->codecpar->codec_tag    = 0;  /* no fourcc */

                if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
                    return AVERROR(EIO);
                st->codecpar->width  = roq->width  = AV_RL16(preamble);
                st->codecpar->height = roq->height = AV_RL16(preamble + 2);
                break;
            }
            /* don't care about this chunk anymore */
            avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE);
            break;

        case RoQ_QUAD_CODEBOOK:
            if (roq->video_stream_index < 0)
                return AVERROR_INVALIDDATA;
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            avio_skip(pb, codebook_size);
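            /* peek at the following VQ chunk's preamble so both the codebook
             * and the VQ data can be returned as one packet */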
            if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                    RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                         codebook_size;

            /* rewind */
            avio_seek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->pts = roq->video_pts++;

            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
            if (roq->audio_stream_index == -1) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
                roq->audio_stream_index = st->index;
                st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codecpar->codec_id = AV_CODEC_ID_ROQ_DPCM;
                st->codecpar->codec_tag = 0;  /* no tag */
                if (chunk_type == RoQ_SOUND_STEREO) {
                    st->codecpar->channels       = 2;
                    st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
                } else {
                    st->codecpar->channels       = 1;
                    st->codecpar->channel_layout = AV_CH_LAYOUT_MONO;
                }
                roq->audio_channels    = st->codecpar->channels;
                st->codecpar->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
                st->codecpar->bits_per_coded_sample = 16;
                st->codecpar->bit_rate = st->codecpar->channels * st->codecpar->sample_rate *
                                         st->codecpar->bits_per_coded_sample;
                st->codecpar->block_align = st->codecpar->channels * st->codecpar->bits_per_coded_sample;
            }
        case RoQ_QUAD_VQ:
            if (chunk_type == RoQ_QUAD_VQ) {
                if (roq->video_stream_index < 0)
                    return AVERROR_INVALIDDATA;
            }

            /* load up the packet */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->pts = roq->video_pts++;
            } else {
                pkt->stream_index = roq->audio_stream_index;
                pkt->pts = roq->audio_frame_count;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= avio_tell(pb);
            ret = avio_read(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                            chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
        }
    }

    return ret;
}
Example #16
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    NuvContext *c = avctx->priv_data;
    AVFrame *picture = data;
    int orig_size = buf_size;
    int keyframe;
    int result;
    enum {NUV_UNCOMPRESSED = '0', NUV_RTJPEG = '1',
          NUV_RTJPEG_IN_LZO = '2', NUV_LZO = '3',
          NUV_BLACK = 'N', NUV_COPY_LAST = 'L'} comptype;

    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return -1;
    }

    // codec data (rtjpeg quant tables)
    if (buf[0] == 'D' && buf[1] == 'R') {
        int ret;
        // skip rest of the frameheader.
        buf = &buf[12];
        buf_size -= 12;
        ret = get_quant(avctx, c, buf, buf_size);
        if (ret < 0)
            return ret;
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
        return orig_size;
    }

    if (buf[0] != 'V' || buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "not a nuv video frame\n");
        return -1;
    }
    comptype = buf[1];
    switch (comptype) {
        case NUV_RTJPEG_IN_LZO:
        case NUV_RTJPEG:
            keyframe = !buf[2]; break;
        case NUV_COPY_LAST:
            keyframe = 0; break;
        default:
            keyframe = 1; break;
    }
    // skip rest of the frameheader.
    buf = &buf[12];
    buf_size -= 12;
    if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) {
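        // LZO-wrapped frames are decompressed into decomp_buf before any further parsing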
        int outlen = c->decomp_size, inlen = buf_size;
        if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen))
            av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
        buf = c->decomp_buf;
        buf_size = c->decomp_size;
    }
    if (c->codec_frameheader) {
        int w, h, q;
        if (buf_size < 12) {
            av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
            return -1;
        }
        w = AV_RL16(&buf[6]);
        h = AV_RL16(&buf[8]);
        q = buf[10];
        if (!codec_reinit(avctx, w, h, q))
            return -1;
        buf = &buf[12];
        buf_size -= 12;
    }

    if (keyframe && c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);
    c->pic.reference = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
                          FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    result = avctx->reget_buffer(avctx, &c->pic);
    if (result < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    c->pic.pict_type = keyframe ? FF_I_TYPE : FF_P_TYPE;
    c->pic.key_frame = keyframe;
    // decompress/copy/whatever data
    switch (comptype) {
        case NUV_LZO:
        case NUV_UNCOMPRESSED: {
            int height = c->height;
            if (buf_size < c->width * height * 3 / 2) {
                av_log(avctx, AV_LOG_ERROR, "uncompressed frame too short\n");
                height = buf_size / c->width / 3 * 2;
            }
            copy_frame(&c->pic, buf, c->width, height);
            break;
        }
        case NUV_RTJPEG_IN_LZO:
        case NUV_RTJPEG: {
            rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size);
            break;
        }
        case NUV_BLACK: {
            memset(c->pic.data[0], 0, c->width * c->height);
            memset(c->pic.data[1], 128, c->width * c->height / 4);
            memset(c->pic.data[2], 128, c->width * c->height / 4);
            break;
        }
        case NUV_COPY_LAST: {
            /* nothing more to do here */
            break;
        }
        default:
            av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
            return -1;
    }

    *picture = c->pic;
    *data_size = sizeof(AVFrame);
    return orig_size;
}
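
A note on the LZO branch in the frame decoder above: av_lzo1x_decode() reports
the number of unused output bytes back through *outlen, so the size of the
decompressed payload has to be derived from the destination capacity. A minimal
sketch of that calculation, with hypothetical names (decompress_len, dst_cap),
assuming libavutil's LZO API:

#include <stdint.h>
#include "libavutil/lzo.h"

/* Decompress src into dst and return the number of bytes actually produced,
 * or -1 on corrupted input; dst_cap is the capacity of dst. */
static int decompress_len(uint8_t *dst, int dst_cap,
                          const uint8_t *src, int src_len)
{
    int outlen = dst_cap, inlen = src_len;
    if (av_lzo1x_decode(dst, &outlen, src, &inlen))
        return -1;              /* error flags set: input was damaged */
    return dst_cap - outlen;    /* outlen = unused output space */
}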
Пример #17
0
static int str_read_packet(AVFormatContext *s,
                           AVPacket *ret_pkt)
{
    AVIOContext *pb = s->pb;
    StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    AVPacket *pkt;
    AVStream *st;

    while (1) {

        if (avio_read(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR(EIO);

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            {

                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size = AV_RL32(&sector[0x24]);

                if(!(   frame_size>=0
                     && current_sector < sector_count
                     && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
                    av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n", current_sector, sector_count, frame_size);
                    break;
                }

                if(str->channels[channel].video_stream_index < 0){
                    /* allocate a new AVStream */
                    st = avformat_new_stream(s, NULL);
                    if (!st)
                        return AVERROR(ENOMEM);
                    avpriv_set_pts_info(st, 64, 1, 15);

                    str->channels[channel].video_stream_index = st->index;

                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_id   = CODEC_ID_MDEC;
                    st->codec->codec_tag  = 0;  /* no fourcc */
                    st->codec->width      = AV_RL16(&sector[0x28]);
                    st->codec->height     = AV_RL16(&sector[0x2A]);
                }

                /* if this is the first sector of the frame, allocate a pkt */
                pkt = &str->channels[channel].tmp_pkt;

                if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){
                    if(pkt->data)
                        av_log(s, AV_LOG_ERROR, "mismatching sector_count\n");
                    av_free_packet(pkt);
                    if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE))
                        return AVERROR(EIO);

                    pkt->pos= avio_tell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
                }

                memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
                       sector + VIDEO_DATA_HEADER_SIZE,
                       VIDEO_DATA_CHUNK_SIZE);

                if (current_sector == sector_count-1) {
                    pkt->size= frame_size;
                    *ret_pkt = *pkt;
                    pkt->data= NULL;
                    pkt->size= -1;
                    return 0;
                }

            }
            break;

        case CDXA_TYPE_AUDIO:
            if(str->channels[channel].audio_stream_index < 0){
                int fmt = sector[0x13];
                /* allocate a new AVStream */
                st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);

                str->channels[channel].audio_stream_index = st->index;

                st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id    = CODEC_ID_ADPCM_XA;
                st->codec->codec_tag   = 0;  /* no fourcc */
                st->codec->channels    = (fmt&1)?2:1;
                st->codec->sample_rate = (fmt&4)?18900:37800;
            //    st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;

                avpriv_set_pts_info(st, 64, 128, st->codec->sample_rate);
            }
            pkt = ret_pkt;
            if (av_new_packet(pkt, 2304))
                return AVERROR(EIO);
            memcpy(pkt->data,sector+24,2304);

            pkt->stream_index =
                str->channels[channel].audio_stream_index;
            return 0;
        default:
            av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]);
            /* drop the sector and move on */
            break;
        }

        if (url_feof(pb))
            return AVERROR(EIO);
    }
}
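
For orientation, the sector offsets that str_read_packet() pokes at are
collected below. This is a sketch inferred purely from the reads in the
function (and the matching str_read_header variants later in this listing),
not a complete CD-XA sector layout, and the names are illustrative:

/* Offsets into one RAW_CD_SECTOR_SIZE (2352-byte) raw sector as used above. */
enum {
    STR_SEC_CHANNEL    = 0x11, /* file/channel number, must be < 32          */
    STR_SEC_SUBMODE    = 0x12, /* CDXA_TYPE_* selected via CDXA_TYPE_MASK    */
    STR_SEC_CODING     = 0x13, /* audio coding: bit 0 stereo, bit 2 18.9 kHz */
    STR_SEC_CUR_SECTOR = 0x1C, /* LE16: index of this sector within a frame  */
    STR_SEC_SECTOR_CNT = 0x1E, /* LE16: sectors per video frame              */
    STR_SEC_FRAME_SIZE = 0x24, /* LE32: video frame size in bytes            */
    STR_SEC_WIDTH      = 0x28, /* LE16 */
    STR_SEC_HEIGHT     = 0x2A, /* LE16 */
};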
Пример #18
0
static int rtmp_packet_read_one_chunk(URLContext *h, RTMPPacket *p,
                                      int chunk_size, RTMPPacket **prev_pkt_ptr,
                                      int *nb_prev_pkt, uint8_t hdr)
{

    uint8_t buf[16];
    int channel_id, timestamp, size;
    uint32_t ts_field; // non-extended timestamp or delta field
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int written = 0;
    int ret, toread;
    RTMPPacket *prev_pkt;

    written++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        written += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt,
                                         channel_id)) < 0)
        return ret;
    prev_pkt = *prev_pkt_ptr;
    size  = prev_pkt[channel_id].size;
    type  = prev_pkt[channel_id].type;
    extra = prev_pkt[channel_id].extra;

    hdr >>= 6; // header size indicator
    if (hdr == RTMP_PS_ONEBYTE) {
        ts_field = prev_pkt[channel_id].ts_field;
    } else {
        if (ffurl_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        written += 3;
        ts_field = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (ffurl_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            written += 3;
            size = AV_RB24(buf);
            if (ffurl_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            written++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                written += 4;
                extra = AV_RL32(buf);
            }
        }
    }
    if (ts_field == 0xFFFFFF) {
        if (ffurl_read_complete(h, buf, 4) != 4)
            return AVERROR(EIO);
        timestamp = AV_RB32(buf);
    } else {
        timestamp = ts_field;
    }
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (!prev_pkt[channel_id].read) {
        if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp,
                                         size)) < 0)
            return ret;
        p->read = written;
        p->offset = 0;
        prev_pkt[channel_id].ts_field   = ts_field;
        prev_pkt[channel_id].timestamp  = timestamp;
    } else {
        // previous packet in this channel hasn't completed reading
        RTMPPacket *prev = &prev_pkt[channel_id];
        p->data          = prev->data;
        p->size          = prev->size;
        p->channel_id    = prev->channel_id;
        p->type          = prev->type;
        p->ts_field      = prev->ts_field;
        p->extra         = prev->extra;
        p->offset        = prev->offset;
        p->read          = prev->read + written;
        p->timestamp     = prev->timestamp;
        prev->data       = NULL;
    }
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].size       = size;
    prev_pkt[channel_id].extra      = extra;
    size = size - p->offset;

    toread = FFMIN(size, chunk_size);
    if (ffurl_read_complete(h, p->data + p->offset, toread) != toread) {
        ff_rtmp_packet_destroy(p);
        return AVERROR(EIO);
    }
    size      -= toread;
    p->read   += toread;
    p->offset += toread;

    if (size > 0) {
       RTMPPacket *prev = &prev_pkt[channel_id];
       prev->data = p->data;
       prev->read = p->read;
       prev->offset = p->offset;
       return AVERROR(EAGAIN);
    }

    prev_pkt[channel_id].read = 0; // read complete; reset if needed
    return p->read;
}
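
The byte passed in as hdr is the RTMP chunk basic header: its top two bits
select the header format and the low six bits carry the chunk stream id, with
ids 0 and 1 signalling 1- and 2-byte extended ids offset by 64, which is
exactly what the channel_id < 2 branch above reconstructs via AV_RL16. A
standalone sketch of that decode over an already-buffered byte sequence
(hypothetical helper):

/* Decode an RTMP chunk stream id from p; *consumed receives the number of
 * basic-header bytes used (1, 2 or 3). */
static int rtmp_chunk_stream_id(const uint8_t *p, int *consumed)
{
    int id = p[0] & 0x3F;
    if (id == 0) {              /* 2-byte form: next byte + 64            */
        *consumed = 2;
        return p[1] + 64;
    } else if (id == 1) {       /* 3-byte form: little-endian 16-bit + 64 */
        *consumed = 3;
        return p[1] + (p[2] << 8) + 64;
    }
    *consumed = 1;
    return id;
}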
Пример #19
0
static int ipmovie_read_header(AVFormatContext *s,
                               AVFormatParameters *ap)
{
    IPMVEContext *ipmovie = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVPacket pkt;
    AVStream *st;
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;

    /* initialize private context members */
    ipmovie->video_pts = ipmovie->audio_frame_count = 0;
    ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
                                      ipmovie->decode_map_chunk_offset = 0;

    /* on the first read, this will position the stream at the first chunk */
    ipmovie->next_chunk_offset = IPMOVIE_SIGNATURE_SIZE + 6;

    /* process the first chunk which should be CHUNK_INIT_VIDEO */
    if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
        return AVERROR_INVALIDDATA;

    /* peek ahead to the next chunk-- if it is an init audio chunk, process
     * it; if it is the first video chunk, this is a silent file */
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    chunk_type = AV_RL16(&chunk_preamble[2]);
    url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);

    if (chunk_type == CHUNK_VIDEO)
        ipmovie->audio_type = 0;  /* no audio */
    else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
        return AVERROR_INVALIDDATA;

    /* initialize the stream decoders */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, 90000);
    ipmovie->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_INTERPLAY_VIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = ipmovie->video_width;
    st->codec->height = ipmovie->video_height;

    /* palette considerations */
    st->codec->palctrl = &ipmovie->palette_control;

    if (ipmovie->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, 90000);
        ipmovie->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = ipmovie->audio_type;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = ipmovie->audio_channels;
        st->codec->sample_rate = ipmovie->audio_sample_rate;
        st->codec->bits_per_sample = ipmovie->audio_bits;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                              st->codec->bits_per_sample;
        if (st->codec->codec_id == CODEC_ID_INTERPLAY_DPCM)
            st->codec->bit_rate /= 2;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
    }

    return 0;
}
Пример #20
0
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
                        int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t hdr, t, buf[16];
    int channel_id, timestamp, data_size, offset = 0;
    uint32_t extra = 0;
    uint8_t type;

    if (url_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (url_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        channel_id = AV_RL16(buf) + 64;
    }
    data_size = prev_pkt[channel_id].data_size;
    type      = prev_pkt[channel_id].type;
    extra     = prev_pkt[channel_id].extra;

    hdr >>= 6;
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].timestamp;
    } else {
        if (url_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (url_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            data_size = AV_RB24(buf);
            if (url_read_complete(h, &type, 1) != 1)
                return AVERROR(EIO);
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (url_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                extra = AV_RL32(buf);
            }
        }
    }
    if (ff_rtmp_packet_create(p, channel_id, type, timestamp, data_size))
        return -1;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].data_size  = data_size;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (data_size > 0) {
        int toread = FFMIN(data_size, chunk_size);
        if (url_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        data_size -= chunk_size;
        offset    += chunk_size;
        if (data_size > 0) {
            url_read_complete(h, &t, 1); //marker
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return 0;
}
Пример #21
0
int read_asf_header(demuxer_t *demuxer,struct asf_priv* asf){
  int hdr_len = asf->header.objh.size - sizeof(asf->header);
  int hdr_skip = 0;
  char *hdr = NULL;
  char guid_buffer[16];
  int pos, start = stream_tell(demuxer->stream);
  uint32_t* streams = NULL;
  int audio_streams=0;
  int video_streams=0;
  uint16_t stream_count=0;
  int best_video = -1;
  int best_audio = -1;
  uint64_t data_len;
  ASF_stream_header_t *streamh;
  uint8_t *buffer;
  int audio_pos=0;

  if(hdr_len < 0) {
    mp_msg(MSGT_HEADER, MSGL_FATAL, "Header size is too small.\n");
    return 0;
  }

  if (hdr_len > 1024 * 1024) {
    mp_msg(MSGT_HEADER, MSGL_ERR, MSGTR_MPDEMUX_ASFHDR_HeaderSizeOver1MB,
			hdr_len);
    hdr_skip = hdr_len - 1024 * 1024;
    hdr_len = 1024 * 1024;
  }
  hdr = malloc(hdr_len);
  if (!hdr) {
    mp_msg(MSGT_HEADER, MSGL_FATAL, MSGTR_MPDEMUX_ASFHDR_HeaderMallocFailed,
            hdr_len);
    return 0;
  }
  stream_read(demuxer->stream, hdr, hdr_len);
  if (hdr_skip)
    stream_skip(demuxer->stream, hdr_skip);
  if (stream_eof(demuxer->stream)) {
    mp_msg(MSGT_HEADER, MSGL_FATAL, MSGTR_MPDEMUX_ASFHDR_EOFWhileReadingHeader);
    goto err_out;
  }

  if (is_drm(hdr, hdr_len))
    mp_msg(MSGT_HEADER, MSGL_FATAL, MSGTR_MPDEMUX_ASFHDR_DRMProtected);

  if ((pos = find_asf_guid(hdr, asf_ext_stream_audio, 0, hdr_len)) >= 0)
  {
    // Special case: found GUID for dvr-ms audio.
    // Now skip back to associated stream header.
    int sh_pos=0;

    sh_pos = find_backwards_asf_guid(hdr, asf_stream_header_guid, pos);

    if (sh_pos > 0) {
      sh_audio_t *sh_audio;

       mp_msg(MSGT_HEADER, MSGL_V, "read_asf_header found dvr-ms audio stream header pos=%d\n", sh_pos);
      // found audio stream header - following code reads header and
      // initializes audio stream.
      audio_pos = pos - 16 - 8;
      streamh = (ASF_stream_header_t *)&hdr[sh_pos];
      le2me_ASF_stream_header_t(streamh);
      audio_pos += 64; //16+16+4+4+4+16+4;
      buffer = &hdr[audio_pos];
      sh_audio=new_sh_audio(demuxer,streamh->stream_no & 0x7F, NULL);
      sh_audio->needs_parsing = 1;
      mp_msg(MSGT_DEMUX, MSGL_INFO, MSGTR_AudioID, "asfheader", streamh->stream_no & 0x7F);
      ++audio_streams;
      if (!asf_init_audio_stream(demuxer, asf, sh_audio, streamh, &audio_pos, &buffer, hdr, hdr_len))
        goto len_err_out;
      if (!get_ext_stream_properties(hdr, hdr_len, streamh->stream_no, asf, 0))
        goto len_err_out;
    }
  }
  // find stream headers
  // only reset pos if we didn't find a dvr-ms audio stream;
  // if we did find one, we want to avoid reading its header twice
  if (audio_pos == 0)
    pos = 0;

  while ((pos = find_asf_guid(hdr, asf_stream_header_guid, pos, hdr_len)) >= 0)
  {
    streamh = (ASF_stream_header_t *)&hdr[pos];
    pos += sizeof(ASF_stream_header_t);
    if (pos > hdr_len) goto len_err_out;
    le2me_ASF_stream_header_t(streamh);
    mp_msg(MSGT_HEADER, MSGL_V, "stream type: %s\n",
            asf_chunk_type(streamh->type));
    mp_msg(MSGT_HEADER, MSGL_V, "stream concealment: %s\n",
            asf_chunk_type(streamh->concealment));
    mp_msg(MSGT_HEADER, MSGL_V, "type: %d bytes,  stream: %d bytes  ID: %d\n",
            (int)streamh->type_size, (int)streamh->stream_size,
            (int)streamh->stream_no);
    mp_msg(MSGT_HEADER, MSGL_V, "unk1: %lX  unk2: %X\n",
            (unsigned long)streamh->unk1, (unsigned int)streamh->unk2);
    mp_msg(MSGT_HEADER, MSGL_V, "FILEPOS=0x%X\n", pos + start);
    // type-specific data:
    buffer = &hdr[pos];
    pos += streamh->type_size;
    if (pos > hdr_len) goto len_err_out;
    switch(ASF_LOAD_GUID_PREFIX(streamh->type)){
      case ASF_GUID_PREFIX_audio_stream: {
        sh_audio_t* sh_audio=new_sh_audio(demuxer,streamh->stream_no & 0x7F, NULL);
        mp_msg(MSGT_DEMUX, MSGL_INFO, MSGTR_AudioID, "asfheader", streamh->stream_no & 0x7F);
        ++audio_streams;
        if (!asf_init_audio_stream(demuxer, asf, sh_audio, streamh, &pos, &buffer, hdr, hdr_len))
          goto len_err_out;
	//if(demuxer->audio->id==-1) demuxer->audio->id=streamh.stream_no & 0x7F;
        break;
        }
      case ASF_GUID_PREFIX_video_stream: {
        unsigned int len;
        float asp_ratio;
        sh_video_t* sh_video=new_sh_video(demuxer,streamh->stream_no & 0x7F);
        mp_msg(MSGT_DEMUX, MSGL_INFO, MSGTR_VideoID, "asfheader", streamh->stream_no & 0x7F);
        len=streamh->type_size-(4+4+1+2);
	++video_streams;
//        sh_video->bih=malloc(chunksize); memset(sh_video->bih,0,chunksize);
        sh_video->bih=calloc((len<sizeof(*sh_video->bih))?sizeof(*sh_video->bih):len,1);
        memcpy(sh_video->bih,&buffer[4+4+1+2],len);
	le2me_BITMAPINFOHEADER(sh_video->bih);
	if (sh_video->bih->biSize > len && sh_video->bih->biSize > sizeof(*sh_video->bih))
		sh_video->bih->biSize = len;
        if (sh_video->bih->biCompression == mmioFOURCC('D', 'V', 'R', ' ')) {
          //mp_msg(MSGT_DEMUXER, MSGL_WARN, MSGTR_MPDEMUX_ASFHDR_DVRWantsLibavformat);
          //sh_video->fps=(float)sh_video->video.dwRate/(float)sh_video->video.dwScale;
          //sh_video->frametime=(float)sh_video->video.dwScale/(float)sh_video->video.dwRate;
          asf->asf_frame_state=-1;
          asf->asf_frame_start_found=0;
          asf->asf_is_dvr_ms=1;
          asf->dvr_last_vid_pts=0.0;
        } else asf->asf_is_dvr_ms=0;
        if (!get_ext_stream_properties(hdr, hdr_len, streamh->stream_no, asf, 1))
            goto len_err_out;
        if (get_meta(hdr, hdr_len, streamh->stream_no, &asp_ratio)) {
          sh_video->original_aspect = asp_ratio * sh_video->bih->biWidth /
            sh_video->bih->biHeight;
        }
        sh_video->i_bps = asf->bps;

        if( mp_msg_test(MSGT_DEMUX,MSGL_V) ) print_video_header(sh_video->bih, MSGL_V);
        //asf_video_id=streamh.stream_no & 0x7F;
	//if(demuxer->video->id==-1) demuxer->video->id=streamh.stream_no & 0x7F;
        break;
        }
      }
      // stream-specific data:
      // stream_read(demuxer->stream,(char*) buffer,streamh.stream_size);
  }

  // find file header
  pos = find_asf_guid(hdr, asf_file_header_guid, 0, hdr_len);
  if (pos >= 0) {
      ASF_file_header_t *fileh = (ASF_file_header_t *)&hdr[pos];
      pos += sizeof(ASF_file_header_t);
      if (pos > hdr_len) goto len_err_out;
      le2me_ASF_file_header_t(fileh);
      mp_msg(MSGT_HEADER, MSGL_V, "ASF: packets: %d  flags: %d  "
              "max_packet_size: %d  min_packet_size: %d  max_bitrate: %d  "
              "preroll: %d\n",
              (int)fileh->num_packets, (int)fileh->flags,
              (int)fileh->min_packet_size, (int)fileh->max_packet_size,
              (int)fileh->max_bitrate, (int)fileh->preroll);
      asf->packetsize=fileh->max_packet_size;
      asf->packet=malloc(asf->packetsize); // !!!
      asf->packetrate=fileh->max_bitrate/8.0/(double)asf->packetsize;
      asf->movielength=FFMAX(0.0, (fileh->play_duration / 10000.0 - fileh->preroll) / 1000.0);
  }

  // find content header
  pos = find_asf_guid(hdr, asf_content_desc_guid, 0, hdr_len);
  if (pos >= 0) {
        ASF_content_description_t *contenth = (ASF_content_description_t *)&hdr[pos];
        char *string=NULL;
        uint16_t* wstring = NULL;
        uint16_t len;
        pos += sizeof(ASF_content_description_t);
        if (pos > hdr_len) goto len_err_out;
	le2me_ASF_content_description_t(contenth);
	mp_msg(MSGT_HEADER,MSGL_V,"\n");
        // extract the title
        if((len = contenth->title_size) != 0) {
          wstring = (uint16_t*)&hdr[pos];
          pos += len;
          if (pos > hdr_len) goto len_err_out;
          if ((string = get_ucs2str(wstring, len))) {
            mp_msg(MSGT_HEADER,MSGL_V," Title: %s\n", string);
            demux_info_add(demuxer, "title", string);
            free(string);
          }
        }
        // extract the author
        if((len = contenth->author_size) != 0) {
          wstring = (uint16_t*)&hdr[pos];
          pos += len;
          if (pos > hdr_len) goto len_err_out;
          if ((string = get_ucs2str(wstring, len))) {
            mp_msg(MSGT_HEADER,MSGL_V," Author: %s\n", string);
            demux_info_add(demuxer, "author", string);
            free(string);
          }
        }
        // extract the copyright
        if((len = contenth->copyright_size) != 0) {
          wstring = (uint16_t*)&hdr[pos];
          pos += len;
          if (pos > hdr_len) goto len_err_out;
          if ((string = get_ucs2str(wstring, len))) {
            mp_msg(MSGT_HEADER,MSGL_V," Copyright: %s\n", string);
            demux_info_add(demuxer, "copyright", string);
            free(string);
          }
        }
        // extract the comment
        if((len = contenth->comment_size) != 0) {
          wstring = (uint16_t*)&hdr[pos];
          pos += len;
          if (pos > hdr_len) goto len_err_out;
          if ((string = get_ucs2str(wstring, len))) {
            mp_msg(MSGT_HEADER,MSGL_V," Comment: %s\n", string);
            demux_info_add(demuxer, "comments", string);
            free(string);
          }
        }
        // extract the rating
        if((len = contenth->rating_size) != 0) {
          wstring = (uint16_t*)&hdr[pos];
          pos += len;
          if (pos > hdr_len) goto len_err_out;
          if ((string = get_ucs2str(wstring, len))) {
            mp_msg(MSGT_HEADER,MSGL_V," Rating: %s\n", string);
            free(string);
          }
        }
	mp_msg(MSGT_HEADER,MSGL_V,"\n");
  }

  // find the stream bitrate properties (stream group) header
  pos = find_asf_guid(hdr, asf_stream_group_guid, 0, hdr_len);
  if (pos >= 0) {
        int max_streams = (hdr_len - pos - 2) / 6;
        uint16_t stream_id, i;
        uint32_t max_bitrate;
        char *ptr = &hdr[pos];
        mp_msg(MSGT_HEADER,MSGL_V,"============ ASF Stream group == START ===\n");
        if(max_streams <= 0) goto len_err_out;
        stream_count = AV_RL16(ptr);
        ptr += sizeof(uint16_t);
        if(stream_count > max_streams) stream_count = max_streams;
        if(stream_count > 0)
              streams = malloc(2*stream_count*sizeof(uint32_t));
        mp_msg(MSGT_HEADER,MSGL_V," stream count=[0x%x][%u]\n", stream_count, stream_count );
        for( i=0 ; i<stream_count ; i++ ) {
          stream_id = AV_RL16(ptr);
          ptr += sizeof(uint16_t);
          max_bitrate = AV_RL32(ptr);
          ptr += sizeof(uint32_t);
          mp_msg(MSGT_HEADER,MSGL_V,"   stream id=[0x%x][%u]\n", stream_id, stream_id );
          mp_msg(MSGT_HEADER,MSGL_V,"   max bitrate=[0x%x][%u]\n", max_bitrate, max_bitrate );
          streams[2*i] = stream_id;
          streams[2*i+1] = max_bitrate;
        }
        mp_msg(MSGT_HEADER,MSGL_V,"============ ASF Stream group == END ===\n");
  }
  free(hdr);
  hdr = NULL;
  start = stream_tell(demuxer->stream); // start of first data chunk
  stream_read(demuxer->stream, guid_buffer, 16);
  if (memcmp(guid_buffer, asf_data_chunk_guid, 16) != 0) {
    mp_msg(MSGT_HEADER, MSGL_FATAL, MSGTR_MPDEMUX_ASFHDR_NoDataChunkAfterHeader);
    free(streams);
    streams = NULL;
    return 0;
  }
  // read length of chunk
  data_len = stream_read_qword_le(demuxer->stream);
  demuxer->movi_start = stream_tell(demuxer->stream) + 26;
  demuxer->movi_end = start + data_len;
  mp_msg(MSGT_HEADER, MSGL_V, "Found movie at 0x%X - 0x%X\n",
          (int)demuxer->movi_start, (int)demuxer->movi_end);

if(streams) {
  // stream selection is done in the network code, it shouldn't be done here
  // as the servers often do not care about what we requested.
#if 0
  uint32_t vr = 0, ar = 0,i;
#ifdef CONFIG_NETWORKING
  if( demuxer->stream->streaming_ctrl!=NULL ) {
	  if( demuxer->stream->streaming_ctrl->bandwidth!=0 && demuxer->stream->streaming_ctrl->data!=NULL ) {
		  best_audio = ((asf_http_streaming_ctrl_t*)demuxer->stream->streaming_ctrl->data)->audio_id;
		  best_video = ((asf_http_streaming_ctrl_t*)demuxer->stream->streaming_ctrl->data)->video_id;
	  }
  } else
#endif
  for(i = 0; i < stream_count; i++) {
    uint32_t id = streams[2*i];
    uint32_t rate = streams[2*i+1];
    if(demuxer->v_streams[id] && rate > vr) {
      vr = rate;
      best_video = id;
    } else if(demuxer->a_streams[id] && rate > ar) {
      ar = rate;
      best_audio = id;
    }
  }
#endif
  free(streams);
  streams = NULL;
}

mp_msg(MSGT_HEADER,MSGL_V,"ASF: %d audio and %d video streams found\n",audio_streams,video_streams);
if(!audio_streams) demuxer->audio->id=-2;  // nosound
else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
if(!video_streams){
    if(!audio_streams){
	mp_msg(MSGT_HEADER,MSGL_ERR,MSGTR_MPDEMUX_ASFHDR_AudioVideoHeaderNotFound);
	return 0;
    }
    demuxer->video->id=-2; // audio-only
} else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

#if 0
if( mp_msg_test(MSGT_HEADER,MSGL_V) ){
    printf("ASF duration: %d\n",(int)fileh.duration);
    printf("ASF start pts: %d\n",(int)fileh.start_timestamp);
    printf("ASF end pts: %d\n",(int)fileh.end_timestamp);
}
#endif

return 1;

len_err_out:
  mp_msg(MSGT_HEADER, MSGL_FATAL, MSGTR_MPDEMUX_ASFHDR_InvalidLengthInASFHeader);
err_out:
  free(hdr);
  free(streams);
  return 0;
}
Пример #22
0
static int str_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    ByteIOContext *pb = &s->pb;
    StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
    AVStream *st;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int start;
    int i;
    int channel;

    /* initialize context members */
    str->pts = 0;
    str->audio_channel = -1;  /* assume no audio or video channel yet */
    str->video_channel = -1;
    str->video_chunk = NULL;


    /* skip over any RIFF header */
    if (get_buffer(pb, sector, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
        return AVERROR_IO;
    if (AV_RL32(&sector[0]) == RIFF_TAG)
        start = RIFF_HEADER_SIZE;
    else
        start = 0;

    url_fseek(pb, start, SEEK_SET);

    /* check through the first 32 sectors for individual channels */
    for (i = 0; i < 32; i++) {
        if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR_IO;

//printf("%02x %02x %02x %02x\n",sector[0x10],sector[0x11],sector[0x12],sector[0x13]);

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            /* check if this channel gets to be the dominant video channel */
            if (str->video_channel == -1) {
                /* qualify the magic number */
                if (AV_RL32(&sector[0x18]) != STR_MAGIC)
                    break;
                str->video_channel = channel;
                str->channels[channel].type = STR_VIDEO;
                str->channels[channel].width = AV_RL16(&sector[0x28]);
                str->channels[channel].height = AV_RL16(&sector[0x2A]);

                /* allocate a new AVStream */
                st = av_new_stream(s, 0);
                if (!st)
                    return AVERROR_NOMEM;
                av_set_pts_info(st, 64, 1, 15);

                str->channels[channel].video_stream_index = st->index;

                st->codec->codec_type = CODEC_TYPE_VIDEO;
                st->codec->codec_id = CODEC_ID_MDEC;
                st->codec->codec_tag = 0;  /* no fourcc */
                st->codec->width = str->channels[channel].width;
                st->codec->height = str->channels[channel].height;
            }
            break;

        case CDXA_TYPE_AUDIO:
            /* check if this channel gets to be the dominant audio channel */
            if (str->audio_channel == -1) {
                int fmt;
                str->audio_channel = channel;
                str->channels[channel].type = STR_AUDIO;
                str->channels[channel].channels =
                    (sector[0x13] & 0x01) ? 2 : 1;
                str->channels[channel].sample_rate =
                    (sector[0x13] & 0x04) ? 18900 : 37800;
                str->channels[channel].bits =
                    (sector[0x13] & 0x10) ? 8 : 4;

                /* allocate a new AVStream */
                st = av_new_stream(s, 0);
                if (!st)
                    return AVERROR_NOMEM;
                av_set_pts_info(st, 64, 128, str->channels[channel].sample_rate);

                str->channels[channel].audio_stream_index = st->index;

                fmt = sector[0x13];
                st->codec->codec_type = CODEC_TYPE_AUDIO;
                st->codec->codec_id = CODEC_ID_ADPCM_XA;
                st->codec->codec_tag = 0;  /* no fourcc */
                st->codec->channels = (fmt&1)?2:1;
                st->codec->sample_rate = (fmt&4)?18900:37800;
            //    st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;
            }
            break;

        default:
            /* ignore */
            break;
        }
    }

if (str->video_channel != -1)
  av_log (s, AV_LOG_DEBUG, " video channel = %d, %d x %d %d\n", str->video_channel,
    str->channels[str->video_channel].width,
    str->channels[str->video_channel].height,str->channels[str->video_channel].video_stream_index);
if (str->audio_channel != -1)
   av_log (s, AV_LOG_DEBUG, " audio channel = %d, %d Hz, %d channels, %d bits/sample %d\n",
    str->audio_channel,
    str->channels[str->audio_channel].sample_rate,
    str->channels[str->audio_channel].channels,
    str->channels[str->audio_channel].bits,str->channels[str->audio_channel].audio_stream_index);

    /* back to the start */
    url_fseek(pb, start, SEEK_SET);

    return 0;
}
Пример #23
0
static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    PCXContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    int compressed, xmin, ymin, xmax, ymax;
    unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x,
                 bytes_per_scanline;
    uint8_t *ptr;
    uint8_t const *bufstart = buf;

    if (buf[0] != 0x0a || buf[1] > 5) {
        av_log(avctx, AV_LOG_ERROR, "this is not PCX encoded data\n");
        return -1;
    }

    compressed = buf[2];
    xmin = AV_RL16(buf+ 4);
    ymin = AV_RL16(buf+ 6);
    xmax = AV_RL16(buf+ 8);
    ymax = AV_RL16(buf+10);

    if (xmax < xmin || ymax < ymin) {
        av_log(avctx, AV_LOG_ERROR, "invalid image dimensions\n");
        return -1;
    }

    w = xmax - xmin + 1;
    h = ymax - ymin + 1;

    bits_per_pixel     = buf[3];
    bytes_per_line     = AV_RL16(buf+66);
    nplanes            = buf[65];
    bytes_per_scanline = nplanes * bytes_per_line;

    if (bytes_per_scanline < w * bits_per_pixel * nplanes / 8) {
        av_log(avctx, AV_LOG_ERROR, "PCX data is corrupted\n");
        return -1;
    }

    switch ((nplanes<<8) + bits_per_pixel) {
        case 0x0308:
            avctx->pix_fmt = PIX_FMT_RGB24;
            break;
        case 0x0108:
        case 0x0104:
        case 0x0102:
        case 0x0101:
        case 0x0401:
        case 0x0301:
        case 0x0201:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid PCX file\n");
            return -1;
    }

    buf += 128;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    ptr    = p->data[0];
    stride = p->linesize[0];

    if (nplanes == 3 && bits_per_pixel == 8) {
#ifndef _MSC_VER
        uint8_t scanline[bytes_per_scanline];
#else
        uint8_t *scanline = av_malloc(bytes_per_scanline);
#endif

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++) {
                ptr[3*x  ] = scanline[x                    ];
                ptr[3*x+1] = scanline[x+ bytes_per_line    ];
                ptr[3*x+2] = scanline[x+(bytes_per_line<<1)];
            }

            ptr += stride;
        }
#ifdef _MSC_VER
        av_free(scanline);
#endif

    } else if (nplanes == 1 && bits_per_pixel == 8) {
#ifndef _MSC_VER
        uint8_t scanline[bytes_per_scanline];
#else
        uint8_t *scanline = av_malloc(bytes_per_scanline);
#endif
        const uint8_t *palstart = bufstart + buf_size - 769;

        for (y=0; y<h; y++, ptr+=stride) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);
            memcpy(ptr, scanline, w);
        }

        if (buf != palstart) {
            av_log(avctx, AV_LOG_WARNING, "image data possibly corrupted\n");
            buf = palstart;
        }
        if (*buf++ != 12) {
            av_log(avctx, AV_LOG_ERROR, "expected palette after image data\n");
#ifdef _MSC_VER
            av_free(scanline);
#endif
            return -1;
        }

#ifdef _MSC_VER
        av_free(scanline);
#endif

    } else if (nplanes == 1) {   /* all packed formats, max. 16 colors */
#ifndef _MSC_VER
        uint8_t scanline[bytes_per_scanline];
#else
        uint8_t *scanline = av_malloc(bytes_per_scanline);
#endif
        GetBitContext s;

        for (y=0; y<h; y++) {
            init_get_bits(&s, scanline, bytes_per_scanline<<3);

            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++)
                ptr[x] = get_bits(&s, bits_per_pixel);
            ptr += stride;
        }
#ifdef _MSC_VER
        av_free(scanline);
#endif

    } else {    /* planar, 4, 8 or 16 colors */
#ifndef _MSC_VER
        uint8_t scanline[bytes_per_scanline];
#else
        uint8_t *scanline = av_malloc(bytes_per_scanline);
#endif
        int i;

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++) {
                int m = 0x80 >> (x&7), v = 0;
                for (i=nplanes - 1; i>=0; i--) {
                    v <<= 1;
                    v  += !!(scanline[i*bytes_per_line + (x>>3)] & m);
                }
                ptr[x] = v;
            }
            ptr += stride;
        }
#ifdef _MSC_VER
        av_free(scanline);
#endif
    }

    if (nplanes == 1 && bits_per_pixel == 8) {
        pcx_palette(&buf, (uint32_t *) p->data[1], 256);
    } else if (bits_per_pixel < 8) {
        const uint8_t *palette = bufstart+16;
        pcx_palette(&palette, (uint32_t *) p->data[1], 16);
    }

    *picture = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}
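
The header fields pcx_decode_frame() relies on sit in the first 128 bytes; the
offsets below are a sketch inferred from the reads in the function (names are
illustrative, not taken from any official PCX definition):

/* PCX header offsets as consumed above. */
enum {
    PCX_MAGIC          = 0,   /* must be 0x0a                            */
    PCX_VERSION        = 1,   /* accepted when <= 5                      */
    PCX_ENCODING       = 2,   /* 0 = raw, 1 = RLE                        */
    PCX_BPP            = 3,   /* bits per pixel per plane                */
    PCX_WINDOW         = 4,   /* xmin, ymin, xmax, ymax as four LE16s    */
    PCX_EGA_PALETTE    = 16,  /* 48-byte palette used for < 8 bpp images */
    PCX_NPLANES        = 65,
    PCX_BYTES_PER_LINE = 66,  /* LE16                                    */
    PCX_HEADER_SIZE    = 128, /* image data starts here                  */
};
/* 8 bpp paletted images additionally end with a marker byte 12 followed by a
 * 768-byte (256 x RGB) palette, as checked at the end of the function. */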
Пример #24
0
static int str_read_packet(AVFormatContext *s,
                           AVPacket *ret_pkt)
{
    ByteIOContext *pb = &s->pb;
    StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    int packet_read = 0;
    int ret = 0;
    AVPacket *pkt;

    while (!packet_read) {

        if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR_IO;

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            /* check if this is the video channel we care about */
            if (channel == str->video_channel) {

                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size = AV_RL32(&sector[0x24]);
                int bytes_to_copy;
//        printf("%d %d %d\n",current_sector,sector_count,frame_size);
                /* if this is the first sector of the frame, allocate a pkt */
                pkt = &str->tmp_pkt;
                if (current_sector == 0) {
                    if (av_new_packet(pkt, frame_size))
                        return AVERROR_IO;

                    pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
               //     pkt->pts = str->pts;

                    /* if there is no audio, adjust the pts after every video
                     * frame; assume 15 fps */
                   if (str->audio_channel != -1)
                       str->pts += (90000 / 15);
                }

                /* load all the constituent chunks in the video packet */
                bytes_to_copy = frame_size - current_sector*VIDEO_DATA_CHUNK_SIZE;
                if (bytes_to_copy>0) {
                    if (bytes_to_copy>VIDEO_DATA_CHUNK_SIZE) bytes_to_copy=VIDEO_DATA_CHUNK_SIZE;
                    memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
                        sector + VIDEO_DATA_HEADER_SIZE, bytes_to_copy);
                }
                if (current_sector == sector_count-1) {
                    *ret_pkt = *pkt;
                    return 0;
                }

            }
            break;

        case CDXA_TYPE_AUDIO:
#ifdef PRINTSTUFF
printf (" dropping audio sector\n");
#endif
#if 1
            /* check if this is the audio channel we care about */
            if (channel == str->audio_channel) {
                pkt = ret_pkt;
                if (av_new_packet(pkt, 2304))
                    return AVERROR_IO;
                memcpy(pkt->data,sector+24,2304);

                pkt->stream_index =
                    str->channels[channel].audio_stream_index;
                //pkt->pts = str->pts;
                return 0;
            }
#endif
            break;
        default:
            /* drop the sector and move on */
#ifdef PRINTSTUFF
printf (" dropping other sector\n");
#endif
            break;
        }

        if (url_feof(pb))
            return AVERROR_IO;
    }

    return ret;
}
Пример #25
0
/* This function loads and processes a single chunk in an IP movie file.
 * It returns the type of chunk that was processed. */
static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
    AVPacket *pkt)
{
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    int chunk_size;
    unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
    unsigned char opcode_type;
    unsigned char opcode_version;
    int opcode_size;
    unsigned char scratch[1024];
    int i, j;
    int first_color, last_color;
    int audio_flags;
    unsigned char r, g, b;
    unsigned int width, height;

    /* see if there are any pending packets */
    chunk_type = load_ipmovie_packet(s, pb, pkt);
    if (chunk_type != CHUNK_DONE)
        return chunk_type;

    /* read the next chunk, wherever the file happens to be pointing */
    if (avio_feof(pb))
        return CHUNK_EOF;
    if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return CHUNK_BAD;
    chunk_size = AV_RL16(&chunk_preamble[0]);
    chunk_type = AV_RL16(&chunk_preamble[2]);

    av_dlog(NULL, "chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);

    switch (chunk_type) {

    case CHUNK_INIT_AUDIO:
        av_dlog(NULL, "initialize audio\n");
        break;

    case CHUNK_AUDIO_ONLY:
        av_dlog(NULL, "audio only\n");
        break;

    case CHUNK_INIT_VIDEO:
        av_dlog(NULL, "initialize video\n");
        break;

    case CHUNK_VIDEO:
        av_dlog(NULL, "video (and audio)\n");
        break;

    case CHUNK_SHUTDOWN:
        av_dlog(NULL, "shutdown\n");
        break;

    case CHUNK_END:
        av_dlog(NULL, "end\n");
        break;

    default:
        av_dlog(NULL, "invalid chunk\n");
        chunk_type = CHUNK_BAD;
        break;

    }

    while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {

        /* read the next chunk, wherever the file happens to be pointing */
        if (avio_feof(pb)) {
            chunk_type = CHUNK_EOF;
            break;
        }
        if (avio_read(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE) {
            chunk_type = CHUNK_BAD;
            break;
        }

        opcode_size = AV_RL16(&opcode_preamble[0]);
        opcode_type = opcode_preamble[2];
        opcode_version = opcode_preamble[3];

        chunk_size -= OPCODE_PREAMBLE_SIZE;
        chunk_size -= opcode_size;
        if (chunk_size < 0) {
            av_dlog(NULL, "chunk_size countdown just went negative\n");
            chunk_type = CHUNK_BAD;
            break;
        }

        av_dlog(NULL, "  opcode type %02X, version %d, 0x%04X bytes: ",
                opcode_type, opcode_version, opcode_size);
        switch (opcode_type) {

        case OPCODE_END_OF_STREAM:
            av_dlog(NULL, "end of stream\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_END_OF_CHUNK:
            av_dlog(NULL, "end of chunk\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_CREATE_TIMER:
            av_dlog(NULL, "create timer\n");
            if ((opcode_version > 0) || (opcode_size != 6)) {
                av_dlog(NULL, "bad create_timer opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]);
            av_dlog(NULL, "  %.2f frames/second (timer div = %d, subdiv = %d)\n",
                    1000000.0 / s->frame_pts_inc, AV_RL32(&scratch[0]),
                    AV_RL16(&scratch[4]));
            break;

        case OPCODE_INIT_AUDIO_BUFFERS:
            av_dlog(NULL, "initialize audio buffers\n");
            if (opcode_version > 1 || opcode_size > 10 || opcode_size < 6) {
                av_dlog(NULL, "bad init_audio_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->audio_sample_rate = AV_RL16(&scratch[4]);
            audio_flags = AV_RL16(&scratch[2]);
            /* bit 0 of the flags: 0 = mono, 1 = stereo */
            s->audio_channels = (audio_flags & 1) + 1;
            /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
            s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
            /* bit 2 indicates compressed audio in version 1 opcode */
            if ((opcode_version == 1) && (audio_flags & 0x4))
                s->audio_type = AV_CODEC_ID_INTERPLAY_DPCM;
            else if (s->audio_bits == 16)
                s->audio_type = AV_CODEC_ID_PCM_S16LE;
            else
                s->audio_type = AV_CODEC_ID_PCM_U8;
            av_dlog(NULL, "audio: %d bits, %d Hz, %s, %s format\n",
                    s->audio_bits, s->audio_sample_rate,
                    (s->audio_channels == 2) ? "stereo" : "mono",
                    (s->audio_type == AV_CODEC_ID_INTERPLAY_DPCM) ?
                    "Interplay audio" : "PCM");
            break;

        case OPCODE_START_STOP_AUDIO:
            av_dlog(NULL, "start/stop audio\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_INIT_VIDEO_BUFFERS:
            av_dlog(NULL, "initialize video buffers\n");
            if ((opcode_version > 2) || (opcode_size > 8) || opcode_size < 4
                || opcode_version == 2 && opcode_size < 8
            ) {
                av_dlog(NULL, "bad init_video_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            width  = AV_RL16(&scratch[0]) * 8;
            height = AV_RL16(&scratch[2]) * 8;
            if (width != s->video_width) {
                s->video_width = width;
                s->changed++;
            }
            if (height != s->video_height) {
                s->video_height = height;
                s->changed++;
            }
            if (opcode_version < 2 || !AV_RL16(&scratch[6])) {
                s->video_bpp = 8;
            } else {
                s->video_bpp = 16;
            }
            av_dlog(NULL, "video resolution: %d x %d\n",
                    s->video_width, s->video_height);
            break;

        case OPCODE_UNKNOWN_06:
        case OPCODE_UNKNOWN_0E:
        case OPCODE_UNKNOWN_10:
        case OPCODE_UNKNOWN_12:
        case OPCODE_UNKNOWN_13:
        case OPCODE_UNKNOWN_14:
        case OPCODE_UNKNOWN_15:
            av_dlog(NULL, "unknown (but documented) opcode %02X\n", opcode_type);
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SEND_BUFFER:
            av_dlog(NULL, "send buffer\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_AUDIO_FRAME:
            av_dlog(NULL, "audio frame\n");

            /* log position and move on for now */
            s->audio_chunk_offset = avio_tell(pb);
            s->audio_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SILENCE_FRAME:
            av_dlog(NULL, "silence frame\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_INIT_VIDEO_MODE:
            av_dlog(NULL, "initialize video mode\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_CREATE_GRADIENT:
            av_dlog(NULL, "create gradient\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SET_PALETTE:
            av_dlog(NULL, "set palette\n");
            /* check for the logical maximum palette size
             * (3 * 256 + 4 bytes) */
            if (opcode_size > 0x304 || opcode_size < 4) {
                av_dlog(NULL, "demux_ipmovie: set_palette opcode with invalid size\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) != opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }

            /* load the palette into internal data structure */
            first_color = AV_RL16(&scratch[0]);
            last_color = first_color + AV_RL16(&scratch[2]) - 1;
            /* sanity check (since they are 16 bit values) */
            if (   (first_color > 0xFF) || (last_color > 0xFF)
                || (last_color - first_color + 1)*3 + 4 > opcode_size) {
                av_dlog(NULL, "demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
                    first_color, last_color);
                chunk_type = CHUNK_BAD;
                break;
            }
            j = 4;  /* offset of first palette data */
            for (i = first_color; i <= last_color; i++) {
                /* the palette is stored as a 6-bit VGA palette, thus each
                 * component is shifted up to an 8-bit range */
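                /* e.g. a full-scale 6-bit component 0x3F becomes 0xFC after
                 * the *4 scale; OR-ing its top two bits back in below
                 * (0xFC | 0x03) restores full-scale 0xFF */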
                r = scratch[j++] * 4;
                g = scratch[j++] * 4;
                b = scratch[j++] * 4;
                s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | (b);
                s->palette[i] |= s->palette[i] >> 6 & 0x30303;
            }
            s->has_palette = 1;
            break;

        case OPCODE_SET_PALETTE_COMPRESSED:
            av_dlog(NULL, "set palette compressed\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SET_DECODING_MAP:
            av_dlog(NULL, "set decoding map\n");

            /* log position and move on for now */
            s->decode_map_chunk_offset = avio_tell(pb);
            s->decode_map_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_VIDEO_DATA:
            av_dlog(NULL, "set video data\n");

            /* log position and move on for now */
            s->video_chunk_offset = avio_tell(pb);
            s->video_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        default:
            av_dlog(NULL, "*** unknown opcode type\n");
            chunk_type = CHUNK_BAD;
            break;

        }
    }

    /* make a note of where the stream is sitting */
    s->next_chunk_offset = avio_tell(pb);

    /* dispatch the first of any pending packets */
    if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
        chunk_type = load_ipmovie_packet(s, pb, pkt);

    return chunk_type;
}
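
The container structure walked by process_ipmovie_chunk() nests opcodes inside
chunks; the preamble layout below is a sketch matching the CHUNK_PREAMBLE_SIZE
and OPCODE_PREAMBLE_SIZE reads in the function:

/* MVE chunk:  LE16 payload size, LE16 chunk type (CHUNK_*),
 *             then <size> bytes of payload made up of opcodes.
 * MVE opcode: LE16 opcode size, u8 opcode type (OPCODE_*), u8 opcode version,
 *             then <size> bytes of opcode data. */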
Пример #26
0
int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
                                 RTMPPacket *prev_pkt, uint8_t hdr)
{

    uint8_t t, buf[16];
    int channel_id, timestamp, data_size, offset = 0;
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int size = 0;
    int ret;

    size++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        size += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    data_size = prev_pkt[channel_id].data_size;
    type      = prev_pkt[channel_id].type;
    extra     = prev_pkt[channel_id].extra;

    hdr >>= 6;
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].ts_delta;
    } else {
        if (ffurl_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        size += 3;
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (ffurl_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            size += 3;
            data_size = AV_RB24(buf);
            if (ffurl_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            size++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                size += 4;
                extra = AV_RL32(buf);
            }
        }
        if (timestamp == 0xFFFFFF) {
            if (ffurl_read_complete(h, buf, 4) != 4)
                return AVERROR(EIO);
            timestamp = AV_RB32(buf);
        }
    }
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp,
                                     data_size)) < 0)
        return ret;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].data_size  = data_size;
    prev_pkt[channel_id].ts_delta   = timestamp - prev_pkt[channel_id].timestamp;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (data_size > 0) {
        int toread = FFMIN(data_size, chunk_size);
        if (ffurl_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        data_size -= chunk_size;
        offset    += chunk_size;
        size      += chunk_size;
        if (data_size > 0) {
            if ((ret = ffurl_read_complete(h, &t, 1)) < 0) { // marker
                ff_rtmp_packet_destroy(p);
                return ret;
            }
            size++;
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return size;
}
Пример #27
0
static av_cold int libopus_decode_init(AVCodecContext *avc)
{
    struct libopus_context *opus = avc->priv_data;
    int ret, channel_map = 0, gain_db = 0, nb_streams, nb_coupled;
    uint8_t mapping_arr[8] = { 0, 1 }, *mapping;

    avc->sample_rate    = 48000;
    avc->sample_fmt     = avc->request_sample_fmt == AV_SAMPLE_FMT_FLT ?
                          AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_S16;
    avc->channel_layout = avc->channels > 8 ? 0 :
                          ff_vorbis_channel_layouts[avc->channels - 1];

    if (avc->extradata_size >= OPUS_HEAD_SIZE) {
        gain_db     = sign_extend(AV_RL16(avc->extradata + 16), 16);
        channel_map = AV_RL8 (avc->extradata + 18);
    }
    if (avc->extradata_size >= OPUS_HEAD_SIZE + 2 + avc->channels) {
        nb_streams = avc->extradata[OPUS_HEAD_SIZE + 0];
        nb_coupled = avc->extradata[OPUS_HEAD_SIZE + 1];
        if (nb_streams + nb_coupled != avc->channels)
            av_log(avc, AV_LOG_WARNING, "Inconsistent channel mapping.\n");
        mapping = avc->extradata + OPUS_HEAD_SIZE + 2;
    } else {
        if (avc->channels > 2 || channel_map) {
            av_log(avc, AV_LOG_ERROR,
                   "No channel mapping for %d channels.\n", avc->channels);
            return AVERROR(EINVAL);
        }
        nb_streams = 1;
        nb_coupled = avc->channels > 1;
        mapping    = mapping_arr;
    }

    if (avc->channels > 2 && avc->channels <= 8) {
        const uint8_t *vorbis_offset = ff_vorbis_channel_layout_offsets[avc->channels - 1];
        int ch;

        /* Remap channels from vorbis order to libav order */
        for (ch = 0; ch < avc->channels; ch++)
            mapping_arr[ch] = mapping[vorbis_offset[ch]];
        mapping = mapping_arr;
    }

    opus->dec = opus_multistream_decoder_create(avc->sample_rate, avc->channels,
                                                nb_streams, nb_coupled,
                                                mapping, &ret);
    if (!opus->dec) {
        av_log(avc, AV_LOG_ERROR, "Unable to create decoder: %s\n",
               opus_strerror(ret));
        return ff_opus_error_to_averror(ret);
    }

    ret = opus_multistream_decoder_ctl(opus->dec, OPUS_SET_GAIN(gain_db));
    if (ret != OPUS_OK)
        av_log(avc, AV_LOG_WARNING, "Failed to set gain: %s\n",
               opus_strerror(ret));

    avc->delay = 3840;  /* Decoder delay (in samples) at 48kHz */

    return 0;
}
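
The extradata parsed above is the OpusHead identification header from RFC 7845;
the two fields pulled out ahead of the stream counts are the output gain
(signed Q7.8 dB at byte offset 16) and the channel mapping family (byte 18),
with OPUS_HEAD_SIZE being the 19 fixed header bytes that precede the optional
stream counts and mapping table. A minimal sketch of just that extraction,
assuming the caller has already checked extradata_size >= OPUS_HEAD_SIZE
(hypothetical helper):

#include <stdint.h>
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

/* Pull the output gain (Q7.8 dB) and channel mapping family out of OpusHead. */
static void opus_head_fields(const uint8_t *extradata, int *gain_q8, int *family)
{
    *gain_q8 = sign_extend(AV_RL16(extradata + 16), 16);
    *family  = extradata[18];
}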
Пример #28
0
/**
 * Helper function to read 16 bits little-endian and advance pointer
 */
static uint16_t get_le16_inc(const uint8_t **buf)
{
  uint16_t v = AV_RL16(*buf);
  *buf += 2;
  return v;
}
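
A possible usage pattern for the helper above; the field names are hypothetical,
the point being that the pointer passed by address is advanced past each field
as it is read:

static void parse_two_le16_fields(const uint8_t *buf,
                                  uint16_t *width, uint16_t *height)
{
    const uint8_t *p = buf;
    *width  = get_le16_inc(&p);  /* p now points 2 bytes further */
    *height = get_le16_inc(&p);
}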
Пример #29
0
static int yop_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    YopDecContext *yop = s->priv_data;
    AVIOContext *pb  = s->pb;

    AVCodecContext *audio_dec, *video_dec;
    AVStream *audio_stream, *video_stream;

    int frame_rate, ret;

    audio_stream = av_new_stream(s, 0);
    video_stream = av_new_stream(s, 1);
    if (!audio_stream || !video_stream)
        return AVERROR(ENOMEM);

    // Extra data that will be passed to the decoder
    video_stream->codec->extradata_size = 8;

    video_stream->codec->extradata = av_mallocz(video_stream->codec->extradata_size +
                                                FF_INPUT_BUFFER_PADDING_SIZE);

    if (!video_stream->codec->extradata)
        return AVERROR(ENOMEM);

    // Audio
    audio_dec               = audio_stream->codec;
    audio_dec->codec_type   = AVMEDIA_TYPE_AUDIO;
    audio_dec->codec_id     = CODEC_ID_ADPCM_IMA_WS;
    audio_dec->channels     = 1;
    audio_dec->sample_rate  = 22050;

    // Video
    video_dec               = video_stream->codec;
    video_dec->codec_type   = AVMEDIA_TYPE_VIDEO;
    video_dec->codec_id     = CODEC_ID_YOP;

    avio_skip(pb, 6);

    frame_rate              = avio_r8(pb);
    yop->frame_size         = avio_r8(pb) * 2048;
    video_dec->width        = avio_rl16(pb);
    video_dec->height       = avio_rl16(pb);

    video_stream->sample_aspect_ratio = (AVRational){1, 2};

    ret = avio_read(pb, video_dec->extradata, 8);
    if (ret < 8)
        return ret < 0 ? ret : AVERROR_EOF;

    yop->palette_size       = video_dec->extradata[0] * 3 + 4;
    yop->audio_block_length = AV_RL16(video_dec->extradata + 6);

    // 1840 samples per frame, 1 nibble per sample; hence 1840/2 = 920
    if (yop->audio_block_length < 920 ||
        yop->audio_block_length + yop->palette_size >= yop->frame_size) {
        av_log(s, AV_LOG_ERROR, "YOP has invalid header\n");
        return AVERROR_INVALIDDATA;
    }

    avio_seek(pb, 2048, SEEK_SET);

    av_set_pts_info(video_stream, 32, 1, frame_rate);

    return 0;
}
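Taken together, the reads above imply the following layout for the start of a YOP file (field names are descriptive guesses derived from the code, not from a specification):

/*
 *   offset  size  contents
 *   0       6     skipped (signature / version bytes)
 *   6       1     frame rate
 *   7       1     frame size, in 2048-byte units
 *   8       2     video width, little-endian
 *   10      2     video height, little-endian
 *   12      8     copied into video extradata: byte 0 determines the palette
 *                 size (extradata[0] * 3 + 4), bytes 6-7 hold the audio block
 *                 length, little-endian
 *   2048          first frame data (the header reader seeks here at the end)
 */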
Example #30
File: rar.c Project: 0x0all/mpv
static int SkipFile(struct stream *s, int *count, rar_file_t ***file,
                    const rar_block_t *hdr, const char *volume_mrl)
{
    int min_size = 7+21;
    if (hdr->flags & RAR_BLOCK_FILE_HAS_HIGH)
        min_size += 8;
    if (hdr->size < (unsigned)min_size)
        return -1;

    bstr data = stream_peek(s, min_size);
    if (data.len < min_size)
        return -1;
    const uint8_t *peek = (uint8_t *)data.start;

    /* */
    uint32_t file_size_low = AV_RL32(&peek[7+4]);
    uint8_t  method = peek[7+18];
    uint16_t name_size = AV_RL16(&peek[7+19]);
    uint32_t file_size_high = 0;
    if (hdr->flags & RAR_BLOCK_FILE_HAS_HIGH)
        file_size_high = AV_RL32(&peek[7+29]);
    const uint64_t file_size = ((uint64_t)file_size_high << 32) | file_size_low;

    char *name = calloc(1, name_size + 1);
    if (!name)
        return -1;

    const int name_offset = (hdr->flags & RAR_BLOCK_FILE_HAS_HIGH) ? (7+33) : (7+25);
    if (name_offset + name_size <= hdr->size) {
        const int max_size = name_offset + name_size;
        bstr namedata = stream_peek(s, max_size);
        if (namedata.len < max_size) {
            free(name);
            return -1;
        }
        memcpy(name, &namedata.start[name_offset], name_size);
    }

    rar_file_t *current = NULL;
    if (method != 0x30) {
        MP_WARN(s, "Ignoring compressed file %s (method=0x%2.2x)\n", name, method);
        goto exit;
    }

    /* */
    if( *count > 0 )
        current = (*file)[*count - 1];

    if (current &&
        (current->is_complete ||
          strcmp(current->name, name) ||
          (hdr->flags & RAR_BLOCK_FILE_HAS_PREVIOUS) == 0))
        current = NULL;

    if (!current) {
        if (hdr->flags & RAR_BLOCK_FILE_HAS_PREVIOUS)
            goto exit;
        current = calloc(1, sizeof(*current));
        if (!current)
            goto exit;
        MP_TARRAY_APPEND(NULL, *file, *count, current);

        current->name = name;
        current->size = file_size;
        current->is_complete = false;
        current->real_size = 0;
        current->chunk_count = 0;
        current->chunk = NULL;

        name = NULL;
    }

    /* Append chunks */
    rar_file_chunk_t *chunk = malloc(sizeof(*chunk));
    if (chunk) {
        chunk->mrl = strdup(volume_mrl);
        chunk->offset = stream_tell(s) + hdr->size;
        chunk->size = hdr->add_size;
        chunk->cummulated_size = 0;
        if (current->chunk_count > 0) {
            rar_file_chunk_t *previous = current->chunk[current->chunk_count-1];

            chunk->cummulated_size += previous->cummulated_size +
                                      previous->size;
        }

        MP_TARRAY_APPEND(NULL, current->chunk, current->chunk_count, chunk);

        current->real_size += hdr->add_size;
    }
    if ((hdr->flags & RAR_BLOCK_FILE_HAS_NEXT) == 0)
        current->is_complete = true;

exit:
    /* */
    free(name);

    /* We stop on the first non-empty file if we cannot seek */
    if (current) {
        bool can_seek = s->end_pos > 0;
        if (!can_seek && current->size > 0)
            return -1;
    }

    if (SkipBlock(s, hdr))
        return -1;
    return 0;
}
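The cummulated_size bookkeeping above records how much payload precedes each chunk, which makes it possible to map an offset inside the reassembled file back to the volume chunk that stores it. A sketch of such a lookup (illustrative only, not part of rar.c):

static rar_file_chunk_t *rar_find_chunk(rar_file_t *file, uint64_t offset)
{
    for (int i = 0; i < file->chunk_count; i++) {
        rar_file_chunk_t *c = file->chunk[i];
        /* this chunk covers [cummulated_size, cummulated_size + size) of the file */
        if (offset < c->cummulated_size + c->size)
            return c;   /* stored at c->offset + (offset - c->cummulated_size) in c->mrl */
    }
    return NULL;
}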