Example #1
static int hap_encode(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *frame, int *got_packet)
{
    HapContext *ctx = avctx->priv_data;
    int header_length = hap_header_length(ctx);
    int final_data_size, ret;
    int pktsize = FFMAX(ctx->tex_size, ctx->max_snappy * ctx->chunk_count) + header_length;

    /* Allocate maximum size packet, shrink later. */
    ret = ff_alloc_packet2(avctx, pkt, pktsize, header_length);
    if (ret < 0)
        return ret;

    /* DXTC compression. */
    compress_texture(avctx, frame);

    /* Compress (using Snappy) the frame */
    final_data_size = hap_compress_frame(avctx, pkt->data + header_length);
    if (final_data_size < 0)
        return final_data_size;

    /* Write header at the start. */
    hap_write_frame_header(ctx, pkt->data, final_data_size + header_length);

    av_shrink_packet(pkt, final_data_size + header_length);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #2
bool FFmpeg_Decoder::getAVAudioData()
{
    int got_frame, len;

    if((*mStream)->codec->codec_type != AVMEDIA_TYPE_AUDIO)
        return false;

    do {
        if(mPacket.size == 0 && !getNextPacket())
            return false;

        /* Decode some data, and check for errors */
        if((len=avcodec_decode_audio4((*mStream)->codec, mFrame, &got_frame, &mPacket)) < 0)
            return false;

        /* Move the unread data to the front and clear the end bits */
        int remaining = mPacket.size - len;
        if(remaining <= 0)
            av_free_packet(&mPacket);
        else
        {
            memmove(mPacket.data, &mPacket.data[len], remaining);
            av_shrink_packet(&mPacket, remaining);
        }
    } while(got_frame == 0 || mFrame->nb_samples == 0);
    mNextPts += (double)mFrame->nb_samples / (double)(*mStream)->codec->sample_rate;

    return true;
}
Example #3
static int yop_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    YopDecContext *yop = s->priv_data;
    AVIOContext *pb  = s->pb;

    int ret;
    int actual_video_data_size = yop->frame_size -
                                 yop->audio_block_length - yop->palette_size;

    yop->video_packet.stream_index = 1;

    if (yop->video_packet.data) {
        *pkt                   =  yop->video_packet;
        yop->video_packet.data =  NULL;
        yop->video_packet.buf  =  NULL;
        yop->video_packet.size =  0;
        pkt->data[0]           =  yop->odd_frame;
        pkt->flags             |= AV_PKT_FLAG_KEY;
        yop->odd_frame         ^= 1;
        return pkt->size;
    }
    ret = av_new_packet(&yop->video_packet,
                        yop->frame_size - yop->audio_block_length);
    if (ret < 0)
        return ret;

    yop->video_packet.pos = avio_tell(pb);

    ret = avio_read(pb, yop->video_packet.data, yop->palette_size);
    if (ret < 0) {
        goto err_out;
    } else if (ret < yop->palette_size) {
        ret = AVERROR_EOF;
        goto err_out;
    }

    ret = av_get_packet(pb, pkt, 920);
    if (ret < 0)
        goto err_out;

    // Set position to the start of the frame
    pkt->pos = yop->video_packet.pos;

    avio_skip(pb, yop->audio_block_length - ret);

    ret = avio_read(pb, yop->video_packet.data + yop->palette_size,
                     actual_video_data_size);
    if (ret < 0)
        goto err_out;
    else if (ret < actual_video_data_size)
        av_shrink_packet(&yop->video_packet, yop->palette_size + ret);

    // Arbitrarily return the audio data first
    return yop->audio_block_length;

err_out:
    av_packet_unref(&yop->video_packet);
    return ret;
}
Example #4
static int append_packet_chunked_with_pts(AVIOContext *s, AVPacket *pkt, int explicit_pts, int64_t *pts, int size)
{
    int64_t orig_pos   = pkt->pos; // av_grow_packet might reset pos
    int orig_size      = pkt->size;
    int ret;

    do {
        int prev_size = pkt->size;
        int read_size;

        /* When the caller requests a lot of data, limit it to the amount
         * left in file or SANE_CHUNK_SIZE when it is not known. */
        read_size = size;
        if (read_size > SANE_CHUNK_SIZE/10) {
            read_size = ffio_limit(s, read_size);
            // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
            if (s->maxsize < 0)
                read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
        }

        ret = av_grow_packet(pkt, read_size);
        if (ret < 0)
            break;

        /* Each chunk is preceded by a 64-bit pts; read it before the payload. */
        ret = avio_read(s, (unsigned char*)pts, sizeof(int64_t));
        if (ret != sizeof(int64_t)) {
            av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
            break;
        }

        ret = avio_read(s, pkt->data + prev_size, read_size);
        if (ret != read_size) {
            av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
            break;
        }

        size -= read_size;
    } while (size > 0);
    if (size > 0)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;

    pkt->pos = orig_pos;
    if (!pkt->size)
        av_free_packet(pkt);
    return pkt->size > orig_size ? pkt->size - orig_size : ret;
}
Example #5
bool FFmpeg_Decoder::getAVAudioData()
{
    int got_frame;

    if((*mStream)->codec->codec_type != AVMEDIA_TYPE_AUDIO)
        return false;

    do {
        if(mPacket.size == 0 && !getNextPacket())
            return false;

        /* Decode some data, and check for errors */
        int len = 0;
        if((len=avcodec_decode_audio4((*mStream)->codec, mFrame, &got_frame, &mPacket)) < 0)
            return false;

        /* Move the unread data to the front and clear the end bits */
        int remaining = mPacket.size - len;
        if(remaining <= 0)
            av_free_packet(&mPacket);
        else
        {
            memmove(mPacket.data, &mPacket.data[len], remaining);
            av_shrink_packet(&mPacket, remaining);
        }

        if (!got_frame || mFrame->nb_samples == 0)
            continue;

        if(mSwr)
        {
            if(!mDataBuf || mDataBufLen < mFrame->nb_samples)
            {
                av_freep(&mDataBuf);
                if(av_samples_alloc(&mDataBuf, NULL, av_get_channel_layout_nb_channels(mOutputChannelLayout),
                                    mFrame->nb_samples, mOutputSampleFormat, 0) < 0)
                    return false;
                else
                    mDataBufLen = mFrame->nb_samples;
            }

            if(swr_convert(mSwr, (uint8_t**)&mDataBuf, mFrame->nb_samples,
                (const uint8_t**)mFrame->extended_data, mFrame->nb_samples) < 0)
            {
                return false;
            }
            mFrameData = &mDataBuf;
        }
        else
            mFrameData = &mFrame->data[0];

    } while(got_frame == 0 || mFrame->nb_samples == 0);
    mNextPts += (double)mFrame->nb_samples / (double)(*mStream)->codec->sample_rate;

    return true;
}
Example #6
File: dsicin.c Project: 1c0n/xbmc
static int cin_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    CinDemuxContext *cin = s->priv_data;
    AVIOContext *pb = s->pb;
    CinFrameHeader *hdr = &cin->frame_header;
    int rc, palette_type, pkt_size;
    int ret;

    if (cin->audio_buffer_size == 0) {
        rc = cin_read_frame_header(cin, pb);
        if (rc)
            return rc;

        if ((int16_t)hdr->pal_colors_count < 0) {
            hdr->pal_colors_count = -(int16_t)hdr->pal_colors_count;
            palette_type = 1;
        } else {
            palette_type = 0;
        }

        /* palette and video packet */
        pkt_size = (palette_type + 3) * hdr->pal_colors_count + hdr->video_frame_size;

        pkt_size = ffio_limit(pb, pkt_size);

        ret = av_new_packet(pkt, 4 + pkt_size);
        if (ret < 0)
            return ret;

        pkt->stream_index = cin->video_stream_index;
        pkt->pts = cin->video_stream_pts++;

        pkt->data[0] = palette_type;
        pkt->data[1] = hdr->pal_colors_count & 0xFF;
        pkt->data[2] = hdr->pal_colors_count >> 8;
        pkt->data[3] = hdr->video_frame_type;

        ret = avio_read(pb, &pkt->data[4], pkt_size);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        if (ret < pkt_size)
            av_shrink_packet(pkt, 4 + ret);

        /* sound buffer will be processed on next read_packet() call */
        cin->audio_buffer_size = hdr->audio_frame_size;
        return 0;
    }

    /* The branch that returns the buffered audio data as a packet follows
     * here in the full dsicin.c source; the snippet is truncated. */
}
Example #7
static int iff_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int ret;
    int64_t pos = avio_tell(pb);

    if (pos >= iff->body_end)
        return AVERROR_EOF;

    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (st->codec->codec_tag == ID_MAUD) {
            ret = av_get_packet(pb, pkt, FFMIN(iff->body_end - pos, 1024 * st->codec->block_align));
        } else {
            ret = av_get_packet(pb, pkt, iff->body_size);
        }
    } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t *buf;

        if (av_new_packet(pkt, iff->body_size + 2) < 0) {
            return AVERROR(ENOMEM);
        }

        buf = pkt->data;
        bytestream_put_be16(&buf, 2);
        ret = avio_read(pb, buf, iff->body_size);
        if (ret < 0) {
            av_free_packet(pkt);
        } else if (ret < iff->body_size)
            av_shrink_packet(pkt, ret + 2);
    } else {
        av_assert0(0);
    }

    if (pos == iff->body_pos)
        pkt->flags |= AV_PKT_FLAG_KEY;
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;
    return ret;
}
Example #8
int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size;

    size = RAW_PACKET_SIZE;

    if (av_new_packet(pkt, size) < 0)
        return AVERROR(ENOMEM);

    pkt->pos = avio_tell(s->pb);
    pkt->stream_index = 0;
    ret = ffio_read_partial(s->pb, pkt->data, size);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }
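    /* Trim the packet to the bytes actually read; av_shrink_packet() also
     * zeroes the padding area at the new end. */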
    av_shrink_packet(pkt, ret);
    return ret;
}
Example #9
//cwm at libavformat/utils.c:682
int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size;

    size = RAW_PACKET_SIZE;//cwm 1024

    if (av_new_packet(pkt, size) < 0)
        return AVERROR(ENOMEM);

    pkt->pos = avio_tell(s->pb); // get the current position in the file
    pkt->stream_index = 0;
    ret = ffio_read_partial(s->pb, pkt->data, size); // cwm: read data from the file into pkt->data
    if (ret < 0) {
        av_packet_unref(pkt);
        return ret;
    }
    av_shrink_packet(pkt, ret); // cwm: set pkt->size to the bytes actually read and zero the unused buffer
    return ret;
}
Example #10
static int adp_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size = 1024;

    if (avio_feof(s->pb))
        return AVERROR_EOF;

    ret = av_get_packet(s->pb, pkt, size);

    if (ret != size) {
        if (ret < 0) {
            av_packet_unref(pkt);
            return ret;
        }
        av_shrink_packet(pkt, ret);
    }
    pkt->stream_index = 0;

    return ret;
}
Example #11
static int sdr2_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int64_t pos;
    unsigned next;
    int flags, ret = 0, is_video;

    pos = avio_tell(s->pb);

    flags = avio_rl32(s->pb);
    avio_skip(s->pb, 4);

    next = avio_rl32(s->pb);
    if (next <= 52)
        return AVERROR_INVALIDDATA;

    avio_skip(s->pb, 6);
    is_video = avio_rl32(s->pb);
    avio_skip(s->pb, 30);

    if (pos == FIRST) {
        if (av_new_packet(pkt, next - 52 + 24) < 0)
            return AVERROR(ENOMEM);
        memcpy(pkt->data, header, 24);
        ret = avio_read(s->pb, pkt->data + 24, next - 52);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
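        /* Keep the 24-byte header that was prepended plus whatever was actually read. */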
        av_shrink_packet(pkt, ret + 24);
    } else {
        ret = av_get_packet(s->pb, pkt, next - 52);
    }
    pkt->stream_index = !!is_video;
    pkt->pos = pos;
    if (flags & (1 << 12))
        pkt->flags |= AV_PKT_FLAG_KEY;

    return ret;
}
Example #12
/*
 * Class:     org_jitsi_impl_neomedia_codec_FFmpeg
 * Method:    avpacket_set_data
 * Signature: (J[BII)V
 */
JNIEXPORT void JNICALL
Java_org_jitsi_impl_neomedia_codec_FFmpeg_avpacket_1set_1data
    (JNIEnv *env, jclass clazz, jlong pkt, jbyteArray data, jint offset,
        jint length)
{
    jbyte *data_;
    jboolean ok;

    if (data)
    {
        data_ = (*env)->GetPrimitiveArrayCritical(env, data, NULL);
        ok = data_ ? JNI_TRUE : JNI_FALSE;
    }
    else
    {
        data_ = NULL;
        ok = JNI_TRUE;
    }
    if (JNI_TRUE == ok)
    {
        int delta;
        AVPacket *pkt_ = (AVPacket *) (intptr_t) pkt;

        delta = length - pkt_->size;
        if (delta > 0)
        {
            if (av_grow_packet(pkt_, delta) != 0)
                ok = JNI_FALSE;
        }
        else if (delta < 0)
        {
            av_shrink_packet(pkt_, length);
        }
        if (JNI_TRUE == ok)
            memcpy(pkt_->data, data_ + offset, length);
        if (data_)
            (*env)->ReleasePrimitiveArrayCritical(env, data, data_, JNI_ABORT);
    }
}
Example #13
int Packet::SetDataSize(int len)
{
  if (len < 0)
    return -1;

  if (len <= GetDataSize()) {
    av_shrink_packet(m_Packet, len);
    return 0;
  }

  if (!m_Packet) {
    m_Packet = av_packet_alloc();
    if (!m_Packet || av_new_packet(m_Packet, len) < 0)
      return -1;
  }
  else
  {
    if (av_grow_packet(m_Packet, (len - m_Packet->size)) < 0)
      return -1;
  }

  return 0;
}
Example #14
static int rsd_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    int ret, size = 1024;

    if (url_feof(s->pb))
        return AVERROR_EOF;

    if (codec->codec_id == AV_CODEC_ID_ADPCM_IMA_RAD)
        ret = av_get_packet(s->pb, pkt, codec->block_align);
    else
        ret = av_get_packet(s->pb, pkt, size);

    if (ret != size) {
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        av_shrink_packet(pkt, ret);
    }
    pkt->stream_index = 0;

    return ret;
}
Example #15
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    int width, height, bits_pixel, i, j, length, ret;
    uint8_t *in_buf, *buf;

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    if (width > 65535 || height > 65535 ||
        width * height >= INT_MAX / 4 - ALIAS_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", width, height);
        return AVERROR_INVALIDDATA;
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        bits_pixel = 8;
        break;
    case AV_PIX_FMT_BGR24:
        bits_pixel = 24;
        break;
    default:
        return AVERROR(EINVAL);
    }

    length = ALIAS_HEADER_SIZE + 4 * width * height; // max possible
    if ((ret = ff_alloc_packet(pkt, length)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length);
        return ret;
    }

    buf = pkt->data;

    /* Encode header. */
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be32(&buf, 0); /* X, Y offset */
    bytestream_put_be16(&buf, bits_pixel);

    for (j = 0; j < height; j++) {
        in_buf = frame->data[0] + frame->linesize[0] * j;
        for (i = 0; i < width; ) {
            int count = 0;
            int pixel;

            if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                pixel = *in_buf;
                while (count < 255 && count + i < width && pixel == *in_buf) {
                    count++;
                    in_buf++;
                }
                bytestream_put_byte(&buf, count);
                bytestream_put_byte(&buf, pixel);
            } else { /* AV_PIX_FMT_BGR24 */
                pixel = AV_RB24(in_buf);
                while (count < 255 && count + i < width &&
                       pixel == AV_RB24(in_buf)) {
                    count++;
                    in_buf += 3;
                }
                bytestream_put_byte(&buf, count);
                bytestream_put_be24(&buf, pixel);
            }
            i += count;
        }
    }

    /* Total length */
    av_shrink_packet(pkt, buf - pkt->data);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #16
static int fourxm_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    AVIOContext *pb            = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int ret = 0;
    unsigned int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;

    while (!packet_read) {
        if ((ret = avio_read(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size       = AV_RL32(&header[4]);
        if (url_feof(pb))
            return AVERROR(EIO);
        switch (fourcc_tag) {
        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts++;

            /* skip the LIST-* tag and move on to the next fourcc */
            avio_rl32(pb);
            break;

        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts          = fourxm->video_pts;
            pkt->pos          = avio_tell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = avio_read(s->pb, &pkt->data[8], size);

            if (ret < 0) {
                av_free_packet(pkt);
            } else {
                packet_read = 1;
                av_shrink_packet(pkt, ret + 8);
            }
            break;

        case snd__TAG:
            track_number = avio_rl32(pb);
            avio_skip(pb, 4);
            size -= 8;

            if (track_number < fourxm->track_count &&
                fourxm->tracks[track_number].channels > 0) {
                ret = av_get_packet(s->pb, pkt, size);
                if (ret < 0)
                    return AVERROR(EIO);
                pkt->stream_index =
                    fourxm->tracks[track_number].stream_index;
                pkt->pts    = fourxm->tracks[track_number].audio_pts;
                packet_read = 1;

                /* pts accounting */
                audio_frame_count = size;
                if (fourxm->tracks[track_number].adpcm)
                    audio_frame_count -= 2 * (fourxm->tracks[track_number].channels);
                audio_frame_count /= fourxm->tracks[track_number].channels;
                if (fourxm->tracks[track_number].adpcm) {
                    audio_frame_count *= 2;
                } else
                    audio_frame_count /=
                        (fourxm->tracks[track_number].bits / 8);
                fourxm->tracks[track_number].audio_pts += audio_frame_count;
            } else {
                avio_skip(pb, size);
            }
            break;

        default:
            avio_skip(pb, size);
            break;
        }
    }
    return ret;
}
//------------------------------------------------------------------------------
int decodeAudioPacket(  AVPacket& p_packet, AVCodecContext* p_audioCodecContext, AVStream* p_stream, 
                        AVFrame* p_frame, SwrContext* p_swrContext, uint8_t** p_destBuffer, int p_destLinesize,
                        FFmpegVideoPlayer* p_player, VideoInfo& p_videoInfo, bool p_isLoop)
{
    // Decode audio frame
    int got_frame = 0;
    int decoded = avcodec_decode_audio4(p_audioCodecContext, p_frame, &got_frame, &p_packet);
    if (decoded < 0) 
    {
        p_videoInfo.error = "Error decoding audio frame.";
        return decoded;
    }
    
    if(decoded <= p_packet.size)
    {
        /* Move the unread data to the front and clear the end bits */
        int remaining = p_packet.size - decoded;
        memmove(p_packet.data, &p_packet.data[decoded], remaining);
        av_shrink_packet(&p_packet, remaining);
    }
    
    // Frame is complete, store it in audio frame queue
    if (got_frame)
    {
        int outputSamples = swr_convert(p_swrContext, 
                                        p_destBuffer, p_destLinesize, 
                                        (const uint8_t**)p_frame->extended_data, p_frame->nb_samples);
        
        int bufferSize = av_get_bytes_per_sample(AV_SAMPLE_FMT_FLT) * p_videoInfo.audioNumChannels
                            * outputSamples;
        
        int64_t duration = p_frame->pkt_duration;
        int64_t pts = p_frame->pts;
        int64_t dts = p_frame->pkt_dts;
        
        if (staticOgreLog && p_player->getLogLevel() == LOGLEVEL_EXCESSIVE)
        {
            staticOgreLog->logMessage("Audio frame bufferSize / duration / pts / dts: " 
                    + boost::lexical_cast<std::string>(bufferSize) + " / "
                    + boost::lexical_cast<std::string>(duration) + " / "
                    + boost::lexical_cast<std::string>(pts) + " / "
                    + boost::lexical_cast<std::string>(dts), Ogre::LML_NORMAL);
        }
        
        // Calculate frame life time
        double frameLifeTime = ((double)p_stream->time_base.num) / (double)p_stream->time_base.den;
        frameLifeTime *= duration;
        p_videoInfo.audioDecodedDuration += frameLifeTime;
        
        // If we are a loop, only start adding frames after 0.5 seconds have been decoded
        if (p_isLoop && p_videoInfo.audioDecodedDuration < 0.5)
        {
            staticOgreLog->logMessage("Skipping audio frame");
            return decoded;
        }
        
        // Create the audio frame
        AudioFrame* frame = new AudioFrame();
        frame->dataSize = bufferSize;
        frame->data = new uint8_t[bufferSize];
        memcpy(frame->data, p_destBuffer[0], bufferSize);
        frame->lifeTime = frameLifeTime;
        
        p_player->addAudioFrame(frame);
    }
    
    return decoded;
}
static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int ret, size;
    while (!url_feof(pb)) {
        int copyhdrsize = ctx->rtjpg_video ? HDRSIZE : 0;
        uint64_t pos = avio_tell(pb);
        ret = avio_read(pb, hdr, HDRSIZE);
        if (ret < HDRSIZE)
            return ret < 0 ? ret : AVERROR(EIO);
        frametype = hdr[0];
        size = PKTSIZE(AV_RL32(&hdr[8]));
        switch (frametype) {
            case NUV_EXTRADATA:
                if (!ctx->rtjpg_video) {
                    avio_skip(pb, size);
                    break;
                }
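                /* fall through: RTjpeg extradata is delivered as a video packet */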
            case NUV_VIDEO:
                if (ctx->v_id < 0) {
                    av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
                    avio_skip(pb, size);
                    break;
                }
                ret = av_new_packet(pkt, copyhdrsize + size);
                if (ret < 0)
                    return ret;

                pkt->pos = pos;
                pkt->flags |= hdr[2] == 0 ? AV_PKT_FLAG_KEY : 0;
                pkt->pts = AV_RL32(&hdr[4]);
                pkt->stream_index = ctx->v_id;
                memcpy(pkt->data, hdr, copyhdrsize);
                ret = avio_read(pb, pkt->data + copyhdrsize, size);
                if (ret < 0) {
                    av_free_packet(pkt);
                    return ret;
                }
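                /* Short read: keep the copied frame header plus the bytes actually read. */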
                if (ret < size)
                    av_shrink_packet(pkt, copyhdrsize + ret);
                return 0;
            case NUV_AUDIO:
                if (ctx->a_id < 0) {
                    av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
                    avio_skip(pb, size);
                    break;
                }
                ret = av_get_packet(pb, pkt, size);
                pkt->flags |= AV_PKT_FLAG_KEY;
                pkt->pos = pos;
                pkt->pts = AV_RL32(&hdr[4]);
                pkt->stream_index = ctx->a_id;
                if (ret < 0) return ret;
                return 0;
            case NUV_SEEKP:
                // contains no data, size value is invalid
                break;
            default:
                avio_skip(pb, size);
                break;
        }
    }
    return AVERROR(EIO);
}
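Taken together, most of these examples follow the same pattern: allocate an AVPacket for the largest payload that could arrive, read from the AVIOContext, and call av_shrink_packet() when fewer bytes were actually obtained (the function also zeroes the padding at the new end). The sketch below distills that pattern under those assumptions; read_fixed_chunk() and MAX_CHUNK_SIZE are illustrative names, not part of FFmpeg.

#include <libavformat/avformat.h>

#define MAX_CHUNK_SIZE 4096  /* hypothetical upper bound for one packet */

/* Minimal sketch: allocate for the worst case, read, shrink to what arrived. */
static int read_fixed_chunk(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    if ((ret = av_new_packet(pkt, MAX_CHUNK_SIZE)) < 0)
        return ret;

    pkt->pos          = avio_tell(s->pb);
    pkt->stream_index = 0;

    ret = avio_read(s->pb, pkt->data, MAX_CHUNK_SIZE);
    if (ret < 0) {
        av_packet_unref(pkt);
        return ret;
    }

    if (ret < MAX_CHUNK_SIZE)           /* short read, e.g. near EOF */
        av_shrink_packet(pkt, ret);     /* keep ret bytes, zero the padding */

    return ret;
}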