Example #1
void parsePkt(AVPacket* pkt, std::vector<unsigned long>& result){
    // Walk the AVCC-framed packet: every NAL unit is preceded by a
    // 4-byte length prefix, so the first NAL header byte sits at offset 4.
    unsigned long keyIndex = 4;
    unsigned long currentSegmentLength = 0;
    unsigned long size = pkt->size;
    uint8_t* data = pkt->data;
    // Fast path: the packet holds exactly one NAL unit.
    if (getNalSize(data, keyIndex) + 4 == size){
        NalType type = getNalType(data, keyIndex);
        if (type == NalType::NAL_IDR_Slice || type == NalType::NAL_Slice){
            result.push_back(keyIndex);
        }
        return;
    }
    // General case: record the offset of every slice NAL unit in the packet.
    while (keyIndex < size){
        NalType type = getNalType(data, keyIndex);
        if (type == NalType::NAL_IDR_Slice || type == NalType::NAL_Slice){
            result.push_back(keyIndex);
        }
        // Skip over this NAL unit plus the next 4-byte length prefix.
        currentSegmentLength = getNalSize(data, keyIndex);
        keyIndex += currentSegmentLength + 4;
    }
}
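
Example #1 relies on two helpers, getNalSize and getNalType, that are not shown. Below is a minimal sketch of what they might look like, assuming standard AVCC framing (a 4-byte big-endian length prefix before each NAL unit) and the H.264 slice type codes; the enum values and signatures are assumptions, not the original definitions:

#include <cstdint>

// Hypothetical declarations matching the snippet's usage. The enum values
// are an assumption based on the H.264 NAL unit type codes (slice = 1,
// IDR slice = 5); the original H264Analyst definitions are not shown.
namespace H264Analyst {
enum class NalType : uint8_t { NAL_Slice = 1, NAL_IDR_Slice = 5 };

// Assumption: keyIndex points at the first byte of a NAL unit, and the
// 4 bytes immediately before it hold the unit's length, big-endian (AVCC).
inline unsigned long getNalSize(const uint8_t* data, unsigned long keyIndex) {
    const uint8_t* p = data + keyIndex - 4;
    return ((unsigned long)p[0] << 24) | ((unsigned long)p[1] << 16) |
           ((unsigned long)p[2] << 8)  |  (unsigned long)p[3];
}

inline NalType getNalType(const uint8_t* data, unsigned long keyIndex) {
    // The NAL unit type is the low 5 bits of the NAL header byte.
    return static_cast<NalType>(data[keyIndex] & 0x1F);
}
} // namespace H264Analyst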
Example #2
/**
   Advances the decoder by one video frame: reads the next packet and,
   when it starts a new sequence (SPS NAL), keeps decoding packets until
   a complete frame has been produced into LastFrame; any other packet
   is skipped without decoding.

   Returns true if a frame was consumed, false on a read error
   (e.g. end of stream).

   All times are in milliseconds.
**/
bool QVideoDecoder::skipFrame()
{
    if(!ok)
        return false;

    // Read a frame
    if(av_read_frame(pFormatCtx, &packet) < 0)
        return false;                             // Frame read failed (e.g. end of stream)

    bool done = false;

    // 0x67 is the H.264 SPS NAL header byte; any other packet is
    // skipped without decoding.
    if(0x67 != getNalType(packet.data, packet.size)) {
        done = true;
        avcodec_flush_buffers(pCodecCtx);
        av_free_packet(&packet);      // Free the packet that was allocated by av_read_frame
        DesiredFrameNumber++;
    } else {
        // The packet read above is processed first; further packets are
        // read at the bottom of the loop until a frame has been decoded.
        while(!done)
        {

            if(packet.stream_index==videoStream)
            {
                // Is this a packet from the video stream -> decode video frame
                ffmpeg::AVRational millisecondbase = {1, 1000};
                int f = packet.dts;   // the dts doubles as the frame number here
                int t = ffmpeg::av_rescale_q(packet.dts,pFormatCtx->streams[videoStream]->time_base,millisecondbase);
                if(LastFrameOk==false)
                {
                    LastFrameOk=true;
                    LastLastFrameTime=LastFrameTime=t;
                    LastLastFrameNumber=LastFrameNumber=f;
                }
                else
                {
                    // If we decoded 2 frames in a row, the last times are okay
                    LastLastFrameTime = LastFrameTime;
                    LastLastFrameNumber = LastFrameNumber;
                    LastFrameTime=t;
                    LastFrameNumber=f;
                }


                int frameFinished;
                avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,&packet);

                // Did we get a video frame?
                if(frameFinished)
                {
                    // Convert the image format (init the context the first time)
                    int w = pCodecCtx->width;
                    int h = pCodecCtx->height;
                    img_convert_ctx = ffmpeg::sws_getCachedContext(img_convert_ctx,w, h, pCodecCtx->pix_fmt, w, h, ffmpeg::PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

                    if(img_convert_ctx == NULL)
                    {
                        printf("Cannot initialize the conversion context!\n");
                        return false;
                    }
                    ffmpeg::sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                    // Convert the frame to QImage
                    LastFrame=QImage(w,h,QImage::Format_RGB888);

                    for(int y=0; y<h; y++)
                        memcpy(LastFrame.scanLine(y),pFrameRGB->data[0]+y*pFrameRGB->linesize[0],w*3);

                    // Set the time of the frame we just decoded
                    DesiredFrameTime = ffmpeg::av_rescale_q(packet.dts,pFormatCtx->streams[videoStream]->time_base,millisecondbase);
                    LastFrameOk=true;


                    done = true;
                    DesiredFrameNumber++;
                }  // frameFinished
            }  // stream_index==videoStream
            av_free_packet(&packet);      // Free the packet that was allocated by av_read_frame

            // Not done yet: read the next packet and keep decoding
            if(!done && av_read_frame(pFormatCtx, &packet) < 0)
                return false;             // Frame read failed (e.g. end of stream)
        }
    }
    return done;   // done indicates whether or not we found a frame
}
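
For context, a caller would typically drive skipFrame in a loop to fast-forward over frames it does not need. A usage sketch; openFile and its file-name argument are assumptions about the surrounding QVideoDecoder API, not part of the code above:

// Hypothetical usage: fast-forward over ten frames, assuming the class
// exposes an openFile() method (an assumption, not shown above).
QVideoDecoder decoder;
if (decoder.openFile("video.mp4")) {
    for (int i = 0; i < 10 && decoder.skipFrame(); ++i)
        ;   // each call consumes one video frame
}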