Code Example #1
File: videoplayer.cpp  Project: qyvlik/VideoItem
void DecodeVideo::run()
{
    int frameFinished = 0;
    AVFrame *pFrame = avcodec_alloc_frame();
    SDL_LockMutex(mutex);
    avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,packet.data,packet.size);
    SDL_UnlockMutex(mutex);

    AVFrame *pFrameRGB;
    pFrameRGB = avcodec_alloc_frame();
    avpicture_fill((AVPicture *)pFrameRGB, bufferRGB, PIX_FMT_RGB24,pCodecCtx->width, pCodecCtx->height);

    /*
     * To recap: to use swscale, all it takes is sws_getContext() for
     * initialization, sws_scale() for the actual conversion, and
     * sws_freeContext() to finish up.
     */


    /*
     * SwsContext *
     * sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat,
     *                int dstW, int dstH, enum PixelFormat dstFormat,
     *                int flags, SwsFilter *srcFilter,
     *                SwsFilter *dstFilter, const double *param)
     * There are ten parameters in total; the first seven are the important ones.
     * The first three are the source video's width, height and PixelFormat;
     * parameters four to six are the destination video's width, height and PixelFormat;
     * the seventh selects which scaling algorithm to use; the available
     * algorithms are listed in libswscale/swscale.h.
     *
     * Personal advice: if you need efficient scaling, e.g. for video frames,
     * and it is not clear whether you are scaling up or down, just use
     * SWS_FAST_BILINEAR. If you know you are scaling down for display, the
     * point (nearest-neighbor) algorithm is recommended; if you know you are
     * scaling up for display, CImage's Stretch is actually more efficient.
     * And if speed does not matter and you only care about picture quality,
     * pick the algorithm with the lowest frame rate (i.e. the slowest one);
     * its output generally looks best.
     *
     * The last three parameters can all be NULL if they are not used.
     */

    SwsContext *convert_ctx = sws_getContext(width,height,pix_fmt,
                              width,height,PIX_FMT_RGB24,
                              SWS_BICUBIC,         // SWS_FAST_BILINEAR or SWS_BICUBIC
                              NULL,NULL,NULL);
    /*
     * int
     * sws_scale(SwsContext *c,
     *           uint8_t* src[],
     *           int srcStride[],
     *           int srcSliceY,
     *           int srcSliceH,
     *           uint8_t* dst[],
     *           int dstStride[]);
     * There are seven parameters in total.
     * The first is the context returned by sws_getContext().
     *
     * The second (src) and the sixth (dst) point to the input and output buffers.
     * The third (srcStride) and the seventh (dstStride) point to the input and
     * output strides.
     *
     *    If you are not sure what a stride is, you can think of it for now as
     *    the number of bytes per row.
     * The fourth, srcSliceY, is (per the header comment) the first row to
     *    process; here we start from the top, so it is simply 0.
     *    See the comments in swscale.h for a more detailed explanation.
     * The fifth, srcSliceH, is the height of the source slice.
     */

    sws_scale(convert_ctx,
              (const uint8_t*  const*)pFrame->data,
              pFrame->linesize,
              0,
              height,
              pFrameRGB->data,
              pFrameRGB->linesize);

    //! This is where the video frame is actually obtained

    QImage tmpImage((uchar *)bufferRGB,width,height,QImage::Format_RGB888);
    emit readOneFrame(QPixmap::fromImage(tmpImage),width,height);

    av_free(pFrameRGB);
    av_free(pFrame);    // free the decoded frame as well, otherwise it leaks on every run()
    sws_freeContext(convert_ctx);

    av_free_packet(&packet);
}
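The comments in this example reduce the swscale workflow to three calls: sws_getContext() to initialize, sws_scale() to do the conversion, and sws_freeContext() to clean up. For reference, below is a minimal, self-contained sketch of that same sequence written against the current FFmpeg names (av_frame_alloc(), AV_PIX_FMT_*, av_image_alloc()) rather than the deprecated avcodec_alloc_frame()/PIX_FMT_* calls used above; the helper name to_rgb24() and the error handling are illustrative assumptions, not part of the original project.

/*
 * Minimal sketch of the sws_getContext() / sws_scale() / sws_freeContext()
 * sequence with the modern FFmpeg API. `src` is assumed to be an already
 * decoded AVFrame in its native pixel format; the caller owns the returned
 * frame and releases it with av_freep(&dst->data[0]) and av_frame_free(&dst).
 */
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>

static AVFrame *to_rgb24(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return NULL;

    dst->width  = src->width;
    dst->height = src->height;
    dst->format = AV_PIX_FMT_RGB24;
    if (av_image_alloc(dst->data, dst->linesize,
                       dst->width, dst->height, AV_PIX_FMT_RGB24, 1) < 0) {
        av_frame_free(&dst);
        return NULL;
    }

    /* 1. initialize the scaler */
    struct SwsContext *ctx = sws_getContext(src->width, src->height,
                                            (enum AVPixelFormat)src->format,
                                            dst->width, dst->height, AV_PIX_FMT_RGB24,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    if (!ctx) {
        av_freep(&dst->data[0]);
        av_frame_free(&dst);
        return NULL;
    }

    /* 2. convert */
    sws_scale(ctx, (const uint8_t *const *)src->data, src->linesize,
              0, src->height, dst->data, dst->linesize);

    /* 3. release the scaler */
    sws_freeContext(ctx);
    return dst;
}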
Code Example #2
File: phone.c  Project: AllanDaemon/ProjectTox-Core
void *encode_video_thread(void *arg)
{
    INFO("Started encode video thread!");

    av_session_t *_phone = arg;

    _phone->running_encvid = 1;
    //CodecState *cs = get_cs_temp(_phone->av);
    AVPacket pkt1, *packet = &pkt1;
    //int p = 0;
    //int got_packet;
    int video_frame_finished;
    AVFrame *s_video_frame;
    AVFrame *webcam_frame;
    s_video_frame = avcodec_alloc_frame();
    webcam_frame = avcodec_alloc_frame();
    //AVPacket enc_video_packet;

    uint8_t *buffer;
    int numBytes;
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height);
    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t), 1);
    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width,
                   _phone->webcam_decoder_ctx->height);
    _phone->sws_ctx = sws_getContext(_phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height,
                                     _phone->webcam_decoder_ctx->pix_fmt, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height,
                                     PIX_FMT_YUV420P,
                                     SWS_BILINEAR, NULL, NULL, NULL);


    vpx_image_t *image =
        vpx_img_alloc(NULL, VPX_IMG_FMT_I420, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height, 1);

    //uint32_t frame_counter = 0;
    while (_phone->running_encvid) {

        if (av_read_frame(_phone->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");

            if (_phone->video_format_ctx->pb->error != 0)
                break;

            continue;
        }

        if (packet->stream_index == _phone->video_stream) {
            if (avcodec_decode_video2(_phone->webcam_decoder_ctx, webcam_frame, &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                continue;
            }

            av_free_packet(packet);
            sws_scale(_phone->sws_ctx, (uint8_t const * const *)webcam_frame->data, webcam_frame->linesize, 0,
                      _phone->webcam_decoder_ctx->height, s_video_frame->data, s_video_frame->linesize);
            /* create a new I-frame every 60 frames */
            //++p;
            /*
            if (p == 60) {

                s_video_frame->pict_type = AV_PICTURE_TYPE_BI ;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I ;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P ;
            }*/

            if (video_frame_finished) {
                memcpy(image->planes[VPX_PLANE_Y], s_video_frame->data[0],
                       s_video_frame->linesize[0] * _phone->webcam_decoder_ctx->height);
                memcpy(image->planes[VPX_PLANE_U], s_video_frame->data[1],
                       s_video_frame->linesize[1] * _phone->webcam_decoder_ctx->height / 2);
                memcpy(image->planes[VPX_PLANE_V], s_video_frame->data[2],
                       s_video_frame->linesize[2] * _phone->webcam_decoder_ctx->height / 2);
                toxav_send_video (_phone->av, image);
                //if (avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet) < 0) {
                /*if (vpx_codec_encode(&cs->v_encoder, image, frame_counter, 1, 0, 0) != VPX_CODEC_OK) {
                    printf("could not encode video frame\n");
                    continue;
                }
                ++frame_counter;

                vpx_codec_iter_t iter = NULL;
                vpx_codec_cx_pkt_t *pkt;
                while( (pkt = vpx_codec_get_cx_data(&cs->v_encoder, &iter)) ) {
                    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
                        toxav_send_rtp_payload(_phone->av, TypeVideo, pkt->data.frame.buf, pkt->data.frame.sz);
                }*/
                //if (!got_packet) {
                //    continue;
                //}

                //if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                //toxav_send_rtp_payload(_phone->av, TypeVideo, enc_video_packet.data, enc_video_packet.size);

                //av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }

    vpx_img_free(image);

    /* clean up codecs */
    //pthread_mutex_lock(&cs->ctrl_mutex);
    av_free(buffer);
    av_free(webcam_frame);
    av_free(s_video_frame);
    sws_freeContext(_phone->sws_ctx);
    //avcodec_close(webcam_decoder_ctx);
    //avcodec_close(cs->video_encoder_ctx);
    //pthread_mutex_unlock(&cs->ctrl_mutex);

    _phone->running_encvid = -1;

    pthread_exit ( NULL );
}
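Note that the memcpy calls above copy whole planes in one go, which assumes the vpx_image_t plane strides match the AVFrame linesize values (true here because the image is allocated with an alignment of 1 and the frame is filled from a contiguous buffer). A row-by-row copy that honors both strides is the safer general pattern; the sketch below uses a hypothetical copy_plane() helper that is not part of the original project.

/*
 * Sketch: copy one plane row by row so that differing strides
 * (AVFrame::linesize vs. vpx_image_t::stride) are handled safely
 * even when either buffer is padded.
 */
#include <string.h>
#include <stdint.h>

static void copy_plane(uint8_t *dst, int dst_stride,
                       const uint8_t *src, int src_stride,
                       int width_bytes, int rows)
{
    int y;
    for (y = 0; y < rows; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, width_bytes);
}

/*
 * Usage for an I420 image and a YUV420P frame of size w x h:
 *   copy_plane(image->planes[VPX_PLANE_Y], image->stride[VPX_PLANE_Y],
 *              s_video_frame->data[0], s_video_frame->linesize[0], w, h);
 *   copy_plane(image->planes[VPX_PLANE_U], image->stride[VPX_PLANE_U],
 *              s_video_frame->data[1], s_video_frame->linesize[1], w / 2, h / 2);
 *   copy_plane(image->planes[VPX_PLANE_V], image->stride[VPX_PLANE_V],
 *              s_video_frame->data[2], s_video_frame->linesize[2], w / 2, h / 2);
 */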
Code Example #3
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer = NULL;

  AVDictionary    *optionsDict = NULL;
  struct SwsContext      *sws_ctx = NULL;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  av_register_all();
  
  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
      videoStream=i;
      break;
    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
  
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;
  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
			      pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        PIX_FMT_RGB24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );
  
  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
  
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	// Convert the image from its native format to RGB
        sws_scale
        (
            sws_ctx,
            (uint8_t const * const *)pFrame->data,
            pFrame->linesize,
            0,
            pCodecCtx->height,
            pFrameRGB->data,
            pFrameRGB->linesize
        );
	
	// Save the frame to disk
	if(++i<=5)
	  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
		    i);
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }
  
  // Free the RGB image
  av_free(buffer);
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
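This listing calls SaveFrame(), which is not shown here. A plausible minimal implementation (an assumption, not the original code) writes each RGB24 frame to disk as a binary PPM image:

#include <stdio.h>
#include <libavutil/frame.h>

/* Assumed helper: dump an RGB24 AVFrame as a binary PPM file.
 * The filename pattern is arbitrary. */
static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    char szFilename[32];
    FILE *pFile;
    int y;

    snprintf(szFilename, sizeof(szFilename), "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (!pFile)
        return;

    /* PPM header: magic number, dimensions, maximum sample value */
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    /* Write pixel data row by row, honoring the frame's linesize (stride) */
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}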
Code Example #4
File: redirect_av.c  Project: golgol7777/gpac
/**
 * This thread sends the frame to TS mux
 * \param Parameter The GF_AVRedirect pointer
 */
static Bool video_encoding_thread_run(void *param)
{
    GF_AVRedirect * avr = (GF_AVRedirect*) param;
    u64 currentFrameTimeProcessed = 0;
    u32 lastEncodedFrameTime = 0;
    AVCodecContext * ctx = NULL;
    assert( avr );
    gf_sc_add_video_listener ( avr->term->compositor, &avr->video_listen );
    while (avr->is_running && (!ctx || !avr->swsContext)) {
        ctx = ts_get_video_codec_context(avr->ts_implementation);
        gf_sleep(16);
    }
    if (!ctx) {
        goto exit;
    }
    printf("******* Video Codec Context = %d/%d, start="LLU"\n", ctx->time_base.num, ctx->time_base.den, ctx->timecode_frame_start);
    while (avr->is_running) {
        {
            gf_mx_p(avr->frameMutex);
            while (!avr->frameTime || currentFrameTimeProcessed == avr->frameTime) {
                gf_mx_v(avr->frameMutex);
                if (!avr->is_running) {
                    goto exit;
                }
                gf_mx_p(avr->frameMutex);
				gf_sleep(1);
            }
            assert( currentFrameTimeProcessed != avr->frameTime);
            currentFrameTimeProcessed = avr->frameTime;
            {
                avpicture_fill ( ( AVPicture * ) avr->RGBpicture, avr->frame, PIX_FMT_RGB24, avr->srcWidth, avr->srcHeight );
                assert( avr->swsContext );
                sws_scale ( avr->swsContext,
#ifdef USE_AVCODEC2
                            ( const uint8_t * const * )
#else
                            ( uint8_t ** )
#endif /* USE_AVCODEC2 */
                            avr->RGBpicture->data, avr->RGBpicture->linesize,
                            0, avr->srcHeight,
                            avr->YUVpicture->data, avr->YUVpicture->linesize );
#ifdef AVR_DUMP_RAW_AVI
                if ( AVI_write_frame ( avr->avi_out, avr->frame, avr->size, 1 ) <0 )
                {
                    GF_LOG ( GF_LOG_ERROR, GF_LOG_MODULE, ( "[AVRedirect] Error writing video frame\n" ) );
                }
#endif /* AVR_DUMP_RAW_AVI */
                gf_mx_v(avr->frameMutex);
                if (avr->encode)
                {
                    int written;
                    //u32 sysclock = gf_sys_clock();
                    avr->YUVpicture->pts = currentFrameTimeProcessed;
                    //printf("Encoding frame PTS="LLU", frameNum=%u, time=%u...", avr->YUVpicture->pts, avr->YUVpicture->coded_picture_number, currentFrameTimeProcessed);
                    written = avcodec_encode_video ( ctx, avr->videoOutbuf, avr->videoOutbufSize, avr->YUVpicture );
                    //ctx->coded_frame->pts = currentFrameTimeProcessed;
                    if ( written < 0 )
                    {
                        GF_LOG ( GF_LOG_ERROR, GF_LOG_MODULE, ( "[AVRedirect] Error while encoding video frame =%d\n", written ) );
                    } else
                        if ( written > 0 )
                        {
                            ts_encode_video_frame(avr->ts_implementation, avr->videoOutbuf, written);
                        }
                    lastEncodedFrameTime = currentFrameTimeProcessed;
                }
            }
        }
        avr->frameTimeEncoded = currentFrameTimeProcessed;
		gf_sleep(1);
    } /* End of main loop */
exit:
    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] Ending video encoding thread...\n"));
    if (avr->term)
        gf_sc_remove_video_listener ( avr->term->compositor, &avr->video_listen );
    return 0;
}
Code Example #5
C_RESULT ffmpeg_stage_decoding_transform(ffmpeg_stage_decoding_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  static const int        sws_flags = SWS_FAST_BILINEAR;
  AVCodecContext  *pCodecCtxMP4 = cfg->pCodecCtxMP4;
  AVCodecContext  *pCodecCtxH264 = cfg->pCodecCtxH264;
  AVFrame         *pFrame = cfg->pFrame;
  AVFrame	  *pFrameOutput = cfg->pFrameOutput;
  static AVPacket packet;
  int	frameFinished = 0;
    
  bool_t frameDimChanged = FALSE;
  static parrot_video_encapsulation_t PaVE, prevPaVE;
    
#if WAIT_FOR_I_FRAME
  static bool_t waitForIFrame = TRUE;
#endif
    
#ifdef NUM_SAMPLES
  static struct timeval start_time, start_time2;
  static int numsamples = 0;
#endif	
    
  if (0 == in->size) // No frame
    {
      FFMPEG_DEBUG ("in->size is zero, don't do anything");
      return C_OK;
    }
  
  vp_os_mutex_lock( &out->lock );
  
  if(out->status == VP_API_STATUS_INIT) // Init only code
    {		
      out->numBuffers   = 1;
      out->buffers      = cfg->bufferArray;
      out->buffers[0]   = NULL;
      out->indexBuffer  = 0;
      out->lineSize     = 0;
        
      av_init_packet(&packet);
 
        
#if __FFMPEG_DEBUG_ENABLED
#else
      av_log_set_callback (&empty_av_log_callback);
#endif
    }
 
  if (! check_and_copy_PaVE(&PaVE, in, &prevPaVE, &frameDimChanged))
    {
      FFMPEG_DEBUG("Received a frame without PaVE informations");
      vp_os_mutex_unlock( &out->lock );
      return C_FAIL;
    }
    
  if ((out->status == VP_API_STATUS_INIT) || frameDimChanged) // Init and "new frame dimensions" code
    {
      pCodecCtxMP4->width = PaVE.encoded_stream_width;
      pCodecCtxMP4->height = PaVE.encoded_stream_height;
      pCodecCtxH264->width = PaVE.encoded_stream_width;
      pCodecCtxH264->height = PaVE.encoded_stream_height;
		
      cfg->src_picture.width = PaVE.display_width;
      cfg->src_picture.height = PaVE.display_height;
      cfg->src_picture.format = pCodecCtxH264->pix_fmt;
      cfg->dst_picture.width = PaVE.display_width;
      cfg->dst_picture.height = PaVE.display_height;
		
      out->size = avpicture_get_size(cfg->dst_picture.format, cfg->dst_picture.width, cfg->dst_picture.height);
      cfg->buffer = (uint8_t *)av_realloc(cfg->buffer, out->size * sizeof(uint8_t));
      out->buffers[0] = cfg->buffer;
		
      avpicture_fill((AVPicture *)pFrameOutput, (uint8_t*)out->buffers[out->indexBuffer], cfg->dst_picture.format,
                     cfg->dst_picture.width, cfg->dst_picture.height);
		
        
      cfg->img_convert_ctx = sws_getCachedContext(cfg->img_convert_ctx, PaVE.display_width, PaVE.display_height,
                                             pCodecCtxH264->pix_fmt, PaVE.display_width, PaVE.display_height,
                                             cfg->dst_picture.format, sws_flags, NULL, NULL, NULL);

      if (out->status == VP_API_STATUS_INIT)
        {
#ifdef NUM_SAMPLES
          gettimeofday(&start_time, NULL);
#endif		
          out->status = VP_API_STATUS_PROCESSING;
          FFMPEG_DEBUG("End of init");
        }
    }

#if	WAIT_FOR_I_FRAME
  if ( (PaVE.frame_number != (prevPaVE.frame_number +1)) 
        && 
        ( PaVE.frame_number != prevPaVE.frame_number || PaVE.slice_index != (prevPaVE.slice_index+1) )   )
    {
      FFMPEG_DEBUG ("Missed a frame :\nPrevious was %d of type %d\nNew is %d of type %d", prevPaVE.frame_number, prevPaVE.frame_type,
                    PaVE.frame_number, PaVE.frame_type);
      waitForIFrame = TRUE;  
    }
    
#if DISPLAY_DROPPED_FRAMES
  if (waitForIFrame && PaVE.frame_type == FRAME_TYPE_P_FRAME)
    {
      FFMPEG_DEBUG ("Dropped a P frame\n");
      dropped_frames++;
    }
#endif
    
  if(out->status == VP_API_STATUS_PROCESSING && (!waitForIFrame || (PaVE.frame_type == FRAME_TYPE_IDR_FRAME) || (PaVE.frame_type == FRAME_TYPE_I_FRAME))) // Processing code
    {
      waitForIFrame = FALSE;
#else
      if(out->status == VP_API_STATUS_PROCESSING) // Processing code  
        {
#endif
          /* The 'check_and_copy_PaVE' function already removed the PaVE from the 'in' buffer */
          packet.data = ((unsigned char*)in->buffers[in->indexBuffer]);
          packet.size = in->size;
          FFMPEG_DEBUG("Size : %d", packet.size);
        
#ifdef NUM_SAMPLES
          struct timeval end_time;
          static float32_t frame_decoded_time = 0;

          gettimeofday(&start_time2, NULL);
#endif
          // Decode video frame
          if (PaVE.video_codec == CODEC_MPEG4_VISUAL)
            {
              avcodec_decode_video2 (pCodecCtxMP4, pFrame, &frameFinished, &packet);
            }
          else if (PaVE.video_codec == CODEC_MPEG4_AVC)
            {
              avcodec_decode_video2 (pCodecCtxH264, pFrame, &frameFinished, &packet);
            }
        
          // Did we get a video frame?
          if(frameFinished)
            {
              pFrameOutput->data[0] = (uint8_t*)out->buffers[out->indexBuffer];
              sws_scale(cfg->img_convert_ctx, (const uint8_t *const*)pFrame->data, 
                        pFrame->linesize, 0, 
                        PaVE.display_height,
                        pFrameOutput->data, pFrameOutput->linesize);
				
              cfg->num_picture_decoded++;

#ifdef NUM_SAMPLES
              gettimeofday(&end_time, NULL);
              frame_decoded_time += ((end_time.tv_sec * 1000.0 + end_time.tv_usec / 1000.0) - (start_time2.tv_sec * 1000.0 + start_time2.tv_usec / 1000.0));

              if(numsamples++ > NUM_SAMPLES)
                {
                  float32_t value = ((end_time.tv_sec * 1000.0 + end_time.tv_usec / 1000.0) - (start_time.tv_sec * 1000.0 + start_time.tv_usec / 1000.0));
					
                  printf("Frames decoded in average %f fps, received and decoded in average %f fps\n", (1000.0 / (frame_decoded_time / (float32_t)NUM_SAMPLES)), 1000.0 / (value / (float32_t)NUM_SAMPLES));
                  gettimeofday(&start_time, NULL);
                  frame_decoded_time = 0;
                  numsamples = 0;
                }					
#endif
            }
          else
            {
        	  /* Skip frames are usually 7 bytes long
        	   * and make FFMPEG return an error. It is however normal to get
        	   * skip frames from the drone.
        	   */
        	  if (7!=PaVE.payload_size)
              printf ("Decoding failed for a %s\n", (PaVE.frame_type == FRAME_TYPE_P_FRAME) ? "P Frame" : "I Frame");
            }
        
#if DISPLAY_DROPPED_FRAMES
          if ((PaVE.frame_type == FRAME_TYPE_IDR_FRAME) || (PaVE.frame_type == FRAME_TYPE_I_FRAME))
            {
              if (previous_ok_frame != 0)
                {
                  static int globalMiss = 0, globalDrop = 0, globalFrames = 0;
                  globalMiss += missed_frames;
                  globalDrop += dropped_frames;
                  int globalMissDrop = globalMiss + globalDrop;
                  int total_miss = missed_frames + dropped_frames;
                  int total_frames = PaVE.frame_number - previous_ok_frame;
                  globalFrames += total_frames;
                  float missPercent = (100.0 * missed_frames) / (1.0 * total_frames);
                  float dropPercent = (100.0 * dropped_frames) / (1.0 * total_frames);
                  float totalPercent = (100.0 * total_miss) / (1.0 * total_frames);
                  float missMean = (100.0 * globalMiss) / (1.0 * globalFrames);
                  float dropMean = (100.0 * globalDrop) / (1.0 * globalFrames);
                  float totalMean = (100.0 * globalMissDrop) / (1.0 * globalFrames);
                  printf ("LAST %4d F => M %4d (%4.1f%%) / D %4d (%4.1f%%) / T %4d (%4.1f%%) <=> ALL %4d F => M %4d (%4.1f%%) / D %4d (%4.1f%%) / T %4d (%4.1f%%)\n", total_frames, missed_frames, missPercent, dropped_frames, dropPercent, total_miss, totalPercent, globalFrames, globalMiss, missMean, globalDrop, dropMean, globalMissDrop, totalMean);
                }
              missed_frames = 0; dropped_frames = 0;
              previous_ok_frame = PaVE.frame_number;
            }
#endif
        
	}
	
      vp_os_mutex_unlock( &out->lock );
	
      return C_OK;
    }

#define FFMPEG_CHECK_AND_FREE(pointer, freeFunc)        \
  do                                                    \
    {                                                   \
      if (NULL != pointer)                              \
        {                                               \
          freeFunc (pointer);                           \
          pointer = NULL;                               \
        }                                               \
    } while (0)

#define FFMPEG_CHECK_AND_FREE_WITH_CALL(pointer, func, freeFunc)        \
  do                                                                    \
    {                                                                   \
      if (NULL != pointer)                                              \
        {                                                               \
          func (pointer);                                               \
          freeFunc (pointer);                                           \
          pointer = NULL;                                               \
        }                                                               \
    } while (0)
  

  C_RESULT ffmpeg_stage_decoding_close(ffmpeg_stage_decoding_config_t *cfg)
  {
    FFMPEG_CHECK_AND_FREE_WITH_CALL(cfg->pCodecCtxMP4, avcodec_close, av_free);
    FFMPEG_CHECK_AND_FREE_WITH_CALL(cfg->pCodecCtxH264, avcodec_close, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->pFrame, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->pFrameOutput, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->bufferArray, vp_os_free);
    FFMPEG_CHECK_AND_FREE(cfg->buffer, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->img_convert_ctx, sws_freeContext);
    return C_OK;
  }
Code Example #6
File: libav.c  Project: Ralbarker/showtime
static void
libav_deliver_frame(video_decoder_t *vd,
                    media_pipe_t *mp, media_queue_t *mq,
                    AVCodecContext *ctx, AVFrame *frame,
                    const media_buf_meta_t *mbm, int decode_time,
                    const media_codec_t *mc)
{
  frame_info_t fi;

  /* Compute aspect ratio */
  switch(mbm->mbm_aspect_override) {
  case 0:

    fi.fi_dar_num = frame->width;
    fi.fi_dar_den = frame->height;

    if(frame->sample_aspect_ratio.num) {
      fi.fi_dar_num *= frame->sample_aspect_ratio.num;
      fi.fi_dar_den *= frame->sample_aspect_ratio.den;
    } else if(mc->sar_num) {
      fi.fi_dar_num *= mc->sar_num;
      fi.fi_dar_den *= mc->sar_den;
    }

    break;
  case 1:
    fi.fi_dar_num = 4;
    fi.fi_dar_den = 3;
    break;
  case 2:
    fi.fi_dar_num = 16;
    fi.fi_dar_den = 9;
    break;
  }

  int64_t pts = video_decoder_infer_pts(mbm, vd,
					frame->pict_type == AV_PICTURE_TYPE_B);

  int duration = mbm->mbm_duration;

  if(!vd_valid_duration(duration)) {
    /* duration is zero or very invalid, use duration from last output */
    duration = vd->vd_estimated_duration;
  }

  if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    pts = vd->vd_nextpts; /* no pts set, use estimated pts */

  if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) {
    /* we know PTS of a prior frame */
    int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt;

    if(vd_valid_duration(t)) {
      /* inter frame duration seems valid, store it */
      vd->vd_estimated_duration = t;
      if(duration == 0)
	duration = t;

    }
  }
  
  duration += frame->repeat_pict * duration / 2;
 
  if(pts != AV_NOPTS_VALUE) {
    vd->vd_prevpts = pts;
    vd->vd_prevpts_cnt = 0;
  }
  vd->vd_prevpts_cnt++;

  if(duration == 0) {
    TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0");
    return;
  }

  prop_set_int(mq->mq_prop_too_slow, decode_time > duration);

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_nextpts = pts + duration;
  } else {
    vd->vd_nextpts = AV_NOPTS_VALUE;
  }
#if 0
  static int64_t lastpts = AV_NOPTS_VALUE;
  if(lastpts != AV_NOPTS_VALUE) {
    printf("DEC: %20"PRId64" : %-20"PRId64" %d %"PRId64" %6d %d\n", pts, pts - lastpts, mbm->mbm_drive_clock,
           mbm->mbm_delta, duration, mbm->mbm_sequence);
    if(pts - lastpts > 1000000) {
      abort();
    }
  }
  lastpts = pts;
#endif

  vd->vd_interlaced |=
    frame->interlaced_frame && !mbm->mbm_disable_deinterlacer;

  fi.fi_width = frame->width;
  fi.fi_height = frame->height;
  fi.fi_pts = pts;
  fi.fi_epoch = mbm->mbm_epoch;
  fi.fi_delta = mbm->mbm_delta;
  fi.fi_duration = duration;
  fi.fi_drive_clock = mbm->mbm_drive_clock;

  fi.fi_interlaced = !!vd->vd_interlaced;
  fi.fi_tff = !!frame->top_field_first;
  fi.fi_prescaled = 0;

  fi.fi_color_space = 
    ctx->colorspace < ARRAYSIZE(libav_colorspace_tbl) ? 
    libav_colorspace_tbl[ctx->colorspace] : 0;

  fi.fi_type = 'LAVC';

  // Check if we should skip directly to convert code
  if(vd->vd_convert_width  != frame->width ||
     vd->vd_convert_height != frame->height ||
     vd->vd_convert_pixfmt != frame->format) {

    // Nope, go ahead and deliver frame as-is

    fi.fi_data[0] = frame->data[0];
    fi.fi_data[1] = frame->data[1];
    fi.fi_data[2] = frame->data[2];

    fi.fi_pitch[0] = frame->linesize[0];
    fi.fi_pitch[1] = frame->linesize[1];
    fi.fi_pitch[2] = frame->linesize[2];

    fi.fi_pix_fmt = frame->format;
    fi.fi_avframe = frame;

    int r = video_deliver_frame(vd, &fi);

    /* return value
     * 0  = OK
     * 1  = Need convert to YUV420P
     * -1 = Fail
     */

    if(r != 1)
      return;
  }

  // Need to convert frame

  vd->vd_sws =
    sws_getCachedContext(vd->vd_sws,
                         frame->width, frame->height, frame->format,
                         frame->width, frame->height, PIX_FMT_YUV420P,
                         0, NULL, NULL, NULL);

  if(vd->vd_sws == NULL) {
    TRACE(TRACE_ERROR, "Video", "Unable to convert from %s to %s",
	  av_get_pix_fmt_name(frame->format),
	  av_get_pix_fmt_name(PIX_FMT_YUV420P));
    return;
  }

  if(vd->vd_convert_width  != frame->width  ||
     vd->vd_convert_height != frame->height ||
     vd->vd_convert_pixfmt != frame->format) {
    avpicture_free(&vd->vd_convert);

    vd->vd_convert_width  = frame->width;
    vd->vd_convert_height = frame->height;
    vd->vd_convert_pixfmt = frame->format;

    avpicture_alloc(&vd->vd_convert, PIX_FMT_YUV420P, frame->width,
                    frame->height);

    TRACE(TRACE_DEBUG, "Video", "Converting from %s to %s",
	  av_get_pix_fmt_name(frame->format),
	  av_get_pix_fmt_name(PIX_FMT_YUV420P));
  }

  sws_scale(vd->vd_sws, (void *)frame->data, frame->linesize, 0,
            frame->height, vd->vd_convert.data, vd->vd_convert.linesize);

  fi.fi_data[0] = vd->vd_convert.data[0];
  fi.fi_data[1] = vd->vd_convert.data[1];
  fi.fi_data[2] = vd->vd_convert.data[2];

  fi.fi_pitch[0] = vd->vd_convert.linesize[0];
  fi.fi_pitch[1] = vd->vd_convert.linesize[1];
  fi.fi_pitch[2] = vd->vd_convert.linesize[2];

  fi.fi_type = 'LAVC';
  fi.fi_pix_fmt = PIX_FMT_YUV420P;
  fi.fi_avframe = NULL;
  video_deliver_frame(vd, &fi);
}
Code Example #7
int _tmain(int argc, _TCHAR* argv[])
{

	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	char filepath[]="src01_480x272_22.h265";

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.(无法打开输入流)\n");
		return -1;
	}
	if(av_find_stream_info(pFormatCtx)<0)
	{
		printf("Couldn't find stream information.(无法获取流信息)\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex=i;
			break;
		}
	if(videoindex==-1)
	{
		printf("Didn't find a video stream.(没有找到视频流)\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL)
	{
		printf("Codec not found.(没有找到解码器)\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
	{
		printf("Could not open codec.(无法打开解码器)\n");
		return -1;
	}
	AVFrame	*pFrame,*pFrameYUV;
	pFrame=avcodec_alloc_frame();
	pFrameYUV=avcodec_alloc_frame();
	uint8_t *out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
//------------SDL----------------
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 

	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	//SDL 2.0 Support for multiple windows
	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}


	SDL_Renderer* sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	SDL_Texture* sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	SDL_Rect sdlRect;  
	sdlRect.x = 0;  
	sdlRect.y = 0;  
	sdlRect.w = screen_w;  
	sdlRect.h = screen_h;  

	int ret, got_picture;

	AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("File Information(文件信息)---------------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	
	struct SwsContext *img_convert_ctx;
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
	//--------------
	SDL_Thread *video_tid = SDL_CreateThread(sfp_refresh_thread,NULL,NULL);
	//
	//Event Loop
	SDL_Event event;
	for (;;) {
		//Wait
		SDL_WaitEvent(&event);
		if(event.type==SFM_REFRESH_EVENT){
			//------------------------------
			if(av_read_frame(pFormatCtx, packet)>=0){
				if(packet->stream_index==videoindex){
					ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
					if(ret < 0){
						printf("Decode Error.(解码错误)\n");
						return -1;
					}
					if(got_picture){
						sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
						//SDL---------------------------
						SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
						SDL_RenderClear( sdlRenderer );  
						SDL_RenderCopy( sdlRenderer, sdlTexture, &sdlRect, &sdlRect );  
						SDL_RenderPresent( sdlRenderer );  
						//SDL End-----------------------
					}
				}
				av_free_packet(packet);
			}else{
				//Exit Thread
				thread_exit=1;
				break;
			}
		}

	}

	sws_freeContext(img_convert_ctx);

	SDL_Quit();
	//--------------
	av_free(out_buffer);
	av_free(pFrameYUV);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
Code Example #8
File: cellVpost.cpp  Project: kallew/rpcs3
s32 cellVpostExec(u32 handle, vm::cptr<u8> inPicBuff, vm::cptr<CellVpostCtrlParam> ctrlParam, vm::ptr<u8> outPicBuff, vm::ptr<CellVpostPictureInfo> picInfo)
{
	cellVpost.Log("cellVpostExec(handle=0x%x, inPicBuff=*0x%x, ctrlParam=*0x%x, outPicBuff=*0x%x, picInfo=*0x%x)", handle, inPicBuff, ctrlParam, outPicBuff, picInfo);

	const auto vpost = Emu.GetIdManager().get<VpostInstance>(handle);

	if (!vpost)
	{
		return CELL_VPOST_ERROR_E_ARG_HDL_INVALID;
	}

	s32 w = ctrlParam->inWidth;
	u32 h = ctrlParam->inHeight;
	u32 ow = ctrlParam->outWidth;
	u32 oh = ctrlParam->outHeight;

	ctrlParam->inWindow; // ignored
	if (ctrlParam->inWindow.x) cellVpost.Notice("*** inWindow.x = %d", (u32)ctrlParam->inWindow.x);
	if (ctrlParam->inWindow.y) cellVpost.Notice("*** inWindow.y = %d", (u32)ctrlParam->inWindow.y);
	if (ctrlParam->inWindow.width != w) cellVpost.Notice("*** inWindow.width = %d", (u32)ctrlParam->inWindow.width);
	if (ctrlParam->inWindow.height != h) cellVpost.Notice("*** inWindow.height = %d", (u32)ctrlParam->inWindow.height);
	ctrlParam->outWindow; // ignored
	if (ctrlParam->outWindow.x) cellVpost.Notice("*** outWindow.x = %d", (u32)ctrlParam->outWindow.x);
	if (ctrlParam->outWindow.y) cellVpost.Notice("*** outWindow.y = %d", (u32)ctrlParam->outWindow.y);
	if (ctrlParam->outWindow.width != ow) cellVpost.Notice("*** outWindow.width = %d", (u32)ctrlParam->outWindow.width);
	if (ctrlParam->outWindow.height != oh) cellVpost.Notice("*** outWindow.height = %d", (u32)ctrlParam->outWindow.height);
	ctrlParam->execType; // ignored
	ctrlParam->scalerType; // ignored
	ctrlParam->ipcType; // ignored

	picInfo->inWidth = w; // copy
	picInfo->inHeight = h; // copy
	picInfo->inDepth = CELL_VPOST_PIC_DEPTH_8; // fixed
	picInfo->inScanType = CELL_VPOST_SCAN_TYPE_P; // TODO
	picInfo->inPicFmt = CELL_VPOST_PIC_FMT_IN_YUV420_PLANAR; // fixed
	picInfo->inChromaPosType = ctrlParam->inChromaPosType; // copy
	picInfo->inPicStruct = CELL_VPOST_PIC_STRUCT_PFRM; // TODO
	picInfo->inQuantRange = ctrlParam->inQuantRange; // copy
	picInfo->inColorMatrix = ctrlParam->inColorMatrix; // copy

	picInfo->outWidth = ow; // copy
	picInfo->outHeight = oh; // copy
	picInfo->outDepth = CELL_VPOST_PIC_DEPTH_8; // fixed
	picInfo->outScanType = CELL_VPOST_SCAN_TYPE_P; // TODO
	picInfo->outPicFmt = CELL_VPOST_PIC_FMT_OUT_RGBA_ILV; // TODO
	picInfo->outChromaPosType = ctrlParam->inChromaPosType; // ignored
	picInfo->outPicStruct = picInfo->inPicStruct; // ignored
	picInfo->outQuantRange = ctrlParam->inQuantRange; // ignored
	picInfo->outColorMatrix = ctrlParam->inColorMatrix; // ignored

	picInfo->userData = ctrlParam->userData; // copy
	picInfo->reserved1 = 0;
	picInfo->reserved2 = 0;

	//u64 stamp0 = get_system_time();
	std::unique_ptr<u8[]> pA(new u8[w*h]);

	memset(pA.get(), ctrlParam->outAlpha, w*h);

	//u64 stamp1 = get_system_time();

	std::unique_ptr<SwsContext, void(*)(SwsContext*)> sws(sws_getContext(w, h, AV_PIX_FMT_YUVA420P, ow, oh, AV_PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL), sws_freeContext);

	//u64 stamp2 = get_system_time();

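	// The input is planar YUV420 (Y, U and V stored back to back in inPicBuff) plus the
	// synthesized alpha plane pA; the line strides are w for Y/alpha and w/2 for the
	// half-resolution chroma planes, as set up in in_data/in_line below.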
	const u8* in_data[4] = { &inPicBuff[0], &inPicBuff[w * h], &inPicBuff[w * h * 5 / 4], pA.get() };
	int in_line[4] = { w, w/2, w/2, w };
	u8* out_data[4] = { outPicBuff.get_ptr(), NULL, NULL, NULL };
	int out_line[4] = { static_cast<int>(ow*4), 0, 0, 0 };

	sws_scale(sws.get(), in_data, in_line, 0, h, out_data, out_line);

	//ConLog.Write("cellVpostExec() perf (access=%d, getContext=%d, scale=%d, finalize=%d)",
		//stamp1 - stamp0, stamp2 - stamp1, stamp3 - stamp2, get_system_time() - stamp3);
	return CELL_OK;
}
Code Example #9
File: VideoPlayer.cpp  Project: iiiCode/VideoPlayer
//static
void *VideoPlayer::doProcessVideo(void *args)
{
    AVFrame *frame;
    AVPicture *picture;
    AVPacket packet;
    int got_frame = 0;
    
    VideoPlayer *player = static_cast<VideoPlayer*>(args);
    
    while(! player->mStop) {
        
        if (player->mSeek) {
            player->doSeek();
        }
        
        if (player->mAccurateSeek) {
            player->doAccurateSeek();
        }

        if (av_read_frame(player->mFormatCtx, &packet) < 0) {
            vLOGE("END OF FILE.\n");
            player->mVideoEndCallback(player, "stop");
            av_free_packet(&packet);
            break;
        }
        
        if (packet.stream_index == player->mVideoStreamIndex) {
            
            avcodec_decode_video2(player->mCodecCtx, player->mFrame, &got_frame, &packet);
            
            if (got_frame == 0) {
                vLOGE("Do not get a frame.\n");
                av_free_packet(&packet);
                continue;
            }
            
            picture = new AVPicture;
            if (! picture) {
                vLOGE("new AVPicture failed.");
                continue;
            }

            avpicture_alloc(picture, PIX_FMT_RGB24, player->mWidth, player->mHeight);

            sws_scale (player->mImageConvertCtx,
                       player->mFrame->data,
                       player->mFrame->linesize,
                       0, player->mCodecCtx->height,
                       picture->data,
                       picture->linesize);
            
            player->mPictureRingBuffer.enqueue(picture);
            
        } else {
            vLOGE("Not video stream packet, ignore it.\n");
        }
        
        av_free_packet(&packet);
        usleep(10);
        
    }
    
    return nullptr;
}
Code Example #10
File: nowebcam.c  Project: korobool/linphonecdbus
static mblk_t *jpeg2yuv(uint8_t *jpgbuf, int bufsize, MSVideoSize *reqsize) {
#ifndef NO_FFMPEG
    AVCodecContext av_context;
    int got_picture=0;
    AVFrame orig;
    mblk_t *ret;
    struct SwsContext *sws_ctx;
    AVPacket pkt;
    MSPicture dest;
    AVCodec *codec=avcodec_find_decoder(CODEC_ID_MJPEG);

    if (codec==NULL) {
        ms_error("Could not find MJPEG decoder in ffmpeg.");
        return NULL;
    }

    avcodec_get_context_defaults3(&av_context,NULL);
    if (avcodec_open2(&av_context,codec,NULL)<0) {
        ms_error("jpeg2yuv: avcodec_open failed");
        return NULL;
    }
    av_init_packet(&pkt);
    pkt.data=jpgbuf;
    pkt.size=bufsize;

    memset(&orig, 0, sizeof(orig));
    if (avcodec_decode_video2(&av_context,&orig,&got_picture,&pkt) < 0) {
        ms_error("jpeg2yuv: avcodec_decode_video failed");
        avcodec_close(&av_context);
        return NULL;
    }
    ret=ms_yuv_buf_alloc(&dest, reqsize->width,reqsize->height);
    /* not using SWS_FAST_BILINEAR because it doesn't play well with
     * av_context.pix_fmt set to PIX_FMT_YUVJ420P by jpeg decoder */
    sws_ctx=sws_getContext(av_context.width,av_context.height,av_context.pix_fmt,
                           reqsize->width,reqsize->height,PIX_FMT_YUV420P,SWS_BILINEAR,
                           NULL, NULL, NULL);
    if (sws_ctx==NULL) {
        ms_error("jpeg2yuv: ms_sws_getContext() failed.");
        avcodec_close(&av_context);
        freemsg(ret);
        return NULL;
    }

#if LIBSWSCALE_VERSION_INT >= AV_VERSION_INT(0,9,0)
    if (sws_scale(sws_ctx,(const uint8_t* const *)orig.data,orig.linesize,0,av_context.height,dest.planes,dest.strides)<0) {
#else
    if (sws_scale(sws_ctx,(uint8_t**)orig.data,orig.linesize,0,av_context.height,dest.planes,dest.strides)<0) {
#endif
        ms_error("jpeg2yuv: ms_sws_scale() failed.");
        sws_freeContext(sws_ctx);
        avcodec_close(&av_context);
        freemsg(ret);
        return NULL;
    }
    sws_freeContext(sws_ctx);
    avcodec_close(&av_context);
    return ret;
#elif TARGET_OS_IPHONE
    MSPicture dest;
    CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, jpgbuf, bufsize, NULL);
    // use the data provider to get a CGImage; release the data provider
    CGImageRef image = CGImageCreateWithJPEGDataProvider(dataProvider, NULL, FALSE,
                       kCGRenderingIntentDefault);
    CGDataProviderRelease(dataProvider);
    reqsize->width = CGImageGetWidth(image);
    reqsize->height = CGImageGetHeight(image);

    uint8_t* tmp = (uint8_t*) malloc(reqsize->width * reqsize->height * 4);
    mblk_t* ret=ms_yuv_buf_alloc(&dest, reqsize->width, reqsize->height);
    CGColorSpaceRef colourSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef imageContext =
        CGBitmapContextCreate(tmp, reqsize->width, reqsize->height, 8, reqsize->width*4, colourSpace, kCGImageAlphaNoneSkipLast);
    CGColorSpaceRelease(colourSpace);
    // draw the image to the context, release it
    CGContextDrawImage(imageContext, CGRectMake(0, 0, reqsize->width, reqsize->height), image);
    CGImageRelease(image);

    /* convert tmp/RGB -> ret/YUV */
    for(int y=0; y<reqsize->height; y++) {
        for(int x=0; x<reqsize->width; x++) {
            uint8_t r = tmp[y * reqsize->width * 4 + x * 4 + 0];
            uint8_t g = tmp[y * reqsize->width * 4 + x * 4 + 1];
            uint8_t b = tmp[y * reqsize->width * 4 + x * 4 + 2];

            // Y
            *dest.planes[0]++ = (uint8_t)((0.257 * r) + (0.504 * g) + (0.098 * b) + 16);

            // U/V subsampling
            if ((y % 2==0) && (x%2==0)) {
                uint32_t r32=0, g32=0, b32=0;
                for(int i=0; i<2; i++) {
                    for(int j=0; j<2; j++) {
                        r32 += tmp[(y+i) * reqsize->width * 4 + (x+j) * 4 + 0];
                        g32 += tmp[(y+i) * reqsize->width * 4 + (x+j) * 4 + 1];
                        b32 += tmp[(y+i) * reqsize->width * 4 + (x+j) * 4 + 2];
                    }
                }
                r32 = (uint32_t)(r32 * 0.25f);
                g32 = (uint32_t)(g32 * 0.25f);
                b32 = (uint32_t) (b32 * 0.25f);

                // U
                *dest.planes[1]++ = (uint8_t)(-(0.148 * r32) - (0.291 * g32) + (0.439 * b32) + 128);
                // V
                *dest.planes[2]++ = (uint8_t)((0.439 * r32) - (0.368 * g32) - (0.071 * b32) + 128);
            }
        }
    }
    free(tmp);
    return ret;
#else
    return NULL;
#endif
}




mblk_t *ms_load_jpeg_as_yuv(const char *jpgpath, MSVideoSize *reqsize) {
#if defined(WIN32)
    mblk_t *m=NULL;
    DWORD st_sizel;
    DWORD st_sizeh;
    uint8_t *jpgbuf;
    DWORD err;
    HANDLE fd;

#ifdef UNICODE
    WCHAR wUnicode[1024];
    MultiByteToWideChar(CP_UTF8, 0, jpgpath, -1, wUnicode, 1024);
    fd = CreateFile(wUnicode, GENERIC_READ, FILE_SHARE_READ, NULL,
                    OPEN_EXISTING, 0, NULL);
#else
    fd = CreateFile(jpgpath, GENERIC_READ, FILE_SHARE_READ, NULL,
                    OPEN_EXISTING, 0, NULL);
#endif
    if (fd==INVALID_HANDLE_VALUE) {
        ms_error("Failed to open %s",jpgpath);
        return NULL;
    }
    st_sizel=0;
    st_sizeh=0;
    st_sizel = GetFileSize(fd, &st_sizeh);
    if (st_sizeh>0 || st_sizel<=0)
    {
        CloseHandle(fd);
        ms_error("Can't load file %s",jpgpath);
        return NULL;
    }
    jpgbuf=(uint8_t*)ms_malloc0(st_sizel);
    if (jpgbuf==NULL)
    {
        CloseHandle(fd);
        ms_error("Cannot allocate buffer for %s",jpgpath);
        return NULL;
    }
    err=0;
    ReadFile(fd, jpgbuf, st_sizel, &err, NULL) ;

    if (err!=st_sizel) {
        ms_error("Could not read as much as wanted !");
    }
    m=jpeg2yuv(jpgbuf,st_sizel,reqsize);
    ms_free(jpgbuf);
    if (m==NULL)
    {
        CloseHandle(fd);
        ms_error("Cannot load image from buffer for %s",jpgpath);
        return NULL;
    }
    CloseHandle(fd);
    return m;
#else
    mblk_t *m=NULL;
    struct stat statbuf;
    uint8_t *jpgbuf;
    int err;
    int fd=open(jpgpath,O_RDONLY);

    if (fd!=-1) {
        fstat(fd,&statbuf);
        if (statbuf.st_size<=0)
        {
            close(fd);
            ms_error("Cannot load %s",jpgpath);
            return NULL;
        }
        jpgbuf=(uint8_t*)ms_malloc0(statbuf.st_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (jpgbuf==NULL)
        {
            close(fd);
            ms_error("Cannot allocate buffer for %s",jpgpath);
            return NULL;
        }
        err=read(fd,jpgbuf,statbuf.st_size);
        if (err!=statbuf.st_size) {
            ms_error("Could not read as much as wanted: %i<>%li !",err,(long)statbuf.st_size);
        }
        m=jpeg2yuv(jpgbuf,statbuf.st_size,reqsize);
        ms_free(jpgbuf);
        if (m==NULL)
        {
            close(fd);
            ms_error("Cannot load image from buffer for %s",jpgpath);
            return NULL;
        }
    } else {
        ms_error("Cannot load %s",jpgpath);
        return NULL;
    }
    close(fd);
    return m;
#endif
}
Code Example #11
int main(int argc, char* argv[])
{
    AVFormatContext *pFormatCtx;
    int             i, videoindex;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame *pFrame, *pFrameYUV;
    unsigned char *out_buffer;
    AVPacket *packet;
    int y_size;
    int ret, got_picture;
    struct SwsContext *img_convert_ctx;

    char filepath[] = "F:\\codes\\simplest_ffmpeg_player\\simplest_ffmpeg_player\\bigbuckbunny_480x272.h265";
    //SDL---------------------------
    int screen_w = 0, screen_h = 0;
//  SDL_Window *screen;
//  SDL_Renderer* sdlRenderer;
//  SDL_Texture* sdlTexture;
//  SDL_Rect sdlRect;

    FILE *fp_yuv;

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    /* filepath should be absolute path?! */
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
                         AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    //Output Info-----------------------------
    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P
    fp_yuv = fopen("output.yuv", "wb+");
#endif

//  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
//      printf( "Could not initialize SDL - %s\n", SDL_GetError());
//      return -1;
//  }

    screen_w = pCodecCtx->width;
    screen_h = pCodecCtx->height;
    //SDL 2.0 Support for multiple windows
//  screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
//      screen_w, screen_h,
//      SDL_WINDOW_OPENGL);

//  if(!screen) {
//      printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
//      return -1;
//  }

//  sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
//  //IYUV: Y + U + V  (3 planes)
//  //YV12: Y + V + U  (3 planes)
//  sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);

//  sdlRect.x=0;
//  sdlRect.y=0;
//  sdlRect.w=screen_w;
//  sdlRect.h=screen_h;

    //SDL End----------------------
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoindex) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                /* scale operation */
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
                y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
                //SDL---------------------------
#if 0
                SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
//              SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
//              pFrameYUV->data[0], pFrameYUV->linesize[0],
//              pFrameYUV->data[1], pFrameYUV->linesize[1],
//              pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif

//              SDL_RenderClear( sdlRenderer );
//              SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
//              SDL_RenderPresent( sdlRenderer );
//              //SDL End-----------------------
//              //Delay 40ms
//              SDL_Delay(40);
            }
        }
        av_free_packet(packet);
    }
    //flush decoder
    //FIX: Flush Frames remained in Codec
    while (1) {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0)
            break;
        if (!got_picture)
            break;
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                  pFrame->linesize, 0, pCodecCtx->height,
                  pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
        int y_size = pCodecCtx->width * pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
        //SDL---------------------------
//      SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );
//      SDL_RenderClear( sdlRenderer );
//      SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
//      SDL_RenderPresent( sdlRenderer );
//      //SDL End-----------------------
//      //Delay 40ms
//      SDL_Delay(40);
    }

    sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P
    fclose(fp_yuv);
#endif

//  SDL_Quit();

    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
Code Example #12
static int add_to_proxy_output_ffmpeg(
    struct proxy_output_ctx * ctx, AVFrame * frame)
{
    int outsize = 0;

    if (!ctx) {
        return 0;
    }

    if (ctx->sws_ctx && frame &&
            (frame->data[0] || frame->data[1] ||
             frame->data[2] || frame->data[3])) {
        sws_scale(ctx->sws_ctx, (const uint8_t * const*) frame->data,
                  frame->linesize, 0, ctx->orig_height,
                  ctx->frame->data, ctx->frame->linesize);
    }

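    /* If a scaler is active, encode the converted copy (ctx->frame); a NULL input
     * frame is passed straight through so the encoder can still be flushed. */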
    frame = ctx->sws_ctx ? (frame ? ctx->frame : 0) : frame;

    if (frame) {
        frame->pts = ctx->cfra++;
    }

    outsize = avcodec_encode_video(
                  ctx->c, ctx->video_buffer, ctx->video_buffersize,
                  frame);

    if (outsize < 0) {
        fprintf(stderr, "Error encoding proxy frame %d for '%s'\n",
                ctx->cfra - 1, ctx->of->filename);
        return 0;
    }

    if (outsize != 0) {
        AVPacket packet;
        av_init_packet(&packet);

        if (ctx->c->coded_frame->pts != AV_NOPTS_VALUE) {
            packet.pts = av_rescale_q(ctx->c->coded_frame->pts,
                                      ctx->c->time_base,
                                      ctx->st->time_base);
        }
        if (ctx->c->coded_frame->key_frame)
            packet.flags |= AV_PKT_FLAG_KEY;

        packet.stream_index = ctx->st->index;
        packet.data = ctx->video_buffer;
        packet.size = outsize;

        if (av_interleaved_write_frame(ctx->of, &packet) != 0) {
            fprintf(stderr, "Error writing proxy frame %d "
                    "into '%s'\n", ctx->cfra - 1,
                    ctx->of->filename);
            return 0;
        }

        return 1;
    } else {
        return 0;
    }
}
Code Example #13
File: output-example.c  Project: raff/libav
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frames to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }


    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
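This example caches its static img_convert_ctx by hand. An alternative, seen in examples #14 and #22 below, is sws_getCachedContext(), which returns the existing context unchanged as long as the parameters still match. A minimal sketch reusing the names from this example (not part of the original libav sample):

/* Let libswscale manage reuse of the conversion context itself. */
img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                                       c->width, c->height, PIX_FMT_YUV420P,
                                       c->width, c->height, c->pix_fmt,
                                       sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
    fprintf(stderr, "Cannot initialize the conversion context\n");
    exit(1);
}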
Code example #14
File: zm_mpeg.cpp  Project: KristofRobot/ZoneMinder
double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size, bool add_timestamp, unsigned int timestamp )
{
#ifdef HAVE_LIBSWSCALE
	static struct SwsContext *img_convert_ctx = 0;
#endif // HAVE_LIBSWSCALE

	AVCodecContext *c = ost->codec;

	if ( c->pix_fmt != pf )
	{
		memcpy( tmp_opicture->data[0], buffer, buffer_size );
#ifdef HAVE_LIBSWSCALE
		if ( !img_convert_ctx )
		{
			img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
			if ( !img_convert_ctx )
				Panic( "Unable to initialise image scaling context" );
		}
		sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize );
#else // HAVE_LIBSWSCALE
		Fatal( "swscale is required for MPEG mode" );
#endif // HAVE_LIBSWSCALE
	}
	else
	{
		memcpy( opicture->data[0], buffer, buffer_size );
	}
	AVFrame *opicture_ptr = opicture;
	
	AVPacket *pkt = packet_buffers[packet_index];
	av_init_packet( pkt );
    int got_packet = 0;
	if ( of->flags & AVFMT_RAWPICTURE )
	{
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 2, 1)
		pkt->flags |= AV_PKT_FLAG_KEY;
#else
		pkt->flags |= PKT_FLAG_KEY;
#endif
		pkt->stream_index = ost->index;
		pkt->data = (uint8_t *)opicture_ptr;
		pkt->size = sizeof (AVPicture);
        got_packet = 1;
	}
	else
	{
		opicture_ptr->pts = c->frame_number;
		opicture_ptr->quality = c->global_quality;

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(54, 0, 0)
		int ret = avcodec_encode_video2( c, pkt, opicture_ptr, &got_packet );
		if ( ret != 0 )
		{
			Fatal( "avcodec_encode_video2 failed with errorcode %d \"%s\"", ret, av_err2str( ret ) );
		}
#else
		int out_size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, opicture_ptr );
		got_packet = out_size > 0 ? 1 : 0;
		pkt->data = got_packet ? video_outbuf : NULL;
		pkt->size = got_packet ? out_size : 0;
#endif
		if ( got_packet )
		{
			if ( c->coded_frame->key_frame )
			{
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
				pkt->flags |= AV_PKT_FLAG_KEY;
#else
				pkt->flags |= PKT_FLAG_KEY;
#endif
			}

			if ( pkt->pts != (int64_t)AV_NOPTS_VALUE )
			{
				pkt->pts = av_rescale_q( pkt->pts, c->time_base, ost->time_base );
			}
			if ( pkt->dts != (int64_t)AV_NOPTS_VALUE )
			{
				pkt->dts = av_rescale_q( pkt->dts, c->time_base, ost->time_base );
			}
			pkt->duration = av_rescale_q( pkt->duration, c->time_base, ost->time_base );
			pkt->stream_index = ost->index;
		}
	}
    
	return ( opicture_ptr->pts);
}
Code example #15
File: Gear_VideoSource.cpp  Project: jukea/drone
void Gear_VideoSource::runVideo()
{
  int frameFinished=0;

	if (_currentMovie != _MOVIE_IN->type()->value())
	{
		_currentMovie=_MOVIE_IN->type()->value();
		if (!loadMovie(_currentMovie))
			return;
	}

	if (!_movieReady)
		return;

  _VIDEO_OUT->type()->resize(_codecContext->width, _codecContext->height);

  if ((int)_RESET_IN->type()->value() == 1)
  {
    av_seek_frame(_formatContext, -1, _formatContext->start_time, AVSEEK_FLAG_BACKWARD);
  }
    

  // loop until we get a video frame
  // if we reach the end, return to the beginning
  if (av_read_frame(_formatContext, &_packet)<0)
	{
    av_seek_frame(_formatContext, -1, _formatContext->start_time, AVSEEK_FLAG_BACKWARD); 
		_FINISH_OUT->type()->setValue(1.0f);
	}
	else
		_FINISH_OUT->type()->setValue(0.0f);
	
  while (_packet.stream_index!=_videoStreamIndex)
  {    
    av_free_packet(&_packet);
    if (av_read_frame(_formatContext, &_packet)<0)
      av_seek_frame(_formatContext, -1, _formatContext->start_time, AVSEEK_FLAG_BACKWARD);
  }
  
  // Decode video frame
  do
  {    
    avcodec_decode_video2(_codecContext, _frame, &frameFinished, &_packet);
  } while (!frameFinished);

  // Convert the image from its native format to RGBA
  
  sws_scale
        (
            _sws_ctx,
            (uint8_t const * const *)_frame->data,
            _frame->linesize,
            0,
            _codecContext->height,
            _frameRGBA->data,
            _frameRGBA->linesize
        );

  
  //img_convert((AVPicture *)_frameRGBA, PIX_FMT_RGB24, (AVPicture*)_frame, _codecContext->pix_fmt, _codecContext->width, _codecContext->height);

  register char *out=(char*)_VIDEO_OUT->type()->data();
  register char *in=(char*)_frameRGBA->data[0];  
  register int size=_codecContext->width*_codecContext->height;
  for (register int i=0;i<size;i++)
  {
    *out++=*in++;
    *out++=*in++;
    *out++=*in++;
    *out++=255;
  }

  // Free the packet that was allocated by av_read_frame
  av_free_packet(&_packet);
}
Code example #16
File: simple_ffmpeg_player.c  Project: kostyll/sff
int main(int argc, char* argv[]) {
    if (argc < 2) {
        printf("Usage: %s filename\n", argv[0]);
        return 0;
    }
    f = fopen("002.avi", "wb");  // raw video stream packets are dumped to this hard-coded file later on
    if (signal(SIGINT, mysigint) == SIG_ERR)
       printf("Cannot handle SIGINT!\n");
    //if (signal(SIGHUP, mysighup) == SIG_ERR)
    //   printf("Cannot handle SIGHUP!\n");
    //if (signal(SIGTERM, mysigterm) == SIG_ERR)
    //   printf("Cannot handle SIGTERM!\n");

    /* can SIGKILL be handled by our own function? */
    //if (signal(SIGKILL, mysigkill) == SIG_ERR)
    //   printf("Cannot handle SIGKILL!\n");

    // Register all available file formats and codecs
    av_register_all();

    int err;
    // Init SDL with video support
    err = SDL_Init(SDL_INIT_VIDEO);
    if (err < 0) {
        fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
        return -1;
    }

    // Open video file
    const char* filename = argv[1];
    AVFormatContext* format_context = NULL;
    err = avformat_open_input(&format_context, filename, NULL, NULL);
    if (err < 0) {
        fprintf(stderr, "ffmpeg: Unable to open input file\n");
        return -1;
    }

    // Retrieve stream information
    err = avformat_find_stream_info(format_context, NULL);
    if (err < 0) {
        fprintf(stderr, "ffmpeg: Unable to find stream info\n");
        return -1;
    }

    // Dump information about file onto standard error
    av_dump_format(format_context, 0, argv[1], 0);

    // Find the first video stream
    int video_stream;
    for (video_stream = 0; video_stream < format_context->nb_streams; ++video_stream) {
        if (format_context->streams[video_stream]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            break;
        }
    }
    if (video_stream == format_context->nb_streams) {
        fprintf(stderr, "ffmpeg: Unable to find video stream\n");
        return -1;
    }

    AVCodecContext* codec_context = format_context->streams[video_stream]->codec;
    AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
    err = avcodec_open2(codec_context, codec, NULL);
    if (err < 0) {
        fprintf(stderr, "ffmpeg: Unable to open codec\n");
        return -1;
    }

    SDL_Surface* screen = SDL_SetVideoMode(codec_context->width, codec_context->height, 0, 0);
    if (screen == NULL) {
        fprintf(stderr, "Couldn't set video mode\n");
        return -1;
    }

    SDL_Overlay* bmp = SDL_CreateYUVOverlay(codec_context->width, codec_context->height,
                                            SDL_YV12_OVERLAY, screen);

    struct SwsContext* img_convert_context;
    img_convert_context = sws_getCachedContext(NULL,
                                                codec_context->width, codec_context->height,
                                                codec_context->pix_fmt,
                                                codec_context->width, codec_context->height,
                                                PIX_FMT_YUV420P, SWS_BICUBIC,
                                                NULL, NULL, NULL);
    if (img_convert_context == NULL) {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        return -1;
    }


    AVFrame* frame = avcodec_alloc_frame();
    AVPacket packet;
    AVPacket packet_copy;

    // preparing output ...
    int i, ret;
    char * outputfile = "test.mpg";
    AVFormatContext * oformat_context = NULL;
    AVOutputFormat *ofmt = NULL;
    avformat_alloc_output_context2(&oformat_context, NULL, NULL, outputfile);
    if (!oformat_context) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        return (-1);
    };
    ofmt = oformat_context->oformat;

    for (i = 0; i < format_context->nb_streams; i++) {
        AVStream *in_stream = format_context->streams[i];
        AVStream *out_stream = avformat_new_stream(oformat_context, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            return (-1);
        }
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            return (-1);
        }
        out_stream->codec->codec_tag = 0;
        if (oformat_context->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(oformat_context, 0, outputfile, 1);
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oformat_context->pb, outputfile, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", outputfile);
            return (-1);
        }
    }
    ret = avformat_write_header(oformat_context, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        return (-1);
    };
    AVStream *in_stream, *out_stream;


    while (av_read_frame(format_context, &packet) >= 0) {
        av_copy_packet(&packet_copy, &packet);
        // in_stream  = format_context->streams[packet_copy.stream_index];
        // out_stream = oformat_context->streams[packet_copy.stream_index];

        // packet_copy.pts = av_rescale_q_rnd(packet_copy.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        // packet_copy.dts = av_rescale_q_rnd(packet_copy.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        // packet_copy.duration = av_rescale_q(packet_copy.duration, in_stream->time_base, out_stream->time_base);
        // packet_copy.pos = -1;
        ret = av_interleaved_write_frame(oformat_context, &packet_copy);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
        };

        if (packet.stream_index == video_stream) {
            // Video stream packet
            int frame_finished;

            avcodec_decode_video2(codec_context, frame, &frame_finished, &packet);

            if (frame_finished) {
                SDL_LockYUVOverlay(bmp);

                // Convert frame to YV12 pixel format for display in SDL overlay

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];  // U and V are swapped because the overlay is YV12
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                sws_scale(img_convert_context,
                            frame->data, frame->linesize,
                            0, codec_context->height,
                            pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                SDL_Rect rect;
                rect.x = 0;
                rect.y = 0;
                rect.w = codec_context->width;
                rect.h = codec_context->height;
                SDL_DisplayYUVOverlay(bmp, &rect);

                printf("%d\n", packet.size);
                fwrite(packet.data, 1, packet.size, f);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        // Handling SDL events there
        SDL_Event event;
        if (SDL_PollEvent(&event)) {
            if (event.type == SDL_QUIT) {
                break;
            }
        }
    }

    fclose(f);

    av_write_trailer(oformat_context);

    if (oformat_context && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&oformat_context->pb);
    avformat_free_context(oformat_context);

    sws_freeContext(img_convert_context);

    // Free the YUV frame
    av_free(frame);

    // Close the codec
    avcodec_close(codec_context);

    // Close the video file
    avformat_close_input(&format_context);

    // Quit SDL
    SDL_Quit();
    return 0;
}
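The remux loop above copies packets through with the demuxer's timestamps untouched; the commented-out block inside the loop is the rescaling that is normally required when the output stream's time base differs from the input's. Uncommented, that step would look roughly like this (same variables as in the example):

in_stream  = format_context->streams[packet_copy.stream_index];
out_stream = oformat_context->streams[packet_copy.stream_index];

packet_copy.pts = av_rescale_q_rnd(packet_copy.pts, in_stream->time_base,
                                   out_stream->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
packet_copy.dts = av_rescale_q_rnd(packet_copy.dts, in_stream->time_base,
                                   out_stream->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
packet_copy.duration = av_rescale_q(packet_copy.duration, in_stream->time_base,
                                    out_stream->time_base);
packet_copy.pos = -1;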
Code example #17
File: swscale-example.c  Project: vapd-radi/rspf_v2.0
// test by ref -> src -> dst -> out & compare out against ref
// ref & out are YV12
static int doTest(uint8_t *ref[4], int refStride[4], int w, int h, int srcFormat, int dstFormat,
                  int srcW, int srcH, int dstW, int dstH, int flags){
    uint8_t *src[4] = {0};
    uint8_t *dst[4] = {0};
    uint8_t *out[4] = {0};
    int srcStride[4], dstStride[4];
    int i;
    uint64_t ssdY, ssdU, ssdV, ssdA=0;
    struct SwsContext *srcContext = NULL, *dstContext = NULL,
                      *outContext = NULL;
    int res;

    res = 0;
    for (i=0; i<4; i++){
        // avoid stride % bpp != 0
        if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
            srcStride[i]= srcW*3;
        else if (srcFormat==PIX_FMT_RGB48BE || srcFormat==PIX_FMT_RGB48LE)
            srcStride[i]= srcW*6;
        else
            srcStride[i]= srcW*4;

        if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
            dstStride[i]= dstW*3;
        else if (dstFormat==PIX_FMT_RGB48BE || dstFormat==PIX_FMT_RGB48LE)
            dstStride[i]= dstW*6;
        else
            dstStride[i]= dstW*4;

        src[i]= (uint8_t*) malloc(srcStride[i]*srcH);
        dst[i]= (uint8_t*) malloc(dstStride[i]*dstH);
        out[i]= (uint8_t*) malloc(refStride[i]*h);
        if (!src[i] || !dst[i] || !out[i]) {
            perror("Malloc");
            res = -1;

            goto end;
        }
    }

    srcContext= sws_getContext(w, h, PIX_FMT_YUVA420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
    if (!srcContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(PIX_FMT_YUVA420P),
                sws_format_name(srcFormat));
        res = -1;

        goto end;
    }
    dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
    if (!dstContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(srcFormat),
                sws_format_name(dstFormat));
        res = -1;

        goto end;
    }
    outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUVA420P, flags, NULL, NULL, NULL);
    if (!outContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(dstFormat),
                sws_format_name(PIX_FMT_YUVA420P));
        res = -1;

        goto end;
    }
//    printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
//        (int)src[0], (int)src[1], (int)src[2]);

    sws_scale(srcContext, ref, refStride, 0, h   , src, srcStride);
    sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
    sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);

    ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
    ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
    ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
    if (isALPHA(srcFormat) && isALPHA(dstFormat))
        ssdA= getSSD(ref[3], out[3], refStride[3], refStride[3], w, h);

    if (srcFormat == PIX_FMT_GRAY8 || dstFormat==PIX_FMT_GRAY8) ssdU=ssdV=0; //FIXME check that output is really gray

    ssdY/= w*h;
    ssdU/= w*h/4;
    ssdV/= w*h/4;
    ssdA/= w*h;

    printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5"PRId64",%5"PRId64",%5"PRId64",%5"PRId64"\n",
           sws_format_name(srcFormat), srcW, srcH,
           sws_format_name(dstFormat), dstW, dstH,
           flags, ssdY, ssdU, ssdV, ssdA);
    fflush(stdout);

    end:

    sws_freeContext(srcContext);
    sws_freeContext(dstContext);
    sws_freeContext(outContext);

    for (i=0; i<4; i++){
        free(src[i]);
        free(dst[i]);
        free(out[i]);
    }

    return res;
}
Code example #18
File: Tool.cpp  Project: corefan/rabbitim
int CTool::ConvertFormat(/*[in]*/ const AVPicture &inFrame,
                         /*[in]*/ int nInWidth,
                         /*[in]*/ int nInHeight,
                         /*[in]*/ AVPixelFormat inPixelFormat,
                         /*[out]*/AVPicture &outFrame,
                         /*[in]*/ int nOutWidth,
                         /*[in]*/ int nOutHeight,
                         /*[in]*/ AVPixelFormat outPixelFormat)
{
    int nRet = 0;
    struct SwsContext* pSwsCtx = NULL;
    
    // Allocate the output picture buffer
    nRet = avpicture_alloc(&outFrame, outPixelFormat, nOutWidth, nOutHeight);
    if(nRet)
    {
        LOG_MODEL_ERROR("Tool", "avpicture_alloc fail:%x", nRet);
        return nRet;
    }
    
    if(inPixelFormat == outPixelFormat
            && nInWidth == nOutWidth
            && nInHeight == nOutHeight)
    {
        av_picture_copy(&outFrame, &inFrame, inPixelFormat,
                        nInWidth, nInHeight);
        return 0;
    }
    
    // Set up the image conversion (scaling) context
    pSwsCtx = sws_getCachedContext (NULL,
                                    nInWidth,                // source width
                                    nInHeight,               // source height
                                    inPixelFormat,           // source format
                                    nOutWidth,               // destination width
                                    nOutHeight,              // destination height
                                    outPixelFormat,          // destination format
                                    SWS_FAST_BILINEAR,       // scaling algorithm
                                    NULL, NULL, NULL);
    if(NULL == pSwsCtx)
    {
        LOG_MODEL_ERROR("Tool", "sws_getContext false");
        avpicture_free(&outFrame);
        return -3;
    }
    
    // Perform the pixel format conversion
    nRet = sws_scale(pSwsCtx,
                     inFrame.data, inFrame.linesize,
                     0, nInHeight,
                     outFrame.data, outFrame.linesize);
    if(nRet < 0)
    {
        LOG_MODEL_ERROR("Tool", "sws_scale fail:%x", nRet);
        avpicture_free(&outFrame);
    }
    else
    {
        nRet = 0;
    }
    
    sws_freeContext(pSwsCtx);
    return nRet;
}
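ConvertFormat() allocates outFrame itself with avpicture_alloc() and already frees it on its own error paths, so the caller only has to release the picture after a successful call. A hypothetical call site (the decoded input picture and its parameters are assumptions, not taken from the rabbitim project):

AVPicture rgbOut;
// "decoded", srcWidth, srcHeight and srcFormat are assumed to come from the caller.
int nRet = CTool::ConvertFormat(decoded, srcWidth, srcHeight, srcFormat,
                                rgbOut, 320, 240, AV_PIX_FMT_RGB24);
if (nRet == 0)
{
    // ... use rgbOut.data[0] / rgbOut.linesize[0] ...
    avpicture_free(&rgbOut);   // allocated inside ConvertFormat via avpicture_alloc
}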
Code example #19
File: tutorial05.c  Project: DeepinW/ffmpeg-tutorial
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {

  VideoPicture *vp;
  AVPicture pict;

  /* wait until we have space for a new pic */
  SDL_LockMutex(is->pictq_mutex);
  while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
	!is->quit) {
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  }
  SDL_UnlockMutex(is->pictq_mutex);

  if(is->quit)
    return -1;

  // windex is set to 0 initially
  vp = &is->pictq[is->pictq_windex];

  /* allocate or resize the buffer! */
  if(!vp->bmp ||
     vp->width != is->video_st->codec->width ||
     vp->height != is->video_st->codec->height) {
    SDL_Event event;

    vp->allocated = 0;
    /* we have to do it in the main thread */
    event.type = FF_ALLOC_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);

    /* wait until we have a picture allocated */
    SDL_LockMutex(is->pictq_mutex);
    while(!vp->allocated && !is->quit) {
      SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
    if(is->quit) {
      return -1;
    }
  }
  /* We have a place to put our picture on the queue */
  /* If we are skipping a frame, do we set this to null 
     but still return vp->allocated = 1? */


  if(vp->bmp) {

    SDL_LockYUVOverlay(vp->bmp);
    
    /* point pict at the queue */

    pict.data[0] = vp->bmp->pixels[0];
    pict.data[1] = vp->bmp->pixels[2];
    pict.data[2] = vp->bmp->pixels[1];
    
    pict.linesize[0] = vp->bmp->pitches[0];
    pict.linesize[1] = vp->bmp->pitches[2];
    pict.linesize[2] = vp->bmp->pitches[1];
    
    // Convert the image into YUV format that SDL uses
    sws_scale
    (
        is->sws_ctx,
        (uint8_t const * const *)pFrame->data,
        pFrame->linesize,
        0, 
        is->video_st->codec->height, 
        pict.data, 
        pict.linesize
    );
    
    SDL_UnlockYUVOverlay(vp->bmp);
    vp->pts = pts;

    /* now we inform our display thread that we have a pic ready */
    if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
      is->pictq_windex = 0;
    }
    SDL_LockMutex(is->pictq_mutex);
    is->pictq_size++;
    SDL_UnlockMutex(is->pictq_mutex);
  }
  return 0;
}
Code example #20
File: image_writer.c  Project: divVerent/mplayer2
int write_image(struct mp_image *image, const struct mp_csp_details *csp,
                const struct image_writer_opts *opts, const char *filename)
{
    struct mp_image *allocated_image = NULL;
    struct image_writer_opts defs = image_writer_opts_defaults;
    bool is_anamorphic = image->w != image->width || image->h != image->height;

    if (!opts)
        opts = &defs;

    const struct img_writer *writer = get_writer(opts);
    struct image_writer_ctx ctx = { opts, writer };
    int destfmt = IMGFMT_RGB24;

    if (writer->pixfmts) {
        destfmt = writer->pixfmts[0];   // default to first pixel format
        for (int *fmt = writer->pixfmts; *fmt; fmt++) {
            if (*fmt == image->imgfmt) {
                destfmt = *fmt;
                break;
            }
        }
    }

    if (image->imgfmt != destfmt || is_anamorphic) {
        struct mp_image *dst = alloc_mpi(image->w, image->h, destfmt);

        struct SwsContext *sws = sws_getContextFromCmdLine_hq(image->width,
                                                              image->height,
                                                              image->imgfmt,
                                                              dst->width,
                                                              dst->height,
                                                              dst->imgfmt);

        struct mp_csp_details colorspace = MP_CSP_DETAILS_DEFAULTS;
        if (csp)
            colorspace = *csp;
        // This is a property of the output device; images always use
        // full-range RGB.
        colorspace.levels_out = MP_CSP_LEVELS_PC;
        mp_sws_set_colorspace(sws, &colorspace);

        sws_scale(sws, (const uint8_t **)image->planes, image->stride, 0,
                  image->height, dst->planes, dst->stride);

        sws_freeContext(sws);

        allocated_image = dst;
        image = dst;
    }

    FILE *fp = fopen(filename, "wb");
    int success = 0;
    if (fp == NULL) {
        mp_msg(MSGT_CPLAYER, MSGL_ERR,
               "Error opening '%s' for writing!\n", filename);
    } else {
        success = writer->write(&ctx, image, fp);
        success = !fclose(fp) && success;
        if (!success)
            mp_msg(MSGT_CPLAYER, MSGL_ERR, "Error writing file '%s'!\n",
                   filename);
    }

    free_mp_image(allocated_image);

    return success;
}
Code example #21
File: ffmpeg_impl.hpp  Project: 119/vdc
/// write a frame with FFMPEG
inline bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
{
    bool ret = false;

    if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
        return false;
    width = frame_width;
    height = frame_height;

    // typecast from opaque data type to implemented struct
#if LIBAVFORMAT_BUILD > 4628
    AVCodecContext *c = video_st->codec;
#else
    AVCodecContext *c = &(video_st->codec);
#endif

#if LIBAVFORMAT_BUILD < 5231
    // This is not needed in the latest versions of ffmpeg
    if( c->codec_id == CODEC_ID_RAWVIDEO && origin != 1 )
    {
        if( !temp_image.data )
        {
            temp_image.step = (width*cn + 3) & -4;
            temp_image.width = width;
            temp_image.height = height;
            temp_image.cn = cn;
            temp_image.data = (unsigned char*)malloc(temp_image.step*temp_image.height);
        }
        for( int y = 0; y < height; y++ )
            memcpy(temp_image.data + y*temp_image.step, data + (height-1-y)*step, width*cn);
        data = temp_image.data;
        step = temp_image.step;
    }
#else
    if( width*cn != step )
    {
        if( !temp_image.data )
        {
            temp_image.step = width*cn;
            temp_image.width = width;
            temp_image.height = height;
            temp_image.cn = cn;
            temp_image.data = (unsigned char*)malloc(temp_image.step*temp_image.height);
        }
        if (origin == 1)
            for( int y = 0; y < height; y++ )
                memcpy(temp_image.data + y*temp_image.step, data + (height-1-y)*step, temp_image.step);
        else
            for( int y = 0; y < height; y++ )
                memcpy(temp_image.data + y*temp_image.step, data + y*step, temp_image.step);
        data = temp_image.data;
        step = temp_image.step;
    }
#endif

    // check parameters
    if (input_pix_fmt == PIX_FMT_BGR24) {
        if (cn != 3) {
            return false;
        }
    }
    else if (input_pix_fmt == PIX_FMT_GRAY8) {
        if (cn != 1) {
            return false;
        }
    }
    else {
        assert(false);
    }

    if ( c->pix_fmt != input_pix_fmt ) {
        assert( input_picture );
        // let input_picture point to the raw data buffer of 'image'
        avpicture_fill((AVPicture *)input_picture, (uint8_t *) data,
                       (PixelFormat)input_pix_fmt, width, height);

        if( !img_convert_ctx )
        {
            img_convert_ctx = sws_getContext(width,
                                             height,
                                             (PixelFormat)input_pix_fmt,
                                             c->width,
                                             c->height,
                                             c->pix_fmt,
                                             SWS_BICUBIC,
                                             NULL, NULL, NULL);
            if( !img_convert_ctx )
                return false;
        }

        if ( sws_scale(img_convert_ctx, input_picture->data,
                       input_picture->linesize, 0,
                       height,
                       picture->data, picture->linesize) < 0 )
            return false;
    }
    else{
        avpicture_fill((AVPicture *)picture, (uint8_t *) data,
                       (PixelFormat)input_pix_fmt, width, height);
    }

    ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, picture) >= 0;

    return ret;
}
Code example #22
void VCapsConvertElement::iStream(const QbPacket &packet)
{
    if (!packet.caps().isValid() ||
        packet.caps().mimeType() != "video/x-raw" ||
        this->state() != ElementStatePlaying)
        return;

    if (packet.caps() == this->m_caps)
    {
        emit this->oStream(packet);

        return;
    }

    int iWidth = packet.caps().property("width").toInt();
    int iHeight = packet.caps().property("height").toInt();
    QString format = packet.caps().property("format").toString();

    PixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str());

    QList<QByteArray> props = this->m_caps.dynamicPropertyNames();

    int oWidth = props.contains("width")?
                     this->m_caps.property("width").toInt():
                     iWidth;

    int oHeight = props.contains("height")?
                      this->m_caps.property("height").toInt():
                      iHeight;

    PixelFormat oFormat;

    if (props.contains("format"))
    {
        QString oFormatString = this->m_caps.property("format").toString();

        oFormat = av_get_pix_fmt(oFormatString.toStdString().c_str());
    }
    else
        oFormat = iFormat;

    SwsContext *scaleContext = sws_getCachedContext(NULL,
                                                    iWidth,
                                                    iHeight,
                                                    iFormat,
                                                    oWidth,
                                                    oHeight,
                                                    oFormat,
                                                    SWS_FAST_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);

    if (!scaleContext)
        return;

    int oBufferSize = avpicture_get_size(oFormat,
                                         oWidth,
                                         oHeight);

    QSharedPointer<uchar> oBuffer(new uchar[oBufferSize]);

    AVPicture iPicture;

    avpicture_fill(&iPicture,
                   (uint8_t *) packet.buffer().data(),
                   iFormat,
                   iWidth,
                   iHeight);

    AVPicture oPicture;

    avpicture_fill(&oPicture,
                   (uint8_t *) oBuffer.data(),
                   oFormat,
                   oWidth,
                   oHeight);

    sws_scale(scaleContext,
              (uint8_t **) iPicture.data,
              iPicture.linesize,
              0,
              iHeight,
              oPicture.data,
              oPicture.linesize);

    sws_freeContext(scaleContext);

    QbPacket oPacket(packet.caps().update(this->m_caps),
                     oBuffer,
                     oBufferSize);

    oPacket.setPts(packet.pts());
    oPacket.setDuration(packet.duration());
    oPacket.setTimeBase(packet.timeBase());
    oPacket.setIndex(packet.index());

    emit this->oStream(oPacket);
}
Code example #23
File: MediaEngine.cpp  Project: Bigpet/ppsspp
bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if ((!m_pFrame)||(!m_pFrameRGB))
		return false;

	updateSwsFormat(videoPixelMode);
	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	av_init_packet(&packet);
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() gives us the re-ordered frames with a NULL packet.
			if (dataEnd)
				av_free_packet(&packet);

			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
			if (frameFinished) {
				if (!skipFrame) {
					sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
						m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
				}

				if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
					m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
				else
					m_videopts += av_frame_get_pkt_duration(m_pFrame);
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
		av_free_packet(&packet);
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
Code example #24
File: AVCodecEncoder.cpp  Project: ModeenF/haiku
status_t
AVCodecEncoder::_EncodeVideo(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE_IO("AVCodecEncoder::_EncodeVideo(%p, %lld, %p)\n", buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	while (frameCount > 0) {
		size_t bpr = fInputFormat.u.raw_video.display.bytes_per_row;
		size_t bufferSize = fInputFormat.u.raw_video.display.line_count * bpr;

		// We should always get chunky bitmaps, so this code should be safe.
		fSrcFrame.data[0] = (uint8_t*)buffer;
		fSrcFrame.linesize[0] = bpr;

		// Run the pixel format conversion
		sws_scale(fSwsContext, fSrcFrame.data, fSrcFrame.linesize, 0,
			fInputFormat.u.raw_video.display.line_count, fDstFrame.data,
			fDstFrame.linesize);

		// Encode one video chunk/frame.
		int usedBytes = avcodec_encode_video(fContext, fChunkBuffer,
			kDefaultChunkBufferSize, fFrame);

		// avcodec.h says we need to set it.
		fFrame->pts++;

		if (usedBytes < 0) {
			TRACE("  avcodec_encode_video() failed: %d\n", usedBytes);
			return B_ERROR;
		}

		// Maybe we need to use this PTS to calculate start_time:
		if (fContext->coded_frame->pts != kNoPTSValue) {
			TRACE("  codec frame PTS: %lld (codec time_base: %d/%d)\n",
				fContext->coded_frame->pts, fContext->time_base.num,
				fContext->time_base.den);
		} else {
			TRACE("  codec frame PTS: N/A (codec time_base: %d/%d)\n",
				fContext->time_base.num, fContext->time_base.den);
		}

		// Setup media_encode_info, most important is the time stamp.
		info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_video.field_rate);

		info->flags = 0;
		if (fContext->coded_frame->key_frame)
			info->flags |= B_MEDIA_KEY_FRAME;

		// Write the chunk
		ret = WriteChunk(fChunkBuffer, usedBytes, info);
		if (ret != B_OK) {
			TRACE("  error writing chunk: %s\n", strerror(ret));
			break;
		}

		// Skip to the next frame (but usually, there is only one to encode
		// for video).
		frameCount--;
		fFramesWritten++;
		buffer = (const void*)((const uint8*)buffer + bufferSize);
	}

	return ret;
}
Code example #25
File: zwvideothread.cpp  Project: weinkym/src_miao
int video_thread(void *arg)
{
    ZW_LOG_WARNING(QString("TTTTTTTV"));
    VideoState *is = (VideoState *) arg;
    AVPacket pkt1, *packet = &pkt1;

    int ret, got_picture, numBytes;

    double video_pts = 0; // current video pts
    double audio_pts = 0; // audio pts


    /// Video decoding state
    AVFrame *pFrame, *pFrameRGB;
    uint8_t *out_buffer_rgb; // RGB data after decoding
    struct SwsContext *img_convert_ctx;  // converts the decoded video to the output format

    AVCodecContext *pCodecCtx = is->video_st->codec; // video decoder

    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    /// Here we convert the decoded YUV data to RGB32
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

    numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);

    out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrameRGB, out_buffer_rgb, AV_PIX_FMT_RGB32,
            pCodecCtx->width, pCodecCtx->height);

    while(1)
    {
        if (is->quit)
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            break;
        }

        if (is->isPause == true) // check for pause
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            SDL_Delay(10);
            continue;
        }

        if (packet_queue_get(&is->videoq, packet, 0) <= 0)
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            if (is->readFinished)
            {// the queue is empty and reading has finished
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                break;
            }
            else
            {
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                SDL_Delay(1); // the queue is only temporarily empty
                continue;
            }
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        // Receiving this marker means a seek was just performed; the decoder's buffers must be flushed
        if(strcmp((char*)packet->data,FLUSH_DATA) == 0)
        {
            avcodec_flush_buffers(is->video_st->codec);
            av_free_packet(packet);
            continue;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);

        if (ret < 0) {
            qDebug()<<"decode error.\n";
            av_free_packet(packet);
            continue;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE)
        {
            video_pts = *(uint64_t *) pFrame->opaque;
        }
        else if (packet->dts != AV_NOPTS_VALUE)
        {
            video_pts = packet->dts;
        }
        else
        {
            video_pts = 0;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        video_pts *= av_q2d(is->video_st->time_base);
        video_pts = synchronize_video(is, pFrame, video_pts);

        if (is->seek_flag_video)
        {
            // A seek occurred, so skip the frames between the keyframe and the target time
           if (video_pts < is->seek_time)
           {
               ZW_LOG_WARNING(QString("TTTTTTTV"));
               av_free_packet(packet);
               continue;
           }
           else
           {
               ZW_LOG_WARNING(QString("TTTTTTTV"));
               is->seek_flag_video = 0;
           }
        }

        while(1)
        {
            if (is->quit)
            {
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                break;
            }

            audio_pts = is->audio_clock;

            // Mainly because video_clock is reset to 0 when seeking,
            // video_pts has to be refreshed here;
            // otherwise a backwards seek would get stuck in this loop.
            video_pts = is->video_clock;
            ZW_LOG_WARNING(QString("TTTTTTTVaudio_pts=%1,video_pts=%2").arg(audio_pts).arg(video_pts));


            if (video_pts <= audio_pts) break;
//            if (video_pts >= audio_pts) break;

            int delayTime = (video_pts - audio_pts) * 1000;

            delayTime = delayTime > 5 ? 5:delayTime;
            ZW_LOG_WARNING(QString("TTTTTTTV"));

            SDL_Delay(delayTime);
        }

        if (got_picture) {
            sws_scale(img_convert_ctx,
                    (uint8_t const * const *) pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
                    pFrameRGB->linesize);

            // Load this RGB data into a QImage
            QImage tmpImg((uchar *)out_buffer_rgb,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
            QImage image = tmpImg.copy(); // copy the image and hand it to the UI for display
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            is->player->disPlayVideo(image); // call the function that emits the display signal
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        av_free_packet(packet);
        ZW_LOG_WARNING(QString("TTTTTTTV"));

    }

    av_free(pFrame);
    av_free(pFrameRGB);
    av_free(out_buffer_rgb);

    if (!is->quit)
    {
        ZW_LOG_WARNING(QString("TTTTTTTV"));
        is->quit = true;
    }

    ZW_LOG_WARNING(QString("TTTTTTTV"));
    is->videoThreadFinished = true;

    return 0;
}
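avpicture_get_size() and avpicture_fill(), used above to set up the RGB32 buffer, are deprecated in current FFmpeg. As a sketch only (same variable names as the example, requires <libavutil/imgutils.h>; compare example #29 below), the same setup with the av_image_* helpers:

numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB32,
                                    pCodecCtx->width, pCodecCtx->height, 1);
out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize,
                     out_buffer_rgb, AV_PIX_FMT_RGB32,
                     pCodecCtx->width, pCodecCtx->height, 1);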
Code example #26
File: kr_udp_recvr.c  Project: dsheeler/krad_radio
void kr_udp_recvr (kr_udp_recvr_t *udp_recvr, int port) {

	krad_rebuilder_t *krad_rebuilder;
	int sd;
	int keyframe;
	int started;
	int ret;
	int slen;
	unsigned char *buffer;
	unsigned char *packet_buffer;
	struct sockaddr_in local_address;
	struct sockaddr_in remote_address;
  struct SwsContext *scaler;
	
	scaler = NULL;
	started = 0;
	slen = sizeof (remote_address);
	
	buffer = calloc (1, 45000);
	packet_buffer = calloc (1, 2300000);
	sd = socket (AF_INET, SOCK_DGRAM, 0);
	krad_rebuilder = krad_rebuilder_create ();

	memset((char *) &local_address, 0, sizeof(local_address));
	local_address.sin_family = AF_INET;
	local_address.sin_port = htons (port);
	local_address.sin_addr.s_addr = htonl(INADDR_ANY);

	if (bind (sd, (struct sockaddr *)&local_address, sizeof(local_address)) == -1 ) {
		printf("bind error\n");
		exit(1);
	}
	
  kr_udp_recvr_alloc_framebuf (udp_recvr);
	
  udp_recvr->vpxdec = krad_vpx_decoder_create ();

	while (1) {
	
	  if (destroy == 1) {
		  printf ("Got signal!\n");
	    break;
	  }
    if (kr_videoport_error (udp_recvr->videoport)) {
      printf ("Error: %s\n", "videoport Error");
      break;
    }	
	
		ret = recvfrom (sd, buffer, 2000, 0, (struct sockaddr *)&remote_address, (socklen_t *)&slen);
		
		if (ret == -1) {
			printf("failed recvin udp\n");
			exit (1);
		}
		
    //printf ("Received packet from %s:%d\n", 
  	//	inet_ntoa(remote_address.sin_addr), ntohs(remote_address.sin_port));


		krad_rebuilder_write (krad_rebuilder, buffer, ret);
		ret = krad_rebuilder_read_packet (krad_rebuilder, packet_buffer, 1, &keyframe);
  
    if (ret != 0) {

		  //printf ("read a packet with %d bytes key: %d     \n", ret, keyframe);

      if ((started == 1) || ((started == 0) && (keyframe == 1))) {
		    started = 1;
			} else {
			  continue;
			}
			
      krad_vpx_decoder_decode (udp_recvr->vpxdec, packet_buffer, ret);
      
      while (udp_recvr->vpxdec->img != NULL) {
     
          int rgb_stride_arr[3] = {4*udp_recvr->width, 0, 0};
          uint8_t *dst[4];
          
          scaler = sws_getCachedContext ( scaler,
                                          udp_recvr->vpxdec->width,
                                          udp_recvr->vpxdec->height,
                                          PIX_FMT_YUV420P,
                                          udp_recvr->width,
                                          udp_recvr->height,
                                          PIX_FMT_RGB32, 
                                          SWS_BICUBIC,
                                          NULL, NULL, NULL);


        int pos = ((udp_recvr->frames_dec + 1) % udp_recvr->framebufsize) * udp_recvr->frame_size;
        dst[0] = (unsigned char *)udp_recvr->rgba + pos;

        sws_scale (scaler,
                   (const uint8_t * const*)udp_recvr->vpxdec->img->planes,
                    udp_recvr->vpxdec->img->stride,
                   0, udp_recvr->vpxdec->height,
                   dst, rgb_stride_arr);


          udp_recvr->frames_dec++;
          
        krad_vpx_decoder_decode_again (udp_recvr->vpxdec);
          
		  }
	  }
	}

  krad_vpx_decoder_destroy (&udp_recvr->vpxdec);

  kr_udp_recvr_free_framebuf (udp_recvr);

	krad_rebuilder_destroy (krad_rebuilder);
	close (sd);
  sws_freeContext ( scaler );
	free (packet_buffer);
	free (buffer);
}
Code example #27
File: cimgffmpeg.cpp  Project: aurora/phash
int ReadFrames(VFInfo *st_info, CImgList<uint8_t> *pFrameList, unsigned int low_index, unsigned int hi_index)
{
        //target pixel format
	PixelFormat ffmpeg_pixfmt;
	if (st_info->pixelformat == 0)
	    ffmpeg_pixfmt = PIX_FMT_GRAY8;
	else 
	    ffmpeg_pixfmt = PIX_FMT_RGB24;

	st_info->next_index = low_index;

	if (st_info->pFormatCtx == NULL){
	    st_info->current_index= 0;

        av_log_set_level(AV_LOG_QUIET);
	    av_register_all();
	
	    // Open video file
	    if(av_open_input_file(&st_info->pFormatCtx, st_info->filename, NULL, 0, NULL)!=0)
		return -1 ; // Couldn't open file
	 
	    // Retrieve stream information
	    if(av_find_stream_info(st_info->pFormatCtx)<0)
		return -1; // Couldn't find stream information
	
	    //dump_format(pFormatCtx,0,NULL,0);//debugging function to print information about format
	
	    unsigned int i;
	    // Find the video stream
	    for(i=0; i<st_info->pFormatCtx->nb_streams; i++)
	    {
		if(st_info->pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) 
	        {
		    st_info->videoStream=i;
		    break;
		}
	    }
	    if(st_info->videoStream==-1)
		return -1; //no video stream
	
	
	    // Get a pointer to the codec context for the video stream
	    st_info->pCodecCtx = st_info->pFormatCtx->streams[st_info->videoStream]->codec;
	    if (st_info->pCodecCtx == NULL){
		return -1;
	    }

	    // Find the decoder
	    st_info->pCodec = avcodec_find_decoder(st_info->pCodecCtx->codec_id);
	    if(st_info->pCodec==NULL) 
	    {
	  	return -1 ; // Codec not found
	    }
	    // Open codec
	    if(avcodec_open(st_info->pCodecCtx, st_info->pCodec)<0)
		return -1; // Could not open codec

	    st_info->height = (st_info->height<=0) ? st_info->pCodecCtx->height : st_info->height;
	    st_info->width  = (st_info->width<= 0) ? st_info->pCodecCtx->width : st_info->width;
	}

        AVFrame *pFrame;

	// Allocate video frame
	pFrame=avcodec_alloc_frame();
	if (pFrame==NULL)
	    return -1;

	// Allocate an AVFrame structure
	AVFrame *pConvertedFrame = avcodec_alloc_frame();
	if(pConvertedFrame==NULL)
	  return -1;
		
	uint8_t *buffer;
	int numBytes;
	// Determine required buffer size and allocate buffer
	numBytes=avpicture_get_size(ffmpeg_pixfmt, st_info->width,st_info->height);
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	if (buffer == NULL)
	    return -1;

	avpicture_fill((AVPicture *)pConvertedFrame,buffer,ffmpeg_pixfmt,st_info->width,st_info->height);
		
	int frameFinished;
	int size = 0;
	
	AVPacket packet;
	int result = 1;
	CImg<uint8_t> next_image;
	SwsContext *c = sws_getContext(st_info->pCodecCtx->width, st_info->pCodecCtx->height, st_info->pCodecCtx->pix_fmt, st_info->width, st_info->height, ffmpeg_pixfmt , SWS_BICUBIC, NULL, NULL, NULL);
	while ((result>=0)&&(size<st_info->nb_retrieval)&&(st_info->current_index<=hi_index)){  
	  result =  av_read_frame(st_info->pFormatCtx, &packet);
          if (result < 0)
	      break;
    	  if(packet.stream_index==st_info->videoStream) {
	      avcodec_decode_video(st_info->pCodecCtx, pFrame, &frameFinished,packet.data, packet.size);
	      if(frameFinished) {
		  if (st_info->current_index == st_info->next_index){
		      st_info->next_index += st_info->step;
		      sws_scale(c, pFrame->data, pFrame->linesize, 0, st_info->pCodecCtx->height, pConvertedFrame->data, pConvertedFrame->linesize);
			
                      if (ffmpeg_pixfmt == PIX_FMT_GRAY8) {
			  next_image.assign(pConvertedFrame->data[0],1,st_info->width,st_info->height,1,true);
			  next_image.permute_axes("yzcx");
			  pFrameList->push_back(next_image);
			  size++;
		      }
		      else if (ffmpeg_pixfmt == PIX_FMT_RGB24){
			  next_image.assign(*pConvertedFrame->data,3,st_info->width,st_info->height,1,true);
			  next_image.permute_axes("yzcx");
			  pFrameList->push_back(next_image);
			  size++;
		      }
		  }    
		  st_info->current_index++;
	      }
	      av_free_packet(&packet);
	  }
	}


	if (result < 0){
	    avcodec_close(st_info->pCodecCtx);
	    av_close_input_file(st_info->pFormatCtx);
	    st_info->pFormatCtx = NULL;
	    st_info->pCodecCtx = NULL;
	    st_info->width = -1;
	    st_info->height = -1;
	}

	av_free(buffer);
	buffer = NULL;
	av_free(pConvertedFrame);
	pConvertedFrame = NULL;
	av_free(pFrame);
	pFrame = NULL;
	sws_freeContext(c);
	c = NULL;

	return size; 
}
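ReadFrames() is written against a long-removed FFmpeg API. As an orientation only (a mapping based on current FFmpeg headers, not a drop-in patch for the pHash code), the deprecated calls correspond roughly to:

/*
 *   av_open_input_file()   -> avformat_open_input()
 *   av_find_stream_info()  -> avformat_find_stream_info()
 *   CODEC_TYPE_VIDEO       -> AVMEDIA_TYPE_VIDEO
 *   avcodec_open()         -> avcodec_open2()
 *   avcodec_alloc_frame()  -> av_frame_alloc()
 *   avcodec_decode_video() -> avcodec_decode_video2(), later avcodec_send_packet()/avcodec_receive_frame()
 *   av_close_input_file()  -> avformat_close_input()
 */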
Code example #28
int RemoteCameraRtsp::Capture( Image &image )
{
	AVPacket packet;
	uint8_t* directbuffer;
	int frameComplete = false;
	
	/* Request a writeable buffer of the target image */
	directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
	if(directbuffer == NULL) {
		Error("Failed requesting writeable buffer for the captured image.");
		return (-1);
	}
	
    while ( true )
    {
        buffer.clear();
        if ( !rtspThread->isRunning() )
            return (-1);

        if ( rtspThread->getFrame( buffer ) )
        {
            Debug( 3, "Read frame %d bytes", buffer.size() );
            Debug( 4, "Address %p", buffer.head() );
            Hexdump( 4, buffer.head(), 16 );

            if ( !buffer.size() )
                return( -1 );

            if(mCodecContext->codec_id == AV_CODEC_ID_H264)
            {
                // SPS and PPS frames should be saved and appended to IDR frames
                int nalType = (buffer.head()[3] & 0x1f);
                
                // SPS
                if(nalType == 7)
                {
                    lastSps = buffer;
                    continue;
                }
                // PPS
                else if(nalType == 8)
                {
                    lastPps = buffer;
                    continue;
                }
                // IDR
                else if(nalType == 5)
                {
                    buffer += lastSps;
                    buffer += lastPps;
                }
            }

            av_init_packet( &packet );
            
	    while ( !frameComplete && buffer.size() > 0 )
	    {
		packet.data = buffer.head();
		packet.size = buffer.size();
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
		int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else
		int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif
		if ( len < 0 )
		{
			Error( "Error while decoding frame %d", frameCount );
			Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
			buffer.clear();
			continue;
		}
		Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
		//if ( buffer.size() < 400 )
		   //Hexdump( 0, buffer.head(), buffer.size() );
		   
		buffer -= len;

	    }
            if ( frameComplete ) {
	       
		Debug( 3, "Got frame %d", frameCount );
			    
		avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
			
#if HAVE_LIBSWSCALE
		if(mConvertContext == NULL) {
			if(config.cpu_extensions && sseversion >= 20) {
				mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC | SWS_CPU_CAPS_SSE2, NULL, NULL, NULL );
			} else {
				mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
			}
			if(mConvertContext == NULL)
				Fatal( "Unable to create conversion context");
		}
	
		if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
			Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
		Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
	
		frameCount++;

	     } /* frame complete */
	     
	     av_free_packet( &packet );
	} /* getFrame() */
 
	if(frameComplete)
		return (0);
	
    }
    return (0) ;
}
Code example #29
File: convertvideo.cpp  Project: Anupam-/webcamoid
AkPacket ConvertVideo::convert(const AkPacket &packet)
{
    AkVideoPacket videoPacket(packet);

    // Convert input format.
    QString format = AkVideoCaps::pixelFormatToString(videoPacket.caps().format());
    AVPixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str());

    // Initialize rescaling context.
    this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                iFormat,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                AV_PIX_FMT_BGRA,
                                                SWS_FAST_BILINEAR,
                                                NULL,
                                                NULL,
                                                NULL);

    if (!this->m_scaleContext)
        return AkPacket();

    // Create iPicture.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) iFrame.data,
                         iFrame.linesize,
                         (const uint8_t *) videoPacket.buffer().constData(),
                         iFormat,
                         videoPacket.caps().width(),
                         videoPacket.caps().height(),
                         1) < 0)
        return AkPacket();

    // Create oPicture
    int frameSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA,
                                             videoPacket.caps().width(),
                                             videoPacket.caps().height(),
                                             1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) oFrame.data,
                         oFrame.linesize,
                         (const uint8_t *) oBuffer.constData(),
                         AV_PIX_FMT_BGRA,
                         videoPacket.caps().width(),
                         videoPacket.caps().height(),
                         1) < 0)
        return AkPacket();

    // Convert picture format
    sws_scale(this->m_scaleContext,
              iFrame.data,
              iFrame.linesize,
              0,
              videoPacket.caps().height(),
              oFrame.data,
              oFrame.linesize);

    // Create packet
    AkVideoPacket oPacket(packet);
    oPacket.caps().format() = AkVideoCaps::Format_bgra;
    oPacket.buffer() = oBuffer;

    return oPacket.toPacket();
}
Code example #30
    void decode()
    {
        if (host.state < CodecBoxDecoderState::Metadata) return;
        AVPacket packet;
        if(av_read_frame(formatContext, &packet) < 0)
        {
            host.state = CodecBoxDecoderState::Ended; // TODO error?
            close();
            return;
        }

        host.state = CodecBoxDecoderState::Metadata;
        host.buffer = nullptr;
        int frameFinished;
        auto timeBase = formatContext->streams[packet.stream_index]->time_base;
        host.bufferPresentationTime = (double)packet.pts * timeBase.num / timeBase.den;
        host.bufferDuration = (double)packet.duration * timeBase.num / timeBase.den;

        if(host.videoEnabled && (packet.stream_index == videoStream))
        {
            ensureVideoPostProcess();
            if (host.state < CodecBoxDecoderState::Metadata) goto err;
            avcodec_decode_video2(videoCodecContext, decodedFrame, &frameFinished, &packet);
            if(frameFinished)
            {
                uint8_t* dest[] = { videoBuffer };
                int destLineSize[] = { videoLineSize };
                sws_scale(sws, decodedFrame->data, decodedFrame->linesize, 0, videoCodecContext->height,
                        dest, destLineSize);
                host.state = CodecBoxDecoderState::Video;
                host.buffer = videoBuffer;
            }
        }
        else if (host.audioEnabled && (packet.stream_index == audioStream))
        {
            host.bufferSampleCount = 0;
            ensureAudioPostProcess();
            if (host.state < CodecBoxDecoderState::Metadata) goto err;

            while (packet.size > 0)
            {
                int len = avcodec_decode_audio4(audioCodecContext, decodedFrame, &frameFinished, &packet);
                if (len <= 0) break;
                packet.size -= len;
                packet.data += len;
                if (!frameFinished) continue;
                int padding = 32;
                int newSize = std::min(2048, host.bufferSampleCount + decodedFrame->nb_samples + padding) *
                        host.channels * sizeof(audioBuffer[0]);
                auto buf = (float*)realloc(audioBuffer, newSize);
                if (buf == nullptr) break; // TODO handle error
                audioBuffer = buf;
                auto tail = audioBuffer + host.bufferSampleCount * host.channels;
                int n = swr_convert(swr, (uint8_t **)(&tail), decodedFrame->nb_samples + padding,
                        (const uint8_t **)decodedFrame->data, decodedFrame->nb_samples);
                if (n < 0) break;
                host.bufferSampleCount += n;
            }
            host.state = CodecBoxDecoderState::Audio;
            host.buffer = audioBuffer;
        }
    err:
        av_packet_unref(&packet);
    }