Example No. 1
int decode_thread(void *arg) {
// The first half of this function is nothing new; its job is to open the file and find the indices of the video and audio streams.
// The only difference is that we save the format context into our big struct. Once the streams are found, we call stream_component_open(), a function we will define later.
// This is a natural way to split things up: since we set up a lot of similar code for video and audio decoding, writing this one function lets us reuse it for both.
  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;
  // http://www.ffmpeg.org/doxygen/3.1/group__lavu__dict.html#details
  AVDictionary *io_dict = NULL; // AVDictionary: a simple key/value metadata store.
  // http://www.ffmpeg.org/doxygen/3.1/structAVIOInterruptCB.html#details
  AVIOInterruptCB callback;     // Callback for checking whether to abort blocking functions.

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  // http://www.ffmpeg.org/doxygen/3.1/aviobuf_8c.html#ae8589aae955d16ca228b6b9d66ced33d
  // Initialize is->io_context from is->filename; it manages reading and writing of the file.
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // The second argument is the file path; this function reads the file header and stores the information in the pFormatCtx struct.
  // The last two arguments specify the file format and format options; passing NULL/0 lets libavformat detect them automatically.
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

  // Dump information about the file onto standard error.
  // avformat_find_stream_info() filled in pFormatCtx->streams; av_dump_format() prints that information:
  av_dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first audio and video streams
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }

  // stream_component_open() finds the decoder, sets up the audio parameters, saves key information into the big struct, and launches the audio and video threads.
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop
  // Everything above was opening the file and locating the stream indices; the main loop starts here.
  for(;;) {
    if(is->quit) {
      break;  // leave the loop once the quit flag is set
    }
    // seek stuff goes here
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
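
A note on the callback wired up above: decode_interrupt_cb itself is not part of this listing. A minimal sketch of what it might look like, assuming it only checks the quit flag that the main loop also uses (the body is an assumption, not the original implementation):

// Sketch (assumption): return non-zero to make libavformat abort blocking I/O
// once the player has been asked to quit. 'opaque' is the VideoState set via
// callback.opaque above.
static int decode_interrupt_cb(void *opaque) {
  VideoState *is = (VideoState *)opaque;
  return (is && is->quit);
}
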
Example No. 2
bool VideoDecoder::Load()
{
  unsigned int i;
  int numBytes;
  uint8_t *tmp;

  if ( avformat_open_input(&mFormatContext, mFilename.c_str(), NULL, NULL) != 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_open_input_file failed\n");
    return false;
  }

  if ( avformat_find_stream_info(mFormatContext, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_find_stream_info failed\n");
    return false;
  }

  /* Some debug info */
  av_dump_format(mFormatContext, 0, mFilename.c_str(), false);

  for (i = 0; i < mFormatContext->nb_streams; i++)
  {
    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
    {
      mVideoStream = i;
      break;
    }
  }

  if ( mVideoStream == -1 )
  {
    fprintf(stderr, "VideoDecoder::Load - No video stream found.\n");
    return false;
  }

  mCodecContext = mFormatContext->streams[mVideoStream]->codec;
  mCodec = avcodec_find_decoder(mCodecContext->codec_id);
  if ( !mCodec )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_find_decoder failed\n");
    return false;
  }

  if ( avcodec_open2(mCodecContext, mCodec, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_open failed\n");
    return false;
  }

  mFrame = avcodec_alloc_frame();
  if ( !mFrame )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating frame.\n");
    return false;
  }

  mFrameRGB = avcodec_alloc_frame();
  if ( !mFrameRGB )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating RGB frame.\n");
    return false;
  }

  /* Determine required buffer size and allocate buffer */
  numBytes = avpicture_get_size(PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);
  tmp = (uint8_t *)realloc(mBuffer, numBytes * sizeof(uint8_t));
  if ( !tmp )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating buffer.\n");
    return false;
  }
  mBuffer = tmp;

  avpicture_fill((AVPicture *)mFrameRGB, mBuffer, PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);

  mSwsContext = sws_getContext(mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt,
                               mCodecContext->width, mCodecContext->height, PIX_FMT_RGB24,
                               SWS_BICUBIC, NULL, NULL, NULL);
  if ( !mSwsContext )
  {
    fprintf(stderr, "VideoDecoder::Load - sws_getContext failed.\n");
    return false;
  }

  mDoneReading = false;

  return true;
}
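
Load() acquires several resources (two frames, the RGB buffer, and the swscale context) but no matching teardown appears in this listing. A hedged sketch of an Unload() counterpart, reusing the member names from above (Unload itself and its exact behavior are assumptions):

// Sketch (assumption): release everything Load() allocated, roughly in reverse
// order of acquisition. In this API version mCodecContext belongs to the format
// context, so it is closed but not freed here.
void VideoDecoder::Unload()
{
  if ( mSwsContext )
  {
    sws_freeContext(mSwsContext);
    mSwsContext = NULL;
  }
  if ( mFrameRGB )
  {
    av_free(mFrameRGB);
    mFrameRGB = NULL;
  }
  if ( mFrame )
  {
    av_free(mFrame);
    mFrame = NULL;
  }
  free(mBuffer);      /* allocated with realloc() above */
  mBuffer = NULL;
  if ( mCodecContext )
    avcodec_close(mCodecContext);
  if ( mFormatContext )
    avformat_close_input(&mFormatContext);
}
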
Example No. 3
// --------------------------------------------------------------------------
// ARDrone::initVideo()
// Description  : Initialize video.
// Return value : SUCCESS: 1  FAILURE: 0
// --------------------------------------------------------------------------
int ARDrone::initVideo(void)
{
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Open the IP address and port
        char filename[256];
        sprintf(filename, "tcp://%s:%d", ip, ARDRONE_VIDEO_PORT);
        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {
            CVDRONE_ERROR("avformat_open_input() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Retrieve and dump stream information
        avformat_find_stream_info(pFormatCtx, NULL);
        av_dump_format(pFormatCtx, 0, filename, 0);

        // Find the decoder for the video stream
        pCodecCtx = pFormatCtx->streams[0]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            CVDRONE_ERROR("avcodec_find_decoder() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            CVDRONE_ERROR("avcodec_open2() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Allocate video frames and a buffer
        pFrame = avcodec_alloc_frame();
        pFrameBGR = avcodec_alloc_frame();
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameBGR
        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

        // Convert it to BGR
        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
    }
    // AR.Drone 1.0
    else {
        // Open the IP address and port
        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {
            CVDRONE_ERROR("UDPSocket::open(port=%d) was failed. (%s, %d)\n", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);
            return 0;
        }

        // Set codec
        //pCodecCtx = avcodec_alloc_context();
        pCodecCtx=avcodec_alloc_context3(NULL);
        pCodecCtx->width = 320;
        pCodecCtx->height = 240;

        // Allocate a buffer
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
    }

    // Allocate an IplImage
    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
    if (!img) {
        CVDRONE_ERROR("cvCreateImage() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    // Clear the image
    cvZero(img);

    // Create a mutex
    mutexVideo = new pthread_mutex_t;
    pthread_mutex_init(mutexVideo, NULL);

    // Create a thread
    threadVideo = new pthread_t;
    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {
        CVDRONE_ERROR("pthread_create() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    return 1;
}
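
initVideo() creates a decoder, buffers, an IplImage, a mutex, and a thread, but the cleanup path is not shown here. A hedged sketch of a finalizeVideo() counterpart, assuming the member names used above (the function body is an assumption):

// Sketch (assumption): stop the video thread and release what initVideo() acquired.
void ARDrone::finalizeVideo(void)
{
    // Stop and destroy the thread and mutex
    if (threadVideo) {
        pthread_join(*threadVideo, NULL);
        delete threadVideo;
        threadVideo = NULL;
    }
    if (mutexVideo) {
        pthread_mutex_destroy(mutexVideo);
        delete mutexVideo;
        mutexVideo = NULL;
    }

    // Release the image and the FFmpeg resources
    if (img)        cvReleaseImage(&img);
    if (bufferBGR)  av_free(bufferBGR);
    if (pFrameBGR)  av_free(pFrameBGR);
    if (pFrame)     av_free(pFrame);
    if (pCodecCtx)  avcodec_close(pCodecCtx);          // AR.Drone 1.0: also needs av_free(), since it was allocated directly
    if (pFormatCtx) avformat_close_input(&pFormatCtx); // AR.Drone 2.0 path only
}
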
Example No. 4
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    //return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first video stream

  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  if(is->videoStream < 0 /*|| is->audioStream < 0*/) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop

  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
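
The packet_queue_put() helper used by both queues is not shown in this listing. A sketch of the usual implementation from the tutorial family these players derive from, assuming a PacketQueue that holds a linked list of AVPacketList nodes plus an SDL mutex/condition pair:

// Sketch: append a packet to the queue and wake any waiting consumer.
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
  AVPacketList *pkt1;
  if(av_dup_packet(pkt) < 0) {
    return -1;                       // make sure the packet owns its data
  }
  pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
  if (!pkt1)
    return -1;
  pkt1->pkt = *pkt;
  pkt1->next = NULL;

  SDL_LockMutex(q->mutex);
  if (!q->last_pkt)
    q->first_pkt = pkt1;
  else
    q->last_pkt->next = pkt1;
  q->last_pkt = pkt1;
  q->nb_packets++;
  q->size += pkt1->pkt.size;         // the size checked against MAX_*Q_SIZE above
  SDL_CondSignal(q->cond);
  SDL_UnlockMutex(q->mutex);
  return 0;
}
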
Example No. 5
int test_video()
{
#if 1
	return 0;
#elif 0
	video_recorder vidrec;
	int i;
	int16_t silence[44100 / 25] = {0};

	av_register_all();
	
	vidrec_init(&vidrec, "test.mpeg", 320, 200);

	for (i = 0; i < 25 * 10; ++i)
	{
		memset(vidrec.tmp_picture->data[0], (i % 255), 320 * 200 * 4);

		vidrec_write_frame(&vidrec, silence, 44100 / 25, vidrec.tmp_picture);
	}

	while (vidrec_write_frame(&vidrec, silence, 44100 / 25, NULL))
	{
		//
	}

	vidrec_finalize(&vidrec);

	return 0;

#elif 0
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

#if 0
    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }
#endif

    //filename = argv[1];

	filename = "test.mp4";

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    sprintf(oc->filename, "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
#endif
}
Example No. 6
int ff_open_movie(void *ptr, char *file_name, int render_fmt) {
  int i;
  AVCodec *pCodec;
  ffst *ff = (ffst*) ptr;

  if (ff->pFrameFMT) {
    if (ff->current_file && !strcmp(file_name, ff->current_file)) return(0);
    /* close currently open movie */
    if (!want_quiet)
      fprintf(stderr, "replacing current video file buffer\n");
    ff_close_movie(ff);
  }

  // initialize values
  ff->pFormatCtx = NULL;
  ff->pFrameFMT = NULL;
  ff->movie_width  = 320;
  ff->movie_height = 180;
  ff->buf_width = ff->buf_height = 0;
  ff->framerate = ff->duration = ff->frames = 1;
  ff->file_frame_offset = 0.0;
  ff->videoStream = -1;
  ff->tpf = 1;
  ff->avprev = -1;
  ff->stream_pts_offset = AV_NOPTS_VALUE;
  ff->render_fmt = render_fmt;

  /* Open video file */
  if(avformat_open_input(&ff->pFormatCtx, file_name, NULL, NULL) <0)
  {
    if (!want_quiet)
      fprintf(stderr, "Cannot open video file %s\n", file_name);
    return (-1);
  }

  pthread_mutex_lock(&avcodec_lock);
  /* Retrieve stream information */
  if(avformat_find_stream_info(ff->pFormatCtx, NULL) < 0) {
    if (!want_quiet)
      fprintf(stderr, "Cannot find stream information in file %s\n", file_name);
    avformat_close_input(&ff->pFormatCtx);
    pthread_mutex_unlock(&avcodec_lock);
    return (-1);
  }
  pthread_mutex_unlock(&avcodec_lock);

  if (want_verbose) av_dump_format(ff->pFormatCtx, 0, file_name, 0);

  /* Find the first video stream */
  for(i = 0; i < ff->pFormatCtx->nb_streams; i++)
#if LIBAVFORMAT_BUILD > 0x350000
    if(ff->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
#elif LIBAVFORMAT_BUILD > 4629
    if(ff->pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
#else
    if(ff->pFormatCtx->streams[i]->codec.codec_type == CODEC_TYPE_VIDEO)
#endif
    {
      ff->videoStream = i;
      break;
    }

  if(ff->videoStream == -1) {
    if (!want_quiet)
      fprintf(stderr, "Cannot find a video stream in file %s\n", file_name);
    avformat_close_input(&ff->pFormatCtx);
    return (-1);
  }

  ff_set_framerate(ff);

  {
  AVStream *avs = ff->pFormatCtx->streams[ff->videoStream];

#if 0 // DEBUG duration
  printf("DURATION frames from AVstream: %"PRIi64"\n", avs->nb_frames);
  printf("DURATION duration from FormatContext: %.2f\n", ff->pFormatCtx->duration * ff->framerate / AV_TIME_BASE);
#endif

  if (avs->nb_frames > 0) {
    ff->frames = avs->nb_frames;
    ff->duration = ff->frames / ff->framerate;
  } else {
    ff->duration = ff->pFormatCtx->duration / (double)AV_TIME_BASE;
    ff->frames = ff->pFormatCtx->duration * ff->framerate / AV_TIME_BASE;
  }

  const AVRational fr_Q = { ff->tc.den, ff->tc.num };
  ff->tpf = av_rescale_q (1, fr_Q, avs->time_base);
  }

  ff->file_frame_offset = ff->framerate*((double) ff->pFormatCtx->start_time/ (double) AV_TIME_BASE);

  if (want_verbose) {
    fprintf(stdout, "frame rate: %g\n", ff->framerate);
    fprintf(stdout, "length in seconds: %g\n", ff->duration);
    fprintf(stdout, "total frames: %ld\n", ff->frames);
    fprintf(stdout, "start offset: %.0f [frames]\n", ff->file_frame_offset);
  }

  // Get a pointer to the codec context for the video stream
#if LIBAVFORMAT_BUILD > 4629
  ff->pCodecCtx = ff->pFormatCtx->streams[ff->videoStream]->codec;
#else
  ff->pCodecCtx = &(ff->pFormatCtx->streams[ff->videoStream]->codec);
#endif

// FIXME: don't scale here - announce aspect ratio
// out_width/height remains in aspect 1:1
#ifdef SCALE_UP
  ff->movie_width = (int) floor((double)ff->pCodecCtx->height * ff_get_aspectratio(ff));
  ff->movie_height = ff->pCodecCtx->height;
#else
  ff->movie_width = ff->pCodecCtx->width;
  ff->movie_height = (int) floor((double)ff->pCodecCtx->width / ff_get_aspectratio(ff));
#endif

  // somewhere around LIBAVFORMAT_BUILD  4630
#ifdef AVFMT_FLAG_GENPTS
  if (ff->want_genpts) {
    ff->pFormatCtx->flags |= AVFMT_FLAG_GENPTS;
//  ff->pFormatCtx->flags |= AVFMT_FLAG_IGNIDX;
  }
#endif

  if (want_verbose)
    fprintf(stdout, "movie size:  %ix%i px\n", ff->movie_width, ff->movie_height);

  // Find the decoder for the video stream
  pCodec = avcodec_find_decoder(ff->pCodecCtx->codec_id);
  if(pCodec == NULL) {
    if (!want_quiet)
      fprintf(stderr, "Cannot find a codec for file: %s\n", file_name);
    avformat_close_input(&ff->pFormatCtx);
    return(-1);
  }

  // Open codec
  pthread_mutex_lock(&avcodec_lock);
  if(avcodec_open2(ff->pCodecCtx, pCodec, NULL) < 0) {
    if (!want_quiet)
      fprintf(stderr, "Cannot open the codec for file %s\n", file_name);
    pthread_mutex_unlock(&avcodec_lock);
    avformat_close_input(&ff->pFormatCtx);
    return(-1);
  }
  pthread_mutex_unlock(&avcodec_lock);

  if (!(ff->pFrame = av_frame_alloc())) {
    if (!want_quiet)
      fprintf(stderr, "Cannot allocate video frame buffer\n");
    avcodec_close(ff->pCodecCtx);
    avformat_close_input(&ff->pFormatCtx);
    return(-1);
  }

  if (!(ff->pFrameFMT = av_frame_alloc())) {
    if (!want_quiet)
      fprintf(stderr, "Cannot allocate display frame buffer\n");
    av_free(ff->pFrame);
    avcodec_close(ff->pCodecCtx);
    avformat_close_input(&ff->pFormatCtx);
    return(-1);
  }

  ff->out_width = ff->out_height = -1;

  ff->current_file = strdup(file_name);
  return(0);
}
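
ff_get_aspectratio(), used above to derive movie_width/movie_height, is not included in this listing. A hypothetical reconstruction, assuming it returns the display aspect ratio with the stream's sample aspect ratio applied:

// Sketch (assumption): display aspect ratio of the current video stream;
// falls back to the pixel dimensions when no sample aspect ratio is set.
static double ff_get_aspectratio(ffst *ff) {
  AVRational sar = ff->pCodecCtx->sample_aspect_ratio;
  if (sar.num == 0 || sar.den == 0)
    return (double)ff->pCodecCtx->width / (double)ff->pCodecCtx->height;
  return av_q2d(sar) * (double)ff->pCodecCtx->width / (double)ff->pCodecCtx->height;
}
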
Example No. 7
int decode_thread(void *arg) {

    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;

    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;

    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream=-1;
    is->audioStream=-1;
    is->audio_need_resample = 0;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;
    if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
        return -1; // Couldn't open file

    is->pFormatCtx = pFormatCtx;

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    // Find the first video stream
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
                video_index < 0) {
            video_index=i;
        } else if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
                  audio_index < 0) {
            audio_index=i;
        }
    }
    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }
    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__
    if( audio_index >= 0
            && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample=1;
        is->pResampledOut=NULL;
        is->pSwrCtx = NULL;

        printf("Configure resampler: ");

#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif

#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        // Some MP3/WAV files don't report a channel layout, so assume
        // they are stereo, not 5.1
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        av_opt_set_int(is->pSwrCtx,"in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx,"in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx,"in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);

        av_opt_set_int(is->pSwrCtx,"out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx,"out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx,"out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__
        if (avresample_open(is->pSwrCtx) < 0) {
#else
        if (swr_init(is->pSwrCtx) < 0) {
#endif
            fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, "         To 44100 Sample format: s16\n");
            is->audio_need_resample=0;
            is->pSwrCtx = NULL;;
        }

    }
#endif

    // main decode loop

    for(;;) {
        if(is->quit) {
            break;
        }
        // seek stuff goes here
        if(is->seek_req) {
            int stream_index= -1;

            int64_t seek_target = is->seek_pos;

            if     (is->videoStream >= 0) stream_index = is->videoStream;
            else if(is->audioStream >= 0) stream_index = is->audioStream;

            if(stream_index>=0) {
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q,
                                          pFormatCtx->streams[stream_index]->time_base);
            }
            printf("Seek_target: %ld/%ld stream_index: %ld\n",is->seek_pos, seek_target, stream_index);

            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target,
                             is->seek_flags) < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
            } else {
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        if(is->audioq.size > MAX_AUDIOQ_SIZE ||
                is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }
        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            } else {
                break;
            }
        }
        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);
        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);
        } else {
            av_free_packet(packet);
        }
    }
    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }
fail: {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

void stream_seek(VideoState *is, int64_t pos, int rel) {

  if(!is->seek_req) {
    is->seek_pos = pos;
    is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
    is->seek_req = 1;
  }
}
int main(int argc, char *argv[]) {

  SDL_Event       event;
  //double          pts;
  VideoState      *is;

  is = av_mallocz(sizeof(VideoState));

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Make a screen to put our video
#ifndef __DARWIN__
  screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
  screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }

  av_strlcpy(is->filename, argv[1], 1024);

  is->pictq_mutex = SDL_CreateMutex();
  is->pictq_cond = SDL_CreateCond();

  schedule_refresh(is, 40);

  is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
  is->parse_tid = SDL_CreateThread(decode_thread, is);
  if(!is->parse_tid) {
    av_free(is);
    return -1;
  }

  av_init_packet(&flush_pkt);
  flush_pkt.data = (unsigned char *)"FLUSH";
  
  for(;;) {
    double incr, pos;
    SDL_WaitEvent(&event);
    switch(event.type) {
    case SDL_KEYDOWN:
      switch(event.key.keysym.sym) {
      case SDLK_LEFT:
	incr = -10.0;
	goto do_seek;
      case SDLK_RIGHT:
	incr = 10.0;
	goto do_seek;
      case SDLK_UP:
	incr = 60.0;
	goto do_seek;
      case SDLK_DOWN:
	incr = -60.0;
	goto do_seek;
      do_seek:
	if(global_video_state) {
	  pos = get_master_clock(global_video_state);
	  pos += incr;
	  stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
	}
	break;
      default:
	break;
      }
      break;
    case FF_QUIT_EVENT:
    case SDL_QUIT:
      is->quit = 1;
      /*
       * If the video has finished playing, then both the picture and
       * audio queues are waiting for more data.  Make them stop
       * waiting and terminate normally.
       */
      SDL_CondSignal(is->audioq.cond);
      SDL_CondSignal(is->videoq.cond);
      SDL_Quit();
      exit(0);
      break;
    case FF_ALLOC_EVENT:
      alloc_picture(event.user.data1);
      break;
    case FF_REFRESH_EVENT:
      video_refresh_timer(event.user.data1);
      break;
    default:
      break;
    }
  }
  return 0;
}
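
The seek path above flushes both queues before pushing flush_pkt, but packet_queue_flush() is not part of this listing. A sketch of the usual implementation, assuming the same PacketQueue linked-list layout used by packet_queue_put():

// Sketch: drop every queued packet and reset the counters under the lock.
static void packet_queue_flush(PacketQueue *q) {
  AVPacketList *pkt, *pkt1;

  SDL_LockMutex(q->mutex);
  for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
    pkt1 = pkt->next;
    av_free_packet(&pkt->pkt);
    av_freep(&pkt);
  }
  q->last_pkt = NULL;
  q->first_pkt = NULL;
  q->nb_packets = 0;
  q->size = 0;
  SDL_UnlockMutex(q->mutex);
}
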
Example No. 8
int main(int argc, char* argv[]) {

	int i, videoStream, audioStream;

	VideoState	*is;
	is = av_mallocz(sizeof(VideoState));

	if(argc < 2) {
		fprintf(stderr, "Usage: test <file>\n");
		exit(1);
	}

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}


	av_register_all();

	AVFormatContext *pFormatCtx = NULL;

	av_strlcpy(is->filename, argv[1], sizeof(is->filename));
	is->pictq_mutex = SDL_CreateMutex();
	is->pictq_cond	= SDL_CreateCond();

	schedule_refresh(is, 40);

	is->parse_tid = SDL_CreateThread(decode_thread, is);
	if(!is->parse_tid) {
		av_free(is);
		return -1;
	}

	// Open video file
	if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
		return -1; // Couldn't open file
	}

	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		return -1; //Couldn't find stream information
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	AVCodecContext *pCodecCtxOrig = NULL;
	AVCodecContext *pCodecCtx = NULL;

	AVCodecContext *aCodecCtxOrig = NULL;
	AVCodecContext *aCodecCtx = NULL;

	// Find the first video stream
	videoStream = -1;
	audioStream = -1;

	for(i=0; i < pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
			videoStream = i;
		}
	}
	
	for(i=0; i < pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
			audioStream = i;
		}
	}

	if(videoStream == -1) {
		return -1; // Didn't find a video stream
	}
	if(audioStream == -1) {
		return -1;
	}
	// Get a pointer to the codec context for the video stream
	pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
	aCodecCtxOrig = pFormatCtx->streams[audioStream]->codec;

	AVCodec *pCodec = NULL;
	AVCodec *aCodec = NULL;

	//Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
	if(pCodec == NULL) {
		return -1;
	}
	aCodec = avcodec_find_decoder(aCodecCtxOrig->codec_id);
	if(aCodec == NULL) {
		return -1;
	}
	// Copy context
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
		return -1;
	}

	aCodecCtx = avcodec_alloc_context3(aCodec);
	if(avcodec_copy_context(aCodecCtx, aCodecCtxOrig) != 0) {
		return -1;
	}
	SDL_AudioSpec wanted_spec, spec;

	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;

	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}

	// Open codec
	AVDictionary *optionDict = NULL;
	if(avcodec_open2(pCodecCtx, pCodec, &optionDict) < 0) {
		return -1;
	}
	
	if(avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
		return -1;
	}

	packet_queue_init(&audioq);
	SDL_PauseAudio(0);

	
	// Allocate video frame
	AVFrame *pFrame = NULL;
	pFrame = av_frame_alloc();


	SDL_Surface *screen;
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
	if(!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	SDL_Overlay *bmp = NULL;

	bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 
							   pCodecCtx->height, 
							   SDL_YV12_OVERLAY, 
							   screen);


	printf("[loop]==========================\n");

	struct SwsContext *sws_ctx = NULL;
	int frameFinished;
	AVPacket packet;

	//initialize SWS context for software scaling
	sws_ctx = sws_getContext(pCodecCtx->width, 
							pCodecCtx->height,
							pCodecCtx->pix_fmt,
							pCodecCtx->width,
							pCodecCtx->height,
							AV_PIX_FMT_YUV420P,
							SWS_BILINEAR,
							NULL,
							NULL,
							NULL);

	// Read frame and display							
	i = 0;
	while(av_read_frame(pFormatCtx, &packet) >= 0) {
		if(packet.stream_index == videoStream) {
			//Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if(frameFinished) {
				SDL_LockYUVOverlay(bmp);

				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];

				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];

				// Convert the image into YUV format that SDL uses

				sws_scale(sws_ctx, 
						  (uint8_t const * const *)pFrame->data,
						  pFrame->linesize, 
						  0, 
						  pCodecCtx->height, 
						  pict.data, 
						  pict.linesize);

				SDL_UnlockYUVOverlay(bmp);

				SDL_Rect rect;
				rect.x = 0;
				rect.y = 0;
				rect.w = pCodecCtx->width;
				rect.h = pCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
			}
			av_free_packet(&packet);  // free the packet whether or not a full frame was decoded
		} else if (packet.stream_index == audioStream) {
			packet_queue_put(&audioq, &packet);

		} else {
			// Free the packet that was allocated by av_read_frame
			av_free_packet(&packet);
		}

		SDL_Event event;
		SDL_PollEvent(&event);
		switch(event.type) {
		case SDL_QUIT:
			quit = 1;
			SDL_Quit();
			exit(0);
			break;
		default:
			break;
		}
	}


	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);
	avcodec_close(pCodecCtxOrig);

	// Close the video file
	avformat_close_input(&pFormatCtx);
	return 0;
}
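
packet_queue_init(), called before SDL_PauseAudio(0) above, is not shown. A sketch of the conventional implementation, assuming a PacketQueue guarded by an SDL mutex and condition variable:

// Sketch: zero the queue and create its synchronization primitives.
void packet_queue_init(PacketQueue *q) {
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->cond = SDL_CreateCond();
}
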
Example No. 9
int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	AVOutputFormat* fmt;
	AVStream* audio_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;

	uint8_t* frame_buf;
	AVFrame* pFrame;
	AVPacket pkt;

	int got_frame=0;
	int ret=0;
	int size=0;

	FILE *in_file=NULL;	                        //Raw PCM data
	int framenum=1000;                          //Audio frame number
	const char* out_file = "tdjm.aac";          //Output URL
	int i;

	in_file = fopen("tdjm.pcm", "rb");
	if (in_file == NULL){
		printf("Failed to open input file!\n");
		return -1;
	}

	av_register_all();

	//Method 1.
	pFormatCtx = avformat_alloc_context();
	fmt = av_guess_format(NULL, out_file, NULL);
	pFormatCtx->oformat = fmt;


	//Method 2.
	//avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
	//fmt = pFormatCtx->oformat;

	//Open output URL
	if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output file!\n");
		return -1;
	}

	audio_st = avformat_new_stream(pFormatCtx, 0);
	if (audio_st==NULL){
		return -1;
	}
	pCodecCtx = audio_st->codec;
	pCodecCtx->codec_id = fmt->audio_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
	pCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
	pCodecCtx->sample_rate= 44100;
	pCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
	pCodecCtx->channels = av_get_channel_layout_nb_channels(pCodecCtx->channel_layout);
	pCodecCtx->bit_rate = 64000;  

	//Show some information
	av_dump_format(pFormatCtx, 0, out_file, 1);

	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec){
		printf("Can not find encoder!\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){
		printf("Failed to open encoder!\n");
		return -1;
	}
	pFrame = av_frame_alloc();
	pFrame->nb_samples= pCodecCtx->frame_size;
	pFrame->format= pCodecCtx->sample_fmt;
	
	size = av_samples_get_buffer_size(NULL, pCodecCtx->channels,pCodecCtx->frame_size,pCodecCtx->sample_fmt, 1);
	frame_buf = (uint8_t *)av_malloc(size);
	avcodec_fill_audio_frame(pFrame, pCodecCtx->channels, pCodecCtx->sample_fmt,(const uint8_t*)frame_buf, size, 1);
	
	//Write Header
	avformat_write_header(pFormatCtx,NULL);

	av_new_packet(&pkt,size);

	for (i=0; i<framenum; i++){
		//Read PCM
		if (fread(frame_buf, 1, size, in_file) <= 0){
			printf("Failed to read raw data! \n");
			return -1;
		}else if(feof(in_file)){
			break;
		}
		pFrame->data[0] = frame_buf;  //PCM Data

		pFrame->pts=i*100;
		got_frame=0;
		//Encode
		ret = avcodec_encode_audio2(pCodecCtx, &pkt,pFrame, &got_frame);
		if(ret < 0){
			printf("Failed to encode!\n");
			return -1;
		}
		if (got_frame==1){
			printf("Succeed to encode 1 frame! \tsize:%5d\n",pkt.size);
			pkt.stream_index = audio_st->index;
			ret = av_write_frame(pFormatCtx, &pkt);
			av_free_packet(&pkt);
		}
	}
	
	//Flush Encoder
	ret = flush_encoder(pFormatCtx,0);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}

	//Write Trailer
	av_write_trailer(pFormatCtx);

	//Clean
	if (audio_st){
		avcodec_close(audio_st->codec);
		av_free(pFrame);
		av_free(frame_buf);
	}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);

	fclose(in_file);

	return 0;
}
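
flush_encoder(), called before the trailer is written, is not part of this listing. A sketch of the drain loop it presumably performs, feeding NULL frames to the encoder until no delayed packet remains (the exact body is an assumption):

// Sketch (assumption): drain packets still buffered inside the encoder.
int flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
{
	int ret;
	int got_frame;
	AVPacket enc_pkt;
	if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
		return 0;  // this encoder holds no delayed frames
	while (1) {
		enc_pkt.data = NULL;
		enc_pkt.size = 0;
		av_init_packet(&enc_pkt);
		ret = avcodec_encode_audio2(fmt_ctx->streams[stream_index]->codec, &enc_pkt,
			NULL, &got_frame);  // NULL frame: flush mode
		if (ret < 0)
			break;
		if (!got_frame) {
			ret = 0;
			break;
		}
		ret = av_write_frame(fmt_ctx, &enc_pkt);
		if (ret < 0)
			break;
	}
	return ret;
}
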
Example No. 10
int main(int argc, char* argv[])
{

    AVFormatContext	*pFormatCtx;
    int				i, videoindex;
    AVCodecContext	*pCodecCtx;
    AVCodec			*pCodec;
    AVFrame	*pFrame,*pFrameYUV;
    uint8_t *out_buffer;
    AVPacket *packet;
    int ret, got_picture;

    //------------SDL----------------
    int screen_w,screen_h;
    SDL_Window *screen;
    SDL_Renderer* sdlRenderer;
    SDL_Texture* sdlTexture;
    SDL_Rect sdlRect;
    SDL_Thread *video_tid;
    SDL_Event event;

    struct SwsContext *img_convert_ctx;

    char filepath[]="bigbuckbunny_480x272.h265";

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if(avformat_find_stream_info(pFormatCtx,NULL)<0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoindex=i;
            break;
        }
    if(videoindex==-1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    pCodecCtx=pFormatCtx->streams[videoindex]->codec;
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) {
        printf("Could not open codec.\n");
        return -1;
    }
    pFrame=av_frame_alloc();
    pFrameYUV=av_frame_alloc();
    out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

    //Output Info-----------------------------
    printf("---------------- File Information ---------------\n");
    av_dump_format(pFormatCtx,0,filepath,0);
    printf("-------------------------------------------------\n");

    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);


    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf( "Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }
    //SDL 2.0 Support for multiple windows
    screen_w = pCodecCtx->width;
    screen_h = pCodecCtx->height;
    screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              screen_w, screen_h,SDL_WINDOW_OPENGL);

    if(!screen) {
        printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
        return -1;
    }
    sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
    //IYUV: Y + U + V  (3 planes)
    //YV12: Y + V + U  (3 planes)
    sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);

    sdlRect.x=0;
    sdlRect.y=0;
    sdlRect.w=screen_w;
    sdlRect.h=screen_h;

    packet=(AVPacket *)av_malloc(sizeof(AVPacket));

    video_tid = SDL_CreateThread(sfp_refresh_thread,NULL,NULL);
    //------------SDL End------------
    //Event Loop

    for (;;) {
        //Wait
        SDL_WaitEvent(&event);
        if(event.type==SFM_REFRESH_EVENT) {
            //------------------------------
            if(av_read_frame(pFormatCtx, packet)>=0) {
                if(packet->stream_index==videoindex) {
                    ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
                    if(ret < 0) {
                        printf("Decode Error.\n");
                        return -1;
                    }
                    if(got_picture) {
                        sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                        //SDL---------------------------
                        SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );
                        SDL_RenderClear( sdlRenderer );
                        //SDL_RenderCopy( sdlRenderer, sdlTexture, &sdlRect, &sdlRect );
                        SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, NULL);
                        SDL_RenderPresent( sdlRenderer );
                        //SDL End-----------------------
                    }
                }
                av_free_packet(packet);
            } else {
                //Exit Thread
                thread_exit=1;
            }
        } else if(event.type==SDL_KEYDOWN) {
            //Pause
            if(event.key.keysym.sym==SDLK_SPACE)
                thread_pause=!thread_pause;
        } else if(event.type==SDL_QUIT) {
            thread_exit=1;
        } else if(event.type==SFM_BREAK_EVENT) {
            break;
        }

    }

    sws_freeContext(img_convert_ctx);

    SDL_Quit();
    //--------------
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
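
sfp_refresh_thread(), started before the event loop above, is not included in this listing. A sketch of its usual shape, assuming the global thread_exit/thread_pause flags and the SFM_REFRESH_EVENT/SFM_BREAK_EVENT custom event types referenced in the loop:

// Sketch (assumption): push a refresh event roughly every 40 ms (~25 fps),
// and a break event once the main loop has asked the thread to exit.
int sfp_refresh_thread(void *opaque) {
    SDL_Event event;
    thread_exit = 0;
    thread_pause = 0;
    while (!thread_exit) {
        if (!thread_pause) {
            event.type = SFM_REFRESH_EVENT;
            SDL_PushEvent(&event);
        }
        SDL_Delay(40);
    }
    thread_exit = 0;
    event.type = SFM_BREAK_EVENT;   // lets the main loop exit cleanly
    SDL_PushEvent(&event);
    return 0;
}
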
Example No. 11
static ImBuf *avi_fetchibuf(struct anim *anim, int position)
{
	ImBuf *ibuf = NULL;
	int *tmp;
	int y;
	
	if (anim == NULL) return (NULL);

#if defined(_WIN32) && !defined(FREE_WINDOWS)
	if (anim->avistreams) {
		LPBITMAPINFOHEADER lpbi;

		if (anim->pgf) {
			lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo]));
			if (lpbi) {
				ibuf = IMB_ibImageFromMemory((unsigned char *) lpbi, 100, IB_rect, "<avi_fetchibuf>");
//Oh brother...
			}
		}
	}
	else {
#else
	if (1) {
#endif
		ibuf = IMB_allocImBuf(anim->x, anim->y, 24, IB_rect);

		tmp = AVI_read_frame(anim->avi, AVI_FORMAT_RGB32, position,
		                     AVI_get_stream(anim->avi, AVIST_VIDEO, 0));
		
		if (tmp == NULL) {
			printf("Error reading frame from AVI");
			IMB_freeImBuf(ibuf);
			return NULL;
		}

		for (y = 0; y < anim->y; y++) {
			memcpy(&(ibuf->rect)[((anim->y - y) - 1) * anim->x],  &tmp[y * anim->x],
			       anim->x * 4);
		}
		
		MEM_freeN(tmp);
	}
	
	ibuf->profile = IB_PROFILE_SRGB;
	
	return ibuf;
}

#ifdef WITH_FFMPEG

extern void do_init_ffmpeg(void);

static int startffmpeg(struct anim *anim)
{
	int i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	do_init_ffmpeg();

	if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
		return -1;
	}

	if (av_find_stream_info(pFormatCtx) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);


	/* Find the video stream */
	videoStream = -1;

	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if (avcodec_open(pCodecCtx, pCodec) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	anim->duration = ceil(pFormatCtx->duration *
	                      av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) /
	                      AV_TIME_BASE);

	frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = pCodecCtx->height;
	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4;

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameComplete = FALSE;
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) !=
	    anim->x * anim->y * 4)
	{
		fprintf(stderr,
		        "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
		               MEM_callocN(avpicture_get_size(
		                               anim->pCodecCtx->pix_fmt,
		                               anim->x, anim->y),
		                           "ffmpeg deinterlace"),
		               anim->pCodecCtx->pix_fmt, anim->x, anim->y);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	}
	else {
		anim->preseek = 0;
	}
	
	anim->img_convert_ctx = sws_getContext(
	        anim->pCodecCtx->width,
	        anim->pCodecCtx->height,
	        anim->pCodecCtx->pix_fmt,
	        anim->pCodecCtx->width,
	        anim->pCodecCtx->height,
	        PIX_FMT_RGBA,
	        SWS_FAST_BILINEAR | SWS_PRINT_INFO,
	        NULL, NULL, NULL);
		
	if (!anim->img_convert_ctx) {
		fprintf(stderr,
		        "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
	                              &table, &dstRange, &brightness, &contrast, &saturation))
	{
		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
		                             table, dstRange, brightness, contrast, saturation))
		{
			printf("Warning: Could not set libswscale colorspace details.\n");
		}
	}
	else {
		printf("Warning: Could not set libswscale colorspace details.\n");
	}
#endif
		
	return (0);
}
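
do_init_ffmpeg(), declared extern above, is defined elsewhere in the source tree. A minimal sketch of what such a one-time initializer typically does with this API generation (the guard variable is an assumption):

/* Sketch (assumption): register codecs and formats exactly once. */
void do_init_ffmpeg(void)
{
	static int ffmpeg_initialized = 0;
	if (!ffmpeg_initialized) {
		av_register_all();
		avcodec_register_all();
		ffmpeg_initialized = 1;
	}
}
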
Example No. 12
int main(int argc, char* argv[]) {
	printf("Play simple video\n");
	if(argc < 2) {
		printf("Miss input video");
		return -1;
	}
	int ret = -1, i = -1, v_stream_idx = -1;
	char* vf_path = argv[1];
	// NOTE: fmt_ctx must be initialized to NULL before avformat_open_input()
	AVFormatContext* fmt_ctx = NULL;
	AVCodecContext* codec_ctx = NULL;
	AVCodec* codec;
	AVFrame * frame;
	AVPacket packet;

	av_register_all();
	ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL);
	if(ret < 0){
		printf("Open video file %s failed \n", vf_path);
		goto end;
	}
	if(avformat_find_stream_info(fmt_ctx, NULL)<0)
    	goto end;
    av_dump_format(fmt_ctx, 0, vf_path, 0);
    for(i = 0; i< fmt_ctx->nb_streams; i++) {
    	if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
    		v_stream_idx = i;
    		break;
    	}
    }
    if(v_stream_idx == -1) {
		printf("Cannot find video stream\n");
		goto end;
	}

	codec_ctx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar);
	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if(codec == NULL){
		printf("Unsupported codec for video file\n");
		goto end;
	}
	if(avcodec_open2(codec_ctx, codec, NULL) < 0){
		printf("Can not open codec\n");
		goto end;
	}

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    	printf("Could not init SDL due to %s", SDL_GetError());
    	goto end;
    }
    SDL_Window *window;
    SDL_Renderer *renderer;
    SDL_Texture *texture;
    SDL_Event event;
    SDL_Rect r;
    window = SDL_CreateWindow("SDL_CreateTexture", SDL_WINDOWPOS_UNDEFINED,
    	SDL_WINDOWPOS_UNDEFINED, codec_ctx->width, codec_ctx->height,
    	SDL_WINDOW_RESIZABLE);
    r.x = 0;
    r.y = 0;
    r.w = codec_ctx->width;
    r.h = codec_ctx->height;

    renderer = SDL_CreateRenderer(window, -1, 0);
    // texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET,
    // 	codec_ctx->width, codec_ctx->height);
    texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
    	codec_ctx->width, codec_ctx->height);

    struct SwsContext      *sws_ctx = NULL;
    sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
    	codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    frame = av_frame_alloc();

    int ret1, ret2;
    AVFrame* pict;
    pict = av_frame_alloc();


	int             numBytes;
	uint8_t         *buffer = NULL;
  	numBytes=avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
			      codec_ctx->height);
  	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    // required, or bad dst image pointers
	avpicture_fill((AVPicture *)pict, buffer, AV_PIX_FMT_YUV420P,
		 codec_ctx->width, codec_ctx->height);
    i = 0;
	while (1) {
        SDL_PollEvent(&event);
        if(event.type == SDL_QUIT)
                break;
        ret = av_read_frame(fmt_ctx, &packet);
        if(ret <0){
        	continue;
        }
        if(packet.stream_index == v_stream_idx) {
			ret1 = avcodec_send_packet(codec_ctx, &packet);
			ret2 = avcodec_receive_frame(codec_ctx, frame);
			if(ret2 < 0 ){
				continue;
	    	}
	    	sws_scale(sws_ctx, (uint8_t const * const *)frame->data,
			      frame->linesize, 0, codec_ctx->height,
			      pict->data, pict->linesize);
	   //  	if(++i <=5 ){
				// save_frame(pict, codec_ctx->width, codec_ctx->height, i);
	   //  	}
	        SDL_UpdateYUVTexture(texture, &r, pict->data[0], pict->linesize[0],
	        	pict->data[1], pict->linesize[1],
	        	pict->data[2], pict->linesize[2]);
	        // SDL_UpdateTexture(texture, &r, pict->data[0], pict->linesize[0]);

	        // r.x=rand()%500;
	        // r.y=rand()%500;

	        // SDL_SetRenderTarget(renderer, texture);
	        // SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0x00);
	        SDL_RenderClear(renderer);
	        // SDL_RenderDrawRect(renderer,&r);
	        // SDL_SetRenderDrawColor(renderer, 0xFF, 0x00, 0x00, 0x00);
	        // SDL_RenderFillRect(renderer, &r);
	        // SDL_SetRenderTarget(renderer, NULL);
	        SDL_RenderCopy(renderer, texture, NULL, NULL);
	        // SDL_RenderCopy(renderer, texture, &r, &r);
	        SDL_RenderPresent(renderer);
	        // SDL_Delay(50);
        }
        av_packet_unref(&packet);
    }

    SDL_DestroyRenderer(renderer);
    SDL_Quit();
	av_frame_free(&frame);
	avcodec_close(codec_ctx);
	avcodec_free_context(&codec_ctx);
    end:
	avformat_close_input(&fmt_ctx);
	printf("Shutdown\n");
	return 0;
}
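
The commented-out save_frame() call above suggests a frame-dumping helper. A hedged sketch that would fit this call site, assuming the frame passed in is the YUV420P pict converted above (here the luma plane is written out as a grayscale PGM):

// Sketch (assumption): dump the Y plane of a YUV420P frame as a binary PGM.
static void save_frame(AVFrame *frame, int width, int height, int index) {
	char name[64];
	FILE *f;
	int y;
	snprintf(name, sizeof(name), "frame%d.pgm", index);
	f = fopen(name, "wb");
	if (f == NULL)
		return;
	fprintf(f, "P5\n%d %d\n255\n", width, height);  // PGM header
	for (y = 0; y < height; y++)
		fwrite(frame->data[0] + y * frame->linesize[0], 1, width, f);
	fclose(f);
}
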
Example No. 13
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, &audio_codec, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    if (avformat_write_header(oc, NULL) < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        return 1;
    }

    frame->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            frame->pts++;
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
Esempio n. 14
// setup libAV related structs.
bool AV::setupAV() {

	ct.of = av_guess_format(NULL, "roxlu.flv", NULL);
	if(!ct.of) {
		printf("Cannot create flv AVOutputFormat\n");
		return false;
	}
	
	ct.c = avformat_alloc_context();
	if(!ct.c) {
		printf("Cannot allocate the AVFormatContext\n");
		return false;
	}
	ct.c->video_codec_id = CODEC_ID_H264;
	ct.c->debug = 3;
	ct.c->oformat = ct.of;

//	const char* output_filename = "tcp://127.0.0.1:6665";
//	const char* output_filename = "rtmp://gethinlewis.rtmphost.com";
	const char* output_filename = "rtmp://gethinlewis.rtmphost.com/event/_definst_";
//	const char* output_filename = "test.flv";
	snprintf(ct.c->filename, sizeof(ct.c->filename), "%s", output_filename);
	//ct.vs = addVideoStream(ct, ct.of->video_codec);

	printf("%d -- %d \n", CODEC_ID_H264, ct.of->video_codec);
	ct.of->video_codec = CODEC_ID_H264;
	ct.vs = addVideoStream(ct, ct.of->video_codec);
	if(!ct.vs) {
		printf("Cannot create video stream: %d.\n", ct.of->video_codec);
		return false;
	}
	
	if(!openVideo(ct)) {
		printf("Cannot open video stream.\n");
		return false;
	}
	
	//av_dict_set(&ct.c->metadata, "streamName", "video_test", 0);
	av_dict_set(&ct.c->metadata, "streamName", "livefeed", 0);
	
	if(use_audio) {
		bool use_mp3 = true;
		if(!use_mp3) {
			ct.asample_fmt = AV_SAMPLE_FMT_S16;
			ct.abit_rate = 64000;
			ct.asample_rate = 8000;
			ct.as = addAudioStream(ct, CODEC_ID_SPEEX);
		}
		else {
			ct.asample_fmt = AV_SAMPLE_FMT_S16;
			ct.abit_rate = 64000;
			ct.asample_rate = 44100;
			ct.as = addAudioStream(ct, CODEC_ID_MP3);
		}
		
		if(!ct.as) {
			printf("Cannot create audio stream.\n");
			return false;
		}
		
		if(!openAudio(ct)) {
			printf("Cannot open audio stream.\n");
			return false;
		}
	}
	
	av_dump_format(ct.c, 0, output_filename, 1);
	
	if(!(ct.of->flags & AVFMT_NOFILE)) {
		if(avio_open(&ct.c->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
			printf("Cannot open: %s\n", output_filename);
			return false;
		}
	}
	avformat_write_header(ct.c, NULL);
	
	return true;
}
Esempio n. 15
int init_send_video(codec_state *cs)
{
    cs->video_input_format = av_find_input_format(VIDEO_DRIVER);

    if (avformat_open_input(&cs->video_format_ctx, DEFAULT_WEBCAM, cs->video_input_format, NULL) != 0) {
        printf("opening video_input_format failed\n");
        return 0;
    }

    avformat_find_stream_info(cs->video_format_ctx, NULL);
    av_dump_format(cs->video_format_ctx, 0, DEFAULT_WEBCAM, 0);

    int i;

    for (i = 0; i < cs->video_format_ctx->nb_streams; ++i) {
        if (cs->video_format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            cs->video_stream = i;
            break;
        }
    }

    cs->webcam_decoder_ctx = cs->video_format_ctx->streams[cs->video_stream]->codec;
    cs->webcam_decoder = avcodec_find_decoder(cs->webcam_decoder_ctx->codec_id);

    if (cs->webcam_decoder == NULL) {
        printf("Unsupported codec\n");
        return 0;
    }

    if (cs->webcam_decoder_ctx == NULL) {
        printf("init webcam_decoder_ctx failed\n");
        return 0;
    }

    if (avcodec_open2(cs->webcam_decoder_ctx, cs->webcam_decoder, NULL) < 0) {
        printf("opening webcam decoder failed\n");
        return 0;
    }

    cs->video_encoder = avcodec_find_encoder(VIDEO_CODEC);

    if (!cs->video_encoder) {
        printf("init video_encoder failed\n");
        return 0;
    }

    cs->video_encoder_ctx = avcodec_alloc_context3(cs->video_encoder);

    if (!cs->video_encoder_ctx) {
        printf("init video_encoder_ctx failed\n");
        return 0;
    }

    cs->video_encoder_ctx->bit_rate = VIDEO_BITRATE;
    cs->video_encoder_ctx->rc_min_rate = cs->video_encoder_ctx->rc_max_rate = cs->video_encoder_ctx->bit_rate;
    av_opt_set_double(cs->video_encoder_ctx->priv_data, "max-intra-rate", 90, 0);
    av_opt_set(cs->video_encoder_ctx->priv_data, "quality", "realtime", 0);

    cs->video_encoder_ctx->thread_count = 4;
    cs->video_encoder_ctx->rc_buffer_aggressivity = 0.95;
    cs->video_encoder_ctx->rc_buffer_size = VIDEO_BITRATE * 6;
    cs->video_encoder_ctx->profile = 3;
    cs->video_encoder_ctx->qmax = 54;
    cs->video_encoder_ctx->qmin = 4;
    AVRational myrational = {1, 25};
    cs->video_encoder_ctx->time_base = myrational;
    cs->video_encoder_ctx->gop_size = 99999;
    cs->video_encoder_ctx->pix_fmt = PIX_FMT_YUV420P;
    cs->video_encoder_ctx->width = cs->webcam_decoder_ctx->width;
    cs->video_encoder_ctx->height = cs->webcam_decoder_ctx->height;

    if (avcodec_open2(cs->video_encoder_ctx, cs->video_encoder, NULL) < 0) {
        printf("opening video encoder failed\n");
        return 0;
    }

    printf("init video encoder successful\n");
    return 1;
}
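init_send_video() only prepares the encoder. As a rough sketch of the next step, a captured frame could be encoded like this; avcodec_encode_video2() matches the API generation used above, and send_video_packet() is a hypothetical transport helper:

// Sketch: encode one decoded webcam frame with the context prepared above.
// send_video_packet() is a hypothetical helper; error handling is minimal.
int encode_and_send(codec_state *cs, AVFrame *frame)
{
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;   // let the encoder allocate the payload
    pkt.size = 0;

    if (avcodec_encode_video2(cs->video_encoder_ctx, &pkt, frame, &got_packet) < 0)
        return 0;

    if (got_packet) {
        send_video_packet(pkt.data, pkt.size);  // hypothetical transport
        av_free_packet(&pkt);
    }
    return 1;
}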
Esempio n. 16
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer= NULL;
  int ret, got_frame;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  //  avcodec_register_all();
  av_register_all();
  

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"파일을 열 수 없습니다.\n");
    return -1;
  }

  
    
  // Retrieve stream information
  if((ret = avformat_find_stream_info(pFormatCtx,NULL)) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"stream 정보을 찾을 수 없습니다.\n");
    return ret; // Couldn't find stream information
  }
  
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream = av_find_best_stream(pFormatCtx,AVMEDIA_TYPE_VIDEO,-1,-1,&pCodec,0);

  
  if(videoStream < 0 ) {
    av_log(NULL,AV_LOG_ERROR,"Cannot find a video stream in the input file\n");
    return videoStream;
  }

  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  /* pCodec=avcodec_find_decoder(pCodecCtx->codec_id); */
  /* if(pCodec==NULL) { */
  /*   av_log(NULL, AV_LOG_ERROR,"Unsupported codec.\n"); */
  /*   return -1; // Codec not found */
  /* } */
  // Open codec


  if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
  
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;

  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, 
         		      pCodecCtx->height); 
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); 
  
  /* // Assign appropriate parts of buffer to image planes in pFrameRGB */
  /* // Note that pFrameRGB is an AVFrame, but AVFrame is a superset */
  /* // of AVPicture */
   avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, 
         	 pCodecCtx->width, pCodecCtx->height); 


  av_init_packet(&packet);
  packet.data = NULL;
  packet.size = 0;

  // Read frames and save the first 100 frames to disk
  i=0;

  
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	// NOTE: av_picture_crop() only copies/crops planes; unlike sws_scale()
	// it does not convert the native pixel format to RGB, so pFrameRGB
	// receives untranslated data here.
	av_picture_crop((AVPicture *)pFrameRGB, (AVPicture*)pFrame, 
                        PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
	
	// Save the frame to disk
	if(++i<=100)
	  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
		    i);
        if( i >100 )
          break;
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    
  }
  
  // Free the RGB image
  av_free(buffer);
  
  printf(" av_free ");
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
Esempio n. 17
static DecoderContext *init_decoder(const char *filename)
{
    DecoderContext *dc = (DecoderContext *)calloc(1, sizeof(DecoderContext));
    AVCodecContext *codecCtx;

    // Open the stream
    if(avformat_open_input(&(dc->formatCtx), filename, NULL, NULL) != 0) {
        fprintf(stderr, "Couldn't open file");
        exit(1);
    }

    // Retrieve stream information
    if(avformat_find_stream_info(dc->formatCtx, NULL) < 0) {
        fprintf(stderr, "Couldn't find stream information");
        exit(1);
    }

    // Dump information about file onto standard error
    av_dump_format(dc->formatCtx, 0, filename, 0);


    // Get video Stream
    dc->videoStream = get_video_stream(dc->formatCtx);
    if (dc->videoStream == -1) {
        fprintf(stderr, "Couldn't find video stream");
        exit(1);
    }

    codecCtx = dc->formatCtx->streams[dc->videoStream]->codec;

    /* find the decoder */
    dc->codec = avcodec_find_decoder(codecCtx->codec_id);
    if (!dc->codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    /* Allocate codec context */
    dc->codecCtx = avcodec_alloc_context3(dc->codec);
    if (!dc->codecCtx) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if(avcodec_copy_context(dc->codecCtx, codecCtx) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        exit(1); // Error copying codec context
    }

    if(dc->codec->capabilities & CODEC_CAP_TRUNCATED)
        dc->codecCtx->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(dc->codecCtx, dc->codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    dc->frame = av_frame_alloc();
    if (!dc->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    // Allocate input buffer
    dc->numBytes = avpicture_get_size(dc->codecCtx->pix_fmt,
                                      dc->codecCtx->width,
                                      dc->codecCtx->height);

    dc->inbuf = calloc(1, dc->numBytes + FF_INPUT_BUFFER_PADDING_SIZE);

    if (!dc->inbuf) {
        fprintf(stderr, "Could not allocate buffer");
        exit(1);
    }

    memset(dc->inbuf + dc->numBytes, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    dc->frame_count = 0;

    return dc;
}
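init_decoder() acquires several resources, so a matching teardown helper is implied even though none is shown. A sketch (a hypothetical free_decoder(), releasing in roughly reverse order of acquisition):

// Hypothetical counterpart to init_decoder(): release everything it allocated.
static void free_decoder(DecoderContext *dc)
{
    if (!dc)
        return;
    free(dc->inbuf);
    av_frame_free(&dc->frame);
    avcodec_close(dc->codecCtx);   // dc->codecCtx was allocated by us, not
    av_free(dc->codecCtx);         // the stream, so it is safe to free
    avformat_close_input(&dc->formatCtx);
    free(dc);
}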
Esempio n. 18
static int startffmpeg(struct anim *anim)
{
    int i, videoStream;

    AVCodec *pCodec;
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx;
    AVRational frame_rate;
    int frs_num;
    double frs_den;
    int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* The following for color space determination */
    int srcRange, dstRange, brightness, contrast, saturation;
    int *table;
    const int *inv_table;
#endif

    if (anim == NULL) return(-1);

    streamcount = anim->streamindex;

    if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    av_dump_format(pFormatCtx, 0, anim->name, 0);


    /* Find the video stream */
    videoStream = -1;

    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (streamcount > 0) {
                streamcount--;
                continue;
            }
            videoStream = i;
            break;
        }

    if (videoStream == -1) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    /* Find the decoder for the video stream */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx->workaround_bugs = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    frame_rate = av_get_r_frame_rate_compat(pFormatCtx->streams[videoStream]);
    if (pFormatCtx->streams[videoStream]->nb_frames != 0) {
        anim->duration = pFormatCtx->streams[videoStream]->nb_frames;
    }
    else {
        anim->duration = ceil(pFormatCtx->duration *
                              av_q2d(frame_rate) /
                              AV_TIME_BASE);
    }

    frs_num = frame_rate.num;
    frs_den = frame_rate.den;

    frs_den *= AV_TIME_BASE;

    while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
        frs_num /= 10;
        frs_den /= 10;
    }

    anim->frs_sec = frs_num;
    anim->frs_sec_base = frs_den;

    anim->params = 0;

    anim->x = pCodecCtx->width;
    anim->y = av_get_cropped_height_from_codec(pCodecCtx);

    anim->pFormatCtx = pFormatCtx;
    anim->pCodecCtx = pCodecCtx;
    anim->pCodec = pCodec;
    anim->videoStream = videoStream;

    anim->interlacing = 0;
    anim->orientation = 0;
    anim->framesize = anim->x * anim->y * 4;

    anim->curposition = -1;
    anim->last_frame = 0;
    anim->last_pts = -1;
    anim->next_pts = -1;
    anim->next_packet.stream_index = -1;

    anim->pFrame = av_frame_alloc();
    anim->pFrameComplete = false;
    anim->pFrameDeinterlaced = av_frame_alloc();
    anim->pFrameRGB = av_frame_alloc();

    if (need_aligned_ffmpeg_buffer(anim)) {
        anim->pFrameRGB->format = AV_PIX_FMT_RGBA;
        anim->pFrameRGB->width  = anim->x;
        anim->pFrameRGB->height = anim->y;

        if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
            fprintf(stderr, "Could not allocate frame data.\n");
            avcodec_close(anim->pCodecCtx);
            avformat_close_input(&anim->pFormatCtx);
            av_frame_free(&anim->pFrameRGB);
            av_frame_free(&anim->pFrameDeinterlaced);
            av_frame_free(&anim->pFrame);
            anim->pCodecCtx = NULL;
            return -1;
        }
    }

    if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) !=
            anim->x * anim->y * 4)
    {
        fprintf(stderr,
                "ffmpeg has changed alloc scheme ... ARGHHH!\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

    if (anim->ib_flags & IB_animdeinterlace) {
        avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
                       MEM_callocN(avpicture_get_size(
                                       anim->pCodecCtx->pix_fmt,
                                       anim->pCodecCtx->width,
                                       anim->pCodecCtx->height),
                                   "ffmpeg deinterlace"),
                       anim->pCodecCtx->pix_fmt,
                       anim->pCodecCtx->width,
                       anim->pCodecCtx->height);
    }

    if (pCodecCtx->has_b_frames) {
        anim->preseek = 25; /* FIXME: detect gopsize ... */
    }
    else {
        anim->preseek = 0;
    }

    anim->img_convert_ctx = sws_getContext(
                                anim->x,
                                anim->y,
                                anim->pCodecCtx->pix_fmt,
                                anim->x,
                                anim->y,
                                AV_PIX_FMT_RGBA,
                                SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                NULL, NULL, NULL);

    if (!anim->img_convert_ctx) {
        fprintf(stderr,
                "Can't transform color space??? Bailing out...\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
    if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
                                  &table, &dstRange, &brightness, &contrast, &saturation))
    {
        srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
        inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

        if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
                                     table, dstRange, brightness, contrast, saturation))
        {
            fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
        }
    }
    else {
        fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
    }
#endif

    return (0);
}
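av_get_r_frame_rate_compat() is a Blender compatibility wrapper rather than an FFmpeg API. Its likely shape is sketched below; the exact version cutoff is an assumption:

/* Sketch of the compatibility wrapper assumed above: newer libavformat
 * favours avg_frame_rate, older builds expose r_frame_rate directly.
 * The version threshold here is an assumption. */
static AVRational av_get_r_frame_rate_compat(const AVStream *stream)
{
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(55, 12, 100)
    return stream->avg_frame_rate;
#else
    return stream->r_frame_rate;
#endif
}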
Esempio n. 19
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer = NULL;

  AVDictionary    *optionsDict = NULL;
  struct SwsContext      *sws_ctx = NULL;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  av_register_all();
  
  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
      videoStream=i;
      break;
    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=av_frame_alloc();
  
  // Allocate an AVFrame structure
  pFrameRGB=av_frame_alloc();
  if(pFrameRGB==NULL)
    return -1;
  
  // Determine required buffer size and allocate buffer
  // (RGB24 to match the avpicture_fill() and sws_getContext() targets below)
  numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
			      pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_RGB24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );
  
  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
  
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	// Convert the image from its native format to RGB
        sws_scale
        (
            sws_ctx,
            (uint8_t const * const *)pFrame->data,
            pFrame->linesize,
            0,
            pCodecCtx->height,
            pFrameRGB->data,
            pFrameRGB->linesize
        );
	
	// Save the frame to disk
	if(++i<=5)
	  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
		    i);
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }
  
  // Free the RGB image
  av_free(buffer);
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
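SaveFrame() is not shown in this example; in the tutorial it comes from, it is a small PPM writer along these lines (a sketch, assuming packed RGB24 data in pFrameRGB):

// Sketch of the usual tutorial SaveFrame(): dump one RGB24 frame as a PPM file.
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
  FILE *pFile;
  char szFilename[32];
  int  y;

  sprintf(szFilename, "frame%d.ppm", iFrame);
  pFile = fopen(szFilename, "wb");
  if (pFile == NULL)
    return;

  // PPM header: binary RGB, dimensions, max color value
  fprintf(pFile, "P6\n%d %d\n255\n", width, height);

  // Write the pixel rows, honoring the frame's stride
  for (y = 0; y < height; y++)
    fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

  fclose(pFile);
}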
Esempio n. 20
// Initialization thread
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "DRONE CAMERA ERROR!!! " << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERROR: avcodec_find_decoder failed!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERROR: avcodec_open2 failed!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERROR: NO IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); // webcam video capture

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) // Until the user starts the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // new capture
				if (!bSuccess) {
					cout << "Unable to read the video stream" << endl;
					break;
				}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) // Show the user's selection live
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) // The user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				// Trackbar for color (hue) selection
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				// Trackbar for saturation, relative to white
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				// Trackbar for brightness, relative to black
				createTrackbar("LowV", winParametrage, &LV, 255); //Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); // Convert from BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // Black out everything outside the user-chosen color range

				// Remove noise
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				// Compute the "distance" to the target; used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			// Extract the interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	// Initialization done: close every window and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL;
}
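The getimg() thread started above is not shown either. Based on the globals prepared in drawingAndParam() (pFormatCtx, pCodecCtx, pFrame, pFrameBGR, pConvertCtx, mutexVideo), it plausibly looks like this sketch:

// Sketch of the assumed getimg() thread: decode the drone's TCP stream and
// keep pFrameBGR updated under mutexVideo for the UI thread to copy.
void *getimg(void *arg)
{
	AVPacket packet;
	int frameFinished = 0;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		if (packet.stream_index == 0) {
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
			if (frameFinished) {
				pthread_mutex_lock(&mutexVideo);
				sws_scale(pConvertCtx, pFrame->data, pFrame->linesize, 0,
				          pCodecCtx->height, pFrameBGR->data, pFrameBGR->linesize);
				pthread_mutex_unlock(&mutexVideo);
			}
		}
		av_free_packet(&packet);
	}
	return NULL;
}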
Esempio n. 21
int main(int argc, char *argv[])
{
	AVFormatContext *pFormatCtx = NULL;
	int             i, videoStream, audioStream;
	AVCodecContext  *pCodecCtx;
	AVCodec         *pCodec;
	AVFrame         *pFrame; 
	AVPacket        packet;
	int             frameFinished;
	float           aspect_ratio;
	SwsContext *pConvertCtx = NULL;

	AVCodecContext  *aCodecCtx;
	AVCodec         *aCodec = NULL;

	SDL_Overlay     *bmp;
	SDL_Surface     *screen;
	SDL_Rect        rect;
	SDL_Event       event;
	SDL_AudioSpec   wanted_spec, spec;
	
	if(argc < 2) {
		fprintf(stderr, "Usage: test <file>\n");
		exit(1);
	}
	
	// Register all formats and codecs
	av_register_all();
	printf("\r\n This is so cool!!\r\n");
	
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}
	
	if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
		printf("\r\n Open fail!!\r\n");
		return -1; // Couldn't open file
	} else {
		printf("\r\n Open successful!!\r\n");
	}
		
	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL)<0) {
		printf("\r\n Find stream fail!!\r\n");
 		return -1; // Couldn't find stream information
	} else {
		printf("\r\n Find stream successful!!\r\n");
	}

	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video stream
	videoStream = -1;
	audioStream = -1;
 	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if((pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
			&& videoStream < 0) {
			videoStream=i;
		}
		else if((pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
			&& audioStream < 0) {
			audioStream = i;
		}
	}

	if(videoStream == -1) {
		printf("\r\n Didn't find a video stream!!\r\n");
		return -1; // Didn't find a video stream
	}
	if(audioStream == -1) {
		printf("\r\n Didn't find an audio stream!!\r\n");
		return -1; // Didn't find an audio stream
	}

	aCodecCtx=pFormatCtx->streams[audioStream]->codec;
	// Set audio settings from codec info
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;

	if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
  	}

	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}
  
	// Open codec
	if(avcodec_open2(aCodecCtx, aCodec, NULL)<0) {
		fprintf(stderr, "codec can't open!\n");
		return -1; // Could not open codec

	}

	packet_queue_init(&audioq);
	SDL_PauseAudio(0);
  
	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
  		return -1; // Codec not found
	}

	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
		fprintf(stderr, "codec can't open!\n");
		return -1; // Could not open codec

	}
		
	// Allocate video frame
	pFrame = av_frame_alloc();
		
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
	if(!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	 // Allocate a place to put our YUV image on that screen
	bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
							   SDL_YV12_OVERLAY, screen);

	i = 0;
	while(av_read_frame(pFormatCtx, &packet)>=0) {
	  	// Is this a packet from the video stream?
		if(packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
			                     &packet);
				
			// Did we get a video frame?
			if(frameFinished) {
				SDL_LockYUVOverlay(bmp);
				
				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];
			
				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];
				// Convert the image into the YUV format that SDL uses.
				// (Creating the SwsContext once, outside the decode loop,
				// would avoid the per-frame setup/teardown cost.)
				pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
					pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR,
					NULL, NULL, NULL);
				if (pConvertCtx == NULL) {
					fprintf(stderr, "Cannot initialize the conversion context\n");
					return -1;
				}
				
				sws_scale(pConvertCtx, pFrame->data, pFrame->linesize, 0,
					pCodecCtx->height, pict.data, pict.linesize);

				SDL_UnlockYUVOverlay(bmp);
				rect.x = 0;
				rect.y = 0;
				rect.w = pCodecCtx->width;
				rect.h = pCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
				sws_freeContext(pConvertCtx); 
			}
			// Video packets are not queued anywhere, so release them here
			av_free_packet(&packet);
		}
		else if(packet.stream_index==audioStream) {
//			printf("\r\n audioStream!\r\n");
			packet_queue_put(&audioq, &packet);
		} else {
			av_free_packet(&packet);
		}
		// Audio packets are freed by the queue consumer; here we just
		// poll for quit events while playing.
		SDL_PollEvent(&event);
		switch(event.type) {
			case SDL_QUIT:
				printf("\r\n SDL_Quit!\r\n");
				SDL_Quit();
				exit(0);
				break;
			default:
				break;
		}
	}
 	printf("\r\n Read stream End!\r\n");

	// Free the YUV frame
	printf("\r\n av_frame_free!\r\n");
	av_frame_free(&pFrame);

	// Close the codec
	printf("\r\n avcodec_close!\r\n");
	avcodec_close(pCodecCtx);

	printf("\r\n avformat_close_input!\r\n");
	avformat_close_input(&pFormatCtx);
	
	printf("\r\n end!\r\n");
	return 0;
}
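packet_queue_init()/packet_queue_put() and the audio_callback() consumer come from the classic SDL/FFmpeg tutorial; for reference, a minimal sketch of the producer side of such a queue (an SDL-mutex-protected AVPacketList):

// Minimal sketch of the tutorial packet queue used by the audio path above.
typedef struct PacketQueue {
	AVPacketList *first_pkt, *last_pkt;
	int nb_packets;
	int size;
	SDL_mutex *mutex;
	SDL_cond *cond;
} PacketQueue;

void packet_queue_init(PacketQueue *q) {
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->cond = SDL_CreateCond();
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
	AVPacketList *pkt1;
	if (av_dup_packet(pkt) < 0)   // take ownership of the packet data
		return -1;
	pkt1 = av_malloc(sizeof(AVPacketList));
	if (!pkt1)
		return -1;
	pkt1->pkt = *pkt;
	pkt1->next = NULL;

	SDL_LockMutex(q->mutex);
	if (!q->last_pkt)
		q->first_pkt = pkt1;
	else
		q->last_pkt->next = pkt1;
	q->last_pkt = pkt1;
	q->nb_packets++;
	q->size += pkt1->pkt.size;
	SDL_CondSignal(q->cond);      // wake the audio callback waiting for data
	SDL_UnlockMutex(q->mutex);
	return 0;
}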
Esempio n. 22
int main(int argc, char *argv[]) {
    av_register_all();

    AVFormatContext *pFormatCtx = NULL;
    openVideoFile(&pFormatCtx, argv[1]);

    av_dump_format(pFormatCtx, 0, argv[1], 0);

    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    // Open codec
    openCodecAndCtx(pFormatCtx, &pCodecCtx, &pCodec);

    // Copy context
    /*
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Counldn't copy codec context");
        return -1;
    }
    */
    AVFrame *pFrame = NULL;

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Allocate an AVFrame structure
    AVFrame *pFrameRGB = av_frame_alloc();
    if (pFrameRGB == NULL)
        return -1;

    uint8_t *buffer = NULL;
    int numBytes;
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
            pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
            pCodecCtx->width, pCodecCtx->height);

    struct SwsContext *sws_ctx = NULL;
    int frameFinished;
    AVPacket packet;
    sws_ctx = sws_getContext(pCodecCtx->width,
            pCodecCtx->height,
            pCodecCtx->pix_fmt,
            pCodecCtx->width,
            pCodecCtx->height,
            PIX_FMT_RGB24,
            SWS_BILINEAR,
            NULL,
            NULL,
            NULL
        );

    int i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
            sws_scale(sws_ctx, (uint8_t const * const*)pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height,
                    pFrameRGB->data, pFrameRGB->linesize);
            }
            ++i;
            if (i <= 240 && i >= 230) {
                SaveFrame(pFrameRGB, pCodecCtx->width,
                        pCodecCtx->height, i);
            }
        }
    }
    av_free_packet(&packet);

    av_free(buffer);
    av_free(pFrameRGB);

    av_free(pFrame);

    avcodec_close(pCodecCtx);

    avformat_close_input(&pFormatCtx);

    return 0;
}
Esempio n. 23
/* Called from the main */
int main(int argc, char **argv)
{
	AVFormatContext *pFormatCtx = NULL;
	int err;
	int i;
	int videoStream;
	AVCodecContext *pCodecCtx;
	AVCodec         *pCodec;
	AVFrame         *pFrame; 

	AVPacket        packet;
	int             frameFinished;
	float           aspect_ratio;

	SDL_Overlay     *bmp;
	SDL_Surface     *screen;
	SDL_Rect        rect;
	SDL_Event       event;


	
	if(argc < 2) {
		printf("Please provide a movie file\n");
		return -1;
	}
	
	// Register all formats and codecs
	av_register_all();

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}

	pFormatCtx = avformat_alloc_context();
	
	// Open video file
	err = avformat_open_input(&pFormatCtx, argv[1],NULL,NULL);
	printf("nb_streams = %d\n",pFormatCtx->nb_streams);
	if(err<0)
	{
		printf("error ret =  %d\n",err);
	}

	// Retrieve stream information
	err = avformat_find_stream_info(pFormatCtx, NULL);
	if(err<0)
	{
		printf("error ret =  %d\n",err);
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video stream
	videoStream=AVMEDIA_TYPE_UNKNOWN;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(AVMEDIA_TYPE_VIDEO==pFormatCtx->streams[i]->codec->codec_type)
		{
			videoStream=i;
			break;
		}
	}
	if(videoStream==AVMEDIA_TYPE_UNKNOWN)
	{
		return -1; // Didn't find a video stream
	}
	printf("videoStream = %d\n",videoStream);
	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame=avcodec_alloc_frame();


	// Make a screen to put our video
#ifndef __DARWIN__
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
	if(!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	// Allocate a place to put our YUV image on that screen
	bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
			 pCodecCtx->height,
			 SDL_YV12_OVERLAY,
			 screen);

	// Read frames and save first five frames to disk
	i=0;
	while(av_read_frame(pFormatCtx, &packet)>=0)
	{
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) 
		{
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
									&packet);

			// Did we get a video frame?
			if(frameFinished) 
			{

				SDL_LockYUVOverlay(bmp);

				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];

				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];

				// Convert the image into the YUV format that SDL uses.
				// (The #include <libswscale/swscale.h> belongs at the top of
				// the file; the scaler context is created once and reused.)
				static struct SwsContext *img_convert_ctx = NULL;
				if (img_convert_ctx == NULL) {
					img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
													 pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
													 PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
				}
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
				 0, pCodecCtx->height, pict.data, pict.linesize);

				SDL_UnlockYUVOverlay(bmp);

				rect.x = 0;
				rect.y = 0;
				rect.w = pCodecCtx->width;
				rect.h = pCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
			
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);

		SDL_PollEvent(&event);
		switch(event.type) {
			case SDL_QUIT:
				SDL_Quit();
				exit(0);
				break;
			default:
				break;
		}		
	}

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
	
}
Esempio n. 24
static int video_open(video_t *video, const char *filename) {
    video->format = PIX_FMT_RGB24;
    if (avformat_open_input(&video->format_context, filename, NULL, NULL) ||
            avformat_find_stream_info(video->format_context, NULL) < 0) {
        fprintf(stderr, ERROR("cannot open video stream %s\n"), filename);
        goto failed;
    }

    video->stream_idx = -1;
    for (int i = 0; i < video->format_context->nb_streams; i++) {
        if (video->format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video->stream_idx = i;
            break;
        }
    }

    if (video->stream_idx == -1) {
        fprintf(stderr, ERROR("cannot find video stream\n"));
        goto failed;
    }

    AVStream *stream = video->format_context->streams[video->stream_idx];
    video->codec_context = stream->codec;
    video->codec = avcodec_find_decoder(video->codec_context->codec_id);

    /* Save Width/Height */
    video->width = video->codec_context->width;
    video->height = video->codec_context->height;

	
    if (!video->codec || avcodec_open2(video->codec_context, video->codec, NULL) < 0) {
        fprintf(stderr, ERROR("cannot open codec\n"));
        goto failed;
    }

    video->buffer_width = video->codec_context->width;
    video->buffer_height = video->codec_context->height;

    fprintf(stderr, INFO("pixel aspect ratio: %d/%d, size: %dx%d buffer size: %dx%d\n"), 
        video->codec_context->sample_aspect_ratio.num,
        video->codec_context->sample_aspect_ratio.den,
        video->width,
        video->height,
        video->buffer_width,
        video->buffer_height
    );

    video->par = (float)video->codec_context->sample_aspect_ratio.num / video->codec_context->sample_aspect_ratio.den;
    if (video->par == 0)
        video->par = 1;

    /* Frame rate fix for some codecs */
    if (video->codec_context->time_base.num > 1000 && video->codec_context->time_base.den == 1)
        video->codec_context->time_base.den = 1000;

    /* Get FPS */
    // http://libav-users.943685.n4.nabble.com/Retrieving-Frames-Per-Second-FPS-td946533.html
    if ((stream->time_base.den != stream->r_frame_rate.num) ||
            (stream->time_base.num != stream->r_frame_rate.den)) {
        video->fps = 1.0 / stream->r_frame_rate.den * stream->r_frame_rate.num;
    } else {
        video->fps = 1.0 / stream->time_base.num * stream->time_base.den;
    }
    fprintf(stderr, INFO("fps: %lf\n"), video->fps);

    /* Get framebuffers */
    video->raw_frame = avcodec_alloc_frame();
    video->scaled_frame = avcodec_alloc_frame();

    if (!video->raw_frame || !video->scaled_frame) {
        fprintf(stderr, ERROR("cannot preallocate frames\n"));
        goto failed;
    }

    /* Create data buffer */
    video->buffer = av_malloc(avpicture_get_size(
        video->format, 
        video->buffer_width, 
        video->buffer_height
    ));

    /* Init buffers */
    avpicture_fill(
        (AVPicture *) video->scaled_frame, 
        video->buffer, 
        video->format, 
        video->buffer_width, 
        video->buffer_height
    );

    /* Init scale & convert */
    video->scaler = sws_getContext(
        video->buffer_width,
        video->buffer_height,
        video->codec_context->pix_fmt,
        video->buffer_width, 
        video->buffer_height, 
        video->format, 
        SWS_BICUBIC, 
        NULL, 
        NULL, 
        NULL
    );

    if (!video->scaler) {
        fprintf(stderr, ERROR("scale context init failed\n"));
        goto failed;
    }

    /* Give some info on stderr about the file & stream */
    av_dump_format(video->format_context, 0, filename, 0);
    return 1;
failed:
    video_free(video);
    return 0;
}
Esempio n. 25
int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("usage: %s output_file\n"
                       "API example program to output a media file with libavformat.\n"
                       "This program generates a synthetic video stream, encodes and\n"
                       "muxes them into a file named output_file.\n"
                       "The output format is automatically guessed according to the file extension.\n"
                       "Raw images can also be output by using '%%d' in the filename.\n"
                       "\n", argv[0]);
        return 1;
    }

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    std::string filename = argv[1];

    /* allocate the output media context */
    AVFormatContext *oc = NULL;
    avformat_alloc_output_context2(&oc, NULL, "mp4", NULL);
    if (!oc) {
        printf("failed to allocate the mp4 format context\n");
        return 1;
    }

    /* Add the video streams using the default format codecs and initialize the codecs. */
    OutputStream video_st = {0};
    AVCodec *video_codec = NULL;
    if (oc->oformat->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, oc->oformat->video_codec);
    }

    /* Now that all the parameters are set, we can open the
     * video codecs and allocate the necessary encode buffers. */
    AVDictionary *opt = NULL;
    if (video_codec != NULL)
        open_video(oc, video_codec, &video_st, opt);

    av_dump_format(oc, 0, filename.c_str(), 1);

    /* open the output file, if needed */
    int ret = 0;
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE)) < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename.c_str(), av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    AVDictionary *movflags = NULL;
    av_dict_set(&movflags, "movflags", "empty_moov+default_base_moof+frag_keyframe", 0);
    if ((ret = avformat_write_header(oc, &movflags)) < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
        return 1;
    }
    av_dict_free(&movflags);

    // Generate raw video frame, encode them and mux into container
    bool encode_video = true;
    while (encode_video) {
        encode_video = write_video_frame(oc, &video_st);
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_codec != NULL)
        close_stream(oc, &video_st);

    if (!(oc->oformat->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Esempio n. 26
int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        printf ("Usage: %s <filename>\n", argv[0]);
        return -1;
    }

    signal (SIGINT, signal_handler);
    signal (SIGTERM, signal_handler);

    const char *src_filename = argv[1];

    if (SDL_Init (SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
    {
        av_log (NULL, AV_LOG_ERROR, "Could not initialize SDL: %s\n",
                SDL_GetError ());
        return -1;
    }

    av_register_all();

    AVFormatContext *format_ctx = NULL;
    if (avformat_open_input (&format_ctx, src_filename, NULL, NULL) < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not open input file\n");
        return -1;
    }

    if (avformat_find_stream_info (format_ctx, NULL) < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not find stream information\n");
        goto end;
    }

    av_dump_format (format_ctx, 0, src_filename, 0);

    PlayerContext ctx = {0};

    AVCodec *video_decoder = NULL;
    ctx.video_stream_index = av_find_best_stream (format_ctx,
                                AVMEDIA_TYPE_VIDEO, -1, -1, &video_decoder, 0);
    if (ctx.video_stream_index < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not find the best video stream\n");
        goto end;
    }

    av_log (NULL, AV_LOG_INFO, "Using video stream: %d\n",
            ctx.video_stream_index);

    ctx.video_codec = format_ctx->streams[ctx.video_stream_index]->codec;
    if (avcodec_open2 (ctx.video_codec, video_decoder, NULL) < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not open video codec\n");
        goto end;
    }

    AVCodec *audio_decoder = NULL;
    ctx.audio_stream_index = av_find_best_stream (format_ctx,
                                AVMEDIA_TYPE_AUDIO, -1, -1, &audio_decoder, 0);
    if (ctx.audio_stream_index < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not find the best audio stream\n");
        goto end;
    }

    av_log (NULL, AV_LOG_INFO, "Using audio stream: %d\n",
            ctx.audio_stream_index);

    ctx.audio_codec = format_ctx->streams[ctx.audio_stream_index]->codec;
    if (avcodec_open2 (ctx.audio_codec, audio_decoder, NULL) < 0)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not open audio codec\n");
        goto end;
    }

    ctx.frame = avcodec_alloc_frame ();
    if (!ctx.frame)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not allocate video frame\n");
        goto end;
    }

    SDL_Surface *screen = SDL_SetVideoMode (ctx.video_codec->width, ctx.video_codec->height,
                                            0, 0);
    if (!screen)
    {
        av_log (NULL, AV_LOG_ERROR, "Could not create screen: %s\n",
                SDL_GetError ());
        goto end;
    }

    ctx.overlay = SDL_CreateYUVOverlay (
                ctx.video_codec->width, ctx.video_codec->height, SDL_YV12_OVERLAY, screen);

    av_log (NULL, AV_LOG_INFO, "YUV overlay: format=%u, w=%d, h=%d, planes=%d\n",
             ctx.overlay->format, ctx.overlay->w, ctx.overlay->h,
             ctx.overlay->planes);

    ctx.rect.x = 0;
    ctx.rect.y = 0;
    ctx.rect.w = ctx.video_codec->width;
    ctx.rect.h = ctx.video_codec->height;

    SDL_AudioSpec desired_spec =
    {
        .freq = ctx.audio_codec->sample_rate,
        .format = AUDIO_S16SYS,
        .channels = ctx.audio_codec->channels,
        .samples = SDL_AUDIO_BUFFER_SIZE,
        .callback = audio_callback,
        .userdata = &ctx
    };

    SDL_AudioSpec audio_spec = {0};

    if (SDL_OpenAudio (&desired_spec, &audio_spec) < 0)
    {
        av_log (NULL, AV_LOG_INFO, "Could not open audio device: %s\n",
                SDL_GetError ());
        goto end;
    }

    packet_queue_init (&ctx.audioq);
    SDL_PauseAudio (0);

    AVPacket pkt =
    {
        .data = NULL,
        .size = 0
    };

    SDL_Event event;

    while ((av_read_frame (format_ctx, &pkt) >= 0) && !quit)
    {
        process_packet (&pkt, &ctx);

        SDL_PollEvent (&event);
        switch (event.type) {
        case SDL_QUIT:
            packet_queue_stop (&ctx.audioq);
            quit = 1;
            break;
        default:
            break;
        }
    }

    // flush cached frames
    pkt.data = NULL;
    pkt.size = 0;
    do
    {
        process_packet(&pkt, &ctx);
    }
    while (ctx.got_frame && !quit);

end:
    av_free_packet (&pkt);
    av_free (ctx.frame);
    av_free (ctx.data[0]);
    if (ctx.video_codec)
        avcodec_close (ctx.video_codec);
    if (format_ctx)
        avformat_close_input (&format_ctx);
    SDL_CloseAudio ();
    SDL_Quit ();

    return 0;
}
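process_packet() is left out of this example. A plausible sketch, using the PlayerContext fields referenced above (video decoded toward the YUV overlay, audio packets onto the queue); the overlay pixel copy is elided:

// Sketch of the assumed process_packet(): route packets to video or audio.
static void process_packet (AVPacket *pkt, PlayerContext *ctx)
{
    if (pkt->stream_index == ctx->video_stream_index)
    {
        avcodec_decode_video2 (ctx->video_codec, ctx->frame,
                               &ctx->got_frame, pkt);
        if (ctx->got_frame)
        {
            // A real implementation would sws_scale() the frame into
            // ctx->overlay->pixels before displaying it.
            SDL_DisplayYUVOverlay (ctx->overlay, &ctx->rect);
        }
        av_free_packet (pkt);
    }
    else if (pkt->stream_index == ctx->audio_stream_index)
    {
        packet_queue_put (&ctx->audioq, pkt);  // consumed by audio_callback
    }
    else
    {
        av_free_packet (pkt);
    }
}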
Esempio n. 27
File: muxing.c Progetto: BtbN/FFmpeg
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Esempio n. 28
int _tmain(int argc, _TCHAR* argv[])
{
    ISubtitles* subtitle = NULL;
    if (!ISubtitles::create(&subtitle)) {
		printf("failed to create subtitle instance.\n");
        return 1;
    }
    
#ifdef EMBEDDING_SUBTITLE
	av_register_all();

	avformat_network_init();

	AVFormatContext *fmt_ctx = NULL;
	char *url = LOCAL_FILE;
	int subtitle_stream_idx = -1;
	AVStream* subtitle_stream;
	AVCodecContext* subtitle_dec_ctx;
	AVPacket pkt;
	AVSubtitle sub;
	int got_sub;
	int ret;
	int index = 0;

	/* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, url, NULL, NULL) < 0) {
		LOGE("Could not open source file");
        return 1;
    }

	/* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        LOGE("Could not find stream information\n");
        return 1;
    }

	if (open_codec_context(fmt_ctx, &subtitle_stream_idx, AVMEDIA_TYPE_SUBTITLE) < 0) {
		LOGE("failed to find subttile track");
		return 1;
	}

	subtitle_stream = fmt_ctx->streams[subtitle_stream_idx];
	subtitle_dec_ctx = subtitle_stream->codec;

	/* dump input information to stderr */
	av_dump_format(fmt_ctx, 0, url, 0);

	SubtitleCodecId codec_id;
	if (subtitle_dec_ctx->codec_id == AV_CODEC_ID_ASS ||
		subtitle_dec_ctx->codec_id == AV_CODEC_ID_SSA)
		codec_id = SUBTITLE_CODEC_ID_ASS;
	else
		codec_id = SUBTITLE_CODEC_ID_TEXT;
	ret = subtitle->addEmbeddingSubtitle(codec_id, "chs", "chs", 
		(const char *)subtitle_dec_ctx->extradata, subtitle_dec_ctx->extradata_size);
	if (ret < 0) {
		LOGE("failed to addEmbeddingSubtitle");
		return 1;
	}

	/* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0 && index < 10 ) {
		if (pkt.stream_index == subtitle_stream_idx) {
			AVPacket orig_pkt = pkt;
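			/* One subtitle packet may need several decode calls:
			 * avcodec_decode_subtitle2() returns the number of bytes it
			 * consumed, so pkt.data/pkt.size are advanced until the
			 * packet is drained; orig_pkt keeps the original buffer so
			 * it can be freed afterwards. */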
			do {
				ret = avcodec_decode_subtitle2(subtitle_dec_ctx, &sub, &got_sub, &pkt);
				if (ret < 0) {
					break;
				}
				if (got_sub) {
					LOGI("got subtitle");

					for (unsigned int i = 0; i < sub.num_rects; i++) {
						if (sub.rects[i]->ass) {
							int64_t start_time, stop_time;
							AVRational ra;
							ra.num = 1;
							ra.den = AV_TIME_BASE;
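							/* sub.pts is in AV_TIME_BASE (microsecond) units
							 * and the display times are millisecond offsets,
							 * hence the * 1000; av_rescale_q() converts the
							 * result into the subtitle stream's time base */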
							start_time = av_rescale_q(sub.pts + sub.start_display_time * 1000,
									 ra, subtitle_stream->time_base);
							stop_time = av_rescale_q(sub.pts + sub.end_display_time * 1000,
									 ra, subtitle_stream->time_base);
							subtitle->addEmbeddingSubtitleEntity(0, start_time, stop_time - start_time, 
								sub.rects[i]->ass, strlen(sub.rects[i]->ass)); // my_strlen_utf8_c

							index++;
						}
					}
					avsubtitle_free(&sub);
				}
				pkt.data += ret;
				pkt.size -= ret;
			} while (pkt.size > 0);
			av_free_packet(&orig_pkt);
		}
		else {
			av_free_packet(&pkt);
		}
    }

#else
	if (!subtitle->loadSubtitle(SUB_FILE_PATH, false)) {
		printf("failed to load subtitle: %s", SUB_FILE_PATH);
		return 1;
	}
#endif

    STSSegment* segment = NULL;
	char subtitleText[1024] = {0};

	int line = 0;
    while(line < 20 && subtitle->getNextSubtitleSegment(&segment)) {
        int64_t startTime = segment->getStartTime();
        int64_t stopTime = segment->getStopTime();
		segment->getSubtitleText(subtitleText, 1024);
        LOGI("%01d:%02d:%02d.%02d  --> %01d:%02d:%02d.%02d %s",
            int(startTime/1000/3600), int(startTime/1000%3600/60), int(startTime/1000%60), int(startTime%1000)/10,
            int(stopTime/1000/3600), int(stopTime/1000%3600/60), int(stopTime/1000%60), int(stopTime%1000)/10,
			CW2A(CA2W(subtitleText, CP_UTF8)));

		//getchar();
		line++;
    }

    subtitle->close();

	return 0;
}
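
The open_codec_context() helper called above is not part of this listing. A hypothetical sketch of what it might do, assuming it wraps av_find_best_stream() and opens the legacy per-stream decoder context that the caller then reads through stream->codec (not the original implementation):

/* Hypothetical helper: locate the first stream of the requested media
 * type and open its decoder via the legacy per-stream codec context.
 * Returns 0 on success, a negative value on failure. */
static int open_codec_context(AVFormatContext *fmt_ctx, int *stream_idx,
                              enum AVMediaType type)
{
    int idx = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (idx < 0)
        return idx;                 /* no stream of this type in the input */

    AVCodecContext *dec_ctx = fmt_ctx->streams[idx]->codec;
    AVCodec *dec = avcodec_find_decoder(dec_ctx->codec_id);
    if (!dec || avcodec_open2(dec_ctx, dec, NULL) < 0)
        return -1;

    *stream_idx = idx;
    return 0;
}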
Esempio n. 29
int open_aplayer(const char *filename, struct aplayer_t *player) {
    // creates a new format (file container) context to be
    // used to detect the kind of file in use
    AVFormatContext *container = avformat_alloc_context();
    if(avformat_open_input(&container, filename, NULL, NULL) < 0) {
        WARN("Could not open file");
    }
    if(avformat_find_stream_info(container, NULL) < 0) {
        WARN("Could not find file info");
    }

#ifdef _DEBUG
    // dumps the format information to the standard output;
    // this should print information on the container file
    av_dump_format(container, 0, filename, false);
#endif

    // starts the (audio) stream id with an invalid value and then
    // iterates over the complete set of stream to find one that complies
    // with the audio "interface"
    int stream_id = -1;
    for(unsigned int index = 0; index < container->nb_streams; index++) {
        // retrieves the current stream and checks that its codec
        // type is audio; if it's not, continues the loop
        // (nothing to be done for other stream types)
        AVStream *stream = container->streams[index];
        if(stream->codec->codec_type != AVMEDIA_TYPE_AUDIO) { continue; }

        // sets the current index as the stream identifier
        // to be used from now on
        stream_id = index;
        break;
    }
    if(stream_id == -1) { WARN("Could not find Audio Stream"); }

    // retrieves the codec context associated with the audio stream
    // that was just discovered
    AVCodecContext *codec_ctx = container->streams[stream_id]->codec;

    // tries to find the codec for the current codec context and
    // opens it for the current execution
    AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id);
    if(codec == NULL) { WARN("Cannot find codec"); }
    if(avcodec_open2(codec_ctx, codec, NULL) < 0) { WARN("Codec cannot be opened"); }

    // initializes the ao structure creating the device associated
    // with the created structures this is going to be used
    ao_device *device = init_ao(codec_ctx);

    // allocates the buffer to be used in the packet for the
    // unpacking of the various packets
    uint8_t buffer[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];

    // creates the packet structure to be used and initializes
    // it, this is going to be the payload for each iteration
    // then sets its data and size
    av_init_packet(&player->packet);
    player->packet.data = buffer;
    player->packet.size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;

    // allocates a new frame structure to be used for the audio
    // frames in iteration
    AVFrame *frame = avcodec_alloc_frame();

    // updates the player structure with all the attributes that
    // were retrieved for the current context
    player->device = device;
    player->stream_id = stream_id;
    player->frame = frame;
    player->container = container;
    player->codec_ctx = codec_ctx;

    return 0;
}
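
Two of the calls above were removed from later FFmpeg releases: avcodec_alloc_frame() was superseded by av_frame_alloc(), and AVCODEC_MAX_AUDIO_FRAME_SIZE is gone because decoders now allocate frame buffers themselves. A minimal sketch of the modern setup (assuming FFmpeg 2.x or newer; WARN is the example's own macro):

/* Modern allocation: no fixed-size audio buffer is needed, because the
 * decoder fills frame->data itself and av_read_frame() provides the
 * packet payload on each iteration. */
AVFrame *frame = av_frame_alloc();
if (frame == NULL) { WARN("Cannot allocate frame"); }

AVPacket packet;
av_init_packet(&packet);
packet.data = NULL;             /* filled by av_read_frame() */
packet.size = 0;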
Esempio n. 30
int decode_thread(void* arg) {
//	CoInitialize(NULL);
	CoInitializeEx(NULL, COINIT_MULTITHREADED);
	printf("decode_thread\n");
	VideoState* is = (VideoState*) arg;
	AVFormatContext* pFormatCtx = NULL;
	AVPacket pkt1, *packet = &pkt1;

	int video_index = -1;
	int audio_index = -1;
	int i;

	global_video_state = is;

	if (avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
		return -1;
	}

	is->pFormatCtx = pFormatCtx;
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		return -1;
	}

	av_dump_format(pFormatCtx, 0, is->filename, 0);

	for (i = 0; i < pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
				&& video_index < 0)
			video_index = i;
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
				&& audio_index < 0)
			audio_index = i;
	}

	if (audio_index >= 0)
		stream_component_open(is, audio_index);

	if (video_index >= 0)
		stream_component_open(is, video_index);

	if (is->videoStream < 0 || is->audioStream < 0) {
		fprintf(stderr, "%s: could not open codecs\n", is->filename);
		goto fail;
	}

// main decode loop
	for (;;) {
		if (is->quit) {
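			/* wake anything blocked waiting on the audio queue's
			 * condition variable so it can notice the quit flag
			 * instead of sleeping forever */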
			SDL_LockMutex(is->audioq.mutex);
			SDL_CondSignal(is->audioq.cond);
			SDL_UnlockMutex(is->audioq.mutex);
			break;
		}

		//seek stuff goes here
		if (is->seek_req) {
			int stream_index = -1;
			int64_t seek_target = is->seek_pos;
			if (is->videoStream >= 0)
				stream_index = is->videoStream;
			else if (is->audioStream >= 0)
				stream_index = is->audioStream;

			if (stream_index >= 0) {
				/* seek_pos arrives in AV_TIME_BASE units, while
				 * av_seek_frame() expects a timestamp in the target
				 * stream's own time base */
				seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
						pFormatCtx->streams[stream_index]->time_base);
			}

			if (av_seek_frame(is->pFormatCtx, stream_index, seek_target,
					is->seek_flags) < 0) {
				fprintf(stderr, "%s: error while seeking\n",
						is->pFormatCtx->filename);
			} else {
				if (is->audioStream >= 0) {
					packet_queue_flush(&is->audioq);
					packet_queue_put(&is->audioq, &flush_pkt, 0);
				}
			}
			is->seek_req = 0;
		}

		if (is->audioq.size > MAX_AUDIOQ_SIZE
				|| is->videoq.size > MAX_VIDEOQ_SIZE) {
			SDL_Delay(10);
			continue;
		}
		if (av_read_frame(is->pFormatCtx, packet) < 0) {
			printf("av_read_frame\n");
			fflush(stdout);
			if (is->pFormatCtx->pb->error == 0) {
				SDL_Delay(100); // no error, wait for user input
				continue;
			} else {
				break;
			}
		}

		if (packet->stream_index == is->videoStream) {
			packet_queue_put(&is->videoq, packet, 1);
		} else if (packet->stream_index == is->audioStream) {
			packet_queue_put(&is->audioq, packet, 0);
		} else {
			av_free_packet(packet);
		}
	}

// all done - wait for it
	while (!is->quit) {
		SDL_Delay(100);
	}

fail:
	{
		SDL_Event event;
		event.type = FF_QUIT_EVENT;
		event.user.data1 = is;
		SDL_PushEvent(&event);
	}
	return 0;
}
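
packet_queue_flush(), used in the seek branch above, is also not defined in this listing. A sketch of one plausible implementation, assuming the usual PacketQueue layout from these player tutorials (a chain of AVPacketList nodes plus mutex, nb_packets and size fields):

/* Drop every queued packet so no stale pre-seek data reaches the
 * decoders. The queue mutex must be held while unlinking nodes, since
 * the decode threads walk the same list concurrently. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}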