void ffmpeg_native_reader::init_video_codec()
	{
		// Pick the FFmpeg decoder that matches the configured codec name.
		if(strcmp(codec, "JPEG") == 0)
		{
			video_codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
		}
		else if(strcmp(codec, "H264") == 0)
		{
			video_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
		}
		else if(strcmp(codec, "MPEG4") == 0)
		{
			video_codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
		}

		if(!video_codec)
			throw std::runtime_error("ffmpeg: unsupported or unavailable video codec");

		// Allocate a decoder context and describe the incoming stream.
		video_codec_context = avcodec_alloc_context3(video_codec);
		video_codec_context->codec_type = AVMEDIA_TYPE_VIDEO;

		video_codec_context->pix_fmt = AV_PIX_FMT_YUV420P;
		video_codec_context->width = width;
		video_codec_context->height = height;

		if (avcodec_open2(video_codec_context, video_codec, NULL) < 0)
			throw std::runtime_error("ffmpeg: Unable to open video codec");

		alloc_frames();
	}
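
For context, a decoder context opened like this is then fed compressed packets. Below is a minimal sketch of that feeding side using FFmpeg's send/receive decoding API; the decode_packet helper and the on_frame callback are hypothetical names, not part of the project above.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: push one compressed packet into an opened decoder
 * context (such as video_codec_context above) and hand every decoded
 * AVFrame to a caller-supplied callback. */
static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt,
                         void (*on_frame)(const AVFrame *))
{
    int ret = avcodec_send_packet(ctx, pkt);   /* pkt == NULL flushes */
    if (ret < 0)
        return ret;

    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    for (;;)
    {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
            ret = 0;            /* needs more input, or fully drained */
            break;
        }
        if (ret < 0)
            break;              /* real decoding error */
        on_frame(frame);        /* e.g. convert / display the picture */
    }

    av_frame_free(&frame);
    return ret;
}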
Example #2
void gavl_audio_convert(gavl_audio_converter_t * cnv,
                        const gavl_audio_frame_t * input_frame,
                        gavl_audio_frame_t * output_frame)
  {
  int i;
  gavl_audio_convert_context_t * ctx;

  /* Wire the caller's frames to both ends of the conversion chain */
  cnv->contexts->input_frame = input_frame;
  cnv->last_context->output_frame = output_frame;

  /* Make sure the intermediate frames are large enough for this input */
  alloc_frames(cnv, input_frame->valid_samples, -1.0);

  ctx = cnv->contexts;

  /* Run each conversion step in order */
  for(i = 0; i < cnv->num_conversions; i++)
    {
    ctx->output_frame->valid_samples = 0;
    if(ctx->func)
      {
      ctx->func(ctx);
      /* Steps that don't change the sample count leave valid_samples
         untouched, so propagate it from the input frame */
      if(!ctx->output_frame->valid_samples)
        ctx->output_frame->valid_samples = ctx->input_frame->valid_samples;

      /* Rescale the timestamp if this step changes the samplerate */
      if(ctx->output_format.samplerate != ctx->input_format.samplerate)
        ctx->output_frame->timestamp =
          gavl_time_rescale(ctx->input_format.samplerate,
                            ctx->output_format.samplerate,
                            ctx->input_frame->timestamp);
      else
        ctx->output_frame->timestamp = ctx->input_frame->timestamp;
      }
    ctx = ctx->next;
    }
  }
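
For reference, a minimal sketch of a caller of gavl_audio_convert() follows. It assumes the usual gavl setup entry points (gavl_audio_converter_create(), gavl_audio_converter_init(), gavl_audio_frame_create() and friends); the concrete formats and frame size are arbitrary placeholder values.

#include <string.h>
#include <gavl/gavl.h>

/* Sketch: convert interleaved 16-bit stereo at 48 kHz to float at 44.1 kHz. */
void convert_one_block(void)
  {
  gavl_audio_format_t in_fmt, out_fmt;
  memset(&in_fmt, 0, sizeof(in_fmt));

  in_fmt.samplerate        = 48000;
  in_fmt.num_channels      = 2;
  in_fmt.sample_format     = GAVL_SAMPLE_S16;
  in_fmt.interleave_mode   = GAVL_INTERLEAVE_ALL;
  in_fmt.samples_per_frame = 1024;
  gavl_set_channel_setup(&in_fmt);       /* default channel locations */

  gavl_audio_format_copy(&out_fmt, &in_fmt);
  out_fmt.samplerate    = 44100;
  out_fmt.sample_format = GAVL_SAMPLE_FLOAT;

  gavl_audio_converter_t *cnv = gavl_audio_converter_create();
  gavl_audio_converter_init(cnv, &in_fmt, &out_fmt);

  gavl_audio_frame_t *in  = gavl_audio_frame_create(&in_fmt);
  gavl_audio_frame_t *out = gavl_audio_frame_create(&out_fmt);

  /* ... fill the input buffers (e.g. in->samples.s_16) ... */
  in->valid_samples = in_fmt.samples_per_frame;

  gavl_audio_convert(cnv, in, out);      /* runs the chain shown above */

  gavl_audio_frame_destroy(in);
  gavl_audio_frame_destroy(out);
  gavl_audio_converter_destroy(cnv);
  }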
Example #3
void gavl_audio_converter_resample(gavl_audio_converter_t * cnv,
                                   gavl_audio_frame_t * input_frame,
                                   gavl_audio_frame_t * output_frame,
                                   double ratio)
  {
  gavl_audio_convert_context_t * ctx;

  /* Wire the caller's frames to both ends of the conversion chain */
  cnv->contexts->input_frame = input_frame;
  cnv->last_context->output_frame = output_frame;

  /* Make sure the intermediate frames are large enough for this
     input and resample ratio */
  alloc_frames(cnv, input_frame->valid_samples, ratio);

  ctx = cnv->contexts;

  while(ctx)
    {
    ctx->output_frame->valid_samples = 0;

    /* Push the new ratio into the samplerate converter if it changed;
       the output samplerate is implicitly input samplerate * ratio */
    if(ctx->samplerate_converter &&
       (ctx->samplerate_converter->ratio != ratio))
      {
      ctx->samplerate_converter->ratio = ratio;
      ctx->samplerate_converter->data.src_ratio = ratio;
      }

    if(ctx->func)
      {
      ctx->func(ctx);
      /* Steps that don't change the sample count leave valid_samples
         untouched, so propagate it from the input frame */
      if(!ctx->output_frame->valid_samples)
        ctx->output_frame->valid_samples = ctx->input_frame->valid_samples;

      /* The timestamp is passed through unchanged */
      ctx->output_frame->timestamp = ctx->input_frame->timestamp;
      }
    ctx = ctx->next;
    }
  }
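
Unlike gavl_audio_convert(), this variant leaves timestamps untouched, which is what a caller doing variable-speed playback or time stretching usually wants. A minimal sketch of such a caller is below; it reuses a converter and frames set up as in the previous sketch (whose differing samplerates put a samplerate converter into the chain), and the ratio value is an arbitrary example.

#include <gavl/gavl.h>

/* Sketch: stretch one block by a given ratio. cnv, in and out are assumed to
   be set up as in the previous sketch, with a samplerate converter in the
   chain, and the frames are assumed to be large enough for the ratio. */
static void stretch_block(gavl_audio_converter_t *cnv,
                          gavl_audio_frame_t *in,
                          gavl_audio_frame_t *out,
                          double ratio)
  {
  gavl_audio_converter_resample(cnv, in, out, ratio);
  /* With libsamplerate semantics (src_ratio = output rate / input rate),
     out->valid_samples ends up at roughly ratio * in->valid_samples. */
  }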
Example #4
void gavl_video_convert(gavl_video_converter_t * cnv,
                        const gavl_video_frame_t * input_frame,
                        gavl_video_frame_t * output_frame)
  {
  gavl_video_convert_context_t * tmp_ctx;

  /* Make sure the intermediate frames of the chain are allocated */
  alloc_frames(cnv);

  /* Wire the caller's frames to both ends of the conversion chain */
  cnv->first_context->input_frame = input_frame;
  cnv->last_context->output_frame = output_frame;

  /* Run each conversion step, carrying the frame metadata
     (timestamp, duration, ...) along the chain */
  tmp_ctx = cnv->first_context;

  while(tmp_ctx)
    {
    gavl_video_frame_copy_metadata(tmp_ctx->output_frame,
                                   tmp_ctx->input_frame);
    tmp_ctx->func(tmp_ctx);
    tmp_ctx = tmp_ctx->next;
    }
  }
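
For reference, a minimal sketch of a caller of gavl_video_convert() follows. It assumes the standard gavl video setup calls (gavl_video_converter_create(), gavl_video_converter_init(), gavl_video_frame_create()); the resolution, framerate and pixelformats are arbitrary placeholder values.

#include <string.h>
#include <gavl/gavl.h>

/* Sketch: convert one RGB24 frame to planar YUV 4:2:0 at the same size. */
void convert_one_frame(void)
  {
  gavl_video_format_t in_fmt, out_fmt;
  memset(&in_fmt, 0, sizeof(in_fmt));

  in_fmt.image_width    = 640;
  in_fmt.image_height   = 480;
  in_fmt.frame_width    = 640;
  in_fmt.frame_height   = 480;
  in_fmt.pixel_width    = 1;          /* square pixels */
  in_fmt.pixel_height   = 1;
  in_fmt.timescale      = 25;
  in_fmt.frame_duration = 1;
  in_fmt.pixelformat    = GAVL_RGB_24;

  gavl_video_format_copy(&out_fmt, &in_fmt);
  out_fmt.pixelformat = GAVL_YUV_420_P;

  gavl_video_converter_t *cnv = gavl_video_converter_create();
  gavl_video_converter_init(cnv, &in_fmt, &out_fmt);

  gavl_video_frame_t *in  = gavl_video_frame_create(&in_fmt);
  gavl_video_frame_t *out = gavl_video_frame_create(&out_fmt);

  /* ... fill in->planes[0] (row stride in->strides[0]) with RGB data ... */

  gavl_video_convert(cnv, in, out);   /* runs the chain shown above */

  gavl_video_frame_destroy(in);
  gavl_video_frame_destroy(out);
  gavl_video_converter_destroy(cnv);
  }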