Example #1
int gavl_audio_converter_init_resample(gavl_audio_converter_t * cnv,
                                   const gavl_audio_format_t * format)
  {
  gavl_audio_format_t tmp_format;
  gavl_audio_convert_context_t * ctx;
  
  gavl_audio_format_copy(&cnv->input_format, format);
  gavl_audio_format_copy(&cnv->output_format, format);
  gavl_audio_format_copy(&tmp_format, format);

  adjust_format(&cnv->input_format);
  adjust_format(&cnv->output_format);

  /* Delete previous conversions */
  audio_converter_cleanup(cnv);

  cnv->current_format = &cnv->input_format;

  put_samplerate_context(cnv, &tmp_format, cnv->output_format.samplerate);

  /* put_samplerate_context() may already have converted the sample format and
     interleave mode; check whether it did and add contexts to convert back */
  if(cnv->current_format->sample_format != cnv->output_format.sample_format)
    {
    if(cnv->current_format->interleave_mode == GAVL_INTERLEAVE_2)
      {
      tmp_format.interleave_mode = GAVL_INTERLEAVE_NONE;
      ctx = gavl_interleave_context_create(&cnv->opt,
                                           cnv->current_format,
                                           &tmp_format);
      add_context(cnv, ctx);
      }

    tmp_format.sample_format = cnv->output_format.sample_format;
    ctx = gavl_sampleformat_context_create(&cnv->opt,
                                           cnv->current_format,
                                           &tmp_format);
    add_context(cnv, ctx);
    }

  /* Final interleaving */

  if(cnv->current_format->interleave_mode != cnv->output_format.interleave_mode)
    {
    tmp_format.interleave_mode = cnv->output_format.interleave_mode;
    ctx = gavl_interleave_context_create(&cnv->opt,
                                         cnv->current_format,
                                         &tmp_format);
    add_context(cnv, ctx);
    }

  cnv->input_format.samples_per_frame = 0;

  return cnv->num_conversions;
  }
Example #2
int gavl_audio_converter_init(gavl_audio_converter_t* cnv,
                              const gavl_audio_format_t * input_format,
                              const gavl_audio_format_t * output_format)
  {
  gavl_audio_format_copy(&cnv->input_format, input_format);
  gavl_audio_format_copy(&cnv->output_format, output_format);

  adjust_format(&cnv->input_format);
  adjust_format(&cnv->output_format);
  return gavl_audio_converter_reinit(cnv);
  }
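For context, a converter set up with gavl_audio_converter_init() is normally driven frame by frame with gavl_audio_convert(). The sketch below is illustrative only: the 44.1 kHz stereo S16 to non-interleaved float formats, the frame size, and the omitted error handling are assumptions, not taken from the examples on this page.

#include <string.h>
#include <gavl/gavl.h>

static void convert_one_frame_sketch(void)
  {
  gavl_audio_format_t in_fmt, out_fmt;
  gavl_audio_converter_t * cnv;
  gavl_audio_frame_t * in_frame, * out_frame;

  /* Describe the input: 44.1 kHz, stereo, interleaved 16 bit */
  memset(&in_fmt, 0, sizeof(in_fmt));
  in_fmt.samplerate        = 44100;
  in_fmt.num_channels      = 2;
  in_fmt.sample_format     = GAVL_SAMPLE_S16;
  in_fmt.interleave_mode   = GAVL_INTERLEAVE_ALL;
  in_fmt.samples_per_frame = 1024;
  gavl_set_channel_setup(&in_fmt);

  /* Output: same channel layout, but float and non-interleaved */
  gavl_audio_format_copy(&out_fmt, &in_fmt);
  out_fmt.sample_format   = GAVL_SAMPLE_FLOAT;
  out_fmt.interleave_mode = GAVL_INTERLEAVE_NONE;

  cnv = gavl_audio_converter_create();

  /* Returns the number of conversion steps; 0 means passthrough */
  if(gavl_audio_converter_init(cnv, &in_fmt, &out_fmt))
    {
    in_frame  = gavl_audio_frame_create(&in_fmt);
    out_frame = gavl_audio_frame_create(&out_fmt);

    /* ... fill in_frame and set in_frame->valid_samples ... */
    gavl_audio_convert(cnv, in_frame, out_frame);
    /* ... use out_frame->valid_samples converted samples ... */

    gavl_audio_frame_destroy(in_frame);
    gavl_audio_frame_destroy(out_frame);
    }

  gavl_audio_converter_destroy(cnv);
  }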
Example #3
gavl_audio_convert_context_t *
gavl_audio_convert_context_create(gavl_audio_format_t  * input_format,
                                  gavl_audio_format_t  * output_format)
  {
  gavl_audio_convert_context_t * ret;
  ret = calloc(1, sizeof(*ret));
  gavl_audio_format_copy(&ret->input_format,  input_format);
  gavl_audio_format_copy(&ret->output_format, output_format);

  return ret;
  }
Example #4
int bg_avdec_start(void * priv)
  {
  int i;
  const gavl_video_format_t * format;
  avdec_priv * avdec = priv;
  
  if(!bgav_start(avdec->dec))
    {
    return 0;
    }
  for(i = 0; i < avdec->current_track->num_video_streams; i++)
    {
    gavl_video_format_copy(&(avdec->current_track->video_streams[i].format),
                           bgav_get_video_format(avdec->dec, i));

    gavl_metadata_copy(&avdec->current_track->video_streams[i].m,
                       bgav_get_video_metadata(avdec->dec, i));
    
    avdec->current_track->video_streams[i].duration =
      bgav_video_duration(avdec->dec, i);
    
    }
  for(i = 0; i < avdec->current_track->num_audio_streams; i++)
    {
    gavl_audio_format_copy(&(avdec->current_track->audio_streams[i].format),
                           bgav_get_audio_format(avdec->dec, i));

    gavl_metadata_copy(&avdec->current_track->audio_streams[i].m,
                       bgav_get_audio_metadata(avdec->dec, i));
    
    avdec->current_track->audio_streams[i].duration =
      bgav_audio_duration(avdec->dec, i);
    }

  for(i = 0; i < avdec->current_track->num_text_streams; i++)
    {
    gavl_metadata_copy(&avdec->current_track->text_streams[i].m,
                       bgav_get_text_metadata(avdec->dec, i));
    
    avdec->current_track->text_streams[i].duration =
      bgav_text_duration(avdec->dec, i);

    avdec->current_track->text_streams[i].timescale = 
      bgav_get_text_timescale(avdec->dec, i);
    }

  for(i = 0; i < avdec->current_track->num_overlay_streams; i++)
    {
    gavl_metadata_copy(&avdec->current_track->overlay_streams[i].m,
                       bgav_get_overlay_metadata(avdec->dec, i));
    
    avdec->current_track->overlay_streams[i].duration =
      bgav_overlay_duration(avdec->dec, i);
    
    format = bgav_get_overlay_format(avdec->dec, i);
    gavl_video_format_copy(&avdec->current_track->overlay_streams[i].format,
                           format);
    }
  return 1;
  }
Example #5
void
gavl_audio_source_set_dst(gavl_audio_source_t * s, int dst_flags,
                          const gavl_audio_format_t * dst_format)
  {
  s->next_pts = GAVL_TIME_UNDEFINED;
  s->dst_flags = dst_flags;

  if(dst_format)
    gavl_audio_format_copy(&s->dst_format, dst_format);
  else
    gavl_audio_format_copy(&s->dst_format, &s->src_format);

  if(gavl_audio_converter_init(s->cnv,
                               &s->src_format, &s->dst_format))
    s->flags |= FLAG_DO_CONVERT;
  else
    s->flags &= ~FLAG_DO_CONVERT;
  
  if(!(s->flags & FLAG_DO_CONVERT) &&
     (s->src_format.samples_per_frame == s->dst_format.samples_per_frame) &&
     !(s->src_flags & GAVL_SOURCE_SRC_FRAMESIZE_MAX))
    s->flags |= (FLAG_PASSTHROUGH | FLAG_PASSTHROUGH_INIT);
  else
    s->flags &= ~(FLAG_PASSTHROUGH | FLAG_PASSTHROUGH_INIT);
  
  if(s->out_frame)
    {
    gavl_audio_frame_destroy(s->out_frame);
    s->out_frame = NULL;
    }
  if(s->dst_frame)
    {
    gavl_audio_frame_destroy(s->dst_frame);
    s->dst_frame = NULL;
    }
  if(s->buffer_frame)
    {
    gavl_audio_frame_destroy(s->buffer_frame);
    s->buffer_frame = NULL;
    }

  s->frame = NULL;

  s->flags |= FLAG_DST_SET;

  }
Example #6
static int
add_audio_stream_lame(void * data,
                      const gavl_metadata_t * m,
                      const gavl_audio_format_t * format)
  {
  lame_priv_t * lame = data;
  gavl_audio_format_copy(&lame->fmt, format);
  return 0;
  }
Example #7
static void
bg_gavf_get_audio_format(void * data, int stream,
                         gavl_audio_format_t*ret)
  {
  bg_gavf_t * priv;
  priv = data;

  gavl_audio_format_copy(ret, &priv->audio_streams[stream].format);
  }
Example #8
static int open_pulse(void * data,
                      gavl_audio_format_t * format)
  {
  bg_pa_t * priv;
  priv = data;

  gavl_audio_format_copy(&priv->format, format);
  
  if(!bg_pa_open(priv, 0))
    return 0;
  
  gavl_audio_format_copy(format, &priv->format);

  priv->sink = gavl_audio_sink_create(NULL, write_func_pulse, priv,
                                      &priv->format);
  
  return 1;
  }
Example #9
bg_nle_audio_compositor_t *
bg_nle_audio_compositor_create(bg_nle_outstream_t * os,
                               bg_gavl_audio_options_t * opt,
                               const gavl_audio_format_t * format)
  {
  bg_nle_audio_compositor_t * ret = calloc(1, sizeof(*ret));
  ret->os = os;
  ret->opt = opt;
  gavl_audio_format_copy(&ret->format, format);
  return ret;
  }
Example #10
static int open_jack(void * data,
                     gavl_audio_format_t * format,
                     gavl_video_format_t * video_format,
                     gavl_metadata_t * m)
  {
  int i;
  jack_t * priv = data;
  if(!priv->client)
    bg_jack_open_client(priv, 0, jack_process);

  /* Copy format */
  gavl_audio_format_copy(&priv->format, format);
  
  priv->format.samplerate = priv->samplerate;
  priv->format.sample_format = GAVL_SAMPLE_FLOAT;
  priv->format.interleave_mode = GAVL_INTERLEAVE_NONE;
  priv->format.samples_per_frame = priv->samples_per_frame;

  /* TODO: Make this configurable */
  priv->format.num_channels = 2;
  
  /* Clear ports */
  for(i = 0; i < priv->num_ports; i++)
    priv->ports[i].active = 0;
  
  /* Setup ports */
  for(i = 0; i < priv->num_ports; i++)
    {
    priv->ports[i].index = i;
    priv->format.channel_locations[i] = priv->ports[i].channel_id;
    priv->ports[i].active = 1;
    jack_ringbuffer_reset(priv->ports[i].buffer);
    }

  gavl_audio_format_copy(format, &priv->format);

  priv->src = gavl_audio_source_create(read_func_jack, priv, 0, format);
  
  priv->samples_read = 0;
  return 1;
  }
Example #11
static void check_out_frame(gavl_audio_source_t * s)
  {
  if(!s->out_frame)
    {
    gavl_audio_format_t frame_format;
    gavl_audio_format_copy(&frame_format, &s->dst_format);
    frame_format.samples_per_frame =
      gavl_time_rescale(s->src_format.samplerate,
                        s->dst_format.samplerate,
                        s->src_format.samples_per_frame) + 10;
    s->out_frame = gavl_audio_frame_create(&frame_format);
    }
  }
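The frame created above is sized by rescaling the source frame size to the destination samplerate, plus a small margin for resampler rounding. Here is a minimal sketch of the same idea with concrete, assumed rates; it relies on gavl_time_rescale() converting a value from the first timescale to the second, as it is used above.

#include <gavl/gavl.h>

/* Illustrative only: size a destination frame for 1024-sample source
   frames converted from 44100 Hz to 48000 Hz. */
static gavl_audio_frame_t *
make_dst_frame_sketch(const gavl_audio_format_t * dst_format)
  {
  gavl_audio_format_t frame_format;
  gavl_audio_format_copy(&frame_format, dst_format);

  /* 1024 samples at 44100 Hz correspond to roughly
     1024 * 48000 / 44100 = 1114 samples at 48000 Hz;
     the extra 10 samples absorb rounding in the resampler. */
  frame_format.samples_per_frame =
    gavl_time_rescale(44100, 48000, 1024) + 10;

  return gavl_audio_frame_create(&frame_format);
  }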
Example #12
gavl_audio_source_t *
gavl_audio_source_create(gavl_audio_source_func_t func,
                         void * priv,
                         int src_flags,
                         const gavl_audio_format_t * src_format)
  {
  gavl_audio_source_t * ret = calloc(1, sizeof(*ret));

  ret->func = func;
  ret->priv = priv;
  ret->src_flags = src_flags;
  gavl_audio_format_copy(&ret->src_format, src_format);
  ret->cnv = gavl_audio_converter_create();
  return ret;
  }
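A source created this way is typically paired with gavl_audio_source_set_dst() (see Example #5) and then read frame by frame. The following outline is assumption-based rather than taken from this page: the callback body, the priv pointer and the formats are placeholders, and it presumes the common gavl connector pattern where gavl_audio_source_read_frame() returns GAVL_SOURCE_OK until GAVL_SOURCE_EOF.

#include <stddef.h>
#include <gavl/gavl.h>
#include <gavl/connectors.h> /* gavl_audio_source_t lives in the connectors API */

/* Placeholder callback: would normally decode or capture audio into *frame
   (or point *frame at an internal frame) and return GAVL_SOURCE_OK. */
static gavl_source_status_t
read_func_sketch(void * priv, gavl_audio_frame_t ** frame)
  {
  return GAVL_SOURCE_EOF;
  }

static void consume_source_sketch(const gavl_audio_format_t * src_fmt,
                                  const gavl_audio_format_t * dst_fmt)
  {
  gavl_audio_source_t * src;
  gavl_audio_frame_t * frame;

  src = gavl_audio_source_create(read_func_sketch, NULL /* priv */, 0, src_fmt);

  /* Sets up the internal converter (Example #5) so frames arrive in dst_fmt */
  gavl_audio_source_set_dst(src, 0, dst_fmt);

  frame = NULL;
  while(gavl_audio_source_read_frame(src, &frame) == GAVL_SOURCE_OK)
    {
    /* ... process frame->valid_samples samples in dst_fmt ... */
    frame = NULL; /* let the source provide its own frame again */
    }

  gavl_audio_source_destroy(src);
  }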
Example #13
bg_ogg_stream_t *
bg_ogg_encoder_add_audio_stream(void * data,
                                const gavl_metadata_t * m,
                                const gavl_audio_format_t * format)
  {
  bg_ogg_stream_t * s;
  bg_ogg_encoder_t * e = data;

  s = append_stream(e, &e->audio_streams, &e->num_audio_streams, m);
  
  gavl_audio_format_copy(&s->afmt, format);
  gavl_metadata_copy(&s->m_stream, m);
  gavl_metadata_delete_compression_fields(&s->m_stream);
  return s;
  }
Example #14
static audio_stream_t * append_audio_stream(bg_gavf_t * f, const gavl_metadata_t * m,
                                            const gavl_audio_format_t * format)
  {
  audio_stream_t * ret;

  f->audio_streams =
    realloc(f->audio_streams,
            (f->num_audio_streams+1)*sizeof(*f->audio_streams));
  ret = f->audio_streams + f->num_audio_streams;
  f->num_audio_streams++;
  memset(ret, 0, sizeof(*ret));
  gavl_audio_format_copy(&ret->format, format);
  if(m)
    gavl_metadata_copy(&ret->com.m, m);
  return ret;
  }
Example #15
static int open_pulse(void * data,
                      gavl_audio_format_t * format,
                      gavl_video_format_t * video_format,
                      gavl_metadata_t * m)
  {
  bg_pa_t * priv;
  priv = data;

  // gavl_audio_format_copy(&priv->format, format);
  
  if(!bg_pa_open(priv, 1))
    return 0;
  
  gavl_audio_format_copy(format, &priv->format);
  priv->src = gavl_audio_source_create(read_func_pulse, priv, 0, format);
  return 1;
  }
Example #16
int bg_audio_filter_chain_init(bg_audio_filter_chain_t * ch,
                               const gavl_audio_format_t * in_format,
                               gavl_audio_format_t * out_format)
  {
  ch->need_restart = 0;
  

  if(ch->in_src)
    gavl_audio_source_destroy(ch->in_src);
  
  ch->in_src = gavl_audio_source_create(read_func_in, ch, 0, in_format);
  ch->in_samples = in_format->samples_per_frame;
  
  bg_audio_filter_chain_connect(ch, ch->in_src);
  gavl_audio_format_copy(out_format,
                         gavl_audio_source_get_src_format(ch->out_src));
  
  //  if(ch->in_samples > out_format->samples_per_frame)
  //    ch->in_samples = out_format->samples_per_frame;
  return ch->num_filters;
  }
Example #17
int bg_avdec_set_track(void * priv, int track)
  {
  int i;
  avdec_priv * avdec = priv;
  
  if(!bgav_select_track(avdec->dec, track))
    return 0;
  avdec->current_track = &(avdec->track_info[track]);
  
  /* Get formats (we need them for compressed output) */
  for(i = 0; i < avdec->current_track->num_audio_streams; i++)
    gavl_audio_format_copy(&(avdec->current_track->audio_streams[i].format),
                           bgav_get_audio_format(avdec->dec, i));

  for(i = 0; i < avdec->current_track->num_video_streams; i++)
    gavl_video_format_copy(&(avdec->current_track->video_streams[i].format),
                           bgav_get_video_format(avdec->dec, i));

  
  return 1;
  }
Example #18
static int
open_gl_lv(void * data, gavl_audio_format_t * audio_format,
           const char * window_id)
  {
  int width, height;
  
  lv_priv_t * priv;
  priv = (lv_priv_t*)data;

  visual_video_set_depth(priv->video, VISUAL_VIDEO_DEPTH_GL);
  
  adjust_audio_format(audio_format);

    
  gavl_audio_format_copy(&priv->audio_format, audio_format);
  
  bg_x11_window_set_gl(priv->win);
  visual_actor_set_video(priv->actor, priv->video);
  bg_x11_window_unset_gl(priv->win);
  
  /* Set the size changed callback after initializing the libvisual stuff */
  bg_x11_window_set_callbacks(priv->win, &priv->window_callbacks);
  bg_x11_window_show(priv->win, 1);

  
  bg_x11_window_set_gl(priv->win);
  
  bg_x11_window_get_size(priv->win, &width, &height);
  
  /* We cannot use the size callback above since it's called before the
     actor is realized */
  visual_video_set_dimension(priv->video, width, height);
  visual_actor_video_negotiate(priv->actor, 0, FALSE, FALSE);
  
  bg_x11_window_unset_gl(priv->win);
  return 1;
  }
Example #19
static int open_esd(void * data,
                    gavl_audio_format_t * format,
                    gavl_video_format_t * video_format,
                    gavl_metadata_t * m)
  {
  int esd_format;
  const char * esd_host;
  char * name;
  char hostname[128];
  
  esd_t * e = data;

  e->samples_read = 0;
  
  /* Set up format */

  memset(format, 0, sizeof(*format));
    
  format->interleave_mode = GAVL_INTERLEAVE_ALL;
  format->samplerate = 44100;
  format->sample_format = GAVL_SAMPLE_S16;
  format->samples_per_frame = ESD_BUF_SIZE / 4;

  format->num_channels = 2;
  gavl_set_channel_setup(format);
  
  gavl_audio_format_copy(&e->format, format);
  
  e->f = gavl_audio_frame_create(format);
    
  if(!e->hostname || (*(e->hostname) == '\0'))
    {
    esd_host = NULL;
    }
  else
    esd_host = e->hostname;
    
  esd_format = ESD_STREAM | ESD_PLAY;
  if(e->do_monitor)
    esd_format |= ESD_MONITOR;
  else
    esd_format |= ESD_RECORD;
  
  esd_format |= (ESD_STEREO|ESD_BITS16);

  gethostname(hostname, 128);
    
  name = bg_sprintf("gmerlin@%s pid: %d", hostname, getpid());
  
  if(e->do_monitor)
    e->esd_socket = esd_monitor_stream(esd_format, format->samplerate,
                                       e->hostname, name);
  else
    e->esd_socket = esd_record_stream(esd_format, format->samplerate,
                                      e->hostname, name);

  free(name);
  if(e->esd_socket < 0)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Cannot connect to daemon");
    return 0;
    }
  e->bytes_per_frame = 4;
  e->src = gavl_audio_source_create(read_func_esd, e,
                                    GAVL_SOURCE_SRC_FRAMESIZE_MAX,
                                    format);
  return 1;
  }
Example #20
static int open_oss(void * data,
                    gavl_audio_format_t * format,
                    gavl_video_format_t * video_format,
                    gavl_metadata_t * m)
  {
  gavl_sample_format_t sample_format;
  int test_value;
  oss_t * priv = data;

  priv->fd = -1;
  
  memset(format, 0, sizeof(*format));
  
  /* Set up the format */

  format->interleave_mode = GAVL_INTERLEAVE_ALL;
  format->samples_per_frame = 1024;

  switch(priv->bytes_per_sample)
    {
    case 1:
      format->sample_format = GAVL_SAMPLE_U8;
      break;
    case 2:
      format->sample_format = GAVL_SAMPLE_S16;
      break;
    default:
      bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Invalid number of bits");
      return 0;
    }

  switch(priv->num_channels)
    {
    case 1:
      format->num_channels = 1;
      format->interleave_mode = GAVL_INTERLEAVE_NONE;
      break;
    case 2:
      format->num_channels = 2;
      format->interleave_mode = GAVL_INTERLEAVE_ALL;
      break;
    default:
      bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Invalid number of channels");
      return 0;
    }
  
  format->samplerate = priv->samplerate;
  format->samples_per_frame = SAMPLES_PER_FRAME;
  format->channel_locations[0] = GAVL_CHID_NONE;
  gavl_set_channel_setup(format);
  
  
  priv->fd = open(priv->device, O_RDONLY, 0);
  if(priv->fd == -1)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Cannot open %s: %s", priv->device,
           strerror(errno));
    goto fail;
    }
  
  sample_format = bg_oss_set_sample_format(priv->fd,
                                           format->sample_format);
  
  if(sample_format == GAVL_SAMPLE_NONE)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Cannot set sampleformat for %s", priv->device);
    goto fail;
    }

  test_value =
    bg_oss_set_channels(priv->fd, format->num_channels);
  if(test_value != format->num_channels)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Device %s doesn't support %d channel sound",
            priv->device, format->num_channels);
    goto fail;
    }

  test_value =
    bg_oss_set_samplerate(priv->fd, format->samplerate);
  if(test_value != format->samplerate)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN, "Samplerate %f kHz not supported by device %s",
              format->samplerate / 1000.0,
              priv->device);
    goto fail;
    }

  priv->block_align = priv->bytes_per_sample * priv->num_channels;

  gavl_audio_format_copy(&priv->format, format);

  priv->src = gavl_audio_source_create(read_func_oss, priv, 0, format);
  
  return 1;
  fail:
  if(priv->fd != -1)
    close(priv->fd);
  return 0;
  }
Example #21
bool ReadMedia::initFormat() {

	const gavl_audio_format_t * open_audio_format;
	const gavl_video_format_t * open_video_format;

	// we use m_vfifosize to see whether the user app wants video;
	// if it doesn't, we set m_video_stream_count to 0
	if (m_video_stream_count > 0 && m_vfifosize > 0) {
		open_video_format = bgav_get_video_format(m_file, 0);

		if (open_video_format->pixelformat == GAVL_PIXELFORMAT_NONE) {
			printf("!!!sorry, pixelformat is not recognized.\n");
			return false;
		}

		// let's check to see if the formats are the same, if they are the same
		// there is no reason to recreate the fifo or frames
		if ( gavl_video_formats_equal( &m_video_format, open_video_format) == 0 ) { 	
			// the formats are different
			gavl_video_format_copy (&m_video_format, open_video_format);
			if (m_video_frame != NULL)
				gavl_video_frame_destroy(m_video_frame);
			m_video_frame = gavl_video_frame_create(&m_video_format);
			gavl_video_frame_clear( m_video_frame, &m_video_format);
			if (m_fifovideo != NULL)
				delete m_fifovideo;
			m_fifovideo=  new FifoVideoFrames( m_vfifosize ,  &m_video_format); 
		}
	} else {
		m_video_stream_count = 0;
		m_veof = true;
	}

	// we use m_afifosize to see whether the user app wants audio;
	// if it doesn't, we set m_audio_stream_count to 0
	if (m_audio_stream_count > 0 && m_afifosize > 0) {  
		open_audio_format = bgav_get_audio_format(m_file, 0);    
	
		// we can get audio formats that are unknown
		if ( open_audio_format->sample_format == GAVL_SAMPLE_NONE) {
			printf("sorry, this file has unsupported audio.\n"); 
			return false;	
		}

		if ( gavl_audio_formats_equal(&m_audio_format, open_audio_format) == 0 ) { 	
			// audio formats are different
			// save the old spf
			int spf = m_audio_format.samples_per_frame; 
			gavl_audio_format_copy(&m_audio_format, open_audio_format);

			if (m_audio_frame != NULL) {
				gavl_audio_frame_destroy(m_audio_frame);
			}

			// set it back to original
			m_audio_format.samples_per_frame = spf ;

			m_audio_frame = gavl_audio_frame_create(&m_audio_format);
	
			gavl_audio_frame_mute( m_audio_frame, &m_audio_format);
			if( m_fifoaudio != NULL )
				delete m_fifoaudio;
			m_fifoaudio = new FifoAudioFrames( m_afifosize , &m_audio_format); 
		}
	} else {
		// user doesn't want audio
		m_audio_stream_count = 0;
		m_aeof=true;
	}


	m_length_in_gavltime = bgav_get_duration( m_file, 0);
	m_length_in_seconds = gavl_time_to_seconds(  m_length_in_gavltime );
	m_num_samples = 0;
	m_num_frames = 0;

	if (m_audio_stream_count) {
		if ( bgav_can_seek_sample(m_file) == 1 ) {
			m_num_samples = bgav_audio_duration( m_file, 0);
		} else {
			m_num_samples = gavl_time_to_samples( m_audio_format.samplerate, bgav_get_duration( m_file, 0) );
		}
	}

	// set frames; we need to take care here with non-constant frame rates
	if(m_video_stream_count) {
		if ( bgav_can_seek_sample(m_file) == 1  && m_video_format.framerate_mode == GAVL_FRAMERATE_CONSTANT) { 
			m_num_frames =	bgav_video_duration ( m_file, 0)/ m_video_format.frame_duration;
		} else if ( bgav_can_seek_sample(m_file) == 1  && m_video_format.framerate_mode == GAVL_FRAMERATE_VARIABLE ) {
			// FIXME what to do with non constant frame rates?
			m_num_frames=0;
		} else { 
			m_num_frames =	gavl_time_to_frames( m_video_format.timescale, m_video_format.frame_duration ,  bgav_get_duration ( m_file, 0) );
		}
	}

  //	printf("m_num_frames =%lld, duration = %lld , vid_duration=%lld\n", 
	//		m_num_frames, bgav_get_duration ( m_file, 0),  bgav_video_duration ( m_file, 0) );
	// set seconds
	if ( bgav_can_seek_sample(m_file) == 1) {
		gavl_time_t atime=0,vtime=0;
		if ( m_audio_stream_count ) 
			atime =  gavl_samples_to_time( m_audio_format.samplerate, m_num_samples );
		if (m_video_stream_count &&  m_video_format.frame_duration > 0) {
			vtime =  gavl_frames_to_time( m_video_format.timescale, m_video_format.frame_duration, m_num_frames );
		} else if ( m_video_stream_count  ) { // non constant framerate			
			vtime = bgav_video_duration( m_file, 0);
		}
		// else rely on audio time
		m_length_in_gavltime = atime > vtime ? atime :vtime;
		m_length_in_seconds = gavl_time_to_seconds( m_length_in_gavltime );
		//printf("atime=%ld,  vtime=%ld, l_in_sec=%f\n", atime, vtime, m_length_in_seconds);
	} 

	m_pcm_seek = SEEK_NOTHING;
	m_frame_seek = SEEK_NOTHING;

	return true;
}
Example #22
void ReadMedia::copyAudioFormat(gavl_audio_format_t * dst ){ 
	lockState();
	//if (m_state == STATE_READY)
	gavl_audio_format_copy(dst, &m_audio_format);
	unlockState();
}
Example #23
int gavl_audio_converter_reinit(gavl_audio_converter_t* cnv)
  {
  int do_mix, do_resample;
  int i;
  gavl_audio_convert_context_t * ctx;

  gavl_audio_format_t * input_format, * output_format;
  
  gavl_audio_format_t tmp_format;

  input_format = &cnv->input_format;
  output_format = &cnv->output_format;
  
#if 0
  fprintf(stderr, "Initializing audio converter, quality: %d, Flags: 0x%08x\n",
          cnv->opt.quality, cnv->opt.accel_flags);
#endif
  
  /* Delete previous conversions */
  audio_converter_cleanup(cnv);

  /* Copy formats and options */
  
  memset(&tmp_format, 0, sizeof(tmp_format));
  gavl_audio_format_copy(&tmp_format, &cnv->input_format);
  
  cnv->current_format = &cnv->input_format;
    
  /* Check if we must mix */

  do_mix = 0;
  
  if((input_format->num_channels != output_format->num_channels) ||
     (gavl_front_channels(input_format) != gavl_front_channels(output_format)) ||
     (gavl_rear_channels(input_format) != gavl_rear_channels(output_format)) ||
     (gavl_side_channels(input_format) != gavl_side_channels(output_format)))
    {
    do_mix = 1;
    }
  if(!do_mix)
    {
    i = (input_format->num_channels < output_format->num_channels) ?
      input_format->num_channels : output_format->num_channels;
    
    while(i--)
      {
      if(input_format->channel_locations[i] != output_format->channel_locations[i])
        {
        do_mix = 1;
        break;
        }
      }
    }

  /* Check if we must resample */

  do_resample = (input_format->samplerate != output_format->samplerate) ? 1 : 0;

  /* Check for resampling. Make sure resampling is done with as few channels as possible */

  if(do_resample &&
     (!do_mix ||
      (do_mix && (input_format->num_channels <= output_format->num_channels))))
    put_samplerate_context(cnv, &tmp_format, output_format->samplerate);
  
  /* Check for mixing */
    
  if(do_mix)
    {
    if(cnv->current_format->interleave_mode != GAVL_INTERLEAVE_NONE)
      {
      tmp_format.interleave_mode = GAVL_INTERLEAVE_NONE;
      ctx = gavl_interleave_context_create(&cnv->opt,
                                           cnv->current_format,
                                           &tmp_format);
      add_context(cnv, ctx);
      }
    
    if((cnv->current_format->sample_format < GAVL_SAMPLE_FLOAT) &&
       ((cnv->opt.quality > 3) ||
        (cnv->output_format.sample_format == GAVL_SAMPLE_FLOAT)))
      {
      tmp_format.sample_format = GAVL_SAMPLE_FLOAT;
      ctx = gavl_sampleformat_context_create(&cnv->opt,
                                             cnv->current_format,
                                             &tmp_format);
      add_context(cnv, ctx);
      }
    else if((cnv->current_format->sample_format < GAVL_SAMPLE_DOUBLE) &&
       ((cnv->opt.quality > 4) ||
        (cnv->output_format.sample_format == GAVL_SAMPLE_DOUBLE)))
      {
      tmp_format.sample_format = GAVL_SAMPLE_DOUBLE;
      ctx = gavl_sampleformat_context_create(&cnv->opt,
                                             cnv->current_format,
                                             &tmp_format);
      add_context(cnv, ctx);
      }

    else if(gavl_bytes_per_sample(cnv->current_format->sample_format) <
            gavl_bytes_per_sample(cnv->output_format.sample_format))
      {
      tmp_format.sample_format = cnv->output_format.sample_format;
      ctx = gavl_sampleformat_context_create(&cnv->opt,
                                             cnv->current_format,
                                             &tmp_format);
      add_context(cnv, ctx);
      }

    tmp_format.num_channels = cnv->output_format.num_channels;
    memcpy(tmp_format.channel_locations, cnv->output_format.channel_locations,
           GAVL_MAX_CHANNELS * sizeof(tmp_format.channel_locations[0]));

    ctx = gavl_mix_context_create(&cnv->opt, cnv->current_format,
                                  &tmp_format);
    add_context(cnv, ctx);
    }

  if(do_resample && do_mix && (input_format->num_channels > output_format->num_channels))
    {
    put_samplerate_context(cnv, &tmp_format, output_format->samplerate);
    }
  
  /* Check if we must change the sample format */
  
  if(cnv->current_format->sample_format != cnv->output_format.sample_format)
    {
    if(cnv->current_format->interleave_mode == GAVL_INTERLEAVE_2)
      {
      tmp_format.interleave_mode = GAVL_INTERLEAVE_NONE;
      ctx = gavl_interleave_context_create(&cnv->opt,
                                           cnv->current_format,
                                           &tmp_format);
      add_context(cnv, ctx);
      }

    tmp_format.sample_format = cnv->output_format.sample_format;
    ctx = gavl_sampleformat_context_create(&cnv->opt,
                                           cnv->current_format,
                                           &tmp_format);
    add_context(cnv, ctx);
    
    }
     
  /* Final interleaving */

  if(cnv->current_format->interleave_mode != cnv->output_format.interleave_mode)
    {
    tmp_format.interleave_mode = cnv->output_format.interleave_mode;
    ctx = gavl_interleave_context_create(&cnv->opt,
                                         cnv->current_format,
                                         &tmp_format);
    add_context(cnv, ctx);
    }

  //  fprintf(stderr, "Audio converter initialized, %d conversions\n", cnv->num_conversions);
  
  //  gavl_audio_format_dump(&cnv->input_format);

  //  gavl_audio_format_dump(&cnv->output_format);
  //  gavl_audio_format_dump(cnv->current_format);

  /* Set samples_per_frame of the first context
     to zero to enable automatic allocation later on */
  
  cnv->input_format.samples_per_frame = 0;
  return cnv->num_conversions;
  }