Example #1
bool video_recording_state_t::initialize(uint16 video_nr, int width, int height, int depth)
{
	enum AVPixelFormat raw_fmt;
	if (depth == VIDEO_DEPTH_8BIT) raw_fmt = AV_PIX_FMT_PAL8;
	else if (depth == VIDEO_DEPTH_32BIT) raw_fmt = AV_PIX_FMT_ARGB;
	else return false;
	char filename[32];
	snprintf(filename, sizeof filename, "rec%hu.avi", video_nr);
	AVOutputFormat *fmt = av_guess_format(NULL, filename, NULL);
	if (!fmt) return false;
	if (fmt->flags & AVFMT_NOFILE) return false;

	output_context = avformat_alloc_context();
	if (!output_context) return false;
	output_context->oformat = fmt;
	snprintf(output_context->filename, sizeof(output_context->filename), "%s", filename);
	if (fmt->video_codec == AV_CODEC_ID_NONE) return false;

	if (!add_audio_stream(AV_CODEC_ID_PCM_S16LE)) return false;
	if (!add_video_stream(fmt->video_codec, width, height)) return false;
	if (!open_audio()) return false;
	if (!open_video()) return false;
	if (!(video_frame_raw = alloc_picture(video_stream, raw_fmt))) return false;
	if (!(video_frame = alloc_picture(video_stream, AV_PIX_FMT_YUV420P))) return false;
	if (!init_sws_context()) return false;

	if (avio_open(&output_context->pb, filename, AVIO_FLAG_WRITE) < 0) return false;
	if (avformat_write_header(output_context, NULL) < 0) return false;
	return true;
}
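The example opens the muxer but never shows the matching teardown. A minimal sketch of the shutdown path, using the same FFmpeg API vintage (finish_recording is a hypothetical helper, not part of the original class):

#include <libavformat/avformat.h>

/* hypothetical teardown for Example #1: flush, write the trailer, close the file */
static void finish_recording(AVFormatContext *output_context)
{
	av_write_trailer(output_context);      /* flush queued packets, write the index/trailer */
	avio_close(output_context->pb);        /* close the file opened by avio_open() */
	avformat_free_context(output_context); /* also frees the streams */
}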
Example #2
static void play_file(const char *fn)
{
	ACMStream *acm;
	int err, res, buflen;
	ao_sample_format fmt;
	ao_device *dev;
	char *buf;
	unsigned int total_bytes, bytes_done = 0;

	err = acm_open_file(&acm, fn, cf_force_chans);
	if (err < 0) {
		fprintf(stderr, "%s: %s\n", fn, acm_strerror(err));
		return;
	}
	show_header(fn, acm);
	fmt.bits = 16;
	fmt.rate = acm_rate(acm);
	fmt.channels = acm_channels(acm);
	fmt.byte_format = AO_FMT_LITTLE;

	dev = open_audio(&fmt);

	buflen = 4*1024;
	buf = malloc(buflen);
	if (buf == NULL) {
		acm_close(acm);
		return;
	}

	total_bytes = acm_pcm_total(acm) * acm_channels(acm) * ACM_WORD;
	while (bytes_done < total_bytes) {
		res = acm_read_loop(acm, buf, buflen/ACM_WORD, 0,2,1);
		if (res == 0)
			break;
		if (res > 0) {
			bytes_done += res;
			res = ao_play(dev, buf, res);
		} else {
			fprintf(stderr, "%s: %s\n", fn, acm_strerror(res));
			break;
		}
	}

	memset(buf, 0, buflen);
	if (bytes_done < total_bytes)
		fprintf(stderr, "%s: adding filler_samples: %d\n",
				fn, total_bytes - bytes_done);
	while (bytes_done < total_bytes) {
		int bs;
		if (bytes_done + buflen > total_bytes) {
			bs = total_bytes - bytes_done;
		} else {
			bs = buflen;
		}
		res = ao_play(dev, buf, bs);
		if (res != bs)
			break;
		bytes_done += res;
	}

	acm_close(acm);
	free(buf);
}
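When the decoder delivers fewer bytes than the header promised, the loop above pads the output with silence. The block-size clamp can be read as a small helper (a sketch only, using the same unsigned byte counters as play_file):

/* sketch: largest safe write, at most one buffer, never past the expected PCM length */
static int next_block_size(unsigned int bytes_done, unsigned int total_bytes, int buflen)
{
	unsigned int remaining = total_bytes - bytes_done;
	return remaining < (unsigned int)buflen ? (int)remaining : buflen;
}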
Example #3
void st_audio_setup(void)
{

  /* Init SDL Audio silently even if --disable-sound : */

  if (audio_device)
    {
      if (SDL_Init(SDL_INIT_AUDIO) < 0)
        {
          /* only print out message if sound or music
             was not disabled at command-line
           */
          if (use_sound || use_music)
            {
              fprintf(stderr,
                      "\nWarning: I could not initialize audio!\n"
                      "The Simple DirectMedia error that occured was:\n"
                      "%s\n\n", SDL_GetError());
            }
          /* keep the programming logic the same :-)
             because in this case, the values of use_sound & use_music
             are ignored when there's no available audio device
          */
          use_sound = false;
          use_music = false;
          audio_device = false;
        }
    }


  /* Open sound silently regardless of the value of "use_sound": */

  if (audio_device)
    {
      if (open_audio(44100, AUDIO_S16, 2, 2048) < 0)
        {
          /* only print out message if sound or music
             was not disabled at command-line
           */
          if (use_sound || use_music)
            {
              fprintf(stderr,
                      "\nWarning: I could not set up audio for 44100 Hz "
                      "16-bit stereo.\n"
                      "The Simple DirectMedia error that occured was:\n"
                      "%s\n\n", SDL_GetError());
            }
          use_sound = false;
          use_music = false;
          audio_device = false;
        }
    }

}
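open_audio(44100, AUDIO_S16, 2, 2048) is a project wrapper that is not shown here. A plausible minimal implementation on top of SDL_mixer (an assumption, not the project's actual code):

#include "SDL_mixer.h"

/* hypothetical wrapper: open the mixer device; returns 0 on success, -1 on
   failure, matching the (open_audio(...) < 0) check above */
int open_audio(int frequency, Uint16 format, int channels, int chunksize)
{
  return Mix_OpenAudio(frequency, format, channels, chunksize);
}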
Example #4
static int
tfile (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  p->fmt = av_guess_format (NULL, o->path, NULL);
  if (!p->fmt)
    {
      fprintf (stderr,
               "ff_save couldn't deduce outputformat from file extension: using MPEG.\n%s",
               "");
      p->fmt = av_guess_format ("mpeg", NULL, NULL);
    }
  p->oc = avformat_alloc_context ();
  if (!p->oc)
    {
      fprintf (stderr, "memory error\n%s", "");
      return -1;
    }

  p->oc->oformat = p->fmt;

  snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);

  p->video_st = NULL;
  p->audio_st = NULL;

  if (p->fmt->video_codec != AV_CODEC_ID_NONE)
    {
      p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
    }
  if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
    {
     p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
    }


  if (p->video_st)
    open_video (p, p->oc, p->video_st);

  if (p->audio_st)
    open_audio (o, p->oc, p->audio_st);

  av_dump_format (p->oc, 0, o->path, 1);

  if (avio_open (&p->oc->pb, o->path, AVIO_FLAG_WRITE) < 0)
    {
      fprintf (stderr, "couldn't open '%s'\n", o->path);
      return -1;
    }

  avformat_write_header (p->oc, NULL);
  return 0;
}
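tfile() ignores the return value of avformat_write_header(). A hedged variant of that last step, mirroring the error handling the function already applies to avio_open():

  /* sketch: check the header write the way the later libavformat examples do */
  if (avformat_write_header (p->oc, NULL) < 0)
    {
      fprintf (stderr, "couldn't write header for '%s'\n", o->path);
      return -1;
    }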
Example #5
int main(int argc, char**argv)
{
    int running = 1;

    m = create_mixer();
    p = create_player(m);
    if(argc == 2){
        player_load(p, argv[1]);
    }else{
        player_load(p, "res/silent_light.mid");
    }
    /*
    mixer_add_instrument(m,create_instrument(OSC_SQU));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    mixer_add_instrument(m,create_instrument(OSC_SAW));
    */

    //SDL_SetVideoMode( 640, 480, 32, SDL_SWSURFACE );
	// Open the audio
	open_audio();
	while(running){
		while(SDL_PollEvent( &event )){
			switch( event.type ){
			case SDL_KEYDOWN:
				switch( event.key.keysym.sym ){
				case SDLK_ESCAPE:
					running = 0;
					break;
				default:
					break;
				}
				break;
			case SDL_QUIT:
				running = 0;
				break;
			}
		}
		SDL_Delay(1);
	}
	// Close audio
	close_audio();
	return 0;
}
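open_audio() and close_audio() are project-local and not shown. A plausible minimal pairing on top of SDL's audio API; mixer_render() is a hypothetical function standing in for however the project renders signed 16-bit samples from the global mixer m:

#include "SDL.h"

/* hypothetical callback: pull samples from the global mixer */
static void audio_callback(void *udata, Uint8 *stream, int len)
{
	mixer_render(m, (Sint16 *)stream, len / 2); /* assumed project API */
}

void open_audio(void)
{
	SDL_AudioSpec spec;
	spec.freq = 44100;
	spec.format = AUDIO_S16SYS;
	spec.channels = 2;
	spec.samples = 1024;
	spec.callback = audio_callback;
	spec.userdata = NULL;
	SDL_OpenAudio(&spec, NULL);
	SDL_PauseAudio(0); /* start the callback */
}

void close_audio(void)
{
	SDL_CloseAudio();
}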
Example #6
void CVideoLivRecord::InitRecoder(LPCSTR lpFileName,LONG lWidth,LONG lHeight,INT iKeyFrameInterval,int iOnlyVideo)
{
	m_Width = lWidth;
	m_Height = lHeight;
	m_videoduriation = iKeyFrameInterval;
	m_audioduriation = iKeyFrameInterval;

	int ret = 0;
	char filename[MAX_PATH] = {0};
	snprintf(filename, sizeof(filename), "%s", lpFileName);
// 	strcat(filename, ".");
// 	strcat(filename, FILE_SUFFIX);

	avformat_alloc_output_context2(&m_pAVFormatContext, NULL, NULL, filename);
	if (!m_pAVFormatContext){
		log("[CVideoLivRecord::InitRecoder] -- avformat_alloc_output_context2() error");
		return ;
	}
	//video
	if (m_pAVFormatContext->oformat->video_codec != AV_CODEC_ID_NONE){
		add_stream(&m_pVideoStream, &m_pVideoCodec, m_pAVFormatContext->oformat->video_codec);
		m_bHasVideo = TRUE;
		m_bEncodeVideo = TRUE;
	}
	//audio
	if (iOnlyVideo == 0 && m_pAVFormatContext->oformat->audio_codec != AV_CODEC_ID_NONE){
		add_stream(&m_pAudioStream, &m_pAudioCodec, m_pAVFormatContext->oformat->audio_codec);
		m_bHasAudio = TRUE;
		m_bEncodeAudio = TRUE;
	}
	if (m_bHasVideo){
		open_video(m_pVideoStream, m_pVideoCodec, m_pOpt);
	}
	if (m_bHasAudio){
		open_audio(m_pAudioStream, m_pAudioCodec, m_pOpt);
	}

	if (!(m_pAVFormatContext->oformat->flags & AVFMT_NOFILE)){
		ret = avio_open(&m_pAVFormatContext->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0){
			log("[CVideoLivRecord::InitRecoder] -- avio_open() error");
			return ;
		}
	}

	ret = avformat_write_header(m_pAVFormatContext, &m_pOpt);
	if (ret < 0){
		log("[CVideoLivRecord::InitRecoder] -- avformat_write_header() error");
		return ;
	}
}
Example #7
static void set_ra_plugin(const bg_plugin_info_t * plugin,
                          void * data)
  {
  visualizer_t * v = (visualizer_t*)data;
  bg_plugin_registry_set_default(v->plugin_reg, BG_PLUGIN_RECORDER_AUDIO, BG_PLUGIN_RECORDER,
                                 plugin->name);
  
  v->ra_info = plugin;
  bg_log(BG_LOG_INFO, LOG_DOMAIN,
         "Changed recording plugin to %s", v->ra_info->long_name);
  close_vis(v);
  open_audio(v);
  open_vis(v);
  }
Example #8
static void button_callback(GtkWidget * w, gpointer data)
  {
  visualizer_t * win = (visualizer_t*)data;
  
  if((w == win->quit_button) ||
     (w == win->normal_window.window))
    gtk_main_quit();
  else if(w == win->config_button)
    bg_dialog_show(win->cfg_dialog, win->current_window->window);
  else if((w == win->fullscreen_button) ||
          (w == win->nofullscreen_button))
    toggle_fullscreen(win);
  else if(w == win->plugin_button)
    {
    gtk_widget_show(win->plugin_window.window);
    gtk_widget_set_sensitive(win->plugin_button, 0);
    }
  else if(w == win->restart_button)
    {
    if(win->vis_open)
      {
      close_vis(win);
      open_audio(win);
      open_vis(win);
      hide_toolbar(win);
      }
    }
  else if(w == win->log_button)
    {
    gtk_widget_set_sensitive(win->log_button, 0);
    bg_gtk_log_window_show(win->log_window);
    }
  else if(w == win->about_button)
    {
    gtk_widget_set_sensitive(win->about_button, 0);
    bg_gtk_about_window_create("Gmerlin visualizer", VERSION,
                               "visualizer_icon.png",
                               about_window_close_callback,
                               win);
    }
  else if(w == win->help_button)
    bg_display_html_help("userguide/Visualizer.html");
  }
Example #9
    bool play(const char *filename, VFSFile &file)
    {
      force_apply = true;

      try
      {
        MPTWrap mpt(file);

        open_audio(FMT_FLOAT, 44100, 2);

        while(!check_stop())
        {
          unsigned char buffer[65536];
          std::int64_t n;
          int seek_value = check_seek();

          if(seek_value >= 0) mpt.seek(seek_value);

          if(force_apply)
          {
            mpt.set_interpolator(aud_get_int(PACKAGE, SETTING_INTERPOLATOR));
            mpt.set_stereo_separation(aud_get_int(PACKAGE, SETTING_STEREO_SEPARATION));
            force_apply = false;
          }

          n = mpt.read(buffer, sizeof buffer);
          if(n == 0) break;

          write_audio(buffer, n);
        }

        return true;
      }
      catch(const MPTWrap::InvalidFile &)
      {
        return false;
      }
    }
Example #10
static void create_call(struct modem_data *modem,
				const char *path, DBusMessageIter *iter)
{
	struct call_data *call;
	DBusMessageIter dict;

	call = g_try_new0(struct call_data, 1);
	if (call == NULL)
		return;

	call->path = g_strdup(path);

	g_hash_table_replace(modem->call_list, call->path, call);

	g_print("call added (%s)\n", call->path);

	call->modem = modem;

	open_audio(modem);

	dbus_message_iter_recurse(iter, &dict);

	while (dbus_message_iter_get_arg_type(&dict) == DBUS_TYPE_DICT_ENTRY) {
		DBusMessageIter entry, value;
		const char *key;

		dbus_message_iter_recurse(&dict, &entry);
		dbus_message_iter_get_basic(&entry, &key);

		dbus_message_iter_next(&entry);
		dbus_message_iter_recurse(&entry, &value);

		call_set(call, key, &value);

		dbus_message_iter_next(&dict);
	}
}
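call_set() is defined elsewhere in the program. A hypothetical minimal handler for a single a{sv} dictionary entry, just to illustrate what the iteration above feeds it (the string-only handling is an assumption):

/* hypothetical: react to one property from the call's a{sv} dict */
static void call_set(struct call_data *call, const char *key,
						DBusMessageIter *value)
{
	if (dbus_message_iter_get_arg_type(value) == DBUS_TYPE_STRING) {
		const char *str;

		dbus_message_iter_get_basic(value, &str);
		g_print("call %s: %s = %s\n", call->path, key, str);
	}
}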
Example #11
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    while (encode_video || encode_audio) {
        /* select the stream to encode */
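        /* av_compare_ts(a, tb_a, b, tb_b) compares a*tb_a with b*tb_b without
         * overflow, so whichever stream is earlier in presentation time gets
         * encoded next and the packets stay interleaved */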
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example #12
int main(int argc,char *argv[]){

  int i,j;
  ogg_packet op;

  FILE *infile = stdin;

#ifdef _WIN32 /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
#endif

  /* open the input file if any */
  if(argc==2){
    infile=fopen(argv[1],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for playback.\n", argv[1]);
      exit(1);
    }
  }
  if(argc>2){
      usage();
      exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  /* Ogg file open; parse the headers */
  /* Only interested in Vorbis/Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      ogg_stream_packetout(&test,&op);

      /* identify the codec: try theora */
      if(!theora_p && theora_decode_header(&ti,&tc,&op)>=0){
        /* it is theora */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
      }else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
        /* it is vorbis */
        memcpy(&vo,&test,sizeof(test));
        vorbis_p=1;
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets. */
  while((theora_p && theora_p<3) || (vorbis_p && vorbis_p<3)){
    int ret;

    /* look for further theora headers */
    while(theora_p && (theora_p<3) && (ret=ogg_stream_packetout(&to,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      if(theora_decode_header(&ti,&tc,&op)){
        printf("Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      theora_p++;
      if(theora_p==3)break;
    }

    /* look for more vorbis header packets */
    while(vorbis_p && (vorbis_p<3) && (ret=ogg_stream_packetout(&vo,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      if(vorbis_synthesis_headerin(&vi,&vc,&op)){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      vorbis_p++;
      if(vorbis_p==3)break;
    }

    /* The header pages/packets will arrive before anything else we
       care about, or the stream is not obeying spec */

    if(ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og); /* demux into the appropriate stream */
    }else{
      int ret=buffer_data(infile,&oy); /* someone needs more data */
      if(ret==0){
        fprintf(stderr,"End of file while searching for codec headers.\n");
        exit(1);
      }
    }
  }

  /* and now we have it all.  initialize decoders */
  if(theora_p){
    theora_decode_init(&td,&ti);
    printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n",
           (unsigned int)to.serialno,ti.width,ti.height, 
           (double)ti.fps_numerator/ti.fps_denominator);
    if(ti.width!=ti.frame_width || ti.height!=ti.frame_height)
      printf("  Frame content is %dx%d with offset (%d,%d).\n",
           ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
    report_colorspace(&ti);
    dump_comments(&tc);
  }else{
    /* tear down the partial theora setup */
    theora_info_clear(&ti);
    theora_comment_clear(&tc);
  }
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
    fprintf(stderr,"Ogg logical stream %x is Vorbis %d channel %d Hz audio.\n",
            (unsigned int)vo.serialno,vi.channels,(int)vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }

  /* open audio */
  if(vorbis_p)open_audio();

  /* open video */
  if(theora_p)open_video();

  /* install signal handler as SDL clobbered the default */
  signal (SIGINT, sigint_handler);

  /* on to the main decode loop.  We assume in this example that audio
     and video start roughly together, and don't begin playback until
     we have a start frame for both.  This is not necessarily a valid
     assumption in Ogg A/V streams! It will always be true of the
     example_encoder (and most streams) though. */

  stateflag=0; /* playback has not begun */
  while(!got_sigint){

    /* we want a video and audio frame ready to go at all times.  If
       we have to buffer incoming, buffer the compressed data (ie, let
       ogg do the buffering) */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;

      /* if there's pending, decoded audio, grab it */
      if((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0){
        int count=audiobuf_fill/2;
        int maxsamples=(audiofd_fragsize-audiobuf_fill)/2/vi.channels;
        for(i=0;i<ret && i<maxsamples;i++)
          for(j=0;j<vi.channels;j++){
            int val=rint(pcm[j][i]*32767.f);
            if(val>32767)val=32767;
            if(val<-32768)val=-32768;
            audiobuf[count++]=val;
          }
        vorbis_synthesis_read(&vd,i);
        audiobuf_fill+=i*vi.channels*2;
        if(audiobuf_fill==audiofd_fragsize)audiobuf_ready=1;
        if(vd.granulepos>=0)
          audiobuf_granulepos=vd.granulepos-ret+i;
        else
          audiobuf_granulepos+=i;
        
      }else{
        
        /* no pending audio; is there a pending packet to decode? */
        if(ogg_stream_packetout(&vo,&op)>0){
          if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
            vorbis_synthesis_blockin(&vd,&vb);
        }else   /* we need more data; break out to suck in another page */
          break;
      }
    }

    while(theora_p && !videobuf_ready){
      /* theora is one in, one out... */
      if(ogg_stream_packetout(&to,&op)>0){

        theora_decode_packetin(&td,&op);
        videobuf_granulepos=td.granulepos;
        
        videobuf_time=theora_granule_time(&td,videobuf_granulepos);

        /* is it already too old to be useful?  This is only actually
           useful cosmetically after a SIGSTOP.  Note that we have to
           decode the frame even if we don't show it (for now) due to
           keyframing.  Soon enough libtheora will be able to deal
           with non-keyframe seeks.  */

        if(videobuf_time>=get_time())
        videobuf_ready=1;
                
      }else
        break;
    }

    if(!videobuf_ready && !audiobuf_ready && feof(infile))break;

    if(!videobuf_ready || !audiobuf_ready){
      /* no data yet for somebody.  Grab another page */
      int bytes=buffer_data(infile,&oy);
      while(ogg_sync_pageout(&oy,&og)>0){
        queue_page(&og);
      }
    }

    /* If playback has begun, top audio buffer off immediately. */
    if(stateflag) audio_write_nonblocking();

    /* are we at or past time for this video frame? */
    if(stateflag && videobuf_ready && videobuf_time<=get_time()){
      video_write();
      videobuf_ready=0;
    }

    if(stateflag &&
       (audiobuf_ready || !vorbis_p) &&
       (videobuf_ready || !theora_p) &&
       !got_sigint){
      /* we have an audio frame ready (which means the audio buffer is
         full), it's not time to play video, so wait until the audio
         buffer is ready or it's near time to play video */
        
      /* set up select wait on the audiobuffer and a timeout for video */
      struct timeval timeout;
      fd_set writefs;
      fd_set empty;
      int n=0;

      FD_ZERO(&writefs);
      FD_ZERO(&empty);
      if(audiofd>=0){
        FD_SET(audiofd,&writefs);
        n=audiofd+1;
      }

      if(theora_p){
        long milliseconds=(videobuf_time-get_time())*1000-5;
        if(milliseconds>500)milliseconds=500;
        if(milliseconds>0){
          timeout.tv_sec=milliseconds/1000;
          timeout.tv_usec=(milliseconds%1000)*1000;

          n=select(n,&empty,&writefs,&empty,&timeout);
          if(n)audio_calibrate_timer(0);
        }
      }else{
        select(n,&empty,&writefs,&empty,NULL);
      }
    }

    /* if our buffers either don't exist or are ready to go,
       we can begin playback */
    if((!theora_p || videobuf_ready) &&
       (!vorbis_p || audiobuf_ready))stateflag=1;
    /* same if we've run out of input */
    if(feof(infile))stateflag=1;

  }

  /* tear it all down */

  audio_close();
  SDL_Quit();

  if(vorbis_p){
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(theora_p){
    ogg_stream_clear(&to);
    theora_clear(&td);
    theora_comment_clear(&tc);
    theora_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  if(infile && infile!=stdin)fclose(infile);

  fprintf(stderr,
          "\r                                                              "
          "\nDone.\n");
  return(0);

}
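get_time() and the audio helpers (open_audio, audio_write_nonblocking, audio_calibrate_timer) live elsewhere in the player. A rough sketch of the contract get_time() must satisfy, assuming a plain wall-clock fallback rather than the calibrated audio clock the real player uses:

#include <sys/time.h>

/* sketch: seconds elapsed since the first call; the real player calibrates
   this against the audio device so that video follows the audio clock */
static double get_time(void){
  static struct timeval start;
  static int started=0;
  struct timeval now;

  gettimeofday(&now,NULL);
  if(!started){
    start=now;
    started=1;
  }
  return (now.tv_sec-start.tv_sec)+(now.tv_usec-start.tv_usec)*1e-6;
}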
Example #13
int
main (int argc, char ** argv)
{
  char * device;
  int number_of_birds;
  double noise_level;
  flock_t flock = NULL;
  bird_data_t data_of_birds;
  int c;
  int count;
  int result;
  int flockfd = -1;
  int audiofd = -1;
  int maxfd;
  fd_set input_fd_set;
  fd_set output_fd_set;
  int play_noise;

  device = DEFAULT_FLOCK_DEVICE;
  number_of_birds = atoi (DEFAULT_NUMBER_OF_BIRDS);
  noise_level = atof (DEFAULT_NOISE_LEVEL);

  /* Parsing arguments. */
  opterr = 0;

  while ((c = getopt (argc, argv, "d:b:n:")) != -1)
    switch (c)
      {
      case 'd':
	device = optarg;
	break;
      case 'b':
	number_of_birds = atoi (optarg);
	break;
      case 'n':
	noise_level = atof (optarg);
	break;
      default:
	break;
      }

  if (argc - optind != 0)
    {
      usage (argv[0]);
      exit (EXIT_FAILURE);
    }

  flock = NULL;
  result = EXIT_SUCCESS;

  get_more_priority ();
  signal (SIGINT, handle_signal);

  fprintf (stderr, "Opening sound card.\n");

  if ((audiofd = open_audio ()) == 0)
    {
      result = EXIT_FAILURE;
      goto terminate;
    }

  play_noise = 0;

  fprintf (stderr, "Preparing flock device: %s, number of birds: %d.\n",
	   device, number_of_birds);

  if ((flock = flock_hl_open (device, number_of_birds,
			      flock_bird_record_mode_position_angles,
			      1, 1)) == NULL)
    {
      result = EXIT_FAILURE;
      goto terminate;
    }

  data_of_birds = (bird_data_t)
    malloc (number_of_birds * sizeof (struct bird_data_s));

  flockfd = flock_get_file_descriptor (flock);
  maxfd = (audiofd < flockfd) ? flockfd : audiofd;
  FD_ZERO (&input_fd_set);
  FD_SET (flockfd, &input_fd_set);
  FD_ZERO (&output_fd_set);
  FD_SET (audiofd, &output_fd_set);

  fprintf (stderr, "Getting data... (Hit Ctrl-C to stop.)\n");

  count = 0;

  /* First values. */
  {
    bird_data_t data;
    int bird;

    if (flock_next_record (flock, 1) == 0)
      {
	fprintf (stderr, "Can't get response from flock.\n");
	result = EXIT_FAILURE;
	goto terminate;
      }

    count++;

    for (bird = 0, data = data_of_birds;
	 bird < number_of_birds;
	 bird++, data++)
      {
	memcpy (&data->rec,
		flock_get_record (flock, bird + 1),
		sizeof (data->rec));
	data->zset = 0;
	data->zcount = 0;
	data->maxdz = 0;
	data->lastbump = 0;
	data->bumpcount = 0;
      }
  }

  while (!terminate)
    {
      fd_set read_fd_set;
      fd_set write_fd_set;

      read_fd_set = input_fd_set;
      write_fd_set = output_fd_set;

      /* Block until new data is available from the flock or we can
         write to the sound card. */
      if (select (maxfd + 1, &read_fd_set, &write_fd_set, NULL, NULL) == -1)
	{
	  fprintf (stderr, "%s: ", __FUNCTION__);
	  perror ("select");
	  result = EXIT_FAILURE;
	  goto terminate;
	}

      if (FD_ISSET (flockfd, &read_fd_set))
	{
	  bird_data_t data;
	  int bird;

	  if (flock_next_record (flock, 1) == 0)
	    {
	      result = EXIT_FAILURE;
	      goto terminate;
	    }

	  count++;

	  for (bird = 0, data = data_of_birds;
	       bird < number_of_birds;
	       bird++, data++)
	    {
	      double dx, dy, dz;

	      /* Shifting previous record. */
	      memcpy (&data->prev_rec,
		      &data->rec,
		      sizeof (data->rec));

	      /* Copy bird's record. */
	      memcpy (&data->rec,
		      flock_get_record (flock, bird + 1),
		      sizeof (data->rec));

	      dx = data->rec.values.pa.x - data->prev_rec.values.pa.x;
	      dy = data->rec.values.pa.y - data->prev_rec.values.pa.y;
	      dz = data->rec.values.pa.z - data->prev_rec.values.pa.z;

	      if (dx < xthreshold)
		{
		  data->xset = 1;
		  data->xcount = count;
		}

	      if (dz > zthreshold)
		{
		  data->zset = 1;
		  data->zcount = count;
		  if (data->maxdz < dz)
		    data->maxdz = dz;
		}

	      if (!(data->xset && data->zset))
		continue;

	      /* Q: is this really useful? */
	      if (((count - data->xcount) > after_threshold_delay) ||
		  ((count - data->zcount) > after_threshold_delay))
		{
		  data->xset = data->zset = 0;
		  data->maxdz = 0;
		  continue;
		}

	      /* Proposition: delay could depend on maxdz. */
	      if ((dz < 0) && ((count - data->lastbump) > after_bump_delay))
		{
		  fprintf (stderr, "bird %d bumps (%g).\n", bird + 1, data->maxdz);
		  data->xset = data->zset = 0;
		  data->maxdz = 0;
		  data->lastbump = count;
		  data->bumpcount++;

		  play_noise = 1;
		}
	    }
	}

      if (FD_ISSET (audiofd, &write_fd_set))
	{
	  double buffer[AUDIO_BLOCK_SIZE];

	  memset (buffer, 0, sizeof (buffer));

	  if (play_noise)
	    {
	      int i;

	      play_noise = 0;
	      for (i = 0; i < sizeof (buffer) / sizeof (*buffer); i++)
		buffer[i] = ((double) RAND_MAX - 2 * random ()) / 2.0;
	    }

	  write_audio (audiofd, buffer);
	}
    }

 terminate:

  fprintf (stderr, "Exiting.\n");

  if (flock != NULL)
    flock_hl_close (flock);

  if (audiofd != -1)
    close_audio (audiofd);

  return result;
}
Example #14
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
                (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                                                audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
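Note that this variant reads the time base from st->codec, which newer FFmpeg releases deprecate in favor of an encoder context kept outside the stream; Example #11 above already uses that pattern:

    av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                  audio_st.next_pts, audio_st.enc->time_base);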
Example #15
int main(int argc, char **argv)
{
    OutputStream audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec;
    int ret;
    int have_audio = 0;
    int encode_audio = 0;
    AVDictionary *opt = NULL;
    
    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();
    avformat_network_init();
    
    if (argc < 2) {
        printf("usage: %s output_file\n", argv[0]);
        return 1;
    }
    av_dict_set(&opt, "strict", "experimental", 0);
    
    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }
    
    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, "sdp", filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;
    
    fmt = oc->oformat;
    
    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }
    
    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */

    open_audio(oc, audio_codec, &audio_st, opt);
    
    av_dump_format(oc, 0, filename, 1);
    
    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }
    
    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
    
    while (encode_audio) {
        encode_audio = !write_audio_frame(oc, &audio_st);
    }
    
    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);
    
    /* Close each codec. */
    close_stream(oc, &audio_st);
    
    if (!(fmt->flags & AVFMT_NOFILE))
    /* Close the output file. */
        avio_close(oc->pb);
    
    /* free the stream */
    avformat_free_context(oc);
    
    return 0;
}
Example #16
/* Play a list of audio files. */
int
main(int argc, char **argv) {
	int		errorStatus = 0;
	int		i;
	int		c;
	int		cnt;
	int		file_type;
	int		rem;
	int		outsiz;
	int		tsize;
	int		len;
	int		err;
	int		ifd;
	int		stdinseen;
	int		regular;
	int		swapBytes;
	int		frame;
	char		*outbuf;
	caddr_t		mapaddr;
	struct stat	st;
	char		*cp;
	char		ctldev[MAXPATHLEN];

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	/* Get the program name */
	prog = strrchr(argv[0], '/');
	if (prog == NULL)
		prog = argv[0];
	else
		prog++;
	Stdin = MGET("(stdin)");

	/* Check AUDIODEV environment for audio device name */
	if (cp = getenv("AUDIODEV")) {
		Audio_dev = cp;
	}

	/* Parse the command line arguments */
	err = 0;
	while ((i = getopt(argc, argv, prog_opts)) != EOF) {
		switch (i) {
			case 'v':
				if (parse_unsigned(optarg, &Volume, "-v")) {
					err++;
				} else if (Volume > MAX_GAIN) {
					Error(stderr, MGET("%s: invalid value "
					    "for -v\n"), prog);
					err++;
				}
				break;
			case 'd':
				Audio_dev = optarg;
				break;
			case 'V':
				Verbose = TRUE;
				break;
			case 'E':
				Errdetect = TRUE;
				break;
			case 'i':
				Immediate = TRUE;
				break;
			case '?':
				usage();
		/*NOTREACHED*/
		}
	}
	if (err > 0)
		exit(1);

	argc -= optind;		/* update arg pointers */
	argv += optind;

	/* Validate and open the audio device */
	err = stat(Audio_dev, &st);
	if (err < 0) {
		Error(stderr, MGET("%s: cannot stat "), prog);
		perror(Audio_dev);
		exit(1);
	}
	if (!S_ISCHR(st.st_mode)) {
		Error(stderr, MGET("%s: %s is not an audio device\n"), prog,
		    Audio_dev);
		exit(1);
	}

	/* This should probably use audio_cntl instead of open_audio */
	if ((argc <= 0) && isatty(fileno(stdin))) {
		Error(stderr, MGET("%s: No files and stdin is a tty.\n"), prog);
		exit(1);
	}

	/* Check on the -i status now. */
	Audio_fd = open(Audio_dev, O_WRONLY | O_NONBLOCK);
	if ((Audio_fd < 0) && (errno == EBUSY)) {
		if (Immediate) {
			Error(stderr, MGET("%s: %s is busy\n"), prog,
			    Audio_dev);
			exit(1);
		}
	}
	(void) close(Audio_fd);
	Audio_fd = -1;

	/* Try to open the control device and save the current format */
	(void) snprintf(ctldev, sizeof (ctldev), "%sctl", Audio_dev);
	Audio_ctlfd = open(ctldev, O_RDWR);
	if (Audio_ctlfd >= 0) {
		/*
		 * wait for the device to become available then get the
		 * controls. We want to save the format that is left when the
		 * device is in a quiescent state. So wait until then.
		 */
		Audio_fd = open(Audio_dev, O_WRONLY);
		(void) close(Audio_fd);
		Audio_fd = -1;
		if (audio_get_play_config(Audio_ctlfd, &Save_hdr)
		    != AUDIO_SUCCESS) {
			(void) close(Audio_ctlfd);
			Audio_ctlfd = -1;
		}
	}

	/* store AUDIOPATH so we don't keep doing getenv() */
	Audio_path = getenv("AUDIOPATH");

	/* Set up SIGINT handler to flush output */
	(void) signal(SIGINT, sigint);

	/* Set the endian nature of the machine. */
	if ((ulong_t)1 != htonl((ulong_t)1)) {
		NetEndian = FALSE;
	}

	/* If no filenames, read stdin */
	stdinseen = FALSE;
	if (argc <= 0) {
		Ifile = Stdin;
	} else {
		Ifile = *argv++;
		argc--;
	}

	/* Loop through all filenames */
	do {
		/* Interpret "-" filename to mean stdin */
		if (strcmp(Ifile, "-") == 0)
			Ifile = Stdin;

		if (Ifile == Stdin) {
			if (stdinseen) {
				Error(stderr,
				    MGET("%s: stdin already processed\n"),
				    prog);
				goto nextfile;
			}
			stdinseen = TRUE;
			ifd = fileno(stdin);
		} else {
			if ((ifd = path_open(Ifile, O_RDONLY, 0, Audio_path))
			    < 0) {
				Error(stderr, MGET("%s: cannot open "), prog);
				perror(Ifile);
				errorStatus++;
				goto nextfile;
			}
		}

		/* Check to make sure this is an audio file */
		err = audio_read_filehdr(ifd, &File_hdr, &file_type,
		    (char *)NULL, 0);
		if (err != AUDIO_SUCCESS) {
			Error(stderr,
			    MGET("%s: %s is not a valid audio file\n"),
			    prog, Ifile);
			errorStatus++;
			goto closeinput;
		}

		/* If G.72X adpcm, set flags for conversion */
		if ((File_hdr.encoding == AUDIO_ENCODING_G721) &&
		    (File_hdr.samples_per_unit == 2) &&
		    (File_hdr.bytes_per_unit == 1)) {
			Decode = AUDIO_ENCODING_G721;
			File_hdr.encoding = AUDIO_ENCODING_ULAW;
			File_hdr.samples_per_unit = 1;
			File_hdr.bytes_per_unit = 1;
			adpcm_state = (struct audio_g72x_state *)malloc
			    (sizeof (*adpcm_state) * File_hdr.channels);
			for (i = 0; i < File_hdr.channels; i++) {
				g721_init_state(&adpcm_state[i]);
			}
		} else if ((File_hdr.encoding == AUDIO_ENCODING_G723) &&
		    (File_hdr.samples_per_unit == 8) &&
		    (File_hdr.bytes_per_unit == 3)) {
			Decode = AUDIO_ENCODING_G723;
			File_hdr.encoding = AUDIO_ENCODING_ULAW;
			File_hdr.samples_per_unit = 1;
			File_hdr.bytes_per_unit = 1;
			adpcm_state = (struct audio_g72x_state *)malloc
			    (sizeof (*adpcm_state) * File_hdr.channels);
			for (i = 0; i < File_hdr.channels; i++) {
				g723_init_state(&adpcm_state[i]);
			}
		} else {
			Decode = AUDIO_ENCODING_NONE;
		}

		/* Check the device configuration */
		open_audio();
		if (audio_cmp_hdr(&Dev_hdr, &File_hdr) != 0) {
			/*
			 * The device does not match the input file.
			 * Wait for any old output to drain, then attempt
			 * to reconfigure the audio device to match the
			 * input data.
			 */
			if (audio_drain(Audio_fd, FALSE) != AUDIO_SUCCESS) {
				/* Flush any remaining audio */
				(void) ioctl(Audio_fd, I_FLUSH, FLUSHW);

				Error(stderr, MGET("%s: "), prog);
				perror(MGET("AUDIO_DRAIN error"));
				exit(1);
			}

			/* Flush any remaining audio */
			(void) ioctl(Audio_fd, I_FLUSH, FLUSHW);

			if (!reconfig()) {
				errorStatus++;
				goto closeinput;
			}
		}


		/* try to do the mmaping - for regular files only ... */
		err = fstat(ifd, &st);
		if (err < 0) {
			Error(stderr, MGET("%s: cannot stat "), prog);
			perror(Ifile);
			exit(1);
		}
		regular = (S_ISREG(st.st_mode));


		/* If regular file, map it.  Else, allocate a buffer */
		mapaddr = 0;

		/*
		 * This should compare to MAP_FAILED not -1, can't
		 * find MAP_FAILED
		 */
		if (regular && ((mapaddr = mmap(0, st.st_size, PROT_READ,
		    MAP_SHARED, ifd, 0)) != MAP_FAILED)) {

			(void) madvise(mapaddr, st.st_size, MADV_SEQUENTIAL);

			/* Skip the file header and set the proper size */
			cnt = lseek(ifd, 0, SEEK_CUR);
			if (cnt < 0) {
			    perror("lseek");
			    exit(1);
			}
			inbuf = (unsigned char *) mapaddr + cnt;
			len = cnt = st.st_size - cnt;
		} else {		/* Not a regular file, or map failed */

			/* mark it so. */
			mapaddr = 0;

			/* Allocate buffer to hold 10 seconds of data */
			cnt = BUFFER_LEN * File_hdr.sample_rate *
			    File_hdr.bytes_per_unit * File_hdr.channels;
			if (bufsiz != cnt) {
				if (buf != NULL) {
					(void) free(buf);
				}
				buf = (unsigned char *) malloc(cnt);
				if (buf == NULL) {
					Error(stderr,
					    MGET("%s: couldn't allocate %dK "
					    "buf\n"), prog, bufsiz / 1000);
					exit(1);
				}
				inbuf = buf;
				bufsiz = cnt;
			}
		}

		/* Set buffer sizes and pointers for conversion, if any */
		switch (Decode) {
		default:
		case AUDIO_ENCODING_NONE:
			insiz = bufsiz;
			outbuf = (char *)buf;
			break;
		case AUDIO_ENCODING_G721:
			insiz = ADPCM_SIZE / 2;
			outbuf = (char *)adpcm_buf;
			initmux(1, 2);
			break;
		case AUDIO_ENCODING_G723:
			insiz = (ADPCM_SIZE * 3) / 8;
			outbuf = (char *)adpcm_buf;
			initmux(3, 8);
			break;
		}

		/*
		 * 8-bit audio isn't a problem, however 16-bit audio is.
		 * If the file is an endian that is different from the machine
		 * then the bytes will need to be swapped.
		 *
		 * Note: Because the G.72X conversions produce 8bit output,
		 * they don't require a byte swap before display and so
		 * this scheme works just fine. If a conversion is added
		 * that produces a 16 bit result and therefore requires
		 * byte swapping before output, then a mechanism
		 * for chaining the two conversions will have to be built.
		 *
		 * Note: The following if() could be simplified, but then
		 * it gets to be very hard to read. So it's left as is.
		 */

		if (File_hdr.bytes_per_unit == 2 &&
		    ((!NetEndian && file_type == FILE_AIFF) ||
		    (!NetEndian && file_type == FILE_AU) ||
		    (NetEndian && file_type == FILE_WAV))) {
			swapBytes = TRUE;
		} else {
			swapBytes = FALSE;
		}

		if (swapBytes) {
			/* Read in an integral number of sample frames. */
			frame = File_hdr.bytes_per_unit * File_hdr.channels;
			insiz = (SWAP_SIZE / frame) * frame;
			/* make the output buffer the swap buffer. */
			outbuf = (char *)swap_buf;
		}

		/*
		 * At this point, we're all ready to copy the data.
		 */
		if (mapaddr == 0) { /* Not mmapped, do it a buffer at a time. */
			inbuf = buf;
			frame = File_hdr.bytes_per_unit * File_hdr.channels;
			rem = 0;
			while ((cnt = read(ifd, inbuf+rem, insiz-rem)) >= 0) {
				/*
				 * We need to ensure only an integral number of
				 * samples is ever written to the audio device.
				 */
				cnt = cnt + rem;
				rem = cnt % frame;
				cnt = cnt - rem;

				/*
				 * If decoding adpcm, or swapping bytes do it
				 * now.
				 *
				 * We treat the swapping like a separate
				 * encoding here because the G.72X encodings
				 * decode to single byte output samples. If
				 * another encoding is added and it produces
				 * multi-byte output samples this will have to
				 * be changed.
				 */
				if (Decode == AUDIO_ENCODING_G721) {
				    outsiz = 0;
				    demux(1, cnt / File_hdr.channels);
				    for (c = 0; c < File_hdr.channels; c++) {
					err = g721_decode(in_ch_data[c],
					    cnt / File_hdr.channels,
					    &File_hdr,
					    (void*)out_ch_data[c],
					    &tsize,
					    &adpcm_state[c]);
					outsiz = outsiz + tsize;
					if (err != AUDIO_SUCCESS) {
					    Error(stderr, MGET(
						"%s: error decoding g721\n"),
						prog);
						errorStatus++;
						break;
					}
				    }
				    mux(outbuf);
				    cnt = outsiz;
				} else if (Decode == AUDIO_ENCODING_G723) {
				    outsiz = 0;
				    demux(3, cnt / File_hdr.channels);
				    for (c = 0; c < File_hdr.channels; c++) {
					err = g723_decode(in_ch_data[c],
					    cnt / File_hdr.channels,
					    &File_hdr,
					    (void*)out_ch_data[c],
					    &tsize,
					    &adpcm_state[c]);
					outsiz = outsiz + tsize;
					if (err != AUDIO_SUCCESS) {
					    Error(stderr, MGET(
						"%s: error decoding g723\n"),
						prog);
					    errorStatus++;
					    break;
					}
				    }
				    mux(outbuf);
				    cnt = outsiz;
				} else if (swapBytes) {
					swab((char *)inbuf, outbuf, cnt);
				}

				/* If input EOF, write an eof marker */
				err = write(Audio_fd, outbuf, cnt);

				if (err < 0) {
					perror("write");
					errorStatus++;
					break;
				} else if (err != cnt) {
					Error(stderr,
					    MGET("%s: output error: "), prog);
					perror("");
					errorStatus++;
					break;
				}
				if (cnt == 0) {
					break;
				}
				/* Move remainder to the front of the buffer */
				if (rem != 0) {
					(void) memcpy(inbuf, inbuf + cnt, rem);
				}

			}
			if (cnt < 0) {
				Error(stderr, MGET("%s: error reading "), prog);
				perror(Ifile);
				errorStatus++;
			}
		} else {	/* We're mmaped */
			if ((Decode != AUDIO_ENCODING_NONE) || swapBytes) {

				/* Transform data if we have to. */
				for (i = 0; i <= len; i += cnt) {
					cnt = insiz;
					if ((i + cnt) > len) {
						cnt = len - i;
					}
					if (Decode == AUDIO_ENCODING_G721) {
					    outsiz = 0;
					    demux(1, cnt / File_hdr.channels);
					    for (c = 0; c < File_hdr.channels;
						c++) {
						err = g721_decode(
						    in_ch_data[c],
						    cnt / File_hdr.channels,
						    &File_hdr,
						    (void*)out_ch_data[c],
						    &tsize,
						    &adpcm_state[c]);
						outsiz = outsiz + tsize;
						if (err != AUDIO_SUCCESS) {
						    Error(stderr, MGET(
							"%s: error decoding "
							"g721\n"), prog);
						    errorStatus++;
						    break;
						}
					    }
					    mux(outbuf);
					} else if
					    (Decode == AUDIO_ENCODING_G723) {
						outsiz = 0;
						demux(3,
						    cnt / File_hdr.channels);
						for (c = 0;
							c < File_hdr.channels;
							c++) {
						    err = g723_decode(
							in_ch_data[c],
							cnt /
							    File_hdr.channels,
							&File_hdr,
							(void*)out_ch_data[c],
							&tsize,
							&adpcm_state[c]);
						    outsiz = outsiz + tsize;
						    if (err != AUDIO_SUCCESS) {
							Error(stderr, MGET(
							    "%s: error "
							    "decoding g723\n"),
							    prog);
							errorStatus++;
							break;
						    }
						}
						mux(outbuf);
					} else if (swapBytes) {
						swab((char *)inbuf, outbuf,
						    cnt);
						outsiz = cnt;
					}
					inbuf += cnt;

					/* If input EOF, write an eof marker */
					err = write(Audio_fd, (char *)outbuf,
					    outsiz);
					if (err < 0) {
						perror("write");
						errorStatus++;
					} else if (outsiz == 0) {
						break;
					}

				}
			} else {
				/* write the whole thing at once!  */
				err = write(Audio_fd, inbuf, len);
				if (err < 0) {
					perror("write");
					errorStatus++;
				}
				if (err != len) {
					Error(stderr,
					    MGET("%s: output error: "), prog);
					perror("");
					errorStatus++;
				}
				err = write(Audio_fd, inbuf, 0);
				if (err < 0) {
					perror("write");
					errorStatus++;
				}
			}
		}

		/* Free memory if decoding ADPCM */
		switch (Decode) {
		case AUDIO_ENCODING_G721:
		case AUDIO_ENCODING_G723:
			freemux();
			break;
		default:
			break;
		}

closeinput:;
		if (mapaddr != 0)
			(void) munmap(mapaddr, st.st_size);
		(void) close(ifd);		/* close input file */
		if (Errdetect) {
			cnt = 0;
			audio_set_play_error(Audio_fd, (unsigned int *)&cnt);
			if (cnt) {
				Error(stderr,
				    MGET("%s: output underflow in %s\n"),
				    Ifile, prog);
				errorStatus++;
			}
		}
nextfile:;
	} while ((argc > 0) && (argc--, (Ifile = *argv++) != NULL));

	/*
	 * Though drain is implicit on close(), it's performed here
	 * to ensure that the volume is reset after all output is complete.
	 */
	(void) audio_drain(Audio_fd, FALSE);

	/* Flush any remaining audio */
	(void) ioctl(Audio_fd, I_FLUSH, FLUSHW);

	if (Volume != INT_MAX)
		(void) audio_set_play_gain(Audio_fd, &Savevol);
	if ((Audio_ctlfd >= 0) && (audio_cmp_hdr(&Save_hdr, &Dev_hdr) != 0)) {
		(void) audio_set_play_config(Audio_fd, &Save_hdr);
	}
	(void) close(Audio_fd);			/* close output */
	return (errorStatus);
}
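The byte-swap path above relies on swab(). A standalone sketch of its effect on 16-bit samples (hypothetical values, for illustration only):

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char in[4] = { 0x12, 0x34, 0x56, 0x78 };
	unsigned char out[4];

	/* swab() swaps each adjacent byte pair, flipping 16-bit sample endianness */
	swab((char *)in, (char *)out, sizeof (in));
	(void) printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	/* prints: 34 12 78 56 */
	return (0);
}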
Example #17
/*
 * Initialize the AVFormatContext
 * Called on encoder initialize and when beginning
 * each new video chunk
 */
int initializeAVFormatContext(AVFormatContext **out_oc, jbyte *output_filename, AVStream **out_video_st, AVFrame **out_picture, int video_width, int video_height, float video_crf, int *out_last_pts, int *out_video_frame_count, AVStream **out_audio_st, int16_t **out_samples, int audio_bitrate){
	AVFormatContext *oc = NULL;
	AVStream *video_st;
	AVStream *audio_st;
	AVFrame *picture;
	int16_t *samples;

	// TODO: Can we do this only once?
	/* Initialize libavcodec, and register all codecs and formats. */
	av_register_all();
	//LOGI("initializeAVFC with filename: %s", output_filename);
	if(!oc)
		LOGI("initializeAVFC, oc is properly null");

	/* allocate the output media context */
	avformat_alloc_output_context2(&oc, NULL, NULL, ((const char*) output_filename));
	if (!oc) {
		LOGI("Could not deduce output format, using mpeg");
		//printf("Could not deduce output format from file extension: using MPEG.\n");
		avformat_alloc_output_context2(&oc, NULL, "mpeg", ((const char*) output_filename));
	}
	if (!oc) {
		LOGE("Could not allocate output context");
		exit(1);
	}
	//else
		//LOGI("initializeAVFC, oc appears properly allocated");

	//LOGI("avformat_alloc_output_context2");
	fmt = oc->oformat;

	// Set AVOutputFormat video/audio codec
	fmt->video_codec = VIDEO_CODEC_ID;
	fmt->audio_codec = AUDIO_CODEC_ID;

	/* Add the audio and video streams using the default format codecs
	 * and initialize the codecs. */
	video_st = NULL;
	audio_st = NULL;
	if (fmt->video_codec != CODEC_ID_NONE) {
		video_st = add_video_stream(oc, fmt->video_codec, video_width, video_height, video_crf);
		//(AVFormatContext *oc, enum CodecID codec_id, int width, int height, float crf)
	}
	if (fmt->audio_codec != CODEC_ID_NONE) {
		audio_st = add_audio_stream(oc, fmt->audio_codec, audio_bitrate);
		//static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id, int bit_rate)
	}
	//LOGI("add_audio_stream / add_video_stream");
	/* Now that all the parameters are set, we can open the audio and
	 * video codecs and allocate the necessary encode buffers. */
	if (video_st){
		open_video(oc, video_st, &picture);
		//open_video(AVFormatContext *oc, AVStream *st, AVFrame *picture
	}
	if (audio_st){
		open_audio(oc, audio_st ,&samples);
		//open_audio(AVFormatContext *oc, AVStream *st, int16_t *samples)
	}

	av_dump_format(oc, 0, output_filename, 1);

	//LOGI("open audio / video");
	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		char *error_buffer_ptr;
		char error_buffer[90];
		error_buffer_ptr = error_buffer;
		//LOGI("pre avio_open2");
		int error = avio_open2(&oc->pb, output_filename, AVIO_FLAG_WRITE, NULL, NULL);
		//LOGI("post avio_open2");
		if ( error < 0) {
			av_strerror (error, error_buffer_ptr, 90);
			LOGE("Could not open %s. Error: %s", output_filename, error_buffer_ptr);
			//fprintf(stderr, "Could not open '%s'\n", native_output_file_lq1);
			exit(-420);
		}
	}

	/* Write the stream header, if any. */
	//LOGI("pre avformat_write_header");
	avformat_write_header(oc, NULL);
	//LOGI("avformat_write_header");
	//LOGI("end initializeAVFC: audio_input_frame_size: %d fps: %d", audio_input_frame_size, video_st->codec->time_base.den);

	// Set results to output arguments
	*out_oc = oc;
	*out_video_st = video_st;
	*out_audio_st = audio_st;
	*out_picture = picture;
	*out_samples = samples;
	*out_last_pts = -1;
	*out_video_frame_count = 0;

	return audio_input_frame_size;
}
Example #18
int
main (int argc, char **argv)
{

  int frequency, oversample, stereo;
  struct song *song;
  int index;
  int c;
  int opt, error_flag;


  /* supposed to perform wildcard expansion of the arguments */
  _wildcard(&argc,&argv);

  /* SIGINT skips to the next song, SIGQUIT exits */
  signal (SIGINT, nextsong);
  signal (SIGQUIT, goodbye);

  printf("Tracker1 V0.91 for the SBOS2 package\n");

  /* Read environment variables */
  frequency = read_env ("FREQUENCY", 0);
  oversample = read_env ("OVERSAMPLE", 1);
  transpose = read_env ("TRANSPOSE", 0);

  if (getenv ("MONO"))
    pref.stereo = 0;
  else if (getenv ("STEREO"))
    pref.stereo = 1;
  else
    pref.stereo = DEFAULT_CHANNELS - 1;
  pref.type = BOTH;
  pref.repeats = 1;
  pref.speed = 50;
  pref.tolerate = 2;
  pref.verbose = 0;
  set_mix (DEFAULT_MIX);        /* 0 = full stereo, 100 = mono */

  error_flag = 0;
  while ((opt = getopt_long_only (argc, argv, "", long_options, NULL)) != EOF)
    {
      switch (opt)
        {
        case 'H':               /* help */
          error_flag++;
          break;
        case 'Q':               /* quiet */
          quiet++;
          break;
        case 'P':               /* abort on faults (be picky) */
          pref.tolerate = 0;
          break;
        case 'N':               /* new tracker type */
          pref.type = NEW;
          break;
        case 'O':               /* old tracker type */
          pref.type = OLD;
          break;
        case 'B':               /* both tracker types */
          pref.type = BOTH;
          break;
        case 'M':               /* mono */
          pref.stereo = 0;
          break;
        case 'S':               /* stereo */
          pref.stereo = 1;
          break;
        case 'V':
          pref.verbose = 1;
          break;
        case 'f':               /* frequency */
          frequency = atoi (optarg);
          break;
        case 'o':               /* oversampling */
          oversample = atoi (optarg);
          break;
        case 't':               /* transpose half-steps*/
          transpose = atoi (optarg);
          break;
        case 'r':               /* number of repeats */
          pref.repeats = atoi (optarg);
          break;
        case 's':               /* speed */
          pref.speed = atoi (optarg);
          break;
        case 'm':               /* % of channel mix.  100=mono */
          set_mix (atoi (optarg));
          break;
        default:                /* ??? */
          error_flag++;
          break;
        }
    }
  if (error_flag || !argv[optind])
    {
      fprintf (stderr, "Usage: %s " USAGE, argv[0]);
      exit(1);
    }

  frequency = open_audio (frequency);
  init_player (oversample, frequency);

  while (argv[optind])
    {
      switch (pref.type)
        {
        case BOTH:
          song = do_read_song (argv[optind], NEW);
          if (!song)
            song = do_read_song (argv[optind], OLD);
          break;
        case OLD:
          song = do_read_song (argv[optind], pref.type);
          break;
        case NEW:
          /* this is explicitly flagged as a new module,
           * so we don't need to look for a signature.
           */
          song = do_read_song (argv[optind], NEW_NO_CHECK);
          break;
        }
      optind++;
      if (song == NULL)
        continue;

      dump_song (song);
      play_song (song, &pref);
      release_song (song);

      /* flush out anything remaining in DMA buffers */
      flush_DMA_buffers();

    }

  close_audio ();
  return 0;
}
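read_env() is called above but not defined in this listing; one plausible shape, shown purely as a sketch (the real helper may differ):

#include <stdlib.h>

/* Read an integer from the environment, falling back to a default
   when the variable is unset. */
static int read_env (const char *name, int fallback)
{
  const char *value = getenv (name);
  return value ? atoi (value) : fallback;
}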
Exemple #19
0
static visualizer_t * visualizer_create()
  {
  const bg_plugin_info_t * info;
  char * tmp_path;
  int row, col;
  
  GtkWidget * main_table;
  GtkWidget * table;
  GtkWidget * box;
  
  visualizer_t * ret;
  bg_cfg_section_t * cfg_section;
  
  ret = calloc(1, sizeof(*ret));
  
  ret->cb.motion_callback = motion_callback;
  ret->cb.data = ret;
  
  window_init(ret, &ret->normal_window, 0);
  window_init(ret, &ret->fullscreen_window, 1);
  ret->current_window = &ret->normal_window;

  ret->log_window = bg_gtk_log_window_create(log_close_callback, ret,
                                             TR("Gmerlin visualizer"));
  
  ret->config_button =
    create_pixmap_button(ret, "config_16.png", TRS("Configure"));
  ret->plugin_button =
    create_pixmap_button(ret, "plugin_16.png", TRS("Recording and display plugins"));
  ret->restart_button =
    create_pixmap_button(ret, "refresh_16.png", TRS("Restart visualization"));
  ret->quit_button =
    create_pixmap_button(ret, "quit_16.png", TRS("Quit"));
  ret->fullscreen_button =
    create_pixmap_button(ret, "fullscreen_16.png", TRS("Fullscreen mode"));
  ret->nofullscreen_button =
    create_pixmap_button(ret, "windowed_16.png", TRS("Leave fullscreen mode"));

  ret->log_button = create_pixmap_button(ret,
                                         "log_16.png",
                                         TRS("Show log window"));
  ret->about_button = create_pixmap_button(ret,
                                           "about_16.png",
                                           TRS("About Gmerlin visualizer"));
  ret->help_button = create_pixmap_button(ret,
                                          "help_16.png",
                                          TRS("Launch help in a webwroswer"));
  
  ret->fps = gtk_label_new("Fps: --:--");
  gtk_misc_set_alignment(GTK_MISC(ret->fps), 0.0, 0.5);
  gtk_widget_show(ret->fps);
  
  gtk_widget_hide(ret->nofullscreen_button);
  
  //  bg_gtk_box_pack_start_defaults(GTK_BOX(mainbox),
  //                              bg_gtk_vumeter_get_widget(ret->vumeter));
  
  ret->toolbar = gtk_event_box_new();

  gtk_widget_set_events(ret->toolbar,
                        GDK_ENTER_NOTIFY_MASK | GDK_LEAVE_NOTIFY_MASK);
  
  g_signal_connect(G_OBJECT(ret->toolbar), "enter-notify-event",
                   G_CALLBACK(crossing_callback), (gpointer*)ret);
  
  g_signal_connect(G_OBJECT(ret->toolbar), "leave-notify-event",
                   G_CALLBACK(crossing_callback), (gpointer*)ret);

  
  g_object_ref(ret->toolbar); /* Must be done for widgets that get
                                 reparented later */
  
  ret->vumeter = bg_gtk_vumeter_create(2, 0);
  
  /* Create actual objects */

  /* Create plugin registry */
  ret->cfg_reg = bg_cfg_registry_create();
  tmp_path =  bg_search_file_read("visualizer", "config.xml");
  bg_cfg_registry_load(ret->cfg_reg, tmp_path);
  if(tmp_path)
    free(tmp_path);
  
  cfg_section = bg_cfg_registry_find_section(ret->cfg_reg, "plugins");
  ret->plugin_reg = bg_plugin_registry_create(cfg_section);
  ret->visualizer = bg_visualizer_create(ret->plugin_reg);

  bg_visualizer_set_callbacks(ret->visualizer, &ret->cb);
  
  /* Create vis plugin widget */

  ret->vis_plugins =
    bg_gtk_plugin_widget_single_create(TR("Visualization"),
                                       ret->plugin_reg,
                                       BG_PLUGIN_VISUALIZATION,
                                       BG_PLUGIN_VISUALIZE_FRAME |
                                       BG_PLUGIN_VISUALIZE_GL);

  bg_gtk_plugin_widget_single_set_change_callback(ret->vis_plugins,
                                                  set_vis_plugin,
                                                  ret);

  bg_gtk_plugin_widget_single_set_parameter_callback(ret->vis_plugins,
                                                     set_vis_parameter,
                                                     ret);
  
  /* Create audio and video plugin widgets */
  
  plugin_window_init(&ret->plugin_window, ret);

  /* Get ov info */
  
  ret->ov_info =
    bg_gtk_plugin_widget_single_get_plugin(ret->plugin_window.ov_plugins);
  
  /* Load recording plugin */
  ret->ra_info =
    bg_gtk_plugin_widget_single_get_plugin(ret->plugin_window.ra_plugins);
  
  /* Create config stuff */
  
  ret->visualizer_section =
    bg_cfg_registry_find_section(ret->cfg_reg, "visualizer");
  ret->general_section =
    bg_cfg_registry_find_section(ret->cfg_reg, "general");
  ret->log_section =
    bg_cfg_registry_find_section(ret->cfg_reg, "log");
  
  ret->cfg_dialog = create_cfg_dialog(ret);


  
  /* Pack everything */

  main_table = gtk_table_new(2, 2, 0);

  gtk_table_set_row_spacings(GTK_TABLE(main_table), 5);
  gtk_table_set_col_spacings(GTK_TABLE(main_table), 5);
  gtk_container_set_border_width(GTK_CONTAINER(main_table), 5);
  
  table = gtk_table_new(1, 4, 0);

  gtk_table_set_row_spacings(GTK_TABLE(table), 5);
  gtk_table_set_col_spacings(GTK_TABLE(table), 5);
  gtk_container_set_border_width(GTK_CONTAINER(table), 5);
  
  
  row = 0;
  col = 0;
  bg_gtk_plugin_widget_single_attach(ret->vis_plugins,
                                     table,
                                     &row, &col);
  gtk_widget_show(table);
  
  gtk_table_attach(GTK_TABLE(main_table), table, 0, 1, 0, 1,
                   GTK_FILL, GTK_SHRINK, 0, 0);
  
  box = gtk_hbox_new(0, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->plugin_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->config_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->restart_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->fullscreen_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->nofullscreen_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->log_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->about_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->help_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->quit_button,
                     FALSE, FALSE, 0);
  gtk_box_pack_start(GTK_BOX(box), ret->fps, TRUE, TRUE, 5);
  gtk_widget_show(box);
  
  gtk_table_attach(GTK_TABLE(main_table), box, 0, 1, 1, 2,
                   GTK_FILL, GTK_SHRINK, 0, 0);

  gtk_table_attach(GTK_TABLE(main_table),
                   bg_gtk_vumeter_get_widget(ret->vumeter),
                   1, 2, 0, 2,
                   GTK_FILL | GTK_EXPAND, GTK_FILL, 0, 0);

  
  gtk_widget_show(main_table);
  
  gtk_container_add(GTK_CONTAINER(ret->toolbar), main_table);
  
  //  gtk_widget_show(ret->toolbar);
  
  /* Start with non-fullscreen mode */
  attach_toolbar(ret, &ret->normal_window);
  
  apply_config(ret);
  
  /* Get visualization plugin */
  info = bg_gtk_plugin_widget_single_get_plugin(ret->vis_plugins);
  bg_visualizer_set_vis_plugin(ret->visualizer, info);
  
  /* Initialize stuff */
  open_audio(ret);
  open_vis(ret);

  
  gtk_widget_show(ret->current_window->window);

  if(ret->width && ret->height)
    gtk_decorated_window_move_resize_window(GTK_WINDOW(ret->current_window->window),
                                            ret->x, ret->y,
                                            ret->width, ret->height);
  else
    gtk_decorated_window_move_resize_window(GTK_WINDOW(ret->current_window->window),
                                            100, 100,
                                            640, 480);
  
  while(gdk_events_pending() || gtk_events_pending())
    gtk_main_iteration();
  
  
  g_idle_add(idle_func, ret);
  
  g_timeout_add(3000, toolbar_timeout, ret);
  g_timeout_add(1000, fps_timeout, ret);
  
  return ret;
  }
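The fps_timeout() handed to g_timeout_add() above is not shown; GLib only requires a GSourceFunc that returns TRUE to stay scheduled. A sketch assuming the usual GTK/stdio includes, where last_fps is a hypothetical field standing in for however the real code obtains the frame rate:

static gboolean fps_timeout(gpointer data)
  {
  visualizer_t * v = data;
  char buf[32];
  snprintf(buf, sizeof(buf), "Fps: %.2f", v->last_fps); /* hypothetical field */
  gtk_label_set_text(GTK_LABEL(v->fps), buf);
  return TRUE; /* keep the 1000 ms timer running */
  }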
Exemple #20
0
bool CTheoraPlayer::playVideo(
//Plays specified OGG Theora file to screen surface.
//If screen == NULL, then this method will test that the file is playable
//by decoding it as fast as possible but not displaying anything.
//
//Returns: whether playback was successful
	CStretchyBuffer& buffer, SDL_Surface *screen,
	const int x, const int y) //[default=(0,0)]
{
	//init
	theora_p = vorbis_p = 0;
	startticks = 0;
	bool bSkippedLastFrame = false;

	// start up Ogg stream synchronization layer
	ogg_sync_init(&oy);

	// init supporting Vorbis structures needed in header parsing
	vorbis_info_init(&vi);
	vorbis_comment_init(&vc);

	// init supporting Theora structures needed in header parsing
	theora_comment_init(&tc);
	theora_info_init(&ti);
	if (!screen)
		ti.quick_p = 1;
	ti.quality = 63;

	if (!parseHeaders(buffer))
		return false;

	// force audio off
	vorbis_p = 0;

	// initialize decoders
	if (theora_p) {
		theora_decode_init(&td,&ti);
#if 0
		printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n"
			  "  Frame content is %dx%d with offset (%d,%d).\n",
			to.serialno,ti.width,ti.height, (double)ti.fps_numerator/ti.fps_denominator,
			ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
		//report_colorspace(&ti); //we're not using this info for anything
		dump_comments(&tc);
#endif
	} else {
		// tear down the partial theora setup
		theora_info_clear(&ti);
		theora_comment_clear(&tc);
	}
	if(vorbis_p) {
		vorbis_synthesis_init(&vd,&vi);
		vorbis_block_init(&vd,&vb);  
		printf("Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
			vo.serialno,vi.channels,vi.rate);
	} else {
		// tear down the partial vorbis setup
		vorbis_info_clear(&vi);
		vorbis_comment_clear(&vc);
	}

	// open audio
	if (vorbis_p)
		open_audio();

	// open video
	SDL_Overlay *yuv_overlay = NULL;
	if (theora_p && screen)
		yuv_overlay = open_video(screen);
  
	// single frame video buffering
	ogg_packet op;
	ogg_int64_t  videobuf_granulepos=-1;
	double       videobuf_time=0;
	double last_frame_time = 0;
	bool hasdatatobuffer = true;

	// Main loop
	bool audiobuf_ready=false;
	bool videobuf_ready=false;
	bool playbackdone = (yuv_overlay == NULL);
	bool isPlaying = false;
	bool bBreakout = false;
	while (!playbackdone)
	{
		// break out on SDL quit event
		SDL_Event event;
		if (SDL_PollEvent(&event))
		{
			switch (event.type)
			{
				case SDL_QUIT: playbackdone = bBreakout = true; break;
				case SDL_KEYDOWN:
					if (event.key.keysym.sym == SDLK_ESCAPE)
						playbackdone = bBreakout = true;
				break;
				default: break;
			}
		}

		while (theora_p && !videobuf_ready) {
			// get one video packet...
			if (ogg_stream_packetout(&to,&op)>0)
			{
				theora_decode_packetin(&td,&op);

				videobuf_granulepos=td.granulepos;
				videobuf_time=theora_granule_time(&td,videobuf_granulepos);

#if 0
				//Without sound channels to synch to, don't need to worry about skipping frames when slow.
				// update the frame counter
				//++frameNum;

				// check if this frame time has not passed yet.
				//	If the frame is late we need to decode additional
				//	ones and keep looping, since theora at this stage
				//	needs to decode all frames.
				const double now=get_time();
				const double delay=videobuf_time-now;
				if(delay>=0.0){
					/// got a good frame, not late, ready to break out
					videobuf_ready=true;
				} else if(now-last_frame_time>=1.0) {
					// display at least one frame per second, regardless
					videobuf_ready=true;
				} else {
					//Need to catch up -- no time to display frame.
					if (bSkippedLastFrame) //only allow skipping one frame in a row
						videobuf_ready = true; //show anyway
					else
						bSkippedLastFrame = true;
					//printf("dropping frame %d (%.3fs behind)\n", frameNum, -delay);
				}
#else
				videobuf_ready = true; //show every frame
#endif
			} else {
				// need more data
				break;
			}
		}

		if (!hasdatatobuffer && !videobuf_ready && !audiobuf_ready) {
			isPlaying = false;
			playbackdone = true;
		}

		//If we're set for the next frame, sleep.
		//In other words, don't show frames too rapidly. 
		if((!theora_p || videobuf_ready) && 
			(!vorbis_p || audiobuf_ready))
		{
			const int ticks = (int)(1000*(videobuf_time-get_time()));
			if(ticks>0 && screen) //don't need to sleep if only testing file
				SDL_Delay(ticks);
		}
 
		if (videobuf_ready)
		{
			// time to write our cached frame
			if (screen)
			{
				const bool bRes = video_write(screen, yuv_overlay, x, y);
				if (!bRes) //couldn't display image
					playbackdone = bBreakout = true;
			}
			videobuf_ready=false;
			last_frame_time=get_time();
			bSkippedLastFrame = false;

			// if audio has not started (first frame) then start it
			if ((!isPlaying)&&(vorbis_p)) {
				start_audio();
				isPlaying = true;
			}
		}

		// HACK: always look for more audio data
		audiobuf_ready=false;

		// buffer compressed data every loop
		if (hasdatatobuffer) {
			hasdatatobuffer = buffer_data(&oy, buffer) > 0;
			if (!hasdatatobuffer) {
				//printf("Ogg buffering stopped, end of file reached.\n");
			}
		}
    
		if (ogg_sync_pageout(&oy,&og)>0)
			queue_page(&og);

	} // playbackdone

	// show number of video frames decoded
	//printf("\nFrames decoded: %d\n", frameNum);

	// deinit
	if (vorbis_p) {
		audio_close();

		ogg_stream_clear(&vo);
		vorbis_block_clear(&vb);
		vorbis_dsp_clear(&vd);
		vorbis_comment_clear(&vc);
		vorbis_info_clear(&vi); 
	}
	if (theora_p) {
		if (yuv_overlay)
			SDL_FreeYUVOverlay(yuv_overlay);

		ogg_stream_clear(&to);
		theora_clear(&td);
		theora_comment_clear(&tc);
		theora_info_clear(&ti);
	}
	ogg_sync_clear(&oy);

	//If broken out of testing, return false since entire file was not verified.
	return !bBreakout || screen != NULL;
}
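queue_page() above follows the stock Theora player examples: every raw page is offered to each logical stream, and ogg_stream_pagein() ignores pages whose serial number does not match. A sketch of that helper under the same to/vo stream states and theora_p/vorbis_p flags:

static int queue_page(ogg_page *page)
{
	if (theora_p)
		ogg_stream_pagein(&to, page);
	if (vorbis_p)
		ogg_stream_pagein(&vo, page);
	return 0;
}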
Exemple #21
0
/*
 * Read sound.cfg and map events to sounds; then load all the sounds into
 * memory to avoid I/O latency later.
 */
static bool sound_sdl_init(bool no_cache)
{
	char path[2048];
	char buffer[2048];
	ang_file *fff;


	/* Initialise the mixer  */
	if (!open_audio())
	    return FALSE;


	/* Build the "sound" path */
	path_build(path, sizeof(path), ANGBAND_DIR_XTRA, "sound");
	ANGBAND_DIR_XTRA_SOUND = string_make(path);

	/* Find and open the config file */
	path_build(path, sizeof(path), ANGBAND_DIR_XTRA_SOUND, "sound.cfg");
	fff = file_open(path, MODE_READ, -1);

	/* Handle errors */
	if (!fff)
	{
		plog_fmt("Failed to open sound config (%s):\n    %s", 
		          path, strerror(errno));
		return FALSE;
	}

	/* Parse the file */
	/* Lines are always of the form "name = sample [sample ...]" */
	while (file_getl(fff, buffer, sizeof(buffer)))
	{
		char *msg_name;
		char *sample_list;
		char *search;
		char *cur_token;
		char *next_token;
		int event;

		/* Skip anything not beginning with an alphabetic character */
		if (!buffer[0] || !isalpha((unsigned char)buffer[0])) continue;

		/* Split the line into two: message name, and the rest */
		search = strchr(buffer, ' ');
		if (!search) continue;
		sample_list = strchr(search + 1, ' ');
		if (!sample_list) continue;

		/* Set the message name, and terminate at first space */
		msg_name = buffer;
		search[0] = '\0';


		/* Make sure this is a valid event name */
		for (event = MSG_MAX - 1; event >= 0; event--)
		{
			if (strcmp(msg_name, angband_sound_name[event]) == 0)
			    break;
		}
		if (event < 0) continue;

		/* Advance the sample list pointer so it's at the beginning of text */
		sample_list++;
		if (!sample_list[0]) continue;

		/* Terminate the current token */
		cur_token = sample_list;
		search = strchr(cur_token, ' ');
		if (search)
		{
			search[0] = '\0';
			next_token = search + 1;
		}
		else
		{
			next_token = NULL;
		}

		/*
		 * Now we find all the sample names and add them one by one
		 */
		while (cur_token)
		{
			int num = samples[event].num;

			/* Don't allow too many samples */
			if (num >= MAX_SAMPLES) break;

			/* Build the path to the sample */
			path_build(path, sizeof(path), ANGBAND_DIR_XTRA_SOUND, cur_token);
			if (!file_exists(path)) goto next_token;

			/* Don't load now if we're not caching */
			if (no_cache)
			{
				/* Just save the path for later */
				samples[event].paths[num] = (char *)string_make(path);
			}
			else
			{
				/* Load the file now */
				samples[event].wavs[num] = Mix_LoadWAV(path);
				if (!samples[event].wavs[num])
				{
					plog_fmt("%s: %s", SDL_GetError(), strerror(errno));
					goto next_token;
				}
			}

			/* Increment the sample count */
			samples[event].num++;

		next_token:

			/* Figure out next token */
			cur_token = next_token;
			if (next_token)
			{
				/* Try to find a space */
				search = strchr(cur_token, ' ');

				/* If we can find one, terminate, and set new "next" */
				if (search)
				{
					search[0] = '\0';
					next_token = search + 1;
				}
				else
				{
					/* Otherwise prevent infinite looping */
					next_token = NULL;
				}
			}
		}
	}

	/* Close the file */
	file_close(fff);


	/* Success */
	return TRUE;
}
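The manual token walk above (terminate at a space, remember the next token, repeat) can be compressed with strtok(); a sketch that keeps the same samples[] bookkeeping but drops the no_cache branch for brevity:

#include <string.h>

static void load_sample_list(int event, char *sample_list)
{
	char *tok = strtok(sample_list, " ");
	char path[2048];

	while (tok && samples[event].num < MAX_SAMPLES)
	{
		/* Resolve the sample name against the sound directory */
		path_build(path, sizeof(path), ANGBAND_DIR_XTRA_SOUND, tok);
		if (file_exists(path))
			samples[event].wavs[samples[event].num++] = Mix_LoadWAV(path);
		tok = strtok(NULL, " ");
	}
}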
Exemple #22
0
int start_up(EncoderJob &jobSpec) {

	jobSpec.p = new Pests();

	jobSpec.oc = avformat_alloc_context();
	if (!jobSpec.oc) {
		fprintf(stderr, "Memory error\n");
		jobSpec.IsValid = false;
		return 3;
	}
	jobSpec.oc->oformat = jobSpec.fmt;
	snprintf(jobSpec.oc->filename, sizeof(jobSpec.oc->filename), "%s-%05u.ts", jobSpec.BaseDirectory, jobSpec.SegmentNumber);


	// Set video codecs:
	jobSpec.fmt->video_codec = CODEC_ID_H264; // Video codec. Requires FFmpeg to be built with libx264.
	jobSpec.fmt->audio_codec = CODEC_ID_MP3; //CODEC_ID_AAC; // AAC is not working so well. Will use MP3 instead.

	jobSpec.video_st = NULL;
	jobSpec.audio_st = NULL;
	if (jobSpec.fmt->video_codec != CODEC_ID_NONE) {
		jobSpec.video_st = add_video_stream(jobSpec.oc, jobSpec.fmt->video_codec, jobSpec);
	}
	if (jobSpec.fmt->audio_codec != CODEC_ID_NONE) {
		jobSpec.audio_st = add_audio_stream(jobSpec, jobSpec.oc, jobSpec.fmt->audio_codec);
	}

	/*if (av_set_parameters(jobSpec.oc, NULL) < 0) {
		fprintf(stderr, "Invalid output format parameters\n");
		jobSpec.IsValid = false;
		return 4;
	}*/

	/* now that all the parameters are set, we can open the audio and
	video codecs and allocate the necessary encode buffers */
	if (jobSpec.video_st) {
		open_video(jobSpec, jobSpec.oc, jobSpec.video_st);
	}
	if (jobSpec.audio_st) {
		open_audio(jobSpec.oc, jobSpec.audio_st);
	}

#ifdef NEW_M2TS
	jobSpec.fmt->flags |= AVFMT_NOFILE; // we'll write our own, thanks!
	int track_ids[2] = {120, 121};
	uint8_t track_types[2] = {Pests::TT_H264, Pests::TT_MpegAudio};
	jobSpec.p->StartFile(jobSpec.oc->filename, track_ids, track_types, 2); // 120 = video, 121 = audio
#else
	// open the output file, if needed
	if (!(jobSpec.fmt->flags & AVFMT_NOFILE)) {
		if (url_fopen(&jobSpec.oc->pb, jobSpec.oc->filename, URL_WRONLY) < 0) {
			fprintf(stderr, "Could not open '%s'\n", jobSpec.oc->filename);
			jobSpec.IsValid = false;
			return 5;
		}
		av_write_header(jobSpec.oc);
	}
#endif


	// All done OK, validate and return.
	// From this point on, the developer MUST call CloseEncoderJob() before exiting.
	jobSpec.IsValid = true;
	return 0;
}
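CloseEncoderJob(), which the comment above makes mandatory, is not part of this listing. For the non-NEW_M2TS path and the same old libavformat API, its teardown would plausibly mirror start_up() like this (a sketch only: the name and ordering are assumptions, and releasing jobSpec->p is elided):

void CloseEncoderJob_sketch(EncoderJob *jobSpec)
{
	av_write_trailer(jobSpec->oc);                 /* finish the container */
	/* close codecs and free each stream, then the context itself */
	for (unsigned int i = 0; i < jobSpec->oc->nb_streams; i++) {
		avcodec_close(jobSpec->oc->streams[i]->codec);
		av_freep(&jobSpec->oc->streams[i]);
	}
	if (!(jobSpec->fmt->flags & AVFMT_NOFILE))
		url_fclose(jobSpec->oc->pb);               /* counterpart of url_fopen */
	av_free(jobSpec->oc);
	jobSpec->IsValid = false;
}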
Exemple #23
0
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
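The pts arithmetic above (pts.val * time_base.num / time_base.den) is exactly what av_q2d() encodes, and the next example uses that form directly; a one-line equivalent for reference:

static double stream_time(const AVStream *st)
{
	/* convert the stream clock to seconds */
	return st->pts.val * av_q2d(st->time_base);
}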
Exemple #24
0
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
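Later revisions of this FFmpeg example pick the stream to service with av_compare_ts() rather than comparing doubles; a sketch of that test over the same fields (the helper name is mine):

static int audio_is_due(const AVStream *audio_st, const AVStream *video_st)
{
	/* <= 0: audio clock is at or behind video, so write audio next */
	return av_compare_ts(audio_st->pts.val, audio_st->time_base,
	                     video_st->pts.val, video_st->time_base) <= 0;
}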
Exemple #25
0
	bool FFMPEGer::init(MetaData* meta){
		if(mInited)
			return true;
		
		do{
			AVDictionary *opt = NULL;
			int ret;
			
			av_register_all();
			
			avformat_alloc_output_context2(&fmt_ctx, NULL, NULL, mOutputFile);
			if(fmt_ctx == NULL){
				ALOGE("fail to avformat_alloc_output_context2 for %s", mOutputFile);
				break;
			}

			fmt = fmt_ctx->oformat;
			
			/* Add the audio and video streams using the default format codecs
			 * and initialize the codecs. */
			if (fmt->video_codec != AV_CODEC_ID_NONE) {
				add_stream(&video_st, fmt_ctx, &video_codec, fmt->video_codec);
				have_video = true;
			}
			if (fmt->audio_codec != AV_CODEC_ID_NONE) {
				add_stream(&audio_st, fmt_ctx, &audio_codec, fmt->audio_codec);
				have_audio = true;
			}
			
			if(!have_audio && !have_video){
				ALOGE("no audio or video codec found for the fmt!");
				break;
			}

			/* Now that all the parameters are set, we can open the audio and
			 * video codecs and allocate the necessary encode buffers. */
			if (have_video)
				open_video(video_codec, &video_st, opt);
			
			if (have_audio)
				open_audio(audio_codec, &audio_st, opt);
			
			/* open the output file, if needed */
			if (!(fmt->flags & AVFMT_NOFILE)) {
				ret = avio_open(&fmt_ctx->pb, mOutputFile, AVIO_FLAG_WRITE);
				if (ret < 0) {
					ALOGE("Could not open '%s': %s", mOutputFile, av_err2str(ret));
					break;
				}
			}

			/* Write the stream header, if any. */
			ret = avformat_write_header(fmt_ctx, NULL);
			if (ret < 0) {
				ALOGE("Error occurred when opening output file: %s", av_err2str(ret));
				break;
			}
			
			mInited = true;
		}while(0);

		if(!mInited)
			reset();

		return mInited;
	}
Exemple #26
0
	/*
	 * Create a decode context: open a media file and prepare it for decoding.
	 */
	AVDecodeCtx *ffCreateDecodeContext(
		const char * filename, AVDictionary *opt_arg
		)
	{
		int i, ret;
		AVInputFormat *file_iformat = NULL;
		AVDecodeCtx * pdc;
		AVDictionary * opt = NULL;

		ffInit();
		
		pdc = (AVDecodeCtx *)malloc(sizeof(AVDecodeCtx));
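		/* not a real loop: a single pass whose error paths break out below */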
		while (pdc)
		{
			memset(pdc, 0, sizeof(AVDecodeCtx));
			pdc->_fileName = strdup(filename);
			pdc->_ctx = avformat_alloc_context();
			if (!pdc->_ctx)
			{
				av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext : could not allocate context.\n");
				break;
			}
			//filename = "video=.." ,open dshow device
			if (filename && strstr(filename, "video=") == filename){
				file_iformat = av_find_input_format(CAP_DEVICE_NAME);
				if (!file_iformat){
					av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n",CAP_DEVICE_NAME);
					break;
				}
			}
			av_dict_copy(&opt, opt_arg, 0);
			ret = avformat_open_input(&pdc->_ctx, filename, file_iformat, &opt);
			av_dict_free(&opt);
			opt = NULL;
			if (ret < 0)
			{
				char errmsg[ERROR_BUFFER_SIZE];
				av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
				av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext %s.\n", errmsg);
				break;
			}
			av_format_inject_global_side_data(pdc->_ctx);

			av_dict_copy(&opt, opt_arg, 0);
			ret = avformat_find_stream_info(pdc->_ctx, NULL);
			av_dict_free(&opt);
			opt = NULL;
			if (ret < 0)
			{
				char errmsg[ERROR_BUFFER_SIZE];
				av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
				av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext %s.\n", errmsg);
				break;
			}
			/*
			 * Find the video and audio streams.
			 */
			ret = av_find_best_stream(pdc->_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
			if (ret >= 0)
			{
				pdc->has_video = 1;
				pdc->_video_st = pdc->_ctx->streams[ret];
				pdc->_video_st_index = ret;
			}
			ret = av_find_best_stream(pdc->_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
			if (ret >= 0)
			{
				pdc->has_audio = 1;
				pdc->_audio_st = pdc->_ctx->streams[ret];
				pdc->_audio_st_index = ret;
			}
			if (pdc->has_video)
			{
				if (open_video(pdc, pdc->_video_st->codec->codec_id, NULL) < 0)
				{
					ffCloseDecodeContext(pdc);
					return NULL;
				}
				pdc->encode_video = 1;
			}
			if (pdc->has_audio)
			{
				if (open_audio(pdc, pdc->_audio_st->codec->codec_id, NULL) < 0)
				{
					ffCloseDecodeContext(pdc);
					return NULL;
				}
				pdc->encode_audio = 1;
			}

			return pdc;
		}

		/*
		 * Cleanup on failure.
		 */
		ffCloseDecodeContext(pdc);
		return NULL;
	}
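ffCloseDecodeContext() is referenced above but not listed; undoing the allocations made in ffCreateDecodeContext() would plausibly look like this (sketch, name taken from the calls in the example):

void ffCloseDecodeContext(AVDecodeCtx *pdc)
{
	if (!pdc)
		return;
	if (pdc->_video_st && pdc->_video_st->codec)
		avcodec_close(pdc->_video_st->codec);
	if (pdc->_audio_st && pdc->_audio_st->codec)
		avcodec_close(pdc->_audio_st->codec);
	if (pdc->_ctx)
		avformat_close_input(&pdc->_ctx);
	free(pdc->_fileName);  /* was strdup'd at creation */
	free(pdc);
}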
Exemple #27
0
int main( int argc, char* argv[] ){

  int i,j;
  ogg_packet op;
  SDL_Event event;
  int hasdatatobuffer = 1;
  int playbackdone = 0;
  double now, delay, last_frame_time = 0;

  int frameNum=0;
  int skipNum=0;

  /* takes first argument as file to play */
  /* this works better on Windows and is more convenient
     for drag and drop ogg files over the .exe */

  if( argc != 2 )
  {
    usage();
    exit(0);
  }

  infile  = fopen( argv[1], "rb" );

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  parseHeaders();

  /* force audio off */
  /* vorbis_p = 0; */

  /* initialize decoders */
  if(theora_p){
    theora_decode_init(&td,&ti);
    printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n"
           "  Frame content is %dx%d with offset (%d,%d).\n",
	   to.serialno,ti.width,ti.height, (double)ti.fps_numerator/ti.fps_denominator,
	   ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
    report_colorspace(&ti);
    dump_comments(&tc);
  }else{
    /* tear down the partial theora setup */
    theora_info_clear(&ti);
    theora_comment_clear(&tc);
  }
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);  
    printf("Ogg logical stream %x is Vorbis %d channel %d Hz audio.\n",
	   vo.serialno,vi.channels,vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }
  /* open audio */
  if(vorbis_p)open_audio();
  /* open video */
  if(theora_p)open_video();
  
  /* our main loop */
  while(!playbackdone){

    /* break out on SDL quit event */
    if ( SDL_PollEvent ( &event ) )
    {
      if ( event.type == SDL_QUIT ) break ;
    }

    /* get some audio data */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;
      int count = 0;
      int maxBytesToWrite;

      /* is there pending audio? does it fit our circular buffer without blocking? */
      ret=vorbis_synthesis_pcmout(&vd,&pcm);
      maxBytesToWrite = GetAudioStreamWriteable(aOutStream);

      if (maxBytesToWrite<=FRAMES_PER_BUFFER){
        /* break out until there is a significant amount of
           data to avoid a series of small write operations. */
        break;
      }
      /* if there's pending, decoded audio, grab it */
      if((ret>0)&&(maxBytesToWrite>0)){

	for(i=0;i<ret && i<(maxBytesToWrite/vi.channels);i++)
	  for(j=0;j<vi.channels;j++){
	    int val=(int)(pcm[j][i]*32767.f);
	    if(val>32767)val=32767;
	    if(val<-32768)val=-32768;
	    samples[count]=val;
	    count++;
	  }
	if(WriteAudioStream( aOutStream, samples, i )) {
	  if(count==maxBytesToWrite){
	    audiobuf_ready=1;
	  }
	}
        vorbis_synthesis_read(&vd,i);

	if(vd.granulepos>=0)
	  audiobuf_granulepos=vd.granulepos-ret+i;
	else
	  audiobuf_granulepos+=i;

      }else{
	
	/* no pending audio; is there a pending packet to decode? */
	if(ogg_stream_packetout(&vo,&op)>0){
	  if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
	   vorbis_synthesis_blockin(&vd,&vb);
	}else	/* we need more data; break out to suck in another page */
	  break;
      }
    } /* end audio cycle */

    while(theora_p && !videobuf_ready){
      /* get one video packet... */
      if(ogg_stream_packetout(&to,&op)>0){
      
        theora_decode_packetin(&td,&op);

	  videobuf_granulepos=td.granulepos;
	  videobuf_time=theora_granule_time(&td,videobuf_granulepos);
	  /* update the frame counter */
	  frameNum++;

	  /* check if this frame time has not passed yet.
	     If the frame is late we need to decode additional
	     ones and keep looping, since theora at this stage
	     needs to decode all frames */
	  now=get_time();
	  delay=videobuf_time-now;
	  if(delay>=0.0){
		/* got a good frame, not late, ready to break out */
		videobuf_ready=1;
	  }else if(now-last_frame_time>=1.0){
		/* display at least one frame per second, regardless */
		videobuf_ready=1;
	  }else{
		fprintf(stderr, "dropping frame %d (%.3fs behind)\n",
			frameNum, -delay);
	   }
      }else{
	/* need more data */
	break;
      }
    }

    if(!hasdatatobuffer && !videobuf_ready && !audiobuf_ready){
      isPlaying = 0;
      playbackdone = 1;
    }

    /* if we're set for the next frame, sleep */
    if((!theora_p || videobuf_ready) && 
       (!vorbis_p || audiobuf_ready)){
        int ticks = (int)(1.0e3*(videobuf_time-get_time()));
	if(ticks>0)
          SDL_Delay(ticks);
    }
 
    if(videobuf_ready){
      /* time to write our cached frame */
      video_write();
      videobuf_ready=0;
      last_frame_time=get_time();

      /* if audio has not started (first frame) then start it */
      if ((!isPlaying)&&(vorbis_p)){
        start_audio();
        isPlaying = 1;
      }
    }

    /* HACK: always look for more audio data */
    audiobuf_ready=0;

    /* buffer compressed data every loop */
    if(hasdatatobuffer){
      hasdatatobuffer=buffer_data(&oy);
      if(hasdatatobuffer==0){
        printf("Ogg buffering stopped, end of file reached.\n");
      }
    }
    
    if (ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og);
    }

  } /* playbackdone */

  /* show number of video frames decoded */
  printf( "\n");
  printf( "Frames decoded: %d", frameNum );
  if(skipNum)
    printf( " (only %d shown)", frameNum-skipNum);
  printf( "\n" );

  /* tear it all down */
  fclose( infile );

  if(vorbis_p){
    audio_close();

    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi); 
  }
  if(theora_p){
    ogg_stream_clear(&to);
    theora_clear(&td);
    theora_comment_clear(&tc);
    theora_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  printf("\r                                                              "
	 "\nDone.\n");
	 
  SDL_Quit();

  return(0);

}
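The clamped float-to-16-bit conversion in the audio loop above is worth isolating; the same logic as a helper (sketch, assuming the sample buffer holds ogg_int16_t values):

static inline ogg_int16_t float_to_s16(float sample)
{
	int val = (int)(sample * 32767.f);
	if (val > 32767)  val = 32767;   /* clip positive overshoot */
	if (val < -32768) val = -32768;  /* clip negative overshoot */
	return (ogg_int16_t)val;
}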
Exemple #28
0
int main(int argc, char *argv[])
{
	int ret;
	const char *url;
	struct sound_file_info *file;
	struct decode *dec;
	struct fifo *fifo;
	pthread_t tid;
	struct load_thread_arg arg;
	u8 *lrc;
	u8 *icon;
	size_t lrc_size;
	size_t icon_size;
	u8 mp3_buff[MP3_BUFF_SIZE];
	u8 raw_buff[RAW_BUFF_SIZE];
	int mp3_size, raw_size;
	struct mp3_param mp3_pm;
	struct audio_output *out;
	struct window_info *win_info;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s PATH\n", argv[0]);
		return -EINVAL;
	}

	url = argv[1];

	file = sound_file_open(url);
	if (NULL == file) {
		fprintf(stderr, "Fail to open sound file \"%s\"!\n", url);
		return -ENODEV;
	}

	fifo = fifo_open();
	if (NULL == fifo) {
		ret = -ENOMEM;
		goto L1;
	}

	ret = parse_mp3_tag(file, &lrc, &lrc_size, &icon, &icon_size);
	if (ret < 0) {
		DPRINT("\n");
		goto L2;
	}

	DPRINT("mp3_start = %lu, mp3_end = %lu, "
			"lrc = %p, lrc_size = %lu, icon = %p, icon_size = %lu\n",
			file->mp3_data_start, file->mp3_data_end,
			lrc, lrc_size, icon, icon_size);

	arg.fifo = fifo;
	arg.file = file;
	ret = pthread_create(&tid, NULL, load_mp3_data_to_fifo, &arg);
	if (ret < 0) {
		DPRINT("\n");
		goto L2;
	}

	dec = decode_open(MPAUDEC); // fixme!
	if (NULL == dec) {
		ret = -ENODEV;
		goto L2;
	}

	while (fifo->used < fifo->size / 3) usleep(1000);
	mp3_size = fifo_read(fifo, mp3_buff, sizeof(mp3_buff));

	get_mp3_param(dec, mp3_buff, mp3_size, &mp3_pm);

	win_info = window_init();
	win_info->icon = icon;
	win_info->icon_size = icon_size;
	win_info->lrc = lrc;
	win_info->lrc_size = lrc_size;
	win_info->total.tv_sec = (file->mp3_data_end - file->mp3_data_start) * 8 / mp3_pm.bit_rate;
	win_info->total.tv_usec = (file->mp3_data_end - file->mp3_data_start) * 8 * 1000000 / mp3_pm.bit_rate % 1000000;
	win_info->param = &mp3_pm;

	DPRINT("rate = %d, channels = %d, bps = %d, bitrate = %d\n",
			mp3_pm.rate, mp3_pm.channels, mp3_pm.bits_per_sample, mp3_pm.bit_rate);

	out = open_audio(AUDIO_ALSA, &mp3_pm);
	if (NULL == out) {
		ret = -ENODEV;
		goto L3;
	}

	while (1) {
		if (file->mp3_data_end == file->offset && mp3_size == 0)
			break;

		if (mp3_size > 0) {
			ret = decode(dec, raw_buff, &raw_size, mp3_buff, mp3_size);
			mp3_size -= ret;
			memmove(mp3_buff, mp3_buff + ret, mp3_size);
		}

		play_frames(out, raw_buff, raw_size, &mp3_pm);

		ret = fifo_read(fifo, mp3_buff + mp3_size, sizeof(mp3_buff) - mp3_size);

		mp3_size += ret;
	}

	close_audio(out);
	window_destroy();
L3:
	decode_close(dec);
L2:
	fifo_close(fifo);
L1:
	sound_file_close(file);

	return ret;
}
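load_mp3_data_to_fifo() above runs in its own thread but is not listed; a plausible shape, where sound_file_read() and fifo_write() are hypothetical names for the obvious counterparts of the helpers that do appear:

static void *load_mp3_data_to_fifo(void *data)
{
	struct load_thread_arg *arg = data;
	u8 buff[MP3_BUFF_SIZE];
	int len;

	/* hypothetical reader/writer pair: stream the MP3 payload into the fifo */
	while ((len = sound_file_read(arg->file, buff, sizeof(buff))) > 0)
		fifo_write(arg->fifo, buff, len);

	return NULL;
}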