Example #1
static int audio_read_close(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;

    audio_close(s);
    return 0;
}
Example #2
int flowm_say_text(TCHAR *text)
{
    char *s;
    int ns;
    cst_voice *v;

    if (previous_wave)
    {
        delete_wave(previous_wave);
        previous_wave = NULL;
    }

    s = cst_wstr2cstr(text);               /* text to synthesize */
    v = VoxDefs[flowm_selected_voice].v;   /* voice to synthesize with */

    feat_remove(v->features,"print_info_relation");
    if (flowm_selected_relation == 1)
        feat_set_string(v->features, "print_info_relation", "Word");
    if (flowm_selected_relation == 2)
        feat_set_string(v->features, "print_info_relation", "Segment");

    /* Do the synthesis */
    previous_wave = flite_text_to_wave(s,v);

    ns = cst_wave_num_samples(previous_wave);

    cst_free(s);
    audio_flush(fl_ad);
    audio_close(fl_ad); 
    fl_ad = NULL;

    return ns;
}
Example #3
void shutdown_sdl(void)
{
	if (guarded_exit) {
		audio_close();
		printer_close();
#ifdef CONFIG_W5300_SUPPORT
		w5300_shutdown();
#endif
#ifdef CONFIG_EXDOS_SUPPORT
		wd_detach_disk_image();
#endif
		if (sram_ready)
			sram_save_all_segments();
		DEBUGPRINT("Shutdown callback, return." NL);
	}
	if (sdl_win) {
#ifdef __EMSCRIPTEN__
		// This is needed because otherwise the window title would remain as if the emulator were still running after exit, which is not the case ...
		SDL_SetWindowTitle(sdl_win, WINDOW_TITLE " v" VERSION " - EXITED");
#endif
		SDL_DestroyWindow(sdl_win);
	}
	console_close_window_on_exit();
	/* last stuff! */
	if (debug_fp) {
		DEBUGPRINT("Closing debug messages log file on exit." NL);
		fclose(debug_fp);
		debug_fp = NULL;
	}
	SDL_Quit();
}
Example #4
/*
 * close the audio context
 * args:
 *    none
 *
 * asserts:
 *    none
 *
 * returns: none
 */
void close_audio_context()
{
	if(my_audio_ctx != NULL)
		audio_close(my_audio_ctx);

	my_audio_ctx = NULL;
}
Example #5
int q6audio_mp3_close(struct audio_client *ac)
{
    audio_close(ac);
    audio_rx_path_enable(0, 0);
    audio_client_free(ac);
    return 0;
}
Example #6
static int audio_write_trailer(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;

    audio_close(s);
    return 0;
}
Example #7
static void appExit()
{
	printf("##appExit\n");
	//if (bHasAudio)
	{
		audio_stop();
		audio_close();
		printf("audio_close\n");
	}
	close_encode();
	audio_dec_exit();
	video_process_stop();
	usleep(200 * 1000);
	camera_close();
	printf("camera_close\n");
	encode_close();
	printf("encode_close\n");
	mux_exit();
	printf("mux_close\n");

	encode_destroy();
	akuio_pmem_fini();
	setled_off();
	PTZControlDeinit();
	printf("akuio_pmem_fini\n");
	
    record_rename_file();

}
Example #8
/*
 * Exit emulator
 */
static void do_exit_machine (void)
{
    graphics_leave ();
    inputdevice_close ();

#ifdef SCSIEMU
    scsidev_exit ();
#endif
    DISK_free ();
    audio_close ();
    dump_counts ();
#ifdef SERIAL_PORT
    serial_exit ();
#endif
#ifdef CD32
    akiko_free ();
#endif
    gui_exit ();

#ifdef AUTOCONFIG
    expansion_cleanup ();
#endif
#ifdef FILESYS
    filesys_cleanup ();
    hardfile_cleanup ();
    rtarea_cleanup ();
#endif
#ifdef SAVESTATE
    savestate_free ();
#endif
    memory_cleanup ();
    cfgfile_addcfgparam (0);
}
Example #9
static void alccapturestop(void *handle) {
    if(handle == (void*)1) {
        audio_close(handle);
        return;
    }

    alcCaptureStop(handle);
}
Example #10
/* Play the wave for the current set of notes for the given duration */
void play_note(uint8_t sz, uint8_t x[3], uint8_t duration) {
	pwm_init(sz, x);
    while (playing) {
        if(sample >= DUNIT*duration) {
            audio_close();
        }
    }
}
Example #11
static void cleanup(struct uade_state *state)
{
  save_content_db(state);

  if (uadepid != -1) {
    kill(uadepid, SIGTERM);
    uadepid = -1;
  }

  audio_close();
}
Example #12
int q6audio_close(struct audio_client *ac)
{
    audio_close(ac);
    if (ac->flags & AUDIO_FLAG_WRITE)
        audio_rx_path_enable(0, 0);
    else
        audio_tx_path_enable(0, 0);

    audio_client_free(ac);
    audio_allow_sleep();
    return 0;
}
Example #13
static javacall_result video_close(javacall_handle handle)
{
#ifdef ENABLE_EXTRA_CAMERA_CONTROLS
    audio_handle* pHandle = (audio_handle*)handle;

    if( JC_FMT_CAPTURE_VIDEO == pHandle->mediaType )
    {
        extra_camera_controls_cleanup( pHandle );
    }
#endif //ENABLE_EXTRA_CAMERA_CONTROLS

    return audio_close(handle);
}
Example #14
int flowm_say_file(TCHAR *tfilename)
{
    int rc = 0;
    char *filename;
    cst_voice *v;
    
    if (previous_wave)
    {   /* This is really tidy up from Play -- but might say space */
        delete_wave(previous_wave);
        previous_wave = NULL;
    }

    if (fl_ad)
    {
        MessageBoxW(0,L"audio fd still open",L"SayFile",0);
        audio_close(fl_ad); 
        fl_ad = NULL;
    }

    v = VoxDefs[flowm_selected_voice].v;

    /* Where we want to start from */
    feat_set_int(v->features, "file_start_position", flowm_file_pos);

    /* Only do print_info in play mode */
    feat_remove(v->features,"print_info_relation");

    filename = cst_wstr2cstr(tfilename);
    rc = flite_file_to_speech(filename, v, "stream");
    cst_free(filename);

    audio_flush(fl_ad);
    audio_close(fl_ad); 
    fl_ad = NULL;

    return rc;

}
Example #15
int audio_open(struct audio_info_struct *ai)
{
	int err;
	int card=0,device=0;
	char scard[128], sdevice[128];

	if(!ai)
		return -1;
	if(ai->device) {	/* parse ALSA device name */
		if(strchr(ai->device,':')) {	/* card with device */
			strncpy(scard, ai->device, sizeof(scard)-1);
			scard[sizeof(scard)-1] = '\0';
			if (strchr(scard,':')) *strchr(scard,':') = '\0';
			card = snd_card_name(scard);
			if (card < 0) {
				fprintf(stderr, "wrong soundcard number: %s\n", scard);
				exit(1);
			}
			strncpy(sdevice, strchr(ai->device, ':') + 1, sizeof(sdevice)-1);
		} else {
			strncpy(sdevice, ai->device, sizeof(sdevice)-1);
		}
		sdevice[sizeof(sdevice)-1] = '\0';
		device = atoi(sdevice);
		if (!isdigit(sdevice[0]) || device < 0 || device > 31) {
			fprintf(stderr, "wrong device number: %s\n", sdevice);
			exit(1);
		}
	}

	if((err=snd_pcm_open(&ai->handle, card, device, SND_PCM_OPEN_PLAYBACK)) < 0 )
	{
		fprintf(stderr, "open failed: %s\n", snd_strerror(err));
		exit(1);
	}

	if(audio_reset_parameters(ai) < 0)
	{
		audio_close(ai);
		return -1;
	}

	return 0;
}
Example #16
int
audio_device_release(session_t *sp, audio_desc_t the_dev)
{
        if (sp->audio_device == 0) {
                debug_msg("Audio device already released from session\n");
                return FALSE;
        }

        if (sp->audio_device != the_dev) {
                debug_msg("Releasing wrong device!\n");
                return FALSE;
        }

	/* Mix is going to be destroyed - tone_generator and
	 * voxlet have pointers to mixer in their state that
	 * is about to expire.  Could pass mixer as argument
	 * to their process functions...
	 */
	if (sp->tone_generator) {
		tonegen_destroy(&sp->tone_generator);
	}

	if (sp->local_file_player) {
		voxlet_destroy(&sp->local_file_player);
	}

        cushion_destroy(&sp->cushion);
        mix_destroy(&sp->ms);

        tx_stop(sp->tb);
        tx_destroy(&sp->tb);

        source_list_clear(sp->active_sources);

        audio_close(sp->audio_device);
        sp->audio_device = 0;

        xfree(zero_buf);
        zero_buf = NULL;

        return FALSE;
}
Example #17
void audio_capabilities(struct audio_info_struct *ai)
{
	int fmts;
	int i,j,k,k1=NUM_RATES-1;
	struct audio_info_struct ai1 = *ai;

        if (param.outmode != DECODE_AUDIO) {
		memset(capabilities,1,sizeof(capabilities));
		return;
	}

	memset(capabilities,0,sizeof(capabilities));
	if(param.force_rate) {
		rates[NUM_RATES-1] = param.force_rate;
		k1 = NUM_RATES;
	}

	/* if audio_open fails, the device is just not capable of anything... */
	if(audio_open(&ai1) < 0) {
		perror("audio");
	}
	else
	{
		for(i=0;i<NUM_CHANNELS;i++) {
			for(j=0;j<NUM_RATES;j++) {
				ai1.channels = channels[i];
				ai1.rate = rates[j];
				fmts = audio_get_formats(&ai1);
				if(fmts < 0)
					continue;
				for(k=0;k<NUM_ENCODINGS;k++) {
					if((fmts & encodings[k]) == encodings[k])
						capabilities[i][k][j] = 1;
				}
			}
		}
		audio_close(&ai1);
	}

	if(param.verbose > 1) print_capabilities(ai);
}
Example #18
void audio_capabilities(struct audio_info_struct *ai)
{
	int fmts;
	int i,j,k,k1=NUM_RATES-1;
	struct audio_info_struct ai1 = *ai;

        if (param.outmode != DECODE_AUDIO) {
		memset(capabilities,1,sizeof(capabilities));
		return;
	}

	memset(capabilities,0,sizeof(capabilities));
	if(param.force_rate) {
		rates[NUM_RATES-1] = param.force_rate;
		k1 = NUM_RATES;
	}

#ifndef NO_DECODE_AUDIO
	if(audio_open(&ai1) < 0) {
		perror("audio");
		exit(1);
	}
#endif

	for(i=0;i<NUM_CHANNELS;i++) {
		for(j=0;j<NUM_RATES;j++) {
			ai1.channels = channels[i];
			ai1.rate = rates[j];
			fmts = audio_get_formats(&ai1);
			if(fmts < 0)
				continue;
			for(k=0;k<NUM_ENCODINGS;k++) {
				if((fmts & encodings[k]) == encodings[k])
					capabilities[i][k][j] = 1;
			}
		}
	}

#ifndef NO_DECODE_AUDIO
	audio_close(&ai1);
#endif

	if(param.verbose > 1) {
		fprintf(stderr,"\nAudio capabilities:\n        |");
		for(j=0;j<NUM_ENCODINGS;j++) {
			fprintf(stderr," %5s |",audio_val2name[j].sname);
		}
		fprintf(stderr,"\n --------------------------------------------------------\n");
		for(k=0;k<k1;k++) {
			fprintf(stderr," %5d  |",rates[k]);
			for(j=0;j<NUM_ENCODINGS;j++) {
				if(capabilities[0][j][k]) {
					if(capabilities[1][j][k])
						fprintf(stderr,"  M/S  |");
					else
						fprintf(stderr,"   M   |");
				}
				else if(capabilities[1][j][k])
					fprintf(stderr,"   S   |");
				else
					fprintf(stderr,"       |");
			}
			fprintf(stderr,"\n");
		}
		fprintf(stderr,"\n");
	}
}
Example #19
static void cleanup(void)
{
    audio_close();
}
Example #20
int main(int argc, char *argv[])
{
    int result = ACTION_NONE;
    int leave = 0;

    /* i18n */
#ifdef ENABLE_NLS
    setlocale (LC_ALL, "");
    bindtextdomain (PACKAGE, LOCALEDIR);
    textdomain (PACKAGE);
#endif
    
    /* ltris info */
    printf( "LTris %s\nCopyright 2002-2005 Michael Speck\nPublished under GNU GPL\n---\n", VERSION );
    printf( "Looking up data in: %s\n", SRC_DIR );
#ifndef SOUND
    printf( "Compiled without sound and music\n" );
#endif

    set_random_seed(); /* set random seed */

    /* game ids - not translated so they remain fixed independent of language */
    strcpy(gametype_ids[0],"demo");
    strcpy(gametype_ids[1],"classic");
    strcpy(gametype_ids[2],"figures");
    strcpy(gametype_ids[3],"vshuman");
    strcpy(gametype_ids[4],"vscpu");
    strcpy(gametype_ids[5],"vshumanhuman");
    strcpy(gametype_ids[6],"vshumancpu");
    strcpy(gametype_ids[7],"vscpucpu");
    /* game names - translated for display */
    strcpy(gametype_names[0],_("Demo"));
    strcpy(gametype_names[1],_("Classic"));
    strcpy(gametype_names[2],_("Figures"));
    strcpy(gametype_names[3],_("Vs Human"));
    strcpy(gametype_names[4],_("Vs CPU"));
    strcpy(gametype_names[5],_("Vs Human&Human"));
    strcpy(gametype_names[6],_("Vs Human&CPU"));
    strcpy(gametype_names[7],_("Vs CPU&CPU"));
    config_load();

    init_sdl( SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER );
    set_video_mode( std_video_mode( config.fullscreen ) );
    SDL_WM_SetCaption( "LTris", 0 );
    sdl.fade = config.fade;
    SDL_SetEventFilter( event_filter );
#ifdef SOUND
    audio_open();
    sound_enable( config.sound );
    sound_volume( config.volume * 16 );
#endif

    /* create */
    hint_load_res();
    manager_create();    
    tetris_create();
    chart_load();
    /* run game */
    manager_fade( FADE_IN );
    while( !leave && !term_game ) {
        result = manager_run();
        switch( result ) {
            case ACTION_QUIT: leave = 1; break;
            case ACTION_MAKE_STAT:
                manager_fade( FADE_OUT );
                tetris_make_stat();
                manager_fade( FADE_IN );
                break;
            case ACTION_PLAY:
                manager_fade( FADE_OUT );
                if ( tetris_init() ) {
                    tetris_run();
                    tetris_clear();
                }
                manager_fade( FADE_IN );
                break;
            default: break;
        }
    }
    manager_fade( FADE_OUT );
    /* delete stuff */
    tetris_delete();
    manager_delete();
    chart_save();
    chart_delete();
    hint_delete_res();
    
#ifdef SOUND
    audio_close();
#endif
    config_save();

    return EXIT_SUCCESS;
}
Example #21
int audio_set_channels(struct audio_info_struct *ai)
{
  audio_close(ai);
  return audio_open(ai);
}
Example #22
/* Reading thread of the buffer. */
static void *read_thread (void *arg)
{
	struct out_buf *buf = (struct out_buf *)arg;
	int audio_dev_closed = 0;

	logit ("entering output buffer thread");

	set_realtime_prio ();

	LOCK (buf->mutex);

	while (1) {
		int played = 0;
		char play_buf[AUDIO_MAX_PLAY_BYTES];
		int play_buf_fill;
		int play_buf_pos = 0;

		if (buf->reset_dev && !audio_dev_closed) {
			audio_reset ();
			buf->reset_dev = 0;
		}

		if (buf->stop)
			fifo_buf_clear (&buf->buf);

		if (buf->free_callback) {

			/* unlock the mutex to make calls to out_buf functions
			 * possible in the callback */
			UNLOCK (buf->mutex);
			buf->free_callback ();
			LOCK (buf->mutex);
		}

		debug ("sending the signal");
		pthread_cond_broadcast (&buf->ready_cond);

		if ((fifo_buf_get_fill(&buf->buf) == 0 || buf->pause
					|| buf->stop)
				&& !buf->exit) {
			if (buf->pause && !audio_dev_closed) {
				logit ("Closing the device due to pause");
				audio_close ();
				audio_dev_closed = 1;
			}

			debug ("waiting for something in the buffer");
			buf->read_thread_waiting = 1;
			pthread_cond_wait (&buf->play_cond, &buf->mutex);
			debug ("something appeared in the buffer");

		}

		buf->read_thread_waiting = 0;

		if (audio_dev_closed && !buf->pause) {
			logit ("Opening the device again after pause");
			if (!audio_open(NULL)) {
				logit ("Can't reopen the device! sleeping...");
				sleep (1); /* there is no way to exit :( */
			}
			else
				audio_dev_closed = 0;
		}

		if (fifo_buf_get_fill(&buf->buf) == 0) {
			if (buf->exit) {
				logit ("exit");
				break;
			}

			logit ("buffer empty");
			continue;
		}

		if (buf->pause) {
			logit ("paused");
			continue;
		}

		if (buf->stop) {
			logit ("stopped");
			continue;
		}

		if (!audio_dev_closed) {
			int audio_bpf;
			size_t play_buf_frames;

			audio_bpf = audio_get_bpf();
			play_buf_frames = MIN(audio_get_bps() * AUDIO_MAX_PLAY,
			                      AUDIO_MAX_PLAY_BYTES) / audio_bpf;
			play_buf_fill = fifo_buf_get(&buf->buf, play_buf,
			                             play_buf_frames * audio_bpf);
			UNLOCK (buf->mutex);

			debug ("playing %d bytes", play_buf_fill);

			while (play_buf_pos < play_buf_fill) {
				played = audio_send_pcm (
						play_buf + play_buf_pos,
						play_buf_fill - play_buf_pos);
				play_buf_pos += played;
			}

			/*logit ("done sending PCM");*/
			/*write (fd, buf->buf + buf->pos, to_play);*/

			LOCK (buf->mutex);

			/* Update time */
			if (played && audio_get_bps())
				buf->time += played / (float)audio_get_bps();
			buf->hardware_buf_fill = audio_get_buf_fill();
		}
	}

	UNLOCK (buf->mutex);

	logit ("exiting");

	return NULL;
}
Example #23
static int play_wave_from_socket(snd_header *header,int audiostream)
{
    /* Read audio from stream and play it to audio device, converting */
    /* it to pcm if required                                          */
    int num_samples;
    int sample_width;
    cst_audiodev *audio_device;
    int q,i,n,r;
    unsigned char bytes[CST_AUDIOBUFFSIZE];
    short shorts[CST_AUDIOBUFFSIZE];
    cst_file fff;

    fff = cst_fopen("/tmp/awb.wav",CST_OPEN_WRITE|CST_OPEN_BINARY);

    if ((audio_device = audio_open(header->sample_rate,1,
				   (header->encoding == CST_SND_SHORT) ?
				   CST_AUDIO_LINEAR16 : CST_AUDIO_LINEAR8)) == NULL)
    {
	cst_errmsg("play_wave_from_socket: can't open audio device\n");
	return -1;
    }

    if (header->encoding == CST_SND_SHORT)
	sample_width = 2;
    else
	sample_width = 1;

    num_samples = header->data_size / sample_width;
    /* we naively let the num_channels sort itself out */
    for (i=0; i < num_samples; i += r/2)
    {
	if (num_samples > i+CST_AUDIOBUFFSIZE)
	    n = CST_AUDIOBUFFSIZE;
	else
	    n = num_samples-i;
	if (header->encoding == CST_SND_ULAW)
	{
	    r = read(audiostream,bytes,n);
	    for (q=0; q<r; q++)
		shorts[q] = cst_ulaw_to_short(bytes[q]);
	    r *= 2;
	}
	else /* if (header->encoding == CST_SND_SHORT) */
	{
	    r = read(audiostream,shorts,n*2);
	    if (CST_LITTLE_ENDIAN)
		for (q=0; q<r/2; q++)
		    shorts[q] = SWAPSHORT(shorts[q]);
	}
	
	if (r <= 0)
	{   /* I'm not getting any data from the server */
	    audio_close(audio_device);
	    return CST_ERROR_FORMAT;
	}
	
	for (q=r; q > 0; q-=n)
	{
	    n = audio_write(audio_device,shorts,q);
	    cst_fwrite(fff,shorts,2,q);
	    if (n <= 0)
	    {
		audio_close(audio_device);
		return CST_ERROR_FORMAT;
	    }
	}
    }
    audio_close(audio_device);
    cst_fclose(fff);

    return CST_OK_FORMAT;

}
Example #24
int main(int argc,char *argv[]){

  int i,j;
  ogg_packet op;

  FILE *infile = stdin;

#ifdef _WIN32 /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
#endif

  /* open the input file if any */
  if(argc==2){
    infile=fopen(argv[1],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for playback.\n", argv[1]);
      exit(1);
    }
  }
  if(argc>2){
      usage();
      exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  /* Ogg file open; parse the headers */
  /* Only interested in Vorbis/Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      ogg_stream_packetout(&test,&op);

      /* identify the codec: try theora */
      if(!theora_p && theora_decode_header(&ti,&tc,&op)>=0){
        /* it is theora */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
      }else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
        /* it is vorbis */
        memcpy(&vo,&test,sizeof(test));
        vorbis_p=1;
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets. */
  while((theora_p && theora_p<3) || (vorbis_p && vorbis_p<3)){
    int ret;

    /* look for further theora headers */
    while(theora_p && (theora_p<3) && (ret=ogg_stream_packetout(&to,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      if(theora_decode_header(&ti,&tc,&op)){
        printf("Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      theora_p++;
      if(theora_p==3)break;
    }

    /* look for more vorbis header packets */
    while(vorbis_p && (vorbis_p<3) && (ret=ogg_stream_packetout(&vo,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      if(vorbis_synthesis_headerin(&vi,&vc,&op)){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      vorbis_p++;
      if(vorbis_p==3)break;
    }

    /* The header pages/packets will arrive before anything else we
       care about, or the stream is not obeying spec */

    if(ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og); /* demux into the appropriate stream */
    }else{
      int ret=buffer_data(infile,&oy); /* someone needs more data */
      if(ret==0){
        fprintf(stderr,"End of file while searching for codec headers.\n");
        exit(1);
      }
    }
  }

  /* and now we have it all.  initialize decoders */
  if(theora_p){
    theora_decode_init(&td,&ti);
    printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n",
           (unsigned int)to.serialno,ti.width,ti.height, 
           (double)ti.fps_numerator/ti.fps_denominator);
    if(ti.width!=ti.frame_width || ti.height!=ti.frame_height)
      printf("  Frame content is %dx%d with offset (%d,%d).\n",
           ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
    report_colorspace(&ti);
    dump_comments(&tc);
  }else{
    /* tear down the partial theora setup */
    theora_info_clear(&ti);
    theora_comment_clear(&tc);
  }
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
    fprintf(stderr,"Ogg logical stream %x is Vorbis %d channel %d Hz audio.\n",
            (unsigned int)vo.serialno,vi.channels,(int)vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }

  /* open audio */
  if(vorbis_p)open_audio();

  /* open video */
  if(theora_p)open_video();

  /* install signal handler as SDL clobbered the default */
  signal (SIGINT, sigint_handler);

  /* on to the main decode loop.  We assume in this example that audio
     and video start roughly together, and don't begin playback until
     we have a start frame for both.  This is not necessarily a valid
     assumption in Ogg A/V streams! It will always be true of the
     example_encoder (and most streams) though. */

  stateflag=0; /* playback has not begun */
  while(!got_sigint){

    /* we want a video and audio frame ready to go at all times.  If
       we have to buffer incoming, buffer the compressed data (ie, let
       ogg do the buffering) */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;

      /* if there's pending, decoded audio, grab it */
      if((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0){
        int count=audiobuf_fill/2;
        int maxsamples=(audiofd_fragsize-audiobuf_fill)/2/vi.channels;
        for(i=0;i<ret && i<maxsamples;i++)
          for(j=0;j<vi.channels;j++){
            int val=rint(pcm[j][i]*32767.f);
            if(val>32767)val=32767;
            if(val<-32768)val=-32768;
            audiobuf[count++]=val;
          }
        vorbis_synthesis_read(&vd,i);
        audiobuf_fill+=i*vi.channels*2;
        if(audiobuf_fill==audiofd_fragsize)audiobuf_ready=1;
        if(vd.granulepos>=0)
          audiobuf_granulepos=vd.granulepos-ret+i;
        else
          audiobuf_granulepos+=i;
        
      }else{
        
        /* no pending audio; is there a pending packet to decode? */
        if(ogg_stream_packetout(&vo,&op)>0){
          if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
            vorbis_synthesis_blockin(&vd,&vb);
        }else   /* we need more data; break out to suck in another page */
          break;
      }
    }

    while(theora_p && !videobuf_ready){
      /* theora is one in, one out... */
      if(ogg_stream_packetout(&to,&op)>0){

        theora_decode_packetin(&td,&op);
        videobuf_granulepos=td.granulepos;
        
        videobuf_time=theora_granule_time(&td,videobuf_granulepos);

        /* is it already too old to be useful?  This is only actually
           useful cosmetically after a SIGSTOP.  Note that we have to
           decode the frame even if we don't show it (for now) due to
           keyframing.  Soon enough libtheora will be able to deal
           with non-keyframe seeks.  */

        if(videobuf_time>=get_time())
        videobuf_ready=1;
                
      }else
        break;
    }

    if(!videobuf_ready && !audiobuf_ready && feof(infile))break;

    if(!videobuf_ready || !audiobuf_ready){
      /* no data yet for somebody.  Grab another page */
      int bytes=buffer_data(infile,&oy);
      while(ogg_sync_pageout(&oy,&og)>0){
        queue_page(&og);
      }
    }

    /* If playback has begun, top audio buffer off immediately. */
    if(stateflag) audio_write_nonblocking();

    /* are we at or past time for this video frame? */
    if(stateflag && videobuf_ready && videobuf_time<=get_time()){
      video_write();
      videobuf_ready=0;
    }

    if(stateflag &&
       (audiobuf_ready || !vorbis_p) &&
       (videobuf_ready || !theora_p) &&
       !got_sigint){
      /* we have an audio frame ready (which means the audio buffer is
         full), and it's not time to play video, so wait until either the
         audio buffer is ready for more data or it's near time to play video */
        
      /* set up select wait on the audiobuffer and a timeout for video */
      struct timeval timeout;
      fd_set writefs;
      fd_set empty;
      int n=0;

      FD_ZERO(&writefs);
      FD_ZERO(&empty);
      if(audiofd>=0){
        FD_SET(audiofd,&writefs);
        n=audiofd+1;
      }

      if(theora_p){
        long milliseconds=(videobuf_time-get_time())*1000-5;
        if(milliseconds>500)milliseconds=500;
        if(milliseconds>0){
          timeout.tv_sec=milliseconds/1000;
          timeout.tv_usec=(milliseconds%1000)*1000;

          n=select(n,&empty,&writefs,&empty,&timeout);
          if(n)audio_calibrate_timer(0);
        }
      }else{
        select(n,&empty,&writefs,&empty,NULL);
      }
    }

    /* if our buffers either don't exist or are ready to go,
       we can begin playback */
    if((!theora_p || videobuf_ready) &&
       (!vorbis_p || audiobuf_ready))stateflag=1;
    /* same if we've run out of input */
    if(feof(infile))stateflag=1;

  }

  /* tear it all down */

  audio_close();
  SDL_Quit();

  if(vorbis_p){
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(theora_p){
    ogg_stream_clear(&to);
    theora_clear(&td);
    theora_comment_clear(&tc);
    theora_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  if(infile && infile!=stdin)fclose(infile);

  fprintf(stderr,
          "\r                                                              "
          "\nDone.\n");
  return(0);

}
Example #25
bool CTheoraPlayer::playVideo(
//Plays specified OGG Theora file to screen surface.
//If screen == NULL, then this method will test that the file is playable
//by decoding it as fast as possible but not displaying anything.
//
//Returns: whether playback was successful
	CStretchyBuffer& buffer, SDL_Surface *screen,
	const int x, const int y) //[default=(0,0)]
{
	//init
	theora_p = vorbis_p = 0;
	startticks = 0;
	bool bSkippedLastFrame = false;

	// start up Ogg stream synchronization layer
	ogg_sync_init(&oy);

	// init supporting Vorbis structures needed in header parsing
	vorbis_info_init(&vi);
	vorbis_comment_init(&vc);

	// init supporting Theora structures needed in header parsing
	theora_comment_init(&tc);
	theora_info_init(&ti);
	if (!screen)
		ti.quick_p = 1;
	ti.quality = 63;

	if (!parseHeaders(buffer))
		return false;

	// force audio off
	vorbis_p = 0;

	// initialize decoders
	if (theora_p) {
		theora_decode_init(&td,&ti);
#if 0
		printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n"
			  "  Frame content is %dx%d with offset (%d,%d).\n",
			to.serialno,ti.width,ti.height, (double)ti.fps_numerator/ti.fps_denominator,
			ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
		//report_colorspace(&ti); //we're not using this info for anything
		dump_comments(&tc);
#endif
	} else {
		// tear down the partial theora setup
		theora_info_clear(&ti);
		theora_comment_clear(&tc);
	}
	if(vorbis_p) {
		vorbis_synthesis_init(&vd,&vi);
		vorbis_block_init(&vd,&vb);  
		printf("Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
			vo.serialno,vi.channels,vi.rate);
	} else {
		// tear down the partial vorbis setup
		vorbis_info_clear(&vi);
		vorbis_comment_clear(&vc);
	}

	// open audio
	if (vorbis_p)
		open_audio();

	// open video
	SDL_Overlay *yuv_overlay = NULL;
	if (theora_p && screen)
		yuv_overlay = open_video(screen);
  
	// single frame video buffering
	ogg_packet op;
	ogg_int64_t  videobuf_granulepos=-1;
	double       videobuf_time=0;
	double last_frame_time = 0;
	bool hasdatatobuffer = true;

	// Main loop
	bool audiobuf_ready=false;
	bool videobuf_ready=false;
	bool playbackdone = (yuv_overlay == NULL);
	bool isPlaying = false;
	bool bBreakout = false;
	while (!playbackdone)
	{
		// break out on SDL quit event
		SDL_Event event;
		if (SDL_PollEvent(&event))
		{
			switch (event.type)
			{
				case SDL_QUIT: playbackdone = bBreakout = true; break;
				case SDL_KEYDOWN:
					if (event.key.keysym.sym == SDLK_ESCAPE)
						playbackdone = bBreakout = true;
				break;
				default: break;
			}
		}

		while (theora_p && !videobuf_ready) {
			// get one video packet...
			if (ogg_stream_packetout(&to,&op)>0)
			{
				theora_decode_packetin(&td,&op);

				videobuf_granulepos=td.granulepos;
				videobuf_time=theora_granule_time(&td,videobuf_granulepos);

#if 0
				//Without sound channels to synch to, don't need to worry about skipping frames when slow.
				// update the frame counter
				//++frameNum;

				// check if this frame time has not passed yet.
				//	If the frame is late we need to decode additional
				//	ones and keep looping, since theora at this stage
				//	needs to decode all frames.
				const double now=get_time();
				const double delay=videobuf_time-now;
				if(delay>=0.0){
					/// got a good frame, not late, ready to break out
					videobuf_ready=true;
				} else if(now-last_frame_time>=1.0) {
					// display at least one frame per second, regardless
					videobuf_ready=true;
				} else {
					//Need to catch up -- no time to display frame.
					if (bSkippedLastFrame) //only allow skipping one frame in a row
						videobuf_ready = true; //show anyway
					else
						bSkippedLastFrame = true;
					//printf("dropping frame %d (%.3fs behind)\n", frameNum, -delay);
				}
#else
				videobuf_ready = true; //show every frame
#endif
			} else {
				// need more data
				break;
			}
		}

		if (!hasdatatobuffer && !videobuf_ready && !audiobuf_ready) {
			isPlaying = false;
			playbackdone = true;
		}

		//If we're set for the next frame, sleep.
		//In other words, don't show frames too rapidly. 
		if((!theora_p || videobuf_ready) && 
			(!vorbis_p || audiobuf_ready))
		{
			const int ticks = (int)(1000*(videobuf_time-get_time()));
			if(ticks>0 && screen) //don't need to sleep if only testing file
				SDL_Delay(ticks);
		}
 
		if (videobuf_ready)
		{
			// time to write our cached frame
			if (screen)
			{
				const bool bRes = video_write(screen, yuv_overlay, x, y);
				if (!bRes) //couldn't display image
					playbackdone = bBreakout = true;
			}
			videobuf_ready=false;
			last_frame_time=get_time();
			bSkippedLastFrame = false;

			// if audio has not started (first frame) then start it
			if ((!isPlaying)&&(vorbis_p)) {
				start_audio();
				isPlaying = true;
			}
		}

		// HACK: always look for more audio data
		audiobuf_ready=false;

		// buffer compressed data every loop
		if (hasdatatobuffer) {
			hasdatatobuffer = buffer_data(&oy, buffer) > 0;
			if (!hasdatatobuffer) {
				//printf("Ogg buffering stopped, end of file reached.\n");
			}
		}
    
		if (ogg_sync_pageout(&oy,&og)>0)
			queue_page(&og);

	} // playbackdone

	// show number of video frames decoded
	//printf("\nFrames decoded: %d\n", frameNum);

	// deinit
	if (vorbis_p) {
		audio_close();

		ogg_stream_clear(&vo);
		vorbis_block_clear(&vb);
		vorbis_dsp_clear(&vd);
		vorbis_comment_clear(&vc);
		vorbis_info_clear(&vi); 
	}
	if (theora_p) {
		if (yuv_overlay)
			SDL_FreeYUVOverlay(yuv_overlay);

		ogg_stream_clear(&to);
		theora_clear(&td);
		theora_comment_clear(&tc);
		theora_info_clear(&ti);
	}
	ogg_sync_clear(&oy);

	//If broken out of testing, return false since entire file was not verified.
	return !bBreakout || screen != NULL;
}
Example #26
int main(int argc, char **argv)
{
    struct sockaddr_in serv_addr;
    struct pollfd   poll_fds[1];
    int             exit_code = EXIT_FAILURE;
    int             net_fd = -1;
    int             connected = 0;
    int             res;

    audio_t        *audio;
    OpusDecoder    *decoder;
    uint64_t        encoded_bytes = 0;
    uint64_t        decoder_errors = 0;
    int             error;

    struct app_data app = {
        .sample_rate = 48000,
        .device_index = -1,
        .server_port = DEFAULT_AUDIO_PORT,
    };

    parse_options(argc, argv, &app);
    if (app.server_ip == NULL)
        app.server_ip = strdup("127.0.0.1");

    fprintf(stderr, "Using server IP %s\n", app.server_ip);
    fprintf(stderr, "using server port %d\n", app.server_port);

    /* initialize audio subsystem */
    audio = audio_init(app.device_index, app.sample_rate, AUDIO_CONF_OUTPUT);
    if (audio == NULL)
        exit(EXIT_FAILURE);

    decoder = opus_decoder_create(app.sample_rate, 1, &error);
    if (error != OPUS_OK)
    {
        fprintf(stderr, "Error creating opus decoder: %d (%s)\n",
                error, opus_strerror(error));
        audio_close(audio);
        exit(EXIT_FAILURE);
    }

    /* setup signal handler */
    if (signal(SIGINT, signal_handler) == SIG_ERR)
        printf("Warning: Can't catch SIGINT\n");
    if (signal(SIGTERM, signal_handler) == SIG_ERR)
        printf("Warning: Can't catch SIGTERM\n");

    memset(&serv_addr, 0, sizeof(serv_addr));
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_port = htons(app.server_port);
    if (inet_pton(AF_INET, app.server_ip, &serv_addr.sin_addr) == -1)
    {
        fprintf(stderr, "Error calling inet_pton(): %d: %s\n", errno,
                strerror(errno));
        goto cleanup;
    }

    while (keep_running)
    {
        if (net_fd == -1)
        {
            net_fd = socket(AF_INET, SOCK_STREAM, 0);
            if (net_fd == -1)
            {
                fprintf(stderr, "Error creating socket: %d: %s\n", errno,
                        strerror(errno));
                goto cleanup;
            }
        }

        /* Try to connect to server */
        if (connect(net_fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr))
            == -1)
        {
            fprintf(stderr, "Connect error %d: %s\n", errno, strerror(errno));

            /* These errors may be temporary; try again */
            if (errno == ECONNREFUSED || errno == ENETUNREACH ||
                errno == ETIMEDOUT)
            {
                sleep(1);
                continue;
            }
            else
            {
                goto cleanup;
            }
        }

        poll_fds[0].fd = net_fd;
        poll_fds[0].events = POLLIN;
        connected = 1;
        fprintf(stderr, "Connected...\n");

        /* start audio system */
        audio_start(audio);

        while (keep_running && connected)
        {
            res = poll(poll_fds, 1, 500);

            if (res <= 0)
                continue;

            /* service network socket */
            if (poll_fds[0].revents & POLLIN)
            {

#define AUDIO_FRAMES 5760       // allows receiving up to 120 msec frames
#define AUDIO_BUFLEN 2 * AUDIO_FRAMES   // 120 msec: 48000 * 0.12
                uint8_t         buffer1[AUDIO_BUFLEN];
                uint8_t         buffer2[AUDIO_BUFLEN * 2];
                uint16_t        length;

                int             num;

                /* read 2 byte header */
                num = read(net_fd, buffer1, 2);
                if (num != 2)
                {
                    /* unrecoverable error; disconnect */
                    fprintf(stderr, "Error reading packet header: %d\n", num);
                    close(net_fd);
                    net_fd = -1;
                    connected = 0;
                    poll_fds[0].fd = -1;
                    audio_stop(audio);

                    num = opus_decode(decoder, NULL, 0, (opus_int16 *) buffer2,
                                      AUDIO_FRAMES, 0);

                    continue;
                }

                length = buffer1[0] + ((buffer1[1] & 0x1F) << 8);
                length -= 2;
                num = read(net_fd, buffer1, length);

                if (num == length)
                {
                    encoded_bytes += num;
                    num = opus_decode(decoder, buffer1, num,
                                      (opus_int16 *) buffer2, AUDIO_FRAMES, 0);

                    if (num > 0)
                    {
                        audio_write_frames(audio, buffer2, num);
                    }
                    else
                    {
                        decoder_errors++;
                        fprintf(stderr, "Decoder error: %d (%s)\n", num,
                                opus_strerror(num));
                    }
                }
                else if (num == 0)
                {
                    fprintf(stderr, "Connection closed (FD=%d)\n", net_fd);
                    close(net_fd);
                    net_fd = -1;
                    connected = 0;
                    poll_fds[0].fd = -1;
                    audio_stop(audio);
                }
                else
                {
                    fprintf(stderr, "Error reading from net: %d / \n", num);
                }

            }
        }
    }

    fprintf(stderr, "Shutting down...\n");
    exit_code = EXIT_SUCCESS;

  cleanup:
    close(net_fd);
    if (app.server_ip != NULL)
        free(app.server_ip);

    audio_stop(audio);
    audio_close(audio);
    opus_decoder_destroy(decoder);

    fprintf(stderr, "  Encoded bytes in: %" PRIu64 "\n", encoded_bytes);
    fprintf(stderr, "  Decoder errors  : %" PRIu64 "\n", decoder_errors);

    exit(exit_code);
}
Example #27
int audio_set_format(struct audio_info_struct *ai)
{
  audio_close(ai);
  return audio_open(ai);
}
Example #28
int audio_set_rate(struct audio_info_struct *ai)
{
  audio_close(ai);
  return audio_open(ai);
}
Example #29
void toc_extract( WINDOW * win )
{
  static const char * ext[] = { "avr", "raw", "raw", "wav" };
  char pathname[ 256 ];
  char prog_info[ 64 ];
  char buf[ 128 ];
  struct avr_header * avrh;
  struct wave_header * wavh;
  struct audio_entry entry;
  struct audio_stream * as;
  struct _toc_data * data;
  struct device_info * info;
  OBJECT * ck;
  int format, i, max, track_no;
  int fd, swap;
  long offset, length, position, end, progress, total_length;
  long max_buf_blocks, nblocks;
  void * buffer;

  if( !fileselect( preferences.toc_dest, "", "TXT_EXTDEST" ) )
    return;
  strrchr( preferences.toc_dest, '\\' )[1] = '\0';

  data = DataSearch( win, TW_MAGIC );
  max = data->n_tracks;
  format = fmt_popup.selected;
  total_length = 0;
  buffer = alloc_comm_buffer( BUFSIZE );
  if( !buffer )
    return;
  for( i = 0; i < max; i++ )
  {
    ck = data->tree + 1 + TF_CK + i * data->n_obj;
    if( ! (ck->ob_state & SELECTED) )
      continue;
    offset = toc_address( data->f[i].beg_time );
    length = toc_address( data->f[i].end_time ) + 1 - offset;
    if( length > 0 )
      total_length += length;
  }
  max_buf_blocks = BUFSIZE / 2352;

  progress = 0;
  progress_init( get_string( "TXT_EXTMSG" ), total_length );
  progress_activate_cancel( 1 );
  progress_init_timer();

  log_begin();
  log_printf( "*** Begin of a track extraction session\n\n" );
  as = NULL;
  for( i = 0; i < max; i++ )
  {
    ck = data->tree + 1 + TF_CK + i * data->n_obj;
    if( ! (ck->ob_state & SELECTED) )
      continue;
    offset = toc_address( data->f[i].beg_time );
    length = toc_address( data->f[i].end_time ) + 1 - offset;
    if( length <= 0 )
      continue;
    track_no = i + 1;
    position = get_track_offset( &data->toc, track_no, &end );
    if( toc_popup.selected == 0 )
      gen_daoimg_entry( &entry, toc_info.toc_file, track_no,
                        offset - position, end - offset - length );
    else
    {
      info = (struct device_info*)toc_popup.item[toc_popup.selected].info;
      gen_cd_entry( &entry, info, track_no, offset - position, end - offset - length );
    }
    if( as )
      as = audio_reopen( as, &entry );
    else
      as = audio_open( &entry );
    if( as == NULL )
      continue;

    sprintf( prog_info, get_string( "TXT_EXTTRK" ), track_no );
    progress_setinfo( prog_info );

    sprintf( pathname, "%strack%02d.%s", preferences.toc_dest, track_no, ext[ format ] );
    fd = open( pathname, O_WRONLY|O_CREAT|O_TRUNC );
    if( fd == -1 )
    {
      audio_close( as );
      alert_msg( "AL_FILERR", 1, pathname );
      goto error;
    }
    switch( format )
    {
    case 0:        /* AVR */
      avrh = (struct avr_header *) buf;
      avrh->avr_id = '2BIT';
      memset( avrh->name, 0, 8 );
      avrh->num_voices = 0xFFFF;
      avrh->num_bits = 16;
      avrh->signe = 0xffff;
      avrh->loop = 0;
      avrh->midi = 0xffff;
      avrh->freq_type.frequence = 0xff00ac44L;
      avrh->length = length * (2352 / 2);
      avrh->beg_loop = 0;
      avrh->end_loop = avrh->length;
      memset( avrh->reserved, 0, 26 + 64 );
      write( fd, avrh, sizeof( *avrh ) );
      swap = as->little_endian;
      break;
    case 1:        /* RAW big-endian */
      swap = as->little_endian;
      break;
    case 2:        /* RAW little-endian */
      swap = !as->little_endian;
      break;
    case 3:        /* WAVE */
      wavh = (struct wave_header *) buf;
      wavh->riff_id = 'RIFF';
      wavh->riff_len = swap_long( length * 2352 + 36 );
      wavh->wave_id = 'WAVE';
      wavh->fmt_id = 'fmt ';
      wavh->fmt_size = 0x10000000L;
      wavh->fmt_compression_code = 0x0100;
      wavh->fmt_channels = 0x0200;
      wavh->fmt_freq = 0x44ac0000L;
      wavh->fmt_bytes_sec = 0x10b10200L;
      wavh->fmt_block_align = 0x0400;
      wavh->fmt_num_bits = 0x1000;
      wavh->data_id = 'data';
      wavh->data_size = swap_long( length * 2352 );
      write( fd, wavh, sizeof( *wavh ) );
      swap = !as->little_endian;
      break;
    }
    while( length > 0 )
    {
      if( yield() )
      {
        audio_close( as );
        alert_msg( "AL_EXTINT", 1 );
        goto error;
      }
      nblocks = MIN( length, max_buf_blocks );
      if( audio_read( as, buffer, nblocks ) == 0 )
      {
        audio_close( as );
        goto error;
      }
      if( swap )
        swap_endian( buffer, nblocks * 2352 );
      if( write( fd, buffer, nblocks * 2352 ) == -1 )
      {
        close( fd );
        audio_close( as );
        alert_msg( "AL_FWRTERR", 1, pathname );
        goto error;
      }
      length -= nblocks;
      progress += nblocks;
      progress_setcount( progress );
    }
    close( fd );
  }
  audio_close( as );
error:
  log_printf( "*** End of the track extraction session\n\n" );
  log_end();
  progress_exit();
  free_comm_buffer( buffer );

}
Example #30
int main(int argc, char* argv[]) {
  lua_State *lua = luaL_newstate();
  luaL_openlibs(lua);

  love_Config config;

  l_love_register(lua);
  l_audio_register(lua);
  l_event_register(lua);
  l_graphics_register(lua);
  l_image_register(lua);
  l_keyboard_register(lua);
  l_mouse_register(lua);
  l_filesystem_register(lua);
  l_timer_register(lua);
  l_math_register(lua);
  l_system_register(lua);
  l_physics_register(lua);

  l_boot(lua, &config);

  keyboard_init();
  graphics_init(config.window.width, config.window.height);
  audio_init();

  if(luaL_dofile(lua, "main.lua")){
      printf("Error: %s\n", lua_tostring(lua, -1));
      l_no_game(lua,&config);
    }

  love_Version const * version = love_getVersion();
  printf("%s %s %i %i %i \n", "Love code name: ",version->codename,version->major,version->minor,version->revision);

  lua_pushcfunction(lua, lua_errorhandler);
  lua_getglobal(lua, "love");
  lua_pushstring(lua, "load");
  lua_rawget(lua, -2);
  if(lua_pcall(lua, 0, 0, 1)) {
      printf("Error in love.load: %s\n", lua_tostring(lua, -1));
    }
  lua_pop(lua, 1);

  lua_pushcfunction(lua, lua_errorhandler);
  MainLoopData mainLoopData = {
    .luaState = lua,
    .errhand = luaL_ref(lua, LUA_REGISTRYINDEX)
  };

  timer_init();

#ifdef EMSCRIPTEN
  //TODO find a way to quit(love.event.quit) love on web?
  emscripten_set_main_loop_arg(main_loop, &mainLoopData, 0, 1);
#else
  while(l_event_running()) {
      main_loop(&mainLoopData);
    }
  if(!l_event_running())
    quit_function(lua);
#endif
  audio_close ();
  lua_close(lua);
  return 0;
}