Example #1
void sheepshaver_state::record_audio(void)
{
	static uint8 silence[8192] = {0};
	// Forward a zero-filled buffer to the record_audio() overload that takes sample data.
	record_audio(silence);
}
Example #2
static int
stream_callback (const void *input_buffer, void *output_buffer, unsigned long frames_per_buffer, const PaStreamCallbackTimeInfo * time_info, PaStreamCallbackFlags status_flags, void *user_data)
{
  float **buffers = (float **) output_buffer;
#ifdef _HAVE_RUBBERBAND_
  static gboolean initialized = FALSE;
  if (!initialized) {
      rubberband_set_max_process_size(rubberband, frames_per_buffer);
      initialized = TRUE;
  }
#endif

  size_t i;
  for (i = 0; i < 2; ++i)
    {
      memset (buffers[i], 0, frames_per_buffer * sizeof (float));
    }

  if (!ready)
    return paContinue;

#ifdef _HAVE_FLUIDSYNTH_
  if (reset_audio)
    {
      fluidsynth_all_notes_off ();
      reset_synth_channels ();
      reset_audio = FALSE;
      return paContinue;
    }

  unsigned char event_data[MAX_MESSAGE_LENGTH]; //needs to be long enough for variable length messages...
  size_t event_length = MAX_MESSAGE_LENGTH;
  double event_time;

  double until_time = nframes_to_seconds (playback_frame + frames_per_buffer);
#ifdef _HAVE_RUBBERBAND_
  gint available = rubberband_available(rubberband);
  if ((!rubberband_active) || (available < (gint) frames_per_buffer)) {
#endif

  while (read_event_from_queue (AUDIO_BACKEND, event_data, &event_length, &event_time, until_time/slowdown))
    {//g_print("%x %x %x\n", event_data[0], event_data[1], event_data[2] );
      fluidsynth_feed_midi (event_data, event_length);  //in fluid.c; note the fluidsynth API uses fluid_synth_xxx naming, which is confusingly similar
    }

  fluidsynth_render_audio (frames_per_buffer, buffers[0], buffers[1]);  //in fluid.c calls fluid_synth_write_float()

// Now get any audio to mix - dump it in the left hand channel for now
  event_length = frames_per_buffer;
  read_event_from_mixer_queue (AUDIO_BACKEND, (void *) buffers[1], &event_length);

#ifdef _HAVE_RUBBERBAND_
  }
  //feed the freshly rendered buffers[] to rubberband, then retrieve processed audio once enough is available
  if(rubberband_active)
      {
      if(available < (gint)frames_per_buffer)
          rubberband_process(rubberband, (const float * const*)buffers, frames_per_buffer, 0);
      available = rubberband_available(rubberband);
      if(available >= (gint)frames_per_buffer)
          {
              rubberband_retrieve(rubberband, buffers, frames_per_buffer);//re-use buffers[] as they are available...
              write_samples_to_rubberband_queue (AUDIO_BACKEND, buffers[0], frames_per_buffer);
              write_samples_to_rubberband_queue (AUDIO_BACKEND,  buffers[1], frames_per_buffer);
              available -= frames_per_buffer;
          }
      event_length = frames_per_buffer;
      read_event_from_rubberband_queue (AUDIO_BACKEND, (unsigned char *) buffers[0], &event_length);
      event_length = frames_per_buffer;
      read_event_from_rubberband_queue (AUDIO_BACKEND, (unsigned char *) buffers[1],  &event_length);
      }
#endif //_HAVE_RUBBERBAND_

  if (until_time < get_playuntil ())
    {
#endif //_HAVE_FLUIDSYNTH_
      playback_frame += frames_per_buffer;
      update_playback_time (TIMEBASE_PRIO_AUDIO, nframes_to_seconds (playback_frame));
#ifdef _HAVE_FLUIDSYNTH_
    }
#endif //_HAVE_FLUIDSYNTH_

  // Doing heavy work in an audio callback is probably a bad idea
  record_audio(buffers, frames_per_buffer);
  return paContinue;
}
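The callback above casts output_buffer to float **, which corresponds to a PortAudio stream opened with the paFloat32 | paNonInterleaved sample format and two output channels. A minimal sketch of how such a stream might be opened and started follows; the sample rate, frames-per-buffer value, and error handling are illustrative assumptions rather than code from the original project.

#include <portaudio.h>

/* Sketch only: open a stereo, non-interleaved float stream driven by
   stream_callback(). The 44100 Hz rate and 256-frame buffer are assumptions. */
static PaStream *
open_output_stream (void)
{
  PaStream *stream = NULL;
  if (Pa_Initialize () != paNoError)
    return NULL;
  if (Pa_OpenDefaultStream (&stream, 0 /* inputs */, 2 /* outputs */,
                            paFloat32 | paNonInterleaved,
                            44100, 256, stream_callback, NULL) != paNoError)
    return NULL;
  Pa_StartStream (stream);      /* audio now flows through stream_callback() */
  return stream;
}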
Example #3
int quisk_read_sound(void)	// Called from sound thread
{  // called in an infinite loop by the main program
	int i, nSamples, mic_count, mic_interp, retval, is_cw, mic_sample_rate;
	complex double tx_mic_phase;
	static double cwEnvelope=0;
	static double cwCount=0;
	static complex double tuneVector = (double)CLIP32 / CLIP16;	// Convert 16-bit to 32-bit samples
	static struct quisk_cFilter filtInterp={NULL};

	quisk_sound_state.interupts++;
#if DEBUG_IO > 1
	QuiskPrintTime("Start read_sound", 0);
#endif
	if (pt_sample_read) {			// read samples from SDR-IQ
		nSamples = (*pt_sample_read)(cSamples);
	}
	else if (quisk_using_udp) {	// read samples from UDP port
		nSamples = quisk_read_rx_udp(cSamples);
	}
	else if (Capture.handle) {							// blocking read from soundcard
		if (Capture.portaudio_index < 0)
			nSamples = quisk_read_alsa(&Capture, cSamples);
		else
			nSamples = quisk_read_portaudio(&Capture, cSamples);
		if (Capture.channel_Delay >= 0)	// delay the I or Q channel by one sample
			delay_sample(&Capture, (double *)cSamples, nSamples);
		if (Capture.doAmplPhase)		// amplitude and phase corrections
			correct_sample(&Capture, cSamples, nSamples);
	}
	else {
		nSamples = 0;
	}
	retval = nSamples;		// retval remains the number of samples read
#if DEBUG_IO
	debug_timer += nSamples;
	if (debug_timer >= quisk_sound_state.sample_rate)		// one second
		debug_timer = 0;
#endif
#if DEBUG_IO > 2
	ptimer (nSamples);
#endif
	quisk_sound_state.latencyCapt = nSamples;	// samples available
#if DEBUG_IO > 1
	QuiskPrintTime("  read samples", 0);
#endif
	// Perhaps record the samples to a file
	if (want_record) {
		if (is_recording_samples) {
			record_samples(cSamples, nSamples);   // Record samples
		}
		else if (file_name_samples[0]) {
			if (record_samples(NULL, -1))	 // Open file
				is_recording_samples = 1;
		}
	}
	else if (is_recording_samples) {
		record_samples(NULL, -2);		 // Close file
		is_recording_samples = 0;
	}
#if ! DEBUG_MIC
	nSamples = quisk_process_samples(cSamples, nSamples);
#endif
#if DEBUG_IO > 1
	QuiskPrintTime("  process samples", 0);
#endif
	if (Playback.portaudio_index < 0)
		quisk_play_alsa(&Playback, nSamples, cSamples, 1);
	else
		quisk_play_portaudio(&Playback, nSamples, cSamples, 1);
	if (rxMode == 7) {
		if (DigitalOutput.portaudio_index < 0)
			quisk_play_alsa(&DigitalOutput, nSamples, cSamples, 0);
		else
			quisk_play_portaudio(&DigitalOutput, nSamples, cSamples, 0);
	}
	// Perhaps record the speaker audio to a file
	if (want_record) {
		if (is_recording_audio) {
			record_audio(cSamples, nSamples);   // Record samples
		}
		else if (file_name_audio[0]) {
			if (record_audio(NULL, -1))	 // Open file
				is_recording_audio = 1;
		}
	}
	else if (is_recording_audio) {
		record_audio(NULL, -2);		 // Close file
		is_recording_audio = 0;
	}

#if DEBUG_IO > 1
	QuiskPrintTime("  play samples", 0);
#endif
	// Read and process the microphone
	mic_count = 0;
	mic_sample_rate = quisk_sound_state.mic_sample_rate;
	if (MicCapture.handle) {
		if (MicCapture.portaudio_index < 0)
			mic_count = quisk_read_alsa(&MicCapture, cSamples);
		else
			mic_count = quisk_read_portaudio(&MicCapture, cSamples);
	}
	if (quisk_record_state == PLAYBACK) {	// Discard previous samples and replace with saved sound
		quisk_tmp_microphone(cSamples, mic_count);
	}
	if (rxMode == 7) {		// Discard previous samples and use digital samples
		if (DigitalInput.handle) {
			mic_sample_rate = DigitalInput.sample_rate;
			if (DigitalInput.portaudio_index < 0)
				mic_count = quisk_read_alsa(&DigitalInput, cSamples);
			else
				mic_count = quisk_read_portaudio(&DigitalInput, cSamples);
		}
		else {
			mic_count = 0;
		}
	}
	if (mic_count > 0) {
#if DEBUG_IO > 1
		QuiskPrintTime("  mic-read", 0);
#endif
		// quisk_process_microphone returns samples at the sample rate MIC_OUT_RATE
		mic_count = quisk_process_microphone(mic_sample_rate, cSamples, mic_count);
#if DEBUG_MIC == 1
		if ( ! quisk_is_key_down())
			for (i = 0; i < mic_count; i++)
				cSamples[i] = 0;
		for (i = 0; i < mic_count; i++)
			cSamples[i] *= (double)CLIP32 / CLIP16;	// convert 16-bit samples to 32 bits
		quisk_process_samples(cSamples, mic_count);
#endif
#if DEBUG_IO > 1
		QuiskPrintTime("  mic-proc", 0);
#endif
	}
	// Mic playback without a mic is needed for CW
	if (MicPlayback.handle) {		// Mic playback: send mic I/Q samples to a sound card
		if (rxMode == 0 || rxMode == 1) {	// Transmit CW
			is_cw = 1;
		}
		else {
			is_cw = 0;
			cwCount = 0;
			cwEnvelope = 0.0;
		}
		tx_mic_phase = cexp(( -I * 2.0 * M_PI * quisk_tx_tune_freq) / MicPlayback.sample_rate);
		if (is_cw) {	// Transmit CW; use capture device for timing, not microphone
			cwCount += (double)retval * MicPlayback.sample_rate / quisk_sound_state.sample_rate;
			mic_count = 0;
			if (quisk_is_key_down()) {
				while (cwCount >= 1.0) {
					if (cwEnvelope < 1.0) {
						cwEnvelope += 1. / (MicPlayback.sample_rate * 5e-3);	// 5 milliseconds
						if (cwEnvelope > 1.0)
							cwEnvelope = 1.0;
					}
					cSamples[mic_count++] = (CLIP16 - 1) * cwEnvelope * tuneVector * quisk_sound_state.mic_out_volume;
					tuneVector *= tx_mic_phase;
					cwCount -= 1;
				}
			}
			else {		// key is up
				while (cwCount >= 1.0) {
					if (cwEnvelope > 0.0) {
						cwEnvelope -= 1.0 / (MicPlayback.sample_rate * 5e-3);	// 5 milliseconds
						if (cwEnvelope < 0.0)
							cwEnvelope = 0.0;
					}
					cSamples[mic_count++] = (CLIP16 - 1) * cwEnvelope * tuneVector * quisk_sound_state.mic_out_volume;
					tuneVector *= tx_mic_phase;
					cwCount -= 1;
				}
			}
		}
		else {		// Transmit SSB
			if ( ! quisk_is_key_down()) {
				for (i = 0; i < mic_count; i++)
					cSamples[i] = 0.0;
			}
		}
		// Perhaps interpolate the mic samples back to the mic play rate
		mic_interp = MicPlayback.sample_rate / MIC_OUT_RATE;
		if ( ! is_cw && mic_interp > 1) {
			if (! filtInterp.dCoefs)
				quisk_filt_cInit(&filtInterp, quiskFilt12_19Coefs, sizeof(quiskFilt12_19Coefs)/sizeof(double));
			mic_count = quisk_cInterpolate(cSamples, mic_count, &filtInterp, mic_interp);
		}
		// Tune the samples to frequency
		if ( ! is_cw) {
			for (i = 0; i < mic_count; i++) {
				cSamples[i] = conj(cSamples[i]) * tuneVector * quisk_sound_state.mic_out_volume;
				tuneVector *= tx_mic_phase;
			}
		}
		// delay the I or Q channel by one sample
		if (MicPlayback.channel_Delay >= 0)
			delay_sample(&MicPlayback, (double *)cSamples, mic_count);
		// amplitude and phase corrections
		if (MicPlayback.doAmplPhase)
			correct_sample (&MicPlayback, cSamples, mic_count);
		// play mic samples
		if (MicPlayback.portaudio_index < 0)
			quisk_play_alsa(&MicPlayback, mic_count, cSamples, 0);
		else
			quisk_play_portaudio(&MicPlayback, mic_count, cSamples, 0);
#if DEBUG_MIC == 2
		quisk_process_samples(cSamples, mic_count);
#endif
	}
#if DEBUG_IO > 1
	QuiskPrintTime("  finished", 0);
#endif
	// Return negative number for error
	return retval;
}
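In the CW branch above, keying clicks are avoided with a linear envelope that rises or falls over 5 milliseconds: cwEnvelope changes by 1/(sample_rate * 5e-3) per output sample and is clamped to the range [0, 1]. A standalone sketch of that ramp is shown below; the 48000 Hz rate is an assumed value for illustration only.

#include <stdio.h>

int main(void)
{
	double rate = 48000.0;			// assumed playback sample rate
	double step = 1.0 / (rate * 5e-3);	// full rise spread over 5 milliseconds
	double envelope = 0.0;
	int i;
	for (i = 0; i < 300; i++) {		// 300 samples is 6.25 ms at 48 kHz
		if (envelope < 1.0) {
			envelope += step;
			if (envelope > 1.0)
				envelope = 1.0;
		}
	}
	printf("envelope after 300 samples: %.3f\n", envelope);	// ramp has reached 1.0
	return 0;
}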
Example #4
int quisk_read_sound(void)	// Called from sound thread
{  // called in an infinite loop by the main program
	int i, nSamples, mic_count, mic_interp, retval, is_cw, mic_sample_rate;
	complex double tx_mic_phase;
	static double cwEnvelope=0;
	static double cwCount=0;
	static complex double tuneVector = (double)CLIP32 / CLIP16;	// Convert 16-bit to 32-bit samples
	static struct quisk_cFilter filtInterp={NULL};
	key_state = quisk_is_key_down();	// reading this once is important for predictable behavior on cork/flush

#if DEBUG_MIC == 1
	complex double tmpSamples[SAMP_BUFFER_SIZE];
#endif

	quisk_sound_state.interupts++;
#if DEBUG_IO > 1
	QuiskPrintTime("Start read_sound", 0);
#endif
	if (pt_sample_read) {			// read samples from SDR-IQ or UDP
		nSamples = (*pt_sample_read)(cSamples);
	}
	else if (Capture.handle) {		// blocking read from soundcard
		if (Capture.driver == DEV_DRIVER_PULSEAUDIO && quisk_sound_state.IQ_server[0]) {
			if (key_state == 1 && !(Capture.cork_status))
				quisk_cork_pulseaudio(&Capture, 1);
			else if (key_state == 0 && Capture.cork_status) {
				quisk_flush_pulseaudio(&Capture);
				quisk_cork_pulseaudio(&Capture, 0);
			}
		}

		nSamples = read_sound_interface(&Capture, cSamples);
		if (Capture.channel_Delay >= 0)	// delay the I or Q channel by one sample
			delay_sample(&Capture, (double *)cSamples, nSamples);
		if (Capture.doAmplPhase)		// amplitude and phase corrections
			correct_sample(&Capture, cSamples, nSamples);
	}
	else {
		nSamples = 0;
	}
	retval = nSamples;		// retval remains the number of samples read
#if DEBUG_IO
	debug_timer += nSamples;
	if (debug_timer >= quisk_sound_state.sample_rate)		// one second
		debug_timer = 0;
#endif
#if DEBUG_IO > 2
	ptimer (nSamples);
#endif
	quisk_sound_state.latencyCapt = nSamples;	// samples available
#if DEBUG_IO > 1
	QuiskPrintTime("  read samples", 0);
#endif
	// Perhaps record the samples to a file
	if (want_record) {
		if (is_recording_samples) {
			record_samples(cSamples, nSamples);   // Record samples
		}
		else if (file_name_samples[0]) {
			if (record_samples(NULL, -1))	 // Open file
				is_recording_samples = 1;
		}
	}
	else if (is_recording_samples) {
		record_samples(NULL, -2);		 // Close file
		is_recording_samples = 0;
	}
	// Perhaps write samples to a loopback device for use by another program
	if (RawSamplePlayback.handle)
		play_sound_interface(&RawSamplePlayback, nSamples, cSamples, 0, 1.0);
#if ! DEBUG_MIC
	nSamples = quisk_process_samples(cSamples, nSamples);
#endif
#if DEBUG_IO > 1
	QuiskPrintTime("  process samples", 0);
#endif

	if (quisk_record_state == PLAYBACK)
		quisk_tmp_playback(cSamples, nSamples, 1.0);		// replace radio sound
	else if (quisk_record_state == PLAY_FILE)
		quisk_file_playback(cSamples, nSamples, 1.0);		// replace radio sound
   
	// Play the demodulated audio
	play_sound_interface(&Playback, nSamples, cSamples, 1, quisk_audioVolume);
   
	// Play digital if required
	if (rxMode == 7 || rxMode == 8 || rxMode == 9)
		play_sound_interface(&DigitalOutput, nSamples, cSamples, 0, digital_output_level);
   
	// Perhaps record the speaker audio to a file
	if (want_record) {
		if (is_recording_audio) {
			record_audio(cSamples, nSamples);   // Record samples
		}
		else if (file_name_audio[0]) {
			if (record_audio(NULL, -1))	 // Open file
				is_recording_audio = 1;
		}
	}
	else if (is_recording_audio) {
		record_audio(NULL, -2);		 // Close file
		is_recording_audio = 0;
	}

#if DEBUG_IO > 1
	QuiskPrintTime("  play samples", 0);
#endif
	// Read and process the microphone
	mic_count = 0;
	mic_sample_rate = quisk_sound_state.mic_sample_rate;
	if (MicCapture.handle)
		mic_count = read_sound_interface(&MicCapture, cSamples);
	if (quisk_record_state == PLAYBACK)			// Discard previous samples and replace with saved sound
		quisk_tmp_microphone(cSamples, mic_count);
	else if (quisk_record_state == PLAY_FILE)	// Discard previous samples and replace with saved sound
		quisk_file_microphone(cSamples, mic_count);
	if (rxMode == 7 || rxMode == 8 || rxMode == 9) {		// Discard previous samples and use digital samples
		if (DigitalInput.handle) {
			if (DigitalInput.driver == DEV_DRIVER_PULSEAUDIO && quisk_sound_state.IQ_server[0]) {
				if (key_state == 0 && !(DigitalInput.cork_status)) {	// key is up, cork stream
					quisk_cork_pulseaudio(&DigitalInput, 1);
				}
				else if (key_state == 1 && DigitalInput.cork_status) {	// key is down, uncork and flush
					quisk_flush_pulseaudio(&DigitalInput);
					if (MicPlayback.handle)
						quisk_flush_pulseaudio(&MicPlayback);
					quisk_cork_pulseaudio(&DigitalInput, 0);
				}
			}
			mic_sample_rate = DigitalInput.sample_rate;
			mic_count = read_sound_interface(&DigitalInput, cSamples);
		}
		else {
			mic_count = 0;
		}
	}
	if (mic_count > 0) {
#if DEBUG_IO > 1
		QuiskPrintTime("  mic-read", 0);
#endif
#if DEBUG_MIC == 3
		quisk_process_samples(cSamples, mic_count);
#endif
		// quisk_process_microphone returns samples at the sample rate MIC_OUT_RATE
		mic_count = quisk_process_microphone(mic_sample_rate, cSamples, mic_count);
#if DEBUG_MIC == 1
		for (i = 0; i < mic_count; i++)
			tmpSamples[i] = cSamples[i] * (double)CLIP32 / CLIP16;	// convert 16-bit samples to 32 bits
		quisk_process_samples(tmpSamples, mic_count);
#endif
#if DEBUG_IO > 1
		QuiskPrintTime("  mic-proc", 0);
#endif
	}
	// Mic playback without a mic is needed for CW
	if (MicPlayback.handle) {		// Mic playback: send mic I/Q samples to a sound card
		if (rxMode == 0 || rxMode == 1) {	// Transmit CW
			is_cw = 1;
			if (MicCapture.driver == DEV_DRIVER_PULSEAUDIO && MicCapture.cork_status) {
				// uncork and flush streams; don't cork on key-up while in CW mode
				quisk_flush_pulseaudio(&MicCapture);
				quisk_cork_pulseaudio(&MicCapture, 0);
				quisk_flush_pulseaudio(&MicPlayback);
			}
		}
		else {
			is_cw = 0;
			cwCount = 0;
			cwEnvelope = 0.0;
			// Not in CW mode; cork/uncork capture streams as PTT is used.
			if (MicCapture.driver == DEV_DRIVER_PULSEAUDIO && MicCapture.handle && quisk_sound_state.IQ_server[0]) {
				if (key_state == 0 && !(MicCapture.cork_status)) {	// key is up, cork stream
					quisk_cork_pulseaudio(&MicCapture, 1);
				}
				else if (key_state == 1 && DigitalInput.cork_status) {	// key is down, uncork and flush
					quisk_flush_pulseaudio(&MicCapture);
					quisk_flush_pulseaudio(&MicPlayback);
					quisk_cork_pulseaudio(&MicCapture, 0);
				}
			}
		}
		tx_mic_phase = cexp(( -I * 2.0 * M_PI * quisk_tx_tune_freq) / MicPlayback.sample_rate);
		if (is_cw) {	// Transmit CW; use capture device for timing, not microphone
			cwCount += (double)retval * MicPlayback.sample_rate / quisk_sound_state.sample_rate;
			mic_count = 0;
			if (quisk_is_key_down()) {
				while (cwCount >= 1.0) {
					if (cwEnvelope < 1.0) {
						cwEnvelope += 1. / (MicPlayback.sample_rate * 5e-3);	// 5 milliseconds
						if (cwEnvelope > 1.0)
							cwEnvelope = 1.0;
					}
					if (quiskSpotLevel >= 0)
						cSamples[mic_count++] = (CLIP16 - 1) * cwEnvelope * quiskSpotLevel / 1000.0 * tuneVector * quisk_sound_state.mic_out_volume;
					else
						cSamples[mic_count++] = (CLIP16 - 1) * cwEnvelope * tuneVector * quisk_sound_state.mic_out_volume;
					tuneVector *= tx_mic_phase;
					cwCount -= 1;
				}
			}
			else {		// key is up
				while (cwCount >= 1.0) {
					if (cwEnvelope > 0.0) {
						cwEnvelope -= 1.0 / (MicPlayback.sample_rate * 5e-3);	// 5 milliseconds
						if (cwEnvelope < 0.0)
							cwEnvelope = 0.0;
					}
					cSamples[mic_count++] = (CLIP16 - 1) * cwEnvelope * tuneVector * quisk_sound_state.mic_out_volume;
					tuneVector *= tx_mic_phase;
					cwCount -= 1;
				}
			}
		}
		else if( ! DEBUG_MIC && ! quisk_is_key_down()) {		// Transmit SSB
			for (i = 0; i < mic_count; i++)
				cSamples[i] = 0.0;
		}
		// Perhaps interpolate the mic samples back to the mic play rate
		mic_interp = MicPlayback.sample_rate / MIC_OUT_RATE;
		if ( ! is_cw && mic_interp > 1) {
			if (! filtInterp.dCoefs)
				quisk_filt_cInit(&filtInterp, quiskFilt12_19Coefs, sizeof(quiskFilt12_19Coefs)/sizeof(double));
			mic_count = quisk_cInterpolate(cSamples, mic_count, &filtInterp, mic_interp);
		}
		// Tune the samples to frequency
		if ( ! is_cw) {
			for (i = 0; i < mic_count; i++) {
				cSamples[i] = conj(cSamples[i]) * tuneVector * quisk_sound_state.mic_out_volume;
				tuneVector *= tx_mic_phase;
			}
		}
		// delay the I or Q channel by one sample
		if (MicPlayback.channel_Delay >= 0)
			delay_sample(&MicPlayback, (double *)cSamples, mic_count);
		// amplitude and phase corrections
		if (MicPlayback.doAmplPhase)
			correct_sample (&MicPlayback, cSamples, mic_count);
		// play mic samples
		play_sound_interface(&MicPlayback, mic_count, cSamples, 0, 1.0);

#if DEBUG_MIC == 2
		quisk_process_samples(cSamples, mic_count);
#endif
	}
	prev_key_state = key_state;
#if DEBUG_IO > 1
	QuiskPrintTime("  finished", 0);
#endif
	// Return negative number for error
	return retval;
}
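Both Quisk versions tune the transmit samples with a running complex phasor: tuneVector is advanced by tx_mic_phase = cexp(-I * 2 * pi * f / rate) once per output sample, so each multiplication rotates the samples at the tuning frequency. A minimal standalone sketch of that oscillator follows; the 700 Hz offset and 48000 Hz rate are assumed values for illustration.

#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
	double freq = 700.0;						// assumed offset frequency
	double rate = 48000.0;						// assumed sample rate
	complex double phase = cexp(-I * 2.0 * M_PI * freq / rate);	// per-sample rotation
	complex double vector = 1.0;					// running phasor, as in tuneVector
	int i;
	for (i = 0; i < 4; i++) {
		// multiplying a baseband sample by vector frequency-shifts it by freq Hz
		printf("%d: %+.5f %+.5fi\n", i, creal(vector), cimag(vector));
		vector *= phase;
	}
	return 0;
}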
Example #5
void handle_recordstate(MENU *state) {
#ifdef TESTMODE
	printf("Entered record state\n");
#endif
	TEXT output;
	output.row = 0;
	output.column = 0;
	output.size = 1;
	colorcopy(output.RGB, red);
	nextState = STATE_NULL;
	int stop = 0;
	while (stop == 0) {
		keysWaitOnPress();
		if ((keysGetLeft() == 1) || (keysGetRight() == 1)) {
			printf("buttonpress\n");
			state->activeButton += 1;
			if (state->activeButton == state->buttonCount) {
				state->activeButton = 0;
			}
			updateButtonSelection(state);
			refreshButtons(state);
		}
		if (keysGetEnter() == 1) {
			printf("ENTER\n");
			switch (state->activeButton) {
			case 0:
				//get global settings
				//--start of temporary data--//
				audio_set.duration = 0.250;
				audio_set.numChannels = 1;
				audio_set.sampleRate = 44100;
				audio_set.numSamples = audio_set.duration
						* audio_set.sampleRate;
				//--end of temporary data----//
				TEXT output;
				output.row = 0;
				output.column = 0;
				output.size = 2;
				colorcopy(output.RGB, red);
				//-----record audio-----
				sprintf(output.text, "RECORDING 250ms of NOISE\n");
				draw_text(output);
				output.row++;
				if (record_audio(noiseFile, &audio_set) < 0) {
					// report the error
					nextState = STATE_NULL;
					break;
				}
				printf("record done");

				sprintf(output.text, "RECORDING\n");
				draw_text(output);
				output.row++;
				audio_set.duration = 0.450;
				if (record_audio(fileName, &audio_set) < 0) {
					// report the error
					nextState = STATE_NULL;
					break;
				}
				printf("record done");
				nextState = STATE_CALCULATE;
				stop = 1;
				break;
			default:
				nextState = STATE_MAIN;
				stop = 1;
				break;
			}
		}
	}
}
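The audio_set fields filled in above (duration, numChannels, sampleRate, numSamples) suggest a small settings structure in which numSamples is simply duration times sampleRate. A hypothetical sketch of that relationship follows; the struct definition and names are assumptions, since the original declarations are not part of this example.

#include <stdio.h>

// Hypothetical settings structure inferred from the fields used above;
// the actual project's definition may differ.
typedef struct {
	double duration;	// recording length in seconds
	int numChannels;
	int sampleRate;		// samples per second
	int numSamples;		// derived: duration * sampleRate
} AUDIO_SETTINGS;

int main(void)
{
	AUDIO_SETTINGS audio_set;
	audio_set.duration = 0.250;
	audio_set.numChannels = 1;
	audio_set.sampleRate = 44100;
	audio_set.numSamples = (int) (audio_set.duration * audio_set.sampleRate);
	printf("%d samples for %.3f s at %d Hz\n",
		audio_set.numSamples, audio_set.duration, audio_set.sampleRate);
	return 0;
}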