Exemple #1
0
        // Reactor constructor: sets up the espeak engine and registers the reactions
        // that speak incoming Say messages and shut espeak down on system shutdown.
        eSpeak::eSpeak(std::unique_ptr<NUClear::Environment> environment) : Reactor(std::move(environment)) {

            // Initialize espeak to play directly out of the speakers with a 500ms
            // internal buffer and the default data path (nullptr). The flag 1 << 15
            // is espeakINITIALIZE_DONT_EXIT, so espeak does not call exit() if it
            // cannot find its data directory.
            espeak_Initialize(AUDIO_OUTPUT_PLAYBACK, 500, nullptr, 1 << 15);
            espeak_SetVoiceByName("default");
            espeak_SetParameter(espeakVOLUME, 100, 0);
            // espeakCAPITALS = 6: presumably raises pitch on capital letters
            // (espeak treats values > 2 as a pitch increase) — TODO confirm.
            espeak_SetParameter(espeakCAPITALS, 6, 0);

            // Sync<eSpeak> serializes these reactions so two Say messages are
            // never synthesized concurrently.
            on<Trigger<messages::output::Say>, Options<Sync<eSpeak>>>([](const messages::output::Say& message) {
                // Wait to finish the current message (if any)
                // By waiting here this reaction can finish and return to the pool
                // if it does not have to wait for another say message
                espeak_Synchronize();

                // Say the new message
                espeak_Synth(message.c_str(),       // Text
                             message.size() + 1,    // Size (including null at end)
                             0,                     // Start position
                             POS_CHARACTER,         // Position Type (irrelevant since we start at the beginning)
                             0,                     // End position (0 means no end position)
                             espeakCHARS_AUTO,      // Flags (auto encoding)
                             nullptr,               // User identifier for callback
                             nullptr                // Callback
                        );
            });

            // On system shutdown, release espeak's resources.
            on<Trigger<Shutdown>>([](const Shutdown&) {
                // Stop espeak
                espeak_Terminate();
            });
        }
Exemple #2
0
int
main(int argc, char **argv)
{
	(void)argc; // unused parameter
	(void)argv; // unused parameter

	assert(espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS, 0, NULL, espeakINITIALIZE_DONT_EXIT) == 22050);

	test_latin();
	test_latin_sentence();

	test_greek();
	test_armenian();
	test_arabic();
	test_devanagari();
	test_tibetan();
	test_sinhala();
	test_georgian();
	test_ethiopic();
	test_ideographic();
	test_fullwidth();

	test_uts51_emoji_character();
	test_uts51_text_presentation_sequence();
	test_uts51_emoji_presentation_sequence();
	test_uts51_emoji_modifier_sequence();
	test_uts51_emoji_flag_sequence();
	test_uts51_emoji_tag_sequence_emoji_character();
	test_uts51_emoji_combining_sequence();
	test_uts51_emoji_keycap_sequence();

	assert(espeak_Terminate() == EE_OK);

	return EXIT_SUCCESS;
}
Exemple #3
0
//-----------------------------------------------------------------------------
// EndSpeak - release espeak if (and only if) we previously initialized it
//-----------------------------------------------------------------------------
void RobotSpeak::EndSpeak(void)
{
    // Nothing to tear down unless initialization succeeded earlier.
    if (!_fSpeakInit)
        return;

    espeak_Terminate();
    _fSpeakInit = false;    // allow a later re-init
}
Exemple #4
0
static void queue_process_entry(struct synth_t *s)
{
	espeak_ERROR error;
	static struct espeak_entry_t *current = NULL;

	if (current != queue_peek(synth_queue)) {
		if (current)
			free_espeak_entry(current);
		current = (struct espeak_entry_t *) queue_remove(synth_queue);
	}
	pthread_mutex_unlock(&queue_guard);

	if (current->cmd != CMD_PAUSE && paused_espeak) {
		reinitialize_espeak(s);
	}

	switch (current->cmd) {
	case CMD_SET_FREQUENCY:
		error = set_frequency(s, current->value, current->adjust);
		break;
	case CMD_SET_PITCH:
		error = set_pitch(s, current->value, current->adjust);
		break;
	case CMD_SET_PUNCTUATION:
		error = set_punctuation(s, current->value, current->adjust);
		break;
	case CMD_SET_RATE:
		error = set_rate(s, current->value, current->adjust);
		break;
	case CMD_SET_VOICE:
		error = EE_OK;
		break;
	case CMD_SET_VOLUME:
		error = set_volume(s, current->value, current->adjust);
		break;
	case CMD_SPEAK_TEXT:
		s->buf = current->buf;
		s->len = current->len;
		error = speak_text(s);
		break;
	case CMD_PAUSE:
		if (!paused_espeak) {
			espeak_Cancel();
			espeak_Terminate();
			paused_espeak = 1;
		}
		break;
	default:
		break;
	}

	if (error == EE_OK) {
		free_espeak_entry(current);
		current = NULL;
	}
}
Exemple #5
0
// Destructor: drop this instance's reference; only the last surviving
// instance cancels pending speech and shuts the espeak engine down.
ESpeak::~ESpeak()
{
    if (--refcount != 0)
        return;             // other instances still alive

    cancel();
    espeak_Terminate();
}
Exemple #6
0
/*
 * Shut down espeak if it was initialized.
 * Returns 0 on success (or when nothing was initialized),
 * -1 if espeak reports an internal error during teardown.
 */
int terminate() {
  if ( 1 != initFlag ) {
    return 0;   /* never initialized: nothing to tear down */
  }

  espeak_ERROR rc = espeak_Terminate();
  return ( EE_INTERNAL_ERROR == rc ) ? -1 : 0;
}
Exemple #7
0
/*
 * Speech Dispatcher module shutdown: stop the worker threads, terminate
 * espeak, close the audio device, free queued audio and the voice list,
 * destroy all synchronization primitives, then exit the process with
 * `status`. Does not return.
 */
void
module_close(int status)
{
	DBG("Espeak: close().");

	DBG("Espeak: Terminating threads");
	/* Signal both worker threads to stop and to shut down. */
	espeak_stop_requested = TRUE;
	espeak_close_requested =TRUE;

	/* Wake anything blocked on the playback queue so it can observe the
	 * close flag; broadcast under the mutex to avoid a lost wakeup. */
	pthread_mutex_lock(&playback_queue_mutex);
	pthread_cond_broadcast(&playback_queue_condition);
	pthread_mutex_unlock(&playback_queue_mutex);

	/* Wake the threads waiting on their work semaphores. */
	sem_post(espeak_play_semaphore);
	sem_post(espeak_stop_or_pause_semaphore);
	/* Give threads a chance to quit on their own terms. */
	g_usleep(25000);

	/* Make sure threads have really exited */
	pthread_cancel(espeak_play_thread);
	pthread_cancel(espeak_stop_or_pause_thread);

	DBG("Joining  play thread.");
	pthread_join(espeak_play_thread, NULL);
	DBG("Joinging stop thread."); /* NOTE(review): typo in log string ("Joinging") — runtime text, left unchanged */
	pthread_join(espeak_stop_or_pause_thread, NULL);

	DBG("Espeak: terminating synthesis.");
	espeak_Terminate();

	DBG("Espeak: Closing audio output");
	if (module_audio_id) {
		spd_audio_close(module_audio_id);
	}

	DBG("Freeing resources.");
	espeak_clear_playback_queue();
	espeak_free_voice_list();

	/* All threads are gone; safe to destroy the primitives they used. */
	pthread_mutex_destroy(&espeak_state_mutex);
	pthread_mutex_destroy(&espeak_play_suspended_mutex);
	pthread_mutex_destroy(&espeak_stop_or_pause_suspended_mutex);
	pthread_mutex_destroy(&playback_queue_mutex);
	pthread_cond_destroy(&playback_queue_condition);
	sem_destroy(espeak_play_semaphore);
	sem_destroy(espeak_stop_or_pause_semaphore);

	exit(status);
}
Exemple #8
0
/*
 * tts_terminate - shut down the espeak engine.
 *
 * The espeak status is deliberately discarded: this wrapper always reports
 * success, preserving the original contract where callers never saw a
 * failure. (The previous code assigned the status to a variable it never
 * read, triggering set-but-unused warnings.)
 *
 * Returns 0 unconditionally.
 */
int tts_terminate(void)
{
	(void) espeak_Terminate(); /* best-effort teardown; result intentionally ignored */
	return 0;
} /* end tts_terminate */
// Shuts down the TTS engine, releasing espeak's resources.
// Always reports success to the caller.
tts_result TtsEngine::shutdown( void )
{
    // Best-effort teardown; espeak's status is not propagated.
    espeak_Terminate();
    return TTS_SUCCESS;
}
/*
 * Synthesize `text` with espeak and return it as a Sound, optionally also
 * producing a TextGrid of phoneme/word events (*tg) and the raw event Table
 * (*events). `my`/`thy` are Praat accessor macros (me->/thee->).
 * Throws a MelderError on failure; espeak is terminated on both the success
 * and failure paths.
 */
autoSound SpeechSynthesizer_to_Sound (SpeechSynthesizer me, const char32 *text, autoTextGrid *tg, autoTable *events) {
	try {
		int fsamp = espeak_Initialize (AUDIO_OUTPUT_SYNCHRONOUS, 0, nullptr, // buffer length 0 = espeak's default
			espeakINITIALIZE_PHONEME_EVENTS|espeakINITIALIZE_PHONEME_IPA);
		if (fsamp == -1) {
			Melder_throw (U"Internal espeak error.");
		}
		// Build the synthesis flags from the configured input format.
		int synth_flags = espeakCHARS_WCHAR;
		if (my d_inputTextFormat == SpeechSynthesizer_INPUT_TAGGEDTEXT) {
			synth_flags |= espeakSSML;
		}
		if (my d_inputTextFormat != SpeechSynthesizer_INPUT_TEXTONLY) {
			synth_flags |= espeakPHONEMES;
		}
		// Tell espeak (via its global) which phoneme events to emit.
		option_phoneme_events = espeakINITIALIZE_PHONEME_EVENTS; // extern int option_phoneme_events;
		if (my d_outputPhonemeCoding == SpeechSynthesizer_PHONEMECODINGS_IPA) {
			option_phoneme_events |= espeakINITIALIZE_PHONEME_IPA;
		}

		// Apply the synthesizer's prosody settings.
		espeak_SetParameter (espeakRATE, my d_wordsPerMinute, 0);
		espeak_SetParameter (espeakPITCH, my d_pitchAdjustment, 0);
		espeak_SetParameter (espeakRANGE, my d_pitchRange, 0);
		const char32 *voiceLanguageCode = SpeechSynthesizer_getVoiceLanguageCodeFromName (me, my d_voiceLanguageName);
		const char32 *voiceVariantCode = SpeechSynthesizer_getVoiceVariantCodeFromName (me, my d_voiceVariantName);
		espeakdata_SetVoiceByName ((const char *) Melder_peek32to8 (voiceLanguageCode), 
			(const char *) Melder_peek32to8 (voiceVariantCode));

		espeak_SetParameter (espeakWORDGAP, my d_wordgap * 100, 0); // espeak wordgap is in units of 10 ms
		espeak_SetParameter (espeakCAPITALS, 0, 0);
		espeak_SetParameter (espeakPUNCTUATION, espeakPUNCT_NONE, 0);

		// synthCallback fills my d_wav and appends rows to my d_events.
		espeak_SetSynthCallback (synthCallback);

		my d_events = Table_createWithColumnNames (0, U"time type type-t t-pos length a-pos sample id uniq");

		// espeakCHARS_WCHAR means the text width must match the platform's
		// wchar_t, hence the Windows-specific conversion.
		#ifdef _WIN32
                wchar_t *textW = Melder_peek32toW (text);
                espeak_Synth (textW, wcslen (textW) + 1, 0, POS_CHARACTER, 0, synth_flags, nullptr, me);
		#else
                espeak_Synth (text, str32len (text) + 1, 0, POS_CHARACTER, 0, synth_flags, nullptr, me);
		#endif
				
		espeak_Terminate ();
		autoSound thee = buffer_to_Sound (my d_wav, my d_numberOfSamples, my d_internalSamplingFrequency);

		if (my d_samplingFrequency != my d_internalSamplingFrequency) {
			thee = Sound_resample (thee.get(), my d_samplingFrequency, 50);
		}
		my d_numberOfSamples = 0; // re-use the wav-buffer
		if (tg) {
			// Expand the event time range to at least cover the sound.
			double xmin = Table_getNumericValue_Assert (my d_events.get(), 1, 1);
			if (xmin > thy xmin) {
				xmin = thy xmin;
			}
			double xmax = Table_getNumericValue_Assert (my d_events.get(), my d_events -> rows.size, 1);
			if (xmax < thy xmax) {
				xmax = thy xmax;
			}
			autoTextGrid tg1 = Table_to_TextGrid (my d_events.get(), text, xmin, xmax);
			*tg = TextGrid_extractPart (tg1.get(), thy xmin, thy xmax, 0);
		}
		if (events) {
			Table_setEventTypeString (my d_events.get());
			*events = my d_events.move();
		}
		my d_events.reset();
		return thee;
	} catch (MelderError) {
		espeak_Terminate ();
		Melder_throw (U"Text not played.");
	}
}
Exemple #11
0
/*
 * Speech-driver teardown: abort any speech in progress, then shut down the
 * espeak library. The `spk` handle is unused here; teardown is global.
 */
static void spk_destruct(volatile SpeechSynthesizer *spk)
{
	espeak_Cancel();
	espeak_Terminate();
}