Example #1
int sync_espeak_terminated_msg( uint32_t unique_identifier, void* user_data)
{//=====================================================================
	ENTER("sync_espeak_terminated_msg");

	int finished=0;

	memset(event_list, 0, 2*sizeof(espeak_EVENT));

	event_list[0].type = espeakEVENT_MSG_TERMINATED;
	event_list[0].unique_identifier = unique_identifier;
	event_list[0].user_data = user_data;
	event_list[1].type = espeakEVENT_LIST_TERMINATED;
	event_list[1].unique_identifier = unique_identifier;
	event_list[1].user_data = user_data;

	if (my_mode==AUDIO_OUTPUT_PLAYBACK)
	{
		while(1)
		{
			espeak_ERROR a_error = event_declare(event_list);
			if (a_error != EE_BUFFER_FULL)
			{
				break;
			}
			SHOW_TIME("sync_espeak_terminated_msg > EE_BUFFER_FULL\n");
			usleep(10000);
		}
	}
	else
	{
		if (synth_callback)
		{
			finished=synth_callback(NULL,0,event_list);
		}
	}
	return finished;
}
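When espeak is not playing the audio itself, the event pair queued above is handed to the client's synth callback. Below is a minimal sketch of the receiving side, assuming the public speak_lib.h contract (the callback returns 0 to continue synthesis, 1 to abort); my_synth_callback is an illustrative name, and the include path varies by packaging.

#include <espeak/speak_lib.h>

/* Walk the event list up to its espeakEVENT_LIST_TERMINATED sentinel. */
static int my_synth_callback(short *wav, int numsamples, espeak_EVENT *events)
{
	(void)wav;
	(void)numsamples;
	for (espeak_EVENT *e = events; e->type != espeakEVENT_LIST_TERMINATED; e++) {
		if (e->type == espeakEVENT_MSG_TERMINATED) {
			/* message e->unique_identifier has finished speaking;
			   e->user_data carries the pointer passed in above */
		}
	}
	return 0; /* 0 = continue synthesis, 1 = abort */
}

The callback would be registered once, before synthesis starts, with espeak_SetSynthCallback(my_synth_callback).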
Example #2
int sync_espeak_terminated_msg(uint32_t unique_identifier, void *user_data)
{
	int finished = 0;

	memset(event_list, 0, 2*sizeof(espeak_EVENT));

	event_list[0].type = espeakEVENT_MSG_TERMINATED;
	event_list[0].unique_identifier = unique_identifier;
	event_list[0].user_data = user_data;
	event_list[1].type = espeakEVENT_LIST_TERMINATED;
	event_list[1].unique_identifier = unique_identifier;
	event_list[1].user_data = user_data;

	if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
		while (1) {
			err = event_declare(event_list); // err is the file-scope espeak_ng_STATUS in this file
			if (err != ENS_EVENT_BUFFER_FULL)
				break;
			usleep(10000);
		}
	} else if (synth_callback)
		finished = synth_callback(NULL, 0, event_list);
	return finished;
}
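Example #2 is the espeak-ng refactoring of Example #1: the espeak_ERROR EE_* codes became espeak_ng_STATUS ENS_* codes, the debug macros were dropped, and the error value is kept in a file-scope variable. A small helper for turning such a status into a readable message, assuming espeak_ng.h provides espeak_ng_GetStatusCodeMessage() (a sketch, not code from the excerpt):

#include <espeak-ng/espeak_ng.h>
#include <stdio.h>

/* Print a human-readable message for a non-OK status. */
static void report_status(const char *where, espeak_ng_STATUS status)
{
	if (status != ENS_OK) {
		char msg[512];
		espeak_ng_GetStatusCodeMessage(status, msg, sizeof(msg));
		fprintf(stderr, "%s: %s\n", where, msg);
	}
}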
Example #3
static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;

	if ((outbuf == NULL) || (event_list == NULL))
		return ENS_NOT_INITIALIZED;

	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

	espeak_ng_STATUS status;
	if (translator == NULL) {
		status = espeak_ng_SetVoiceByName("en");
		if (status != ENS_OK)
			return status;
	}

	if (p_decoder == NULL)
		p_decoder = create_text_decoder();

	status = text_decoder_decode_string_multibyte(p_decoder, text, translator->encoding, flags);
	if (status != ENS_OK)
		return status;

	SpeakNextClause(0);

	for (;;) {
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill();

		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
			finished = create_events((short *)outbuf, length, event_list);
			if (finished < 0)
				return ENS_AUDIO_ERROR;
		} else if (synth_callback)
			finished = synth_callback((short *)outbuf, length, event_list);
		if (finished) {
			SpeakNextClause(2); // stop
			return ENS_SPEECH_STOPPED;
		}

		if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
			if (WcmdqUsed() == 0) {
				// Don't process the next clause until the previous clause has finished
				// generating speech. This ensures that the <audio> tag (which causes
				// end-of-clause) is at a sound buffer boundary.

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if (SpeakNextClause(1) == 0) {
					finished = 0;
					if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
						if (dispatch_audio(NULL, 0, NULL) < 0)
							return ENS_AUDIO_ERROR;
					} else if (synth_callback)
						finished = synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
					if (finished) {
						SpeakNextClause(2); // stop
						return ENS_SPEECH_STOPPED;
					}
					return ENS_OK;
				}
			}
		}
	}
}
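Synthesize() is internal; a client reaches it through the public espeak-ng entry points. A minimal caller sketch, assuming the espeak_ng.h API (the NULL data path, flags, and text are illustrative):

#include <espeak-ng/espeak_ng.h>
#include <string.h>

int main(void)
{
	espeak_ng_InitializePath(NULL); /* use the default espeak-ng-data path */
	if (espeak_ng_Initialize(NULL) != ENS_OK)
		return 1;
	if (espeak_ng_InitializeOutput(ENOUTPUT_MODE_SPEAK_AUDIO, 0, NULL) != ENS_OK)
		return 1;

	const char *text = "Hello from espeak-ng.";
	if (espeak_ng_Synthesize(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
	                         espeakCHARS_UTF8, NULL, NULL) != ENS_OK)
		return 1;

	espeak_ng_Synchronize(); /* block until the audio queue drains */
	espeak_ng_Terminate();
	return 0;
}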
Example #4
static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
{
	int a_wave_can_be_played = 1;
#ifdef USE_ASYNC
	if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
		a_wave_can_be_played = fifo_is_command_enabled();
#endif

	switch ((int)my_mode)
	{
	case ENOUTPUT_MODE_SPEAK_AUDIO:
	case ENOUTPUT_MODE_SPEAK_AUDIO | ENOUTPUT_MODE_SYNCHRONOUS:
	{
		int event_type = 0;
		if (event)
			event_type = event->type;

		if (event_type == espeakEVENT_SAMPLERATE) {
			voice_samplerate = event->id.number;

			if (out_samplerate != voice_samplerate) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
				if (out_samplerate != 0) {
					// sound was previously open with a different sample rate
					audio_object_close(my_audio);
					out_samplerate = 0;
#ifdef HAVE_SLEEP
					sleep(1);
#endif
				}
#endif
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
				int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
				if (error != 0) {
					fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
					err = ENS_AUDIO_ERROR;
					return -1;
				}
#endif
				out_samplerate = voice_samplerate;
#ifdef USE_ASYNC
				if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
					event_init();
#endif
			}
		}

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
		if (out_samplerate == 0) {
			int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
			if (error != 0) {
				fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
				err = ENS_AUDIO_ERROR;
				return -1;
			}
			out_samplerate = voice_samplerate;
		}
#endif

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
		if (outbuf && length && a_wave_can_be_played) {
			int error = audio_object_write(my_audio, (char *)outbuf, 2*length);
			if (error != 0)
				fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
		}
#endif

#ifdef USE_ASYNC
		while (event && a_wave_can_be_played) {
		// TBD: some events are filtered here, but synthesise.cpp might
		// offer insight into avoiding the creation of WORD events with
		// size=0. For example, the sentence "or ALT)." returns three
		// words: "or", "ALT" and ""; the last one has size=0.
			if ((event->type == espeakEVENT_WORD) && (event->length == 0))
				break;
			if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0) {
				err = event_declare(event);
				if (err != ENS_EVENT_BUFFER_FULL)
					break;
				usleep(10000);
				a_wave_can_be_played = fifo_is_command_enabled();
			} else
				break;
		}
#endif
	}
		break;
	case 0:
		if (synth_callback)
			synth_callback(outbuf, length, event);
		break;
	}

	return a_wave_can_be_played == 0; // 1 = stop synthesis; -1 (error) is returned earlier
}
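The audio_object_* calls above come from pcaudiolib. A standalone sketch of the same sequence (open at the voice sample rate, write 16-bit little-endian mono, close); create_audio_device_object() and audio_object_destroy() do not appear in the excerpt and are assumptions about the pcaudiolib API:

#include <pcaudiolib/audio.h>
#include <stdio.h>

static int play_s16le(const short *samples, int nsamples, int rate)
{
	struct audio_object *obj =
		create_audio_device_object(NULL, "demo", "pcaudiolib sketch");
	if (obj == NULL)
		return -1;

	int error = audio_object_open(obj, AUDIO_OBJECT_FORMAT_S16LE, rate, 1);
	if (error != 0) {
		fprintf(stderr, "error: %s\n", audio_object_strerror(obj, error));
		audio_object_destroy(obj);
		return -1;
	}

	error = audio_object_write(obj, (const char *)samples, 2 * nsamples);
	if (error != 0)
		fprintf(stderr, "error: %s\n", audio_object_strerror(obj, error));

	audio_object_close(obj);
	audio_object_destroy(obj);
	return error == 0 ? 0 : -1;
}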
Example #5
static int dispatch_audio(short* outbuf, int length, espeak_EVENT* event)
{//======================================================================
	ENTER("dispatch_audio");

	int a_wave_can_be_played = fifo_is_command_enabled();

#ifdef DEBUG_ENABLED
	SHOW("*** dispatch_audio > uid=%d, [write=%p (%d bytes)], sample=%d, a_wave_can_be_played = %d\n",
			(event) ? event->unique_identifier : 0, wave_test_get_write_buffer(), 2*length,
			(event) ? event->sample : 0,
			a_wave_can_be_played);
#endif

	switch(my_mode)
	{
	case AUDIO_OUTPUT_PLAYBACK:
	{
		int event_type=0;
		if(event)
		{
			event_type = event->type;
		}

		if(event_type == espeakEVENT_SAMPLERATE)
		{
			voice_samplerate = event->id.number;

			if(out_samplerate != voice_samplerate)
			{
				if(out_samplerate != 0)
				{
					// sound was previously open with a different sample rate
					wave_close(my_audio);
					sleep(1);
				}
				out_samplerate = voice_samplerate;
				if(!wave_init(voice_samplerate))
				{
					err = EE_INTERNAL_ERROR;
					return(-1);
				}
				wave_set_callback_is_output_enabled( fifo_is_command_enabled);
				my_audio = wave_open("alsa");
				event_init();
			}
		}

		if (outbuf && length && a_wave_can_be_played)
		{
			wave_write (my_audio, (char*)outbuf, 2*length);
		}

		while(a_wave_can_be_played) {
			// TBD: some events are filtered here, but synthesise.cpp might
			// offer insight into avoiding the creation of WORD events with
			// size=0. For example, the sentence "or ALT)." returns three
			// words: "or", "ALT" and ""; the last one has size=0.
			if (event && (event->type == espeakEVENT_WORD) && (event->length==0))
			{
				break;
			}
			espeak_ERROR a_error = event_declare(event);
			if (a_error != EE_BUFFER_FULL)
			{
				break;
			}
			SHOW_TIME("dispatch_audio > EE_BUFFER_FULL\n");
			usleep(10000);
			a_wave_can_be_played = fifo_is_command_enabled();
		}
	}
	break;

	case AUDIO_OUTPUT_RETRIEVAL:
		if (synth_callback)
		{
			synth_callback(outbuf, length, event);
		}
		break;

	case AUDIO_OUTPUT_SYNCHRONOUS:
	case AUDIO_OUTPUT_SYNCH_PLAYBACK:
		break;
	}

	if (!a_wave_can_be_played)
	{
		SHOW_TIME("dispatch_audio > synth must be stopped!\n");
	}

	SHOW_TIME("LEAVE dispatch_audio\n");

	return (a_wave_can_be_played==0); // 1 = stop synthesis; -1 (error) is returned earlier
}
Example #6
static espeak_ERROR Synthesize(unsigned int unique_identifier, const void *text, int flags)
{//========================================================================================
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;
#ifdef USE_ASYNC
	uint32_t a_write_pos=0;
#endif

#ifdef DEBUG_ENABLED
	ENTER("Synthesize");
	if (text)
	{
		SHOW("Synthesize > uid=%d, flags=%d, >>>text=%s<<<\n", unique_identifier, flags, text);
	}
#endif

	if((outbuf==NULL) || (event_list==NULL))
		return(EE_INTERNAL_ERROR); // espeak_Initialize() has not been called

	option_multibyte = flags & 7;
	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

#ifdef USE_ASYNC
	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		a_write_pos = wave_get_write_position(my_audio);
	}
#endif

	if(translator == NULL)
	{
		SetVoiceByName("default");
	}

	SpeakNextClause(NULL,text,0);

	if(my_mode == AUDIO_OUTPUT_SYNCH_PLAYBACK)
	{
		for(;;)
		{
#ifdef PLATFORM_WINDOWS
			Sleep(300);   // 0.3s
#else
#ifdef USE_NANOSLEEP
			struct timespec period;
			struct timespec remaining;
			period.tv_sec = 0;
			period.tv_nsec = 300000000;  // 0.3 sec
			nanosleep(&period,&remaining);
#else
			sleep(1);
#endif
#endif
			if(SynthOnTimer() != 0)
				break;
		}
		return(EE_OK);
	}

	for(;;)
	{
#ifdef DEBUG_ENABLED
		SHOW("Synthesize > %s\n","for (next)");
#endif
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill(0);

		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = my_unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if (my_mode==AUDIO_OUTPUT_PLAYBACK)
		{
#ifdef USE_ASYNC
			finished = create_events((short *)outbuf, length, event_list, a_write_pos);
			if(finished < 0)
				return EE_INTERNAL_ERROR;
			length = 0; // the wave data are played once.
#endif
		}
		else
		{
			finished = synth_callback((short *)outbuf, length, event_list);
		}
		if(finished)
		{
			SpeakNextClause(NULL,0,2);  // stop
			break;
		}

		if(Generate(phoneme_list,&n_phoneme_list,1)==0)
		{
			if(WcmdqUsed() == 0)
			{
				// Don't process the next clause until the previous clause has finished
				// generating speech. This ensures that the <audio> tag (which causes
				// end-of-clause) is at a sound buffer boundary.

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if(SpeakNextClause(NULL,NULL,1)==0)
				{
#ifdef USE_ASYNC
					if (my_mode==AUDIO_OUTPUT_PLAYBACK)
					{
						if(dispatch_audio(NULL, 0, NULL) < 0) // TBD: test case
							return err = EE_INTERNAL_ERROR;
					}
					else
					{
						synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
					}
#else
					synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
#endif
					break;
				}
			}
		}
	}
	return(EE_OK);
}  //  end of Synthesize
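The 0.3-second polling delay in the AUDIO_OUTPUT_SYNCH_PLAYBACK loop above is chosen by conditional compilation. The same pattern isolated as a helper; a sketch reusing the excerpt's macro names (PLATFORM_WINDOWS, USE_NANOSLEEP), not a function from the project:

#ifdef PLATFORM_WINDOWS
#include <windows.h>
#else
#include <time.h>
#include <unistd.h>
#endif

static void poll_delay(void)
{
#ifdef PLATFORM_WINDOWS
	Sleep(300); /* milliseconds */
#else
#ifdef USE_NANOSLEEP
	struct timespec period = { 0, 300000000 }; /* 0.3 sec */
	nanosleep(&period, NULL);
#else
	sleep(1); /* coarse fallback: one-second granularity */
#endif
#endif
}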