Example #1
static int WavegenFile(void)
{
    int finished;
    unsigned char wav_outbuf[1024];
    char fname[210];

    out_ptr = out_start = wav_outbuf;
    out_end = wav_outbuf + sizeof(wav_outbuf);

    finished = WavegenFill();

    if (quiet)
        return finished;

    if (f_wave == NULL) {
        // open the next numbered output file, e.g. <wavefile>_01.wav
        sprintf(fname, "%s_%.2d%s", wavefile, ++wavefile_count, filetype);
        if (OpenWaveFile(fname, samplerate) != 0)
            return 1;
    }

    if (end_of_sentence) {
        end_of_sentence = 0;
        // at a sentence boundary, start a new file once the split size is reached
        if ((samples_split > 0) && (samples_total > samples_split)) {
            CloseWaveFile();
            samples_total = 0;
        }
    }

    if (f_wave != NULL) {
        samples_total += (out_ptr - wav_outbuf)/2; // 2 bytes per 16-bit sample
        fwrite(wav_outbuf, 1, out_ptr - wav_outbuf, f_wave);
    }
    return finished;
}
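
All five examples follow the same calling contract: point out_ptr and out_start at the head of a buffer, point out_end just past its last byte, call WavegenFill(), then drain whatever was written up to out_ptr. (Older espeak versions pass a fill_zeros flag, hence WavegenFill(0) in the later examples.) Below is a minimal self-contained sketch of that contract; the stub WavegenFill() only imitates the real one in espeak's wavegen.c:

#include <stdio.h>
#include <string.h>

static unsigned char *out_ptr, *out_start, *out_end;

/* Stub: fill the window [out_ptr, out_end) with silence and report "done".
   The real WavegenFill() advances out_ptr as it synthesizes samples and
   returns non-zero once its wave-command queue is exhausted. */
static int WavegenFill(void)
{
    memset(out_ptr, 0, out_end - out_ptr);
    out_ptr = out_end;
    return 1;
}

int main(void)
{
    unsigned char wav_outbuf[1024];
    int finished;

    do {
        out_ptr = out_start = wav_outbuf;          /* reset the window */
        out_end = wav_outbuf + sizeof(wav_outbuf);
        finished = WavegenFill();                  /* synthesize into it */
        fwrite(wav_outbuf, 1, out_ptr - wav_outbuf, stdout); /* drain raw PCM */
    } while (!finished);

    return 0;
}
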
Example #2
int WavegenFile2(void)
{//==================
	int finished;
	unsigned char wav_outbuf[1024];

	out_ptr = out_start = wav_outbuf;
	out_end = wav_outbuf + sizeof(wav_outbuf);

	finished = WavegenFill(0);   // 0: don't zero-pad the buffer tail

	if(f_wave != NULL)
	{
		fwrite(wav_outbuf, 1, out_ptr-wav_outbuf, f_wave);
	}
	return(finished);
}  // end of WavegenFile
Example #3
static int wave_out(struct speech_priv* sp_priv)
{
	int isDone;

	// take a recycled buffer header off the free list
	WAVEHDR *WaveHeader = g_list_first(sp_priv->free_buffers)->data;
	sp_priv->free_buffers = g_list_remove(sp_priv->free_buffers, WaveHeader);

	out_ptr = out_start = WaveHeader->lpData;
	out_end = WaveHeader->lpData + WaveHeader->dwBufferLength;

	isDone = WavegenFill(0);

	if ( out_ptr < out_end )
	{
		// pad the unfilled tail with silence so stale data is not played
		memset ( out_ptr, 0, out_end - out_ptr );
	}
	waveOutWrite(sp_priv->h_wave_out, WaveHeader, sizeof(WAVEHDR));

	return isDone;
}
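
Example #3 pads the unfilled tail of each buffer with silence before queuing it with waveOutWrite(), and it takes each WAVEHDR from a free_buffers list. For that list to refill, headers must come back once the device has played them; here is a hypothetical completion callback in the same style (the minimal struct layout, the waveout_done name, and the locking caveat are assumptions, not the project's actual code):

#include <windows.h>
#include <glib.h>

struct speech_priv {            /* assumed minimal layout */
	HWAVEOUT h_wave_out;
	GList *free_buffers;
};

/* WOM_DONE arrives when the device has finished playing a header, at which
   point it can go back on the free list for wave_out() to reuse.  MSDN
   forbids calling most waveOut functions from inside this callback, and
   real code would need to synchronize access to free_buffers. */
static void CALLBACK waveout_done(HWAVEOUT hwo, UINT msg, DWORD_PTR instance,
                                  DWORD_PTR param1, DWORD_PTR param2)
{
	struct speech_priv *sp = (struct speech_priv *)instance;
	if (msg == WOM_DONE)
		sp->free_buffers = g_list_append(sp->free_buffers, (WAVEHDR *)param1);
}
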
Example #4
File: speech.c  Project: rhdunn/espeak
static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;

	if ((outbuf == NULL) || (event_list == NULL))
		return ENS_NOT_INITIALIZED;

	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

	espeak_ng_STATUS status;
	if (translator == NULL) {
		status = espeak_ng_SetVoiceByName("en");
		if (status != ENS_OK)
			return status;
	}

	if (p_decoder == NULL)
		p_decoder = create_text_decoder();

	status = text_decoder_decode_string_multibyte(p_decoder, text, translator->encoding, flags);
	if (status != ENS_OK)
		return status;

	SpeakNextClause(0);

	for (;;) {
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill();

		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
			finished = create_events((short *)outbuf, length, event_list);
			if (finished < 0)
				return ENS_AUDIO_ERROR;
		} else if (synth_callback)
			finished = synth_callback((short *)outbuf, length, event_list);
		if (finished) {
			SpeakNextClause(2); // stop
			return ENS_SPEECH_STOPPED;
		}

		if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
			if (WcmdqUsed() == 0) {
				// don't process the next clause until the previous clause has finished generating speech.
				// This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if (SpeakNextClause(1) == 0) {
					finished = 0;
					if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
						if (dispatch_audio(NULL, 0, NULL) < 0)
							return ENS_AUDIO_ERROR;
					} else if (synth_callback)
						finished = synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
					if (finished) {
						SpeakNextClause(2); // stop
						return ENS_SPEECH_STOPPED;
					}
					return ENS_OK;
				}
			}
		}
	}
}
Example #5
static espeak_ERROR Synthesize(unsigned int unique_identifier, const void *text, int flags)
{//========================================================================================
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;
#ifdef USE_ASYNC
	uint32_t a_write_pos=0;
#endif

#ifdef DEBUG_ENABLED
	ENTER("Synthesize");
	if (text)
	{
	SHOW("Synthesize > uid=%d, flags=%d, >>>text=%s<<<\n", unique_identifier, flags, text);
	}
#endif

	if((outbuf==NULL) || (event_list==NULL))
		return(EE_INTERNAL_ERROR);  // espeak_Initialize()  has not been called

	option_multibyte = flags & 7;
	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

#ifdef USE_ASYNC
	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		a_write_pos = wave_get_write_position(my_audio);
	}
#endif

	if(translator == NULL)
	{
		SetVoiceByName("default");
	}

	SpeakNextClause(NULL,text,0);

	if(my_mode == AUDIO_OUTPUT_SYNCH_PLAYBACK)
	{
		for(;;)
		{
#ifdef PLATFORM_WINDOWS
			Sleep(300);   // 0.3s
#else
#ifdef USE_NANOSLEEP
			struct timespec period;
			struct timespec remaining;
			period.tv_sec = 0;
			period.tv_nsec = 300000000;  // 0.3 sec
			nanosleep(&period,&remaining);
#else
			sleep(1);
#endif
#endif
			if(SynthOnTimer() != 0)
				break;
		}
		return(EE_OK);
	}

	for(;;)
	{
#ifdef DEBUG_ENABLED
		SHOW("Synthesize > %s\n","for (next)");
#endif
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill(0);

		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = my_unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if (my_mode==AUDIO_OUTPUT_PLAYBACK)
		{
#ifdef USE_ASYNC
			finished = create_events((short *)outbuf, length, event_list, a_write_pos);
			if(finished < 0)
				return EE_INTERNAL_ERROR;
			length = 0; // the wave data are played once.
#endif
		}
		else
		{
			finished = synth_callback((short *)outbuf, length, event_list);
		}
		if(finished)
		{
			SpeakNextClause(NULL,0,2);  // stop
			break;
		}

		if(Generate(phoneme_list,&n_phoneme_list,1)==0)
		{
			if(WcmdqUsed() == 0)
			{
				// don't process the next clause until the previous clause has finished generating speech.
				// This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if(SpeakNextClause(NULL,NULL,1)==0)
				{
#ifdef USE_ASYNC
					if (my_mode==AUDIO_OUTPUT_PLAYBACK)
					{
						if(dispatch_audio(NULL, 0, NULL) < 0) // TBD: test case
							return err = EE_INTERNAL_ERROR;
					}
					else
					{
						synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
					}
#else
					synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
#endif
					break;
				}
			}
		}
	}
	return(EE_OK);
}  //  end of Synthesize
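
Both versions of Synthesize hand each finished buffer to synth_callback, which applications register through the public API declared in speak_lib.h. A minimal sketch of such a consumer, assuming the classic espeak API; the callback body and the raw-PCM-to-stdout output are illustrative choices, not part of the library:

#include <stdio.h>
#include <string.h>
#include <espeak/speak_lib.h>

/* Receives the buffers produced by Synthesize(): a non-NULL wav pointer
   carries numsamples 16-bit samples, a NULL pointer marks end of data,
   and a non-zero return value asks the engine to stop. */
static int my_synth_callback(short *wav, int numsamples, espeak_EVENT *events)
{
	if (wav == NULL)
		return 0;                                   /* end of data */
	fwrite(wav, sizeof(short), numsamples, stdout); /* raw 16-bit PCM */
	return 0;                                       /* 0 = keep speaking */
}

int main(void)
{
	const char *text = "Hello from eSpeak.";

	/* AUDIO_OUTPUT_SYNCHRONOUS routes all audio through the callback
	   instead of playing it directly. */
	espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS, 0, NULL, 0);
	espeak_SetSynthCallback(my_synth_callback);
	espeak_Synth(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
	             espeakCHARS_AUTO, NULL, NULL);
	return 0;
}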