void TranslDlg::SpeakFile(void)
{//============================
// Let the user pick a text file and start speaking it
	wxString file;
	wxFileName fname;
	FILE *f_text;
	char buf[200];

	fname = wxFileName(path_speaktext);
	file = wxFileSelector(_T("Text file to speak"), fname.GetPath(), fname.GetName(), _T(""), _T("*"), wxOPEN);
	if(file == wxEmptyString)
		return;

	strcpy(buf, file.mb_str(wxConvLocal));
	f_text = fopen(buf, "r");
	if(f_text == NULL)
	{
		wxLogError(_T("Failed to read: ") + file);
		return;
	}

	path_speaktext = file;

	InitText(0);
	SpeakNextClause(f_text, NULL, 0);
	return;
}  // end of SpeakFile
static void StopSpeak(int unused)
{//==============================
// SIGINT handler: interrupt the current speech output, then re-install itself
	signal(SIGINT, SIG_IGN);

	// DEBUG
//	printf("\n*** Interrupting speech output (use Ctrl-D to actually quit).\n");
	fflush(stdout);

	SpeakNextClause(NULL, NULL, 5);
	signal(SIGINT, StopSpeak);
}  // end of StopSpeak()
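/* Illustrative sketch (not part of the original sources): how a command-line
 * front end might install StopSpeak so that Ctrl-C interrupts the current
 * speech output rather than killing the program.  main_sketch() is a
 * hypothetical stand-in for the program's real main(); only signal() and the
 * functions shown above are assumed. */
#include <signal.h>

int main_sketch(void)
{
	signal(SIGINT, StopSpeak);   // Ctrl-C now interrupts speech; StopSpeak re-installs itself
	/* ... open the input file and call SpeakNextClause(f_text, NULL, 0),
	   as SpeakFile() does above ... */
	return 0;
}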
void MyFrame::OnSpeak(wxCommandEvent& event)
{//=========================================
// Dispatch the Speak menu commands
	switch(event.GetId())
	{
	case MENU_SPEAK_TRANSLATE:
	case MENU_SPEAK_RULES:
	case MENU_SPEAK_IPA:
	case MENU_SPEAK_TEXT:
		transldlg->OnCommand(event);
		break;

	case MENU_SPEAK_FILE:
		out_ptr = NULL;
		transldlg->SpeakFile();
		break;

	case MENU_SPEAK_STOP:
		SpeakNextClause(NULL, NULL, 2);
		break;

	case MENU_SPEAK_PAUSE:
		out_ptr = NULL;
		SpeakNextClause(NULL, NULL, 3);
		if(SynthStatus() & 2)
			speak_menu->SetLabel(MENU_SPEAK_PAUSE, _T("&Resume"));   // synthesis is now paused
		else
		{
			speak_menu->SetLabel(MENU_SPEAK_PAUSE, _T("&Pause"));
		}
		break;

	case MENU_SPEAK_VOICE:
		transldlg->ReadVoice(0);
		SetVoiceTitle(voice_name2);
		break;

	case MENU_SPEAK_VOICE_VARIANT:
		transldlg->ReadVoice(1);
		SetVoiceTitle(voice_name2);
		break;
	}
}
static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;

	if ((outbuf == NULL) || (event_list == NULL))
		return ENS_NOT_INITIALIZED;

	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

	espeak_ng_STATUS status;
	if (translator == NULL) {
		status = espeak_ng_SetVoiceByName("en");
		if (status != ENS_OK)
			return status;
	}

	if (p_decoder == NULL)
		p_decoder = create_text_decoder();

	status = text_decoder_decode_string_multibyte(p_decoder, text, translator->encoding, flags);
	if (status != ENS_OK)
		return status;

	SpeakNextClause(0);

	for (;;) {
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill();

		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
			finished = create_events((short *)outbuf, length, event_list);
			if (finished < 0)
				return ENS_AUDIO_ERROR;
		} else if (synth_callback)
			finished = synth_callback((short *)outbuf, length, event_list);
		if (finished) {
			SpeakNextClause(2); // stop
			return ENS_SPEECH_STOPPED;
		}

		if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
			if (WcmdqUsed() == 0) {
				// don't process the next clause until the previous clause has finished generating speech.
				// This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if (SpeakNextClause(1) == 0) {
					finished = 0;
					if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
						if (dispatch_audio(NULL, 0, NULL) < 0)
							return ENS_AUDIO_ERROR;
					} else if (synth_callback)
						finished = synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
					if (finished) {
						SpeakNextClause(2); // stop
						return ENS_SPEECH_STOPPED;
					}
					return ENS_OK;
				}
			}
		}
	}
}
static espeak_ERROR Synthesize(unsigned int unique_identifier, const void *text, int flags)
{//========================================================================================
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;
#ifdef USE_ASYNC
	uint32_t a_write_pos=0;
#endif

#ifdef DEBUG_ENABLED
	ENTER("Synthesize");
	if (text)
	{
		SHOW("Synthesize > uid=%d, flags=%d, >>>text=%s<<<\n", unique_identifier, flags, text);
	}
#endif

	if((outbuf==NULL) || (event_list==NULL))
		return(EE_INTERNAL_ERROR);  // espeak_Initialize() has not been called

	option_multibyte = flags & 7;
	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;
	count_samples = 0;

#ifdef USE_ASYNC
	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		a_write_pos = wave_get_write_position(my_audio);
	}
#endif

	if(translator == NULL)
	{
		SetVoiceByName("default");
	}

	SpeakNextClause(NULL, text, 0);

	if(my_mode == AUDIO_OUTPUT_SYNCH_PLAYBACK)
	{
		for(;;)
		{
#ifdef PLATFORM_WINDOWS
			Sleep(300);   // 0.3s
#else
#ifdef USE_NANOSLEEP
			struct timespec period;
			struct timespec remaining;
			period.tv_sec = 0;
			period.tv_nsec = 300000000;  // 0.3 sec
			nanosleep(&period, &remaining);
#else
			sleep(1);
#endif
#endif
			if(SynthOnTimer() != 0)
				break;
		}
		return(EE_OK);
	}

	for(;;)
	{
#ifdef DEBUG_ENABLED
		SHOW("Synthesize > %s\n", "for (next)");
#endif
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill(0);

		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED;  // indicates end of event list
		event_list[event_list_ix].unique_identifier = my_unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if (my_mode==AUDIO_OUTPUT_PLAYBACK)
		{
#ifdef USE_ASYNC
			finished = create_events((short *)outbuf, length, event_list, a_write_pos);
			if(finished < 0)
				return EE_INTERNAL_ERROR;
			length = 0;  // the wave data are played once.
#endif
		}
		else
		{
			finished = synth_callback((short *)outbuf, length, event_list);
		}

		if(finished)
		{
			SpeakNextClause(NULL, 0, 2);  // stop
			break;
		}

		if(Generate(phoneme_list, &n_phoneme_list, 1)==0)
		{
			if(WcmdqUsed() == 0)
			{
				// don't process the next clause until the previous clause has finished generating speech.
				// This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if(SpeakNextClause(NULL, NULL, 1)==0)
				{
#ifdef USE_ASYNC
					if (my_mode==AUDIO_OUTPUT_PLAYBACK)
					{
						if(dispatch_audio(NULL, 0, NULL) < 0)  // TBD: test case
							return err = EE_INTERNAL_ERROR;
					}
					else
					{
						synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
					}
#else
					synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
#endif
					break;
				}
			}
		}
	}
	return(EE_OK);
}  // end of Synthesize
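/* Illustrative sketch (not part of the original sources): the contract both
 * versions of Synthesize() above rely on when they call synth_callback().
 * The callback receives a buffer of 16-bit samples plus an event list that is
 * terminated by an espeakEVENT_LIST_TERMINATED entry; a NULL wav pointer marks
 * the end of data for the current synthesis, and a non-zero return value asks
 * the engine to stop (Synthesize() then calls SpeakNextClause(...,2)).
 * stop_requested is a hypothetical flag used only for this example; the header
 * path may be <espeak-ng/speak_lib.h> when building against espeak-ng. */
#include <espeak/speak_lib.h>

static volatile int stop_requested = 0;   // hypothetical: set elsewhere to stop speech

static int ExampleSynthCallback(short *wav, int numsamples, espeak_EVENT *events)
{//===============================================================================
	espeak_EVENT *e;

	if(wav == NULL)
		return 0;   // NULL buffer pointer: end of data for this synthesis

	for(e = events; e->type != espeakEVENT_LIST_TERMINATED; e++)
	{
		// inspect e->type, e->unique_identifier, e->user_data as needed
	}

	// write or queue the 'numsamples' 16-bit samples in 'wav' here

	return stop_requested ? 1 : 0;   // non-zero return stops synthesis
}

// registered once after espeak_Initialize(), e.g.:
//   espeak_SetSynthCallback(ExampleSynthCallback);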
void TranslDlg::OnCommand(wxCommandEvent& event)
{//=============================================
// Translate the text in the source window and show the phonetic result,
// or (for T_PROCESS / MENU_SPEAK_TEXT) generate and play the speech
#define N_PH_LIST N_PHONEME_LIST
	void *vp;
	int translate_text = 0;
	char buf[1000];
	char phon_out[N_PH_LIST*2];
	int clause_tone;
	int clause_count;
	int use_ipa = 0;
	FILE *f;
	int fd_temp;
	char fname_temp[100];
	static int n_ph_list;
	static PHONEME_LIST ph_list[N_PH_LIST+1];

	if(translator==NULL)
	{
		wxLogError(_T("Voice not set"));
		return;
	}

	option_phonemes = 0;

	switch(event.GetId())
	{
	case T_RULES:
	case MENU_SPEAK_RULES:
#ifdef PLATFORM_POSIX
		strcpy(fname_temp, "/tmp/espeakXXXXXX");
		if((fd_temp = mkstemp(fname_temp)) >= 0)
		{
			close(fd_temp);

			if((f = fopen(fname_temp, "w+")) != NULL)
			{
				f_trans = f;   // write translation rule trace to a temp file
			}
		}
#else
		strcpy(fname_temp, tmpnam(NULL));
		if((f = fopen(fname_temp, "w+")) != NULL)
		{
			f_trans = f;   // write translation rule trace to a temp file
		}
#endif
		t_phonetic->SetDefaultStyle(style_phonetic);
		translate_text = 2;
		break;

	case T_TRANSLATE:
	case MENU_SPEAK_TRANSLATE:
		t_phonetic->SetDefaultStyle(style_phonetic);
		translate_text = 1;
		break;

	case T_TRANSLATE_IPA:
	case MENU_SPEAK_IPA:
		t_phonetic->SetDefaultStyle(style_phonetic_large);
		translate_text = 3;
		use_ipa = 1;
		break;

	case T_PROCESS:
	case MENU_SPEAK_TEXT:
		myframe->OnProsody(event);
		prosodycanvas->LayoutData(ph_list, n_ph_list);
		myframe->Refresh();
		option_phoneme_events = espeakINITIALIZE_PHONEME_EVENTS;
		option_log_frames = 1;
		MakeWave2(ph_list, n_ph_list);
		option_log_frames = 0;
		break;
	}

	if(translate_text)
	{
		option_phonemes = translate_text;
		option_multibyte = espeakCHARS_AUTO;

		SpeakNextClause(NULL, NULL, 2);   // stop speaking file

		strncpy0(buf, t_source->GetValue().mb_str(wxConvUTF8), sizeof(buf));

		phon_out[0] = 0;
		n_ph_list = 0;
		clause_count = 0;

		vp = buf;
		InitText(0);
		while((vp != NULL) && (n_ph_list < N_PH_LIST))
		{
			vp = TranslateClause(translator, NULL, vp, &clause_tone, NULL);
			CalcPitches(translator, clause_tone);
			CalcLengths(translator);

			GetTranslatedPhonemeString(translator->phon_out, sizeof(translator->phon_out), use_ipa);
			if(clause_count++ > 0)
				strcat(phon_out, " ||");
			strcat(phon_out, translator->phon_out);
			t_phonetic->SetValue(wxString(translator->phon_out, wxConvUTF8));

			if((n_ph_list + n_phoneme_list) >= N_PH_LIST)
			{
				n_phoneme_list = N_PH_LIST - n_ph_list;
			}

			memcpy(&ph_list[n_ph_list], phoneme_list, sizeof(PHONEME_LIST)*n_phoneme_list);
			n_ph_list += n_phoneme_list;
		}

		ph_list[N_PH_LIST].ph = NULL;   // to recognize overrun off list (in Generate() )

		t_phonetic->Clear();
		if(option_phonemes == 2)
		{
			option_phonemes = 0;
			rewind(f_trans);
			while(fgets(buf, sizeof(buf), f_trans) != NULL)
			{
				t_phonetic->AppendText(wxString(buf, wxConvUTF8));
			}
			t_phonetic->AppendText(_T("---\n"));
			if(f_trans != NULL)
				fclose(f_trans);
			remove(fname_temp);
		}
		t_phonetic->AppendText(wxString(phon_out, wxConvUTF8));
	}
}  // end of TranslDlg::OnCommand