コード例 #1
0
ファイル: speak_lib.cpp プロジェクト: alltoy/espeak
ESPEAK_API espeak_ERROR espeak_SetVoiceByName(const char *name)
{//============================================================
  ENTER("espeak_SetVoiceByName");

//#ifdef USE_ASYNC
// I don't think there's a need to queue change voice requests
#ifdef deleted
	// Compiled-out asynchronous path: queue the voice change on the fifo
	// unless we are in synchronous mode.
	if(synchronous_mode)
		return(SetVoiceByName(name));

	t_espeak_command *cmd = create_espeak_voice_name(name);
	espeak_ERROR status = fifo_add_command(cmd);
	if(status != EE_OK)
		delete_espeak_command(cmd);   // queueing failed: free the command
	return status;
#else
	// Active path: apply the voice change immediately.
	return(SetVoiceByName(name));
#endif
}  // end of espeak_SetVoiceByName
コード例 #2
0
ファイル: speak_lib.cpp プロジェクト: HeryLong/booxsdk
ESPEAK_API int espeak_Initialize(espeak_AUDIO_OUTPUT output_type, int buf_length, const char *path, int options)
{//=============================================================================================================
ENTER("espeak_Initialize");
	// Initialise the library: set the locale, load the espeak-data from
	// 'path', select the output mode, allocate the sound/event buffers and
	// apply the default voice and speech parameters.
	// buf_length is the sound buffer length in mS (0 selects 200 mS);
	// bit 0 of 'options' enables phoneme events.
	// Returns the sample rate, or EE_INTERNAL_ERROR on allocation failure.
	int param;
	unsigned char *new_outbuf;
	espeak_EVENT *new_event_list;

	// It seems that the wctype functions don't work until the locale has been set
	// to something other than the default "C".  Then, not only Latin1 but also the
	// other characters give the correct results with iswalpha() etc.
#ifdef PLATFORM_RISCOS
	setlocale(LC_CTYPE,"ISO8859-1");
#else
	if(setlocale(LC_CTYPE,"en_US.UTF-8") == NULL)
	{
		if(setlocale(LC_CTYPE,"UTF-8") == NULL)
			setlocale(LC_CTYPE,"");
	}
#endif

	init_path(path);
	initialise();
	select_output(output_type);

	// buflength is in mS, allocate 2 bytes per sample
	if(buf_length == 0)
		buf_length = 200;
	outbuf_size = (buf_length * samplerate)/500;

	// Grow via a temporary: assigning realloc()'s result straight back to
	// outbuf would leak the old buffer if the reallocation failed.
	new_outbuf = (unsigned char*)realloc(outbuf,outbuf_size);
	if(new_outbuf == NULL)
		return(EE_INTERNAL_ERROR);
	outbuf = new_outbuf;
	out_start = outbuf;

	// allocate space for event list.  Allow 200 events per second.
	// Add a constant to allow for very small buf_length
	n_event_list = (buf_length*200)/1000 + 20;
	new_event_list = (espeak_EVENT *)realloc(event_list,sizeof(espeak_EVENT) * n_event_list);
	if(new_event_list == NULL)
		return(EE_INTERNAL_ERROR);   // event_list keeps its previous buffer
	event_list = new_event_list;

	option_phonemes = 0;
	option_phoneme_events = (options & 1);

	SetVoiceByName("default");

	for(param=0; param<N_SPEECH_PARAM; param++)
		param_stack[0].parameter[param] = param_defaults[param];

	// default speech parameters
	SetParameter(espeakRATE,170,0);
	SetParameter(espeakVOLUME,100,0);
	SetParameter(espeakCAPITALS,option_capitals,0);
	SetParameter(espeakPUNCTUATION,option_punctuation,0);
	SetParameter(espeakWORDGAP,0,0);
	DoVoiceChange(voice);

#ifdef USE_ASYNC
	fifo_init();
#endif

  return(samplerate);
}
コード例 #3
0
ファイル: transldlg.cpp プロジェクト: Shqip/ar-espeak
void TranslDlg::ReadVoice(int variant)
{//===================================
// Ask the user to pick a voice (or a voice variant) file and load it with
// SetVoiceByName().  variant != 0 means: choose a variant to append to the
// current voice (voice_name2) rather than selecting a whole new voice.
	wxString path;
	wxString filename;
	char *p;
	char vname[40];
	char fname[sizeof(path_home)+30];

	if(variant)
	{
		// remove variant from the previous voice name
		if((p = strchr(voice_name2,'+')) != NULL)
			*p = 0;

		// snprintf() rather than sprintf()/strcpy(): these names come from
		// the file system and previous selections and could otherwise
		// overflow the fixed-size buffers.
		snprintf(fname,sizeof(fname),"%s/voices/!v",path_home);
		path = wxFileSelector(_T("Load voice variant"),wxString(fname,wxConvLocal),_T(""),_T(""),_T("*"),wxOPEN);
		if(path.IsEmpty())
		{
			// cancelled: keep the current voice (with the variant stripped)
			snprintf(fname,sizeof(fname),"%s",voice_name2);
		}
		else
		{
			// the path relative to the voices/!v directory is the variant name
			filename = path.Mid(strlen(fname)+1);
			snprintf(vname,sizeof(vname),"%s",(const char *)filename.mb_str(wxConvLocal));
			snprintf(fname,sizeof(fname),"%s+%s",voice_name2,vname);
		}
	}
	else
	{
		snprintf(fname,sizeof(fname),"%s/voices",path_home);
		path = wxFileSelector(_T("Load voice"),wxString(fname,wxConvLocal),_T(""),_T(""),_T("*"),wxOPEN);
		if(path.IsEmpty())
			return;

		// the path relative to the voices directory is the voice name
		filename = path.Mid(strlen(fname)+1);
		snprintf(fname,sizeof(fname),"%s",(const char *)filename.mb_str(wxConvLocal));
	}

	if(SetVoiceByName(fname) != EE_OK)
	{
		wxLogError(_T("Failed to load voice data"));
	}
	else
	{
		// remember the full name (including any variant) for next time
		// NOTE(review): assumes voice_name2 is large enough — declared
		// elsewhere; confirm its size covers fname.
		strcpy(voice_name2,fname);
	}
	WavegenSetVoice(voice);
}
コード例 #4
0
ファイル: espeakedit.cpp プロジェクト: ashengmz/espeak
MyFrame::MyFrame(wxWindow *parent, const wxWindowID id, const wxString& title, const wxPoint& pos, const wxSize& size,
    const long style):
  wxFrame(parent, id, title, pos, size, style)
{//===================================================================================================================
// Main Frame constructor
// Builds the two side-by-side notebooks (control pages on the left, display
// pages on the right), loads the espeak phoneme data and voice, initialises
// the waveform generator and synthesizer, and starts the GUI timer.

	int error_flag = 0;      // set if phoneme data or voice failed to load
	int result;
	int param;
	int srate;               // sample rate reported by LoadPhData()

	// left-hand notebook: spectrum, voice and text-translation control pages
	notebook = new wxNotebook(this, ID_NOTEBOOK, wxDefaultPosition, wxSize(312,760));
//	notebook->AddPage(voicedlg,_T("Voice"),FALSE);
	formantdlg = new FormantDlg(notebook);
	notebook->AddPage(formantdlg,_T(" Spect"),FALSE);
	voicedlg = new VoiceDlg(notebook);

	transldlg = new TranslDlg(notebook);
	notebook->AddPage(transldlg,_T("Text"),TRUE);


	// right-hand notebook: the display/editing pages
    screenpages = new wxNotebook(this, ID_SCREENPAGES, wxDefaultPosition, wxSize(554,702));

    wxBoxSizer *framesizer = new wxBoxSizer( wxHORIZONTAL );


    framesizer->Add(
        notebook,
        0,            // make horizontally stretchable
        wxEXPAND |    // make vertically stretchable
        wxALL,        //   and make border all around
        4 );         // set border width

    framesizer->Add(
        screenpages,
        1,            // make horizontally stretchable
        wxEXPAND |    // make vertically stretchable
        wxALL,        //   and make border all around
        4 );         // set border width

    SetSizer( framesizer );      // use the sizer for layout
    framesizer->SetSizeHints( this );   // set size hints to honour minimum size
    SetSize(pos.x, pos.y, size.GetWidth(), size.GetHeight());

	LoadConfig();

	// load phontab/phondata/phonindex; returns 1 on success, -1 if the files
	// are missing, otherwise the (wrong) version number found
	if((result = LoadPhData(&srate)) != 1)
	{
		if(result == -1)
			wxLogError(_T("Failed to read espeak-data/phontab,phondata,phonindex\nPath = ")+wxString(path_home,wxConvLocal)+_T("\n\nThe 'eSpeak' package needs to be installed"));
		else
			wxLogError(_T("Wrong version of espeak-data at:\n")+ wxString(path_home,wxConvLocal)+_T("\nVersion 0x%x (expects 0x%x)"),result,version_phdata);

		error_flag = 1;
		srate = 22050;    // fall back to a default sample rate
	}
	WavegenInit(srate,0);
	WavegenInitSound();

	f_trans = stdout;
	option_ssml = 1;
	option_phoneme_input = 1;


//	if(LoadVoice(voice_name,0) == NULL)
	if(SetVoiceByName(voice_name2) != EE_OK)
	{
		// suppress the second error popup if phoneme data already failed
		if(error_flag==0)
			wxLogError(_T("Failed to load voice data"));
		strcpy(dictionary_name,"en");
	}
	WavegenSetVoice(voice);

	// reset all speech parameters to their defaults
	for(param=0; param<N_SPEECH_PARAM; param++)
		param_stack[0].parameter[param] = param_defaults[param];

	SetParameter(espeakRATE,option_speed,0);

	SetSpeed(3);
	SynthesizeInit();

	InitSpectrumDisplay();
	InitProsodyDisplay();
//	InitWaveDisplay();
	espeak_ListVoices(NULL);

   m_timer.SetOwner(this,1);

   m_timer.Start(500);   /* 0.5 timer */

}  // end of MyFrame::MyFrame
コード例 #5
0
ファイル: espeak_command.cpp プロジェクト: Jongsix/espeak
//>
//< process_espeak_command
void process_espeak_command( t_espeak_command* the_command)
{
	// Dispatch a queued command to the matching synchronous espeak call,
	// based on the command type stored in the union.
	ENTER("process_espeak_command");

	SHOW("command=0x%x\n", the_command);

	// A NULL command is silently ignored.
	if (the_command == NULL)
		return;

	// Mark the command as handled before dispatching.
	the_command->state = CS_PROCESSED;

	switch (the_command->type)
	{
	case ET_TEXT:
	{
		t_espeak_text *txt = &the_command->u.my_text;
		sync_espeak_Synth(txt->unique_identifier, txt->text, txt->size,
		                  txt->position, txt->position_type,
		                  txt->end_position, txt->flags, txt->user_data);
		break;
	}

	case ET_MARK:
	{
		t_espeak_mark *mark = &the_command->u.my_mark;
		sync_espeak_Synth_Mark(mark->unique_identifier, mark->text, mark->size,
		                       mark->index_mark, mark->end_position, mark->flags,
		                       mark->user_data);
		break;
	}

	case ET_TERMINATED_MSG:
	{
		t_espeak_terminated_msg *msg = &the_command->u.my_terminated_msg;
		sync_espeak_terminated_msg(msg->unique_identifier, msg->user_data);
		break;
	}

	case ET_KEY:
		sync_espeak_Key(the_command->u.my_key.key_name);
		break;

	case ET_CHAR:
		sync_espeak_Char(the_command->u.my_char.character);
		break;

	case ET_PARAMETER:
	{
		t_espeak_parameter *par = &the_command->u.my_param;
		SetParameter(par->parameter, par->value, par->relative);
		break;
	}

	case ET_PUNCTUATION_LIST:
		sync_espeak_SetPunctuationList(the_command->u.my_punctuation_list);
		break;

	case ET_VOICE_NAME:
		SetVoiceByName(the_command->u.my_voice_name);
		break;

	case ET_VOICE_SPEC:
		SetVoiceByProperties(&the_command->u.my_voice_spec);
		break;

	default:
		// unknown command type: programmer error
		assert(0);
		break;
	}
}
コード例 #6
0
ファイル: speak_lib.cpp プロジェクト: alltoy/espeak
static espeak_ERROR Synthesize(unsigned int unique_identifier, const void *text, int flags)
{//========================================================================================
	// Fill the buffer with output sound
	// Translates 'text' and generates speech, delivering each filled sound
	// buffer (with its event list) either to the async playback fifo or to
	// the user's synth_callback, until the text is finished or the callback
	// requests a stop.  'flags' carries the character-set / SSML / phoneme /
	// end-pause option bits.  Returns EE_OK, or EE_INTERNAL_ERROR if the
	// library was not initialised or audio dispatch fails.
	int length;              // number of samples produced in this buffer
	int finished = 0;        // non-zero when the callback asks us to stop
	int count_buffers = 0;   // NOTE(review): incremented but not read here
#ifdef USE_ASYNC
	uint32_t a_write_pos=0;  // audio write position, for event timestamps
#endif

#ifdef DEBUG_ENABLED
	ENTER("Synthesize");
	if (text)
	{
	SHOW("Synthesize > uid=%d, flags=%d, >>>text=%s<<<\n", unique_identifier, flags, text);
	}
#endif

	if((outbuf==NULL) || (event_list==NULL))
		return(EE_INTERNAL_ERROR);  // espeak_Initialize()  has not been called

	// unpack the option bits from 'flags'
	option_multibyte = flags & 7;   // character encoding (low 3 bits)
	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

#ifdef USE_ASYNC
	// remember where the audio stream currently is, so event positions can
	// be timestamped relative to it
	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		a_write_pos = wave_get_write_position(my_audio);
	}
#endif

	// no voice has been selected yet: fall back to the default voice
	if(translator == NULL)
	{
		SetVoiceByName("default");
	}

	// begin processing: translate the first clause of the text
	SpeakNextClause(NULL,text,0);

	if(my_mode == AUDIO_OUTPUT_SYNCH_PLAYBACK)
	{
		// synchronous playback: poll SynthOnTimer() until it reports done,
		// sleeping between polls (mechanism depends on the platform)
		for(;;)
		{
#ifdef PLATFORM_WINDOWS
			Sleep(300);   // 0.3s
#else
#ifdef USE_NANOSLEEP
			struct timespec period;
			struct timespec remaining;
			period.tv_sec = 0;
			period.tv_nsec = 300000000;  // 0.3 sec
			nanosleep(&period,&remaining);
#else
			sleep(1);
#endif
#endif
			if(SynthOnTimer() != 0)
				break;
		}
		return(EE_OK);
	}

	// main loop: fill outbuf with samples, emit it, then generate the next
	// batch of phonemes/clauses until the whole text has been spoken
	for(;;)
	{
#ifdef DEBUG_ENABLED
		SHOW("Synthesize > %s\n","for (next)");
#endif
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill(0);

		// out_ptr advanced by 2 bytes per 16-bit sample
		length = (out_ptr - outbuf)/2;
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = my_unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if (my_mode==AUDIO_OUTPUT_PLAYBACK)
		{
#ifdef USE_ASYNC
			// hand the buffer to the async playback machinery
			finished = create_events((short *)outbuf, length, event_list, a_write_pos);
			if(finished < 0)
				return EE_INTERNAL_ERROR;
			length = 0; // the wave data are played once.
#endif
		}
		else
		{
			// deliver the buffer to the user's callback; a non-zero return
			// means the user wants synthesis to stop
			finished = synth_callback((short *)outbuf, length, event_list);
		}
		if(finished)
		{
			SpeakNextClause(NULL,0,2);  // stop
			break;
		}

		// Generate() returns 0 when the current clause's phonemes are done
		if(Generate(phoneme_list,&n_phoneme_list,1)==0)
		{
			if(WcmdqUsed() == 0)
			{
				// don't process the next clause until the previous clause has finished generating speech.
				// This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				// SpeakNextClause() returns 0 when there is no more text
				if(SpeakNextClause(NULL,NULL,1)==0)
				{
#ifdef USE_ASYNC
					if (my_mode==AUDIO_OUTPUT_PLAYBACK)
					{
						if(dispatch_audio(NULL, 0, NULL) < 0) // TBD: test case
							return err = EE_INTERNAL_ERROR;
					}
					else
					{
						synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
					}
#else
					synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
#endif
					break;
				}
			}
		}
	}
	return(EE_OK);
}  //  end of Synthesize