int main(int argc, char* argv[]) { ISpVoice * pVoice = NULL; if (FAILED(::CoInitialize(NULL))) return FALSE; HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void **)&pVoice); if (SUCCEEDED(hr)) { hr = pVoice->Speak(L"Здравствуйте", 0, NULL); // Change pitch hr = pVoice->Speak(L"Вы меня <pitch middle = '-10'/> слышите?", SPF_IS_XML, NULL); pVoice->Release(); pVoice = NULL; } ::CoUninitialize(); return TRUE; }
int main(int argc, char* argv[]) { ISpVoice * pVoice = NULL; if (FAILED(::CoInitialize(NULL))) return FALSE; HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void **)&pVoice); if (SUCCEEDED(hr)) { //hr = pVoice->Speak(L"Hello Brad <pitch middle = '-10'/> gwaps", SPF_IS_XML, NULL); hr = pVoice->Speak(L"Myca needs vog", 0, NULL); //hr = pVoice->Speak(L"cliff golem tashed", 0, NULL); //hr = pVoice->Speak(L"Invis failing, invis failing", 0, NULL); //hr = pVoice->Speak(L"charm break, <pitch middle = '-10'/> beat down imminent", SPF_IS_XML, NULL); hr = pVoice->Speak(L"pet is loose, <pitch middle = '+10'/> pet is loose", 8, NULL); pVoice->Release(); pVoice = NULL; } ::CoUninitialize(); return TRUE; }
void Speech::speechOutput(const char* str)
{
    ISpVoice* pVoice = NULL;
    ISpObjectToken* cpAudioOutToken = NULL;
    IEnumSpObjectTokens* cpEnum = NULL;
    ULONG ulCount = 0;

    if (FAILED(::CoInitialize(NULL)))
        return;

    HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void**)&pVoice);

    // Enumerate the audio-output tokens and look for the one whose DeviceId
    // matches the configured outputDevice.
    if (SUCCEEDED(hr)) { hr = SpEnumTokens(SPCAT_AUDIOOUT, NULL, NULL, &cpEnum); }
    if (SUCCEEDED(hr)) { hr = cpEnum->GetCount(&ulCount); }

    while (SUCCEEDED(hr) && ulCount--)
    {
        hr = cpEnum->Next(1, &cpAudioOutToken, NULL);
        if (SUCCEEDED(hr))
        {
            wchar_t* deviceId = nullptr;
            hr = cpAudioOutToken->GetStringValue(L"DeviceId", &deviceId);
            bool match = SUCCEEDED(hr) && wcscmp(outputDevice, deviceId) == 0;
            ::CoTaskMemFree(deviceId);          // GetStringValue allocates with CoTaskMemAlloc
            if (match)
                break;
            cpAudioOutToken->Release();         // not the device we want; keep searching
            cpAudioOutToken = NULL;
        }
    }

    if (SUCCEEDED(hr) && cpAudioOutToken != NULL) { hr = pVoice->SetOutput(cpAudioOutToken, TRUE); }

    if (SUCCEEDED(hr))
    {
        // Convert the narrow string to a wide string for ISpVoice::Speak.
        const size_t cSize = strlen(str) + 1;
        wchar_t* wc = new wchar_t[cSize];
        size_t ret;
        mbstowcs_s(&ret, wc, cSize, str, cSize);

        hr = pVoice->Speak(wc, 0, NULL);
        delete[] wc;
    }

    if (pVoice) { pVoice->Release(); pVoice = NULL; }
    if (cpEnum) { cpEnum->Release(); }
    if (cpAudioOutToken) { cpAudioOutToken->Release(); }
    ::CoUninitialize();
}
/**********************************************************************
* CTextRun::Speak *
*-----------------*
*   Description:
*       Speaks the text associated with this CTextRun using TTS.
*
*   Return:
*       S_OK
*       Return value of ITextRange::GetText()
*       Return value of ISpVoice::Speak()
**********************************************************************/
HRESULT CTextRun::Speak( ISpVoice &rVoice )
{
    _ASSERTE( m_cpTextRange );
    if ( !m_cpTextRange )
    {
        return E_UNEXPECTED;
    }

    // Get the text and speak it.
    BSTR bstrText;
    HRESULT hr = m_cpTextRange->GetText( &bstrText );
    if ( SUCCEEDED( hr ) )
    {
        hr = rVoice.Speak( bstrText, SPF_ASYNC, NULL );
        ::SysFreeString( bstrText );
    }

    return hr;
}   /* CTextRun::Speak */
bool TextToSpeech::say(string wordToSay)
{
    // Convert string to wstring to use with the speech function.
    // Note: this element-wise copy is only correct for plain ASCII input.
    wstring wstr(wordToSay.begin(), wordToSay.end());

    ISpVoice* pVoice = NULL;

    if (FAILED(::CoInitialize(NULL)))
        return false;

    HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void**)&pVoice);
    if (SUCCEEDED(hr))
    {
        long rate = -5;             // slow the voice down slightly (SAPI rate range is -10..10)
        pVoice->SetRate(rate);

        hr = pVoice->Speak(wstr.c_str(), 0, NULL);

        pVoice->Release();
        pVoice = NULL;
    }

    ::CoUninitialize();
    return true;
}
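The begin()/end() copy above only round-trips ASCII. If the input may be UTF-8, converting through MultiByteToWideChar is safer; the helper below is a minimal sketch (the name utf8ToWide is illustrative, not part of the original class).

#include <windows.h>
#include <string>

// Minimal sketch: convert a UTF-8 std::string to std::wstring with the Win32 API.
static std::wstring utf8ToWide(const std::string& utf8)
{
    if (utf8.empty())
        return std::wstring();

    // First call asks for the required buffer size (in wide characters).
    int needed = MultiByteToWideChar(CP_UTF8, 0, utf8.c_str(), (int)utf8.size(), NULL, 0);
    std::wstring wide(needed, L'\0');
    MultiByteToWideChar(CP_UTF8, 0, utf8.c_str(), (int)utf8.size(), &wide[0], needed);
    return wide;
}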
bool speak(wchar_t* text, wchar_t* pszReqAttribs)
{
    ISpVoice* pVoice = NULL;

    HRESULT stInitializing = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void**)&pVoice);
    if (!SUCCEEDED(stInitializing))
    {
        wcout << "Error, I can't create Voice instance" << endl;
        return false;
    }

    // Find a voice token matching the requested attributes (e.g. L"Gender=Female;Language=409").
    ISpObjectToken* cpToken(NULL);
    HRESULT stTokenFinding = SpFindBestToken(SPCAT_VOICES, pszReqAttribs, L"", &cpToken);
    if (!SUCCEEDED(stTokenFinding))
    {
        pVoice->Release();
        pVoice = NULL;
        wcout << "Error, I can't find this voice " << pszReqAttribs << endl;
        return false;
    }

    HRESULT stVoiceSetting = pVoice->SetVoice(cpToken);
    if (!SUCCEEDED(stVoiceSetting))
    {
        cpToken->Release();
        cpToken = NULL;
        pVoice->Release();
        pVoice = NULL;
        wcout << "Error, I can't set this voice " << pszReqAttribs << endl;
        return false;
    }

    HRESULT stSpoken = pVoice->Speak(text, 0, NULL);

    cpToken->Release();
    cpToken = NULL;
    pVoice->Release();
    pVoice = NULL;

    if (!SUCCEEDED(stSpoken))
    {
        wcout << "Error, I couldn't play this text " << text << endl;
        return false;
    }

    return true;
}
int main(int argc, char** argv)
{
    ISpVoice* pVoice = NULL;
    WCHAR line_buffer[LINE_LENGTH];
    bool exitflag = FALSE;
    int counter = 0;

    if (FAILED(::CoInitialize(NULL)))
    {
        printf("ERROR: Couldn't initialise COM.\n");
        return FALSE;
    }

    HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void**)&pVoice);
    if (SUCCEEDED(hr))
    {
        // For some reason, if you don't do this, speech cuts out
        // (only on first announcement)...
        pVoice->Speak(L" ", SPF_IS_NOT_XML, NULL);

        // Main loop...
        while (!exitflag)
        {
            for (counter = 0; counter < LINE_LENGTH; counter++)
            {
                // Fill up the buffer (read as wint_t so WEOF can be detected reliably)...
                wint_t wc = getwchar();

                // Check for exit condition...
                if (wc == WEOF)
                {
                    line_buffer[counter] = 0;
                    exitflag = TRUE;
                    break;
                }

                line_buffer[counter] = (WCHAR)wc;

                // Break at end of line...
                if (line_buffer[counter] == L'\n')
                {
                    line_buffer[counter] = 0;
                    break;
                }
            }
            counter = 0;

            // Should the synth's UI be displayed?
            // (Compare the buffer's contents, not its address.)
            if (wcscmp(line_buffer, L"DisplayUI") == 0)
            {
                BOOL supported = FALSE;
                hr = pVoice->IsUISupported(SPDUI_EngineProperties, NULL, NULL, &supported);
                if (SUCCEEDED(hr) && supported)
                {
                    hr = pVoice->DisplayUI(NULL, NULL, SPDUI_EngineProperties, NULL, NULL);
                    if (!SUCCEEDED(hr))
                    {
                        pVoice->Speak(L"There was an error displaying the properties window.\n",
                                      SPF_IS_NOT_XML | SPF_ASYNC | SPF_PURGEBEFORESPEAK, NULL);
                    }
                }
                else
                {
                    pVoice->Speak(L"Your current voice doesn't support a properties window.\n",
                                  SPF_IS_NOT_XML | SPF_ASYNC | SPF_PURGEBEFORESPEAK, NULL);
                }
            }

            // Was the message high-priority?
            if (line_buffer[0] == L'!')
            {
                line_buffer[0] = L' ';
                pVoice->Speak(line_buffer, SPF_IS_NOT_XML | SPF_ASYNC | SPF_PURGEBEFORESPEAK, NULL);
            }
            else
            {
                pVoice->Speak(line_buffer, SPF_IS_NOT_XML | SPF_ASYNC, NULL);
            }
        }

        // Allow the user to hear the rest of the speech buffer...
        pVoice->Speak(L" ", SPF_IS_NOT_XML, NULL);

        pVoice->Release();
        pVoice = NULL;
    }
    else
    {
        printf("ERROR: Couldn't initialise SAPI 5.1.\n");
    }

    ::CoUninitialize();
    return TRUE;
}
void TextToSpeechPrivate::say(const QString &text)
{
    if (pVoice)
    {
        // QString::utf16() yields UTF-16, which is what ISpVoice::Speak expects.
        pVoice->Speak((const wchar_t *) text.utf16(), SPF_ASYNC, NULL);
    }
}
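This fragment assumes pVoice is an ISpVoice* member created elsewhere. A minimal sketch of that setup is shown below; the class layout is inferred from the fragment (and assumes COM is initialised for the thread), not taken from the original source.

#include <sapi.h>
#include <QString>

class TextToSpeechPrivate
{
public:
    TextToSpeechPrivate() : pVoice(NULL)
    {
        // Assumes CoInitialize/CoInitializeEx has already been called on this thread.
        CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void **)&pVoice);
    }

    ~TextToSpeechPrivate()
    {
        if (pVoice) { pVoice->Release(); pVoice = NULL; }
    }

    void say(const QString &text);   // defined as in the example above

private:
    ISpVoice *pVoice;
};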
void Sound::test()
{
    ISpVoice* pVoice = NULL;
    ISpObjectToken* pVoiceToken = nullptr;
    IEnumSpObjectTokens* pEnum = NULL;
    ULONG ulCount = 0;

    if (FAILED(::CoInitialize(NULL)))
    {
        return;
    }

    HRESULT hr = S_OK;

    // Find the best matching installed en-US recognizer.
    CComPtr<ISpObjectToken> cpRecognizerToken;
    if (SUCCEEDED(hr)) { hr = SpFindBestToken(SPCAT_RECOGNIZERS, L"language=409", NULL, &cpRecognizerToken); }

    // Create the in-process recognizer and immediately set its state to inactive.
    CComPtr<ISpRecognizer> cpRecognizer;
    if (SUCCEEDED(hr)) { hr = cpRecognizer.CoCreateInstance(CLSID_SpInprocRecognizer); }
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetRecognizer(cpRecognizerToken); }
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetRecoState(SPRST_INACTIVE); }

    // Create a new recognition context from the recognizer.
    CComPtr<ISpRecoContext> cpContext;
    if (SUCCEEDED(hr)) { hr = cpRecognizer->CreateRecoContext(&cpContext); }

    // Subscribe to the speech recognition event.
    if (SUCCEEDED(hr))
    {
        ULONGLONG ullEventInterest = SPFEI(SPEI_RECOGNITION);
        hr = cpContext->SetInterest(ullEventInterest, ullEventInterest);
    }

    // Establish a Win32 event to signal when speech events are available.
    HANDLE hSpeechNotifyEvent = INVALID_HANDLE_VALUE;
    if (SUCCEEDED(hr)) { hr = cpContext->SetNotifyWin32Event(); }
    if (SUCCEEDED(hr))
    {
        hSpeechNotifyEvent = cpContext->GetNotifyEventHandle();
        if (INVALID_HANDLE_VALUE == hSpeechNotifyEvent)
        {
            // Notification handle unsupported.
            hr = E_NOINTERFACE;
        }
    }

    // Initialize an audio object to use the default audio input of the system
    // and set the recognizer to use it.
    CComPtr<ISpAudio> cpAudioIn;
    if (SUCCEEDED(hr)) { hr = cpAudioIn.CoCreateInstance(CLSID_SpMMAudioIn); }
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetInput(cpAudioIn, TRUE); }

    // Populate a WAVEFORMATEX struct with the desired retained-audio format.
    WAVEFORMATEX* pWfexCoMemRetainedAudioFormat = NULL;
    GUID guidRetainedAudioFormat = GUID_NULL;
    if (SUCCEEDED(hr)) { hr = SpConvertStreamFormatEnum(SPSF_16kHz16BitMono, &guidRetainedAudioFormat, &pWfexCoMemRetainedAudioFormat); }

    // Instruct the recognizer to retain the audio from its recognition results.
    if (SUCCEEDED(hr)) { hr = cpContext->SetAudioOptions(SPAO_RETAIN_AUDIO, &guidRetainedAudioFormat, pWfexCoMemRetainedAudioFormat); }
    if (NULL != pWfexCoMemRetainedAudioFormat) { CoTaskMemFree(pWfexCoMemRetainedAudioFormat); }

    // Create a new grammar and load an SRGS grammar from file.
    CComPtr<ISpRecoGrammar> cpGrammar;
    if (SUCCEEDED(hr)) { hr = cpContext->CreateGrammar(0, &cpGrammar); }
    if (SUCCEEDED(hr)) { hr = cpGrammar->LoadCmdFromFile(L"grammar.grxml", SPLO_STATIC); }

    // Set all top-level rules in the new grammar to the active state.
    if (SUCCEEDED(hr)) { hr = cpGrammar->SetRuleState(NULL, NULL, SPRS_ACTIVE); }

    // Set the recognizer state to active to begin recognition.
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetRecoState(SPRST_ACTIVE_ALWAYS); }

    // Establish a separate Win32 event to signal the event loop exit.
    HANDLE hExitEvent = CreateEventW(NULL, FALSE, FALSE, NULL);

    // Collect the events listened for to pump the speech event loop.
    HANDLE rghEvents[] = { hSpeechNotifyEvent, hExitEvent };

    // Speech recognition event loop.
    BOOL fContinue = TRUE;
    while (fContinue && SUCCEEDED(hr))
    {
        // Wait for either a speech event or an exit event, with a 15 second timeout.
        DWORD dwMessage = WaitForMultipleObjects(sp_countof(rghEvents), rghEvents, FALSE, 15000);
        switch (dwMessage)
        {
        // With the WaitForMultipleObjects call above, WAIT_OBJECT_0 is a speech event from hSpeechNotifyEvent.
        case WAIT_OBJECT_0:
        {
            // Sequentially grab the available speech events from the speech event queue.
            CSpEvent spevent;
            while (S_OK == spevent.GetFrom(cpContext))
            {
                switch (spevent.eEventId)
                {
                case SPEI_RECOGNITION:
                {
                    // Retrieve the recognition result and output the text of that result.
                    ISpRecoResult* pResult = spevent.RecoResult();
                    LPWSTR pszCoMemResultText = NULL;
                    hr = pResult->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &pszCoMemResultText, NULL);
                    if (SUCCEEDED(hr))
                    {
                        wprintf(L"Recognition event received, text=\"%s\"\r\n", pszCoMemResultText);
                    }

                    // Also retrieve the retained audio we requested.
                    CComPtr<ISpStreamFormat> cpRetainedAudio;
                    if (SUCCEEDED(hr)) { hr = pResult->GetAudio(0, 0, &cpRetainedAudio); }

                    // To demonstrate, speak the retained audio back using ISpVoice.
                    CComPtr<ISpVoice> cpVoice;
                    if (SUCCEEDED(hr)) { hr = cpVoice.CoCreateInstance(CLSID_SpVoice); }
                    if (SUCCEEDED(hr)) { hr = cpVoice->SpeakStream(cpRetainedAudio, SPF_DEFAULT, 0); }

                    if (NULL != pszCoMemResultText) { CoTaskMemFree(pszCoMemResultText); }
                    break;
                }
                }
            }
            break;
        }
        case WAIT_OBJECT_0 + 1:
        case WAIT_TIMEOUT:
        {
            // Exit event or timeout; discontinue the speech loop.
            fContinue = FALSE;
            break;
        }
        }
    }

    // Note: the original called CoUninitialize() at this point, which would tear COM
    // down while the objects below are still in use; it is deferred to the end instead.

    CComPtr<ISpVoice> cpVoice;
    CComPtr<ISpStream> cpStream;
    CSpStreamFormat cAudioFmt;

    // Create a SAPI voice.
    hr = cpVoice.CoCreateInstance(CLSID_SpVoice);

    // Set the audio format.
    if (SUCCEEDED(hr)) { hr = cAudioFmt.AssignFormat(SPSF_22kHz16BitMono); }

    // Call SPBindToFile, a SAPI helper method, to bind the audio stream to the file.
    if (SUCCEEDED(hr)) { hr = SPBindToFile(L"c:\\ttstemp.wav", SPFM_CREATE_ALWAYS, &cpStream, &cAudioFmt.FormatId(), cAudioFmt.WaveFormatExPtr()); }

    // Set the output to cpStream so that the output audio data will be stored in cpStream.
    if (SUCCEEDED(hr)) { hr = cpVoice->SetOutput(cpStream, TRUE); }

    // Speak the text "Hello World" synchronously.
    if (SUCCEEDED(hr)) { hr = cpVoice->Speak(L"Hello World", SPF_DEFAULT, NULL); }

    // Close the stream.
    if (SUCCEEDED(hr)) { hr = cpStream->Close(); }

    // Release the stream and voice object.
    cpStream.Release();
    cpVoice.Release();

    // NOTE: cpGrammarBuilder is declared but never created here; it must be bound to an
    // object that implements ISpGrammarBuilder (for example an ISpRecoGrammar, which
    // derives from it) before GetRule/AddWordTransition can be called.
    CComPtr<ISpGrammarBuilder> cpGrammarBuilder;
    SPSTATEHANDLE hStateTravel;

    // Create (if the rule does not already exist) the top-level rule, defaulting to Active.
    hr = cpGrammarBuilder->GetRule(L"Travel", 0, SPRAF_TopLevel | SPRAF_Active, TRUE, &hStateTravel);

    // Approach 1: List all possible phrases.
    // This is the most intuitive approach, and it does not sacrifice efficiency
    // because the grammar builder will merge shared sub-phrases when possible.
    // There is only one root state, hStateTravel, and the terminal NULL state,
    // and there are six unique transitions between root state and NULL state.
    /* XML approximation:
       <rule id="Travel">
           <item> fly to Seattle </item>
           <item> fly to New York </item>
           <item> fly to Washington DC </item>
           <item> drive to Seattle </item>
           <item> drive to New York </item>
           <item> drive to Washington DC </item>
       </rule>
    */

    // Create a set of peer phrases, each containing a complete phrase.
    // Note: the word delimiter is set as " ", so that the text we attach to the
    // transition can be multiple words (for example, "fly to Seattle" is
    // implicitly "fly" + "to" + "Seattle"):
    if (SUCCEEDED(hr)) { hr = cpGrammarBuilder->AddWordTransition(hStateTravel, NULL, L"fly to Seattle", L" ", SPWT_LEXICAL, 1, NULL); }
    if (SUCCEEDED(hr)) { hr = cpGrammarBuilder->AddWordTransition(hStateTravel, NULL, L"fly to New York", L" ", SPWT_LEXICAL, 1, NULL); }
    if (SUCCEEDED(hr)) { hr = cpGrammarBuilder->AddWordTransition(hStateTravel, NULL, L"fly to Washington DC", L" ", SPWT_LEXICAL, 1, NULL); }
    if (SUCCEEDED(hr)) { hr = cpGrammarBuilder->AddWordTransition(hStateTravel, NULL, L"drive to Seattle", L" ", SPWT_LEXICAL, 1, NULL); }
    if (SUCCEEDED(hr)) { hr = cpGrammarBuilder->AddWordTransition(hStateTravel, NULL, L"drive to New York", L" ", SPWT_LEXICAL, 1, NULL); }
    if (SUCCEEDED(hr)) { hr = cpGrammarBuilder->AddWordTransition(hStateTravel, NULL, L"drive to Washington DC", L" ", SPWT_LEXICAL, 1, NULL); }

    // Find the best matching installed en-US recognizer (reusing the token declared above).
    if (SUCCEEDED(hr)) { hr = SpFindBestToken(SPCAT_RECOGNIZERS, L"language=409", NULL, &cpRecognizerToken); }

    // Create the in-process recognizer and immediately set its state to inactive.
    if (SUCCEEDED(hr)) { hr = cpRecognizer.CoCreateInstance(CLSID_SpInprocRecognizer); }
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetRecognizer(cpRecognizerToken); }
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetRecoState(SPRST_INACTIVE); }

    // Create a new recognition context from the recognizer.
    if (SUCCEEDED(hr)) { hr = cpRecognizer->CreateRecoContext(&cpContext); }

    // Subscribe to the speech recognition event and end stream event.
    if (SUCCEEDED(hr))
    {
        ULONGLONG ullEventInterest = SPFEI(SPEI_RECOGNITION) | SPFEI(SPEI_END_SR_STREAM);
        hr = cpContext->SetInterest(ullEventInterest, ullEventInterest);
    }

    // Establish a Win32 event to signal when speech events are available.
    if (SUCCEEDED(hr)) { hr = cpContext->SetNotifyWin32Event(); }
    if (SUCCEEDED(hr))
    {
        hSpeechNotifyEvent = cpContext->GetNotifyEventHandle();
        if (INVALID_HANDLE_VALUE == hSpeechNotifyEvent)
        {
            // Notification handle unsupported.
            //hr = SPERR_UNINITIALIZED;
        }
    }

    // Set up an audio input stream using a .wav file and set the recognizer's input.
    CComPtr<ISpStream> cpInputStream;
    if (SUCCEEDED(hr)) { hr = SPBindToFile(L"Test.wav", SPFM_OPEN_READONLY, &cpInputStream); }
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetInput(cpInputStream, TRUE); }

    // Create a new grammar and load an SRGS grammar from file.
    if (SUCCEEDED(hr)) { hr = cpContext->CreateGrammar(0, &cpGrammar); }
    if (SUCCEEDED(hr)) { hr = cpGrammar->LoadCmdFromFile(L"grammar.grxml", SPLO_STATIC); }

    // Set all top-level rules in the new grammar to the active state.
    if (SUCCEEDED(hr)) { hr = cpGrammar->SetRuleState(NULL, NULL, SPRS_ACTIVE); }

    // Finally, set the recognizer state to active to begin recognition.
    if (SUCCEEDED(hr)) { hr = cpRecognizer->SetRecoState(SPRST_ACTIVE_ALWAYS); }

    hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void**)&pVoice);
    if (SUCCEEDED(hr))
    {
        hr = SpEnumTokens(SPCAT_VOICES, L"Gender=Female", NULL, &pEnum);

        // Get the number of voices.
        if (SUCCEEDED(hr)) { hr = pEnum->GetCount(&ulCount); }

        // Obtain a list of available voice tokens, set the voice
        // to each token in turn, and call Speak.
        while (SUCCEEDED(hr) && ulCount--)
        {
            if (pVoiceToken != nullptr)
            {
                pVoiceToken->Release();
                pVoiceToken = nullptr;
            }

            if (SUCCEEDED(hr)) { hr = pEnum->Next(1, &pVoiceToken, NULL); }
            if (SUCCEEDED(hr)) { hr = pVoice->SetVoice(pVoiceToken); }

            if (SUCCEEDED(hr))
            {
                // Build a complete SSML document; the assembled string s is what gets
                // spoken (the original built s but then passed the inner fragment only).
                const wchar_t* start =
                    L"<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>"
                    L"<speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-US\">";
                const wchar_t* end = L"</speak>";
                const wchar_t* xml =
                    L"<voice required=\"Gender=Male\"> hi! "
                    L"<prosody pitch=\"low\"> This is low pitch. </prosody>"
                    L"<prosody volume=\"x-loud\"> This is extra loud volume. </prosody></voice>";

                wstring s = start;
                s += xml;
                s += end;

                hr = pVoice->Speak(s.c_str(), SPF_IS_XML | SPF_ASYNC, 0);
                //hr = pVoice->Speak(L"How are you?", SPF_DEFAULT, NULL);
            }
        }

        /*
        if (SUCCEEDED(hr))
        {
            hr = pEnum->Next(1, &pVoiceToken, NULL);
            if (SUCCEEDED(hr))
            {
                hr = pVoice->SetVoice(pVoiceToken);

                // Set the output to the default audio device.
                if (SUCCEEDED(hr))
                {
                    hr = pVoice->SetOutput(NULL, TRUE);
                    if (SUCCEEDED(hr))
                    {
                        hr = pVoice->Speak(L"Hello, world!", SPF_DEFAULT, 0);
                    }
                }
            }
        }
        */

        if (pVoiceToken != nullptr) { pVoiceToken->Release(); }
        if (pEnum != NULL) { pEnum->Release(); }
        pVoice->Release();
    }

    ::CoUninitialize();
}
HRESULT Speak(LPCWSTR pwcs)
{
    UnPause();
    return VoiceObj->Speak(pwcs, Flags, 0);
}
HRESULT Speak()    // Start reading the stored text aloud.
{
    UnPause();
    return VoiceObj->Speak(Text.c_str(), Flags, 0);
}
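Both overloads reference VoiceObj, Flags, Text, and UnPause() without showing where they live. The sketch below is a hypothetical reconstruction of the surrounding wrapper, with member types guessed from usage rather than taken from the original source.

#include <sapi.h>
#include <string>

// Hypothetical wrapper the two Speak overloads above could belong to.
class Voice
{
public:
    ISpVoice*    VoiceObj = nullptr;      // created via CoCreateInstance(CLSID_SpVoice, ...)
    DWORD        Flags    = SPF_ASYNC;    // flags forwarded to ISpVoice::Speak
    std::wstring Text;                    // text used by the parameterless overload

    void UnPause()
    {
        // ISpVoice::Resume undoes a previous Pause(); harmless if the voice is not paused.
        if (VoiceObj) VoiceObj->Resume();
    }

    HRESULT Speak(LPCWSTR pwcs)
    {
        UnPause();
        return VoiceObj->Speak(pwcs, Flags, 0);
    }

    HRESULT Speak()
    {
        UnPause();
        return VoiceObj->Speak(Text.c_str(), Flags, 0);
    }
};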
int main(int argc, char* argv[])
{
    Arguments arguments(argc, argv);

    // Initialize Pocketsphinx.
    mic_data_t mic;
    continuous_init(arguments, mic);

    // Initialize the SAPI voice.
    ISpVoice* pVoice = NULL;
    if (FAILED(::CoInitialize(NULL)))
        return FALSE;

    HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void**)&pVoice);
    if (SUCCEEDED(hr))
        std::cout << "Speech Initialized" << std::endl;

    // Initialize Rebecca.
    AimlFacade aiml;
    GraphBuilder& builder = aiml.getGraphBuilder();
    myCallBacks callback;
    builder.setCallBacks(&callback);
    rebecca_init(arguments, builder);

    // Main code.
    try
    {
        /*
         * Send an initial conversation of "connect" to
         * annotated ALICE and get the response.
         */
        StringPimpl response = builder.getResponse("connect");

        /*
         * Get the bot name, which should be Rebecca since that is
         * the name given in the configuration file properties.xml
         * parsed above.
         */
        string botName = builder.getBotPredicate("name").c_str();

        // Send the initial opening line of the bot.
        cout << botName << " says: " << response.c_str() << endl;
        hr = pVoice->Speak(s2ws(response.c_str()).c_str(), 0, NULL);
        pVoice->WaitUntilDone(15000);

        /*
         * The main loop gets input from the user
         * until the user types '/exit'.
         */
        while (true)
        {
            //getUtterance(mic);
            //string input = string(mic.hyp);
            string input;
            getline(cin, input);

            if (input == "/exit" || input == "GOOD NIGHT")
            {
                /*
                 * The user wants to exit, so break
                 * out of the while (true) loop.
                 */
                continuous_exit(mic);
                pVoice->Release();
                pVoice = NULL;
                ::CoUninitialize();
                break;
            }
            else // The user gave an input to the bot.
            {
                // Here we get some internal Rebecca information.
                cout << endl
                     << "Internal information:" << endl
                     << "=====================" << endl
                     << input << " : "
                     << builder.getThat().c_str() << " : "
                     << builder.getTopic().c_str() << endl;

                /*
                 * Give the user input to Rebecca's loaded
                 * AIML and get the response back.
                 */
                StringPimpl response = builder.getResponse(input.c_str());
                cout << "=====================" << endl << endl;

                // Print out what Rebecca says.
                cout << botName << " says: " << response.c_str() << endl;
                hr = pVoice->Speak(s2ws(response.c_str()).c_str(), 0, NULL);
                pVoice->WaitUntilDone(15000);
            }
        }
    }
    catch (Exception& e)
    {
        cout << "[An unknown exception occurred, terminating program]" << endl;
        cout << "[" << e.what() << "]";
        return 1;
    }

    // Everything went smoothly; exit normally.
    return 0;
}