// Initializes SAPI for this engine: creates an in-process (exclusive)
// recognizer, wires the default audio input, creates a recognition context
// that posts `Msg` to the Qt window `hWnd`, and subscribes to the full set
// of recognition events. Returns S_OK on success, or the failing HRESULT
// after showing an error dialog.
HRESULT SREngine::InitializeSapi(WId hWnd, UINT Msg)
{
	HRESULT hr = S_OK;

	//FOR ONE NOT FOR ALL
	/* Exclusive-mode configuration: this process owns the engine. */
	hr = m_cpRecognizer.CoCreateInstance(CLSID_SpInprocRecognizer);  // in-process (exclusive) recognizer
	if (FAILED(hr))
	{
		// FIX: MB_OK is a Win32 MessageBox flag (value 0), not a QMessageBox
		// button; passing it yields QMessageBox::NoButton. Use QMessageBox::Ok.
		QMessageBox::information(NULL, "Error", "Create recognizer error", QMessageBox::Ok);
		return hr;
	}

	hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &m_cpAudio); // default audio-input object
	if (FAILED(hr))
	{
		QMessageBox::information(NULL, "Error", "Create default audio object error", QMessageBox::Ok);
		return hr;
	}

	hr = m_cpRecognizer->SetInput(m_cpAudio, TRUE);  // feed the engine from the default input
	if (FAILED(hr))
	{
		QMessageBox::information(NULL, "Error", "Error setINPUT", QMessageBox::Ok);
		return hr;
	}

	hr = m_cpRecognizer->CreateRecoContext(&m_cpRecoContext);   // recognition-context interface
	if (FAILED(hr))
	{
		QMessageBox::information(NULL, "Error", "Error CreateRecoContext", QMessageBox::Ok);
		return hr;
	}

	// Deliver recognition notifications as window message `Msg` to `hWnd`.
	hr = m_cpRecoContext->SetNotifyWindowMessage((HWND)hWnd, Msg, 0, 0);
	if (FAILED(hr))
	{
		QMessageBox::information(NULL, "Error", "Error SetNotifyWindowMessage", QMessageBox::Ok);
		return hr;
	}

	// Subscribe to every event class this application reacts to.
	const ULONGLONG ullInterest = SPFEI(SPEI_SOUND_START) | SPFEI(SPEI_SOUND_END) |
		SPFEI(SPEI_PHRASE_START) | SPFEI(SPEI_RECOGNITION) |
		SPFEI(SPEI_FALSE_RECOGNITION) | SPFEI(SPEI_HYPOTHESIS) |
		SPFEI(SPEI_INTERFERENCE) | SPFEI(SPEI_RECO_OTHER_CONTEXT) |
		SPFEI(SPEI_REQUEST_UI) | SPFEI(SPEI_RECO_STATE_CHANGE) |
		SPFEI(SPEI_PROPERTY_NUM_CHANGE) | SPFEI(SPEI_PROPERTY_STRING_CHANGE);
	hr = m_cpRecoContext->SetInterest(ullInterest, ullInterest);
	if (FAILED(hr))
	{
		QMessageBox::information(NULL, "Error", "Error set interest", QMessageBox::Ok);
	}

	return hr;
}
Beispiel #2
0
// CMyDlg message handler.
// Initializes SAPI dictation: in-process recognizer, recognition context
// notifying this dialog via WM_RECOEVENT, recognition-event subscription,
// default microphone input, active engine state, and a static dictation
// grammar. Returns TRUE iff every step succeeded.
BOOL CMyDlg::OnInitSpeech()
{
	HRESULT hr = S_OK;

	hr = cpRecoEngine.CoCreateInstance(CLSID_SpInprocRecognizer);

	if (SUCCEEDED(hr))
	{
		hr = cpRecoEngine->CreateRecoContext(&m_cpRecoCtxt);
	}

	if (SUCCEEDED(hr))
	{
		hr = m_cpRecoCtxt->SetNotifyWindowMessage(m_hWnd, WM_RECOEVENT, 0, 0);
	}

	if (SUCCEEDED(hr))
	{
		// Only final recognitions trigger a notification.
		const ULONGLONG ullInterest = SPFEI(SPEI_RECOGNITION);
		hr = m_cpRecoCtxt->SetInterest(ullInterest, ullInterest);
	}

	// FIX: the original overwrote hr unconditionally below and dereferenced
	// cpRecoEngine even when CoCreateInstance had failed (null pointer call).
	CComPtr<ISpAudio> cpAudio;
	if (SUCCEEDED(hr))
	{
		hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
	}

	if (SUCCEEDED(hr))
	{
		hr = cpRecoEngine->SetInput(cpAudio, TRUE);
	}

	if (SUCCEEDED(hr))
	{
		hr = cpRecoEngine->SetRecoState(SPRST_ACTIVE);
	}

	if (SUCCEEDED(hr))
		hr = m_cpRecoCtxt->CreateGrammar(GID_DICTATION, &m_cpDictationGrammar);

	if (SUCCEEDED(hr))
		hr = m_cpDictationGrammar->LoadDictation(NULL, SPLO_STATIC);

	if (SUCCEEDED(hr))
		hr = m_cpDictationGrammar->SetDictationState(SPRS_ACTIVE);

	// On any failure release the (possibly partially initialized) grammar.
	if (FAILED(hr))
		m_cpDictationGrammar.Release();

	return (hr == S_OK);
}
// Initializes the main TTS dialog: populates the output-format and voice
// combo boxes, hooks voice notifications/events, seeds the rate and volume
// sliders from the current voice, and creates the character child window.
// Returns TRUE on success; on failure shows an error and posts WM_CLOSE.
BOOL CTTSApp::OnInitDialog( HWND hWnd )
/////////////////////////////////////////////////////////////////
{
    HRESULT                         hr          = S_OK;
        
    // Store this as the "Main Dialog"
    m_hWnd  = hWnd;

    // Add some default text to the main edit control
    SetDlgItemText( hWnd, IDE_EDITBOX, _T("Enter text you wish spoken here.") );

    // Set the event mask in the rich edit control so that it notifies us when text is
    // changed in the control
    SendMessage( GetDlgItem( hWnd, IDE_EDITBOX ), EM_SETEVENTMASK, 0, ENM_CHANGE );

    // Initialize the Output Format combo box
    int i;
    for( i=0; i<NUM_OUTPUTFORMATS; i++ )
    {
        SendDlgItemMessage( hWnd, IDC_COMBO_OUTPUT, CB_ADDSTRING, 0,
                    (LPARAM)g_aszOutputFormat[i] );

        SendDlgItemMessage( hWnd, IDC_COMBO_OUTPUT, CB_SETITEMDATA, i, 
                    (LPARAM)g_aOutputFormat[i] );
    }

    // The voice must already have been created before this dialog runs.
    if ( !m_cpVoice )
    {
        hr = E_FAIL;
    }

    // Set the default output format as the current selection.
    if( SUCCEEDED( hr ) )
    {
        CComPtr<ISpStreamFormat> cpStream;
        HRESULT hrOutputStream = m_cpVoice->GetOutputStream(&cpStream);

        if (hrOutputStream == S_OK)
        {
            CSpStreamFormat Fmt;
            hr = Fmt.AssignFormat(cpStream);
            if (SUCCEEDED(hr))
            {
                // Select the combo entry matching the voice's current format.
                SPSTREAMFORMAT eFmt = Fmt.ComputeFormatEnum();
                for( i=0; i<NUM_OUTPUTFORMATS; i++ )
                {
                    if( g_aOutputFormat[i] == eFmt )
                    {
                        m_DefaultFormatIndex = i;
                        SendDlgItemMessage( hWnd, IDC_COMBO_OUTPUT, CB_SETCURSEL, m_DefaultFormatIndex, 0 );
                    }
                }
            }
        }
        else
        {
            // No current output stream: fall back to the first format.
            SendDlgItemMessage( hWnd, IDC_COMBO_OUTPUT, CB_SETCURSEL, 0, 0 );
        }
    }

    // Use the SAPI5 helper function in sphelper.h to initialize the Voice combo box.
    if ( SUCCEEDED( hr ) )
    {
        hr = SpInitTokenComboBox( GetDlgItem( hWnd, IDC_COMBO_VOICES ), SPCAT_VOICES );
    }
    
    // Best effort: failure to get a default audio-out object is not fatal.
    if ( SUCCEEDED( hr ) )
    {
        SpCreateDefaultObjectFromCategoryId( SPCAT_AUDIOOUT, &m_cpOutAudio );
    }

    // Set default voice data 
    VoiceChange();

    // Set Range for Skip Edit Box...
    SendDlgItemMessage( hWnd, IDC_SKIP_SPIN, UDM_SETRANGE, TRUE, MAKELONG( 50, -50 ) );

    // Set the notification message for the voice.
    // FIX: the original discarded this HRESULT, so a failed notification
    // setup went undetected and SetInterest still ran.
    if ( SUCCEEDED( hr ) )
    {
        hr = m_cpVoice->SetNotifyWindowMessage( hWnd, WM_TTSAPPCUSTOMEVENT, 0, 0 );
    }

    // We're interested in all TTS events
    if( SUCCEEDED( hr ) )
    {
        hr = m_cpVoice->SetInterest( SPFEI_ALL_TTS_EVENTS, SPFEI_ALL_TTS_EVENTS );
    }

    // Get default rate and volume
    if( SUCCEEDED( hr ) )
    {
        hr = m_cpVoice->GetRate( &m_DefaultRate );
        // initialize sliders and edit boxes with default rate
        if ( SUCCEEDED( hr ) )
        {
            SendDlgItemMessage( hWnd, IDC_RATE_SLIDER, TBM_SETRANGE, TRUE, MAKELONG( SPMIN_RATE, SPMAX_RATE ) );
            SendDlgItemMessage( hWnd, IDC_RATE_SLIDER, TBM_SETPOS, TRUE, m_DefaultRate );
            SendDlgItemMessage( hWnd, IDC_RATE_SLIDER, TBM_SETPAGESIZE, TRUE, 5 );
        }
    }

    if( SUCCEEDED( hr ) )
    {
        hr = m_cpVoice->GetVolume( &m_DefaultVolume );
        // initialize sliders and edit boxes with default volume
        if ( SUCCEEDED( hr ) )
        {
            SendDlgItemMessage( hWnd, IDC_VOLUME_SLIDER, TBM_SETRANGE, TRUE, MAKELONG( SPMIN_VOLUME, SPMAX_VOLUME ) );
            SendDlgItemMessage( hWnd, IDC_VOLUME_SLIDER, TBM_SETPOS, TRUE, m_DefaultVolume );
            SendDlgItemMessage( hWnd, IDC_VOLUME_SLIDER, TBM_SETPAGESIZE, TRUE, 10 );
        }
    }

    // If any SAPI initialization failed, shut down!
    if( FAILED( hr ) )
    {
        MessageBox( NULL, _T("Error initializing speech objects. Shutting down."), _T("Error"), MB_OK );
        SendMessage( hWnd, WM_CLOSE, 0, 0 );
        return(FALSE);
        
    }
    else
    {
        //
        // Create the child windows to which we'll blit our result
        //
        HWND hCharWnd = GetDlgItem(hWnd, IDC_CHARACTER);
        RECT rc;

        // Center the character inside its placeholder control.
        GetClientRect(hCharWnd, &rc);
        rc.left = (rc.right - CHARACTER_WIDTH) / 2;
        rc.top = (rc.bottom - CHARACTER_HEIGHT) / 2;
        m_hChildWnd = CreateWindow( CHILD_CLASS, NULL, 
                            WS_CHILDWINDOW | WS_VISIBLE,
                            rc.left, rc.top,
                            rc.left + CHARACTER_WIDTH, rc.top + CHARACTER_HEIGHT,
                            hCharWnd, NULL, m_hInst, NULL );

        if ( !m_hChildWnd )
        {
            MessageBox( hWnd, _T("Error initializing speech objects. Shutting down."), _T("Error"), MB_OK );
            SendMessage( hWnd, WM_CLOSE, 0, 0 );
            return(FALSE);
            
        }
        else
        {
            // Load Mouth Bitmaps and use and ImageList since we'll blit the mouth
            // and eye positions over top of the full image
            g_hListBmp = InitImageList();
        }
    }
    return(TRUE);
    
}
// Initializes SAPI command recognition: in-process engine, recognition
// context posting `event_id` to `hWnd`, recognition-only event interest,
// a command grammar loaded from data\phrases.xml (falling back to the
// compiled-in "SRGRAMMAR" resource `command_resource`), the default audio
// input, and finally activates the grammar rules. Registers an SDL system
// event listener regardless of outcome. Returns true iff everything
// succeeded; on failure any partial SAPI setup is torn down.
bool VOICEREC_init(HWND hWnd, int event_id, int grammar_id, int command_resource)
{
	HRESULT hr = S_OK;

	// Single-iteration loop used as a structured "goto cleanup": every
	// failure path breaks out.
	while (true)
	{
		// create a recognition engine
		hr = p_recogEngine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR, "Failed to create a recognition engine\n","Error");
			printf("Failed to create a recognition engine\n");
			break;
		}

		// create the command recognition context
		hr = p_recogEngine->CreateRecoContext( &p_recogContext );
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to create the command recognition context\n","Error");
			printf("Failed to create the command recognition context\n");
			break;
		}

		// Let SR know that window we want it to send event information to, and using
		// what message
		hr = p_recogContext->SetNotifyWindowMessage( hWnd, event_id, 0, 0 );
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to SetNotifyWindowMessage\n","Error");
			break;
		}

		// Tell SR what types of events interest us.  Here we only care about command
		// recognition.
		hr = p_recogContext->SetInterest( SPFEI(SPEI_RECOGNITION), SPFEI(SPEI_RECOGNITION) );
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to set events\n","Error");
			break;
		}

		// Create a grammar
		hr = p_recogContext->CreateGrammar(grammar_id, &p_grammarObject);
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to create grammar\n","Error");
			break;
		}

		// Load our grammar from data\phrases.xml, or if that doesn't exist, from the compiled in 
		// user defined ("SRGRAMMAR") resource type.
		hr = p_grammarObject->LoadCmdFromFile(L"data\\phrases.xml", SPLO_STATIC);
		if (FAILED(hr))
		{
			hr = p_grammarObject->LoadCmdFromResource(NULL, MAKEINTRESOURCEW(command_resource),
												L"SRGRAMMAR", MAKELANGID(LANG_NEUTRAL, SUBLANG_NEUTRAL),
												SPLO_STATIC);
			if (FAILED(hr))
			{
				os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to load resource SRGRAMMAR\n","Error");
				break;
			}
		}

		hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to get default audio input\n", "Error");
			break;
		}

		// Set the audio input to our token.
		hr = p_recogEngine->SetInput(cpAudio, TRUE);
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to set audio input\n", "Error");
			// FIX: `break` was missing here, so a failed SetInput fell
			// through and still tried to activate the grammar rules.
			break;
		}

		// Set rules to active, we are now listening for commands
		hr = p_grammarObject->SetRuleState(NULL, NULL, SPRS_ACTIVE );
		if (FAILED(hr))
		{
			os::dialogs::Message(os::dialogs::MESSAGEBOX_ERROR,"Failed to set listening for commands\n","Error");
			break;
		}

		break;
	}

	// if we failed and have a partially setup SAPI, close it all down
	if (FAILED(hr))
	{
		VOICEREC_deinit();
	}

	os::events::addEventListener(SDL_SYSWMEVENT, os::events::DEFAULT_LISTENER_WEIGHT, system_event_handler);

	return ( hr == S_OK);
}
Beispiel #5
0
// Speech initialization is done here.
// Sets up an in-process SAPI dictation recognizer whose input is, in order
// of preference: the supplied memory stream (pMemStream), the wav file at
// sPathToFile, or the default microphone. Completion is signalled through a
// Win32 event. Returns S_OK on success; on failure the dictation grammar is
// released and the failing HRESULT returned.
HRESULT CASRwrapper::InitSpeech(std::wstring sPathToFile, IStream * pMemStream)
{
	HRESULT hr = S_OK;

	hr = cpRecoEngine.CoCreateInstance(CLSID_SpInprocRecognizer);

	if (SUCCEEDED(hr))
	{
		hr = cpRecoEngine->CreateRecoContext(&m_cpRecoCtxt);
	}

	if (SUCCEEDED(hr))
	{
		// FIX: removed the unused locals wparam/lparam that only served the
		// commented-out callback alternative below.
		hr = m_cpRecoCtxt->SetNotifyWin32Event();
		//hr = m_cpRecoCtxt->SetNotifyCallbackFunction(SpRecCallback,wparam,lparam);
		//	hr = m_cpRecoCtxt->SetNotifyWindowMessage(m_hWnd, WM_RECOEVENT, 0, 0);
	}

	if (SUCCEEDED(hr))
	{
		// This specifies which of the recognition events are going 
		//to trigger notifications. Here, all we are interested in 
		//is the beginning and ends of sounds, as well as
		// when the engine has recognized something
		//using ISpRecoContext
		const ULONGLONG ullInterest = SPFEI(SPEI_RECOGNITION);
		hr = m_cpRecoCtxt->SetInterest(ullInterest, ullInterest);
	}

	if (SUCCEEDED(hr))
	{
		// Specifies that the grammar we want is a dictation grammar.
		// Initializes the grammar (m_cpDictationGrammar)
		// using ISpRecoContext
		hr = m_cpRecoCtxt->CreateGrammar(GID_DICTATION, &m_cpDictationGrammar);
	}

	if (SUCCEEDED(hr))
	{
		//Load the dictation tool for the grammar specified
		//using ISpRecoGrammar
		hr = m_cpDictationGrammar->LoadDictation(NULL, SPLO_STATIC);
	}

	if (!sPathToFile.empty() || pMemStream != NULL)
	{
		CComPtr<ISpStream> cpInputStream;
		if (SUCCEEDED(hr))
		{
			// Create basic SAPI stream object
			// NOTE: The helper SpBindToFile can be used to perform the following operations
			hr = cpInputStream.CoCreateInstance(CLSID_SpStream);
		}

		CSpStreamFormat sInputFormat;
		// generate WaveFormatEx structure, assuming the wav format is 44kHz, 16-bit, Mono
		if (SUCCEEDED(hr))
		{
			hr = sInputFormat.AssignFormat(SPSF_44kHz16BitMono);
		}

		if (pMemStream != NULL)
		{
			// Memory stream takes precedence over the file path.
			if (SUCCEEDED(hr))
			{
				hr = cpInputStream->SetBaseStream(pMemStream, SPDFID_WaveFormatEx, sInputFormat.WaveFormatExPtr());
			}
		}
		else
		{
			if (SUCCEEDED(hr))
			{
				//   for read-only access, since it will only be access by the SR engine
				hr = cpInputStream->BindToFile(sPathToFile.c_str(),
					SPFM_OPEN_READONLY,
					&(sInputFormat.FormatId()),
					sInputFormat.WaveFormatExPtr(),
					SPFEI_ALL_EVENTS);
			}
		}

		if (SUCCEEDED(hr))
		{
			// connect wav input to recognizer
			// SAPI will negotiate mismatched engine/input audio formats using system audio codecs, so second parameter is not important - use default of TRUE
			hr = cpRecoEngine->SetInput(cpInputStream, TRUE);
		}

	}
	else //connect to mic
	{
		// create default audio object
		CComPtr<ISpAudio> cpAudio;
		if (SUCCEEDED(hr))
		{
			hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
		}

		// set the input for the engine
		if (SUCCEEDED(hr))
		{
			hr = cpRecoEngine->SetInput(cpAudio, TRUE);
		}

		if (SUCCEEDED(hr))
		{
			hr = cpRecoEngine->SetRecoState(SPRST_ACTIVE);
		}
	}


	if (FAILED(hr))
	{
		//Release the grammar using ISpRecoGrammar
		m_cpDictationGrammar.Release();
	}

	return hr;
}
/*********************************************************************************************
* CSimpleDict::InitDialog()
*   Creates the recognition context and activates the grammar.  
*   Returns TRUE iff successful.
**********************************************************************************************/
bool CSimpleDict::InitDialog( HWND hDlg )
{
    m_hDlg = hDlg;
    
    HRESULT hr = S_OK;
    CComPtr<ISpRecognizer> cpRecoEngine;
    hr = cpRecoEngine.CoCreateInstance(CLSID_SpInprocRecognizer);

    if( SUCCEEDED( hr ) )
    {
        hr = cpRecoEngine->CreateRecoContext( &m_cpRecoCtxt );
    }


    // Set recognition notification for dictation
    if (SUCCEEDED(hr))
    {
        hr = m_cpRecoCtxt->SetNotifyWindowMessage( hDlg, WM_RECOEVENT, 0, 0 );
    }
    
    
    if (SUCCEEDED(hr))
    {
        // This specifies which of the recognition events are going to trigger notifications.
        // Here, all we are interested in is the beginning and ends of sounds, as well as
        // when the engine has recognized something
        const ULONGLONG ullInterest = SPFEI(SPEI_RECOGNITION);
        hr = m_cpRecoCtxt->SetInterest(ullInterest, ullInterest);
    }

    // create default audio object
    // FIX: the original overwrote hr unconditionally in the next three calls
    // and invoked cpRecoEngine->SetInput even when engine creation had
    // failed (null pointer call); each step is now guarded.
    CComPtr<ISpAudio> cpAudio;
    if (SUCCEEDED(hr))
    {
        hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
    }

    // set the input for the engine
    if (SUCCEEDED(hr))
    {
        hr = cpRecoEngine->SetInput(cpAudio, TRUE);
    }

    if (SUCCEEDED(hr))
    {
        hr = cpRecoEngine->SetRecoState( SPRST_ACTIVE );
    }

    if (SUCCEEDED(hr))
    {
        // Specifies that the grammar we want is a dictation grammar.
        // Initializes the grammar (m_cpDictationGrammar)
        hr = m_cpRecoCtxt->CreateGrammar( GID_DICTATION, &m_cpDictationGrammar );
    }
    if  (SUCCEEDED(hr))
    {
        hr = m_cpDictationGrammar->LoadDictation(NULL, SPLO_STATIC);
    }
    if (SUCCEEDED(hr))
    {
        hr = m_cpDictationGrammar->SetDictationState( SPRS_ACTIVE );
    }
    if (FAILED(hr))
    {
        m_cpDictationGrammar.Release();
    }

    return (hr == S_OK);
}
Beispiel #7
0
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Module implementation.
// Configures the speech-recognizer YARP module: reads options from the
// ResourceFinder, builds the SAPI engine/context/grammars (file grammar,
// runtime grammar, dictation grammar), opens the module ports, connects to
// iSpeak, and activates the file grammar. Returns true iff every step
// succeeded.
bool SpeechRecognizerModule::configure(ResourceFinder &rf )
{
    setName( rf.check("name",Value("speechRecognizer")).asString().c_str() );
    m_timeout = rf.check("timeout",Value(10000)).asInt();
    USE_LEGACY = !rf.check("noLegacy");
    m_forwardSound = rf.check("forwardSound");
    m_tmpFileFolder = rf.getHomeContextPath().c_str();
    interruptRecognition = false;

    //Deal with speech recognition
    string grammarFile = rf.check("grammarFile",Value("defaultGrammar.grxml")).asString().c_str();
    grammarFile = rf.findFile(grammarFile).c_str();

    std::wstring tmp = s2ws(grammarFile);
    LPCWSTR cwgrammarfile = tmp.c_str();

    m_useTalkBack = rf.check("talkback");

    // Initialise the SAPI objects.
    // FIX: use short-circuiting && instead of &= — the original kept
    // invoking methods on COM pointers after an earlier step had failed,
    // dereferencing null pointers (e.g. CreateRecoContext after a failed
    // CoCreateInstance).
    bool everythingIsFine = true;
    HRESULT hr;
    everythingIsFine = SUCCEEDED( m_cpRecoEngine.CoCreateInstance(CLSID_SpInprocRecognizer));
    everythingIsFine = everythingIsFine && SUCCEEDED( SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &m_cpAudio));
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpRecoEngine->CreateRecoContext( &m_cpRecoCtxt ));

    // Here, all we are interested in is the beginning and ends of sounds, as well as
    // when the engine has recognized something
    const ULONGLONG ullInterest = SPFEI(SPEI_RECOGNITION);
    everythingIsFine = everythingIsFine && SUCCEEDED(m_cpRecoCtxt->SetInterest(ullInterest, ullInterest));

    // set the input for the engine
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpRecoEngine->SetInput(m_cpAudio, TRUE));
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpRecoEngine->SetRecoState( SPRST_ACTIVE ));

    //Load grammar from file
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpRecoCtxt->CreateGrammar( 1, &m_cpGrammarFromFile ));
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpGrammarFromFile->SetGrammarState(SPGS_DISABLED));
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpGrammarFromFile->LoadCmdFromFile(cwgrammarfile, SPLO_DYNAMIC));
//  everythingIsFine &= loadGrammarFromRf(rf);

    //Create a runtime grammar
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpRecoCtxt->CreateGrammar( 2, &m_cpGrammarRuntime ));
    everythingIsFine = everythingIsFine && SUCCEEDED( m_cpGrammarRuntime->SetGrammarState(SPGS_DISABLED));

    //Create a dictation grammar
    everythingIsFine = everythingIsFine && SUCCEEDED(m_cpRecoCtxt->CreateGrammar( GID_DICTATION, &m_cpGrammarDictation ));
    everythingIsFine = everythingIsFine && SUCCEEDED(m_cpGrammarDictation->LoadDictation(NULL, SPLO_STATIC));
    everythingIsFine = everythingIsFine && SUCCEEDED(m_cpGrammarDictation->SetDictationState(SPRS_INACTIVE));
    
    //Setup thing for the raw audio processing
    everythingIsFine = everythingIsFine && SUCCEEDED(m_cAudioFmt.AssignFormat(SPSF_22kHz16BitMono));
    // NOTE(review): SetAudioOptions failure is deliberately not folded into
    // everythingIsFine in the original; kept as best-effort. Confirm intent.
    hr = m_cpRecoCtxt->SetAudioOptions(SPAO_RETAIN_AUDIO, &m_cAudioFmt.FormatId(), m_cAudioFmt.WaveFormatExPtr());
    //everythingIsFine &= SUCCEEDED(hr = SPBindToFile((const WCHAR *)"C:\\temp.wav", SPFM_CREATE_ALWAYS, &m_streamFormat, &m_cAudioFmt.FormatId(), m_cAudioFmt.WaveFormatExPtr()));

    //CComPtr <ISpStream>     cpStream = NULL;
    //CSpStreamFormat         cAudioFmt;
    //hr = cAudioFmt.AssignFormat(SPSF_22kHz16BitMono);
    //hr = SPBindToFile((const WCHAR *)"c:\\ttstemp.wav", SPFM_CREATE_ALWAYS, &cpStream,  &cAudioFmt.FormatId(), cAudioFmt.WaveFormatExPtr());

    if( everythingIsFine )
    {
        string pName = "/";
        pName += getName().c_str();
        pName += "/recog/continuous:o";
        m_portContinuousRecognition.open( pName.c_str() );

        pName = "/";
        pName += getName().c_str();
        pName += "/recog/continuousGrammar:o";
        m_portContinuousRecognitionGrammar.open( pName.c_str() );

        pName = "/";
        pName += getName().c_str();
        pName += "/recog/sound:o";   
        m_portSound.open(pName.c_str());

        //iSpeak
        pName = "/";
        pName += getName().c_str();
        pName += "/tts/iSpeak:o"; 
        m_port2iSpeak.open( pName.c_str() );
                
        pName = "/";
        pName += getName().c_str();
        pName += "/tts/iSpeak/rpc"; 
        m_port2iSpeakRpc.open( pName.c_str() );
        if (Network::connect(m_port2iSpeak.getName().c_str(),"/iSpeak")&&Network::connect(m_port2iSpeakRpc.getName().c_str(),"/iSpeak/rpc"))
            yInfo() <<"Connection to iSpeak succesfull" ;
        else
            yWarning() <<"Unable to connect to iSpeak. Connect manually." ;

        pName = "/";
        pName += getName().c_str();
        pName += "/rpc";
        m_portRPC.open( pName.c_str() );
        attach(m_portRPC);

        //Start recognition
        //everythingIsFine &= SUCCEEDED(m_cpRecoEngine->SetRecoState(SPRST_ACTIVE_ALWAYS));
        everythingIsFine = everythingIsFine && SUCCEEDED(m_cpGrammarFromFile->SetRuleState(NULL, NULL, SPRS_ACTIVE));
        everythingIsFine = everythingIsFine && SUCCEEDED( m_cpGrammarFromFile->SetGrammarState(SPGS_ENABLED));
    }

    return (everythingIsFine);
}
// Builds the objects needed for speech recognition.
// If inToWave is empty, the default microphone becomes the engine input;
// otherwise the wav file inToWave is bound as the input stream. If
// inGrammarXML is empty, plain dictation is activated; otherwise the XML
// grammar is loaded and its rules are activated.
// Throws RComException on any SAPI failure.
void RSpeechRecognition::Create(const string & inToWave,const string & inGrammarXML) throw(RComException)
{
	USES_CONVERSION;

	HRESULT hr;

	// Recognition engine object:
	//	CLSID_SpSharedRecognizer	shared, system-wide engine
	//	CLSID_SpInprocRecognizer	private, in-process engine
	
	if ( inToWave.empty() )
	{
//		hr = this->Engine.CoCreateInstance(CLSID_SpSharedRecognizer);
//		if(FAILED(hr))	throw RComException(hr , "CLSID_SpSharedRecognizer 構築 に失敗");
		hr = this->Engine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if(FAILED(hr))	throw RComException(hr , "CLSID_SpInprocRecognizer 構築 に失敗");


		CComPtr<ISpAudio> cpAudio;
		hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
		if(FAILED(hr))	throw RComException(hr , "SpCreateDefaultObjectFromCategoryId に失敗");

		// Use the default audio device as the engine input.
		hr = this->Engine->SetInput(cpAudio, TRUE);
		if(FAILED(hr))	throw RComException(hr , "SetInput に失敗");

//		hr = this->Engine->SetRecoState( SPRST_ACTIVE );
//		if(FAILED(hr))	throw RComException(hr , "SetRecoState に失敗");
	}
	else
	{
		CComPtr<ISpStream> cpStream;

		hr = this->Engine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if(FAILED(hr))	throw RComException(hr , "CLSID_SpInprocRecognizer 構築 に失敗");

		hr = cpStream.CoCreateInstance(CLSID_SpStream);
		if(FAILED(hr))	throw RComException(hr , "CoCreateInstance  CLSID_SpStream に失敗");

		hr = cpStream->BindToFile( A2W( inToWave.c_str() ) , SPFM_OPEN_READONLY , NULL , NULL,  SPFEI_ALL_EVENTS);  
		if(FAILED(hr))	throw RComException(hr , "BindToFile に失敗");

		hr = this->Engine->SetInput( cpStream, TRUE);  
		// FIX: this throw used a different RComException constructor (engine
		// pointer + CLSID_SpSharedRecognizer) even though the engine created
		// above is the in-process one; use the same (hr, message) form as
		// every other failure path in this function.
		if(FAILED(hr))	throw RComException( hr , "SetInput に失敗");
	}

	// Create the recognition-context object.
	hr = this->Engine->CreateRecoContext(&this->RecoCtxt);
	if(FAILED(hr))	throw RComException(hr , "CreateRecoContext に失敗");

	hr = this->RecoCtxt->SetNotifyWin32Event();
	if ( FAILED(hr) )	throw RComException(hr , "SetNotifyWin32Event に失敗");

	hr = this->RecoCtxt->SetInterest(SPFEI(SPEI_RECOGNITION), SPFEI(SPEI_RECOGNITION));
	if ( FAILED(hr) )	throw RComException(hr , "SetInterest に失敗");

	hr = this->RecoCtxt->SetAudioOptions(SPAO_RETAIN_AUDIO, NULL, NULL);
	if ( FAILED(hr) )	throw RComException(hr , "SetAudioOptions に失敗");


	// Create the main grammar.
	hr = this->RecoCtxt->CreateGrammar(0, &this->DictationGrammar);
	if ( FAILED(hr) )	throw RComException(hr , "CreateGrammar に失敗");

	hr = this->DictationGrammar->LoadDictation(NULL, SPLO_STATIC);
	if ( FAILED(hr) )	throw RComException(hr , "LoadDictation に失敗");

	if ( inGrammarXML.empty() )
	{
		// Start listening (free dictation).
		hr = this->DictationGrammar->SetDictationState( SPRS_ACTIVE );
		if ( FAILED(hr) )	throw RComException(hr , "SetDictationState に失敗");
	}
	else
	{
		// Load the user-specified grammar file.
		hr = this->DictationGrammar->LoadCmdFromFile( A2W( inGrammarXML.c_str() ) ,SPLO_STATIC);
		if ( FAILED(hr) )	throw RComException(hr , "LoadCmdFromFile に失敗");

		// Start listening (command rules).
		hr = this->DictationGrammar->SetRuleState(NULL, NULL, SPRS_ACTIVE );
		if ( FAILED(hr) )	throw RComException(hr , "SetRuleState に失敗");
	}
}
//音声認識のためのオブジェクトの構築.
void RSpeechRecognition::Create(const std::string & inDicticationFilterWord , const std::string & inGrammarXML , HWND inWindow , UINT inCallbackMesage )
{
	USES_CONVERSION;

	HRESULT hr;

	this->DicticationFilterWord = inDicticationFilterWord;
	this->CallbackWindowHandle = inWindow;
	this->CallbackWindowMesage = inCallbackMesage;

	//Dictation
	{
		CComPtr<ISpAudio> cpAudio;
		hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->DictationEngine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->DictationEngine->CreateRecoContext(&this->DictationRecoCtxt);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->DictationRecoCtxt->SetInterest(SPFEI(SPEI_RECOGNITION), SPFEI(SPEI_RECOGNITION));
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->DictationRecoCtxt->SetAudioOptions(SPAO_RETAIN_AUDIO, NULL, NULL);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		//認識器始動
		hr = this->DictationRecoCtxt->CreateGrammar(0, &this->DictationGrammar);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->DictationGrammar->LoadDictation(NULL, SPLO_STATIC);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->DictationRecoCtxt->SetNotifyWin32Event();
		if(FAILED(hr))	 AfxThrowOleException(hr);
	}
	//ルールベースのエンジンを作る.
	{
		CComPtr<ISpAudio> cpAudio;
		hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleEngine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		//オーディオから読み込んでね
		hr = this->RuleEngine->SetInput( cpAudio, TRUE);  
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleEngine->CreateRecoContext(&this->RuleRecoCtxt);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleRecoCtxt->SetInterest(SPFEI(SPEI_RECOGNITION), SPFEI(SPEI_RECOGNITION));
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleRecoCtxt->SetAudioOptions(SPAO_RETAIN_AUDIO, NULL, NULL);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		//認識器始動
		hr = this->RuleRecoCtxt->CreateGrammar(0, &this->RuleGrammar);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleGrammar->LoadDictation(NULL, SPLO_STATIC);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleGrammar->LoadCmdFromFile( A2W( inGrammarXML.c_str() ) ,SPLO_STATIC);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		hr = this->RuleRecoCtxt->SetNotifyCallbackFunction(__callbackRule , (WPARAM)this , 0);
		if(FAILED(hr))	 AfxThrowOleException(hr);

		//録音開始
		hr = this->RuleGrammar->SetRuleState(NULL, NULL, SPRS_ACTIVE );
		if(FAILED(hr))	 AfxThrowOleException(hr);
	}
	this->FlagCleanup();
}
// Builds the objects required for speech recognition (SAPI back end).
// Sets up two in-process recognizers: a dictation engine used as a
// confidence filter (FilterRule, Win32-event notification) and a rule-based
// engine (Temporary/Yobikake/Command rules) whose results are delivered via
// a callback on the main thread. Returns true on success, or a Windows
// error wrapped in xreturn on any SAPI failure.
xreturn::r<bool> Recognition_SAPI::Create(MainWindow* poolMainWindow)
{
	_USE_WINDOWS_ENCODING;

	HRESULT hr;
	// Default confidence thresholds and bookkeeping state.
	this->GlobalRuleNodeCount = 1;
	this->TemporaryRuleConfidenceFilter = 0.80;
	this->YobikakeRuleConfidenceFilter = 0.80;
	this->BasicRuleConfidenceFilter = 0.80;
	this->UseDictationFilter = true;
	this->TemporaryRuleCount = 0;
	this->IsNeedUpdateRule = true;
	this->PoolMainWindow = poolMainWindow;

	//Dictation
	{
		hr = this->DictationEngine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		// NOTE(review): unlike the rule engine below, no SetInput is called on
		// DictationEngine — it never receives an audio source here; confirm
		// this is intentional (e.g. input attached elsewhere before use).
		hr = this->DictationEngine->CreateRecoContext(&this->DictationRecoCtxt);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		// Listen for both successful and false recognitions.
		ULONGLONG hookevent = SPFEI(SPEI_RECOGNITION)|SPFEI(SPEI_FALSE_RECOGNITION);
		hr = this->DictationRecoCtxt->SetInterest(hookevent, hookevent);
		if(FAILED(hr))	 return xreturn::windowsError(hr);


		hr = this->DictationRecoCtxt->SetAudioOptions(SPAO_RETAIN_AUDIO, NULL, NULL);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		// Start the recognizer: create its grammar.
		hr = this->DictationRecoCtxt->CreateGrammar(0, &this->DictationGrammar);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

//		hr = this->DictationGrammar->LoadDictation(NULL, SPLO_STATIC);
//		if(FAILED(hr))	 return xreturn::windowsError(hr);

		hr = this->DictationRecoCtxt->SetNotifyWin32Event();
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		hr = this->DictationGrammar->GetRule(_A2W("FilterRule") ,0,SRATopLevel | SRADynamic | SPRAF_Active, TRUE ,  &this->FilterRuleHandleHandle); 
		if(FAILED(hr))	 return xreturn::windowsError(hr);

//		hr = this->DictationGrammar->GetRule(_A2W("FilterRule2") ,0,SRATopLevel | SRADynamic | SPRAF_Active, TRUE ,  &this->FilterRuleHandleHandle2); 
//		if(FAILED(hr))	 return xreturn::windowsError(hr);


//		hr = this->DictationGrammar->Commit(0);
//		if(FAILED(hr))	 return xreturn::windowsError(hr);
	}
	// Create the rule-based engine.
	{
		CComPtr<ISpAudio> cpAudio;
		hr = SpCreateDefaultObjectFromCategoryId(SPCAT_AUDIOIN, &cpAudio);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		hr = this->RuleEngine.CoCreateInstance(CLSID_SpInprocRecognizer);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		// Read input from the default audio device.
		hr = this->RuleEngine->SetInput( cpAudio, TRUE);  
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		hr = this->RuleEngine->CreateRecoContext(&this->RuleRecoCtxt);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

//		ULONGLONG hookevent = SPFEI(SPEI_RECOGNITION)|SPFEI(SPEI_FALSE_RECOGNITION)|SPFEI(SPEI_HYPOTHESIS)| SPFEI(SPEI_SOUND_START) | SPFEI(SPEI_SOUND_END);
		ULONGLONG hookevent = SPFEI(SPEI_RECOGNITION);
		hr = this->RuleRecoCtxt->SetInterest(hookevent, hookevent);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		hr = this->RuleRecoCtxt->SetAudioOptions(SPAO_RETAIN_AUDIO, NULL, NULL);
		if(FAILED(hr))	 return xreturn::windowsError(hr);

		// Start the recognizer: create the rule grammar.
		hr = this->RuleRecoCtxt->CreateGrammar(0, &this->RuleGrammar);
		if(FAILED(hr))	 return xreturn::windowsError(hr);
	}
	// Create the top-level rules.
//	hr = this->RuleGrammar->GetRule(_A2W("BasicRule") ,0,SRATopLevel | SRADynamic | SPRAF_Active, TRUE ,  &this->BasicRuleHandleHandle); 
//	if(FAILED(hr))	 return xreturn::windowsError(hr);

	hr = this->RuleGrammar->GetRule(_A2W("TemporaryRule") ,0,SRATopLevel | SRADynamic |  SPRAF_Active , TRUE ,  &this->TemporaryRuleHandle); 
	if(FAILED(hr))	 return xreturn::windowsError(hr);

	hr = this->RuleGrammar->GetRule(_A2W("YobikakeRule") ,0,SRATopLevel | SRADynamic |  SPRAF_Active , TRUE ,  &this->YobikakeRuleHandle); 
	if(FAILED(hr))	 return xreturn::windowsError(hr);

	hr = this->RuleGrammar->GetRule(_A2W("CommandRule") ,0,SRADynamic , TRUE ,  &this->CommandRuleHandle); 
	if(FAILED(hr))	 return xreturn::windowsError(hr);
/*
	hr = this->RuleGrammar->GetRule(_A2W("MusicRule") ,0,SRADynamic , TRUE ,  &this->MusicRuleHandle); 
	if(FAILED(hr))	 return xreturn::windowsError(hr);

	hr = this->RuleGrammar->GetRule(_A2W("VideoRule") ,0,SRADynamic , TRUE ,  &this->VideoRuleHandle); 
	if(FAILED(hr))	 return xreturn::windowsError(hr);

	hr = this->RuleGrammar->GetRule(_A2W("BookRule") ,0,SRADynamic , TRUE ,  &this->BookRuleHandle); 
	if(FAILED(hr))	 return xreturn::windowsError(hr);
*/
	//http://msdn.microsoft.com/en-us/library/ee125671(v=vs.85).aspx
	// Disabled experiment: BasicRule = YobikakeRule + CommandRule, which
	// would require creating an intermediate state for the "followed by"
	// transition.
//	SPSTATEHANDLE appendState;
//	hr = this->RuleGrammar->Z(this->BasicRuleHandleHandle, &appendState);
//	if(FAILED(hr))	 return xreturn::windowsError(hr);
//	hr = this->RuleGrammar->AddRuleTransition(this->BasicRuleHandleHandle , appendState , this->YobikakeRuleHandle , 1.0f , NULL );
//	if(FAILED(hr))	 return xreturn::windowsError(hr);
//	hr = this->RuleGrammar->AddRuleTransition(appendState , NULL , this->CommandRuleHandle , 1.0f , NULL );
//	if(FAILED(hr))	 return xreturn::windowsError(hr);

	//////
//	SPSTATEHANDLE appendState;
//	hr = this->RuleGrammar->CreateNewState(this->YobikakeRuleHandle, &appendState);
//	if(FAILED(hr))	 return xreturn::windowsError(hr);
//	hr = this->RuleGrammar->AddRuleTransition(this->YobikakeRuleHandle , appendState , this->CommandRuleHandle , 1.0f , NULL );
//	if(FAILED(hr))	 return xreturn::windowsError(hr);
//	hr = this->RuleGrammar->AddRuleTransition(appendState , NULL , this->CommandRuleHandle , 1.0f , NULL );
//	if(FAILED(hr))	 return xreturn::windowsError(hr);


	// Results are delivered via callback; SAPI invokes the callback on the
	// main thread.
	hr = this->RuleRecoCtxt->SetNotifyCallbackFunction
		( methodcallback::registstdcall<struct _sapi_callback1,SPNOTIFYCALLBACK*>(this, &Recognition_SAPI::Callback)  ,0,0);
	if(FAILED(hr))	 return xreturn::windowsError(hr);

	// Rule commit (currently disabled).
//	this->CommitRule();

	return true;
}