// Beispiel #1
// 0
#include <cstring>
#include <vector>
// Slides a recognition window of LEN_WINDOW samples over a circular capture
// buffer, processing until the head position catches up with the DMA writer.
// NOTE(review): buf[0]/buf[1] indexed at 2*pos_h — presumably two channels of
// interleaved samples; confirm against the buffer layout.
void run(void) {
	while ((status.pos_h + LEN_WINDOW) <= get_positionDMA()) {
		//check fill rest: near the end of the buffer, top up the tail once per
		//wrap (isRestFill guards re-filling); stop if fillRest() reports no data
		if((status.pos_h >= SIZE_BUF - LEN_WINDOW) && !status.isRestFill) {
			if(!fillRest())
				break;
		}	
		//recognition: on a match skip a whole window; otherwise advance by the
		//smaller step so overlapping windows are examined
		if (recognition(&buf[0].buf[2 * status.pos_h], &buf[1].buf[2 * status.pos_h], LEN_WINDOW))
			status.pos_h += LEN_WINDOW;
		else
			status.pos_h += STEP_WINDOW;
		//check of end buffer: wrap the head, flip the active side, and keep a
		//running sample count across wraps
		if(status.pos_h > SIZE_BUF) {
			status.pos_h -= SIZE_BUF;
			status.isRestFill = 0;
			status.side_h = !status.side_h;
			status.firstCount += SIZE_BUF;
			//check on synchronization: when the head lands on the sync side,
			//re-base the running count and mark the sync request consumed (-1)
			if(status.side_h == sync.side) {
				status.firstCount = SIZE_BUF - sync.correctFirstCount;
				sync.side = (char)-1;
			}
		}
	}
	return;
}
void SpeechRecognitionClientProxy::didReceiveError(
    const WebSpeechRecognitionHandle& handle,
    const WebString& message,
    WebSpeechRecognizerClient::ErrorCode code) {
  SpeechRecognition* recognition(handle);
  SpeechRecognitionError::ErrorCode errorCode =
      static_cast<SpeechRecognitionError::ErrorCode>(code);
  recognition->didReceiveError(
      SpeechRecognitionError::create(errorCode, message));
}
// Beispiel #3
// 0
/**
 * Driver for the sudoku-photo recognition pipeline:
 * open -> resize -> adaptive binarization -> deskew -> crop to grid -> split
 * digits, printing the CPU time of every stage.
 *
 * Generalized: the input image may now be passed as argv[1]; with no argument
 * the historical hard-coded "9.jpg" is used, so existing invocations behave
 * exactly as before.
 *
 * @return 0 on success, 1 if ImageMagick reports an error.
 */
int main(int argc, char **argv)
{
    try {
        // Input image: first command-line argument, or the legacy default.
        const char *imagePath = (argc > 1) ? argv[1] : "9.jpg";

        clock_t begin_time, summary_time = clock();

        begin_time = clock();
        Photo sudoku(imagePath);
        std::cout << "Open image: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;

        // 1) Resize to a fixed working resolution.
        begin_time = clock();
        sudoku.resize(512, 512);
        std::cout << "Resizing: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;

        // 2) Binarization (local threshold: window 4, sensitivity 0.04).
        begin_time = clock();
        Filter::AdaptiveBinarization(sudoku, 4, 0.04);
        std::cout << "Adaptive Binarization: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;

        GridRecognition recognition(sudoku);

        // 3) Rotate — the second print deliberately reuses begin_time, so it
        // reports angle detection *plus* the rotation itself.
        begin_time = clock();
        double angle = recognition.getHorizontalAngle();
        std::cout << "getHorizontalAngle: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;
        sudoku.rotate(angle);
        std::cout << "getHorizontalAngle + Rotation: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;

        // 4) Crop to the detected grid rectangle (cumulative print, as above).
        begin_time = clock();
        Rectangle grid = recognition.getGridCoords();
        std::cout << "getGridCoords: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;
        sudoku.crop(grid.x, grid.y, grid.width, grid.height);
        std::cout << "getGridCoords + crop: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;

        // 5) Split cells at line intersections and dump digits to "digits".
        begin_time = clock();
        vector< Point > points = recognition.getLinesCrossPoints();
        std::cout << "getLinesCrossPoints: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;
        recognition.splitOnDigits(points, "digits");
        std::cout << "getLinesCrossPoints + splitOnDigits: " << double(clock() - begin_time) / CLOCKS_PER_SEC << " sec." << std::endl;

        sudoku.save("test.jpg");

        std::cout << "Summary: " << double(clock() - summary_time) / CLOCKS_PER_SEC << " sec." << std::endl;
    } catch(Magick::Exception &error_) { 
        std::cout << "Caught exception: " << error_.what() << std::endl; 

        system("pause");  // Windows-only; no-op shell error elsewhere.
        return 1; 
    }

    system("pause");  // Windows-only; no-op shell error elsewhere.
    return 0;
}
void SpeechRecognitionClientProxy::didReceiveResults(const WebSpeechRecognitionHandle& handle, const WebVector<WebSpeechRecognitionResult>& newFinalResults, const WebVector<WebSpeechRecognitionResult>& currentInterimResults)
{
    SpeechRecognition* recognition(handle);

    HeapVector<Member<SpeechRecognitionResult> > finalResultsVector(newFinalResults.size());
    for (size_t i = 0; i < newFinalResults.size(); ++i)
        finalResultsVector[i] = static_cast<SpeechRecognitionResult*>(newFinalResults[i]);

    HeapVector<Member<SpeechRecognitionResult> > interimResultsVector(currentInterimResults.size());
    for (size_t i = 0; i < currentInterimResults.size(); ++i)
        interimResultsVector[i] = static_cast<SpeechRecognitionResult*>(currentInterimResults[i]);

    recognition->didReceiveResults(finalResultsVector, interimResultsVector);
}
// Beispiel #5
// 0
	// Runs the finite-state transducer over fst.string and reports whether the
	// final state consumed the entire input.
	//
	// fst is taken by value: rebinding fst.rstates below only touches this
	// local copy, never the caller's object.
	//
	// Fix: the original raw new[]/delete[] pairs leaked both arrays if
	// recognition() threw; std::vector gives RAII cleanup on every path.
	// (Dead commented-out code was also removed.)
	bool execute(FST fst)
	{
		bool rc = false;
		std::vector<short> reached(fst.nstates);  // reachable-state progress
		std::vector<short> scratch(fst.nstates);  // working array for recognition()
		fst.rstates = reached.data();

		startarr(fst.rstates, fst.nstates);
		startarr(scratch.data(), fst.nstates);
		recognition(fst, scratch.data());
		// Accept iff the last state's progress equals the full input length.
		if (fst.rstates[fst.nstates - 1] == strlen(fst.string))
		{
			rc = true;
		}
		fst.rstates = NULL;  // don't leave the local copy pointing at freed storage
		return rc;
	}
void SpeechRecognitionClientProxy::didEndSound(const WebSpeechRecognitionHandle& handle)
{
    SpeechRecognition* recognition(handle);
    recognition->didEndSpeech();
    recognition->didEndSound();
}
void SpeechRecognitionClientProxy::didStartAudio(const WebSpeechRecognitionHandle& handle)
{
    SpeechRecognition* recognition(handle);
    recognition->didStartAudio();
}
void SpeechRecognitionClientProxy::didReceiveNoMatch(const WebSpeechRecognitionHandle& handle, const WebSpeechRecognitionResult& result)
{
    SpeechRecognition* recognition(handle);
    recognition->didReceiveNoMatch(result);
}