void TesseractProcessor::ClearAdaptiveClassifier()
{
	if (_apiInstance != NULL)
	{
		TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
		api->ClearAdaptiveClassifier();
	}
}
void TesseractProcessor::End()
{
	if (_apiInstance != NULL)
	{
		TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
		api->End();
	}
}
void TesseractProcessor::DisableThresholder()
{
	if (_apiInstance == NULL)
		return;

	TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
	
	api->SetThresholder(NULL);
}
void TesseractProcessor::UseThresholder()
{
	if (_apiInstance == NULL)
		return;

	TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
	
	ImageThresholder* thresholder = new ImageThresholder();
	api->SetThresholder(thresholder);
}
bool TesseractProcessor::SetVariable(System::String* name, System::String* value)
{
	if (_apiInstance == NULL)
		return false;

	TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
	bool succeed = api->SetVariable(Helper::StringToPointer(name), Helper::StringToPointer(value));

	return succeed;
}
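For reference, a minimal hedged sketch of the same operation against the native TessBaseAPI (the api pointer is assumed to be an already-initialized instance; "tessedit_char_whitelist" is a standard Tesseract config variable, used here only as an example):

// Hedged sketch: restrict recognition to digits via a standard config variable.
// 'api' is assumed to be an initialized TessBaseAPI*.
if (!api->SetVariable("tessedit_char_whitelist", "0123456789"))
    printf("SetVariable failed\n");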
Example #6
/*
=======================================
    Get the TESS-OCR result text
=======================================
*/
CR_API ansi_t*
tessocr_get_utf8 (
  __CR_IN__ tessocr_t   tessocr
    )
{
    TessBaseAPI*    tess;

    tess = (TessBaseAPI*)tessocr;
    return (tess->GetUTF8Text());
}
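GetUTF8Text() returns a heap-allocated buffer owned by the caller; a hedged usage sketch of tessocr_get_utf8 (the handle is assumed to come from tessocr_init, and the buffer is released with delete [] because Tesseract allocates it with new[]):

/* Hedged usage sketch. */
ansi_t* text = tessocr_get_utf8(tessocr);
if (text != NULL) {
    printf("%s\n", text);
    delete [] text;   /* the buffer from GetUTF8Text() is owned by the caller */
}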
String* TesseractProcessor::GetTesseractEngineVersion()
{
	if (_apiInstance != NULL)
	{
		TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
		return Helper::PointerToString(api->Version());
	}

	return NULL;
}
Example #8
/*
=======================================
    Set the TESS-OCR image resolution
=======================================
*/
CR_API void_t
tessocr_set_ppi (
  __CR_IN__ tessocr_t   tessocr,
  __CR_IN__ uint_t      ppi
    )
{
    TessBaseAPI*    tess;

    tess = (TessBaseAPI*)tessocr;
    tess->SetSourceResolution(ppi);
}
Example #9
/*
=======================================
    Set the TESS-OCR image recognition region
=======================================
*/
CR_API void_t
tessocr_set_rect (
  __CR_IN__ tessocr_t       tessocr,
  __CR_IN__ const sRECT*    rect
    )
{
    TessBaseAPI*    tess;

    tess = (TessBaseAPI*)tessocr;
    tess->SetRectangle(rect->x1, rect->y1, rect->ww, rect->hh);
}
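SetRectangle() takes (left, top, width, height), so the wrapper maps x1/y1 to the top-left corner and ww/hh to the extent; a hedged sketch of an equivalent direct call on an initialized TessBaseAPI:

/* Hedged sketch: recognize only a 200x50 region whose top-left corner is (10, 20). */
tess->SetRectangle(10, 20, 200, 50);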
System::String* TesseractProcessor::GetStringVariable(System::String* name)
{
	if (_apiInstance == NULL)
		return NULL;

	TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();

	const char *value = api->GetStringVariable(Helper::StringToPointer(name));

	return Helper::PointerToString(value);
}
bool TesseractProcessor::GetDoubleVariable(System::String* name, double __gc* value)
{
	if (_apiInstance == NULL)
		return false;

	TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();

	double val = 0;
	bool succeed = api->GetDoubleVariable(Helper::StringToPointer(name), &val);
	*value = val;

	return succeed;
}
string ocr_tesseract(Mat im) {
	// Pass it to Tesseract API
    TessBaseAPI tess;
    tess.Init(NULL, "eng", OEM_DEFAULT);
    tess.SetPageSegMode(PSM_SINGLE_BLOCK);
    tess.SetImage((uchar*)im.data, im.cols, im.rows, 1, im.cols);

    // Get the text; the buffer returned by GetUTF8Text() is owned by the caller
    char* out = tess.GetUTF8Text();
    string ret(out);
    delete [] out;
    tess.End();

    return ret;
}
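A hedged usage sketch of ocr_tesseract() (assuming the function above is visible in the same file); the file name is a placeholder, and the image is loaded as grayscale because the function passes 1 byte per pixel and a stride of im.cols to SetImage():

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    // "page.png" is a placeholder path; load single-channel to match SetImage(..., 1, im.cols).
    Mat gray = imread("page.png", IMREAD_GRAYSCALE);
    if (gray.empty())
        return 1;
    cout << ocr_tesseract(gray) << endl;
    return 0;
}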
void TesseractProcessor::InternalFinally()
{
	if (_apiInstance != NULL)
	{
		TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();

		api->End();

		delete api;
		api = NULL;

		_apiInstance = NULL;
	}
}
Example #14
/*
=======================================
    Set a TESS-OCR parameter
=======================================
*/
CR_API bool_t
tessocr_set_param (
  __CR_IN__ tessocr_t       tessocr,
  __CR_IN__ const ansi_t*   name,
  __CR_IN__ const ansi_t*   value
    )
{
    TessBaseAPI*    tess;

    tess = (TessBaseAPI*)tessocr;
    if (!tess->SetVariable(name, value))
        return (FALSE);
    return (TRUE);
}
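Any Tesseract config variable can be routed through tessocr_set_param(); a hedged sketch restricting output to digits (the handle is assumed to come from tessocr_init, and the variable name is a standard Tesseract parameter):

/* Hedged usage sketch. */
if (!tessocr_set_param(tessocr, "tessedit_char_whitelist", "0123456789"))
    printf("failed to set parameter\n");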
Example #15
File: Ocr.cpp Project: sddari/Flow
	DLL_API char *ocr(char *argv)
	{
		TessBaseAPI myOCR;
		string str(argv);		
		string delimiter = " ";

		size_t pos = 0;
		string token;
		vector <string> vecArgv;
		while ((pos = str.find(delimiter)) != std::string::npos) 
		{
			token = str.substr(0, pos);
			vecArgv.push_back(token);			
			str.erase(0, pos + delimiter.length());
		}

		vecArgv.push_back(str);
		if (vecArgv.size() != 2)
		{		
			return "오류 : 인자값 : 언어(eng or kor) 파일명";
		}

		if (myOCR.Init(NULL, vecArgv[0].c_str()))
		{				
			return "오류 : Init";
		}

		static string strMsg;
		STRING text_out;
		if (!myOCR.ProcessPages(vecArgv[1].c_str(), NULL, 0, &text_out))
		{			
			return "오류 : ProcessPages";
		}		

		strMsg = text_out.string();		

		int nLen = MultiByteToWideChar(CP_UTF8, 0, strMsg.c_str(), strMsg.size(), NULL, NULL);
		wstring strUni(nLen,0);
		MultiByteToWideChar(CP_UTF8, 0, strMsg.c_str(), strMsg.size(), &strUni[0], nLen);		
		
		strMsg.clear();
		nLen = WideCharToMultiByte(CP_ACP, 0, &strUni[0], -1, NULL, 0, NULL, NULL);			
		strMsg.resize(nLen,0);
		WideCharToMultiByte(CP_ACP, 0, &strUni[0], -1, &strMsg[0], nLen, NULL, NULL);			

		myOCR.End();
		return (char *)strMsg.c_str();
	}
Example #16
/*
=======================================
    Create a TESS-OCR object
=======================================
*/
CR_API tessocr_t
tessocr_init (
  __CR_IN__ const ansi_t*   path,
  __CR_IN__ const ansi_t*   lang
    )
{
    TessBaseAPI*    tess;

    tess = new TessBaseAPI ();
    if (tess == NULL)
        return (NULL);
    if (tess->Init(path, lang) != 0) {
        delete tess;
        return (NULL);
    }
    return ((tessocr_t)tess);
}
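For comparison, a hedged sketch of the same lifecycle written directly against TessBaseAPI, using Leptonica's pixRead() to load the image (the file name and language are placeholders; a NULL datapath falls back to the default tessdata location):

#include <tesseract/baseapi.h>
#include <leptonica/allheaders.h>
#include <cstdio>

int main()
{
    tesseract::TessBaseAPI tess;
    if (tess.Init(NULL, "eng") != 0) {      // 0 means success
        fprintf(stderr, "Init failed\n");
        return 1;
    }
    Pix* pix = pixRead("sample.png");       // placeholder file name
    if (pix == NULL) {
        tess.End();
        return 1;
    }
    tess.SetImage(pix);
    char* text = tess.GetUTF8Text();
    if (text != NULL) {
        printf("%s", text);
        delete [] text;                     // caller owns the buffer
    }
    pixDestroy(&pix);
    tess.End();
    return 0;
}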
Example #17
/*
=======================================
    Set the TESS-OCR image to recognize
=======================================
*/
CR_API bool_t
tessocr_set_image (
  __CR_IN__ tessocr_t       tessocr,
  __CR_IN__ const sIMAGE*   image
    )
{
    TessBaseAPI*    tess;

    /* only 8/24/32-bit color images are supported */
    if (image->fmt != CR_INDEX8 &&
        image->fmt != CR_ARGB888 &&
        image->fmt != CR_ARGB8888)
        return (FALSE);
    tess = (TessBaseAPI*)tessocr;
    tess->SetImage(image->data, image->position.ww,
                   image->position.hh, image->bpc, image->bpl);
    return (TRUE);
}
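The wrapper above forwards to the raw-buffer SetImage() overload, whose last two arguments are bytes per pixel (bpc) and bytes per line (bpl); a hedged sketch with a synthetic 8-bit grayscale page ('tess' is assumed to be an initialized TessBaseAPI*, and memset needs <string.h>):

/* Hedged sketch: a blank 320x240 grayscale buffer, 1 byte per pixel. */
static unsigned char gray[240 * 320];
memset(gray, 0xFF, sizeof(gray));           /* white page */
tess->SetImage(gray, 320, 240, 1, 320);     /* width, height, bytes/pixel, bytes/line */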
bool TesseractProcessor::Init(String* dataPath, String* lang, int ocrEngineMode)
{
	bool bSucced = false;

	_dataPath = dataPath;
	_lang = lang;
	_ocrEngineMode = ocrEngineMode;

	if (_apiInstance != NULL)
	{
		TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();
		bSucced = api->Init(
			Helper::StringToPointer(dataPath), 
			Helper::StringToPointer(lang),
			TesseractConverter::ParseOcrEngineMode(ocrEngineMode)) >= 0;
	}

	return bSucced;
}
Example #19
//----------------------------------------------------------------------------------
void OCRit(Mat inputimg, vector<vector<Point>>points, char data[][60], Mat &outputimg, char lang[]){
	TessBaseAPI tess;
	
	tess.Init(NULL, lang, OEM_DEFAULT);
	cout << "\n\n";
	for (unsigned int c = 0; c < points.size(); c++){
		Mat imag = inputimg(Rect(points[c][0], points[c][1]));
		Mat img;
		cvtColor(imag, img, COLOR_BGR2GRAY);//use img to do ocr now

		tess.SetImage((uchar*)img.data, img.size().width, img.size().height, img.channels(), img.step1());
		char* out = tess.GetUTF8Text();
		cout << c + 1 << " -- " << out << "\n";
		strcpy(data[c], out);
		delete [] out; // release the buffer returned by GetUTF8Text()
		//removed to prevent alteration of original pan
		//rectangle(outputimg, points[c][0], points[c][1],1,4,0);
	}

	tess.End();
}
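A hedged call sketch for OCRit(): each entry of points is assumed to hold the two opposite corners of one region (that is how Rect(points[c][0], points[c][1]) is built inside the function), and the image path, corner values, and language are placeholders:

// Hedged usage sketch (C++11 brace initialization).
Mat pan = imread("card.jpg");                 // must be a 3-channel BGR image for cvtColor
vector<vector<Point>> regions = {
    { Point(10, 10), Point(210, 60) }         // top-left and bottom-right of one field
};
char results[1][60];
Mat annotated = pan.clone();
OCRit(pan, regions, results, annotated, (char*)"eng");
cout << results[0] << endl;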
BEGIN_NAMSPACE

USING_COMMON_SYSTEM

USING_TESSERACT

USING_TESSERACT_ENGINE_WRAPPER


// ===============================================================
// GET/SET VARIABLES
bool TesseractProcessor::GetBoolVariable(System::String* name, bool __gc* value)
{
	if (_apiInstance == NULL)
		return false;

	TessBaseAPI* api = (TessBaseAPI*)_apiInstance.ToPointer();

	bool val = false;
	bool succeed = api->GetBoolVariable(Helper::StringToPointer(name), &val);
	*value = val;

	return succeed;
}
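A hedged native-side sketch of the same read; classify_enable_learning is a standard boolean Tesseract parameter, used here only as an example, and 'api' is assumed to be an initialized TessBaseAPI*:

// Hedged usage sketch.
bool enabled = false;
if (api->GetBoolVariable("classify_enable_learning", &enabled))
    printf("classify_enable_learning = %s\n", enabled ? "true" : "false");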
int main()
{



	Mat findImage = imread("sudo.jpg", IMREAD_GRAYSCALE); // source image

	int tempsudo[81] = { 0 };
	int sudoku[9][9] = { 0 };




	Mat cloneImage = findImage.clone();
	threshold(findImage, findImage, 200, 255, THRESH_BINARY);
	//adaptiveThreshold(findImage, findImage, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 21, 2);
	imshow("findImage", findImage);
	waitKey();
	Mat srcImage2 = findImage.clone();

	Mat dstImage(srcImage2.size(), CV_8UC3);

	vector<vector<Point> > beforeContoursforRectangle;//스도쿠 이미지만 빼기위한 경계선
	vector<vector<Point> > AfterContoursforSmallRectangle; //스도쿠에서 작은 네모만 빼기위한경계선
	vector<vector<Point> > configurBackground;

	vector<Vec4i> hierarchy; //외곽선용
	vector<Vec4i> hierarchy2; //81개칸용
	vector<Vec4i> hierarchy3; //배경 검사용


	vector<vector<Point>> cutSize;//외곽선용
	vector<vector<Point>> cutSize2;//81개칸용
	vector<vector<Point>> cutSize3; //배경 검사용



	int mode = RETR_TREE;
	int method = CHAIN_APPROX_NONE;
	Rect r;
	findContours(findImage, beforeContoursforRectangle, hierarchy, mode, method); // detect contours


	cout << "beforeContoursforRectangle.size()=" << beforeContoursforRectangle.size() << endl; // number of contours found

	cout << findImage.size() << endl; // image size

	int countMax = beforeContoursforRectangle[0].size(); // variable used to find the largest contour

	for (int k = 0; k < beforeContoursforRectangle.size(); k++)
	{
		if (beforeContoursforRectangle[k].size()>countMax)
			countMax = beforeContoursforRectangle[k].size();
	}

	for (int k = 0; k < beforeContoursforRectangle.size(); k++)
	{
		if (beforeContoursforRectangle[k].size()>(countMax*0.6))//스토쿠 판만 빼오기 
		{
			cutSize.push_back(beforeContoursforRectangle[k]);
		}
	}
	/*
	// for testing
	Mat testtImage;
	for (int k = 0; k < cutSize.size(); k++)
	{
	//
	cvtColor(findImage, testtImage, COLOR_GRAY2BGR);
	r = boundingRect(cutSize[k]); // bounding rectangle of the region
	Rect ROI2(r.x, r.y, r.width, r.height);
	drawContours(testtImage, cutSize, k, Scalar(255, 0, 0), 4); // draw the contour
	cout << r.size() << endl;

	rectangle(testtImage, ROI2, Scalar(128, 255, 255), 2); // draw as a rectangle




	circle(testtImage, cutSize[k][0], 5, Scalar(123, 123, 123), -1);
	imshow("testtImage", testtImage);
	waitKey();
	}
	*/



	vector<vector<Point>> cuttingArea;
	int pushArea = -1;
	int smaller = cutSize[0].size();

	for (int k = 1; k < cutSize.size(); k++)
	{
		if (cutSize[k].size() < smaller)
		{
			smaller = cutSize[k].size(); // take the smallest one; sort(cutsize.begin(), cutsize.end()) did not work, so this approach is used
			pushArea = k;
		}
	}
	cuttingArea.push_back(cutSize[pushArea]);


	r = boundingRect(cuttingArea[0]);
	Rect ROI(r.x, r.y, r.width, r.height); // mark the region selected for cropping

	cout << "bigSize.size()=" << cutSize.size() << endl;




	int eraseSpot = -1;
	int again = 0;



	int mode2 = RETR_LIST;
	int mode3 = RETR_LIST;
	int method2 = CHAIN_APPROX_NONE;
	int method3 = CHAIN_APPROX_NONE;

	Mat roi = cloneImage(ROI); // take only the selected region
	Mat newImage = repeat(roi, 1, 1);
	Mat contourImage = newImage.clone(); // used to detect shaded areas in the background
	for (int a = 0; a < contourImage.rows; a++)
	{
		for (int b = 0; b < contourImage.cols; b++)
		{
			float te = contourImage.at<uchar>(a, b);
			if (te >= 160 && te < 190)
				contourImage.at<uchar>(a, b) = 250;
		}
	}

	threshold(contourImage, contourImage, 230, 255, THRESH_BINARY);


	imshow("contourImage", contourImage);
	waitKey();
	vector<int> configureRectSizeStorage; //빈칸과 숫자칸 저장
	vector<int> configureRect; //빈칸숫자칸의 최대 최소값을 알기위해 만듬
	vector<Rect> countRect;
	findContours(contourImage, configurBackground, hierarchy3, mode3, method3);

	for (int k = 0; k < configurBackground.size(); k++)
	{
		cutSize3.push_back(configurBackground[k]); // store the contour positions
	}

	Mat configureBackgroundImage(contourImage.size(), CV_8UC1);
	for (int k = 0; k < cutSize3.size(); k++)
	{
		//
		cvtColor(contourImage, configureBackgroundImage, COLOR_GRAY2BGR);
		r = boundingRect(cutSize3[k]); // bounding rectangle of the region
		Rect ROI2(r.x, r.y, r.width, r.height);
		drawContours(configureBackgroundImage, cutSize3, k, Scalar(255, 0, 0), 4); // draw the contour
		//cout << r.size() << endl;

		rectangle(configureBackgroundImage, ROI2, Scalar(128, 255, 255), 2); // draw as a rectangle

		if ((r.width*r.height)>(configureBackgroundImage.rows*configureBackgroundImage.cols) - 20000)
		{		// skip the largest region (the whole board)
			//imshow("resultImage", resultImage);
			//waitKey();
			goto here234;

		}
		configureRectSizeStorage.push_back(r.width*r.height); // collect for sorting
	here234:{}

		circle(configureBackgroundImage, cutSize3[k][0], 5, Scalar(123, 123, 123), -1);
		//imshow("resultImage", configureBackgroundImage);
		//waitKey();
	}


	sort(configureRectSizeStorage.begin(), configureRectSizeStorage.end());

	int tSpot = -1;
	int tagain = 0;


	for (int k = 0; k < configureRectSizeStorage.size(); k++)
	{
		if (k == configureRectSizeStorage.size() - 1)
			break;

		if ((configureRectSizeStorage[k + 1] - configureRectSizeStorage[k]) >500)
		{
			tSpot = k; // if consecutive sizes differ by more than 500, mark the split point
			break;
		}
	}

	for (int k = tSpot + 1; k < configureRectSizeStorage.size(); k++)
	{
		if (tSpot == -1)
			configureRect.push_back(configureRectSizeStorage[tagain++]); // no split found: just insert everything
		else
			configureRect.push_back(configureRectSizeStorage[k]); // insert only the 81 cell regions
	}

	int minRectSize2 = configureRect.front(); // minimum cell size
	int maxRectSize2 = configureRect[configureRect.size() - 1]; // maximum cell size
	Mat lastConfigure;
	for (int k = 0; k < cutSize3.size(); k++)
	{

		cvtColor(contourImage, lastConfigure, COLOR_GRAY2BGR);
		//imshow("testImage", testImage);
		r = boundingRect(cutSize3[k]);
		Rect ROI2(r.x, r.y, r.width, r.height);
		//cout << r.size() << endl;

		rectangle(lastConfigure, ROI2, Scalar(128, 255, 255), 2); // draw

		if ((minRectSize2 <= (r.width*r.height)) && ((r.width*r.height) <= maxRectSize2))
			countRect.push_back(ROI2); // keep only regions within the cell size range for comparison
		//imshow("lastConfigure", lastConfigure);
		//waitKey();
	}





	vector<int>params; //압축양식사용

	params.push_back(IMWRITE_JPEG_QUALITY);
	params.push_back(9);

	imwrite("sudoOnly.jpg", newImage, params);
	Mat sudokuImage = imread("sudoOnly.jpg", IMREAD_GRAYSCALE);
	Mat realImage = imread("sudoOnly.jpg", IMREAD_GRAYSCALE);
	Mat testSudo = sudokuImage.clone(); // kept for later checks
	Size size(5, 5);
	Mat rectKernel = getStructuringElement(MORPH_RECT, size);
	Mat elipseKernel = getStructuringElement(MORPH_ELLIPSE, size);
	Mat crossKernel = getStructuringElement(MORPH_CROSS, size);


	if (countRect.size() != 81)
	{
		cout << "안되지롱" << endl;
		waitKey();

		goto jumpThreshold;
	}

	threshold(sudokuImage, sudokuImage, 230, 255, THRESH_BINARY); // binarize the image
	imshow("teswtsteswtse", sudokuImage);
	waitKey();
	goto jumpAdaptiveThreshold;
	//morphologyEx(sudokuImage, sudokuImage, MORPH_CLOSE, rectKernel, Point(-1, -1), 1);

jumpThreshold:
	//threshold(sudokuImage, sudokuImage, 200, 255, THRESH_BINARY);
	//threshold(sudokuImage, sudokuImage, 200, 255, THRESH_OTSU+THRESH_BINARY); // binarize the image

	adaptiveThreshold(sudokuImage, sudokuImage, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 21, 1);
	//morphologyEx(sudokuImage, sudokuImage, MORPH_GRADIENT, crossKernel, Point(-1, -1), 1);
	erode(sudokuImage, sudokuImage, crossKernel, Point(-1, -1), 1);
	dilate(sudokuImage, sudokuImage, elipseKernel, Point(-1, -1), 1);
jumpAdaptiveThreshold:



	imshow("sudokuImage", sudokuImage);
	waitKey();

	Mat testImage = realImage.clone(); // copy of the cropped sudoku
	Mat resultImage(testImage.size(), CV_8UC3);
	Mat testSudoImage(realImage.size(), CV_8UC3);
	vector<int> rectSizeStorage; // stores sizes of empty cells and digit cells
	vector<int> realRect; // used to find the min/max cell size
	vector<Rect> sudokuRect; // per-cell cropped regions
	vector<Rect> temp;
	threshold(testImage, testImage, 200, 255, THRESH_OTSU + THRESH_BINARY);
	//waitKey();
	findContours(sudokuImage, AfterContoursforSmallRectangle, hierarchy2, mode2, method2);
	// find contours again inside the cropped sudoku image
	for (int k = 0; k < AfterContoursforSmallRectangle.size(); k++)
	{
		cutSize2.push_back(AfterContoursforSmallRectangle[k]); // store the contour positions
	}



	for (int k = 0; k < cutSize2.size(); k++)
	{
		//
		cvtColor(testImage, resultImage, COLOR_GRAY2BGR);
		r = boundingRect(cutSize2[k]); // bounding rectangle of the region
		Rect ROI2(r.x, r.y, r.width, r.height);
		drawContours(resultImage, cutSize2, k, Scalar(255, 0, 0), 4); // draw the contour
		//cout << r.size() << endl;

		rectangle(resultImage, ROI2, Scalar(128, 255, 255), 2); // draw as a rectangle

		if ((r.width*r.height)>(resultImage.rows*resultImage.cols) - 20000)
		{		// skip the largest region (the whole board)
			//imshow("resultImage", resultImage);
			//waitKey();
			goto here;

		}
		rectSizeStorage.push_back(r.width*r.height); // collect for sorting
	here:{}

		circle(resultImage, cutSize2[k][0], 5, Scalar(123, 123, 123), -1);
		//imshow("resultImage", resultImage);
		//waitKey();
	}


	sort(rectSizeStorage.begin(), rectSizeStorage.end());

	eraseSpot = -1;
	again = 0;


	for (int k = 0; k < rectSizeStorage.size(); k++)
	{
		if (k == rectSizeStorage.size() - 1)
			break;

		if ((rectSizeStorage[k + 1] - rectSizeStorage[k]) >500)
		{
			eraseSpot = k; // if consecutive sizes differ by more than 500, mark the split point
		}
	}

	for (int k = eraseSpot + 1; k < rectSizeStorage.size(); k++)
	{
		if (eraseSpot == -1)
			realRect.push_back(rectSizeStorage[again++]); // no split found: just insert everything
		else
			realRect.push_back(rectSizeStorage[k]); // insert only the 81 cell regions
	}


	// already sorted on insertion, so no extra sort is needed
	int minRectSize = realRect.front(); // minimum cell size
	int maxRectSize = realRect[realRect.size() - 1]; // maximum cell size

	for (int k = 0; k < cutSize2.size(); k++)
	{

		cvtColor(testImage, resultImage, COLOR_GRAY2BGR);
		//imshow("testImage", testImage);
		r = boundingRect(cutSize2[k]);
		Rect ROI2(r.x, r.y, r.width, r.height);
		//cout << r.size() << endl;

		rectangle(resultImage, ROI2, Scalar(128, 255, 255), 2); // draw

		if ((minRectSize <= (r.width*r.height)) && ((r.width*r.height) <= maxRectSize))
			temp.push_back(ROI2); // keep only regions within the cell size range for comparison
		//	imshow("resultImage2", resultImage);
		//waitKey();
	}

	for (int k = temp.size() - 1; k >= 0; k--)
		sudokuRect.push_back(temp[k]); // contours come out in reverse order, so insert reversed to start from the first cell

	temp.clear();



	vector<int> testSortingOk;
	//각 줄이 순서대로 소팅이 안되있으므로 각 줄마다 왼쪽부터 오른쪽으로 되게 바꿈
	for (int i = 0; i < 81; i += 9)
	{
		int ty = sudokuRect[i].y;
		int ti = i;
		testSortingOk.clear();
		for (int j = i; j < ti + 9; j++)
		{
			testSortingOk.push_back(sudokuRect[j].x);
		}
		sort(testSortingOk.begin(), testSortingOk.end());
		int k = 0;
		for (int j = i; j < ti + 9; j++)
		{
			if (testSortingOk[k] == sudokuRect[j].x)
				temp.push_back(sudokuRect[j]);
			else
			{
				for (int t = i; t <= ti + 9; t++)
				{
					if (testSortingOk[k] == sudokuRect[t].x)
					{
						temp.push_back(sudokuRect[t]);
						break;
					}
				}
			}

			k++;
		}

	}

	sudokuRect.clear();
	erode(resultImage, resultImage, crossKernel, Point(-1, -1), 1);
	dilate(resultImage, resultImage, elipseKernel, Point(-1, -1), 1);
	for (int k = 0; k < temp.size(); k++)
		sudokuRect.push_back(temp[k]);
	for (int k = 0; k < sudokuRect.size(); k++)
	{
		rectangle(resultImage, sudokuRect[k], Scalar(128, 255, 255), 2); // draw
		imshow("testSudoImage34", resultImage);
		waitKey();
	}

	const char *path = "/tessdate";
	TessBaseAPI tess; // declare the Tesseract engine

	//TessBaseAPI *api = new TessBaseAPI();

	tess.Init(NULL, "eng", tesseract::OEM_DEFAULT); // initialize recognition; only digits are expected here
	/*
		if (api->Init(NULL, "eng"))
		{
			fprintf(stderr, "could not initailze teseract\n");
			exit(1);
		}
		*/

	string tessRecogNum[81][1]; // OCR returns text, so keep the raw strings here
	int sudoTestte[81]; // storage for the parsed sudoku digits
	int d = 0;

	for (int i = 0; i<sudokuRect.size(); i++)
	{
		cvtColor(testSudo, testSudoImage, COLOR_GRAY2BGR);
		rectangle(testSudoImage, sudokuRect[i], Scalar(128, 255, 255), 2); // draw

		Mat cutImage = testSudo(sudokuRect[i]);
		//	imshow("cutImage", cutImage);
		Mat tempImage = repeat(cutImage, 1, 1);


		Mat numberImage = tempImage.clone(); // copy of the cell crop
		//imshow("numberImage", numberImage);
		imwrite("numberImage.jpg", numberImage, params);
		Mat resultNImage(tempImage.size(), CV_8UC3);
		waitKey();

		int height = tempImage.size().height;
		int width = tempImage.size().width;

		int continueNum = 0; // counter of consecutive background pixels
		int testPixel = (height*width); // total pixel count

		// if the cell turns out to be empty, store 0
		/*
		for (int a = 0; a < tempImage.rows; a++)
		{
			for (int b = 0; b < tempImage.cols; b++)
			{
				float te = tempImage.at<uchar>(a, b);
				if (continueNum >= testPixel)
					break;
				if (te >= 190 && te <= 200)
					tempImage.at<uchar>(a, b) = 255;

				if (te > 200)
					continueNum++;
			}
			if (continueNum >= (testPixel - 10))
				break;
		}
		*/
		if (continueNum >= testPixel){
			sudoTestte[d++] = atoi("0");
			tessRecogNum[i][0] = '0';
			continue;
		}

		//imshow("tempImage", tempImage);

		//waitKey();

		tess.SetImage((uchar*)tempImage.data, tempImage.size().width, tempImage.size().height, tempImage.channels(), tempImage.step1()); // pass the image data, width, height, channels and stride

		//Pix *image = pixRead("numberImage.jpg");
		//api->SetImage(image);
//
		tess.Recognize(0); // run recognition
		const char * out = tess.GetUTF8Text(); // fetch the result as UTF-8 text
		//const char * out = api.GetUTF8Text();
		tessRecogNum[i][0] = out;
		if (atoi(out) > 9 || atoi(out) < 0)
			sudoTestte[d++] = 0;
		else
			sudoTestte[d++] = atoi(out);
		//printf("%s", out);
		delete [] out; // free the buffer returned by GetUTF8Text()

		//cout << endl;

		imshow("testSudoImage", testSudoImage);
		//waitKey();
	}

	cout << endl;
	for (int i = 0; i < 81; i++)
	{
		if (i % 9 == 0 && i>1)
			cout << endl;
		cout << sudoTestte[i] << " ";

	}


	waitKey();

	tess.End(); // release Tesseract resources

	return 0;
}
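Since each crop in the program above contains at most one digit, Tesseract's single-character page segmentation mode plus a digit whitelist would be a natural refinement; a hedged sketch (not part of the original), to be placed right after tess.Init() and before the recognition loop:

// Hedged sketch: both calls are standard TessBaseAPI.
tess.SetPageSegMode(tesseract::PSM_SINGLE_CHAR);             // one character per image
tess.SetVariable("tessedit_char_whitelist", "0123456789");   // digits only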
Example #22
int main(int argc, char* argv[]) {
 
    const char* name_main = "Plate Recognition";
    const char* name_edge = "Edge Window";
    #if defined(__DMDEBUG)
    const char* name_mask = "Mask Window";
    #endif
    const char* name_bwframe = "BW Window";
    const char* name_crop = "CROP Window";
    const char* name_ctrl = "Control Window";


    unsigned int i;
    Mat frame;
    Mat bwframe;
    Mat edge;

    TessBaseAPI *OCR = new TessBaseAPI();
    if(OCR->Init(NULL, "eng")) {
      cout << "Could not initialize tesseract OCR\n";
      return -1;
    }

    if( argc != 2) {
      cout << "You must specify a file to load\n";
      return -1;
    }
     
    VideoCapture cap(argv[1]);
    if(!cap.isOpened()) {
      cout << "Cannot open file " << argv[1] << endl;
      return -1;
    }

    namedWindow(name_main, 0);
    namedWindow(name_edge, 0);
    #if defined(__DMDEBUG)
    namedWindow(name_mask, 0);
    #endif
    namedWindow(name_bwframe, 0);
    namedWindow(name_ctrl, 0);
    //#if defined(__DMDEBUG)
    namedWindow(name_crop, 0);

    namedWindow("AAA", 0);

    //#endif
    createTrackbar( "Contour perimeter", name_ctrl, &high_switch_value, 100, switch_callback_h, NULL );
    createTrackbar( "Min area", name_ctrl, &minimum_area, 100000, NULL, NULL);
    createTrackbar( "Max area", name_ctrl, &maximum_area, 100000, NULL, NULL);
    createTrackbar( "Threshold", name_ctrl, &threshold_val, 255, NULL, NULL);
    //createTrackbar( "thr size", name_ctrl, &athr_size, 255, athr_callback, &athr_size);
    createTrackbar( "Plate Thr", name_ctrl, &threshold2_val, 255, NULL, NULL);
    //createTrackbar( "PThr size", name_ctrl, &athr2_size, 255, athr_callback, &athr2_size);

    waitKey();

    for(;;) {

      //try {
         cap >> frame;
      //} catch(int e) {
      //   frame = imread(argv[1], 3);
      //}
      cvtColor(frame, bwframe, CV_RGB2GRAY);

      imshow(name_bwframe, bwframe); 

      // extract V channel from HSV image
      Mat vframe(frame.rows, frame.cols, CV_8UC1);
      Mat tvframe(frame.rows, frame.cols, CV_8UC3);
      cvtColor(frame, tvframe, CV_RGB2HSV);
      int from_to[] = { 2,0 };
      mixChannels( &tvframe, 1, &vframe,1, from_to, 1);

      // maximize contrast
      // https://github.com/oesmith/OpenANPR/blob/master/anpr/preprocess.py
      Mat el=getStructuringElement(MORPH_ELLIPSE, Size(3,3), Point(1,1));
      Mat bh(vframe.rows, vframe.cols, CV_8UC1);
      Mat th(vframe.rows, vframe.cols, CV_8UC1);
      Mat s1(vframe.rows, vframe.cols, CV_8UC1);
      morphologyEx(vframe, th, MORPH_TOPHAT, el, Point(1,1), 1);
      morphologyEx(vframe, bh, MORPH_BLACKHAT, el, Point(1,1), 1);
      add(vframe, th, s1);
      subtract(s1,bh, vframe);

      // Smooth image to remove noise
      GaussianBlur(vframe, vframe, Size(5,5), 5, 5, BORDER_DEFAULT);

      imshow("AAA", vframe);

      // apply your filter
      Canny(bwframe, edge, threshold_val, 255);
      //Canny(vframe, edge, threshold_val, 255);
      //threshold( bwframe, edge, threshold_val, 255, CV_THRESH_BINARY );
      //threshold( vframe, edge, threshold_val, 255, CV_THRESH_BINARY );
      //adaptiveThreshold(bwframe, edge, threshold_val, 
      //                  CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, athr_size, 5);
                       // OR CV_ADAPTIVE_THRES_MEAN_C
      //adaptiveThreshold(vframe, edge, threshold_val, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, athr_size, 9);
      // find the contours
      vector< vector<Point> > contours;

      findContours(edge, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
      vector<double> areas(contours.size());     
      for( i = 0; i < contours.size(); i++) {
         areas[i] = fabs(contourArea(Mat(contours[i])));
         if(areas[i] >= minimum_area && areas[i] <= maximum_area) {

            vector<Point> results;
            approxPolyDP(Mat(contours[i]), results, arcLength(Mat(contours[i]),1)*perimeter_constant,1);
            if (results.size() == 4 && isContourConvex(results)){
               // you could also reuse bwframe here
               Mat mask = Mat::zeros(bwframe.rows, bwframe.cols, CV_8UC1);
 
               // CV_FILLED fills the connected components found
               drawContours(mask, contours, i, Scalar(255,255,255), CV_FILLED);

               // draw contours to cover plate external lines
               drawContours(bwframe, contours, i, Scalar(255,255,255), 2, 2);

               Rect box = boundingRect(Mat(contours[i]));


               // let's create a new image now
               Mat crop(frame.rows, frame.cols, CV_8UC1);
          
               // set background color to black
               //crop.setTo(Scalar(0,0,0));
               crop.setTo(Scalar(0));
          
               // copy the masked plate region into the crop image
               bwframe.copyTo(crop, mask);
               

               // normalize so imwrite(...)/imshow(...) shows the mask correctly!
               normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

               //rectangle(crop, box, Scalar(0,0,255) ,3);
               Mat roi(crop, box);


               // Image for OCR
               Mat ocrimg(roi.rows, roi.cols, CV_8UC1);
               ocrimg.setTo(Scalar(255));
               roi.copyTo(ocrimg, roi);
               int scalefactor = 1000/ocrimg.cols;
               //cout << "Scale factor: " << scalefactor << endl;;
               resize(ocrimg, ocrimg, Size(0,0), scalefactor, scalefactor, CV_INTER_CUBIC);
               threshold(ocrimg, ocrimg, threshold2_val, 255, CV_THRESH_BINARY );
               //adaptiveThreshold(ocrimg, ocrimg, threshold2_val, 
               //                  CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, athr2_size, 5);
               //Canny(ocrimg, ocrimg, threshold2_val, 255);

               //#if defined(__DMDEBUG)
               imshow(name_crop, ocrimg);
               //#endif

               rectangle(frame, box, Scalar(0,0,255), 3);
               rectangle(bwframe, box, Scalar(0,0,255), 3);
               #if defined(__DMDEBUG)
               imshow(name_mask, mask);
               #endif

               //OCR->TesseractRect(roi.data, 1, roi.step1(), 0,0,roi.cols, roi.rows);
               OCR->SetImage((uchar*)ocrimg.data, ocrimg.size().width, ocrimg.size().height, 
                             ocrimg.channels(), ocrimg.step1());
               OCR->Recognize(0);
               char* detected_text = OCR->GetUTF8Text();
               //cout << "Size text: " << strlen(detected_text) << endl;
               if(strlen(detected_text) > 0) {
                  
                  cout << "License plate number: " << detected_text << endl;
                  //#if !defined(__DMDEBUG)
                  //imshow(name_crop, ocrimg);
                  //#endif
                  imshow(name_main, frame);
                  imshow(name_bwframe, bwframe);
                  imshow(name_edge, edge);
                  imshow(name_ctrl, ocrimg);
                  #if defined(__DMDEBUG)
                  imwrite("antani.jpg", ocrimg);
                  #endif
                  waitKey();
                  //#endif
               }
               delete [] detected_text; // free the buffer returned by GetUTF8Text()
            }
          } 
       }
       imshow(name_main, frame);
       imshow(name_bwframe, bwframe);
       imshow(name_edge, edge);
       if(waitKey(30) >= 0) break;
    }
    OCR->End();    // release Tesseract resources
    delete OCR;
    return 0;
}