void OclBM::process(unsigned char* imgBuf){ // convert to gray image cv::Mat clrImg(imgHeight,2*imgWidth,CV_8UC4,imgBuf); cv::Mat grayImg; cv::cvtColor(clrImg,grayImg,CV_BGRA2GRAY); LOGI("[colin] Created cv GRAY"); // remaping image cv::Mat Limg=grayImg(cv::Range::all(),cv::Range(0,imgWidth)); cv::Mat Rimg=grayImg(cv::Range::all(),cv::Range(imgWidth,2*imgWidth)); process(Limg, Rimg); }
//Reconocer caras en una imagen bool CompleteFaceRecognizer::recognizeFaces(const cv::Mat& img, std::vector<Face> &faces, cv::Mat & output, int recognizeWidth, int recognizeHeight, float scale, int minWidth, int minHeight, int maxWidth, int maxHeight, int numVecinos) { //Rectángulos de las caras encontradas y la imagen en escala de grises std::vector<cv::Rect> foundRectFaces; cv::Mat grayImg; //Encontrar caras this->faceDetector->detectLocatedFaces(img, foundRectFaces, grayImg, scale, minWidth, minHeight, maxWidth, maxHeight, numVecinos); //Preparar imagen final output = img.clone(); //Para cada cara encontrada, reconocerla, añadirla a la lista de caras y dibujarla sobre la imagen final for (unsigned int i = 0; i < foundRectFaces.size(); ++i) { const cv::Rect& rectFace = foundRectFaces[i]; //Recortar cara cv::Mat grayFace = grayImg(rectFace); //Aplicar upsampling cv::Mat upsampledFace; this->upsampler->upSample(grayFace, upsampledFace, recognizeHeight, recognizeWidth); //Reconocer cara double confidence; int clase = this->faceRecognizer->predict(upsampledFace,confidence); //Almacenar cara detectada en su formato original faces.push_back(Face(img(rectFace), clase, confidence)); //Dibujar detección sobre la imagen según sea detección positiva o negativa if (clase == -1) { //Señalar la cara en rojo en la imagen de salida Point pt1(rectFace.x, rectFace.y); Point pt2((rectFace.x + rectFace.height), (rectFace.y + rectFace.width)); rectangle(output, pt1, pt2, Scalar(0, 0, 255), 2, 8, 0); } else { //Señalar la cara en verde en la imagen de salida Point pt1(rectFace.x, rectFace.y); Point pt2((rectFace.x + rectFace.height), (rectFace.y + rectFace.width)); rectangle(output, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0); } } //Devolver si se ha encontrado alguna cara return faces.size() > 0; }
// Collapse a 3-channel 8-bit image to a single-channel float image by
// averaging the three channels. The result is additionally divided by 100
// (i.e. value = (c0+c1+c2) / 300.0).
Mat toGray(Mat img){
    // One float channel is enough for the output.
    Mat grayImg(img.rows, img.cols, CV_32FC1);
    // Walk the image row by row; ptr<>() accounts for each Mat's row stride.
    for(int row = 0; row < img.rows; ++row){
        float* dst = grayImg.ptr<float>(row);
        const unsigned char* src = img.ptr<unsigned char>(row);
        for(int col = 0; col < img.cols; ++col){
            const unsigned char* px = src + 3 * col;
            // Convert to float before summing to avoid precision issues.
            dst[col] = ((float)px[0] + (float)px[1] + (float)px[2]) / (3.0 * 100); //TODO take out scale factor 100
        }
    }
    return grayImg;
}
// Extract per-pixel feature channels for this dataset sample.
// Depending on conf, either builds 32 dense feature channels (Lab color,
// gradient magnitude/orientation, |I_x|,|I_y|,|I_xx|,|I_yy|, HOG bins,
// min/max-filtered copies) or a set of integral images (gray + per-RGB-channel),
// and appends a depth integral image when a second image is present.
// NOTE(review): the cv::Mat objects allocated with `new` here are stored as raw
// pointers in `feature`; ownership/cleanup is presumably handled elsewhere — verify.
// Returns 0 unconditionally.
int CDataset::extractFeatures(const CConfig& conf){
    int imgRow = this->img.at(0)->rows, imgCol = this->img.at(0)->cols;
    cv::Mat *integralMat;
    if(conf.learningMode != 1){
        if(conf.rgbFeature == 1){
            // if got rgb image only, calc hog feature:
            // allocate all 32 single-channel feature planes up front
            feature.clear();
            feature.resize(32);
            for(int i = 0; i < 32; ++i)
                feature.at(i) = new cv::Mat(imgRow, imgCol, CV_8UC1);
            // channel 0: grayscale version of the input image
            cv::cvtColor(*img.at(0), *(feature.at(0)), CV_RGB2GRAY);
            // first-order derivatives; channels 3/4 hold |I_x|,|I_y| scaled by 0.25
            cv::Mat I_x(imgRow, imgCol, CV_16SC1);
            cv::Mat I_y(imgRow, imgCol, CV_16SC1);
            cv::Sobel(*(feature.at(0)), I_x, CV_16S, 1, 0);
            cv::Sobel(*(feature.at(0)), I_y, CV_16S, 0, 1);
            cv::convertScaleAbs(I_x, *(feature[3]), 0.25);
            cv::convertScaleAbs(I_y, *(feature[4]), 0.25);
            // Orientation of gradients (channel 1)
            for(int y = 0; y < img.at(0)->rows; y++)
                for(int x = 0; x < img.at(0)->cols; x++) {
                    // Avoid division by zero: nudge I_x away from 0 keeping its sign
                    float tx = (float)I_x.at<short>(y, x) + (float)copysign(0.000001f, I_x.at<short>(y, x));
                    // Scaling [-pi/2 pi/2] -> [0 80*pi]
                    feature.at(1)->at<uchar>(y, x) = (uchar)(( atan((float)I_y.at<short>(y, x) / tx) + 3.14159265f / 2.0f ) * 80);
                    //std::cout << "scaling" << std::endl;
                    feature.at(2)->at<uchar>(y, x) = (uchar)sqrt((float)I_x.at<short>(y, x)* (float)I_x.at<short>(y, x) + (float)I_y.at<short>(y, x) * (float)I_y.at<short>(y, x));
                }
            // Magunitude of gradients (channel 2)
            // NOTE(review): this recomputes channel 2 in integer arithmetic,
            // overwriting the float-based values written in the loop above.
            for(int y = 0; y < img.at(0)->rows; y++)
                for(int x = 0; x < img.at(0)->cols; x++ ) {
                    feature.at(2)->at<uchar>(y, x) = (uchar)sqrt(I_x.at<short>(y, x)*I_x.at<short>(y, x) + I_y.at<short>(y, x) * I_y.at<short>(y, x));
                }
            // 9 HOG orientation bins derived from channels 1 (orientation) and 2 (magnitude)
            hog.extractOBin(feature[1], feature[2], feature, 7);
            // calc I_xx I_yy (second derivatives, channels 5/6)
            cv::Sobel(*(feature.at(0)), I_x, CV_16S, 2, 0);
            cv::Sobel(*(feature.at(0)), I_y, CV_16S, 0, 2);
            cv::convertScaleAbs(I_x, *(feature[5]), 0.25);
            cv::convertScaleAbs(I_y, *(feature[6]), 0.25);
            // channels 0-2 are replaced by the L, a, b planes of the Lab image
            cv::Mat img_Lab;
            cv::cvtColor(*img.at(0), img_Lab, CV_RGB2Lab);
            cv::vector<cv::Mat> tempfeature(3);
            cv::split(img_Lab, tempfeature);
            for(int i = 0; i < 3; ++i)
                tempfeature.at(i).copyTo(*(feature.at(i)));
            // min max filter: channels 16-31 are min-filtered copies of 0-15,
            // channels 0-15 are max-filtered in place
            for(int c = 0; c < 16; ++c)
                minFilter(feature[c], feature[c + 16], 5);
            for(int c = 0; c < 16; ++c)
                maxFilter(feature[c], feature[c], 5);
        }else{
            feature.clear();
            // calc gray integral image
            // NOTE(review): grayImg is created with imgRow + 1 rows but cvtColor
            // reallocates it to the source size anyway — the +1 looks unintended.
            cv::Mat grayImg(imgRow + 1, imgCol, CV_8U);
            cv::cvtColor(*img.at(0), grayImg, CV_RGB2GRAY);
            integralMat = new cv::Mat(imgRow + 1, imgCol + 1, CV_64F);
            cv::integral(grayImg, *integralMat, CV_64F);
            feature.push_back(integralMat);
            // calc r g b integral image (one per split channel)
            std::vector<cv::Mat> splittedRgb;
            cv::split(*img.at(0), splittedRgb);
            for(int i = 0; i < splittedRgb.size(); ++i){
                integralMat = new cv::Mat(imgRow + 1, imgCol + 1, CV_64F);
                cv::integral(splittedRgb.at(i), *integralMat, CV_64F);
                feature.push_back(integralMat);
            }
            featureFlag = 1;
        }
    }
    // Optional depth image (img[1]): normalize to 8-bit if needed, then append
    // its integral image as an extra feature channel.
    if(img.size() > 1){
        cv::Mat tempDepth = cv::Mat(img.at(0)->rows, img.at(0)->cols, CV_8U);// = *img.at(1);
        if(img.at(1)->type() != CV_8U)
            // scale raw depth into [0,255] using the configured working range
            img.at(1)->convertTo(tempDepth, CV_8U, 255.0 / (double)(conf.maxdist - conf.mindist));
        else
            tempDepth = *img.at(1);
        integralMat = new cv::Mat(imgRow + 1, imgCol + 1, CV_64F);
        cv::integral(tempDepth, *integralMat, CV_64F);
        feature.push_back(integralMat);
        featureFlag = 1;
    }
    return 0;
}
// Detect faces (and, per face, the two largest eye candidates) in an image.
// Returns one FacePtr per detected face; eyes are set only when at least two
// eye candidates were found inside the face region, converted to full-image
// coordinates.
QList<FacePtr> FaceDetector::detect(const MatPtr image)
{
    // Make grayscaled version of the image, and equalize the histogram
    // which normalizes the brightness and increases the contrast in the
    // image.
    cv::Mat grayImg;
    cvtColor(*image.get(), grayImg, cv::COLOR_BGR2GRAY);
    equalizeHist(grayImg, grayImg);

    // Detect faces with scale factor 1.1, minimum 3 neighbors and
    // minimum 80x80 face size.
    std::vector<cv::Rect> faces;
    faceCas.detectMultiScale(grayImg, faces, 1.1, 3, 0, cv::Size(80, 80));

    QList<FacePtr> results;
    for (auto it = faces.begin(); it != faces.end(); ++it) {
        const auto &f = *it;
        auto face = FacePtr(new Face);
        face->setFace(f);

        // Detect eye candidates inside the face region with scale factor
        // 1.1, 3 min. neighbors and min size of 30x30.
        cv::Mat facePart = grayImg(f);
        std::vector<cv::Rect> eyes;
        eyesCas.detectMultiScale(facePart, eyes, 1.1, 3, 0, cv::Size(30, 30));
        if (eyes.size() >= 2) {
            // Track the two largest candidates. Invariant throughout:
            // (eye1, area1) is the second-largest, (eye2, area2) the largest.
            // BUG FIX: the previous version kept a branch for
            // `area > area2 && area < area1`, which is unreachable under this
            // invariant, and handled a new maximum with an assign-then-swap
            // dance; both are replaced by the straightforward update below.
            auto &eye1 = eyes[0], &eye2 = eyes[1];
            int area1 = eye1.width * eye1.height,
                area2 = eye2.width * eye2.height;
            if (area1 > area2) {
                qSwap<cv::Rect>(eye1, eye2);
                qSwap<int>(area1, area2);
            }
            for (std::size_t i = 2; i < eyes.size(); i++) {
                const auto &eye = eyes[i];
                const int area = eye.width * eye.height;
                if (area > area2) {
                    // New largest: old largest becomes second-largest.
                    eye1 = eye2; area1 = area2;
                    eye2 = eye;  area2 = area;
                } else if (area > area1) {
                    // New second-largest.
                    eye1 = eye;  area1 = area;
                }
            }

            // Since we are only looking at the sub-region of the face we
            // need to convert into image coordinates.
            eye1.x += f.x;
            eye1.y += f.y;
            eye2.x += f.x;
            eye2.y += f.y;
            face->setEyes(eye1, eye2);
        }
        results << face;
    }
    return results;
}
int main(int argc, char** argv) { int height ,width ,step ,channels; int same, lighter; float thresh; uchar *dataB, *dataG, *dataR, *dataGray, *dataD; uchar b1, g1, r1, b2, g2, r2; int w = 3; int th = 50; int idx1, idx2; cv::Mat img = cv::imread(argv[1]); height = img.rows; width = img.cols; cv::namedWindow("Image0", cv::WINDOW_NORMAL); cv::Mat textImg(1000, 1200, CV_8UC1, cv::Scalar(255)); cv::putText(textImg, "Original Image:", cv::Point(400, 500), cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(0, 0, 0)); //cv::imshow("Image0", textImg); //cv::waitKey(); //cv::imshow("Image0", img); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Apply SUSAN algorithm to detect edge and cross.", cv::Point(200, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); std::vector<cv::Mat> imgChannels; cv::split(img, imgChannels); cv::Mat dstSusan(height, width, CV_8UC1, cv::Scalar(0)); cv::Mat grayImg(height, width, CV_8UC1, cv::Scalar(0)); step = imgChannels[0].step[0]; dataB = imgChannels[0].data; dataG = imgChannels[1].data; dataR = imgChannels[2].data; dataGray = grayImg.data; dataD= dstSusan.data; for (int x = w; x < width-w; x++) { for (int y = w; y < height-w; y++) { same = 0; idx1 = x + y * step; b1 = dataB[idx1]; g1 = dataG[idx1]; r1 = dataR[idx1]; for (int u = 0; u < w+1; u++) { for (int v = 0; v < w+1; v++) { if (u + v == 0) { continue; } idx2 = (x+u) + (y+v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (calc_dist(b1, g1, r1, b1, g2, r2) < th) { same += 1; } idx2 = (x-u) + (y+v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (u != 0 && calc_dist(b1, g1, r1, b1, g2, r2) < th) { same += 1; } idx2 = (x+u) + (y-v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (v != 0 && calc_dist(b1, g1, r1, b1, g2, r2) < 
th) { same += 1; } idx2 = (x-u) + (y-v) * step; b2 = dataB[idx2]; g2 = dataG[idx2]; r2 = dataR[idx2]; if (u != 0 && v != 0 && calc_dist(b1, g1, r1, b1, g2, r2) < th) { same += 1; } } } dataD[idx1] = uchar(255.0 * float(same) / ((2*w+1) * (2*w+1) - 1)); if (dataD[idx1] < 128) { dataD[idx1] = 255; } else { dataD[idx1] = 0; } } } //cv::imshow("Image0", dstSusan); cv::imwrite("outimg_1.jpg", dstSusan); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Apply Hough algorithm to detect lines.", cv::Point(300, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::waitKey(); //cv::imshow("Image0", textImg); //cv::waitKey(); //Hough line detection std::vector<cv::Vec4i> lines; HoughLinesP(dstSusan, lines, 1, CV_PI/180, 80, 500, 20); double thetaSum = 0.0; int thetaNum = 0; double theta; for(size_t i = 0; i < lines.size(); i++) { cv::Vec4i l = lines[i]; cv::line(img, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(186,88,255), 1, CV_AA); if (l[0] == l[2]) { theta = CV_PI / 2; } else { theta = std::atan(-double(l[3]-l[1]) / (l[2] - l[0])); } if (theta >= -CV_PI / 4 && theta <= CV_PI / 4) { thetaSum += theta; thetaNum += 1; } } theta = -thetaSum / thetaNum * 180 / CV_PI; //cv::imshow("Image0", img); cv::imwrite("outimg_2.jpg", img); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); std::ostringstream textStr; textStr << "Find " << lines.size() << " lines."; cv::putText(textImg, textStr.str(), cv::Point(500, 400), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); textStr.str(std::string()); textStr.clear(); textStr << "Rotating angle is " << theta << " degree."; cv::putText(textImg, textStr.str(), cv::Point(350, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Next: Rotating the image.", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); 
cv::putText(textImg, "Press any key to continue...", cv::Point(400, 700), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); img.release(); img = cv::imread(argv[1]); imgChannels[0].release(); imgChannels[1].release(); imgChannels[2].release(); imgChannels.clear(); cv::Mat rotateImg(height, width, CV_8UC3); cv::Point2f center; center.x = float(width / 2.0 + 0.5); center.y = float(height / 2.0 + 0.5); cv::Mat affineMat = getRotationMatrix2D(center, theta, 1); cv::warpAffine(img,rotateImg, affineMat, cv::Size(width, height), CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS); //cv::imshow("Image0", rotateImg); cv::imwrite("outimg_3.jpg", rotateImg); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Transform the image to gray scale.", cv::Point(300, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); cv::split(rotateImg, imgChannels); dataB = imgChannels[0].data; dataG = imgChannels[1].data; dataR = imgChannels[2].data; step = imgChannels[0].step[0]; //imgChannels[2].setTo(cv::Scalar(0)); for (int x = 0; x < rotateImg.cols; x++) { for (int y = 0; y < rotateImg.rows; y++) { int idx = x + y * step; if (dataB[idx] < dataG[idx] && dataB[idx] < dataR[idx]) { dataG[idx] = dataB[idx]; dataR[idx] = dataB[idx]; } if (dataG[idx] < dataB[idx] && dataG[idx] < dataR[idx]) { dataB[idx] = dataG[idx]; dataR[idx] = dataG[idx]; } if (dataR[idx] < dataB[idx] && dataR[idx] < dataG[idx]) { dataB[idx] = dataR[idx]; dataG[idx] = dataR[idx]; } } } cv::Mat filterRedImg(rotateImg.rows, rotateImg.cols, CV_8UC3, cv::Scalar::all(255)); cv::merge(imgChannels, filterRedImg); cv::cvtColor(filterRedImg, grayImg, CV_BGR2GRAY); //cv::imshow("Image0", grayImg); cv::imwrite("outimg_4.jpg", grayImg); //cv::waitKey(); 
textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Clean the noise.", cv::Point(450, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); step = grayImg.step[0]; for (int x = 0; x < width; x++) { for (int y = 0; y < height; y++) { int idx = x + y * step; if (grayImg.data[idx] > 100) //if(!is_gray(dataB[idx], dataG[idx], dataR[idx])) { grayImg.data[idx] = 255; } } } //cv::imshow("Image0", grayImg); cv::imwrite("outimg_5.jpg", grayImg); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Next: Digitizing the curves.", cv::Point(400, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); cv::putText(textImg, "Press any key to continue...", cv::Point(400, 600), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); cv::Mat newImg(height, width, CV_8UC3, cv::Scalar::all(255)); SegmentFactory segFactory = SegmentFactory(0); std::vector<Segment *> segments; segFactory.makeSegments(grayImg, segments); std::vector<Segment *>::iterator itr; for (itr = segments.begin(); itr != segments.end(); itr++) { Segment *seg = *itr; std::vector<SegmentLine *>::iterator itr_l; for (itr_l = seg->m_lines.begin(); itr_l != seg->m_lines.end(); itr_l++) { SegmentLine *line = *itr_l; cv::line(newImg, cv::Point(line->m_x1, line->m_y1), cv::Point(line->m_x2, line->m_y2), cv::Scalar(186,88,255), 1, CV_AA); std::cout << line->m_x1 << ", " << line->m_y1 << ", " << line->m_x2 << ", " << line->m_y2 << std::endl; } } //cv::imshow("Image0", newImg); cv::imwrite("outimg_6.jpg", newImg); //cv::waitKey(); textImg.setTo(cv::Scalar(255, 255, 255)); cv::putText(textImg, "Done.", cv::Point(550, 500), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255)); //cv::imshow("Image0", textImg); //cv::waitKey(); return 0; }