void objectDetectionCallback(const sensor_msgs::ImageConstPtr& original_image){
	cv_bridge::CvImagePtr cv_ptr;
	try
    {
        //Always copy, returning a mutable CvImage
        //OpenCV expects color images to use BGR channel order.
        cv_ptr = cv_bridge::toCvCopy(original_image, enc::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        //if there is an error during conversion, display it
        ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
        return;
    }
	
	//Color Detection
	cv::Mat img_hsv,img_mask;
	cv::cvtColor(cv_ptr->image,img_hsv,CV_BGR2HSV);
	inRange(img_hsv, cv::Scalar(LowerH,LowerS,LowerV), cv::Scalar(UpperH,UpperS,UpperV),img_mask);
	
	//Circle Detection
	GaussianBlur(img_mask, img_mask, cv::Size(9,9),2,2);
	std::vector<cv::Vec3f> circles;
	HoughCircles(img_mask, circles, CV_HOUGH_GRADIENT,1,img_mask.rows/8,HC_Param1,HC_Param2,0,0);
	//Line Detection
	cv::Mat dst;
	cv::Canny(img_mask,dst,10,200,3);
	std::vector<cv::Vec4i> lines;
	HoughLinesP(dst, lines, 1, CV_PI/180, HL_Threshold, HL_MinLineLength, 40 );

	for( size_t i = 0; i < lines.size(); i++ )
	{
		cv::line( cv_ptr->image, cv::Point(lines[i][0], lines[i][1]),
		          cv::Point(lines[i][2], lines[i][3]), cv::Scalar(0,255,0), 3, 8 );
	}

    for( size_t i = 0; i < circles.size(); i++ )
    {
        cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        // circle center
        circle( cv_ptr->image, center, 3, cv::Scalar(0,255,0), -1, 8, 0 );
        // circle outline
        circle( cv_ptr->image, center, radius, cv::Scalar(0,255,0), 3, 8, 0 );
    }

    cv::imshow(WINDOW, img_mask);
	cv::imshow("Processed", cv_ptr->image);
	cv::waitKey(3);

    //Convert the CvImage to a ROS image message and publish it on the "camera/image_processed" topic.
    pub.publish(cv_ptr->toImageMsg());

}
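Below is a minimal sketch of how this callback might be wired into a ROS node. The topic names, the WINDOW constant, and the global image_transport publisher pub are placeholder assumptions; the threshold globals (LowerH/LowerS/LowerV, HC_Param1/HC_Param2, HL_Threshold, HL_MinLineLength) are assumed to be defined elsewhere in the original file.

// Hypothetical node wiring for the callback above (topic names and globals are assumptions).
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <opencv2/highgui/highgui.hpp>

image_transport::Publisher pub;            // published to from the callback
static const char WINDOW[] = "Mask";       // window shown by the callback

int main(int argc, char** argv)
{
	ros::init(argc, argv, "hough_circle_node");
	ros::NodeHandle nh;
	image_transport::ImageTransport it(nh);

	cv::namedWindow(WINDOW);
	// Hand each incoming frame to the detection callback.
	image_transport::Subscriber sub = it.subscribe("camera/image_raw", 1, objectDetectionCallback);
	pub = it.advertise("camera/image_processed", 1);

	ros::spin();
	return 0;
}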
std::vector<Region *>*  HoughCircleDetection::processFrame(cv::Mat& frame) {
	//code taken from http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/hough_circle/hough_circle.html
	cv::Mat newFrame;
	cvtColor(frame, newFrame, CV_BGR2GRAY);
	GaussianBlur(newFrame, newFrame, cv::Size(9, 9), 2, 2);
	std::vector<cv::Vec3f> circles;
	HoughCircles(newFrame, circles, CV_HOUGH_GRADIENT, 1, frame.rows/8, 40, 40, 0, 0 );

	for (unsigned int i = 0; i < circles.size(); i++) {
		Color col = getColor(frame, circles[i][0], circles[i][1], circles[i][2]);
		regionList->push_back(new Region(circles[i][0], circles[i][1], circles[i][2], col, CIRCLE));
	}
	return regionList;
}
void CLEyeCameraCapture::CircleDetector(Mat& input, Mat& input_gray, vector<Vec3f>& circles, Point& center, int& radius)
{
	if(input.data != NULL){
		cvtColor(input, input_gray, CV_BGR2GRAY);
		GaussianBlur(input_gray, input_gray, Size(3, 3), 2, 2);
		HoughCircles(input_gray, circles, CV_HOUGH_GRADIENT, 2, input_gray.rows, 130, 50, 5, 20);
		for(size_t i = 0; i < circles.size(); i++)
		{
			center.x = cvRound(circles[i][0]);
			center.y = cvRound(circles[i][1]);
			radius = cvRound(circles[i][2]);
			circle(input, center, 3, Scalar(0, 255, 0), -1, 8, 0);
			circle(input, center, radius, Scalar(0, 0, 255), 3, 8, 0);
		}
	}
}
Example no. 4
//M is the number of rows of sub-regions
//N is the number of columns of sub-regions
//(assumes M divides img.rows and N divides img.cols evenly)
bool findcircles(Mat img, int M, int N){
    Mat dst, roi;
    bool found = false;
    for (int i = 0; i < img.rows; i += img.rows/M){
            for (int j = 0; j < img.cols; j += img.cols/N){
                    roi = img(Rect(j, i, img.cols/N, img.rows/M));
                    cvtColor(roi, dst, CV_RGB2GRAY);
                    GaussianBlur(dst, dst, Size(9,9), 2, 2);

                    vector<Vec3f> circles;
                    HoughCircles(dst, circles, CV_HOUGH_GRADIENT, 1, dst.rows/8, th, 100, 0, 0);
                    cout<<circles.size()<<endl;
                    if (!circles.empty())
                            found = true;
            }
    }
    return found;
}
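A short, hypothetical driver for findcircles; the image path, the 4x4 grid, and the definition of the Canny threshold th (which findcircles reads as a global) are illustrative assumptions.

// Hypothetical driver for findcircles (file name, grid size and th are assumptions).
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int th = 100;   // stands in for the global Canny threshold that findcircles expects

int main()
{
    Mat img = imread("board.png");          // any BGR image
    if (img.empty()) return 1;
    bool found = findcircles(img, 4, 4);    // scan a 4x4 grid of sub-regions
    cout << (found ? "circles found" : "no circles") << endl;
    return 0;
}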
vector<Point> THDUtil::circleDetector(Mat& imgThresholded, Mat& scene, int cannyThreshold, int accumulatorThreshold, int minSize, int maxSize)
{
	GaussianBlur(imgThresholded, imgThresholded, Size(3, 3), 2, 2);

	vector<Vec3f> circles;
	vector<Point> circleLocation;


	/// Apply the Hough Transform to find the circles
	HoughCircles(imgThresholded, circles, CV_HOUGH_GRADIENT, 1, imgThresholded.rows / 8, cannyThreshold, accumulatorThreshold, minSize, maxSize);

	for (size_t i = 0; i < circles.size(); i++)
	{
		Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
		circleLocation.push_back(center);
	}

	return circleLocation;

}
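A sketch of how circleDetector might be called: threshold a color range first, then pass the mask in. The HSV bounds, the Hough parameters, and the assumption that circleDetector is a static member of THDUtil are illustrative.

// Hypothetical caller: build a color mask, then hand it to circleDetector.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main()
{
	Mat scene = imread("scene.png");
	if (scene.empty()) return 1;

	Mat hsv, mask;
	cvtColor(scene, hsv, CV_BGR2HSV);
	inRange(hsv, Scalar(20, 100, 100), Scalar(30, 255, 255), mask);   // e.g. a yellow range

	// Assumes circleDetector is callable as a static member; otherwise call it on an instance.
	vector<Point> centers = THDUtil::circleDetector(mask, scene, 100, 30, 5, 100);
	for (size_t i = 0; i < centers.size(); i++)
		circle(scene, centers[i], 3, Scalar(0, 255, 0), -1);

	imshow("centers", scene);
	waitKey(0);
	return 0;
}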
void TrackingHoughCircles(const Mat &img) {
	Mat cimg = img.clone();
	CvSize size = img.size();

	//binary threshold, val = 235
	threshold(img, cimg, 235, 255, THRESH_BINARY);
	medianBlur(cimg, cimg, 5);

	//Hough transform	
	vector<Vec3f> circles;
	HoughCircles(cimg, circles, HOUGH_GRADIENT, 1, img.rows / 8, 20, 10, 0, 0);

	cvtColor(cimg, cimg, COLOR_GRAY2BGR);

	for (size_t i = 0; i < circles.size(); i++)
	{
		Vec3i c = circles[i];
		circle(cimg, Point(c[0], c[1]), c[2], Scalar(0, 0, 255), 1, LINE_AA);
		circle(cimg, Point(c[0], c[1]), 2, Scalar(255, 0, 0), 1, LINE_AA);
		//cout << "Center: " << c[0] << "; " << c[1] << "\n";
	}

	imshow("detected circles", cimg);	
}
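TrackingHoughCircles expects a single-channel image (it thresholds the input and only converts to BGR for drawing), so a caller would load or convert to grayscale first; the file name below is a placeholder.

// Hypothetical caller for TrackingHoughCircles (grayscale input assumed).
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
	Mat gray = imread("spots.png", 0);   // 0 = load as grayscale
	if (gray.empty()) return 1;
	TrackingHoughCircles(gray);
	waitKey(0);
	return 0;
}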
Example no. 7
bool Target::DetectTarget(Mat imgInput, Mat *imgOutput)
{
	Mat inputCopy, src_gray, src = imgInput, potential_target;
	bool detected_circle = false;
	
	(imgInput).copyTo(inputCopy);

	/// Convert it to gray
	cvtColor(src, src_gray, CV_BGR2GRAY);
	/// Reduce the noise so we avoid false circle detection
	GaussianBlur(src_gray, src_gray, Size(9, 9), 2, 2);

	vector<Vec3f> circles;
	
	/// Apply the Hough Transform to find the circles


	HoughCircles(src_gray, circles, CV_HOUGH_GRADIENT, 1, src_gray.rows / 2, lowThresh, upThresh, min_radius, max_radius);
	
	/// Draw the circles detected
	for (size_t i = 0; i < circles.size(); i++)
	{
		Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
		int radius = cvRound(circles[i][2]);
		// circle center
		circle(inputCopy, center, 3, Scalar(0, 255, 0), -1, 8, 0);
		// circle outline
		circle(inputCopy, center, radius, Scalar(0, 0, 255), 3, 8, 0);
		

		//ostringstream ss;
		//ss << radius;
		//string s = ss.str();
		//putText(inputCopy, s, Point(100, 150), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(20, 40, 80), 3, 8);

		try
		{
			//potential_target = inputCopy(Rect(abs(center.x - r_shild), abs(center.y - r_shild), r_shild * 2, r_shild * 2));
			detected_circle = true;
			*targetCenter = center;
		}
		catch(...)
		{
			detected_circle = false;
		}

	}


	if (detected_circle)
	{
		*imgOutput = inputCopy;//potential_target;
		//target = potential_target;
		return true;
	}
	else
	{
		*imgOutput = inputCopy;
		detected_circle = false;
		return false;
	}


}
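A hedged usage sketch for Target::DetectTarget; the Target class definition is not shown above, so this assumes a default-constructible instance whose targetCenter pointer and threshold/radius members (lowThresh, upThresh, min_radius, max_radius) are already initialized.

// Hypothetical caller (assumes Target is already configured; its members are not shown above).
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
	Mat frame = imread("target.png");
	if (frame.empty()) return 1;

	Target target;                 // assumed default constructor
	Mat annotated;
	bool hit = target.DetectTarget(frame, &annotated);
	imshow(hit ? "target found" : "no target", annotated);
	waitKey(0);
	return 0;
}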
Example no. 8
void Escena::procesar( Mat &frame )
{
     Mat matImagenActual = frame.clone();

     // PROCESSING

     // Declare a vector to store the detected faces
     std::vector<Rect> vectorCaras;
     vectorCaras.clear();

     // Detect faces across the whole image
     clasificadorCara.detectMultiScale( matImagenActual, vectorCaras, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size( 100,100 ) );

     for ( unsigned int i = 0; i < vectorCaras.size(); i++ )
     {
         // Draw the face rectangle
         //rectangle( matImagenActual, vectorCaras.at(i), Scalar( 0, 255, 0 ), linesWidth );

         // Declare and draw a container for the left eye

         Rect rectParaOjosIzquierdos( vectorCaras.at(i).x + vectorCaras.at(i).width/2, vectorCaras.at(i).y + vectorCaras.at(i).height*MARGEN_VERTICAL_CONTENEDOR_OJOS/100, vectorCaras.at(i).width*ANCHO_CONTENEDOR_OJO/100, vectorCaras.at(i).height*ALTO_CONTENEDOR_OJO/100);
         // rectangle( matImagenActual, rectParaOjosIzquierdos, Scalar( 0, 0, 200 ), linesWidth );

         // Declare and draw a container for the right eye
         Rect rectParaOjosDerechos( vectorCaras.at(i).x + vectorCaras.at(i).width*MARGEN_LATERAL_CONTENEDOR_OJOS/100, vectorCaras.at(i).y + vectorCaras.at(i).height*MARGEN_VERTICAL_CONTENEDOR_OJOS/100, vectorCaras.at(i).width*ANCHO_CONTENEDOR_OJO/100, vectorCaras.at(i).height*ALTO_CONTENEDOR_OJO/100);
         // rectangle( matImagenActual, rectParaOjosDerechos, Scalar( 0, 0, 200 ), linesWidth );

         // Declare a vector to store the detected left eyes
         std::vector<Rect> vectorOjosIzquierdos;
         vectorOjosIzquierdos.clear();

         // Detect left eyes in the Mat designated by rectParaOjosIzquierdos
         Mat matParaOjosIzquierdos( matImagenActual, rectParaOjosIzquierdos );
         clasificadorOjoIzquierdo.detectMultiScale( matParaOjosIzquierdos, vectorOjosIzquierdos, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size( 30, 30 ) );

         Rect ojoIzquierdoLocalAlContenedor;

         if ( vectorOjosIzquierdos.size() > 0 )
         {
             // Discard every left eye except the largest one
             ojoIzquierdoLocalAlContenedor = rectanguloMasGrande( vectorOjosIzquierdos );
             Rect rectanguloOjoIzquierdoDesplazado( rectParaOjosIzquierdos.x + ojoIzquierdoLocalAlContenedor.x, rectParaOjosIzquierdos.y + ojoIzquierdoLocalAlContenedor.y, ojoIzquierdoLocalAlContenedor.width, ojoIzquierdoLocalAlContenedor.height );
             rectangle( matImagenActual, rectanguloOjoIzquierdoDesplazado, Scalar( 255, 255, 255 ), linesWidth );
         }

         // Declare a vector to store the detected right eyes
         std::vector<Rect> vectorOjosDerechos;
         vectorOjosDerechos.clear();

         // Detect right eyes in the Mat designated by rectParaOjosDerechos
         Mat matParaOjosDerechos( matImagenActual, rectParaOjosDerechos );
         clasificadorOjoDerecho.detectMultiScale( matParaOjosDerechos, vectorOjosDerechos, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size( 30, 30 ) );

         Rect ojoDerechoLocalAlContenedor;

         if ( vectorOjosDerechos.size() > 0 )
         {
             // Discard every right eye except the largest one
             ojoDerechoLocalAlContenedor = rectanguloMasGrande( vectorOjosDerechos );
             Rect rectanguloOjoDerechoDesplazado( rectParaOjosDerechos.x + ojoDerechoLocalAlContenedor.x, rectParaOjosDerechos.y + ojoDerechoLocalAlContenedor.y, ojoDerechoLocalAlContenedor.width, ojoDerechoLocalAlContenedor.height );
             rectangle( matImagenActual, rectanguloOjoDerechoDesplazado, Scalar( 255, 255, 255 ), linesWidth );
         }

        //Create the matrix that will hold the candidate circles
        //Method for blink detection
         int threshold_value = 0;
         int threshold_type = 3;
         int const max_value = 255;
         int const max_type = 4;
         int const max_BINARY_value = 255;
         //Mat src, src_gray, dst;

         std::vector<Vec3f> storageCir;


         Mat img;
         int dp = 2;
         float minDist = 300.0;
         int param1 = 32;
         int param2 = 60;
         int minRadius = 10;
         int maxRadius = 22;
         //GaussianBlur( img, img, Size(7,7), 2, 2 );
         cvtColor( matParaOjosDerechos, img, CV_BGR2GRAY );
         threshold( img, img, threshold_value, max_BINARY_value,threshold_type );

         HoughCircles(img, storageCir, CV_HOUGH_GRADIENT  , dp, minDist,
                                             param1, param2, minRadius, maxRadius);


        if (storageCir.empty())
        {
            //qDebug()<<"eye closed";
            //storageCir.clear();
            cant++;
        }

        else{
            cant=0;
            //qDebug()<<"ojo abierto";
            //storageCir.clear();
        }
        storageCir.clear();


        if (cant==3){
            qDebug()<<"blink";
            cant=0;
        }
         if ( vectorOjosIzquierdos.size() > 0 && vectorOjosDerechos.size() > 0 )
         {
             // These are what I call the global centers, but they are really the offset centers (1/4 and 3/4)
             Point centroOjoIzquierdoGlobal( rectParaOjosIzquierdos.x + ojoIzquierdoLocalAlContenedor.x + ojoIzquierdoLocalAlContenedor.width*3/4, rectParaOjosIzquierdos.y + ojoIzquierdoLocalAlContenedor.y + ojoIzquierdoLocalAlContenedor.height/2 );
             Point centroOjoDerechoGlobal( rectParaOjosDerechos.x + ojoDerechoLocalAlContenedor.x + ojoDerechoLocalAlContenedor.width*1/4, rectParaOjosDerechos.y + ojoDerechoLocalAlContenedor.y + ojoDerechoLocalAlContenedor.height/2 );
            // qDebug()<<cv::Point ('centroOjoDerechoGlobal');
             //QDebug(centroOjoDerechoGlobal);
             //QDebug(centroOjoIzquierdoGlobal);
             line( matImagenActual, centroOjoIzquierdoGlobal, centroOjoDerechoGlobal, Scalar( 100, 50, 255 ), linesWidth );

             float anguloEntreOjos = anguloEntre ( centroOjoDerechoGlobal, centroOjoIzquierdoGlobal );
             float largoLineaOjoBoca = vectorCaras.at(i).height/9*6;

             float baseTrianguloOjoBoca = sin( anguloEntreOjos ) * largoLineaOjoBoca;
             if ( centroOjoDerechoGlobal.y < centroOjoIzquierdoGlobal.y ) baseTrianguloOjoBoca = -baseTrianguloOjoBoca;
             float alturaTrianguloOjoBoca = cos( anguloEntreOjos ) * largoLineaOjoBoca;

             Point finLineaOjoIzquierdo( centroOjoIzquierdoGlobal.x + baseTrianguloOjoBoca, centroOjoIzquierdoGlobal.y + alturaTrianguloOjoBoca );
             Point finLineaOjoDerecho( centroOjoDerechoGlobal.x + baseTrianguloOjoBoca, centroOjoDerechoGlobal.y + alturaTrianguloOjoBoca );

             line( matImagenActual, centroOjoIzquierdoGlobal, finLineaOjoIzquierdo, Scalar( 150, 20, 255 ), linesWidth );
             line( matImagenActual, centroOjoDerechoGlobal, finLineaOjoDerecho, Scalar( 150, 20, 255 ), linesWidth );

             // Take two consecutive averages (midpoints)
             Point bocaSupIzq( (finLineaOjoIzquierdo.x + centroOjoIzquierdoGlobal.x)/2, ( finLineaOjoIzquierdo.y + centroOjoIzquierdoGlobal.y)/2 );
             Point bocaSupDer( (finLineaOjoDerecho.x + centroOjoDerechoGlobal.x)/2, ( finLineaOjoDerecho.y + centroOjoDerechoGlobal.y)/2 );

             line( matImagenActual, bocaSupIzq, bocaSupDer, Scalar( 150, 20, 255 ), linesWidth );
             line( matImagenActual, finLineaOjoIzquierdo, finLineaOjoDerecho, Scalar( 150, 20, 255 ), linesWidth );

             //Point bocaInfIzq( (finLineaOjoIzquierdo.x + centroOjoIzquierdoGlobal.x)/2, (finLineaOjoIzquierdo.y + centroOjoIzquierdoGlobal.y)/2 );
             //Point bocaInfDer( (finLineaOjoIzquierdo.x + centroOjoIzquierdoGlobal.x)/2, (finLineaOjoIzquierdo.y + centroOjoIzquierdoGlobal.y)/2 );
         }

         // Stretch the face rectangle a little and draw it
         vectorCaras.at(i).height *= 1.2;
         rectangle( matImagenActual, vectorCaras.at(i), Scalar( 0, 0, 0 ), linesWidth );
     }

     // END PROCESSING

     frame = matImagenActual.clone();
}
Example no. 9
/*
 * 1. CALCULATE RANGE FROM MEAN AND STANDARD DEVIATION
 * 2. CREATE A MASK FROM THE RANGE
 * 3. SMOOTH STUFF USING MORPHOLOGY
 * 4. DETECT THE CIRCLES
 */
vector<ILAC_Sphere>
ILAC_SphereFinder::findSpheres ( ILAC_Square &square, Mat &img,
                                 const size_t pixSphDiam )
{
  /* 1. CALCULATE RANGE FROM MEAN AND STANDARD DEVIATION */
  Mat mean, stddev;
  {/* Isolate the Hue */
    Mat tmpImg;
    vector<Mat> tmp_dim;
    cvtColor ( square.getImg(), tmpImg, CV_BGR2HSV_FULL );
    split( tmpImg, tmp_dim );
    tmpImg = tmp_dim[0];
    meanStdDev ( tmpImg, mean, stddev );
  }

  /*
   * The range will be +/- 1 standard deviation, which covers approximately 68% of the data
   * (http://en.wikipedia.org/wiki/Standard_deviation).
   */
  Mat lowerb = mean - stddev;
  Mat upperb = mean + stddev;

  /* 2. CREATE A MASK FROM THE RANGE */
  Mat himg;
  {
    Mat tmpImg;
    vector<Mat> tmp_dim;
    cvtColor ( img, tmpImg, CV_BGR2HSV_FULL );
    split ( tmpImg, tmp_dim );
    himg = tmp_dim[0];
  }

  Mat mask = Mat::ones(img.rows, img.cols, CV_8UC1);
  inRange(himg, lowerb, upperb, mask);

  /* 3. SMOOTH STUFF USING MORPHOLOGY */
  {
    /*
     * Morphological opening is 1. erode, then 2. dilate. We use 1/4 of the sphere
     * diameter in the hope that it's big enough to clean up the noise, but not so
     * big that it removes the large sphere blob.
     */
    int openSize = pixSphDiam/4;
    Mat se = getStructuringElement ( MORPH_ELLIPSE, Size(openSize,openSize) );
    morphologyEx ( mask, mask, MORPH_OPEN, se );


    /*
     * We dilate with half of the sphere diameter and expect a blob that is
     * approximately double the radius of the original blob. The edges come out
     * more rounded this way.
     */
    int dilateSize = pixSphDiam/2;
    se = getStructuringElement ( MORPH_ELLIPSE,
                                 Size(dilateSize,dilateSize) );
    dilate ( mask, mask, se );
  }

  /* 4. DETECT THE CIRCLES */
  /* Play with the arguments for HoughCircles. */
  vector<Vec3f> circles;
  vector<ILAC_Sphere> spheres;
  int minCircDist = 3*pixSphDiam/2;

  GaussianBlur ( mask, mask, Size(15, 15), 2, 2 );
  HoughCircles ( mask, circles, CV_HOUGH_GRADIENT, 2, minCircDist, 100, 40);

  for( size_t i = 0; i < circles.size(); i++ )
  {
    Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);
    ILAC_Sphere temp ( &img, center, radius );
    spheres.push_back(temp);

    for ( int j = i ;
         j > 0 && spheres[j].getRadius() < spheres[j-1].getRadius() ; j-- )
      std::swap( spheres[j], spheres[j-1] );
  }

  if ( spheres.size() < 3 )
    throw ILACExLessThanThreeSpheres ();

  return spheres;
}
std::vector<cv::Vec3f> CircularSampleAreaDetector::detect(cv::Mat frame) {
  // Convert the image to grayscale
  cv::Mat frame_gray(frame);
  cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);

  // cv::cvtColor(frame, frame_gray, CV_BGR2HSV);
  // std::vector<cv::Mat> channels;
  // cv::split(frame_gray, channels);
  // frame_gray = channels[2];

  // Blur to remove extraneous detail before edge detection
  // cv::medianBlur(frame_gray, frame_gray, 9);
  // cv::blur(frame_gray, frame_gray, cv::Size(3, 3));
  cv::GaussianBlur(frame_gray, frame_gray, cv::Size(9, 9), 2, 2);

  // cv::imshow("blur_win", frame_gray);

  // Edge detection
  // cv::adaptiveThreshold(frame_gray, frame_gray, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 11, 1);
  cv::Mat frame_canny;

  // int erosion_size = 2;
  // cv::Mat element = getStructuringElement(cv::MORPH_ELLIPSE,
  //     cv::Size( 2*erosion_size + 1, 2*erosion_size+1),
  //     cv::Point( erosion_size, erosion_size ));
  // cv::dilate(frame_gray, frame_gray, element );
  // cv::erode(frame_gray, frame_gray, element );

  // cv::Canny(frame_gray, frame_canny, 5, 50);
  // cv::imshow("canny_win", frame_canny);

  // Extract circle features
  std::vector<cv::Vec3f> circles;
  // HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT, 1, 50, 50, 40, 0, 0);
  HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT,
      2,   // inverse resolution ratio
      50,  // min dist between circle centers
      50,  // canny upper threshold
      150,  // center detection threshold
      0,   // min radius
      0    // max radius
    );
  // HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT,
  //     1,   // inverse resolution ratio
  //     50,  // min dist between circle centers
  //     50,  // canny upper threshold
  //     50,  // center detection threshold
  //     0,   // min radius
  //     0    // max radius
  //   );

  // Filter the circles found: keep only those that lie entirely inside the frame.
  // TODO: This is not the best way to do this. Research probabilistic methods?
  cv::Point frame_center(frame_gray.cols / 2, frame_gray.rows / 2);  // currently unused

  std::vector<cv::Vec3f> good_circles;
  for(size_t i = 0; i < circles.size(); i++) {
    cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);

    // Ensure circle is entirely in screen
    if(center.x - radius < 0 || center.x + radius > frame_gray.cols
        || center.y - radius < 0 || center.y + radius > frame_gray.rows) {
      continue;
    }

    good_circles.push_back(cv::Vec3f(circles[i][0], circles[i][1], circles[i][2] * CIRCLE_SHRINK_FACTOR));
  }

  return good_circles;
}
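A sketch of consuming the circles returned by detect(): each Vec3f is (x, y, radius), with the radius already shrunk by CIRCLE_SHRINK_FACTOR. The class construction, file name, and drawing loop are assumptions.

// Hypothetical caller that draws the sample areas returned by detect().
#include <opencv2/opencv.hpp>

int main()
{
	cv::Mat frame = cv::imread("sample.png");
	if (frame.empty()) return 1;

	CircularSampleAreaDetector detector;                    // assumed default constructor
	std::vector<cv::Vec3f> circles = detector.detect(frame);

	for (size_t i = 0; i < circles.size(); i++) {
		cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
		int radius = cvRound(circles[i][2]);
		cv::circle(frame, center, radius, cv::Scalar(0, 255, 0), 2);
	}
	cv::imshow("sample areas", frame);
	cv::waitKey(0);
	return 0;
}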