Example no. 1
void BlobTracking::process(const cv::Mat &img_input, const cv::Mat &img_mask, cv::Mat &img_output)
{
  if(img_input.empty() || img_mask.empty())
    return;

  loadConfig();

  if(firstTime)
    saveConfig();

  IplImage* frame = new IplImage(img_input);
  cvConvertScale(frame, frame, 1, 0);

  IplImage* segmentated = new IplImage(img_mask);
  
  IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
  cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

  if(showBlobMask)
    cvShowImage("Blob Mask", segmentated);

  IplImage* labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

  cvb::CvBlobs blobs;
  unsigned int result = cvb::cvLabel(segmentated, labelImg, blobs);
  
  //cvb::cvFilterByArea(blobs, 500, 1000000);
  cvb::cvFilterByArea(blobs, minArea, maxArea);
  
  //cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
  if(debugBlob)
    cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_TO_STD);
  else
    cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);

  cvb::cvUpdateTracks(blobs, tracks, 200., 5); // match blobs to tracks within 200 px; drop tracks after 5 inactive frames
  
  if(debugTrack)
    cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_STD);
  else
    cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
  
  // tracks is a cvb::CvTracks, i.e. a std::map<CvID, CvTrack*>

  if(showOutput)
    cvShowImage("Blob Tracking", frame);

  cv::Mat img_result(frame);
  img_result.copyTo(img_output);

  // frame and segmentated are only headers over cv::Mat data, so delete the
  // headers below instead of calling cvReleaseImage (see the sketch after this example).
  cvReleaseImage(&labelImg);
  delete frame;
  delete segmentated;
  cvReleaseBlobs(blobs);
  cvReleaseStructuringElement(&morphKernel);

  firstTime = false;
}
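Why delete rather than cvReleaseImage at the end: the IplImage variables are only headers over cv::Mat data. A minimal sketch of that ownership rule, assuming OpenCV 2.x, where constructing an IplImage from a cv::Mat yields a non-owning header:

#include <opencv2/core/core.hpp>

// Sketch: IplImage headers built from a cv::Mat do not own the pixel data.
void headerOwnershipSketch()
{
    cv::Mat mat = cv::Mat::zeros(480, 640, CV_8UC3); // pixel data owned by mat
    IplImage* header = new IplImage(mat);            // header only, shares mat's data
    // ... pass header to legacy C-API calls ...
    delete header; // free just the header; cvReleaseImage(&header) would also
                   // attempt to free mat's data (double free)
}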
Example no. 2
void AirLayerDetector::update()
{
    // Grab the current raw image from the Kinect.
    cvZero(_tmp);
    cvCopy(_kinect->rawDepthImage(), _tmp); // optionally masked with _bgmodel->mask()
    
    // subtraction
    const unsigned short* ptr_sd; // bgmodel->standardDeviation
    const unsigned short* ptr_bg; // bgmodel->image
    const unsigned short* ptr_tmp; // rawDepthImage AND bgmodel->mask
    const unsigned char* ptr_mask; // bg mask
    unsigned short* ptr_tmp2; // result...
    for(int y=0; y<_tmp->height; y++) {
        ptr_sd = (const unsigned short*)(_bgmodel->standardDeviation()->imageData + y * _bgmodel->standardDeviation()->widthStep);
        ptr_bg = (const unsigned short*)(_bgmodel->image()->imageData + y * _bgmodel->image()->widthStep);
        //ptr_bg = (const unsigned short*)(_bgmodel->mean()->imageData + y * _bgmodel->mean()->widthStep);
        ptr_mask = (const unsigned char*)(_bgmodel->mask()->imageData + y * _bgmodel->mask()->widthStep);
        ptr_tmp = (const unsigned short*)(_tmp->imageData + y * _tmp->widthStep);
        ptr_tmp2 = (unsigned short*)(_tmp2->imageData + y * _tmp2->widthStep);
        for(int x=0; x<_tmp->width; x++) {
            // perform the background subtraction
            if(ptr_mask[x] > 0 && ptr_tmp[x] > 25 && ptr_tmp[x] < 2047) { // FIXME: magic numbers?
                if(ptr_bg[x] >= ptr_tmp[x]) {
                    // inverse depth (kinect)
                    ptr_tmp2[x] = ptr_bg[x] - ptr_tmp[x];

                    // avoid jitter
                    if(ptr_tmp2[x] < 3 * ptr_sd[x])
                        ptr_tmp2[x] = 0;
                }
                else
                    ptr_tmp2[x] = 0;
            }
            else
                ptr_tmp2[x] = 0;
        }
    }

    // binary threshold
    cvInRangeS(_tmp2, cvScalar(_lo), cvScalar(_hi), _blobs.getCvImage());

    // noise reduction: dilate to close small gaps, then median-filter the speckle
    cvDilate(_blobs.getCvImage(), _blobs.getCvImage(), NULL, 2);
    cvSmooth(_blobs.getCvImage(), _blobs.getCvImage(), CV_MEDIAN, 3);
    _blobs.flagImageChanged();

    // extracting & filtering the blobs
    cvReleaseBlobs(_cvb);
    cvb::cvLabel(_blobs.getCvImage(), _labelImg, _cvb);
    cvFilterByArea(_cvb, _minArea, _maxArea);
}
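For reference, the per-pixel rule in the loop above can also be expressed with cv::Mat arithmetic. A sketch, not part of the original class, assuming 16-bit single-channel bg/raw/sd images and an 8-bit mask standing in for the _bgmodel and _kinect members:

#include <opencv2/core/core.hpp>

// Vectorized equivalent of the background-subtraction loop above.
cv::Mat subtractBackground(const cv::Mat &bg, const cv::Mat &raw,
                           const cv::Mat &sd, const cv::Mat &mask)
{
    cv::Mat diff;
    cv::subtract(bg, raw, diff);   // bg - raw; CV_16U saturation gives 0 where raw > bg
    cv::Mat sd3 = sd * 3;          // 3-sigma jitter threshold
    diff.setTo(0, mask == 0);      // keep only pixels inside the background mask
    diff.setTo(0, raw <= 25);      // drop unreliable near readings (magic number from above)
    diff.setTo(0, raw >= 2047);    // drop invalid Kinect depth values
    diff.setTo(0, diff < sd3);     // suppress jitter below 3 sigma
    return diff;
}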
Example no. 3
  unsigned int cvLabel (IplImage const *img, IplImage *imgOut, CvBlobs &blobs)
  {
    CV_FUNCNAME("cvLabel");
    __CV_BEGIN__;
    {
      CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==1));
      CV_ASSERT(imgOut&&(imgOut->depth==IPL_DEPTH_LABEL)&&(imgOut->nChannels==1));

      unsigned int numPixels=0;

      cvSetZero(imgOut);

      CvLabel label=0;
      cvReleaseBlobs(blobs);

      unsigned int stepIn = img->widthStep / (img->depth / 8);
      unsigned int stepOut = imgOut->widthStep / (imgOut->depth / 8);
      unsigned int imgIn_width = img->width;
      unsigned int imgIn_height = img->height;
      unsigned int imgIn_offset = 0;
      unsigned int imgOut_width = imgOut->width;
      unsigned int imgOut_height = imgOut->height;
      unsigned int imgOut_offset = 0;
      if(img->roi)
      {
	imgIn_width = img->roi->width;
	imgIn_height = img->roi->height;
	imgIn_offset = img->roi->xOffset + (img->roi->yOffset * stepIn);
      }
      if(imgOut->roi)
      {
	imgOut_width = imgOut->roi->width;
	imgOut_height = imgOut->roi->height;
	imgOut_offset = imgOut->roi->xOffset + (imgOut->roi->yOffset * stepOut);
      }

      unsigned char *imgDataIn = (unsigned char *)img->imageData + imgIn_offset;
      CvLabel *imgDataOut = (CvLabel *)imgOut->imageData + imgOut_offset;

#define imageIn(X, Y) imgDataIn[(X) + (Y)*stepIn]
#define imageOut(X, Y) imgDataOut[(X) + (Y)*stepOut]
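
      // Note: the scan below follows contour-tracing connected-component
      // labeling in the style of Chang, Chen & Lu (the algorithm cvBlob is
      // based on): new external contours are traced with the movesE direction
      // table, internal (hole) contours with movesI, and CV_BLOB_MAX_LABEL
      // marks background pixels already visited during tracing.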

      CvLabel lastLabel = 0;
      CvBlob *lastBlob = NULL;

      for (unsigned int y=0; y<imgIn_height; y++)
      {
	for (unsigned int x=0; x<imgIn_width; x++)
	{
	  if (imageIn(x, y))
	  {
	    bool labeled = imageOut(x, y);

	    if ((!imageOut(x, y))&&((y==0)||(!imageIn(x, y-1))))
	    {
	      labeled = true;

	      // Label contour.
	      label++;
	      CV_ASSERT(label!=CV_BLOB_MAX_LABEL);

	      imageOut(x, y) = label;
	      numPixels++;

	      if (y>0)
		imageOut(x, y-1) = CV_BLOB_MAX_LABEL;

	      CvBlob *blob = new CvBlob;
	      blob->label = label;
	      blob->area = 1;
	      blob->minx = x; blob->maxx = x;
	      blob->miny = y; blob->maxy = y;
	      blob->m10=x; blob->m01=y;
	      blob->m11=x*y;
	      blob->m20=x*x; blob->m02=y*y;
	      blob->internalContours.clear();
	      blobs.insert(CvLabelBlob(label,blob));

              lastLabel = label;
	      lastBlob = blob;

	      blob->contour.startingPoint = cvPoint(x, y);

	      unsigned char direction=1;
	      unsigned int xx = x;
	      unsigned int yy = y;


	      bool contourEnd = false;

	      do
	      {
		for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
		{
		  bool found = false;

		  for (unsigned char i=0; i<3; i++)
		  {
		    int nx = xx+movesE[direction][i][0];
		    int ny = yy+movesE[direction][i][1];
		    if ((nx<imgIn_width)&&(nx>=0)&&(ny<imgIn_height)&&(ny>=0))
		    {
		      if (imageIn(nx, ny))
		      {
			found = true;

			blob->contour.chainCode.push_back(movesE[direction][i][3]);

			xx=nx;
			yy=ny;

			direction=movesE[direction][i][2];
			break;
		      }
		      else
		      {
			imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
		      }
		    }
		  }

		  if (!found)
		    direction=(direction+1)%4;
		  else
		  {
		    if (imageOut(xx, yy) != label)
		    {
		      imageOut(xx, yy) = label;
		      numPixels++;

		      if (xx<blob->minx) blob->minx = xx;
		      else if (xx>blob->maxx) blob->maxx = xx;
		      if (yy<blob->miny) blob->miny = yy;
		      else if (yy>blob->maxy) blob->maxy = yy;

		      blob->area++;
		      blob->m10+=xx; blob->m01+=yy;
		      blob->m11+=xx*yy;
		      blob->m20+=xx*xx; blob->m02+=yy*yy;
		    }

		    break;
		  }
		  
		  contourEnd = ((xx==x) && (yy==y) && (direction==1));
		  if (contourEnd)
		    break;
		}
	      }
	      while (!contourEnd);

	    }

	    if ((y+1<imgIn_height)&&(!imageIn(x, y+1))&&(!imageOut(x, y+1)))
	    {
	      labeled = true;

	      // Label internal contour
	      CvLabel l;
	      CvBlob *blob = NULL;

	      if (!imageOut(x, y))
	      {
		l = imageOut(x-1, y);

		imageOut(x, y) = l;
		numPixels++;

                if (l==lastLabel)
                  blob = lastBlob;
                else
                {
                  blob = blobs.find(l)->second;
                  lastLabel = l;
                  lastBlob = blob;
                }
		blob->area++;
		blob->m10+=x; blob->m01+=y;
		blob->m11+=x*y;
		blob->m20+=x*x; blob->m02+=y*y;
	      }
	      else
	      {
		l = imageOut(x, y);

                if (l==lastLabel)
                  blob = lastBlob;
                else
                {
                  blob = blobs.find(l)->second;
                  lastLabel = l;
                  lastBlob = blob;
                }
	      }

	     imageOut(x, y+1) = CV_BLOB_MAX_LABEL;

	      CvContourChainCode *contour = new CvContourChainCode;
	      contour->startingPoint = cvPoint(x, y);

	      unsigned char direction = 3;
	      unsigned int xx = x;
	      unsigned int yy = y;

	      do
	      {
		for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
		{
		  bool found = false;

		  for (unsigned char i=0; i<3; i++)
		  {
		    int nx = xx+movesI[direction][i][0];
		    int ny = yy+movesI[direction][i][1];
		    if (imageIn(nx, ny))
		    {
		      found = true;

		      contour->chainCode.push_back(movesI[direction][i][3]);

		      xx=nx;
		      yy=ny;

		      direction=movesI[direction][i][2];
		      break;
		    }
		    else
		    {
		      imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
		    }
		  }

		  if (!found)
		    direction=(direction+1)%4;
		  else
		  {
		    if (!imageOut(xx, yy))
		    {
		      imageOut(xx, yy) = l;
		      numPixels++;

		      blob->area++;
		      blob->m10+=xx; blob->m01+=yy;
		      blob->m11+=xx*yy;
		      blob->m20+=xx*xx; blob->m02+=yy*yy;
		    }

		    break;
		  }
		}
	      }
	      while (!(xx==x && yy==y));

	      blob->internalContours.push_back(contour);
	    }
	    if (!labeled)
	    {
	      CvLabel l = imageOut(x-1, y);

	      imageOut(x, y) = l;
	      numPixels++;

	      CvBlob *blob = NULL;
              if (l==lastLabel)
                blob = lastBlob;
              else
              {
                blob = blobs.find(l)->second;
                lastLabel = l;
                lastBlob = blob;
              }
	      blob->area++;
	      blob->m10+=x; blob->m01+=y;
	      blob->m11+=x*y;
	      blob->m20+=x*x; blob->m02+=y*y;
	    }
	  }
	}
      }

      for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
      {
	cvCentroid((*it).second);

        (*it).second->u11 = (*it).second->m11 - ((*it).second->m10*(*it).second->m01)/(*it).second->m00;
        (*it).second->u20 = (*it).second->m20 - ((*it).second->m10*(*it).second->m10)/(*it).second->m00;
        (*it).second->u02 = (*it).second->m02 - ((*it).second->m01*(*it).second->m01)/(*it).second->m00;

        double m00_2 = (*it).second->m00 * (*it).second->m00;

        (*it).second->n11 = (*it).second->u11 / m00_2;
        (*it).second->n20 = (*it).second->u20 / m00_2;
        (*it).second->n02 = (*it).second->u02 / m00_2;

        (*it).second->p1 = (*it).second->n20 + (*it).second->n02;

        double nn = (*it).second->n20 - (*it).second->n02;
        (*it).second->p2 = nn*nn + 4.*((*it).second->n11*(*it).second->n11);
      }

      return numPixels;

    }
    __CV_END__;
  }
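The moment post-pass at the end of cvLabel is what makes later per-blob geometry cheap. For example, the orientation drawn by CV_BLOB_RENDER_ANGLE in the other examples comes straight from these central moments; cvb::cvAngle reduces to essentially the following:

#include <cmath>
#include <cvblob.h>

// Major-axis angle of the blob's equivalent ellipse, from central moments
// (essentially what cvb::cvAngle computes).
double blobAngle(const cvb::CvBlob *blob)
{
    return 0.5 * std::atan2(2.0 * blob->u11, blob->u20 - blob->u02);
}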
Example no. 4
void BlobTrackingNode::process(const cv::Mat &img_input, const cv::Mat &img_mask, cv::Mat &img_output)
{
    // This is the output event list.
    QList<DetectedEvent> blobEvent;
    cv::Mat newInput;
    img_input.copyTo(newInput);

    if(img_input.empty() || img_mask.empty()){
        return;
    }
    loadConfig();

    if(firstTime){
        saveConfig();
    }
    IplImage* frame = new IplImage(img_input);
    cvConvertScale(frame, frame, 1, 0);

    IplImage* segmentated = new IplImage(img_mask);

    IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

    cv::Mat temp = cv::Mat(segmentated);
    if(showBlobMask){
        //cv::imshow("test",temp);
        ownerPlugin->updateFrameViewer("Tracking Mask",convertToQImage(temp));
    }
    IplImage* labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

    cvb::CvBlobs blobs;
    cvb::cvLabel(segmentated, labelImg, blobs);

    cvb::cvFilterByArea(blobs, minArea, maxArea);
  
    if(debugBlob){
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_TO_STD);
    }
    else{
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);
    }

    cvb::cvUpdateTracks(blobs, tracks, threshold_distance, threshold_inactive);

    // At this point every blob in the current frame has been assigned to a track,
    // so we can iterate through the active tracks and find the blobs that belong to
    // each of them. The following loop does that; it helps build a more general
    // picture of how blobs relate to one another over time. (A direct-lookup
    // alternative is sketched after this example.)

    for (cvb::CvTracks::const_iterator track = tracks.begin(); track!=tracks.end(); ++track)
    {
        if((*track).second->active != 0){
            for(std::map<cvb::CvLabel,cvb::CvBlob *>::iterator it = blobs.begin() ; it != blobs.end(); it++){

                cvb::CvLabel label = (*it).first;
                cvb::CvBlob * blob = (*it).second;

                if((*track).second->label == label){
                    //qDebug()<< blob->minx <<","<<blob->miny;
                    // These are the blob labels smoothed over time by the tracker.
                    blobEvent.append(DetectedEvent("blob",QString("%1,%2,%3,%4,%5,%6,%7,%8").arg(frameIndex).arg((*track).first).arg(blob->centroid.x).arg(blob->centroid.y).arg(blob->minx).arg(blob->miny).arg(blob->maxx).arg(blob->maxy),1.0));
                }
            }
        }
    }

    if(debugTrack){
        cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_STD);
    }
    else{
        cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
    }
    if(showFrameID){
        cv::Mat temp = cv::Mat(frame);
        cv::putText(temp,QString("%1").arg(frameIndex).toStdString(),cv::Point(40,120),cv::FONT_HERSHEY_PLAIN,2,cv::Scalar(0,255,0),2);
    }
    if(showOutput){
        cv::Mat temp = cv::Mat(frame);
        ownerPlugin->updateFrameViewer("Tracking Output",convertToQImage(temp));

        //cvShowImage("Blob Tracking", frame);
    }
    cv::Mat img_result(frame);

    cv::putText(img_result, QString::number(frameIndex).toStdString(), cv::Point(10,30), CV_FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,255,255));

    img_result.copyTo(img_output);
    cvReleaseImage(&labelImg);
    delete frame;
    delete segmentated;
    cvReleaseBlobs(blobs);
    cvReleaseStructuringElement(&morphKernel);

    firstTime = false;
    frameIndex++;

    QImage input((uchar*)newInput.data, newInput.cols, newInput.rows, newInput.step1(), QImage::Format_RGB888);
    QImage output((uchar*)img_output.data, img_output.cols, img_output.rows, img_output.step1(), QImage::Format_RGB888);
    QList<QImage> images;

    images.append(input);
    images.append(output);
    emit generateEvent(blobEvent,images);

    //emit generateEvent(blobEvent);
}
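As noted above, the track-to-blob association loop is O(tracks × blobs). Since each cvb::CvTrack stores the label of the blob it currently follows, the same pairing can be obtained with a direct map lookup; a sketch (the DetectedEvent construction from the loop above is elided):

#include <cvblob.h>

// Direct-lookup alternative to the nested association loop above.
void forEachTrackedBlob(const cvb::CvTracks &tracks, const cvb::CvBlobs &blobs)
{
    for (cvb::CvTracks::const_iterator t = tracks.begin(); t != tracks.end(); ++t) {
        if (t->second->active == 0)
            continue;                                  // skip inactive tracks
        cvb::CvBlobs::const_iterator it = blobs.find(t->second->label);
        if (it == blobs.end())
            continue;                                  // no blob with that label this frame
        const cvb::CvBlob *blob = it->second;
        // ... build the same DetectedEvent as in the loop above ...
        (void)blob;
    }
}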
Example no. 5
/**
 * Runs the tracking.
 */
void Tracker::executeTracker()
{
	#define PI (3.1415926535897932384626433832795028841)
	// Kinect FOV: 43° vertical, 57° horizontal. The factors below map pixel
	// offsets to (approximate) coordinates of a ray at unit depth
	// (see the helper sketched after this example).
	double verticalScalingFactor = tan(43 * PI / 180) / 240;
	double horizontalScalingFactor = tan(57 * PI / 180) / 320;
	ROS_DEBUG("Scaling factors: %lf/%lf", horizontalScalingFactor, verticalScalingFactor);

	bool quadcopterTracked = false;

	// Images
	cv::Mat cameraImage(cv::Size(640, 480), CV_8UC3); // Only for showCameraImage == true.
	cv::Mat maskedImage(cv::Size(640, 480), CV_8UC3); // Only for showMaskedImage == true.
	cv::Mat image(cv::Size(640, 480), CV_8UC3); // The raw image from the camera.
	cv::Mat mapImage(cv::Size(640, 480), CV_8UC1); // The color mapped image.
	cv::Mat hsvImage(cv::Size(640, 480), CV_8UC3);  // The raw image in HSV format.

	// CvBlob
	cvb::CvBlobs blobs;
	IplImage *labelImg = cvCreateImage(image.size(), IPL_DEPTH_LABEL, 1);
	cv::Mat morphKernel = cv::getStructuringElement(CV_SHAPE_RECT, cv::Size(5, 5));
	cvb::CvTracks tracks;
	IplImage iplMapImage;

	while (!stopFlag) {
		if (imageDirty == 0) {
			usleep(100);
			continue;
		} else if (imageDirty > 1) {
			ROS_WARN("Skipped %d frames!", imageDirty - 1);
		}

		START_CLOCK(trackerClock)

		imageMutex.lock();
		((cv::Mat*) this->image)->copyTo(image);
		long int time = this->imageTime;
		imageDirty = 0;
		imageMutex.unlock();

		if (showCameraImage)
			image.copyTo(cameraImage);

		createColorMapImage(image, mapImage, hsvImage);

		if (showMaskedImage) {
			// Convert to 3 channel image.
			int target = 0;

			for (int i = 0; i < mapImage.total(); ++i) {
				maskedImage.data[target++] = mapImage.data[i];
				maskedImage.data[target++] = mapImage.data[i];
				maskedImage.data[target++] = mapImage.data[i];
			}
		}

		cv::morphologyEx(mapImage, mapImage, cv::MORPH_OPEN, morphKernel);

		// Finding blobs
		// Only copies headers.
		iplMapImage = mapImage;
		unsigned int result = cvLabel(&iplMapImage, labelImg, blobs);
		// ROS_DEBUG("Blob result: %d", result);

		// Filter blobs
		cvFilterByArea(blobs, 10, 1000000);

		if (showCameraImage || showMaskedImage)
			cvUpdateTracks(blobs, tracks, 200., 5);

		if (showMaskedImage) {
			// Only copies headers.
			IplImage iplImage = maskedImage;
			cvRenderBlobs(labelImg, blobs, &iplImage, &iplImage, CV_BLOB_RENDER_BOUNDING_BOX);
			cvRenderTracks(tracks, &iplImage, &iplImage, CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX);
			ROS_DEBUG("Exiting visual masked block"); // TODO Tracking down
			                                          // issue #7
		}

		if (showCameraImage) {
			// Only copies headers.
			IplImage iplImage = cameraImage;
			cvRenderBlobs(labelImg, blobs, &iplImage, &iplImage, CV_BLOB_RENDER_BOUNDING_BOX);
			cvRenderTracks(tracks, &iplImage, &iplImage, CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX);
			ROS_DEBUG("Exiting visual masked block"); // TODO Tracking down
			                                          // issue #7
		}

		if (showCameraImage || showMaskedImage)
			cvReleaseTracks(tracks);

		if (blobs.size() != 0) {
			// Find biggest blob
			cvb::CvLabel largestBlob = cvLargestBlob(blobs);
			CvPoint2D64f center = blobs.find(largestBlob)->second->centroid;
			double x = center.x;
			double y = center.y;

			// Set (0, 0) to center.
			x -= 320;
			y -= 240;
			// ROS_DEBUG("Center: %lf/%lf", x, y);

			// Apply scaling
			x *= horizontalScalingFactor;
			y *= verticalScalingFactor;

			dataReceiver->receiveTrackingData(cv::Scalar(x, y, 1.0), ((QuadcopterColor*) qc)->getId(), time);

			if (showMaskedImage)
				drawCross(maskedImage, center.x, center.y);

			if (showCameraImage)
				drawCross(cameraImage, center.x, center.y);

			if (!quadcopterTracked) {
				quadcopterTracked = true;
				ROS_DEBUG("Quadcopter %d tracked", ((QuadcopterColor*) this->qc)->getId());
			}
		} else if (quadcopterTracked) {
			quadcopterTracked = false;
			ROS_DEBUG("Quadcopter %d NOT tracked", ((QuadcopterColor*) this->qc)->getId());
		}

		// Free cvb stuff.
		cvReleaseBlobs(blobs);

		// ROS_DEBUG("cvb stuff freed"); // TODO Tracking down issue #7

		if (showMaskedImage) {
			cv::imshow(maskedWindowName, maskedImage);

			ROS_DEBUG("showed masked image"); // TODO Tracking down issue #7
		}

		if (showCameraImage) {
			cv::imshow(cameraWindowName, cameraImage);

			ROS_DEBUG("showed camera image"); // TODO Tracking down issue #7
		}

		STOP_CLOCK(trackerClock, "Calculation of quadcopter position took: ")
	}

	cvReleaseImage(&labelImg);

	if (showMaskedImage)
		cv::destroyWindow(maskedWindowName);

	if (showCameraImage)
		cv::destroyWindow(cameraWindowName);

	ROS_INFO("Tracker with id %d terminated", ((QuadcopterColor*) this->qc)->getId());
}