// Handles a mouse-down on the gradient control: samples the back-buffer
// pixel under the cursor and inserts a color key there.
// Fixes: removed unused locals (extent, tmpPt) and the redundant second
// mActive check — mActive was already verified by the early return.
void GuiGradientCtrl::onMouseDown(const GuiEvent &event)
{
   // Ignore clicks while the control is inactive.
   if (!mActive)
      return;
   
   mouseLock(this);
   
   if (mProfile->mCanKeyFocus)
      setFirstResponder();

   // mActive was verified above, so the action callback always fires here.
   onAction();

   // Resolve the current render target into a readable texture so we can
   // sample the pixel under the cursor.
   Point2I resolution = getRoot()->getExtent();
   GFXTexHandle bb( resolution.x, 
                    resolution.y, 
                    GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, avar("%s() - bb (line %d)", __FUNCTION__, __LINE__) );
   
   GFXTarget *targ = GFX->getActiveRenderTarget();
   targ->resolveTo( bb );

   // Copy the texture to a CPU-side bitmap and read the clicked pixel's color.
   GBitmap bmp( bb.getWidth(), bb.getHeight() );
   bb.copyToBmp( &bmp );
   ColorI tmp;
   bmp.getColor( event.mousePoint.x, event.mousePoint.y, tmp );

   // Insert a new color key at the clicked position using the sampled color.
   addColorRange( globalToLocalCoord(event.mousePoint), ColorF(tmp) );
   
   mMouseDown = true;
}
// Renders the color picker and, when the selector has moved, re-samples the
// picked color from the back buffer at the selector position.
// Fixes: removed the unused local tmpPt and merged the duplicate
// getRoot()->getExtent() reads (extent/resolution held the same value).
void GuiColorPickerCtrl::onRender(Point2I offset, const RectI& updateRect)
{
   // Lazily create the state block used when rendering the color box.
   if (mStateBlock.isNull())
   {
      GFXStateBlockDesc desc;
      desc.setBlend(true, GFXBlendSrcAlpha, GFXBlendInvSrcAlpha);
      desc.setZReadWrite(false);
      desc.zWriteEnable = false;
      desc.setCullMode(GFXCullNone);
      mStateBlock = GFX->createStateBlock( desc );
   }

   RectI boundsRect(offset, getExtent()); 
   renderColorBox(boundsRect);

   if (mPositionChanged) 
   {
      mPositionChanged = false;
      // If we are anything but a palette, change the pick color.
      if (mDisplayMode != pPallet)
      {
         Point2I resolution = getRoot()->getExtent();

         // Selector position in back-buffer pixel coordinates; the y axis is
         // flipped for non-OpenGL devices whose origin is top-left.
         U32 buf_x = offset.x + mSelectorPos.x + 1;
         U32 buf_y = ( resolution.y - ( offset.y + mSelectorPos.y + 1 ) );
         if(GFX->getAdapterType() != OpenGL)
            buf_y = resolution.y - buf_y;

         // Resolve the active render target into a readable texture.
         GFXTexHandle bb( resolution.x, 
                          resolution.y, 
                          GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, avar("%s() - bb (line %d)", __FUNCTION__, __LINE__) );

         GFXTarget *targ = GFX->getActiveRenderTarget();
         targ->resolveTo( bb );

         // Copy to a CPU-side bitmap and read the pixel under the selector.
         GBitmap bmp( bb.getWidth(), bb.getHeight() );
         bb.copyToBmp( &bmp );

         //bmp.writePNGDebug( "foo.png" );

         ColorI tmp;
         bmp.getColor( buf_x, buf_y, tmp );

         mPickColor = (ColorF)tmp;

         // Now do onAction() if we are allowed.
         if (mActionOnMove) 
            onAction();
      }
   }
   
   // Render the children.
   renderChildControls( offset, updateRect);
}
Example #3
0
// Computes the total rating of resource `res` in the hex neighbourhood of
// `pt` within AIJH::RES_RADIUS[res] steps.
//
// direction == -1 : full recomputation over all points in radius plus the
//                   center (3n^2+3n+1 points for radius n).
// direction in [0,5] : incremental update of `lastval`, assuming the center
//                   moved one hex step in `direction` from its previous
//                   position — only the arc of newly covered points is added
//                   and the arc that fell out of range is subtracted
//                   (4n+2 points touched, much faster).
int AIInterface::CalcResourceValue(const MapPoint pt, AIJH::Resource res, char direction, int lastval) const
{
    int returnVal;
    if(direction == -1) //calculate complete value from scratch (3n^2+3n+1 points)
    {
        returnVal = 0;
        std::vector<MapPoint> pts = GetPointsInRadius(pt, AIJH::RES_RADIUS[res]);
        for(std::vector<MapPoint>::const_iterator it = pts.begin(); it != pts.end(); ++it)
            returnVal += GetResourceRating(*it, res);
        //add the center point value (not included by GetPointsInRadius)
        returnVal += GetResourceRating(pt, res);
    }
    else //update differing nodes only (4n+2 points - much faster)
    {
        returnVal = lastval;
        //add new points:
        //first go radius steps towards direction-1 to reach the start of the
        //arc of newly covered points
        MapPoint tmpPt(pt);
        for(unsigned i = 0; i < AIJH::RES_RADIUS[res]; i++)
            tmpPt = gwb.GetNeighbour(tmpPt, (direction + 5) % 6);
        //then walk clockwise at radius distance to visit all new points
        for(int i = direction + 1; i < (direction + 3); ++i)
        {
            int resRadius = AIJH::RES_RADIUS[res];
            //add 1 extra step on the second side we check to complete the side
            if(i == direction + 2)
                ++resRadius;
            for(MapCoord r2 = 0; r2 < resRadius; ++r2)
            {
                returnVal += GetResourceRating(tmpPt, res);
                tmpPt = gwb.GetNeighbour(tmpPt, i % 6);
            }
        }
        //now subtract the old points no longer in range of the new center:
        //go back to the old center point (one step opposite to `direction`)
        tmpPt = pt;
        tmpPt = gwb.GetNeighbour(tmpPt, (direction + 3) % 6);
        //next: go to the first old point we have to subtract
        for(unsigned i = 0; i < AIJH::RES_RADIUS[res]; i++)
            tmpPt = gwb.GetNeighbour(tmpPt, (direction + 2) % 6);
        //now walk clockwise at radius distance to remove all old points
        for(int i = direction + 4; i < (direction + 6); ++i)
        {
            int resRadius = AIJH::RES_RADIUS[res];
            //again 1 extra step on the second side to complete it
            if(i == direction + 5)
                ++resRadius;
            for(MapCoord r2 = 0; r2 < resRadius; ++r2)
            {
                returnVal -= GetResourceRating(tmpPt, res);
                tmpPt = gwb.GetNeighbour(tmpPt, i % 6);
            }
        }
    }
    //if(returnval<0&&lastval>=0&&res==AIJH::BORDERLAND)
    //LOG.lprintf("AIInterface::CalcResourceValue - warning: negative returnvalue direction %i oldval %i\n", direction, lastval);
    return returnVal;
}
Example #4
0
// Renders the color picker. The back buffer is resolved into a cached
// bitmap (mBitmap) and the picked color (or the selector position, in
// select-color mode) is derived from it.
// Fix: removed the unused local tmpPt; documentation added.
void GuiColorPickerCtrl::onRender(Point2I offset, const RectI& updateRect)
{
   // Lazily create the state block used when rendering the color box.
   if (mStateBlock.isNull())
   {
      GFXStateBlockDesc desc;
      desc.setBlend(true, GFXBlendSrcAlpha, GFXBlendInvSrcAlpha);
      desc.setZReadWrite(false);
      desc.zWriteEnable = false;
      desc.setCullMode(GFXCullNone);
      mStateBlock = GFX->createStateBlock(desc);
   }

   RectI boundsRect(offset, getExtent());
   renderColorBox(boundsRect);

   // Re-sample when the selector moved or when there is no cached bitmap yet.
   if (mPositionChanged || mBitmap == NULL)
   {
      // nullBitmap == true means we only came here to (re)build the cached
      // bitmap, not because the selector moved — skip the pick in that case.
      bool nullBitmap = false;

      if (mPositionChanged == false && mBitmap == NULL)
         nullBitmap = true;

      mPositionChanged = false;
      Point2I extent = getRoot()->getExtent();

      // If we are anything but a palette, change the pick color.
      if (mDisplayMode != pPallet)
      {
         Point2I resolution = getRoot()->getExtent();

         // Selector position in back-buffer pixel coordinates (y flipped).
         U32 buf_x = offset.x + mSelectorPos.x + 1;
         U32 buf_y = resolution.y - (extent.y - (offset.y + mSelectorPos.y + 1));

         // Resolve the active render target into a readable texture.
         GFXTexHandle bb( resolution.x, resolution.y, GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, avar("%s() - bb (line %d)", __FUNCTION__, __LINE__) );

         GFXTarget *targ = GFX->getActiveRenderTarget();
         targ->resolveTo(bb);

         // Refresh the cached CPU-side copy of the back buffer.
         if (mBitmap)
         {
            delete mBitmap;
            mBitmap = NULL;
         }

         mBitmap = new GBitmap(bb.getWidth(), bb.getHeight());

         bb.copyToBmp(mBitmap);

         if (!nullBitmap)
         {
            if (mSelectColor)
            {
               // A specific color was requested: locate it in the bitmap and
               // move the selector there instead of sampling.
               Point2I pos = findColor(mSetColor, offset, resolution, *mBitmap);
               mSetColor = mSetColor.BLACK;
               mSelectColor = false;
               setSelectorPos(pos);
            }
            else
            {
               ColorI tmp;
               mBitmap->getColor(buf_x, buf_y, tmp);

               mPickColor = (ColorF)tmp;

               // Now do onAction() if we are allowed.
               if (mActionOnMove)
                  onAction();
            }
         }
      }
   }

   // Render the children.
   renderChildControls(offset, updateRect);
}
Example #5
0
// Stitches a set of vertical segments into closed rectilinear contours.
//
// Endpoints are sorted by y then x; consecutive endpoint pairs on a scanline
// form the horizontal contour segments. A linked array of SegmentLink is
// built ([vertical segs][horizontal segs], cross-indexed via headId_/tailId_)
// and each closed loop is traced into contourSegVec.
//
// Fixes: the vertical-segment copy loop used a signed `int` index compared
// against size() (signed/unsigned mismatch), the pairing loop bound
// `idx <= size()-1` was fragile, and `size_t` values were passed to printf's
// "%u" (undefined behavior on LP64) — now cast to unsigned.
void formContourByVertSeg(const std::vector<Segment>& vertSegVec, std::vector<Segment>& contourSegVec)
{
    //===============================================================
    // Will not check the validity of the input data because it is
    // too time consuming. Make sure the input is valid yourself.
    //================================================================
    
    // Need at least two vertical segments to close any contour.
    if( vertSegVec.size() < 2 ) return;
    
    //triplet content: Point / index of the vertical seg it belongs to
    Point tmpPt(0,0);
    std::vector<std::pair<Point,size_t> > tripleVec;
    tripleVec.reserve(vertSegVec.size()*2);  
    for (size_t idx = 0; idx < vertSegVec.size(); ++idx)
    {
        tmpPt = vertSegVec[idx].getHead();
        tripleVec.push_back( std::pair<Point,size_t>(tmpPt,idx) ) ;
        tmpPt = vertSegVec[idx].getTail();
        tripleVec.push_back( std::pair<Point,size_t>(tmpPt,idx) ) ;
    }
    
    // sort by y first and then by x. this is for forming horizontal segments
    std::sort(tripleVec.begin(), tripleVec.end(), sortByYX);
    
    //Basic idea is to build an array that contains the information of either vertical or horizontal segment and
    //with the information of its next and previous segments in the contour.
    // DETAIL DESCRIPTION:
    //   The single element in the array contains:
    //   headId_ == Segment(vertical/horizontal) == tailId_
    //
    // EXPLANATION:
    //   headId_ : the index in this array. 
    //             it means the left vertical segment of a horizontal segment if the segment of the element is horizontal.               
    //             it means the bottom horizontal segment of a vertical segment if the segment of the element is vertical.  
    //   tailId_ : the index in this array. 
    //             it means the right vertical segment of a horizontal segment if the segment of the element is horizontal.              
    //             it means the top horizontal segment of a vertical segment if the segment of the element is vertical.
    //
    // The array is divided into two parts:
    // [ Vertical Segments ] [ Horizontal Segments ]
    // While building up the second part of this array, the procedure will establish the correct headId_ and tailId_ of
    // the vertical segments and horizontal segments.
    // Reserve the correct size up front to prevent reallocation.
    std::vector<SegmentLink> segLinkVec;
    segLinkVec.reserve(2*vertSegVec.size());
    //transform the input vertical segs into the link format
    for(size_t idx = 0; idx < vertSegVec.size(); ++idx)
    {
        segLinkVec.push_back(SegmentLink(vertSegVec[idx],0,0));
    }
    
    //put in the horizontal segments: each consecutive endpoint pair on a
    //scanline becomes one horizontal segment
    Segment tmpSeg(Point(0,0),Point(0,0));
    for(size_t idx = 1; idx < tripleVec.size(); idx+=2 )
    {
        if(tripleVec[idx].first.getY() == vertSegVec[tripleVec[idx].second].getTail().getY())
        {
            //right to left
            tmpSeg.setHead( tripleVec[idx].first ); 
            tmpSeg.setTail( tripleVec[idx-1].first );
            segLinkVec.push_back(SegmentLink(tmpSeg, tripleVec[idx].second, tripleVec[idx-1].second));
            segLinkVec[tripleVec[idx-1].second].headId_ = segLinkVec.size()-1;
            segLinkVec[tripleVec[idx].second].tailId_   = segLinkVec.size()-1;
        }
        else
        {
            //left to right
            tmpSeg.setHead( tripleVec[idx-1].first );
            tmpSeg.setTail( tripleVec[idx].first ); 
            segLinkVec.push_back(SegmentLink(tmpSeg, tripleVec[idx-1].second, tripleVec[idx].second));
            segLinkVec[tripleVec[idx].second].headId_ = segLinkVec.size()-1;
            segLinkVec[tripleVec[idx-1].second].tailId_ = segLinkVec.size()-1;
        }           
    }

    // Debug dump of the link table.
    for(size_t idx = 0; idx < segLinkVec.size(); ++idx )
    {
        printf("segmentLink:[%u] (%d,%d)-(%d,%d) head:%u tail:%u\n", (unsigned)idx,
                segLinkVec[idx].seg_.getHead().getX(),segLinkVec[idx].seg_.getHead().getY(),
                segLinkVec[idx].seg_.getTail().getX(),segLinkVec[idx].seg_.getTail().getY(),
                (unsigned)segLinkVec[idx].headId_, (unsigned)segLinkVec[idx].tailId_
              );
    }       
    contourSegVec.reserve(2*vertSegVec.size());
    for(size_t idx = 0; idx < segLinkVec.size(); ++idx )
    {
        // Already-consumed links are marked by headId_ == tailId_ (== 0).
        // NOTE(review): index 0 is also a valid link id, so a genuine loop
        // through element 0 could be skipped — verify against callers.
        if ( segLinkVec[idx].headId_ ==  segLinkVec[idx].tailId_ ) continue;
        
        // Trace one closed loop, clearing each visited link.
        size_t startIdx = idx;
        size_t currIdx  = startIdx;
        size_t prevIdx  = currIdx;
        do
        {
            contourSegVec.push_back(segLinkVec[currIdx].seg_);  
            prevIdx  = currIdx;
            currIdx = segLinkVec[currIdx].tailId_;
            segLinkVec[prevIdx].headId_ = 0;
            segLinkVec[prevIdx].tailId_ = 0;
        } while(startIdx != currIdx);
    }

    // Debug dump of the resulting contour.
    for (size_t idx = 0; idx < contourSegVec.size(); ++idx )
    {
        printf("CONTOUR: (%d,%d)-(%d,%d)\n", contourSegVec[idx].getHead().getX(),contourSegVec[idx].getHead().getY(),
                                             contourSegVec[idx].getTail().getX(),contourSegVec[idx].getTail().getY()
              );
    }
}
/*********************w is variable********************************/
// Corrects a fish-eye image into a 512x512 remapped image.
//
// imgOrg            : source fish-eye image (3-channel, 8-bit).
// center, radius    : circle of the fish-eye area inside imgOrg.
// w_longtitude,
// w_latitude        : warp parameters passed to getPhi1()/auxFunc().
// distMap           : target mapping — PERSPECTIVE or LATITUDE_LONGTITUDE.
// theta_left, phi_up: rotation angles applied to the view sphere in
//                     PERSPECTIVE mode.
// camerFieldAngle   : camera field of view in radians; must be in (0, PI].
// camProjMode       : fish-eye projection model (stereographic, equidistance,
//                     equisolid, orthogonal) used to derive the focal length.
//
// Returns the corrected image, or an empty Mat when camerFieldAngle is
// out of range. Also writes the result to "ret.jpg" as a side effect.
Mat corrector::latitudeCorrection4(Mat imgOrg, Point2i center, int radius, double w_longtitude, double w_latitude, distMapMode distMap, double theta_left, double phi_up, double camerFieldAngle, camMode camProjMode)
{
	// The field angle must lie in (0, PI] for the projections below to be valid.
	if (!(camerFieldAngle > 0 && camerFieldAngle <= PI))
	{
		cout << "The parameter \"camerFieldAngle\" must be in the interval (0,PI]." << endl;
		return Mat();
	}
	double rateOfWindow = 0.9; // NOTE(review): unused in this version of the routine

	//int width = imgOrg.size().width*rateOfWindow;
	//int height = width;

	//int width = max(imgOrg.cols, imgOrg.rows);
	int width = 512; // fixed output resolution
	int height = width;
	//int height = imgOrg.rows;


	Size imgSize(width, height);
	int center_x = imgSize.width / 2;
	int center_y = imgSize.height / 2;

	// Destination image, initialized to black.
	Mat retImg(imgSize, CV_8UC3, Scalar(0, 0, 0));

	// Angular step per destination pixel.
	double dx = camerFieldAngle / imgSize.width;
	double dy = camerFieldAngle / imgSize.height;

	//coordinates for the latitude map
	double latitude;
	double longitude;

	//unit sphere coordinates
	double x, y, z, r;

	//parametric coordinates of the sphere
	double Theta_sphere;
	double Phi_sphere;

	//polar coordinates on the fish-eye image
	double p;
	double theta;

	//cartesian coordinates
	double x_cart, y_cart;

	//image coordinates in imgOrg
	double u, v;
	Point pt, pt1, pt2, pt3, pt4;

	//image coordinates in imgRet
	int u_latitude, v_latitude;
	Rect imgArea(0, 0, imgOrg.cols, imgOrg.rows);

	//angular offset of the imgRet origin (centers the field of view)
	double longitude_offset, latitude_offset;
	longitude_offset = (PI - camerFieldAngle) / 2;
	latitude_offset = (PI - camerFieldAngle) / 2;

	double foval = 0.0;// focal length


	cv::Mat_<Vec3b> _retImg = retImg;
	cv::Mat_<Vec3b> _imgOrg = imgOrg;

	// Apply the calibration according to the chosen mapping/camera type.
	// For each destination pixel, compute the source pixel by going
	// dest -> unit sphere -> fish-eye polar -> source image coordinates.
	double  limi_latitude = 2 * auxFunc(w_latitude, 0);
	double  limi_longtitude = 2 * auxFunc(w_longtitude, 0);
	for (int j = 0; j < imgSize.height; j++)
	{

		for (int i = 0; i < imgSize.width; i++)
		{
			Point3f tmpPt(i - center_x, center_y - j, 600);// the z component (600) sets the focal length of the imaging plane
			double normPt = norm(tmpPt);

			// Map the destination pixel onto the unit sphere.
			switch (distMap)
			{
			case PERSPECTIVE:

				// Normalize the pinhole ray to a point on the unit sphere.
				tmpPt.x /= normPt;
				tmpPt.y /= normPt;
				tmpPt.z /= normPt;

				x = tmpPt.x;
				y = tmpPt.y;
				z = tmpPt.z;

				break;
			case LATITUDE_LONGTITUDE:

				//latitude = latitude_offset + j*dy;

				// Warped latitude; longitude stays linear in i.
				latitude = getPhi1((double)j*limi_latitude / imgSize.height, w_latitude);
				//longitude = getPhi1((double)i * limi_longtitude / imgSize.width,w_longtitude);

				//latitude = latitude_offset + j*dy;
				longitude = longitude_offset + i*dx;
				//Convert from latitude coordinates to sphere coordinates.
				x = -sin(latitude)*cos(longitude);
				y = cos(latitude);
				z = sin(latitude)*sin(longitude);

				break;
			default:
				break;
			}

			// In perspective mode, rotate the sphere by (theta_left, phi_up).
			if (distMap == PERSPECTIVE)
			{
				//double theta = PI/4;
				//double phi = -PI/2;
				cv::Mat curPt(cv::Point3f(x, y, z));
				std::vector<cv::Point3f> pts;

				// rotate the globe eastward
				//pts.push_back(cv::Point3f(cos(theta), 0, -sin(theta)));
				//pts.push_back(cv::Point3f(0, 1, 0));
				//pts.push_back(cv::Point3f(sin(theta), 0, cos(theta)));

				// rotate the globe southward
				//pts.push_back(cv::Point3f(1, 0, 0));
				//pts.push_back(cv::Point3f(0, cos(phi), sin(phi)));
				//pts.push_back(cv::Point3f(0, -sin(phi), cos(phi)));

				// combined rotation in both directions (rows of the rotation matrix)
				pts.push_back(cv::Point3f(cos(theta_left), 0, sin(theta_left)));
				pts.push_back(cv::Point3f(sin(phi_up)*sin(theta_left), cos(phi_up), -sin(phi_up)*cos(theta_left)));
				pts.push_back(cv::Point3f(-cos(phi_up)*sin(theta_left), sin(phi_up), cos(phi_up)*cos(theta_left)));


				// Build the 3x3 rotation matrix and apply it to the current point.
				cv::Mat revert = cv::Mat(pts).reshape(1).t();

				cv::Mat changed(revert*curPt);

				cv::Mat_<double> changed_double;
				changed.convertTo(changed_double, CV_64F);

				x = changed_double.at<double>(0, 0);
				y = changed_double.at<double>(1, 0);
				z = changed_double.at<double>(2, 0);

				//std::cout << curPt << std::endl
				//	<<revert<<std::endl;
			}

			//Convert from unit sphere coordinates to parametric sphere coordinates.
			Theta_sphere = acos(z);
			Phi_sphere = cvFastArctan(y, x);//returns the value in degrees
			Phi_sphere = Phi_sphere*PI / 180;//convert from degrees to radians


			// Radial distance p on the fish-eye image depends on the
			// camera's projection model.
			switch (camProjMode)
			{
			case STEREOGRAPHIC:
				foval = radius / (2 * tan(camerFieldAngle / 4));
				p = 2 * foval*tan(Theta_sphere / 2);
				break;
			case EQUIDISTANCE:
				foval = radius / (camerFieldAngle / 2);
				p = foval*Theta_sphere;
				break;
			case EQUISOLID:
				foval = radius / (2 * sin(camerFieldAngle / 4));
				p = 2 * foval*sin(Theta_sphere / 2);
				break;
			case ORTHOGONAL:
				foval = radius / sin(camerFieldAngle / 2);
				p = foval*sin(Theta_sphere);
				break;
			default:
				cout << "The camera mode hasn't been choose!" << endl;
			}
			//Convert from parametric sphere coordinates to fish-eye polar coordinates.
			//p = sin(Theta_sphere);
			theta = Phi_sphere;

			//Convert from fish-eye polar coordinates to cartesian coordinates.
			x_cart = p*cos(theta);
			y_cart = p*sin(theta);

			//double R = radius / sin(camerFieldAngle / 2);

			//Convert from cartesian coordinates to source image coordinates.
			u = x_cart + center.x;
			v = -y_cart + center.y;

			pt = Point(u, v);

			// Skip destination pixels whose source falls outside imgOrg.
			if (!pt.inside(imgArea))
			{
				continue;
			}
			else
			{
				_retImg.at<Vec3b>(j, i) = _imgOrg.at<Vec3b>(pt);
			}


		}
	}

	//imshow("org", _imgOrg);
	//imshow("ret", _retImg);
	//cv::waitKey();
#ifdef _DEBUG_
	cv::namedWindow("Corrected Image", CV_WINDOW_AUTOSIZE);
	imshow("Corrected Image", retImg);
	cv::waitKey();
#endif
	imwrite("ret.jpg", retImg);
	return retImg;
}