コード例 #1
0
ファイル: video.cpp プロジェクト: Dixit-Z/SmartTracking
void *opencv(void * args)
{
	/*
	   START OF OPENCV PROCESSING
	   pthread entry point: segments the tracked ball in the shared frame
	   `imgOriginal`, finds the largest connected component, and updates the
	   global `Ball` state with the object's normalized position.
	   NOTE(review): reads/writes file-level globals (imgOriginal, imgDetection,
	   posX, posY, fSize, Ball, LH..HV, activate) with no visible locking —
	   assumes the caller serializes access; confirm.
	 */
	Mat imgHSV;

	cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); // convert from BGR to HSV

	inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // black out pixels outside the ball's HSV interval

	// Morphological opening (erode then dilate): removes small background noise
	erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
	dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

	// Morphological closing (dilate then erode): fills small holes in the blob
	dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
	erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

	int i, nlabels;
	Rect box;
	int maxArea=0;
	Mat labels;
	Mat centroids;
	Mat stats;
	
	// Compute the connected components of the binary image (4-connectivity)
	nlabels=connectedComponentsWithStats(imgDetection, labels, stats, centroids, 4, CV_32S);

	// Search for the largest connected component (label 0 is the background,
	// so start at 1)
	for(i=1; i<(int)nlabels;i++)
	{
		// row points at the stats row for label i: [LEFT, TOP, WIDTH, HEIGHT, AREA]
		int *row = (int *) &stats.at<int>(i,0);
		//printf("i : %d, mon area %d vs %d max \n", i, row[CC_STAT_AREA], maxArea);
		if(row[CC_STAT_AREA]>maxArea)
		{
			box = Rect(row[CC_STAT_LEFT], row[CC_STAT_TOP], row[CC_STAT_WIDTH], row[CC_STAT_HEIGHT]);
			maxArea=row[CC_STAT_AREA];
		}
	}

	Moments position;
	//cout << maxArea << endl << (int)(Ball.lastdZone*0.3) << endl;
	// If the largest component is not big enough, it is not the tracked object
	if(maxArea>200)//(int)(0.3*Ball.lastdZone))
	{
		Ball.setFoundCV(true);
		rectangle(imgOriginal, box, Scalar(0,255,0), 4, 8, 0);

		// Compute the object's location from image moments of the cropped blob
		position = moments(imgDetection(box));

		double y = position.m01; //y
		double x = position.m10; //x
		double dZone = position.m00; //z (blob area, used as a depth proxy)
		//cout << "dZone " << dZone << endl << "LdZone " << Ball.lastdZone << endl;

		// Centroid = first-order moments / area, offset back to full-image coords
		posX = x / dZone;
		posY = y / dZone;
		posX+=box.x;
		posY+=box.y;
		

		// Depth decision: compare blob area with the last known area +/- 20%
		int posZ=0;
		if(dZone>Ball.lastdZone+Ball.lastdZone*0.2)
		{
			posZ=-1; // Too close to the object, need to move back.
		}
		else if(dZone > Ball.lastdZone-Ball.lastdZone*0.2 && dZone < Ball.lastdZone+Ball.lastdZone*0.2)
		{
			 posZ=0; // At the correct distance from the object
		}
		else
		{
			posZ=1; // Too far from the object, need to move forward.
		}
		 Ball.setCurrentCV((float)posX/fSize.width*100,(float)posY/fSize.height*100, (float)posZ);
	}
	else
	{
		if(activate) {// Reached when the detected zone is too small, or nothing is detected.
			//AtCmd::sendMovement(0, 0, 0, 0, 0); // CHANGE
		}
		Ball.setFoundCV(false);
	}

	/*
	   END OF OPENCV PROCESSING
	 */
	return NULL;
}
コード例 #2
0
ファイル: Draw Line 3.c プロジェクト: FSXAC/APSC-160-Projects
// main function
// main function
// Interactive line-drawing program: repeatedly reads an (x, y) endpoint,
// plots it on a MAX_ROW x MAX_COL character grid, connects it to the
// previously entered point, and redraws the grid.  Entering a coordinate
// of -1 quits.  Returns 0 on normal exit.
// NOTE(review): uses the `not`/`and`/`or` spellings — in C these require
// <iso646.h> (or a C++ compiler); confirm the build provides them.
int main(void) {
	// coord of endpoints of line; (x1, y1) starts at the sentinel (-2, -2),
	// meaning "no previous point entered yet"
	int x1 = -2, y1 = -2, x2, y2;
	char **grid;
	int isDone = 0;

	// set up dynamic memory 2D array
	// NOTE(review): malloc results are not checked for NULL
	grid = malloc(MAX_ROW * sizeof(char*));
	for (int row = 0; row < MAX_ROW; row++) {
		grid[row] = malloc(MAX_COL * sizeof(char));

		// add blank chars to each element
		for (int col = 0; col < MAX_COL; col++) {
			grid[row][col] = BLANK;
		}
	}

	while (not isDone) {
		// ask the user for the next endpoint
		x2 = getint("Enter X: ");
		y2 = getint("Enter Y: ");

		// check if point is on grid
		if (inRange(x2, 0, MAX_COL) and inRange(y2, 0, MAX_ROW)) {

			// add the end points to the array
			grid[y2][x2] = DOT;

			if (x1 != -2 and y1 != -2) {
				// draw line between the two points
				drawLine(x1, y1, x2, y2, grid);
			}
			
			// current point becomes the previous point for the next segment
			x1 = x2;
			y1 = y2;

			// display the grid
			displayGrid(grid);
		}
		else if (x2 == -1 or y2 == -1) {
			// quit
			isDone = 1;
		}
		else {
			// invalid input
			printf("XY coord not in range.\n");
		}
	}

	// end of program
	// free memory
	for (int i = 0; i < MAX_ROW; i++) {
		free(grid[i]);
	}
	free(grid);

	// end
	// NOTE(review): `pause;` merely evaluates the identifier and discards it —
	// a no-op statement; probably system("pause") or getchar() was intended.
	pause;
	return 0;
}
コード例 #3
0
void cPlayer::mount( P_NPC pMount )
{
	// Mounts this player on the given NPC: creates the matching mount item on
	// the character's Mount layer and removes the NPC from view.
	// Requirements: the NPC must exist, be within 2 tiles (GMs are exempt)
	// and be owned by this player (GMs may mount anything and auto-tame it).
	if ( !pMount )
		return;

	cUOSocket* socket = this->socket();
	if ( !inRange( pMount, 2 ) && !isGM() )
	{
		if ( socket )
			socket->sysMessage( tr( "You are too far away to mount!" ) );
		return;
	}

	if ( pMount->owner() == this || isGM() )
	{
		unmount();

		// Build the mount item: 0x915 is the fallback id when the body value
		// is not in the table below; tinted with the NPC's skin color.
		P_ITEM pMountItem = new cItem;
		pMountItem->Init();
		pMountItem->setId( 0x915 );
		pMountItem->setColor( pMount->skin() );

		// Map the NPC body id (low byte) to the corresponding mount-item id.
		switch ( static_cast<unsigned short>( pMount->body() & 0x00FF ) )
		{
		case 0xC8:
			pMountItem->setId( 0x3E9F ); break; // Horse
		case 0xE2:
			pMountItem->setId( 0x3EA0 ); break; // Horse
		case 0xE4:
			pMountItem->setId( 0x3EA1 ); break; // Horse
		case 0xCC:
			pMountItem->setId( 0x3EA2 ); break; // Horse
		case 0xD2:
			pMountItem->setId( 0x3EA3 ); break; // Desert Ostard
		case 0xDA:
			pMountItem->setId( 0x3EA4 ); break; // Frenzied Ostard
		case 0xDB:
			pMountItem->setId( 0x3EA5 ); break; // Forest Ostard
		case 0xDC:
			pMountItem->setId( 0x3EA6 ); break; // LLama
		case 0x34:
			pMountItem->setId( 0x3E9F ); break; // Brown Horse
		case 0x4E:
			pMountItem->setId( 0x3EA0 ); break; // Grey Horse
		case 0x50:
			pMountItem->setId( 0x3EA1 ); break; // Tan Horse
		case 0x74:
			pMountItem->setId( 0x3EB5 ); break; // Nightmare
		case 0x75:
			pMountItem->setId( 0x3EA8 ); break; // Silver Steed
		case 0x72:
			pMountItem->setId( 0x3EA9 ); break; // Dark Steed
		case 0x7A:
			pMountItem->setId( 0x3EB4 ); break; // Unicorn
		case 0x84:
			pMountItem->setId( 0x3EAD ); break; // Kirin
		case 0x73:
			pMountItem->setId( 0x3EAA ); break; // Etheral
		case 0x76:
			pMountItem->setId( 0x3EB2 ); break; // War Horse-Brit
		case 0x77:
			pMountItem->setId( 0x3EB1 ); break; // War Horse-Mage Council
		case 0x78:
			pMountItem->setId( 0x3EAF ); break; // War Horse-Minax
		case 0x79:
			pMountItem->setId( 0x3EB0 ); break; // War Horse-Shadowlord
		case 0xAA:
			pMountItem->setId( 0x3EAB ); break; // Etheral LLama
		case 0x3A:
			pMountItem->setId( 0x3EA4 ); break; // Forest Ostard
		case 0x39:
			pMountItem->setId( 0x3EA3 ); break; // Desert Ostard
		case 0x3B:
			pMountItem->setId( 0x3EA5 ); break; // Frenzied Ostard
		case 0x90:
			pMountItem->setId( 0x3EB3 ); break; // Seahorse
		case 0xAB:
			pMountItem->setId( 0x3EAC ); break; // Etheral Ostard
		case 0xBB:
			pMountItem->setId( 0x3EB8 ); break; // Ridgeback
		case 0x17:
			pMountItem->setId( 0x3EBC ); break; // giant beetle
		case 0x19:
			pMountItem->setId( 0x3EBB ); break; // skeletal mount
		case 0x1a:
			pMountItem->setId( 0x3EBD ); break; // swamp dragon
		case 0x1f:
			pMountItem->setId( 0x3EBE ); break; // armor dragon
		}

		// Equip the mount item and remember which NPC it represents.
		this->addItem( cBaseChar::Mount, pMountItem );
		pMountItem->setTag( "pet", cVariant( pMount->serial() ) );
		pMountItem->update();

		// if this is a gm lets tame the animal in the process
		if ( isGM() )
		{
			pMount->setOwner( this );
		}

		// remove it from screen!
		pMount->bark( Bark_Idle );
		pMount->removeFromView( false );
		pMount->fight( 0 );
		pMount->setStablemasterSerial( serial_ );
	}
	else if ( socket ) // fix: socket was dereferenced without the null check used above
		socket->sysMessage( tr( "You dont own that creature." ) );
}
コード例 #4
0
ファイル: test.cpp プロジェクト: annethf/Marginalia
SpreadsheetCell Spreadsheet::getCellAt(int x, int y)
{
	// Return a copy of the cell at column x, row y.
	// Throws std::out_of_range (with an empty message) when either
	// coordinate falls outside the sheet's dimensions.
	if (inRange(x, mWidth) && inRange(y, mHeight))
		return mCells[x][y];
	throw std::out_of_range("");
}
コード例 #5
0
// Tracks a colored marker (HSV range set below) through the webcam and moves
// a cursor through a maze image; when the "Chicken Invaders 5" window is
// found, the tracked point also drives the system mouse and fires clicks.
// NOTE(review): in this snippet the function's final closing brace is
// missing — the visible text ends after the if (cap.isOpened()) block.
void camera_feed()
{
	VideoCapture cap(0);
	if (cap.isOpened())
	{
		// distance[0]/[1]: remaining x/y cursor travel; MUL and dif are unused here
		int distance[3], MUL = 1, dif = 0;
		char key;
		bool first_run = false, is_size_checked = false, moved = false, shoot = false;
		unsigned long max_contours_amount = 0;
		Point drawing_point, cursor, additional_point;
		vector<vector<Point>> contours, main_points;
		vector<Point> pen1, pen2, pens;
		vector<Vec4i> hierarchy;
		Mat frame, real_pic, drawing_frame, maze;
		// HSV range of the tracked marker (greenish), plus an unused draw color
		Scalar low_boundry(45, 107, 52), high_boundry(86, 227, 160), color(100, 100, 100);
		//namedWindow("drawing_frame", 1);
		//namedWindow("frame", 1);
		cap >> frame;
		cursor = Point(20, 20);
		// Binarize the maze: quantize to pure black/white, then invert so the
		// collision checks below can test against WHITE walls.
		maze = imread("maze1.jpg");
		maze = maze / WHITE;
		maze = maze * WHITE;
		bitwise_not(maze, maze);
		
		

		RECT rect = { 0 }; // gaming stuff!
		HWND window = FindWindow("Chicken Invaders 5", "Chicken Invaders 5");
		Sleep(2000);
		if (window)
		{
			// Bring the game window to the foreground so clicks land on it
			GetClientRect(window, &rect);
			SetForegroundWindow(window);
			SetActiveWindow(window);
			SetFocus(window);
		}

		while (true)
		{
			shoot = false;
			cap >> frame;
			real_pic = frame.clone();
			// Drop last frame's candidate contours
			while (main_points.size() != 0)
			{
				main_points.pop_back();
			}
			if (!first_run)
			{
				// One-time sizing of the drawing surface and maze to the screen
				drawing_frame = real_pic.clone();
				resize(drawing_frame, drawing_frame, Size(GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN) - 50));
				resize(maze, maze, Size(GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN) - 50));
				first_run = true;
			}
			flip(real_pic, real_pic, 1);

			// Threshold the mirrored frame to a binary mask of the marker
			cvtColor(frame, frame, COLOR_BGR2HSV);
			
			inRange(frame, low_boundry, high_boundry, frame);
			flip(frame, frame, 1);

			contours.clear();
			resize(frame, frame, Size(GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN)));
			findContours(frame, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
			is_size_checked = false;
			// Keep only contours comparable to the largest seen so far (70% rule);
			// max_contours_amount persists across frames.
			if (contours.size() != 0)
			{
				for (vector<vector<Point>>::iterator it = contours.begin(); it != contours.end(); it++)
				{
					if (it->size() > max_contours_amount * 0.7)
					{
						main_points.push_back(*it);
						max_contours_amount = it->size();
						is_size_checked = true;
					}
				}
			}
			if (is_size_checked)
			{
				moved = false;
				drawing_point = stabilized_point(main_points[0]);
				// Two markers visible: follow the right-hand one and fire
				if (main_points.size() == 2)
				{
					if (stabilized_point(main_points[0]).x < stabilized_point(main_points[1]).x)
					{
						drawing_point = stabilized_point(main_points[1]);
						
					}
					shoot = true;
				}
				// Push the point outward from screen center by 10% to widen reach
				drawing_point.x += (drawing_point.x - drawing_frame.size().width / 2) / 10;
				drawing_point.y += (drawing_point.y - drawing_frame.size().height / 2) / 10;
				// Clamp the target point inside the maze bounds
				while (drawing_point.x > maze.size().width)
				{
					drawing_point.x--;
				}
				while (drawing_point.x < 0)
				{
					drawing_point.x++;

				}
				while (drawing_point.y > maze.size().height)
				{
					drawing_point.y--;
				}
				while (drawing_point.y < 0)
				{
					drawing_point.y++;
				}

				// Step the cursor toward the target in 1/15 increments,
				// stopping an axis when the next step would enter a WHITE wall
				distance[0] = drawing_point.x - cursor.x;
				distance[1] = drawing_point.y - cursor.y;
				while (distance[0] != 0 && distance[1] != 0)
				{
					if (maze.at<Vec3b>(Point(cursor.x + distance[0] / 15, cursor.y))[0] != WHITE)
					{
						cursor.x += distance[0] / 15;
						distance[0] /= 15;
						moved = true;
					}
					if (maze.at<Vec3b>(Point(cursor.x, cursor.y + distance[1] / 15))[0] != WHITE)
					{
						cursor.y += distance[1] / 15;
						distance[1] /= 15;
						moved = true;
					}				
					if (!moved)
					{
						// Both axes blocked: report and abandon this move
						putText(drawing_frame, "Struck a wall!", Point(0, 40), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(WHITE, WHITE, BLACK, 1), 1, CV_AA);
						distance[0] = 0;
						distance[1] = 0;
					}
					
				}
				SetCursorPos(drawing_point.x, drawing_point.y); // gaming stuff!
				circle(drawing_frame, cursor, 13, Scalar(WHITE, BLACK, WHITE), 2);
				circle(drawing_frame, drawing_point, 13, Scalar(WHITE, BLACK, WHITE), -1);
				//circle(drawing_frame, stabilized_point(pen1), 13, Scalar(WHITE, WHITE, BLACK), -1);
			}
			else
			{
				// Marker not found this frame: keep the cursor where it was
				putText(drawing_frame, "Lost drawing object!", Point(0, 20), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(WHITE, WHITE, BLACK, 1), 1, CV_AA);
				circle(drawing_frame, cursor, 13, Scalar(WHITE, WHITE, BLACK), 3);
			}
			if (shoot)
			{
				LeftClick(drawing_point.x, drawing_point.y);
			}
			key = waitKey(10);

			// Composite the maze over the camera view, then invert for display
			drawing_frame = maze + drawing_frame;
			bitwise_not(drawing_frame, drawing_frame);
			//imshow("drawing_frame", drawing_frame);
			//imshow("frame", frame);

			// Reset the working mats for the next iteration
			frame = BLACK;
			drawing_frame = BLACK;
			real_pic = BLACK;

		}
	}
コード例 #6
0
// Saliency-map pipeline (Itti-Koch style): computes intensity, color and
// orientation (Gabor) conspicuity maps — the first two on worker threads —
// averages them, upsamples, then outlines salient regions on the input
// image and writes the result to Salient_Image.jpg.
int main(int argc, char** argv) {
	if (argc != 2) {
		cout << "No image" << endl;
		return -1;
	}
	cout << "Loading Image: ";
	cout << argv[1] << endl;
	Mat inputImage = imread(argv[1], CV_LOAD_IMAGE_COLOR);

	// NOTE(review): prints a warning but does NOT return — processing
	// continues with an empty Mat; confirm whether a return -1 is missing.
	if (!inputImage.data) {
		cout << "Invalid Image" << endl;
	}
	Mat finalImage;
	Mat * ptr = NULL;
	pthread_t intensityThread, colorThread;
// Loop runs exactly once; presumably left over from timing experiments
for(int counter = 0; counter <1; counter++)	
{

	long totaltime = timestamp();
	IntensityImg = Get_Intensity_Image(inputImage);
	
	//long intTime = timestamp();
	//IntensityImage_GPU = Get_Intensity_Image_GPU(inputImage);
	// NOTE(review): ptr is still NULL here — intensity_processing receives a
	// NULL argument (it presumably works on the IntensityImg global).
	pthread_create(&intensityThread, NULL, intensity_processing, (void *) ptr);
	//pthread_create(&intensityThread, NULL, intensity_processing, (void *) ptrGPU);
	//pthread_join(intensityThread, NULL);
	//long intFinal = timestamp() - intTime;

	double maxInt;

	minMaxLoc(IntensityImg, NULL, &maxInt, NULL, NULL);
        //Normalize all color channels
        // NOTE(review): this loop has no effect — `intensity` is a local copy
        // fetched once at (0,0) and is never written back to inputImage, and
        // at<uchar> on a 3-channel Mat indexes bytes rather than pixels.
        // Confirm the intended per-pixel normalization.
        int i = 0, j = 0;
        Vec3b intensity = inputImage.at<Vec3b>(i,j);

	for(i=0; i<inputImage.rows; i++)//row
	{
		for(j=0; j<inputImage.cols; j++)//
		{
			if(inputImage.at<uchar>(i, j) >= 0.1 * maxInt)
			{
				intensity.val[0] = (intensity.val[0] * 255)/maxInt;//b
				intensity.val[1] = (intensity.val[1] * 255)/maxInt;//g
				intensity.val[2] = (intensity.val[2] * 255)/maxInt;//r	
				
			}
		}
	}


	// Color conspicuity map on a second thread; reads inputImage via ptr
	ptr = &inputImage;
	//long colTime = timestamp();
	pthread_create(&colorThread, NULL, color_processing, (void *) ptr);
	//pthread_join(colorThread, NULL);
	//long colFinal = timestamp() - colTime;
	//cout << "Color Map Time: " << colFinal << "\n";
	
	//long orTime = timestamp();
         //Mat AggOr;
	 //Mat AggOr = getGaborImage(IntensityImg);
         // Orientation map from a Gabor filter bank, stretched to 0..255
         Mat AggOr = getGaborImage();
	 normalize(AggOr, AggOr, 0, 255, NORM_MINMAX, -1);
	//long orFinal = timestamp() - orTime;
	//cout << "Orientation Map Time: " << orFinal << "\n";

        // Wait for both worker threads before combining their globals
        pthread_join(intensityThread, NULL);
        pthread_join(colorThread, NULL);

	//gpu::GpuMat temp = AggIntGPU;
	
	// Final saliency = mean of the three conspicuity maps
	finalImage = (AggInt + AggColor + AggOr) / 3;
	normalize(finalImage, finalImage, 0, 255, NORM_MINMAX, -1);

	// Undo the pyramid downscaling (4 levels) used by the map builders
	for (int bCtr = 0; bCtr < 4; bCtr++) {
		pyrUp(finalImage, finalImage);
	}
	
	long finaltime = timestamp() - totaltime;
	//cout << "Intensity Map Time: " << intFinal << "\n";
	//cout << "Color Map Time: " << colFinal << "\n";
	cout << "Total Time: " << finaltime << "\n";
}
	// Threshold the saliency map to a band of interest and outline each
	// top-level salient region in a random color
	Mat contImg;
	inRange(finalImage, 160, 230, contImg);
	vector < vector<Point> > contours;
	vector < Vec4i > hierarchy;

	findContours(contImg, contours, hierarchy, CV_RETR_CCOMP,
			CV_CHAIN_APPROX_SIMPLE);
	// Walk only the top-level contours via hierarchy[i][0] (next sibling)
	for (int i = 0; i >= 0; i = hierarchy[i][0]) {
		Scalar color(rand() & 255, rand() & 255, rand() & 255);
		drawContours(inputImage, contours, i, color, 3, 8, hierarchy);
	}

	imwrite("Salient_Image.jpg", inputImage);

	waitKey(0);
	return 0;
}
コード例 #7
0
ファイル: plurfmt.cpp プロジェクト: venkatarajasekhar/Qt
void
PluralFormat::applyPattern(const UnicodeString& newPattern, UErrorCode& status) {
    // Parses a plural pattern of the form "keyword{message} keyword{message}..."
    // into fParsedValuesHash (keyword -> message text), validating each keyword
    // against pluralRules.  On any syntax problem `status` is set and parsing
    // stops; on success, checkSufficientDefinition() must confirm the required
    // keywords are present, otherwise U_DEFAULT_KEYWORD_MISSING is reported.
    if (U_FAILURE(status)) {
        return;
    }
    this->pattern = newPattern;
    UnicodeString token;
    int32_t braceCount=0;          // current {..} nesting depth
    fmtToken type;                 // character class, filled in by inRange()
    UBool spaceIncluded=FALSE;     // saw whitespace while reading a keyword
    
    // Lazily create the keyword->pattern table; it owns its values and
    // deletes them with deleteHashStrings.
    if (fParsedValuesHash==NULL) {
        fParsedValuesHash = new Hashtable(TRUE, status);
        if (U_FAILURE(status)) {
            return;
        }
        fParsedValuesHash->setValueDeleter(deleteHashStrings);
    }
    
    UBool getKeyword=TRUE;         // TRUE: reading a keyword; FALSE: reading its message
    UnicodeString hashKeyword;
    UnicodeString *hashPattern;
    
    for (int32_t i=0; i<pattern.length(); ++i) {
        UChar ch=pattern.charAt(i);

        // Characters outside the recognized classes are only legal inside a
        // message body, where they are taken literally.
        if ( !inRange(ch, type) ) {
            if (getKeyword) {
                status = U_ILLEGAL_CHARACTER;
                return;
            }
            else {
                token += ch;
                continue;
            }
        }
        switch (type) {
            case tSpace:
                if (token.length()==0) {
                    continue;      // leading space: skip
                }
                if (getKeyword) {
                    // space after keyword
                    spaceIncluded = TRUE;
                }
                else {
                    token += ch;   // spaces inside a message are literal
                }
                break;
            case tLeftBrace:
                if ( getKeyword ) {
                    // '{' terminates the keyword; validate it before switching
                    // to message-collection mode.
                    if (fParsedValuesHash->get(token)!= NULL) {
                        status = U_DUPLICATE_KEYWORD;
                        return; 
                    }
                    if (token.length()==0) {
                        status = U_PATTERN_SYNTAX_ERROR;
                        return;
                    }
                    if (!pluralRules->isKeyword(token)) {
                        status = U_UNDEFINED_KEYWORD;
                        return;
                    }
                    hashKeyword = token;
                    getKeyword = FALSE;
                    token.remove();
                }
                else  {
                    // '{' inside a message: legal only when already nested,
                    // and then kept literally.
                    if (braceCount==0) {
                        status = U_UNEXPECTED_TOKEN;
                        return;
                    }
                    else {
                        token += ch;
                    }
                }
                braceCount++;
                spaceIncluded = FALSE;
                break;
            case tRightBrace:
                if ( getKeyword ) {
                    status = U_UNEXPECTED_TOKEN;
                    return;
                }
                else  {
                    // Store the collected message; the hash table takes
                    // ownership of hashPattern (see setValueDeleter above).
                    hashPattern = new UnicodeString(token);
                    fParsedValuesHash->put(hashKeyword, hashPattern, status);
                    if (U_FAILURE(status)) {
                        return;
                    }
                    braceCount--;
                    if ( braceCount==0 ) {
                        // Outermost '}' closed: reset for the next keyword
                        getKeyword=TRUE;
                        hashKeyword.remove();
                        hashPattern=NULL;
                        token.remove();
                    }
                    else {
                        token += ch;   // nested '}' is literal message text
                    }
                }
                spaceIncluded = FALSE;
                break;
            case tLetter:
            case tNumberSign:
                // A space in the middle of a keyword is a syntax error
                if (spaceIncluded) {
                    status = U_PATTERN_SYNTAX_ERROR;
                    return;
                }
                // intentional fall-through: accumulate the character
            default:
                token+=ch;
                break;
        }
    }
    if ( checkSufficientDefinition() ) {
        return;
    }
    else {
        status = U_DEFAULT_KEYWORD_MISSING;
        return;
    }
}
コード例 #8
0
ファイル: Visao.cpp プロジェクト: oscar-neiva/vision-tracking
// Thresholds the member image imagemHSV around the color `cor`: hue within
// +/- variacaoH (absolute) and saturation/value within +/- variacao
// (fractional), writing the binary mask into imagemTratada.
// NOTE(review): the `imagem` parameter is never used — the member imagemHSV
// is thresholded instead; confirm whether `imagem` should be converted to
// HSV here first.
void Visao::tratarImagemComCor(Mat imagem, Cor cor){
    inRange(imagemHSV, Scalar(cor.corH-variacaoH, cor.corS*(1-variacao), cor.corV*(1-variacao)),
     Scalar(cor.corH+variacaoH, cor.corS*(1+variacao), cor.corV*(1+variacao)), imagemTratada);
}
コード例 #9
0
ファイル: 3Dscan.cpp プロジェクト: imclab/OpenC3DS
//--------------------------------------------------------------
// One 3-D scan step: diffs the laser-on frame (*TaL) against the laser-off
// frame (*TsL), thresholds the difference in HSV with the camera's
// calibrated bounds, refines the laser line to sub-pixel accuracy
// (cam_cap_subpixel fills cam->p), and writes a grayscale preview with the
// detected points drawn in blue back to *grislaser.  *TaL and *TsL are
// refreshed in place from the working copies.
void scan(Cam *cam, ofImage *grislaser, ofImage *TaL, ofImage *TsL){

    Mat image1;
    Mat Laser1;
    Mat Tot, gris, grisc;

    Mat HSV;
    Mat threshold1;

//    camera(cam);

    // Working copies of the two input frames (laser on / laser off)
    Mat tt1, tt2;

    tt1 = toCv(*TaL).clone();
    Laser1 = tt1.clone();

    tt2 = toCv(*TsL).clone();
    Tot = tt2.clone();
    Mat th1;
    Mat image2;

    // The difference image isolates the laser stripe; threshold it in HSV.
    // NOTE(review): bounds are passed as (Bi, Gi, Ri) — the channel naming
    // looks BGR-like for an HSV image; confirm the calibration convention.
    absdiff(Laser1, Tot, image1);
    cvtColor(image1, HSV, CV_BGR2HSV);
    inRange(HSV, Scalar(cam->Bi, cam->Gi, cam->Ri), Scalar(cam->Bs, cam->Gs, cam->Rs), threshold1);
    th1 = threshold1.clone();
    image2 = image1.clone();
    GaussianBlur(threshold1, th1, cv::Size(1,1), 0,0);
    GaussianBlur(image2, image1, cv::Size(cam->blur_ksizew, cam->blur_ksizeh), cam->blur_sigmax, cam->blur_sigmay);
    // Sub-pixel refinement of the laser line; fills cam->p[0..resy-1]
    cam_cap_subpixel(cam, image1, threshold1);

    cvtColor(image1, gris, CV_BGR2GRAY);
    cvtColor(gris, grisc, CV_GRAY2BGR);

    // Draw each detected laser point (one per scan row) in blue
    for(int i=0; i<cam->resy; i++){
        cv::Point paux1;
        paux1.x = (int)cam->p[i].x;
        paux1.y = (int)cam->p[i].y;

        line(grisc, paux1, paux1, Scalar(255,0,0), 1,8,0);
    }

    // Convert the results back to ofImage and publish them to the caller
    ofImage gl,L1,Tt;

    toOf(grisc, gl);
    gl.update();

    *grislaser = gl;

    toOf(Laser1, L1);
    L1.update();

    *TaL = L1;

    toOf(Tot, Tt);
    Tt.update();

    *TsL = Tt;
}
コード例 #10
0
    // Solves z^3 + a*z^2 + b*z + c = 0 for the real root lying in [0..1],
    // via Cardano's formula on the depressed cubic t^3 + p*t + q = 0
    // obtained with the substitution x = z - a/3.
    qreal static inline singleRealSolutionForCubic(qreal a, qreal b, qreal c)
    {
        //returns the real solutiuon in [0..1]
        //We use the Cardano formula

        //substituiton: x = z - a/3
        // z^3+pz+q=0

        // Constant term ~0: z = 0 is (numerically) a root, and it is in range.
        if (c < 0.000001 && c > -0.000001)
            return 0;

        const qreal a_by3 = a / 3.0;

        const qreal a_cubic = a * a * a;

        // Coefficients of the depressed cubic
        const qreal p = b - a * a_by3;
        const qreal q = 2.0 * a_cubic / 27.0 - a * b / 3.0 + c;

        // Discriminant: D >= 0 means one real root (or a repeated pair)
        const qreal q_squared = q * q;
        const qreal p_cubic = p * p * p;
        const qreal D = 0.25 * q_squared + p_cubic / 27.0;

        if (D >= 0) {
            const qreal D_sqrt = qSqrt(D);
            qreal u = _cbrt( -q * 0.5 + D_sqrt);
            qreal v = _cbrt( -q * 0.5 - D_sqrt);
            qreal z1 = u + v;   // the single real root of the depressed cubic

            qreal t1 = z1 - a_by3;

            if (inRange(t1))
                return t1;
            // Fallback: for D == 0 the repeated root is -u (u == v here);
            // try it when t1 fell outside [0..1].
            qreal z2 = -1 *u;
            qreal t2 = z2 - a_by3;
            return t2;
        }

        //casus irreducibilis
        // D < 0: three distinct real roots, computed trigonometrically.
        const qreal p_minus_sqrt = qSqrt(-p);

        //const qreal f = sqrt(4.0 / 3.0 * -p);
        const qreal f = qSqrt(4.0 / 3.0) * p_minus_sqrt;

        //const qreal sqrtP = sqrt(27.0 / -p_cubic);
        const qreal sqrtP = -3.0*qSqrt(3.0) / (p_minus_sqrt * p);


        const qreal g = -q * 0.5 * sqrtP;

        qreal s1;
        qreal s2;
        qreal s3;

        // cosacos yields the three cosine terms of the trigonometric solution
        cosacos(g, s1, s2, s3);

        // Try each of the three real roots in turn; the last is returned
        // unconditionally (exactly one root is expected in [0..1]).
        qreal z1 = -1* f * s2;
        qreal t1 = z1 - a_by3;
        if (inRange(t1))
            return t1;

        qreal z2 = f * s1;
        qreal t2 = z2 - a_by3;
        if (inRange(t2))
            return t2;

        qreal z3 = -1 * f * s3;
        qreal t3 = z3 - a_by3;
        return t3;
    }
コード例 #11
0
bool rice::p2p::util::RedBlackMap_SubWrappedMap::containsKey(::java::lang::Object* key)
{
    // A key is present in this sub-map view only when it falls inside the
    // view's range AND the backing map actually contains it.
    if (!inRange(key))
        return false;
    return RedBlackMap_this->containsKey(key);
}
// Webcam color-tracking demo.  With calibrationMode true, HSV slider bars
// drive a single threshold (plus a Canny-threshold trackbar) so the user can
// tune filter values; otherwise four preset colors (blue, yellow, red,
// green) are segmented and tracked on every frame.  Loops until killed.
int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;

	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat HSV;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	waitKey(1000);
	while(1){
		//store image to matrix
		capture.read(cameraFeed);

		src = cameraFeed;

		// NOTE(review): a bad frame exits the whole program from inside the
		// loop; confirm a retry/continue was not intended.
  		if( !src.data )
  		{ return -1; }

		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){

		//calibration branch: threshold using the HSV slider values so the
		//user can find the appropriate color range interactively

			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
			morphOps(threshold);
			imshow(windowName2,threshold);

		//the folowing for canny edge detec
			/// Create a matrix of the same type and size as src (for dst)
	  		dst.create( src.size(), src.type() );
	  		/// Convert the image to grayscale
	  		cvtColor( src, src_gray, CV_BGR2GRAY );
	  		/// Create a window
	  		namedWindow( window_name, CV_WINDOW_AUTOSIZE );
	  		/// Create a Trackbar for user to enter threshold
	  		createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold);
	  		/// Show the image
			// NOTE(review): 3-argument call (the 4-arg form below takes an
			// Object first) — presumably an overload; verify.
			trackFilteredObject(threshold,HSV,cameraFeed);
		}
		else{
			//create some temp fruit objects so that
			//we can use their member functions/information
			Object blue("blue"), yellow("yellow"), red("red"), green("green");

			// Each color re-runs cvtColor because trackFilteredObject draws
			// on cameraFeed, so HSV must be refreshed from the updated frame.
			//first find blue objects
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(blue,threshold,HSV,cameraFeed);
			//then yellows
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(yellow,threshold,HSV,cameraFeed);
			//then reds
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(red,threshold,HSV,cameraFeed);
			//then greens
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,green.getHSVmin(),green.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(green,threshold,HSV,cameraFeed);

		}
		//show frames
		//imshow(windowName2,threshold);

		imshow(windowName,cameraFeed);
		//imshow(windowName1,HSV);

		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}
	return 0;
}
コード例 #13
0
void sprites::importSpriteSheet(const char*fname){
	if(!fname){
		if(!load_file_generic("Load image"))
			return;
		fname=the_file.c_str();
	}
	if(fname){
		Fl_Shared_Image * loaded_image=Fl_Shared_Image::get(fname);
		if(!loaded_image){
			fl_alert("Error loading image");
			return;
		}
		unsigned depth=loaded_image->d();
		if (unlikely(depth != 3 && depth != 4 && depth!=1)){
			fl_alert("Please use color depth of 1,3 or 4\nYou Used %d",depth);
			loaded_image->release();
			return;
		}else
			printf("Image depth %d\n",depth);
		uint32_t w,h;
		w=loaded_image->w();
		h=loaded_image->h();
		bool grayscale;
		uint8_t*palMap;
		uint8_t*imgptr;
		unsigned remap[256];
		if(depth==1){
			grayscale=handle1byteImg(loaded_image,remap);
			if(!grayscale){
				palMap=(uint8_t*)loaded_image->data()[1];
				imgptr=(uint8_t*)loaded_image->data()[2];
			}
		}
		uint8_t mask[3];
		bool useAlpha;
		if(getMaskColorImg(loaded_image,grayscale,remap,palMap,mask,useAlpha)){
			std::vector<int> rects;//x0,x1,y0,y1
			Fl_Window *winP;
			Fl_Progress *progress;
			mkProgress(&winP,&progress);
			time_t lasttime=time(NULL);
			progress->maximum(h);
			Fl::check();
			for(int y=0;y<h;++y){
				for(int x=0;x<w;++x){
					if(!isMask(x,y,loaded_image,grayscale,useAlpha,mask)){
						rects.push_back(x);
						while(!isMask(x+1,y,loaded_image,grayscale,useAlpha,mask))
							++x;
						rects.push_back(x);
						rects.push_back(y);
						rects.push_back(y);
					}
				}
				if((time(NULL)-lasttime)>=1){
					lasttime=time(NULL);
					progress->value(h);
					Fl::check();
				}
			}
			progress->maximum(rects.size());
			progress->value(0);
			//Now combine the rectangles
			//Start by combining rectangles by that touch with y values
			bool canEnd;
			int pass=0;
			char txtbufstage[1024];
			char txtbuf[1024];
			do{
			canEnd=true;
			snprintf(txtbufstage,1024,"Stage 1 pass %d",pass++);
			winP->label(txtbufstage);
			Fl::check();
			for(int i=0;i<rects.size();i+=4){
				for(int j=0;j<rects.size();j+=4){
					if(i==j)
						continue;
					//See if rectangles are touching or overlap
					//if((inRange(rects[j+2],rects[i+2]-1,rects[i+3]+1)||inRange(rects[i+2],rects[j+2]-1,rects[j+3]+1))&&(!((rects[i+2]==rects[j+2])||(rects[i+3]==rects[j+3])))){//Is rectange j directly above or below i
					if((rects[j+3]-rects[i+2])==1){
						if((inRange(rects[j],rects[i]-1,rects[i+1]+1)||inRange(rects[i],rects[j]-1,rects[j+1]+1))){
							canEnd=false;
							//Merge the two squares obtaining maximum size
							//Now try and find the combination that results in the largest rectangle
							rects[i]=std::min(rects[i],rects[j]);
							rects[i+1]=std::max(rects[i+1],rects[j+1]);
							rects[i+2]=std::min(rects[i+2],rects[j+2]);
							rects[i+3]=std::max(rects[i+3],rects[j+3]);
							rects.erase(rects.begin()+j,rects.begin()+j+4);
							//Now try to find next in sequence
							bool foundit;
							do{
								foundit=false;
								for(int a=0;a<rects.size();a+=4){
									int look=rects[i+3]+1;
									if(rects[a+2]==look){
										if((inRange(rects[a],rects[i]-1,rects[i+1]+1)||inRange(rects[i],rects[a]-1,rects[a+1]+1))){
											foundit=true;
											rects[i]=std::min(rects[i],rects[a]);
											rects[i+1]=std::max(rects[i+1],rects[a+1]);
											rects[i+2]=std::min(rects[i+2],rects[a+2]);
											rects[i+3]=std::max(rects[i+3],rects[a+3]);
											rects.erase(rects.begin()+a,rects.begin()+a+4);
										}
									}
								}
							}while(foundit);
						}
					}
				}
				if((time(NULL)-lasttime)>=1){
					lasttime=time(NULL);
					progress->maximum(rects.size());
					progress->value(i);
					snprintf(txtbuf,1024,"Rectangles: %d",rects.size());
					progress->label(txtbuf);
					Fl::check();
				}
			}
			}while(!canEnd);
			pass=0;
			do{
				canEnd=true;
				snprintf(txtbufstage,1024,"Stage 2 pass %d",pass++);
				winP->label(txtbufstage);
				progress->maximum(rects.size());
				progress->value(0);
				Fl::check();
				for(int i=0;i<rects.size();i+=4){
					for(int j=0;j<rects.size();j+=4){
						if(i==j)
							continue;
						//Merge overlapping rectangles
						if((rects[i]<=rects[j+1])&&(rects[i+1]>=rects[j])&&(rects[i+2]<=rects[j+3])&&(rects[i+3]>=rects[j+2])){
							canEnd=false;
							rects[i]=std::min(rects[i],rects[j]);
							rects[i+1]=std::max(rects[i+1],rects[j+1]);
							rects[i+2]=std::min(rects[i+2],rects[j+2]);
							rects[i+3]=std::max(rects[i+3],rects[j+3]);
							rects.erase(rects.begin()+j,rects.begin()+j+4);
						}
						//Merge touching rectangles
						if(abs(rects[i+1]-rects[j])==1){
							if((inRange(rects[j+2],rects[i+2]-1,rects[i+3]+1)||inRange(rects[i+2],rects[j+2]-1,rects[j+3]+1))){
								canEnd=false;
								rects[i]=std::min(rects[i],rects[j]);
								rects[i+1]=std::max(rects[i+1],rects[j+1]);
								rects[i+2]=std::min(rects[i+2],rects[j+2]);
								rects[i+3]=std::max(rects[i+3],rects[j+3]);
								rects.erase(rects.begin()+j,rects.begin()+j+4);
							}
						}
					}
					if((time(NULL)-lasttime)>=1){
						lasttime=time(NULL);
						progress->maximum(rects.size());
						progress->value(i);
						snprintf(txtbuf,1024,"Rectangles: %d",rects.size());
						progress->label(txtbuf);
						Fl::check();
					}
				}
			}while(!canEnd);
			winP->remove(progress);// remove progress bar from window
			delete(progress);// deallocate it
			delete winP;
			std::vector<bool> deleted;
			deleted.resize(rects.size()/4);
			//Now show the window allowing user to adjust sprite settings
			if(window){
				win=new Fl_Double_Window(640,480,"Sprite selection");
				win->begin();
				win->resizable(win);
				Fl_Button * Ok=new Fl_Button(256,448,64,24,"Okay");
				Ok->callback(RetCB,(void*)1);
				Fl_Button * Cancel=new Fl_Button(320,448,64,24,"Cancel");
				Cancel->callback(RetCB,0);
				Fl_Scroll*scroll=new Fl_Scroll(8,8,624,440);
				box=new RectBox(8,8,w,h);
				box->scroll=scroll;
				box->rects=&rects;
				box->deleted=&deleted;
				box->image(loaded_image);
				scroll->end();
				win->end();
				win->set_modal();
				win->show();
				Fl::check();

				while(win->shown())
					Fl::wait();
				delete win;
			}else
				retOkay=true;
			if(retOkay){
				for(unsigned i=0;i<rects.size();i+=4){
					recttoSprite(rects[i],rects[i+1],rects[i+2],rects[i+3],-1,loaded_image,grayscale,remap,palMap,mask,true,useAlpha);
				}
				updateTileSelectAmt();
			}
			deleted.clear();
			rects.clear();
		}
		loaded_image->release();
	}
}
コード例 #14
0
ファイル: PathFinder.cpp プロジェクト: cmangos/mangos-classic
// Converts the current polygon corridor (m_pathPolyRefs / m_polyLength) into
// concrete world-space waypoints stored in m_pathPoints.
//
// Uses either Detour's findStraightPath ("string pulling") or the custom
// findSmoothPath walker, depending on m_useStraightPath.  On failure or a
// degenerate result it falls back to BuildShortcut() and tags m_type as
// PATHFIND_NOPATH / PATHFIND_SHORT.  When CONFIG_BOOL_PATH_FIND_OPTIMIZE is
// enabled, nearly collinear intermediate points are dropped (at most
// SKIP_POINT_LIMIT in a row).  Finally the actual end position is recorded
// and, if m_forceDestination is set, the requested destination is forced.
void PathFinder::BuildPointPath(const float* startPoint, const float* endPoint)
{
    float pathPoints[MAX_POINT_PATH_LENGTH * VERTEX_SIZE];
    uint32 pointCount = 0;
    dtStatus dtResult;

    if (m_useStraightPath)
    {
        dtResult = m_navMeshQuery->findStraightPath(
                       startPoint,         // start position
                       endPoint,           // end position
                       m_pathPolyRefs,     // current path
                       m_polyLength,       // length of current path
                       pathPoints,         // [out] path corner points
                       nullptr,               // [out] flags
                       nullptr,               // [out] shortened path
                       (int*)&pointCount,
                       m_pointPathLimit);   // maximum number of points/polygons to use
    }
    else
    {
        dtResult = findSmoothPath(
                       startPoint,         // start position
                       endPoint,           // end position
                       m_pathPolyRefs,     // current path
                       m_polyLength,       // length of current path
                       pathPoints,         // [out] path corner points
                       (int*)&pointCount,
                       m_pointPathLimit);    // maximum number of points
    }

    if (pointCount < 2 || dtStatusFailed(dtResult))
    {
        // only happens if pass bad data to findStraightPath or navmesh is broken
        // single point paths can be generated here
        // TODO : check the exact cases
        DEBUG_FILTER_LOG(LOG_FILTER_PATHFINDING, "++ PathFinder::BuildPointPath FAILED! path sized %d returned\n", pointCount);
        BuildShortcut();
        m_type = PATHFIND_NOPATH;
        return;
    }

    // Hitting the limit means the path was truncated, so it cannot be trusted.
    if (pointCount == m_pointPathLimit)
    {
        DEBUG_FILTER_LOG(LOG_FILTER_PATHFINDING, "++ PathFinder::BuildPointPath FAILED! path sized %d returned, lower than limit set to %d\n", pointCount, m_pointPathLimit);
        BuildShortcut();
        m_type = PATHFIND_SHORT;
        return;
    }

    if (pointCount > 2 && sWorld.getConfig(CONFIG_BOOL_PATH_FIND_OPTIMIZE))
    {
        // Start and end points are always kept.
        uint32 tempPointCounter = 2;

        PointsArray tempPathPoints;
        tempPathPoints.resize(pointCount);

        for (uint32 i = 0; i < pointCount; ++i)      // y, z, x  expected here
        {
            uint32 pointPos = i * VERTEX_SIZE;
            tempPathPoints[i] = Vector3(pathPoints[pointPos + 2], pathPoints[pointPos], pathPoints[pointPos + 1]);
        }

        // Optimize points
        Vector3 emptyVec = { 0.0f, 0.0f, 0.0f };

        uint8 cutLimit = 0;
        for (uint32 i = 1; i < pointCount - 1; ++i)
        {
            G3D::Vector3 p  = tempPathPoints[i];     // Point
            G3D::Vector3 p1 = tempPathPoints[i - 1]; // PrevPoint
            G3D::Vector3 p2 = tempPathPoints[i + 1]; // NextPoint

            // Collinearity determinant (twice the signed area of triangle
            // p1-p-p2 in the XY plane): ~0 when p lies on the p1-p2 line.
            float lineLen = (p1.y - p2.y) * p.x + (p2.x - p1.x) * p.y + (p1.x * p2.y - p2.x * p1.y);

            // Drop near-collinear points, but never more than SKIP_POINT_LIMIT
            // consecutively, so long straight segments keep some waypoints.
            if (fabs(lineLen) < LINE_FAULT && cutLimit < SKIP_POINT_LIMIT)
            {
                tempPathPoints[i] = emptyVec;
                cutLimit++;
            }
            else
            {
                tempPointCounter++;
                cutLimit = 0;
            }
        }

        m_pathPoints.resize(tempPointCounter);

        // Compact the surviving (non-sentinel) points into m_pathPoints.
        uint32 pointPos = 0;
        for (uint32 i = 0; i < pointCount; ++i)
        {
            if (tempPathPoints[i] != emptyVec)
            {
                m_pathPoints[pointPos] = tempPathPoints[i];
                pointPos++;
            }
        }

        pointCount = tempPointCounter;
    }
    else
    {
        m_pathPoints.resize(pointCount);
        for (uint32 i = 0; i < pointCount; ++i)
        {
            uint32 pointPos = i * VERTEX_SIZE;
            m_pathPoints[i] = { pathPoints[pointPos + 2], pathPoints[pointPos], pathPoints[pointPos + 1] };
        }
    }

    // first point is always our current location - we need the next one
    setActualEndPosition(m_pathPoints[pointCount - 1]);

    // force the given destination, if needed
    if (m_forceDestination &&
            (!(m_type & PATHFIND_NORMAL) || !inRange(getEndPosition(), getActualEndPosition(), 1.0f, 1.0f)))
    {
        // we may want to keep partial subpath
        if (dist3DSqr(getActualEndPosition(), getEndPosition()) < 0.3f * dist3DSqr(getStartPosition(), getEndPosition()))
        {
            setActualEndPosition(getEndPosition());
            m_pathPoints[m_pathPoints.size() - 1] = getEndPosition();
        }
        else
        {
            setActualEndPosition(getEndPosition());
            BuildShortcut();
        }

        m_type = PathType(PATHFIND_NORMAL | PATHFIND_NOT_USING_PATH);
    }

    NormalizePath();

    DEBUG_FILTER_LOG(LOG_FILTER_PATHFINDING, "++ PathFinder::BuildPointPath path type %d size %d poly-size %d\n", m_type, pointCount, m_polyLength);
}
コード例 #15
0
ファイル: GraphFitting.cpp プロジェクト: cxf476/uwo
// Runs one round of alpha-expansion graph-cut labeling that assigns each
// foreground ("vessel") voxel of pImg3D to one of the candidate models.
//
// Graph layout: one node per vessel voxel plus one extra node (num_pixels-1)
// that collects "hole" connections.  The last label (num_labels-1) is
// reserved: it costs 5000 on real voxels but 0 on the extra node, while all
// other labels cost INFINIT on the extra node, so only the extra node takes
// the reserved label.  Neighbouring vessel voxels are linked with a Potts
// smoothness term (SMOOTHCOST when labels differ).
//
// After expansion, per-model support counts are refreshed, models with no
// support are invalidated (the reserved last model stays valid), and a line
// is re-fitted for every surviving model.
//
// Returns the graph energy computed BEFORE expansion.
// NOTE(review): the post-expansion compute_energy() result is discarded —
// confirm whether the pre- or post-optimization energy was intended.
long long CGraphFitting::fastFitting()
{
	int num_pixels=pImg3D->foreNum+1;
	int num_labels=getValidNum();
	////////////////////////////////////////////////////////////
	// Per-node data costs, indexed [node*num_labels + label].
	int *data = new int[num_pixels*num_labels];
	int curIndex=0;

	int slice=pImg3D->getS();
	int width=pImg3D->getW();
	int height=pImg3D->getH();

	// Fill data costs for every vessel voxel, in scan order.
	for(int z = 0;z<slice;++z)
		for(int y=0;y<height;++y)
			for(int x=0;x<width;++x)
			{
				bool vessel=pImg3D->isVessel(x,y,z);
				if(vessel)
				{
					for (int l=0; l < num_labels; l++ )
					{
						if(l==num_labels-1)
							data[curIndex*num_labels+l]=5000;//INFINIT;
						else
							data[curIndex*num_labels+l]=models[l].compEnergy(x,y,z,*pImg3D);
					}
					curIndex++;
				}
			}
	// Data costs for the extra node: only the reserved label is free.
	for (int l=0; l < num_labels; l++ )
	{
		if(l==num_labels-1)
			data[curIndex*num_labels+l]=0;
		else
			data[curIndex*num_labels+l]=INFINIT;
	}

	//////////////////////////////////////////////////////////////////
	// Per-label cost: flat penalty for using a model at all.
	int *label = new int[num_labels];
	for(int k=0;k<num_labels;++k)		label[k]=LABELCOST;
	/////////////////////////////////////////////////////////////////
	// Symmetric Potts smoothness matrix.
	int *smooth = new int[num_labels*num_labels];
	memset(smooth,0,sizeof(int)*num_labels*num_labels);
	for( int i=0; i<num_labels; i++ ) 
		for ( int j=0; j<num_labels; j++ ) 
			smooth[ i*num_labels+j ] = smooth[ i+j*num_labels ] =SMOOTHCOST* int(i!=j);
	long long energy=0;
	try{
		GCoptimizationGeneralGraph *gc = new GCoptimizationGeneralGraph(num_pixels,num_labels);
		gc->setDataCost(data);
		gc->setLabelCost(label);

#ifdef USE_SMOOTHCOST
		gc->setSmoothCost(smooth);
		// Link each vessel voxel with its +x/+y/+z vessel neighbours; count
		// non-vessel neighbours ("holes") in all six directions and connect
		// them to the shared extra node with that multiplicity.
		for(int z = 0;z<slice;++z)
			for(int y=0;y<height;++y)
				for(int x=0;x<width;++x)
				{
					if(!pImg3D->isVessel(x,y,z))	continue;

					int numHoles=0;
					//////////////////////////////////////////////////////////////////////////////////
					if(inRange(Voxel(x+1,y,z)))//right neighbour
					{
						if(pImg3D->isVessel(x+1,y,z))	
							gc->setNeighbors(pImg3D->getRealPos(x,y,z),pImg3D->getRealPos(x+1,y,z));
						else
							++numHoles;
					}
					//////////////////////////////////////////////////////////////////////////////////
					//////////////////////////////////////////////////////////////////////////////////
					if(inRange(Voxel(x,y+1,z)))//top neighbour
					{
						if(pImg3D->isVessel(x,y+1,z))	
							gc->setNeighbors(pImg3D->getRealPos(x,y,z),pImg3D->getRealPos(x,y+1,z));
						else
							++numHoles;
					}
					//////////////////////////////////////////////////////////////////////////////////
					//////////////////////////////////////////////////////////////////////////////////
					if(inRange(Voxel(x,y,z+1)))//front neighbour
					{
						if(pImg3D->isVessel(x,y,z+1))	
							gc->setNeighbors(pImg3D->getRealPos(x,y,z),pImg3D->getRealPos(x,y,z+1));
						else
							++numHoles;
					}
					//////////////////////////////////////////////////////////////////////////////////
					if(inRange(Voxel(x-1,y,z))&& !pImg3D->isVessel(x-1,y,z) )	++numHoles;//left hole
					if(inRange(Voxel(x,y-1,z))&& !pImg3D->isVessel(x,y-1,z) )	++numHoles;//down hole
					if(inRange(Voxel(x,y,z-1))&& !pImg3D->isVessel(x,y,z-1) )	++numHoles;//back hole

					if(numHoles>0)
						gc->setNeighbors(pImg3D->getRealPos(x,y,z),num_pixels-1,numHoles);
				}
#endif
		// Energy before optimization (this is the value returned).
		energy=gc->compute_energy();
		gc->expansion(2);// run expansion for 2 iterations. For swap use gc->swap(num_iterations);
		gc->compute_energy();

		// Record each node's label and tally per-model support.
		for ( int  i = 0; i < num_pixels; i++ )
		{
			int tag = gc->whatLabel(i);
			models[tag].addSupport();
			pLabels[i]=tag;
		}
		////////////////////////////////////////////////////////////////////////////////////
		// A model survives only if at least one voxel chose it; the reserved
		// last model is always kept valid.
		for(int i=0;i<num_labels;++i)
		{
			int sp=models[i].getSupport();
			models[i].setValid(sp>0);
			if(i==num_labels-1)//last model ,must be valid
				models[i].setValid(true);
		}
		///////////////////////////////////////////////
		// Re-fit a line for each surviving (non-reserved) model.
		for(int i=0;i<num_labels-1;++i)
		{
			if(models[i].isValid())
			{
				fitLine(i,gc);
			}
		}
		delete gc;
	}
	catch (GCException& e){ // fix: catch by reference (was by value — needless copy / slicing risk)
		e.Report();
	}

	delete [] smooth;
	delete [] label;
	delete [] data;
	return energy;
}
コード例 #16
0
// Segments the scene by depth relative to the face region `r`: builds a mask
// (`dst`) of pixels nearer than the twice-refined mean face depth, shows and
// saves the masked RGB crop, then runs cvblobslib on the re-loaded saved
// image to extract the biggest blob and estimate head orientation from its
// fitted ellipse.  Returns that orientation in degrees (remapped so the
// result is relative to vertical).
//
// NOTE(review): the IplImages created below (original, originalThr,
// displayBiggestBlob) are never released, so each call leaks them; `mat`
// wraps displayBiggestBlob's pixel data, which makes inserting releases
// non-trivial.  The blob stage also communicates via the file "faceROI.png"
// written and re-read on every call — confirm this round trip is intended.
float thresholdSegmentation(Rect r, ntk::RGBDImage* current_frame, Mat& dst){
	Mat depth = current_frame->depth();
	Rect& rr = r;
	Mat depthROI = depth(rr), maskROI;
	Mat& rDepthROI = depthROI, &rMaskROI = maskROI;
	double var = 0.3; // NOTE(review): unused except by the commented-out line below

	// maskROI for nonZero values in the Face Region
	inRange(depthROI, Scalar::all(0.001), Scalar::all(255), maskROI);
	// Mean depth of Face Region
	Scalar mFace = cv::mean(rDepthROI, rMaskROI);
	//mFace[0]  = mFace[0] - mFace[0] * var;
	// Refine: re-threshold to pixels nearer than the mean, then re-average.
	inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	mFace = cv::mean(rDepthROI, rMaskROI);
	//inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	//mFace = cv::mean(rDepthROI, rMaskROI);
	


	
	// Mask for nearer than the mean of face.
	inRange(depth, Scalar::all(0.001), mFace, dst);
	Mat rgbImage = current_frame->rgb();
	Mat outFrame = cvCreateMat(rgbImage.rows, rgbImage.cols, CV_32FC3);
	rgbImage.copyTo(outFrame, dst);
	Mat outFrameROI;
	outFrameROI = outFrame(rr);
	//cvCopy(&rgbImage, &outFrame, &dst);
	//rgbImageROI = rgbImageROI(rr);
	
	imshow("ROI", outFrameROI);
	//imshow("thresholdSeg", dst);

	// For debug of cvblobslib
	// Display the color image	

	//imshow("faceRIO", maskROI);
	imshow("faceRIO", outFrameROI);
	bool iswrite; // NOTE(review): assigned but never checked
	const int nchannel = 1; // NOTE(review): only used by the commented-out cvSaveImage call
	vector<Rect> faces; // NOTE(review): unused
	//iswrite = imwrite("faceROI.png", maskROI);
	iswrite = imwrite("faceROI.png", outFrameROI);
	//iswrite = cvSaveImage("faceROI.jpeg", pOutFrame, &nchannel);

	// ---- blob segmentation on maskROI by using cvblobslib ----
	// ---		Third Trial	---
	//visualizeBlobs("faceROI.png", "faceRIO");




	// ---		First Trial Not Successful		---
	//Mat maskROIThr=cvCreateMat(maskROI.rows, maskROI.cols, CV_8UC1);	
	//maskROIThr = maskROI;
	//IplImage imgMaskROIThr = maskROIThr;
	//IplImage* pImgMaskROIThr = &imgMaskROIThr;
	//cvThreshold(pImgMaskROIThr, pImgMaskROIThr, 0.1, 255, CV_THRESH_BINARY_INV);

	// ---		Second Trial	---
	// Re-load the crop just written above as grayscale for blob extraction.
	IplImage* original = cvLoadImage("faceROI.png", 0);
	IplImage* originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
	IplImage* displayBiggestBlob = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 3);
	CBlobResult blobs;
	CBlob biggestBlob;
	//IplImage source = maskROIThr;	IplImage* pSource = &source;
	//blobs = CBlobResult(
	cvThreshold(original, originalThr, 0.1, 255, CV_THRESH_BINARY_INV);
	blobs =  CBlobResult( originalThr, NULL, 255);
	printf("%d blobs \n", blobs.GetNumBlobs());
	// Blob 0 after sorting by area is the biggest one.
	blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);
	biggestBlob.FillBlob(displayBiggestBlob, CV_RGB(255, 0, 0));

	// Drawing the eclipse and Rect on the blob
	// NOTE(review): this Mat shares displayBiggestBlob's pixel buffer (no copy).
	Mat mat(displayBiggestBlob);

	cv::RotatedRect blobEllipseContour;
	cv::Rect blobRectContour;
	//RotatedRect blobEllipseContour;
	blobEllipseContour = biggestBlob.GetEllipse();
	blobRectContour = biggestBlob.GetBoundingBox();
	//cv::ellipse(
	cv::ellipse(mat, blobEllipseContour, cv::Scalar(0,255, 0), 3, CV_AA);
	cv::rectangle(mat, blobRectContour, cv::Scalar(255, 0, 0), 3, CV_AA);
	//cv::ellipse(mat, blobEllipseContour);
	// Remap the ellipse angle so the returned orientation is measured
	// relative to the vertical axis.
	float headOritation = blobEllipseContour.angle;
	if (headOritation <= 180)
		headOritation = headOritation - 90;
	else
		headOritation = headOritation - 270;
	cv::putText(mat,
			cv::format("%f degree", headOritation),
			Point(10,20), 0, 0.5, Scalar(255,0,0,255));

	cv::imshow("faceRIO", mat);
	return(headOritation);
}
コード例 #17
0
ファイル: CCNavMesh.cpp プロジェクト: RyunosukeOno/rayjack
// Finds a walkable path on the nav mesh from `start` to `end` and appends
// smoothed waypoints to `pathPoints` (the vector is not cleared first).
//
// Mirrors the Recast/Detour demo's smooth-path algorithm: query a polygon
// corridor with findPath(), then advance along it in STEP_SIZE increments
// with moveAlongSurface(), recording at most MAX_SMOOTH points and handling
// path ends and off-mesh connections as they are reached.  If no corridor is
// found (npolys == 0), `pathPoints` is left untouched.
void cocos2d::NavMesh::findPath(const Vec3 &start, const Vec3 &end, std::vector<Vec3> &pathPoints)
{
    static const int MAX_POLYS = 256;
    static const int MAX_SMOOTH = 2048;
    // Half-extents of the search box used by findNearestPoly.
    float ext[3];
    ext[0] = 2; ext[1] = 4; ext[2] = 2;
    dtQueryFilter filter;
    dtPolyRef startRef, endRef;
    dtPolyRef polys[MAX_POLYS];
    int npolys = 0;
    _navMeshQuery->findNearestPoly(&start.x, ext, &filter, &startRef, 0);
    _navMeshQuery->findNearestPoly(&end.x, ext, &filter, &endRef, 0);
    _navMeshQuery->findPath(startRef, endRef, &start.x, &end.x, &filter, polys, &npolys, MAX_POLYS);

    if (npolys)
    {
        //// Iterate over the path to find smooth path on the detail mesh surface.
        //dtPolyRef polys[MAX_POLYS];
        //memcpy(polys, polys, sizeof(dtPolyRef)*npolys);
        //int npolys = npolys;

        // Clamp the endpoints onto the corridor's first/last polygons.
        float iterPos[3], targetPos[3];
        _navMeshQuery->closestPointOnPoly(startRef, &start.x, iterPos, 0);
        _navMeshQuery->closestPointOnPoly(polys[npolys - 1], &end.x, targetPos, 0);

        static const float STEP_SIZE = 0.5f;
        static const float SLOP = 0.01f;

        int nsmoothPath = 0;
        //dtVcopy(&m_smoothPath[m_nsmoothPath * 3], iterPos);
        //m_nsmoothPath++;

        pathPoints.push_back(Vec3(iterPos[0], iterPos[1], iterPos[2]));
        nsmoothPath++;

        // Move towards target a small advancement at a time until target reached or
        // when ran out of memory to store the path.
        while (npolys && nsmoothPath < MAX_SMOOTH)
        {
            // Find location to steer towards.
            float steerPos[3];
            unsigned char steerPosFlag;
            dtPolyRef steerPosRef;

            if (!getSteerTarget(_navMeshQuery, iterPos, targetPos, SLOP,
                polys, npolys, steerPos, steerPosFlag, steerPosRef))
                break;

            bool endOfPath = (steerPosFlag & DT_STRAIGHTPATH_END) ? true : false;
            bool offMeshConnection = (steerPosFlag & DT_STRAIGHTPATH_OFFMESH_CONNECTION) ? true : false;

            // Find movement delta.
            float delta[3], len;
            dtVsub(delta, steerPos, iterPos);
            len = dtMathSqrtf(dtVdot(delta, delta));
            // If the steer target is end of path or off-mesh link, do not move past the location.
            if ((endOfPath || offMeshConnection) && len < STEP_SIZE)
                len = 1;
            else
                len = STEP_SIZE / len;
            float moveTgt[3];
            dtVmad(moveTgt, iterPos, delta, len);

            // Move
            float result[3];
            dtPolyRef visited[16];
            int nvisited = 0;
            _navMeshQuery->moveAlongSurface(polys[0], iterPos, moveTgt, &filter,
                result, visited, &nvisited, 16);

            // Trim already-traversed polygons from the front of the corridor.
            npolys = fixupCorridor(polys, npolys, MAX_POLYS, visited, nvisited);
            npolys = fixupShortcuts(polys, npolys, _navMeshQuery);

            // Snap the moved position onto the polygon surface height.
            float h = 0;
            _navMeshQuery->getPolyHeight(polys[0], result, &h);
            result[1] = h;
            dtVcopy(iterPos, result);

            // Handle end of path and off-mesh links when close enough.
            if (endOfPath && inRange(iterPos, steerPos, SLOP, 1.0f))
            {
                // Reached end of path.
                dtVcopy(iterPos, targetPos);
                if (nsmoothPath < MAX_SMOOTH)
                {
                    //dtVcopy(&m_smoothPath[m_nsmoothPath * 3], iterPos);
                    //m_nsmoothPath++;
                    pathPoints.push_back(Vec3(iterPos[0], iterPos[1], iterPos[2]));
                    nsmoothPath++;
                }
                break;
            }
            else if (offMeshConnection && inRange(iterPos, steerPos, SLOP, 1.0f))
            {
                // Reached off-mesh connection.
                float startPos[3], endPos[3];

                // Advance the path up to and over the off-mesh connection.
                dtPolyRef prevRef = 0, polyRef = polys[0];
                int npos = 0;
                while (npos < npolys && polyRef != steerPosRef)
                {
                    prevRef = polyRef;
                    polyRef = polys[npos];
                    npos++;
                }
                for (int i = npos; i < npolys; ++i)
                    polys[i - npos] = polys[i];
                npolys -= npos;

                // Handle the connection.
                dtStatus status = _navMesh->getOffMeshConnectionPolyEndPoints(prevRef, polyRef, startPos, endPos);
                if (dtStatusSucceed(status))
                {
                    if (nsmoothPath < MAX_SMOOTH)
                    {
                        //dtVcopy(&m_smoothPath[m_nsmoothPath * 3], startPos);
                        //m_nsmoothPath++;
                        pathPoints.push_back(Vec3(startPos[0], startPos[1], startPos[2]));
                        nsmoothPath++;
                        // Hack to make the dotted path not visible during off-mesh connection.
                        if (nsmoothPath & 1)
                        {
                            //dtVcopy(&m_smoothPath[m_nsmoothPath * 3], startPos);
                            //m_nsmoothPath++;
                            pathPoints.push_back(Vec3(startPos[0], startPos[1], startPos[2]));
                            nsmoothPath++;
                        }
                    }
                    // Move position at the other side of the off-mesh link.
                    dtVcopy(iterPos, endPos);
                    float eh = 0.0f;
                    _navMeshQuery->getPolyHeight(polys[0], iterPos, &eh);
                    iterPos[1] = eh;
                }
            }

            // Store results.
            if (nsmoothPath < MAX_SMOOTH)
            {
                //dtVcopy(&m_smoothPath[m_nsmoothPath * 3], iterPos);
                //m_nsmoothPath++;

                pathPoints.push_back(Vec3(iterPos[0], iterPos[1], iterPos[2]));
                nsmoothPath++;
            }
        }
    }
}
コード例 #18
0
ファイル: KdTreeAccel.cpp プロジェクト: ShenyaoKe/Kaguya
bool KdTreeAccel::intersect(const Ray &inRay, DifferentialGeometry* queryPoint, const KdAccelNode* node,
	Float* tHit, Float* rayEpsilon) const
{
	//Compute initial parametric range of ray inside kd-tree extent
	Float tmin, tmax, rayEp;//temprary DifferentialGeometry result
	if (!node->bbox.intersectP(inRay, &tmin, &tmax))
	{
		return false;
	}

	//prepare to traversal kd-tree for ray
	Vector3f invDir(1.0 / inRay.d.x, 1.0 / inRay.d.y, 1.0 / inRay.d.z);

	//Traversal kd-tree node in order of ray
	bool isHit = false;
	if (node != nullptr)
	{
		//if hit outside the box, think it's used for later use
		if (inRay.tmax < tmin)
		{
			return isHit;
		}
		if (node->isLeaf())
		{
			DifferentialGeometry* tmpQuery = new DifferentialGeometry();
			Float hitDist;
			for (int i = 0; i < node->primIndex.size(); ++i)
			{
				int idx = node->primIndex[i];

				if (primitives[idx]->intersectP(inRay))
				{
					if (primitives[idx]->intersect(inRay, tmpQuery, &hitDist, &rayEp))
					{
						if (hitDist < *tHit && inRange(hitDist, tmin, tmax))
						{
							*queryPoint = *tmpQuery;
							*tHit = hitDist;
							*rayEpsilon = rayEp;
							queryPoint->shape = primitives[idx];
							isHit = true;
						}
					}
				}
			}
			delete tmpQuery;
		}
		else//if hit interior node
		{
			/*process interior node*/
			//calculate parametric distance from ray to split plane
			int axis = node->flags;
			Float tsplit = (node->split - inRay.o[axis]) * invDir[axis];

			//get children node for ray
            const KdAccelNode* nearChild;
            const KdAccelNode* farChild;
			bool belowFisrt = ((inRay.o[axis] < node->split) ||
				(inRay.o[axis] == node->split && inRay.d[axis] < 0));
			if (belowFisrt)
			{
				nearChild = node->belowNode;
				farChild = node->aboveNode;
			}
			else
			{
				nearChild = node->aboveNode;
				farChild = node->belowNode;
			}
			if (tsplit > tmax || tsplit <= 0)
			{
				isHit = intersect(inRay, queryPoint, nearChild, tHit, rayEpsilon);
			}
			else if (tsplit < tmin)
			{
				isHit = intersect(inRay, queryPoint, farChild, tHit, rayEpsilon);
			}
			else
			{
				isHit = intersect(inRay, queryPoint, nearChild, tHit, rayEpsilon);
				if (!isHit)
				{
					isHit = intersect(inRay, queryPoint, farChild, tHit, rayEpsilon);
				}
			}
// 			nearChild = nullptr;
// 			farChild = nullptr;
		}
	}
	return isHit;
}
コード例 #19
0
// Detects a sample object in test images samplePics/pic1.jpg .. pic4.jpg by
// HSV thresholding plus blob detection, then reports tilt/turn angles for
// the largest detected blob through `s_loc`.
//
// Side effects: opens "Input"/"Detection" windows, blocks on waitKey(-1)
// after each image, and sets s_loc.sample_not_found / returns early when an
// image cannot be read.
// NOTE(review): the outer while(true) only exits via that error return —
// confirm this is intended behavior.
void blob_main(sample_loc &s_loc)
{
    /*
	OpenCV defines HSV colors by the following ranges: 
	H: 0-180, S: 0-255, V: 0-255
	*/

	// hsvParams hsvWhite = {20,0,0,180,80,255}; // original
	hsvParams hsvWhite = {0, 0, 230, 180, 20, 255}; // edited
    hsvParams hsvPurple = {80,60,0,130,255,255};

    hsvParams hsv = hsvWhite; // s_loc.whiteSample==true? hsvWhite:hsvPurple;

    //Set up blob detection parameters
    SimpleBlobDetector::Params params = setupObjectBlobParams();

    vector<KeyPoint> keypoints;

    // const string filename("/home/buckeye/catkin_ws/src/CapstoneROS/src/vision/samplePics/25ft3.jpg");
    //Initialize camera
/* 
    VideoCapture cap(0);
    if ( !cap.isOpened() ){
        cout << "Cannot open the web cam" << endl;
        return;
    }
*/
    while(true){
      // Fix: the loop condition was "n=4" (assignment, always true), which
      // skipped pics 1-3 and spun forever on pic4. "n <= 4" visits pics 1-4.
      for(int n=1; n<=4; n++)
	{
	  // Build the image file name "samplePics/pic<n>.jpg".
	  stringstream ss;
	  ss << n;
	  string num = ss.str();
        Mat img, imgHSV, imgTHRESH, out;
		/* img = imread(filename, CV_LOAD_IMAGE_COLOR); */
		
    	// cap>>img;
		img = imread("samplePics/pic"+num+".jpg", CV_LOAD_IMAGE_COLOR);
        
        if(img.empty()){
            cout << "can not open image" << endl;
	    s_loc.sample_not_found=true;
            return;
        }

        //convert color to HSV, threshold and remove noise
        cvtColor(img, imgHSV, COLOR_BGR2HSV);
        findGrass(img,imgHSV);
        // Re-convert: findGrass modifies img, so refresh the HSV view.
        cvtColor(img, imgHSV, COLOR_BGR2HSV);

        inRange(imgHSV, Scalar(hsv.hL, hsv.sL, hsv.vL), Scalar(hsv.hH, hsv.sH, hsv.vH), imgTHRESH);
        removenoise(imgTHRESH);

        namedWindow("Input", WINDOW_AUTOSIZE);
        namedWindow("Detection", WINDOW_AUTOSIZE);
	
        Ptr<SimpleBlobDetector> blobDetect = SimpleBlobDetector::create(params);
        blobDetect->detect( imgTHRESH, keypoints );

        drawKeypoints(imgTHRESH, keypoints, out, CV_RGB(0,0,255), DrawMatchesFlags::DEFAULT);

        /* Circle blobs
        for(int i = 0; i < keypoints.size(); i++)
            circle(out, keypoints[i].pt, 1.5*keypoints[i].size, CV_RGB(0,255,0), 20, 8);
        */

        // Find largest keypoint blob, and use that in determining angle and distance
        if(keypoints.size() >= 1){
			int index = 0;
			for (int i = 0; i < keypoints.size(); i++){
				if( keypoints[i].size > keypoints[index].size ) { 
					index = i;
				}
			}
            cout<<endl<<endl<<"Object Found"<<endl;
            tilt_turn_degrees(imgTHRESH, keypoints[index].pt.y, keypoints[index].pt.x, &s_loc);
	    	robot_angle(&s_loc, imgTHRESH, keypoints[index].pt.x);
        }
        else{
            cout<<"No Object Found"<<endl;
        }

        imshow("Input", img);
        // imwrite("exampleOfFindGrass.jpg", img);
        imshow("Detection", out);
        // imwrite("showingKeypoints.jpg", out);
        waitKey(-1); // block until a key is pressed before the next image
	}
    }
}
コード例 #20
0
ファイル: ocv.cpp プロジェクト: turcofran/omfootctrl
// Process input
// Grabs one frame from videoCap, thresholds it in HSV space, and (unless
// paused) runs trackAndEval() on the resulting binary mask to produce a
// command string.  Returns that command ("" when the capture failed, nothing
// was recognized, or tracking is paused).
//
// Side effects: shows the annotated feed window (plus the HSV/threshold
// windows under SHOW_WIN), optionally writes the raw frame to videoOut
// (VIDEO_OUT), mirrors the frame when reading from a live camera (no
// VIDEO_IN), and handles keys: ESC exits the process, SPACE toggles `paused`.
string OCV::readBLine(void)
{
  auto start = chrono::steady_clock::now();
  string retCmd = "";
  char keyPressed;
  Mat camFeed, procHSV, procThreshold;
  //~ Mat canvas = Mat::zeros(FRAME_HEIGHT, FRAME_WIDTH, CV_8UC3);
  // Get commnds map and it
  if (!videoCap.read(camFeed)) {
    cerr << "VideoCapture is not reading" << endl;
    #ifdef VIDEO_IN
      exit(0);
    #endif
    return retCmd;
  }
  auto tic1 = chrono::duration_cast<chrono::milliseconds>(chrono::steady_clock::now() - start);
  //flip image
  #ifndef VIDEO_IN
    flip(camFeed, camFeed, 1);
  #endif
  // write out
  #ifdef VIDEO_OUT
    videoOut.write(camFeed);
  #endif
  cvtColor(camFeed, procHSV, COLOR_BGR2HSV);
  //~ inRange(procHSV, hsvRange.lowerb, hsvRange.upperb, procThreshold);
  // Threshold in HSV; the saturation lower bound is taken live from the
  // trackbar value (sLowerbTrackebar) instead of hsvRange.lowerb[1].
  inRange(procHSV, Scalar(hsvRange.lowerb[0], sLowerbTrackebar, hsvRange.lowerb[2]), hsvRange.upperb, procThreshold);
  erodeAndDilate(procThreshold);
  auto tic2 = chrono::duration_cast<chrono::milliseconds>(chrono::steady_clock::now() - start);
  if (!paused)
    retCmd = trackAndEval(procThreshold, camFeed);
  else 
    addWeighted(camFeed, 0.5, layoutPaused, 1, 0.0, camFeed);

  //addWeighted(camFeed, 0.8, layout6x, 0.7, 0.0, camFeed);  // TODO it's nut add the frames each time!
  addWeighted(camFeed, 0.8, layout6xcmds, 0.7, 0.0, camFeed);

  
  imshow(W_NAME_FEED, camFeed);

  //delay so that screen can refresh.
  #ifdef SHOW_WIN // Overwrite this macro with $ cmake -DSHOW_WIN=0 . 
    imshow(W_NAME_HSV, procHSV);
    imshow(W_NAME_THRESHOLD, procThreshold);
  #endif
  //image will not appear without this waitKey() command
  #ifdef VIDEO_IN
    keyPressed = waitKey(DEF_FRAME_INT_US/1000);
  #else  
    keyPressed = waitKey(CV_DELAY_MS);
  #endif
  //~ cout << "keyPressed: " << to_string(keyPressed) << endl;
  switch(keyPressed) {
    case 27: exit(0); break;          // ESC: quit
    case 32: paused = !paused; break; // SPACE: toggle pause
    default: break;
  }
  auto tic3 = chrono::duration_cast<chrono::milliseconds>(chrono::steady_clock::now() - start);
  #ifdef DEBUG_TICS
    cout << "processInput: " << tic1.count() << " - " << tic2.count()<< " - " << tic3.count()<< endl;
  #endif
  return retCmd;
}
コード例 #21
0
ファイル: test.cpp プロジェクト: annethf/Marginalia
// Stores `cell` at grid position (x, y).
// Throws std::out_of_range when either coordinate is outside the sheet's
// bounds (mWidth x mHeight).
void Spreadsheet::setCellAt(int x, int y, const SpreadsheetCell& cell)
{
	if(!inRange(x, mWidth) || !inRange(y, mHeight))
		// Fix: the exception previously carried an empty message, which made
		// failures hard to diagnose from what() alone.
		throw std::out_of_range("Spreadsheet::setCellAt: cell coordinates out of range");
	mCells[x][y] = cell;
}
コード例 #22
0
ファイル: allclass.cpp プロジェクト: vovadenisov/c-_project
// Click handler for the map widget: the map never consumes a click, so this
// always reports false and leaves `commands` untouched.
bool Map::onClick(int x, int y, queue<string>* commands){
    if (inRange(x,y))
        return false;
    // Fix: the original had no return on this path — flowing off the end of
    // a non-void function is undefined behavior.  An out-of-range click is
    // not consumed either, so false is returned in both cases.
    return false;
}
コード例 #23
0
ファイル: playlisthistory.cpp プロジェクト: Okspen/okPlayer
// Returns the playlist at the current history position, or 0 when the
// current index does not point at a valid entry.
Playlist *PlaylistHistory::current() const
{
    return inRange(m_currentIndex) ? m_playlists->at(m_currentIndex) : 0;
}
コード例 #24
0
ファイル: allclass.cpp プロジェクト: vovadenisov/c-_project
// Reports whether the click at (x, y) landed on this button; no commands
// are queued here — `commands` is accepted only to satisfy the interface.
bool OffBtn::onClick(int x, int y, queue<string>* commands){
    return inRange(x, y);
}
コード例 #25
0
// Converts the current polygon corridor (m_pathPolyRefs / m_polyLength) into
// world-space waypoints in m_pathPoints, via either Detour's straight path
// or the custom smooth-path walker.  On failure falls back to BuildShortcut()
// and tags m_type PATHFIND_NOPATH / PATHFIND_SHORT.  Afterwards applies
// hand-authored waypoint overrides for known-broken navmesh spots in Blade's
// Edge Arena (map 562) and Dalaran Sewers (map 617).
void PathFinderMovementGenerator::BuildPointPath(const float *startPoint, const float *endPoint)
{
    float pathPoints[MAX_POINT_PATH_LENGTH*VERTEX_SIZE];
    uint32 pointCount = 0;
    dtStatus dtResult = DT_FAILURE;
    if (m_useStraightPath)
    {
        dtResult = m_navMeshQuery->findStraightPath(
                startPoint,         // start position
                endPoint,           // end position
                m_pathPolyRefs,     // current path
                m_polyLength,       // length of current path
                pathPoints,         // [out] path corner points
                NULL,               // [out] flags
                NULL,               // [out] shortened path
                (int*)&pointCount,
                m_pointPathLimit);   // maximum number of points/polygons to use
    }
    else
    {
        dtResult = findSmoothPath(
                startPoint,         // start position
                endPoint,           // end position
                m_pathPolyRefs,     // current path
                m_polyLength,       // length of current path
                pathPoints,         // [out] path corner points
                (int*)&pointCount,
                m_pointPathLimit);    // maximum number of points
    }

    if (pointCount < 2 || dtResult != DT_SUCCESS)
    {
        // only happens if pass bad data to findStraightPath or navmesh is broken
        // single point paths can be generated here
        // TODO : check the exact cases
        sLog->outDebug(LOG_FILTER_MAPS, "++ PathFinderMovementGenerator::BuildPointPath FAILED! path sized %d returned\n", pointCount);
        BuildShortcut();
        m_type = PATHFIND_NOPATH;
        return;
    }
    else if (pointCount == m_pointPathLimit)
    {
        // Hitting the limit means the path was truncated and cannot be trusted.
        sLog->outDebug(LOG_FILTER_MAPS, "++ PathGenerator::BuildPointPath FAILED! path sized %d returned, lower than limit set to %d\n", pointCount, m_pointPathLimit);
        BuildShortcut();
        m_type = PATHFIND_SHORT;
        return;
    }

    // Detour stores vertices as (y, z, x); repack into Vector3(x, y, z).
    m_pathPoints.resize(pointCount);
    for (uint32 i = 0; i < pointCount; ++i)
        m_pathPoints[i] = Vector3(pathPoints[i*VERTEX_SIZE+2], pathPoints[i*VERTEX_SIZE], pathPoints[i*VERTEX_SIZE+1]);

    // first point is always our current location - we need the next one
    setActualEndPosition(m_pathPoints[pointCount-1]);

    // force the given destination, if needed
    if(m_forceDestination &&
        (!(m_type & PATHFIND_NORMAL) || !inRange(getEndPosition(), getActualEndPosition(), 1.0f, 1.0f)))
    {
        // we may want to keep partial subpath
        if(dist3DSqr(getActualEndPosition(), getEndPosition()) <
            0.3f * dist3DSqr(getStartPosition(), getEndPosition()))
        {
            setActualEndPosition(getEndPosition());
            m_pathPoints[m_pathPoints.size()-1] = getEndPosition();
        }
        else
        {
            setActualEndPosition(getEndPosition());
            BuildShortcut();
        }

        m_type = PathType(PATHFIND_NORMAL | PATHFIND_NOT_USING_PATH);
    }

    // Custom point for bugged zone - start
    float startEndDist = dist3DSqr(getStartPosition(), getEndPosition());   

    // Blade's Edge Arena (mapid)
    if (m_sourceUnit->GetMapId() == 562)
    {
        // Start & End Position
        // Half-working pillar
        if (startEndDist < 3000.0f && startPoint[1] >= 9.000000f && startPoint[2] <= 6234.335938f && startPoint[2] >= 6224.140430f && startPoint[0] >= 244.160000f && startPoint[0] <= 255.118940f) // southeast pillar
        {
            clear();
            m_pathPoints.resize(4);
            m_pathPoints[0] = getStartPosition();
            m_pathPoints[1] = Vector3(6231.038574f, 252.630981f, 11.300000f);
            m_pathPoints[2] = Vector3(6234.254395f, 256.513702f, 11.280000f);
            m_pathPoints[3] = getEndPosition();
        }
        // Problematic part:
        else if (startEndDist < 3000.0f && endPoint[1] >= 9.000000f && endPoint[2] <= 6234.335938f && endPoint[2] >= 6224.140430f && endPoint[0] >= 244.160000f && endPoint[0] <= 255.118940f) // southeast pillar
        {
            clear();
            m_pathPoints.resize(4);
            m_pathPoints[0] = getStartPosition();
            m_pathPoints[1] = Vector3(6234.254395f, 256.513702f, 11.280000f);
            m_pathPoints[2] = Vector3(6231.038574f, 252.630981f, 11.300000f);
            m_pathPoints[3] = getEndPosition();
        }

        // Working pillar
        // TODO: there is a spot on the ground where Z > 9, which means a charge from below is also possible -> fail
        if (startEndDist < 3000.0f && startPoint[1] >= 9.000000f && startPoint[2] >= 6243.385660f && startPoint[2] <= 6254.611660f && startPoint[0] >= 268.757917f && startPoint[0] <= 279.558794f) // northwest pillar
        {
            clear();
            m_pathPoints.resize(4);
            m_pathPoints[0] = getStartPosition();
            m_pathPoints[1] = Vector3(6246.324219f, 271.570000f, 11.300000f);
            m_pathPoints[2] = Vector3(6242.942484f, 267.210030f, 11.280000f);
            m_pathPoints[3] = getEndPosition();
        } 
        else if (startEndDist < 3000.0f && endPoint[1] >= 9.000000f && endPoint[2] >= 6243.385660f && endPoint[2] <= 6254.611660f && endPoint[0] >= 268.757917f && endPoint[0] <= 279.558794f) // northwest pillar
        {
            clear();
            m_pathPoints.resize(4);
            m_pathPoints[0] = getStartPosition();
            m_pathPoints[1] = Vector3(6242.942484f, 267.210030f, 11.280000f);
            m_pathPoints[2] = Vector3(6246.324219f, 271.570000f, 11.300000f);
            m_pathPoints[3] = getEndPosition();
        }
    }
    // Dalaran Sewers
    if (m_sourceUnit->GetMapId() == 617)
    {
        if (startPoint[2] >= 1330.033223f && startPoint[1] >= 9.000000f)      // Canal 1#
        {
            //  Path x,y,z
            m_pathPoints.resize(5);
            m_pathPoints[0] = getStartPosition();
            m_pathPoints[1] = Vector3(1332.749268f, 816.274780f, 8.355900f);
            m_pathPoints[2] = Vector3(1325.749268f, 816.602539f, 5.4000000f);
            m_pathPoints[3] = Vector3(1328.749268f, 816.602539f, 3.4000000f);
            m_pathPoints[4] = getEndPosition();
        }
        else if (startPoint[2] <= 1253.904785f && startPoint[1] >= 9.000000f)      // Canal 2#
        {
            //  Path x,y,z
            m_pathPoints.resize(5);
            m_pathPoints[0] = getStartPosition();
            m_pathPoints[1] = Vector3(1252.425395f, 764.971680f, 8.000000f); 
            // Fix: this waypoint was mistakenly written to index [3] (twice),
            // leaving m_pathPoints[2] default-initialized in the final path.
            m_pathPoints[2] = Vector3(1255.425395f, 764.971680f, 5.3559000f);
            m_pathPoints[3] = Vector3(1257.425395f, 764.971680f, 3.3559000f);
            m_pathPoints[4] = getEndPosition();
        }
    }
    // Custom point for bugged zone - end

    sLog->outDebug(LOG_FILTER_MAPS, "++ PathFinderMovementGenerator::BuildPointPath path type %d size %d poly-size %d\n", m_type, pointCount, m_polyLength);
}
コード例 #26
0
ファイル: VideoCorrect.cpp プロジェクト: isikdogan/autoposer
void VideoCorrect::correctImage(Mat& inputFrame, Mat& outputFrame, bool developerMode){
	
	// Corrects the subject's pose in inputFrame and writes the result to outputFrame:
	// 1) skin-color segmentation in YCrCb, 2) face detection to build a background
	// mask, 3) image-moment-based tilt estimate smoothed by a running average,
	// 4) rotation + ROI crop around the face. In developerMode a side-by-side
	// debug canvas (result | input | mask | best pose) is emitted instead.
	// NOTE: relies on many member fields (img, ycbcr, bw, face, lastFace, head,
	// prevSize, m, angle, center, smoothAngle, smoothSize, bufferCounter, roi,
	// bestImg, rotationBuffer, sizeBuffer) declared outside this block.
	resize(inputFrame, inputFrame, CAMERA_RESOLUTION);
	inputFrame.copyTo(img);

	//Convert to YCbCr color space
	// (the constant is CV_BGR2YCrCb, so channel order is Y, Cr, Cb — the
	// inRange bounds below are ordered to match)
	cvtColor(img, ycbcr, CV_BGR2YCrCb);

	//Skin color thresholding
	// Cr/Cb are member-configurable tolerances around the (150, 100) skin center.
	inRange(ycbcr, Scalar(0, 150 - Cr, 100 - Cb), Scalar(255, 150 + Cr, 100 + Cb), bw);

	// First-frame setup: detect the face, remember it, and capture the "best pose"
	// snapshot used later for overlay. IS_INITIAL_FRAME presumably tests
	// firstFrameCounter (decremented below) — TODO confirm the macro definition.
	if(IS_INITIAL_FRAME){
		face = detectFaces(img);
		if(face.x != 0){
			lastFace = face;
		}
		else{
			// No face yet: pass the frame through unmodified.
			outputFrame = img;
			return;
		}
		prevSize = Size(face.width/2, face.height/2);
		// Elliptical mask centered on the face; everything outside is background.
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x + face.width/2, face.y + face.height/2), prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);
		// Only snapshot the face region when the rect lies fully inside the image.
		if(face.x > 0 && face.y > 0 && face.width > 0 && face.height > 0 
			&& (face.x + face.width) < img.cols && (face.y + face.height) < img.rows){
			img(face).copyTo(bestImg);
		}
		putText(img, "Give your best pose!", Point(face.x, face.y), CV_FONT_HERSHEY_SIMPLEX, 0.4, Scalar(255,255,255,0), 1, CV_AA);
	}

	firstFrameCounter--;

	if(face.x == 0) //missing face prevention
		face = lastFace;

	//Mask the background out
	bw &= head;

	//Compute more accurate image moments after background removal
	// Orientation of the skin blob from second-order central moments.
	// NOTE(review): when m.nu20 == m.nu02 this divides by zero (atan(inf) is
	// defined for floats, but the result may be noisy); also m.m00 == 0 would
	// make center NaN — confirm the mask can never be empty here.
	m = moments(bw, true);
	angle = (atan((2*m.nu11)/(m.nu20-m.nu02))/2)*180/PI;
	center = Point(m.m10/m.m00,m.m01/m.m00);

	//Smooth rotation (running average)
	// Circular buffer of SMOOTHER_SIZE samples; the incremental update keeps
	// smoothAngle equal to the window mean without re-summing.
	bufferCounter++;
	rotationBuffer[ bufferCounter % SMOOTHER_SIZE ] = angle;
	smoothAngle += (angle - rotationBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;

	//Expand borders
	// Padding so the rotation below does not clip the head out of frame.
	copyMakeBorder( img, img, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, 
					BORDER_REPLICATE, Scalar(255,255,255,0));

	if(!IS_INITIAL_FRAME){
		//Rotate the image to correct the leaning angle
		rotateImage(img, smoothAngle);
	
		//After rotation detect faces
		face = detectFaces(img);
		if(face.x != 0)
			lastFace = face;

		//Create background mask around the face
		// Face coords are in the border-expanded image; subtract BORDER_EXPAND
		// to express the ellipse in the un-padded mask's coordinates.
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x - BORDER_EXPAND + face.width/2, face.y -BORDER_EXPAND + face.height/2),
					  prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);

		//Draw a rectangle around the face
		//rectangle(img, face, Scalar(255,255,255,0), 1, 8, 0);

		//Overlay the ideal pose
		// NOTE(review): the y offset uses face.width, not face.height — possibly
		// intentional (detected faces are near-square) or a typo; confirm.
		if(replaceFace && center.x > 0 && center.y > 0){
			center = Point(face.x + face.width/2, face.y + face.width/2);
			overlayImage(img, bestImg, center, smoothSize);
		}

	} else{
		face.x += BORDER_EXPAND; //position alignment after border expansion (not necessary if we detect the face after expansion)
		face.y += BORDER_EXPAND;
	}
	
	//Smooth ideal image size (running average)
	// Same incremental-window-mean trick as the rotation smoother above.
	sizeBuffer[ bufferCounter % SMOOTHER_SIZE ] = face.width;
	smoothSize += (face.width - sizeBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;

	//Get ROI
	// NOTE(review): y offset again uses face.width — see the overlay note above.
	center = Point(face.x + face.width/2, face.y + face.width/2);
	roi = getROI(img, center);
	// Crop only when the ROI is fully inside the image; otherwise keep the frame.
	if(roi.x > 0 && roi.y > 0 && roi.width > 0 && roi.height > 0 
		&& (roi.x + roi.width) < img.cols && (roi.y + roi.height) < img.rows){
		img = img(roi);
	}

	//Resize the final image
	resize(img, img, CAMERA_RESOLUTION);

	if(developerMode){

		// Debug canvas laid out left-to-right: corrected image, raw input,
		// skin mask (converted to BGR), and the stored best-pose snapshot
		// right-aligned over the mask panel.
		Mat developerScreen(img.rows, 
							img.cols + 
							inputFrame.cols +
							bw.cols, CV_8UC3);

		Mat left(developerScreen, Rect(0, 0, img.size().width, img.size().height));
		img.copyTo(left);

		Mat center(developerScreen, Rect(img.cols, 0, inputFrame.cols, inputFrame.rows));
		inputFrame.copyTo(center);

		cvtColor(bw, bw, CV_GRAY2BGR);
		Mat right(developerScreen, Rect(img.size().width + inputFrame.size().width, 0, bw.size().width, bw.size().height));
		bw.copyTo(right);

		Mat rightmost(developerScreen, Rect(img.size().width + inputFrame.size().width + bw.size().width - bestImg.size().width, 0,
											bestImg.size().width, bestImg.size().height));
		bestImg.copyTo(rightmost);

		outputFrame = developerScreen;
	}
	else{
		outputFrame = img;
	}
}
コード例 #27
0
void PathFinderMovementGenerator::BuildPointPath(const float *startPoint, const float *endPoint)
{
    float pathPoints[MAX_POINT_PATH_LENGTH*VERTEX_SIZE];
    uint32 pointCount = 0;
    dtStatus dtResult = DT_FAILURE;
    if (m_useStraightPath)
    {
        dtResult = m_navMeshQuery->findStraightPath(
                startPoint,         // start position
                endPoint,           // end position
                m_pathPolyRefs,     // current path
                m_polyLength,       // lenth of current path
                pathPoints,         // [out] path corner points
                NULL,               // [out] flags
                NULL,               // [out] shortened path
                (int*)&pointCount,
                m_pointPathLimit);   // maximum number of points/polygons to use
    }
    else
    {
        dtResult = findSmoothPath(
                startPoint,         // start position
                endPoint,           // end position
                m_pathPolyRefs,     // current path
                m_polyLength,       // length of current path
                pathPoints,         // [out] path corner points
                (int*)&pointCount,
                m_pointPathLimit);    // maximum number of points
    }

    if (pointCount < 2 || dtResult != DT_SUCCESS)
    {
        // only happens if pass bad data to findStraightPath or navmesh is broken
        // single point paths can be generated here
        // TODO : check the exact cases
        sLog->outDebug(LOG_FILTER_MAPS, "++ PathFinderMovementGenerator::BuildPointPath FAILED! path sized %d returned\n", pointCount);
        BuildShortcut();
        m_type = PATHFIND_NOPATH;
        return;
    }

    m_pathPoints.resize(pointCount);
    for (uint32 i = 0; i < pointCount; ++i)
        m_pathPoints[i] = Vector3(pathPoints[i*VERTEX_SIZE+2], pathPoints[i*VERTEX_SIZE], pathPoints[i*VERTEX_SIZE+1]);

    // first point is always our current location - we need the next one
    setActualEndPosition(m_pathPoints[pointCount-1]);

    // force the given destination, if needed
    if(m_forceDestination &&
        (!(m_type & PATHFIND_NORMAL) || !inRange(getEndPosition(), getActualEndPosition(), 1.0f, 1.0f)))
    {
        // we may want to keep partial subpath
        if(dist3DSqr(getActualEndPosition(), getEndPosition()) <
            0.3f * dist3DSqr(getStartPosition(), getEndPosition()))
        {
            setActualEndPosition(getEndPosition());
            m_pathPoints[m_pathPoints.size()-1] = getEndPosition();
        }
        else
        {
            setActualEndPosition(getEndPosition());
            BuildShortcut();
        }

        m_type = PathType(PATHFIND_NORMAL | PATHFIND_NOT_USING_PATH);
    }

    sLog->outDebug(LOG_FILTER_MAPS, "++ PathFinderMovementGenerator::BuildPointPath path type %d size %d poly-size %d\n", m_type, pointCount, m_polyLength);
}
コード例 #28
0
ファイル: WebcamHandler.cpp プロジェクト: Gnork/FaceRepair
// Main capture/repair loop: grabs webcam frames, masks a user-chosen color
// inside the configured face area, feeds the masked sub-image through a
// cascade of three RBMs to reconstruct the hidden region, and displays both a
// settings view and a fullscreen composited result until m_loop is cleared.
void WebcamHandler::run()
{

	// initialize webcam
	VideoCapture cap = VideoCapture(0);
	cap.set(CV_CAP_PROP_FRAME_WIDTH, m_frameWidth);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, m_frameHeight);	

	// initialize window
	namedWindow("Settings", CV_WINDOW_AUTOSIZE);
	namedWindow("FaceRepair", CV_WINDOW_NORMAL);
	cvSetWindowProperty("FaceRepair", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);

	// give the camera time to warm up before the first frame
	cvWaitKey(1000);

	// Ping-pong buffers for the RBM passes; each run* call allocates the array
	// it returns and the previous buffer is freed right after use.
	float* hidden;
	float* visible;

	while (m_loop)
	{
		// read frame and continue with next frame if not successfull
		// NOTE(review): retrieve() without a prior grab()/read() may return an
		// empty frame on some backends — confirm a grabber runs elsewhere.
		Mat frame;
		cap.retrieve(frame);
		flip(frame, frame, 1); // mirror for a natural "selfie" view

		// take subimage at faceArea
		Mat subimage;
		frame(*m_faceArea).copyTo(subimage);
		Mat subimageHSV;
		cvtColor(subimage, subimageHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

		// detect color
		// Pixels within [m_detectionColorMin, m_detectionColorMax] form the
		// reconstruction mask; erode removes speckles, dilate grows the region.
		Mat mask;
		inRange(subimageHSV, *m_detectionColorMin, *m_detectionColorMax, mask);
		erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
		dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(15, 15)));
		Mat invertedMask = 255 - mask;

		// scale to rbm input size
		Size size = Size(m_edgeLength, m_edgeLength);
		Mat scaledSubimage;	
		resize(subimage, scaledSubimage, size, 0.0, 0.0, INTER_LINEAR);
		Mat scaledMask;
		// nearest-neighbor keeps the mask strictly binary after scaling
		resize(mask, scaledMask, size, 0.0, 0.0, INTER_NEAREST);
		Mat invertedScaledMask = 255 - scaledMask;

		// calc mean rgb of preserved area
		Scalar bgr = mean(scaledSubimage, invertedScaledMask);

		// set mean rgb at reconstructionArea
		// seeds the masked region with the average color before the RBM passes
		scaledSubimage.setTo(bgr, scaledMask);

		// subimage to normalized float array
		visible = matToNormalizedFloatArrayWithBias(&scaledSubimage);

		// process RBMs
		// Three successively larger RBMs; after the first two passes the pixels
		// of the preserved (unmasked) area are reset to the camera values so
		// only the masked region is actually reconstructed.
		// NOTE(review): buffers are released with `delete`; if the run*/mat*
		// helpers allocate with new[], these should be delete[] — confirm.
		hidden = m_rbm1000->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;  // bias unit
		visible = m_rbm1000->runVisible(hidden, 1);
		delete hidden;
		visible[0] = 1;  // bias unit
		resetPreservedArea(&scaledSubimage, &invertedScaledMask, visible);

		hidden = m_rbm1500->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm1500->runVisible(hidden, 1);
		delete hidden;
		visible[0] = 1;
		resetPreservedArea(&scaledSubimage, &invertedScaledMask, visible);

		hidden = m_rbm2000->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm2000->runVisible(hidden, 1);
		delete hidden;

		// normalized float array to subimage
		normalizedFloatArrayToMatWithoutBias(visible, &scaledSubimage);

		// scale to original faceArea size
		Mat result;
		size = Size(m_faceArea->width, m_faceArea->height);
		resize(scaledSubimage, result, size, 0.0, 0.0, INTER_CUBIC);

		// reset pixels of preserved area in native resolution
		subimage.copyTo(result, invertedMask);

		// create fullscreen image
		Mat fs;
		frame.copyTo(fs);
		result.copyTo(fs(*m_faceArea));
		flip(fs, fs, 1); // un-mirror for the projector view
		
		// maybe not necessary
		//result.copyTo(frame(*m_faceArea));
		
		// paint visualizations for settings image
		rectangle(frame, *m_faceArea, Scalar(0, 255, 0));
		// NOTE(review): eyePositions is indexed [0] and [1] (an array) but is
		// freed with scalar delete — should likely be delete[]; confirm how
		// calculateEyePositions allocates.
		Point* eyePositions = calculateEyePositions(m_faceArea, m_relativeEyePositionX, m_relativeEyePositionY);
		circle(frame, eyePositions[0], 4, Scalar(255, 255, 0));
		circle(frame, eyePositions[1], 4, Scalar(255, 255, 0));
		delete eyePositions;

		// show frames
		imshow("Settings", frame);
		imshow("FaceRepair", fs);
		
		// check keyboard input
		checkKeys();
	}
	// terminate webcam
	cap.release();
}
コード例 #29
0
ファイル: 3d_attractor.cpp プロジェクト: Maldela/Macrodyn-Qt
// Runs the model for `length` iterations, accumulates hits in the 3-D
// histogram `h` (after a transient of `limit` steps), then writes a binary
// header (job tag, resolutions, axis bounds), the axis labels, and one color
// index (0..94, proportional to hit density) per histogram cell to outFile.
void attractor_3d::simulation()
{
    qint64 t;
    double h_max;
    int  k, l, m;
    int  color;
    qreal dx, dy, dz;
    qint64 dummy=1;
    
    model->initialize();
    h.reset();
  
  //initialize output information
  int jobtag_dummy = 63;
  int resolution_x = h.get_x_res();
  int resolution_y = h.get_y_res();
  int resolution_z = h.get_z_res();
  log() << "resolution: [" << resolution_x << "," << resolution_y << "," << resolution_z <<"]\n";
  outFile.write((char*)&jobtag_dummy, 4);
  outFile.write((char*)&resolution_x, 4);
  outFile.write((char*)&resolution_y, 4);
  outFile.write((char*)&resolution_z, 4);
  
  // NOTE(review): dummy is a qint64 (8 bytes) but only 4 bytes are written —
  // this emits the low word only on little-endian hosts; confirm the intended
  // file format before porting to a big-endian target.
  outFile.write((char*)&dummy, 4);		// dummyx
  outFile.write((char*)&dummy, 4);		// dummyy
  outFile.write((char*)&dummy, 4);		// dummyz
  
  outFile.write((char*)&xmin, 8);
  outFile.write((char*)&xmax, 8);
  outFile.write((char*)&ymin, 8);
  outFile.write((char*)&ymax, 8);
  outFile.write((char*)&zmin, 8);
  outFile.write((char*)&zmax, 8);
  
  log() << "xmin: " << xmin << "\txmax: " << xmax;
  log() << "ymin: " << ymin << "\tymax: " << ymax;
  log() << "zmin: " << zmin << "\tzmax: " << zmax;
  
  // NOTE(review): mixing QTextStream output with raw QIODevice::write on the
  // same file relies on the stream flushing at the right times — verify the
  // resulting byte layout matches the reader.
  QTextStream stream(&outFile);

  stream << xLabel;
  stream << yLabel;
  stream << zLabel;


  
  // Simulation phase: iterate the model; after the transient (`limit`) count
  // every in-range state into the histogram.
  for(t=0;t<length;t++) {
    model->iteration(t+1);
    if( t >= limit && inRange(*xParam,*yParam,*zParam) ) {
      h.inc(*xParam, *yParam, *zParam);
    }
  }

  h_max = double (h.get_max_hits());
  log() << "h_max = " << h_max;
  log() << "color step every " << h_max/94 << " hits in cell";
  // Guard the division below when no cell was ever hit.
  if( h_max == 0 ) {
    h_max = 1;
  }

	double dummy2=94.0;	// number of color steps
	double hitshilf;	// hits of the current cell ("hilf" = helper)
	double hitpoint;	// normalized hit density in [0, 1]

  // Output phase: walk the whole histogram grid and emit one color per cell.
  for( dz=zmin, m=0; m<h.get_z_res(); dz+=stepZ, m++) {
    for( dy=ymin, l=0; l<h.get_y_res(); dy+=stepY, l++) {
      for( dx=xmin, k=0; k<h.get_x_res(); dx+=stepX, k++) {
//      color = int (floor( ( double (h(k,l)) / double (h_max) ) * 32));

/*new*/
	
	hitshilf=h(k,l,m);
    hitpoint = hitshilf / h_max;
    // NOTE(review): exact floating-point equality — works here because both
    // values come from the same integer hit count, but fragile if that changes.
    if (hitshilf==h_max)
    log() << "qMaximal hitcounts at: "<< dx << " , " << dy << " , " << dz;

	// Map density to color index 1..94 (0 = empty cell).
	if(hitpoint>0){
/*		for(int i=0;i<=dummy2;i++){
			if(hitpoint>=i*intervall){
				color=i+1;
			}
		}*/
		color=int (ceil (hitpoint*dummy2));
//	    outFile << dx << "\t" << dy << "\t" << hitpoint 
	} else color=0;
	
    if ( color>94 ) log() << "warning: color > 94\n";
     //   outFile << dx << "\t" << dy << "\t" << hitpoint 
    // NOTE(review): constructs a fresh temporary QTextStream every cell —
    // wasteful; the `stream` created above could be reused.
    QTextStream(&outFile) << color;
/*end new*/



//     if( screenGraphics ) {
//        screenGraphics->setPoint(dx,dy,color);
//      }

    }
    //outFile 
  }
  }
  outFile.flush();
  outFile.close();  
}
コード例 #30
0
ファイル: video.cpp プロジェクト: Dixit-Z/SmartTracking
//Thread d'initialisation
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); //capture video webcam

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) //Tant que l'utilisateur ne commence pas la sélection!
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // Nouvelle capture
			if (!bSuccess) {
				cout << "Impossible de lire le flux video" << endl;
				break;
			}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) //On affiche en direct la sélection de l'utilisateur
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) //L'utilisateur a fini de sélectionner
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				//Trackbar pour choix de la couleur
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				//Trackbar pour Saturation comparer au blanc
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				//Trackbar pour la lumminosite comparer au noir
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); //Passe de BGR a HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); //Met en noir les parties non comprises dans l'intervalle de la couleur choisie par l'utilisateur

				//Retire les bruits
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				//Calcul de la "distance" à la cible. On s'en sert comme seuil.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			//Extraction des points d'intérêts de la sélection de l'utilisateur
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	//Fin de l'initiatlisation on ferme toutes les fenêtres et on passe au tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
}