Example #1
0
/* main loop */
/* Main per-frame loop: pull a new Kinect frame, detect the tracked marker,
 * estimate the camera<->marker transform and render the scene.
 * Relies on file-scope globals: g_MyKinect, g_pRightHandShotDetector, size,
 * count, thresh, patt_id, patt_center, patt_width, patt_trans, displayMode,
 * mode. */
static void mainLoop(void)
{
    static int      contF = 0;   /* 1 while the marker was visible in the previous frame */
    ARUint8         *dataPtr;
    ARMarkerInfo    *marker_info;
    int             marker_num;
    int             j, k;

	// Update the Kinect state (image/depth/skeleton) for this frame.
	g_MyKinect.Update();

	// Gesture hook: a detected right-hand "shot" enlarges the rendered object.
	if(g_pRightHandShotDetector->detect())
	{
		printf("RIGHTHANDSHOT\n");
		size += 20.0f;
	}

#ifdef USE_USERDETECTOR
	if(g_MyKinect.userStatus.isPlayerVisible())
	{
		// Skeleton query; result is currently unused (debug print commented out).
		XV3 tmp = g_MyKinect.userDetector->getSkeletonJointPosition(XN_SKEL_RIGHT_HAND);
		//printf("Right hand position: %.2f %.2f %.2f\n", tmp.X, tmp.Y, tmp.Z);
	}
#endif

    /* grab a video frame (old ARToolKit camera path, superseded by the Kinect) */
   /* if( (dataPtr = (ARUint8 *)arVideoGetImage()) == NULL ) {
        arUtilSleep(2);
        return;
    }*/
	
	// Get the BGRA32 Kinect image used for marker detection; retry shortly if
	// no frame is available yet.
	if( (dataPtr = (ARUint8 *)g_MyKinect.GetBGRA32Image()) == NULL ) {
        arUtilSleep(2);
        return;
    }

    if( count == 0 ) arUtilTimerReset();
    count++;

    /* detect the markers in the video frame */
    if( arDetectMarker(dataPtr, thresh, &marker_info, &marker_num) < 0 ) {
        cleanup();
        exit(0);
    }

	// Optional display modes: 2 = depth drawn by color, 3 = depth mixed with
	// the camera image (anything else shows the camera image). Detection above
	// always ran on the camera image; only the displayed buffer changes here.
	if(displayMode == 2)
		dataPtr = (ARUint8 *)g_MyKinect.GetDepthDrewByColor();
	else
		if(displayMode == 3)
			dataPtr = (ARUint8 *)g_MyKinect.GetDepthMixedImage();

	argDrawMode2D();
    argDispImage( dataPtr, 0,0 );

   // arVideoCapNext(); // not needed with the Kinect capture path

    /* check for object visibility: keep the highest-confidence match for patt_id */
    k = -1;
    for( j = 0; j < marker_num; j++ ) {
        if( patt_id == marker_info[j].id ) {
            if( k == -1 ) k = j;
            else if( marker_info[k].cf < marker_info[j].cf ) k = j;
        }
    }
    if( k == -1 ) {
        // Marker not found this frame: reset tracking continuity and present.
        contF = 0;
        argSwapBuffers();
        return;
    }

    /* get the transformation between the marker and the real camera; use the
     * history-based estimator once we are tracking continuously (mode != 0) */
    if( mode == 0 || contF == 0 ) {
        arGetTransMat(&marker_info[k], patt_center, patt_width, patt_trans);
    }
    else {
        arGetTransMatCont(&marker_info[k], patt_trans, patt_center, patt_width, patt_trans);
    }
    contF = 1;

    draw( patt_trans );

    argSwapBuffers();
}
Example #2
0
/* main loop */
/* Per-frame loop: grab a camera frame, outline every detected marker in red,
 * outline known patterns in green, estimate each tracked object's pose,
 * flag collisions between objects 0 and 1, and render the scene.
 * Uses file-scope globals: count, thresh, object[], objectnum, COLLIDE_DIST. */
static void mainLoop(void)
{
    ARUint8      *frame;
    ARMarkerInfo *markers;
    int           detected;
    int           obj, m, best;

    /* Fetch the next camera image; retry shortly if none is ready yet. */
    frame = (ARUint8 *)arVideoGetImage();
    if (frame == NULL) {
        arUtilSleep(2);
        return;
    }

    if (count == 0) arUtilTimerReset();
    count++;

    /* Draw the raw camera image as the 2D background. */
    argDrawMode2D();
    argDispImage(frame, 0, 0);

    /* Kick off capture of the following frame. */
    arVideoCapNext();

    glColor3f(1.0, 0.0, 0.0);
    glLineWidth(6.0);

    /* Locate marker candidates in the frame. */
    if (arDetectMarker(frame, thresh, &markers, &detected) < 0) {
        cleanup();
        exit(0);
    }

    /* Outline every candidate in red. */
    for (m = 0; m < detected; m++) {
        argDrawSquare(markers[m].vertex, 0, 0);
    }

    /* Match candidates against the known patterns. */
    for (obj = 0; obj < objectnum; obj++) {
        best = -1;
        for (m = 0; m < detected; m++) {
            if (object[obj].id != markers[m].id) continue;

            /* Known pattern found: outline it in green. */
            glColor3f(0.0, 1.0, 0.0);
            argDrawSquare(markers[m].vertex, 0, 0);

            /* Keep the candidate with the highest confidence factor. */
            if (best == -1 || markers[best].cf < markers[m].cf) {
                best = m;
            }
        }
        if (best == -1) {
            object[obj].visible = 0;
            continue;
        }

        /* Pose estimation: one-shot on (re)acquisition, history-based while
         * continuously tracked. */
        if (object[obj].visible == 0) {
            arGetTransMat(&markers[best],
                          object[obj].marker_center, object[obj].marker_width,
                          object[obj].trans);
        } else {
            arGetTransMatCont(&markers[best], object[obj].trans,
                              object[obj].marker_center, object[obj].marker_width,
                              object[obj].trans);
        }
        object[obj].visible = 1;
    }

    /* Flag collisions between objects 0 and 1 when both are tracked. */
    if (object[0].visible && object[1].visible) {
        if (checkCollisions(object[0], object[1], COLLIDE_DIST)) {
            object[0].collide = 1;
            object[1].collide = 1;
        } else {
            object[0].collide = 0;
            object[1].collide = 0;
        }
    }

    /* Render the augmented scene and present it. */
    draw(object, objectnum);
    argSwapBuffers();
}
Example #3
0
File: main.cpp  Project: AKIRA5611/AR
/* Main per-frame loop: grab a camera frame, detect the tracked marker with
 * NyARToolkit, advance the falling-piece game once per second, draw, then
 * throttle to FPS frames per second with platform-specific timing.
 * NOTE(review): on the non-Windows path l_PassTime appears to be in seconds
 * (gettimeofday_sec) while 1000/FPS looks like milliseconds — confirm the
 * intended units of Wait(). */
void MainLoop()
{
    //QueryPerformanceFrequency(&nFreq);
    //QueryPerformanceCounter(&nBefore);
    
    DWORD StartTime,EndTime,PassTime;
    double l_StartTime,l_EndTime,l_PassTime;
#ifdef _WIN32
    StartTime=timeGetTime();
#else
    l_StartTime=gettimeofday_sec();
#endif
    ARUint8		*image;
    ARMarkerInfo	*marker_info;
    int			 marker_num;
    int			 j,k;

    // Grab a video frame; retry shortly if none is available yet.
    if( (image = (ARUint8*)arVideoGetImage() )==NULL){
	arUtilSleep(2);
	return;
    }

    // Draw the camera image as the 2D background.
    argDrawMode2D();
    argDispImage(image, 0, 0);


    // Detect marker candidates in the frame.
    if(arDetectMarker(image, thresh, &marker_info, &marker_num) < 0){
	CleanUp();
	exit(0);
    }
    arVideoCapNext();

    // Pick the highest-confidence detection matching patt_id.
    k=-1;
    for(j=0;j<marker_num;j++){   
	if(patt_id==marker_info[j].id){
	    k = (k==-1)  ?   j : k;
	    k = (marker_info[k].cf < marker_info[j].cf)   ?  j: k;
	}
    }

    if(k!=-1) {
	// First sighting uses the one-shot pose estimator; afterwards the
	// history-based (continuous) variant is used.
	if(isFirst==true)
	    nyar_NyARTransMat_O2_transMat(nyobj,&marker_info[k],patt_center,patt_width,patt_trans);
	else
	    nyar_NyARTransMat_O2_transMatCont(nyobj,&marker_info[k],patt_trans,patt_center,patt_width,patt_trans);

	isFirst=false;

	if(GameOver==false){
	    // Advance the game state roughly once per second.
	    if(arUtilTimer()>1.0){
		MovePiece(3,f,p); 
		score+=f.ShiftPiece(f.deletePiece());
		arUtilTimerReset();
		GameOver=GameOverCheck(f,p);
	    }
	}
	else{
	    // After game over, restart automatically after 15 seconds.
	    if(arUtilTimer()>15.0)
		InitGame();
	}
	DrawObject();
    }

    argSwapBuffers();
#ifdef _WIN32
    // Frame-rate cap: sleep out the remainder of this frame's time budget.
    EndTime=timeGetTime();
    PassTime=EndTime-StartTime;
    (1000/FPS>PassTime)?Wait(1000/FPS-PassTime):Wait(0);
    FPSCount(&fps);
    printf("FPS=%d\n",fps);
#else
l_EndTime=gettimeofday_sec();
l_PassTime=l_EndTime-l_StartTime;
    ((double)(1000/FPS)>l_PassTime)?Wait((double)1000/FPS-l_PassTime):Wait(0);
    FPSCount(&fps);
    printf("FPS=%d\n",fps);
#endif
   
}
Example #4
0
  /**
   * Camera-image callback: runs ARToolKit marker detection on the incoming
   * frame, selects the highest-confidence detection of the configured
   * pattern (patt_id_), estimates its pose and publishes it as an ar_pose
   * marker message, optionally as a tf transform and an rviz visual marker.
   *
   * Fixes vs. the original:
   *  - the published id and confidence were read from marker_info->id /
   *    marker_info->cf (i.e. element 0 of the array), not from the selected
   *    best match marker_info[k];
   *  - a failed image conversion now returns instead of dereferencing a
   *    stale or invalid capture_ pointer.
   */
  void ARSinglePublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k;
    /* Get the image from ROSTOPIC
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * build with V4L, dataPtr format change according to the 
     * ARToolKit configure option (see config.h).*/

    try
    {
      capture_ = processFrame(capture_);
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
      return;  // FIX: capture_ is not usable after a failed conversion.
    }
    dataPtr = (ARUint8 *) capture_->imageData;

    // Detect the markers in the video frame.
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      ROS_FATAL ("arDetectMarker failed");
      ROS_BREAK ();             // FIXME: I don't think this should be fatal... -Bill
    }

    // Keep the highest-confidence detection of our pattern.
    k = -1;
    for (i = 0; i < marker_num; i++)
    {
      if (marker_info[i].id == patt_id_)
      {
        ROS_DEBUG ("Found pattern: %d ", patt_id_);

        if (k == -1)
          k = i;
        else if (marker_info[k].cf < marker_info[i].cf)
          k = i;
      }
    }

    if (k != -1)
    {
      // **** get the transformation between the marker and the real camera
      double arQuat[4], arPos[3];

      if (!useHistory_ || contF == 0)
        arGetTransMat (&marker_info[k], marker_center_, markerWidth_, marker_trans_);
      else
        arGetTransMatCont (&marker_info[k], marker_trans_, marker_center_, markerWidth_, marker_trans_);

      contF = 1;

      arUtilMat2QuatPos (marker_trans_, arQuat, arPos);

      // **** convert to ROS frame (AR axes remapped, units scaled)
      btVector3 arP (arPos[0] * AR_TO_ROS, arPos[2] * AR_TO_ROS, -arPos[1] * AR_TO_ROS);
      btQuaternion arQ (-arQuat[0], -arQuat[2], arQuat[1], arQuat[3]);
      btTransform marker(arQ, arP);
      marker *= transform_;

      // **** publish the marker
      ar_pose_marker_.header.frame_id = image_msg->header.frame_id;
      ar_pose_marker_.header.stamp    = image_msg->header.stamp;
      ar_pose_marker_.id              = marker_info[k].id;  // FIX: was marker_info->id

      ar_pose_marker_.pose.pose.position.x = marker.getOrigin().getX();
      ar_pose_marker_.pose.pose.position.y = marker.getOrigin().getY();
      ar_pose_marker_.pose.pose.position.z = marker.getOrigin().getZ();

      ar_pose_marker_.pose.pose.orientation.x = marker.getRotation().getX();
      ar_pose_marker_.pose.pose.orientation.y = marker.getRotation().getY();
      ar_pose_marker_.pose.pose.orientation.z = marker.getRotation().getZ();
      ar_pose_marker_.pose.pose.orientation.w = marker.getRotation().getW();

      ar_pose_marker_.confidence = 100 * marker_info[k].cf;  // FIX: was marker_info->cf
      arMarkerPub_.publish(ar_pose_marker_);
      ROS_DEBUG ("Published ar_single marker");

      // **** publish transform between camera and marker
      btTransform t(marker.getRotation(), marker.getOrigin());

      if(publishTf_)
      {
        if(reverse_transform_)
        {
          tf::StampedTransform markerToCam (t.inverse(), image_msg->header.stamp, markerFrame_.c_str(), image_msg->header.frame_id);
          broadcaster_.sendTransform(markerToCam);
        } else {
          tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame_.c_str());
          broadcaster_.sendTransform(camToMarker);
        }
      }

      // **** publish visual marker
      if(publishVisualMarkers_)
      {
        btVector3 markerOrigin(0, 0.25 * markerWidth_ * AR_TO_ROS, 0);
        btTransform m(btQuaternion::getIdentity(), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
        tf::poseTFToMsg(markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = image_msg->header.frame_id;
        rvizMarker_.header.stamp = image_msg->header.stamp;
        rvizMarker_.id = 1;

        rvizMarker_.scale.x = 1.0 * markerWidth_ * AR_TO_ROS;
        rvizMarker_.scale.y = 0.5 * markerWidth_ * AR_TO_ROS;
        rvizMarker_.scale.z = 1.0 * markerWidth_ * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        rvizMarker_.color.r = 0.0f;
        rvizMarker_.color.g = 1.0f;
        rvizMarker_.color.b = 0.0f;
        rvizMarker_.color.a = 1.0;
        rvizMarker_.lifetime = ros::Duration(1.0);

        rvizMarkerPub_.publish(rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    else
    {
      // No marker this frame: reset tracking and publish zero confidence.
      contF = 0;
      ar_pose_marker_.confidence = 0;
      arMarkerPub_.publish(ar_pose_marker_);
      ROS_DEBUG ("Failed to locate marker");
    }
  }
Example #5
0
/* GLUT idle callback: grab a camera frame, match each game piece's pattern
 * against the detected markers (highest confidence first, each physical
 * marker claimed by at most one piece), update piece poses and request a
 * redraw. Throttled to at most 100 Hz.
 * Fix vs. the original: the per-frame calloc'd invalid_marker array was
 * never freed, leaking memory on every processed frame. */
static void Idle(void)
{
	static int ms_prev;
	int ms;
	float s_elapsed;
	ARUint8 *image;

	ARMarkerInfo    *marker_info;					// Pointer to array holding the details of detected markers.
	bool *invalid_marker;
    int             marker_num;						// Count of number of markers detected.
    int             j, k;

	// Find out how long since Idle() last ran.
	ms = glutGet(GLUT_ELAPSED_TIME);
	s_elapsed = (float)(ms - ms_prev) * 0.001;
	if (s_elapsed < 0.01f) return; // Don't update more often than 100 Hz.
	ms_prev = ms;

	// Grab a video frame.
	if ((image = arVideoGetImage()) != NULL) 
	{
		gARTImage = image;	// Save the fetched image.
		
		gCallCountMarkerDetect++; // Increment ARToolKit FPS counter.
		
		// Detect the markers in the video frame.
		if (arDetectMarker(gARTImage, gARTThreshhold, &marker_info, &marker_num) < 0) 
		{
			exit(-1);
		}

		// One flag per detected marker: set once a marker has been claimed by
		// a piece, so two pieces never track the same physical marker.
		invalid_marker = (bool*)calloc(marker_num, sizeof(bool));
		if (invalid_marker == NULL) return; // Out of memory: skip this frame.

		for(std::list<Piece*>::iterator it = pieces.begin(); it != pieces.end(); it++)
		{
			(*it)->patt_found = FALSE;	// Invalidate any previous detected markers.

			// Check through the marker_info array for highest confidence
			// visible marker matching our preferred pattern.

			k = -1;
			for (j = 0; j < marker_num; j++) 
			{				
				if (!invalid_marker[j] && marker_info[j].id == (*it)->patt_id)
				{
					if (k == -1) // First marker detected.
					{
						k = j;
					}
					else if(marker_info[j].cf > marker_info[k].cf) // Higher confidence marker detected.
					{
						k = j;
					}
				}
			}

			if (k != -1) 
			{
				// Get the transformation between the marker and the real camera into patt_trans.
				arGetTransMat(&(marker_info[k]), (*it)->patt_centre, (*it)->patt_width, (*it)->patt_trans);
				(*it)->patt_found = TRUE;
				invalid_marker[k] = true; // Claimed: no other piece may use it.
			}
		}

		free(invalid_marker); // FIX: was leaked on every frame.

		AnimateModels();

		// Tell GLUT the display has changed.
		glutPostRedisplay();
	}
}
Example #6
0
/* Detect all ARToolKit markers in the given frame and print, for each one,
 * its id, confidence, relative area, image-space position and 3x4 pose
 * matrix as one tab/comma separated record per marker.
 * Reads file-scope globals: cparam, frame_ind, xsize, ysize. */
void findMarkers(ARUint8* dataPtr) 
{
    ARMarkerInfo *info;
    int           found;
    int           m, row, col;
    int           thresh = 100;

    /* Run marker detection on the frame; abort on hard failure. */
    if (arDetectMarker(dataPtr, thresh, &info, &found) < 0) {
        fprintf(stderr,"arDetectMarker failed\n");
        exit(0);
    }

    fprintf(stderr,"%d markers_found \n", found);

    for (m = 0; m < found; m++) {
        double ox, oy;
        double pose[3][4];
        double width     = 80.0;
        double center[2] = {0.0, 0.0};

        /* Distorted (observed) image coordinates of the marker centre. */
        arParamIdeal2Observ(cparam.dist_factor, info[m].pos[0], info[m].pos[1], &ox, &oy);

        printf("%d,\t", frame_ind);
        printf("%d,\t%g,\t%g,\t%g,\t%g,\t",
            info[m].id,
            info[m].cf,
            (float)info[m].area/(float)(xsize*ysize),
            info[m].pos[0], info[m].pos[1]
            );

        /* Camera<->marker transformation for this detection. */
        arGetTransMat(&info[m], center, width, pose);

        /* Print the 3x4 pose matrix, row by row. */
        for (row = 0; row < 3; row++) {
            for (col = 0; col < 4; col++) {
                printf("%f,\t", pose[row][col]);
            }
            printf("\t");
        }
        printf("\n");
    }
}
Example #7
0
/*
 * Class:     edu_dhbw_andar_MarkerInfo
 * Method:    artoolkit_detectmarkers
 * Signature: ([B[D)I
 */
/* Detect ARToolKit markers in the supplied camera frame and update the
 * Java-side state (visibility flag, OpenGL matrix, transformation matrix)
 * of every registered object. Returns the number of detected markers.
 *
 * Fixes vs. the original:
 *  - the error "continue" paths inside the object loop leaked the JNI
 *    monitor entered via MonitorEnter (and, in one case, a pinned float
 *    array); each now releases what it acquired before skipping;
 *  - on arDetectMarker failure the pinned image array is released and the
 *    function returns, instead of executing further JNI calls with a
 *    pending exception (undefined behavior per the JNI spec);
 *  - removed the unused local matrixPtr. */
JNIEXPORT jint JNICALL Java_edu_dhbw_andar_ARToolkit_artoolkit_1detectmarkers
(JNIEnv *env, jobject object, jbyteArray image, jobject transMatMonitor) {
    ARUint8         *dataPtr;
    ARMarkerInfo    *marker_info;
    int             marker_num;
    int             j, k=-1;
    Object* curObject;

    /* Pin the incoming video frame. */
    dataPtr = (*env)->GetByteArrayElements(env, image, JNI_FALSE);
    if( count == 0 ) arUtilTimerReset();
    count++;

    /* Detect the markers in the video frame. */
    if( arDetectMarker(dataPtr, thresh, &marker_info, &marker_num) < 0 ) {
        __android_log_write(ANDROID_LOG_ERROR,"AR native","arDetectMarker failed!!");
        jclass exc = (*env)->FindClass( env, "edu/dhbw/andar/exceptions/AndARException" );
        if ( exc != NULL )
            (*env)->ThrowNew( env, exc, "failed to detect marker" );
        /* FIX: release the pinned image and bail out with the exception pending. */
        (*env)->ReleaseByteArrayElements(env, image, dataPtr, JNI_ABORT);
        return 0;
    }
#ifdef DEBUG_LOGGING
    __android_log_print(ANDROID_LOG_INFO,"AR native","detected %d markers",marker_num);
#endif

    /* Field IDs are cached across calls; they are stable per class. */
    static jfieldID visibleField = NULL;
    static jfieldID glMatrixField = NULL;
    static jfieldID transMatField = NULL;
    jclass arObjectClass = NULL;
    jfloatArray glMatrixArrayObj = NULL;
    jdoubleArray transMatArrayObj = NULL;

#ifdef DEBUG_LOGGING
    __android_log_write(ANDROID_LOG_INFO,"AR native","done detecting markers, going to iterate over markers now");
#endif
    /* Iterate over all registered objects. */
    list_iterator_start(&objects);
    int itCount = 0;
    while (list_iterator_hasnext(&objects)) {
        curObject = (Object *)list_iterator_next(&objects);
#ifdef DEBUG_LOGGING
        __android_log_print(ANDROID_LOG_INFO,"AR native","now handling object with id %d, in %d iteration",curObject->name, itCount);
#endif
        itCount++;
        /* Lazily resolve the field IDs from the first object with a reference. */
        if(visibleField == NULL) {
            if(arObjectClass == NULL) {
                if(curObject->objref != NULL)
                    arObjectClass = (*env)->GetObjectClass(env, curObject->objref);
            }
            if(arObjectClass != NULL) {
                visibleField = (*env)->GetFieldID(env, arObjectClass, "visible", "Z");//Z means boolean
            }
        }
        if(glMatrixField == NULL) {
            if(arObjectClass == NULL) {
                if(curObject->objref != NULL)
                    arObjectClass = (*env)->GetObjectClass(env, curObject->objref);
            }
            if(arObjectClass != NULL) {
                glMatrixField = (*env)->GetFieldID(env, arObjectClass, "glMatrix", "[F");//[F means array of floats
            }
        }
        if(transMatField == NULL) {
            if(arObjectClass == NULL) {
                if(curObject->objref != NULL)
                    arObjectClass = (*env)->GetObjectClass(env, curObject->objref);
            }
            if(arObjectClass != NULL) {
                transMatField = (*env)->GetFieldID(env, arObjectClass, "transMat", "[D");//[D means array of doubles
            }
        }

        if(visibleField == NULL || glMatrixField == NULL || transMatField == NULL) {
            /* Could not resolve the Java fields: skip this object. */
#ifdef DEBUG_LOGGING
            __android_log_write(ANDROID_LOG_INFO,"AR native","error: either visibleField or glMatrixField or transMatField null");
#endif
            continue;
        }

        /* Pick the highest-confidence marker matching this object's pattern. */
        k = -1;
        for( j = 0; j < marker_num; j++ ) {
#ifdef DEBUG_LOGGING
            __android_log_print(ANDROID_LOG_INFO,"AR native","marker with id: %d", marker_info[j].id);
#endif
            if( curObject->id == marker_info[j].id ) {
                if( k == -1 ) {
                    k = j;
#ifdef DEBUG_LOGGING
                    __android_log_print(ANDROID_LOG_INFO,"AR native","detected object %d with marker %d and object marker %d",curObject->name,k,curObject->id);
#endif
                }
                else if( marker_info[k].cf < marker_info[j].cf )  {
#ifdef DEBUG_LOGGING
                    __android_log_print(ANDROID_LOG_INFO,"AR native","detected better object %d with marker %d and object marker %d",curObject->name,k,curObject->id);
#endif
                    k = j;
                }
            }
        }
        if( k == -1 ) {
            /* Object not visible this frame: reset continuity and flag it. */
            curObject->contF = 0;
            (*env)->SetBooleanField(env, curObject->objref, visibleField, JNI_FALSE);
#ifdef DEBUG_LOGGING
            __android_log_print(ANDROID_LOG_INFO,"AR native","object %d  not visible, with marker ID %d",curObject->name,curObject->id);
#endif
            continue;
        }

        /* Object visible: lock it while its matrices are rewritten. */
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","locking object");
#endif
        (*env)->MonitorEnter(env, curObject->objref);
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","done locking object...obtaining arrays");
#endif
        /* Access the matrix arrays of the current object. */
        glMatrixArrayObj = (*env)->GetObjectField(env, curObject->objref, glMatrixField);
        transMatArrayObj = (*env)->GetObjectField(env, curObject->objref, transMatField);
        if(transMatArrayObj == NULL || glMatrixArrayObj == NULL) {
#ifdef DEBUG_LOGGING
            __android_log_write(ANDROID_LOG_INFO,"AR native","failed to fetch the matrix arrays objects");
#endif
            (*env)->MonitorExit(env, curObject->objref); /* FIX: don't leak the monitor */
            continue;//something went wrong
        }
        float *glMatrix = (*env)->GetFloatArrayElements(env, glMatrixArrayObj, JNI_FALSE);
        if(glMatrix == NULL ) {
#ifdef DEBUG_LOGGING
            __android_log_write(ANDROID_LOG_INFO,"AR native","failed to fetch the matrix arrays");
#endif
            (*env)->MonitorExit(env, curObject->objref); /* FIX: don't leak the monitor */
            continue;//something went wrong
        }
        double* transMat = (*env)->GetDoubleArrayElements(env, transMatArrayObj, JNI_FALSE);
        if(transMat == NULL) {
#ifdef DEBUG_LOGGING
            __android_log_write(ANDROID_LOG_INFO,"AR native","failed to fetch the matrix arrays");
#endif
            (*env)->ReleaseFloatArrayElements(env, glMatrixArrayObj, glMatrix, JNI_ABORT); /* FIX: release pinned array */
            (*env)->MonitorExit(env, curObject->objref); /* FIX: don't leak the monitor */
            continue;//something went wrong
        }
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","calculating trans mat now");
#endif
        /* Camera<->marker transformation; continuous variant once tracking. */
        if( curObject->contF == 0 ) {
            arGetTransMat(&marker_info[k], curObject->marker_center, curObject->marker_width, transMat);
        } else {
            arGetTransMatCont(&marker_info[k], transMat, curObject->marker_center, curObject->marker_width, transMat);
        }
        curObject->contF = 1;
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","calculating OpenGL trans mat now");
#endif
        argConvGlpara(transMat, glMatrix);
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","releasing arrays");
#endif
        (*env)->ReleaseFloatArrayElements(env, glMatrixArrayObj, glMatrix, 0);
        (*env)->ReleaseDoubleArrayElements(env, transMatArrayObj, transMat, 0);

        (*env)->SetBooleanField(env, curObject->objref, visibleField, JNI_TRUE);
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","releasing lock");
#endif
        /* Release the lock on the object. */
        (*env)->MonitorExit(env, curObject->objref);
#ifdef DEBUG_LOGGING
        __android_log_write(ANDROID_LOG_INFO,"AR native","done releasing lock");
#endif
    }
    list_iterator_stop(&objects);
#ifdef DEBUG_LOGGING
    __android_log_write(ANDROID_LOG_INFO,"AR native","releasing image array");
#endif
    (*env)->ReleaseByteArrayElements(env, image, dataPtr, 0);
    return marker_num;
}
Example #8
0
/* Per-frame native video callback: copies the incoming NV21 frame, runs
 * square-marker detection and updates the pose of every registered marker
 * (template or matrix type), applying the optional pose filter.
 *
 * Fixes vs. the original:
 *  - in the "!gARViewInited" branch the early return preceded its own
 *    #ifdef DEBUG log, leaving the LOGI unreachable; the return now follows
 *    the log, matching the "!videoInited" branch;
 *  - removed the unused local inArray. */
JNIEXPORT void JNICALL JNIFUNCTION_NATIVE(nativeVideoFrame(JNIEnv* env, jobject obj, jbyteArray pinArray))
{
    int i, j, k;
    ARdouble err;

    if (!videoInited) {
#ifdef DEBUG
        LOGI("nativeVideoFrame !VIDEO\n");
#endif
        return; // No point in trying to track until video is inited.
    }
    if (!gARViewInited) {
#ifdef DEBUG
        LOGI("nativeVideoFrame !ARVIEW\n");
#endif
        return; // Also, we won't track until the ARView has been inited. (FIX: log now runs before the return.)
    }
#ifdef DEBUG
    LOGI("nativeVideoFrame\n");
#endif

    // Copy the incoming YUV420 image in pinArray.
    env->GetByteArrayRegion(pinArray, 0, gVideoFrameSize, (jbyte *)gVideoFrame);

    // As of ARToolKit v5.0, NV21 format video frames are handled natively,
    // and no longer require colour conversion to RGBA.
    // If you still require RGBA format information from the video,
    // here is where you'd do the conversion:
    // color_convert_common(gVideoFrame, gVideoFrame + videoWidth*videoHeight, videoWidth, videoHeight, myRGBABuffer);

    videoFrameNeedsPixelBufferDataUpload = true; // Note that buffer needs uploading. (Upload must be done on OpenGL context's thread.)

    // Run marker detection on frame.
    arDetectMarker(arHandle, gVideoFrame);

    // Get detected markers.
    ARMarkerInfo* markerInfo = arGetMarker(arHandle);
    int markerNum = arGetMarkerNum(arHandle);

    // Update markers.
    for (i = 0; i < markersSquareCount; i++) {
        markersSquare[i].validPrev = markersSquare[i].valid;

        // Check through the marker_info array for highest confidence
        // visible marker matching our preferred pattern, using the
        // pattern-type-specific id/confidence fields.
        k = -1;
        if (markersSquare[i].patt_type == AR_PATTERN_TYPE_TEMPLATE) {
            for (j = 0; j < markerNum; j++) {
                if (markersSquare[i].patt_id == markerInfo[j].idPatt) {
                    if (k == -1) {
                        if (markerInfo[j].cfPatt >= markersSquare[i].matchingThreshold) k = j; // First marker detected.
                    } else if (markerInfo[j].cfPatt > markerInfo[k].cfPatt) k = j; // Higher confidence marker detected.
                }
            }
            if (k != -1) {
                markerInfo[k].id = markerInfo[k].idPatt;
                markerInfo[k].cf = markerInfo[k].cfPatt;
                markerInfo[k].dir = markerInfo[k].dirPatt;
            }
        } else {
            for (j = 0; j < markerNum; j++) {
                if (markersSquare[i].patt_id == markerInfo[j].idMatrix) {
                    if (k == -1) {
                        if (markerInfo[j].cfMatrix >= markersSquare[i].matchingThreshold) k = j; // First marker detected.
                    } else if (markerInfo[j].cfMatrix > markerInfo[k].cfMatrix) k = j; // Higher confidence marker detected.
                }
            }
            if (k != -1) {
                markerInfo[k].id = markerInfo[k].idMatrix;
                markerInfo[k].cf = markerInfo[k].cfMatrix;
                markerInfo[k].dir = markerInfo[k].dirMatrix;
            }
        }

        if (k != -1) {
            markersSquare[i].valid = TRUE;
#ifdef DEBUG
            LOGI("Marker %d matched pattern %d.\n", k, markerInfo[k].id);
#endif
            // Get the transformation between the marker and the real camera into trans.
            if (markersSquare[i].validPrev) {
                err = arGetTransMatSquareCont(ar3DHandle, &(markerInfo[k]), markersSquare[i].trans, markersSquare[i].marker_width, markersSquare[i].trans);
            } else {
                err = arGetTransMatSquare(ar3DHandle, &(markerInfo[k]), markersSquare[i].marker_width, markersSquare[i].trans);
            }
        } else {
            markersSquare[i].valid = FALSE;
        }

        if (markersSquare[i].valid) {

            // Filter the pose estimate.
            if (markersSquare[i].ftmi) {
                if (arFilterTransMat(markersSquare[i].ftmi, markersSquare[i].trans, !markersSquare[i].validPrev) < 0) {
                    LOGE("arFilterTransMat error with marker %d.\n", i);
                }
            }

            if (!markersSquare[i].validPrev) {
                // Marker has become visible, tell any dependent objects.
                //ARMarkerAppearedNotification
            }

            // We have a new pose, so set that.
            arglCameraViewRHf(markersSquare[i].trans, markersSquare[i].pose.T, 1.0f /*VIEW_SCALEFACTOR*/);
            // Tell any dependent objects about the update.
            //ARMarkerUpdatedPoseNotification

        } else {

            if (markersSquare[i].validPrev) {
                // Marker has ceased to be visible, tell any dependent objects.
                //ARMarkerDisappearedNotification
            }
        }
    }
}
Example #9
0
/* Per-frame loop (throttled to 100 Hz): pull the latest movie and camera
 * frames, detect the tracked pattern, estimate its pose, pause/un-pause the
 * movie on marker disappearance/appearance, and request a redraw. */
static void mainLoop(void)
{
	static int ms_prev;
	int now_ms;
	float elapsed_s;
	ARUint8 *camFrame;
	AR2VideoBufferT *movieBuf;
	ARdouble err;
	int idx, best;

	// Throttle: do nothing if less than 10 ms have elapsed since last run.
	now_ms = glutGet(GLUT_ELAPSED_TIME);
	elapsed_s = (float)(now_ms - ms_prev) * 0.001f;
	if (elapsed_s < 0.01f) return; // Don't update more often than 100 Hz.
	ms_prev = now_ms;

	// Grab a movie frame (if one is available and filled).
	movieBuf = ar2VideoGetImage(gMovieVideo);
	if (movieBuf != NULL && movieBuf->buff && movieBuf->fillFlag) {
		gMovieImage = movieBuf->buff;
	}

	// Grab a camera frame; without one there is nothing more to do.
	camFrame = arVideoGetImage();
	if (camFrame == NULL) return;

	gARTImage = camFrame;      // Save the fetched image.
	gCallCountMarkerDetect++;  // Increment ARToolKit FPS counter.

	// Detect the markers in the video frame.
	if (arDetectMarker(gARHandle, gARTImage) < 0) {
		exit(-1);
	}

	// Select the highest-confidence detection matching our pattern.
	best = -1;
	for (idx = 0; idx < gARHandle->marker_num; idx++) {
		if (gARHandle->markerInfo[idx].id != gPatt_id) continue;
		if (best == -1 || gARHandle->markerInfo[idx].cf > gARHandle->markerInfo[best].cf) {
			best = idx;
		}
	}

	if (best != -1) {
		// Get the transformation between the marker and the real camera into gPatt_trans.
		if (gPatt_found && useContPoseEstimation) {
			err = arGetTransMatSquareCont(gAR3DHandle, &(gARHandle->markerInfo[best]), gPatt_trans, gPatt_width, gPatt_trans);
		} else {
			err = arGetTransMatSquare(gAR3DHandle, &(gARHandle->markerInfo[best]), gPatt_width, gPatt_trans);
			// Marker has appeared, so un-pause the movie.
			ar2VideoCapStart(gMovieVideo);
		}
		gPatt_found = TRUE;
	} else {
		if (gPatt_found) {
			// Marker has disappeared, so pause the movie.
			ar2VideoCapStop(gMovieVideo);
		}
		gPatt_found = FALSE;
	}

	// Tell GLUT the display has changed.
	glutPostRedisplay();
}
Example #10
0
void mainLoop()
{
    ARMarkerInfo *marker_info;
    ARUint8 *dataPtr;
    int marker_num;

	if(!calib)//special paycay florian 
		cvReleaseImage(&image);

	if(!calib)
		detectColision();

    // Recuperation du flux video
    if ( (dataPtr = (ARUint8 *)arVideoGetImage()) == NULL )
    {
        arUtilSleep(2);
        return;
    }

	// Passage en mode 2D pour analyse de l'image capturee
    argDrawMode2D();

	// Récupération de l'image openCV puis conversion en ARImage
	//IplImage* imgTest;
	image = cvCreateImage(cvSize(xsize, ysize), IPL_DEPTH_8U, 4);	
	image->imageData = (char *)dataPtr;

	//sinon l'image est à l'envers
	cvFlip(image, image, 1);
	

	//test si les couleurs ont déjà été calibrée
	// si oui on teste si y a collision, sinon on calibre
		interactionBoutton();
		if(calib)
			calibrage();
		else
		{
			updateColor();
			interactions();
		}

			

    // affichage image à l'ecran
    argDispImage( (unsigned char *)image->imageData, 0,0 );
	// Recuperation d'une autre image car on a fini avec la precedente
	arVideoCapNext();

		if (arDetectMarker(dataPtr, thresh, &marker_info, &marker_num) < 0)
		{
			printf("impossible de detecter le marqueur\n");
			cleanup();
		}


		if(visible == false && !calib) //element IHM : procedure qui permet de savoir si on recherche ou pas + réinit mouvemment des objets précédement affiché
		{
			glEnable(GL_LIGHT0);

			objet1_x =0;objet1_y =0;objet2_x =0;objet2_y =0;

			if(scan.isVisible(0)==true)
				scan.setVisible(false,0);
			if(scan.isVisible(1)==false)
				scan.setVisible(true,1);

			glColor3ub(255,0,0);
			texte(GLUT_BITMAP_HELVETICA_18,(char*)"Searching",cparam.xsize-100,cparam.ysize-30);
			if(alterne1==0 && alterne2 > 20)
			{
				glColor3ub(255,0,0);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Searching .",cparam.xsize-100,cparam.ysize-30);
				if(alterne2 > 30){alterne2=0;alterne1=(alterne1+1)%3;}
			}
			if(alterne1==1 && alterne2 > 20 )
			{	
				glColor3ub(255,0,0);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Searching ..",cparam.xsize-100,cparam.ysize-30);
				if(alterne2 > 30){alterne2=0;alterne1=(alterne1+1)%3;}
			}
			if(alterne1==2 && alterne2 > 20)
			{	
				glColor3ub(255,0,0);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Searching ...",cparam.xsize-100,cparam.ysize-30);
				if(alterne2 > 30){alterne2=0;alterne1=(alterne1+1)%3;}
			}

			alterne2+=1;
			glDisable(GL_LIGHT0);
		}
		else if(calib)
		{
			
			if(couleur == 0)
			{
				glColor3ub(0,0,255);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Choose thumb's color",cparam.xsize-220,cparam.ysize-30);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"then press enter",cparam.xsize-220,cparam.ysize-(30+18));
			}
			else if(couleur == 1)
			{
				glColor3ub(0,255,0);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Choose forefinger's color",cparam.xsize-220,cparam.ysize-30);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"then press enter",cparam.xsize-220,cparam.ysize-(30+18));
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Press return for thumb",cparam.xsize-220,cparam.ysize-(30+18*2));
			}
			else
			{
				glColor3ub(255,0,0);
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Choose middle's color",cparam.xsize-220,cparam.ysize-(30));
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"then press enter",cparam.xsize-220,cparam.ysize-(30+18));
				texte(GLUT_BITMAP_HELVETICA_18,(char*)"Press return for forefinger",cparam.xsize-220,cparam.ysize-(30+18*2));

			}


		}
		else //passage mode 3d + init profondeur
		{
			argDrawMode3D();
			argDraw3dCamera(0, 0);
			glClearDepth(1.0);
			glClear(GL_DEPTH_BUFFER_BIT);
		}

		/// Visibilite de l'objet
		if(visible == false ) //si on a jms vu de patron ou qu'on a demandé une recapture des patrons faire
		{
			//recherche objet visible 
			for (int i=0; i<2; i++) //pour chaque patron initialisé faire 
			{
				k = -1; //k renseigne sur la visibilité du marker et son id
				for (int j=0; j<marker_num; j++) // pour chaque marqueur trouver avec arDetectMarker 
				{
					if (object[i].patt_id == marker_info[j].id) 
					{
						if (k == -1)
						{
							k = j;
						}
						else if (marker_info[k].cf < marker_info[j].cf)
						{
							k = j;
						}
					}
				}

				object[i].visible = k;

				if (k >= 0)
				{
					visible = true;
					arGetTransMat(&marker_info[k], object[i].center, object[i].width,object[i].trans);
					printf("object[%d] center[%f, %f]\n", i, marker_info->pos[0], marker_info->pos[1]);
					printf("object[%d] hg[%f, %f]\n", i, marker_info->vertex[0][0], marker_info->vertex[0][1]);
					printf("object[%d] hd[%f, %f]\n", i, marker_info->vertex[1][0], marker_info->vertex[1][1]);
					printf("object[%d] bg[%f, %f]\n", i, marker_info->vertex[2][0], marker_info->vertex[2][1]);
					printf("object[%d] bd[%f, %f]\n", i, marker_info->vertex[3][0], marker_info->vertex[3][1]);


					//changement etat boutton
					if(scan.isVisible(0)==false)
						scan.setVisible(true,0);
					if(scan.isVisible(1)==true)
						scan.setVisible(false,1);

					//si on a vu un patron, on créé une nouvelle instance de l'objet créé par le patron, qu'on stocke dans les objets à l'écran.
					onscreen_object.push_back(Object3D(mesh.at(object[i].model_id), object[i].center, object[i].trans, object[i].width));
				}
			}
		}
		
		//vu qu'on ne gère plus à partir de la variable "visible" d'un patron, on display, dans tout les cas, soit le vecteur est vide, soit 
		//on a un ou plusieurs objets à afficher
		display(true);

		if(menuShow==true)
			menu.show();

		if(!calib)
			scan.show();

		help.show();
		quit.show();


    argSwapBuffers(); /// Affichage de l'image sur l'interface graphique
}
示例#11
0
/*
 * Per-frame loop for the two-window debug sample: fetches a frame, runs
 * marker detection, mirrors the (possibly thresholded) image to both
 * windows, overlays an FPS readout, and — when the preferred pattern is
 * visible with sufficient confidence — estimates and draws its pose.
 */
static void mainLoop(void)
{
    ARUint8        *frame;
    ARMarkerInfo   *markers;
    int             numMarkers;
    ARdouble        pattTrans[3][4];
    ARdouble        poseErr;
    int             procMode;
    int             dbgMode;
    int             i, best;

    /* Fetch the next camera frame; retry shortly if none is ready. */
    frame = (ARUint8 *)arVideoGetImage();
    if (frame == NULL) {
        arUtilSleep(2);
        return;
    }

    /* Run detection; a negative result is fatal for this sample. */
    if (arDetectMarker(arHandle, frame) < 0) {
        cleanup();
        exit(0);
    }

    /* Window 1 shows either the raw frame or the binarized debug image. */
    argSetWindow(w1);
    argDrawMode2D(vp1);
    arGetDebugMode(arHandle, &dbgMode);
    if (dbgMode != 0) {
        arGetImageProcMode(arHandle, &procMode);
        if (procMode == AR_IMAGE_PROC_FRAME_IMAGE) {
            argDrawImage(arHandle->labelInfo.bwImage);
        } else {
            argDrawImageHalf(arHandle->labelInfo.bwImage);
        }
    } else {
        argDrawImage(frame);
    }

    /* Window 2 always shows the raw frame. */
    argSetWindow(w2);
    argDrawMode2D(vp2);
    argDrawImage(frame);
    argSetWindow(w1);

    /* Refresh the FPS string once every ten frames. */
    if (count % 10 == 0) {
        sprintf(fps, "%f[fps]", 10.0/arUtilTimer());
        arUtilTimerReset();
    }
    count++;
    glColor3f(0.0f, 1.0f, 0.0f);
    argDrawStringsByIdealPos(fps, 10, ysize-30);

    numMarkers = arGetMarkerNum(arHandle);
    if (numMarkers == 0) {
        argSetWindow(w1);
        argSwapBuffers();
        argSetWindow(w2);
        argSwapBuffers();
        return;
    }

    /* Pick the highest-confidence candidate matching our pattern; the
       first candidate must clear a 0.7 confidence floor. */
    markers = arGetMarker(arHandle);
    best = -1;
    for (i = 0; i < numMarkers; i++) {
        if (markers[i].id != patt_id) continue;
        if (best == -1) {
            if (markers[i].cf > 0.7) best = i;
        } else if (markers[i].cf > markers[best].cf) {
            best = i;
        }
    }
    if (best == -1) {
        argSetWindow(w1);
        argSwapBuffers();
        argSetWindow(w2);
        argSwapBuffers();
        return;
    }

    /* Estimate the marker pose and report the fit error on screen. */
    poseErr = arGetTransMatSquare(ar3DHandle, &(markers[best]), patt_width, pattTrans);
    sprintf(errValue, "err = %f", poseErr);
    glColor3f(0.0f, 1.0f, 0.0f);
    argDrawStringsByIdealPos(fps, 10, ysize-30);
    argDrawStringsByIdealPos(errValue, 10, ysize-60);

    draw(pattTrans);

    argSetWindow(w1);
    argSwapBuffers();
    argSetWindow(w2);
    argSwapBuffers();
}
示例#12
0
  /**
   * Image callback: detects every known AR pattern in the incoming frame
   * and publishes its pose (ar_pose topic, optional tf, optional rviz cube).
   *
   * Fix vs. original: a cv_bridge conversion failure now aborts the
   * callback. Previously execution fell through the catch block and
   * dereferenced capture_, which is invalid (or stale) after a failed
   * conversion.
   */
  void ARMultiPublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* Get the image from ROSTOPIC
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * build with V4L, dataPtr format change according to the 
     * ARToolKit configure option (see config.h).*/
#if ROS_VERSION_MINIMUM(1, 9, 0)
    try
    {
      capture_ = cv_bridge::toCvCopy (image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return; // FIX: cannot continue without a converted image
    }
    dataPtr = (ARUint8 *) ((IplImage) capture_->image).imageData;
#else
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return; // FIX: cannot continue without a converted image
    }
    dataPtr = (ARUint8 *) capture_->imageData;
#endif

    // Detect the markers in the video frame.
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      ROS_BREAK ();
    }

    arPoseMarkers_.markers.clear ();
    // Check each known pattern against the detections.
    for (i = 0; i < objectnum; i++)
    {
      // k indexes the best (highest-confidence) detection of pattern i.
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else if (marker_info[k].cf < marker_info[j].cf)
            k = j; // keep the highest confidence factor
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }

      // Calculate the transform for this marker. The history-based variant
      // is used once the marker has been seen in the previous frame.
      if (object[i].visible == 0)
      {
        arGetTransMat (&marker_info[k], object[i].marker_center, object[i].marker_width, object[i].trans);
      }
      else
      {
        arGetTransMatCont (&marker_info[k], object[i].trans,
                           object[i].marker_center, object[i].marker_width, object[i].trans);
      }
      object[i].visible = 1;

      double arQuat[4], arPos[3];

      //arUtilMatInv (object[i].trans, cam_trans);
      arUtilMat2QuatPos (object[i].trans, arQuat, arPos);

      // **** convert to ROS frame

      double quat[4], pos[3];

      pos[0] = arPos[0] * AR_TO_ROS;
      pos[1] = arPos[1] * AR_TO_ROS;
      pos[2] = arPos[2] * AR_TO_ROS;

      if (isRightCamera_) {
        // Placeholder stereo-baseline offsets (currently zero).
        pos[2] += 0; // -0.001704;
        pos[0] += 0; // +0.0899971;
        pos[1] += 0; // -0.00012;
      }

      // ARToolKit and ROS use opposite quaternion handedness.
      quat[0] = -arQuat[0];
      quat[1] = -arQuat[1];
      quat[2] = -arQuat[2];
      quat[3] = arQuat[3];

      ROS_DEBUG (" Object num %i ------------------------------------------------", i);
      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** publish the marker

      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = image_msg->header.frame_id;
      ar_pose_marker.header.stamp = image_msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = pos[0];
      ar_pose_marker.pose.pose.position.y = pos[1];
      ar_pose_marker.pose.pose.position.z = pos[2];

      ar_pose_marker.pose.pose.orientation.x = quat[0];
      ar_pose_marker.pose.pose.orientation.y = quat[1];
      ar_pose_marker.pose.pose.orientation.z = quat[2];
      ar_pose_marker.pose.pose.orientation.w = quat[3];

      ar_pose_marker.confidence = round(marker_info[k].cf * 100);
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      // **** publish transform between camera and marker

#if ROS_VERSION_MINIMUM(1, 9, 0)
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#else
// DEPRECATED: Fuerte support ends when Hydro is released
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#endif

      if (publishTf_)
      {
        std::string name = object[i].name;
        if (isRightCamera_) {
          name += "_r"; // disambiguate the right-camera frame
        }
        tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, name);
        broadcaster_.sendTransform(camToMarker);
      }

      // **** publish visual marker

      if (publishVisualMarkers_)
      {
#if ROS_VERSION_MINIMUM(1, 9, 0)
        btVector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame 
#else
// DEPRECATED: Fuerte support ends when Hydro is released
        btVector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
#endif

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = image_msg->header.frame_id;
        rvizMarker_.header.stamp = image_msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        // Color-code the first two objects; everything else is green.
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration (1.0);

        rvizMarkerPub_.publish(rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish(arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
示例#13
0
/*
 * Per-frame GLUT loop for the multi-marker optical see-through sample:
 * grabs a frame, detects square markers, matches each configured marker
 * (template or matrix type) to its best detection, estimates/filters its
 * pose, composes it with the eye transform, and notifies the virtual
 * environment of appear/update/disappear events.
 */
static void mainLoop(void)
{
	static int ms_prev;              /* timestamp of the previous invocation (ms) */
	int ms;
	float s_elapsed;
	ARUint8 *image;
    ARMarkerInfo* markerInfo;
    int markerNum;
	ARdouble err;                    /* pose-fit error; currently only assigned, never read */
    ARPose opticalPose;
    int             i, j, k;
	
	// Calculate time delta.
	// NOTE(review): s_elapsed is computed but unused here (no frame-rate
	// cap, unlike sibling samples) — confirm whether that is intentional.
	ms = glutGet(GLUT_ELAPSED_TIME);
	s_elapsed = (float)(ms - ms_prev) * 0.001f;
	ms_prev = ms;
	
	// Grab a video frame.
	if ((image = arVideoGetImage()) != NULL) {
		gARTImage = image;	// Save the fetched image.
		
		gCallCountMarkerDetect++; // Increment ARToolKit FPS counter.
		
		// Detect the markers in the video frame.
		if (arDetectMarker(gARHandle, gARTImage) < 0) {
			exit(-1);
		}
		
		// Get detected markers
		markerInfo = arGetMarker(gARHandle);
		markerNum = arGetMarkerNum(gARHandle);
	
		// Update markers.
		for (i = 0; i < markersSquareCount; i++) {
			// Remember last frame's visibility to drive appear/disappear
			// events and the continuous pose estimator below.
			markersSquare[i].validPrev = markersSquare[i].valid;
            
            
			// Check through the marker_info array for highest confidence
			// visible marker matching our preferred pattern.
			// k = index of the best detection, or -1 when none clears the
			// per-marker matchingThreshold.
			k = -1;
			if (markersSquare[i].patt_type == AR_PATTERN_TYPE_TEMPLATE) {
				// Template (picture) pattern: match on idPatt/cfPatt.
				for (j = 0; j < markerNum; j++) {
					if (markersSquare[i].patt_id == markerInfo[j].idPatt) {
						if (k == -1) {
							if (markerInfo[j].cfPatt >= markersSquare[i].matchingThreshold) k = j; // First marker detected.
						} else if (markerInfo[j].cfPatt > markerInfo[k].cfPatt) k = j; // Higher confidence marker detected.
					}
				}
				if (k != -1) {
					// Promote the pattern-specific fields to the generic ones
					// used by the pose-estimation calls below.
					markerInfo[k].id = markerInfo[k].idPatt;
					markerInfo[k].cf = markerInfo[k].cfPatt;
					markerInfo[k].dir = markerInfo[k].dirPatt;
				}
			} else {
				// Matrix (2D-barcode) pattern: match on idMatrix/cfMatrix.
				for (j = 0; j < markerNum; j++) {
					if (markersSquare[i].patt_id == markerInfo[j].idMatrix) {
						if (k == -1) {
							if (markerInfo[j].cfMatrix >= markersSquare[i].matchingThreshold) k = j; // First marker detected.
						} else if (markerInfo[j].cfMatrix > markerInfo[k].cfMatrix) k = j; // Higher confidence marker detected.
					}
				}
				if (k != -1) {
					markerInfo[k].id = markerInfo[k].idMatrix;
					markerInfo[k].cf = markerInfo[k].cfMatrix;
					markerInfo[k].dir = markerInfo[k].dirMatrix;
				}
			}

			if (k != -1) {
				markersSquare[i].valid = TRUE;
				ARLOGd("Marker %d matched pattern %d.\n", i, markerInfo[k].id);
				// Get the transformation between the marker and the real camera into trans.
				// Use the history-based estimator only when the marker was
				// also valid last frame (it needs the previous pose).
				if (markersSquare[i].validPrev && useContPoseEstimation) {
					err = arGetTransMatSquareCont(gAR3DHandle, &(markerInfo[k]), markersSquare[i].trans, markersSquare[i].marker_width, markersSquare[i].trans);
				} else {
					err = arGetTransMatSquare(gAR3DHandle, &(markerInfo[k]), markersSquare[i].marker_width, markersSquare[i].trans);
				}
			} else {
				markersSquare[i].valid = FALSE;
			}
	   
			if (markersSquare[i].valid) {
			
				// Filter the pose estimate.
				// The filter is reset (third arg) when the marker has just
				// (re)appeared, to avoid smoothing across the gap.
				if (markersSquare[i].ftmi) {
					if (arFilterTransMat(markersSquare[i].ftmi, markersSquare[i].trans, !markersSquare[i].validPrev) < 0) {
						ARLOGe("arFilterTransMat error with marker %d.\n", i);
					}
				}
			
				if (!markersSquare[i].validPrev) {
					// Marker has become visible, tell any dependent objects.
                    VirtualEnvironmentHandleARMarkerAppeared(i);
				}
	
				// We have a new pose, so set that.
				arglCameraViewRH(markersSquare[i].trans, markersSquare[i].pose.T, 1.0f /*VIEW_SCALEFACTOR*/);
				// Tell any dependent objects about the update.
                
                // Work out the correct optical position relative to the eye.
                // We first apply the transform from the eye of the viewer to the camera,
                // then the usual view relative to the camera.
                // Remember, mtxMultMatrix(A, B) post-multiplies A by B, i.e. A' = A.B,
                // so opticalPose.T = eye.m . markersSquare[i].pose.T.
#ifdef ARDOUBLE_IS_FLOAT
                mtxLoadMatrixf(opticalPose.T, eye.m);
                mtxMultMatrixf(opticalPose.T, markersSquare[i].pose.T);
#else
                mtxLoadMatrixd(opticalPose.T, eye.m);
                mtxMultMatrixd(opticalPose.T, markersSquare[i].pose.T);
#endif
                
                VirtualEnvironmentHandleARMarkerWasUpdated(i, opticalPose);
			
			} else {
			
				if (markersSquare[i].validPrev) {
					// Marker has ceased to be visible, tell any dependent objects.
					VirtualEnvironmentHandleARMarkerDisappeared(i);
				}
			}                    
		}
		
		// Tell GLUT the display has changed.
		glutPostRedisplay();
	} else {
		// No frame ready yet; back off briefly.
		arUtilSleep(2);
	}
    
}
示例#14
0
/**
 * Image callback for the multi-marker (bundle) tracker: converts the ROS
 * image, detects markers, solves the joint bundle transform, then publishes
 * the camera-to-bundle tf (when the fit error is acceptable) and one visual
 * marker per detected pattern.
 *
 * Fix vs. original: a failed cv_bridge conversion now aborts the callback;
 * previously execution fell through the catch block and dereferenced an
 * invalid (or stale) capture_.
 */
void ARMultiPublisher::getTransformationCallback(const sensor_msgs::ImageConstPtr & image_msg)
{
  // Get the image from ROSTOPIC
  // NOTE: the dataPtr format is BGR because the ARToolKit library was
  // build with V4L, dataPtr format change according to the
  // ARToolKit configure option (see config.h).
  try
  {
    capture_ = bridge_.imgMsgToCv(image_msg, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException & e)
  {
    ROS_ERROR ("Could not convert from >%s< to 'bgr8'.", image_msg->encoding.c_str ());
    return; // FIX: cannot continue without a converted image
  }
  // cvConvertImage(capture,capture,CV_CVTIMG_FLIP);
  ARUint8* data_ptr = (ARUint8 *)capture_->imageData;

  // Detect the markers in the video frame.
  if (arDetectMarker(data_ptr, threshold_, &marker_info_, &num_detected_marker_) < 0)
  {
    argCleanup();
    ROS_BREAK ();
  }
  ROS_DEBUG("Detected >%i< of >%i< markers.", num_detected_marker_, num_total_markers_);

  // Solve the joint transform for the whole bundle.
  double error = 0.0;
  if ((error = arMultiGetTransMat(marker_info_, num_detected_marker_, multi_marker_config_)) < 0)
  {
    // ROS_ERROR("Could not get transformation. This should never happen.");
    ROS_WARN("Could not get transformation.");
    return;
  }
  ROS_DEBUG("Error is >%f<.", error);

  for (int i = 0; i < num_detected_marker_; i++)
  {
    ROS_DEBUG("multi_marker_config_->prevF: %i", multi_marker_config_->prevF);
    ROS_DEBUG("%s: (%i) pos: %f %f id: %i cf: %f", marker_frame_.c_str(), i, marker_info_[i].pos[0], marker_info_[i].pos[1], marker_info_[i].id, marker_info_[i].cf);
  }

  // For each known marker id, keep only the detection with the highest
  // confidence (marker_indizes_[id] = detection index, or -1).
  std::vector<double> cfs(num_total_markers_, 0.0);
  marker_indizes_.clear();
  for (int i = 0; i < num_total_markers_; ++i)
  {
    marker_indizes_.push_back(-1);
  }
  for (int i = 0; i < num_total_markers_; ++i)
  {
    for (int j = 0; j < num_detected_marker_; j++)
    {
      if (!(marker_info_[j].id < 0))
      {
        if (marker_info_[j].cf > cfs[marker_info_[j].id])
        {
          cfs[marker_info_[j].id] = marker_info_[j].cf;
          marker_indizes_[marker_info_[j].id] = j;
        }
      }
    }
  }

  // Convert the bundle transform to a ROS tf (note the quaternion sign
  // flip between ARToolKit and ROS conventions).
  double ar_quat[4], ar_pos[3];
  arUtilMat2QuatPos(multi_marker_config_->trans, ar_quat, ar_pos);
  tf::Quaternion rotation(-ar_quat[0], -ar_quat[1], -ar_quat[2], ar_quat[3]);
  tf::Vector3 origin(ar_pos[0] * AR_TO_ROS, ar_pos[1] * AR_TO_ROS, ar_pos[2] * AR_TO_ROS);
  tf::Transform transform(rotation, origin);
  if (multi_marker_config_->prevF && publish_tf_)
  {
    // Only broadcast when the bundle fit is good enough.
    if(error < error_threshold_)
    {
      ROS_DEBUG("%f %f %f | %f %f %f %f | %f", origin.getX(), origin.getY(), origin.getZ(), rotation.getX(), rotation.getY(), rotation.getZ(), rotation.getW(), image_msg->header.stamp.toSec());
      tf::StampedTransform cam_to_marker(transform, image_msg->header.stamp, camera_frame_, marker_frame_);
      tf_broadcaster_.sendTransform(cam_to_marker);
    }
    publishErrorMarker(error, image_msg->header.stamp);
  }

  if(publish_visual_markers_)
  {
    for (int i = 0; i < num_total_markers_; i++)
    {
      if (marker_indizes_[i] >= 0)
      {
          // Compose the bundle transform with the marker's own offset.
          tf::Transform marker_transform;
          getTransform(i, marker_transform);
          tf::Transform marker = transform * marker_transform;
          publishMarker(i, marker, image_msg->header.stamp);
          last_transform_ = marker;
      }
      // else
      // {
      //     publishMarker(i, last_transform_, image_msg->header.stamp);
      // }
    }
  }
}
示例#15
0
/**
 * Image callback for the single-marker tracker: converts the ROS image,
 * detects the configured pattern, estimates its pose, validates the
 * resulting quaternion, and publishes the marker message, the tf, and a
 * visual marker.
 *
 * Fixes vs. original:
 *  - the quaternion-norm check logged "Skipping invalid frame" but did not
 *    actually skip; it now returns like the NaN/Inf checks;
 *  - the published confidence now comes from the matched marker
 *    (marker_info[k].cf), not from the first detection (marker_info->cf);
 *  - removed a large block of commented-out resize/debug code.
 */
void ARSinglePublisher::getTransformationCallback(const sensor_msgs::ImageConstPtr& image_msg)
{
  ARMarkerInfo *marker_info;
  int marker_num;
  int i, k;

  // Get the image from ROSTOPIC
  // NOTE: the data_ptr format is BGR because the ARToolKit library was
  // build with V4L, data_ptr format change according to the
  // ARToolKit configure option (see config.h).
  try
  {
    capture_ = bridge_.imgMsgToCv(image_msg, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException & e)
  {
    ROS_ERROR("Could not convert from >%s< to 'bgr8'.", image_msg->encoding.c_str ());
    return;
  }

  // cvConvertImage(capture_, capture_, CV_CVTIMG_FLIP); //flip image
  ARUint8 *data_ptr = (ARUint8 *)capture_->imageData;

  // Detect the markers in the video frame.
  if (arDetectMarker(data_ptr, threshold_, &marker_info, &marker_num) < 0)
  {
    ROS_FATAL ("arDetectMarker failed");
    ROS_BREAK (); // FIXME: I don't think this should be fatal... -Bill
  }

  // Find the highest-confidence detection of our pattern.
  k = -1;
  for (i = 0; i < marker_num; i++)
  {
    if (marker_info[i].id == patt_id_)
    {
      ROS_DEBUG("Found pattern: %d ", patt_id_);

      // make sure you have the best pattern (highest confidence factor)
      if (k == -1)
        k = i;
      else if (marker_info[k].cf < marker_info[i].cf)
        k = i;
    }
  }

  if (k != -1)
  {
    // The history-based estimator needs a previous pose (cont_f_ != 0).
    if (!use_history_ || cont_f_ == 0)
    {
      arGetTransMat(&marker_info[k], marker_center_, marker_width_, marker_trans_);
    }
    else
    {
      arGetTransMatCont(&marker_info[k], marker_trans_, marker_center_, marker_width_, marker_trans_);
    }

    cont_f_ = 1;

    // Get the transformation between the marker and the real camera.
    double arQuat[4], arPos[3];

    // arUtilMatInv (marker_trans_, cam_trans);
    arUtilMat2QuatPos(marker_trans_, arQuat, arPos);

    // Error checking: reject frames whose orientation is not a valid
    // unit quaternion.
    if(fabs(sqrt(arQuat[0]*arQuat[0] + arQuat[1]*arQuat[1] + arQuat[2]*arQuat[2] + arQuat[3]*arQuat[3]) - 1.0) > 0.0001)
    {
      ROS_WARN("Skipping invalid frame. Computed quaternion is invalid.");
      return; // FIX: actually skip the frame, as the message claims.
    }
    if(std::isnan(arQuat[0]) || std::isnan(arQuat[1]) || std::isnan(arQuat[2]) || std::isnan(arQuat[3]))
    {
      ROS_WARN("Skipping invalid frame because computed orientation is not a number.");
      return;
    }
    if(std::isinf(arQuat[0]) || std::isinf(arQuat[1]) || std::isinf(arQuat[2]) || std::isinf(arQuat[3]))
    {
      ROS_WARN("Skipping invalid frame because computed orientation is infinite.");
      return;
    }

    // Convert to ROS frame (quaternion handedness flips between
    // ARToolKit and ROS).

    double quat[4], pos[3];

    pos[0] = arPos[0] * AR_TO_ROS;
    pos[1] = arPos[1] * AR_TO_ROS;
    pos[2] = arPos[2] * AR_TO_ROS;

    quat[0] = -arQuat[0];
    quat[1] = -arQuat[1];
    quat[2] = -arQuat[2];
    quat[3] = arQuat[3];

    ROS_DEBUG("  Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
    ROS_DEBUG("  Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

    // Publish the marker message.
    // FIX: report the matched marker's confidence, not the first one's.
    ar_target_marker_.confidence = marker_info[k].cf;

    ar_marker_publisher_.publish(ar_target_marker_);
    ROS_DEBUG ("Published ar_single marker");

    // Publish transform between camera and marker.

    tf::Quaternion rotation(quat[0], quat[1], quat[2], quat[3]);
    tf::Vector3 origin(pos[0], pos[1], pos[2]);
    tf::Transform transform(rotation, origin);

    // TODO: figure out why this happens once in a while...
    if(transform.getOrigin().getZ() < 0.0)
    {
      transform.setOrigin(-transform.getOrigin());
    }

    if (publish_tf_)
    {
      if (reverse_transform_)
      {
        ROS_ASSERT_MSG(false, "Reverse transform is not debugged yet.");
        tf::StampedTransform marker_to_cam(transform.inverse(), image_msg->header.stamp, marker_frame_, camera_frame_);
        tf_broadcaster_.sendTransform(marker_to_cam);
      }
      else
      {
        tf::Transform offseted_transform = transform * marker_offset_;
        tf::StampedTransform cam_to_marker(offseted_transform, image_msg->header.stamp, camera_frame_, marker_frame_);
        tf_broadcaster_.sendTransform(cam_to_marker);
      }
    }

    // Publish visual marker.
    publishMarker(transform, image_msg->header.stamp);
  }
  else
  {
    // Pattern not seen: reset the pose-history flag.
    cont_f_ = 0;
    ROS_WARN("Failed to locate marker.");
  }
}
示例#16
0
  /**
   * Image callback for the bundle publisher: detects each known pattern,
   * computes its transform and the transform to the bundle center ("master"),
   * optionally logs the master pose to a file, and publishes topic/tf/rviz
   * output.
   *
   * Fixes vs. original:
   *  - a failed cv_bridge conversion now aborts the callback instead of
   *    dereferencing an invalid capture_;
   *  - the pre-Fuerte #else branch referenced undeclared object[i]
   *    (the loop variable is knownPatternCount) — it would not compile
   *    when that branch was active.
   */
  void ARBundlePublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int knownPatternCount, k, j;

    /* Get the image from ROSTOPIC
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * build with V4L, dataPtr format change according to the 
     * ARToolKit configure option (see config.h).*/
#if ROS_VERSION_MINIMUM(1, 9, 0)
    try
    {
      capture_ = cv_bridge::toCvCopy (image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return; // FIX: cannot continue without a converted image
    }
    dataPtr = (ARUint8 *) ((IplImage) capture_->image).imageData;
#else
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return; // FIX: cannot continue without a converted image
    }
    dataPtr = (ARUint8 *) capture_->imageData;
#endif

    // Detect the markers in the video frame.
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      ROS_BREAK ();
    }

    arPoseMarkers_.markers.clear ();
    // Check each known pattern against the detections.
    for (knownPatternCount = 0; knownPatternCount < objectnum; knownPatternCount++)
    {
      k = -1; // index of the best detection of this pattern, -1 if unseen
      // marker_num is how many markers were actually found.
      for (j = 0; j < marker_num; j++)
      {
        if (object[knownPatternCount].id == marker_info[j].id)
        {
          if (k == -1)                  // first sighting of this pattern
            k = j;
          else if (marker_info[k].cf < marker_info[j].cf)
            k = j;                      // keep the highest confidence factor
        }
      }
      if (k == -1) // pattern not found in this frame
      {
        object[knownPatternCount].visible = 0;
        continue;
      }

      object[knownPatternCount].visible = 1;

      // Transform is stored in object[knownPatternCount].trans.
      getMarkerTransform(knownPatternCount, marker_info, k);

      double arQuat[4], arPos[3];               // for the marker
      double masterARQuat[4], masterARPos[3];   // for the master/center of the box

      // Find the transform from this pattern to the center of the box;
      // updates master_trans_.
      findTransformToCenter(object[knownPatternCount].trans, knownPatternCount);

      //arUtilMatInv (object[i].trans, cam_trans);
      arUtilMat2QuatPos (object[knownPatternCount].trans, arQuat, arPos);
      arUtilMat2QuatPos (master_trans_, masterARQuat, masterARPos);

      // **** convert to ROS frame
      double quat[4], pos[3];
      double masterQuat[4], masterPos[3];
      convertToRosFrame(arQuat, arPos, quat, pos);
      convertToRosFrame(masterARQuat, masterARPos, masterQuat, masterPos);

      // Append the master pose to the output file, timestamped in seconds.
      if (outputToFile)
      {
        // TODO: also output the tick from the camera_raw msg.
        // Convert nanoseconds -> seconds.
        snprintf(buffer, BUFFER_SIZE, "%10.10f",
            image_msg->header.stamp.toNSec()*1e-9);
        output << buffer <<  ",";
        output << masterPos[0] << ",";
        output << masterPos[1] << ",";
        output << masterPos[2] << ",";
        output << masterQuat[0] << ",";
        output << masterQuat[1] << ",";
        output << masterQuat[2] << ",";
        output << masterQuat[3] << "\n";
      }

      ROS_DEBUG (" Object num %i------------------------------------------------", knownPatternCount);
      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** prepare to publish the marker
      stuffARMarkerMsg(knownPatternCount, pos, quat, image_msg->header, marker_info);

      // **** publish transform between camera and marker

#if ROS_VERSION_MINIMUM(1, 9, 0)
      tf::Quaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      tf::Vector3 origin (pos[0], pos[1], pos[2]);
      tf::Transform t (rotation, origin);
#else
// DEPRECATED: Fuerte support ends when Hydro is released
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#endif

      tf::Quaternion masterRotation (masterQuat[0], masterQuat[1], masterQuat[2], masterQuat[3]);
      tf::Vector3 masterOrigin (masterPos[0], masterPos[1], masterPos[2]);
      tf::Transform masterTransform (masterRotation, masterOrigin);

      if (publishTf_)
      {
        // No need to publish per-marker tfs; only the bundle center.
        // tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, object[knownPatternCount].name);
        // broadcaster_.sendTransform(camToMarker);

        tf::StampedTransform camToMaster (masterTransform, image_msg->header.stamp, image_msg->header.frame_id, "master");
        broadcaster_.sendTransform(camToMaster);
      }

      // **** publish visual marker

      if (publishVisualMarkers_)
      {
#if ROS_VERSION_MINIMUM(1, 9, 0)
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[knownPatternCount].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = t * m; // marker pose in the camera frame 
#else
// DEPRECATED: Fuerte support ends when Hydro is released
        // FIX: was object[i] — i is not declared in this function.
        btVector3 markerOrigin (0, 0, 0.25 * object[knownPatternCount].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
#endif

        publishVisualMarker(knownPatternCount, markerPose, image_msg->header);
      }

    } // end of per-pattern loop
    arMarkerPub_.publish(arPoseMarkers_);
  }
示例#17
0
/*
 * Detect the configured marker (patt_id) in the current frame (dataPtr),
 * draw the frame as a 2D background, and print one record to stdout:
 *   - marker visible:  the marker's image-space position ("%g,\t%g,\n")
 *                      followed by the 3x4 camera transform, row-major,
 *                      "%f,\t"-separated, one trailing newline.
 *   - marker missing:  twelve "0,\t" fields and a newline, so downstream
 *                      log parsers always see a fixed-width record.
 *
 * Relies on file-scope state: dataPtr, thresh, patt_id, patt_center,
 * patt_width, patt_trans.  Calls cleanup()/exit() on detection failure.
 * NOTE(review): argSwapBuffers() is only called on the not-found path,
 * matching the original behavior — confirm this is intentional.
 */
void findMarkers(void)
{
    ARMarkerInfo *marker_info;   /* detection candidates for this frame */
    int           marker_num;    /* number of candidates found */
    int           best;          /* best-confidence match, -1 if none */
    int           row, col, j;

    /* Draw the camera image as the 2D background. */
    argDrawMode2D();
    argDispImage(dataPtr, 0, 0);

    /* Detect all marker candidates in the video frame. */
    if (arDetectMarker(dataPtr, thresh, &marker_info, &marker_num) < 0) {
        cleanup();
        exit(0);
    }

    /* Among candidates matching our pattern id, keep the one with the
     * highest confidence factor (cf). */
    best = -1;
    for (j = 0; j < marker_num; j++) {
        if (patt_id == marker_info[j].id) {
            if (best == -1 || marker_info[best].cf < marker_info[j].cf) {
                best = j;
            }
        }
    }

    if (best == -1) {
        /* Marker not visible: show the frame and emit a placeholder row
         * of twelve zeros (3x4 transform slots). */
        argSwapBuffers();
        for (j = 0; j < 12; j++) {
            fprintf(stdout, "0,\t");
        }
        fprintf(stdout, "\n");
        return;
    }

    /* Compute the marker-to-camera transform from this frame alone. */
    arGetTransMat(&marker_info[best], patt_center, patt_width, patt_trans);

    /* Image-space marker position (patt_center appears to stay zero, so
     * marker_info.pos is used instead). */
    fprintf(stdout, "%g,\t%g,\n", marker_info[best].pos[0], marker_info[best].pos[1]);

    /* 3x4 transform, row-major, tab-separated; extra tab between rows. */
    for (row = 0; row < 3; row++) {
        for (col = 0; col < 4; col++) {
            fprintf(stdout, "%f,\t", patt_trans[row][col]);
        }
        fprintf(stdout, "\t");
    }
    fprintf(stdout, "\n");
}
示例#18
0
//=======================================================
// Main loop function: capture a frame, detect markers, and drive a small
// per-marker animation / MIDI state machine, then render the result.
//=======================================================
void MainLoop(void)
{
	ARUint8          *image;			// captured camera frame
	ARMarkerInfo     *marker_info;		// marker detection results
	int              marker_num;		// number of detected markers
	int              i, j, k;

	// Grab the next camera frame; if none is ready yet, sleep briefly and retry
	// on the next loop iteration.
	if( (image = (ARUint8 *)arVideoGetImage()) == NULL ){
		arUtilSleep( 2 );
		return;
	}
	if( count == 0 ) arUtilTimerReset();
	count++;

	// Draw the camera image as the 2D background.
	argDrawMode2D();
	argDispImage( image, 0, 0 );


	// Detect and identify markers in the captured frame.
	if( arDetectMarker( image, thresh, &marker_info, &marker_num ) < 0 ){
		Cleanup();
		exit(0);
	}

	// Request capture of the next frame.
	arVideoCapNext();

	// Prepare to render 3D objects over the camera image.
	argDrawMode3D();
	argDraw3dCamera( 0, 0 );
	glClearDepth(1.0);					// depth-buffer clear value
	glClear( GL_DEPTH_BUFFER_BIT );		// clear the depth buffer

	// Special sequence: once markers 0, 3 and 7 have all had their animation
	// flags set, draw the "nashi" object on marker 3's transform for ~40
	// iterations of rmove, then reset rmove and every movex flag.
	if(movex[0]!=0 && movex[3]!=0 && movex[7]!=0){
		rmove++;
		
		if(rmove!=0){
			Drawnashi( marker[3].mark_id, marker[3].patt_trans);
		}
		if(rmove>40.0){
			rmove=0.0;
			for(int i=0;i<MARK_NUM;i++){
				movex[i]=0;
			}
		}
	}else{
		// For each known marker, pick the detected candidate with the
		// highest confidence factor (cf); k == -1 means not detected.
		for( i=0; i<MARK_NUM; i++ ){
			k = -1;
			for( j=0; j<marker_num; j++ ){
				if( marker[i].patt_id == marker_info[j].id ){
					if( k == -1 ) k = j;
					else if( marker_info[k].cf < marker_info[j].cf ) k = j;
				}
			}

			// Marker i was not found in this frame.
			if( k == -1 ){
				if(marker[i].visible != 0){
					// Marker just disappeared: fire the MIDI calls for this
					// marker and start its animation counter (movex).
					midi_out(i+1);
					midi_stop(i+1);
					movex[i]=1;
					marker[i].visible = 0;
				}else if(movex[i]!=0){
					// Animation still running: keep drawing the object using
					// the last known transform.
					DrawObject( marker[i].mark_id, marker[i].patt_trans,i );
				}
			}else{
				// Marker i is visible: update its pose (coordinate transform).
				if( marker[i].visible == 0 ) {
					// First sighting: compute the pose from this frame alone.
					// (Using arGetTransMat on the first detection and
					// arGetTransMatCont afterwards is reportedly more stable.)
					arGetTransMat( &marker_info[k], marker[i].patt_center, marker[i].patt_width, marker[i].patt_trans );    //初回の認識ではarGetTransMatを2回目以降ではarGetTransMatContを使うと安定するらしい
					marker[i].visible = 1;

				} else {
					// Subsequent frames: refine the pose using the previous
					// frame's transform for temporal stability.
					arGetTransMatCont( &marker_info[k], marker[i].patt_trans, marker[i].patt_center, marker[i].patt_width, marker[i].patt_trans );
				}

				// Draw the 3D object while this marker's animation flag is set.
				if(movex[i]!=0){
					DrawObject( marker[i].mark_id, marker[i].patt_trans,i );
				}
			}
			// Advance the per-marker animation counter; expire it at 40.
			if(movex[i]>=40.0) movex[i]=0;
			if(movex[i]!=0) movex[i]++;
		}
	}
	// Present the rendered frame.
	argSwapBuffers();
}