bool videoAndroidNativeCaptureSetSize(VIDEO_ANDROID_NATIVE_CAPTURE *nc, int width, int height)
{
    AR_VIDEO_FRAME_READY_CALLBACK frameReadyCallback = NULL; // Initialised in case capture is not active.
    void                          *frameReadyCallbackUserdata = NULL;

    ARLOGd("videoAndroidNativeCaptureSetSize().\n");

    if (!nc || !nc->ca)
        return false;

    pthread_mutex_lock(&nc->frameLock);
    bool capturing = (nc->frameBuffers[0] || nc->frameBuffers[1]);
    pthread_mutex_unlock(&nc->frameLock);

    if (capturing)
    {
        frameReadyCallback         = nc->frameReadyCallback;
        frameReadyCallbackUserdata = nc->frameReadyCallbackUserdata;
        videoAndroidNativeCaptureStop(nc);
    }

    nc->ca->setProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH, width);
    nc->ca->setProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT, height);
    videoAndroidNativeCaptureApplyProperties(nc);
    nc->frameWidth  = (int)nc->ca->getProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH);
    nc->frameHeight = (int)nc->ca->getProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT);

    if (capturing)
        videoAndroidNativeCaptureStart(nc, frameReadyCallback, frameReadyCallbackUserdata);

    ARLOGd("/videoAndroidNativeCaptureSetSize.\n");

    return true;
}
bool videoAndroidNativeCaptureClose(VIDEO_ANDROID_NATIVE_CAPTURE **nc_p)
{
    ARLOGd("videoAndroidNativeCaptureClose().\n");

    if (!nc_p || !*nc_p)
        return (false);                  // Sanity check.

    if ((*nc_p)->frameBuffers[0] || (*nc_p)->frameBuffers[1])
    {
        ARLOGw("Warning: videoAndroidNativeCaptureClose called without call to videoAndroidNativeCaptureStop.\n");
        videoAndroidNativeCaptureStop(*nc_p);
    }

    pthread_mutex_destroy(&((*nc_p)->frameLock));
    pthread_cond_destroy(&((*nc_p)->frameReadyNotifierThreadCondGo));

    if ((*nc_p)->ca)
    {
        // ca->disconnect() will be called automatically inside the destructor.
        delete((*nc_p)->ca);
        (*nc_p)->ca = NULL;
    }

    free(*nc_p);
    *nc_p = NULL;

    ARLOGd("/videoAndroidNativeCaptureClose.\n");

    return (true);
}
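/*
 * A minimal usage sketch (not library code) of the native-capture API shown
 * in this file: open, start with a frame-ready callback, resize, stop, close.
 * The callback signature is assumed here to be void (*)(void *), matching how
 * frameReadyNotifier() below invokes it; the actual AR_VIDEO_FRAME_READY_CALLBACK
 * typedef should be checked against the headers.
 */
#if 0 // Sketch only.
static void myFrameReady(void *userdata)
{
    // A buffer has transitioned to READY; the client would now fetch/lock it.
    (void)userdata;
}

static void captureLifecycleExample(void)
{
    VIDEO_ANDROID_NATIVE_CAPTURE *nc = videoAndroidNativeCaptureOpen(0); // Camera index 0.
    if (!nc) return;

    videoAndroidNativeCaptureStart(nc, myFrameReady, NULL);
    videoAndroidNativeCaptureSetSize(nc, 640, 480); // Stops and restarts capture internally if running.
    // ... capture runs; myFrameReady() is invoked on the notifier thread ...
    videoAndroidNativeCaptureStop(nc);
    videoAndroidNativeCaptureClose(&nc);            // Sets nc to NULL.
}
#endif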
virtual bool onFrameBuffer(void *buffer, int bufferSize)
{
    int  frameIndex;
    bool ret;

    if (!isConnected() || !buffer || bufferSize <= 0)
    {
        ARLOGe("Error: onFrameBuffer() called while not connected, or called without frame.\n");
        return false;
    }

    ret = true;
    m_framesReceived++;

    pthread_mutex_lock(&m_nc->frameLock);
    if (m_nc->frameBuffers[0] && m_nc->frameBuffers[1])       // Only do copy if capture has been started.
    {
        if (bufferSize != m_nc->frameBufferLength)
        {
            ARLOGe("Error: onFrameBuffer frame size is %d but receiver expected %d.\n", bufferSize, m_nc->frameBufferLength);
            ret = false;
        }
        else
        {
            // Find a buffer to write to. Any buffer not locked by client is a candidate.
            if (m_nc->frameBuffersStatus[0] != LOCKED)
                frameIndex = 0;
            else if (m_nc->frameBuffersStatus[1] != LOCKED)
                frameIndex = 1;
            else
                frameIndex = -1;

            if (frameIndex == -1)
            {
                ARLOGe("Error: onFrameBuffer receiver was all full up.\n");
                ret = false;
            }
            else
            {
                ARLOGd("FRAME => buffer %d %p\n", frameIndex, m_nc->frameBuffers[frameIndex]);
                memcpy(m_nc->frameBuffers[frameIndex], buffer, bufferSize);
                m_nc->frameBuffersStatus[frameIndex] = READY;
                if (m_nc->frameReadyCallback)
                    pthread_cond_signal(&m_nc->frameReadyNotifierThreadCondGo);
            }
        }
    }
    else
    {
        ARLOGd("FRAME =X\n");
    }

    pthread_mutex_unlock(&m_nc->frameLock);

    return ret;
}
bool videoAndroidNativeCaptureStart(VIDEO_ANDROID_NATIVE_CAPTURE *nc, AR_VIDEO_FRAME_READY_CALLBACK callback, void *userdata)
{
    int  err;
    bool ret = true;

    ARLOGd("videoAndroidNativeCaptureStart().\n");

    if (!nc)
        return false;

    // Don't start if already started.
    if (nc->frameBuffers[0] || nc->frameBuffers[1])
    {
        ARLOGe("videoAndroidNativeCaptureStart called again.\n");
        return false;
    }

    // Create the frame buffers.
    pthread_mutex_lock(&nc->frameLock);
    nc->frameBufferLength     = (nc->frameWidth * nc->frameHeight * 3) / 2; // Assume NV21/NV12 format.
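    // NV21/NV12 layout: a full-resolution 8-bit Y plane (width*height bytes)
    // followed by interleaved 2x2-subsampled chroma (width*height/2 bytes),
    // i.e. 3/2 bytes per pixel overall.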
    nc->frameBuffersStatus[0] = nc->frameBuffersStatus[1] = EMPTY;
    nc->frameBuffers[0]       = (unsigned char*)malloc(nc->frameBufferLength);
    nc->frameBuffers[1]       = (unsigned char*)malloc(nc->frameBufferLength);
    if (!nc->frameBuffers[0] || !nc->frameBuffers[1])
    {
        ARLOGe("Out of memory!\n");
        ret = false;
    }
    else
    {
        nc->frameReadyCallback = callback;
        if (callback)
        {
            // Start the frameReadyNotifierThread.
            nc->frameReadyCallbackUserdata         = userdata;
            nc->frameReadyNotifierThreadShouldQuit = false;
            if ((err = pthread_create(&(nc->frameReadyNotifierThread), NULL, frameReadyNotifier, (void*)nc)) != 0)
            {
                ARLOGe("videoAndroidNativeCaptureOpen(): Error %d detaching thread.\n", err);
                ret = false;
            }
        }
    }

    pthread_mutex_unlock(&nc->frameLock);

    ARLOGd("/videoAndroidNativeCaptureStart nc->frameBufferLength=%d.\n", nc->frameBufferLength);

    return ret;
}
static void stopWMC(AR2VideoParamWinMCT *vid)
{
    ARLOGd("ARWrap::ARvideo::stopWMC(): called");
    if (!vid || !vid->wmc) return;

    ARLOGd("ARWrap::ARvideo::stopWMC(): calling vid->wmc->Capturing()");
    if (!vid->wmc->Capturing()) {
        ARLOGe("ARWrap::ARvideo::stopWMC(): Windows.Media.Capture already stopped, exiting");
        return;
    }

    vid->wmc->StopCapture();
    ARLOGd("ARWrap::ARvideo::stopWMC(): exiting");
}
bool videoAndroidNativeCaptureStop(VIDEO_ANDROID_NATIVE_CAPTURE *nc)
{
    ARLOGd("videoAndroidNativeCaptureStop().\n");

    if (!nc)
        return false;

    // Don't stop if not started.
    if (!nc->frameBuffers[0] && !nc->frameBuffers[1])
    {
        return false;
    }

    pthread_mutex_lock(&nc->frameLock);

    // Cancel any ready frames.
    if (nc->frameBuffersStatus[0] == READY || nc->frameBuffersStatus[0] == NOTIFIED)
        nc->frameBuffersStatus[0] = EMPTY;

    if (nc->frameBuffersStatus[1] == READY || nc->frameBuffersStatus[1] == NOTIFIED)
        nc->frameBuffersStatus[1] = EMPTY;

    // Get frameReadyNotifier to exit. Once this is done, we can safely dispose of any remaining locked buffers.
    if (nc->frameReadyCallback)
    {
        nc->frameReadyNotifierThreadShouldQuit = true;
        pthread_cond_signal(&nc->frameReadyNotifierThreadCondGo); // Make sure the thread leaves the condition wait if it's waiting there.
        pthread_mutex_unlock(&nc->frameLock);
        pthread_join(nc->frameReadyNotifierThread, NULL); // NULL -> don't return thread exit status.
        pthread_mutex_lock(&nc->frameLock);
        nc->frameReadyNotifierThreadShouldQuit = false;
        nc->frameReadyCallback                 = NULL;
        nc->frameReadyCallbackUserdata         = NULL;
    }

    free(nc->frameBuffers[0]);
    free(nc->frameBuffers[1]);
    nc->frameBuffers[0]       = nc->frameBuffers[1] = NULL;
    nc->frameBuffersStatus[0] = nc->frameBuffersStatus[1] = EMPTY;
    nc->frameBufferLength     = 0;

    pthread_mutex_unlock(&nc->frameLock);

    ARLOGd("/videoAndroidNativeCaptureStop.\n");

    return true;
}
int ar2VideoCloseWinMC(AR2VideoParamWinMCT *vid)
{
    ARLOGd("ARWrap::ARvideo::ar2VideoCloseWinMC(): called");
    if (!vid) return -1; // Sanity check.

    stopWMC(vid);
    if (vid->wmc) {
        delete vid->wmc;
        vid->wmc = NULL;
    }

    free(vid);

    ar2VideoWinMCRefCount--;
    if (ar2VideoWinMCRefCount == 0) ar2VideoWinMCFinal2();

    ARLOGd("ARWrap::ARvideo::ar2VideoCloseWinMC(): exiting, returning 0");
    return 0;
}
static void icpStereoGetXw2XcCleanup( char *message, ARdouble *J_U_S, ARdouble *dU, ARdouble *E, ARdouble *E2 )
{
    ARLOGd("Error: %s\n", message);
    free(J_U_S);
    free(dU);
    free(E);
    free(E2);
}
VIDEO_ANDROID_NATIVE_CAPTURE* videoAndroidNativeCaptureOpen(int cameraIndex)
{
    CameraActivity::ErrorCode ca_err;

    ARLOGd("videoAndroidNativeCaptureOpen(%d).\n", cameraIndex);

    VIDEO_ANDROID_NATIVE_CAPTURE *nc = (VIDEO_ANDROID_NATIVE_CAPTURE*)calloc(1, sizeof(VIDEO_ANDROID_NATIVE_CAPTURE));
    if (!nc)
    {
        ARLOGe("Out of memory!\n");
        return (NULL);
    }

    nc->ca = new ARToolKitVideoAndroidCameraActivity(nc);
    if (!nc->ca)
    {
        ARLOGe("Unable to create native connection to camera.\n");
        goto bail;
    }

    // Lock manages contention between user thread, CameraActivity::onFrameBuffer thread (might be same as user thread), and frameReadyNotifierThread.
    pthread_mutex_init(&nc->frameLock, NULL);
    pthread_cond_init(&nc->frameReadyNotifierThreadCondGo, NULL);

    ca_err = nc->ca->connect(cameraIndex);
    if (ca_err != CameraActivity::NO_ERROR)
    {
        ARLOGe("Error %d opening native connection to camera.\n", ca_err);
        goto bail1;
    }

    nc->frameWidth  = (int)nc->ca->getProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH);
    nc->frameHeight = (int)nc->ca->getProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT);

    ARLOGd("/videoAndroidNativeCaptureOpen %dx%d.\n", nc->frameWidth, nc->frameHeight);

    return (nc);

bail1:
    delete(nc->ca);
    pthread_cond_destroy(&nc->frameReadyNotifierThreadCondGo);
    pthread_mutex_destroy(&nc->frameLock);
bail:
    free(nc);
    return (NULL);
}
// Thread which tells the user when new frames are ready.
static void* frameReadyNotifier(void *arg)
{
    ARLOGd("Start frameReadyNotifier thread.\n");

    VIDEO_ANDROID_NATIVE_CAPTURE *nc = (VIDEO_ANDROID_NATIVE_CAPTURE*)arg;
    if (!nc)
    {
        ARLOGe("Error: frameReadyNotifier thread with no arg.\n");
        return NULL;
    }

    pthread_mutex_lock(&nc->frameLock);

    while (!nc->frameReadyNotifierThreadShouldQuit)
    {
        // Wait for a frame or quit signal.
        while (!nc->frameReadyNotifierThreadShouldQuit && nc->frameBuffersStatus[0] != READY && nc->frameBuffersStatus[1] != READY)
        {
            pthread_cond_wait(&nc->frameReadyNotifierThreadCondGo, &nc->frameLock); // Releases lock while waiting, reacquires it when done.
        }

        if (nc->frameReadyNotifierThreadShouldQuit)
            break;

        // Unlock frameLock during the notification, to allow the other frame buffer to be written concurrently, or the callee to get the frame.
        pthread_mutex_unlock(&nc->frameLock);
        (*nc->frameReadyCallback)(nc->frameReadyCallbackUserdata); // Invoke callback.
        pthread_mutex_lock(&nc->frameLock);

        // Mark the buffer(s) as notified, so that in the event the user doesn't pick it up, we don't keep on notifying.
        if (nc->frameBuffersStatus[0] == READY)
            nc->frameBuffersStatus[0] = NOTIFIED;

        if (nc->frameBuffersStatus[1] == READY)
            nc->frameBuffersStatus[1] = NOTIFIED;
    }

    pthread_mutex_unlock(&nc->frameLock);

    ARLOGd("End frameReadyNotifier thread.\n");
    return (NULL);
}
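/*
 * A minimal standalone sketch (not library code) of the double-buffering
 * pattern used above: a producer writes into whichever of two buffers the
 * consumer has not LOCKED, and the consumer blocks on a condition variable
 * until a buffer is READY. All names and sizes here are illustrative.
 */
#if 0 // Sketch only; compile separately as a plain C program.
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef enum { EMPTY, READY, LOCKED } BufStatus;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  go   = PTHREAD_COND_INITIALIZER;
static unsigned char   buffers[2][16];
static BufStatus       status[2] = { EMPTY, EMPTY };
static bool            quit = false;

static void *producer(void *arg)
{
    (void)arg;
    for (int frame = 1; frame <= 5; frame++) {
        pthread_mutex_lock(&lock);
        int i = (status[0] != LOCKED) ? 0 : (status[1] != LOCKED) ? 1 : -1;
        if (i >= 0) {                       // Overwrites READY frames, like onFrameBuffer().
            memset(buffers[i], frame, sizeof(buffers[i]));
            status[i] = READY;
            pthread_cond_signal(&go);
        }
        pthread_mutex_unlock(&lock);
    }
    pthread_mutex_lock(&lock);
    quit = true;
    pthread_cond_signal(&go);               // Wake the consumer so it can exit.
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);

    pthread_mutex_lock(&lock);
    while (!quit) {
        while (!quit && status[0] != READY && status[1] != READY)
            pthread_cond_wait(&go, &lock);  // Releases lock while waiting.
        for (int i = 0; i < 2; i++) {
            if (status[i] != READY) continue;
            status[i] = LOCKED;             // Claim the buffer, then work unlocked.
            pthread_mutex_unlock(&lock);
            printf("consumed frame %d from buffer %d\n", buffers[i][0], i);
            pthread_mutex_lock(&lock);
            status[i] = EMPTY;              // Release the buffer to the producer.
        }
    }
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}
#endif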
static void *trackingInitMain( THREAD_HANDLE_T *threadHandle )
{
    TrackingInitHandle     *trackingInitHandle;
    KpmHandle              *kpmHandle;
    KpmResult              *kpmResult = NULL;
    int                     kpmResultNum;
    ARUint8                *imagePtr;
    float                  err;
    int                    i, j, k;

    if (!threadHandle) {
        ARLOGe("Error starting tracking thread: empty THREAD_HANDLE_T.\n");
        return (NULL);
    }
    trackingInitHandle = (TrackingInitHandle *)threadGetArg(threadHandle);
    if (!trackingInitHandle) {
        ARLOGe("Error starting tracking thread: empty trackingInitHandle.\n");
        return (NULL);
    }
    kpmHandle          = trackingInitHandle->kpmHandle;
    imagePtr           = trackingInitHandle->imagePtr;
    if (!kpmHandle || !imagePtr) {
        ARLOGe("Error starting tracking thread: empty kpmHandle/imagePtr.\n");
        return (NULL);
    }
    ARLOGi("Start tracking thread.\n");
    
    kpmGetResult( kpmHandle, &kpmResult, &kpmResultNum );

    for(;;) {
        if( threadStartWait(threadHandle) < 0 ) break;

        kpmMatching(kpmHandle, imagePtr);
        trackingInitHandle->flag = 0;
        for( i = 0; i < kpmResultNum; i++ ) {
            if( kpmResult[i].camPoseF != 0 ) continue;
            ARLOGd("kpmGetPose OK.\n");
            if( trackingInitHandle->flag == 0 || err > kpmResult[i].error ) { // Take the first or best result.
                trackingInitHandle->flag = 1;
                trackingInitHandle->page = kpmResult[i].pageNo;
                for (j = 0; j < 3; j++) for (k = 0; k < 4; k++) trackingInitHandle->trans[j][k] = kpmResult[i].camPose[j][k];
                err = kpmResult[i].error;
            }
        }

        threadEndSignal(threadHandle);
    }

    ARLOGi("End tracking thread.\n");
    return (NULL);
}
ARImageProcInfo *arImageProcInit(const int xsize, const int ysize, const AR_PIXEL_FORMAT pixFormat, int alwaysCopy)
{
    ARImageProcInfo *ipi = (ARImageProcInfo *)malloc(sizeof(ARImageProcInfo));
    if (ipi) {
        ipi->pixFormat = pixFormat;
        if (alwaysCopy || (pixFormat != AR_PIXEL_FORMAT_MONO && pixFormat != AR_PIXEL_FORMAT_420v && pixFormat != AR_PIXEL_FORMAT_420f && pixFormat != AR_PIXEL_FORMAT_NV21)) {
            ipi->image = (unsigned char *)malloc(xsize * ysize * sizeof(unsigned char));
            if (!ipi->image) goto bail;
            ipi->imageWasAllocated = TRUE;
        } else {
            ipi->imageWasAllocated = FALSE;
        }
        ipi->alwaysCopy = alwaysCopy;
        ipi->image2 = NULL;
        ipi->imageX = xsize;
        ipi->imageY = ysize;
#if AR_IMAGEPROC_USE_VIMAGE
        ipi->tempBuffer = NULL;
#endif
#ifdef HAVE_ARM_NEON
        ipi->fastPath = (ipi->imageX * ipi->imageY % 8 == 0
                         && (pixFormat == AR_PIXEL_FORMAT_RGBA
                             || pixFormat == AR_PIXEL_FORMAT_BGRA
                             || pixFormat == AR_PIXEL_FORMAT_ABGR
                             || pixFormat == AR_PIXEL_FORMAT_ARGB
                             )
                         );
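        // (Assumption: the multiple-of-8 pixel-count check matches the number
        // of pixels consumed per iteration of the NEON conversion loop, so no
        // scalar tail handling is needed.)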
#  ifdef ANDROID
        // Not all Android devices with ARMv7 are guaranteed to have NEON, so check.
        uint64_t features = android_getCpuFeatures();
        ipi->fastPath = ipi->fastPath && (features & ANDROID_CPU_ARM_FEATURE_ARMv7) && (features & ANDROID_CPU_ARM_FEATURE_NEON);
#  endif
        if (ipi->fastPath) ARLOGd("arImageProc will use ARM NEON acceleration.\n");
#endif
    }
    return (ipi);
    
bail:
    free(ipi);
    return (NULL);
}
// Modifies globals: kpmHandle, ar2Handle.
static int initNFT(ARParamLT *cparamLT, AR_PIXEL_FORMAT pixFormat)
{
    ARLOGd("Initialising NFT.\n");
    //
    // NFT init.
    //
    
    // KPM init.
    kpmHandle = kpmCreateHandle(cparamLT, pixFormat);
    if (!kpmHandle) {
        ARLOGe("Error: kpmCreateHandle.\n");
        return (FALSE);
    }
    //kpmSetProcMode( kpmHandle, KpmProcHalfSize );
    
    // AR2 init.
    if( (ar2Handle = ar2CreateHandle(cparamLT, pixFormat, AR2_TRACKING_DEFAULT_THREAD_NUM)) == NULL ) {
        ARLOGe("Error: ar2CreateHandle.\n");
        kpmDeleteHandle(&kpmHandle);
        return (FALSE);
    }
    if (threadGetCPU() <= 1) {
        ARLOGi("Using NFT tracking settings for a single CPU.\n");
        ar2SetTrackingThresh(ar2Handle, 5.0);
        ar2SetSimThresh(ar2Handle, 0.50);
        ar2SetSearchFeatureNum(ar2Handle, 16);
        ar2SetSearchSize(ar2Handle, 6);
        ar2SetTemplateSize1(ar2Handle, 6);
        ar2SetTemplateSize2(ar2Handle, 6);
    } else {
        ARLOGi("Using NFT tracking settings for more than one CPU.\n");
        ar2SetTrackingThresh(ar2Handle, 5.0);
        ar2SetSimThresh(ar2Handle, 0.50);
        ar2SetSearchFeatureNum(ar2Handle, 16);
        ar2SetSearchSize(ar2Handle, 12);
        ar2SetTemplateSize1(ar2Handle, 6);
        ar2SetTemplateSize2(ar2Handle, 6);
    }
    // NFT dataset loading will happen later.
    return (TRUE);
}
static void cleanup(void)
{
    VirtualEnvironmentFinal();

    if (markersNFT) deleteMarkers(&markersNFT, &markersNFTCount);
    
    // NFT cleanup.
    unloadNFTData();
	ARLOGd("Cleaning up ARToolKit NFT handles.\n");
    ar2DeleteHandle(&ar2Handle);
    kpmDeleteHandle(&kpmHandle);
    arParamLTFree(&gCparamLT);

    // OpenGL cleanup.
    arglCleanup(gArglSettings);
    gArglSettings = NULL;
    
    // Camera cleanup.
	arVideoCapStop();
	arVideoClose();
}
static void cleanup(void)
{
    if (markersNFT) deleteMarkers(&markersNFT, &markersNFTCount);
    
    // NFT cleanup.
    unloadNFTData();
	ARLOGd("Cleaning up ARToolKit NFT handles.\n");
    ar2DeleteHandle(&ar2Handle);
    kpmDeleteHandle(&kpmHandle);
    arParamLTFree(&gCparamLT);

    // OpenGL cleanup.
    arglCleanup(gArglSettings);
    gArglSettings = NULL;
    
    // Camera cleanup.
	arVideoCapStop();
	arVideoClose();
#ifdef _WIN32
	CoUninitialize();
#endif
}
static void mainLoop(void)
{
	static int ms_prev;
	int ms;
	float s_elapsed;
	ARUint8 *image;

    int             i, j, k;
	
	// Calculate time delta.
	ms = glutGet(GLUT_ELAPSED_TIME);
	s_elapsed = (float)(ms - ms_prev) * 0.001f;
	ms_prev = ms;
	
	// Grab a video frame.
	if ((image = arVideoGetImage()) != NULL) {
		gARTImage = image;	// Save the fetched image.
		
        // Calculate FPS every 30 frames.
        if (gCallCountMarkerDetect % 30 == 0) {
            gFPS = 30.0/arUtilTimer();
            arUtilTimerReset();
            gCallCountMarkerDetect = 0;
        }
		gCallCountMarkerDetect++; // Increment ARToolKit FPS counter.
		

        // Run marker detection on frame
        if (threadHandle) {
            // Perform NFT tracking.
            float            err;
            int              ret;
            int              pageNo;
            
            if( detectedPage == -2 ) {
                trackingInitStart( threadHandle, gARTImage );
                detectedPage = -1;
            }
            if( detectedPage == -1 ) {
                ret = trackingInitGetResult( threadHandle, trackingTrans, &pageNo);
                if( ret == 1 ) {
                    if (pageNo >= 0 && pageNo < surfaceSetCount) {
                        ARLOGd("Detected page %d.\n", pageNo);
                        detectedPage = pageNo;
                        ar2SetInitTrans(surfaceSet[detectedPage], trackingTrans);
                    } else {
                        ARLOGe("Detected bad page %d.\n", pageNo);
                        detectedPage = -2;
                    }
                } else if( ret < 0 ) {
                    ARLOGd("No page detected.\n");
                    detectedPage = -2;
                }
            }
            if( detectedPage >= 0 && detectedPage < surfaceSetCount) {
                if( ar2Tracking(ar2Handle, surfaceSet[detectedPage], gARTImage, trackingTrans, &err) < 0 ) {
                    ARLOGd("Tracking lost.\n");
                    detectedPage = -2;
                } else {
                    ARLOGd("Tracked page %d (max %d).\n", detectedPage, surfaceSetCount - 1);
                }
            }
        } else {
            ARLOGe("Error: threadHandle\n");
            detectedPage = -2;
        }
        
        // Update markers.
        for (i = 0; i < markersNFTCount; i++) {
            markersNFT[i].validPrev = markersNFT[i].valid;
            if (markersNFT[i].pageNo >= 0 && markersNFT[i].pageNo == detectedPage) {
                markersNFT[i].valid = TRUE;
                for (j = 0; j < 3; j++) for (k = 0; k < 4; k++) markersNFT[i].trans[j][k] = trackingTrans[j][k];
            }
            else markersNFT[i].valid = FALSE;
            if (markersNFT[i].valid) {
                
                // Filter the pose estimate.
                if (markersNFT[i].ftmi) {
                    if (arFilterTransMat(markersNFT[i].ftmi, markersNFT[i].trans, !markersNFT[i].validPrev) < 0) {
                        ARLOGe("arFilterTransMat error with marker %d.\n", i);
                    }
                }
                
                if (!markersNFT[i].validPrev) {
                    // Marker has become visible, tell any dependent objects.
                    VirtualEnvironmentHandleARMarkerAppeared(i);
                }
                
                // We have a new pose, so set that.
                arglCameraViewRH(markersNFT[i].trans, markersNFT[i].pose.T, VIEW_SCALEFACTOR);
                // Tell any dependent objects about the update.
                VirtualEnvironmentHandleARMarkerWasUpdated(i, markersNFT[i].pose);
                
            } else {
                
                if (markersNFT[i].validPrev) {
                    // Marker has ceased to be visible, tell any dependent objects.
                    VirtualEnvironmentHandleARMarkerDisappeared(i);
                }
            }                    
        }

		// Tell GLUT the display has changed.
		glutPostRedisplay();
	} else {
		arUtilSleep(2);
	}
    
}
ARToolKitVideoAndroidCameraActivity(VIDEO_ANDROID_NATIVE_CAPTURE *nc)
{
    ARLOGd("ARToolKitVideoAndroidCameraActivity CTOR\n");
    m_nc             = nc;
    m_framesReceived = 0;
}
static void mainLoop(void)
{
	static int ms_prev;
	int ms;
	float s_elapsed;
	ARUint8 *image;

    // NFT results.
    static int detectedPage = -2; // -2 Tracking not inited, -1 tracking inited OK, >= 0 tracking online on page.
    static float trackingTrans[3][4];
    

    int             i, j, k;
	
	// Find out how long since mainLoop() last ran.
	ms = glutGet(GLUT_ELAPSED_TIME);
	s_elapsed = (float)(ms - ms_prev) * 0.001f;
	if (s_elapsed < 0.01f) return; // Don't update more often than 100 Hz.
	ms_prev = ms;
	
	// Update drawing.
	DrawCubeUpdate(s_elapsed);
	
	// Grab a video frame.
	if ((image = arVideoGetImage()) != NULL) {
		gARTImage = image;	// Save the fetched image.
		
		gCallCountMarkerDetect++; // Increment ARToolKit FPS counter.
		

        // Run marker detection on frame
        if (threadHandle) {
            // Perform NFT tracking.
            float            err;
            int              ret;
            int              pageNo;
            
            if( detectedPage == -2 ) {
                trackingInitStart( threadHandle, gARTImage );
                detectedPage = -1;
            }
            if( detectedPage == -1 ) {
                ret = trackingInitGetResult( threadHandle, trackingTrans, &pageNo);
                if( ret == 1 ) {
                    if (pageNo >= 0 && pageNo < surfaceSetCount) {
                        ARLOGd("Detected page %d.\n", pageNo);
                        detectedPage = pageNo;
                        ar2SetInitTrans(surfaceSet[detectedPage], trackingTrans);
                    } else {
                        ARLOGe("Detected bad page %d.\n", pageNo);
                        detectedPage = -2;
                    }
                } else if( ret < 0 ) {
                    ARLOGd("No page detected.\n");
                    detectedPage = -2;
                }
            }
            if( detectedPage >= 0 && detectedPage < surfaceSetCount) {
                if( ar2Tracking(ar2Handle, surfaceSet[detectedPage], gARTImage, trackingTrans, &err) < 0 ) {
                    ARLOGd("Tracking lost.\n");
                    detectedPage = -2;
                } else {
                    ARLOGd("Tracked page %d (max %d).\n", detectedPage, surfaceSetCount - 1);
                }
            }
        } else {
            ARLOGe("Error: threadHandle\n");
            detectedPage = -2;
        }
        
        // Update markers.
        for (i = 0; i < markersNFTCount; i++) {
            markersNFT[i].validPrev = markersNFT[i].valid;
            if (markersNFT[i].pageNo >= 0 && markersNFT[i].pageNo == detectedPage) {
                markersNFT[i].valid = TRUE;
                for (j = 0; j < 3; j++) for (k = 0; k < 4; k++) markersNFT[i].trans[j][k] = trackingTrans[j][k];
            }
            else markersNFT[i].valid = FALSE;
            if (markersNFT[i].valid) {
                
                // Filter the pose estimate.
                if (markersNFT[i].ftmi) {
                    if (arFilterTransMat(markersNFT[i].ftmi, markersNFT[i].trans, !markersNFT[i].validPrev) < 0) {
                        ARLOGe("arFilterTransMat error with marker %d.\n", i);
                    }
                }
                
                if (!markersNFT[i].validPrev) {
                    // Marker has become visible, tell any dependent objects.
                    // --->
                }
                
                // We have a new pose, so set that.
                arglCameraViewRH(markersNFT[i].trans, markersNFT[i].pose.T, VIEW_SCALEFACTOR);
                // Tell any dependent objects about the update.
                // --->
                
            } else {
                
                if (markersNFT[i].validPrev) {
                    // Marker has ceased to be visible, tell any dependent objects.
                    // --->
                }
            }                    
        }

		// Tell GLUT the display has changed.
		glutPostRedisplay();
	}
}
int VirtualEnvironmentInit(const char *objectListFile)
{
    int      numObjects;
    FILE     *fp;
    char     buf[MAXPATHLEN];
    char     objectFullpath[MAXPATHLEN];
    int      i;
    ARdouble translation[3], rotation[4], scale[3];
    int      lightingFlag, markerIndex;

    ARLOGd("Initialising Virtual Environment.\n");

    // One-time OSG initialization.
    if (!VirtualEnvironment_AROSG)
    {
        VirtualEnvironment_AROSG = arOSGInit();
        if (!VirtualEnvironment_AROSG)
        {
            ARLOGe("Error: unable to init arOSG library.\n");
            return (0);
        }
    }

    // Locate and open the objects description file.
    if ((fp = fopen(objectListFile, "r")) == NULL)
    {
        ARLOGe("Error: unable to open object data file '%s'.\n", objectListFile);
        perror(NULL);
        goto bail1;
    }

    // First line is number of objects to read.
    numObjects = 0;
    get_buff(buf, MAXPATHLEN, fp, 1);
    if (sscanf(buf, "%d", &numObjects) != 1)
    {
        ARLOGe("Error: unable to read number of objects to load from object data file.\n");
        goto bail2;
    }

    // Allocate space for the objects.
    if (objects)
    {
        free(objects); objects = NULL;
        objectCount            = 0;
    }

    objects = (VEObject*)calloc(numObjects, sizeof(VEObject));
    if (!objects)
    {
        goto bail2;
    }

    ARLOGd("Reading %d objects.\n", numObjects);

    for (i = 0; i < numObjects; i++)
    {
        // Read in all info relating to the object.

        // Read model file path (relative to objects description file).
        if (!get_buff(buf, MAXPATHLEN, fp, 1))
        {
            ARLOGe("Error: unable to read model file name from object data file.\n");
            goto bail3;
        }

        if (!arUtilGetDirectoryNameFromPath(objectFullpath, objectListFile, sizeof(objectFullpath), 1))   // Get directory prefix, with path separator.
        {
            goto bail3;
        }

        strncat(objectFullpath, buf, sizeof(objectFullpath) - strlen(objectFullpath) - 1); // Add name of file to open.

        // Read translation.
        get_buff(buf, MAXPATHLEN, fp, 1);
#ifdef ARDOUBLE_IS_FLOAT
        if (sscanf(buf, "%f %f %f", &translation[0], &translation[1], &translation[2]) != 3)
#else
        if (sscanf(buf, "%lf %lf %lf", &translation[0], &translation[1], &translation[2]) != 3)
#endif
        {
            goto bail3;
        }

        // Read rotation.
        get_buff(buf, MAXPATHLEN, fp, 1);
#ifdef ARDOUBLE_IS_FLOAT
        if (sscanf(buf, "%f %f %f %f", &rotation[0], &rotation[1], &rotation[2], &rotation[3]) != 4)
#else
        if (sscanf(buf, "%lf %lf %lf %lf", &rotation[0], &rotation[1], &rotation[2], &rotation[3]) != 4)
#endif
        {
            goto bail3;
        }

        // Read scale.
        get_buff(buf, MAXPATHLEN, fp, 1);
#ifdef ARDOUBLE_IS_FLOAT
        if (sscanf(buf, "%f %f %f", &scale[0], &scale[1], &scale[2]) != 3)
#else
        if (sscanf(buf, "%lf %lf %lf", &scale[0], &scale[1], &scale[2]) != 3)
#endif
        {
            goto bail3;
        }

        // Look for optional tokens. A blank line marks end of options.
        lightingFlag = 1; markerIndex = -1;

        while (get_buff(buf, MAXPATHLEN, fp, 0) && (buf[0] != '\0'))
        {
            if (strncmp(buf, "LIGHTING", 8) == 0)
            {
                if (sscanf(&(buf[8]), " %d", &lightingFlag) != 1)
                {
                    ARLOGe("Error in object file: LIGHTING token must be followed by an integer >= 0. Discarding.\n");
                }
            }
            else if (strncmp(buf, "MARKER", 6) == 0)
            {
                if (sscanf(&(buf[6]), " %d", &markerIndex) != 1)
                {
                    ARLOGe("Error in object file: MARKER token must be followed by an integer > 0. Discarding.\n");
                }
                else
                {
                    markerIndex--; // Marker numbers are zero-indexed, but in the config file they're 1-indexed.
                }
            }

            // Unknown tokens are ignored.
        }


        // Now attempt to load objects.
        ARLOGd("Reading object data file %s.\n", objectFullpath);
        objects[i].modelIndex = arOSGLoadModel2(VirtualEnvironment_AROSG, objectFullpath, translation, rotation, scale);
        if (objects[i].modelIndex < 0)
        {
            ARLOGe("Error attempting to read object data file %s.\n", objectFullpath);
            goto bail4;
        }

        // Set optional properties.
        arOSGSetModelLighting(VirtualEnvironment_AROSG, objects[i].modelIndex, lightingFlag);

        // If a valid marker index has been specified, save it.
        if (markerIndex >= 0 /*&& markerIndex < markersCount*/)
        {
            arOSGSetModelVisibility(VirtualEnvironment_AROSG, objects[i].modelIndex, FALSE); // Objects tied to markers will not be initially visible.
            objects[i].markerIndex = markerIndex;
        }
        else
        {
            arOSGSetModelVisibility(VirtualEnvironment_AROSG, objects[i].modelIndex, TRUE); // All other objects will be initially visible.
            objects[i].markerIndex = -1;
        }

        objectCount++;
    }

    ARLOGd("Virtual Environment initialised.\n");
    fclose(fp);
    return (objectCount);

bail4:

    for (i--; i >= 0; i--)
    {
        arOSGUnloadModel(VirtualEnvironment_AROSG, i);
    }

bail3:
    free(objects);
    objects     = NULL;
    objectCount = 0;
bail2:
    fclose(fp);
bail1:
    if (VirtualEnvironment_AROSG)
    {
        arOSGFinal(VirtualEnvironment_AROSG);
        VirtualEnvironment_AROSG = NULL;
    }

#ifdef DEBUG
    ARLOGe("Virtual Environment initialisation failed.\n");
#endif
    return (0);
}
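/*
 * The format parsed above, as a hypothetical objects description file
 * (reconstructed from the parsing code; rotation is read as four values,
 * translation and scale as three each, and a blank line ends the optional
 * token list for each object):
 *
 *   1
 *   models/cube.osg
 *   0.0 0.0 0.0          <- translation x y z
 *   0.0 0.0 0.0 1.0      <- rotation (four values)
 *   1.0 1.0 1.0          <- scale x y z
 *   LIGHTING 1
 *   MARKER 1             <- 1-indexed in the file, stored zero-indexed
 *
 */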
ARMultiMarkerInfoT *arMultiReadConfigFile( const char *filename, ARPattHandle *pattHandle )
{
    FILE                   *fp;
    ARMultiEachMarkerInfoT *marker;
    ARMultiMarkerInfoT     *marker_info;
    ARdouble               wpos3d[4][2];
    char                   buf[256], pattPath[2048], dummy;
    int                    num;
    int                    patt_type = 0;
    int                    i, j;

    if ((fp = fopen(filename, "r")) == NULL) {
        ARLOGe("Error: unable to open multimarker config file '%s'.\n", filename);
        ARLOGperror(NULL);
        return NULL;
    }

    get_buff(buf, 256, fp);
    if( sscanf(buf, "%d", &num) != 1 ) {
        ARLOGe("Error processing multimarker config file '%s': First line must be number of marker configs to read.\n", filename);
        fclose(fp);
        return NULL;
    }
    ARLOGd("Reading %d markers from multimarker file '%s'\n", num, filename);

    arMalloc(marker, ARMultiEachMarkerInfoT, num);

    for( i = 0; i < num; i++ ) {
        get_buff(buf, 256, fp);
        if (sscanf(buf, 
#if defined(__LP64__) && !defined(__APPLE__)
                        "%lu%c",
#else
                        "%llu%c",
#endif
                         &(marker[i].globalID), &dummy) != 1) { // Try first as matrix code.
            
            if (!pattHandle) {
                ARLOGe("Error processing multimarker config file '%s': pattern '%s' specified in multimarker configuration while in barcode-only mode.\n", filename, buf);
                goto bail;
            }
            if (!arUtilGetDirectoryNameFromPath(pattPath, filename, sizeof(pattPath), 1)) { // Get directory prefix.
                ARLOGe("Error processing multimarker config file '%s': Unable to determine directory name.\n", filename);
                goto bail;
            }
            strncat(pattPath, buf, sizeof(pattPath) - strlen(pattPath) - 1); // Add name of file to open.
            if ((marker[i].patt_id = arPattLoad(pattHandle, pattPath)) < 0) {
                ARLOGe("Error processing multimarker config file '%s': Unable to load pattern '%s'.\n", filename, pattPath);
                goto bail;
            }
            marker[i].patt_type = AR_MULTI_PATTERN_TYPE_TEMPLATE;
            patt_type |= 0x01;
        } else {
            
            if ((marker[i].globalID & 0xffff8000ULL) == 0ULL) marker[i].patt_id = (int)(marker[i].globalID & 0x00007fffULL); // If upper 33 bits are zero, use lower 31 bits as regular matrix code.
            else marker[i].patt_id = 0;
            ARLOGd("Marker %3d is matrix code %llu.\n", i + 1, marker[i].globalID);
            marker[i].patt_type = AR_MULTI_PATTERN_TYPE_MATRIX;
            patt_type |= 0x02;
        }

        get_buff(buf, 256, fp);
        if( sscanf(buf,
#ifdef ARDOUBLE_IS_FLOAT
                   "%f",
#else
                   "%lf",
#endif
                   &marker[i].width) != 1 ) {
            ARLOGe("Error processing multimarker config file '%s', marker definition %3d: First line must be pattern width.\n", filename, i + 1);
            goto bail;
        }
        
        j = 0;
        get_buff(buf, 256, fp);
        if( sscanf(buf,
#ifdef ARDOUBLE_IS_FLOAT
                   "%f %f %f %f",
#else
                   "%lf %lf %lf %lf",
#endif
                   &marker[i].trans[j][0],
                   &marker[i].trans[j][1],
                   &marker[i].trans[j][2],
                   &marker[i].trans[j][3]) != 4 ) {
            // Perhaps this is an old ARToolKit v2.x multimarker file?
            // If so, then the next line is two values (center) and should be skipped.
            float t1, t2;
            if( sscanf(buf,
                       "%f %f",
                       &t1, &t2) != 2 ) {
                ARLOGe("Error processing multimarker config file '%s', marker definition %3d: Lines 2 - 4 must be marker transform.\n", filename, i + 1);
                goto bail;
            }
        } else j++;
        do {
            get_buff(buf, 256, fp);
            if( sscanf(buf, 
#ifdef ARDOUBLE_IS_FLOAT
                       "%f %f %f %f",
#else
                       "%lf %lf %lf %lf",
#endif
                       &marker[i].trans[j][0],
                       &marker[i].trans[j][1],
                       &marker[i].trans[j][2],
                       &marker[i].trans[j][3]) != 4 ) {
                ARLOGe("Error processing multimarker config file '%s', marker definition %3d: Lines 2 - 4 must be marker transform.\n", filename, i + 1);
                goto bail;
            }
            j++;
        } while (j < 3);
        arUtilMatInv( (const ARdouble (*)[4])marker[i].trans, marker[i].itrans );

        wpos3d[0][0] =  -marker[i].width/2.0;
        wpos3d[0][1] =   marker[i].width/2.0;
        wpos3d[1][0] =   marker[i].width/2.0;
        wpos3d[1][1] =   marker[i].width/2.0;
        wpos3d[2][0] =   marker[i].width/2.0;
        wpos3d[2][1] =  -marker[i].width/2.0;
        wpos3d[3][0] =  -marker[i].width/2.0;
        wpos3d[3][1] =  -marker[i].width/2.0;
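        // Transform the marker's four corner points (x, y, 0) into multimarker
        // coordinates: pos3d = trans * [x y 0 1]'. The corners lie at z = 0 in
        // the marker plane, so the trans[*][2] column drops out of the product.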
        for( j = 0; j < 4; j++ ) {
            marker[i].pos3d[j][0] = marker[i].trans[0][0] * wpos3d[j][0]
                                  + marker[i].trans[0][1] * wpos3d[j][1]
                                  + marker[i].trans[0][3];
            marker[i].pos3d[j][1] = marker[i].trans[1][0] * wpos3d[j][0]
                                  + marker[i].trans[1][1] * wpos3d[j][1]
                                  + marker[i].trans[1][3];
            marker[i].pos3d[j][2] = marker[i].trans[2][0] * wpos3d[j][0]
                                  + marker[i].trans[2][1] * wpos3d[j][1]
                                  + marker[i].trans[2][3];
        }
    }

    fclose(fp);

    arMalloc(marker_info, ARMultiMarkerInfoT, 1);
    marker_info->marker     = marker;
    marker_info->marker_num = num;
    marker_info->prevF      = 0;
    if( (patt_type & 0x03) == 0x03 ) marker_info->patt_type = AR_MULTI_PATTERN_DETECTION_MODE_TEMPLATE_AND_MATRIX;
    else if( patt_type & 0x01 )    marker_info->patt_type = AR_MULTI_PATTERN_DETECTION_MODE_TEMPLATE;
    else                           marker_info->patt_type = AR_MULTI_PATTERN_DETECTION_MODE_MATRIX;
    marker_info->cfPattCutoff = AR_MULTI_CONFIDENCE_PATTERN_CUTOFF_DEFAULT;
    marker_info->cfMatrixCutoff = AR_MULTI_CONFIDENCE_MATRIX_CUTOFF_DEFAULT;

    return marker_info;
    
bail:
    fclose(fp);
    free(marker);
    return NULL;
}
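/*
 * The format parsed above, as a hypothetical multimarker config file: the
 * first line is the marker count; each marker is a pattern filename (template
 * mode) or a numeric global ID (matrix mode), followed by its width and a
 * 3x4 transform, one row per line. (Old ARToolKit v2.x files with a two-value
 * centre line after the width are detected and skipped.)
 *
 *   2
 *   patt.hiro
 *   80.0
 *   1.0 0.0 0.0   0.0
 *   0.0 1.0 0.0   0.0
 *   0.0 0.0 1.0   0.0
 *   0
 *   80.0
 *   1.0 0.0 0.0 100.0
 *   0.0 1.0 0.0   0.0
 *   0.0 0.0 1.0   0.0
 */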
static void mainLoop(void)
{
	static int ms_prev;
	int ms;
	float s_elapsed;
	ARUint8 *image;
    ARMarkerInfo* markerInfo;
    int markerNum;
	ARdouble err;
    int             i, j, k;
	
	// Calculate time delta.
	ms = glutGet(GLUT_ELAPSED_TIME);
	s_elapsed = (float)(ms - ms_prev) * 0.001f;
	ms_prev = ms;
	
	// Grab a video frame.
	if ((image = arVideoGetImage()) != NULL) {
		gARTImage = image;	// Save the fetched image.
		
		gCallCountMarkerDetect++; // Increment ARToolKit FPS counter.
		
		// Detect the markers in the video frame.
		if (arDetectMarker(gARHandle, gARTImage) < 0) {
			exit(-1);
		}
		
		// Get detected markers
		markerInfo = arGetMarker(gARHandle);
		markerNum = arGetMarkerNum(gARHandle);
	
		// Update markers.
		for (i = 0; i < markersSquareCount; i++) {
			markersSquare[i].validPrev = markersSquare[i].valid;
            
            
			// Check through the marker_info array for highest confidence
			// visible marker matching our preferred pattern.
			k = -1;
			if (markersSquare[i].patt_type == AR_PATTERN_TYPE_TEMPLATE) {
				for (j = 0; j < markerNum; j++) {
					if (markersSquare[i].patt_id == markerInfo[j].idPatt) {
						if (k == -1) {
							if (markerInfo[j].cfPatt >= markersSquare[i].matchingThreshold) k = j; // First marker detected.
						} else if (markerInfo[j].cfPatt > markerInfo[k].cfPatt) k = j; // Higher confidence marker detected.
					}
				}
				if (k != -1) {
					markerInfo[k].id = markerInfo[k].idPatt;
					markerInfo[k].cf = markerInfo[k].cfPatt;
					markerInfo[k].dir = markerInfo[k].dirPatt;
				}
			} else {
				for (j = 0; j < markerNum; j++) {
					if (markersSquare[i].patt_id == markerInfo[j].idMatrix) {
						if (k == -1) {
							if (markerInfo[j].cfMatrix >= markersSquare[i].matchingThreshold) k = j; // First marker detected.
						} else if (markerInfo[j].cfMatrix > markerInfo[k].cfMatrix) k = j; // Higher confidence marker detected.
					}
				}
				if (k != -1) {
					markerInfo[k].id = markerInfo[k].idMatrix;
					markerInfo[k].cf = markerInfo[k].cfMatrix;
					markerInfo[k].dir = markerInfo[k].dirMatrix;
				}
			}

			if (k != -1) {
				markersSquare[i].valid = TRUE;
				ARLOGd("Marker %d matched pattern %d.\n", i, markerInfo[k].id);
				// Get the transformation between the marker and the real camera into trans.
				if (markersSquare[i].validPrev && useContPoseEstimation) {
					err = arGetTransMatSquareCont(gAR3DHandle, &(markerInfo[k]), markersSquare[i].trans, markersSquare[i].marker_width, markersSquare[i].trans);
				} else {
					err = arGetTransMatSquare(gAR3DHandle, &(markerInfo[k]), markersSquare[i].marker_width, markersSquare[i].trans);
				}
			} else {
				markersSquare[i].valid = FALSE;
			}
	   
			if (markersSquare[i].valid) {
			
				// Filter the pose estimate.
				if (markersSquare[i].ftmi) {
					if (arFilterTransMat(markersSquare[i].ftmi, markersSquare[i].trans, !markersSquare[i].validPrev) < 0) {
						ARLOGe("arFilterTransMat error with marker %d.\n", i);
					}
				}
			
				if (!markersSquare[i].validPrev) {
					// Marker has become visible, tell any dependent objects.
                    VirtualEnvironmentHandleARMarkerAppeared(i);
				}
	
				// We have a new pose, so set that.
				arglCameraViewRH((const ARdouble (*)[4])markersSquare[i].trans, markersSquare[i].pose.T, 1.0f /*VIEW_SCALEFACTOR*/);
				// Tell any dependent objects about the update.
				VirtualEnvironmentHandleARMarkerWasUpdated(i, markersSquare[i].pose);
			
			} else {
			
				if (markersSquare[i].validPrev) {
					// Marker has ceased to be visible, tell any dependent objects.
					VirtualEnvironmentHandleARMarkerDisappeared(i);
				}
			}                    
		}
		
		// Tell GLUT the display has changed.
		glutPostRedisplay();
	} else {
		arUtilSleep(2);
	}
    
}
int ar2Tracking( AR2HandleT *ar2Handle, AR2SurfaceSetT *surfaceSet, ARUint8 *dataPtr, float  trans[3][4], float  *err )
{
    AR2TemplateCandidateT  *candidatePtr;
    AR2TemplateCandidateT  *cp[AR2_THREAD_MAX];
#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
    float                   aveBlur;
#endif
    int                     num, num2;
    int                     i, j, k;

    if (!ar2Handle || !surfaceSet || !dataPtr || !trans || !err) return (-1);

    if( surfaceSet->contNum <= 0  ) {
        ARLOGd("ar2Tracking() error: ar2SetInitTrans() must be called first.\n");
        return -2;
    }

    *err = 0.0F;

    for( i = 0; i < surfaceSet->num; i++ ) {
        arUtilMatMulf( (const float (*)[4])surfaceSet->trans1, (const float (*)[4])surfaceSet->surface[i].trans, ar2Handle->wtrans1[i] );
        if( surfaceSet->contNum > 1 ) arUtilMatMulf( (const float (*)[4])surfaceSet->trans2, (const float (*)[4])surfaceSet->surface[i].trans, ar2Handle->wtrans2[i] );
        if( surfaceSet->contNum > 2 ) arUtilMatMulf( (const float (*)[4])surfaceSet->trans3, (const float (*)[4])surfaceSet->surface[i].trans, ar2Handle->wtrans3[i] );
    }

    if( ar2Handle->trackingMode == AR2_TRACKING_6DOF ) {
        extractVisibleFeatures(ar2Handle->cparamLT, ar2Handle->wtrans1, surfaceSet, ar2Handle->candidate, ar2Handle->candidate2);
    }
    else {
        extractVisibleFeaturesHomography(ar2Handle->xsize, ar2Handle->ysize, ar2Handle->wtrans1, surfaceSet, ar2Handle->candidate, ar2Handle->candidate2);
    }

    candidatePtr = ar2Handle->candidate;
#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
    aveBlur = 0.0F;
#endif
    i = 0; // Counts up to searchFeatureNum.
    num = 0;
    while( i < ar2Handle->searchFeatureNum ) {
        num2 = num;
        for( j = 0; j < ar2Handle->threadNum; j++ ) {
            if( i == ar2Handle->searchFeatureNum ) break;

            k = ar2SelectTemplate( candidatePtr, surfaceSet->prevFeature, num2, ar2Handle->pos, ar2Handle->xsize, ar2Handle->ysize );
            if( k < 0 ) {
                if( candidatePtr == ar2Handle->candidate ) {
                    candidatePtr = ar2Handle->candidate2;
                    k = ar2SelectTemplate( candidatePtr, surfaceSet->prevFeature, num2, ar2Handle->pos, ar2Handle->xsize, ar2Handle->ysize );
                    if( k < 0 ) break; // PRL 2012-05-15: Give up if we can't select template from alternate candidate either.
                }
                else break;
            }

            cp[j] = &(candidatePtr[k]);
            ar2Handle->pos[num2][0] = candidatePtr[k].sx;
            ar2Handle->pos[num2][1] = candidatePtr[k].sy;
            ar2Handle->arg[j].ar2Handle  = ar2Handle;
            ar2Handle->arg[j].surfaceSet = surfaceSet;
            ar2Handle->arg[j].candidate  = &(candidatePtr[k]);
            ar2Handle->arg[j].dataPtr    = dataPtr;

            threadStartSignal( ar2Handle->threadHandle[j] );
            num2++;
            if( num2 == 5 ) num2 = num;
            i++;
        }
        k = j;
        if( k == 0 ) break;

        for( j = 0; j < k; j++ ) {
            threadEndWait( ar2Handle->threadHandle[j] );

            if( ar2Handle->arg[j].ret == 0 && ar2Handle->arg[j].result.sim > ar2Handle->simThresh ) {
                if( ar2Handle->trackingMode == AR2_TRACKING_6DOF ) {
#ifdef ARDOUBLE_IS_FLOAT
                    arParamObserv2Ideal(ar2Handle->cparamLT->param.dist_factor,
                                        ar2Handle->arg[j].result.pos2d[0], ar2Handle->arg[j].result.pos2d[1],
                                        &ar2Handle->pos2d[num][0], &ar2Handle->pos2d[num][1], ar2Handle->cparamLT->param.dist_function_version);
#else
                    ARdouble pos2d0, pos2d1;
                    arParamObserv2Ideal(ar2Handle->cparamLT->param.dist_factor,                    
                                        (ARdouble)(ar2Handle->arg[j].result.pos2d[0]), (ARdouble)(ar2Handle->arg[j].result.pos2d[1]),
                                        &pos2d0, &pos2d1, ar2Handle->cparamLT->param.dist_function_version);
                    ar2Handle->pos2d[num][0] = (float)pos2d0;
                    ar2Handle->pos2d[num][1] = (float)pos2d1;
#endif
                }
                else {
                    ar2Handle->pos2d[num][0] = ar2Handle->arg[j].result.pos2d[0];
                    ar2Handle->pos2d[num][1] = ar2Handle->arg[j].result.pos2d[1];
                }
                ar2Handle->pos3d[num][0] = ar2Handle->arg[j].result.pos3d[0];
                ar2Handle->pos3d[num][1] = ar2Handle->arg[j].result.pos3d[1];
                ar2Handle->pos3d[num][2] = ar2Handle->arg[j].result.pos3d[2];
                ar2Handle->pos[num][0] = cp[j]->sx;
                ar2Handle->pos[num][1] = cp[j]->sy;
                ar2Handle->usedFeature[num].snum  = cp[j]->snum;
                ar2Handle->usedFeature[num].level = cp[j]->level;
                ar2Handle->usedFeature[num].num   = cp[j]->num;
                ar2Handle->usedFeature[num].flag  = 0;
#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
                aveBlur += ar2Handle->arg[j].result.blurLevel;
#endif
                num++;
            }
        }
    }
    for( i = 0; i < num; i++ ) {
        surfaceSet->prevFeature[i] = ar2Handle->usedFeature[i];
    }
    surfaceSet->prevFeature[num].flag = -1;
//ARLOG("------\nNum = %d\n", num);

    if( ar2Handle->trackingMode == AR2_TRACKING_6DOF ) {
        if( num < 3 ) {
            surfaceSet->contNum = 0;
            return -3;
        }
        *err = ar2GetTransMat( ar2Handle->icpHandle, surfaceSet->trans1, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 0 );
//ARLOG("outlier  0%%: err = %f, num = %d\n", *err, num);
        if( *err > ar2Handle->trackingThresh ) {
            icpSetInlierProbability( ar2Handle->icpHandle, 0.8F );
            *err = ar2GetTransMat( ar2Handle->icpHandle, trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1 );
//ARLOG("outlier 20%%: err = %f, num = %d\n", *err, num);
            if( *err > ar2Handle->trackingThresh ) {
                icpSetInlierProbability( ar2Handle->icpHandle, 0.6F );
                *err = ar2GetTransMat( ar2Handle->icpHandle, trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1 );
//ARLOG("outlier 60%%: err = %f, num = %d\n", *err, num);
                if( *err > ar2Handle->trackingThresh ) {
                    icpSetInlierProbability( ar2Handle->icpHandle, 0.4F );
                    *err = ar2GetTransMat( ar2Handle->icpHandle, trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1 );
//ARLOG("outlier 60%%: err = %f, num = %d\n", *err, num);
                    if( *err > ar2Handle->trackingThresh ) {
                        icpSetInlierProbability( ar2Handle->icpHandle, 0.0F );
                        *err = ar2GetTransMat( ar2Handle->icpHandle, trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1 );
//ARLOG("outlier Max: err = %f, num = %d\n", *err, num);
                        if( *err > ar2Handle->trackingThresh ) {
                            surfaceSet->contNum = 0;
#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
                            if( ar2Handle->blurMethod == AR2_ADAPTIVE_BLUR ) ar2Handle->blurLevel = AR2_DEFAULT_BLUR_LEVEL; // Reset the blurLevel.
#endif
                            return -4;
                        }
                    }
                }
            }
        }
    }
    else {
        if( num < 3 ) {
            surfaceSet->contNum = 0;
            return -3;
        }
        *err = ar2GetTransMatHomography( surfaceSet->trans1, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 0, 1.0F );
//ARLOG("outlier  0%%: err = %f, num = %d\n", *err, num);
        if( *err > ar2Handle->trackingThresh ) {
            *err = ar2GetTransMatHomography( trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1, 0.8F );
//ARLOG("outlier 20%%: err = %f, num = %d\n", *err, num);
            if( *err > ar2Handle->trackingThresh ) {
                *err = ar2GetTransMatHomography( trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1, 0.6F );
//ARLOG("outlier 40%%: err = %f, num = %d\n", *err, num);
                if( *err > ar2Handle->trackingThresh ) {
                    *err = ar2GetTransMatHomography( trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1, 0.4F );
//ARLOG("outlier 60%%: err = %f, num = %d\n", *err, num);
                    if( *err > ar2Handle->trackingThresh ) {
                        *err = ar2GetTransMatHomography( trans, ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1, 0.0F );
//ARLOG("outlier Max: err = %f, num = %d\n", *err, num);
                        if( *err > ar2Handle->trackingThresh ) {
                            surfaceSet->contNum = 0;
#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
                            if( ar2Handle->blurMethod == AR2_ADAPTIVE_BLUR ) ar2Handle->blurLevel = AR2_DEFAULT_BLUR_LEVEL; // Reset the blurLevel.
#endif
                            return -4;
                        }
                    }
                }
            }
        }
    }

#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
    if( ar2Handle->blurMethod == AR2_ADAPTIVE_BLUR ) {
        aveBlur = aveBlur/num + 0.5F;
        ar2Handle->blurLevel += (int)aveBlur - 1;
        if( ar2Handle->blurLevel < 1 ) ar2Handle->blurLevel = 1;
        if( ar2Handle->blurLevel >= AR2_BLUR_IMAGE_MAX-1 ) ar2Handle->blurLevel = AR2_BLUR_IMAGE_MAX-2;
    }
#endif

    surfaceSet->contNum++;
    for( j = 0; j < 3; j++ ) {
        for( i = 0; i < 4; i++ ) surfaceSet->trans3[j][i] = surfaceSet->trans2[j][i];
    }
    for( j = 0; j < 3; j++ ) {
        for( i = 0; i < 4; i++ ) surfaceSet->trans2[j][i] = surfaceSet->trans1[j][i];
    }
    for( j = 0; j < 3; j++ ) {
        for( i = 0; i < 4; i++ ) surfaceSet->trans1[j][i] = trans[j][i];
    }

    return 0;
}
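/*
 * Note: the nested retries in the 6DOF branch above amount to a back-off loop
 * over ICP inlier probabilities (sketch only, not the library's code):
 *
 *   static const float probs[] = { 0.8F, 0.6F, 0.4F, 0.0F };
 *   *err = ar2GetTransMat(ar2Handle->icpHandle, surfaceSet->trans1,
 *                         ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 0);
 *   for (int n = 0; n < 4 && *err > ar2Handle->trackingThresh; n++) {
 *       icpSetInlierProbability(ar2Handle->icpHandle, probs[n]);
 *       *err = ar2GetTransMat(ar2Handle->icpHandle, trans,
 *                             ar2Handle->pos2d, ar2Handle->pos3d, num, trans, 1);
 *   }
 *   // If *err still exceeds trackingThresh, tracking is abandoned (return -4).
 */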