Example #1
CLandmark::CLandmark( const UIDLandmark& p_uID,
                      const CDescriptor& p_matDescriptorLEFT,
                      const CDescriptor& p_matDescriptorRIGHT,
                      const double& p_dKeyPointSize,
                      const cv::Point2d& p_ptUVLEFT,
                      const cv::Point2d& p_ptUVRIGHT,
                      const CPoint3DCAMERA& p_vecPointXYZLEFT,
                      const Eigen::Isometry3d& p_matTransformationLEFTtoWORLD,
                      const Eigen::Isometry3d& p_matTransformationWORLDtoLEFT,
                      const MatrixProjection& p_matProjectionLEFT,
                      const MatrixProjection& p_matProjectionRIGHT,
                      const MatrixProjection& p_matProjectionWORLDtoLEFT,
                      const MatrixProjection& p_matProjectionWORLDtoRIGHT,
                      const UIDFrame& p_uFrame ): uID( p_uID ),
                                                  matDescriptorReferenceLEFT( p_matDescriptorLEFT ),
                                                  matDescriptorReferenceRIGHT( p_matDescriptorRIGHT ),
                                                  dKeyPointSize( p_dKeyPointSize ),
                                                  vecPointXYZInitial( p_matTransformationLEFTtoWORLD*p_vecPointXYZLEFT ),
                                                  vecPointXYZOptimized( vecPointXYZInitial ),
                                                  vecUVReferenceLEFT( p_ptUVLEFT.x, p_ptUVLEFT.y, 1.0 ),
                                                  vecPointXYZMean( vecPointXYZInitial ),
                                                  m_matProjectionLEFT( p_matProjectionLEFT ),
                                                  m_matProjectionRIGHT( p_matProjectionRIGHT )
{
    //ds start from an empty measurement history (defensive clears)
    vecDescriptorsLEFT.clear( );
    vecDescriptorsRIGHT.clear( );
    m_vecMeasurements.clear( );

    //ds construct filestring and open dump file
    //char chBuffer[256];
    //std::snprintf( chBuffer, 256, "/home/dominik/workspace_catkin/src/vi_mapper/logs/landmarks/landmark%06lu.txt", uID );
    //m_pFilePositionOptimization = std::fopen( chBuffer, "w" );

    //assert( 0 != m_pFilePositionOptimization );

    //ds dump file format
    //std::fprintf( m_pFilePositionOptimization, "ID_FRAME | ID_LANDMARK | ITERATION MEASUREMENTS INLIERS | ERROR_ARSS | DELTA_XYZ |      X      Y      Z\n" );

    //ds register the initial measurement from the detection frame
    addMeasurement( p_uFrame,
                    p_ptUVLEFT,
                    p_ptUVRIGHT,
                    p_matDescriptorLEFT,
                    p_matDescriptorRIGHT,
                    p_vecPointXYZLEFT,
                    p_matTransformationLEFTtoWORLD,
                    p_matTransformationWORLDtoLEFT,
                    p_matProjectionWORLDtoLEFT,
                    p_matProjectionWORLDtoRIGHT );
}
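
The constructor above routes its very first observation through addMeasurement, so the initial detection and all later per-frame updates share one storage path. Below is a minimal sketch of what such a method might look like, assuming the measurement is simply appended to the containers cleared in the constructor; the CMeasurementLandmark record used here is hypothetical, not the project's actual type.

//ds hedged sketch, not the original implementation: append the observation
//ds to the containers cleared in the constructor
void CLandmark::addMeasurement( const UIDFrame& p_uFrame,
                                const cv::Point2d& p_ptUVLEFT,
                                const cv::Point2d& p_ptUVRIGHT,
                                const CDescriptor& p_matDescriptorLEFT,
                                const CDescriptor& p_matDescriptorRIGHT,
                                const CPoint3DCAMERA& p_vecPointXYZLEFT,
                                const Eigen::Isometry3d& p_matTransformationLEFTtoWORLD,
                                const Eigen::Isometry3d& p_matTransformationWORLDtoLEFT,
                                const MatrixProjection& p_matProjectionWORLDtoLEFT,
                                const MatrixProjection& p_matProjectionWORLDtoRIGHT )
{
    //ds keep the per-frame descriptors for future matching
    vecDescriptorsLEFT.push_back( p_matDescriptorLEFT );
    vecDescriptorsRIGHT.push_back( p_matDescriptorRIGHT );

    //ds CMeasurementLandmark is a hypothetical record bundling the frame id,
    //ds stereo image points, camera-frame point and current transformations
    m_vecMeasurements.push_back( CMeasurementLandmark( p_uFrame,
                                                       p_ptUVLEFT,
                                                       p_ptUVRIGHT,
                                                       p_vecPointXYZLEFT,
                                                       p_matTransformationLEFTtoWORLD,
                                                       p_matTransformationWORLDtoLEFT,
                                                       p_matProjectionWORLDtoLEFT,
                                                       p_matProjectionWORLDtoRIGHT ) );
}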
Example #2
//-------------------------------------------
//      Entry point for the application
//-------------------------------------------
void appMain(void)
{
    PRINTF("Starting...\n");

    initFullBN();

    accelOn();

    mdelay(START_DELAY);

    redLedOn();

    for (;;) {
        uint16_t now = ALARM_TIMER_VALUE();
        // the sum may wrap past 0xFFFF; timeAfter16() below compares
        // timestamps modulo 2^16, so the wrap is handled correctly
        uint16_t endTime = now + MAIN_LOOP_LENGTH;

        Packet_t packet;
        packet.accX = accelReadX();
        packet.accY = accelReadY();
        packet.accZ = accelReadZ();

        // PRINTF("read %d %d %d\n", packet.accX, packet.accY, packet.accZ);

        // time the addMeasurement() call; the memory barriers keep the
        // compiler from moving it outside the measured window
        uint16_t start = ALARM_TIMER_VALUE();
        MEMORY_BARRIER();

        addMeasurement(&packet);

        MEMORY_BARRIER();
        uint16_t end = ALARM_TIMER_VALUE();
        
        if (vectorPos == 0) {
            PRINTF("time to calc = %u ticks\n", end - start);
        }

        // busy-wait for the remainder of the loop period
        while (timeAfter16(endTime, ALARM_TIMER_VALUE()));
    }
}
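
timeAfter16() is what makes the wrap-around in endTime safe. The usual implementation of this idiom for 16-bit tick counters looks like the following sketch; this is an assumption about the platform, shown for illustration, not its actual definition.

#include <stdbool.h>
#include <stdint.h>

// hedged sketch: classic wrap-safe "is time a after time b?" test for
// 16-bit counters; the platform's real macro may differ
static inline bool timeAfter16(uint16_t a, uint16_t b)
{
    // the signed difference stays correct across the 0xFFFF -> 0x0000 wrap
    // as long as the two timestamps are less than 2^15 ticks apart
    return (int16_t)(a - b) > 0;
}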
Example #3
void AnalogSampler::measure()
{
    // normalize the 10-bit ADC reading (0..1023) to roughly [0, 1)
    double v = analogRead(_pin) / 1024.0;
    addMeasurement(v);
}
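
For context, addMeasurement() here plausibly just accumulates the normalized reading so an average can be reported later. A minimal sketch under that assumption follows; the _sum and _n members and the average() method are hypothetical, not the library's documented API.

// hedged sketch: one plausible accumulator behind addMeasurement();
// _sum and _n are assumed members, not the library's actual API
void AnalogSampler::addMeasurement(double v)
{
    _sum += v;   // running sum of normalized readings
    _n += 1;     // count of readings taken
}

double AnalogSampler::average() const
{
    return (_n > 0) ? _sum / _n : 0.0;   // avoid dividing by zero
}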
Example #4
    bool VideoBasedTracker::processImage(cv::Mat frame, cv::Mat grayImage,
                                         PoseHandler handler) {
        m_assertInvariants();
        bool done = false;
        m_frame = frame;
        m_imageGray = grayImage;

        //================================================================
        // Tracking the points

        // Threshold the image.  The darkest and brightest pixel values are
        // computed here, but the threshold itself is currently a hard-coded
        // brightness value (maxVal is used below to bound the blob detector).
        // @todo Make this a parameter.
        double minVal, maxVal;
        cv::minMaxLoc(m_imageGray, &minVal, &maxVal);
        double thresholdValue = 220;
        cv::threshold(m_imageGray, m_thresholdImage, thresholdValue, 255,
                      cv::THRESH_BINARY);

        // @todo are any of the above steps already being performed in the blob
        // detector (if configured in a given way?)
        // http://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html#details

        // Construct a blob detector and find the blobs in the image.
        // This set of parameters is optimized for the OSVR HDK prototype
        // that has exposed LEDs.
        /// @todo: Make a different set of parameters optimized for the
        /// Oculus Dk2.
        /// @todo: Determine the maximum size of a trackable blob by seeing
        /// when we're so close that we can't view at least four in the
        /// camera.
        cv::SimpleBlobDetector::Params params;
        params.minThreshold = static_cast<float>(thresholdValue);
        params.maxThreshold = static_cast<float>(
            thresholdValue + (maxVal - thresholdValue) * 0.3);
        params.thresholdStep = (params.maxThreshold - params.minThreshold) / 10;
        params.blobColor = static_cast<uchar>(255);
        // Look for bright blobs; color filtering itself is disabled because
        // there is a bug in this code path.
        params.filterByColor = false;
        params.minInertiaRatio = 0.5;
        params.maxInertiaRatio = 1.0;
        params.filterByInertia = false;     // Do we test for non-elongated blobs?
        params.minArea = 1;                 // How small can the blobs be?
        params.filterByConvexity = false;   // Test for convexity?
        params.filterByCircularity = false; // Test for circularity?
        params.minDistBetweenBlobs = 3;
#if CV_MAJOR_VERSION == 2
        cv::Ptr<cv::SimpleBlobDetector> detector =
            new cv::SimpleBlobDetector(params);
#elif CV_MAJOR_VERSION == 3
        auto detector = cv::SimpleBlobDetector::create(params);
#else
#error "Unrecognized OpenCV version!"
#endif
        /// @todo this variable is a candidate for hoisting to member
        std::vector<cv::KeyPoint> foundKeyPoints;
        detector->detect(m_imageGray, foundKeyPoints);

        // @todo: Consider computing the center of mass of a dilated bounding
        // rectangle around each keypoint to produce a more precise subpixel
        // localization of each LED.  The moments() function may be helpful
        // with this.

        // @todo: Estimate the summed brightness of each blob so that we can
        // detect when they are getting brighter and dimmer.  Pass this as
        // the brightness parameter to the Led class when adding a new one
        // or augmenting with a new frame.

        // We allow multiple sets of LEDs, each corresponding to a different
        // sensor, to be located in the same image.  We construct a new set
        // of LEDs for each and try to find them.  It is assumed that they all
        // have unique ID patterns across all sensors.
        for (size_t sensor = 0; sensor < m_identifiers.size(); sensor++) {
            osvrPose3SetIdentity(&m_pose);
            std::vector<cv::KeyPoint> keyPoints = foundKeyPoints;

            // Locate the closest blob from this frame to each LED found
            // in the previous frame.  If it is close enough to the nearest
            // neighbor from last time, we assume that it is the same LED and
            // update it.  If not, we delete the LED from the list.  Once we
            // have matched a blob to an LED, we remove it from the list.  If
            // there are any blobs leftover, we create new LEDs from them.
            // @todo: Include motion estimate based on Kalman filter along with
            // model of the projection once we have one built.  Note that this
            // will require handling the lens distortion appropriately.
            auto led = begin(m_led_groups[sensor]);
            auto e = end(m_led_groups[sensor]);
            while (led != e) {
                double TODO_BLOB_MOVE_THRESHOLD = 10;
                auto nearest =
                    led->nearest(keyPoints, TODO_BLOB_MOVE_THRESHOLD);
                if (nearest == keyPoints.end()) {
                    // We have no blob corresponding to this LED, so we need
                    // to delete this LED.
                    led = m_led_groups[sensor].erase(led);
                } else {
                    // Update the values in this LED and then go on to the
                    // next one.  Remove this blob from the list of potential
                    // matches.
                    led->addMeasurement(nearest->pt, nearest->size);
                    keyPoints.erase(nearest);
                    ++led;
                }
            }
            // If we have any blobs that have not been associated with an
            // LED, then we add a new LED for each of them.
            // std::cout << "Had " << Leds.size() << " LEDs, " <<
            // keyPoints.size() << " new ones available" << std::endl;
            for (auto &keypoint : keyPoints) {
                m_led_groups[sensor].emplace_back(m_identifiers[sensor].get(),
                                                  keypoint.pt, keypoint.size);
            }

            //==================================================================
            // Compute the pose of the HMD w.r.t. the camera frame of reference.
            bool gotPose = false;
            if (m_estimators[sensor]) {

                // Get an estimated pose, if we have enough data.
                OSVR_PoseState pose;
                if (m_estimators[sensor]->EstimatePoseFromLeds(
                        m_led_groups[sensor], pose)) {

                    // Project the expected locations of the beacons into the
                    // image, with the intent of comparing them against the
                    // observed LED locations and cancelling the pose if the
                    // reprojection error is too large.
                    // @todo The comparison is not implemented yet: the loop
                    // below computes a label and location per LED but does
                    // not yet use them.
                    std::vector<cv::Point2f> imagePoints;
                    m_estimators[sensor]->ProjectBeaconsToImage(imagePoints);
                    for (auto &led : m_led_groups[sensor]) {
                        auto label = std::to_string(led.getOneBasedID());
                        cv::Point where = led.getLocation();
                    }

                    m_pose = pose;
                    handler(static_cast<unsigned>(sensor), pose);
                    gotPose = true;
                }
            }

#ifdef VBHMD_DEBUG
            // Don't display the debugging info every frame, or we can't go fast
            // enough.
            static int count = 0;
            if (++count == 11) {
                // Draw detected blobs as red circles.
                cv::drawKeypoints(m_frame, keyPoints, m_imageWithBlobs,
                                  cv::Scalar(0, 0, 255),
                                  cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // Label the keypoints with their IDs.
                for (auto &led : m_led_groups[sensor]) {
                    // Print 1-based LED ID for actual LEDs
                    auto label = std::to_string(led.getOneBasedID());
                    cv::Point where = led.getLocation();
                    where.x += 1;
                    where.y += 1;
                    cv::putText(m_imageWithBlobs, label, where,
                                cv::FONT_HERSHEY_SIMPLEX, 0.5,
                                cv::Scalar(0, 0, 255));
                }

                // If we have a transform, reproject all of the points from the
                // model space (the LED locations) back into the image and
                // display them on the blob image in green.
                if (gotPose) {
                    std::vector<cv::Point2f> imagePoints;
                    m_estimators[sensor]->ProjectBeaconsToImage(imagePoints);
                    size_t n = imagePoints.size();
                    for (size_t i = 0; i < n; ++i) {
                        // Print 1-based LED IDs
                        auto label = std::to_string(i + 1);
                        auto where = imagePoints[i];
                        where.x += 1;
                        where.y += 1;
                        cv::putText(m_imageWithBlobs, label, where,
                                    cv::FONT_HERSHEY_SIMPLEX, 0.5,
                                    cv::Scalar(0, 255, 0));
                    }
                }

                // Pick which image to show and show it.
                if (m_frame.data) {
                    std::ostringstream windowName;
                    windowName << "Sensor" << sensor;
                    cv::imshow(windowName.str().c_str(), *m_shownImage);
                    int key = cv::waitKey(1);
                    switch (key) {
                    case 'i':
                        // Show the input image.
                        m_shownImage = &m_frame;
                        break;

                    case 't':
                        // Show the thresholded image.
                        m_shownImage = &m_thresholdImage;
                        break;

                    case 'b':
                        // Show the blob image.
                        m_shownImage = &m_imageWithBlobs;
                        break;

                    case 'q':
                        // Indicate we want to quit.
                        done = true;
                        break;
                    }
                }

                // Report the pose, if we got one
                if (gotPose) {
                    std::cout << "Pos (sensor " << sensor
                              << "): " << m_pose.translation.data[0] << ", "
                              << m_pose.translation.data[1] << ", "
                              << m_pose.translation.data[2] << std::endl;
                }

                count = 0;
            }
#endif
        }

        m_assertInvariants();
        return done;
    }
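
The matching loop in processImage() depends on Led::nearest() returning an iterator into the keypoint vector, or end() when no blob lies within the move threshold. Below is a sketch consistent with that contract, assuming plain Euclidean distance from the LED's last known location; m_location is an assumed member, and this is an illustration rather than OSVR's actual implementation.

    // hedged sketch: nearest-blob lookup matching the contract used above;
    // returns keyPoints.end() when nothing is within the threshold
    std::vector<cv::KeyPoint>::iterator
    Led::nearest(std::vector<cv::KeyPoint> &keyPoints, double threshold) const {
        auto best = keyPoints.end();
        double bestDist = threshold;   // only accept blobs closer than this
        for (auto it = keyPoints.begin(); it != keyPoints.end(); ++it) {
            double dx = it->pt.x - m_location.x;  // m_location: assumed member
            double dy = it->pt.y - m_location.y;  // holding the last position
            double dist = std::sqrt(dx * dx + dy * dy);
            if (dist < bestDist) {
                bestDist = dist;
                best = it;
            }
        }
        return best;
    }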