Exemplo n.º 1
0
void FaceDetector::writeFaceDetectionResult(const ed::Measurement& msr, const cv::Rect& rgb_roi,
                                            const std::vector<cv::Rect>& rgb_face_rois,
                                            int& face_counter, tue::Configuration& result) const
{
    // Writes one array item per detected face into 'result': the face's 2D ROI
    // in the RGB image and, when a valid median depth is available, its 3D
    // position in the map frame. 'face_counter' is incremented per face written.

    // get color image
    const cv::Mat& color_image = msr.image()->getRGBImage();

    // get depth image (original comment wrongly said "color image")
    const cv::Mat& depth_image = msr.image()->getDepthImage();

    // Calculate size factor between depth and rgb images
    double f_depth_rgb = (double)depth_image.cols / color_image.cols;

    // Create depth view (used to back-project 2D pixels to 3D rays)
    rgbd::View depth_view(*msr.image(), depth_image.cols);

    for (std::size_t j = 0; j < rgb_face_rois.size(); ++j)
    {
        // Face ROI is relative to rgb_roi; shift to full-image coordinates and
        // clamp to the image bounds so the sub-matrix accesses below cannot throw
        cv::Rect rgb_face_roi = rgb_face_rois[j];
        rgb_face_roi.x += rgb_roi.x;
        rgb_face_roi.y += rgb_roi.y;
        rgb_face_roi &= cv::Rect(0, 0, color_image.cols, color_image.rows);

        if (rgb_face_roi.area() <= 0)
            continue;   // face lies completely outside the image; nothing to report

        if (debug_mode_)
            ed::perception::saveDebugImage("face_detector-rgb", color_image(rgb_face_roi));

        result.addArrayItem();

        result.setValue("index", face_counter);

        // add 2D location of the face
        result.setValue("x", rgb_face_roi.x);
        result.setValue("y", rgb_face_roi.y);
        result.setValue("width", rgb_face_roi.width);
        result.setValue("height", rgb_face_roi.height);

        // Compute face roi for depth image (scaled copy of the rgb roi), clamped
        // to the depth image bounds — scaling/rounding can overrun the edge
        cv::Rect depth_face_roi(f_depth_rgb * rgb_face_roi.x, f_depth_rgb * rgb_face_roi.y,
                                f_depth_rgb * rgb_face_roi.width, f_depth_rgb * rgb_face_roi.height);
        depth_face_roi &= cv::Rect(0, 0, depth_image.cols, depth_image.rows);

        float avg_depth = 0;
        if (depth_face_roi.area() > 0)
        {
            if (debug_mode_)
                ed::perception::saveDebugImage("face_detector-depth", depth_image(depth_face_roi));

            cv::Mat face_area = depth_image(depth_face_roi);
            avg_depth = ed::perception::getMedianDepth(face_area);
        }

        if (avg_depth > 0)
        {
            // calculate the center point of the face (depth-image coordinates)
            cv::Point2i p_2d(depth_face_roi.x + depth_face_roi.width / 2,
                             depth_face_roi.y + depth_face_roi.height / 2);

            // Back-project the pixel to a 3D ray, scale it by the measured depth,
            // then transform from the sensor frame into the map frame
            geo::Vector3 projection = depth_view.getRasterizer().project2Dto3D(p_2d.x, p_2d.y) * avg_depth;
            geo::Vector3 point_map = msr.sensorPose() * projection;

            // add 3D location of the face
            result.setValue("map_x", point_map.x);
            result.setValue("map_y", point_map.y);
            result.setValue("map_z", point_map.z);
        }
        else
        {
            std::cout << "[ED FACE DETECTOR] Could not calculate face's average depth. Map coordinates might be incorrect!" << std::endl;
        }

        result.endArrayItem();
        face_counter++;
    }
}
Exemplo n.º 2
0
void HumanContourMatcher::process(ed::EntityConstPtr e, tue::Configuration& result) const
{
    // Classifies the entity's best measurement as human / non-human with the
    // contour classifier and writes the outcome (label, score, and — when a
    // classification error is available — stance/error/deviation) into the
    // 'perception_result' group of 'result'.

    // if initialization failed, return
    if (!init_success_)
        return;

    // Get the best measurement from the entity
    ed::MeasurementConstPtr msr = e->lastMeasurement();
    if (!msr)
        return;

    float depth_sum = 0;
    float classification_error = 0;
    float classification_deviation = 0;
    std::string classification_stance;
    uint point_counter = 0;

    // Get the depth and color images from the measurement
    const cv::Mat& depth_image = msr->image()->getDepthImage();
    const cv::Mat& color_image = msr->image()->getRGBImage();

    cv::Mat mask_cv = cv::Mat::zeros(depth_image.rows, depth_image.cols, CV_8UC1);

    // Iterate over all points in the mask. You must specify the width of the image on which you
    // want to apply the mask (see the begin(...) method).
    for(ed::ImageMask::const_iterator it = msr->imageMask().begin(depth_image.cols); it != msr->imageMask().end(); ++it)
    {
        // mask's (x, y) coordinate in the depth image
        const cv::Point2i& p_2d = *it;

        // TODO don't create a Mat mask, just give human_classifier_.Classify a vector of 2D points!
        // paint a mask
        mask_cv.at<unsigned char>(p_2d) = 255;

        // accumulate depth to compute the measurement's average depth
        depth_sum += depth_image.at<float>(p_2d);
        point_counter++;
    }

    // Guard against an empty mask: the original divided by zero here, feeding
    // NaN to the classifier. Use 0 instead so the value is well-defined.
    // (Also fixes the 'avg_depht' typo.)
    float avg_depth = (point_counter > 0) ? depth_sum / (float)point_counter : 0.0f;

    bool is_human = human_classifier_.Classify(depth_image, color_image, mask_cv, avg_depth,
                                               classification_error, classification_deviation,
                                               classification_stance);

    // ----------------------- assert results -----------------------

    // create group if it doesn't exist
    if (!result.readGroup("perception_result", tue::OPTIONAL))
    {
        result.writeGroup("perception_result");
    }

    // assert the results to the entity
    result.writeGroup(kModuleName);
    result.setValue("label", "Human Shape");

    // stance/error/deviation are only meaningful when the classifier produced
    // a positive error value
    if (classification_error > 0){
        result.setValue("stance", classification_stance);
        result.setValue("error", classification_error);
        result.setValue("deviation", classification_deviation);
    }

    if(is_human)
    {
        result.setValue("score", 1.0);
    }else{
        result.setValue("score", 0.0);
    }

    result.endGroup();  // close human_contour_matcher group
    result.endGroup();  // close perception_result group
}