// Extract the color/depth images, binary mask and mask bounding box from an
// entity's latest measurement.
//
// @param e              Entity whose last measurement is used; returns early if none.
// @param view_color_img Out: RGB image cropped to the measurement view size.
// @param view_depth_img Out: depth image of the measurement (full resolution).
// @param mask           Out: CV_8UC1 mask, 255 at measurement points, 0 elsewhere.
// @param bouding_box    Out: tight bounding box of the mask points; empty rect
//                       if the mask contains no points. (Name kept as-is to
//                       match the existing declaration.)
void prepareMeasurement(const ed::EntityConstPtr& e, cv::Mat& view_color_img, cv::Mat& view_depth_img, cv::Mat& mask, cv::Rect& bouding_box) {
    // Get the best measurement from the entity
    ed::MeasurementConstPtr msr = e->lastMeasurement();
    if (!msr)
        return;

    // Create a view at the RGB image's width (see rgbd::View docs — presumably
    // it derives the working resolution from this width; TODO confirm)
    rgbd::View view(*msr->image(), msr->image()->getRGBImage().cols);

    // Depth image straight from the measurement
    view_depth_img = msr->image()->getDepthImage();

    // Color image, cropped to match the view dimensions
    const cv::Mat& color_image = msr->image()->getRGBImage();
    view_color_img = cv::Mat(color_image(cv::Rect(0, 0, view.getWidth(), view.getHeight())));

    // Initialize bounding-box extremes inverted so that any point updates them.
    // max_* start at -1 (not 0) so an empty mask is detectable below.
    int min_x = view.getWidth();
    int min_y = view.getHeight();
    int max_x = -1;
    int max_y = -1;

    // Initialize mask, all 0s
    mask = cv::Mat::zeros(view.getHeight(), view.getWidth(), CV_8UC1);

    // Iterate over all points in the measurement's image mask
    for (ed::ImageMask::const_iterator it = msr->imageMask().begin(view.getWidth()); it != msr->imageMask().end(); ++it) {
        // mask's (x, y) coordinate in the depth image
        const cv::Point2i& p_2d = *it;

        // paint the mask
        mask.at<unsigned char>(p_2d) = 255;

        // update the boundary coordinates
        if (min_x > p_2d.x) min_x = p_2d.x;
        if (max_x < p_2d.x) max_x = p_2d.x;
        if (min_y > p_2d.y) min_y = p_2d.y;
        if (max_y < p_2d.y) max_y = p_2d.y;
    }

    // Empty mask: the extremes never moved. The original code would have
    // produced a rect with negative width/height here.
    if (max_x < min_x || max_y < min_y) {
        bouding_box = cv::Rect();
        return;
    }

    // +1 so the rectangle includes the max pixel: a single-point mask must
    // yield a 1x1 box, not 0x0 (the original computed max - min, which
    // silently dropped the last row and column of the mask).
    bouding_box = cv::Rect(min_x, min_y, max_x - min_x + 1, max_y - min_y + 1);
}
// Classify whether an entity's latest measurement looks like a human contour
// and write the result into the entity's perception_result configuration.
//
// @param e      Entity to classify; returns early if it has no measurement.
// @param result Configuration the classification scores are written into
//               (perception_result / kModuleName group).
void HumanContourMatcher::process(ed::EntityConstPtr e, tue::Configuration& result) const {
    // If initialization of the classifier failed, skip processing entirely
    if (!init_success_)
        return;

    // Get the best measurement from the entity
    ed::MeasurementConstPtr msr = e->lastMeasurement();
    if (!msr)
        return;

    float depth_sum = 0.0f;
    float classification_error = 0;
    float classification_deviation = 0;
    std::string classification_stance;
    uint point_counter = 0;
    bool is_human = false;

    // Get the depth and color images from the measurement
    const cv::Mat& depth_image = msr->image()->getDepthImage();
    const cv::Mat& color_image = msr->image()->getRGBImage();

    cv::Mat mask_cv = cv::Mat::zeros(depth_image.rows, depth_image.cols, CV_8UC1);

    // Iterate over all points in the mask. You must specify the width of the
    // image on which you want to apply the mask (see the begin(...) method).
    for (ed::ImageMask::const_iterator it = msr->imageMask().begin(depth_image.cols); it != msr->imageMask().end(); ++it) {
        // mask's (x, y) coordinate in the depth image
        const cv::Point2i& p_2d = *it;

        // TODO dont creat a Mat mask, just give human_classifier_.Classify a vector of 2D points!
        // paint the mask
        mask_cv.at<unsigned char>(p_2d) = 255;

        // accumulate depth for the average
        depth_sum += depth_image.at<float>(p_2d);
        point_counter++;
    }

    // Guard against an empty mask: the original divided by point_counter
    // unconditionally, which yields NaN (0/0) and feeds it to the classifier.
    const float avg_depth =
        (point_counter > 0) ? depth_sum / static_cast<float>(point_counter) : 0.0f;

    is_human = human_classifier_.Classify(depth_image, color_image, mask_cv, avg_depth,
                                          classification_error, classification_deviation,
                                          classification_stance);

    // ----------------------- assert results -----------------------

    // create the perception_result group if it doesn't exist
    if (!result.readGroup("perception_result", tue::OPTIONAL)) {
        result.writeGroup("perception_result");
    }

    // assert the results to the entity
    result.writeGroup(kModuleName);
    result.setValue("label", "Human Shape");

    // stance/error/deviation are only meaningful when the classifier produced
    // a non-zero error estimate
    if (classification_error > 0) {
        result.setValue("stance", classification_stance);
        result.setValue("error", classification_error);
        result.setValue("deviation", classification_deviation);
    }

    result.setValue("score", is_human ? 1.0 : 0.0);

    result.endGroup(); // close human_contour_matcher group
    result.endGroup(); // close perception_result group
}