void World::queryLine(point_t v1, point_t v2, QueryCallback* qc) {
    if (use_partitioning) {
        // spatial hash version: check every rect in each bucket touched by the line's bounding box
        int min_x = (v1.getX()) / bucket_width;
        int max_x = (v2.getX()) / bucket_width;
        if (min_x > max_x) std::swap(min_x, max_x);
        int min_y = (v1.getY()) / bucket_height;
        int max_y = (v2.getY()) / bucket_height;
        if (min_y > max_y) std::swap(min_y, max_y);
        // todo: iterates over the full box containing the line; should visit only buckets the line actually crosses.
        for (int i = min_x; i <= max_x; i++)
            for (int j = min_y; j <= max_y; j++)
                if (bucket* b = getBucket(i, j))
                    // iterate over all rects in bucket:
                    for (auto iter : b->rect_v)
                        // check if rect intersects the line:
                        if (iter->queryOnLine(v1, v2))
                            // halt if QueryCallback returns false.
                            if (!qc->onMatch(iter, iter->getContactPointOnLine(v1, v2)))
                                return;
    } else {
        // non spatial hash version: check every rect
        for (auto iter : v_rect) {
            if (iter->queryOnLine(v1, v2))
                if (!qc->onMatch(iter, iter->getContactPointOnLine(v1, v2)))
                    return;
        }
    }
}
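// A minimal caller sketch for queryLine (illustrative, not from the original
// source): CollectHits and findRectsOnSegment are hypothetical names, and the
// onMatch signature is modeled on the anonymous QueryCallback struct used in
// PhysicsRegion::queryFirstPoint below.
struct CollectHits : public QueryCallback {
    std::vector<Rect*> hits;
    bool onMatch(Rect* r, point_t at) {
        hits.push_back(r);
        return true; // keep scanning; returning false stops the query early
    }
};

std::vector<Rect*> findRectsOnSegment(World& world, point_t v1, point_t v2) {
    CollectHits qc;
    world.queryLine(v1, v2, &qc);
    return qc.hits; // every rect whose edge or interior met the segment
}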
static int _crossTwoCircles(point_t& pt1, point_t& pt2,
                            const point_t& c1, double r1,
                            const point_t& c2, double r2)
{
    double d, a, b, c, p, q, r;
    double cos_value[2], sin_value[2];

    // coincident circles: infinitely many intersections
    if (mgEquals(c1.x, c2.x) && mgEquals(c1.y, c2.y) && mgEquals(r1, r2)) {
        return -1;
    }

    d = c1.distanceTo(c2);
    // too far apart, or one circle strictly inside the other: no intersection
    if (d > r1 + r2 || d < fabs(r1 - r2)) {
        return 0;
    }

    a = 2.0 * r1 * (c1.x - c2.x);
    b = 2.0 * r1 * (c1.y - c2.y);
    c = r2 * r2 - r1 * r1 - c1.distanceSquare(c2);
    p = a * a + b * b;
    q = -2.0 * a * c;

    // tangent circles (externally or internally): exactly one intersection
    if (mgEquals(d, r1 + r2) || mgEquals(d, fabs(r1 - r2))) {
        cos_value[0] = -q / p / 2.0;
        sin_value[0] = sqrt(1 - cos_value[0] * cos_value[0]);
        pt1.x = r1 * cos_value[0] + c1.x;
        pt1.y = r1 * sin_value[0] + c1.y;
        // pick the sign of sin that actually lies on the second circle
        if (!mgEquals(pt1.distanceSquare(c2), r2 * r2)) {
            pt1.y = c1.y - r1 * sin_value[0];
        }
        return 1;
    }

    // secant circles: two intersections, from the quadratic in cos
    r = c * c - b * b;
    cos_value[0] = (sqrt(q * q - 4.0 * p * r) - q) / p / 2.0;
    cos_value[1] = (-sqrt(q * q - 4.0 * p * r) - q) / p / 2.0;
    sin_value[0] = sqrt(1 - cos_value[0] * cos_value[0]);
    sin_value[1] = sqrt(1 - cos_value[1] * cos_value[1]);

    pt1.x = r1 * cos_value[0] + c1.x;
    pt2.x = r1 * cos_value[1] + c1.x;
    pt1.y = r1 * sin_value[0] + c1.y;
    pt2.y = r1 * sin_value[1] + c1.y;

    // correct the sign of sin for each point so it lies on the second circle
    if (!mgEquals(pt1.distanceSquare(c2), r2 * r2)) {
        pt1.y = c1.y - r1 * sin_value[0];
    }
    if (!mgEquals(pt2.distanceSquare(c2), r2 * r2)) {
        pt2.y = c1.y - r1 * sin_value[1];
    }
    // if both candidates collapsed onto the same point, mirror one of them
    if (mgEquals(pt1.y, pt2.y) && mgEquals(pt1.x, pt2.x)) {
        if (pt1.y > 0) {
            pt2.y = -pt2.y;
        } else {
            pt1.y = -pt1.y;
        }
    }
    return 2;
}
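// Hedged usage sketch (not from the original source; assumes point_t is
// default-constructible and has an (x, y) constructor as used elsewhere in
// this code). The return code encodes the configuration: -1 coincident
// circles, 0 disjoint, 1 tangent, 2 secant. Circles of radius 5 centered at
// (0,0) and (8,0) intersect at (4,3) and (4,-3).
void crossTwoCirclesExample() {
    point_t p1, p2;
    int n = _crossTwoCircles(p1, p2, point_t(0, 0), 5.0, point_t(8, 0), 5.0);
    if (n == 2) {
        // p1 and p2 now hold the two intersection points
    }
}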
void fill_t::solid_fill(point_t p, float width, float height) {
    std::queue<point_t> fillQueue;
    fillQueue.push(p);
    float pointx = p.getX();
    float pointy = p.getY();
    // seed color: the fill spreads over the 4-connected region of this color.
    // caveat: assuming draw() updates colorArray, if color1 equals the seed
    // color the painted pixels keep matching and the loop never terminates.
    color_t c = colorArray[(int)pointx][(int)pointy];
    color_t pixels;
    while (!fillQueue.empty()) {
        p = fillQueue.front();
        pointx = p.getX();
        pointy = p.getY();
        fillQueue.pop();
        // canvas bounds check
        if (pointx >= width || pointy >= height || pointx < 0 || pointy < 0)
            continue;
        pixels = colorArray[(int)pointx][(int)pointy];
        if (pixels.getR() == c.getR() &&
            pixels.getG() == c.getG() &&
            pixels.getB() == c.getB()) {
            point_t p1(pointx, pointy);
            p1.draw(pen_t(color1, 1));
            fillQueue.push(point_t(pointx + 1, pointy));
            fillQueue.push(point_t(pointx, pointy + 1));
            fillQueue.push(point_t(pointx - 1, pointy));
            fillQueue.push(point_t(pointx, pointy - 1));
        }
    }
}
bool Rect::queryOnLine(point_t v1, point_t v2) {
    // intersects if the segment crosses any of the rect's four edges,
    // or if the segment is contained within the rect
    return (lineSegmentsIntersect({x, y},     {x + w, y},     v1, v2) ||
            lineSegmentsIntersect({x, y},     {x, y + h},     v1, v2) ||
            lineSegmentsIntersect({x, y + h}, {x + w, y + h}, v1, v2) ||
            lineSegmentsIntersect({x + w, y}, {x + w, y + h}, v1, v2) ||
            queryContains(v1.getX(), v1.getY()));
}
/// the out integral channel will be resized to the required dimensions
void get_integral_channels(const integral_channels_t &in,
                           const point_t &modelWindowSize,
                           const point_t &dataOffset,
                           const int resizing_factor,
                           integral_channels_t &out)
{
    get_integral_channels(in,
                          dataOffset.x(), dataOffset.y(),
                          modelWindowSize.x(), modelWindowSize.y(),
                          resizing_factor,
                          out);
    return;
}
void fill_t::find_min_max(point_t p, int& YMin, int& YMax, int& XMin, int& XMax,
                          float width, float height) {
    std::queue<point_t> fmmQueue;
    fmmQueue.push(p);
    float pointx = p.getX();
    float pointy = p.getY();
    color_t c = colorArray[(int)pointx][(int)pointy];
    color_t pixels;
    while (!fmmQueue.empty()) {
        p = fmmQueue.front();
        pointx = p.getX();
        pointy = p.getY();
        fmmQueue.pop();
        // canvas bounds check
        if (pointx >= width || pointy >= height || pointx < 0 || pointy < 0)
            continue;
        pixels = colorArray[(int)pointx][(int)pointy];
        if (pixels.getR() == c.getR() &&
            pixels.getG() == c.getG() &&
            pixels.getB() == c.getB()) {
            // grow the bounding box of the connected region
            if (pointy < YMin) YMin = pointy;
            if (pointy > YMax) YMax = pointy;
            if (pointx < XMin) XMin = pointx;
            if (pointx > XMax) XMax = pointx;
            // mark the pixel as visited by painting it gray
            p.draw(pen_t(color_t(0.5, 0.5, 0.5), 1));
            fmmQueue.push(point_t(pointx + 1, pointy));
            fmmQueue.push(point_t(pointx, pointy + 1));
            fmmQueue.push(point_t(pointx - 1, pointy));
            fmmQueue.push(point_t(pointx, pointy - 1));
        }
    }
}
void TrainingData::addPositiveSamples(const std::vector<std::string> &filenamesPositives,
                                      const point_t &modelWindowSize,
                                      const point_t &dataOffset)
{
    const size_t
            initialNumberOfTrainingSamples = getNumExamples(),
            // number of examples once all the images have been added to the training set
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + filenamesPositives.size();

    if (finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addPositiveSamples is trying to add more data than initially specified");
    }

    printf("\nCollecting %zi positive samples\n", filenamesPositives.size());
    boost::progress_display progress_indicator(filenamesPositives.size());

    meta_datum_t metaDatum;
    integral_channels_t sampleIntegralChannels;

    // integralChannelsComputer is already multithreaded, so no benefit in parallelizing this for loop
    for (size_t filenameIndex = 0; filenameIndex < filenamesPositives.size(); filenameIndex += 1)
    {
        gil::rgb8_image_t image;
        gil::rgb8c_view_t image_view = doppia::open_image(filenamesPositives[filenameIndex].c_str(), image);

        _integralChannelsComputer.set_image(image_view);
        _integralChannelsComputer.compute();

        get_integral_channels(_integralChannelsComputer.get_integral_channels(),
                              modelWindowSize, dataOffset,
                              _integralChannelsComputer.get_shrinking_factor(), // shrinking factor = 4
                              sampleIntegralChannels);

        metaDatum.filename = filenamesPositives[filenameIndex];
        metaDatum.imageClass = 1;
        metaDatum.x = dataOffset.x();
        metaDatum.y = dataOffset.y();

        setDatum(initialNumberOfTrainingSamples + filenameIndex,
                 metaDatum, sampleIntegralChannels);

        ++progress_indicator;
    } // end of "for each filename"

    return;
}
chassis_id PhysicsRegion::queryFirstPoint(point_t point, flag_plane p) {
    struct : public rects::QueryCallback {
        const PhysicsRegion* parent;
        chassis_id ret = NO_CHASSIS;
        bool onMatch(rects::Rect* r, point_t at) {
            ret = parent->getChassisFromRect(r);
            return false; // stop at the first match
        }
    } qc;
    qc.parent = this;
    for (byte i = 0; i < getPlaneCount(); i++) {
        flag_plane p_it = 1 << i;
        if (p_it & p)
            planes[i].queryPoint(point.getX(), point.getY(), &qc);
        if (qc.ret != NO_CHASSIS)
            return qc.ret;
    }
    return qc.ret;
}
bool Polygon::within_n(const point_t &p, double d2) const {
    if (_in_mbr(p)) {
        for (size_t i = 0; i < outer_.size(); ++i)
            if (outer_[i].contains(p))
                return outer_[i].within_n(p, d2);
        for (size_t i = 0; i < inner_.size(); ++i)
            if (inner_[i].contains(p))
                return inner_[i].within_n(p, d2);
        return true;
    } else {
        if (p.y() >= mbr_[0]) return _within_n(p, d2, 0);
        if (p.x() <= mbr_[1]) return _within_n(p, d2, 1);
        if (p.y() <= mbr_[2]) return _within_n(p, d2, 2);
        if (p.x() >= mbr_[3]) return _within_n(p, d2, 3);
    }
    // not reachable
    return true;
}
void ModelIO::initWrite(const std::string datasetName,
                        const DetectorModel::DetectorTypes type,
                        const std::string detectorName,
                        const point_t modelWindow,
                        const rectangle_t objectWindow)
{
    doppia_protobuf::Point2d *model_window = _model.mutable_model_window_size();
    model_window->set_x(modelWindow.x());
    model_window->set_y(modelWindow.y());

    doppia_protobuf::Box *b = _model.mutable_object_window();
    b->mutable_min_corner()->set_x(objectWindow.min_corner().x());
    b->mutable_min_corner()->set_y(objectWindow.min_corner().y());
    b->mutable_max_corner()->set_x(objectWindow.max_corner().x());
    b->mutable_max_corner()->set_y(objectWindow.max_corner().y());

    _model.set_training_dataset_name(datasetName.c_str());
    _model.set_detector_type(type);
    _model.set_detector_name(detectorName);
    return;
}
// http://mathworld.wolfram.com/Circle-LineIntersection.html
// intersection of the infinite line through a and b with a circle of
// radius r centered at the origin
int _crossLineCircle(point_t& pt1, point_t& pt2,
                     const point_t& a, const point_t& b, double r)
{
    point_t d(b - a);
    double d2 = d.lengthSquare();
    double dz = a.crossProduct(b);
    double z2 = dz * dz;
    double delta = r * r * d2 - z2; // discriminant: < 0 none, ~0 tangent, > 0 secant

    if (delta < 0)
        return 0;

    double s = sqrt(delta) / d2;
    double sx = (d.y < 0 ? -d.x : d.x) * s;
    double sy = fabs(d.y) * s;
    double tx = dz * d.y / d2;
    double ty = -dz * d.x / d2;

    pt1 = point_t(tx + sx, ty + sy);
    pt2 = point_t(tx - sx, ty - sy);

    return delta < 1e-8 ? 1 : 2;
}
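// Worked example (illustrative, not from the original source). The circle is
// centered at the origin with radius r, matching the linked Wolfram formula.
// For r = 5 and the vertical chord through (3,-10)-(3,10):
//   d = (0,20), d2 = 400, dz = 60, delta = 25*400 - 3600 = 6400 > 0,
// so the function reports two points, (3, 4) and (3, -4).
void crossLineCircleExample() {
    point_t p1, p2;
    int n = _crossLineCircle(p1, p2, point_t(3, -10), point_t(3, 10), 5.0);
    // n == 2, p1 == (3, 4), p2 == (3, -4)
    (void)n;
}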
bool Plane::is_in(const point_t &p) const {
    assert(p.size() == a.size());
    double sum = 0;
    for (unsigned i = 0; i < a.size(); ++i)
        sum += a[i] * p[i];
    switch (sign) {
    case lt:  return (sum <  b);
    case lte: return (sum <= b);
    case gt:  return (sum >  b);
    case gte: return (sum >= b);
    }
    assert(false);
    return false;
}
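// Hedged usage sketch (not from the original source; assumes point_t here is
// an indexable coordinate container that can be brace-initialized). A Plane
// with a = (1, 1), b = 1 and sign = lte represents the half-space x + y <= 1;
// is_in computes the dot product a.p and compares it to b with the stored relation.
void halfSpaceExample(const Plane& half_space) {
    point_t p = {0.25, 0.5}; // 0.25 + 0.5 = 0.75 <= 1, inside that half-space
    bool inside = half_space.is_in(p);
    (void)inside;
}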
inline bool operator==(point_t<origin_type::screen> const& lhs,
                       point_t<origin_type::screen> const& rhs) noexcept
{
    return lhs.x() == rhs.x() && lhs.y() == rhs.y();
}
/** Returns true if the two line segments intersect. */
bool lineSegmentsIntersect(point_t a1, point_t a2, point_t b1, point_t b2) {
    // if segment a is vertical:
    if (a1.getX() == a2.getX())
        if ((a1.getX() > b1.getX() && a1.getX() < b2.getX()) ||
            (a1.getX() > b2.getX() && a1.getX() < b1.getX())) {
            float m_b = (b1 - b2).getSlope();
            float b_b = b1.getY() - b1.getX() * m_b;
            float y_intersect = b_b + m_b * a1.getX();
            return ((y_intersect > a1.getY()) != (y_intersect > a2.getY()));
        }
    // if segment b is vertical:
    if (b1.getX() == b2.getX())
        if ((b1.getX() > a1.getX() && b1.getX() < a2.getX()) ||
            (b1.getX() > a2.getX() && b1.getX() < a1.getX())) {
            float m_a = (a1 - a2).getSlope();
            float b_a = a1.getY() - a1.getX() * m_a;
            float y_intersect = b_a + m_a * b1.getX();
            return ((y_intersect > b1.getY()) != (y_intersect > b2.getY()));
        }
    // solve for intersection:
    float m_a = (a1 - a2).getSlope();
    float m_b = (b1 - b2).getSlope();
    float b_a = a1.getY() - a1.getX() * m_a;
    float b_b = b1.getY() - b1.getX() * m_b;
    // parallel (including overlapping) lines are treated as non-intersecting.
    if (m_a == m_b)
        return false;
    // m_a*x + b_a = m_b*x + b_b  =>  x = (b_b - b_a) / (m_a - m_b)
    float x_intersect = (b_b - b_a) / (m_a - m_b);
    return ((x_intersect > a1.getX() && x_intersect < a2.getX()) ||
            (x_intersect > a2.getX() && x_intersect < a1.getX())) &&
           ((x_intersect > b1.getX() && x_intersect < b2.getX()) ||
            (x_intersect > b2.getX() && x_intersect < b1.getX()));
}
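// Quick check of the corrected intersection formula (illustrative, not from
// the original source). The diagonals of a 4x4 square cross at (2, 2):
// m_a = 1, b_a = 0, m_b = -1, b_b = 4, so
// x = (b_b - b_a) / (m_a - m_b) = 4 / 2 = 2, which lies inside both segments.
void segmentIntersectExample() {
    bool crossing = lineSegmentsIntersect({0, 0}, {4, 4}, {0, 4}, {4, 0});
    bool parallel = lineSegmentsIntersect({0, 0}, {4, 4}, {1, 0}, {5, 4});
    (void)crossing; // true
    (void)parallel; // false: equal slopes are reported as non-intersecting
}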
inline bool operator==(point_t<origin_type::client> const& lhs,
                       point_t<origin_type::client> const& rhs) noexcept
{
    return lhs.window_handle() == rhs.window_handle()
        && lhs.x() == rhs.x()
        && lhs.y() == rhs.y();
}
/** Returns the point of intersection between the two line segments. */
point_t lineSegmentsIntersectCoord(point_t a1, point_t a2, point_t b1, point_t b2) {
    point_t DEFAULT_RETURN = {-1, -1};
    // solve for intersection:
    float m_a, b_a;
    if (a1.getX() != a2.getX()) {
        m_a = (a1 - a2).getSlope();
        b_a = a1.getY() - a1.getX() * m_a;
    }
    float m_b, b_b;
    if (b1.getX() != b2.getX()) {
        m_b = (b1 - b2).getSlope();
        b_b = b1.getY() - b1.getX() * m_b;
    }
    // both segments vertical: no single intersection point
    if (a1.getX() == a2.getX() && b1.getX() == b2.getX())
        return DEFAULT_RETURN;
    if (a1.getX() == a2.getX())
        return {a1.getX(), b_b + m_b * a1.getX()};
    if (b1.getX() == b2.getX())
        return {b1.getX(), b_a + m_a * b1.getX()};
    // parallel (including overlapping) lines do not intersect.
    if (m_a == m_b)
        return DEFAULT_RETURN;
    // m_a*x + b_a = m_b*x + b_b  =>  x = (b_b - b_a) / (m_a - m_b)
    float x_intersect = (b_b - b_a) / (m_a - m_b);
    return {x_intersect, b_a + x_intersect * m_a};
}
inline bool operator<(point_t<Origin> const& lhs, point_t<Origin> const& rhs) noexcept
{
    // orders points by squared distance from the origin
    return lhs.x() * lhs.x() + lhs.y() * lhs.y()
         < rhs.x() * rhs.x() + rhs.y() * rhs.y();
}
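// Usage note (illustrative, not from the original source): since operator<
// compares squared distances from the origin, std::sort arranges points
// nearest-to-origin first. Requires <algorithm> and <vector>.
void sortByDistanceExample(std::vector<point_t<origin_type::screen>>& pts) {
    std::sort(pts.begin(), pts.end()); // uses the operator< defined above
}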
double get_angle(point_t v1, point_t v2) {
    // note: with this argument order, atan2(dot, cross) yields pi/2 minus the
    // usual signed angle atan2(cross, dot) between v1 and v2 (wrapped to (-pi, pi]).
    return atan2(v1.dot(v2), v1.cross(v2));
}
void fill_t::printPointColor(point_t p) {
    std::cout << "COLOR " << colorArray[(int)p.getX()][(int)p.getY()].getR() << std::endl;
}
void TrainingData::addNegativeSamples(const std::vector<std::string> &filenamesBackground,
                                      const point_t &modelWindowSize,
                                      const point_t &dataOffset,
                                      const size_t numNegativeSamplesToAdd)
{
    const size_t
            initialNumberOfTrainingSamples = getNumExamples(),
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + numNegativeSamplesToAdd;

    if (finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addNegativeSamples is trying to add more data than initially specified");
    }

    printf("\nCollecting %zi random negative samples\n", numNegativeSamplesToAdd);
    boost::progress_display progress_indicator(numNegativeSamplesToAdd);

    meta_datum_t metaDatum;
    integral_channels_t sampleIntegralChannels;

#if defined(DEBUG)
    srand(1);
#else
    srand(time(NULL));
#endif
    srand(1); // note: overrides the conditional seeding above, making runs deterministic

    const int samplesPerImage = std::max<int>(1, numNegativeSamplesToAdd / filenamesBackground.size());

    // FIXME no idea what the +1 does
    const int minWidth = (modelWindowSize.x() + 1 + 2*dataOffset.x()),
              minHeight = (modelWindowSize.y() + 1 + 2*dataOffset.y());

    const float maxSkippedFraction = 0.25;

    size_t numNegativesSamplesAdded = 0, numSkippedImages = 0, filenameIndex = 0;

    // integralChannelsComputer is already multithreaded, so no benefit in parallelizing this loop
    while (numNegativesSamplesAdded < numNegativeSamplesToAdd)
    {
        if (filenameIndex >= filenamesBackground.size())
        {
            // loop over the files until we have reached the desired number of samples
            filenameIndex = 0;
        }

        const string &filename = filenamesBackground[filenameIndex];
        filenameIndex += 1;

        gil::rgb8c_view_t imageView;
        gil::rgb8_image_t image;
        imageView = doppia::open_image(filename.c_str(), image);

        if ((imageView.width() < minWidth) or (imageView.height() < minHeight))
        {
            // input image is too small, skip it
            numSkippedImages += 1;

            const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
            if (skippedFraction > maxSkippedFraction)
            {
                printf("Skipped %zi images (out of %zi, %.3f%%) because they were too small\n",
                       numSkippedImages, filenamesBackground.size(), skippedFraction*100);
                throw std::runtime_error("Too many negative images were skipped.\nDataset needs to be fixed");
            }
            continue;
        }

        const int maxRandomX = (imageView.width() - modelWindowSize.x() + 1 - 2*dataOffset.x()),
                  maxRandomY = (imageView.height() - modelWindowSize.y() + 1 - 2*dataOffset.y());

        _integralChannelsComputer.set_image(imageView);
        _integralChannelsComputer.compute();

        metaDatum.filename = filename;
        metaDatum.imageClass = _backgroundClassLabel;

        size_t numSamplesForImage = std::min<size_t>(samplesPerImage,
                                                     (numNegativeSamplesToAdd - numNegativesSamplesAdded));
        numSamplesForImage = 1; // note: overrides the computed value, taking a single sample per image

        for (size_t randomSampleIndex = 0; randomSampleIndex < numSamplesForImage; randomSampleIndex += 1)
        {
            const size_t
                    x = dataOffset.x() + rand() % maxRandomX,
                    y = dataOffset.y() + rand() % maxRandomY;
            const point_t randomOffset(x, y);

            metaDatum.x = randomOffset.x();
            metaDatum.y = randomOffset.y();

            get_integral_channels(_integralChannelsComputer.get_integral_channels(),
                                  modelWindowSize, randomOffset,
                                  _integralChannelsComputer.get_shrinking_factor(),
                                  sampleIntegralChannels);

            setDatum(initialNumberOfTrainingSamples + numNegativesSamplesAdded,
                     metaDatum, sampleIntegralChannels);

            numNegativesSamplesAdded += 1;
            ++progress_indicator;
        }

    } // end of "for each background image"

    if (numSkippedImages > 0)
    {
        const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
        printf("Skipped %zi images (out of %zi, %.3f%%) because they were too small\n",
               numSkippedImages, filenamesBackground.size(), skippedFraction*100);
    }

    return;
}
void TrainingData::addNegativeSamples(const std::vector<std::string> &filenamesBackground,
                                      const point_t &modelWindowSize,
                                      const point_t &dataOffset,
                                      const size_t numNegativeSamplesToAdd)
{
    int feature_extraction_time = 0, image_loading_time = 0, tmp_time;

    const size_t
            initialNumberOfTrainingSamples = get_num_examples(),
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + numNegativeSamplesToAdd;

    if (finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addNegativeSamples is trying to add more data than initially specified");
    }

    printf("\nCollecting %zi random negative samples\n", numNegativeSamplesToAdd);
    doppia::progress_display_with_eta progress_indicator(numNegativeSamplesToAdd);

    meta_datum_t metaDatum;
    integral_channels_t sampleIntegralChannels;

#if defined(DEBUG)
    srand(1);
#else
    srand(time(NULL));
#endif
    srand(1); // note: overrides the conditional seeding above, making runs deterministic

    const int samplesPerImage = std::max<int>(1, numNegativeSamplesToAdd / filenamesBackground.size());

    // FIXME no idea what the +1 does
    const int minWidth = (modelWindowSize.x() + 1 + 2*dataOffset.x()),
              minHeight = (modelWindowSize.y() + 1 + 2*dataOffset.y());

    const float maxSkippedFraction = 0.25;

    size_t numNegativesSamplesAdded = 0, numSkippedImages = 0, filenameIndex = 0;

    // integralChannelsComputer is already multithreaded, so no benefit in parallelizing this loop
    while (numNegativesSamplesAdded < numNegativeSamplesToAdd)
    {
        if (filenameIndex >= filenamesBackground.size())
        {
            // loop over the files until we have reached the desired number of samples
            filenameIndex = 0;
        }

        const string &background_image_path = filenamesBackground[filenameIndex];
        filenameIndex += 1;

        gil::rgb8c_view_t imageView;
        gil::rgb8_image_t image;
        imageView = doppia::open_image(background_image_path.c_str(), image);

        if ((imageView.width() < minWidth) or (imageView.height() < minHeight))
        {
            // input image is too small, skip it
            numSkippedImages += 1;

            const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
            if (skippedFraction > maxSkippedFraction)
            {
                printf("Skipped %zi images (out of %zi, %.3f%%) because they were too small (or too big to process)\n",
                       numSkippedImages, filenamesBackground.size(), skippedFraction*100);
                throw std::runtime_error("Too many negative images were skipped.\nDataset needs to be fixed");
            }
            continue;
        }

        const int maxRandomX = (imageView.width() - modelWindowSize.x() + 1 - 2*dataOffset.x()),
                  maxRandomY = (imageView.height() - modelWindowSize.y() + 1 - 2*dataOffset.y());

        try
        {
            // FIXME hardcoded values
            const size_t expected_channels_size = imageView.size()*10,
                         max_texture_size = 134217728; // 2**27 for CUDA capability 2.x
            if (expected_channels_size > max_texture_size)
            {
                throw std::invalid_argument("The image is monstrously big!");
            }

            const boost::filesystem::path file_path = background_image_path;
#if BOOST_VERSION <= 104400
            const std::string filename = file_path.filename();
#else
            const std::string filename = file_path.filename().string();
#endif
            tmp_time = (int)round(omp_get_wtime());
            _integralChannelsComputer->set_image(imageView, filename);
            image_loading_time += (int)round(omp_get_wtime()) - tmp_time;

            tmp_time = (int)round(omp_get_wtime());
            _integralChannelsComputer->compute();
            feature_extraction_time += (int)round(omp_get_wtime()) - tmp_time;
        }
        catch (std::exception &e)
        {
            printf("Computing integral channels of image %s \033[1;31mfailed\033[0m (size %zix%zi). Skipping it. Error was:\n%s\n",
                   background_image_path.c_str(), imageView.width(), imageView.height(), e.what());
            numSkippedImages += 1;
            continue; // we skip this image
        }
        catch (...)
        {
            printf("Computing integral channels of %s \033[1;31mfailed\033[0m (size %zix%zi). Skipping it. Received unknown error.\n",
                   background_image_path.c_str(), imageView.width(), imageView.height());
            numSkippedImages += 1;
            continue; // we skip this image
        }

        metaDatum.filename = background_image_path;
        metaDatum.imageClass = _backgroundClassLabel;

        size_t numSamplesForImage = std::min<size_t>(samplesPerImage,
                                                     (numNegativeSamplesToAdd - numNegativesSamplesAdded));
        numSamplesForImage = 1; // note: overrides the computed value, taking a single sample per image

        for (size_t randomSampleIndex = 0; randomSampleIndex < numSamplesForImage; randomSampleIndex += 1)
        {
            const size_t
                    x = dataOffset.x() + rand() % maxRandomX,
                    y = dataOffset.y() + rand() % maxRandomY;
            const point_t randomOffset(x, y);

            metaDatum.x = randomOffset.x();
            metaDatum.y = randomOffset.y();

            get_integral_channels(_integralChannelsComputer->get_integral_channels(),
                                  modelWindowSize, randomOffset,
                                  doppia::IntegralChannelsForPedestrians::get_shrinking_factor(),
                                  sampleIntegralChannels);

            setDatum(initialNumberOfTrainingSamples + numNegativesSamplesAdded,
                     metaDatum, sampleIntegralChannels);

            numNegativesSamplesAdded += 1;
            ++progress_indicator;
        }

    } // end of "for each background image"

    if (numSkippedImages > 0)
    {
        const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
        printf("Skipped %zi images (out of %zi, %.3f%%) because they were too small (or too big to process)\n",
               numSkippedImages, filenamesBackground.size(), skippedFraction*100);
    }

    printf("Time elapsed while loading negative images: %02d:%02d:%02d\n",
           image_loading_time/3600, (image_loading_time%3600)/60, image_loading_time%60);
    printf("Time elapsed while extracting features from negative images: %02d:%02d:%02d\n",
           feature_extraction_time/3600, (feature_extraction_time%3600)/60, feature_extraction_time%60);
    return;
}
inline POINT point_t<origin_type::screen>::client_to_screen(point_t<origin_type::client> const& src) noexcept
{
    POINT dst = src.data();
    ClientToScreen(src.window_handle(), &dst);
    return dst;
}
inline int extract_y( const point_t& pt ) { return pt.get_y(); }
// volume evaluation with partial derivatives
/* virtual */ void operator()(
    /* pointer to first point in rowwise control point grid */ const_point_iterator points,
    /* order in u dir */                                       std::size_t order_u,
    /* order in v dir */                                       std::size_t order_v,
    /* order in w dir */                                       std::size_t order_w,
    /* u-parameter for point to evaluate */                    value_type u,
    /* v-parameter for point to evaluate */                    value_type v,
    /* w-parameter for point to evaluate */                    value_type w,
    /* resulting point at [u,v,w] */                           point_t& point,
    /* first partial derivative in u at [u,v,w] */             point_t& du,
    /* first partial derivative in v at [u,v,w] */             point_t& dv,
    /* first partial derivative in w at [u,v,w] */             point_t& dw) const
{
    pointmesh3d<point_t> mesh(points, points + order_u * order_v * order_w,
                              order_u, order_v, order_w);

    // transform control points to homogenous space
    std::for_each(mesh.begin(), mesh.end(), std::mem_fn(&point_t::project_to_homogenous));

    // first decasteljau in u direction until only a u-linear volume is left
    for (std::size_t jv = 0; jv != order_v; ++jv) {
        for (std::size_t jw = 0; jw != order_w; ++jw) {
            for (std::size_t i = 0; i != order_u - 2; ++i) {
                for (std::size_t j = 0; j != order_u - 1 - i; ++j) {
                    mesh(j, jv, jw) = (value_type(1) - u) * mesh(j, jv, jw) + u * mesh(j + 1, jv, jw);
                }
            }
        }
    }

    // secondly decasteljau in v direction until only a uv-linear volume is left
    for (std::size_t ju = 0; ju != 2; ++ju) {
        for (std::size_t jw = 0; jw != order_w; ++jw) {
            for (std::size_t i = 0; i != order_v - 2; ++i) {
                for (std::size_t j = 0; j != order_v - 1 - i; ++j) {
                    mesh(ju, j, jw) = (value_type(1) - v) * mesh(ju, j, jw) + v * mesh(ju, j + 1, jw);
                }
            }
        }
    }

    // thirdly decasteljau in w direction until only a trilinear volume is left
    for (std::size_t ju = 0; ju != 2; ++ju) {
        for (std::size_t jv = 0; jv != 2; ++jv) {
            for (std::size_t i = 0; i != order_w - 2; ++i) {
                for (std::size_t j = 0; j != order_w - 1 - i; ++j) {
                    mesh(ju, jv, j) = (value_type(1) - w) * mesh(ju, jv, j) + w * mesh(ju, jv, j + 1);
                }
            }
        }
    }

    // evaluate for u, leaving a linear patch depending on v,w
    point_t vw00 = (value_type(1) - u) * mesh(0, 0, 0) + u * mesh(1, 0, 0);
    point_t vw10 = (value_type(1) - u) * mesh(0, 1, 0) + u * mesh(1, 1, 0);
    point_t vw01 = (value_type(1) - u) * mesh(0, 0, 1) + u * mesh(1, 0, 1);
    point_t vw11 = (value_type(1) - u) * mesh(0, 1, 1) + u * mesh(1, 1, 1);

    // evaluate for v, leaving a linear patch depending on u,w
    point_t uw00 = (value_type(1) - v) * mesh(0, 0, 0) + v * mesh(0, 1, 0);
    point_t uw10 = (value_type(1) - v) * mesh(1, 0, 0) + v * mesh(1, 1, 0);
    point_t uw01 = (value_type(1) - v) * mesh(0, 0, 1) + v * mesh(0, 1, 1);
    point_t uw11 = (value_type(1) - v) * mesh(1, 0, 1) + v * mesh(1, 1, 1);

    // evaluating the v,w plane for v leaves the last linear interpolation in w ->
    // used to compute the first partial derivative in w
    point_t w0 = (value_type(1) - v) * vw00 + v * vw10;
    point_t w1 = (value_type(1) - v) * vw01 + v * vw11;

    // evaluating the v,w plane for w leaves the last linear interpolation in v ->
    // used to compute the first partial derivative in v
    point_t v0 = (value_type(1) - w) * vw00 + w * vw01;
    point_t v1 = (value_type(1) - w) * vw10 + w * vw11;

    // evaluating the u,w plane for w leaves the last linear interpolation in u ->
    // used to compute the first partial derivative in u
    point_t u0 = (value_type(1) - w) * uw00 + w * uw01;
    point_t u1 = (value_type(1) - w) * uw10 + w * uw11;

    // last interpolation and back projection to Euclidean space
    point = (value_type(1) - w) * w0 + w * w1;
    point.project_to_euclidian();

    // M.S. Floater '91:
    //
    //                w[0]{n-1}(t) * w[1]{n-1}(t)
    //  P'(t) = n * ----------------------------- * ( P[1]{n-1}(t) - P[0]{n-1}(t) )
    //                     (w[0]{n}(t))^2
    //
    // 1. recalculate the overwritten helping point P[0, n-1]
    // 2. project P[0, n-1] and P[1, n-1] into the plane w=1
    // 3. use the formula above to find the correct length of P'(t)
    du = (order_u - value_type(1)) *
         ((u0.weight() * u1.weight()) / (point.weight() * point.weight())) *
         (u1.as_euclidian() - u0.as_euclidian());
    dv = (order_v - value_type(1)) *
         ((v0.weight() * v1.weight()) / (point.weight() * point.weight())) *
         (v1.as_euclidian() - v0.as_euclidian());
    dw = (order_w - value_type(1)) *
         ((w0.weight() * w1.weight()) / (point.weight() * point.weight())) *
         (w1.as_euclidian() - w0.as_euclidian());
}
friend point_t<T> operator+(const point_t<T>& p, const point_t<T>& q)
{
    point_t<T> tmp = p;
    for (int i = 0; i < T; i++)
        tmp.coord(i) += q.coord(i);
    return tmp;
}
bool operator==(const point_t& a, const point_t& b)
{
    return QuasiEqual(a.x(), b.x(), margin)
        && QuasiEqual(a.y(), b.y(), margin)
        && QuasiEqual(a.z(), b.z(), margin);
}
void fill_t::checkerboard_fill(point_t p, float width, float height) {
    int minx = p.getX();
    int miny = p.getY();
    int maxx = p.getX();
    int maxy = p.getY();
    // pre-pass: computes the bounding box of the region and repaints it gray,
    // so the seed color read below is the gray marker left by find_min_max
    find_min_max(p, miny, maxy, minx, maxx, width, height);

    std::queue<point_t> fillQueue;
    fillQueue.push(p);
    float pointx = p.getX();
    float pointy = p.getY();
    color_t c = colorArray[(int)pointx][(int)pointy];
    color_t pixels;
    float trans_x, trans_y;

    while (!fillQueue.empty()) {
        p = fillQueue.front();
        pointx = p.getX();
        pointy = p.getY();
        fillQueue.pop();
        // canvas bounds check
        if (pointx >= width || pointy >= height || pointx < 0 || pointy < 0)
            continue;
        pixels = colorArray[(int)pointx][(int)pointy];
        if (pixels.getR() == c.getR() &&
            pixels.getG() == c.getG() &&
            pixels.getB() == c.getB()) {
            point_t p1(pointx, pointy);
            // translate to region-local coordinates and pick the color by the
            // parity of the 16-pixel checkerboard cell
            trans_x = pointx - minx;
            trans_y = pointy - miny;
            if ((((int)trans_x / 16) % 2) == 0) {
                if ((((int)trans_y / 16) % 2) == 0)
                    p1.draw(pen_t(color1, 1));
                else
                    p1.draw(pen_t(color2, 1));
            } else {
                if ((((int)trans_y / 16) % 2) == 0)
                    p1.draw(pen_t(color2, 1));
                else
                    p1.draw(pen_t(color1, 1));
            }
            fillQueue.push(point_t(pointx + 1, pointy));
            fillQueue.push(point_t(pointx, pointy + 1));
            fillQueue.push(point_t(pointx - 1, pointy));
            fillQueue.push(point_t(pointx, pointy - 1));
        }
    }
}
void TrainingData::addHardNegativeSamples(const std::vector<std::string> &filenamesHardNegatives,
                                          const point_t &modelWindowSize,
                                          const point_t &dataOffset)
{
    int feature_extraction_time = 0, image_loading_time = 0, tmp_time;

    const size_t
            initialNumberOfTrainingSamples = get_num_examples(),
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + filenamesHardNegatives.size();

    if (finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addHardNegativeSamples "
                                 "is trying to add more data than initially specified");
    }

    printf("\nCollecting %zi hard negative samples\n", filenamesHardNegatives.size());
    doppia::progress_display_with_eta progress_indicator(filenamesHardNegatives.size());

    meta_datum_t metaDatum;
    integral_channels_t sampleIntegralChannels;

    // integralChannelsComputer is already multithreaded, so no benefit in parallelizing this for loop
    for (size_t filenameIndex = 0; filenameIndex < filenamesHardNegatives.size(); filenameIndex += 1)
    {
        tmp_time = (int)round(omp_get_wtime());

        gil::rgb8_image_t image;
        gil::rgb8c_view_t image_view = doppia::open_image(filenamesHardNegatives[filenameIndex].c_str(), image);

        const boost::filesystem::path file_path = filenamesHardNegatives[filenameIndex];
#if BOOST_VERSION <= 104400
        const std::string filename = file_path.filename();
#else
        const std::string filename = file_path.filename().string();
#endif

        _integralChannelsComputer->set_image(image_view, filename);
        image_loading_time += (int)round(omp_get_wtime()) - tmp_time;

        tmp_time = (int)round(omp_get_wtime());
        _integralChannelsComputer->compute();
        feature_extraction_time += (int)round(omp_get_wtime()) - tmp_time;

        get_integral_channels(_integralChannelsComputer->get_integral_channels(),
                              modelWindowSize, dataOffset,
                              doppia::IntegralChannelsForPedestrians::get_shrinking_factor(),
                              sampleIntegralChannels);

        metaDatum.filename = filenamesHardNegatives[filenameIndex];
        metaDatum.imageClass = _backgroundClassLabel;
        metaDatum.x = dataOffset.x();
        metaDatum.y = dataOffset.y();

        setDatum(initialNumberOfTrainingSamples + filenameIndex,
                 metaDatum, sampleIntegralChannels);

        ++progress_indicator;
    } // end of "for each filename"

    printf("Time elapsed while loading images for hard negatives extraction: %02d:%02d:%02d\n",
           image_loading_time/3600, (image_loading_time%3600)/60, image_loading_time%60);
    printf("Time elapsed while extracting features from hard negatives: %02d:%02d:%02d\n",
           feature_extraction_time/3600, (feature_extraction_time%3600)/60, feature_extraction_time%60);
    return;
}
static POINT screen_to_client(HWND hwnd, point_t<origin_type::screen> const& src)
{
    POINT dst = src.data();
    ScreenToClient(hwnd, &dst);
    return dst;
}
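// Hedged usage note (not from the original source): the two helpers above
// wrap the Win32 pair ClientToScreen/ScreenToClient, which are mutually
// inverse mappings between client-area and screen coordinates. The raw
// round trip, assuming hwnd is a valid window handle:
void coordinateRoundTripExample(HWND hwnd) {
    POINT pt = { 10, 20 };     // client coordinates
    ClientToScreen(hwnd, &pt); // now screen coordinates
    ScreenToClient(hwnd, &pt); // back to client: { 10, 20 } again
}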