/**
 * \brief Measure the local translation between the image and the base image
 *
 * Extracts a square patch of side length _patchsize centered on <where>
 * from both the argument image and the analyzer's base image, and uses
 * phase correlation to estimate the translation between the two patches.
 *
 * \param image       image to compare against the base image
 * \param where       center of the patch to analyze
 * \param _patchsize  side length of the square comparison patch, in pixels
 * \return            a Residual combining the point, the measured
 *                    translation and the correlation weight
 */
Residual	Analyzer::translation(const ConstImageAdapter<double>& image,
			const ImagePoint& where, int _patchsize) const {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "get translation at %s",
		std::string(where).c_str());

	// create the subwindow we want to look at, centered on <where>
	int	xoffset = where.x() - _patchsize / 2;
	int	yoffset = where.y() - _patchsize / 2;
	ImagePoint	patchcorner(xoffset, yoffset);
	// BUG FIX: the window width previously used <patchsize> instead of
	// the _patchsize argument, so the requested patch size was not
	// honored in the horizontal direction
	ImageRectangle	window(patchcorner,
				ImageSize(_patchsize, _patchsize));
	debug(LOG_DEBUG, DEBUG_LOG, 0, "window: %s",
		window.toString().c_str());

	// we need a phase correlator to measure the transform
	transform::PhaseCorrelator	pc(false);

	// compute the translation between the two windows
	WindowAdapter<double>	frompatch(image, window);
	WindowAdapter<double>	topatch(baseimage, window);
	std::pair<Point, double>	delta = pc(frompatch, topatch);
	Point	translation = delta.first;
	double	weight = delta.second;
	debug(LOG_DEBUG, DEBUG_LOG, 0, "%s -> %s",
		where.toString().c_str(), translation.toString().c_str());

	// add the residual to the result set
	Residual	residual(where, translation, weight);
	return residual;
}
std::vector<double> Image::getPointMaxGradientAngles(const ImagePoint& point, const int gaussKernelRadius, const Image& gradX, const Image& gradY, const Image& gaussKernel, const BorderEffectType borderEffect) const { Descriptor largeDescriptor(BIN_ROTATION_IVARIANT_ORIENTATIONS_COUNT); auto angles = std::vector<double>(); for (auto i = point.getX() - gaussKernelRadius, kernel_i = 0; i < point.getX() + gaussKernelRadius; ++i, ++kernel_i) { for (auto j = point.getY() - gaussKernelRadius, kernel_j = 0; j < point.getY() + gaussKernelRadius; ++j, ++kernel_j) { const auto dx = gradX.getValue(i, j, borderEffect); const auto dy = gradY.getValue(i, j, borderEffect); const auto angle = ImageHelper::getNormalizedAngle(atan2(dy, dx)); const auto gradLength = sqrt(dx * dx + dy * dy) * gaussKernel.get(kernel_i, kernel_j); largeDescriptor.addValueOnAngle(angle, gradLength); } } return largeDescriptor.maxOrientationInterpolatedAngles(); }
void PointSet::add_layer(CVD::Image<unsigned char>& im, Image<TooN::Vector<2, float> >& xy_lookup, double const scale, bool const use_rhips){ // Container<ImageRef> corners; // Container<int> corner_scores; Container<ImageRef> max_corners(1024); // // Tom_fast9_detect(im, barrier, 11, corners); //// __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "FAST CORNERS %d", corners.size()); // if(corners.size()>0) // { // qsort(&(corners[0]), corners.size(), sizeof(ImageRef), ImgRefCompare); // } //// __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "SORTED FAST CORNERS %d", corners.size()); // compute_scores(im, corners, barrier, corner_scores); //// __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "COMPUTED FAST CORNERS %d", corners.size()); // max_corners.clear(); // nonmax_suppression(corners, corner_scores, max_corners); // //__android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "MAX FAST CORNERS %d, FASTSCORES %d", max_corners.size(), corner_scores.size()); // // for(int i =0; i < max_corners.size(); i++) // { // __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "%d %d", max_corners[i][0], max_corners[i][1]); // } // od.detector(im, fast_barrier, max_corners, nCornersPerLayer); od.detectDistributed(im, fast_barrier, max_corners, 2); int const num_max_corners = max_corners.size(); if(use_rhips) { for(int i=0; i<num_max_corners; i++){ ImagePoint ip; ip.rbuild_from_image(im, max_corners[i], xy_lookup, scale); database.add(ip); // __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "%d %d", max_corners[i].x, max_corners[i].y); } } else{ for(int i=0; i<num_max_corners; i++){ ImagePoint ip; ip.build_from_image(im, max_corners[i], xy_lookup,scale); database.add(ip); } } }
/**
 * \brief Start an exposure on the QSI camera
 *
 * Configures binning, subframe and LED state on the camera and then
 * triggers the exposure. Any driver error is converted into a
 * BadParameter exception after the exposure has been cancelled.
 *
 * \param exposure  exposure parameters (frame, binning, time, shutter)
 * \throws BadParameter if the camera rejects the exposure parameters
 */
void	QsiCcd::startExposure(const Exposure& exposure) {
	// serialize all access to the camera
	std::unique_lock<std::recursive_mutex>	lock(_camera.mutex);
	Ccd::startExposure(exposure);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "start QSI exposure");
	try {
		// set the binning mode first; the frame coordinates sent
		// below are expressed in binned pixels
		_camera.camera().put_BinX(exposure.mode().x());
		_camera.camera().put_BinY(exposure.mode().y());

		// compute the frame size in binned pixels, as this is what
		// the QSI camera expects
		ImagePoint	origin = exposure.frame().origin()
					/ exposure.mode();
		ImageSize	size = exposure.frame().size()
					/ exposure.mode();
		ImageRectangle	frame(origin, size);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "requesting %s image",
			frame.toString().c_str());

		// set the subframe (binned coordinates)
		_camera.camera().put_NumX(size.width());
		_camera.camera().put_NumY(size.height());
		_camera.camera().put_StartX(origin.x());
		_camera.camera().put_StartY(origin.y());

		// turn off the led so it does not contaminate the exposure
		debug(LOG_DEBUG, DEBUG_LOG, 0, "turn LED off");
		_camera.camera().put_LEDEnabled(false);

		// get shutter info: only an OPEN shutter gives a light frame
		bool	light = (exposure.shutter() == Shutter::OPEN);
		_camera.camera().StartExposure(exposure.exposuretime(),
			light);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "%fsec %s exposure started",
			exposure.exposuretime(),
			(light) ? "light" : "dark");
	} catch (const std::exception& x) {
		debug(LOG_ERR, DEBUG_LOG, 0, "bad exposure parameters: %s",
			x.what());
		// make sure the camera is not left in a half-started state
		cancelExposure();
		throw BadParameter(x.what());
	}
	// check the current state of the camera
	exposureStatus();
}
/**
 * \brief Decide whether a point lies inside the rectangle of this size
 *
 * The rectangle is anchored at the origin, so a point is inside exactly
 * when both coordinates are nonnegative and strictly below the
 * width/height.
 */
bool	ImageSize::bounds(const ImagePoint& p) const {
	if (p.x() < 0) {
		return false;
	}
	if (p.x() >= _width) {
		return false;
	}
	if (p.y() < 0) {
		return false;
	}
	return p.y() < _height;
}
/**
 * \brief Compute the array offset of a point for an image of this size
 *
 * Delegates to the coordinate-based offset() overload.
 */
unsigned int	ImageSize::offset(const ImagePoint& point) const {
	const auto	px = point.x();
	const auto	py = point.y();
	return offset(px, py);
}
/**
 * \brief Test whether a point lies inside the rectangle of this size
 *
 * Delegates to the coordinate-based contains() overload.
 *
 * \param point  point to test
 */
bool	ImageSize::contains(const ImagePoint& point) const {
	const auto	px = point.x();
	const auto	py = point.y();
	return contains(px, py);
}
/**
 * \brief Scale a point down by a binning mode
 *
 * Divides each coordinate by the corresponding binning factor, yielding
 * the point's position in binned coordinates.
 */
ImagePoint	operator/(const ImagePoint& point, const Binning& binning) {
	const auto	binnedx = point.x() / binning.x();
	const auto	binnedy = point.y() / binning.y();
	return ImagePoint(binnedx, binnedy);
}
/**
 * \brief Locate the green pixel in the blue row of the Bayer cell
 *
 * Shares the red pixel's column; its row is the other row of the 2x2
 * cell, obtained by flipping the low bit of the red pixel's y coordinate.
 */
ImagePoint	MosaicType::greenb() const {
	const ImagePoint	redpos = red();
	// flip the low bit of y to move to the adjacent row
	return ImagePoint(redpos.x(), redpos.y() ^ 0x1);
}
/**
 * \brief Locate the green pixel in the red row of the Bayer cell
 *
 * Shares the red pixel's row; its column is the other column of the 2x2
 * cell, obtained by flipping the low bit of the red pixel's x coordinate.
 */
ImagePoint	MosaicType::greenr() const {
	const ImagePoint	redpos = red();
	// flip the low bit of x to move to the adjacent column
	return ImagePoint(redpos.x() ^ 0x1, redpos.y());
}