void PTools::optimize(Panorama & pano,
                      utils::MultiProgressDisplay & progDisplay,
                      int maxIter)
{
//    VariableMapVector res;
    // setup data structures
    aPrefs  aP;
    OptInfo opt;

    SetAdjustDefaults(&aP);
    AlignInfoWrap aInfo;
    // copy pano information into libpano data structures
    if (aInfo.setInfo(pano)) {
        aInfo.setGlobal();

        opt.numVars   = aInfo.gl.numParam;
        opt.numData   = aInfo.gl.numPts;
        opt.SetVarsToX = SetLMParams;
        opt.SetXToVars = SetAlignParams;
        opt.fcn       = aInfo.gl.fcn;
        *opt.message  = 0;

        DEBUG_DEBUG("starting optimizer");
        RunLMOptimizer( &opt );

#ifdef DEBUG
        fullPath path;
        StringtoFullPath(&path, "c:/debug_optimizer.txt");

        aInfo.gl.data = opt.message;
        WriteResults( "debug_test", &path, &aInfo.gl, distSquared, 0);
#endif

        std::ostringstream oss;
        /*
        oss << "optimizing images";
        for (UIntVector::const_iterator it = imgs.begin(); it != imgs.end(); ++it) {
            if (it + 1 != imgs.end()) {
                oss << *it << ",";
            } else {
                oss << *it;
            }
        }
        oss << "\n" << opt.message;
        progDisplay.setMessage(oss.str());
        */
        DEBUG_DEBUG("optimizer finished:" << opt.message);

        pano.updateVariables(aInfo.getVariables());
        pano.updateCtrlPointErrors( aInfo.getCtrlPoints());
    }
}
PTOptEstimator(PanoramaData & pano, int i1, int i2, double maxError,
               bool optHFOV, bool optB)
{
    m_maxError = maxError;

    UIntSet imgs;
    imgs.insert(i1);
    imgs.insert(i2);
    m_localPano = (pano.getNewSubset(imgs)); // don't forget to delete
    m_li1 = (i1 < i2) ? 0 : 1;
    m_li2 = (i1 < i2) ? 1 : 0;

    // get control points
    m_cps = m_localPano->getCtrlPoints();
    // only use 2D control points
    BOOST_FOREACH(ControlPoint & kp, m_cps) {
        if (kp.mode == ControlPoint::X_Y) {
            m_xy_cps.push_back(kp);
        }
    }

    if (optHFOV)
        m_optvars.push_back(OptVarSpec(0,std::string("v")));
    if (optB)
        m_optvars.push_back(OptVarSpec(0,std::string("b")));
    m_optvars.push_back(OptVarSpec(m_li2,"r"));
    m_optvars.push_back(OptVarSpec(m_li2,"p"));
    m_optvars.push_back(OptVarSpec(m_li2,"y"));
    if (optHFOV)
        m_optvars.push_back(OptVarSpec(0,"v"));
    if (optB)
        m_optvars.push_back(OptVarSpec(0,"b"));

    /** optimisation for first pass */
    m_opt_first_pass.resize(2);
    m_opt_first_pass[1].insert("r");
    m_opt_first_pass[1].insert("p");
    m_opt_first_pass[1].insert("y");

    /** optimisation for second pass */
    if (optHFOV || optB) {
        m_opt_second_pass = m_opt_first_pass;
        if (optHFOV)
            m_opt_second_pass[0].insert("v");
        if (optB)
            m_opt_second_pass[0].insert("b");
    }

    // number of points required for estimation
    m_numForEstimate = (m_optvars.size()+1)/2;

    // extract initial parameters from pano
    m_initParams.resize(m_optvars.size());
    int i=0;
    BOOST_FOREACH(OptVarSpec & v, m_optvars) {
        m_initParams[i] = v.get(*m_localPano);
        DEBUG_DEBUG("get init var: " << v.m_name << ", " << v.m_img << ": " << m_initParams[i]);
        i++;
    }
}
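// Worked example (illustrative comment, not part of the original source): each 2D
// control point contributes two residuals (x and y), so estimating N optimisation
// variables needs at least ceil(N/2) point pairs, which is what
// m_numForEstimate = (m_optvars.size()+1)/2 computes. With only r, p and y in
// m_optvars that is (3+1)/2 = 2 control points.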
void RotatePanorama::rotatePano(PanoramaData& panorama, const Matrix3& transformMat)
{
    for (unsigned int i = 0; i < panorama.getNrOfImages(); i++)
    {
        const SrcPanoImage & image = panorama.getImage(i);
        SrcPanoImage copy = image;
        double y = image.getYaw();
        double p = image.getPitch();
        double r = image.getRoll();
        Matrix3 mat;
        mat.SetRotationPT(DEG_TO_RAD(y), DEG_TO_RAD(p), DEG_TO_RAD(r));
        DEBUG_DEBUG("rotation matrix (PT) for img " << i << " ypr: " << y << " " << p << " " << r << std::endl << mat);
        Matrix3 rotated;
        rotated = transformMat * mat;
        DEBUG_DEBUG("rotation matrix after transform: " << rotated);

        rotated.GetRotationPT(y,p,r);
        y = RAD_TO_DEG(y);
        p = RAD_TO_DEG(p);
        r = RAD_TO_DEG(r);
        DEBUG_DEBUG("rotated angles of img " << i << ": " << y << " " << p << " " << r);

        // Don't update a variable linked to a variable we already updated.
        conditional_set(Yaw, y);
        conditional_set(Pitch, p);
        conditional_set(Roll, r);
        if(image.getX()!=0.0 || image.getY()!=0.0 || image.getZ()!=0.0)
        {
            // rotate translation vector
            Vector3 vecRot=transformMat.Inverse().TransformVector(Vector3(image.getZ(), image.getX(), image.getY()));
            conditional_set(X, vecRot.y);
            conditional_set(Y, vecRot.z);
            conditional_set(Z, vecRot.x);
            // rotate translation plane
            mat.SetRotationPT(DEG_TO_RAD(image.getTranslationPlaneYaw()), DEG_TO_RAD(image.getTranslationPlanePitch()), 0.0);
            rotated = transformMat * mat;
            rotated.GetRotationPT(y,p,r);
            conditional_set(TranslationPlaneYaw, RAD_TO_DEG(y));
            conditional_set(TranslationPlanePitch, RAD_TO_DEG(p));
        };
        panorama.setImage(i, copy);
        panorama.imageChanged(i);
    }
}
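// Illustration (not part of the original source): a minimal sketch of calling
// rotatePano() above with a pure yaw rotation, built the same way the per-image
// matrices are built inside the loop. It assumes the HuginBase headers are included,
// that rotatePano() can be called as a static helper, and that a PanoramaData
// instance is available; the function name rotateByYawSketch is hypothetical.
void rotateByYawSketch(HuginBase::PanoramaData & pano, double yawDeg)
{
    Matrix3 transform;
    // yaw only; pitch and roll stay zero (SetRotationPT takes yaw, pitch, roll in radians)
    transform.SetRotationPT(DEG_TO_RAD(yawDeg), 0.0, 0.0);
    HuginBase::RotatePanorama::rotatePano(pano, transform);
}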
void VertexCoordRemapper::UpdateAndResetIndex()
{
    DEBUG_DEBUG("mesh update update reset index");
    // this sets scale, height, and width.
    MeshRemapper::UpdateAndResetIndex();
    // we need to record the output projection for flipping around 180 degrees.
    output_projection = visualization_state->GetOptions()->getProjection();
    o_width = visualization_state->GetOptions()->getWidth();
    o_height = visualization_state->GetOptions()->getHeight();
    // we want to make a remapped mesh, get the transformation we need:
//    HuginBase::SrcPanoImage *src = visualization_state->GetSrcImage(image_number);
    transform.createInvTransform(*image, *(visualization_state->GetOptions()));
    // use the scale to determine edge lengths in pixels for subdivision
//    DEBUG_INFO("updating mesh for image " << image_number << ", using scale "
//               << scale << ".\n");
    // find key points used for +/- 180 degree boundary correction
    // {x|y}_add_360's are added to a value near the left/top boundary to get
    // the corresponding point over the right/bottom boundary.
    // other values are used to check where the boundary is.
    OutputProjectionInfo *info = visualization_state->GetProjectionInfo();
    x_add_360 = info->GetXAdd360();
    radius = info->GetRadius();
    y_add_360 = info->GetYAdd360();
    x_midpoint = info->GetMiddleX();
    y_midpoint = info->GetMiddleY();
    lower_bound = info->GetLowerX();
    upper_bound = info->GetUpperX();
    lower_bound_h = info->GetLowerY();
    upper_bound_h = info->GetUpperY();
    // Find the cropping region of the source image, so we can stick to the
    // bounding rectangle.
    SetCrop();
    // the tree needs to know how to use the cropping information for generating
    tree.x_crop_scale = crop_x2 - crop_x1;
    tree.x_crop_offs = crop_x1;
    tree.y_crop_scale = crop_y2 - crop_y1;
    tree.y_crop_offs = crop_y1;
    // make the tree reflect the transformation
    RecursiveUpdate(0, 0, 0);
    // now set up for examining the tree contents.
    tree.ResetIndex();
    done_node = true;
}
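// Illustration (not part of the original source): x_crop_scale/x_crop_offs and
// y_crop_scale/y_crop_offs set above describe a linear map from a normalized
// coordinate over the cropped region to source image coordinates. A minimal sketch
// of that mapping, assuming u and v lie in [0,1] (names are hypothetical):
inline void cropToSource(double u, double v,
                         double x_crop_offs, double x_crop_scale,
                         double y_crop_offs, double y_crop_scale,
                         double & x_src, double & y_src)
{
    // x_src = crop_x1 + u * (crop_x2 - crop_x1), and likewise for y
    x_src = x_crop_offs + u * x_crop_scale;
    y_src = y_crop_offs + v * y_crop_scale;
}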
void ImagesPanel::panoramaImagesChanged(HuginBase::Panorama &pano, const HuginBase::UIntSet & _imgNr)
{
    DEBUG_TRACE("");

    // update text field if selected
    const HuginBase::UIntSet & selected = m_images_tree->GetSelectedImages();
    DEBUG_DEBUG("nr of sel Images: " << selected.size());
    if (pano.getNrOfImages() == 0)
    {
        DisableImageCtrls();
        m_matchingButton->Disable();
    }
    else
    {
        m_matchingButton->Enable();
        wxTreeEvent ev;
        OnSelectionChanged(ev);
    };
    //enable/disable optimize buttons
    XRCCTRL(*this, "images_optimize", wxButton)->Enable(pano.getNrOfImages()>0);
    XRCCTRL(*this, "images_photo_optimize", wxButton)->Enable(pano.getNrOfImages()>1);
}
const double getCropFactor(Exiv2::ExifData &exifData, long width, long height)
{
    double cropFactor=0;
    // some cameras do not provide Exif.Image.ImageWidth / Length
    // notably some Olympus
    long eWidth = 0;
    _getExiv2Value(exifData,"Exif.Image.ImageWidth",eWidth);

    long eLength = 0;
    _getExiv2Value(exifData,"Exif.Image.ImageLength",eLength);

    double sensorPixelWidth = 0;
    double sensorPixelHeight = 0;
    if (eWidth > 0 && eLength > 0)
    {
        sensorPixelHeight = (double)eLength;
        sensorPixelWidth = (double)eWidth;
    }
    else
    {
        // No EXIF information, use number of pixels in image
        sensorPixelWidth = width;
        sensorPixelHeight = height;
    }

    // force landscape sensor orientation
    if (sensorPixelWidth < sensorPixelHeight )
    {
        double t = sensorPixelWidth;
        sensorPixelWidth = sensorPixelHeight;
        sensorPixelHeight = t;
    }

    DEBUG_DEBUG("sensorPixelWidth: " << sensorPixelWidth);
    DEBUG_DEBUG("sensorPixelHeight: " << sensorPixelHeight);

    // some cameras do not provide Exif.Photo.FocalPlaneResolutionUnit
    // notably some Olympus
    long exifResolutionUnits = 0;
    _getExiv2Value(exifData,"Exif.Photo.FocalPlaneResolutionUnit",exifResolutionUnits);

    float resolutionUnits= 0;
    switch (exifResolutionUnits)
    {
        case 3: resolutionUnits = 10.0f; break;  //centimeter
        case 4: resolutionUnits = 1.0f; break;   //millimeter
        case 5: resolutionUnits = .001f; break;  //micrometer
        default: resolutionUnits = 25.4f; break; //inches
    }

    DEBUG_DEBUG("Resolution Units: " << resolutionUnits);

    // some cameras do not provide Exif.Photo.FocalPlaneXResolution and
    // Exif.Photo.FocalPlaneYResolution, notably some Olympus
    float fplaneXresolution = 0;
    _getExiv2Value(exifData,"Exif.Photo.FocalPlaneXResolution",fplaneXresolution);

    float fplaneYresolution = 0;
    _getExiv2Value(exifData,"Exif.Photo.FocalPlaneYResolution",fplaneYresolution);

    float CCDWidth = 0;
    if (fplaneXresolution != 0)
    {
        CCDWidth = (float)(sensorPixelWidth / ( fplaneXresolution / resolutionUnits));
    }

    float CCDHeight = 0;
    if (fplaneYresolution != 0)
    {
        CCDHeight = (float)(sensorPixelHeight / ( fplaneYresolution / resolutionUnits));
    }

    DEBUG_DEBUG("CCDHeight:" << CCDHeight);
    DEBUG_DEBUG("CCDWidth: " << CCDWidth);

    // calc sensor dimensions if not set and 35mm focal length is available
    hugin_utils::FDiff2D sensorSize;

    if (CCDHeight > 0 && CCDWidth > 0)
    {
        // read sensor size directly.
        sensorSize.x = CCDWidth;
        sensorSize.y = CCDHeight;
        std::string exifModel;
        if(_getExiv2Value(exifData, "Exif.Image.Model", exifModel))
        {
            if (exifModel == "Canon EOS 20D")
            {
                // special case for buggy 20D camera
                sensorSize.x = 22.5;
                sensorSize.y = 15;
            }
        };
        // check if sensor size ratio and image size fit together
        double rsensor = (double)sensorSize.x / sensorSize.y;
        double rimg = (double) width / height;
        if ( (rsensor > 1 && rimg < 1) || (rsensor < 1 && rimg > 1) )
        {
            // image and sensor ratio do not match
            // swap sensor sizes
            float t;
            t = sensorSize.y;
            sensorSize.y = sensorSize.x;
            sensorSize.x = t;
        }

        DEBUG_DEBUG("sensorSize.y: " << sensorSize.y);
        DEBUG_DEBUG("sensorSize.x: " << sensorSize.x);

        cropFactor = sqrt(36.0*36.0+24.0*24.0) /
            sqrt(sensorSize.x*sensorSize.x + sensorSize.y*sensorSize.y);
        // FIXME: HACK guard against invalid image focal plane definition in EXIF metadata
        // with arbitrarily chosen limits for the crop factor (0.1 < crop < 100)
        if (cropFactor < 0.1 || cropFactor > 100)
        {
            cropFactor = 0;
        }
    }
    else
    {
        // alternative way to calculate the crop factor for Olympus cameras

        // Windows debug stuff
        // left in as example on how to get "console output"
        // written to a log file
        // freopen ("oly.log","a",stdout);
        // fprintf (stdout,"Starting Alternative crop determination\n");

        float olyFPD = 0;
        _getExiv2Value(exifData,"Exif.Olympus.FocalPlaneDiagonal",olyFPD);
        if (olyFPD > 0.0)
        {
            // Windows debug stuff
            // fprintf(stdout,"Oly_FPD:");
            // fprintf(stdout,"%f",olyFPD);
            cropFactor = sqrt(36.0*36.0+24.0*24.0) / olyFPD;
        }
        else
        {
            // for newer Olympus cameras the FocalPlaneDiagonal tag was moved into
            // equipment (sub?)-directory, so check also there
            _getExiv2Value(exifData,"Exif.OlympusEq.FocalPlaneDiagonal",olyFPD);
            if (olyFPD > 0.0)
            {
                cropFactor = sqrt(36.0*36.0+24.0*24.0) / olyFPD;
            };
        };
    };
    return cropFactor;
};
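// Illustration (not part of the original source): the crop factor computed above is
// the ratio of the 35mm full-frame diagonal, sqrt(36^2 + 24^2) ~= 43.27 mm, to the
// sensor diagonal. A self-contained sketch with a hypothetical 22.5 x 15 mm sensor
// (the Canon EOS 20D special case handled above):
#include <cmath>
#include <cstdio>

int main()
{
    const double sensorWidth  = 22.5;  // mm, example value
    const double sensorHeight = 15.0;  // mm, example value
    const double fullFrameDiag = std::sqrt(36.0*36.0 + 24.0*24.0);  // ~43.27 mm
    const double sensorDiag = std::sqrt(sensorWidth*sensorWidth + sensorHeight*sensorHeight);
    const double cropFactor = fullFrameDiag / sensorDiag;           // ~1.60
    std::printf("crop factor: %.2f\n", cropFactor);
    return 0;
}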
Matrix3 StraightenPanorama::calcStraighteningRotation(const PanoramaData& panorama)
{
    // landscape/non rotated portrait detection is not working correctly
    // should use the exif rotation tag but that's not stored anywhere currently...
    // 1: use y axis (image x axis), for normal images
    // 2: use z axis (image y axis), for portrait images rotated by 90/270 degrees
    //    (usually rotation is just stored in EXIF tag)
    std::vector<int> coord_idx;

    for (unsigned int i = 0; i < panorama.getNrOfImages(); i++)
    {
        SrcPanoImage img = panorama.getSrcImage(i);
        // BUG: need to read exif data here, since exif orientation is not
        // stored in Panorama data model
        double fl=0;
        double crop=0;
        img.readEXIF(fl, crop, false, false);
        double roll = img.getExifOrientation();
        if (roll == 90 || roll == 270 )
        {
            coord_idx.push_back(2);
        }
        else
        {
            coord_idx.push_back(1);
        }
    }

    // build covariance matrix of X
    Matrix3 cov;
    unsigned int nrOfVariableImages=0;

    for (unsigned int i = 0; i < panorama.getNrOfImages(); i++)
    {
        const SrcPanoImage & img=panorama.getImage(i);
        if(img.YawisLinked())
        {
            //only consider images which are not linked with the previous ones
            bool consider=true;
            for(unsigned int j=0; j<i; j++)
            {
                if(img.YawisLinkedWith(panorama.getImage(j)))
                {
                    consider=false;
                    break;
                };
            };
            if(!consider)
                continue;
        };
        double y = const_map_get(panorama.getImageVariables(i), "y").getValue();
        double p = const_map_get(panorama.getImageVariables(i), "p").getValue();
        double r = const_map_get(panorama.getImageVariables(i), "r").getValue();
        Matrix3 mat;
        mat.SetRotationPT(DEG_TO_RAD(y), DEG_TO_RAD(p), DEG_TO_RAD(r));
        nrOfVariableImages++;
        DEBUG_DEBUG("mat = " << mat);
        for (int j=0; j<3; j++)
        {
            for (int k=0; k<3; k++)
            {
                cov.m[j][k] += mat.m[j][coord_idx[i]] * mat.m[k][coord_idx[i]];
            }
        }
    }
    cov /= nrOfVariableImages;
    DEBUG_DEBUG("cov = " << cov);

    // calculate eigenvalues and vectors
    Matrix3 eigvectors;
    double eigval[3];
    int eigvalIdx[3];
    int maxsweep = 100;
    int maxannil = 0;
    double eps = 1e-16;

    hugin_utils::eig_jacobi(3, cov.m, eigvectors.m, eigval, eigvalIdx, &maxsweep, &maxannil, &eps);

    DEBUG_DEBUG("Eigenvectors & eigenvalues:" << std::endl
                << "V = " << eigvectors << std::endl
                << "D = [" << eigval[0] << ", " << eigval[1] << ", " << eigval[2] << " ]"
                << " idx = [" << eigvalIdx[0] << ", " << eigvalIdx[1] << ", " << eigvalIdx[2] << " ]");

    // get up vector, eigenvector with smallest eigenvalue
    Vector3 up;
    up.x = eigvectors.m[eigvalIdx[2]][0];
    up.y = eigvectors.m[eigvalIdx[2]][1];
    up.z = eigvectors.m[eigvalIdx[2]][2];

    // normalize vector
    up.Normalize();
    DEBUG_DEBUG("Up vector: up = " << up );

    double rotAngle = acos(up.Dot(Vector3(0,0,1)));
    if (rotAngle > M_PI/2)
    {
        // turn in shorter direction
        up *= -1;
        rotAngle = acos(up.Dot(Vector3(0,0,1)));
    }
    DEBUG_DEBUG("rotation Angle: " << rotAngle);

    // get rotation axis
    Vector3 rotAxis = up.Cross(Vector3(0,0,1));
    DEBUG_DEBUG("rotAxis = " << rotAxis);

    // calculate rotation matrix
    Matrix3 rotMat = GetRotationAroundU(rotAxis, -rotAngle);
    DEBUG_DEBUG("rotMat = " << rotMat);
    return rotMat;
}
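// Illustration (not part of the original source): a minimal sketch of how the
// straightening rotation computed above could be applied with
// RotatePanorama::rotatePano() from this listing. It assumes both helpers can be
// called as statics, that the HuginBase headers are included and that a
// PanoramaData instance is available; straightenSketch is a hypothetical name.
void straightenSketch(HuginBase::PanoramaData & pano)
{
    // estimate the rotation that brings the panorama's up vector onto (0,0,1)
    Matrix3 rot = HuginBase::StraightenPanorama::calcStraighteningRotation(pano);
    // apply it to all image orientations (and translation parameters where present)
    HuginBase::RotatePanorama::rotatePano(pano, rot);
}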
void ImagesPanel::OnSelectionChanged(wxTreeEvent & e)
{
    const HuginBase::UIntSet & sel = m_images_tree->GetSelectedImages();
    DEBUG_DEBUG("selected Images: " << sel.size());
    if (sel.size() == 0)
    {
        // nothing to edit
        DisableImageCtrls();
    }
    else
    {
        // enable edit
        EnableImageCtrls();
        const HuginBase::SrcPanoImage& img = m_pano->getImage(*sel.begin());
        bool identical_projection=true;
        HuginBase::SrcPanoImage::Projection proj = img.getProjection();
        double focallength = HuginBase::SrcPanoImage::calcFocalLength(img.getProjection(),
            img.getHFOV(), img.getCropFactor(), img.getSize());
        double cropFactor=img.getCropFactor();
        for (HuginBase::UIntSet::const_iterator it = sel.begin(); it != sel.end(); ++it)
        {
            const HuginBase::SrcPanoImage& img2 = m_pano->getImage(*it);
            if(proj!=img2.getProjection())
            {
                identical_projection=false;
            };
            double focallength2 = HuginBase::SrcPanoImage::calcFocalLength(img2.getProjection(),
                img2.getHFOV(), img2.getCropFactor(), img2.getSize());
            if(focallength>0 && fabs(focallength-focallength2)>0.05)
            {
                focallength=-1;
            };
            if(fabs(cropFactor-img2.getCropFactor())>0.1)
            {
                cropFactor=-1;
            };
        };

        if(identical_projection)
        {
            SelectListValue(m_lenstype, proj);
        }
        else
        {
            m_lenstype->Select(wxNOT_FOUND);
        };
        if(focallength>0)
        {
            m_focallength->SetValue(hugin_utils::doubleTowxString(focallength,m_degDigits));
        }
        else
        {
            m_focallength->Clear();
        };
        if(cropFactor>0)
        {
            m_cropfactor->SetValue(hugin_utils::doubleTowxString(cropFactor,m_degDigits));
        }
        else
        {
            m_cropfactor->Clear();
        };

        if (sel.size() == 1)
        {
            ShowImage(*(sel.begin()));
        }
        else
        {
            m_smallImgCtrl->SetBitmap(m_empty);
            m_smallImgCtrl->GetParent()->Layout();
            m_smallImgCtrl->Refresh();
        };
    }
}