FrameProcessor::FrameProcessor(std::string intrinsicsFile, std::string extrinsicsFile, Size frameSize)
{
    // Load the stereo calibration from the supplied files.
    FileStorage intrinsics(intrinsicsFile, FileStorage::READ);
    FileStorage extrinsics(extrinsicsFile, FileStorage::READ);

    intrinsics["M1"] >> M1;
    intrinsics["M2"] >> M2;
    intrinsics["D1"] >> D1;
    intrinsics["D2"] >> D2;
    extrinsics["R1"] >> R1;
    extrinsics["R2"] >> R2;
    extrinsics["P1"] >> P1;
    extrinsics["P2"] >> P2;
    extrinsics["Q"]  >> Q;

    intrinsics.release();
    extrinsics.release();

    // Precompute the rectification maps for both cameras.
    initUndistortRectifyMap(M1, D1, R1, P1, frameSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(M2, D2, R2, P2, frameSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    // Block-matcher tuning; the commented-out lines are alternative
    // parameters left in place for experimentation.
    // BlockMatcher.state->preFilterType = CV_STEREO_BM_XSOBEL; // or CV_STEREO_BM_NORMALIZED_RESPONSE
    // BlockMatcher.state->preFilterSize = 9;
    BlockMatcher.state->preFilterCap = 45;
    BlockMatcher.state->SADWindowSize = 31;
    // BlockMatcher.state->minDisparity = 0;
    BlockMatcher.state->numberOfDisparities = 128;
    // BlockMatcher.state->textureThreshold = 10;
    // BlockMatcher.state->uniquenessRatio = 15;
    // BlockMatcher.state->speckleRange = 60;
    // BlockMatcher.state->speckleWindowSize = 20;
    // BlockMatcher.state->trySmallerWindows = 0;
    // BlockMatcher.state->roi1 = BlockMatcher.state->roi2 = cvRect(160, 120, 480, 360);
    // BlockMatcher.state->disp12MaxDiff = -1;
}
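// A minimal usage sketch; the calibration file paths and the 640x480 frame
// size are illustrative assumptions, not values mandated by the class.
// FrameProcessor processor("device/camera_properties/intrinsics.yml",
//                          "device/camera_properties/extrinsics.yml",
//                          Size(640, 480));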
struct calibrateCameraRetval calibrateCamera(
        struct TensorArray objectPoints, struct TensorArray imagePoints,
        struct SizeWrapper imageSize, struct TensorWrapper cameraMatrix,
        struct TensorWrapper distCoeffs, struct TensorArray rvecs,
        struct TensorArray tvecs, int flags, struct TermCriteriaWrapper criteria)
{
    struct calibrateCameraRetval result;
    std::vector<MatT> intrinsics(2);
    std::vector<cv::Mat> rvecs_vec, tvecs_vec;

    intrinsics[0] = cameraMatrix.toMatT();
    intrinsics[1] = distCoeffs.toMatT();
    rvecs_vec = rvecs.toMatList();
    tvecs_vec = tvecs.toMatList();

    result.retval = cv::calibrateCamera(
            objectPoints.toMatList(), imagePoints.toMatList(), imageSize,
            intrinsics[0], intrinsics[1], rvecs_vec, tvecs_vec, flags, criteria);

    // Placement-new into the return struct so the wrapper owns the tensors.
    new (&result.intrinsics) TensorArray(intrinsics);
    std::vector<MatT> rvecs_vecT = get_vec_MatT(rvecs_vec),
                      tvecs_vecT = get_vec_MatT(tvecs_vec);
    new (&result.rvecs) TensorArray(rvecs_vecT);
    new (&result.tvecs) TensorArray(tvecs_vecT);
    return result;
}
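// For reference, the wrapper above forwards to the standard OpenCV routine;
// a direct C++ call (without the Tensor bindings) looks like this sketch.
// All variable names here are illustrative.
// std::vector<std::vector<cv::Point3f>> objectPts; // one set per view
// std::vector<std::vector<cv::Point2f>> imagePts;  // detected corners per view
// cv::Mat camMat, dist;
// std::vector<cv::Mat> rvecsOut, tvecsOut;
// double rms = cv::calibrateCamera(objectPts, imagePts, imgSize,
//                                  camMat, dist, rvecsOut, tvecsOut);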
int main(int argc, char* argv[])
{
    cv::Mat rvec;
    cv::Mat tvec;

    // Physical tag edge length in meters; corners sit on a square of half
    // that size around the tag center.
    double tag_size = 0.0480000004172;
    double tag_radius = tag_size / 2.;

    std::vector<cv::Point3f> object_pts;
    std::vector<cv::Point2f> image_pts;
    object_pts.push_back(cv::Point3f(-tag_radius, -tag_radius, 0));
    object_pts.push_back(cv::Point3f( tag_radius, -tag_radius, 0));
    object_pts.push_back(cv::Point3f( tag_radius,  tag_radius, 0));
    object_pts.push_back(cv::Point3f(-tag_radius,  tag_radius, 0));

    // Detected tag corners in the image, pushed in reverse order so they
    // correspond to the object points above.
    cv::Point2f p1, p2, p3, p4;
    p1.x = 454.5; p1.y = 331.5;
    p2.x = 485.5; p2.y = 333.5;
    p3.x = 487.5; p3.y = 305.5;
    p4.x = 454.5; p4.y = 303.5;
    image_pts.push_back(p4);
    image_pts.push_back(p3);
    image_pts.push_back(p2);
    image_pts.push_back(p1);

    cv::Matx33f intrinsics(529.2945040622658, 0,                 466.96044871160075,
                           0,                 531.2834529497384, 273.2593671723483,
                           0,                 0,                 1);
    cv::Vec4f distortion_coeff(0.0676550948466241, -0.058556753440857666,
                               0.007350271107666055, -0.00817256648923586);

    // Estimate the 3D pose of the tag.
    // Methods:
    //   CV_ITERATIVE
    //     Iterative method based on Levenberg-Marquardt optimization. Finds the
    //     pose that minimizes the reprojection error, i.e. the sum of squared
    //     distances between the observed projections (image_pts) and the
    //     projected object_pts.
    //   CV_P3P
    //     Based on Gao et al., "Complete Solution Classification for the
    //     Perspective-Three-Point Problem". Requires exactly four object and
    //     image points.
    //   CV_EPNP
    //     Moreno-Noguer, Lepetit & Fua, "EPnP: Efficient Perspective-n-Point
    //     Camera Pose Estimation".
    int method = CV_ITERATIVE;
    bool use_extrinsic_guess = false; // only used by the ITERATIVE method

    cv::solvePnP(object_pts, image_pts, intrinsics, distortion_coeff,
                 rvec, tvec, use_extrinsic_guess, method);

    cv::Matx33d r;
    cv::Rodrigues(rvec, r);
    std::cout << r << std::endl;
    std::cout << tvec << std::endl;

    return 0;
}
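// A quick way to validate the recovered pose is to reproject object_pts and
// compare against the detected corners. This sketch could be appended before
// the return statement above; cv::projectPoints is standard OpenCV, while
// the mean-error printout is an illustrative addition, not part of the
// original program.
std::vector<cv::Point2f> reprojected;
cv::projectPoints(object_pts, rvec, tvec, intrinsics, distortion_coeff, reprojected);
double err = 0;
for (size_t i = 0; i < reprojected.size(); ++i) {
    cv::Point2f d = image_pts[i] - reprojected[i];
    err += std::sqrt(d.x * d.x + d.y * d.y); // per-corner pixel error
}
std::cout << "mean reprojection error: " << err / reprojected.size() << " px" << std::endl;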
HRESULT BinaryDumpReader::createFirstConnected()
{
    std::string filename = GlobalAppState::getInstance().s_BinaryDumpReaderSourceFile;

    std::cout << "Start loading binary dump" << std::endl;
    //BinaryDataStreamZLibFile inputStream(filename, false);
    BinaryDataStreamFile inputStream(filename, false);
    CalibratedSensorData sensorData;
    inputStream >> sensorData;
    std::cout << "Loading finished" << std::endl;
    std::cout << sensorData << std::endl;

    DepthSensor::init(sensorData.m_DepthImageWidth, sensorData.m_DepthImageHeight,
                      std::max(sensorData.m_ColorImageWidth, 1u),
                      std::max(sensorData.m_ColorImageHeight, 1u));
    mat4f intrinsics(sensorData.m_CalibrationDepth.m_Intrinsic);
    initializeIntrinsics(sensorData.m_CalibrationDepth.m_Intrinsic(0, 0),
                         sensorData.m_CalibrationDepth.m_Intrinsic(1, 1),
                         sensorData.m_CalibrationDepth.m_Intrinsic(0, 2),
                         sensorData.m_CalibrationDepth.m_Intrinsic(1, 2));

    m_NumFrames = sensorData.m_DepthNumFrames;
    assert(sensorData.m_ColorNumFrames == sensorData.m_DepthNumFrames ||
           sensorData.m_ColorNumFrames == 0);
    releaseData();

    // Convert depth from meters (float) to millimeters (USHORT), with rounding.
    m_DepthD16Array = new USHORT*[m_NumFrames];
    for (unsigned int i = 0; i < m_NumFrames; i++) {
        m_DepthD16Array[i] = new USHORT[getDepthWidth() * getDepthHeight()];
        for (unsigned int k = 0; k < getDepthWidth() * getDepthHeight(); k++) {
            m_DepthD16Array[i][k] = (USHORT)(sensorData.m_DepthImages[i][k] * 1000.0f + 0.5f);
        }
    }
    std::cout << "loading depth done" << std::endl;

    if (sensorData.m_ColorImages.size() > 0) {
        m_bHasColorData = true;
        // Repack color into RGBX with an opaque alpha channel.
        m_ColorRGBXArray = new BYTE*[m_NumFrames];
        for (unsigned int i = 0; i < m_NumFrames; i++) {
            m_ColorRGBXArray[i] = new BYTE[getColorWidth() * getColorHeight() * getColorBytesPerPixel()];
            for (unsigned int k = 0; k < getColorWidth() * getColorHeight(); k++) {
                const BYTE* c = (BYTE*)&(sensorData.m_ColorImages[i][k]);
                m_ColorRGBXArray[i][k * getColorBytesPerPixel() + 0] = c[0];
                m_ColorRGBXArray[i][k * getColorBytesPerPixel() + 1] = c[1];
                m_ColorRGBXArray[i][k * getColorBytesPerPixel() + 2] = c[2];
                m_ColorRGBXArray[i][k * getColorBytesPerPixel() + 3] = 255; // I don't really know why this has to be swapped...
            }
            //std::string outFile = "colorout//color" + std::to_string(i) + ".png";
            //ColorImageR8G8B8A8 image(getColorHeight(), getColorWidth(), (vec4uc*)m_ColorRGBXArray[i]);
            //FreeImageWrapper::saveImage(outFile, image);
        }
    }
    else {
        m_bHasColorData = false;
    }
    sensorData.deleteData();
    std::cout << "loading color done" << std::endl;

    return S_OK;
}
int main(int argc, char** argv)
{
    if (argc != 2) {
        printf("usage: ./borg LASERFILE\n");
        return 1;
    }
    const char* laserfile = argv[1];
    FILE* f = fopen(laserfile, "r");
    if (!f) {
        printf("couldn't open [%s]\n", laserfile);
        return 1;
    }
    FILE* out = fopen("points.txt", "w");
    int line = 0;
    const double encoder_offset = 240;

    while (!feof(f)) {
        line++;
        double laser_ang, row, col;
        if (3 != fscanf(f, "%lf %lf %lf\n", &laser_ang, &row, &col)) {
            printf("error parsing line %d\n", line);
            break;
        }
        // Decimation knob: with a modulus of 1 every line is processed;
        // raise the constant to subsample the scan.
        if ((line % 1) != 0)
            continue;

        // Chain the projection pipeline (all defined elsewhere in this
        // program): laser/encoder geometry, pixel back-projection,
        // ray-plane intersection, and camera-to-world transform.
        extrinsics(tilt, (encoder_offset - laser_ang) * M_PI / 180,
                   0, 135 * M_PI / 180, 0.48, 0.02, 0.22);
        intrinsics(col, row);
        intersect();
        camera_to_world();
        fprintf(out, "%f %f %f\n", world_point[0], world_point[1], world_point[2]);
    }
    fclose(f);
    fclose(out);
    return 0;
}
struct calibrateCameraRetval fisheye_calibrate(
        struct TensorArray objectPoints, struct TensorArray imagePoints,
        struct SizeWrapper imageSize, struct TensorWrapper K,
        struct TensorWrapper D, struct TensorArray rvecs,
        struct TensorArray tvecs, int flags, struct TermCriteriaWrapper criteria)
{
    struct calibrateCameraRetval result;
    std::vector<MatT> intrinsics(2), rvecs_vec, tvecs_vec;

    intrinsics[0] = K.toMatT();
    intrinsics[1] = D.toMatT();
    rvecs_vec = rvecs.toMatTList();
    tvecs_vec = tvecs.toMatTList();

    result.retval = fisheye::calibrate(
            objectPoints.toMatList(), imagePoints.toMatList(), imageSize,
            intrinsics[0], intrinsics[1], rvecs_vec, tvecs_vec, flags, criteria);

    new (&result.intrinsics) TensorArray(intrinsics);
    new (&result.rvecs) TensorArray(rvecs_vec);
    new (&result.tvecs) TensorArray(tvecs_vec);
    return result;
}
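// Unlike the pinhole wrapper earlier, this one forwards to
// cv::fisheye::calibrate, whose distortion model D carries four coefficients
// (k1..k4). A direct call, with illustrative variable names:
// double rms = cv::fisheye::calibrate(objectPts, imagePts, imgSize, K, D,
//                                     rvecsOut, tvecsOut, flags, criteria);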
void CUDASolverBundling::solve(EntryJ* d_correspondences, unsigned int numberOfCorrespondences,
    const int* d_validImages, unsigned int numberOfImages,
    unsigned int nNonLinearIterations, unsigned int nLinearIterations,
    const CUDACache* cudaCache,
    const std::vector<float>& weightsSparse, const std::vector<float>& weightsDenseDepth,
    const std::vector<float>& weightsDenseColor, bool usePairwiseDense,
    float3* d_rotationAnglesUnknowns, float3* d_translationUnknowns,
    bool rebuildJT, bool findMaxResidual, unsigned int revalidateIdx)
{
    nNonLinearIterations = std::min(nNonLinearIterations, (unsigned int)weightsSparse.size());
    MLIB_ASSERT(numberOfImages > 1 && nNonLinearIterations > 0);
    if (numberOfCorrespondences > m_maxCorrPerImage * m_maxNumberOfImages) {
        // Warning: correspondences will be invalidated AT RANDOM!
        std::cerr << "WARNING: #corr (" << numberOfCorrespondences << ") exceeded limit ("
                  << m_maxCorrPerImage << "*" << m_maxNumberOfImages
                  << "), please increase max #corr per image in the GAS" << std::endl;
    }

    float* convergence = NULL;
    if (m_bRecordConvergence) {
        m_convergence.resize(nNonLinearIterations + 1, -1.0f);
        convergence = m_convergence.data();
    }

    m_solverState.d_xRot = d_rotationAnglesUnknowns;
    m_solverState.d_xTrans = d_translationUnknowns;

    SolverParameters parameters = m_defaultParams;
    parameters.nNonLinearIterations = nNonLinearIterations;
    parameters.nLinIterations = nLinearIterations;
    parameters.verifyOptDistThresh = m_verifyOptDistThresh;
    parameters.verifyOptPercentThresh = m_verifyOptPercentThresh;
    parameters.highResidualThresh = std::numeric_limits<float>::infinity();
    parameters.weightSparse = weightsSparse.front();
    parameters.weightDenseDepth = weightsDenseDepth.front();
    parameters.weightDenseColor = weightsDenseColor.front();
    parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
    parameters.useDenseDepthAllPairwise = usePairwiseDense;

    SolverInput solverInput;
    solverInput.d_correspondences = d_correspondences;
    solverInput.d_variablesToCorrespondences = d_variablesToCorrespondences;
    solverInput.d_numEntriesPerRow = d_numEntriesPerRow;
    solverInput.numberOfImages = numberOfImages;
    solverInput.numberOfCorrespondences = numberOfCorrespondences;
    solverInput.maxNumberOfImages = m_maxNumberOfImages;
    solverInput.maxCorrPerImage = m_maxCorrPerImage;
    solverInput.maxNumDenseImPairs = m_maxNumDenseImPairs;
    solverInput.weightsSparse = weightsSparse.data();
    solverInput.weightsDenseDepth = weightsDenseDepth.data();
    solverInput.weightsDenseColor = weightsDenseColor.data();
    solverInput.d_validImages = d_validImages;

    if (cudaCache) {
        solverInput.d_cacheFrames = cudaCache->getCacheFramesGPU();
        solverInput.denseDepthWidth = cudaCache->getWidth(); //TODO constant buffer for this?
        solverInput.denseDepthHeight = cudaCache->getHeight();
        mat4f intrinsics = cudaCache->getIntrinsics();
        solverInput.intrinsics = make_float4(intrinsics(0, 0), intrinsics(1, 1),
                                             intrinsics(0, 2), intrinsics(1, 2));
        MLIB_ASSERT(solverInput.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor > 8); // need enough samples
    }
    else {
        solverInput.d_cacheFrames = NULL;
        solverInput.denseDepthWidth = 0;
        solverInput.denseDepthHeight = 0;
        solverInput.intrinsics = make_float4(-std::numeric_limits<float>::infinity());
    }

#ifdef NEW_GUIDED_REMOVE
    convertLiePosesToMatricesCU(m_solverState.d_xRot, m_solverState.d_xTrans,
        solverInput.numberOfImages, d_transforms, m_solverState.d_xTransformInverses); // debugging only (store transforms before opt)
#endif
#ifdef DEBUG_PRINT_SPARSE_RESIDUALS
    if (findMaxResidual) {
        float residualBefore = EvalResidual(solverInput, m_solverState, parameters, NULL);
        computeMaxResidual(solverInput, parameters, (unsigned int)-1);
        vec2ui beforeMaxImageIndices; float beforeMaxRes;
        unsigned int curFrame = (revalidateIdx == (unsigned int)-1) ? solverInput.numberOfImages - 1 : revalidateIdx;
        getMaxResidual(curFrame, d_correspondences, beforeMaxImageIndices, beforeMaxRes);
        std::cout << "\tbefore: (" << solverInput.numberOfImages << ") sumres = " << residualBefore
                  << " / " << solverInput.numberOfCorrespondences << " = "
                  << residualBefore / (float)solverInput.numberOfCorrespondences
                  << " | maxres = " << beforeMaxRes << " images (" << beforeMaxImageIndices << ")" << std::endl;
    }
#endif

    if (rebuildJT) {
        buildVariablesToCorrespondencesTable(d_correspondences, numberOfCorrespondences);
    }

    //if (cudaCache) {
    //    cudaCache->printCacheImages("debug/cache/");
    //    int a = 5;
    //}

    solveBundlingStub(solverInput, m_solverState, parameters, m_solverExtra, convergence, m_timer);

    if (findMaxResidual) {
        computeMaxResidual(solverInput, parameters, revalidateIdx);
#ifdef DEBUG_PRINT_SPARSE_RESIDUALS
        float residualAfter = EvalResidual(solverInput, m_solverState, parameters, NULL);
        vec2ui afterMaxImageIndices; float afterMaxRes;
        unsigned int curFrame = (revalidateIdx == (unsigned int)-1) ? solverInput.numberOfImages - 1 : revalidateIdx;
        getMaxResidual(curFrame, d_correspondences, afterMaxImageIndices, afterMaxRes);
        std::cout << "\tafter: (" << solverInput.numberOfImages << ") sumres = " << residualAfter
                  << " / " << solverInput.numberOfCorrespondences << " = "
                  << residualAfter / (float)solverInput.numberOfCorrespondences
                  << " | maxres = " << afterMaxRes << " images (" << afterMaxImageIndices << ")" << std::endl;
#endif
    }
}