void Picture::setAnimation(const AnimationEnum &animationEnum, const cv::InputArray &startImg, const cv::InputArray &endImg)
{
    // Discard any previous animation before building the new one.
    delete animation;

    Mat startImage = startImg.getMat();
    Mat endImage = endImg.getMat();

    // Match both endpoint images to this widget's size so the transition is well-defined.
    if (!startImg.empty() && startImg.size() != this->size())
        cv::resize(startImg, startImage, this->size());
    if (!endImg.empty() && endImg.size() != this->size())
        cv::resize(endImg, endImage, this->size());

    animation = AnimationFactory::createAnimation(animationEnum, startImage, endImage);
}
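// Caller sketch (hypothetical; AnimationEnum's values are not shown in this snippet, so
// AnimationEnum::Fade below is an assumed placeholder). The point it illustrates: endpoint
// images of any size may be passed, since setAnimation() resizes both to the widget's own
// size before handing them to the factory.
static void demoSetAnimation(Picture& picture, const cv::Mat& from, const cv::Mat& to)
{
    picture.setAnimation(AnimationEnum::Fade, from, to); // Fade is a hypothetical enum value
}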
void IPPE::PoseSolver::solveGeneric(cv::InputArray _objectPoints, cv::InputArray _imagePoints, cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
                                    cv::OutputArray _rvec1, cv::OutputArray _tvec1, float& err1, cv::OutputArray _rvec2, cv::OutputArray _tvec2, float& err2)
{
    cv::Mat normalizedImagePoints; // undistorted version of imagePoints

    if (_cameraMatrix.empty()) {
        // there is no camera matrix: the image points are already given in normalized pixel coordinates
        _imagePoints.copyTo(normalizedImagePoints);
    }
    else {
        // undistort the image points (i.e. put them in normalized pixel coordinates):
        cv::undistortPoints(_imagePoints, normalizedImagePoints, _cameraMatrix, _distCoeffs);
    }

    // solve:
    cv::Mat Ma, Mb;
    solveGeneric(_objectPoints, normalizedImagePoints, Ma, Mb);

    // the two poses computed by IPPE (sorted):
    cv::Mat M1, M2;

    // sort poses by reprojection error:
    sortPosesByReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, Ma, Mb, M1, M2, err1, err2);

    // fill outputs:
    rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
    rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);

    M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
    M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}
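// Caller sketch (hypothetical; assumes a constructed PoseSolver and a valid 3x3 intrinsic
// matrix K). IPPE returns both candidate poses of a planar target; after sorting, err1 <= err2,
// so comparing the two errors is a simple way to flag ambiguous configurations.
static void demoSolveGeneric(IPPE::PoseSolver& solver, const cv::Mat& K)
{
    std::vector<cv::Point3f> obj = { {-1,-1,0}, {1,-1,0}, {1,1,0}, {-1,1,0} };
    std::vector<cv::Point2f> img;
    // Synthetic observations: the square viewed frontally from z = 5.
    cv::projectPoints(obj, cv::Vec3d::all(0), cv::Vec3d(0, 0, 5), K, cv::noArray(), img);
    cv::Mat rvec1, tvec1, rvec2, tvec2;
    float err1 = 0.f, err2 = 0.f;
    solver.solveGeneric(obj, img, K, cv::noArray(), rvec1, tvec1, err1, rvec2, tvec2, err2);
    // A small err2/err1 ratio indicates the second pose is nearly as consistent (ambiguity).
}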
void cv::SCascade::detect(cv::InputArray _image, cv::InputArray _rois, std::vector<Detection>& objects) const
{
    // only color images are supported
    cv::Mat image = _image.getMat();
    CV_Assert(image.type() == CV_8UC3);

    Fields& fld = *fields;
    fld.calcLevels(image.size(), (float)minScale, (float)maxScale, scales);

    objects.clear();

    if (_rois.empty())
        return detectNoRoi(image, objects);

    int shr = fld.shrinkage;

    // Rasterize the ROIs into a mask on the shrunken working grid.
    cv::Mat roi = _rois.getMat();
    cv::Mat mask(image.rows / shr, image.cols / shr, CV_8UC1);
    mask.setTo(cv::Scalar::all(0));

    cv::Rect* r = roi.ptr<cv::Rect>(0);
    for (int i = 0; i < (int)roi.cols; ++i)
        cv::Mat(mask, cv::Rect(r[i].x / shr, r[i].y / shr, r[i].width / shr, r[i].height / shr)).setTo(cv::Scalar::all(1));

    // create integrals
    ChannelStorage storage(image, shr);

    typedef std::vector<Level>::const_iterator lIt;
    for (lIt it = fld.levels.begin(); it != fld.levels.end(); ++it)
    {
        const Level& level = *it;

        // we train only 3 scales.
        if (level.origScale > 2.5) break;

        for (int dy = 0; dy < level.workRect.height; ++dy)
        {
            uchar* m = mask.ptr<uchar>(dy);
            for (int dx = 0; dx < level.workRect.width; ++dx)
            {
                if (m[dx])
                {
                    storage.offset = dy * storage.step + dx;
                    fld.detectAt(dx, dy, level, storage, objects);
                }
            }
        }
    }

    if (rejCriteria != NO_REJECT) suppress(rejCriteria, objects);
}
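// Caller sketch (hypothetical): the loop above walks a single row of cv::Rect values (it reads
// ptr<cv::Rect>(0) across roi.cols), so the ROIs are packed into a 1xN matrix here. Detection
// is assumed to be the nested type named in the signature above.
static void demoSCascadeDetect(cv::SCascade& cascade, const cv::Mat& bgrFrame)
{
    // Restrict detection to the lower half of the frame; ROIs are given in full-resolution
    // pixel coordinates and are shrunk internally to the cascade's working grid.
    std::vector<cv::Rect> rois = { cv::Rect(0, bgrFrame.rows / 2, bgrFrame.cols, bgrFrame.rows / 2) };
    cv::Mat roiMat = cv::Mat(rois).reshape(0, 1); // 1xN, CV_32SC4
    std::vector<cv::SCascade::Detection> objects;
    cascade.detect(bgrFrame, roiMat, objects);
}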
cv::viz::WCloud::WCloud(cv::InputArray cloud, cv::InputArray colors, cv::InputArray normals)
{
    CV_Assert(!cloud.empty() && !colors.empty());

    // Convert the cv::Mat cloud (points + colors + normals) into VTK polydata.
    vtkSmartPointer<vtkCloudMatSource> cloud_source = vtkSmartPointer<vtkCloudMatSource>::New();
    cloud_source->SetColorCloudNormals(cloud, colors, normals);
    cloud_source->Update();

    vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
    VtkUtils::SetInputData(mapper, cloud_source->GetOutput());
    mapper->SetScalarModeToUsePointData();
#if VTK_MAJOR_VERSION < 8
    mapper->ImmediateModeRenderingOff();
#endif
    mapper->SetScalarRange(0, 255);
    mapper->ScalarVisibilityOn();

    vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
    actor->GetProperty()->SetInterpolationToFlat();
    actor->GetProperty()->BackfaceCullingOn();
    actor->SetMapper(mapper);

    WidgetAccessor::setProp(*this, actor);
}
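// Construction sketch (hypothetical data): a random 100-point cloud with per-point colors and
// constant +Z normals, displayed in a viz window. The Mat layouts (CV_32FC3 points/normals,
// CV_8UC3 colors, matching sizes) are what SetColorCloudNormals() consumes above.
static void demoWCloud()
{
    cv::Mat points(1, 100, CV_32FC3), colors(1, 100, CV_8UC3), normals(1, 100, CV_32FC3);
    cv::randu(points, cv::Scalar::all(-1.0), cv::Scalar::all(1.0));
    cv::randu(colors, cv::Scalar::all(0), cv::Scalar::all(255));
    normals.setTo(cv::Scalar(0, 0, 1));

    cv::viz::Viz3d window("cloud demo");
    window.showWidget("cloud", cv::viz::WCloud(points, colors, normals));
    window.spin();
}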
void RadiometricResponse::directMap(cv::InputArray _E, cv::OutputArray _I) const
{
    if (_E.empty())
    {
        _I.clear();
        return;
    }
    auto E = _E.getMat();
    _I.create(_E.size(), CV_8UC3);
    auto I = _I.getMat();
#if CV_MAJOR_VERSION > 2
    E.forEach<cv::Vec3f>(
        [&I, this](cv::Vec3f& v, const int* p) { I.at<cv::Vec3b>(p[0], p[1]) = inverseLUT(response_channels_, v); });
#else
    for (int i = 0; i < E.rows; i++)
        for (int j = 0; j < E.cols; j++)
            I.at<cv::Vec3b>(i, j) = inverseLUT(response_channels_, E.at<cv::Vec3f>(i, j));
#endif
}
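// Caller sketch (hypothetical; how the RadiometricResponse instance is constructed or loaded
// is not shown in this snippet): directMap() converts a floating-point irradiance image into
// an 8-bit brightness image via the inverse response LUT.
static cv::Mat demoDirectMap(const RadiometricResponse& response, const cv::Mat& irradiance)
{
    CV_Assert(irradiance.type() == CV_32FC3); // directMap() iterates cv::Vec3f pixels
    cv::Mat brightness;
    response.directMap(irradiance, brightness); // brightness comes out as CV_8UC3
    return brightness;
}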
void IPPE::PoseSolver::evalReprojError(cv::InputArray _objectPoints, cv::InputArray _imagePoints, cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs, cv::InputArray _M, float& err)
{
    cv::Mat projectedPoints;
    cv::Mat imagePoints = _imagePoints.getMat();
    cv::Mat r;
    rot2vec(_M.getMat().colRange(0, 3).rowRange(0, 3), r);

    if (_cameraMatrix.empty()) {
        // there is no camera matrix and image points are in normalized pixel coordinates,
        // so project with identity intrinsics and no distortion:
        cv::Mat K = cv::Mat::eye(3, 3, CV_64FC1);
        cv::Mat kc;
        cv::projectPoints(_objectPoints, r, _M.getMat().colRange(3, 4).rowRange(0, 3), K, kc, projectedPoints);
    }
    else {
        cv::projectPoints(_objectPoints, r, _M.getMat().colRange(3, 4).rowRange(0, 3), _cameraMatrix, _distCoeffs, projectedPoints);
    }

    err = 0;
    size_t n = _objectPoints.rows() * _objectPoints.cols();

    float dx, dy;
    for (size_t i = 0; i < n; i++) {
        if (projectedPoints.depth() == CV_32F) {
            dx = projectedPoints.at<Vec2f>(i)[0] - imagePoints.at<Vec2f>(i)[0];
            dy = projectedPoints.at<Vec2f>(i)[1] - imagePoints.at<Vec2f>(i)[1];
        }
        else {
            dx = projectedPoints.at<Vec2d>(i)[0] - imagePoints.at<Vec2d>(i)[0];
            dy = projectedPoints.at<Vec2d>(i)[1] - imagePoints.at<Vec2d>(i)[1];
        }
        err += dx * dx + dy * dy;
    }
    // RMS over both coordinates of all n points:
    err = sqrt(err / (2.0f * n));
}
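// Caller sketch (hypothetical; assumes evalReprojError is callable from your code): a pose
// built to exactly reproduce the observations should yield an error near zero.
static float demoReprojError(IPPE::PoseSolver& solver, const cv::Mat& K)
{
    // Synthetic 4x4 pose [R|t] with identity rotation and z = 5 translation.
    cv::Mat M = cv::Mat::eye(4, 4, CV_64FC1);
    M.at<double>(2, 3) = 5.0;

    std::vector<cv::Point3f> obj = { {-1,-1,0}, {1,-1,0}, {1,1,0}, {-1,1,0} };
    std::vector<cv::Point2f> img;
    cv::projectPoints(obj, cv::Vec3d::all(0), cv::Vec3d(0, 0, 5), K, cv::noArray(), img);

    float err = 0.f;
    solver.evalReprojError(obj, img, K, cv::noArray(), M, err); // expect err ~ 0
    return err;
}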
void unprojectPointsFisheye(cv::InputArray distorted, cv::OutputArray undistorted, cv::InputArray K, cv::InputArray D, cv::InputArray R, cv::InputArray P)
{
    // will support only 2-channel data now for points
    CV_Assert(distorted.type() == CV_32FC2 || distorted.type() == CV_64FC2);
    undistorted.create(distorted.size(), CV_MAKETYPE(distorted.depth(), 3));

    CV_Assert(P.empty() || P.size() == cv::Size(3, 3) || P.size() == cv::Size(4, 3));
    CV_Assert(R.empty() || R.size() == cv::Size(3, 3) || R.total() * R.channels() == 3);
    CV_Assert(D.total() == 4 && K.size() == cv::Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F));

    cv::Vec2d f, c;
    if (K.depth() == CV_32F)
    {
        cv::Matx33f camMat = K.getMat();
        f = cv::Vec2f(camMat(0, 0), camMat(1, 1));
        c = cv::Vec2f(camMat(0, 2), camMat(1, 2));
    }
    else
    {
        cv::Matx33d camMat = K.getMat();
        f = cv::Vec2d(camMat(0, 0), camMat(1, 1));
        c = cv::Vec2d(camMat(0, 2), camMat(1, 2));
    }

    cv::Vec4d k = D.depth() == CV_32F ? (cv::Vec4d)*D.getMat().ptr<cv::Vec4f>() : *D.getMat().ptr<cv::Vec4d>();

    cv::Matx33d RR = cv::Matx33d::eye();
    if (!R.empty() && R.total() * R.channels() == 3)
    {
        cv::Vec3d rvec;
        R.getMat().convertTo(rvec, CV_64F);
        RR = cv::Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == cv::Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);

    if (!P.empty())
    {
        cv::Matx33d PP;
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
        RR = PP * RR;
    }

    // start undistorting
    const cv::Vec2f* srcf = distorted.getMat().ptr<cv::Vec2f>();
    const cv::Vec2d* srcd = distorted.getMat().ptr<cv::Vec2d>();
    cv::Vec3f* dstf = undistorted.getMat().ptr<cv::Vec3f>();
    cv::Vec3d* dstd = undistorted.getMat().ptr<cv::Vec3d>();

    size_t n = distorted.total();
    int sdepth = distorted.depth();

    for (size_t i = 0; i < n; i++)
    {
        cv::Vec2d pi = sdepth == CV_32F ? (cv::Vec2d)srcf[i] : srcd[i]; // image point
        cv::Vec2d pw((pi[0] - c[0]) / f[0], (pi[1] - c[1]) / f[1]);     // normalized (distorted) point

        double theta_d = sqrt(pw[0] * pw[0] + pw[1] * pw[1]);
        double theta = theta_d;
        if (theta_d > 1e-8)
        {
            // compensate distortion iteratively: invert theta_d = theta * (1 + k0*theta^2 + ...)
            for (int j = 0; j < 10; j++)
            {
                double theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta6 * theta2;
                theta = theta_d / (1 + k[0] * theta2 + k[1] * theta4 + k[2] * theta6 + k[3] * theta8);
            }
        }

        // Scale the distorted radial direction to sin(theta) so pu is a unit-norm ray;
        // pw has magnitude theta_d, hence the division.
        double scale = (theta_d > 1e-8) ? std::sin(theta) / theta_d : 1.0;
        cv::Vec3d pu(pw[0] * scale, pw[1] * scale, std::cos(theta)); // undistorted point

        // reproject
        cv::Vec3d pr = RR * pu; // rotated point optionally multiplied by new camera matrix
        cv::Vec3d fi;           // final
        normalize(pr, fi);

        if (sdepth == CV_32F)
            dstf[i] = fi;
        else
            dstd[i] = fi;
    }
}
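// Round-trip sketch (hypothetical values): an equidistant fisheye with zero distortion
// coefficients and no rectification (R and P left empty). The principal point unprojects to
// the unit ray (0, 0, 1); every output row is a unit-norm viewing ray.
static void demoUnprojectFisheye()
{
    cv::Matx33d K(300, 0, 320,
                  0, 300, 240,
                  0, 0, 1);
    cv::Vec4d D(0, 0, 0, 0);
    std::vector<cv::Point2d> pixels = { {320, 240}, {400, 300} };
    cv::Mat rays; // comes out as CV_64FC3
    unprojectPointsFisheye(pixels, rays, cv::Mat(K), cv::Mat(D), cv::noArray(), cv::noArray());
}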
bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
{
    mfxStatus res;
    mfxFrameSurface1 *workSurface = 0;
    mfxSyncPoint sync;

    if (!bgr.empty() && (bgr.dims() != 2 || bgr.type() != CV_8UC3 || bgr.size() != frameSize))
    {
        MSG(cerr << "MFX: invalid frame passed to encoder: "
                 << "dims/depth/cn=" << bgr.dims() << "/" << bgr.depth() << "/" << bgr.channels()
                 << ", size=" << bgr.size() << endl);
        return false;
    }
    if (!bgr.empty())
    {
        workSurface = pool->getFreeSurface();
        if (!workSurface)
        {
            // not enough surfaces
            MSG(cerr << "MFX: Failed to get free surface" << endl);
            return false;
        }
        const int rows = workSurface->Info.Height;
        const int cols = workSurface->Info.Width;
        Mat Y(rows, cols, CV_8UC1, workSurface->Data.Y, workSurface->Data.Pitch);
        Mat UV(rows / 2, cols, CV_8UC1, workSurface->Data.UV, workSurface->Data.Pitch);
        to_nv12(bgr, Y, UV);
        CV_Assert(Y.ptr() == workSurface->Data.Y);
        CV_Assert(UV.ptr() == workSurface->Data.UV);
    }

    while (true)
    {
        outSurface = 0;
        DBG(cout << "Calling with surface: " << workSurface << endl);
        res = encoder->EncodeFrameAsync(NULL, workSurface, &bs->stream, &sync);
        if (res == MFX_ERR_NONE)
        {
            res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify timeout
            if (res == MFX_ERR_NONE)
            {
                // ready to write
                if (!bs->write())
                {
                    MSG(cerr << "MFX: Failed to write bitstream" << endl);
                    return false;
                }
                else
                {
                    DBG(cout << "Write bitstream" << endl);
                    return true;
                }
            }
            else
            {
                MSG(cerr << "MFX: Sync error: " << res << endl);
                return false;
            }
        }
        else if (res == MFX_ERR_MORE_DATA)
        {
            DBG(cout << "ERR_MORE_DATA" << endl);
            return false;
        }
        else if (res == MFX_WRN_DEVICE_BUSY)
        {
            DBG(cout << "Waiting for device" << endl);
            sleep(1);
            continue;
        }
        else
        {
            MSG(cerr << "MFX: Bad status: " << res << endl);
            return false;
        }
    }
}
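// Caller sketch (hypothetical; assumes write_one() is reachable from the calling code and the
// writer is already opened): an empty input makes EncodeFrameAsync run with a NULL surface,
// which drains buffered frames until MFX_ERR_MORE_DATA ends the loop (write_one returns false).
static void demoWriteAndDrain(VideoWriter_IntelMFX& writer, const std::vector<cv::Mat>& frames)
{
    for (const cv::Mat& f : frames)
        writer.write_one(f); // encode one BGR frame

    while (writer.write_one(cv::noArray())) // flush frames still queued in the encoder
        ;
}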
void savePointCloud(const std::string file, cv::InputArray point_cloud, const bool is_binary, cv::InputArray color_image, const bool remove_miss_point)
{
    const cv::Mat points = point_cloud.getMat();
    const bool color_cloud = (!color_image.empty() && (point_cloud.size() == color_image.size()));
    if (points.empty()) return;
    // The loops below read one cv::Vec3f per pixel, so the cloud must be a CV_32FC3 image.
    CV_Assert(points.type() == CV_32FC3);

    if (color_cloud)
    {
        const cv::Mat im = color_image.getMat();
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
        for (int row = 0; row < points.rows; ++row)
        {
            const auto ptr = points.ptr<cv::Vec3f>(row);
            const auto ptr_image = im.ptr<cv::Vec3b>(row);
            for (int col = 0; col < points.cols; ++col)
            {
                const auto val = ptr[col];
                const auto color = ptr_image[col];
                // Optionally drop missing points (depth sentinel >= 9999.9f).
                if (!remove_miss_point || val[2] < 9999.9f)
                {
                    pcl::PointXYZRGB p;
                    p.x = val[0];
                    p.y = val[1];
                    p.z = val[2];
                    // OpenCV images are BGR by default, while PCL stores r/g/b.
                    p.b = color[0];
                    p.g = color[1];
                    p.r = color[2];
                    cloud->push_back(p);
                }
            }
        }
        if (cloud->empty()) return;
        if (is_binary) pcl::io::savePLYFileBinary(file, *cloud);
        else pcl::io::savePLYFileASCII(file, *cloud);
    }
    else
    {
        pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
        for (int row = 0; row < points.rows; ++row)
        {
            const auto ptr = points.ptr<cv::Vec3f>(row);
            for (int col = 0; col < points.cols; ++col)
            {
                const auto val = ptr[col];
                if (!remove_miss_point || val[2] < 9999.9f)
                    cloud->push_back(pcl::PointXYZ(val[0], val[1], val[2]));
            }
        }
        if (cloud->empty()) return;
        if (is_binary) pcl::io::savePLYFileBinary(file, *cloud);
        else pcl::io::savePLYFileASCII(file, *cloud);
    }
}
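// Usage sketch (hypothetical data): a 2x2 organized cloud with a matching BGR color image,
// where one point carries the >= 9999.9f depth sentinel and is dropped because
// remove_miss_point is true; the result is written as a binary PLY.
static void demoSavePointCloud()
{
    cv::Mat cloud(2, 2, CV_32FC3, cv::Scalar(0.f, 0.f, 1.f));
    cloud.at<cv::Vec3f>(1, 1) = cv::Vec3f(0.f, 0.f, 10000.f); // missing point
    cv::Mat colors(2, 2, CV_8UC3, cv::Scalar(255, 0, 0));     // blue in BGR
    savePointCloud("cloud.ply", cloud, /*is_binary=*/true, colors, /*remove_miss_point=*/true);
}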