void Na2DViewer::paintCrosshair(QPainter& painter)
{
    // Draw the focus crosshair in two passes: a thick black pass first,
    // then a thin pale-yellow pass on top, so the marker stays visible
    // against both light and dark image content.
    const float scale = defaultScale * cameraModel.scale();
    QBrush outlineBrush(Qt::black);
    QBrush innerBrush(QColor(255, 255, 180));
    QPen pens[2] = { QPen(outlineBrush, 2.0/scale), QPen(innerBrush, 1.0/scale) };

    // Q: Why all this complicated math instead of just [width()/2, height()/2]?
    // A: This helps debug/document placement of image focus.
    // The focus point is mirrored about the pixel-centered image midpoint
    // when the view is flipped along X and/or Y.
    const qreal w2 = (pixmap.width() - 1.0) / 2.0;  // origin at pixel center, not corner
    const qreal h2 = (pixmap.height() - 1.0) / 2.0; // origin at pixel center, not corner
    const QPointF f(w2 + flip_X * (cameraModel.focus().x() - w2) + 0.5,
                    h2 + flip_Y * (cameraModel.focus().y() - h2) + 0.5);

    // Each crosshair arm runs from 4 to 10 screen pixels away from the center
    // (divided by scale so the size is constant on screen).
    const QPointF dx1(4.0 / scale, 0);
    const QPointF dy1(0, 4.0 / scale);
    const QPointF dx2(10.0 / scale, 0); // crosshair size is ten pixels
    const QPointF dy2(0, 10.0 / scale);

    for (int pass = 0; pass < 2; ++pass) {
        painter.setPen(pens[pass]);
        painter.drawLine(f + dx1, f + dx2);
        painter.drawLine(f - dx1, f - dx2);
        painter.drawLine(f + dy1, f + dy2);
        painter.drawLine(f - dy1, f - dy2);
    }
}
// Precompute the bicubic interpolation coefficient table from grid values ff.
// Each Value in ff must carry the function value plus its two first
// derivatives (d/dx1, d/dx2) at one grid point.  For every grid cell the
// 4x4 coefficient matrix produced by IBicCoeff is packed into clist,
// 16 doubles per cell, indexed by (cell linear index)*16.
void InterpolateBicubic::set_table( const std::vector<Value>& ff ){
  plumed_assert( getNumberOfSplinePoints()==ff.size() );
  plumed_assert( ff[0].getNumberOfDerivatives()==2 );

  // Cross derivatives d2f/dx1dx2 by central differences on interior points.
  // Boundary rows/columns of dcross stay at the 0.0 set here.
  dcross=0.0; unsigned iplus, iminus;
  for(unsigned i=1;i<np[0]-1;++i){
    iplus=(i+1)*stride[0]; iminus=(i-1)*stride[0];
    for(unsigned j=1;j<np[1]-1;++j){
      dcross(i,j) = ( ff[iplus+j+1].get() + ff[iminus+j-1].get() -
                      ff[iplus+j-1].get() - ff[iminus+j+1].get() ) /
                    getCrossTermDenominator( i, j );
    }
  }

  double d1, d2;
  Matrix<double> tc(4,4);
  std::vector<double> y(4), dy1(4), dy2(4), d2y12(4);
  unsigned pij=0; unsigned ipos;
  // Loop over grid cells; (i,j) is the cell's lower corner.
  for (unsigned i=0;i<np[0]-1;++i){
    ipos=i*stride[0];
    d1 = getPointSpacing( 0, i );       // cell extent along dimension 0
    for (unsigned j=0; j<np[1]-1;++j){
      d2 = getPointSpacing( 1, j );     // cell extent along dimension 1
      // Gather values at the four cell corners, counter-clockwise:
      // (i,j), (i+1,j), (i+1,j+1), (i,j+1).
      y[0] = ff[ipos+j].get(); y[1] = ff[ipos+stride[0]+j].get();
      y[2] = ff[ipos+stride[0]+j+1].get(); y[3] = ff[ipos+j+1].get();
      // First derivatives along dimension 0 at the same four corners.
      dy1[0] = ff[ipos+j].getDerivative(0); dy1[1] = ff[ipos+stride[0]+j].getDerivative(0);
      dy1[2] = ff[ipos+stride[0]+j+1].getDerivative(0); dy1[3] = ff[ipos+j+1].getDerivative(0);
      // First derivatives along dimension 1.
      dy2[0] = ff[ipos+j].getDerivative(1); dy2[1] = ff[ipos+stride[0]+j].getDerivative(1);
      dy2[2] = ff[ipos+stride[0]+j+1].getDerivative(1); dy2[3] = ff[ipos+j+1].getDerivative(1);
      // Cross derivatives computed above.
      d2y12[0] = dcross( i, j ); d2y12[1] = dcross( i+1, j );
      d2y12[2] = dcross( i+1, j+1 ); d2y12[3] = dcross( i, j+1 );
      // Solve for the 16 bicubic coefficients of this cell ...
      IBicCoeff( y, dy1, dy2, d2y12, d1, d2, tc);
      // ... and pack them row-major into clist at the cell's slot.
      pij=( ipos+j )*16;
      for(unsigned k=0; k<4; ++k){
        for(unsigned n=0; n<4; ++n){ clist[pij++]=tc(k,n); }
      }
    }
  }
}
// Return the 3x3 Hessian (matrix of second partial derivatives) of the
// implicit function evaluated at v.  The matrix is symmetric, so each
// mixed partial is evaluated once and mirrored across the diagonal.
gmMatrix3 Algebraic::hess(const gmVector3 & v)
{
    const double xy = dxdy(v);
    const double xz = dxdz(v);
    const double yz = dydz(v);
    const double xx = dx2(v);
    const double yy = dy2(v);
    const double zz = dz2(v);
    return gmMatrix3(xx, xy, xz,
                     xy, yy, yz,
                     xz, yz, zz);
}
bool TextureBoostedSaturatedGradientDataTest(bool create, int width, int height, const Func1 & f) { bool result = true; Data data(f.description); TEST_LOG_SS(Info, (create ? "Create" : "Verify") << " test " << f.description << " [" << width << ", " << height << "]."); View src(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dx1(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dy1(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dx2(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dy2(width, height, View::Gray8, NULL, TEST_ALIGN(width)); const int saturation = 16, boost = 4; if(create) { FillRandom(src); TEST_SAVE(src); f.Call(src, saturation, boost, dx1, dy1); TEST_SAVE(dx1); TEST_SAVE(dy1); } else { TEST_LOAD(src); TEST_LOAD(dx1); TEST_LOAD(dy1); f.Call(src, saturation, boost, dx2, dy2); TEST_SAVE(dx2); TEST_SAVE(dy2); result = result && Compare(dx1, dx2, 0, true, 32, 0, "dx"); result = result && Compare(dy1, dy2, 0, true, 32, 0, "dy"); } return result; }
bool TextureBoostedSaturatedGradientAutoTest(int width, int height, int saturation, int boost, const Func1 & f1, const Func1 & f2) { bool result = true; TEST_LOG_SS(Info, "Test " << f1.description << " & " << f2.description << " [" << width << ", " << height << "] <" << saturation << ", " << boost << ">."); View src(width, height, View::Gray8, NULL, TEST_ALIGN(width)); FillRandom(src); View dx1(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dy1(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dx2(width, height, View::Gray8, NULL, TEST_ALIGN(width)); View dy2(width, height, View::Gray8, NULL, TEST_ALIGN(width)); TEST_EXECUTE_AT_LEAST_MIN_TIME(f1.Call(src, saturation, boost, dx1, dy1)); TEST_EXECUTE_AT_LEAST_MIN_TIME(f2.Call(src, saturation, boost, dx2, dy2)); result = result && Compare(dx1, dx2, 0, true, 32, 0, "dx"); result = result && Compare(dy1, dy2, 0, true, 32, 0, "dy"); return result; }
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////

// Build a (multi-scale, nonlinear) structure tensor from a 3-channel image.
// For each color channel: compute central-difference gradients, accumulate
// the single-scale tensor components (dx*dx, dy*dy, dx*dy), and - when
// m_levels > 1 - add wavelet-detail contributions for the coarser scales.
// If isComputeGradient is set, per-scale summed gradients are also stored
// in m_gradient; otherwise m_gradient is left NULL.
Tensor::Tensor(const IplImage *cv_image, BOOL isComputeGradient)
{
    // Keep a private 3-channel copy of the input image.
    m_img=cvCreateImage(cvSize(cv_image->width,cv_image->height),cv_image->depth,3);
    cvCopyImage(cv_image,m_img);

    // Parameters of the nonlinear multi-scale structure tensor.
    m_levels = 2;
    ASSERT(m_levels > 0 );
    m_dim = m_levels * SiNGLE_TENSOR_DIM;   // SiNGLE_TENSOR_DIM components per scale
    // SiNGLE_TENSOR_DIM = n(n+1)/2; solving for n gives the number of axes.
    m_axes_cnt = (unsigned int)(sqrt(2 * SiNGLE_TENSOR_DIM + 0.25) - 0.5); // 2
    m_grad_dim = m_levels * m_axes_cnt;     // gradient components across all scales

    ////////////////////////////////////////////////////////////////////////////
    // Split the multi-channel image into single channels (3 channels assumed).
    unsigned int x,y,i,n;
    m_w = cv_image->width;
    m_h = cv_image->height;
    IplImage *cv_channels[3];
    for (n = 0;n < 3;n++)
    {
        cv_channels[n] = cvCreateImage( cvGetSize(cv_image), cv_image->depth, 1 );
    }
    cvSplit(cv_image, cv_channels[0], cv_channels[1], cv_channels[2], NULL);

    ////////////////////////////////////////////////////////////////////////////
    // Allocate m_tensor; CMatrix(m_h,m_w) creates a zero-initialized matrix.
    m_tensor = new CMatrix *[m_dim];
    for (i=0;i<m_dim;i++)
    {
        m_tensor[i] = new CMatrix(m_h,m_w);
    }
    ////////////////////////////////////////////////////////////////////////////
    // Allocate per-scale RGB images used to store each scale's tensor as color.
    m_pImageTensorRGB=new Image<Color_RGB> *[m_levels];
    for (i=0;i<m_levels;i++)
    {
        m_pImageTensorRGB[i] = new Image<Color_RGB> (m_w,m_h);
    }
    // Allocate m_gradient only when gradients were requested.
    if (isComputeGradient)
    {
        m_gradient = new CMatrix *[m_grad_dim];
        for (i=0;i<m_grad_dim;i++)
        {
            m_gradient[i] = new CMatrix(m_h,m_w);
        }
    }
    else
    {
        m_gradient = NULL;
    }
    // Scratch matrices reused across channels.
    CMatrix image(m_h, m_w);
    CMatrix dx(m_h,m_w);
    CMatrix dy(m_h,m_w);
    CMatrix dx2(m_h,m_w);
    CMatrix dy2(m_h,m_w);
    CMatrix dxdy(m_h,m_w);
    // CvMat headers wrapping the scratch matrices' data (no copies made).
    CvMat cv_dx2 = cvMat(m_h, m_w, CV_64FC1, dx2.GetData());
    CvMat cv_dy2 = cvMat(m_h, m_w, CV_64FC1, dy2.GetData());
    CvMat cv_dxdy =cvMat(m_h, m_w, CV_64FC1, dxdy.GetData());

    // Convert each IplImage channel to CMatrix and process it independently.
    for (n = 0;n <3;n++)    // n is the channel index, 3 channels assumed
    {
        // Copy the channel's pixels into the scratch matrix `image`.
        for (y = 0; y < m_h; y++)
        {
            for (x = 0; x < m_w; x++)
            {
                uchar* dst = &CV_IMAGE_ELEM( cv_channels[n], uchar, y, x );
                image.SetElement(y, x, (double)(dst[0]));
            }
        }
        // Central-difference gradients of this channel along x and y.
        image.centdiffX(dx);
        image.centdiffY(dy);
        // Wrap dx, dy as CvMat headers for the cv* arithmetic below.
        CvMat cv_dx = cvMat(m_h, m_w, CV_64FC1, dx.GetData());
        CvMat cv_dy = cvMat(m_h, m_w, CV_64FC1, dy.GetData());
        // Headers over the level-0 tensor components (still zero on first pass).
        CvMat cv_tensor0 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[0])->GetData());
        CvMat cv_tensor1 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[1])->GetData());
        CvMat cv_tensor2 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[2])->GetData());
        // Accumulate the per-channel gradients into m_gradient[0], m_gradient[1].
        if (isComputeGradient)
        {
            CvMat cv_gradX = cvMat(m_h, m_w, CV_64FC1, (m_gradient[0])->GetData());
            CvMat cv_gradY = cvMat(m_h, m_w, CV_64FC1, (m_gradient[1])->GetData());
            cvAdd(&cv_gradX, &cv_dx, &cv_gradX);    // sum over the three channels
            cvAdd(&cv_gradY, &cv_dy, &cv_gradY);
        }
        // Structure tensor: tensor0 += dx*dx, tensor1 += dy*dy, tensor2 += dx*dy.
        cvMul(&cv_dx, &cv_dx, &cv_dx2);
        cvAdd(&cv_tensor0, &cv_dx2, &cv_tensor0);
        cvMul(&cv_dy, &cv_dy, &cv_dy2);
        cvAdd(&cv_tensor1, &cv_dy2, &cv_tensor1);
        cvMul(&cv_dx, &cv_dy, &cv_dxdy);
        cvAdd(&cv_tensor2, &cv_dxdy, &cv_tensor2);
        // Single-scale part done; below is the multi-scale nonlinear tensor.
        if (m_levels > 1)
        {
            // -1 because level 0 was already handled above (no m_levels==1 branch).
            unsigned int wavelet_levels = m_levels - 1;
            double dMaxValue,dMinValue;
            cvMinMaxLoc(cv_channels[n], &dMinValue, &dMaxValue); // finds global minimum, maximum
            // The wavelet transform normalizes pixel values to [0,1] internally
            // (range passed via dMinValue/dMaxValue).
            Wavelet *wave = new Wavelet(&image, dMinValue, dMaxValue, wavelet_levels);
            // Detail-image pair (horizontal/vertical) per wavelet level.
            WaveletDetailImages *D_images = new WaveletDetailImages[wavelet_levels];
            for (i = 0; i < wavelet_levels; i++)
            {
                D_images[i].Detail_1 = new CMatrix(m_h, m_w);
                D_images[i].Detail_2 = new CMatrix(m_h, m_w);
            }
            wave->execute(D_images);    // produces D(s,x), D(s,y)
            for (i = 0; i < wavelet_levels; i++)
            {
                // Scale factor for level i+1; corresponds to ratio a=2
                // in the multi-scale tensor formulation (eq. 2-15).
                double scale = pow((float)0.25, (int)(i + 1));
                CvMat cv_dx = cvMat(m_h, m_w, CV_64FC1, D_images[i].Detail_1->GetData());
                CvMat cv_dy = cvMat(m_h, m_w, CV_64FC1, D_images[i].Detail_2->GetData());
                CvMat cv_tensor0 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM])->GetData());
                CvMat cv_tensor1 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM + 1])->GetData());
                CvMat cv_tensor2 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM + 2])->GetData());
                // Accumulate per-scale gradients.
                if (isComputeGradient)
                {
                    CvMat cv_gradX = cvMat(m_h, m_w, CV_64FC1, (m_gradient[(i+1) * m_axes_cnt])->GetData());
                    CvMat cv_gradY = cvMat(m_h, m_w, CV_64FC1, (m_gradient[(i+1) * m_axes_cnt + 1])->GetData());
                    cvAdd(&cv_gradX, &cv_dx, &cv_gradX);
                    cvAdd(&cv_gradY, &cv_dy, &cv_gradY);
                }
                // Accumulate scaled tensor components for this wavelet level
                // (cvMul's 4th argument multiplies the product by `scale`).
                cvMul(&cv_dx, &cv_dx, &cv_dx2, scale);
                cvAdd(&cv_tensor0, &cv_dx2, &cv_tensor0);
                cvMul(&cv_dy, &cv_dy, &cv_dy2, scale);
                cvAdd(&cv_tensor1, &cv_dy2, &cv_tensor1);
                cvMul(&cv_dx, &cv_dy, &cv_dxdy, scale);
                cvAdd(&cv_tensor2, &cv_dxdy, &cv_tensor2);
            }
            // Release per-channel wavelet scratch storage.
            for (i = 0; i < wavelet_levels; i++)
            {
                delete D_images[i].Detail_1;
                delete D_images[i].Detail_2;
            }
            delete [] D_images;
            delete wave;
        }
        cvReleaseImage(&cv_channels[n]);
    }
    // Store each scale's tensor components as an RGB image
    // (r,g,b <- tensor components 0,1,2 of that scale).
    for (i=0;i<m_levels;i++)
    {
        for (y=0;y<m_h;y++)
        {
            for (x=0;x<m_w;x++)
            {
                (*m_pImageTensorRGB[i])(x,y).r=(m_tensor[i*SiNGLE_TENSOR_DIM])->GetElement(y,x);
                (*m_pImageTensorRGB[i])(x,y).g=(m_tensor[i*SiNGLE_TENSOR_DIM+1])->GetElement(y,x);
                (*m_pImageTensorRGB[i])(x,y).b=(m_tensor[i*SiNGLE_TENSOR_DIM+2])->GetElement(y,x);
            }
        }
    }
    m_tensors = NULL;
}