// Lucas-Kanade style optical flow at a single point: accumulates the 2x2
// structure matrix M and the vector b over a small window around p, then
// solves v = -M^-1 * b.
Point2D getOptFlow(IplImage* currentFrame, Point2D p, IplImage* preFrame)
{
    Point2D temp;
    double b[2] = {0, 0};
    double M11 = 0, M12 = 0, M22 = 0;

    for (int i = -OPTICAL_FLOW_POINT_AREA/2; i < OPTICAL_FLOW_POINT_AREA/2; i++) {
        for (int j = -OPTICAL_FLOW_POINT_AREA/2; j < OPTICAL_FLOW_POINT_AREA/2; j++) {
            temp = partial(currentFrame, Point2D(p.row+i, p.col+j));
            M11 += temp.dcol*temp.dcol;
            M12 += temp.dcol*temp.drow;
            M22 += temp.drow*temp.drow;
            b[0] += temp.dcol*(pixval8U(currentFrame, p.row+i, p.col+j) - pixval8U(preFrame, p.row+i, p.col+j));
            b[1] += temp.drow*(pixval8U(currentFrame, p.row+i, p.col+j) - pixval8U(preFrame, p.row+i, p.col+j));
        }
    }

    double a[] = {M11, M12, M12, M22};
    CvMat M = cvMat(2, 2, CV_64FC1, a);
    CvMat *Mi = cvCloneMat(&M);
    cvInvert(&M, Mi, CV_SVD);

    b[0] = -b[0];
    b[1] = -b[1];
    CvMat Mb = cvMat(2, 1, CV_64FC1, b);
    CvMat *Mr = cvCloneMat(&Mb);
    cvMatMul(Mi, &Mb, Mr);            // v = -M^-1 * b

    double vy = cvmGet(Mr, 1, 0);
    double vx = cvmGet(Mr, 0, 0);

    cvReleaseMat(&Mi);                // release temporaries
    cvReleaseMat(&Mr);
    return Point2D(vy, vx);
}
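/*
 * Minimal usage sketch for getOptFlow (added for illustration, not part of
 * the original code). It assumes two consecutive 8-bit grayscale frames and
 * the Point2D type used above with (row, col) constructor arguments; the
 * file names, the tracked pixel and the field types of Point2D are
 * assumptions made only for this example.
 */
void demoGetOptFlow()
{
    IplImage *prev = cvLoadImage("frame0.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage *curr = cvLoadImage("frame1.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (prev && curr) {
        // Flow at pixel (row=120, col=160) between the two frames.
        Point2D flow = getOptFlow(curr, Point2D(120, 160), prev);
        printf("flow at (120,160): v_row=%f v_col=%f\n", (double)flow.row, (double)flow.col);
    }
    cvReleaseImage(&prev);
    cvReleaseImage(&curr);
}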
/**
 * Computes the optical center of camera from camera projection matrix
 * (http://en.wikipedia.org/wiki/Camera_matrix).
 * @param pM : Camera projection matrix (3x4).
 * @return  : Optical center of camera.
 */
Point3D Utility::getOpticalCenter(CvMat* pM)
{
    CvMat *A    = cvCreateMat(3, 3, CV_64FC1);
    CvMat *Ainv = cvCreateMat(3, 3, CV_64FC1);
    CvMat *b    = cvCreateMat(3, 1, CV_64FC1);

    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j)
            cvmSet(A, i, j, cvmGet(pM, i, j));
        cvmSet(b, i, 0, cvmGet(pM, i, 3));
    }

    cvInvert(A, Ainv);

    CvMat *oc = cvCreateMat(3, 1, CV_64FC1);
    cvMatMul(Ainv, b, oc);

    Point3D toRet;
    toRet.x = -1 * cvmGet(oc, 0, 0);   // NULL SPACE OF MATRIX pM
    toRet.y = -1 * cvmGet(oc, 1, 0);
    toRet.z = -1 * cvmGet(oc, 2, 0);

    cvReleaseMat(&A);
    cvReleaseMat(&Ainv);
    cvReleaseMat(&b);
    cvReleaseMat(&oc);
    return toRet;
}
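/*
 * Usage sketch (added, not from the original source): recovers the optical
 * centre C = -A^{-1} b from a projection matrix P = [A | b]. The matrix
 * values and the way Utility is instantiated are assumptions made only for
 * illustration.
 */
void demoGetOpticalCenter()
{
    CvMat *P = cvCreateMat(3, 4, CV_64FC1);
    cvSetIdentity(P);                     // P = [I | 0]  ->  centre at the origin
    Utility util;                         // assumes a default-constructible Utility
    Point3D C = util.getOpticalCenter(P);
    printf("optical centre: (%f, %f, %f)\n", (double)C.x, (double)C.y, (double)C.z);
    cvReleaseMat(&P);
}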
void mCallBack(int event, int x, int y, int flags, void *userdata)
{
    if (event == CV_EVENT_LBUTTONDOWN) {
        printf("x:%d y:%d\n", x, y);
        if (lb_type == TYPE_REC || lb_type == TYPE_PNT || lb_type == TYPE_SQR) {
            point_val[cur_point_ptr][0] = x;
            point_val[cur_point_ptr][1] = y;
            if (cur_point_ptr == 1 && lb_type == TYPE_SQR) {
                cur_point_ptr = -1;
            }
            nextPoint();
        }
        if (lb_type == TYPE_SP) {
            sp_val[(int)cvmGet(spmat, y, x) - 1] = cur_point_ptr;
        }
        showImage();
    }
    if (event == CV_EVENT_RBUTTONDOWN) {
        if (lb_type == TYPE_SP) {
            sp_val[(int)cvmGet(spmat, y, x) - 1] = -1;
        }
        showImage();
    }
}
void PM(CvMat *m, char *filename)
{
    if (filename) {
        FILE *f = fopen(filename, "w");
        CvSize size = cvGetSize(m);
        char buf[50];
        for (int i = 0; i < size.height; i++) {
            fprintf(f, "{");
            for (int j = 0; j < size.width; j++) {
                sprintf(buf, "%f", cvmGet(m, i, j));
                fprintf(f, "%15s, ", buf);
            }
            fprintf(f, "},");
            fprintf(f, "\n");
        }
        fclose(f);
    } else {
        CvSize size = cvGetSize(m);
        for (int r = 0; r < size.height; r++) {
            for (int c = 0; c < size.width; c++)
                cout << cvmGet(m, r, c) << ' ';
            cout << endl;
        }
    }
}
bica::ShapeList SingleObjectModel::getGrDebugRel()
{
    pthread_mutex_lock(&(jpdaf->mutex));
    shapeListRel.clear();

    // Ball
    bica::Point3DPtr p(new bica::Point3D);
    p->x = estimate.getPositionInRelativeCoordinates().x;
    p->y = estimate.getPositionInRelativeCoordinates().y;
    p->z = 0.0f;

    bica::EllipsePtr estEllipse(new bica::Ellipse);
    estEllipse->center = p;
    estEllipse->width  = 4 * sqrt(cvmGet(jpdaf->objects[0]->error_cov_post, 0, 0));
    estEllipse->length = 4 * sqrt(cvmGet(jpdaf->objects[0]->error_cov_post, 1, 1));
    estEllipse->angle  = toDegrees(atan2(cvmGet(jpdaf->objects[0]->state_post, 1, 0),
                                         cvmGet(jpdaf->objects[0]->state_post, 0, 0)));
    estEllipse->z = 0.0f;
    estEllipse->color = bica::ORANGE;
    estEllipse->filled = true;
    estEllipse->opacity = 125;
    estEllipse->accKey = "o";
    estEllipse->label = "Ball";
    shapeListRel.push_back(estEllipse);

    pthread_mutex_unlock(&(jpdaf->mutex));
    return shapeListRel;
}
int LDARec::ActionRecognition(double* input)   // input: 1x6 vector
{
    vector<double> dist;
    dist.resize(cnum);

    ydata[0] = input[0];
    ydata[1] = input[1];
    ydata[2] = input[2];
    ydata[3] = input[3];
    ydata[4] = input[4];
    ydata[5] = input[5];

    // Compute the one-dimensional projection and vote between every pair of
    // classes; each pair is compared exactly once.
    int count = 0;
    for (int i = 0; i < cnum; ++i) {
        for (int j = i + 1; j < cnum; ++j) {
            double x1d = 0;
            for (int l = 0; l < Ws.cols; ++l)
                x1d += cvmGet(&y, 0, l) * cvmGet(&Ws, count, l);
            double d1 = fabs(x1d - cvmGet(&mean1d, count, 0));
            double d2 = fabs(x1d - cvmGet(&mean1d, count, 1));   // i-th vs j-th class
            if (d2 > d1) {
                dist[i] += 1;
                dist[j] -= 1;
            } else {
                dist[i] -= 1;
                dist[j] += 1;
            }
            ++count;
        }
    }

    // Return the class with the highest vote.
    int midx = 0;
    double mval = dist[0];
    for (size_t i = 0; i < dist.size(); ++i) {
        if (dist[i] > mval) {
            midx = (int)i;
            mval = dist[i];
        }
    }
    return midx;
}
void Res_Func2(CvMat *vectX, CvMat *res)
{
    double x = cvmGet(vectX, 0, 0);
    double y = cvmGet(vectX, 1, 0);
    cvmSet(res, 0, 0, (x - 2)*(x - 2) + (y + 3)*(y + 3));
    cvmSet(res, 1, 0, x + y);
    return;
}
void display_CvMat(CvMat *arrayToDisplay, int arraySize)
{
    int i;
    printf("\n\n\tCvMat Coordinates (x;y)\n\n");
    for (i = 0; i < arraySize; i++) {
        printf("(%.1f;%.1f)\t", cvmGet(arrayToDisplay, i, 0), cvmGet(arrayToDisplay, i, 1));
    }
}
CvMat* AbstractCamera::computeRtMatrix(double a, double b, double g, double tX, double tY, double tZ)
{
    //--- Represent 3d rotation with Euler angles (Z-X-Z convention)
    double sinG = sin(g);
    double cosG = cos(g);
    CvMat* rZg = cvCreateMat(3, 3, CV_32FC1);
    cvSetZero(rZg);
    cvmSet(rZg, 0, 0, cosG);
    cvmSet(rZg, 0, 1, -sinG);
    cvmSet(rZg, 1, 0, sinG);
    cvmSet(rZg, 1, 1, cosG);
    cvmSet(rZg, 2, 2, 1.0f);

    double sinB = sin(b);
    double cosB = cos(b);
    CvMat* rXb = cvCreateMat(3, 3, CV_32FC1);
    cvSetZero(rXb);
    cvmSet(rXb, 0, 0, 1.0f);
    cvmSet(rXb, 1, 1, cosB);
    cvmSet(rXb, 1, 2, -sinB);
    cvmSet(rXb, 2, 1, sinB);
    cvmSet(rXb, 2, 2, cosB);

    double sinA = sin(a);
    double cosA = cos(a);
    CvMat* rZa = cvCreateMat(3, 3, CV_32FC1);
    cvSetZero(rZa);
    cvmSet(rZa, 0, 0, cosA);
    cvmSet(rZa, 0, 1, -sinA);
    cvmSet(rZa, 1, 0, sinA);
    cvmSet(rZa, 1, 1, cosA);
    cvmSet(rZa, 2, 2, 1.0f);

    // rotationMatrix = rZg * rXb * rZa. cvMatMul (cvGEMM) does not operate
    // in-place, so the second product goes through a temporary matrix.
    CvMat* rZgXb = cvCreateMat(3, 3, CV_32FC1);
    CvMat* rotationMatrix = cvCreateMat(3, 3, CV_32FC1);
    cvMatMul(rZg, rXb, rZgXb);
    cvMatMul(rZgXb, rZa, rotationMatrix);
    cvReleaseMat(&rZg);
    cvReleaseMat(&rXb);
    cvReleaseMat(&rZa);
    cvReleaseMat(&rZgXb);

    //--- [R|T] ; First camera rotation and translation matrix
    CvMat* rtMatrix = cvCreateMat(3, 4, CV_32FC1);
    for (int i = 0; i < 3; i++) {
        cvmSet(rtMatrix, i, 0, cvmGet(rotationMatrix, i, 0));
        cvmSet(rtMatrix, i, 1, cvmGet(rotationMatrix, i, 1));
        cvmSet(rtMatrix, i, 2, cvmGet(rotationMatrix, i, 2));
    }
    cvmSet(rtMatrix, 0, 3, tX);
    cvmSet(rtMatrix, 1, 3, tY);
    cvmSet(rtMatrix, 2, 3, tZ);

    cvReleaseMat(&rotationMatrix);
    return rtMatrix;
}
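/*
 * Usage sketch (added for illustration, not from the original source):
 * builds an [R|T] matrix for arbitrary Euler angles and a translation,
 * prints it row by row and releases it. It assumes computeRtMatrix is
 * publicly accessible on an existing AbstractCamera instance; the numeric
 * values are placeholders.
 */
void demoComputeRtMatrix(AbstractCamera &cam)
{
    CvMat *rt = cam.computeRtMatrix(0.1, 0.2, 0.3, 10.0, 0.0, 50.0);
    for (int i = 0; i < 3; i++)
        printf("%f %f %f %f\n",
               cvmGet(rt, i, 0), cvmGet(rt, i, 1), cvmGet(rt, i, 2), cvmGet(rt, i, 3));
    cvReleaseMat(&rt);
}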
void saveRenseMatrix(CvMat *matrixRense)
{
    sprintf(renseParameters, "%lf\n%lf\n%lf\n%lf\n",
            cvmGet(matrixRense, 0, 0),
            cvmGet(matrixRense, 1, 0),
            cvmGet(matrixRense, 2, 0),
            cvmGet(matrixRense, 3, 0));
}
QString AbstractCamera::getDistorcionCoefficientsInfo()
{
    float k1 = cvmGet(_distortionCoefficients, 0, 0);
    float k2 = cvmGet(_distortionCoefficients, 1, 0);
    float p1 = cvmGet(_distortionCoefficients, 2, 0);
    float p2 = cvmGet(_distortionCoefficients, 3, 0);
    return QString("\n\nDistorcion Coefficients:\nk1 = %1 k2 = %2\np1 = %3 p2 = %4")
            .arg(k1).arg(k2).arg(p1).arg(p2);
}
double PerformanceMatrix::getConfidence(int sym)
{
    double conf = 0.0;
    double val = 0.0;
    for (size_t i = 0; i < PerformanceMatrix::size; i++) {
        val += cvmGet(pm, i, sym);
    }
    conf = cvmGet(pm, sym, sym) / val;
    return conf;
}
void CDataOut::Cam()
{
    CamFile << cvmGet(pEstimator->pDataCam->translation, 0, 0) << " ";
    CamFile << cvmGet(pEstimator->pDataCam->translation, 1, 0) << " ";
    CamFile << cvmGet(pEstimator->pDataCam->translation, 2, 0) << " ";
    CamFile << "65000 ";
    CamFile << "65000 ";
    CamFile << endl;
}
int quasi_poisson_solver(IplImage *im_src, IplImage *im_dst, IplImage *im_mask, int channel, int *offset)
{
    int i, j, loop, neighbor, count_neighbors, ok;
    float error, sum_f, sum_vpq, fp;
    int naddr[NUM_NEIGHBOR][2] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};

    // Initialise the working buffer with the destination channel.
    CvMat* im_new = cvCreateMat(im_dst->height, im_dst->width, CV_64F);
    for (i = 0; i < im_dst->height; i++) {
        for (j = 0; j < im_dst->width; j++) {
            cvmSet(im_new, i, j, double(uchar(im_dst->imageData[(i)*im_dst->widthStep + (j)*im_dst->nChannels + channel])));
        }
    }

    // Gauss-Seidel iterations: each masked pixel is set to the average of its
    // neighbours plus the source gradient (the Poisson guidance field).
    for (loop = 0; loop < LOOP_MAX; loop++) {
        if (loop % 100 == 0) {
            printf("loop:%d\n", loop);
        }
        ok = 1;
        for (i = 0; i < im_mask->height; i++) {
            for (j = 0; j < im_mask->width; j++) {
                if (int((uchar)im_mask->imageData[i*im_mask->widthStep + j]) > 0) {
                    sum_f = 0.0;
                    sum_vpq = 0.0;
                    count_neighbors = 0;
                    for (neighbor = 0; neighbor < NUM_NEIGHBOR; neighbor++) {
                        if (i+offset[0]+naddr[neighbor][0] >= 0 && j+offset[1]+naddr[neighbor][1] >= 0 &&
                            i+offset[0]+naddr[neighbor][0] < im_dst->height && j+offset[1]+naddr[neighbor][1] < im_dst->width) {
                            sum_f += cvmGet(im_new, i+offset[0]+naddr[neighbor][0], j+offset[1]+naddr[neighbor][1]);
                            sum_vpq += float((uchar)im_src->imageData[(i)*im_src->widthStep + (j)*im_src->nChannels + channel])
                                     - float((uchar)im_src->imageData[(i+naddr[neighbor][0])*im_src->widthStep + (j+naddr[neighbor][1])*im_src->nChannels + channel]);
                            count_neighbors++;
                        }
                    }
                    fp = (sum_f + sum_vpq)/(float)count_neighbors;
                    error = fabs(fp - cvmGet(im_new, i+offset[0], j+offset[1]));
                    if (ok && error > EPS * (1 + fabs(fp))) {
                        ok = 0;
                    }
                    cvmSet(im_new, i+offset[0], j+offset[1], fp);
                }
            }
        }
        if (ok) {   // converged: no pixel changed by more than the tolerance
            break;
        }
    }

    // Clamp to [0, 255] and write the result back into the destination image.
    for (i = 0; i < im_dst->height; i++) {
        for (j = 0; j < im_dst->width; j++) {
            if (cvmGet(im_new, i, j) > 255) {
                cvmSet(im_new, i, j, 255.0);
            } else if (cvmGet(im_new, i, j) < 0) {
                cvmSet(im_new, i, j, 0.0);
            }
            im_dst->imageData[(i)*im_dst->widthStep + (j)*im_dst->nChannels + channel] = (uchar)cvmGet(im_new, i, j);
        }
    }
    cvReleaseMat(&im_new);
    return 1;
}
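/*
 * Usage sketch (added, not part of the original code): runs the solver once
 * per colour channel to blend a source patch into a destination image.
 * The file names, the mask convention (non-zero marks the blend region) and
 * the (row, col) offset of the patch inside the destination are assumptions
 * made for illustration.
 */
void demoQuasiPoisson()
{
    IplImage *src  = cvLoadImage("src.png",  CV_LOAD_IMAGE_COLOR);
    IplImage *dst  = cvLoadImage("dst.png",  CV_LOAD_IMAGE_COLOR);
    IplImage *mask = cvLoadImage("mask.png", CV_LOAD_IMAGE_GRAYSCALE);
    int offset[2] = {40, 60};
    if (src && dst && mask) {
        for (int ch = 0; ch < dst->nChannels; ch++)
            quasi_poisson_solver(src, dst, mask, ch, offset);
        cvSaveImage("blended.png", dst);
    }
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    cvReleaseImage(&mask);
}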
int DoubleSidesModelFitting::SingleFitList(const vector<CvPoint>& list, int horizon)
{
    double belief;
    if (list.empty()) {
        return DSMF_FAIL_TOO_SHORT;
    }
    if (list.size() < DSMF_RANSAC_MIN_POINTS) {
        return DSMF_FAIL_TOO_SHORT;
    }

    matFullA = cvCreateMat(list.size(), 3, CV_32FC1);
    matFullB = cvCreateMat(list.size(), 1, CV_32FC1);
    matFullDiff = cvCreateMat(list.size(), 1, CV_32FC1);
    for (size_t i = 0; i < list.size(); i++) {
        cvmSet(matFullA, i, 0, 1.0 / (list.at(i).y - horizon));
        cvmSet(matFullA, i, 1, (list.at(i).y - horizon));
        cvmSet(matFullA, i, 2, 1.0);
        cvmSet(matFullB, i, 0, list.at(i).x);
    }

    int result = SingleFullFit(list, horizon, belief);
    if (result == DSMF_SUCCESS) {
        curve = cvScalar(cvmGet(matOptX, 0, 0), cvmGet(matOptX, 1, 0), cvmGet(matOptX, 2, 0), belief);
    } else {
        curve = cvScalar(0);
    }

    if (matFullA)    { cvReleaseMat(&matFullA);    matFullA = NULL; }
    if (matFullB)    { cvReleaseMat(&matFullB);    matFullB = NULL; }
    if (matFullDiff) { cvReleaseMat(&matFullDiff); matFullDiff = NULL; }
    return result;
}
int main (int argc, char **argv)
{
    int i, j;
    int nrow = 3;
    int ncol = 3;
    CvMat *src, *dst, *mul;
    double det;
    CvRNG rng = cvRNG (time (NULL));    /* seed the random number generator */

    // (1) allocate the matrices
    src = cvCreateMat (nrow, ncol, CV_32FC1);
    dst = cvCreateMat (ncol, nrow, CV_32FC1);
    mul = cvCreateMat (nrow, nrow, CV_32FC1);

    // (2) fill matrix src with random values
    printf ("src\n");
    cvmSet (src, 0, 0, 1);
    for (i = 0; i < src->rows; i++) {
        for (j = 0; j < src->cols; j++) {
            cvmSet (src, i, j, cvRandReal (&rng));
            printf ("% lf\t", cvmGet (src, i, j));
        }
        printf ("\n");
    }

    // (3) compute the inverse of src and store it in dst
    det = cvInvert (src, dst, CV_SVD);

    // (4) print the determinant of src
    printf ("det(src)=%lf\n", det);

    // (5) print dst
    printf ("dst\n");
    for (i = 0; i < dst->rows; i++) {
        for (j = 0; j < dst->cols; j++) {
            printf ("% lf\t", cvmGet (dst, i, j));
        }
        printf ("\n");
    }

    // (6) multiply src and dst to verify the result (should be the identity)
    cvMatMul (src, dst, mul);
    printf ("mul\n");
    for (i = 0; i < mul->rows; i++) {
        for (j = 0; j < mul->cols; j++) {
            printf ("% lf\t", cvmGet (mul, i, j));
        }
        printf ("\n");
    }

    // (7) release the matrices
    cvReleaseMat (&src);
    cvReleaseMat (&dst);
    cvReleaseMat (&mul);
    return 0;
}
/*
IplImage* HomographyCalculationThread::rectifyImage(IplImage* inImage, IplImage* outImage,
        double oldFirstApexX, double oldFirstApexY, double width, double height)
{
    double x1 = 0;
    double x2 = 0;
    double x3 = 0;
    double y_position = 0;
    double x_position = 0;
    uchar* data = (uchar *)inImage->imageData;
    uchar* dataOut = (uchar *)outImage->imageData;
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            x1 = cvmGet(mHMatrix,0,0) * col + cvmGet(mHMatrix,0,1) * row + cvmGet(mHMatrix,0,2);
            x2 = cvmGet(mHMatrix,1,0) * col + cvmGet(mHMatrix,1,1) * row + cvmGet(mHMatrix,1,2);
            x3 = cvmGet(mHMatrix,2,0) * col + cvmGet(mHMatrix,2,1) * row + 1;
            y_position = x2/x3 + oldFirstApexY;
            if (inImage->height < y_position) {
                y_position = (inImage->height-1);
            }
            x_position = x1/x3 + oldFirstApexX;
            if (inImage->width < x_position) {
                x_position = (inImage->width-1);
            }
            int temp_y = (int)y_position;
            int temp_x = (int)x_position;
            if (dataOut != NULL && data != NULL) {
                dataOut[row*outImage->widthStep+col*outImage->nChannels]   = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels];
                dataOut[row*outImage->widthStep+col*outImage->nChannels+1] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels+1];
                dataOut[row*outImage->widthStep+col*outImage->nChannels+2] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels+2];
            }
        }
    }
    cvReleaseMat(&mHMatrix);
    return outImage;
}
*/

void HomographyCalculationThread::correctHomographicMatrix(IplImage* inImage, CvMat* invH,
        double lastPictureApexX, double lastPictureApexY, double width, double height)
{
    CvMat *hCoeff = cvCreateMat(3, 3, CV_32FC1);
    CvMat *multipleMat = cvCreateMat(3, 3, CV_32FC1);
    double old_height = inImage->height;
    double y_position = lastPictureApexY;
    double x_position = lastPictureApexX;

    for (int i = 1; i < 10; i++) {
        double x1 = cvmGet(invH,0,0) * (x_position) + cvmGet(invH,0,1) * (y_position) + cvmGet(invH,0,2);
        double x2 = cvmGet(invH,1,0) * (x_position) + cvmGet(invH,1,1) * (y_position) + cvmGet(invH,1,2);
        double x3 = cvmGet(invH,2,0) * (x_position) + cvmGet(invH,2,1) * (y_position) + 1;
        x1 = x1/x3;
        x2 = x2/x3;
        double H_coeff = ((x1/width) + (x2/height))/2 - 0.01;
        for (int coeffRow = 0; coeffRow < 3; coeffRow++) {
            for (int coeffCol = 0; coeffCol < 3; coeffCol++) {
                cvmSet(hCoeff, coeffRow, coeffCol, H_coeff);
                cvmSet(multipleMat, coeffRow, coeffCol, cvmGet(mHMatrix, coeffRow, coeffCol));
            }
        }
        cvMul(multipleMat, hCoeff, mHMatrix);
        cvInvert(mHMatrix, invH);
    }
    cvReleaseMat(&multipleMat);
    cvReleaseMat(&hCoeff);
}
void GetCameraParameterCV(TomGine::tgCamera::Parameter& camPar,
        CvMat *pImgSize, CvMat *pIntrinsicDistort, CvMat *pDistortions,
        const cv::Mat &R, const cv::Mat &T)
{
    if (pImgSize) {
        camPar.width  = cvmGet(pImgSize, 0, 0);
        camPar.height = cvmGet(pImgSize, 1, 0);
    } else {
        throw std::runtime_error("[utilities::GetCameraParameterCV] Wrong format for CvMat pImgSize");
    }

    if (pIntrinsicDistort) {
        camPar.fx = cvmGet(pIntrinsicDistort, 0, 0);
        camPar.fy = cvmGet(pIntrinsicDistort, 1, 1);
        camPar.cx = cvmGet(pIntrinsicDistort, 0, 2);
        camPar.cy = cvmGet(pIntrinsicDistort, 1, 2);
    } else {
        throw std::runtime_error("[utilities::GetCameraParameterCV] Wrong format for CvMat pIntrinsicDistort");
    }

    if (pDistortions) {
        camPar.k1 = cvmGet(pDistortions, 0, 0);
        camPar.k2 = cvmGet(pDistortions, 1, 0);
        camPar.k3 = cvmGet(pDistortions, 2, 0);
        //camPar.k4 = cvmGet(pIntrinsicDistort, 3,0);
        camPar.p1 = 0.0f;
        camPar.p2 = 0.0f;
    } else {
        throw std::runtime_error("[utilities::GetCameraParameterCV] Wrong format for CvMat pDistortions");
    }

    camPar.zFar = 5.0f;
    camPar.zNear = 0.1f;

    // Invert pose of calibration pattern to get pose of camera
    vec3 r;
    cv::Mat Rm   = cv::Mat(3, 3, CV_64F);
    cv::Mat rvec = cv::Mat(3, 1, CV_64F);
    cv::Mat tvec = cv::Mat(3, 1, CV_64F);
    cv::Rodrigues(R, Rm);
    cv::transpose(Rm, Rm);
    tvec = -Rm * T;             // t = -R^T * t
    cv::Rodrigues(Rm, rvec);
    r.x = (float)rvec.at<double>(0, 0);
    r.y = (float)rvec.at<double>(1, 0);
    r.z = (float)rvec.at<double>(2, 0);

    camPar.pos.x = tvec.at<double>(0, 0);
    camPar.pos.y = tvec.at<double>(1, 0);
    camPar.pos.z = tvec.at<double>(2, 0);
    camPar.rot.fromRotVector(r);
}
// tests
void Jac_Func2(CvMat *vectX, CvMat *Jac)
{
    double x = cvmGet(vectX, 0, 0);
    double y = cvmGet(vectX, 1, 0);
    cvmSet(Jac, 0, 0, 2*(x - 2));
    cvmSet(Jac, 0, 1, 2*(y + 3));
    cvmSet(Jac, 1, 0, 1);
    cvmSet(Jac, 1, 1, 1);
    return;
}
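/*
 * Quick check (added, not from the original tests): evaluates Res_Func2 and
 * Jac_Func2 at an arbitrary point to illustrate the expected matrix shapes
 * (residual is 2x1, Jacobian is 2x2).
 */
void test_Func2_shapes()
{
    CvMat *x   = cvCreateMat(2, 1, CV_64FC1);
    CvMat *res = cvCreateMat(2, 1, CV_64FC1);
    CvMat *J   = cvCreateMat(2, 2, CV_64FC1);
    cvmSet(x, 0, 0, 1.0);
    cvmSet(x, 1, 0, -1.0);
    Res_Func2(x, res);
    Jac_Func2(x, J);
    // For x = (1, -1): res = ((1-2)^2 + (-1+3)^2, 1 + (-1)) = (5, 0)
    printf("res = [%f, %f]\n", cvmGet(res, 0, 0), cvmGet(res, 1, 0));
    cvReleaseMat(&x);
    cvReleaseMat(&res);
    cvReleaseMat(&J);
}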
void FacePredict::FaceSynthesis(AAM_Shape &shape, CvMat* texture, IplImage* newImage)
{
    double thisfacewidth = shape.GetWidth();
    shape.Scale(stdwidth / thisfacewidth);
    shape.Translate(-shape.MinX(), -shape.MinY());

    AAM_PAW paw;
    CvMat* points = cvCreateMat(1, __shape.nPoints(), CV_32FC2);
    CvMemStorage* storage = cvCreateMemStorage(0);
    paw.Train(shape, points, storage, __paw.GetTri(), false);   // the actual shape

    __AAMRefShape.Translate(-__AAMRefShape.MinX(), -__AAMRefShape.MinY());  // refShape, central point is at (0,0); translate the min to (0,0)

    double minV, maxV;
    cvMinMaxLoc(texture, &minV, &maxV);
    cvConvertScale(texture, texture, 1/(maxV-minV)*255, -minV*255/(maxV-minV));
    cvZero(newImage);

    int x1, x2, y1, y2, idx1 = 0, idx2 = 0;
    int tri_idx, v1, v2, v3;
    int minx, miny, maxx, maxy;
    minx = shape.MinX();
    miny = shape.MinY();
    maxx = shape.MaxX();
    maxy = shape.MaxY();
    for (int y = miny; y < maxy; y++) {
        y1 = y - miny;
        for (int x = minx; x < maxx; x++) {
            x1 = x - minx;
            idx1 = paw.Rect(y1, x1);
            if (idx1 >= 0) {
                tri_idx = paw.PixTri(idx1);
                v1 = paw.Tri(tri_idx, 0);
                v2 = paw.Tri(tri_idx, 1);
                v3 = paw.Tri(tri_idx, 2);
                x2 = paw.Alpha(idx1)*__AAMRefShape[v1].x + paw.Belta(idx1)*__AAMRefShape[v2].x
                   + paw.Gamma(idx1)*__AAMRefShape[v3].x;
                y2 = paw.Alpha(idx1)*__AAMRefShape[v1].y + paw.Belta(idx1)*__AAMRefShape[v2].y
                   + paw.Gamma(idx1)*__AAMRefShape[v3].y;
                idx2 = __paw.Rect(y2, x2);
                if (idx2 < 0)
                    continue;
                CV_IMAGE_ELEM(newImage, byte, y, 3*x)   = cvmGet(texture, 0, 3*idx2);
                CV_IMAGE_ELEM(newImage, byte, y, 3*x+1) = cvmGet(texture, 0, 3*idx2+1);
                CV_IMAGE_ELEM(newImage, byte, y, 3*x+2) = cvmGet(texture, 0, 3*idx2+2);
            }
        }
    }
    cvReleaseMat(&points);
    cvReleaseMemStorage(&storage);
}
void affinity::compute_cvGetQuandrangleSubPix_transform(CvMat * A_q, int w, int h)
{
    // cvmGet(r, c) with no matrix argument reads this affinity's own
    // coefficients; the ::-qualified calls are the global OpenCV functions
    // writing into A_q.
    float det = float(cvmGet(0, 0) * cvmGet(1, 1) - cvmGet(1, 0) * cvmGet(0, 1));

    ::cvmSet(A_q, 0, 0,  cvmGet(1, 1) / det);
    ::cvmSet(A_q, 0, 1, -cvmGet(0, 1) / det);
    ::cvmSet(A_q, 1, 0, -cvmGet(1, 0) / det);
    ::cvmSet(A_q, 1, 1,  cvmGet(0, 0) / det);

    ::cvmSet(A_q, 0, 2, ::cvmGet(A_q, 0, 0) * (w / 2 - cvmGet(0, 2)) + ::cvmGet(A_q, 0, 1) * (h / 2 - cvmGet(1, 2)));
    ::cvmSet(A_q, 1, 2, ::cvmGet(A_q, 1, 0) * (w / 2 - cvmGet(0, 2)) + ::cvmGet(A_q, 1, 1) * (h / 2 - cvmGet(1, 2)));
}
float calculateY(CvMat* P, int current_row)
{
    float Y = 0, P_34 = 0, P_24 = 0, P_32 = 0, P_22 = 0;
    /* Y = (v*P34 - P24) / (P22 - v*P32), with v = current_row */
    P_34 = cvmGet(P, 2, 3);
    P_24 = cvmGet(P, 1, 3);
    P_32 = cvmGet(P, 2, 1);
    P_22 = cvmGet(P, 1, 1);
    Y = (current_row*P_34 - P_24) / (P_22 - current_row*P_32);
    return Y;
}
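/*
 * Derivation sketch (added for clarity, assuming a world point with
 * X = Z = 0 and v = current_row, i.e. the image row of that point):
 *   v = (P22*Y + P24) / (P32*Y + P34)
 *   => v*(P32*Y + P34) = P22*Y + P24
 *   => Y*(v*P32 - P22) = P24 - v*P34
 *   => Y = (v*P34 - P24) / (P22 - v*P32)
 * which matches the expression computed in calculateY above.
 */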
/**
 * Update the weight matrix.
 * @param feature_train The positive and negative training features
 * @param current_best_classifier The current weak classifier selected in this round
 * @param imith Controls the shape of the (soft) sign function
 * @param curIteration The current iteration number of the strong classifier
 */
int adaBoostBinary::updataWeight(CvMat *feature_train, lda_classifier *current_best_classifier, float imith, int curIteration)
{
    double min_error = current_best_classifier->minerror;
    int sample_num = pos_sample_num + neg_sample_num;
    int i = curIteration;
    //printf("the error rate in current iteration is %f\n",min_error);
    //printf("the threshold for current iteration is %e\n", current_best_classifier->pro_thresh);
    if (min_error <= 0.0) {
        //printf("the min error for current iteration is 0, so the program has to stop, sorry!!\n");
        return -1;
    }

    current_best_classifier->alpha = 0.5*log((1.0 + min_error)/(1 - min_error));
    strongclassifier.push_back(*current_best_classifier);

    /// update the weights
    double ztsumpos = 0.0, ztsumneg = 0.0;
    float error = 0.0;
    float tmpscore;
    //int best_position=current_best_classifier.x*size+current_best_classifier.y;
    int binnum = current_best_classifier->bindex;
    float threshold = current_best_classifier->pro_thresh;
    int thr_sign = current_best_classifier->thr_sign;
    int colsize = feature_train->cols;

    for (int t = 0; t < sample_num; t++) {
        //if(best_res_pos[t]==false)
        tmpscore = feature_train->data.fl[t*colsize + binnum]*thr_sign - threshold;
        double rtx = tmpscore/sqrt(tmpscore*tmpscore + imith*imith);
        error += cvmGet(weight, t, 0)*rtx*labelsign[t];
        double tmp = cvmGet(weight, t, 0)*exp(-current_best_classifier->alpha*labelsign[t]*iter_res[i*sample_num + t]);
        cvmSet(weight, t, 0, tmp);
        if (labelsign[t] == 1) {
            ztsumpos += cvmGet(weight, t, 0);
            //printf("*%f %f",rtx, iter_res[i*sample_num+t]);
        } else {
            ztsumneg += cvmGet(weight, t, 0);
            //printf("%f ",rtx);
        }
    }
    //printf("binnum=%d\n",binnum);
    //printf("current error just for check %f\n", error);
    //printf("ztsumpos=%f, ztsumneg=%f\n",ztsumpos, ztsumneg);

    // Normalise the weights so they sum to one.
    if (ztsumpos + ztsumneg != 0) {
        for (int j = 0; j < sample_num; j++) {
            cvmSet(weight, j, 0, cvmGet(weight, j, 0)/(ztsumpos + ztsumneg));
        }
    }
    return 1;
}
/*!
 * \brief Update step of the K-means algorithm.
 *
 * After a new datum has been assigned to a cluster, the cluster's mean and
 * weight (running count) are updated incrementally.
 *
 * \param[in] val value of the assigned datum
 * \param[in] idx index of the cluster the datum is assigned to
 * \param[in,out] means vector of cluster means
 * \param[in,out] weights vector of cluster weights
 */
static void update_step(double val, int idx, CvMat *means, CvMat *weights)
{
    double cur, w;
    cur = cvmGet(means, idx, 0);
    w = cvmGet(weights, idx, 0);
    cur = (cur*w + val)/(++w);     // running mean: new_mean = (old_mean*w + val)/(w+1)
    cvmSet(means, idx, 0, cur);
    cvmSet(weights, idx, 0, w);
}
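/*
 * Usage sketch (added for illustration, not part of the original code):
 * feeds a few one-dimensional samples, all assigned to cluster 0, through
 * update_step and prints the resulting running mean and count. The number
 * of clusters and the sample values are arbitrary.
 */
void demo_update_step()
{
    const int k = 2;                                   // two clusters
    CvMat *means   = cvCreateMat(k, 1, CV_64FC1);
    CvMat *weights = cvCreateMat(k, 1, CV_64FC1);
    cvSetZero(means);
    cvSetZero(weights);

    double samples[] = {1.0, 3.0, 5.0};
    for (int n = 0; n < 3; n++)
        update_step(samples[n], 0, means, weights);

    // Expected: mean of cluster 0 = 3.0, weight (count) = 3.
    printf("mean=%f weight=%f\n", cvmGet(means, 0, 0), cvmGet(weights, 0, 0));

    cvReleaseMat(&means);
    cvReleaseMat(&weights);
}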
//! Apply to a matrix of points
void AffineTransform::applyToPoints(const CvMat * positions, CvMat * newPositions) const
{
    CvMat newPositions2d;
    cvGetSubRect(newPositions, &newPositions2d, cvRect(0, 0, newPositions->cols, 2));
    cvMatMul(*this, positions, &newPositions2d);

    for (int i = 0; i < newPositions->cols; i++) {
        cvmSet(newPositions, 0, i, cvmGet(&newPositions2d, 0, i));
        cvmSet(newPositions, 1, i, cvmGet(&newPositions2d, 1, i));
        cvmSet(newPositions, 2, i, 1.0);
    }
}
int Scale_Mat(CvMat *input, double scale)
{
    double tmp;
    double val;
    double min;
    double max;
    min = 20000.0;
    max = -20000.0;
    int i, j;
    for (i = 0; i < input->rows; i++) {
        for (j = 0; j < input->cols; j++) {
            tmp = cvmGet(input, i, j);
            //if(tmp==-INF)
            //    printf("%d--%d\n",i,j);
            if (tmp < min)
                min = tmp;
            if (tmp > max)
                max = tmp;
        }
    }
    //printf("%g - %g\n",min,max);
    for (i = 0; i < input->rows; i++) {
        for (j = 0; j < input->cols; j++) {
            tmp = cvmGet(input, i, j);
            val = scale*((tmp - min)/(max - min));
            // printf("%g * ",val);
            cvmSet(input, i, j, val);
        }
    }
    /*max=0.0;
    for(i=0;i<input->rows;i++)
    {
        for(j=0;j<input->cols;j++)
        {
            tmp=cvmGet(input,i,j);
            if(max<tmp)
                max=tmp;
        }
    }
    printf("max =%g\n",max);
    */
    return 1;
}
//
// Print the extrinsic parameter matrix to the screen
//
// Arguments:
//   rotationMatrix    : rotation matrix
//   translationVector : translation vector
//
void printExtrinsicMatrix(CvMat *rotationMatrix, CvMat *translationVector)
{
    for (int i = 0; i < 3; i++) {
        printf("%lf %lf %lf %lf\n",
               cvmGet(rotationMatrix, i, 0),
               cvmGet(rotationMatrix, i, 1),
               cvmGet(rotationMatrix, i, 2),
               cvmGet(translationVector, 0, i));
    }
}
// Calculate the area with 95% of probability of surrounding the object.
// Area of ellipse = Pi * a * b, where a and b are the semi-axis.
float SingleObjectModel::calculateSingleObjectQuality(int object)
{
    float a = 4 * sqrt(cvmGet(this->jpdaf->objects[object]->error_cov_post, 0, 0));
    float b = 4 * sqrt(cvmGet(this->jpdaf->objects[object]->error_cov_post, 1, 1));
    float area = pi * a * b;

    if (area > ObjectState::MAX_UNCERTAINTY_AREA) {
        return 0.0f;
    } else {
        return 1.0f - (area / ObjectState::MAX_UNCERTAINTY_AREA);
    }
}
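/*
 * Note (added for clarity, not from the original source): the +/-2-sigma
 * interval of a Gaussian covers roughly 95% of its probability mass, which
 * is presumably why each axis above is taken as 4*sqrt(variance), i.e. the
 * full length of the +/-2-sigma interval along that coordinate. The quality
 * then falls linearly from 1 (zero uncertainty) to 0 as the ellipse area
 * approaches ObjectState::MAX_UNCERTAINTY_AREA.
 */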
QString AbstractCamera::getIntrinsicParamsInfo()
{
    QString info("");
    if (_intrinsicParams != NULL) {
        float fX = cvmGet(_intrinsicParams, 0, 0);
        float cX = cvmGet(_intrinsicParams, 0, 2);
        float fY = cvmGet(_intrinsicParams, 1, 1);
        float cY = cvmGet(_intrinsicParams, 1, 2);
        info = QString("\n\nIntrinsic Params:\nfX = %1 fY = %2\ncX = %3 cY = %4")
                .arg(fX).arg(fY).arg(cX).arg(cY);
    }
    return info;
}
void SingleObjectModel::updateEstimate()
{
    // Last estimation (ball seen or not seen)
    estimate.setPositionAndVelocityInRelativeCoordinates(
        Vector2<double>(cvmGet(this->jpdaf->objects[0]->state_post, 0, 0),
                        cvmGet(this->jpdaf->objects[0]->state_post, 1, 0)));

    // Update the quality estimations
    estimate.setQuality(calculateSingleObjectQuality(0));

    // Update reliability
    updateReliability();
}