void BackgroundSubtractorGMGImpl::release()
{
    frameSize_ = Size();

    nfeatures_.release();
    colors_.release();
    weights_.release();
    buf_.release();
}
template<typename _Tp, int chs>
int copyMakeBorder(/*const*/ Mat_<_Tp, chs>& src, Mat_<_Tp, chs>& dst, int top, int bottom, int left, int right,
    int borderType, const Scalar& value = Scalar())
{
    FBC_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);

    // For a submatrix, try to take border pixels from the parent matrix unless BORDER_ISOLATED is requested.
    if (src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0) {
        Size wholeSize;
        Point ofs;
        src.locateROI(wholeSize, ofs);
        int dtop = std::min(ofs.y, top);
        int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
        int dleft = std::min(ofs.x, left);
        int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
        src.adjustROI(dtop, dbottom, dleft, dright);
        top -= dtop;
        left -= dleft;
        bottom -= dbottom;
        right -= dright;
    }

    if (dst.empty() || dst.rows != (src.rows + top + bottom) || dst.cols != (src.cols + left + right)) {
        dst.release();
        dst = Mat_<_Tp, chs>(src.rows + top + bottom, src.cols + left + right);
    }

    if (top == 0 && left == 0 && bottom == 0 && right == 0) {
        if (src.data != dst.data || src.step != dst.step)
            src.copyTo(dst);
        return 0;
    }

    borderType &= ~BORDER_ISOLATED;

    if (borderType != BORDER_CONSTANT) {
        copyMakeBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, (int)src.elemSize(), borderType);
    } else {
        // Constant border: expand the Scalar into raw pixel data first.
        int cn = src.channels;
        AutoBuffer<double> buf(cn);
        scalarToRawData<_Tp, chs>(value, buf, cn);
        copyMakeConstBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, (int)src.elemSize(), (uchar*)(double*)buf);
    }

    return 0;
}
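For reference, the border semantics this template mirrors can be exercised with the stock cv::copyMakeBorder from OpenCV proper. The sketch below is illustrative only; the 3x3 input values and border widths are made up.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

int main()
{
    // Small single-channel input; values chosen only to make the border visible.
    cv::Mat src = (cv::Mat_<uchar>(3, 3) << 1, 2, 3,
                                            4, 5, 6,
                                            7, 8, 9);
    cv::Mat replicated, constant;

    // Non-constant branch above: edge pixels are extended outward.
    cv::copyMakeBorder(src, replicated, 1, 1, 2, 2, cv::BORDER_REPLICATE);

    // BORDER_CONSTANT branch above: the border is filled from the Scalar value.
    cv::copyMakeBorder(src, constant, 1, 1, 2, 2, cv::BORDER_CONSTANT, cv::Scalar(0));

    std::cout << "replicate:\n" << replicated << "\nconstant:\n" << constant << std::endl;
    return 0;
}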
void mapUpdateMsgHandler(MSG_INSTANCE msgInstance, void *callData, void *clientData)
{
    struct PlanMap* pm = (PlanMap*) callData;
    AppContext* ac = (AppContext*) clientData;
    //globalCounter++;
    //sendHS(42, globalCounter);

    pthread_mutex_lock(&ac->map_update_mutex);

    // Wrap the incoming map buffer (no copy) and draw a border (value 90) for display.
    Mat_<uchar> m = Mat(1120, 1120, CV_8UC1, pm->map);
    for (int i = 0; i < 1119; i++) {
        m(0, i) = 90;
        m(i, 0) = 90;
        m(1119, i) = 90;
        m(i, 1119) = 90;
    }

    // Copy the map and pose into the shared planner state, then wake any waiter.
    for (int i = 0; i < 1119; i++) {
        for (int j = 0; j < 1119; j++) {
            ac->plannerMap.map[i][j] = pm->map[i][j];
        }
    }
    ac->plannerMap.x = pm->x;
    ac->plannerMap.y = pm->y;

    pthread_cond_signal(&ac->map_update_cv);
    pthread_mutex_unlock(&ac->map_update_mutex);

    sendData();
    free(callData);

    imshow("map", m);
    m.release();
    //waitKey(10);
}
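The handler above follows the usual produce-then-signal pattern on map_update_mutex / map_update_cv. A consumer on the planner side would typically block on the same pair; the sketch below is hypothetical (the function name and the map_ready predicate are not in the original, which only signals), shown only to illustrate the matching wait side.

#include <pthread.h>

// Hypothetical consumer for the handler above: blocks until a map update has
// been signalled, then reads ac->plannerMap under the same mutex.
// `map_ready` is an assumed flag; a real consumer needs some predicate like
// this to avoid missed or spurious wakeups.
void waitForMapUpdate(AppContext* ac)
{
    pthread_mutex_lock(&ac->map_update_mutex);
    while (!ac->map_ready) {                       // assumed predicate field
        pthread_cond_wait(&ac->map_update_cv, &ac->map_update_mutex);
    }
    ac->map_ready = 0;
    // ... read ac->plannerMap.map / .x / .y here, still holding the mutex ...
    pthread_mutex_unlock(&ac->map_update_mutex);
}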
int main(int argc, char **argv)
{
    int c;  // getopt() returns int; using char breaks the != -1 check on unsigned-char platforms
    char *filepath = NULL;
    int cut_horizontal = 0;
    int cut_vertical = 0;
    int timed = 0;
    int show = 0;

    while ((c = getopt(argc, argv, "f:h:v:ts")) != -1) {
        switch (c) {
        case 'f':
            filepath = optarg;
            break;
        case 'h':
            cut_horizontal = (int)strtol(optarg, NULL, 10);
            break;
        case 'v':
            cut_vertical = (int)strtol(optarg, NULL, 10);
            break;
        case 't':
            timed = 1;
            break;
        case 's':
            show = 1;
            break;
        default:
            exit(1);
        }
    }

    // OpenCL boilerplate
    std::string ..._kernel_str;
    std::string ..._name_str = std::string("...");
    std::string ..._kernel_file = std::string("...");
    cl_vars_t cv;
    cl_kernel ...;
    readFile(..._kernel_file, ..._kernel_str);
    initialize_ocl(cv);
    compile_ocl_program(..., cv, ..._kernel_str.c_str(), ..._name_str.c_str());

    // Read image
    Mat_<Vec3b> image = imread(filepath);
    if (!image.data) {
        cout << "Invalid input" << endl;
        image.release();
        return -1;
    }
    if (show) {
        imshow("Original Image", image);
    }

    SeamCarver s(image);
    // imshow("Gradient", s.energy);
    // Mat tmp = s.energy/195075.0*255.0;
    // s.energy.convertTo(tmp,CV_8U,-1);
    // imwrite("bench_gradient.jpg", tmp);
    // vector<uint> sm = s.findVerticalSeam();
    // s.showVerticalSeam(sm);

    // Carving happens here
    double start = get_time();
    ...;
    double elapsed = get_time() - start;
    // --------------------
    // double start = get_time();
    // for (int i = 0; i < cut_horizontal; ++i) {
    //     vector<uint> seam = s.findHorizontalSeam();
    //     // s.showHorizontalSeam(seam);
    //     s.removeHorizontalSeam(seam);
    // }
    // for (int i = 0; i < cut_vertical; ++i) {
    //     vector<uint> seam = s.findVerticalSeam();
    //     // s.showVerticalSeam(seam);
    //     s.removeVerticalSeam(seam);
    // }
    // double elapsed = get_time() - start;

    if (timed) {
        printf("Elapsed time: %.3lf seconds\n", elapsed);
    }

    Mat_<Vec3b> output = s.getImage();
    imwrite("scarved.jpg", output);
    if (show) {
        imshow("Carved Image", output);
        while (waitKey(20) != 27);
    }
    // cout << "Seam Length: " << seam.size() << endl;
    // s.showImage();
    // s.showEnergy();
    // imwrite("bench_carved.jpg", s.getImage());
    // for (int i = 0; i < 5; ++i) {
    //     for (int j = 0; j < 5; ++j) {
    //         cout << s.energy.at<uint32_t>(i,j) << " ";
    //     }
    //     cout << endl;
    // }

    uninitialize_ocl(cv);
    ...;
    clReleaseMemObject(...);
    image.release();
    return 0;
}
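get_time() is used for timing above but not defined in this file. A minimal wall-clock helper matching its use here (double seconds, so differences give elapsed time) could look like the following sketch, assuming POSIX gettimeofday; the project's actual helper may differ.

#include <sys/time.h>

// Minimal sketch of a get_time()-style helper: returns wall-clock time in
// seconds as a double, so that `get_time() - start` yields elapsed seconds.
static double get_time()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
}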
void NNLSOptimizer::estimateExpressionParameters(const vector<Point2f> &featurePoints,
    const Mat &cameraMatrix, const Mat& lensDist, Face* face_ptr,
    const vector<int> &point_indices, const Mat &rotation, const Mat &translation,
    vector<double> &weights_ex)
{
    Mat_<double> weakCamera(2,3);
    //matrices for A_ex * w_ex = f and A_id * w_id = f
    Mat_<double> A_ex, seg_A_ex;
    //matrix Z = kron(weights, Identity_matrix)
    Mat_<double> Z_ex;
    Mat_<double> ZU;
    Mat_<double> pr, Pt;
    //core tensor rows
    Mat_<double> Mi;
    //featurePoints converted to 2x1 matrices
    Mat_<double> fi;
    Mat_<double> f;

    int index = 0;
    const int exr_size = model->getExpSize();
    const int id_size = model->getIdSize();

    double *w_id = new double[id_size];
    double *w_exp = new double[exr_size];

    Mat_<double> rmatrix;
    Mat_<double> x_t(1,exr_size);
    Mat_<double> y_t(1,id_size);
    Mat_<double> x(exr_size,1);
    Mat_<double> y(id_size,1);
    //used for x_t*U_ex and y_t*U_id
    Mat_<double> lin_comb_x(exr_size,1);
    Mat_<double> lin_comb_y(id_size,1);

    double Z_avg;
    double average_depth;
    Point3f p;
    Mat_<double> proP;
    Mat_<double> pM(3,1);

    Rodrigues(rotation,rmatrix);

    //get weights from the current face instance
    face_ptr->getWeights(w_id,id_size,w_exp,exr_size);

    //find average depth
    average_depth = face_ptr->getAverageDepth();

    pM(0,0) = 1;
    pM(1,0) = 1;
    pM(2,0) = average_depth;

    proP = cameraMatrix*rmatrix*pM;
    proP = proP + cameraMatrix*translation;
    Z_avg = proP(0,2);

    /***************************************************************/
    /* create weak-perspective camera matrix from camera matrix    */
    /***************************************************************/
    for(int i=0;i<2;i++)
        for(int j=0;j<3;j++)
            weakCamera(i,j) = cameraMatrix.at<double>(i,j);

    f = Mat_<double>(featurePoints.size()*2,1);
    fi = Mat_<double>(2,1);

    //precompute translation
    Pt = (1./translation.at<double>(2,0)) * (weakCamera*translation);

    //preprocess points and core tensor rows
    //for points subtract translation too
    for(unsigned int i=0;i<featurePoints.size();i++) {
        fi = Mat_<double>(2,1);
        fi(0,0) = featurePoints[i].x;
        fi(1,0) = featurePoints[i].y;
        fi = fi - Pt;
        //the "+ 0.0" turns the right side into an expression so the values are
        //written into the existing row instead of rebinding the row header
        f.row(2*i) = fi.row(0) + 0.0;
        f.row(2*i+1) = fi.row(1) + 0.0;
    }

    pr = (1.0/Z_avg)*weakCamera*rmatrix;

    A_ex = Mat_<double>::zeros(2*featurePoints.size(),exr_size);
    seg_A_ex = Mat_<double>::zeros(2*featurePoints.size(),exr_size);

    for(int i=0;i<id_size;i++)
        y_t(0,i) = w_id[i];
    lin_comb_y = (y_t*u_id).t();

    Z_ex = Matrix::kronecker(lin_comb_y,Mat_<double>::eye(exr_size,exr_size));
    ZU = Z_ex*(u_ex.t());

    for(unsigned int i=0;i<point_indices.size();++i) {
        index = point_indices[i];
        seg_A_ex = pr*M[index]*ZU;
        A_ex.row(2*i) = seg_A_ex.row(0) + 0.0;
        A_ex.row(2*i+1) = seg_A_ex.row(1) + 0.0;
    }

    //do not use the guess in the first frame but do for every following frame
    for(int i=0;i<exr_size;i++)
        x_t(0,i) = x(i,0) = w_exp[i];

    // Mat_<double> temp = (A_ex*x - f);
    // Mat_<double> e = temp.t()*temp;
    // cout << "exp, error before " << e(0,0)<<endl;

    //optimize using sequential coordinate descent
    this->scannls(A_ex,f,x);

    // temp = (A_ex*x - f);
    // e = temp.t()*temp;
    // cout << "exp, error after " << e(0,0)<<endl;

    for(int i=0;i<exr_size;i++) {
        w_exp[i] = x(i,0);
    }
    for(int i=0;i<exr_size;i++) {
        weights_ex.push_back(w_exp[i]);
    }

    face_ptr->setNewIdentityAndExpression(w_id,w_exp);
    //unnecessary, it seems
    face_ptr->setAverageDepth(average_depth);

    ZU.release();
    Z_ex.release();
    A_ex.release();
    f.release();

    delete[] w_id;
    delete[] w_exp;
}
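scannls(A, f, x) is called above but defined elsewhere. For context, here is a compact sketch of the technique its name suggests: sequential coordinate-descent NNLS, minimizing ||A*x - f||^2 subject to x >= 0. It is an illustration, not the project's implementation; the helper name scannls_sketch, the sweep limit, and the lack of a convergence test are all made up.

#include <opencv2/core.hpp>
#include <algorithm>

// Cycle over coordinates, each time solving the 1-D problem exactly and
// clamping at zero; the gradient is updated incrementally.
static void scannls_sketch(const cv::Mat_<double>& A, const cv::Mat_<double>& f,
                           cv::Mat_<double>& x, int max_sweeps = 100)
{
    const int n = A.cols;
    cv::Mat_<double> H = A.t() * A;        // n x n Gram matrix
    cv::Mat_<double> b = A.t() * f;        // n x 1
    cv::Mat_<double> grad = H * x - b;     // gradient of 0.5*||A*x - f||^2

    for (int sweep = 0; sweep < max_sweeps; ++sweep) {
        for (int i = 0; i < n; ++i) {
            if (H(i, i) <= 0.0) continue;
            double xi_new = std::max(0.0, x(i, 0) - grad(i, 0) / H(i, i));
            double delta = xi_new - x(i, 0);
            if (delta != 0.0) {
                grad += delta * H.col(i);  // keep the gradient consistent
                x(i, 0) = xi_new;
            }
        }
    }
}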
void NNLSOptimizer::estimateModelParameters(const vector<Point2f> &featurePoints,
    const Mat &cameraMatrix, const Mat& lensDist, Face* face_ptr,
    const vector<int> &point_indices, const Mat &rotation, const Mat &translation,
    vector<double> &weights_id, vector<double> &weights_ex)
{
    Mat_<double> weakCamera(2,3);
    //matrices for A_ex * w_ex = f and A_id * w_id = f
    Mat_<double> A_ex, A_id;
    //matrix Z = kron(weights, Identity_matrix)
    Mat_<double> Z_ex, Z_id;
    Mat_<double> ZU;
    Mat_<double> pr, Pt, PRM, prm;
    //core tensor rows
    Mat_<double> Mi;
    //featurePoints converted to 2x1 matrices
    Mat_<double> fi;
    Mat_<double> f;
    Mat_<double> O;

    int index = 0;
    const int exr_size = model->getExpSize();
    const int id_size = model->getIdSize();

    double *w_id = new double[id_size];
    double *w_exp = new double[exr_size];

    Mat_<double> rmatrix;
    Mat_<double> x_t(1,exr_size);
    Mat_<double> y_t(1,id_size);
    Mat_<double> x(exr_size,1);
    Mat_<double> y(id_size,1);
    //used for x_t*U_ex and y_t*U_id
    Mat_<double> lin_comb_x(exr_size,1);
    Mat_<double> lin_comb_y(id_size,1);

    double Z_avg;
    double average_depth;
    Point3f p;
    Mat_<double> proP;
    Mat_<double> pM(3,1);

    Rodrigues(rotation,rmatrix);

    //get weights from the current face instance
    face_ptr->getWeights(w_id,id_size,w_exp,exr_size);

    average_depth = face_ptr->getAverageDepth();

    pM(0,0) = 1;
    pM(1,0) = 1;
    pM(2,0) = average_depth;

    proP = cameraMatrix*rmatrix*pM;
    proP = proP + cameraMatrix*translation;
    Z_avg = proP(0,2);

    /***************************************************************/
    /* create weak-perspective camera matrix from camera matrix    */
    /***************************************************************/
    for(int i=0;i<2;i++)
        for(int j=0;j<3;j++)
            weakCamera(i,j) = cameraMatrix.at<double>(i,j);

    f = Mat_<double>(featurePoints.size()*2,1);
    fi = Mat_<double>(2,1);

    //precompute translation
    Pt = (1./translation.at<double>(2,0)) * (weakCamera*translation);

    //preprocess points and core tensor rows
    //for points subtract translation too
    for(unsigned int i=0;i<featurePoints.size();i++) {
        fi = Mat_<double>(2,1);
        fi(0,0) = featurePoints[i].x;
        fi(1,0) = featurePoints[i].y;
        fi = fi - Pt;
        f.row(2*i) = fi.row(0) + 0.0;
        f.row(2*i+1) = fi.row(1) + 0.0;
    }

    pr = (1.0/Z_avg)*weakCamera*rmatrix;

    PRM = Mat_<double>(2*featurePoints.size(),exr_size*id_size);
    prm = Mat_<double>(2,exr_size*id_size);
    for(unsigned int i=0;i<point_indices.size();++i) {
        index = point_indices[i];
        prm = pr*M[index];
        PRM.row(2*i) = prm.row(0) + 0.0;
        PRM.row(2*i+1) = prm.row(1) + 0.0;
    }

    //do not use the guess in the first frame but do for every following frame
    for(int i=0;i<exr_size;i++)
        x_t(0,i) = x(i,0) = w_exp[i];
    for(int i=0;i<id_size;i++)
        y_t(0,i) = y(i,0) = w_id[i];

    A_ex = Mat_<double>::zeros(2*featurePoints.size(),exr_size);
    A_id = Mat_<double>::zeros(2*featurePoints.size(),id_size);

    //alternate between solving for expression weights and identity weights
    for(int count = 0; count < max_iterations; count++) {
        //put the guesses into matrix y and x
        y_t = y.t();
        lin_comb_y = (y_t*u_id).t();

        Z_ex = Matrix::kronecker(lin_comb_y,Mat_<double>::eye(exr_size,exr_size));
        ZU = Z_ex*(u_ex.t());
        A_ex = PRM*ZU;

        this->scannls(A_ex,f,x);

        x_t = x.t();
        lin_comb_x = (x_t*u_ex).t();

        Z_id = Matrix::kronecker(Mat_<double>::eye(id_size,id_size),lin_comb_x);
        ZU = Z_id*(u_id.t());

        /* compute the formula :
         * Sum( U*Z'*Mi'*R'*PW'*PW*R*Mi*Z*U' )*y = Sum( U*Z'*Mi'*R'*PW'*fi - (1/tz)*U*Z'*Mi'*R'*PW'*PW'*t )
         */
        A_id = PRM*ZU;

        // Mat_<double> temp = (A_id*y - f);
        // Mat_<double> e = temp.t()*temp;
        // cout << "id, error before " << e(0,0)<<endl;

        this->scannls(A_id,f,y);

        // temp = (A_id*y - f);
        // e = temp.t()*temp;
        // cout << "id, error after " << e(0,0)<<endl;
    }

    for(int i=0;i<exr_size;i++) {
        w_exp[i] = x(i,0);
    }
    for(int i=0;i<id_size;i++) {
        w_id[i] = y(i,0);
    }

    //we cannot normalize because then we lose 1/z_avg which is the scale
    // Vector3::normalize(w_exp, exr_size);
    // Vector3::normalize(w_id, id_size);

    for(int i=0;i<exr_size;i++) {
        weights_ex.push_back(w_exp[i]);
    }
    for(int i=0;i<id_size;i++) {
        weights_id.push_back(w_id[i]);
    }

    face_ptr->setNewIdentityAndExpression(w_id,w_exp);
    face_ptr->setAverageDepth(average_depth);

    prm.release();
    PRM.release();
    ZU.release();
    Z_id.release();
    A_id.release();
    Z_ex.release();
    A_ex.release();
    f.release();

    delete[] w_id;
    delete[] w_exp;
}
void NNLSOptimizer::estimateIdentityParameters(const vector<vector<Point2f> > &featurePointsVector,
    const Mat &cameraMatrix, const Mat& lensDist, Face* face_ptr,
    const vector<vector<int> > &point_indices_vector,
    const vector<Mat> &rotation, const vector<Mat> &translation,
    const vector<vector<double> > &weights_ex, vector<double> &weights_id)
{
    Mat_<double> weakCamera(2,3);
    //matrices for A_id * w_id = f
    Mat_<double> A_id, seg_A_id;
    //matrix Z = kron(weights, Identity_matrix)
    Mat_<double> Z_id;
    Mat_<double> ZU;
    Mat_<double> pr, Pt;
    //featurePoints converted to 2x1 matrices
    Mat_<double> fi;
    Mat_<double> f;

    int index = 0;
    //total number of feature points
    int featurePointNum = 0;

    const int exr_size = model->getExpSize();
    const int id_size = model->getIdSize();

    double *w_id = new double[id_size];
    double *w_exp = new double[exr_size];

    Mat_<double> rmatrix;
    Mat_<double> x_t(1,exr_size);
    Mat_<double> y_t(1,id_size);
    Mat_<double> x(exr_size,1);
    Mat_<double> y(id_size,1);
    //used for x_t*U_ex and y_t*U_id
    Mat_<double> lin_comb_x(exr_size,1);

    double Z_avg;
    double average_depth;
    Mat_<double> proP;
    Mat_<double> pM(3,1);

    //get weights from the current face instance
    face_ptr->getWeights(w_id,id_size,w_exp,exr_size);

    /***************************************************************/
    /* create weak-perspective camera matrix from camera matrix    */
    /***************************************************************/
    for(int i=0;i<2;i++)
        for(int j=0;j<3;j++)
            weakCamera(i,j) = cameraMatrix.at<double>(i,j);

    for(unsigned int i=0;i<featurePointsVector.size();i++)
        featurePointNum += featurePointsVector[i].size();

    f = Mat_<double>(featurePointNum*2,1);
    fi = Mat_<double>(2,1);

    //preprocess points and core tensor rows
    //for points subtract translation too
    for(unsigned int i=0, count=0;i<featurePointsVector.size();i++) {
        //precompute translation
        Pt = (1./translation[i].at<double>(2,0)) * (weakCamera*translation[i]);

        for(unsigned int j=0;j<featurePointsVector[i].size();j++) {
            fi = Mat_<double>(2,1);
            fi(0,0) = featurePointsVector[i][j].x;
            fi(1,0) = featurePointsVector[i][j].y;
            fi = fi - Pt;
            f.row(count + 2*j) = fi.row(0) + 0.0;
            f.row(count + 2*j+1) = fi.row(1) + 0.0;
        }
        count += 2*featurePointsVector[i].size();
    }

    A_id = Mat_<double>(2*featurePointNum,id_size);
    seg_A_id = Mat_<double>(2,id_size);

    average_depth = face_ptr->getAverageDepth();
    pM(0,0) = 1;
    pM(1,0) = 1;
    pM(2,0) = average_depth;

    for(unsigned int i=0, count = 0;i<point_indices_vector.size();++i) {
        Rodrigues(rotation[i],rmatrix);

        proP = cameraMatrix*rmatrix*pM;
        proP = proP + cameraMatrix*translation[i];
        Z_avg = proP(0,2);

        pr = (1.0/Z_avg)*weakCamera*rmatrix;

        //load the appropriate weights for the i-th frame
        for(unsigned int k=0;k<weights_ex[i].size();k++)
            x_t(0,k) = weights_ex[i][k];
        lin_comb_x = (x_t*u_ex).t();

        Z_id = Matrix::kronecker(Mat_<double>::eye(id_size,id_size),lin_comb_x);
        ZU = Z_id*(u_id.t());

        for(unsigned int j=0;j<point_indices_vector[i].size();++j) {
            index = point_indices_vector[i][j];
            seg_A_id = pr*M[index]*ZU;
            A_id.row(count + 2*j) = seg_A_id.row(0) + 0.0;
            A_id.row(count + 2*j+1) = seg_A_id.row(1) + 0.0;
        }
        count += 2*point_indices_vector[i].size();
    }

    //do not use the guess in the first frame but do for every following frame
    for(int i=0;i<id_size;i++)
        y_t(0,i) = y(i,0) = w_id[i];

    // Mat_<double> temp = (A_id*y - f);
    // Mat_<double> e = temp.t()*temp;
    // cout << "id, error before " << e(0,0)<<endl;

    //optimize using sequential coordinate descent
    this->scannls(A_id,f,y);

    // temp = (A_id*y - f);
    // e = temp.t()*temp;
    // cout << "id, error after " << e(0,0)<<endl;

    for(int i=0;i<id_size;i++) {
        w_id[i] = y(i,0);
    }
    for(int i=0;i<exr_size;i++) {
        w_exp[i] = weights_ex[0][i];
    }

    for(int i=0;i<id_size;i++) {
        weights_id.push_back(w_id[i]);
    }

    face_ptr->setNewIdentityAndExpression(w_id,w_exp);
    face_ptr->setAverageDepth(average_depth);

    ZU.release();
    Z_id.release();
    A_id.release();
    f.release();

    delete[] w_id;
    delete[] w_exp;
}