void RGBDCamera::update(const RawFrame* this_frame) {
  //Check the timestamp, and skip if we have already seen this frame
  if (this_frame->timestamp <= latest_stamp_) {
    return;
  } else {
    latest_stamp_ = this_frame->timestamp;
  }

  //Apply bilateral filter to incoming depth
  uint16_t* filtered_depth;
  cudaMalloc((void**)&filtered_depth, this_frame->width*this_frame->height*sizeof(uint16_t));
  bilateralFilter(this_frame->depth, filtered_depth, this_frame->width, this_frame->height);

  //Convert the input color data to intensity
  float* temp_intensity;
  cudaMalloc((void**)&temp_intensity, this_frame->width*this_frame->height*sizeof(float));
  colorToIntensity(this_frame->color, temp_intensity, this_frame->width*this_frame->height);

  //Create pyramids
  for (int i = 0; i < PYRAMID_DEPTH; i++) {
    //Allocate this pyramid level on the first two passes (after the swap at the end of
    //this function, both the current and last frame sets end up allocated)
    if (pass_ < 2) {
      current_icp_frame_[i] = new ICPFrame(this_frame->width/pow(2,i), this_frame->height/pow(2,i));
      current_rgbd_frame_[i] = new RGBDFrame(this_frame->width/pow(2,i), this_frame->height/pow(2,i));
    }

    //Add ICP data
    generateVertexMap(filtered_depth, current_icp_frame_[i]->vertex, current_icp_frame_[i]->width,
        current_icp_frame_[i]->height, focal_length_, make_int2(this_frame->width, this_frame->height));
    generateNormalMap(current_icp_frame_[i]->vertex, current_icp_frame_[i]->normal,
        current_icp_frame_[i]->width, current_icp_frame_[i]->height);

    //Add RGBD data
    cudaMemcpy(current_rgbd_frame_[i]->vertex, current_icp_frame_[i]->vertex,
        current_rgbd_frame_[i]->width*current_rgbd_frame_[i]->height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
    cudaMemcpy(current_rgbd_frame_[i]->intensity, temp_intensity,
        current_rgbd_frame_[i]->width*current_rgbd_frame_[i]->height*sizeof(float), cudaMemcpyDeviceToDevice);

    //Downsample depth and color if not the last iteration
    if (i != (PYRAMID_DEPTH-1)) {
      subsampleDepth(filtered_depth, current_icp_frame_[i]->width, current_icp_frame_[i]->height);
      subsample(temp_intensity, current_rgbd_frame_[i]->width, current_rgbd_frame_[i]->height);
      cudaDeviceSynchronize();
    }
  }

  //Free the filtered depth and temporary intensity buffers since they are no longer needed
  cudaFree(filtered_depth);
  cudaFree(temp_intensity);

  if (pass_ >= 1) {
    glm::mat4 update_trans(1.0f);

    //Loop through pyramids backwards (coarse first)
    for (int i = PYRAMID_DEPTH - 1; i >= 0; i--) {
      //Get a copy of the ICP frame for this pyramid level
      ICPFrame icp_f(current_icp_frame_[i]->width, current_icp_frame_[i]->height);
      cudaMemcpy(icp_f.vertex, current_icp_frame_[i]->vertex,
          icp_f.width*icp_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
      cudaMemcpy(icp_f.normal, current_icp_frame_[i]->normal,
          icp_f.width*icp_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);

      //Get a copy of the RGBD frame for this pyramid level
      //RGBDFrame rgbd_f(current_rgbd_frame_[i]->width, current_rgbd_frame_[i]->height);
      //cudaMemcpy(rgbd_f.vertex, current_rgbd_frame_[i]->vertex,
      //    rgbd_f.width*rgbd_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
      //cudaMemcpy(rgbd_f.intensity, current_rgbd_frame_[i]->intensity,
      //    rgbd_f.width*rgbd_f.height*sizeof(float), cudaMemcpyDeviceToDevice);

      //Apply the most recent update to the points/normals
      if (i < (PYRAMID_DEPTH-1)) {
        transformVertexMap(icp_f.vertex, update_trans, icp_f.width*icp_f.height);
        transformNormalMap(icp_f.normal, update_trans, icp_f.width*icp_f.height);
        cudaDeviceSynchronize();
      }

      //Loop through iterations
      for (int j = 0; j < PYRAMID_ITERS[i]; j++) {
        //Get the Geometric ICP cost values
        float A1[6 * 6];
        float b1[6];
        computeICPCost2(last_icp_frame_[i], icp_f, A1, b1);

        //Get the Photometric RGB-D cost values
        //float A2[6*6];
        //float b2[6];
        //compueRGBDCost(last_rgbd_frame_, rgbd_f, A2, b2);

        //Combine the two
        //for (size_t k = 0; k < 6; k++) {
        //  for (size_t l = 0; l < 6; l++) {
        //    A1[6 * k + l] += A2[6 * k + l];
        //  }
        //  b1[k] += b2[k];
        //}

        //Solve for the optimized camera transformation
        float x[6];
        solveCholesky(6, A1, b1, x);

        //Check for NaN/divergence
        if (isnan(x[0]) || isnan(x[1]) || isnan(x[2]) ||
            isnan(x[3]) || isnan(x[4]) || isnan(x[5])) {
          printf("Camera tracking is lost.\n");
          break;
        }

        //Update position/orientation of the camera
        //(rotation angles from the solver are converted from radians to degrees for glm::rotate)
        glm::mat4 this_trans =
            glm::rotate(glm::mat4(1.0f), -x[2] * 180.0f / 3.14159f, glm::vec3(0.0f, 0.0f, 1.0f)) *
            glm::rotate(glm::mat4(1.0f), -x[1] * 180.0f / 3.14159f, glm::vec3(0.0f, 1.0f, 0.0f)) *
            glm::rotate(glm::mat4(1.0f), -x[0] * 180.0f / 3.14159f, glm::vec3(1.0f, 0.0f, 0.0f)) *
            glm::translate(glm::mat4(1.0f), glm::vec3(x[3], x[4], x[5]));
        update_trans = this_trans * update_trans;

        //Apply the update to the points/normals
        if (j < (PYRAMID_ITERS[i] - 1)) {
          transformVertexMap(icp_f.vertex, this_trans, icp_f.width*icp_f.height);
          transformNormalMap(icp_f.normal, this_trans, icp_f.width*icp_f.height);
          cudaDeviceSynchronize();
        }
      }
    }

    //Update the global transform with the result
    position_ = glm::vec3(glm::vec4(position_, 1.0f) * update_trans);
    orientation_ = glm::mat3(glm::mat4(orientation_) * update_trans);
  }

  if (pass_ < 2) {
    pass_++;
  }

  //Swap current and last frames
  for (int i = 0; i < PYRAMID_DEPTH; i++) {
    ICPFrame* temp = current_icp_frame_[i];
    current_icp_frame_[i] = last_icp_frame_[i];
    last_icp_frame_[i] = temp;

    //TODO: Longterm, only RGBD should do this. ICP should not swap, as last_frame
    //should be updated by a different function
    RGBDFrame* temp2 = current_rgbd_frame_[i];
    current_rgbd_frame_[i] = last_rgbd_frame_[i];
    last_rgbd_frame_[i] = temp2;
  }
}
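/*
 * Both functions in this listing rely on solveCholesky(), whose definition is not
 * included here. As a point of reference only, the sketch below shows what the dense
 * float* overload used by RGBDCamera::update() might look like. Assumptions: A is a
 * row-major n x n symmetric positive-definite matrix, there is no pivoting or failure
 * handling, and the name solveCholeskySketch is hypothetical. If A is singular or
 * indefinite, the sqrt produces NaNs, which is consistent with the NaN check performed
 * after the call in update().
 */
#include <cmath>
#include <vector>

static void solveCholeskySketch(int n, const float* A, const float* b, float* x) {
  // Factor A = L * L^T with L lower-triangular (Cholesky-Banachiewicz).
  std::vector<float> L(n * n, 0.0f);
  for (int i = 0; i < n; i++) {
    for (int j = 0; j <= i; j++) {
      float sum = A[i * n + j];
      for (int k = 0; k < j; k++) sum -= L[i * n + k] * L[j * n + k];
      L[i * n + j] = (i == j) ? std::sqrt(sum) : sum / L[j * n + j];
    }
  }
  // Forward substitution: solve L * y = b (y stored in x).
  for (int i = 0; i < n; i++) {
    float sum = b[i];
    for (int k = 0; k < i; k++) sum -= L[i * n + k] * x[k];
    x[i] = sum / L[i * n + i];
  }
  // Back substitution: solve L^T * x = y.
  for (int i = n - 1; i >= 0; i--) {
    float sum = x[i];
    for (int k = i + 1; k < n; k++) sum -= L[k * n + i] * x[k];
    x[i] = sum / L[i * n + i];
  }
}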
void SurfacePropagation::applyConstraintOLD() {
  if (!imp_int) return;
  // imp_int->getImplicit()->interpolate(ps,flexible);
  Implicit *imp = imp_int->getImplicit();
  if (!imp) return;
  if (speedfac == 0.0) return;

  int i,j,k;
  gmVector3 gradient;
  gmVector3 xi;
  int qlen = imp->qlen();  // # of parameters
  int n = ps->size();      // # of control particles

  // Allocate some derivative vectors
  DoubleArray dFdQi(0.0, qlen);
  DoubleArray dFdQj(0.0, qlen);
  DoubleArray dqdt(0.0, qlen);

  /* Solve for Lagrangian multipliers
   * Equation (7) of WH.
   *
   * Sum_j (dFdQ[i].dFdQ[j]) lambda[j] =
   *     dFdQ[i].dQdt + grad(x[i]).v[i] + phi*proc(x[i])
   *
   * Ax = b
   * x = vector of Lagrangians lambda[j], one per control particle
   * A = matrix of dot products of dFdQi with dFdQj
   * b = RHS of above equation
   */
  TNT::Vector<double> b(n);
  TNT::Matrix<double> A(n,n);
  TNT::Vector<double> x(n);

  // Cycle through control particles
  for (i=0; i<n; i++) {
    // dFdQi = derivative of F wrt Q at control particle i
    imp->procq(position->x[i], dFdQi);

#ifdef DEBUG_MATRIX
    std::cerr << "dFdQ: ";
    for(int k=0;k<dFdQi.size();k++)
      std::cerr << dFdQi[k] << " ";
    std::cerr << std::endl;
#endif

    // b[i] = -speedfac * speed function * magnitude of grad(x[i])
    b[i] = -speedfac*speed(i)*imp->grad(position->x[i]).length();

    // Build row i of matrix A
    for(j=0; j<n; j++) {
      A[i][j] = 0.0;

      // Get derivative of F wrt Q at particle j
      imp->procq(position->x[j], dFdQj);

      // Aij = dFdQi . dFdQj
      for (k = 0; k < qlen; k++)
        A[i][j] += dFdQi[k]*dFdQj[k];
    } // end matrix loop
  } // end control particle loop

#ifdef DEBUG_MATRIX
  std::cerr << A;
#endif

  // Try solving Ax = b using Cholesky
  if (!solveCholesky(A, x, b)) {
    //std::cerr << "Using SVD... " << std::endl;
    // Let SVD take care of it
    SVD svd;
    if (!svd.solve(A, x, b))
      std::cerr << "problem solving constraints!!" << std::endl;
  }

#ifdef DEBUG_MATRIX
  TNT::Vector<double> r(n);
  r = A*x - b;
  double residual = sqrt(TNT::dot_prod(r, r));
  std::cerr << "residual: " << residual << std::endl;
#endif

  // Update parameters: dqdt = Sum_j lambda[j] * dFdQj
  for(j=0; j<n; j++) {
    imp->procq(position->x[j], dFdQj);
    dFdQj *= (double)x[j];
    for(k=0; k<qlen; k++)
      dqdt[k] += dFdQj[k];
  }

  // Apply the changes to the implicit surface parameters
  std::valarray<double> q(qlen);
  imp->getq(q);
  q += dqdt * (double)ps->dt;
  imp->setq(q);
}