void eigen_hermitian_impl( const matrix<std::complex<T1>,D1, A1>& A, matrix<std::complex<T2>, D2, A2>& V, Otor o, const T eps = T( 1.0e-10 ) )
{
    assert( A.row() == A.col() );
    std::size_t const n = A.row();

    // Embed the Hermitian matrix A = A_ + i B_ into the 2n x 2n real symmetric
    // matrix [[A_, -B_], [B_, A_]]; its spectrum is that of A with every
    // eigenvalue doubled.
    auto const A_ = feng::real( A );
    auto const B_ = feng::imag( A );
    auto const AA = ( A_ || (-B_) ) && ( B_ || A_ );

    matrix<T1,D1> VV(n+n, n+n);
    matrix<T1,D1> LL(n+n, n+n);
    eigen_real_symmetric( AA, VV, LL, eps );

    // Sort the 2n (doubled) eigenvalues; every other entry is a distinct
    // eigenvalue of A.
    std::vector<T1> vec(n+n);
    std::copy( LL.diag_begin(), LL.diag_end(), vec.begin() );
    std::sort( vec.begin(), vec.end() );

    V.resize(n,n);
    for ( std::size_t i = 0; i != n; ++i )
    {
        std::size_t const offset = std::distance( LL.diag_begin(), std::find( LL.diag_begin(), LL.diag_end(), vec[i+i] ) );
        assert( offset < n+n );
        // The first n rows of the real eigenvector hold the real part of the
        // complex eigenvector, the last n rows hold the imaginary part.
        feng::for_each( V.col_begin(i), V.col_end(i), VV.col_begin(offset),   [](std::complex<T2>& c, T1 const r) { c.real(r); } );
        feng::for_each( V.col_begin(i), V.col_end(i), VV.col_begin(offset)+n, [](std::complex<T2>& c, T1 const i) { c.imag(i); } );
        *o++ = vec[i+i];
    }
}//eigen_hermitian
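The routine above relies on the standard embedding of an n x n Hermitian matrix A = Re(A) + i Im(A) into the 2n x 2n real symmetric matrix [[Re(A), -Im(A)], [Im(A), Re(A)]], whose spectrum is that of A with every eigenvalue doubled; this is why the sorted list of 2n eigenvalues is read in steps of two (vec[i+i]). Below is a minimal, self-contained sketch of that embedding using plain std::vector storage rather than the feng::matrix type used above; the helper name is hypothetical.

// Minimal sketch (not part of the library above): builds the 2n x 2n real
// symmetric embedding [[Re(A), -Im(A)], [Im(A), Re(A)]] of an n x n Hermitian
// matrix stored row-major as a flat vector of std::complex<double>.
#include <complex>
#include <cstddef>
#include <vector>

std::vector<double> real_embedding(const std::vector<std::complex<double>>& A,
                                   std::size_t n)
{
    std::vector<double> M(4 * n * n, 0.0);       // (2n) x (2n), row-major
    const std::size_t N = 2 * n;
    for (std::size_t i = 0; i < n; ++i) {
        for (std::size_t j = 0; j < n; ++j) {
            const double re = A[i * n + j].real();
            const double im = A[i * n + j].imag();
            M[i * N + j]             = re;        // top-left:     Re(A)
            M[i * N + (j + n)]       = -im;       // top-right:   -Im(A)
            M[(i + n) * N + j]       = im;        // bottom-left:  Im(A)
            M[(i + n) * N + (j + n)] = re;        // bottom-right: Re(A)
        }
    }
    return M;
}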
void decode_arp(const u_char * packet)
{
    struct arphdr * arp_t = (struct arphdr *)packet;

    V(1, "ARP %s ", str_arp_op(htons(arp_t->ar_op)));
    VV(0, "Protocol : %s\n", str_arp_pro(htons(arp_t->ar_pro)));
    VVV(1, "Hardware Type : %s\n", str_arp_hw(htons(arp_t->ar_hrd)));

    const u_char * arp_data = packet + sizeof(struct arphdr);
    /* Allocate the address lengths announced in the header;
     * sizeof(arp_t->ar_hln) would only allocate a single byte. */
    u_char * hwd_addr = malloc(arp_t->ar_hln);
    u_char * pro_addr = malloc(arp_t->ar_pln);

    struct ether_addr mac, mac2;
    struct in_addr ip, ip2;

    /* Sender hardware / protocol addresses. */
    memcpy(hwd_addr, arp_data, arp_t->ar_hln);
    memcpy(pro_addr, arp_data + arp_t->ar_hln, arp_t->ar_pln);
    VV(1, "From : ");
    if(htons(arp_t->ar_hrd) == 0x01 && arp_t->ar_hln == ETH_ALEN) {
        memcpy(&(mac.ether_addr_octet), hwd_addr, ETH_ALEN);
        VV(0, "%s (", ether_ntoa(&mac));
    }
    if(htons(arp_t->ar_pro) == 0x0800 && arp_t->ar_pln == 0x04) {
        memcpy(&(ip.s_addr), pro_addr, arp_t->ar_pln);
        VV(0, "%s)\n", inet_ntoa(ip));
    }

    /* Target hardware / protocol addresses. */
    memcpy(hwd_addr, arp_data + arp_t->ar_hln + arp_t->ar_pln, arp_t->ar_hln);
    memcpy(pro_addr, arp_data + 2*arp_t->ar_hln + arp_t->ar_pln, arp_t->ar_pln);
    VV(1, "To : ");
    if(htons(arp_t->ar_hrd) == 0x01 && arp_t->ar_hln == ETH_ALEN) {
        memcpy(&(mac2.ether_addr_octet), hwd_addr, ETH_ALEN);
        VV(0, "%s (", ether_ntoa(&mac2));
    }
    if(htons(arp_t->ar_pro) == 0x0800 && arp_t->ar_pln == 0x04) {
        memcpy(&(ip2.s_addr), pro_addr, arp_t->ar_pln);
        VV(0, "%s)\n", inet_ntoa(ip2));
    }

    char buffer[128];
    bzero(buffer, 128);
    char * yellow_str;
    if(htons(arp_t->ar_op) == ARPOP_REQUEST) {
        /* inet_ntoa() returns a static buffer, so convert one address at a
         * time instead of calling it twice in the same sprintf(). */
        char target_ip[INET_ADDRSTRLEN];
        snprintf(target_ip, sizeof(target_ip), "%s", inet_ntoa(ip2));
        sprintf(buffer, "Who is %s ? (to %s [%s])\n", target_ip, inet_ntoa(ip), ether_ntoa(&mac));
    } else if(htons(arp_t->ar_op) == ARPOP_REPLY) {
        sprintf(buffer, "%s is %s\n", inet_ntoa(ip), ether_ntoa(&mac));
    }
    if(strcmp(buffer, "")) {
        yellow_str = yellow(buffer);
        V(1, "%s", yellow_str);   /* never pass packet-derived text as a format string */
        free(yellow_str);
    }

    free(hwd_addr);
    free(pro_addr);
}
void decode_ip6(const u_char * packet)
{
    char str_sip[INET6_ADDRSTRLEN];
    char str_dip[INET6_ADDRSTRLEN];
    char buffer[128];
    char * yellow_plen, * yellow_nxt, * b_sip, * b_dip;

    const struct ip6_hdr * ip_t = (struct ip6_hdr *)(packet);
    inet_ntop(AF_INET6, &(ip_t->ip6_src), str_sip, INET6_ADDRSTRLEN);
    inet_ntop(AF_INET6, &(ip_t->ip6_dst), str_dip, INET6_ADDRSTRLEN);
    b_sip = bold(str_sip);
    b_dip = bold(str_dip);

    sprintf(buffer, "%d", htons(ip_t->ip6_ctlun.ip6_un1.ip6_un1_plen));
    yellow_plen = yellow(buffer);
    sprintf(buffer, "%x", ip_t->ip6_ctlun.ip6_un1.ip6_un1_nxt);  /* next header, not payload length */
    yellow_nxt = yellow(buffer);

    V(1, "IPv6 - %s --> %s\n", b_sip, b_dip);
    uint32_t flow = htonl(ip_t->ip6_ctlun.ip6_un1.ip6_un1_flow);
    VV(1, "Payload Length : %s - Next Header : %s\n", yellow_plen, yellow_nxt);
    VVV(1, "Version : %d - Traffic Class : %x - Flow Label : %x - Hop Limit : %d\n",
        flow >> 28, (flow & 0x0ff00000) >> 20, flow & 0x000fffff,
        ip_t->ip6_ctlun.ip6_un1.ip6_un1_hlim);

    int offset = sizeof(struct ip6_hdr);
    switch(ip_t->ip6_ctlun.ip6_un1.ip6_un1_nxt) {
        case 0x06: decode_tcp(packet + offset); break;
        case 0x11: decode_udp(packet + offset); break;
    }

    free(yellow_plen);
    free(yellow_nxt);
    free(b_sip);
    free(b_dip);
}
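For reference, the first 32-bit word of an IPv6 header packs the version (4 bits), traffic class (8 bits), and flow label (20 bits), which is exactly what the VVV line above unpacks. A minimal, self-contained sketch of the same bit layout (RFC 8200), independent of the capture code; the function name is hypothetical.

/* Minimal sketch: unpack version, traffic class, and flow label from the
 * first 32-bit word of an IPv6 header. `word` is assumed to already be in
 * host byte order, as after the byte-swap call above. */
#include <stdint.h>
#include <stdio.h>

static void print_ip6_first_word(uint32_t word)
{
    unsigned version       = (word >> 28) & 0x0f;
    unsigned traffic_class = (word >> 20) & 0xff;
    unsigned flow_label    = word & 0x000fffff;
    printf("version %u, traffic class 0x%02x, flow label 0x%05x\n",
           version, traffic_class, flow_label);
}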
NOX::Abstract::Group::ReturnType
LOCA::BorderedSolver::Nested::applyTranspose(
    const NOX::Abstract::MultiVector& X,
    const NOX::Abstract::MultiVector::DenseMatrix& Y,
    NOX::Abstract::MultiVector& U,
    NOX::Abstract::MultiVector::DenseMatrix& V) const
{
  int num_cols = X.numVectors();

  Teuchos::RCP<NOX::Abstract::MultiVector> XX =
    unbordered_grp->getX().createMultiVector(num_cols);
  Teuchos::RCP<NOX::Abstract::MultiVector> UU =
    unbordered_grp->getX().createMultiVector(num_cols);
  NOX::Abstract::MultiVector::DenseMatrix YY(myWidth, num_cols);
  NOX::Abstract::MultiVector::DenseMatrix VV(myWidth, num_cols);

  // Views of the top (underlying) and bottom (constraint) blocks of YY and VV.
  NOX::Abstract::MultiVector::DenseMatrix YY1(Teuchos::View, YY, underlyingWidth, num_cols, 0, 0);
  NOX::Abstract::MultiVector::DenseMatrix YY2(Teuchos::View, YY, numConstraints, num_cols, underlyingWidth, 0);
  NOX::Abstract::MultiVector::DenseMatrix VV1(Teuchos::View, VV, underlyingWidth, num_cols, 0, 0);
  NOX::Abstract::MultiVector::DenseMatrix VV2(Teuchos::View, VV, numConstraints, num_cols, underlyingWidth, 0);

  grp->extractSolutionComponent(X, *XX);
  grp->extractParameterComponent(false, X, YY1);
  YY2.assign(Y);

  NOX::Abstract::Group::ReturnType status =
    solver->applyTranspose(*XX, YY, *UU, VV);

  V.assign(VV2);
  grp->loadNestedComponents(*UU, VV1, U);

  return status;
}
int VBX_T(vbw_vec_reverse_test)() { unsigned int aN[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 20, 25, 31, 32, 33, 35, 40, 48, 60, 61, 62, 63, 64, 64, 65, 66, 67, 68, 70, 80, 90, 99, 100, 101, 110, 128, 128, 144, 144, 160, 160, 176, 176, 192, 192, 224, 224, 256, 256, 288, 288, 320, 320, 352, 352, 384, 384, 400, 450, 512, 550, 600, 650, 700, 768, 768, 900, 900, 1023, 1024, 1200, 1400, 1600, 1800, 2048, 2048, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, 3400, 3500, 3600, 3700, 3800, 3900, 4000, 4096, 4096, 4100, 4200, 4300, 4400, 4500, 4600, 4700, 4800, 4900, 5000, 6000, 7000, 8000, 8192, 8192, 9000, 10000, 11000, 12000, 13000, 14000, 15000, 16000, 16384, 16384, 20000, 25000, 30000, 32767, 32768, 32768, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 65535, 65536, 65536 }; int retval; unsigned int N; unsigned int NBYTES; unsigned int NREPS = 100; unsigned int i,k; vbx_timestamp_t start=0,finish=0; vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP(); const unsigned int VBX_SCRATCHPAD_SIZE = this_mxp->scratchpad_size; for( i=0; i<sizeof(aN)/4; i++ ) { N = aN[i]; //printf( "testing with vector size %d\n", N ); NBYTES = sizeof(vbx_sp_t)*N; if( 2*NBYTES > VBX_SCRATCHPAD_SIZE ) continue; vbx_sp_t *vsrc = vbx_sp_malloc( NBYTES ); vbx_sp_t *vdst = vbx_sp_malloc( NBYTES ); //printf("bytes alloc: %d\n", NBYTES ); if( !vsrc ) VBX_EXIT(-1); if( !vdst ) VBX_EXIT(-1); #if ( VBX_TEMPLATE_T == BYTESIZE_DEF | VBX_TEMPLATE_T == UBYTESIZE_DEF ) unsigned int mask = 0x007F; #elif ( VBX_TEMPLATE_T == HALFSIZE_DEF | VBX_TEMPLATE_T == UHALFSIZE_DEF ) unsigned int mask = 0x7FFF; #else unsigned int mask = 0xFFFF; #endif vbx_set_vl( N ); vbx( SV(T), VMOV, vdst, -1, 0 ); // Fill the destination vector with -1 vbx( SE(T), VAND, vsrc, mask, 0 ); // Fill the source vector with enumerated values //VBX_T(print_vector)( "vsrcInit", vsrc, N ); //VBX_T(print_vector)( "vdstInit", vdst, N ); /** measure performance of function call **/ vbx_sync(); start = vbx_timestamp(); for(k=0; k<NREPS; k++ ) { retval = VBX_T(vbw_vec_reverse)( vdst, vsrc, N ); vbx_sync(); } finish = vbx_timestamp(); printf( "length %d (%s):\tvbware sp f():\t%llu", N, VBX_EXPAND_AND_QUOTE(BYTEHALFWORD), (unsigned long long) vbx_mxp_cycles((finish-start)/NREPS) ); //VBX_T(print_vector)( "vsrcPost", vsrc, N ); //VBX_T(print_vector)( "vdstPost", vdst, N ); #if VERIFY_VBWARE_ALGORITHM VBX_T(verify_vector)( vsrc, vdst, N ); #else printf(" [VERIFY OFF]"); #endif printf("\treturn value: %X", retval); vbx_set_vl( N ); vbx( SE(T), VAND, vsrc, mask, 0 ); // Reset the source vector /** measure performance of simple algorithm **/ vbx_sync(); vbx_set_vl( 1 ); vbx_set_2D( N, -sizeof(vbx_sp_t), sizeof(vbx_sp_t), 0 ); start = vbx_timestamp(); for(k=0; k<NREPS; k++ ) { vbx_2D( VV(T), VMOV, vdst+N-1, vsrc, 0 ); vbx_sync(); } finish = vbx_timestamp(); printf( "\tsimple (vl=1):\t%llu", (unsigned long long) vbx_mxp_cycles((finish-start)/NREPS) ); #if VERIFY_SIMPLE_ALGORITHM VBX_T(verify_vector)( vsrc, vdst, N ); #else printf(" [VERIFY OFF]"); #endif printf("\tcycles\n"); vbx_sp_free(); } vbx_sp_free(); printf("All tests passed successfully.\n"); return 0; }
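The "simple" variant timed above reverses the vector by setting the vector length to 1 and issuing a 2D VMOV whose destination row stride is negative (-sizeof(vbx_sp_t)), so successive rows walk backwards from vdst+N-1. For reference, a scalar equivalent of what vbw_vec_reverse computes; this is a hypothetical helper, assuming only the vbx_sp_t element type from the file above, and is not part of the VBX API.

/* Scalar reference for the operation being timed above: reverse N elements
 * of src into dst. */
static void scalar_vec_reverse(vbx_sp_t *dst, const vbx_sp_t *src, unsigned int N)
{
    for (unsigned int i = 0; i < N; ++i) {
        dst[i] = src[N - 1 - i];
    }
}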
void IBFECentroidPostProcessor::reconstructVariables(double data_time) { EquationSystems* equation_systems = d_fe_data_manager->getEquationSystems(); const MeshBase& mesh = equation_systems->get_mesh(); const int dim = mesh.mesh_dimension(); AutoPtr<QBase> qrule = QBase::build(QGAUSS, NDIM, CONSTANT); // Set up all system data required to evaluate the mesh functions. System& X_system = equation_systems->get_system<System>(IBFEMethod::COORDS_SYSTEM_NAME); const DofMap& X_dof_map = X_system.get_dof_map(); for (unsigned d = 0; d < NDIM; ++d) TBOX_ASSERT(X_dof_map.variable_type(d) == X_dof_map.variable_type(0)); std::vector<std::vector<unsigned int> > X_dof_indices(NDIM); AutoPtr<FEBase> X_fe(FEBase::build(dim, X_dof_map.variable_type(0))); X_fe->attach_quadrature_rule(qrule.get()); const std::vector<libMesh::Point>& q_point = X_fe->get_xyz(); const std::vector<std::vector<double> >& phi_X = X_fe->get_phi(); const std::vector<std::vector<VectorValue<double> > >& dphi_X = X_fe->get_dphi(); X_system.solution->localize(*X_system.current_local_solution); NumericVector<double>& X_data = *(X_system.current_local_solution); X_data.close(); for (std::set<unsigned int>::const_iterator cit = d_var_fcn_systems.begin(); cit != d_var_fcn_systems.end(); ++cit) { System& system = equation_systems->get_system(*cit); system.update(); } const unsigned int num_scalar_vars = d_scalar_var_systems.size(); std::vector<const DofMap*> scalar_var_dof_maps(num_scalar_vars); std::vector<std::vector<unsigned int> > scalar_var_dof_indices(num_scalar_vars); std::vector<NumericVector<double>*> scalar_var_data(num_scalar_vars); std::vector<unsigned int> scalar_var_system_num(num_scalar_vars); std::vector<std::vector<NumericVector<double>*> > scalar_var_fcn_data(num_scalar_vars); for (unsigned int k = 0; k < num_scalar_vars; ++k) { scalar_var_dof_maps[k] = &d_scalar_var_systems[k]->get_dof_map(); scalar_var_data[k] = d_scalar_var_systems[k]->solution.get(); scalar_var_system_num[k] = d_scalar_var_systems[k]->number(); scalar_var_fcn_data[k].reserve(d_scalar_var_fcn_systems[k].size()); for (std::vector<unsigned int>::const_iterator cit = d_scalar_var_fcn_systems[k].begin(); cit != d_scalar_var_fcn_systems[k].end(); ++cit) { System& system = equation_systems->get_system(*cit); scalar_var_fcn_data[k].push_back(system.current_local_solution.get()); } } const unsigned int num_vector_vars = d_vector_var_systems.size(); std::vector<const DofMap*> vector_var_dof_maps(num_vector_vars); std::vector<std::vector<std::vector<unsigned int> > > vector_var_dof_indices( num_vector_vars); std::vector<NumericVector<double>*> vector_var_data(num_vector_vars); std::vector<unsigned int> vector_var_system_num(num_vector_vars); std::vector<std::vector<NumericVector<double>*> > vector_var_fcn_data(num_vector_vars); for (unsigned int k = 0; k < num_vector_vars; ++k) { vector_var_dof_maps[k] = &d_vector_var_systems[k]->get_dof_map(); vector_var_dof_indices[k].resize(d_vector_var_dims[k]); vector_var_data[k] = d_vector_var_systems[k]->solution.get(); vector_var_system_num[k] = d_vector_var_systems[k]->number(); vector_var_fcn_data[k].reserve(d_vector_var_fcn_systems[k].size()); for (std::vector<unsigned int>::const_iterator cit = d_vector_var_fcn_systems[k].begin(); cit != d_vector_var_fcn_systems[k].end(); ++cit) { System& system = equation_systems->get_system(*cit); vector_var_fcn_data[k].push_back(system.current_local_solution.get()); } } const unsigned int num_tensor_vars = d_tensor_var_systems.size(); std::vector<const DofMap*> 
tensor_var_dof_maps(num_tensor_vars); std::vector<boost::multi_array<std::vector<unsigned int>, 2> > tensor_var_dof_indices( num_tensor_vars); std::vector<NumericVector<double>*> tensor_var_data(num_tensor_vars); std::vector<unsigned int> tensor_var_system_num(num_tensor_vars); std::vector<std::vector<NumericVector<double>*> > tensor_var_fcn_data(num_tensor_vars); for (unsigned int k = 0; k < num_tensor_vars; ++k) { tensor_var_dof_maps[k] = &d_tensor_var_systems[k]->get_dof_map(); typedef boost::multi_array<std::vector<unsigned int>, 2> array_type; array_type::extent_gen extents; tensor_var_dof_indices[k].resize(extents[d_tensor_var_dims[k]][d_tensor_var_dims[k]]); tensor_var_data[k] = d_tensor_var_systems[k]->solution.get(); tensor_var_system_num[k] = d_tensor_var_systems[k]->number(); tensor_var_fcn_data[k].reserve(d_tensor_var_fcn_systems[k].size()); for (std::vector<unsigned int>::const_iterator cit = d_tensor_var_fcn_systems[k].begin(); cit != d_tensor_var_fcn_systems[k].end(); ++cit) { System& system = equation_systems->get_system(*cit); tensor_var_fcn_data[k].push_back(system.current_local_solution.get()); } } // Reconstruct the variables via simple function evaluation. TensorValue<double> FF_qp, VV; libMesh::Point X_qp; VectorValue<double> V; double v; boost::multi_array<double, 2> X_node; const MeshBase::const_element_iterator el_begin = mesh.active_local_elements_begin(); const MeshBase::const_element_iterator el_end = mesh.active_local_elements_end(); for (MeshBase::const_element_iterator el_it = el_begin; el_it != el_end; ++el_it) { Elem* const elem = *el_it; X_fe->reinit(elem); for (unsigned int d = 0; d < NDIM; ++d) { X_dof_map.dof_indices(elem, X_dof_indices[d], d); } const unsigned int n_qp = qrule->n_points(); TBOX_ASSERT(n_qp == 1); const unsigned int qp = 0; get_values_for_interpolation(X_node, X_data, X_dof_indices); interpolate(X_qp, qp, X_node, phi_X); jacobian(FF_qp, qp, X_node, dphi_X); const libMesh::Point& s_qp = q_point[qp]; // Scalar-valued variables. for (unsigned int k = 0; k < num_scalar_vars; ++k) { scalar_var_dof_maps[k]->dof_indices(elem, scalar_var_dof_indices[k], 0); d_scalar_var_fcns[k](v, FF_qp, X_qp, s_qp, elem, scalar_var_fcn_data[k], data_time, d_scalar_var_fcn_ctxs[k]); scalar_var_data[k]->set(scalar_var_dof_indices[k][0], v); } // Vector-valued variables. for (unsigned int k = 0; k < num_vector_vars; ++k) { for (unsigned int i = 0; i < d_vector_var_dims[k]; ++i) { vector_var_dof_maps[k]->dof_indices(elem, vector_var_dof_indices[k][i], i); } d_vector_var_fcns[k](V, FF_qp, X_qp, s_qp, elem, vector_var_fcn_data[k], data_time, d_vector_var_fcn_ctxs[k]); for (unsigned int i = 0; i < d_vector_var_dims[k]; ++i) { vector_var_data[k]->set(vector_var_dof_indices[k][i][0], V(i)); } } // Tensor-valued variables. for (unsigned int k = 0; k < num_tensor_vars; ++k) { for (unsigned int i = 0; i < d_tensor_var_dims[k]; ++i) { for (unsigned int j = 0; j < d_tensor_var_dims[k]; ++j) { tensor_var_dof_maps[k]->dof_indices( elem, tensor_var_dof_indices[k][i][j], j + i * d_tensor_var_dims[k]); } } d_tensor_var_fcns[k](VV, FF_qp, X_qp, s_qp, elem, tensor_var_fcn_data[k], data_time, d_tensor_var_fcn_ctxs[k]); for (unsigned int i = 0; i < d_tensor_var_dims[k]; ++i) { for (unsigned int j = 0; j < d_tensor_var_dims[k]; ++j) { tensor_var_data[k]->set(tensor_var_dof_indices[k][i][j][0], VV(i, j)); } } } } // Close all vectors. 
for (unsigned int k = 0; k < num_scalar_vars; ++k) { scalar_var_data[k]->close(); } for (unsigned int k = 0; k < num_vector_vars; ++k) { vector_var_data[k]->close(); } for (unsigned int k = 0; k < num_tensor_vars; ++k) { tensor_var_data[k]->close(); } return; } // reconstructVariables
/// Sets up the location and orientation of the leap in order to properly
/// transform relatively located leap events into absolutely located ones.
void SetLocAndOrientation (Vect const& orig, Vect const& nrm, Vect const& ov)
{
    origin = VV (orig);
    normal = VV (nrm);
    over = VV (ov);
    transform = Leap::Matrix (over, normal, over . cross (normal), origin);
}
/*
 * ====================================================================
 * Make a (pseudo)inverse of a dense matrix using CLAPACK SVD.
 *
 * Note the different definitions of a matrix here (double **) and
 * in other routines (double *).
 *
 * The matrix A must be of size at least [max(rows,cols)][max(rows,cols)]
 * since the pseudoinverse (and not its transpose) is returned in A.
 *
 * Singular value decomposition is used to calculate the pseudo-inverse.
 * If #rows=#cols then (an approximation of) the inverse is found.
 * Otherwise an approximation of the pseudoinverse is obtained.
 *
 * The return value of this routine is the estimated rank of the system.
 * ====================================================================
 */
int pinv(double **A, int rows, int cols)
{
  char fctName[] = "pinv_new";
  int i, j, k;
  int nsv, rank;
  double tol;
  /* Variables needed for interaction with CLAPACK routines
   * (FORTRAN style) */
  double *amat, *svals, *U, *V, *work;
  long int lda, ldu, ldvt, lwork, m, n, info;

  /* Short-hands for this routine only [undef'ed at end of routine] */
#define AA(i,j) amat[i + j * (int) lda]
#define UU(i,j) U[i + j * (int) ldu]
#define VV(i,j) V[i + j * (int) ldvt]

  /* Set up arrays for call to CLAPACK routine */
  /* Use leading dimensions large enough to calculate both under-
   * and over-determined systems of equations. The way this is
   * calculated, we need to store U*(Z^-1^T) in U. The array U must
   * have at least cols columns to perform this operation, and at
   * least rows columns to store the initial U. However, for an under-
   * determined system the last cols-rows columns of U*(Z^-1^T) will be
   * zero. Exploiting this, the size of the array U is just mxm */
  m = (long int) rows;
  n = (long int) cols;
  lda  = m;
  ldu  = m;
  ldvt = n;
  nsv = MIN(rows,cols);
  /* Allocate memory for:
   *  amat  : The matrix to factor
   *  svals : Vector of singular values
   *  U     : mxm unitary matrix
   *  V     : nxn unitary matrix */
  amat  = (double *) calloc((int) m*n+1,    sizeof(double));
  svals = (double *) calloc((int) m+n,      sizeof(double));
  U     = (double *) calloc((int) ldu*m+1,  sizeof(double));
  V     = (double *) calloc((int) ldvt*n+1, sizeof(double));
  /* In matrix form amat = U * diag(svals) * V^T */
  /* Leading dimensions [number of rows] of the above arrays */
  /* Work storage */
  lwork = 100*(m+n);
  work = (double *) calloc(lwork, sizeof(double));

  /* Copy A to temporary storage */
  for (j=0; j<cols; j++) {
    for (i=0; i<rows; i++) {
      AA(i,j) = A[i][j];
    }
  }

  {
    char jobu, jobvt;
    jobu  = 'A'; /* Calculate all columns of matrix U   */
    jobvt = 'A'; /* Calculate all columns of matrix V^T */
    /* Note that dgesvd_ returns V^T rather than V */
    dgesvd_(&jobu, &jobvt, &m, &n, amat, &lda, svals,
            U, &ldu, V, &ldvt, work, &lwork, &info);
    /* Test info from dgesvd */
    if (info) {
      if (info<0) {
        printf("%s: ERROR: clapack routine 'dgesvd_' complained about\n"
               "   illegal value of argument # %d\n", fctName, (int) -info);
        _EXIT_;
      }
      else {
        printf("%s: ERROR: clapack routine 'dgesvd_' complained that\n"
               "   %d superdiagonals didn't converge.\n", fctName, (int) info);
        _EXIT_;
      }
    }
  }

  /* Test that the singular values are returned in correct ordering
   * (This is done because there were problems with this with a former
   * implementation [using f2c'ed linpack] when high optimization
   * was used) */
  if (svals[0] < 0.0) {
    printf("%s: ERROR: First singular value returned by clapack \n"
           "   is negative: %16.6e.\n", fctName, svals[0]);
    _EXIT_;
  }
  for (i=1; i<nsv; i++) {
    if ( svals[i] > svals[i-1] ) {
      printf("%s: ERROR: Singular values returned by clapack \n"
             "   are not appropriately ordered!\n"
             "   svals[%d] = %16.6e > svals[%d] = %16.6e",
             fctName, i, svals[i], i-1, svals[i-1]);
      _EXIT_;
    }
  }

  /* Test rank of matrix by examining the singular values. */
  /* The singular values of the matrix are sorted in descending order,
   * so that svals[i] >= svals[i+1]. The first that is zero (to some
   * precision) yields information on the rank (to some precision) */
  rank = nsv;
  tol = DBL_EPSILON * svals[0] * MAX(rows,cols);
  for (i=0; i<nsv; i++) {
    if ( svals[i] <= tol ) {
      rank = i;
      break;
    }
  }

  /* Compute (pseudo-) inverse matrix using the computed SVD:
   *   A   = U*S*V'
   *   A^+ = V * (Zi) U'
   *       = V * (U * (Zi)')'
   *       = { (U * Zi') * V' }'
   * Here Zi is the "inverse" of the diagonal matrix S, i.e. Zi is
   * diagonal and has the same size as S'. The non-zero entries in
   * Zi are calculated from the non-zero entries in S as Zi_ii=1/S_ii
   * Note that Zi' is of the same size as S.
   * The last line here is used in the present computation. This
   * notation avoids any need to transpose the output from the CLAPACK
   * routines (which deliver U, S and V). */

  /* Inverse of [non-zero part of] diagonal matrix */
  tol = 1.0e-10 * svals[0];
  for (i=0; i< nsv; i++) {
    if (svals[i] < tol) svals[i] = 0.0;
    else                svals[i] = 1.0 / svals[i];
  }

  /* Calculate UZ = U * Zi', i.e. scale COLUMN j in U by
   * the j'th singular value [nsv columns only - since the diagonal
   * matrix in general is not square]. If rows>cols then the last
   * columns in UZ will be zero (no need to compute). */
  for (j=0; j<nsv; j++) {
    for (i=0; i<rows; i++) {
      UU(i,j) *= svals[j];
    }
  }
  /* U*Zi' is stored in array U. It has size (rows x nsv).
   * If cols>rows, then it should be thought of as the larger matrix
   * (rows x cols) with zero columns added on the right. */

  /* Zero out the full array A to avoid confusion upon return.
   * This is not absolutely necessary; zeroing out A[j][i] could be
   * part of the next loop. */
  for (i=0; i< MAX(cols,rows); i++) {
    for (j=0; j< MAX(cols,rows); j++) {
      A[i][j] = 0.0;
    }
  }

  /* Matrix-matrix multiply (U*Zi') * V'.
   * Only the first nsv columns in U*Zi' are non-zero, so the inner-
   * most loop will go to k=nsv (-1).
   * The result will be the transpose of the (pseudo-) inverse of A,
   * so store directly in A'=A[j][i].
   *   A[j][i] = sum_k (U*Zi)[i][k] * V'[k][j] : */
  for (i=0; i< rows; i++) {
    for (j=0; j< cols; j++) {
      /* A[j][i] = 0.0; */ /* Include if A is not zeroed out above */
      for (k=0; k<nsv; k++) {
        A[j][i] += UU(i,k)*VV(k,j);
      }
    }
  }

  /* Free memory allocated in this routine */
  FREE(amat);
  FREE(svals);
  FREE(U);
  FREE(V);
  FREE(work);

  return rank;

  /* Undefine macros for this routine */
#undef AA
#undef UU
#undef VV
} /* End of routine pinv */
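A minimal usage sketch for pinv() above, assuming it is compiled and linked together with CLAPACK's dgesvd_ and the MIN/MAX/FREE/_EXIT_ macros it relies on. As the header comment requires, the array is allocated as max(rows,cols) x max(rows,cols); on return, A[j][i] holds the cols x rows pseudoinverse.

/* Usage sketch (hypothetical driver, not part of the routine above). */
#include <stdio.h>
#include <stdlib.h>

int pinv(double **A, int rows, int cols);  /* defined above */

int main()
{
    const int rows = 2, cols = 3, dim = 3;   /* dim = max(rows, cols) */
    double **A = (double **) malloc(dim * sizeof(double *));
    for (int i = 0; i < dim; i++) A[i] = (double *) calloc(dim, sizeof(double));

    /* A = [1 0 0; 0 2 0]; its pseudoinverse is [1 0; 0 0.5; 0 0]. */
    A[0][0] = 1.0;
    A[1][1] = 2.0;

    int rank = pinv(A, rows, cols);
    printf("rank = %d\n", rank);
    for (int j = 0; j < cols; j++)           /* pseudoinverse is cols x rows */
        printf("%8.4f %8.4f\n", A[j][0], A[j][1]);

    for (int i = 0; i < dim; i++) free(A[i]);
    free(A);
    return 0;
}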
/* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo, integer *ihi, doublereal *h, integer *ldh, doublereal *wr, doublereal *wi, doublereal *z, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* -- LAPACK routine (version 2.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University September 30, 1994 Purpose ======= DHSEQR computes the eigenvalues of a real upper Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q, so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= JOB (input) CHARACTER*1 = 'E': compute eigenvalues only; = 'S': compute eigenvalues and the Schur form T. COMPZ (input) CHARACTER*1 = 'N': no Schur vectors are computed; = 'I': Z is initialized to the unit matrix and the matrix Z of Schur vectors of H is returned; = 'V': Z must contain an orthogonal matrix Q on entry, and the product Q*Z is returned. N (input) INTEGER The order of the matrix H. N >= 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to SGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise ILO and IHI should be set to 1 and N respectively. 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if JOB = 'S', H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. If JOB = 'E', the contents of H are unspecified on exit. LDH (input) INTEGER The leading dimension of the array H. LDH >= max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) The real and imaginary parts, respectively, of the computed eigenvalues. If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) If COMPZ = 'N': Z is not referenced. If COMPZ = 'I': on entry, Z need not be set, and on exit, Z contains the orthogonal matrix Z of the Schur vectors of H. If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q, which is assumed to be equal to the unit matrix except for the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z. Normally Q is the orthogonal matrix generated by DORGHR after the call to DGEHRD which formed the Hessenberg matrix H. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise. 
WORK (workspace) DOUBLE PRECISION array, dimension (N) LWORK (input) INTEGER This argument is currently redundant. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, DHSEQR failed to compute all of the eigenvalues in a total of 30*(IHI-ILO+1) iterations; elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. ===================================================================== Decode and test the input parameters Parameter adjustments Function Body */ /* Table of constant values */ static doublereal c_b9 = 0.; static doublereal c_b10 = 1.; static integer c__4 = 4; static integer c_n1 = -1; static integer c__2 = 2; static integer c__8 = 8; static integer c__15 = 15; static logical c_false = FALSE_; static integer c__1 = 1; /* System generated locals */ address a__1[2]; integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4, i__5; doublereal d__1, d__2; char ch__1[2]; /* Builtin functions Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static integer maxb; static doublereal absw; static integer ierr; static doublereal unfl, temp, ovfl; static integer i, j, k, l; static doublereal s[225] /* was [15][15] */, v[16]; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer itemp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i1, i2; static logical initz, wantt, wantz; extern doublereal dlapy2_(doublereal *, doublereal *); extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static integer ii, nh; extern doublereal dlamch_(char *); extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static integer nr, ns; extern integer idamax_(integer *, doublereal *, integer *); static integer nv; extern doublereal dlanhs_(char *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static doublereal vv[16]; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dlarfx_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal smlnum; static integer itn; static doublereal tau; static integer its; static doublereal ulp, tst1; #define S(I) s[(I)] #define WAS(I) was[(I)] #define V(I) v[(I)] #define VV(I) vv[(I)] #define WR(I) wr[(I)-1] #define WI(I) wi[(I)-1] #define WORK(I) work[(I)-1] #define H(I,J) h[(I)-1 + ((J)-1)* ( *ldh)] #define Z(I,J) z[(I)-1 + ((J)-1)* ( *ldz)] wantt = lsame_(job, "S"); initz = lsame_(compz, "I"); wantz = initz || lsame_(compz, "V"); *info = 0; if (! lsame_(job, "E") && ! wantt) { *info = -1; } else if (! lsame_(compz, "N") && ! 
wantz) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -4; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -5; } else if (*ldh < max(1,*n)) { *info = -7; } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) { *info = -11; } if (*info != 0) { i__1 = -(*info); xerbla_("DHSEQR", &i__1); return 0; } /* Initialize Z, if necessary */ if (initz) { dlaset_("Full", n, n, &c_b9, &c_b10, &Z(1,1), ldz); } /* Store the eigenvalues isolated by DGEBAL. */ i__1 = *ilo - 1; for (i = 1; i <= *ilo-1; ++i) { WR(i) = H(i,i); WI(i) = 0.; /* L10: */ } i__1 = *n; for (i = *ihi + 1; i <= *n; ++i) { WR(i) = H(i,i); WI(i) = 0.; /* L20: */ } /* Quick return if possible. */ if (*n == 0) { return 0; } if (*ilo == *ihi) { WR(*ilo) = H(*ilo,*ilo); WI(*ilo) = 0.; return 0; } /* Set rows and columns ILO to IHI to zero below the first subdiagonal. */ i__1 = *ihi - 2; for (j = *ilo; j <= *ihi-2; ++j) { i__2 = *n; for (i = j + 2; i <= *n; ++i) { H(i,j) = 0.; /* L30: */ } /* L40: */ } nh = *ihi - *ilo + 1; /* Determine the order of the multi-shift QR algorithm to be used. Writing concatenation */ i__3[0] = 1, a__1[0] = job; i__3[1] = 1, a__1[1] = compz; s_cat(ch__1, a__1, i__3, &c__2, 2L); ns = ilaenv_(&c__4, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, 6L, 2L); /* Writing concatenation */ i__3[0] = 1, a__1[0] = job; i__3[1] = 1, a__1[1] = compz; s_cat(ch__1, a__1, i__3, &c__2, 2L); maxb = ilaenv_(&c__8, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, 6L, 2L); if (ns <= 2 || ns > nh || maxb >= nh) { /* Use the standard double-shift algorithm */ dlahqr_(&wantt, &wantz, n, ilo, ihi, &H(1,1), ldh, &WR(1), &WI(1) , ilo, ihi, &Z(1,1), ldz, info); return 0; } maxb = max(3,maxb); /* Computing MIN */ i__1 = min(ns,maxb); ns = min(i__1,15); /* Now 2 < NS <= MAXB < NH. Set machine-dependent constants for the stopping criterion. If norm(H) <= sqrt(OVFL), overflow should not occur. */ unfl = dlamch_("Safe minimum"); ovfl = 1. / unfl; dlabad_(&unfl, &ovfl); ulp = dlamch_("Precision"); smlnum = unfl * (nh / ulp); /* I1 and I2 are the indices of the first row and last column of H to which transformations must be applied. If eigenvalues only are being computed, I1 and I2 are set inside the main loop. */ if (wantt) { i1 = 1; i2 = *n; } /* ITN is the total number of multiple-shift QR iterations allowed. */ itn = nh * 30; /* The main loop begins here. I is the loop index and decreases from IHI to ILO in steps of at most MAXB. Each iteration of the loop works with the active submatrix in rows and columns L to I. Eigenvalues I+1 to IHI have already converged. Either L = ILO or H(L,L-1) is negligible so that the matrix splits. */ i = *ihi; L50: l = *ilo; if (i < *ilo) { goto L170; } /* Perform multiple-shift QR iterations on rows and columns ILO to I until a submatrix of order at most MAXB splits off at the bottom because a subdiagonal element has become negligible. */ i__1 = itn; for (its = 0; its <= itn; ++its) { /* Look for a single small subdiagonal element. */ i__2 = l + 1; for (k = i; k >= l+1; --k) { tst1 = (d__1 = H(k-1,k-1), abs(d__1)) + (d__2 = H(k,k), abs(d__2)); if (tst1 == 0.) { i__4 = i - l + 1; tst1 = dlanhs_("1", &i__4, &H(l,l), ldh, &WORK(1)); } /* Computing MAX */ d__2 = ulp * tst1; if ((d__1 = H(k,k-1), abs(d__1)) <= max(d__2, smlnum)) { goto L70; } /* L60: */ } L70: l = k; if (l > *ilo) { /* H(L,L-1) is negligible. */ H(l,l-1) = 0.; } /* Exit from loop if a submatrix of order <= MAXB has split off . 
*/ if (l >= i - maxb + 1) { goto L160; } /* Now the active submatrix is in rows and columns L to I. If eigenvalues only are being computed, only the active submatr ix need be transformed. */ if (! wantt) { i1 = l; i2 = i; } if (its == 20 || its == 30) { /* Exceptional shifts. */ i__2 = i; for (ii = i - ns + 1; ii <= i; ++ii) { WR(ii) = ((d__1 = H(ii,ii-1), abs(d__1)) + ( d__2 = H(ii,ii), abs(d__2))) * 1.5; WI(ii) = 0.; /* L80: */ } } else { /* Use eigenvalues of trailing submatrix of order NS as shifts. */ dlacpy_("Full", &ns, &ns, &H(i-ns+1,i-ns+1), ldh, s, &c__15); dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &WR(i - ns + 1), &WI(i - ns + 1), &c__1, &ns, &Z(1,1), ldz, & ierr); if (ierr > 0) { /* If DLAHQR failed to compute all NS eigenvalues , use the unconverged diagonal elements as the remaining shifts. */ i__2 = ierr; for (ii = 1; ii <= ierr; ++ii) { WR(i - ns + ii) = S(ii + ii * 15 - 16); WI(i - ns + ii) = 0.; /* L90: */ } } } /* Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns)) where G is the Hessenberg submatrix H(L:I,L:I) and w is the vector of shifts (stored in WR and WI). The result is stored in the local array V. */ V(0) = 1.; i__2 = ns + 1; for (ii = 2; ii <= ns+1; ++ii) { V(ii - 1) = 0.; /* L100: */ } nv = 1; i__2 = i; for (j = i - ns + 1; j <= i; ++j) { if (WI(j) >= 0.) { if (WI(j) == 0.) { /* real shift */ i__4 = nv + 1; dcopy_(&i__4, v, &c__1, vv, &c__1); i__4 = nv + 1; d__1 = -WR(j); dgemv_("No transpose", &i__4, &nv, &c_b10, &H(l,l), ldh, vv, &c__1, &d__1, v, &c__1); ++nv; } else if (WI(j) > 0.) { /* complex conjugate pair of shifts */ i__4 = nv + 1; dcopy_(&i__4, v, &c__1, vv, &c__1); i__4 = nv + 1; d__1 = WR(j) * -2.; dgemv_("No transpose", &i__4, &nv, &c_b10, &H(l,l), ldh, v, &c__1, &d__1, vv, &c__1); i__4 = nv + 1; itemp = idamax_(&i__4, vv, &c__1); /* Computing MAX */ d__2 = (d__1 = VV(itemp - 1), abs(d__1)); temp = 1. / max(d__2,smlnum); i__4 = nv + 1; dscal_(&i__4, &temp, vv, &c__1); absw = dlapy2_(&WR(j), &WI(j)); temp = temp * absw * absw; i__4 = nv + 2; i__5 = nv + 1; dgemv_("No transpose", &i__4, &i__5, &c_b10, &H(l,l), ldh, vv, &c__1, &temp, v, &c__1); nv += 2; } /* Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero, reset it to the unit vector. */ itemp = idamax_(&nv, v, &c__1); temp = (d__1 = V(itemp - 1), abs(d__1)); if (temp == 0.) { V(0) = 1.; i__4 = nv; for (ii = 2; ii <= nv; ++ii) { V(ii - 1) = 0.; /* L110: */ } } else { temp = max(temp,smlnum); d__1 = 1. / temp; dscal_(&nv, &d__1, v, &c__1); } } /* L120: */ } /* Multiple-shift QR step */ i__2 = i - 1; for (k = l; k <= i-1; ++k) { /* The first iteration of this loop determines a reflect ion G from the vector V and applies it from left and right to H, thus creating a nonzero bulge below the subdiagonal. Each subsequent iteration determines a reflection G t o restore the Hessenberg form in the (K-1)th column, an d thus chases the bulge one step toward the bottom of the ac tive submatrix. NR is the order of G. Computing MIN */ i__4 = ns + 1, i__5 = i - k + 1; nr = min(i__4,i__5); if (k > l) { dcopy_(&nr, &H(k,k-1), &c__1, v, &c__1); } dlarfg_(&nr, v, &V(1), &c__1, &tau); if (k > l) { H(k,k-1) = V(0); i__4 = i; for (ii = k + 1; ii <= i; ++ii) { H(ii,k-1) = 0.; /* L130: */ } } V(0) = 1.; /* Apply G from the left to transform the rows of the ma trix in columns K to I2. */ i__4 = i2 - k + 1; dlarfx_("Left", &nr, &i__4, v, &tau, &H(k,k), ldh, & WORK(1)); /* Apply G from the right to transform the columns of th e matrix in rows I1 to min(K+NR,I). 
Computing MIN */ i__5 = k + nr; i__4 = min(i__5,i) - i1 + 1; dlarfx_("Right", &i__4, &nr, v, &tau, &H(i1,k), ldh, & WORK(1)); if (wantz) { /* Accumulate transformations in the matrix Z */ dlarfx_("Right", &nh, &nr, v, &tau, &Z(*ilo,k), ldz, &WORK(1)); } /* L140: */ } /* L150: */ } /* Failure to converge in remaining number of iterations */ *info = i; return 0; L160: /* A submatrix of order <= MAXB in rows and columns L to I has split off. Use the double-shift QR algorithm to handle it. */ dlahqr_(&wantt, &wantz, n, &l, &i, &H(1,1), ldh, &WR(1), &WI(1), ilo, ihi, &Z(1,1), ldz, info); if (*info > 0) { return 0; } /* Decrement number of remaining iterations, and return to start of the main loop with a new value of I. */ itn -= its; i = l - 1; goto L50; L170: return 0; /* End of DHSEQR */ } /* dhseqr_ */
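A minimal calling sketch for dhseqr_ above. Assumptions: the f2c "integer"/"doublereal" typedefs come from f2c.h, the routine is compiled with C linkage, and the Hessenberg matrix is stored column-major as in CLAPACK. It computes eigenvalues only (JOB='E', COMPZ='N') of a small upper Hessenberg matrix, following the argument documentation above.

/* Hypothetical driver; not part of the translated LAPACK source above. */
#include <stdio.h>
#include "f2c.h"

extern "C" int dhseqr_(char *job, char *compz, integer *n, integer *ilo,
                       integer *ihi, doublereal *h, integer *ldh,
                       doublereal *wr, doublereal *wi, doublereal *z,
                       integer *ldz, doublereal *work, integer *lwork,
                       integer *info);

int main()
{
    integer n = 3, ilo = 1, ihi = 3, ldh = 3, ldz = 1, lwork = 3, info = 0;
    char job[] = "E";    /* eigenvalues only  */
    char compz[] = "N";  /* no Schur vectors  */
    /* Column-major upper Hessenberg matrix [[1,2,3],[4,5,6],[0,7,8]]. */
    doublereal h[9] = { 1.0, 4.0, 0.0,  2.0, 5.0, 7.0,  3.0, 6.0, 8.0 };
    doublereal wr[3], wi[3], work[3], zdummy = 0.0;

    dhseqr_(job, compz, &n, &ilo, &ihi, h, &ldh, wr, wi, &zdummy, &ldz,
            work, &lwork, &info);

    if (info == 0)
        for (int i = 0; i < 3; ++i)
            printf("lambda_%d = %g %+gi\n", i, (double) wr[i], (double) wi[i]);
    return 0;
}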
IGL_INLINE bool igl::copyleft::boolean::mesh_boolean( const Eigen::PlainObjectBase<DerivedVA> & VA, const Eigen::PlainObjectBase<DerivedFA> & FA, const Eigen::PlainObjectBase<DerivedVB> & VB, const Eigen::PlainObjectBase<DerivedFB> & FB, const WindingNumberOp& wind_num_op, const KeepFunc& keep, const ResolveFunc& resolve_fun, Eigen::PlainObjectBase<DerivedVC > & VC, Eigen::PlainObjectBase<DerivedFC > & FC, Eigen::PlainObjectBase<DerivedJ > & J) { #ifdef MESH_BOOLEAN_TIMING const auto & tictoc = []() -> double { static double t_start = igl::get_seconds(); double diff = igl::get_seconds()-t_start; t_start += diff; return diff; }; const auto log_time = [&](const std::string& label) -> void { std::cout << "mesh_boolean." << label << ": " << tictoc() << std::endl; }; tictoc(); #endif typedef typename DerivedVC::Scalar Scalar; //typedef typename DerivedFC::Scalar Index; typedef CGAL::Epeck Kernel; typedef Kernel::FT ExactScalar; typedef Eigen::Matrix<Scalar,Eigen::Dynamic,3> MatrixX3S; //typedef Eigen::Matrix<Index,Eigen::Dynamic,Eigen::Dynamic> MatrixXI; typedef Eigen::Matrix<typename DerivedJ::Scalar,Eigen::Dynamic,1> VectorXJ; // Generate combined mesh. typedef Eigen::Matrix< ExactScalar, Eigen::Dynamic, Eigen::Dynamic, DerivedVC::IsRowMajor> MatrixXES; MatrixXES V; DerivedFC F; VectorXJ CJ; { DerivedVA VV(VA.rows() + VB.rows(), 3); DerivedFC FF(FA.rows() + FB.rows(), 3); VV << VA, VB; FF << FA, FB.array() + VA.rows(); //// Handle annoying empty cases //if(VA.size()>0) //{ // VV<<VA; //} //if(VB.size()>0) //{ // VV<<VB; //} //if(FA.size()>0) //{ // FF<<FA; //} //if(FB.size()>0) //{ // FF<<FB.array()+VA.rows(); //} resolve_fun(VV, FF, V, F, CJ); } #ifdef MESH_BOOLEAN_TIMING log_time("resolve_self_intersection"); #endif // Compute winding numbers on each side of each facet. const size_t num_faces = F.rows(); Eigen::MatrixXi W; Eigen::VectorXi labels(num_faces); std::transform(CJ.data(), CJ.data()+CJ.size(), labels.data(), [&](int i) { return i<FA.rows() ? 0:1; }); bool valid = true; if (num_faces > 0) { valid = valid & igl::copyleft::cgal::propagate_winding_numbers(V, F, labels, W); } else { W.resize(0, 4); } assert((size_t)W.rows() == num_faces); if (W.cols() == 2) { assert(FB.rows() == 0); Eigen::MatrixXi W_tmp(num_faces, 4); W_tmp << W, Eigen::MatrixXi::Zero(num_faces, 2); W = W_tmp; } else { assert(W.cols() == 4); } #ifdef MESH_BOOLEAN_TIMING log_time("propagate_input_winding_number"); #endif // Compute resulting winding number. Eigen::MatrixXi Wr(num_faces, 2); for (size_t i=0; i<num_faces; i++) { Eigen::MatrixXi w_out(1,2), w_in(1,2); w_out << W(i,0), W(i,2); w_in << W(i,1), W(i,3); Wr(i,0) = wind_num_op(w_out); Wr(i,1) = wind_num_op(w_in); } #ifdef MESH_BOOLEAN_TIMING log_time("compute_output_winding_number"); #endif // Extract boundary separating inside from outside. 
auto index_to_signed_index = [&](size_t i, bool ori) -> int { return (i+1)*(ori?1:-1); }; //auto signed_index_to_index = [&](int i) -> size_t { // return abs(i) - 1; //}; std::vector<int> selected; for(size_t i=0; i<num_faces; i++) { auto should_keep = keep(Wr(i,0), Wr(i,1)); if (should_keep > 0) { selected.push_back(index_to_signed_index(i, true)); } else if (should_keep < 0) { selected.push_back(index_to_signed_index(i, false)); } } const size_t num_selected = selected.size(); DerivedFC kept_faces(num_selected, 3); DerivedJ kept_face_indices(num_selected, 1); for (size_t i=0; i<num_selected; i++) { size_t idx = abs(selected[i]) - 1; if (selected[i] > 0) { kept_faces.row(i) = F.row(idx); } else { kept_faces.row(i) = F.row(idx).reverse(); } kept_face_indices(i, 0) = CJ[idx]; } #ifdef MESH_BOOLEAN_TIMING log_time("extract_output"); #endif // Finally, remove duplicated faces and unreferenced vertices. { DerivedFC G; DerivedJ JJ; igl::resolve_duplicated_faces(kept_faces, G, JJ); igl::slice(kept_face_indices, JJ, 1, J); #ifdef DOUBLE_CHECK_EXACT_OUTPUT { // Sanity check on exact output. igl::copyleft::cgal::RemeshSelfIntersectionsParam params; params.detect_only = true; params.first_only = true; MatrixXES dummy_VV; DerivedFC dummy_FF, dummy_IF; Eigen::VectorXi dummy_J, dummy_IM; igl::copyleft::cgal::SelfIntersectMesh< Kernel, MatrixXES, DerivedFC, MatrixXES, DerivedFC, DerivedFC, Eigen::VectorXi, Eigen::VectorXi > checker(V, G, params, dummy_VV, dummy_FF, dummy_IF, dummy_J, dummy_IM); if (checker.count != 0) { throw "Self-intersection not fully resolved."; } } #endif MatrixX3S Vs(V.rows(), V.cols()); for (size_t i=0; i<(size_t)V.rows(); i++) { for (size_t j=0; j<(size_t)V.cols(); j++) { igl::copyleft::cgal::assign_scalar(V(i,j), Vs(i,j)); } } Eigen::VectorXi newIM; igl::remove_unreferenced(Vs,G,VC,FC,newIM); } #ifdef MESH_BOOLEAN_TIMING log_time("clean_up"); #endif return valid; }
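Callers normally do not supply the wind_num_op/keep functors directly; the sketch below uses the convenience overload that this libigl vintage declares in the same namespace. The header path and the MESH_BOOLEAN_TYPE_UNION enum are assumptions about this particular version and may differ in newer releases, where the function lives under igl::copyleft::cgal.

// Hypothetical usage sketch for the overloads above.
#include <Eigen/Core>
#include <igl/copyleft/boolean/mesh_boolean.h>

void union_example(const Eigen::MatrixXd& VA, const Eigen::MatrixXi& FA,
                   const Eigen::MatrixXd& VB, const Eigen::MatrixXi& FB)
{
    Eigen::MatrixXd VC;   // vertices of the result
    Eigen::MatrixXi FC;   // faces of the result
    Eigen::VectorXi J;    // birth face: index into [FA;FB] for each row of FC
    igl::copyleft::boolean::mesh_boolean(
        VA, FA, VB, FB, igl::MESH_BOOLEAN_TYPE_UNION, VC, FC, J);
}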
IGL_INLINE void igl::copyleft::boolean::mesh_boolean( const Eigen::PlainObjectBase<DerivedVA> & VA, const Eigen::PlainObjectBase<DerivedFA> & FA, const Eigen::PlainObjectBase<DerivedVB> & VB, const Eigen::PlainObjectBase<DerivedFB> & FB, const WindingNumberOp& wind_num_op, const KeepFunc& keep, const ResolveFunc& resolve_fun, Eigen::PlainObjectBase<DerivedVC > & VC, Eigen::PlainObjectBase<DerivedFC > & FC, Eigen::PlainObjectBase<DerivedJ > & J) { typedef typename DerivedVC::Scalar Scalar; //typedef typename DerivedFC::Scalar Index; typedef CGAL::Epeck Kernel; typedef Kernel::FT ExactScalar; typedef Eigen::Matrix<Scalar,Eigen::Dynamic,3> MatrixX3S; //typedef Eigen::Matrix<Index,Eigen::Dynamic,Eigen::Dynamic> MatrixXI; typedef Eigen::Matrix<typename DerivedJ::Scalar,Eigen::Dynamic,1> VectorXJ; // Generate combined mesh. typedef Eigen::Matrix< ExactScalar, Eigen::Dynamic, Eigen::Dynamic, DerivedVC::IsRowMajor> MatrixXES; MatrixXES V; DerivedFC F; VectorXJ CJ; { DerivedVA VV(VA.rows() + VB.rows(), 3); DerivedFC FF(FA.rows() + FB.rows(), 3); VV << VA, VB; FF << FA, FB.array() + VA.rows(); //// Handle annoying empty cases //if(VA.size()>0) //{ // VV<<VA; //} //if(VB.size()>0) //{ // VV<<VB; //} //if(FA.size()>0) //{ // FF<<FA; //} //if(FB.size()>0) //{ // FF<<FB.array()+VA.rows(); //} resolve_fun(VV, FF, V, F, CJ); } // Compute winding numbers on each side of each facet. const size_t num_faces = F.rows(); Eigen::MatrixXi W; Eigen::VectorXi labels(num_faces); std::transform(CJ.data(), CJ.data()+CJ.size(), labels.data(), [&](int i) { return i<FA.rows() ? 0:1; }); igl::copyleft::cgal::propagate_winding_numbers(V, F, labels, W); assert((size_t)W.rows() == num_faces); if (W.cols() == 2) { assert(FB.rows() == 0); Eigen::MatrixXi W_tmp(num_faces, 4); W_tmp << W, Eigen::MatrixXi::Zero(num_faces, 2); W = W_tmp; } else { assert(W.cols() == 4); } // Compute resulting winding number. Eigen::MatrixXi Wr(num_faces, 2); for (size_t i=0; i<num_faces; i++) { Eigen::MatrixXi w_out(1,2), w_in(1,2); w_out << W(i,0), W(i,2); w_in << W(i,1), W(i,3); Wr(i,0) = wind_num_op(w_out); Wr(i,1) = wind_num_op(w_in); } // Extract boundary separating inside from outside. auto index_to_signed_index = [&](size_t i, bool ori) -> int{ return (i+1)*(ori?1:-1); }; //auto signed_index_to_index = [&](int i) -> size_t { // return abs(i) - 1; //}; std::vector<int> selected; for(size_t i=0; i<num_faces; i++) { auto should_keep = keep(Wr(i,0), Wr(i,1)); if (should_keep > 0) { selected.push_back(index_to_signed_index(i, true)); } else if (should_keep < 0) { selected.push_back(index_to_signed_index(i, false)); } } const size_t num_selected = selected.size(); DerivedFC kept_faces(num_selected, 3); DerivedJ kept_face_indices(num_selected, 1); for (size_t i=0; i<num_selected; i++) { size_t idx = abs(selected[i]) - 1; if (selected[i] > 0) { kept_faces.row(i) = F.row(idx); } else { kept_faces.row(i) = F.row(idx).reverse(); } kept_face_indices(i, 0) = CJ[idx]; } // Finally, remove duplicated faces and unreferenced vertices. { DerivedFC G; DerivedJ JJ; igl::resolve_duplicated_faces(kept_faces, G, JJ); igl::slice(kept_face_indices, JJ, 1, J); MatrixX3S Vs(V.rows(), V.cols()); for (size_t i=0; i<(size_t)V.rows(); i++) { for (size_t j=0; j<(size_t)V.cols(); j++) { igl::copyleft::cgal::assign_scalar(V(i,j), Vs(i,j)); } } Eigen::VectorXi newIM; igl::remove_unreferenced(Vs,G,VC,FC,newIM); } }