LinearCombinationObjective(const std::vector<Teuchos::RCP<Objective<Real> > > &obj) : Objective<Real>(), obj_(obj), xdual_(Teuchos::null), initialized_(false) { size_ = obj_.size(); weights_.clear(); weights_.assign(size_,static_cast<Real>(1)); }
GLuint Normals(std::vector<T>& dest) const { auto n = _normals(); dest.assign(n.begin(), n.end()); return 3; }
GLuint TexCoordinates(std::vector<T>& dest) const { auto t = _tex_coords(); dest.assign(t.begin(), t.end()); return 3; }
//FIXME: improve performance //TODO void filter_matches_by_lis(const std::vector<cv::KeyPoint> src_keypoints, const std::vector<cv::KeyPoint> dst_keypoints, const std::vector<cv::DMatch> &matches, std::vector<char> &mask) { if(matches.size() != mask.size()) return; int valid_before = 0; for(size_t i=0; i<mask.size(); i++) { if(mask[i]) valid_before++; } std::vector<int> sequence(matches.size()); //determine horizontal order of matches (with respect to src_keypoints) for(size_t i = 0; i < matches.size(); i++) { if(mask[i] == 0) { continue; } const float x_min = src_keypoints[matches[i].queryIdx].pt.x; // minimum of current match size_t x_min_counter = 0; // how many matches are smaller than current match for(size_t j = 0; j < matches.size(); j++) { size_t kp_index = matches[j].queryIdx; if(src_keypoints[kp_index].pt.x < x_min) { x_min_counter++; } } sequence[i] = x_min_counter; } float last_min = 0.0; std::vector<int> sorted_sequence; sorted_sequence.reserve(matches.size()); std::map<int,int> seq2index_map; //maps sequence index to matches index //sort matches (with respect to dst_keypoints) for(size_t i = 0; i < matches.size(); i++) { float x_min = std::numeric_limits<float>::max(); size_t x_min_index = 0; for(size_t j = 0; j < matches.size(); j++) { if(mask[j] == 0) continue; size_t kp_index = matches[j].trainIdx; if(dst_keypoints[kp_index].pt.x < x_min && dst_keypoints[kp_index].pt.x > last_min) { x_min = dst_keypoints[kp_index].pt.x; x_min_index = j; } } sorted_sequence.push_back(sequence[x_min_index]); seq2index_map.insert( std::make_pair(sequence[x_min_index], x_min_index)); last_min = x_min; } std::vector<int> lis; find_lis(sorted_sequence, lis); mask.assign(mask.size(), 0); for(size_t i = 0; i < lis.size(); i++) { mask[ seq2index_map[lis[i]] ] = 1; } int valid_after = 0; for(size_t i=0; i<mask.size(); i++) { if(mask[i]) valid_after++; } dout << "filtered " << valid_before - valid_after << " matches by lis" << std::endl; }
// Right-hand side with dy/dt = y: the derivative vector is simply a copy of the current state.
void EvaluateYDerivatives(double time, const std::vector<double>& rY, std::vector<double>& rDY)
{
    rDY.assign(rY.begin(), rY.end());
}
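As a hedged illustration of how a derivative callback with this shape is typically consumed, here is a minimal fixed-step explicit Euler driver; the `EulerIntegrate` name and template are hypothetical additions, not part of the original code:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical sketch: integrate dy/dt = f(t, y) with explicit Euler, where
// f has the same shape as EvaluateYDerivatives above (fills rDY from rY).
template <typename Rhs>
std::vector<double> EulerIntegrate(Rhs rhs, std::vector<double> y,
                                   double t0, double t1, double dt)
{
    std::vector<double> dy(y.size());
    for (double t = t0; t < t1; t += dt) {
        rhs(t, y, dy);                          // dy <- f(t, y)
        for (std::size_t i = 0; i < y.size(); ++i)
            y[i] += dt * dy[i];                 // y <- y + dt * dy
    }
    return y;
}
```

With the right-hand side above (dy = y), each component grows approximately like y_i(0)·e^t.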
// callback for the complete message void complete_message_callback(const homog_track::HomogComplete& msg) { /********** Begin splitting up the incoming message *********/ // getting boolean indicating the reference has been set reference_set = msg.reference_set; // if the reference is set then will break out the points if (reference_set) { // initializer temp scalar to zero temp_scalar = cv::Mat::zeros(1,1,CV_64F); // getting the current marker points circles_curr = msg.current_points; // getting the refernce marker points circles_ref = msg.reference_points; // setting the current points to the point vector curr_red_p.x = circles_curr.red_circle.x; curr_green_p.x = circles_curr.green_circle.x; curr_cyan_p.x = circles_curr.cyan_circle.x; curr_purple_p.x = circles_curr.purple_circle.x; curr_red_p.y = circles_curr.red_circle.y; curr_green_p.y = circles_curr.green_circle.y; curr_cyan_p.y = circles_curr.cyan_circle.y; curr_purple_p.y = circles_curr.purple_circle.y; curr_points_p.push_back(curr_red_p); curr_points_p.push_back(curr_green_p); curr_points_p.push_back(curr_cyan_p); curr_points_p.push_back(curr_purple_p); // converting the points to be the projective coordinates for (int ii = 0; ii < curr_points_m.size(); ii++) { curr_points_m[ii] = K.inv(cv::DECOMP_LU)*curr_points_m[ii]; std::cout << "currpoints at " << ii << " is: " << curr_points_m[ii] << std::endl; } // setting the reference points to the point vector ref_red_p.x = circles_ref.red_circle.x; ref_green_p.x = circles_ref.green_circle.x; ref_cyan_p.x = circles_ref.cyan_circle.x; ref_purple_p.x = circles_ref.purple_circle.x; ref_red_p.y = circles_ref.red_circle.y; ref_green_p.y = circles_ref.green_circle.y; ref_cyan_p.y = circles_ref.cyan_circle.y; ref_purple_p.y = circles_ref.purple_circle.y; ref_points_p.push_back(ref_red_p); ref_points_p.push_back(ref_green_p); ref_points_p.push_back(ref_cyan_p); ref_points_p.push_back(ref_purple_p); // setting the reference points to the matrix vector, dont need to do the last one because its already 1 ref_red_m.at<double>(0,0) = ref_red_p.x; ref_red_m.at<double>(1,0) = ref_red_p.y; ref_green_m.at<double>(0,0) = ref_green_p.x; ref_green_m.at<double>(1,0) = ref_green_p.y; ref_cyan_m.at<double>(0,0) = ref_cyan_p.x; ref_cyan_m.at<double>(1,0) = ref_cyan_p.y; ref_purple_m.at<double>(0,0) = ref_purple_p.x; ref_purple_m.at<double>(1,0) = ref_purple_p.y; ref_points_m.push_back(ref_red_m); ref_points_m.push_back(ref_green_m); ref_points_m.push_back(ref_cyan_m); ref_points_m.push_back(ref_purple_m); // converting the points to be the projective coordinates for (int ii = 0; ii < ref_points_m.size(); ii++) { ref_points_m[ii] = K.inv(cv::DECOMP_LU)*ref_points_m[ii]; //std::cout << "refpoints at " << ii << " is: " << ref_points_m[ii] << std::endl; } // if any of the points have a -1 will skip over the homography if (curr_red_p.x != -1 && curr_green_p.x != -1 && curr_cyan_p.x != -1 && curr_purple_p.x != -1) { //std::cout << "hi" << std::endl; // finding the perspective homography G = cv::findHomography(curr_points_p,ref_points_p,0); //G = cv::findHomography(ref_points_p,ref_points_p,0); std::cout << "G: " << G << std::endl; // decomposing the homography into the four solutions // G and K are 3x3 // R is 3x3 // 3x1 // 3x1 // successful_decomp is the number of solutions found successful_decomp = cv::decomposeHomographyMat(G,K,R,T,n); std::cout << "successful_decomp: " << successful_decomp << std::endl; // if the decomp is successful will find the best matching if (successful_decomp > 0) { std::cout << std::endl 
<< std::endl << " begin check for visibility" << std::endl; // finding the alphas alpha_red.data = 1/(G.at<double>(2,0)*ref_red_p.x + G.at<double>(2,1)*ref_red_p.y + 1); alpha_green.data = 1/(G.at<double>(2,0)*ref_green_p.x + G.at<double>(2,1)*ref_green_p.y + 1); alpha_cyan.data = 1/(G.at<double>(2,0)*ref_cyan_p.x + G.at<double>(2,1)*ref_cyan_p.y + 1); alpha_purple.data = 1/(G.at<double>(2,0)*ref_purple_p.x + G.at<double>(2,1)*ref_purple_p.y + 1); // finding the solutions that give the positive results for (int ii = 0; ii < successful_decomp; ii++) { std::cout << "solution set number " << ii << std::endl; // performing the operation transpose(m)*R*n to check if greater than 0 later // order operating on is red green cyan purple for (int jj = 0; jj < 4; jj++) { //std::cout << " T size: " << T[ii].size() << std::endl; //std::cout << " T type: " << T[ii].type() << std::endl; std::cout << " T value: " << T[ii] << std::endl; //std::cout << " temp scalar 1 " << std::endl; //std::cout << " temp scalar size: " << temp_scalar.size() << std::endl; //std::cout << " temp scalar type: " << temp_scalar.type() << std::endl; //std::cout << " temp scalar value " << temp_scalar <<std::endl; temp_scalar = curr_points_m[jj].t(); //std::cout << " temp scalar 2 " << std::endl; //std::cout << " temp scalar size: " << temp_scalar.size() << std::endl; //std::cout << " temp scalar type: " << temp_scalar.type() << std::endl; //std::cout << " temp scalar value " << temp_scalar <<std::endl; //std::cout << " R size: " << R[ii].size() << std::endl; //std::cout << " R type: " << R[ii].type() << std::endl; //std::cout << " R value: " << R[ii] << std::endl; temp_scalar = temp_scalar*R[ii]; //std::cout << " temp scalar 3 " << std::endl; //std::cout << " temp scalar size: " << temp_scalar.size() << std::endl; //std::cout << " temp scalar type: " << temp_scalar.type() << std::endl; //std::cout << " temp scalar value " << temp_scalar <<std::endl; //std::cout << " n size: " << n[ii].size() << std::endl; //std::cout << " n type: " << n[ii].type() << std::endl; std::cout << " n value: " << n[ii] << std::endl; temp_scalar = temp_scalar*n[ii]; //std::cout << " temp scalar size: " << temp_scalar.size() << std::endl; //std::cout << " temp scalar type: " << temp_scalar.type() << std::endl; //std::cout << " temp scalar value " << temp_scalar <<std::endl; //std::cout << " temp scalar value at 0,0" << temp_scalar.at<double>(0,0) << std::endl; scalar_value_check.push_back(temp_scalar.at<double>(0,0)); ////std::cout << " scalar value check size: " << scalar_value_check.size() << std::endl; //std::cout << " \tthe value for the " << jj << " visibility check is: " << scalar_value_check[4*ii+jj] << std::endl; } } std::cout << " end check for visibility" << std::endl << std::endl; // restting first solution found and second solution found first_solution_found = false; second_solution_found = false; fc_found = false; // getting the two solutions or only one if there are not two for (int ii = 0; ii < successful_decomp; ii++) { // getting the values onto the temporary vector // getting the start and end of the next solution temp_solution_start = scalar_value_check.begin() + 4*ii; temp_solution_end = scalar_value_check.begin() + 4*ii+4; temp_solution.assign(temp_solution_start,temp_solution_end); // checking if all the values are positive all_positive = true; current_temp_index = 0; while (all_positive && current_temp_index < 4) { if (temp_solution[current_temp_index] >= 0) { current_temp_index++; } else { all_positive = false; } } // if all the 
values were positive and a first solution has not been found will assign // to first solution. if all positive and first solution has been found will assign // to second solution. if all positive is false then will not do anything if (all_positive && first_solution_found && !second_solution_found) { // setting it to indicate a solution has been found second_solution_found = true; // setting the rotation, translation, and normal to be the second set second_R = R[ii]; second_T = T[ii]; second_n = n[ii]; // setting the projected values second_solution = temp_solution; } else if (all_positive && !first_solution_found) { // setting it to indicate a solution has been found first_solution_found = true; // setting the rotation, translation, and normal to be the first set first_R = R[ii]; first_T = T[ii]; first_n = n[ii]; // setting the projected values first_solution = temp_solution; } // erasing all the values from the temp solution temp_solution.erase(temp_solution.begin(),temp_solution.end()); } // erasing all the scalar values from the check scalar_value_check.erase(scalar_value_check.begin(),scalar_value_check.end()); // displaying the first solution if it was found if (first_solution_found) { std::cout << std::endl << "first R: " << first_R << std::endl; std::cout << "first T: " << first_T << std::endl; std::cout << "first n: " << first_n << std::endl; for (double ii : first_solution) { std::cout << ii << " "; } std::cout << std::endl; } // displaying the second solution if it was found if (second_solution_found) { std::cout << std::endl << "second R: " << second_R << std::endl; std::cout << "second T: " << second_T << std::endl; std::cout << "second n: " << second_n << std::endl; for (double ii : second_solution) { std::cout << ii << " "; } std::cout << std::endl; } // because the reference is set to the exact value when when n should have only a z componenet, the correct // choice should be the one closest to n_ref = [0,0,1]^T which will be the one with the greatest dot product with n_ref if (first_solution_found && second_solution_found) { if (first_n.dot(n_ref) >= second_n.dot(n_ref)) { R_fc = first_R; T_fc = first_T; } else { R_fc = second_R; T_fc = second_T; } fc_found = true; } else if(first_solution_found) { R_fc = first_R; T_fc = first_T; fc_found = true; } //if a solution was found will publish // need to convert to pose message so use if (fc_found) { // converting the rotation from a cv matrix to quaternion, first need it as a matrix3x3 R_fc_tf[0][0] = R_fc.at<double>(0,0); R_fc_tf[0][1] = R_fc.at<double>(0,1); R_fc_tf[0][2] = R_fc.at<double>(0,2); R_fc_tf[1][0] = R_fc.at<double>(1,0); R_fc_tf[1][1] = R_fc.at<double>(1,1); R_fc_tf[1][2] = R_fc.at<double>(1,2); R_fc_tf[2][0] = R_fc.at<double>(2,0); R_fc_tf[2][1] = R_fc.at<double>(2,1); R_fc_tf[2][2] = R_fc.at<double>(2,2); std::cout << "Final R:\n" << R_fc << std::endl; // converting the translation to a vector 3 T_fc_tf.setX(T_fc.at<double>(0,0)); T_fc_tf.setY(T_fc.at<double>(0,1)); T_fc_tf.setZ(T_fc.at<double>(0,2)); std::cout << "Final T :\n" << T_fc << std::endl; // getting the rotation as a quaternion R_fc_tf.getRotation(Q_fc_tf); std::cout << "current orientation:" << "\n\tx:\t" << Q_fc_tf.getX() << "\n\ty:\t" << Q_fc_tf.getY() << "\n\tz:\t" << Q_fc_tf.getZ() << "\n\tw:\t" << Q_fc_tf.getW() << std::endl; std::cout << "norm of quaternion:\t" << Q_fc_tf.length() << std::endl; // getting the negated version of the quaternion for the check Q_fc_tf_negated = 
tf::Quaternion(-Q_fc_tf.getX(),-Q_fc_tf.getY(),-Q_fc_tf.getZ(),-Q_fc_tf.getW()); std::cout << "negated orientation:" << "\n\tx:\t" << Q_fc_tf_negated.getX() << "\n\ty:\t" << Q_fc_tf_negated.getY() << "\n\tz:\t" << Q_fc_tf_negated.getZ() << "\n\tw:\t" << Q_fc_tf_negated.getW() << std::endl; std::cout << "norm of negated quaternion:\t" << Q_fc_tf_negated.length() << std::endl; // showing the last orientation std::cout << "last orientation:" << "\n\tx:\t" << Q_fc_tf_last.getX() << "\n\ty:\t" << Q_fc_tf_last.getY() << "\n\tz:\t" << Q_fc_tf_last.getZ() << "\n\tw:\t" << Q_fc_tf_last.getW() << std::endl; std::cout << "norm of last quaternion:\t" << Q_fc_tf_last.length() << std::endl; // checking if the quaternion has flipped Q_norm_current_diff = std::sqrt(std::pow(Q_fc_tf.getX() - Q_fc_tf_last.getX(),2.0) + std::pow(Q_fc_tf.getY() - Q_fc_tf_last.getY(),2.0) + std::pow(Q_fc_tf.getZ() - Q_fc_tf_last.getZ(),2.0) + std::pow(Q_fc_tf.getW() - Q_fc_tf_last.getW(),2.0)); std::cout << "current difference:\t" << Q_norm_current_diff << std::endl; Q_norm_negated_diff = std::sqrt(std::pow(Q_fc_tf_negated.getX() - Q_fc_tf_last.getX(),2.0) + std::pow(Q_fc_tf_negated.getY() - Q_fc_tf_last.getY(),2.0) + std::pow(Q_fc_tf_negated.getZ() - Q_fc_tf_last.getZ(),2.0) + std::pow(Q_fc_tf_negated.getW() - Q_fc_tf_last.getW(),2.0)); std::cout << "negated difference:\t" << Q_norm_negated_diff << std::endl; if (Q_norm_current_diff > Q_norm_negated_diff) { Q_fc_tf = Q_fc_tf_negated; } // updating the last Q_fc_tf_last = Q_fc_tf; // converting the tf quaternion to a geometry message quaternion Q_fc_gm.x = Q_fc_tf.getX(); Q_fc_gm.y = Q_fc_tf.getY(); Q_fc_gm.z = Q_fc_tf.getZ(); Q_fc_gm.w = Q_fc_tf.getW(); // converting the tf vector3 to a point P_fc_gm.x = T_fc_tf.getX(); P_fc_gm.y = T_fc_tf.getY(); P_fc_gm.z = T_fc_tf.getZ(); // setting the transform with the values fc_tf.setOrigin(T_fc_tf); fc_tf.setRotation(Q_fc_tf); tf_broad.sendTransform(tf::StampedTransform(fc_tf, msg.header.stamp,"f_star","f_current")); // setting the decomposed message pose_fc_gm.position = P_fc_gm; pose_fc_gm.orientation = Q_fc_gm; decomposed_msg.pose = pose_fc_gm; decomposed_msg.header.stamp = msg.header.stamp; decomposed_msg.header.frame_id = "current_frame_normalized"; decomposed_msg.alpha_red = alpha_red; decomposed_msg.alpha_green = alpha_green; decomposed_msg.alpha_cyan = alpha_cyan; decomposed_msg.alpha_purple = alpha_purple; homog_decomp_pub.publish(decomposed_msg); std::cout << "complete message\n" << decomposed_msg << std::endl << std::endl; // publish the marker marker.pose = pose_fc_gm; marker_pub.publish(marker); } } } // erasing all the temporary points if (first_solution_found || second_solution_found) { // erasing all the point vectors and matrix vectors curr_points_p.erase(curr_points_p.begin(),curr_points_p.end()); ref_points_p.erase(ref_points_p.begin(),ref_points_p.end()); curr_points_m.erase(curr_points_m.begin(),curr_points_m.end()); ref_points_m.erase(ref_points_m.begin(),ref_points_m.end()); } } /********** End splitting up the incoming message *********/ }
void BsplinesCurveEvaluator::evaluateCurve(const std::vector<Point>& ptvCtrlPts, std::vector<Point>& ptvEvaluatedCurvePts, const float& fAniLength, const bool& bWrap) const { if (s_AddNewPt) return; int iCtrlPtCount = ptvCtrlPts.size(); ptvEvaluatedCurvePts.assign(ptvCtrlPts.begin(), ptvCtrlPts.end()); ptvEvaluatedCurvePts.clear(); float x = 0.0; float y1; // Bezier if (bWrap) { for (int i = 0; i < iCtrlPtCount; i++) { std::cout << "i" << i << std::endl; int p0 = (i) % iCtrlPtCount; int p1 = (i + 1) % iCtrlPtCount; int p2 = (i + 2) % iCtrlPtCount; int p3 = (i + 3) % iCtrlPtCount; for(float n=0; n < s_iSegCount; n++){ float u = ((float)n)/((float)s_iSegCount-1); float y = (-1.0 / 6.0 * pow(u, 3) + 1.0 / 2.0 * pow(u, 2) - 1.0 / 2.0 * u + 1.0 / 6.0) * ptvCtrlPts[p0].y + \ ( 1.0 / 2.0 * pow(u, 3) - pow(u, 2) + 2.0 / 3.0) * ptvCtrlPts[p1].y + \ (-1.0 / 2.0 * pow(u, 3) + 1.0 / 2.0 * pow(u, 2) + 1.0 / 2.0 * u + 1.0 / 6.0) * ptvCtrlPts[p2].y + \ (1.0 / 6.0 * pow(u, 3) ) * ptvCtrlPts[p3].y; float len = ptvCtrlPts[p2].x - ptvCtrlPts[p1].x; if (len < 0) len += fAniLength; float x = ptvCtrlPts[p1].x + u * len; if (x > fAniLength) x = x - fAniLength; ptvEvaluatedCurvePts.push_back(Point(x,y)); } } } else { std::vector<Point> newPtvCtrlPts; newPtvCtrlPts.push_back(ptvCtrlPts[0]); newPtvCtrlPts.push_back(ptvCtrlPts[0]); for(int i=0;i<iCtrlPtCount;i++){ newPtvCtrlPts.push_back(ptvCtrlPts[i]); } if(iCtrlPtCount>1){ newPtvCtrlPts.push_back(ptvCtrlPts[iCtrlPtCount-1]); newPtvCtrlPts.push_back(ptvCtrlPts[iCtrlPtCount-1]); } int i = 0; for(; i < newPtvCtrlPts.size()-3; i++){ for(float n=0; n < s_iSegCount; n++){ float u = ((float)n)/((float)s_iSegCount-1); float x=0.0; float y=0.0; x = (-1.0 / 6.0 * pow(u, 3) + 1.0 / 2.0 * pow(u, 2) - 1.0 / 2.0 * u + 1.0 / 6.0) * newPtvCtrlPts[i].x + \ ( 1.0 / 2.0 * pow(u, 3) - pow(u, 2) + 2.0 / 3.0) * newPtvCtrlPts[i + 1].x + \ (-1.0 / 2.0 * pow(u, 3) + 1.0 / 2.0 * pow(u, 2) + 1.0 / 2.0 * u + 1.0 / 6.0) * newPtvCtrlPts[i + 2].x + \ (1.0 / 6.0 * pow(u, 3) ) * newPtvCtrlPts[i + 3].x; y = (-1.0 / 6.0 * pow(u, 3) + 1.0 / 2.0 * pow(u, 2) - 1.0 / 2.0 * u + 1.0 / 6.0) * newPtvCtrlPts[i].y + \ ( 1.0 / 2.0 * pow(u, 3) - pow(u, 2) + 2.0 / 3.0) * newPtvCtrlPts[i + 1].y + \ (-1.0 / 2.0 * pow(u, 3) + 1.0 / 2.0 * pow(u, 2) + 1.0 / 2.0 * u + 1.0 / 6.0) * newPtvCtrlPts[i + 2].y + \ (1.0 / 6.0 * pow(u, 3) ) * newPtvCtrlPts[i + 3].y; ptvEvaluatedCurvePts.push_back(Point(x,y)); } } // start ptvEvaluatedCurvePts.push_back(Point(0, ptvCtrlPts[0].y)); // end ptvEvaluatedCurvePts.push_back(Point(fAniLength, ptvCtrlPts[iCtrlPtCount-1].y)); } }
// Read the entire contents of a file inside a zip archive into a disk file and/or into memory.
// The zip file must be a patch file previously added via addPatchFile.
bool Updater::_readContentsFromZip(const char* szZipFile, const char* szFileName, const char* szDiskFileName, char** ppMemoryBuf, unsigned int& nFileSize)
{
    // Parameter checks
    assert(szZipFile && szFileName && (szDiskFileName || ppMemoryBuf));
    if(!szZipFile || szZipFile[0]==0 || !szFileName || szFileName[0]==0 || (!szDiskFileName && !ppMemoryBuf))
    {
        setLastError(AXP_ERR_PARAM);
        return false;
    }

    // Look up the zip file that was added earlier
    PATCHFILE_MAP::iterator itPatch = m_mapPatchFile.find(normaliseName(szZipFile));
    // The zip file could not be found
    assert(itPatch != m_mapPatchFile.end());
    if(itPatch == m_mapPatchFile.end())
    {
        setLastError(AXP_ERR_PARAM, "%s not inserted", szZipFile);
        return false;
    }

    // Get the ZIP handle
    ZZIP_DIR* mZzipDir = (ZZIP_DIR*)(itPatch->second);
    assert(mZzipDir);

    std::string norFileName = normaliseName(szFileName, false, true);

    // Get the file information
    ZZIP_STAT zstat;
    memset(&zstat, 0, sizeof(ZZIP_STAT));

    // Open the file; if it cannot be opened, treat it as an empty file
    ZZIP_FILE* zzipFile = zzip_file_open(mZzipDir, norFileName.c_str(), ZZIP_ONLYZIP | ZZIP_CASELESS);
    if(zzipFile)
    {
        int zip_err = zzip_dir_stat(mZzipDir, norFileName.c_str(), &zstat, ZZIP_CASEINSENSITIVE);
        if(zip_err!=0)
        {
            zzip_file_close(zzipFile);
            setLastError(AXP_ERR_ZIPFILE, "ziperr=%d", mZzipDir->errcode);
            return false;
        }
    }

    // If writing to a disk file was requested
    if(szDiskFileName)
    {
        // Make sure the directory containing the file exists
        char szDiskFilePath[MAX_PATH] = {0};
        strncpy(szDiskFilePath, szDiskFileName, MAX_PATH);
        PathRemoveFileSpec(szDiskFilePath);
        if(szDiskFilePath[0]!=0 && !forceCreatePath(szDiskFilePath))
        {
            if(zzipFile) zzip_file_close(zzipFile);
            setLastError(AXP_ERR_FILE_ACCESS, "Path=%s, WinErr=%d", szDiskFilePath, ::GetLastError());
            return false;
        }

        // Create the file
        HANDLE hDiskFile = ::CreateFile(szDiskFileName, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, 0, CREATE_ALWAYS, FILE_ATTRIBUTE_ARCHIVE, 0);
        if(hDiskFile == INVALID_HANDLE_VALUE)
        {
            if(zzipFile) zzip_file_close(zzipFile);
            setLastError(AXP_ERR_FILE_ACCESS, "File=%s, WinErr=%d", szDiskFileName, ::GetLastError());
            return false;
        }

        if(zstat.st_size > 0)
        {
            const int MAX_BUFFER_SIZE = 4096;
            char buffer[MAX_BUFFER_SIZE] = {0};
            zzip_seek(zzipFile, 0, SEEK_SET);
            zzip_ssize_t zReadSize = zzip_file_read(zzipFile, buffer, sizeof(buffer));
            // Size actually written so far (currently unused)
            unsigned int nActWriteSize = 0;
            // Read and write the file contents in chunks
            do
            {
                // End of file
                if(zReadSize==0) break;
                // Write to the disk file
                DWORD dwBytesWrite;
                if(!WriteFile(hDiskFile, buffer, (DWORD)zReadSize, &dwBytesWrite, 0) || dwBytesWrite != (DWORD)zReadSize)
                {
                    zzip_file_close(zzipFile);
                    CloseHandle(hDiskFile);
                    setLastError(AXP_ERR_FILE_WRITE, "File=%s, WinErr: %d", szDiskFileName, GetLastError());
                    return false;
                }
                // End of file
                if(zzip_tell(zzipFile) >= zstat.st_size) break;
                zReadSize = zzip_file_read(zzipFile, buffer, sizeof(buffer));
            } while(true);
        }

        // Close the handle
        CloseHandle(hDiskFile);
        hDiskFile=0;
    }

    // If reading into memory was requested
    if(ppMemoryBuf)
    {
        // Memory required, rounded up to a multiple of 4
        unsigned int nMemoryNeed = (unsigned int)zstat.st_size+1;
        while(nMemoryNeed%4) nMemoryNeed++;

        // Grow the static buffer if needed
        static std::vector< unsigned char > s_autoMemory;
        if(s_autoMemory.size() < nMemoryNeed)
        {
            s_autoMemory.resize(nMemoryNeed);
        }
        s_autoMemory.assign(s_autoMemory.size(), 0);

        // Read the file contents
        if(zstat.st_size > 0)
        {
            zzip_seek(zzipFile, 0, SEEK_SET);
            zzip_ssize_t nZipSize = zzip_file_read(zzipFile, (char*)&(s_autoMemory[0]), zstat.st_size);
            if(nZipSize != zstat.st_size)
            {
                zzip_file_close(zzipFile);
                setLastError(AXP_ERR_ZIPFILE, "ziperr=%d", mZzipDir->errcode);
                return false;
            }
        }

        // Return the contents
        *ppMemoryBuf = (char *)(&(s_autoMemory[0]));
    }

    // Close the handle
    if(zzipFile) zzip_file_close(zzipFile);
    zzipFile=0;

    nFileSize = (unsigned int)zstat.st_size;
    return true;
}
// helpers static void resetLightUniformValues() { const auto& conf = Configuration::getInstance(); int maxDirLight = conf->getMaxSupportDirLightInShader(); int maxPointLight = conf->getMaxSupportPointLightInShader(); int maxSpotLight = conf->getMaxSupportSpotLightInShader(); s_dirLightUniformColorValues.assign(maxDirLight, Vec3::ZERO); s_dirLightUniformDirValues.assign(maxDirLight, Vec3::ZERO); s_pointLightUniformColorValues.assign(maxPointLight, Vec3::ZERO); s_pointLightUniformPositionValues.assign(maxPointLight, Vec3::ZERO); s_pointLightUniformRangeInverseValues.assign(maxPointLight, 0.0f); s_spotLightUniformColorValues.assign(maxSpotLight, Vec3::ZERO); s_spotLightUniformPositionValues.assign(maxSpotLight, Vec3::ZERO); s_spotLightUniformDirValues.assign(maxSpotLight, Vec3::ZERO); s_spotLightUniformInnerAngleCosValues.assign(maxSpotLight, 0.0f); s_spotLightUniformOuterAngleCosValues.assign(maxSpotLight, 0.0f); s_spotLightUniformRangeInverseValues.assign(maxSpotLight, 0.0f); }
void PHG4HoughTransform::projectToRadius(const SvtxTrackState* state, int charge, double B, double radius, std::vector<double>& intersection) { intersection.clear(); intersection.assign(3,NAN); // find 2d intersections in x,y plane std::set<std::vector<double> > intersections; if (B != 0.0) { // magentic field present, project track as a circle leaving the state position // compute the center of rotation and the helix parameters // x(u) = r*cos(q*u+cphi) + cx // y(u) = r*sin(q*u+cphi) + cy // z(u) = b*u + posz; double cr = state->get_pt() * 333.6 / B; // radius of curvature double cx = state->get_x() - (state->get_py()*cr)/charge/state->get_pt(); // center of rotation, x double cy = (state->get_px()*cr)/charge/state->get_pt() + state->get_y(); // center of rotation, y double cphi = atan2(state->get_y()-cy,state->get_x()-cx); // phase of state position double b = state->get_pz()/state->get_pt()*cr; // pitch along z if (!circle_circle_intersections(0.0,0.0,radius, cx,cy,cr, &intersections)) { return; } if (intersections.empty()) return; // downselect solutions based on track direction // we want the solution where the state vector would exit the cylinder // this can be determined by the direction that the track circulates in // rotate the px,py to the postion of the solution // then ask if the dot product of the displacement vector between the solution // and the cylinder center with the rotated momentum vector is positive std::set<std::vector<double> >::iterator remove_iter = intersections.end(); double intersection_z = 0.0; for (std::set<std::vector<double> >::iterator iter = intersections.begin(); iter != intersections.end(); ++iter) { double x = iter->at(0); double y = iter->at(1); // find the azimuthal rotation about the center of rotation between the state vector and the solution // displacement between center of rotation and solution double dx = x - cx; double dy = y - cy; double dphi = atan2(dy,dx); // displacement between center of rotation and state position double state_dx = state->get_x() - cx; double state_dy = state->get_y() - cy; double state_dphi = atan2(state_dy,state_dx); // relative rotation angle double rotphi = (dphi-state_dphi); // rotated momentum at the solution double rotpx = cos(rotphi)*state->get_px() - sin(rotphi)*state->get_py(); double rotpy = sin(rotphi)*state->get_px() + cos(rotphi)*state->get_py(); // assumes cylinder is centered at 0,0 double dot = rotpx*x + rotpy*y; // our solution will have a momentum vector leaving the cylinder surface if (dot >= 0.0) { // find the solution for z double u = (dphi - cphi)/charge; // look only along the projection (not backward) if (u > 2.0*M_PI) { u = u - 2.0*M_PI; } else if (u < 0.0) { u = u + 2.0*M_PI; } intersection_z = b*u+state->get_z(); } else { remove_iter = iter; } } if (remove_iter != intersections.end()) { intersections.erase(remove_iter); } if (intersections.empty()) return; intersection[0] = intersections.begin()->at(0); intersection[1] = intersections.begin()->at(1); intersection[2] = intersection_z; return; } else { // no magnetic field project track as a line circle_line_intersections(0.0,0.0,radius, state->get_x(),state->get_y(),state->get_px(),state->get_py(), &intersections); if (intersections.empty()) return; // downselect solutions based on track direction // we want the solution where the state vector would point outward // since the track doesn't bend this will be the solution where // the dot product of the displacement between the solution and the cylinder center // and the momentum vector is 
positive std::set<std::vector<double> >::iterator remove_iter = intersections.end(); double intersection_z = 0.0; for (std::set<std::vector<double> >::iterator iter = intersections.begin(); iter != intersections.end(); ++iter) { double x = iter->at(0); double y = iter->at(1); // assumes cylinder is centered at 0,0 double dot = state->get_px()*x + state->get_py()*y; if (dot >= 0.0) { // x(u) = px*u + x1 // y(u) = py*u + y1 // z(u) = pz*u + z1 double u = NAN; /* use the candidate point (x,y) here; intersection[] still holds its NAN placeholders at this stage */ if (state->get_px() != 0) { u = (x - state->get_x()) / state->get_px(); } else if (state->get_py() != 0) { u = (y - state->get_y()) / state->get_py(); } intersection_z = state->get_pz()*u+state->get_z(); } else { remove_iter = iter; } } if (remove_iter != intersections.end()) { intersections.erase(remove_iter); } if (intersections.empty()) return; intersection[0] = intersections.begin()->at(0); intersection[1] = intersections.begin()->at(1); intersection[2] = intersection_z; return; } return; }
bool ICOOutput::write_scanline(int y, int z, TypeDesc format, const void* data, stride_t xstride) { m_spec.auto_stride(xstride, format, spec().nchannels); const void* origdata = data; data = to_native_scanline(format, data, xstride, m_scratch, m_dither, y, z); if (data == origdata) { m_scratch.assign((unsigned char*)data, (unsigned char*)data + m_spec.scanline_bytes()); data = &m_scratch[0]; } if (m_want_png) { if (!PNG_pvt::write_row(m_png, (png_byte*)data)) { error("PNG library error"); return false; } } else { unsigned char* bdata = (unsigned char*)data; unsigned char buf[4]; fseek(m_file, m_offset + sizeof(ico_bitmapinfo) + (m_spec.height - y - 1) * m_xor_slb, SEEK_SET); // write the XOR mask size_t buff_size = 0; for (int x = 0; x < m_spec.width; x++) { switch (m_color_type) { // reuse PNG constants case PNG_COLOR_TYPE_GRAY: buf[0] = buf[1] = buf[2] = bdata[x]; buff_size = 3; break; case PNG_COLOR_TYPE_GRAY_ALPHA: buf[0] = buf[1] = buf[2] = bdata[x * 2 + 0]; buf[3] = bdata[x * 2 + 1]; buff_size = 4; break; case PNG_COLOR_TYPE_RGB: buf[0] = bdata[x * 3 + 2]; buf[1] = bdata[x * 3 + 1]; buf[2] = bdata[x * 3 + 0]; buff_size = 3; break; case PNG_COLOR_TYPE_RGB_ALPHA: buf[0] = bdata[x * 4 + 2]; buf[1] = bdata[x * 4 + 1]; buf[2] = bdata[x * 4 + 0]; buf[3] = bdata[x * 4 + 3]; buff_size = 4; break; } if (!fwrite(buf, 1, buff_size)) { return false; } } fseek(m_file, m_offset + sizeof(ico_bitmapinfo) + m_spec.height * m_xor_slb + (m_spec.height - y - 1) * m_and_slb, SEEK_SET); // write the AND mask // It's required even for 32-bit images because it can be used when // drawing at colour depths lower than 24-bit. If it's not present, // Windows will read out-of-bounds, treating any data that it // encounters as the AND mask, resulting in ugly transparency effects. // Only need to do this for images with alpha, becasue 0 is opaque, // and we've already filled the file with zeros. if (m_color_type != PNG_COLOR_TYPE_GRAY && m_color_type != PNG_COLOR_TYPE_RGB) { for (int x = 0; x < m_spec.width; x += 8) { buf[0] = 0; for (int b = 0; b < 8 && x + b < m_spec.width; b++) { switch (m_color_type) { case PNG_COLOR_TYPE_GRAY_ALPHA: buf[0] |= bdata[(x + b) * 2 + 1] <= 127 ? (1 << (7 - b)) : 0; break; case PNG_COLOR_TYPE_RGB_ALPHA: buf[0] |= bdata[(x + b) * 4 + 3] <= 127 ? (1 << (7 - b)) : 0; break; } } if (!fwrite(&buf[0], 1, 1)) { return false; } } } } return true; }
bool IOStub::write(const uint8_t* buff, size_t bytes) { if(bytes == 0) { return true; } size_t last_index = _input.size(); _input.insert(_input.end(), buff, buff + bytes); auto start = _input.begin() + last_index; auto match = std::find(start, _input.end(), '\n'); if(match == _input.end()) { return true; } std::string data = std::string(start, match); _input.clear(); bool is_query = false; switch(data[0]) { case '?': is_query = true; break; case '=': is_query = false; break; default: return false; } std::string cmd; std::string args; if(is_query) { cmd = data.substr(1); } else { size_t end = data.find(','); cmd = data.substr(1, end - 1); //apparent fencepost error here is to remove the newline args = data.substr(end + 1); } boost::to_upper(cmd); boost::to_upper(args); const auto& processor = _processors.find(cmd); std::string resp = ""; if(processor != _processors.end()) { resp = processor->second(processor->first, args); } if(!resp.empty()) { _response_data.assign(resp.begin(), resp.end()); } return true; }
void Train(mxnet::NDArray data_array, mxnet::NDArray label_array, int max_epoches, int val_fold, float start_learning_rate, std::vector<mxnet::NDArray> &argsl) { /*prepare ndarray*/ learning_rate = start_learning_rate; size_t data_count = data_array.shape()[0]; size_t val_data_count = data_count * val_fold / 10; size_t train_data_count = data_count - val_data_count; train_data = data_array.Slice(0, train_data_count); train_label = label_array.Slice(0, train_data_count); val_data = data_array.Slice(train_data_count, data_count); val_label = label_array.Slice(train_data_count, data_count); size_t batch_size = in_args[0].shape()[0]; /*start the training*/ for (int iter = 0; iter < max_epoches; ++iter) { CHECK(optimizer); size_t start_index = 0; in_args[0] = train_data.Slice(start_index, start_index + batch_size).Copy(ctx_dev); in_args[in_args.size() - 1] = train_label.Slice(start_index, start_index + batch_size) .Copy(ctx_dev); in_args[0].WaitToRead(); in_args[in_args.size() - 1].WaitToRead(); while (start_index < train_data_count) { /*rebind the excutor*/ delete exe; exe = mxnet::Executor::Bind(net, ctx_dev, g2c, in_args, arg_grad_store, grad_req_type, aux_states); CHECK(exe); exe->Forward(true); exe->Backward(std::vector<mxnet::NDArray>()); start_index += batch_size; if (start_index < train_data_count) { if (start_index + batch_size >= train_data_count) start_index = train_data_count - batch_size; in_args[0] = train_data.Slice(start_index, start_index + batch_size) .Copy(ctx_dev); in_args[in_args.size() - 1] = train_label.Slice(start_index, start_index + batch_size) .Copy(ctx_dev); } for (size_t i = 1; i < in_args.size() - 1; ++i) { optimizer->Update(i, &in_args[i], &arg_grad_store[i], learning_rate); } for (size_t i = 1; i < in_args.size() - 1; ++i) { in_args[i].WaitToRead(); } in_args[0].WaitToRead(); in_args[in_args.size() - 1].WaitToRead(); } /*call every iter*/ TrainingCallBack(iter, exe); } argsl.assign(in_args.begin(), in_args.end()); }
/**get vector of minimal and maximal values from the class */ void MDWSDescription::getMinMax(std::vector<double> &min, std::vector<double> &max) const { min.assign(m_DimMin.begin(), m_DimMin.end()); max.assign(m_DimMax.begin(), m_DimMax.end()); }
GLuint Bitangents(std::vector<T>& dest) const { auto v = _bitangents(); dest.assign(v.begin(), v.end()); return 3; }
/// Encode translation and structure linear program with slack variables /// in order to handle noisy measurements. void EncodeTiXi_withNoise ( const Mat & M, //Scene representation const std::vector<Mat3> & Ri, double sigma, // Start upper bound sRMat & A, Vec & C, std::vector<LP_Constraints::eLP_SIGN> & vec_sign, std::vector<double> & vec_costs, std::vector<std::pair<double,double>> & vec_bounds ) { // Build Constraint matrix. const size_t Ncam = (size_t) M.row(3).maxCoeff()+1; const size_t N3D = (size_t) M.row(2).maxCoeff()+1; const size_t Nobs = M.cols(); assert(Ncam == Ri.size()); A.resize(5*Nobs+3, 3 * (Ncam + N3D + Nobs)); C.resize(5 * Nobs + 3, 1); C.fill(0.0); vec_sign.resize(5*Nobs+3); const size_t transStart = 0; const size_t pointStart = transStart + 3*Ncam; const size_t offsetStart = pointStart + 3*N3D; # define TVAR(i, el) (0 + 3*(i) + (el)) # define XVAR(j, el) (pointStart + 3*(j) + (el)) # define OFSVAR(k, el) (offsetStart + 3*(k) + (el)) // Set objective coefficients. vec_costs = std::vector<double>(3 * (Ncam + N3D + Nobs), 0.0); for (size_t k = 0; k < Nobs; ++k) { vec_costs[OFSVAR(k, 0)] = 1.0; vec_costs[OFSVAR(k, 1)] = 1.0; vec_costs[OFSVAR(k, 2)] = 1.0; } // By default set free variable: vec_bounds.assign( 3 * (Ncam + N3D + Nobs), {std::numeric_limits<double>::lowest(), std::numeric_limits<double>::max()}); // Change the offset to be positive for (size_t k = 0; k < 3*Nobs; ++k) vec_bounds[offsetStart + k].first = 0; size_t rowPos = 0; // Add the cheirality conditions (R_i*X_j + T_i)_3 >= 1 for (size_t k = 0; k < Nobs; ++k) { const size_t indexPt3D = M(2,k); const size_t indexCam = M(3,k); const Mat3 & R = Ri[indexCam]; A.coeffRef(rowPos, XVAR(indexPt3D, 0)) = R(2,0); A.coeffRef(rowPos, XVAR(indexPt3D, 1)) = R(2,1); A.coeffRef(rowPos, XVAR(indexPt3D, 2)) = R(2,2); A.coeffRef(rowPos, TVAR(indexCam, 2)) = 1.0; A.coeffRef(rowPos, OFSVAR(k, 2)) = 1.0; C(rowPos) = 1.0; vec_sign[rowPos] = LP_Constraints::LP_GREATER_OR_EQUAL; ++rowPos; } // end for (k) // Add conic constraint: for (size_t k = 0; k < Nobs; ++k) { const Vec2 pt = M.block<2,1>(0,k); const double u = pt(0); const double v = pt(1); const size_t indexPt3D = M(2,k); const size_t indexCam = M(3,k); const Mat3 & R = Ri[indexCam]; // x-residual => // (R_i*X_j + T_i)_1 / (R_i*X_j + T_i)_3 - u >= -sigma // (R_i*X_j + T_i)_1 - u * (R_i*X_j + T_i)_3 + sigma (R_i*X_j + T_i)_3 >= 0.0 // R_i_3 * (sigma-u) + R_i_1 + t_i_1 + t_i_3 * (sigma-u) >= 0 A.coeffRef(rowPos, XVAR(indexPt3D, 0)) = R(0,0) + (sigma-u) * R(2,0); A.coeffRef(rowPos, XVAR(indexPt3D, 1)) = R(0,1) + (sigma-u) * R(2,1); A.coeffRef(rowPos, XVAR(indexPt3D, 2)) = R(0,2) + (sigma-u) * R(2,2); A.coeffRef(rowPos, TVAR(indexCam, 0)) = 1.0; A.coeffRef(rowPos, TVAR(indexCam, 2)) = sigma-u; A.coeffRef(rowPos, OFSVAR(k, 0)) = 1.0; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_GREATER_OR_EQUAL; ++rowPos; A.coeffRef(rowPos, XVAR(indexPt3D, 0)) = R(0,0) - (sigma+u) * R(2,0); A.coeffRef(rowPos, XVAR(indexPt3D, 1)) = R(0,1) - (sigma+u) * R(2,1); A.coeffRef(rowPos, XVAR(indexPt3D, 2)) = R(0,2) - (sigma+u) * R(2,2); A.coeffRef(rowPos, TVAR(indexCam, 0)) = 1.0; A.coeffRef(rowPos, TVAR(indexCam, 2)) = -(sigma + u); A.coeffRef(rowPos, OFSVAR(k, 0)) = -1.0; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_LESS_OR_EQUAL; ++rowPos; // y-residual => A.coeffRef(rowPos, XVAR(indexPt3D, 0)) = R(1,0) + (sigma-v) * R(2,0); A.coeffRef(rowPos, XVAR(indexPt3D, 1)) = R(1,1) + (sigma-v) * R(2,1); A.coeffRef(rowPos, XVAR(indexPt3D, 2)) = R(1,2) + (sigma-v) * R(2,2); A.coeffRef(rowPos, 
OFSVAR(k, 1)) = 1.0; A.coeffRef(rowPos, TVAR(indexCam, 1)) = 1.0; A.coeffRef(rowPos, TVAR(indexCam, 2)) = sigma-v; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_GREATER_OR_EQUAL; ++rowPos; A.coeffRef(rowPos, XVAR(indexPt3D, 0)) = R(1,0) - (sigma+v) * R(2,0); A.coeffRef(rowPos, XVAR(indexPt3D, 1)) = R(1,1) - (sigma+v) * R(2,1); A.coeffRef(rowPos, XVAR(indexPt3D, 2)) = R(1,2) - (sigma+v) * R(2,2); A.coeffRef(rowPos, TVAR(indexCam, 1)) = 1.0; A.coeffRef(rowPos, TVAR(indexCam, 2)) = -(sigma + v); A.coeffRef(rowPos, OFSVAR(k, 1)) = -1.0; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_LESS_OR_EQUAL; ++rowPos; } // Fix the translation ambiguity. (set first cam at (0,0,0) //LP_EQUAL A.coeffRef(rowPos, TVAR(0, 0)) = 1.0; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_EQUAL; ++rowPos; A.coeffRef(rowPos, TVAR(0, 1)) = 1.0; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_EQUAL; ++rowPos; A.coeffRef(rowPos, TVAR(0, 2)) = 1.0; C(rowPos) = 0.0; vec_sign[rowPos] = LP_Constraints::LP_EQUAL; ++rowPos; # undef TVAR # undef XVAR # undef OFSVAR }
GLuint TexCoordinates(std::vector<T>& dest) const { auto v = _tex_coords(); dest.assign(v.begin(), v.end()); return 2; }
void ETHZParser::getStateVectorNames(std::vector<std::string>& stateVecNames) { stateVecNames.assign(_vStateVectorNames.begin(), _vStateVectorNames.end()); /* assign() already replaces any existing contents, so a separate clear() is unnecessary */ }
void TensorMechanicsPlasticWeakPlaneShear::activeConstraints(const std::vector<Real> & f, const RankTwoTensor & stress, const Real & intnl, const RankFourTensor & Eijkl, std::vector<bool> & act, RankTwoTensor & returned_stress) const { act.assign(1, false); returned_stress = stress; if (f[0] <= _f_tol) return; // in the following i will derive returned_stress for the case smoother=0 Real tanpsi = tan_psi(intnl); Real tanphi = tan_phi(intnl); // norm is the normal to the yield surface // with f having psi (dilation angle) instead of phi: // norm(0) = df/dsig(2,0) = df/dsig(0,2) // norm(1) = df/dsig(2,1) = df/dsig(1,2) // norm(2) = df/dsig(2,2) std::vector<Real> norm(3, 0.0); Real tau = std::sqrt(std::pow((stress(0,2) + stress(2,0))/2, 2) + std::pow((stress(1,2) + stress(2,1))/2, 2)); if (tau > 0.0) { norm[0] = 0.25*(stress(0, 2)+stress(2,0))/tau; norm[1] = 0.25*(stress(1, 2)+stress(2,1))/tau; } else { returned_stress(2, 2) = cohesion(intnl)/tanphi; act[0] = true; return; } norm[2] = tanpsi; // to get the flow directions, we have to multiply norm by Eijkl. // I assume that E(0,2,0,2) = E(1,2,1,2), and E(2,2,0,2) = 0 = E(0,2,1,2), etc // with the usual symmetry. This makes finding the returned_stress // much easier. // returned_stress = stress - alpha*n // where alpha is chosen so that f = 0 Real alpha = f[0]/(Eijkl(0,2,0,2) + Eijkl(2,2,2,2)*tanpsi*tanphi); if (1 - alpha*Eijkl(0,2,0,2)/tau >= 0) { // returning to the "surface" of the cone returned_stress(2, 2) = stress(2, 2) - alpha*Eijkl(2, 2, 2, 2)*norm[2]; returned_stress(0, 2) = returned_stress(2, 0) = stress(0, 2) - alpha*2*Eijkl(0, 2, 0, 2)*norm[0]; returned_stress(1, 2) = returned_stress(2, 1) = stress(1, 2) - alpha*2*Eijkl(1, 2, 1, 2)*norm[1]; } else { // returning to the "tip" of the cone returned_stress(2, 2) = cohesion(intnl)/tanphi; returned_stress(0, 2) = returned_stress(2, 0) = returned_stress(1, 2) = returned_stress(2, 1) = 0; } returned_stress(0, 0) = stress(0, 0) - Eijkl(0, 0, 2, 2)*(stress(2, 2) - returned_stress(2, 2))/Eijkl(2, 2, 2, 2); returned_stress(1, 1) = stress(1, 1) - Eijkl(1, 1, 2, 2)*(stress(2, 2) - returned_stress(2, 2))/Eijkl(2, 2, 2, 2); act[0] = true; }
// Extract the input data from the specified file and save the data in the data container. // Returns the nonzero image size if successful, else returns 0,0. // xySize ImageReaderBMP::getData(std::string const &filename, std::vector<float> &dataContainer, ColorChannel_t colorChannel) { FILE* f; fopen_s(&f, filename.c_str(), "rb"); if (f == NULL) { return { 0, 0 }; } // Read the BMP header to get the image dimensions: unsigned char info[54]; if (fread(info, sizeof(unsigned char), 54, f) != 54) { fclose(f); return { 0, 0 }; } if (info[0] != 'B' || info[1] != 'M') { fclose(f); return { 0, 0 }; } // Verify the offset to the pixel data. It should be the same size as the info[] data read above. size_t dataOffset = (info[13] << 24) + (info[12] << 16) + (info[11] << 8) + info[10]; // Verify that the file contains 24 bits (3 bytes) per pixel (red, green blue at 8 bits each): int pixelDepth = (info[29] << 8) + info[28]; if (pixelDepth != 24) { fclose(f); return { 0, 0 }; } // This method of converting 4 bytes to a uint32_t is portable for little- or // big-endian environments: uint32_t width = (info[21] << 24) + (info[20] << 16) + (info[19] << 8) + info[18]; uint32_t height = (info[25] << 24) + (info[24] << 16) + (info[23] << 8) + info[22]; // Position the read pointer to the first byte of pixel data: if (fseek(f, dataOffset, SEEK_SET) != 0) { fclose(f); return { 0, 0 }; } uint32_t rowLen_padded = (width*3 + 3) & (~3); std::unique_ptr<unsigned char[]> imageData {new unsigned char[rowLen_padded]}; dataContainer.clear(); dataContainer.assign(width * height, 0); // Pre-allocate to make random access easy // Fill the data container with 8-bit data taken from the image data: for (uint32_t y = 0; y < height; ++y) { if (fread(imageData.get(), sizeof(unsigned char), rowLen_padded, f) != rowLen_padded) { fclose(f); return { 0, 0 }; } // BMP pixels are arranged in memory in the order (B, G, R): unsigned val = 0; for (uint32_t x = 0; x < width; ++x) { if (colorChannel == NNet::R) { val = imageData[x * 3 + 2]; // Red } else if (colorChannel == NNet::G) { val = imageData[x * 3 + 1]; // Green } else if (colorChannel == NNet::B) { val = imageData[x * 3 + 0]; // Blue } else if (colorChannel == NNet::BW) { // Rounds down: val = (unsigned)(0.3 * imageData[x*3 + 2] + // Red 0.6 * imageData[x*3 + 1] + // Green 0.1 * imageData[x*3 + 0]); // Blue } else { err << "Error: unknown pixel conversion" << endl; throw exceptionImageFile(); } // Convert the pixel from the range 0..256 to a smaller // range that we can input into the neural net: // Also we'll invert the rows so that the origin is the upper left at 0,0: dataContainer[flattenXY(x, (height - y) - 1, height)] = pixelToNetworkInputRange(val); } } fclose(f); return { width, height }; }
bool ReduceCrashingFunctions::TestFuncs(std::vector<Function*> &Funcs) { // If main isn't present, claim there is no problem. if (KeepMain && std::find(Funcs.begin(), Funcs.end(), BD.getProgram()->getFunction("main")) == Funcs.end()) return false; // Clone the program to try hacking it apart... ValueToValueMapTy VMap; Module *M = CloneModule(BD.getProgram(), VMap).release(); // Convert list to set for fast lookup... std::set<Function*> Functions; for (unsigned i = 0, e = Funcs.size(); i != e; ++i) { Function *CMF = cast<Function>(VMap[Funcs[i]]); assert(CMF && "Function not in module?!"); assert(CMF->getFunctionType() == Funcs[i]->getFunctionType() && "wrong ty"); assert(CMF->getName() == Funcs[i]->getName() && "wrong name"); Functions.insert(CMF); } outs() << "Checking for crash with only these functions: "; PrintFunctionList(Funcs); outs() << ": "; if (!ReplaceFuncsWithNull) { // Loop over and delete any functions which we aren't supposed to be playing // with... for (Function &I : *M) if (!I.isDeclaration() && !Functions.count(&I)) DeleteFunctionBody(&I); } else { std::vector<GlobalValue*> ToRemove; // First, remove aliases to functions we're about to purge. for (GlobalAlias &Alias : M->aliases()) { Constant *Root = Alias.getAliasee()->stripPointerCasts(); Function *F = dyn_cast<Function>(Root); if (F) { if (Functions.count(F)) // We're keeping this function. continue; } else if (Root->isNullValue()) { // This referenced a globalalias that we've already replaced, // so we still need to replace this alias. } else if (!F) { // Not a function, therefore not something we mess with. continue; } PointerType *Ty = cast<PointerType>(Alias.getType()); Constant *Replacement = ConstantPointerNull::get(Ty); Alias.replaceAllUsesWith(Replacement); ToRemove.push_back(&Alias); } for (Function &I : *M) { if (!I.isDeclaration() && !Functions.count(&I)) { PointerType *Ty = cast<PointerType>(I.getType()); Constant *Replacement = ConstantPointerNull::get(Ty); I.replaceAllUsesWith(Replacement); ToRemove.push_back(&I); } } for (auto *F : ToRemove) { F->eraseFromParent(); } // Finally, remove any null members from any global intrinsic. RemoveFunctionReferences(M, "llvm.used"); RemoveFunctionReferences(M, "llvm.compiler.used"); } // Try running the hacked up program... if (TestFn(BD, M)) { BD.setNewProgram(M); // It crashed, keep the trimmed version... // Make sure to use function pointers that point into the now-current // module. Funcs.assign(Functions.begin(), Functions.end()); return true; } delete M; return false; }
void assign(T value) { data.assign(width * height, value); }
void assign(const char* beg, const char* end) { _data.assign(beg, end); _beg = _data.begin(); _end = _data.end(); }
/// Split - Splits a string of comma separated items in to a vector of strings. /// static void Split(std::vector<std::string> &V, StringRef S) { SmallVector<StringRef, 3> Tmp; S.split(Tmp, ',', -1, false /* KeepEmpty */); V.assign(Tmp.begin(), Tmp.end()); }
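The helper above relies on llvm::StringRef::split with KeepEmpty=false, so empty fields between consecutive commas are dropped before the vector is refilled. A standard-library-only sketch of the same behaviour (illustrative equivalent only, not the LLVM implementation) is:

```cpp
#include <string>
#include <vector>

// Illustrative equivalent of Split(): break a comma-separated string into
// items, skipping empty fields, and replace the vector's contents.
static void SplitCommaSeparated(std::vector<std::string> &V, const std::string &S) {
    std::vector<std::string> Tmp;
    std::string::size_type Start = 0;
    while (Start <= S.size()) {
        std::string::size_type Comma = S.find(',', Start);
        if (Comma == std::string::npos) Comma = S.size();
        if (Comma > Start)                       // drop empty items (KeepEmpty = false)
            Tmp.push_back(S.substr(Start, Comma - Start));
        Start = Comma + 1;
    }
    V.assign(Tmp.begin(), Tmp.end());
}
```

For instance, splitting the hypothetical input "+sse2,,+avx" would leave V holding {"+sse2", "+avx"}, since the empty field is discarded.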
GLuint Positions(std::vector<T>& dest) const { auto p = _positions(); dest.assign(p.begin(), p.end()); return 3; }
void Player_AlphaBeta::getMoves(GameState & state, const MoveArray & moves, std::vector<Action> & moveVec) { moveVec.clear(); alphaBeta->doSearch(state); moveVec.assign(alphaBeta->getResults().bestMoves.begin(), alphaBeta->getResults().bestMoves.end()); }
GLuint Tangents(std::vector<T>& dest) const { auto t = _tangents(); dest.assign(t.begin(), t.end()); return 3; }
void AsmWriterEmitter:: FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands, std::vector<unsigned> &InstIdxs, std::vector<unsigned> &InstOpsUsed) const { InstIdxs.assign(NumberedInstructions.size(), ~0U); // This vector parallels UniqueOperandCommands, keeping track of which // instructions each case are used for. It is a comma separated string of // enums. std::vector<std::string> InstrsForCase; InstrsForCase.resize(UniqueOperandCommands.size()); InstOpsUsed.assign(UniqueOperandCommands.size(), 0); for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) { const AsmWriterInst *Inst = getAsmWriterInstByID(i); if (Inst == 0) continue; // PHI, INLINEASM, LABEL, etc. std::string Command; if (Inst->Operands.empty()) continue; // Instruction already done. Command = " " + Inst->Operands[0].getCode() + "\n"; // If this is the last operand, emit a return. if (Inst->Operands.size() == 1) Command += " return true;\n"; // Check to see if we already have 'Command' in UniqueOperandCommands. // If not, add it. bool FoundIt = false; for (unsigned idx = 0, e = UniqueOperandCommands.size(); idx != e; ++idx) if (UniqueOperandCommands[idx] == Command) { InstIdxs[i] = idx; InstrsForCase[idx] += ", "; InstrsForCase[idx] += Inst->CGI->TheDef->getName(); FoundIt = true; break; } if (!FoundIt) { InstIdxs[i] = UniqueOperandCommands.size(); UniqueOperandCommands.push_back(Command); InstrsForCase.push_back(Inst->CGI->TheDef->getName()); // This command matches one operand so far. InstOpsUsed.push_back(1); } } // For each entry of UniqueOperandCommands, there is a set of instructions // that uses it. If the next command of all instructions in the set are // identical, fold it into the command. for (unsigned CommandIdx = 0, e = UniqueOperandCommands.size(); CommandIdx != e; ++CommandIdx) { for (unsigned Op = 1; ; ++Op) { // Scan for the first instruction in the set. std::vector<unsigned>::iterator NIT = std::find(InstIdxs.begin(), InstIdxs.end(), CommandIdx); if (NIT == InstIdxs.end()) break; // No commonality. // If this instruction has no more operands, we isn't anything to merge // into this command. const AsmWriterInst *FirstInst = getAsmWriterInstByID(NIT-InstIdxs.begin()); if (!FirstInst || FirstInst->Operands.size() == Op) break; // Otherwise, scan to see if all of the other instructions in this command // set share the operand. bool AllSame = true; for (NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx); NIT != InstIdxs.end(); NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx)) { // Okay, found another instruction in this command set. If the operand // matches, we're ok, otherwise bail out. const AsmWriterInst *OtherInst = getAsmWriterInstByID(NIT-InstIdxs.begin()); if (!OtherInst || OtherInst->Operands.size() == Op || OtherInst->Operands[Op] != FirstInst->Operands[Op]) { AllSame = false; break; } } if (!AllSame) break; // Okay, everything in this command set has the same next operand. Add it // to UniqueOperandCommands and remember that it was consumed. std::string Command = " " + FirstInst->Operands[Op].getCode() + "\n"; // If this is the last operand, emit a return after the code. if (FirstInst->Operands.size() == Op+1) Command += " return true;\n"; UniqueOperandCommands[CommandIdx] += Command; InstOpsUsed[CommandIdx]++; } } // Prepend some of the instructions each case is used for onto the case val. 
for (unsigned i = 0, e = InstrsForCase.size(); i != e; ++i) { std::string Instrs = InstrsForCase[i]; if (Instrs.size() > 70) { Instrs.erase(Instrs.begin()+70, Instrs.end()); Instrs += "..."; } if (!Instrs.empty()) UniqueOperandCommands[i] = " // " + Instrs + "\n" + UniqueOperandCommands[i]; } }
TimeSeriesPluginTestFixture() { // Asyn manager doesn't like it if we try to reuse the same port name for multiple drivers // (even if only one is ever instantiated at once), so we change it slightly for each test case. std::string simport("simTS"), testport("TS"); uniqueAsynPortName(simport); uniqueAsynPortName(testport); // We need some upstream driver for our test plugin so that calls to connectArrayPort // don't fail, but we can then ignore it and send arrays by calling processCallbacks directly. driver = boost::shared_ptr<asynNDArrayDriver>(new asynNDArrayDriver(simport.c_str(), 1, true, 0, asynGenericPointerMask, asynGenericPointerMask, 0, 0, 0, 0)); arrayPool = driver->pNDArrayPool; // This is the plugin under test ts = boost::shared_ptr<TimeSeriesPluginWrapper>(new TimeSeriesPluginWrapper(testport.c_str(), 50, 1, simport.c_str(), 0, 1, 0, 0, 2000000)); // This is the mock downstream plugin downstream_plugin = new TestingPlugin(testport.c_str(), 0); // Enable the plugin ts->start(); // start the plugin thread although not required for this unittesting ts->write(NDPluginDriverEnableCallbacksString, 1); ts->write(NDPluginDriverBlockingCallbacksString, 1); client = boost::shared_ptr<asynGenericPointerClient>(new asynGenericPointerClient(testport.c_str(), 0, NDArrayDataString)); client->registerInterruptUser(&TS_callback); // 1D: 8 channels with a single scalar sample element in each one size_t tmpdims_1d[] = {8}; dims_1d.assign(tmpdims_1d, tmpdims_1d + sizeof(tmpdims_1d)/sizeof(tmpdims_1d[0])); arrays_1d.resize(200); // We create 200 samples fillNDArraysFromPool(dims_1d, NDFloat32, arrays_1d, arrayPool); // Fill some NDArrays with unimportant data // 2D: three time series channels, each with 20 elements size_t tmpdims_2d[] = {3,20}; dims_2d.assign(tmpdims_2d, tmpdims_2d + sizeof(tmpdims_2d)/sizeof(tmpdims_2d[0])); arrays_2d.resize(24); fillNDArraysFromPool(dims_2d, NDFloat32, arrays_2d, arrayPool); // 3D: four channels with 2D images of 5x6 pixel (like an RGB image) // Not valid input for the Time Series plugin size_t tmpdims_3d[] = {4,5,6}; dims_3d.assign(tmpdims_3d, tmpdims_3d + sizeof(tmpdims_3d)/sizeof(tmpdims_3d[0])); arrays_3d.resize(24); fillNDArraysFromPool(dims_3d, NDFloat32, arrays_3d, arrayPool); // Plugin setup: TimePerPoint=0.001 and AveragingTime=0.01 thus NumAverage=10 BOOST_REQUIRE_NO_THROW(ts->write(TSTimePerPointString, 0.001)); BOOST_REQUIRE_NO_THROW(ts->write(TSAveragingTimeString, 0.01)); BOOST_REQUIRE_NO_THROW(ts->write(TSNumPointsString, 20)); BOOST_REQUIRE_NO_THROW(ts->write(TSAcquireModeString, 0)); // TSAcquireModeFixed=0 // Double check plugin setup BOOST_REQUIRE_EQUAL(ts->readInt(TSNumAverageString), 10); BOOST_REQUIRE_EQUAL(ts->readInt(TSNumPointsString), 20); }
UF(int num_elements) { rank.assign(num_elements, 0); parent.assign(num_elements, 0); for (int i = 0; i < num_elements; i++) parent[i] = i; //All separate sets, each is its own root }
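A minimal sketch of how a union-find constructor like the one above is usually completed; only the rank and parent vectors come from the snippet, while the find/unite members and the standalone struct below are hypothetical additions for illustration:

```cpp
#include <utility>
#include <vector>

// Hypothetical completion of the UF class sketched above: path-compressing
// find plus union by rank, built on the same rank/parent vectors.
struct UF {
    std::vector<int> rank, parent;
    explicit UF(int num_elements) {
        rank.assign(num_elements, 0);
        parent.assign(num_elements, 0);
        for (int i = 0; i < num_elements; i++) parent[i] = i;  // each element starts as its own root
    }
    int find(int x) {
        return parent[x] == x ? x : parent[x] = find(parent[x]);  // path compression
    }
    void unite(int a, int b) {
        a = find(a); b = find(b);
        if (a == b) return;
        if (rank[a] < rank[b]) std::swap(a, b);  // attach the shallower tree under the deeper one
        parent[b] = a;
        if (rank[a] == rank[b]) ++rank[a];
    }
};
```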