pointwise_aggregates(const Matrix &A, const params &prm, unsigned min_aggregate) : count(0) { if (prm.block_size == 1) { plain_aggregates aggr(A, prm); remove_small_aggregates(A.nrows, 1, min_aggregate, aggr); count = aggr.count; strong_connection.swap(aggr.strong_connection); id.swap(aggr.id); } else { strong_connection.resize( nonzeros(A) ); id.resize( rows(A) ); Matrix Ap = pointwise_matrix(A, prm.block_size); plain_aggregates pw_aggr(Ap, prm); remove_small_aggregates( Ap.nrows, prm.block_size, min_aggregate, pw_aggr); count = pw_aggr.count * prm.block_size; #pragma omp parallel { std::vector<ptrdiff_t> marker(Ap.nrows, -1); #ifdef _OPENMP int nt = omp_get_num_threads(); int tid = omp_get_thread_num(); size_t chunk_size = (Ap.nrows + nt - 1) / nt; size_t chunk_start = tid * chunk_size; size_t chunk_end = std::min(Ap.nrows, chunk_start + chunk_size); #else size_t chunk_start = 0; size_t chunk_end = Ap.nrows; #endif for(size_t ip = chunk_start, ia = ip * prm.block_size; ip < chunk_end; ++ip) { ptrdiff_t row_beg = Ap.ptr[ip]; ptrdiff_t row_end = row_beg; for(unsigned k = 0; k < prm.block_size; ++k, ++ia) { id[ia] = prm.block_size * pw_aggr.id[ip] + k; for(ptrdiff_t ja = A.ptr[ia], ea = A.ptr[ia+1]; ja < ea; ++ja) { ptrdiff_t cp = A.col[ja] / prm.block_size; if (marker[cp] < row_beg) { marker[cp] = row_end; strong_connection[ja] = pw_aggr.strong_connection[row_end]; ++row_end; } else { strong_connection[ja] = pw_aggr.strong_connection[ marker[cp] ]; } } } } } } }
void ResolveVertexDataArray(std::vector<T>& data_out, const Scope& source, const std::string& MappingInformationType, const std::string& ReferenceInformationType, const char* dataElementName, const char* indexDataElementName, size_t vertex_count, const std::vector<unsigned int>& mapping_counts, const std::vector<unsigned int>& mapping_offsets, const std::vector<unsigned int>& mappings) { std::vector<T> tempUV; ParseVectorDataArray(tempUV,GetRequiredElement(source,dataElementName)); // handle permutations of Mapping and Reference type - it would be nice to // deal with this more elegantly and with less redundancy, but right // now it seems unavoidable. if (MappingInformationType == "ByVertice" && ReferenceInformationType == "Direct") { data_out.resize(vertex_count); for (size_t i = 0, e = tempUV.size(); i < e; ++i) { const unsigned int istart = mapping_offsets[i], iend = istart + mapping_counts[i]; for (unsigned int j = istart; j < iend; ++j) { data_out[mappings[j]] = tempUV[i]; } } } else if (MappingInformationType == "ByVertice" && ReferenceInformationType == "IndexToDirect") { data_out.resize(vertex_count); std::vector<int> uvIndices; ParseVectorDataArray(uvIndices,GetRequiredElement(source,indexDataElementName)); for (size_t i = 0, e = uvIndices.size(); i < e; ++i) { const unsigned int istart = mapping_offsets[i], iend = istart + mapping_counts[i]; for (unsigned int j = istart; j < iend; ++j) { if(static_cast<size_t>(uvIndices[i]) >= tempUV.size()) { DOMError("index out of range",&GetRequiredElement(source,indexDataElementName)); } data_out[mappings[j]] = tempUV[uvIndices[i]]; } } } else if (MappingInformationType == "ByPolygonVertex" && ReferenceInformationType == "Direct") { if (tempUV.size() != vertex_count) { FBXImporter::LogError(Formatter::format("length of input data unexpected for ByPolygon mapping: ") << tempUV.size() << ", expected " << vertex_count ); return; } data_out.swap(tempUV); } else if (MappingInformationType == "ByPolygonVertex" && ReferenceInformationType == "IndexToDirect") { data_out.resize(vertex_count); std::vector<int> uvIndices; ParseVectorDataArray(uvIndices,GetRequiredElement(source,indexDataElementName)); if (uvIndices.size() != vertex_count) { FBXImporter::LogError("length of input data unexpected for ByPolygonVertex mapping"); return; } unsigned int next = 0; BOOST_FOREACH(int i, uvIndices) { if(static_cast<size_t>(i) >= tempUV.size()) { DOMError("index out of range",&GetRequiredElement(source,indexDataElementName)); } data_out[next++] = tempUV[i]; } }
void SimRender::InterpolatePointsRNS(std::vector<CVector2D>& points, bool closed, float offset, int segmentSamples /* = 4 */) { PROFILE("InterpolatePointsRNS"); ENSURE(segmentSamples > 0); std::vector<CVector2D> newPoints; // (This does some redundant computations for adjacent vertices, // but it's fairly fast (<1ms typically) so we don't worry about it yet) // TODO: Instead of doing a fixed number of line segments between each // control point, it should probably be somewhat adaptive to get a nicer // curve with fewer points size_t n = points.size(); if (closed) { if (n < 1) return; // we need at least a single point to not crash } else { if (n < 2) return; // in non-closed mode, we need at least n=2 to not crash } size_t imax = closed ? n : n-1; newPoints.reserve(imax*segmentSamples); // these are primarily used inside the loop, but for open paths we need them outside the loop once to compute the last point CVector2D a0; CVector2D a1; CVector2D a2; CVector2D a3; for (size_t i = 0; i < imax; ++i) { // Get the relevant points for this spline segment; each step interpolates the segment between p1 and p2; p0 and p3 are the points // before p1 and after p2, respectively; they're needed to compute tangents and whatnot. CVector2D p0; // normally points[(i-1+n)%n], but it's a bit more complicated due to open/closed paths -- see below CVector2D p1 = points[i]; CVector2D p2 = points[(i+1)%n]; CVector2D p3; // normally points[(i+2)%n], but it's a bit more complicated due to open/closed paths -- see below if (!closed && (i == 0)) // p0's point index is out of bounds, and we can't wrap around because we're in non-closed mode -- create an artificial point // that extends p1 -> p0 (i.e. the first segment's direction) p0 = points[0] + (points[0] - points[1]); else // standard wrap-around case p0 = points[(i-1+n)%n]; // careful; don't use (i-1)%n here, as the result is machine-dependent for negative operands (e.g. if i==0, the result could be either -1 or n-1) if (!closed && (i == n-2)) // p3's point index is out of bounds; create an artificial point that extends p_(n-2) -> p_(n-1) (i.e. the last segment's direction) // (note that p2's index should not be out of bounds, because in non-closed mode imax is reduced by 1) p3 = points[n-1] + (points[n-1] - points[n-2]); else // standard wrap-around case p3 = points[(i+2)%n]; // Do the RNS computation (based on GPG4 "Nonuniform Splines") float l1 = (p2 - p1).Length(); // length of spline segment (i)..(i+1) CVector2D s0 = (p1 - p0).Normalized(); // unit vector of spline segment (i-1)..(i) CVector2D s1 = (p2 - p1).Normalized(); // unit vector of spline segment (i)..(i+1) CVector2D s2 = (p3 - p2).Normalized(); // unit vector of spline segment (i+1)..(i+2) CVector2D v1 = (s0 + s1).Normalized() * l1; // spline velocity at i CVector2D v2 = (s1 + s2).Normalized() * l1; // spline velocity at i+1 // Compute standard cubic spline parameters a0 = p1*2 + p2*-2 + v1 + v2; a1 = p1*-3 + p2*3 + v1*-2 + v2*-1; a2 = v1; a3 = p1; // Interpolate at regular points across the interval for (int sample = 0; sample < segmentSamples; sample++) newPoints.push_back(EvaluateSpline(sample/((float) segmentSamples), a0, a1, a2, a3, offset)); } if (!closed) // if the path is open, we should take care to include the last control point // NOTE: we can't just do push_back(points[n-1]) here because that ignores the offset newPoints.push_back(EvaluateSpline(1.f, a0, a1, a2, a3, offset)); points.swap(newPoints); }
// //////////////////////////////////////////////////////////////////////////// bool InsContext::CheckContext(std::vector<std::string> &error_list) const { typedef std::map<unsigned int, unsigned int> DVMap; typedef DVMap::iterator DVMapIter; typedef DVMap::const_iterator DVMapIterC; typedef std::pair<unsigned int, unsigned int> DVMapPair; unsigned int ins_item_count = static_cast<unsigned int>(ins_item_list_.size()); unsigned int dict_value_count = static_cast<unsigned int>(dict_value_list_.size()); unsigned int count_1; DVMap dv_map; std::vector<std::string> tmp_error_list; for (count_1 = 0; count_1 < ins_item_count; ++count_1) { const InsItem &this_item(ins_item_list_[count_1]); std::stringstream item_text; item_text << "Error in instruction item index " << count_1 << ", fid " << this_item.auxiliary_id_ << " ('" << this_item.field_name_ << "') with parent index " << this_item.parent_index_ << ": "; try { if (this_item.parent_index_ >= ins_item_count) { std::ostringstream o_str; o_str << "The parent index (" << this_item.parent_index_ << ") is not less than the number of instruction items (" << ins_item_count << ")."; tmp_error_list.push_back(item_text.str() + o_str.str()); } if ((count_1 + this_item.element_count_) > ins_item_count) { std::ostringstream o_str; o_str << "The item index plus the element count (" << count_1 << " + " << this_item.element_count_ << " = " << (count_1 + this_item.element_count_) << ") is greater " "than number of instruction items (" << ins_item_count << ")."; tmp_error_list.push_back(item_text.str() + o_str.str()); } if (this_item.field_operator_ == FieldOperator_None) { if ((this_item.dict_value_index_) && (this_item.data_type_ != DataType_Template)) { std::ostringstream o_str; o_str << "The field has an operator of 'None', but the " "dictionary value index is " << this_item.dict_value_index_ << " (should be 0)."; tmp_error_list.push_back(item_text.str() + o_str.str()); } if ((this_item.dict_value_count_) && (this_item.data_type_ != DataType_Template)) { std::ostringstream o_str; o_str << "The field has an operator of 'None', but the " "dictionary value count is " << this_item.dict_value_count_ << " (should be 0)."; tmp_error_list.push_back(item_text.str() + o_str.str()); } } else { if (!this_item.dict_value_count_) { std::ostringstream o_str; o_str << "The field has an operator of '" << this_item.field_operator_ << "', but the dictionary value " "count is 0."; tmp_error_list.push_back(item_text.str() + o_str.str()); } else if ((this_item.dict_value_index_ + this_item.dict_value_count_) > dict_value_count) { std::ostringstream o_str; o_str << "The dictionary value index plus the dictionary value " "count (" << this_item.dict_value_index_ << " + " << this_item.dict_value_count_ << " = " << (this_item.dict_value_index_ + this_item.dict_value_count_) << ") is greater than number of dictionary values (" << dict_value_count << ")."; tmp_error_list.push_back(item_text.str() + o_str.str()); } } } catch (const std::exception &except) { tmp_error_list.push_back(item_text.str() + except.what()); } } error_list.swap(tmp_error_list); return(error_list.empty()); }
bool smooth_curve(const BVH *bvh, const VectorXu &E2E, std::vector<CurvePoint> &curve, bool watertight) { const MatrixXu &F = *bvh->F(); const MatrixXf &V = *bvh->V(), &N = *bvh->N(); cout << endl; std::vector<CurvePoint> curve_new; std::vector<Float> weight; std::vector<uint32_t> path; cout << "Input: " << curve.size() << " vertices" << endl; for (int it=0;; ++it) { if (curve.size() < 2) return false; for (uint32_t it2=0; it2<curve.size(); ++it2) { curve_new.clear(); curve_new.push_back(curve[0]); for (uint32_t i=1; i<curve.size()-1; ++i) { Vector3f p_new = 0.5f * (curve[i-1].p + curve[i+1].p); Vector3f n_new = (curve[i-1].n + curve[i+1].n).normalized(); Float maxlength = (curve[i-1].p - curve[i+1].p).norm()*2; Ray ray1(p_new, n_new, 0, maxlength); Ray ray2(p_new, -n_new, 0, maxlength); uint32_t idx1 = 0, idx2 = 0; Float t1 = 0, t2 = 0; Vector2f uv1, uv2; bool hit1 = bvh->rayIntersect(ray1, idx1, t1, &uv1); bool hit2 = bvh->rayIntersect(ray2, idx2, t2, &uv2); if (!hit1 && !hit2) continue; CurvePoint pt; if (t1 < t2) { pt.p = ray1(t1); pt.f = idx1; pt.n = ((1 - uv1.sum()) * N.col(F(0, idx1)) + uv1.x() * N.col(F(1, idx1)) + uv1.y() * N.col(F(2, idx1))).normalized(); } else { pt.p = ray2(t2); pt.f = idx2; pt.n = ((1 - uv2.sum()) * N.col(F(0, idx2)) + uv2.x() * N.col(F(1, idx2)) + uv2.y() * N.col(F(2, idx2))).normalized(); } curve_new.push_back(pt); } curve_new.push_back(curve[curve.size()-1]); curve.swap(curve_new); } if (!watertight && it == 1) break; curve_new.clear(); curve_new.push_back(curve[0]); for (uint32_t i=1; i<curve.size(); ++i) { if (!astar(F, E2E, V, curve[i-1].f, curve[i].f, path)) return false; auto closest = [](const Vector3f &p0, const Vector3f &p1, const Vector3f &target) -> Vector3f { Vector3f d = (p1-p0).normalized(); return p0 + d * std::min(std::max((target-p0).dot(d), 0.0f), (p0-p1).norm()); }; if (path.size() > 2) { uint32_t base = curve_new.size() - 1; for (uint32_t j=1; j<path.size()-1; ++j) { uint32_t f = path[j]; Vector3f p0 = V.col(F(0, f)), p1 = V.col(F(1, f)), p2 = V.col(F(2, f)); CurvePoint pt2; pt2.f = f; pt2.n = (p1-p0).cross(p2-p0).normalized(); pt2.p = (p0+p1+p2) * (1.0f / 3.0f); curve_new.push_back(pt2); } curve_new.push_back(curve[i]); for (uint32_t q=1; q<path.size()-1; ++q) { for (uint32_t j=1; j<path.size()-1; ++j) { Float bestDist1 = std::numeric_limits<Float>::infinity(); Float bestDist2 = std::numeric_limits<Float>::infinity(); Vector3f bestPt1 = Vector3f::Zero(), bestPt2 = Vector3f::Zero(); uint32_t f = path[j]; for (uint32_t k=0; k<3; ++k) { Vector3f closest1 = closest(V.col(F(k, f)), V.col(F((k + 1) % 3, f)), curve_new[base+j-1].p); Vector3f closest2 = closest(V.col(F(k, f)), V.col(F((k + 1) % 3, f)), curve_new[base+j+1].p); Float dist1 = (closest1 - curve_new[base+j-1].p).norm(); Float dist2 = (closest2 - curve_new[base+j+1].p).norm(); if (dist1 < bestDist1) { bestDist1 = dist1; bestPt1 = closest1; } if (dist2 < bestDist2) { bestDist2 = dist2; bestPt2 = closest2; } } curve_new[base+j].p = (bestPt1 + bestPt2) * 0.5f; } } } else { curve_new.push_back(curve[i]); } } curve.swap(curve_new); curve_new.clear(); curve_new.push_back(curve[0]); weight.clear(); weight.push_back(1.0f); for (uint32_t i=0; i<curve.size(); ++i) { auto &cur = curve_new[curve_new.size()-1]; auto &cur_weight = weight[weight.size()-1]; if (cur.f == curve[i].f) { cur.p += curve[i].p; cur.n += curve[i].n; cur_weight += 1; } else { curve_new.push_back(curve[i]); weight.push_back(1.f); } } for (uint32_t i=0; i<curve_new.size(); ++i) { curve_new[i].p /= weight[i]; 
curve_new[i].n.normalize(); } curve_new[0] = curve[0]; curve_new[curve_new.size()-1] = curve[curve.size()-1]; if (curve_new.size() < 2 || curve_new[0].f == curve_new[curve_new.size()-1].f) return false; curve_new.swap(curve); if (it > 2) break; } cout << "Smoothed curve: " << curve.size() << " vertices" << endl; return true; }
/*static*/ void AbstractTexture::resizeData(unsigned int width, unsigned int height, std::vector<unsigned char>& data, unsigned int newWidth, unsigned int newHeight, bool resizeSmoothly, std::vector<unsigned char>& newData) { #ifdef DEBUG_TEXTURE assert(data.size() == width * height * sizeof(int)); #endif // DEBUG_TEXTURE newData.clear(); if (data.empty() || newWidth == 0 || newHeight == 0) return; if (newWidth == width && newHeight == height) { newData.swap(data); return; } const auto size = newWidth * newHeight * sizeof(int); const float xFactor = ((float)width - 1.0f)/((float)newWidth - 1.0f); const float yFactor = ((float)height - 1.0f)/((float)newHeight - 1.0f); newData.resize(size); uint idx = 0; float y = 0.0f; for (uint q = 0; q < newHeight; ++q) { uint j = (uint) floorf(y); const float dy = y - (float)j; if (j >= height) j = height - 1; float x = 0.0f; for (uint p = 0; p < newWidth; ++p) { uint i = (uint)floorf(x); if (i >= width) i = width - 1; const uint ijTL = (i + width * j) << 2; if (resizeSmoothly) { // bilinear interpolation const float dx = x - (float)i; const float dxy = dx * dy; const uint ijTR = i < width - 1 ? ijTL + 4 : ijTL; const uint ijBL = j < height - 1 ? ijTL + (width << 2) : ijTL; const uint ijBR = (i < width - 1) && (j < height - 1) ? ijTL + ((width + 1) << 2) : ijTL; const float wTL = 1.0f - dx - dy + dxy; const float wTR = dx - dxy; const float wBL = dy - dxy; const float wBR = dxy; for (uint k = 0; k < 4; ++k) { const float color = wTL * data[ijTL + k] + wTR * data[ijTR + k] + wBL * data[ijBL + k] + wBR * data[ijBR + k]; newData[idx + k] = (unsigned char)floorf(color); } } else { // nearest pixel color for (uint k = 0; k < 4; ++k) newData[idx + k] = data[ijTL + k]; } idx += 4; x += xFactor; } y += yFactor; } #ifdef DEBUG_TEXTURE assert(newData.size() == newWidth * newHeight * sizeof(int)); #endif // DEBUG_TEXTURE }
pointwise_aggregates(const Matrix &A, const params &prm, unsigned min_aggregate) : count(0) { typedef typename backend::value_type<Matrix>::type value_type; typedef typename math::scalar_of<value_type>::type scalar_type; if (prm.block_size == 1) { plain_aggregates aggr(A, prm); remove_small_aggregates(A.nrows, 1, min_aggregate, aggr); count = aggr.count; strong_connection.swap(aggr.strong_connection); id.swap(aggr.id); } else { strong_connection.resize( nonzeros(A) ); id.resize( rows(A) ); auto ap = backend::pointwise_matrix(A, prm.block_size); backend::crs<scalar_type> &Ap = *ap; plain_aggregates pw_aggr(Ap, prm); remove_small_aggregates( Ap.nrows, prm.block_size, min_aggregate, pw_aggr); count = pw_aggr.count * prm.block_size; #pragma omp parallel { std::vector<ptrdiff_t> j(prm.block_size); std::vector<ptrdiff_t> e(prm.block_size); #pragma omp for for(ptrdiff_t ip = 0; ip < static_cast<ptrdiff_t>(Ap.nrows); ++ip) { ptrdiff_t ia = ip * prm.block_size; for(unsigned k = 0; k < prm.block_size; ++k, ++ia) { id[ia] = prm.block_size * pw_aggr.id[ip] + k; j[k] = A.ptr[ia]; e[k] = A.ptr[ia+1]; } for(ptrdiff_t jp = Ap.ptr[ip], ep = Ap.ptr[ip+1]; jp < ep; ++jp) { ptrdiff_t cp = Ap.col[jp]; bool sp = (cp == ip) || pw_aggr.strong_connection[jp]; ptrdiff_t col_end = (cp + 1) * prm.block_size; for(unsigned k = 0; k < prm.block_size; ++k) { ptrdiff_t beg = j[k]; ptrdiff_t end = e[k]; while(beg < end && A.col[beg] < col_end) { strong_connection[beg] = sp && A.col[beg] != (ia + k); ++beg; } j[k] = beg; } } } } } }
DWORD WINAPI SearchWorkThreadMgrProc(LPVOID lpParm)
{
    CQuickSearchDlg * lpDlg = (CQuickSearchDlg *)lpParm;
    if (lpDlg == NULL) {
        // Cannot report through the dialog when the pointer itself is invalid.
        return ERROR_INVALID_PARAMETER;
    }
    if (!EnablePrivilege(SE_DEBUG_NAME, TRUE)) {
        lpDlg->NotifyStatusMsg(_T("Failed to elevate privileges!"));
        return ERROR_ACCESS_DENIED;
    }
    for (int i = 0; lpDlg->GetSafeHwnd() == NULL && i < 10; i++)
        Sleep(100);
    lpDlg->NotifyStatusMsg(_T("Ready..."));
    while (TRUE) {
        TCHAR szMsg[nMSG_SIZE] = { 0 };
        WaitForSingleObject(g_hSearchEvent, INFINITE);
        if (WaitForSingleObject(g_hQuitEvent, 50) == WAIT_OBJECT_0) {
            // Check for a quit signal before starting the search
            break;
        }
        lpDlg->InitList();
        SEARCH_PROGRRESS_INFO spi[26] = { 0 };
        for (short i = 0; i < 26; i++) {
            spi[i].m_nFoundCount = 0;
            spi[i].m_nViewCount = 0;
            spi[i].m_lpDlg = lpDlg;
            spi[i].m_Update = update;
            ParseSearchObject(lpDlg->GetSearchString().GetBuffer(), spi[i].m_vecSearchStrings);
            lpDlg->GetSearchString().ReleaseBuffer();
        }
        g_nTotalFound = 0; // number of items found so far
        g_nTotalView = 0;  // number of items examined so far
        CString strSearchStartPath(lpDlg->GetSearchLocation());
        short nSearchThreadIndex = 0;
        HANDLE hSearchThread[26] = { 0 };
        DWORD dwSeachThreadId[26] = { 0 };
        if (strSearchStartPath == _T("0")) {
            TCHAR szAllDriverLetters[100] = { 0 };
            DWORD len = GetLogicalDriveStrings(sizeof(szAllDriverLetters) / sizeof(TCHAR), szAllDriverLetters);
            for (TCHAR * lpszCurrentDriverLetter = szAllDriverLetters; *lpszCurrentDriverLetter;
                 lpszCurrentDriverLetter += _tcslen(lpszCurrentDriverLetter) + 1) {
                // Create one search thread per drive
                Sleep(nSearchThreadIndex * 1000);
                StringCchPrintf(spi[nSearchThreadIndex].m_szStartLocation, MAX_PATH - 1, _T("%C:"), lpszCurrentDriverLetter[0]);
                spi[nSearchThreadIndex].m_bFastMode = lpDlg->GetFastMode();
                hSearchThread[nSearchThreadIndex] = CreateThread(
                    NULL,                                   // default security descriptor
                    0,                                      // default stack size
                    (LPTHREAD_START_ROUTINE)SearchThreadProc,
                    (LPVOID)(spi + nSearchThreadIndex),
                    CREATE_SUSPENDED,                       // start suspended
                    dwSeachThreadId + nSearchThreadIndex);  // receives the thread ID
                if (hSearchThread[nSearchThreadIndex]) {
                    if (spi[nSearchThreadIndex].m_bFastMode)
                        if (!SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_TIME_CRITICAL))
                            if (!SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_HIGHEST))
                                if (!SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_ABOVE_NORMAL))
                                    SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_NORMAL);
                    ResumeThread(hSearchThread[nSearchThreadIndex]);
                    // Dynamic priority boost: enabled only in fast mode
                    SetThreadPriorityBoost(hSearchThread[nSearchThreadIndex], !spi[nSearchThreadIndex].m_bFastMode);
                    nSearchThreadIndex++;
                }
            }
            WaitForMultipleObjects(nSearchThreadIndex, hSearchThread, TRUE, INFINITE);
            for (short i = 0; i < nSearchThreadIndex; i++) {
                if (hSearchThread[i])
                    CloseHandle(hSearchThread[i]);
            }
        } else {
            // Create a single search thread for the chosen location
            DWORD ThreadID = 0;
            StringCchPrintf(spi[0].m_szStartLocation, MAX_PATH - 1, _T("%s"), strSearchStartPath);
            spi[0].m_bFastMode = lpDlg->GetFastMode();
            hSearchThread[0] = CreateThread(
                NULL,               // default security descriptor
                0,                  // default stack size
                (LPTHREAD_START_ROUTINE)SearchThreadProc,
                (LPVOID)&spi[0],
                CREATE_SUSPENDED,   // start suspended
                &ThreadID);         // receives the thread ID
            if (hSearchThread[0]) {
                if (spi[0].m_bFastMode)
                    if (!SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_TIME_CRITICAL))
                        if (!SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_HIGHEST))
                            if (!SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_ABOVE_NORMAL))
                                SetThreadPriority(hSearchThread[nSearchThreadIndex], THREAD_PRIORITY_NORMAL);
                ResumeThread(hSearchThread[nSearchThreadIndex]);
                // Dynamic priority boost: enabled only in fast mode
                SetThreadPriorityBoost(hSearchThread[nSearchThreadIndex], !spi[nSearchThreadIndex].m_bFastMode);
                WaitForSingleObject(hSearchThread[nSearchThreadIndex], INFINITE);
                CloseHandle(hSearchThread[nSearchThreadIndex]);
                nSearchThreadIndex++;
            }
        }
        if (g_vecCurrentFindData.size()) {
            lpDlg->InitList();
            lpDlg->NotifyStatusMsg(_T("Updating the search result list, please wait..."));
            lpDlg->UpdateList(g_vecCurrentFindData);
            ULONG nViewCount = 0, nFoundCount = 0;
            for (short i = 0; i < nSearchThreadIndex; i++) {
                nViewCount += spi[i].m_nViewCount;
                nFoundCount += spi[i].m_nFoundCount;
            }
            StringCchPrintf(szMsg, nMSG_SIZE - 1, _T("Search complete: scanned %d items and found %d objects."), nViewCount, nFoundCount);
            lpDlg->NotifyStatusMsg(szMsg);
        } else {
            lpDlg->NotifyStatusMsg(_T("Nothing found!"));
        }
        // Search finished; reset the shared state
        g_vecCurrentFindData.clear();
        std::vector<FIND_DATA> vecCurrentFindDataTemp;
        g_vecCurrentFindData.swap(vecCurrentFindDataTemp); // release the capacity as well
        g_hBrokenEvent ? ResetEvent(g_hBrokenEvent) : 0;
        g_hSearchEvent ? ResetEvent(g_hSearchEvent) : 0;
        if (WaitForSingleObject(g_hQuitEvent, 50) == WAIT_OBJECT_0) {
            // Check for a quit signal after the search task completes
            OutputDebugString(_T("Quitting..."));
            break;
        }
    }
    return 0;
}
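// The cleanup at the end of the search loop above swaps the global result
// vector with an empty temporary to release its capacity; clear() alone keeps
// the allocation. A minimal, self-contained sketch of that shrink-via-swap
// idiom (hypothetical data, unrelated to the search code):
#include <iostream>
#include <vector>

int main() {
    std::vector<int> results(1000000, 42);
    results.clear();                   // size becomes 0, capacity is typically kept
    std::cout << results.capacity() << '\n';
    std::vector<int>().swap(results);  // swapping with an empty temporary frees the storage
    std::cout << results.capacity() << '\n';
    // Since C++11, results.shrink_to_fit() requests the same effect (non-binding).
    return 0;
}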
bool CExporter::buildMeshLod(std::vector<ExportVertex>& exportVertices,
                             std::vector< std::vector<grp::LodIndices> >& buffers) {
    std::vector<LodGenerator*> generators(buffers.size());
    for (size_t i = 0; i < buffers.size(); ++i) {
        generators[i] = new LodGenerator(exportVertices, buffers[i]);
        generators[i]->calculate(m_options.lodMaxError, m_options.lodLevelScale);
    }
    // Re-sort all vertices in collapse order
    std::map<int, int> vertexMap;
    int newIndex = 0;
    // Collect the vertices used by every LOD level of every mesh buffer, in order of decreasing error
    std::vector<int> lastLevel(buffers.size());
    for (size_t i = 0; i < buffers.size(); ++i) {
        assert(buffers[i].size() > 0);
        lastLevel[i] = buffers[i].size() - 1;
    }
    while (true) {
        int maxErrorBuffer = -1;
        float maxError = -1.0f;
        for (size_t i = 0; i < lastLevel.size(); ++i) {
            if (lastLevel[i] < 0) {
                continue;
            }
            // A little convoluted, but not worth a longer explanation
            if (lastLevel[i] == buffers[i].size() - 1) {
                maxErrorBuffer = i;
                maxError = FLT_MAX;
            } else if (buffers[i][lastLevel[i] + 1].maxError > maxError) {
                maxErrorBuffer = i;
                maxError = buffers[i][lastLevel[i] + 1].maxError;
            }
        }
        if (maxErrorBuffer < 0) {
            break;
        }
        // Add this level's vertices
        grp::LodIndices& lodIndices = buffers[maxErrorBuffer][lastLevel[maxErrorBuffer]];
        for (size_t i = 0; i < lodIndices.indices.size(); ++i) {
            int index = lodIndices.indices[i];
            if (vertexMap.find(index) == vertexMap.end()) {
                vertexMap.insert(std::make_pair(index, newIndex));
                ++newIndex;
            }
        }
        --lastLevel[maxErrorBuffer];
    }
    // Re-order the vertices
    std::vector<ExportVertex> sorted(vertexMap.size());
    for (std::map<int, int>::iterator iterMap = vertexMap.begin(); iterMap != vertexMap.end(); ++iterMap) {
        assert(iterMap->second < sorted.size());
        sorted[iterMap->second] = exportVertices[iterMap->first];
    }
    // Remap the index buffers
    for (size_t bufferIndex = 0; bufferIndex < buffers.size(); ++bufferIndex) {
        std::vector<grp::LodIndices>& buffer = buffers[bufferIndex];
        for (size_t levelIndex = 0; levelIndex < buffer.size(); ++levelIndex) {
            grp::LodIndices& lodIndice = buffer[levelIndex];
            lodIndice.maxIndex = 0;
            for (size_t indexIndex = 0; indexIndex < lodIndice.indices.size(); ++indexIndex) {
                grp::Index32& index = lodIndice.indices[indexIndex];
                assert(vertexMap.find(index) != vertexMap.end());
                index = vertexMap[index];
                if (index > lodIndice.maxIndex) {
                    lodIndice.maxIndex = index;
                }
            }
        }
    }
    // Remap the duplicate-vertex (copy) references
    for (size_t vertexIndex = 0; vertexIndex < sorted.size(); ++vertexIndex) {
        ExportVertex& vertex = sorted[vertexIndex];
        std::map<int, int>::iterator found = vertexMap.find(vertex.copyPos);
        if (found != vertexMap.end()) {
            vertex.copyPos = found->second;
        }
        found = vertexMap.find(vertex.copyNormal);
        if (found != vertexMap.end()) {
            vertex.copyNormal = found->second;
        }
    }
    // Fix up the copy references so copyPos and copyNormal are always smaller than the owning vertex index
    for (size_t vertexIndex = 0; vertexIndex < sorted.size(); ++vertexIndex) {
        ExportVertex& vertex = sorted[vertexIndex];
        if (vertex.copyPos >= 0) {
            changeCopyPos(sorted, vertexIndex, vertex.copyPos);
        }
        if (vertex.copyNormal >= 0) {
            changeCopyNormal(sorted, vertexIndex, vertex.copyNormal);
        }
    }
    // Release the per-buffer generators
    for (size_t i = 0; i < generators.size(); ++i) {
        delete generators[i];
    }
    exportVertices.swap(sorted);
    return true;
}
void PreviewGenerator::GetClsidsFromExt(CString ext, std::vector<CLSID> &retVal)
{
    std::vector<CLSID> res;
    DWORD size = 1024;
    TCHAR buff[1024];
    LPCWSTR extra = L"{8895B1C6-B41F-4C1C-A562-0D564250836F}";
    CString cs = L"";
    cs.Format(L".%s", ext);
    HRESULT hr = AssocQueryString(ASSOCF_VERIFY, ASSOCSTR_SHELLEXTENSION, cs, extra, buff, &size);
    if (hr == S_OK)
    {
        CLSID cls;
        CLSIDFromString(buff, &cls);
        res.push_back(cls);
    }
    extra = L"{BB2E617C-0920-11d1-9A0B-00C04FC2D6C1}";
    size = 1024; // AssocQueryString updates size, so reset it before each call
    hr = AssocQueryString(ASSOCF_VERIFY, ASSOCSTR_SHELLEXTENSION, cs, extra, buff, &size);
    if (hr == S_OK)
    {
        CLSID cls;
        CLSIDFromString(buff, &cls);
        std::vector<CLSID>::iterator it = find(res.begin(), res.end(), cls);
        if (it == res.end())
            res.push_back(cls);
    }
    extra = L"{e357fccd-a995-4576-b01f-234630154e96}";
    size = 1024;
    hr = AssocQueryString(ASSOCF_VERIFY, ASSOCSTR_SHELLEXTENSION, cs, extra, buff, &size);
    if (hr == S_OK)
    {
        CLSID cls;
        CLSIDFromString(buff, &cls);
        std::vector<CLSID>::iterator it = find(res.begin(), res.end(), cls);
        if (it == res.end())
            res.push_back(cls);
    }
    //extra = L"{534A1E02-D58F-44f0-B58B-36CBED287C7C}";
    //CLSID cls;
    //CLSIDFromString(extra, &cls);
    //res.push_back(cls);
    //hr = AssocQueryString(ASSOCF_VERIFY, ASSOCSTR_SHELLEXTENSION, cs, extra, buff, &size);
    //if (hr == S_OK)
    //{
    //    std::vector<CString>::iterator it = find(res.begin(), res.end(), buff);
    //    if (it == res.end())
    //        res.push_back(buff);
    //}
    PERCEIVED ptype;
    PERCEIVEDFLAG pflag;
    PWSTR ppszType;
    cs = L"";
    WCHAR wcData[MAX_PATH];
    LONG cData = sizeof(wcData);
    cs.Format(L".%s", ext);
    hr = RegQueryValue(HKEY_CLASSES_ROOT, cs, wcData, &cData);
    if (hr == S_OK)
    {
        cs.Format(L"%s\\shellex\\{8895b1c6-b41f-4c1c-a562-0d564250836f}", wcData);
        cData = sizeof(wcData); // RegQueryValue updates cData, so reset it before each query
        hr = RegQueryValue(HKEY_CLASSES_ROOT, cs, wcData, &cData);
        if (hr == S_OK)
        {
            CLSID cls;
            CLSIDFromString(wcData, &cls);
            res.push_back(cls);
        }
    }
    cs.Format(L".%s", ext);
    hr = AssocGetPerceivedType(cs, &ptype, &pflag, &ppszType);
    if (hr == S_OK)
    {
        cs.Format(L"SystemFileAssociations\\%s\\ShellEx\\{e357fccd-a995-4576-b01f-234630154e96}", ppszType);
        cData = sizeof(wcData);
        hr = RegQueryValue(HKEY_CLASSES_ROOT, cs, wcData, &cData);
        if (hr == S_OK)
        {
            CLSID cls;
            CLSIDFromString(wcData, &cls);
            std::vector<CLSID>::iterator it = find(res.begin(), res.end(), cls);
            if (it == res.end())
                res.push_back(cls);
        }
        cs.Format(L"SystemFileAssociations\\%s\\ShellEx\\{BB2E617C-0920-11d1-9A0B-00C04FC2D6C1}", ppszType);
        cData = sizeof(wcData);
        hr = RegQueryValue(HKEY_CLASSES_ROOT, cs, wcData, &cData);
        if (hr == S_OK)
        {
            CLSID cls;
            CLSIDFromString(wcData, &cls);
            std::vector<CLSID>::iterator it = find(res.begin(), res.end(), cls);
            if (it == res.end())
                res.push_back(cls);
        }
        cs.Format(L"SystemFileAssociations\\%s\\ShellEx\\ContextMenuHandlers\\ShellVideoSlideshow", ppszType);
        cData = sizeof(wcData);
        hr = RegQueryValue(HKEY_CLASSES_ROOT, cs, wcData, &cData);
        if (hr == S_OK)
        {
            CLSID cls;
            CLSIDFromString(wcData, &cls);
            std::vector<CLSID>::iterator it = find(res.begin(), res.end(), cls);
            if (it == res.end())
                res.push_back(cls);
        }
    }
    retVal.swap(res);
}
void set_killmask(std::vector<int> killmask_in) { killmask.swap(killmask_in); dedisp_error error = dedisp_set_killmask(plan,&killmask[0]); ErrorChecker::check_dedisp_error(error,"set_killmask"); }
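// set_killmask() above takes its argument by value and swaps it into the
// member, a common "sink" pattern that avoids a second copy when the caller
// passes a temporary. A minimal sketch of the same pattern with a hypothetical
// Plan class (not the dedisp wrapper above):
#include <utility>
#include <vector>

class Plan {
public:
    void set_killmask(std::vector<int> killmask_in) {
        killmask_.swap(killmask_in);   // steal the parameter's storage, O(1)
    }
private:
    std::vector<int> killmask_;
};

int main() {
    Plan p;
    std::vector<int> mask(4096, 1);
    p.set_killmask(mask);              // one copy into the parameter, then a swap
    p.set_killmask(std::move(mask));   // parameter is move-constructed; no element copies
    return 0;
}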
void iter_swap(std::vector<aItem>::iterator a, std::vector<aItem>::iterator b) { a->swap(*b); };
template <class T> void swap(std::vector<T>& x, std::vector<T>& y) { x.swap(y); }
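// The wrapper above simply forwards to std::vector::swap, which exchanges the
// vectors' internal buffers in constant time instead of copying elements.
// A minimal sketch showing that sizes, capacities, and contents are exchanged:
#include <cassert>
#include <vector>

int main() {
    std::vector<int> x(10, 1);
    std::vector<int> y(1000, 2);
    x.swap(y);                 // O(1): only internal pointers and sizes change
    assert(x.size() == 1000 && y.size() == 10);
    assert(x[0] == 2 && y[0] == 1);
    return 0;
}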
static void LoadFileStateData(const std::string& filename, std::vector<u8>& ret_data) { Flush(); File::IOFile f(filename, "rb"); if (!f) { Core::DisplayMessage("State not found", 2000); return; } StateHeader header; f.ReadArray(&header, 1); if (memcmp(SConfig::GetInstance().m_LocalCoreStartupParameter.GetUniqueID().c_str(), header.gameID, 6)) { Core::DisplayMessage(StringFromFormat("State belongs to a different game (ID %.*s)", 6, header.gameID), 2000); return; } std::vector<u8> buffer; if (header.size != 0) // non-zero size means the state is compressed { Core::DisplayMessage("Decompressing State...", 500); buffer.resize(header.size); lzo_uint i = 0; while (true) { lzo_uint32 cur_len = 0; // number of bytes to read lzo_uint new_len = 0; // number of bytes to write if (!f.ReadArray(&cur_len, 1)) break; f.ReadBytes(out, cur_len); const int res = lzo1x_decompress(out, cur_len, &buffer[i], &new_len, nullptr); if (res != LZO_E_OK) { // This doesn't seem to happen anymore. PanicAlertT("Internal LZO Error - decompression failed (%d) (%li, %li) \n" "Try loading the state again", res, i, new_len); return; } i += new_len; } } else // uncompressed { const size_t size = (size_t)(f.GetSize() - sizeof(StateHeader)); buffer.resize(size); if (!f.ReadBytes(&buffer[0], size)) { PanicAlert("wtf? reading bytes: %i", (int)size); return; } } // all good ret_data.swap(buffer); }
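// LoadFileStateData() above builds the state into a local buffer and only
// swaps it into ret_data once every read/decompress step succeeded, so the
// caller's vector is untouched on any error path. A minimal sketch of that
// "commit on success" shape; LoadFileData is a hypothetical helper, not part
// of the emulator code above:
#include <cstdio>
#include <vector>

static bool LoadFileData(const char* path, std::vector<unsigned char>& out) {
    std::FILE* f = std::fopen(path, "rb");
    if (!f)
        return false;                       // out is left untouched
    std::vector<unsigned char> buffer(4096); // reads at most 4 KiB, for brevity
    const size_t n = std::fread(buffer.data(), 1, buffer.size(), f);
    std::fclose(f);
    if (n == 0)
        return false;                       // still untouched on failure
    buffer.resize(n);
    out.swap(buffer);                       // commit the result only on success
    return true;
}

int main() {
    std::vector<unsigned char> data;
    if (!LoadFileData("state.bin", data))
        std::puts("load failed; data unchanged");
    return 0;
}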
void sort() {
    sort_arg_t *thread_data;
    // struct timeval t1, t2;
    // gettimeofday(&t1, nullptr);
    if (thread_count == 1) {
        std::sort(data->begin(), data->end());
    }
    // divide input data on threads for sorting
    else {
        pthread_t *threads = new pthread_t[thread_count];
        thread_data = new sort_arg_t[thread_count];
        size_t begin = 0;
        size_t per_thread = data->size() / thread_count;
        size_t remainder = data->size() - per_thread * thread_count;
        for (int i = 0; i < thread_count; i++) {
            size_t size = per_thread + (i < remainder ? 1 : 0);
            thread_data[i].data = data;
            thread_data[i].begin = begin;
            thread_data[i].end = begin + size;
            begin += size;
        }
        for (int i = 0; i < thread_count; i++) {
            pthread_create(&threads[i], nullptr, thunk<ParallelSort, &ParallelSort::sort_thread>,
                           new std::pair<void *, void *>(this, &thread_data[i]));
        }
        for (int i = 0; i < thread_count; i++) {
            pthread_join(threads[i], nullptr);
        }
        delete[] threads;
    }
    // gettimeofday(&t2, nullptr);
    // printf("%f,", t2.tv_sec + (double)t2.tv_usec / 1000000 - t1.tv_sec - (double)t1.tv_usec / 1000000);
    // gettimeofday(&t1, nullptr);
    if (thread_count == 1) {
    }
    // uses a single 2-way merge to merge the two sorted parts
    else if (thread_count == 2) {
        merge_arg_t a, b;
        a.data = thread_data[0].data;
        a.begin = thread_data[0].begin;
        a.end = thread_data[0].end;
        a.delete_data_when_done = false;
        b.data = thread_data[1].data;
        b.begin = thread_data[1].begin;
        b.end = thread_data[1].end;
        b.delete_data_when_done = false;
        std::vector<T> *out = ParallelSort::merge_sorted(a, b);
        data->swap(*out);
        delete out;
    }
    // uses a single n-way merge to merge the sorted parts.
    // this is about 2x slower than the solution below
    // else {
    //     std::vector<T> *out = new std::vector<T>();
    //     merge_data_t d;
    //
    //     std::priority_queue<merge_data_t> heap;
    //
    //     for (int i = 0; i < thread_count; i++) {
    //         d.pos = thread_data[i].begin;
    //         d.value = (*(thread_data[i].data))[d.pos];
    //         d.slice = i;
    //         heap.push(d);
    //     }
    //
    //     while (heap.size() > 0) {
    //         d = heap.top();
    //         heap.pop();
    //
    //         out->push_back(d.value);
    //
    //         d.pos++;
    //         if (d.pos < thread_data[d.slice].end) {
    //             d.value = (*(thread_data[d.slice].data))[d.pos];
    //             heap.push(d);
    //         }
    //     }
    //     data->swap(*out);
    //     delete out;
    // }
    // uses threaded 2-way merges to merge pairs of sorted parts until
    // only one big sorted part is left
    else {
        pthread_mutex_init(&parts_mutex, nullptr);
        pthread_mutex_init(&parts_exist_mutex, nullptr);
        size_t merge_thread_count = thread_count / 2;
        if (merge_thread_count < 2) {
            merge_thread_count = 2;
        }
        remaining_parts = 0;
        for (int i = 0; i < thread_count; i++) {
            merge_arg_t a;
            a.data = thread_data[i].data;
            a.begin = thread_data[i].begin;
            a.end = thread_data[i].end;
            a.delete_data_when_done = false;
            pending_parts.push(a);
            remaining_parts++;
        }
        pthread_mutex_lock(&parts_exist_mutex);
        pthread_t *threads = new pthread_t[merge_thread_count];
        for (int i = 0; i < merge_thread_count; i++) {
            pthread_create(&threads[i], nullptr, thunk<ParallelSort, &ParallelSort::merge_thread>,
                           new std::pair<void *, void *>(this, NULL));
        }
        for (int i = 0; i < merge_thread_count; i++) {
            pthread_join(threads[i], nullptr);
        }
        delete[] threads;
        pthread_mutex_destroy(&parts_mutex);
        pthread_mutex_destroy(&parts_exist_mutex);
        merge_arg_t res = pending_parts.front();
        data->swap(*res.data);
        delete res.data;
    }
    // gettimeofday(&t2, nullptr);
    // printf("%f\n", t2.tv_sec + (double)t2.tv_usec / 1000000 - t1.tv_sec - (double)t1.tv_usec / 1000000);
    if (thread_count > 1) {
        delete[] thread_data;
    }
}
unique_connections(my_type&& My_){Connections.swap(My_.Connections);}
operators_found(operators_found &&other) : operators_found() { quotes.swap(other.quotes); negative.swap(other.negative); }
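// The move constructor above delegates to the default constructor and then
// swaps the members, a compact way to implement a move on top of swap.
// A minimal sketch of the same "move = default-construct + swap" idiom with a
// hypothetical Tokens type (assumption, not from the code above):
#include <string>
#include <utility>
#include <vector>

class Tokens {
public:
    Tokens() = default;
    explicit Tokens(std::vector<std::string> words) : words_(std::move(words)) {}

    Tokens(Tokens&& other) noexcept : Tokens() {
        words_.swap(other.words_);   // other is left valid and empty
    }
private:
    std::vector<std::string> words_;
};

int main() {
    Tokens a(std::vector<std::string>{"not", "no"});
    Tokens b(std::move(a));          // uses the swap-based move constructor
    (void)b;
    return 0;
}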
const my_type& operator=(my_type&& My_){ hmLib_assert(Connections.size() == 0, inquiry_exception, "Connections already have a connection"); Connections.swap(My_.Connections); return *this; }
void swap(rmq_support_sparse_table& rm) { std::swap(m_k, rm.m_k); m_table.swap(rm.m_table); }
void Observers::getReapedSlaves(std::vector<int>& d) { IceUtil::Mutex::Lock sync(_reapedMutex); d.swap(_reaped); }
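// getReapedSlaves() above hands the accumulated list to the caller with a
// single swap under the mutex, delivering the data and emptying the shared
// vector without copying. A minimal sketch of that drain pattern using the
// standard library instead of IceUtil (hypothetical Reaper class, assumption):
#include <mutex>
#include <vector>

class Reaper {
public:
    void add(int id) {
        std::lock_guard<std::mutex> lock(mutex_);
        reaped_.push_back(id);
    }
    void drain(std::vector<int>& out) {
        out.clear();                          // discard any leftovers from the caller
        std::lock_guard<std::mutex> lock(mutex_);
        out.swap(reaped_);                    // O(1) handoff; reaped_ is now empty
    }
private:
    std::mutex mutex_;
    std::vector<int> reaped_;
};

int main() {
    Reaper r;
    r.add(7);
    std::vector<int> got;
    r.drain(got);
    return 0;
}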
inline void yet_another_watershed( const volume_ptr<ID>& seg_ptr, const region_graph_ptr<ID,F> rg_ptr, std::vector<std::size_t>& counts, const L& lowl) { F low = static_cast<F>(lowl); std::vector<std::size_t> new_counts({0}); std::vector<ID> remaps(counts.size()); zi::disjoint_sets<ID> sets(counts.size()); std::vector<F> maxs(counts.size()); region_graph<ID,F>& rg = *rg_ptr; ID next_id = 1; ID merged = 0; for ( auto& it: rg ) { if ( std::get<0>(it) <= low ) { break; } ID s1 = std::get<1>(it); ID s2 = std::get<2>(it); F f = std::get<0>(it); if ( s1 && s2 ) { if ( (remaps[s1] == 0) || (remaps[s2] == 0) ) { if ( remaps[s1] == 0 ) { std::swap(s1,s2); } if ( remaps[s1] == 0 ) { maxs[next_id] = f; remaps[s1] = remaps[s2] = next_id; new_counts.push_back(counts[s1]+counts[s2]); ++next_id; } else { ID actual = sets.find_set(remaps[s1]); remaps[s2] = remaps[s1]; new_counts[actual] += counts[s2]; } } else { ID a1 = sets.find_set(remaps[s1]); ID a2 = sets.find_set(remaps[s2]); if ( 0 && a1 != a2 && ((maxs[a1]==f)||(maxs[a2]==f)) ) { ++merged; new_counts[a1] += new_counts[a2]; new_counts[a2] = 0; maxs[a1] = std::max(maxs[a1],maxs[a2]); maxs[a2] = 0; ID a = sets.join(a1,a2); std::swap(new_counts[a], new_counts[a1]); std::swap(maxs[a], maxs[a1]); } } } } next_id -= merged; std::vector<ID> remaps2(counts.size()); next_id = 1; for ( ID id = 0; id < counts.size(); ++id ) { ID s = sets.find_set(remaps[id]); if ( s && (remaps2[s]==0) ) { remaps2[s] = next_id; new_counts[next_id] = new_counts[s]; ++next_id; } } new_counts.resize(next_id); std::ptrdiff_t xdim = seg_ptr->shape()[0]; std::ptrdiff_t ydim = seg_ptr->shape()[1]; std::ptrdiff_t zdim = seg_ptr->shape()[2]; std::ptrdiff_t total = xdim * ydim * zdim; ID* seg_raw = seg_ptr->data(); for ( std::ptrdiff_t idx = 0; idx < total; ++idx ) { seg_raw[idx] = remaps2[remaps[seg_raw[idx]]]; } region_graph<ID,F> new_rg; for ( auto& it: rg ) { ID s1 = remaps2[remaps[std::get<1>(it)]]; ID s2 = remaps2[remaps[std::get<2>(it)]]; if ( s1 != s2 && s1 && s2 ) { auto mm = std::minmax(s1,s2); new_rg.emplace_back(std::get<0>(it), mm.first, mm.second); } } rg.swap(new_rg); counts.swap(new_counts); std::cout << "New count: " << counts.size() << std::endl; std::cout << "Done with updating the region graph, size: " << rg.size() << std::endl; }
/*! Set the imaginary part of the input signal by swapping in \a temp (on return, \a temp holds the previous contents). This part can be of zero size. */ void SetImSig(std::vector<double> &temp) { temp.swap(imSig_); }
void test_swap_member_vector() { auto l_vec = getSourceVector(); TEST_CODE(g_target_vector.swap(l_vec), 1); }
/*! Set the real part of the input signal by swapping in \a temp (on return, \a temp holds the previous contents). */ void SetReSig(std::vector<double> &temp) { temp.swap(reSig_); }
void swap(TreeItem& rhs) noexcept { strName.swap(rhs.strName); Last.swap(rhs.Last); std::swap(Depth, rhs.Depth); }
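// The TreeItem member swap above combines std::swap for scalar members with
// the containers' own swap members. A minimal sketch of pairing such a member
// with a free swap() so generic code picks it up via argument-dependent
// lookup (hypothetical Item type, assumption):
#include <string>
#include <utility>
#include <vector>

struct Item {
    std::string name;
    std::vector<int> children;
    int depth;

    void swap(Item& rhs) noexcept {
        name.swap(rhs.name);
        children.swap(rhs.children);
        std::swap(depth, rhs.depth);
    }
};

// Free overload found by ADL.
void swap(Item& a, Item& b) noexcept { a.swap(b); }

int main() {
    Item a{"root", {1, 2, 3}, 0};
    Item b{"leaf", {}, 2};
    using std::swap;
    swap(a, b);   // selects the Item overload, not the generic std::swap
    return 0;
}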
// //////////////////////////////////////////////////////////////////////////// // CODE NOTE: To be moved into ProcessId.cpp unsigned int ProcessNameToProcessId(const std::string &process_name, std::vector<ProcessId> &process_id_list, unsigned int max_count) { #ifdef _Windows unsigned int pid_count = 0; unsigned int pid_count_allocated = 8192; boost::shared_array<DWORD> pid_list(new DWORD[pid_count_allocated]); do { DWORD current_size = pid_count_allocated * sizeof(DWORD); DWORD needed_size = 0; if (!::EnumProcesses(pid_list.get(), current_size, &needed_size)) ThrowSystemError("Call to EnumProcesses() failed"); // We assume that if the number of bytes used is less than the number // allocated that we've received the entire list of process ids. // Otherwise, we re-allocate and re-try the operation. if (needed_size < current_size) { pid_count = needed_size / sizeof(DWORD); break; } pid_count_allocated *= 2; pid_list.reset(new DWORD[pid_count_allocated]); } while (pid_count_allocated); std::vector<ProcessId> tmp_process_id_list; unsigned int done_count = 0; unsigned int count_1; for (count_1 = 0; count_1 < pid_count; ++count_1) { HANDLE process_handle; if ((process_handle = ::OpenProcess(PROCESS_SET_INFORMATION | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, pid_list[count_1])) != NULL) { // First module is main EXE, so only need an array of one... HMODULE module_handle; DWORD needed_size; if (::EnumProcessModules(process_handle, &module_handle, sizeof(module_handle), &needed_size) != 0) { char module_name[(MaxPathNameLength * 2) + 1]; if (::GetModuleBaseName(process_handle, module_handle, module_name, sizeof(module_name) - 1) != 0) { if (!stricmp(process_name.c_str(), module_name)) { try { tmp_process_id_list.push_back(pid_list[count_1]); ++done_count; } catch (...) { ::CloseHandle(process_handle); throw; } } } } ::CloseHandle(process_handle); if (max_count && (done_count >= max_count)) break; } } if (!tmp_process_id_list.size()) ThrowException("Function 'ProcessNameToProcessId()' was unable to " "locate a process named '" + process_name + "'."); process_id_list.swap(tmp_process_id_list); return(static_cast<unsigned int>(process_id_list.size())); #else ThrowException("Function 'ProcessNameToProcessId()' not yet supported " "under this operating system."); return(0); #endif // #ifdef _Windows }
void get_sorted(std::vector<T>& v) const { std::vector<T> vec(data_); std::sort(vec.begin(), vec.end(), comp_); v.swap(vec); }
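// get_sorted() above builds the sorted result in a local copy and swaps it
// into the caller's vector, so the stored data is never reordered and the
// output is replaced only after the sort has fully succeeded. A minimal
// sketch of that copy-sort-swap shape with a hypothetical Bag container
// (assumption, not the class the method above belongs to):
#include <algorithm>
#include <functional>
#include <vector>

template <class T, class Comp = std::less<T>>
class Bag {
public:
    void add(const T& value) { data_.push_back(value); }

    // Fills v with a sorted copy; data_ keeps its insertion order.
    void get_sorted(std::vector<T>& v) const {
        std::vector<T> vec(data_);
        std::sort(vec.begin(), vec.end(), comp_);
        v.swap(vec);
    }
private:
    std::vector<T> data_;
    Comp comp_;
};

int main() {
    Bag<int> b;
    b.add(3); b.add(1); b.add(2);
    std::vector<int> sorted;
    b.get_sorted(sorted);   // sorted == {1, 2, 3}
    return 0;
}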
void swap(ModesExts& other) { modelist.swap(other.modelist); extlist.swap(other.extlist); }
void match::domatch(std::vector<SamePoint>& resultData) { const int nBlockSize = 512; int scale = 1; int nx1, ny1, nx2, ny2, nband1, nband2; uchar *pBuf = NULL; m_pImage->Open(_bstr_t(m_szPathNameL), modeRead); m_pImage->GetCols(&nx1); m_pImage->GetRows(&ny1); m_pImage->GetBandNum(&nband1); m_pImage->Close(); m_pImage->Open(_bstr_t(m_szPathNameR), modeRead); m_pImage->GetCols(&nx2); m_pImage->GetRows(&ny2); m_pImage->GetBandNum(&nband2); m_pImage->Close(); int mincr = /*max(max(max(nx1, ny1), nx2),ny2)*/(nx1+nx2+ny1+ny2)/4; while(mincr/scale >1024) { scale *= 2; } int nxsize1 = nx1/scale; int nysize1 = ny1/scale; int nxsize2 = nx2/scale; int nysize2 = ny2/scale; m_pImage->Open(_bstr_t(m_szPathNameL), modeRead); pBuf = new uchar[nxsize1*nysize1*nband1]; m_pImage->ReadImg(0, 0, nx1, ny1, pBuf, nxsize1, nysize1, nband1, 0, 0, nxsize1, nysize1, -1, 0); m_pImage->Close(); m_pImage->CreateImg(_bstr_t("templ.tif"), modeCreate, nxsize1, nysize1, Pixel_Byte, nband1, BIL, 0, 0, 1); m_pImage->WriteImg(0, 0, nxsize1, nysize1, pBuf, nxsize1, nysize1, nband1, 0, 0, nxsize1, nysize1, -1, 0); m_pImage->Close(); delete [] pBuf; m_pImage->Open(_bstr_t(m_szPathNameR), modeRead); pBuf = new uchar[nxsize2*nysize2*nband2]; m_pImage->ReadImg(0, 0, nx2, ny2, pBuf, nxsize2, nysize2, nband2, 0, 0, nxsize2, nysize2, -1, 0); m_pImage->Close(); m_pImage->CreateImg(_bstr_t("tempr.tif"), modeCreate, nxsize2, nysize2, Pixel_Byte, nband2, BIL, 0, 0, 1); m_pImage->WriteImg(0, 0, nxsize2, nysize2, pBuf, nxsize2, nysize2, nband2, 0, 0, nxsize2, nysize2, -1, 0); m_pImage->Close(); delete []pBuf; pBuf = NULL; sl.fetchFeatures("templ.tif"); sr.fetchFeatures("tempr.tif"); /*sl.fetchFeatures(m_szPathNameL); sr.fetchFeatures(m_szPathNameR);*/ Keypoint** nbrs; kd_node* kd_root; int nsize = sl.m_listKeyPoint.size(); int nsize2 = sr.m_listKeyPoint.size(); Keypoint* feat1 = (Keypoint*)malloc(nsize*sizeof(Keypoint)); std::list<Keypoint>::iterator temIte = sl.m_listKeyPoint.begin(); int i = 0; while(temIte != sl.m_listKeyPoint.end()) { feat1[i] = *temIte; ++i; ++temIte; } sl.m_listKeyPoint.clear(); Keypoint* feat2 = (Keypoint*)malloc(nsize2*sizeof(Keypoint)); temIte = sr.m_listKeyPoint.begin(); i = 0; while(temIte != sr.m_listKeyPoint.end()) { feat2[i] = *temIte; ++i; ++temIte; } sr.m_listKeyPoint.clear(); kd_root = kdtree_build(feat2, nsize2); int k = 0; double d0, d1; Keypoint* feat; int matchnum = 0; std::vector<SamePoint> sp; for (i = 0; i < nsize; ++i) { feat = feat1+i; k = kdtree_bbf_knn(kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS); if (k == 2) { d0 = descr_dist_sq(feat, nbrs[0]); d1 = descr_dist_sq(feat, nbrs[1]); if (d0 < d1*NN_SQ_DIST_RATIO_THR) { sp.push_back(SamePoint(feat->dx, feat->dy, nbrs[0]->dx, nbrs[0]->dy)); ++matchnum; } } free(nbrs); } kdtree_release(kd_root); free(feat1); free(feat2); feat1 = NULL; feat2 = NULL; std::vector<double> matParameters; MatParamEstimator mpEstimator(5); int numForEstimate = 3; mpEstimator.leastSquaresEstimate(sp, matParameters); double usedData = Ransac<SamePoint, double>::compute(matParameters, &mpEstimator, sp, numForEstimate, resultData); sp.swap(std::vector<SamePoint>()); resultData.swap(std::vector<SamePoint>()); Pt lLeftTop(0, 0); Pt lRightBottom(nx1, ny1); double a = matParameters[0]; double b = matParameters[1]; double c = matParameters[2]*scale; double d = matParameters[3]; double e = matParameters[4]; double f = matParameters[5]*scale; std::list<Block> listDataBlock; for (int y = 0; y < ny1;) { if (y+nBlockSize < ny1) { for (int x = 0; x < nx1;) { if (x+nBlockSize < nx1) { 
Rec rect(a*x+b*y+c, a*(x+nBlockSize)+b*y+c, d*x+e*y+f, d*x+e*(y+nBlockSize)+f); if (rect.Intersects(Rec(0, nx2, 0, ny2))) { listDataBlock.push_back(Block(NULL, x, y, nBlockSize, nBlockSize)); } x += nBlockSize; } else { Rec rect(a*x+b*y+c, a*nx1+b*y+c, d*x+e*y+f, d*x+e*(y+nBlockSize)+f); if (rect.Intersects(Rec(0, nx2, 0, ny2))) { listDataBlock.push_back(Block(NULL, x, y, nx1-x, nBlockSize)); } x = nx1; } } y += nBlockSize; } else { for (int x = 0; x < nx1;) { if (x+nBlockSize < nx1) { Rec rect(a*x+b*y+c, a*(x+nBlockSize)+b*y+c, d*x+e*y+f, d*x+e*ny1+f); if (rect.Intersects(Rec(0, nx2, 0, ny2))) { listDataBlock.push_back(Block(NULL, x, y, nBlockSize, ny1-y)); } x += nBlockSize; } else { Rec rect(a*x+b*y+c, a*nx1+b*y+c, d*x+e*y+f, d*x+e*ny1+f); if (rect.Intersects(Rec(0, nx2, 0, ny2))) { listDataBlock.push_back(Block(NULL, x, y, nx1-x, ny1-y)); } x = nx1; } } y = ny1; } } int nBlockNumx = (nx2+nBlockSize-1)/nBlockSize; int nBlockNumy = (ny2+nBlockSize-1)/nBlockSize; int nBlockNum = nBlockNumx*nBlockNumy; std::vector<RBTree> vecKDTree(nBlockNum); std::list<Block>::iterator blockIte = listDataBlock.begin(); m_pImage->Open(_bstr_t(m_szPathNameL), modeRead); m_pImage2->Open(_bstr_t(m_szPathNameR), modeRead); int countblock = 0; std::list<SamePoint> listSP; std::vector<Keypoint> feature; std::vector<Keypoint> feature2; while(blockIte != listDataBlock.end()) { pBuf = new uchar[blockIte->nXSize*blockIte->nYSize*nband1]; m_pImage->ReadImg(blockIte->nXOrigin, blockIte->nYOrigin, blockIte->nXOrigin+blockIte->nXSize, blockIte->nYOrigin+blockIte->nYSize, pBuf, blockIte->nXSize, blockIte->nYSize, nband1, 0, 0, blockIte->nXSize, blockIte->nYSize, -1, 0); pixel_t* p = new pixel_t[blockIte->nXSize*blockIte->nYSize]; for (int y = 0; y < blockIte->nYSize; ++y) { for (int x = 0, m = 0; x < blockIte->nXSize*nband1; x += nband1, ++m) { double sum = 0; for (int n = 0; n < nband1; ++n) { sum += pBuf[y*blockIte->nXSize*nband1+x+n]; } p[y*blockIte->nXSize+m] = sum/(nband1*225.0); } } delete []pBuf; pBuf = NULL; sift(p, feature, blockIte->nXSize, blockIte->nYSize); p = NULL; std::vector<Keypoint>::iterator feaIte = feature.begin(); int count = 0; while(feaIte != feature.end()) { std::cout<<countblock<<"/"<<listDataBlock.size()<<":"<<count<<"/"<<feature.size()<<":"<<listSP.size()<<std::endl; ++count; feaIte->dx += blockIte->nXOrigin; feaIte->dy += blockIte->nYOrigin; int calx = int(feaIte->dx*a+feaIte->dy*b+c); int caly = int(feaIte->dx*d+feaIte->dy*e+f); int idx = calx/nBlockSize; int idy = caly/nBlockSize; if (idx >= nBlockNumx || idy >= nBlockNumy) { ++feaIte; continue; } int nBlockIndex = idy*nBlockNumx+idx; if (vecKDTree[nBlockIndex].num != 0 && vecKDTree[nBlockIndex].num < 50) { ++feaIte; continue; } if (vecKDTree[nBlockIndex].node == NULL) { int xo = idx*nBlockSize; int yo = idy*nBlockSize; int xsize = nBlockSize; int ysize = nBlockSize; if (idx == nBlockNumx-1) { xsize = nx2%nBlockSize; if (xsize == 0) { xsize = nBlockSize; } } if (idy == nBlockNumy-1) { ysize = ny2%nBlockSize; if (ysize == 0) { ysize = nBlockSize; } } pBuf = new uchar[xsize*ysize*nband2]; m_pImage2->ReadImg(xo, yo, xo+xsize, yo+ysize, pBuf, xsize, ysize, nband2, 0, 0, xsize, ysize, -1, 0); p = new pixel_t[xsize*ysize]; for(int y = 0; y < ysize; ++y) { for (int x = 0, m = 0; x < xsize*nband2; x += nband2, ++m) { double sum = 0; for (int n = 0; n < nband2; ++n) { sum += pBuf[y*xsize*nband2+x+n]; } p[y*xsize+m] = sum/(nband2*255.0); } } delete []pBuf; pBuf = NULL; sift(p, feature2, xsize, ysize); p = NULL; int nf2 = feature2.size(); 
vecKDTree[nBlockIndex].num = nf2; if (nf2 < 50) { ++feaIte; continue; } feat2 = (Keypoint*)malloc(nf2*sizeof(Keypoint)); std::vector<Keypoint>::iterator kIte2 = feature2.begin(); i = 0; while(kIte2 != feature2.end()) { kIte2->dx += xo; kIte2->dy += yo; feat2[i] = *kIte2; ++i; ++kIte2; } feature2.swap(std::vector<Keypoint>()); kd_root = kdtree_build(feat2, nf2); vecKDTree[nBlockIndex].node = kd_root; vecKDTree[nBlockIndex].feature = feat2; } k = kdtree_bbf_knn(vecKDTree[nBlockIndex].node, &(*feaIte), 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS); if (k == 2) { d0 = descr_dist_sq(&(*feaIte), nbrs[0]); d1 = descr_dist_sq(&(*feaIte), nbrs[1]); if (d0 < d1*NN_SQ_DIST_RATIO_THR) { listSP.push_back(SamePoint(feaIte->dx, feaIte->dy, nbrs[0]->dx, nbrs[0]->dy)); } } free(nbrs); ++feaIte; } feature.swap(std::vector<Keypoint>()); std::vector<RBTree>::iterator kdIte = vecKDTree.begin(); while(kdIte != vecKDTree.end()) { if (kdIte->node != NULL) { free(kdIte->node); kdIte->node = NULL; free(kdIte->feature); kdIte->feature = NULL; } ++kdIte; } ++countblock; ++blockIte; } m_pImage->Close(); m_pImage2->Close(); std::vector<SamePoint> vecsp(std::make_move_iterator(std::begin(listSP)), std::make_move_iterator(std::end(listSP))); listSP.clear(); sp.swap(vecsp); mpEstimator.leastSquaresEstimate(sp, matParameters); if (sp.size() < 500) { usedData = Ransac<SamePoint, double>::compute(matParameters, &mpEstimator, sp, numForEstimate, resultData); } else { usedData = Ransac<SamePoint, double>::compute(matParameters, &mpEstimator, sp, numForEstimate, 0.5, 0.8, resultData); } std::cout<<usedData<<":"<<resultData.size()<<std::endl; sp.swap(std::vector<SamePoint>()); resultData.swap(std::vector<SamePoint>()); a = matParameters[0]; b = matParameters[1]; c = matParameters[2]; d = matParameters[3]; e = matParameters[4]; f = matParameters[5]; //mosaic Pt calrLeftTop; Pt calrRightBottom; calrLeftTop.x = lLeftTop.x*a+lLeftTop.y*b+c; calrLeftTop.y = lLeftTop.x*d+lLeftTop.y*e+f; calrRightBottom.x = lRightBottom.x*a+lRightBottom.y*b+c; calrRightBottom.y = lRightBottom.x*d+lRightBottom.y*e+f; Rec calRight(calrLeftTop.x, calrRightBottom.x, calrLeftTop.y, calrRightBottom.y); calRight.extend(); Rec Right(0, nx2, 0, ny2); Rec resultRec = Right.Union(calRight); Rec resultRectR = calRight.Intersected(Right); resultRectR.extend(); Pt callLeftTop; Pt callRightBottom; callLeftTop.y = (d/a*resultRectR.left-resultRectR.top+f-c*d/a)/(b*d/a-e); callLeftTop.x = (resultRectR.left-b*callLeftTop.y-c)/a; callRightBottom.y = (d/a*resultRectR.right-resultRectR.bottom+f-c*d/a)/(b*d/a-e); callRightBottom.x = (resultRectR.right-b*callRightBottom.y-c)/a; Rec resultRectL(callLeftTop.x, callRightBottom.x, callLeftTop.y, callRightBottom.y); resultRectL.extend(); m_pImage->CreateImg(_bstr_t("result.tif"), modeCreate, (int)resultRec.Width(), (int)resultRec.Height(), Pixel_Byte, nband1, BIL, 0, 0, 1); IImage* pImage = NULL; IImage* pImage2 = NULL; ::CoCreateInstance(CLSID_ImageDriver, NULL, CLSCTX_ALL, IID_IImage, (void**)&pImage); ::CoCreateInstance(CLSID_ImageDriver, NULL, CLSCTX_ALL, IID_IImage, (void**)&pImage2); pImage->Open(_bstr_t(m_szPathNameR), modeRead); pBuf = new uchar[nx2*nBlockSize*nband2]; for (int i = 0; i < ny2;) { if (i + nBlockSize < ny2) { pImage->ReadImg(0, i, nx2, i+nBlockSize, pBuf, nx2, nBlockSize, nband2, 0, 0, nx2, nBlockSize, -1, 0); m_pImage->WriteImg(int(0-resultRec.left), int(i-resultRec.top), int(nx2-resultRec.left), int(i+nBlockSize-resultRec.top), pBuf, nx2, nBlockSize, nband2, 0, 0, nx2, nBlockSize, -1, 0); i += nBlockSize; } else { 
pImage->ReadImg(0, i, nx2, ny2, pBuf, nx2, nBlockSize, nband2, 0, 0, nx2, ny2-i, -1, 0); m_pImage->WriteImg(int(0-resultRec.left), int(i-resultRec.top), int(nx2-resultRec.left), int(ny2-resultRec.top), pBuf, nx2, nBlockSize, nband2, 0, 0, nx2, ny2-i, -1, 0); i = ny2; } } pImage->Close(); delete []pBuf; pImage->Open(_bstr_t(m_szPathNameL), modeRead); pBuf = new uchar[nx1*nBlockSize*nband1]; for (int i = 0; i < calRight.Height();) { if (i+nBlockSize < calRight.Height()) { pImage->ReadImg(0, i, nx1, i+nBlockSize, pBuf, nx1, nBlockSize, nband1, 0, 0, nx1, nBlockSize, -1, 0); m_pImage->WriteImg(int(calRight.left-resultRec.left), int(calRight.top+i-resultRec.top), int(calRight.right-resultRec.left), int(calRight.top+i+nBlockSize-resultRec.top), pBuf, nx1, nBlockSize, nband1, 0, 0, nx1, nBlockSize, -1, 0); i += nBlockSize; } else { pImage->ReadImg(0, i, nx1, ny1, pBuf, nx1, nBlockSize, nband1, 0, 0, nx1, ny1-i, -1, 0); m_pImage->WriteImg(int(calRight.left-resultRec.left), int(calRight.top+i-resultRec.top), int(calRight.right-resultRec.left), int(calRight.bottom-resultRec.top), pBuf, nx1, nBlockSize, nband1, 0, 0, nx1, ny1-i, -1, 0); i = (int)calRight.Height(); } } pImage->Close(); delete []pBuf; pBuf = NULL; m_pImage->Close(); pImage->Open(_bstr_t(m_szPathNameL), modeRead); pImage2->Open(_bstr_t(m_szPathNameR), modeRead); m_pImage->Open(_bstr_t("result.tif"), modeReadWrite); pBuf = new uchar[nband1*(int)resultRectR.Width()*(int)resultRectR.Height()]; m_pImage->ReadImg(int(resultRectR.left-resultRec.left), int(resultRectR.top-resultRec.top), int(resultRectR.right-resultRec.left), int(resultRectR.bottom-resultRec.top), pBuf, (int)resultRectR.Width(), (int)resultRectR.Height(), nband1, 0, 0, (int)resultRectR.Width(), (int)resultRectR.Height(), -1, 0); uchar* pBufl = new uchar[nband1*(int)resultRectL.Width()*(int)resultRectL.Height()]; pImage->ReadImg((int)resultRectL.left, (int)resultRectL.top, (int)resultRectL.right, (int)resultRectL.bottom, pBufl, (int)resultRectL.Width(), (int) resultRectL.Height(), nband1, 0, 0, (int)resultRectL.Width(), (int)resultRectL.Height(), -1, 0); uchar* pBufr = new uchar[nband2*(int)resultRectR.Width()*(int)resultRectR.Height()]; pImage2->ReadImg((int)resultRectR.left, (int)resultRectR.top, (int)resultRectR.right, (int)resultRectR.bottom, pBufr, (int)resultRectR.Width(), (int)resultRectR.Height(), nband2, 0, 0, (int)resultRectR.Width(), (int)resultRectR.Height(), -1, 0); double lx, ly; for (int y = 0; y < (int)resultRectR.Height(); ++y) { for (int x = 0; x < (int)resultRectR.Width()*nband1; x += nband1) { ly = (d/a*(x+resultRectR.left)-(y+resultRectR.top)+f-c*d/a)/(b*d/a-e); lx = ((x+resultRectR.left)-b*ly-c)/a; if (ly < resultRectL.top || lx < resultRectL.left) { continue; } if (pBufl[(int)(ly-resultRectL.top)*(int)resultRectL.Width()*nband1+(int)(lx-resultRectL.left)] < 20 && pBufl[(int)(ly-resultRectL.top)*(int)resultRectL.Width()*nband1+(int)(lx-resultRectL.left)+1] < 20 && pBufl[(int)(ly-resultRectL.top)*(int)resultRectL.Width()*nband1+(int)(lx-resultRectL.left)+2] < 20) { for (int n = 0; n < nband2; ++n) { pBuf[y*(int)resultRectR.Width()*nband1+x+n] = pBufr[y*(int)resultRectR.Width()*nband2+x+n]; } } else if (pBufr[y*(int)resultRectR.Width()*nband1+x] < 20 && pBufr[y*(int)resultRectR.Width()*nband1+x+1]<20 && pBufr[y*(int)resultRectR.Width()*nband1+x+2]<20) { for (int n = 0; n < nband1; ++n) { pBuf[y*(int)resultRectR.Width()*nband1+x+n] = pBufl[(int)(ly-resultRectL.top)*(int)resultRectL.Width()*nband1+(int)(lx-resultRectL.left)+n]; } } else { for (int n = 0; n < 
nband1; ++n) { pBuf[y*(int)resultRectR.Width()*nband1+x+n] = (pBufl[(int)(ly-resultRectL.top)*(int)resultRectL.Width()*nband1+(int)(lx-resultRectL.left)+n] + pBufr[y*(int)resultRectR.Width()*nband2+x+n])/2; } } } } delete []pBufl; pBufl = NULL; delete []pBufr; pBufr = NULL; m_pImage->WriteImg(int(resultRectR.left-resultRec.left), int(resultRectR.top-resultRec.top), int(resultRectR.right-resultRec.left), int(resultRectR.bottom-resultRec.top), pBuf, (int)resultRectR.Width(), (int)resultRectR.Height(), nband1, 0, 0, (int)resultRectR.Width(), (int)resultRectR.Height(), -1, 0); delete []pBuf; pBuf = NULL; m_pImage->Close(); }
template <typename PointT> void pcl::ProgressiveMorphologicalFilter<PointT>::extract (std::vector<int>& ground) { bool segmentation_is_possible = initCompute (); if (!segmentation_is_possible) { deinitCompute (); return; } // Compute the series of window sizes and height thresholds std::vector<float> height_thresholds; std::vector<float> window_sizes; int iteration = 0; float window_size = 0.0f; float height_threshold = 0.0f; while (window_size < max_window_size_) { // Determine the initial window size. if (exponential_) window_size = cell_size_ * (2.0f * std::pow (base_, iteration) + 1.0f); else window_size = cell_size_ * (2.0f * (iteration+1) * base_ + 1.0f); // Calculate the height threshold to be used in the next iteration. if (iteration == 0) height_threshold = initial_distance_; else height_threshold = slope_ * (window_size - window_sizes[iteration-1]) * cell_size_ + initial_distance_; // Enforce max distance on height threshold if (height_threshold > max_distance_) height_threshold = max_distance_; window_sizes.push_back (window_size); height_thresholds.push_back (height_threshold); iteration++; } // Ground indices are initially limited to those points in the input cloud we // wish to process ground = *indices_; // Progressively filter ground returns using morphological open for (int i = 0; i < window_sizes.size (); ++i) { PCL_DEBUG (" Iteration %d (height threshold = %f, window size = %f)...", i, height_thresholds[i], window_sizes[i]); // Limit filtering to those points currently considered ground returns typename pcl::PointCloud<PointT>::Ptr cloud (new pcl::PointCloud<PointT>); pcl::copyPointCloud<PointT> (*input_, ground, *cloud); // Create new cloud to hold the filtered results. Apply the morphological // opening operation at the current window size. typename pcl::PointCloud<PointT>::Ptr cloud_f (new pcl::PointCloud<PointT>); pcl::applyMorphologicalOperator<PointT> (cloud, window_sizes[i], MORPH_OPEN, *cloud_f); // Find indices of the points whose difference between the source and // filtered point clouds is less than the current height threshold. std::vector<int> pt_indices; for (boost::int32_t p_idx = 0; p_idx < ground.size (); ++p_idx) { float diff = cloud->points[p_idx].z - cloud_f->points[p_idx].z; if (diff < height_thresholds[i]) pt_indices.push_back (ground[p_idx]); } // Ground is now limited to pt_indices ground.swap (pt_indices); PCL_DEBUG ("ground now has %d points\n", ground.size ()); } deinitCompute (); }
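// Each pass of the filter above collects the surviving indices into a fresh
// vector and swaps it into `ground`, avoiding in-place erasure. A minimal
// sketch of that filter-and-swap loop with a plain height-threshold filter
// (hypothetical data; not the morphological operator itself):
#include <vector>

int main() {
    std::vector<float> z = {0.1f, 5.0f, 0.3f, 2.0f, 0.2f};
    std::vector<int> ground = {0, 1, 2, 3, 4};
    const float thresholds[] = {3.0f, 1.0f, 0.25f};

    for (float t : thresholds) {
        std::vector<int> kept;
        for (int idx : ground)
            if (z[idx] < t)
                kept.push_back(idx);
        ground.swap(kept);   // survivors become the working set for the next pass
    }
    // ground now holds {0, 4}
    return 0;
}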