int main (int argc, char** argv) { pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>); if ( pcl::io::loadPCDFile <pcl::PointXYZRGBA> ("/home/andrew/School/CV-3/data/tripod_3/kinect_1000.pcd", *cloud) == -1) { std::cout << "Cloud reading failed." << std::endl; return (-1); } std::vector<int> idx; pcl::removeNaNFromPointCloud(*cloud, *cloud, idx); pcl::search::Search<pcl::PointXYZRGBA>::Ptr tree = boost::shared_ptr<pcl::search::Search<pcl::PointXYZRGBA> > (new pcl::search::KdTree<pcl::PointXYZRGBA>); pcl::PointCloud <pcl::Normal>::Ptr normals (new pcl::PointCloud <pcl::Normal>); pcl::NormalEstimation<pcl::PointXYZRGBA, pcl::Normal> normal_estimator; normal_estimator.setSearchMethod (tree); normal_estimator.setInputCloud (cloud); normal_estimator.setKSearch (100); normal_estimator.compute (*normals); std::cout << "PassThrough" << std::endl; pcl::IndicesPtr indices (new std::vector <int>); pcl::PassThrough<pcl::PointXYZRGBA> pass; pass.setInputCloud (cloud); pass.setFilterFieldName ("z"); pass.setFilterLimits (0.0, 1.0); pass.filter (*indices); std::cout << "Region" << std::endl; pcl::RegionGrowingRGB<pcl::PointXYZRGBA, pcl::Normal> reg; //reg.setMinClusterSize (50); //reg.setMaxClusterSize (10000); reg.setSearchMethod (tree); reg.setNumberOfNeighbours (50); reg.setInputCloud (cloud); reg.setPointColorThreshold(12); reg.setRegionColorThreshold(7); //reg.setIndices (indices); reg.setInputNormals (normals); reg.setSmoothnessThreshold (3.0 / 180.0 * M_PI); reg.setCurvatureThreshold (1.0); std::vector <pcl::PointIndices> clusters; reg.extract (clusters); std::cout << "Number of clusters is equal to " << clusters.size () << std::endl; std::cout << "First cluster has " << clusters[0].indices.size () << " points." 
<< endl; std::cout << "These are the indices of the points of the initial" << std::endl << "cloud that belong to the first cluster:" << std::endl; int counter = 0; while (counter < 5 || counter > clusters[0].indices.size ()) { std::cout << clusters[0].indices[counter] << std::endl; counter++; } pcl::PointCloud <pcl::PointXYZRGB>::Ptr colored_cloud = reg.getColoredCloud (); pcl::visualization::CloudViewer viewer ("Cluster viewer"); viewer.showCloud(colored_cloud); while (!viewer.wasStopped ()) { } return (0); }
/* Builds the D3D9 vertex declaration and the attribute-stream mapping for one
 * Cg shader pass. Queries the declaration Cg generated for the vertex program,
 * pins the POSITION / TEXCOORD0 / TEXCOORD1 / COLOR semantics to fixed streams
 * 0-3, then assigns every remaining (semantic-less) parameter its own stream
 * and texcoord slot. Returns false if the Cg declaration cannot be obtained or
 * the D3D vertex declaration cannot be created.
 *
 * data      : cg_renderchain_t* (owns the D3D device)
 * pass_data : Pass* whose vPrg and attrib_map are read/filled
 */
static bool cg_d3d9_renderchain_init_shader_fvf(void *data, void *pass_data)
{
   CGparameter param;
   unsigned index, i, count;
   unsigned tex_index = 0;
   bool texcoord0_taken = false;
   bool texcoord1_taken = false;
   bool stream_taken[4] = {false};
   cg_renderchain_t *chain = (cg_renderchain_t*)data;
   Pass *pass = (Pass*)pass_data;
   static const D3DVERTEXELEMENT decl_end = D3DDECL_END();
   static const D3DVERTEXELEMENT position_decl = DECL_FVF_POSITION(0);
   static const D3DVERTEXELEMENT tex_coord0 = DECL_FVF_TEXCOORD(1, 3, 0);
   static const D3DVERTEXELEMENT tex_coord1 = DECL_FVF_TEXCOORD(2, 5, 1);
   static const D3DVERTEXELEMENT color = DECL_FVF_COLOR(3, 7, 0);
   D3DVERTEXELEMENT decl[MAXD3DDECLLENGTH] = {{0}};

   if (cgD3D9GetVertexDeclaration(pass->vPrg, decl) == CG_FALSE)
      return false;

   /* Count the declaration elements up to the D3DDECL_END() terminator. */
   for (count = 0; count < MAXD3DDECLLENGTH; count++)
   {
      if (memcmp(&decl_end, &decl[count], sizeof(decl_end)) == 0)
         break;
   }

   /* This is completely insane.
    * We do not have a good and easy way of setting up our
    * attribute streams, so we have to do it ourselves, yay!
    *
    * Stream 0 => POSITION
    * Stream 1 => TEXCOORD0
    * Stream 2 => TEXCOORD1
    * Stream 3 => COLOR (Not really used for anything.)
    * Stream {4..N} => Texture coord streams for varying resources
    * which have no semantics.
    */

   /* indices[i] == true marks declaration slots already claimed by one of
    * the four known semantics below. */
   std::vector<bool> indices(count);

   param = find_param_from_semantic(pass->vPrg, "POSITION");
   if (!param)
      param = find_param_from_semantic(pass->vPrg, "POSITION0");
   if (param)
   {
      stream_taken[0] = true;
      RARCH_LOG("[FVF]: POSITION semantic found.\n");
      index = cgGetParameterResourceIndex(param);
      decl[index] = position_decl;
      indices[index] = true;
   }

   param = find_param_from_semantic(pass->vPrg, "TEXCOORD");
   if (!param)
      param = find_param_from_semantic(pass->vPrg, "TEXCOORD0");
   if (param)
   {
      stream_taken[1] = true;
      texcoord0_taken = true;
      RARCH_LOG("[FVF]: TEXCOORD0 semantic found.\n");
      index = cgGetParameterResourceIndex(param);
      decl[index] = tex_coord0;
      indices[index] = true;
   }

   param = find_param_from_semantic(pass->vPrg, "TEXCOORD1");
   if (param)
   {
      stream_taken[2] = true;
      texcoord1_taken = true;
      RARCH_LOG("[FVF]: TEXCOORD1 semantic found.\n");
      index = cgGetParameterResourceIndex(param);
      decl[index] = tex_coord1;
      indices[index] = true;
   }

   param = find_param_from_semantic(pass->vPrg, "COLOR");
   if (!param)
      param = find_param_from_semantic(pass->vPrg, "COLOR0");
   if (param)
   {
      stream_taken[3] = true;
      RARCH_LOG("[FVF]: COLOR0 semantic found.\n");
      index = cgGetParameterResourceIndex(param);
      decl[index] = color;
      indices[index] = true;
   }

   /* Stream {0, 1, 2, 3} might be already taken. Find first vacant stream. */
   for (index = 0; index < 4 && stream_taken[index]; index++);

   /* Find first vacant texcoord declaration. */
   if (texcoord0_taken && texcoord1_taken)
      tex_index = 2;
   else if (texcoord1_taken && !texcoord0_taken)
      tex_index = 0;
   else if (texcoord0_taken && !texcoord1_taken)
      tex_index = 1;

   /* Assign every unclaimed declaration slot its own stream + texcoord pair.
    * attrib_map records, per declaration slot, which stream feeds it
    * (0 for the fixed-semantic slots handled above). */
   for (i = 0; i < count; i++)
   {
      if (indices[i])
         pass->attrib_map.push_back(0);
      else
      {
         D3DVERTEXELEMENT elem = DECL_FVF_TEXCOORD(index, 3, tex_index);
         pass->attrib_map.push_back(index);
         decl[i] = elem;

         /* Find next vacant stream. */
         index++;
         while (index < 4 && stream_taken[index])
            index++;

         /* Find next vacant texcoord declaration. */
         tex_index++;
         if (tex_index == 1 && texcoord1_taken)
            tex_index++;
      }
   }

   if (FAILED(chain->dev->CreateVertexDeclaration(
               decl, &pass->vertex_decl)))
      return false;

   return true;
}
// Parses a Mitsuba ".serialized" mesh file referenced by an XML <shape> node,
// compiling every contained submesh into the engine's ".xmsh" format on first
// use (results are cached in a per-mesh folder), then creates and returns a
// scene node for the requested submesh index.
//
// node : the XML element carrying "filename", "shapeIndex" and shading props.
// S    : parser state providing property lookup, asset paths and the scene.
ShapeParser::ShapeParseResult ShapeParser::serialized(const XMLNode& node, ParserState& S)
{
	auto filename = S.map_asset_filepath(S.def_storage.prop_string(node, "filename"));
	int submesh_index = S.def_storage.prop_int(node, "shapeIndex");
	bool flipNormals = S.def_storage.prop_bool(node, "flipNormals", false);
	bool faceNormals = S.def_storage.prop_bool(node, "faceNormals", false);
	float maxSmoothAngle = S.def_storage.prop_float(node, "maxSmoothAngle", 0.0f);

	auto name = boost::filesystem::path(filename).stem().string();
	auto compiled_tar_folder = S.scene.getFileManager()->getCompiledMeshPath("") + name + "/";

	auto get_compiled_submesh_filename = [&](size_t i)
	{
		return compiled_tar_folder + std::to_string(i) + ".xmsh";
	};

	// Compile only if the cache folder or its first submesh is missing.
	if (!boost::filesystem::exists(compiled_tar_folder) || !boost::filesystem::exists(get_compiled_submesh_filename(0)))
	{
		boost::filesystem::create_directory(compiled_tar_folder);

		// Bit flags stored in each submesh header (Mitsuba serialized format).
		enum DataPresentFlag : uint32_t
		{
			VertexNormals = 0x0001,
			TextureCoords = 0x0002,
			VertexColors = 0x0008,
			UseFaceNormals = 0x0010,
			SinglePrecision = 0x1000,
			DoublePrecision = 0x2000,
		};

		// Minimal pull-style zlib inflater over an std::ifstream. Each submesh
		// body is an independent deflate stream.
		struct inflateStream
		{
			std::ifstream& m_childStream;
			size_t str_length;
			z_stream m_inflateStream;
			uint8_t m_inflateBuffer[32768];

			inflateStream(std::ifstream& str)
				: m_childStream(str)
			{
				// Remember total file length so read() knows how many
				// compressed bytes remain.
				size_t pos = m_childStream.tellg();
				m_childStream.seekg(0, m_childStream.end);
				str_length = m_childStream.tellg();
				m_childStream.seekg(pos, m_childStream.beg);

				m_inflateStream.zalloc = Z_NULL;
				m_inflateStream.zfree = Z_NULL;
				m_inflateStream.opaque = Z_NULL;
				m_inflateStream.avail_in = 0;
				m_inflateStream.next_in = Z_NULL;
				int windowBits = 15;
				auto retval = inflateInit2(&m_inflateStream, windowBits);
				if (retval != Z_OK)
					std::cout << "erro, ret : " << retval << std::endl;
			}

			// BUG FIX: the original never released the zlib state, leaking
			// the inflate allocation once per submesh.
			~inflateStream()
			{
				inflateEnd(&m_inflateStream);
			}

			// Decompresses exactly `size` bytes into `ptr`, refilling the
			// input buffer from the file as needed. Throws on stream errors
			// or on reading past the end of the deflate stream.
			void read(void *ptr, size_t size)
			{
				uint8_t *targetPtr = (uint8_t *)ptr;
				while (size > 0)
				{
					if (m_inflateStream.avail_in == 0)
					{
						size_t remaining = str_length - m_childStream.tellg();
						m_inflateStream.next_in = m_inflateBuffer;
						m_inflateStream.avail_in = (uInt)std::min(remaining, sizeof(m_inflateBuffer));
						if (m_inflateStream.avail_in == 0)
							std::cout << "more bytes req : " << size << std::endl;
						m_childStream.read((char*)m_inflateBuffer, m_inflateStream.avail_in);
					}

					m_inflateStream.avail_out = (uInt)size;
					m_inflateStream.next_out = targetPtr;

					int retval = inflate(&m_inflateStream, Z_NO_FLUSH);
					switch (retval)
					{
					case Z_STREAM_ERROR:
						throw std::runtime_error("inflate(): stream error!");
					case Z_NEED_DICT:
						throw std::runtime_error("inflate(): need dictionary!");
					case Z_DATA_ERROR:
						throw std::runtime_error("inflate(): data error!");
					case Z_MEM_ERROR:
						throw std::runtime_error("inflate(): memory error!");
					};

					size_t outputSize = size - (size_t)m_inflateStream.avail_out;
					targetPtr += outputSize;
					size -= outputSize;

					if (size > 0 && retval == Z_STREAM_END)
						throw std::runtime_error("inflate(): attempting to read past the end of the stream!");
				}
			}
		};

		std::ifstream ser_str(filename, std::ios::binary);

		// File header: magic 0x041C (1052) then the format version.
		uint16_t magic_maj, version_maj;
		ser_str.read((char*)&magic_maj, 2);
		if (magic_maj != 1052)
			throw std::runtime_error("corrupt file");
		ser_str.read((char*)&version_maj, 2);

		// Trailer: last 4 bytes hold the submesh count; before that sits the
		// offset table (64-bit entries in v4, 32-bit in older versions).
		ser_str.seekg(-4, ser_str.end);
		uint32_t n_meshes;
		ser_str.read((char*)&n_meshes, sizeof(n_meshes));

		// BUG FIX: negate through a signed std::streamoff instead of applying
		// unary minus to an unsigned sizeof expression (which wraps around and
		// only worked by accident of the conversion).
		std::streamoff table_bytes = (std::streamoff)(sizeof(uint32_t) +
			(version_maj == 4 ? sizeof(uint64_t) : sizeof(uint32_t)) * n_meshes);
		ser_str.seekg(-table_bytes, ser_str.end);

		std::vector<uint64_t> mesh_offsets(n_meshes);
		if (version_maj == 4)
			ser_str.read((char*)mesh_offsets.data(), n_meshes * sizeof(uint64_t));
		else
		{
			auto q = std::vector<uint32_t>(n_meshes);
			ser_str.read((char*)q.data(), n_meshes * sizeof(uint32_t));
			for (size_t i = 0; i < n_meshes; i++)
				mesh_offsets[i] = q[i];
		}

		for (size_t num_submesh = 0; num_submesh < n_meshes; num_submesh++)
		{
			ser_str.seekg(mesh_offsets[num_submesh], ser_str.beg);

			// Per-submesh header (uncompressed): magic + version.
			uint16_t magic, version;
			ser_str.read((char*)&magic, 2);
			if (magic == 0)
				break;
			ser_str.read((char*)&version, 2);
			if (version != 3 && version != 4)
				throw std::runtime_error("invalid version in serialized mesh file");

			// Everything after the header is one deflate stream.
			inflateStream comp_str(ser_str);
			DataPresentFlag flag;
			comp_str.read(&flag, sizeof(flag));

			// v4 adds a NUL-terminated mesh name.
			std::string name = "default";
			if (version == 4)
			{
				name = "";
				char last_read;
				do
				{
					comp_str.read(&last_read, sizeof(last_read));
					name += last_read;
				} while (last_read != 0);
			}

			uint64_t nVertices, nTriangles;
			comp_str.read(&nVertices, sizeof(nVertices));
			comp_str.read(&nTriangles, sizeof(nTriangles));

			std::vector<Vec3f> positions(nVertices), normals(nVertices), colors(nVertices);
			std::vector<Vec2f> uvcoords(nVertices);
			std::vector<uint32_t> indices(nTriangles * 3);

			// NOTE(review): isSingle is never derived from the
			// SinglePrecision/DoublePrecision flags — presumably all inputs
			// encountered so far are single precision; confirm.
			bool isSingle = true;
			auto read_n_vector = [&](int dim, float* buffer)
			{
				if (isSingle)
					comp_str.read((char*)buffer, sizeof(float) * dim * nVertices);
				else
				{
					// Double-precision payload: convert per vertex.
					double* double_storage = (double*)alloca(dim * sizeof(double));
					for (size_t i = 0; i < nVertices; i++)
					{
						comp_str.read((char*)double_storage, dim * sizeof(double));
						for (int j = 0; j < dim; j++)
							buffer[i * dim + j] = float(double_storage[j]);
					}
				}
			};

			read_n_vector(3, (float*)positions.data());
			if ((flag & DataPresentFlag::VertexNormals) == DataPresentFlag::VertexNormals)
				read_n_vector(3, (float*)normals.data());
			if ((flag & DataPresentFlag::TextureCoords) == DataPresentFlag::TextureCoords)
				read_n_vector(2, (float*)uvcoords.data());
			else
				std::fill(uvcoords.begin(), uvcoords.end(), Vec2f(0.0f));
			if ((flag & DataPresentFlag::VertexColors) == DataPresentFlag::VertexColors)
				read_n_vector(3, (float*)colors.data());

			comp_str.read((char*)indices.data(), sizeof(uint32_t) * nTriangles * 3);

			// Flip triangle winding order.
			for (size_t i = 0; i < nTriangles * 3; i += 3)
				std::swap(indices[i + 0], indices[i + 2]);

			// Compile the submesh into the cache folder.
			auto compiled_submesh_filename = get_compiled_submesh_filename(num_submesh);
			FileOutputStream fOut(compiled_submesh_filename);
			fOut << (unsigned int)MeshCompileType::Static;
			auto mat = Material(name.size() > 60 ? name.substr(0, 60) : name);
			mat.bsdf = CreateAggregate<BSDFALL>(diffuse());
			Mesh::CompileMesh(positions.data(), (int)positions.size(), normals.data(), uvcoords.data(),
				indices.data(), (int)indices.size(), mat, 0.0f, fOut, flipNormals, faceNormals, maxSmoothAngle);
			fOut.Close();
		}
		ser_str.close();
	}

	auto obj = S.scene.CreateNode(get_compiled_submesh_filename(submesh_index));
	parseGeneric(obj, node, S);
	return obj;
}
// Converts this Tcl object to a COM VARIANT.
//
// The conversion is chosen by inspecting, in order: registered interface
// handles, raw IUnknown pointers, the requested SAFEARRAY type, then the Tcl
// object's internal type (list, byte array, "na"/missing, null, variant, int,
// double, boolean), falling back to a BSTR with VariantChangeType coercion.
//
// pDest  : receives the converted value (cleared first).
// type   : the expected COM type; VT_VARIANT/VT_USERDEFINED suppress coercion.
// interp : Tcl interpreter used for list/number extraction.
// addRef : when true, AddRef interface pointers (required for values returned
//          from methods, whose reference the caller will own).
void TclObject::toVariant (VARIANT *pDest, const Type &type, Tcl_Interp *interp, bool addRef)
{
    VariantClear(pDest);
    VARTYPE vt = type.vartype();

    Reference *pReference = Extension::referenceHandles.find(interp, m_pObj);
    if (pReference != 0) {
        // Convert interface pointer handle to interface pointer.
        if (addRef) {
            // Must increment reference count of interface pointers returned
            // from methods.
            pReference->unknown()->AddRef();
        }
        // Prefer the IDispatch interface when the reference has one.
        IDispatch *pDispatch = pReference->dispatch();
        if (pDispatch != 0) {
            V_VT(pDest) = VT_DISPATCH;
            V_DISPATCH(pDest) = pDispatch;
        } else {
            V_VT(pDest) = VT_UNKNOWN;
            V_UNKNOWN(pDest) = pReference->unknown();
        }

    } else if (m_pObj->typePtr == &Extension::unknownPointerType) {
        // Convert to interface pointer.
        IUnknown *pUnknown = static_cast<IUnknown *>(
            m_pObj->internalRep.otherValuePtr);
        if (addRef && pUnknown != 0) {
            // Must increment reference count of interface pointers returned
            // from methods.
            pUnknown->AddRef();
        }
        V_VT(pDest) = VT_UNKNOWN;
        V_UNKNOWN(pDest) = pUnknown;

    } else if (vt == VT_SAFEARRAY) {
        // Caller explicitly asked for an array of the declared element type.
        const Type &elementType = type.elementType();
        V_VT(pDest) = VT_ARRAY | elementType.vartype();
        V_ARRAY(pDest) = getSafeArray(elementType, interp);

    } else if (m_pObj->typePtr == TclTypes::listType()) {
        // Convert Tcl list to array of VARIANT.
        int numElements;
        Tcl_Obj **pElements;
        if (Tcl_ListObjGetElements(interp, m_pObj, &numElements, &pElements)
                != TCL_OK) {
            _com_issue_error(E_INVALIDARG);
        }

        SAFEARRAYBOUND bounds[2];
        bounds[0].cElements = numElements;
        bounds[0].lLbound = 0;
        unsigned numDimensions;

        // Check if the first element of the list is a list.
        // If so, treat the value as a two-dimensional array whose column
        // count is taken from that first row.
        if (numElements > 0 && pElements[0]->typePtr == TclTypes::listType()) {
            int colSize;
            Tcl_Obj **pCol;
            if (Tcl_ListObjGetElements(interp, pElements[0], &colSize, &pCol)
                    != TCL_OK) {
                _com_issue_error(E_INVALIDARG);
            }
            bounds[1].cElements = colSize;
            bounds[1].lLbound = 0;
            numDimensions = 2;
        } else {
            numDimensions = 1;
        }

        SAFEARRAY *psa = SafeArrayCreate(VT_VARIANT, numDimensions, bounds);
        std::vector<long> indices(numDimensions);
        fillSafeArray(m_pObj, psa, 1, &indices[0], interp, addRef);
        V_VT(pDest) = VT_ARRAY | VT_VARIANT;
        V_ARRAY(pDest) = psa;

#if TCL_MINOR_VERSION >= 1
    } else if (m_pObj->typePtr == TclTypes::byteArrayType()) {
        // Convert Tcl byte array to SAFEARRAY of bytes.
        V_VT(pDest) = VT_ARRAY | VT_UI1;
        V_ARRAY(pDest) = newSafeArray(m_pObj, VT_UI1);
#endif

    } else if (m_pObj->typePtr == &Extension::naType) {
        // This variant indicates a missing optional argument.
        VariantCopy(pDest, &vtMissing);

    } else if (m_pObj->typePtr == &Extension::nullType) {
        V_VT(pDest) = VT_NULL;

    } else if (m_pObj->typePtr == &Extension::variantType) {
        // Already a wrapped _variant_t — copy it through unchanged.
        VariantCopy(
            pDest,
            static_cast<_variant_t *>(m_pObj->internalRep.otherValuePtr));

    } else if (m_pObj->typePtr == TclTypes::intType()) {
        long value;
        if (Tcl_GetLongFromObj(interp, m_pObj, &value) != TCL_OK) {
            value = 0;
        }
        V_VT(pDest) = VT_I4;
        V_I4(pDest) = value;
        // Coerce to the requested type unless the caller accepts any variant.
        if (vt != VT_VARIANT && vt != VT_USERDEFINED) {
            VariantChangeType(pDest, pDest, 0, vt);
        }

    } else if (m_pObj->typePtr == TclTypes::doubleType()) {
        double value;
        if (Tcl_GetDoubleFromObj(interp, m_pObj, &value) != TCL_OK) {
            value = 0.0;
        }
        V_VT(pDest) = VT_R8;
        V_R8(pDest) = value;
        if (vt != VT_VARIANT && vt != VT_USERDEFINED) {
            VariantChangeType(pDest, pDest, 0, vt);
        }

    } else if (m_pObj->typePtr == TclTypes::booleanType()) {
        int value;
        if (Tcl_GetBooleanFromObj(interp, m_pObj, &value) != TCL_OK) {
            value = 0;
        }
        V_VT(pDest) = VT_BOOL;
        V_BOOL(pDest) = (value != 0) ? VARIANT_TRUE : VARIANT_FALSE;
        if (vt != VT_VARIANT && vt != VT_USERDEFINED) {
            VariantChangeType(pDest, pDest, 0, vt);
        }

    } else if (vt == VT_BOOL) {
        V_VT(pDest) = VT_BOOL;
        V_BOOL(pDest) = getBool() ? VARIANT_TRUE : VARIANT_FALSE;

    } else {
        // Fallback: string representation.
        V_VT(pDest) = VT_BSTR;
        V_BSTR(pDest) = getBSTR();

        // If trying to convert from a string to a date,
        // we need to convert to a double (VT_R8) first.
        if (vt == VT_DATE) {
            VariantChangeType(pDest, pDest, 0, VT_R8);
        }

        // Try to convert from a string representation.
        if (vt != VT_VARIANT && vt != VT_USERDEFINED && vt != VT_LPWSTR) {
            VariantChangeType(pDest, pDest, 0, vt);
        }
    }
}
// Non-maximum suppression over tracked detections: for every pair of
// overlapping boxes whose intersection-over-(smaller-candidate)-area exceeds
// in_nms_threshold, the later-ordered tracker is suppressed (its lifespan is
// nullified) and, if it is the longer-lived of the pair, its object id is
// transferred to the survivor so identity is preserved.
//
// in_out_source    : trackers, modified in place (suppressed ones nullified).
// in_nms_threshold : overlap ratio above which a box is suppressed.
void ApplyNonMaximumSuppresion(std::vector< LkTracker* >& in_out_source, float in_nms_threshold)
{
	if (in_out_source.empty())
		return;

	unsigned int size = in_out_source.size();

	std::vector<float> area(size);
	std::vector<float> scores(size);
	std::vector<int> x1(size);
	std::vector<int> y1(size);
	std::vector<int> x2(size);
	std::vector<int> y2(size);
	std::vector<unsigned int> indices(size);
	std::vector<bool> is_suppresed(size);

	// Gather box corners, areas and scores; boxes with non-positive area are
	// suppressed immediately.
	for(unsigned int i = 0; i< in_out_source.size(); i++)
	{
		ObjectDetection tmp = in_out_source[i]->GetTrackedObject();
		area[i] = tmp.rect.width * tmp.rect.height;
		if (area[i]>0)
			is_suppresed[i] = false;
		else
		{
			is_suppresed[i] = true;
			in_out_source[i]->NullifyLifespan();
		}
		indices[i] = i;
		scores[i] = tmp.score;
		x1[i] = tmp.rect.x;
		y1[i] = tmp.rect.y;
		x2[i] = tmp.rect.width + tmp.rect.x;
		y2[i] = tmp.rect.height + tmp.rect.y;
	}

	// NOTE(review): this sorts by `area`, yet the comment (and the unused
	// `scores` vector above) suggest score-based ordering was intended —
	// confirm Sort()'s contract against the original implementation.
	Sort(area, indices);//returns indices ordered based on scores

	// Pairwise suppression in sorted order.
	for(unsigned int i=0; i< size; i++)
	{
		for(unsigned int j= i+1; j< size; j++)
		{
			if(is_suppresed[indices[i]] || is_suppresed[indices[j]])
				continue;
			// Intersection rectangle (inclusive pixel coordinates).
			int x1_max = std::max(x1[indices[i]], x1[indices[j]]);
			int x2_min = std::min(x2[indices[i]], x2[indices[j]]);
			int y1_max = std::max(y1[indices[i]], y1[indices[j]]);
			int y2_min = std::min(y2[indices[i]], y2[indices[j]]);
			int overlap_width = x2_min - x1_max + 1;
			int overlap_height = y2_min - y1_max + 1;
			if(overlap_width > 0 && overlap_height>0)
			{
				// Overlap is normalized by the candidate's own area.
				float overlap_part = (overlap_width*overlap_height)/area[indices[j]];
				if(overlap_part > in_nms_threshold)
				{
					is_suppresed[indices[j]] = true;
					in_out_source[indices[j]]->NullifyLifespan();
					// Keep the older identity on the surviving tracker.
					if (in_out_source[indices[j]]->GetFrameCount() > in_out_source[indices[i]]->GetFrameCount())
					{
						in_out_source[indices[i]]->object_id = in_out_source[indices[j]]->object_id;
					}
				}
			}
		}
	}
	return ;
}
/**
 * Convenience overload that localizes grasp hypotheses from a stereo pair of
 * PCD files without restricting the search to particular points. Simply
 * forwards to the index-based overload with an empty index list.
 */
std::vector<GraspHypothesis> Localization::localizeHands(const std::string& pcd_filename_left,
	const std::string& pcd_filename_right, bool calculates_antipodal, bool uses_clustering)
{
	std::vector<int> empty_indices;
	return localizeHands(pcd_filename_left, pcd_filename_right, empty_indices,
		calculates_antipodal, uses_clustering);
}
// Builds the XSI polygon mesh for the imported SMD triangles:
//  1. Compresses duplicate vertices (position + UV) into a shared pool and
//     records, per original triangle vertex, its pooled index.
//  2. Emits the polygon index array (XSI format: vertex count then indices,
//     so 4 longs per triangle) and collects per-texture polygon clusters for
//     triangles whose texture differs from the first triangle's.
//  3. Creates the mesh, its default Phong material, an image clip for the
//     global texture, and one cluster + material + clip per local texture.
//  4. Rewrites the mesh's UV cluster property from the pooled vertex UVs.
// All geometry is rotated -90 degrees about X (SMD is Z-up).
void SMDImporter::CreateGeometry()
{
	if ( !m_pTriangles.GetUsed() )
		return;

	long t;
	XSI::MATH::CTransformation xfo;
	// -pi/2 rotation about X maps the SMD axis convention into XSI's.
	xfo.SetRotationFromXYZAnglesValues ( -1.570796, 0.0, 0.0 );

	// Pass 1: pool vertices; m_lVertexMap maps every (triangle, corner) to
	// its pooled index.
	for (t=0;t<m_pTriangles.GetUsed();t++)
	{
		for (int v=0;v<3;v++)
		{
			XSI::MATH::CVector3 vec = XSI::MATH::MapObjectPositionToWorldSpace ( xfo, m_pTriangles[t]->m_pVertex[v].m_vPosition );
			long outindex;
			compress.AddVertex ( vec.GetX(), vec.GetY(), vec.GetZ(),
						m_pTriangles[t]->m_pVertex[v].m_vUV.GetX(),
						m_pTriangles[t]->m_pVertex[v].m_vUV.GetY(),
						&m_pTriangles[t]->m_pVertex[v],
						&outindex);
			m_lVertexMap.Extend(1);
			m_lVertexMap[m_lVertexMap.GetUsed()-1] = outindex;
		}
	}

	// Copy the pooled positions into the XSI vertex array.
	XSI::MATH::CVector3Array verts(compress.GetCount());
	long vindex = 0;
	long cnt = compress.GetCount ();
	for (t=0;t<compress.GetCount ();t++)
	{
		uvvec vec;
		compress.GetVertex (t, &vec);
		verts[t] = XSI::MATH::CVector3 ( vec.x, vec.y, vec.z );
	}

	// Polygon description: one count entry (always 3) + three indices per
	// triangle.
	XSI::CLongArray indices((m_pTriangles.GetUsed() * 3) + m_pTriangles.GetUsed());
	long iindex = 0;
	char *l_szGlobalTexture = m_pTriangles[0]->m_szTexture;
	CSIBCArray<TriCluster> ClusterList;

	// Pass 2: build the index array and the per-texture cluster lists.
	for (t=0;t<m_pTriangles.GetUsed();t++)
	{
		XSI::MATH::CVector3 vec1 = XSI::MATH::MapObjectPositionToWorldSpace ( xfo, m_pTriangles[t]->m_pVertex[0].m_vPosition );
		XSI::MATH::CVector3 vec2 = XSI::MATH::MapObjectPositionToWorldSpace ( xfo, m_pTriangles[t]->m_pVertex[1].m_vPosition );
		XSI::MATH::CVector3 vec3 = XSI::MATH::MapObjectPositionToWorldSpace ( xfo, m_pTriangles[t]->m_pVertex[2].m_vPosition );

		long i1 = compress.GetIndex ( vec1.GetX(), vec1.GetY(), vec1.GetZ(),
					m_pTriangles[t]->m_pVertex[0].m_vUV.GetX(),
					m_pTriangles[t]->m_pVertex[0].m_vUV.GetY());
		long i2 = compress.GetIndex ( vec2.GetX(), vec2.GetY(), vec2.GetZ(),
					m_pTriangles[t]->m_pVertex[1].m_vUV.GetX(),
					m_pTriangles[t]->m_pVertex[1].m_vUV.GetY());
		long i3 = compress.GetIndex ( vec3.GetX(), vec3.GetY(), vec3.GetZ(),
					m_pTriangles[t]->m_pVertex[2].m_vUV.GetX(),
					m_pTriangles[t]->m_pVertex[2].m_vUV.GetY());

		indices[iindex] = 3;
		indices[iindex+1] = i1;
		indices[iindex+2] = i2;
		indices[iindex+3] = i3;
		iindex += 4;

		if ( strcmp ( l_szGlobalTexture, m_pTriangles[t]->m_szTexture ))
		{
			//
			// found a local material
			//
			// Find or create the cluster for this texture and record the
			// triangle (polygon) index in it.
			TriCluster* cls = NULL;
			for (int c=0;c<ClusterList.GetUsed();c++)
			{
				if ( !strcmp ( ClusterList[c].m_szName, m_pTriangles[t]->m_szTexture))
				{
					cls = &ClusterList[c];
					break;
				}
			}
			if ( cls == NULL )
			{
				ClusterList.Extend(1);
				strcpy ( ClusterList[ClusterList.GetUsed()-1].m_szName, m_pTriangles[t]->m_szTexture );
				cls = &ClusterList[ClusterList.GetUsed()-1];
			}
			cls->m_indices.Add ( t );
		}
	}

	// Mesh name comes from the SMD node when present.
	char mname[1024];
	sprintf (mname, "mesh" );
	if ( m_pMeshNode )
	{
		// NOTE(review): non-literal format string passed to sprintf — safe
		// only if FixName() never yields '%'; confirm.
		sprintf (mname, FixName(m_pMeshNode->m_szName));
	}
	LPWSTR l_wszModelName;
	DSA2W(&l_wszModelName,mname);

	m_pModel.AddPolygonMesh ( verts, indices, l_wszModelName, m_pMesh );

	// Create texture support, freeze, and assign the default Phong material.
	XSI::Application app;
	XSI::CValueArray args(4);
	XSI::CValue outArg;
	XSI::CStatus st;
	args[0] = XSI::CValue( XSI::CString(L"") );
	args[1] = XSI::CValue(false);
	args[0] = XSI::CValue(m_pMesh.GetRef());
	args[1] = XSI::CValue((long)XSI::siTxtUV);
	args[2] = XSI::CValue((long)XSI::siTxtDefaultSpherical);
	args[3] = XSI::CValue(XSI::CString(L"Texture_Support"));
	app.ExecuteCommand( L"CreateTextureSupport", args, outArg );

	XSI::CValueArray moreargs(1);
	XSI::CValueArray moreoutargs(3);
	moreargs[0] = m_pMesh.GetRef();
	app.ExecuteCommand(L"FreezeObj",moreargs, outArg);

	XSI::Material l_matMaterial;
	st = m_pMesh.AddMaterial(L"Phong", true, L"CubeMat", l_matMaterial);

	XSI::OGLTexture l_oglTexture(l_matMaterial.GetOGLTexture());
	XSI::CString l_szFullNameDefaultOut = l_oglTexture.GetFullName();
	int l_nHeightDefaultOut = l_oglTexture.GetHeight();
	int l_nWidthDefaultOut = l_oglTexture.GetWidth();

	// Now actually add a texture, so we can test it.
	args[0] = XSI::CValue( XSI::CString(L"Image") );
	args[1] = XSI::CValue(m_pMesh.GetRef());
	args[2] = XSI::CValue((short)1);
	args[3] = XSI::CValue(false);
	st = app.ExecuteCommand( L"BlendInPresets", args, outArg );

	//
	// create the texture and connect
	//
	XSI::CValueArray clipargs(3);
	XSI::ImageClip2 l_pClip;
	char l_szTextureFullname[1024];
	sprintf ( l_szTextureFullname, "%s%s", m_szDirectory, m_pTriangles[0]->m_szTexture);
	char clipname[1024];
	_splitpath ( m_pTriangles[0]->m_szTexture, NULL, NULL, clipname, NULL );

	LPWSTR l_wszClipName;
	DSA2W(&l_wszClipName,l_szTextureFullname);
	LPWSTR l_wszClipName2;
	DSA2W(&l_wszClipName2,clipname);

	clipargs[0] = XSI::CValue( XSI::CString(l_wszClipName) );
	clipargs[1] = XSI::CValue( XSI::CString(l_wszClipName2) );
	clipargs[2] = XSI::CValue(l_pClip.GetRef());
	app.ExecuteCommand( L"SICreateImageClip", clipargs, outArg );

	// Wire the clip into the material's ambient blend image node.
	XSI::CString l_szMaterialName = l_matMaterial.GetFullName();
	XSI::CString l_szImageNode = l_szMaterialName + L".CubeMat.ambient_blend.Image.tex";
	XSI::CString l_szFullclipname = L"Clips." + XSI::CString(l_wszClipName2);

	XSI::CValueArray clipargs2(2);
	clipargs2[0] = XSI::CValue( XSI::CString(l_szFullclipname) );
	clipargs2[1] = XSI::CValue( XSI::CString(l_szImageNode) );
	app.ExecuteCommand( L"SIConnectShaderToCnxPoint", clipargs2, outArg );

	//
	// Create all clusters
	//
	// One polygon cluster + Phong material + image clip per local texture.
	XSI::Geometry geom( m_pMesh.GetActivePrimitive().GetGeometry() );
	for (int b=0;b<ClusterList.GetUsed();b++)
	{
		TriCluster* cls = &ClusterList[b];
		sprintf ( l_szTextureFullname, "%s%s", m_szDirectory, cls->m_szName);
		_splitpath ( cls->m_szName, NULL, NULL, clipname, NULL );
		DSA2W(&l_wszClipName,l_szTextureFullname);
		DSA2W(&l_wszClipName2,clipname);

		XSI::CLongArray array;
		XSI::Cluster polyCluster ;
		geom.AddCluster( XSI::siPolygonCluster, l_wszClipName2, cls->m_indices, polyCluster ) ;

		st = polyCluster.AddMaterial(L"Phong", true, L"CubeMat", l_matMaterial);
		XSI::OGLTexture l_oglTexture(l_matMaterial.GetOGLTexture());

		// Now actually add a texture, so we can test it.
		args[0] = XSI::CValue( XSI::CString(L"Image") );
		args[1] = XSI::CValue(polyCluster.GetRef());
		args[2] = XSI::CValue((short)1);
		args[3] = XSI::CValue(false);
		st = app.ExecuteCommand( L"BlendInPresets", args, outArg );

		clipargs[0] = XSI::CValue( XSI::CString(l_wszClipName) );
		clipargs[1] = XSI::CValue( XSI::CString(l_wszClipName2) );
		clipargs[2] = XSI::CValue(l_pClip.GetRef());
		app.ExecuteCommand( L"SICreateImageClip", clipargs, outArg );

		l_szMaterialName = l_matMaterial.GetFullName();
		l_szImageNode = l_szMaterialName + L".CubeMat.ambient_blend.Image.tex";
		l_szFullclipname = L"Clips." + XSI::CString(l_wszClipName2);

		clipargs2[0] = XSI::CValue( XSI::CString(l_szFullclipname) );
		clipargs2[1] = XSI::CValue( XSI::CString(l_szImageNode) );
		app.ExecuteCommand( L"SIConnectShaderToCnxPoint", clipargs2, outArg );
	}

	// Rewrite the UV cluster property from the pooled vertex UVs, walking the
	// polygon index array in step with the per-sample UV elements.
	if ( m_pMesh.IsValid () )
	{
		XSI::Geometry geom( m_pMesh.GetActivePrimitive().GetGeometry() );
		XSI::PolygonMesh mesh(m_pMesh.GetActivePrimitive().GetGeometry());
		XSI::CPointRefArray Geompoints = geom.GetPoints();
		XSI::CTriangleRefArray triangles(geom.GetTriangles());
		XSI::ClusterProperty UVWProp(m_pMesh.GetMaterial().GetCurrentUV());
		if ( UVWProp.IsValid() )
		{
			XSI::CClusterPropertyElementArray clusterPropertyElements = UVWProp.GetElements();
			XSI::CDoubleArray elementArray = clusterPropertyElements.GetArray();
			long totalUvCount = elementArray.GetCount ();

			int cc=0;
			int uvc = 0;
			for (int c=0;c<m_pTriangles.GetUsed();c++)
			{
				long l_iNumVertex = indices[cc];
				cc++;
				for (int i=0;i<l_iNumVertex;i++)
				{
					long l_iID = indices[cc];
					cc++;
					uvvec vec;
					compress.GetVertex (l_iID, &vec);
					// Each UV sample is a (u, v, w) triple; w is unused.
					elementArray[ uvc * 3 ] = vec.u;
					elementArray[ (uvc * 3) + 1] = vec.v;
					elementArray[ (uvc * 3) + 2] = 0.0f;
					uvc++;
				}
			}
			clusterPropertyElements.PutArray(elementArray);
		}
	}
}
//int MeshOn2D(const std::vector<ModelT> &model_set, const std::vector<poseT> &poses, cv::Mat &map2d, float fx = FOCAL_X, float fy = FOCAL_Y); float segAcc(const std::vector<ModelT> &model_set, const std::vector<poseT> &poses, pcl::PointCloud<PointT>::Ptr cloud) { pcl::PointCloud<PointT>::Ptr link_cloud(new pcl::PointCloud<PointT>()); pcl::PointCloud<PointT>::Ptr node_cloud(new pcl::PointCloud<PointT>()); for( int i = 0 ; i < model_set.size() ; i++ ) { uint32_t cur_label; pcl::PointCloud<PointT>::Ptr cur_cloud(new pcl::PointCloud<PointT>()); if( model_set[i].model_label == "link" ) { cur_label = 1; cur_cloud = link_cloud; } else if( model_set[i].model_label == "node" ) { cur_label = 2; cur_cloud = node_cloud; } pcl::copyPointCloud(*model_set[i].model_cloud, *cur_cloud); for( pcl::PointCloud<PointT>::iterator it = cur_cloud->begin() ; it < cur_cloud->end() ; it++ ) it->rgba = cur_label; } Eigen::Quaternionf calibrate_rot(Eigen::AngleAxisf(M_PI/2, Eigen::Vector3f (1, 0, 0))); pcl::PointCloud<PointT>::Ptr all_cloud(new pcl::PointCloud<PointT>()); for(std::vector<poseT>::const_iterator it = poses.begin() ; it < poses.end() ; it++) { for( int i = 0 ; i < model_set.size() ; i++ ) { if(model_set[i].model_label == it->model_name ) { pcl::PointCloud<PointT>::Ptr cur_cloud(new pcl::PointCloud<PointT>()); if( it->model_name == "link" ) pcl::copyPointCloud(*link_cloud, *cur_cloud); else if( it->model_name == "node" ) pcl::copyPointCloud(*node_cloud, *cur_cloud); pcl::transformPointCloud(*cur_cloud, *cur_cloud, it->shift, it->rotation*calibrate_rot); all_cloud->insert(all_cloud->end(), cur_cloud->begin(), cur_cloud->end()); } } } pcl::search::KdTree<PointT> tree; tree.setInputCloud (all_cloud); uint8_t tmp_color = 255; uint32_t red = tmp_color << 16; uint32_t blue = tmp_color; int pos_count = 0; float T = 0.02*0.02; for ( pcl::PointCloud<PointT>::iterator it = cloud->begin() ; it < cloud->end() ; it++ ) { std::vector<int> indices (1); std::vector<float> sqr_distances (1); int 
nres = tree.nearestKSearch(*it, 1, indices, sqr_distances); if( it->rgba > 255 ) it->rgba = 1; else if( it->rgba > 0 ) it->rgba = 2; else it->rgba = 0; if ( nres == 1 && sqr_distances[0] < T ) { if(it->rgba == all_cloud->at(indices[0]).rgba) pos_count++; } else if( it->rgba == 0 || sqr_distances[0] > T ) pos_count++; if( nres == 1 && sqr_distances[0] < T ) { if( all_cloud->at(indices[0]).rgba == 1 ) it->rgba = red; else if( all_cloud->at(indices[0]).rgba == 2 ) it->rgba = blue; } else it->rgba = 0; } return (pos_count +0.0) / cloud->size(); }
// Computes a pruning bound B(queryNode) for fast max-kernel search: the best
// kernel value any descendant of queryNode could still achieve must exceed
// this bound for the node to be worth visiting. The bound is the max of an
// adjusted point/child bound and the parent's bound (bounds are monotone down
// the tree).
double FastMKSRules<KernelType, TreeType>::CalculateBound(TreeType& queryNode) const
{
  // We have four possible bounds -- just like NeighborSearchRules, but they are
  // slightly different in this context.
  //
  // (1) min ( min_{all points p in queryNode} P_p[k],
  //           min_{all children c in queryNode} B(c) );
  // (2) max_{all points p in queryNode} P_p[k] + (worst child distance + worst
  //           descendant distance) sqrt(K(I_p[k], I_p[k]));
  // (3) max_{all children c in queryNode} B(c) + <-- not done yet.  ignored.
  // (4) B(parent of queryNode);
  double worstPointKernel = DBL_MAX;
  double bestAdjustedPointKernel = -DBL_MAX;

  const double queryDescendantDistance = queryNode.FurthestDescendantDistance();

  // Loop over all points in this node to find the best and worst.
  // products(last_row, point) holds the k-th best kernel value found so far
  // for that query point.
  for (size_t i = 0; i < queryNode.NumPoints(); ++i)
  {
    const size_t point = queryNode.Point(i);
    if (products(products.n_rows - 1, point) < worstPointKernel)
      worstPointKernel = products(products.n_rows - 1, point);

    if (products(products.n_rows - 1, point) == -DBL_MAX)
      continue; // Avoid underflow.

    // This should be (queryDescendantDistance + centroidDistance) for any tree
    // but it works for cover trees since centroidDistance = 0 for cover trees.
    const double candidateKernel = products(products.n_rows - 1, point) -
        queryDescendantDistance *
        referenceKernels[indices(indices.n_rows - 1, point)];

    if (candidateKernel > bestAdjustedPointKernel)
      bestAdjustedPointKernel = candidateKernel;
  }

  // Loop over all the children in the node.
  double worstChildKernel = DBL_MAX;

  for (size_t i = 0; i < queryNode.NumChildren(); ++i)
  {
    if (queryNode.Child(i).Stat().Bound() < worstChildKernel)
      worstChildKernel = queryNode.Child(i).Stat().Bound();
  }

  // Now assemble bound (1).
  const double firstBound = (worstPointKernel < worstChildKernel) ?
      worstPointKernel : worstChildKernel;

  // Bound (2) is bestAdjustedPointKernel.

  // Bound (4): inherit the parent's bound when one exists.
  const double fourthBound = (queryNode.Parent() == NULL) ? -DBL_MAX :
      queryNode.Parent()->Stat().Bound();

  // Pick the best of these bounds.
  const double interA = (firstBound > bestAdjustedPointKernel) ? firstBound :
      bestAdjustedPointKernel;
  // const double interA = 0.0;
  const double interB = fourthBound;

  return (interA > interB) ? interA : interB;
}
int main(int argc, char** argv) { // initialize variables pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_original (new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_unaltered (new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_objects (new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_planeInliers (new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_planeOutliers (new pcl::PointCloud<pcl::PointXYZ>); pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients()); pcl::PointIndices::Ptr indices (new pcl::PointIndices()); pcl::PCDReader reader; pcl::PCDWriter writer; int result = 0; // catch function return // enable variables for time logging boost::posix_time::ptime time_before_execution; boost::posix_time::ptime time_after_execution; boost::posix_time::time_duration difference; // read point cloud through command line if (argc != 2) { std::cout << "usage: " << argv[0] << " <filename>\n"; return 0; } else { // read cloud and display its size std::cout << "Reading Point Cloud" << std::endl; reader.read(argv[1], *cloud_original); #ifdef DEBUG std::cout << "size of original cloud: " << cloud_original->points.size() << " points" << std::endl; #endif } //************************************************************************** // test passthroughFilter() function std::cout << "RUNNING PASSTHROUGH FILTER TESTS" << std::endl; log_passthroughFilter(cloud_original, cloud_filtered, "../pictures/T01_passthrough_01.pcd", "passthrough filter, custom 1: ","z", -2.5, 0); log_passthroughFilter(cloud_original, cloud_filtered, "../pictures/T01_passthrough_02.pcd", "passthrough filter, custom 2: ","y", -3.0, 3.0); log_passthroughFilter(cloud_original, cloud_filtered, "../pictures/T01_passthrough_03.pcd", "passthrough filter, custom 3: ","x", -3.0, 3.0); 
//************************************************************************** // test voxelFilter() function std::cout << "RUNNING VOXEL FILTER TEST" << std::endl; log_voxelFilter(cloud_original, cloud_filtered, "../pictures/T02_voxel_01.pcd", "voxel filter, custom 1:", 0.01); log_voxelFilter(cloud_original, cloud_filtered, "../pictures/T02_voxel_02.pcd", "voxel filter, custom 2:", 0.02); log_voxelFilter(cloud_original, cloud_filtered, "../pictures/T02_voxel_03.pcd", "voxel filter, custom 3:", 0.04); log_voxelFilter(cloud_original, cloud_filtered, "../pictures/T02_voxel_04.pcd", "voxel filter, custom 4:", 0.08); //************************************************************************** // test removeNoise() function std::cout << "RUNNING NOISE REMOVAL TEST" << std::endl; log_removeNoise(cloud_original, cloud_filtered, "../pictures/T03_noise_01.pcd", "noise removal, custom 1: ", 50, 1.0); log_removeNoise(cloud_original, cloud_filtered, "../pictures/T03_noise_01.pcd", "noise removal, custom 1: ", 100, 1.0); log_removeNoise(cloud_original, cloud_filtered, "../pictures/T03_noise_01.pcd", "noise removal, custom 1: ", 10, 1.0); log_removeNoise(cloud_original, cloud_filtered, "../pictures/T03_noise_01.pcd", "noise removal, custom 1: ", 50, 1.9); log_removeNoise(cloud_original, cloud_filtered, "../pictures/T03_noise_01.pcd", "noise removal, custom 1: ", 50, 0.1); //************************************************************************** // test getPlane() function std::cout << "RUNNING PLANE SEGMENTATION TEST" << std::endl; log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_01.pcd", "plane segmentation, custom 1: ", 0, 1.57, 1000, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_02.pcd", "plane segmentation, custom 2: ", 0, 1.57, 1, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, 
coefficients, indices, "../pictures/T04_planeInliers_03.pcd", "plane segmentation, custom 3: ", 0, 1.57, 2000, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_04.pcd", "plane segmentation, custom 4: ", 0, 0.76, 1000, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_05.pcd", "plane segmentation, custom 5: ", 0, 2.09, 1000, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_06.pcd", "plane segmentation, custom 6: ", 0.35, 1.57, 1000, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_07.pcd", "plane segmentation, custom 7: ", 0.79, 1.57, 1000, 0.01); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_08.pcd", "plane segmentation, custom 8: ", 0, 1.57, 1000, 0.001); log_getPlane(cloud_original, cloud_planeInliers, cloud_planeOutliers, coefficients, indices, "../pictures/T04_planeInliers_09.pcd", "plane segmentation, custom 9: ", 0, 1.57, 1000, 0.01); //************************************************************************** // test getPrism() function std::cout << "RUNNING EXTRACT PRISM TEST" << std::endl; log_getPrism(cloud_original, cloud_objects, "../pictures/T05_objects_01.pcd", "polygonal prism, custom 1: ", 0, 1.57, 1000, 0.01, 0.02, 0.2); // test getPrism() function std::cout << "RUNNING EXTRACT PRISM TEST" << std::endl; // get output from default plane time_before_execution = boost::posix_time::microsec_clock::local_time(); // time before result = c44::getPrism(cloud_original, cloud_objects, 0, 1.57, 1000, 0.01, 0.02, 0.2); time_after_execution = boost::posix_time::microsec_clock::local_time(); // time after if(result < 0) { std::cout << "ERROR: could not find polygonal 
prism data" << std::endl; } else { writer.write<pcl::PointXYZ> ("../pictures/T05_objects_01.pcd", *cloud_objects); // write out cloud #ifdef DEBUG difference = time_after_execution - time_before_execution; // get execution time std::cout << std::setw(5) << difference.total_milliseconds() << ": " << "polygonal prism, default: " << cloud_objects->points.size() << " points" << std::endl; #endif } return 0; }
// Reads the index buffer described by `info` from `reader` and returns it
// widened to 32-bit indices.
//
// The previous implementation always read numberIndices * indexSize bytes
// straight into a uint32_t buffer (4 bytes per entry): for indexSize < 4 the
// values were misinterpreted, and for indexSize > 4 it overflowed the heap
// buffer. Now narrow indices are read as raw bytes and widened per entry.
std::vector<uint32_t> G3D::readIndices(AbstractReader& reader, const GeometryInfo& info)
{
    std::vector<uint32_t> indices(info.numberIndices);
    if (info.indexSize == sizeof(uint32_t))
    {
        // Fast path: indices are stored as 32-bit values; read them in place.
        reader.read((char*)indices.data(), info.numberIndices * info.indexSize);
    }
    else
    {
        // Read the raw index bytes, then widen each indexSize-byte value.
        // NOTE(review): assumes the file stores indices little-endian, as the
        // old direct read did on little-endian hosts — confirm against the
        // G3D format specification.
        std::vector<char> raw(info.numberIndices * info.indexSize);
        reader.read(raw.data(), raw.size());
        for (size_t i = 0; i < info.numberIndices; ++i)
        {
            uint32_t value = 0;
            for (size_t b = 0; b < info.indexSize && b < sizeof(uint32_t); ++b)
            {
                value |= uint32_t((unsigned char)raw[i * info.indexSize + b]) << (8 * b);
            }
            indices[i] = value;
        }
    }
    return indices;
}
std::vector<uint8_t> Bwt::encode(const std::vector<uint8_t> & source) const { const uint32_t srcSize = static_cast<uint32_t>(source.size()); if (srcSize > 0 && m_blockSize > 0) { //allocate destination data std::vector<uint8_t> dest(srcSize + 4 + 4 + 4 * ((srcSize / m_blockSize) + 1)); uint32_t destIndex = 0; //output source size *((uint32_t *)&dest[destIndex]) = srcSize; destIndex += 4; //output BWT block size *((uint32_t *)&dest[destIndex]) = m_blockSize; destIndex += 4; //build array of indices into source array std::vector<int32_t> indices(2 * m_blockSize); //allocate array for block data storage and easier access std::vector<uint8_t> block(2 * m_blockSize); //loop through blocks uint32_t srcIndex = 0; while (srcIndex < srcSize) { //clamp block size so we don't read past the end of source const uint32_t size = (srcIndex + m_blockSize) > srcSize ? (srcSize - srcIndex) : m_blockSize; //copy data into block in reverse order and duplicate it. this makes it possible to write the data //with increasing indices when decoding. also the suffix array algorithm will generate un-decodable data //when using only a single block of data. the generated indices are screwed up. //If you know the correct way to sort the string without needing to duplicate indices, please let me know! 
std::reverse_copy(std::next(source.cbegin(), srcIndex), std::next(source.cbegin(), srcIndex + size), block.begin()); std::reverse_copy(std::next(source.cbegin(), srcIndex), std::next(source.cbegin(), srcIndex + size), std::next(block.begin(), size)); //build suffix array from data saisxx<uint8_t *, int32_t *, int32_t>(block.data(), indices.data(), 2 * size, 256); //store reference to start index for writing it later uint32_t & startIndex = ((uint32_t &)dest[destIndex]); destIndex += 4; //encode data to output while looking for start index uint32_t count = 0; for (uint32_t i = 0; i < 2 * size; ++i) { uint32_t index = static_cast<uint32_t>(indices[i]); //we have duplicated the input data, thus we only need to use indices that are "from the first half" if (index < size) { //check if we've found the start index if (index == 0) { //yes. store it and copy last input symbol to output startIndex = count; index = size; } //store symbol from input data to output data dest[destIndex++] = block[index - 1]; count++; } } //move to next block of input data srcIndex += size; } dest.resize(destIndex); return dest; } return std::vector<uint8_t>(); }
void SkyBox::load( const he::String& asset ) { HE_ASSERT(m_Drawable == nullptr, "Skybox is loaded twice!"); m_Drawable = HENew(Drawable)(); m_Drawable->setLocalScale(vec3(1000000000)); // bounds must be huge ////////////////////////////////////////////////////////////////////////// /// Load Model ////////////////////////////////////////////////////////////////////////// ModelMesh* const cube( ResourceFactory<gfx::ModelMesh>::getInstance()->get(ResourceFactory<gfx::ModelMesh>::getInstance()->create())); cube->setName(he::String("skybox-") + asset); he::PrimitiveList<vec3> vertices(8); vertices.add(vec3(-1, 1, -1)); vertices.add(vec3( 1, 1, -1)); vertices.add(vec3(-1, -1, -1)); vertices.add(vec3( 1, -1, -1)); vertices.add(vec3(-1, 1, 1)); vertices.add(vec3( 1, 1, 1)); vertices.add(vec3(-1, -1, 1)); vertices.add(vec3( 1, -1, 1)); he::PrimitiveList<uint16> indices(36); indices.add(0); indices.add(1); indices.add(2); //front indices.add(1); indices.add(3); indices.add(2); indices.add(5); indices.add(4); indices.add(7); //back indices.add(4); indices.add(6); indices.add(7); indices.add(4); indices.add(0); indices.add(6); //left indices.add(0); indices.add(2); indices.add(6); indices.add(1); indices.add(5); indices.add(3); //right indices.add(5); indices.add(7); indices.add(3); indices.add(4); indices.add(5); indices.add(0); //top indices.add(5); indices.add(1); indices.add(0); indices.add(3); indices.add(7); indices.add(2); //bottom indices.add(7); indices.add(6); indices.add(2); VertexLayout layout; layout.addElement(VertexElement(eShaderAttribute_Position, eShaderAttributeType_Float, eShaderAttributeTypeComponents_3, 0)); cube->init(layout, MeshDrawMode_Triangles); cube->setVertices(&vertices[0], static_cast<uint32>(vertices.size()), MeshUsage_Static, false); cube->setIndices(&indices[0], static_cast<uint32>(indices.size()), IndexStride_UShort, MeshUsage_Static); cube->setLoaded(eLoadResult_Success); m_Drawable->setModelMesh(cube); cube->release(); Material* const 
material(CONTENT->loadMaterial("engine/sky.material")); m_Drawable->setMaterial(material); material->release(); const int8 cubeMap(m_Drawable->getMaterial()->findParameter(HEFS::strcubeMap)); if (cubeMap >= 0) { const TextureCube* cube(CONTENT->asyncLoadTextureCube(asset)); m_Drawable->getMaterial()->getParameter(cubeMap).setTextureCube(cube); cube->release(); } }
// Builds a unit cube (side length 1, centered on the origin) as GPU data:
// 24 vertices (4 per face, so each face can have flat attributes) and
// 36 indices (2 triangles per face), then hands the buffers to Init() and
// records the counts/stride in myIndexData/myVertexData.
void GPUData::InitCube(EffectID aEffect, GPUContext& aGPUContext, AssetContainer& aAssetContainer)
{
#pragma region Vertices
    CU::GrowingArray<VertexPosColor> vertices;
    vertices.Init(24);
    // Half-extents of a 1x1x1 cube centered at the origin.
    float size = 1.f;
    float halfWidth = size / 2.f;
    float halfHeight = size / 2.f;
    float halfDepth = size / 2.f;
    // All vertices share the same opaque white color.
    CU::Vector4<float> aColour(1.f, 1.f, 1.f, 1.f);
    //0 - 3 (Top)
    vertices.Add({ { -halfWidth, halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, halfHeight, halfDepth }, aColour });
    vertices.Add({ { -halfWidth, halfHeight, halfDepth }, aColour });
    //4 - 7 (Bottom)
    vertices.Add({ { -halfWidth, -halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, -halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, -halfHeight, halfDepth }, aColour });
    vertices.Add({ { -halfWidth, -halfHeight, halfDepth }, aColour });
    //8 - 11 (Left)
    vertices.Add({ { -halfWidth, -halfHeight, halfDepth }, aColour });
    vertices.Add({ { -halfWidth, -halfHeight, -halfDepth }, aColour });
    vertices.Add({ { -halfWidth, halfHeight, -halfDepth }, aColour });
    vertices.Add({ { -halfWidth, halfHeight, halfDepth }, aColour });
    //12 - 15 (Right)
    vertices.Add({ { halfWidth, -halfHeight, halfDepth }, aColour });
    vertices.Add({ { halfWidth, -halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, halfHeight, halfDepth }, aColour });
    //16 - 19 (Front)
    vertices.Add({ { -halfWidth, -halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, -halfHeight, -halfDepth }, aColour });
    vertices.Add({ { halfWidth, halfHeight, -halfDepth }, aColour });
    vertices.Add({ { -halfWidth, halfHeight, -halfDepth }, aColour });
    //20 - 23 (Back)
    vertices.Add({ { -halfWidth, -halfHeight, halfDepth }, aColour });
    vertices.Add({ { halfWidth, -halfHeight, halfDepth }, aColour });
    vertices.Add({ { halfWidth, halfHeight, halfDepth }, aColour });
    vertices.Add({ { -halfWidth, halfHeight, halfDepth }, aColour });
#pragma endregion
#pragma region Indices
    // NOTE(review): initialized with capacity 24 but 36 indices are added —
    // presumably GrowingArray grows on demand; verify, or init with 36.
    CU::GrowingArray<int> indices(24);
    //Top
    indices.Add(3); indices.Add(1); indices.Add(0);
    indices.Add(2); indices.Add(1); indices.Add(3);
    //Bottom
    indices.Add(6); indices.Add(4); indices.Add(5);
    indices.Add(7); indices.Add(4); indices.Add(6);
    //Left
    indices.Add(11); indices.Add(9); indices.Add(8);
    indices.Add(10); indices.Add(9); indices.Add(11);
    //Right
    indices.Add(14); indices.Add(12); indices.Add(13);
    indices.Add(15); indices.Add(12); indices.Add(14);
    //Front
    indices.Add(19); indices.Add(17); indices.Add(16);
    indices.Add(18); indices.Add(17); indices.Add(19);
    //Back
    indices.Add(22); indices.Add(20); indices.Add(21);
    indices.Add(23); indices.Add(20); indices.Add(22);
#pragma endregion
    SetTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    // Interleaved layout: float3 position at offset 0, float4 color at offset 12.
    AddInputElement(new D3D11_INPUT_ELEMENT_DESC({ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 }));
    AddInputElement(new D3D11_INPUT_ELEMENT_DESC({ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 }));
    int indexCount = indices.Size();
    int vertexCount = vertices.Size();
    int vertexStride = sizeof(VertexPosColor);
    // Upload both buffers; Init presumably copies them (they are locals).
    Init(aEffect, indexCount, reinterpret_cast<char*>(&indices[0]), vertexCount
        , vertexStride, reinterpret_cast<char*>(&vertices[0]), aGPUContext, aAssetContainer);
    // Record metadata about what was uploaded.
    myIndexData = new IndexData();
    myVertexData = new VertexData();
    myIndexData->myNumberOfIndices = indexCount;
    myVertexData->myNumberOfVertices = vertexCount;
    myVertexData->myStride = vertexStride;
}
// Adapts this VQ codebook toward a prior model (`other`, which must be a
// VQModel) using `samples`:
//   1. Copies the prior's centroids as the starting point.
//   2. For `iterations` rounds: assigns each sample to its nearest centroid,
//      recomputes each centroid as the mean of its assigned samples, then
//      interpolates between that mean and the prior centroid with weight
//      w = n_c / (n_c + relevanceFactor) (more samples => trust the data more).
// Does nothing (after printing a message) if `other` is not a VQModel.
void VQModel::Adapt(const std::shared_ptr<Model>& other, const std::vector< DynamicVector<Real> >& samples, unsigned int iterations, Real relevanceFactor)
{
    const VQModel* model = dynamic_cast<VQModel*>(other.get());
    if (model == nullptr)
    {
        std::cout << "Not VQModel." << std::endl;
        return;
    }
    // Mirror the prior model's order and reset our own state.
    SetOrder(model->GetOrder());
    Init();
    // indices[n] = centroid assigned to sample n (always set below, since the
    // first distance compared is < max()).
    std::vector<unsigned int> indices(samples.size());
    // Initialize the feature vectors of the centroids.
    for (unsigned int c = 0; c < GetOrder(); c++)
    {
        mClusterCentroids[c] = model->mClusterCentroids[c];
    }
    // Do the iterations.
    for (unsigned int i = 0; i < iterations; i++)
    {
        //Find the closest centroid to each sample
        for (unsigned int n = 0; n < samples.size(); n++)
        {
            Real minDist = std::numeric_limits<Real>::max();
            for (unsigned int c = 0; c < GetOrder(); c++)
            {
                Real dist = samples[n].Distance(mClusterCentroids[c]);
                if (dist < minDist)
                {
                    minDist = dist;
                    indices[n] = c;
                }
            }
        }
        //Set the centroids to the average of the samples in each centroid
        for (unsigned int c = 0; c < GetOrder(); ++c)
        {
            mClusterCentroids[c].Assign(0.0f);
            mClusterSizes[c] = 0;
        }
        for (unsigned int s = 0; s < samples.size(); ++s)
        {
            mClusterCentroids[indices[s]].Add(samples[s]);
            ++mClusterSizes[indices[s]];
        }
        for (unsigned int c = 0; c < GetOrder(); ++c)
        {
            // Empty clusters are left at zero here; the interpolation below
            // then pulls them entirely back to the prior centroid (w == 0).
            if (mClusterSizes[c] > 0)
            {
                mClusterCentroids[c].Multiply(1.0f / static_cast<Real>(mClusterSizes[c]));
            }
        }
        //Calculate the adapted values:
        //  centroid = w * sampleMean + (1 - w) * priorCentroid
        for (unsigned int c = 0; c < GetOrder(); ++c)
        {
            Real size = static_cast<Real>(mClusterSizes[c]);
            Real w = size / (size + static_cast<Real>(relevanceFactor));
            DynamicVector<Real> ubmc = model->mClusterCentroids[c];
            ubmc.Multiply(1.0f - w);
            mClusterCentroids[c].Multiply(w);
            mClusterCentroids[c].Add(ubmc);
        }
    }
}
bool avtLCSFilter::RectilinearGridIterativeCalc( std::vector<avtIntegralCurve*> &ics ) { //algorithm sends index to global datastructure as well as end points. //Send List of index into global array to rank 0 //Send end positions into global array to rank 0 size_t nics = ics.size(); //loop over all the intelgral curves and add it back to the //original list of seeds. intVector indices(nics); doubleVector points(nics*3); doubleVector times(nics); for(size_t i=0, j=0; i<nics; ++i, j+=3) { avtStreamlineIC * ic = (avtStreamlineIC *) ics[i]; indices[i] = ic->id; avtVector point = ic->GetEndPoint(); points[j+0] = point[0]; points[j+1] = point[1]; points[j+2] = point[2]; if( doPathlines ) times[i] = ic->GetTime() - seedTime0; else times[i] = ic->GetTime(); } int* all_indices = 0; int* index_counts = 0; double* all_points = 0; int *point_counts = 0; double* all_times = 0; int *time_counts = 0; Barrier(); CollectIntArraysOnRootProc(all_indices, index_counts, &indices.front(), (int)indices.size()); CollectDoubleArraysOnRootProc(all_points, point_counts, &points.front(), (int)points.size()); CollectDoubleArraysOnRootProc(all_times, time_counts, ×.front(), (int)times.size()); Barrier(); //root should now have index into global structure and all //matching end positions. if(PAR_Rank() != 0) { return true; } else { //variable name. std::string var = outVarRoot + outVarName; //now global grid has been created. 
if( fsle_ds == 0 ) fsle_ds = CreateIterativeCalcDataSet(); // Get the stored data arrays vtkDoubleArray *exponents = (vtkDoubleArray *) fsle_ds->GetPointData()->GetArray(var.c_str()); vtkDoubleArray *component = (vtkDoubleArray *) fsle_ds->GetPointData()->GetArray("component"); vtkDoubleArray *times = (vtkDoubleArray *) fsle_ds->GetPointData()->GetArray("times"); size_t nTuples = exponents->GetNumberOfTuples(); // Storage for the points and times std::vector<avtVector> remapPoints(nTuples); std::vector<double> remapTimes(nTuples); //update remapPoints with new value bounds from integral curves. int par_size = PAR_Size(); size_t total = 0; for(int i = 0; i < par_size; ++i) { if(index_counts[i]*3 != point_counts[i] || index_counts[i] != time_counts[i]) { EXCEPTION1(VisItException, "Index count does not the result count." ); } total += index_counts[i]; } for(size_t j=0, k=0; j<total; ++j, k+=3) { size_t index = all_indices[j]; if(nTuples <= index) { EXCEPTION1(VisItException, "More integral curves were generatated than " "grid points." ); } remapPoints[index].set( all_points[k+0], all_points[k+1], all_points[k+2]); remapTimes[index] = all_times[j]; } // Store the times for the exponent. for(size_t l=0; l<nTuples; ++l) times->SetTuple1(l, remapTimes[l]); //use static function in avtGradientExpression to calculate //gradients. since this function only does scalar, break our //vectors into scalar components and calculate one at a time. vtkDataArray* jacobian[3]; for(int i = 0; i < 3; ++i) { // Store the point component by component for(size_t l=0; l<nTuples; ++l) component->SetTuple1(l, remapPoints[l][i]); jacobian[i] = avtGradientExpression::CalculateGradient(fsle_ds, "component"); } for (size_t i = 0; i < nTuples; i++) component->SetTuple1(i, std::numeric_limits<double>::epsilon()); //now have the jacobian - 3 arrays with 3 components. 
ComputeLyapunovExponent(jacobian, component); jacobian[0]->Delete(); jacobian[1]->Delete(); jacobian[2]->Delete(); // Compute the FSLE ComputeFSLE( component, times, exponents ); bool haveAllExponents = true; // For each integral curve check it's mask value to see it // additional integration is required. // ARS - FIX ME not parallelized!!!!!!!! for(size_t i=0; i<ics.size(); ++i) { avtStreamlineIC * ic = (avtStreamlineIC *) ics[i]; int ms = ic->GetMaxSteps(); if( ms < maxSteps ) { ic->SetMaxSteps(ms+1); ic->status.ClearTerminationMet(); } size_t l = ic->id; // The curve id is the index into the VTK data. // Check to see if all exponents have been found. if( exponents->GetTuple1(l) == std::numeric_limits<double>::min() && ms < maxSteps ) haveAllExponents = false; } //cleanup. if (all_indices) delete [] all_indices; if (index_counts) delete [] index_counts; if (all_points) delete [] all_points; if (point_counts) delete [] point_counts; if (all_times) delete [] all_times; if (time_counts) delete [] time_counts; return haveAllExponents; } }
/** Builds an Epetra_CrsGraph whose rows are deliberately distributed
 *  unevenly across processors: the lower half of the ranks get fewer rows,
 *  the upper half more, with a correction on the last rank when the rank
 *  count is odd so the global total stays intact. Each row gets a banded
 *  set of column indices centered on the diagonal and clamped to the
 *  global column range. Throws Isorropia::Exception on Epetra errors.
 */
Teuchos::RCP<Epetra_CrsGraph> create_epetra_graph(int numProcs, int localProc)
{
  if (localProc == 0) {
    std::cout << " creating Epetra_CrsGraph with un-even distribution..."
              << std::endl;
  }

  Epetra_MpiComm comm(MPI_COMM_WORLD);

  int local_num_rows = 800;
  const int nnz_per_row = local_num_rows/4+1;
  const int global_num_rows = numProcs*local_num_rows;
  const int mid_proc = numProcs/2;
  const bool num_procs_even = (numProcs%2 == 0);
  const int adjustment = local_num_rows/2;

  // Skew the distribution: lower-half ranks lose rows, upper-half gain them.
  local_num_rows += (localProc < mid_proc) ? -adjustment : adjustment;

  // With an odd rank count the +/- adjustments don't cancel; undo one on the
  // last rank so the global row total remains numProcs * 800.
  if (localProc == numProcs-1 && !num_procs_even) {
    local_num_rows -= adjustment;
  }

  // Row map built from the per-rank row counts computed above.
  Epetra_Map rowmap(global_num_rows, local_num_rows, 0, comm);

  Teuchos::RCP<Epetra_CrsGraph> graph =
    Teuchos::rcp(new Epetra_CrsGraph(Copy, rowmap, nnz_per_row));

  std::vector<int> indices(nnz_per_row);
  std::vector<double> coefs(nnz_per_row);

  int errcode = 0;
  for(int i=0; i<local_num_rows; ++i) {
    const int row_gid = rowmap.GID(i);

    // Center the band on the diagonal, then clamp into [0, global_num_rows).
    int band_start = row_gid - nnz_per_row/2;
    if (band_start < 0) {
      band_start = 0;
    }
    else if (band_start > (global_num_rows - nnz_per_row)) {
      band_start = global_num_rows - nnz_per_row;
    }

    for(int j=0; j<nnz_per_row; ++j) {
      indices[j] = band_start + j;
      coefs[j] = 1.0;
    }

    errcode = graph->InsertGlobalIndices(row_gid, nnz_per_row, &indices[0]);
    if (errcode < 0) {
      throw Isorropia::Exception("create_epetra_graph: error inserting indices in graph");
    }
  }

  errcode = graph->FillComplete();
  if (errcode != 0) {
    throw Isorropia::Exception("create_epetra_graph: error in graph.FillComplete()");
  }

  return(graph);
}
/** Renders text, optionally clipped to the given rectangle; also checks for
 * characters missing from the font and lazily loads them. When a shadow is
 * requested via font_settings, the function first recursively renders the
 * shadow pass offset by (2,2).
 * \param text The text to render.
 * \param position The rectangle to render into.
 * \param color The color used when rendering.
 * \param hcenter Whether to center horizontally.
 * \param vcenter Whether to center vertically.
 * \param clip Optional clip rectangle; nothing is drawn if fully clipped.
 * \param font_settings \ref FontSettings to use.
 * \param char_collector \ref FontCharCollector to render billboard text.
 */
void FontWithFace::render(const core::stringw& text, const core::rect<s32>& position, const video::SColor& color, bool hcenter, bool vcenter, const core::rect<s32>* clip, FontSettings* font_settings, FontCharCollector* char_collector)
{
#ifndef SERVER_ONLY
    const bool black_border = font_settings ? font_settings->useBlackBorder() : false;
    const bool rtl = font_settings ? font_settings->isRTL() : false;
    const float scale = font_settings ? font_settings->getScale() : 1.0f;
    // NOTE(review): useShadow() presumably returns bool, stored here in a
    // float; works through implicit conversion but should be `const bool`.
    const float shadow = font_settings ?
        font_settings->useShadow() : false;
    if (shadow)
    {
        assert(font_settings);  // Avoid infinite recursion
        // Disable the shadow flag, render the shadow pass offset by (2,2),
        // then restore the flag. The recursive call omits char_collector.
        font_settings->setShadow(false);
        core::rect<s32> shadowpos = position;
        shadowpos.LowerRightCorner.X += 2;
        shadowpos.LowerRightCorner.Y += 2;
        render(text, shadowpos, font_settings->getShadowColor(), hcenter,
            vcenter, clip, font_settings);
        // Set back
        font_settings->setShadow(true);
    }

    core::position2d<float> offset(float(position.UpperLeftCorner.X),
        float(position.UpperLeftCorner.Y));
    core::dimension2d<s32> text_dimension;

    // Alignment and early-out clipping both need the rendered text size.
    if (rtl || hcenter || vcenter || clip)
    {
        text_dimension = getDimension(text.c_str(), font_settings);
        if (hcenter)
            offset.X += (position.getWidth() - text_dimension.Width) / 2;
        else if (rtl)
            offset.X += (position.getWidth() - text_dimension.Width);
        if (vcenter)
            offset.Y += (position.getHeight() - text_dimension.Height) / 2;
        if (clip)
        {
            core::rect<s32> clippedRect(core::position2d<s32>
                (s32(offset.X), s32(offset.Y)), text_dimension);
            clippedRect.clipAgainst(*clip);
            if (!clippedRect.isValid()) return;
        }
    }

    // Collect character locations
    const unsigned int text_size = text.size();
    core::array<s32> indices(text_size);
    core::array<core::position2d<float>> offsets(text_size);
    std::vector<bool> fallback(text_size);

    // Test again if lazy load char is needed,
    // as some text isn't drawn with getDimension
    insertCharacters(text.c_str());
    updateCharactersList();

    for (u32 i = 0; i < text_size; i++)
    {
        wchar_t c = text[i];
        if (c == L'\r' ||          // Windows breaks
            c == L'\n'    )        // Unix breaks
        {
            // Treat "\r\n" as a single break.
            // NOTE(review): text[i+1] reads one past the last character when
            // '\r' is final — presumably safe because core::stringw is
            // null-terminated, but verify.
            if (c==L'\r' && text[i+1]==L'\n')
                c = text[++i];
            offset.Y += m_font_max_height * scale;
            offset.X = float(position.UpperLeftCorner.X);
            if (hcenter)
                offset.X += (position.getWidth() - text_dimension.Width) >> 1;
            continue;
        }   // if lineBreak

        bool use_fallback_font = false;
        const FontArea &area = getAreaFromCharacter(c, &use_fallback_font);
        fallback[i] = use_fallback_font;
        if (char_collector == NULL)
        {
            // Apply per-glyph bearing/offset only for the stored position;
            // the running pen position advances by the char width below.
            float glyph_offset_x = area.bearing_x *
                (fallback[i] ? m_fallback_font_scale : scale);
            float glyph_offset_y = area.offset_y *
                (fallback[i] ? m_fallback_font_scale : scale);
            offset.X += glyph_offset_x;
            offset.Y += glyph_offset_y;
            offsets.push_back(offset);
            offset.X -= glyph_offset_x;
            offset.Y -= glyph_offset_y;
        }
        else
        {
            // Billboard text specific, use offset_y_bt instead
            float glyph_offset_x = area.bearing_x *
                (fallback[i] ? m_fallback_font_scale : scale);
            float glyph_offset_y = area.offset_y_bt *
                (fallback[i] ? m_fallback_font_scale : scale);
            offset.X += glyph_offset_x;
            offset.Y += glyph_offset_y;
            offsets.push_back(offset);
            offset.X -= glyph_offset_x;
            offset.Y -= glyph_offset_y;
        }

        indices.push_back(area.spriteno);
        offset.X += getCharWidth(area, fallback[i], scale);
    }   // for i < text_size

    // Do the actual rendering
    const int indice_amount = indices.size();
    core::array<gui::SGUISprite>& sprites = m_spritebank->getSprites();
    core::array<core::rect<s32>>& positions = m_spritebank->getPositions();
    core::array<gui::SGUISprite>* fallback_sprites;
    core::array<core::rect<s32>>* fallback_positions;
    if (m_fallback_font != NULL)
    {
        fallback_sprites = &m_fallback_font->m_spritebank->getSprites();
        fallback_positions = &m_fallback_font->m_spritebank->getPositions();
    }
    else
    {
        fallback_sprites = NULL;
        fallback_positions = NULL;
    }

    const int sprite_amount = sprites.size();

    if ((black_border || isBold()) && char_collector == NULL)
    {
        // Draw black border first, to make it behind the real character
        // which make script language display better
        video::SColor black(color.getAlpha(),0,0,0);
        for (int n = 0; n < indice_amount; n++)
        {
            const int sprite_id = indices[n];
            // Skip out-of-range sprite ids (non-fallback) and missing glyphs.
            if (!fallback[n] && (sprite_id < 0 || sprite_id >= sprite_amount))
                continue;
            if (indices[n] == -1) continue;
            const int tex_id = (fallback[n] ?
                (*fallback_sprites)[sprite_id].Frames[0].textureNumber :
                sprites[sprite_id].Frames[0].textureNumber);
            core::rect<s32> source = (fallback[n] ?
                (*fallback_positions)[(*fallback_sprites)[sprite_id].Frames[0].rectNumber] :
                positions[sprites[sprite_id].Frames[0].rectNumber]);
            core::dimension2d<float> size(0.0f, 0.0f);
            float cur_scale = (fallback[n] ? m_fallback_font_scale : scale);
            size.Width  = source.getSize().Width  * cur_scale;
            size.Height = source.getSize().Height * cur_scale;
            core::rect<float> dest(offsets[n], size);
            video::ITexture* texture = (fallback[n] ?
                m_fallback_font->m_spritebank->getTexture(tex_id) :
                m_spritebank->getTexture(tex_id));
            // Stamp the glyph at +/-2 px offsets (skipping the axes) to
            // build the border.
            for (int x_delta = -2; x_delta <= 2; x_delta++)
            {
                for (int y_delta = -2; y_delta <= 2; y_delta++)
                {
                    if (x_delta == 0 || y_delta == 0) continue;
                    draw2DImage(texture, dest + core::position2d<float>
                        (float(x_delta), float(y_delta)), source, clip,
                        black, true);
                }
            }
        }
    }

    // Main glyph pass.
    for (int n = 0; n < indice_amount; n++)
    {
        const int sprite_id = indices[n];
        if (!fallback[n] && (sprite_id < 0 || sprite_id >= sprite_amount))
            continue;
        if (indices[n] == -1) continue;
        const int tex_id = (fallback[n] ?
            (*fallback_sprites)[sprite_id].Frames[0].textureNumber :
            sprites[sprite_id].Frames[0].textureNumber);
        core::rect<s32> source = (fallback[n] ?
            (*fallback_positions)[(*fallback_sprites)[sprite_id].Frames[0]
            .rectNumber] : positions[sprites[sprite_id].Frames[0].rectNumber]);
        core::dimension2d<float> size(0.0f, 0.0f);
        float cur_scale = (fallback[n] ? m_fallback_font_scale : scale);
        size.Width  = source.getSize().Width  * cur_scale;
        size.Height = source.getSize().Height * cur_scale;
        core::rect<float> dest(offsets[n], size);
        video::ITexture* texture = (fallback[n] ?
            m_fallback_font->m_spritebank->getTexture(tex_id) :
            m_spritebank->getTexture(tex_id));
        if (fallback[n] || isBold())
        {
            // Fallback/bold glyphs get a skin-defined vertical gradient.
            video::SColor top = GUIEngine::getSkin()->getColor("font::top");
            video::SColor bottom = GUIEngine::getSkin()
                ->getColor("font::bottom");
            top.setAlpha(color.getAlpha());
            bottom.setAlpha(color.getAlpha());
            video::SColor title_colors[] = {top, bottom, top, bottom};
            if (char_collector != NULL)
            {
                char_collector->collectChar(texture, dest, source,
                    title_colors);
            }
            else
            {
                draw2DImage(texture, dest, source, clip, title_colors, true);
            }
        }
        else
        {
            if (char_collector != NULL)
            {
                video::SColor colors[] = {color, color, color, color};
                char_collector->collectChar(texture, dest, source, colors);
            }
            else
            {
                draw2DImage(texture, dest, source, clip, color, true);
            }
        }
    }
#endif
}   // render
// Creates one VAO per mesh in the Assimp scene: uploads index, position,
// normal and (first channel) texture-coordinate buffers, then resolves the
// mesh's material (diffuse texture plus color/shininess properties) and
// appends the resulting AssimpMesh to m_meshes.
//
// FIXES: the texture-coordinate staging buffer was allocated with new[] and
// never freed (QOpenGLBuffer::allocate copies the data, so the heap array
// leaked every call) — replaced with a std::vector. Also, `max` was passed
// uninitialized to aiGetMaterialFloatArray, whose pMax parameter is in/out
// (buffer capacity on input); it is now initialized to 1.
void AssimpScene::setupVAOs(OpenGLFunctions & gl, const aiScene * scene)
{
    assert(scene);

    // For each mesh
    for (unsigned int m = 0; m < scene->mNumMeshes; ++m)
    {
        const aiMesh * mesh = scene->mMeshes[m];

        // create array with faces
        // have to convert from Assimp format to array
        std::vector<unsigned int> indices(mesh->mNumFaces * 3);
        for (unsigned int f = 0, i = 0; f < mesh->mNumFaces; ++f, i += 3)
        {
            const aiFace * face = &mesh->mFaces[f];
            indices[i + 0] = face->mIndices[0];
            indices[i + 1] = face->mIndices[1];
            indices[i + 2] = face->mIndices[2];
        }

        AssimpMesh * amesh = new AssimpMesh();
        amesh->faces = mesh->mNumFaces;
        amesh->vao.create();
        amesh->vao.bind();

        // create buffers
        amesh->indices = new QOpenGLBuffer(QOpenGLBuffer::IndexBuffer);
        amesh->indices->create();
        amesh->indices->setUsagePattern(QOpenGLBuffer::StaticDraw);
        amesh->indices->bind();
        amesh->indices->allocate(indices.data(), indices.size() * sizeof(unsigned int));

        if (mesh->HasPositions())
        {
            amesh->vertices = new QOpenGLBuffer(QOpenGLBuffer::VertexBuffer);
            amesh->vertices->create();
            amesh->vertices->setUsagePattern(QOpenGLBuffer::StaticDraw);
            amesh->vertices->bind();
            amesh->vertices->allocate(mesh->mVertices, mesh->mNumVertices * sizeof(float) * 3);
            gl.glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, nullptr);
            gl.glEnableVertexAttribArray(0);
        }
        if (mesh->HasNormals())
        {
            amesh->normals = new QOpenGLBuffer(QOpenGLBuffer::VertexBuffer);
            amesh->normals->create();
            amesh->normals->setUsagePattern(QOpenGLBuffer::StaticDraw);
            amesh->normals->bind();
            amesh->normals->allocate(mesh->mNormals, mesh->mNumVertices * sizeof(float) * 3);
            // GL_TRUE: normals are normalized on upload
            gl.glVertexAttribPointer(1, 3, GL_FLOAT, GL_TRUE, sizeof(float) * 3, nullptr);
            gl.glEnableVertexAttribArray(1);
        }
        if (mesh->HasTextureCoords(0))
        {
            // Pack the 3D Assimp UVW coords down to interleaved 2D UVs.
            // std::vector frees this staging memory automatically (the old
            // new[] buffer leaked); allocate() deep-copies the data.
            std::vector<float> texcs(2 * mesh->mNumVertices);
            for (unsigned int t = 0; t < mesh->mNumVertices; ++t)
            {
                texcs[t * 2 + 0] = mesh->mTextureCoords[0][t].x;
                texcs[t * 2 + 1] = mesh->mTextureCoords[0][t].y;
            }
            amesh->texcs = new QOpenGLBuffer(QOpenGLBuffer::VertexBuffer);
            amesh->texcs->create();
            amesh->texcs->setUsagePattern(QOpenGLBuffer::StaticDraw);
            amesh->texcs->bind();
            amesh->texcs->allocate(texcs.data(), mesh->mNumVertices * sizeof(float) * 2);
            gl.glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, nullptr);
            gl.glEnableVertexAttribArray(2);
        }
        amesh->vao.release();

        AssimpMaterial & material(amesh->material);

        // create material uniform buffer
        aiMaterial * mtl = scene->mMaterials[mesh->mMaterialIndex];

        // support single texture on diffuse channel only for now... TODO
        aiString path;
        if (AI_SUCCESS == mtl->GetTexture(aiTextureType_DIFFUSE, 0, &path))
        {
            material.texture = FileAssociatedTexture::getOrCreate2D(QString(path.C_Str()), gl);
            material.texCount = 1;
        }
        else
            material.texCount = 0;

        retrieveColor(mtl, AI_MATKEY_COLOR_DIFFUSE,  material.diffuse,  0.8f, 0.8f, 0.8f, 1.0f);
        retrieveColor(mtl, AI_MATKEY_COLOR_AMBIENT,  material.ambient,  0.2f, 0.2f, 0.2f, 1.0f);
        retrieveColor(mtl, AI_MATKEY_COLOR_SPECULAR, material.specular, 0.0f, 0.0f, 0.0f, 1.0f);
        retrieveColor(mtl, AI_MATKEY_COLOR_EMISSIVE, material.emissive, 0.0f, 0.0f, 0.0f, 1.0f);

        material.shininess = 0.f;
        // pMax is in/out: room for exactly one float on input.
        unsigned int max = 1;
        aiGetMaterialFloatArray(mtl, AI_MATKEY_SHININESS, &material.shininess, &max);

        m_meshes.push_back(amesh);
    }
}
// Software-rasterizes a set of tinyobj shapes into a BunnyPixel framebuffer
// and writes diffuse / height / normal images to disk.
// Notes:
//  - `drawdepth` is currently unused — TODO confirm whether it was meant to
//    gate the out_height.png write.
//  - Assumes every shape has positions, normals and texcoords, and that
//    material_ids[0] is valid for each shape (no bounds checks here).
void rasterize(const std::vector<tinyobj::shape_t>& shapes,const std::vector<tinyobj::material_t>& materials,uint32_t width,uint32_t height,Eigen::Matrix4f camera_matrix,bool drawdepth)
{
    //camera_matrix=Eigen::Matrix4f::Identity();
    uraster::Framebuffer<BunnyPixel> tp(width,height);
    /*BunnyVert vertsin[3]={ {-1.0f,-1.0f,0.0f, 1.0f,0.0f,0.0f, 0.0f,0.0f}, {0.0f,0.0f,0.0f, 0.0f,1.0f,0.0f, 0.0f,1.0f}, {1.0f,-1.0f,0.0f, 0.0f,0.0f,1.0f, 1.0f,0.0f}};
    size_t indexin[3]={0,1,2};
    uraster::draw(tp, vertsin,vertsin+3, indexin,indexin+3, (BunnyVertVsOut*)NULL,(BunnyVertVsOut*)NULL, std::bind(example_vertex_shader,std::placeholders::_1,camera_matrix), example_fragment_shader );*/

    // Running min/max of post-projection vertex positions, reported at the
    // end for debugging the camera/depth range.
    Eigen::Vector3f mm=Eigen::Vector3f(100000000.0,100000000.0,100000000.0);
    Eigen::Vector3f mx=-Eigen::Vector3f(100000000.0,100000000.0,100000000.0);

    // One diffuse texture per material, loaded in material order so that
    // material_ids can index `images` directly.
    std::vector<TinyImage> images;
    for(const tinyobj::material_t& mat : materials)
    {
        std::cerr << "Loading " << mat.diffuse_texname << std::endl;
        images.emplace_back(mat.diffuse_texname);
    }

    for(const tinyobj::shape_t& shs : shapes)
    {
        // Convert tinyobj's flat float arrays into BunnyVert records.
        std::vector<BunnyVert> vertsin(shs.mesh.positions.size()/3);
        for(size_t i=0;i<vertsin.size();i++)
        {
            BunnyVert& vert=vertsin[i];
            vert.x=shs.mesh.positions[3*i];
            vert.y=shs.mesh.positions[3*i+1];
            vert.z=shs.mesh.positions[3*i+2];

            // Track projected-space bounds (debug only; not used for culling).
            Eigen::Vector4f tproj=camera_matrix*Eigen::Vector4f(vert.x,vert.y,vert.z,1.0);
            //std::cerr << tproj.z() << ",";
            mm.x()=std::min(tproj.x(),mm.x());mm.y()=std::min(tproj.y(),mm.y());mm.z()=std::min(tproj.z(),mm.z());
            mx.x()=std::max(tproj.x(),mx.x());mx.y()=std::max(tproj.y(),mx.y());mx.z()=std::max(tproj.z(),mx.z());

            // Normals are negated — presumably to flip winding/orientation of
            // the source model; TODO confirm against the shading convention.
            vert.nx=-shs.mesh.normals[3*i];
            vert.ny=-shs.mesh.normals[3*i+1];
            vert.nz=-shs.mesh.normals[3*i+2];
            vert.s=shs.mesh.texcoords[2*i];
            vert.t=shs.mesh.texcoords[2*i+1];
        }
        // Widen tinyobj's index type to size_t for uraster::draw.
        std::vector<size_t> indices(shs.mesh.indices.cbegin(),shs.mesh.indices.cend());

        // Whole shape uses the material of its first face (material_ids[0]).
        uraster::draw(tp, &vertsin[0],&vertsin[0]+vertsin.size(), &indices[0],&indices[0]+indices.size(), (BunnyVertVsOut*)NULL,(BunnyVertVsOut*)NULL,
            std::bind(example_vertex_shader,std::placeholders::_1,camera_matrix),
            std::bind(example_fragment_shader,std::placeholders::_1,images[shs.mesh.material_ids[0]]) );
    }
    std::cerr << "mn:" << mm.z() << ",\n mx: " << mx.z() << std::endl;
    //std::cerr << "mn':" << camera_matrix*Eigen::Vector4f(mm.x(),mm.y(),mm.z(),1.0) << ",\n mx': " << camera_matrix*Eigen::Vector4f(mx.x(),mx.y(),mx.z(),1.0) << std::endl;
    std::cerr << "Rendering complete. Postprocessing." << std::endl;

    // Dump the three framebuffer channels as images.
    write_framebuffer(tp,[](const BunnyPixel& bp){ return bp.diffuse_color;},"out_diffuse.png");
    write_framebuffer(tp,[](const BunnyPixel& bp){ std::array<float,1> t; t[0]=bp.new_depth; return t;},"out_height.png");
    write_framebuffer(tp,[](const BunnyPixel& bp){ return bp.normal;},"out_normals.png");
}
// Constructs a Tcl object from a COM VARIANT, dispatching on the VARIANT's
// type tag: SAFEARRAYs are converted recursively, numeric/boolean types map
// to the corresponding Tcl number objects, IDispatch/IUnknown pointers are
// wrapped, and anything else falls back to a string conversion via _bstr_t.
// The resulting m_pObj gets one reference (Tcl_IncrRefCount) at the end.
TclObject::TclObject (VARIANT *pSrc, const Type &type, Tcl_Interp *interp, int bytes)
{
    if (V_ISARRAY(pSrc)) {
        // SAFEARRAY: may be held by reference; element type is in the low bits.
        SAFEARRAY *psa = V_ISBYREF(pSrc) ? *V_ARRAYREF(pSrc) : V_ARRAY(pSrc);
        VARTYPE elementType = V_VT(pSrc) & VT_TYPEMASK;
        unsigned numDimensions = SafeArrayGetDim(psa);
        // Per-dimension cursor used by the recursive conversion (starts at dim 1).
        std::vector<long> indices(numDimensions);
        m_pObj = convertFromSafeArray( psa, elementType, 1, &indices[0], type, interp, bytes);
    } else if (vtMissing == pSrc) {
        // Missing optional argument — represented by a dedicated "NA" object.
        m_pObj = Extension::newNaObj();
    } else {
        switch (V_VT(pSrc)) {
        case VT_BOOL:
            m_pObj = Tcl_NewBooleanObj(V_BOOL(pSrc));
            break;
        case VT_ERROR:
            // SCODE stored as a plain long.
            m_pObj = Tcl_NewLongObj(V_ERROR(pSrc));
            break;
        case VT_I1:
        case VT_UI1:
            m_pObj = Tcl_NewLongObj(V_I1(pSrc));
            break;
        case VT_I2:
        case VT_UI2:
            m_pObj = Tcl_NewLongObj(V_I2(pSrc));
            break;
        case VT_I4:
        case VT_UI4:
        case VT_INT:
        case VT_UINT:
            m_pObj = Tcl_NewLongObj(V_I4(pSrc));
            break;
#ifdef V_I8
        case VT_I8:
        case VT_UI8:
            // 64-bit integers need Tcl's wide-int object.
            m_pObj = Tcl_NewWideIntObj(V_I8(pSrc));
            break;
#endif
        case VT_R4:
            m_pObj = Tcl_NewDoubleObj(V_R4(pSrc));
            break;
        case VT_DATE:
            // DATE is a double (days since epoch) — exposed as a raw double.
        case VT_R8:
            m_pObj = Tcl_NewDoubleObj(V_R8(pSrc));
            break;
        case VT_DISPATCH:
            m_pObj = convertFromUnknown(V_DISPATCH(pSrc), type.iid(), interp);
            break;
        case VT_DISPATCH | VT_BYREF:
            // Guard against a null by-ref slot before dereferencing.
            m_pObj = convertFromUnknown( (V_DISPATCHREF(pSrc) != 0) ? *V_DISPATCHREF(pSrc) : 0, type.iid(), interp);
            break;
        case VT_UNKNOWN:
            m_pObj = convertFromUnknown(V_UNKNOWN(pSrc), type.iid(), interp);
            break;
        case VT_UNKNOWN | VT_BYREF:
            m_pObj = convertFromUnknown( (V_UNKNOWNREF(pSrc) != 0) ? *V_UNKNOWNREF(pSrc) : 0, type.iid(), interp);
            break;
        case VT_NULL:
            m_pObj = Extension::newNullObj();
            break;
        case VT_LPSTR:
            m_pObj = Tcl_NewStringObj(V_I1REF(pSrc), -1);
            break;
        case VT_LPWSTR:
            {
#if TCL_MINOR_VERSION >= 2
                // Uses Unicode function introduced in Tcl 8.2.
                m_pObj = newUnicodeObj(V_UI2REF(pSrc), -1);
#else
                // Pre-8.2: round-trip through _bstr_t for the narrow conversion.
                const wchar_t *pWide = V_UI2REF(pSrc);
                _bstr_t str(pWide);
                m_pObj = Tcl_NewStringObj(str, -1);
#endif
            }
            break;
        default:
            if (V_VT(pSrc) == VT_USERDEFINED && type.name() == "GUID") {
                // Special-case GUID structs: render as their canonical string.
                Uuid uuid(*static_cast<UUID *>(V_BYREF(pSrc)));
                m_pObj = Tcl_NewStringObj( const_cast<char *>(uuid.toString().c_str()), -1);
            } else {
                // Unwrap VARIANT-by-reference before the string fallback.
                if (V_VT(pSrc) == (VT_VARIANT | VT_BYREF)) {
                    pSrc = V_VARIANTREF(pSrc);
                }
                _bstr_t str(pSrc);
#if TCL_MINOR_VERSION >= 2
                // Uses Unicode function introduced in Tcl 8.2.
                wchar_t *pWide = str;
                m_pObj = newUnicodeObj( reinterpret_cast<Tcl_UniChar *>(pWide), str.length());
#else
                m_pObj = Tcl_NewStringObj(str, -1);
#endif
            }
        }
    }
    Tcl_IncrRefCount(m_pObj);
}
void PrimitiveShapeClassifier::process(const sensor_msgs::PointCloud2::ConstPtr& ros_cloud, const sensor_msgs::PointCloud2::ConstPtr& ros_normal, const jsk_recognition_msgs::ClusterPointIndices::ConstPtr& ros_indices, const jsk_recognition_msgs::PolygonArray::ConstPtr& ros_polygons) { boost::mutex::scoped_lock lock(mutex_); if (!checkFrameId(ros_cloud, ros_normal, ros_indices, ros_polygons)) return; pcl::PointCloud<PointT>::Ptr input(new pcl::PointCloud<PointT>); pcl::fromROSMsg(*ros_cloud, *input); pcl::PointCloud<pcl::Normal>::Ptr normal(new pcl::PointCloud<pcl::Normal>); pcl::fromROSMsg(*ros_normal, *normal); pcl::ExtractIndices<PointT> ext_input; ext_input.setInputCloud(input); pcl::ExtractIndices<pcl::Normal> ext_normal; ext_normal.setInputCloud(normal); std::vector<jsk_recognition_utils::Polygon::Ptr> polygons = jsk_recognition_utils::Polygon::fromROSMsg(*ros_polygons); jsk_recognition_msgs::ClassificationResult result; result.header = ros_cloud->header; result.classifier = "primitive_shape_classifier"; result.target_names.push_back("box"); result.target_names.push_back("circle"); result.target_names.push_back("other"); pcl::PointCloud<PointT>::Ptr projected_cloud(new pcl::PointCloud<PointT>); std::vector<pcl::PointIndices::Ptr> boundary_indices; NODELET_DEBUG_STREAM("Cluster num: " << ros_indices->cluster_indices.size()); for (size_t i = 0; i < ros_indices->cluster_indices.size(); ++i) { pcl::PointIndices::Ptr indices(new pcl::PointIndices); indices->indices = ros_indices->cluster_indices[i].indices; NODELET_DEBUG_STREAM("Estimating cluster #" << i << " (" << indices->indices.size() << " points)"); pcl::PointCloud<PointT>::Ptr cluster_cloud(new pcl::PointCloud<PointT>); ext_input.setIndices(indices); ext_input.filter(*cluster_cloud); pcl::PointCloud<pcl::Normal>::Ptr cluster_normal(new pcl::PointCloud<pcl::Normal>); ext_normal.setIndices(indices); ext_normal.filter(*cluster_normal); pcl::ModelCoefficients::Ptr support_plane(new pcl::ModelCoefficients); if 
(!getSupportPlane(cluster_cloud, polygons, support_plane)) { NODELET_ERROR_STREAM("cloud " << i << " has no support plane. skipped"); continue; } pcl::PointIndices::Ptr b(new pcl::PointIndices); pcl::PointCloud<PointT>::Ptr pc(new pcl::PointCloud<PointT>); float circle_likelihood, box_likelihood; estimate(cluster_cloud, cluster_normal, support_plane, b, pc, circle_likelihood, box_likelihood); boundary_indices.push_back(std::move(b)); *projected_cloud += *pc; if (circle_likelihood > circle_threshold_) { // circle result.labels.push_back(1); result.label_names.push_back("circle"); result.label_proba.push_back(circle_likelihood); } else if (box_likelihood > box_threshold_) { // box result.labels.push_back(0); result.label_names.push_back("box"); result.label_proba.push_back(box_likelihood); } else { // other result.labels.push_back(3); result.label_names.push_back("other"); result.label_proba.push_back(0.0); } } // publish results sensor_msgs::PointCloud2 ros_projected_cloud; pcl::toROSMsg(*projected_cloud, ros_projected_cloud); ros_projected_cloud.header = ros_cloud->header; pub_projected_cloud_.publish(ros_projected_cloud); jsk_recognition_msgs::ClusterPointIndices ros_boundary_indices; ros_boundary_indices.header = ros_cloud->header; for (size_t i = 0; i < boundary_indices.size(); ++i) { pcl_msgs::PointIndices ri; pcl_conversions::moveFromPCL(*boundary_indices[i], ri); ros_boundary_indices.cluster_indices.push_back(ri); } pub_boundary_indices_.publish(ros_boundary_indices); pub_class_.publish(result); }
unsigned * APolygonalMesh::polygonIndices(unsigned idx) const { return &indices()[faceDrifts()[idx]]; }
// [fromDate,]間に購入して保持している人の購入価格予想 // サンプリングされたプレーヤー達の購入価格の配列を返す {3500,2800,2000,..} vector<double> estimatedPurchasedPrices(vector<Record>& records, int fromDate, int toDay) { int startIdx = indexOfDate(records, fromDate-1) + 1; int toIdx = indexOfDate(records, toDay) + 1; long long maxVolume = 0; for (int i = startIdx; i < records.size(); i++) { if (records[i].volume > maxVolume) maxVolume = records[i].volume; } double x = 1.0; vector<double> S; const int maxSize = 100000; // 保有:S人、ある日の購入者N人 // 1. p∈Sが売却 => 買値v(p)を変更 // 2. 以前からの保有者から購入 S += {v} // S >= N => Sですべて賄う。NをどうS上にマップするかという問題になる。 // 購入価格が安い順にN人が売却と考えてしまおう。 // S < N => N - S人が新規と考えてしまう for (int i = startIdx; i < toIdx; i++) { long long N = (long long)(records[i].volume * x); cerr << records[i].date << ":" << records[i].end << " => " << N << "(" << x << ")" << endl; // 新規が多すぎるのでSをまず減らす if (N > maxSize) { cull(S, 1.0 * maxSize / N); x *= 1.0 * maxSize / N; N = maxSize; } // vector<double> f; Price lo = records[i].low; Price hi = records[i].high; for (int i = 0; i < (int)N; i++) { // [lo, hi]に均等に分布と仮定 if (N == 1) f.push_back((lo + hi) / 2); else f.push_back(lo + (hi - lo) * i / (N - 1)); } int l = min((int)S.size(), (int)N); /* 基本下からだけどある程度ランダム性を入れる */ vector<int> indices(S.size()); for (int i = 0; i < S.size(); i++) indices[i] = i; // 例えば0.1だと下からピックアップしたうち1割をランダムにする const double random_factor = 0.6; for (int i = 0; i < l; i++) if ((rand() & 65535) / 65535.0 < random_factor) { int j = rand() % S.size(); swap(indices[i], indices[j]); } for (int i = 0; i < l; i++) { S[indices[i]] = f[i]; } for (int i = l; i < N; i++) { S.push_back(f[i]); } sort(S.begin(), S.end()); } return S; }
// Builds a UV-sphere vertex/index pair for the sky sphere and uploads both to
// D3D11 buffers (sphereVertBuffer / sphereIndexBuffer).
// Layout: vertex 0 is the +Z pole, then (latLines-2) rings of longLines
// vertices, then the -Z pole as the last vertex. Index generation emits the
// top cap fan, the quad strips between rings (two triangles each, plus one
// wrap-around quad per ring), and the bottom cap fan.
// NOTE(review): 3.14 / 6.28 approximate pi / 2*pi — consider XM_PI / XM_2PI
// for exact seams; kept as-is here.
void Game::CreateSphere(int latLines, int longLines)
{
    NumSphereVertices = ((latLines - 2) * longLines) + 2;
    NumSphereFaces = ((latLines - 3)*(longLines)* 2) + (longLines * 2);

    float sphereYaw = 0.0f;
    float spherePitch = 0.0f;

    std::vector<Vertex> vertices(NumSphereVertices);

    XMVECTOR currVertPos = XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);

    // +Z pole.
    vertices[0].pos.x = 0.0f;
    vertices[0].pos.y = 0.0f;
    vertices[0].pos.z = 1.0f;

    // Ring vertices: rotate the +Z axis by pitch (ring) then yaw (slice).
    // NOTE(review): the "Y" rotation actually uses XMMatrixRotationZ — the
    // member name is historic; verify against the rest of the renderer.
    for (DWORD i = 0; i < latLines - 2; ++i)
    {
        spherePitch = (i + 1) * (3.14 / (latLines - 1));
        skyRotationX = XMMatrixRotationX(spherePitch);
        for (DWORD j = 0; j < longLines; ++j)
        {
            sphereYaw = j * (6.28 / (longLines));
            skyRotationY = XMMatrixRotationZ(sphereYaw);
            currVertPos = XMVector3TransformNormal(XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f), (skyRotationX * skyRotationY));
            currVertPos = XMVector3Normalize(currVertPos);
            vertices[i*longLines + j + 1].pos.x = XMVectorGetX(currVertPos);
            vertices[i*longLines + j + 1].pos.y = XMVectorGetY(currVertPos);
            vertices[i*longLines + j + 1].pos.z = XMVectorGetZ(currVertPos);
        }
    }

    // -Z pole.
    vertices[NumSphereVertices - 1].pos.x = 0.0f;
    vertices[NumSphereVertices - 1].pos.y = 0.0f;
    vertices[NumSphereVertices - 1].pos.z = -1.0f;

    // Upload vertex data (immutable GPU-default buffer).
    D3D11_BUFFER_DESC vertexBufferDesc;
    ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
    vertexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
    vertexBufferDesc.ByteWidth = sizeof(Vertex)* NumSphereVertices;
    vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
    vertexBufferDesc.CPUAccessFlags = 0;
    vertexBufferDesc.MiscFlags = 0;

    D3D11_SUBRESOURCE_DATA vertexBufferData;
    ZeroMemory(&vertexBufferData, sizeof(vertexBufferData));
    vertexBufferData.pSysMem = &vertices[0];
    hr = device->CreateBuffer(&vertexBufferDesc, &vertexBufferData, &sphereVertBuffer);

    std::vector<DWORD> indices(NumSphereFaces * 3);

    int k = 0;
    // Top cap fan (pole vertex 0 to the first ring).
    for (DWORD l = 0; l < longLines - 1; ++l)
    {
        indices[k] = 0;
        indices[k + 1] = l + 1;
        indices[k + 2] = l + 2;
        k += 3;
    }
    // Wrap-around triangle closing the top cap.
    indices[k] = 0;
    indices[k + 1] = longLines;
    indices[k + 2] = 1;
    k += 3;

    // Quad strips between consecutive rings: two triangles per quad.
    for (DWORD i = 0; i < latLines - 3; ++i)
    {
        for (DWORD j = 0; j < longLines - 1; ++j)
        {
            indices[k] = i*longLines + j + 1;
            indices[k + 1] = i*longLines + j + 2;
            indices[k + 2] = (i + 1)*longLines + j + 1;

            indices[k + 3] = (i + 1)*longLines + j + 1;
            indices[k + 4] = i*longLines + j + 2;
            indices[k + 5] = (i + 1)*longLines + j + 2;

            k += 6; // next quad
        }
        // Wrap-around quad joining the last slice back to the first.
        indices[k] = (i*longLines) + longLines;
        indices[k + 1] = (i*longLines) + 1;
        indices[k + 2] = ((i + 1)*longLines) + longLines;

        indices[k + 3] = ((i + 1)*longLines) + longLines;
        indices[k + 4] = (i*longLines) + 1;
        indices[k + 5] = ((i + 1)*longLines) + 1;

        k += 6;
    }

    // Bottom cap fan (last vertex is the -Z pole).
    for (DWORD l = 0; l < longLines - 1; ++l)
    {
        indices[k] = NumSphereVertices - 1;
        indices[k + 1] = (NumSphereVertices - 1) - (l + 1);
        indices[k + 2] = (NumSphereVertices - 1) - (l + 2);
        k += 3;
    }
    // Wrap-around triangle closing the bottom cap.
    indices[k] = NumSphereVertices - 1;
    indices[k + 1] = (NumSphereVertices - 1) - longLines;
    indices[k + 2] = NumSphereVertices - 2;

    // Upload index data.
    D3D11_BUFFER_DESC indexBufferDesc;
    ZeroMemory(&indexBufferDesc, sizeof(indexBufferDesc));
    indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
    indexBufferDesc.ByteWidth = sizeof(DWORD)* NumSphereFaces * 3;
    indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
    indexBufferDesc.CPUAccessFlags = 0;
    indexBufferDesc.MiscFlags = 0;

    D3D11_SUBRESOURCE_DATA iinitData;
    iinitData.pSysMem = &indices[0];
    device->CreateBuffer(&indexBufferDesc, &iinitData, &sphereIndexBuffer);
}
// Automatically groups matrix rows into "lines" (blocks) for line smoothing,
// using node coordinates: for each unblocked node, its graph neighbors are
// sorted by Euclidean distance and a line search is fired toward the one or
// two nearest neighbors when they are sufficiently closer than the farthest
// (ratio below threshold_). Rows come in groups of NumEqns_ per node.
// Returns the number of lines created; blockIndices[row] receives the line id.
int LinePartitioner<GraphType,Scalar>::Compute_Blocks_AutoLine(Teuchos::ArrayView<local_ordinal_type> blockIndices) const
{
    typedef local_ordinal_type LO;
    const LO invalid = Teuchos::OrdinalTraits<LO>::invalid();
    const Scalar zero = Teuchos::ScalarTraits<Scalar>::zero();
    const MT mzero = Teuchos::ScalarTraits<MT>::zero();

    // Coordinate arrays; y/z stay null for 1D/2D problems and contribute
    // nothing to the distance below.
    Teuchos::ArrayRCP<const Scalar> xvalsRCP, yvalsRCP, zvalsRCP;
    Teuchos::ArrayView<const Scalar> xvals, yvals, zvals;
    xvalsRCP = coord_->getData(0); xvals = xvalsRCP();
    if(coord_->getNumVectors() > 1) { yvalsRCP = coord_->getData(1); yvals = yvalsRCP(); }
    if(coord_->getNumVectors() > 2) { zvalsRCP = coord_->getData(2); zvals = zvalsRCP(); }

    MT tol = threshold_;
    size_t N = this->Graph_->getNodeNumRows();
    size_t allocated_space = this->Graph_->getNodeMaxNumRowEntries();

    // Scratch arrays sized for the widest row; itemp/dtemp are workspace for
    // local_automatic_line_search.
    Teuchos::Array<LO> cols(allocated_space);
    Teuchos::Array<LO> indices(allocated_space);
    Teuchos::Array<MT> dist(allocated_space);
    Teuchos::Array<LO> itemp(2*allocated_space);
    Teuchos::Array<MT> dtemp(allocated_space);

    LO num_lines = 0;

    // Stride by NumEqns_: one coordinate/node per equation block.
    for(LO i=0; i<(LO)N; i+=NumEqns_) {
        size_t nz=0;
        // Short circuit if I've already been blocked
        if(blockIndices[i] != invalid) continue;

        // Get neighbors and sort by distance
        this->Graph_->getLocalRowCopy(i,cols(),nz);
        Scalar x0 = (!xvals.is_null()) ? xvals[i/NumEqns_] : zero;
        Scalar y0 = (!yvals.is_null()) ? yvals[i/NumEqns_] : zero;
        Scalar z0 = (!zvals.is_null()) ? zvals[i/NumEqns_] : zero;
        LO neighbor_len=0;
        for(size_t j=0; j<nz; j+=NumEqns_) {
            MT mydist = mzero;
            LO nn = cols[j] / NumEqns_;
            if(cols[j] >=(LO)N) continue; // Check for off-proc entries
            if(!xvals.is_null()) mydist += square<Scalar>(x0 - xvals[nn]);
            if(!yvals.is_null()) mydist += square<Scalar>(y0 - yvals[nn]);
            if(!zvals.is_null()) mydist += square<Scalar>(z0 - zvals[nn]);
            dist[neighbor_len] = Teuchos::ScalarTraits<MT>::squareroot(mydist);
            indices[neighbor_len]=cols[j];
            neighbor_len++;
        }

        // Co-sort distances and column indices (dist[0] is self, distance 0).
        Teuchos::ArrayView<MT> dist_view = dist(0,neighbor_len);
        Tpetra::sort2(dist_view.begin(),dist_view.end(),indices.begin());

        // Number myself
        for(LO k=0; k<NumEqns_; k++)
            blockIndices[i + k] = num_lines;

        // Fire off a neighbor line search (nearest neighbor)
        if(neighbor_len > 2 && dist[1]/dist[neighbor_len-1] < tol) {
            local_automatic_line_search(NumEqns_,blockIndices,i,indices[1],num_lines,tol,itemp,dtemp);
        }

        // Fire off a neighbor line search (second nearest neighbor)
        if(neighbor_len > 3 && dist[2]/dist[neighbor_len-1] < tol) {
            local_automatic_line_search(NumEqns_,blockIndices,i,indices[2],num_lines,tol,itemp,dtemp);
        }

        num_lines++;
    }
    return num_lines;
}
void KENLM<Model>::EvaluateWhenApplied(const ManagerBase &mgr, const Hypothesis &hypo, const FFState &prevState, Scores &scores, FFState &state) const { KenLMState &stateCast = static_cast<KenLMState&>(state); const System &system = mgr.system; const lm::ngram::State &in_state = static_cast<const KenLMState&>(prevState).state; if (!hypo.GetTargetPhrase().GetSize()) { stateCast.state = in_state; return; } const std::size_t begin = hypo.GetCurrTargetWordsRange().GetStartPos(); //[begin, end) in STL-like fashion. const std::size_t end = hypo.GetCurrTargetWordsRange().GetEndPos() + 1; const std::size_t adjust_end = std::min(end, begin + m_ngram->Order() - 1); std::size_t position = begin; typename Model::State aux_state; typename Model::State *state0 = &stateCast.state, *state1 = &aux_state; float score = m_ngram->Score(in_state, TranslateID(hypo.GetWord(position)), *state0); ++position; for (; position < adjust_end; ++position) { score += m_ngram->Score(*state0, TranslateID(hypo.GetWord(position)), *state1); std::swap(state0, state1); } if (hypo.GetBitmap().IsComplete()) { // Score end of sentence. std::vector<lm::WordIndex> indices(m_ngram->Order() - 1); const lm::WordIndex *last = LastIDs(hypo, &indices.front()); score += m_ngram->FullScoreForgotState(&indices.front(), last, m_ngram->GetVocabulary().EndSentence(), stateCast.state).prob; } else if (adjust_end < end) { // Get state after adding a long phrase. std::vector<lm::WordIndex> indices(m_ngram->Order() - 1); const lm::WordIndex *last = LastIDs(hypo, &indices.front()); m_ngram->GetState(&indices.front(), last, stateCast.state); } else if (state0 != &stateCast.state) { // Short enough phrase that we can just reuse the state. stateCast.state = *state0; } score = TransformLMScore(score); bool OOVFeatureEnabled = false; if (OOVFeatureEnabled) { std::vector<float> scoresVec(2); scoresVec[0] = score; scoresVec[1] = 0.0; scores.PlusEquals(system, *this, scoresVec); } else { scores.PlusEquals(system, *this, score); } }
int main(int argc, char * argv[]) { struct arg_file * train_file = arg_file1("f", "features", "<filename>", "Training dataset"); struct arg_file * index_file = arg_file0("x", "index", "<filename>", "Training dataset index"); struct arg_file * output_index = arg_file0(NULL, "output-index", "<filename>", "Save used index to file"); struct arg_file * input = arg_file0("i", "input", "<filename>", "Input dataset in libsvm format"); struct arg_file * output = arg_file0("o", "output", "<filename>", ""); struct arg_lit * hist = arg_lit0(NULL, "hist", ""); struct arg_lit * help = arg_lit0("h", "help", "Print this help and exit"); struct arg_int * verbosity = arg_int0 ("v", "verbosity", "{0..4}", "Log verbosity" "\nIndex parameters :"); struct arg_int * distance = arg_int0("d", "distance", "{1..9}", "Distance metric" "\n\t1=L2 (default), 2=L1, 3=MINKOWSKI,\n\t4=MAX, 5=HIST_INTERSECT, 6=HELLLINGER," "\n\t7=CS, 8=KULLBACK_LEIBLER, 9=HAMMING"); struct arg_int * index_type = arg_int0("t", "index-type", "{0..5}", "Constructed index type" "\n t=0 - linear brute force search" "\n t=1 - kd-tree :"); struct arg_int * kd_tree_count = arg_int0(NULL, "kd-tree-count", "{1..16+}", "Number of parallel trees (default 4)" "\n t=2 - k-means :"); struct arg_int * km_branching = arg_int0(NULL, "km-branching", "n", "Branching factor (default 32)"); struct arg_int * km_iterations = arg_int0(NULL, "km-iterations", "n", "Maximum iterations (default 11)"); struct arg_int * km_centers = arg_int0(NULL, "km-centers", "{0..2}", "Initial cluster centers" "\n\t0=CENTERS_RANDOM (default)\n\t1=CENTERS_GONZALES\n\t2=CENTERS_KMEANSPP"); struct arg_dbl * km_index = arg_dbl0(NULL, "km-index", "", "Cluster boundary index (default 0.2)" "\n t=3 (default) - kd-tree + k-means" "\n t=4 - LSH :"); struct arg_int * lsh_table_count = arg_int0(NULL, "lsh-table-count", "{0..}", "Number of hash tables"); struct arg_int * lsh_key_size = arg_int0(NULL, "lsh-key-size", "{0..}", "Hash key bits"); struct arg_int * 
lsh_probe_level = arg_int0(NULL, "lsh-probe-level", "{0..}", "Bit shift for neighboring bucket check" "\n t=5 - automatically tuned index :"); struct arg_dbl * auto_precision = arg_dbl0(NULL, "auto-precision", "[0,1]", "Expected percentage of exact hits"); struct arg_dbl * auto_build_weight = arg_dbl0(NULL, "auto-build-weight", "", ""); struct arg_dbl * auto_memory_weight = arg_dbl0(NULL, "auto-memory-weight", "", ""); struct arg_dbl * auto_sample_fraction = arg_dbl0(NULL, "auto-sample-fraction", "[0,1]", "" "\nSearch parameters :"); struct arg_int * neighbors = arg_int0("n", "neighbors", "n", "Neighbor count (default 1)"); struct arg_dbl * radius = arg_dbl0("r", "radius", "r", "Search radius, requests radius search"); struct arg_int * checks = arg_int0("c", "checks", "...", "Search checks (default 32)"); struct arg_end * end = arg_end(20); void * argtable[] = { train_file, index_file, output_index, input, output, hist, help, verbosity, distance, index_type, kd_tree_count, km_branching, km_iterations, km_centers, km_index, lsh_table_count, lsh_key_size, lsh_probe_level, auto_precision, auto_build_weight, auto_memory_weight, auto_sample_fraction, neighbors, radius, checks, end }; if(arg_nullcheck(argtable) != 0) { fprintf(stderr, "%s: insufficient memory\n", argv[0]); return EXIT_FAILURE; } // -- Defaults -- verbosity->ival[0] = 0; distance->ival[0] = 1;//L2 index_type->ival[0] = 3;//kd + km // kd-tree kd_tree_count->ival[0] = 4; // k-means km_branching ->ival[0] = 32; km_iterations->ival[0] = 11; km_centers ->ival[0] = 0;//CENTERS_RANDOM km_index ->dval[0] = 0.2; // auto auto_precision ->dval[0] = 0.9; auto_build_weight ->dval[0] = 0.01; auto_memory_weight ->dval[0] = 0; auto_sample_fraction->dval[0] = 0.1; // search neighbors->ival[0] = 1; checks->ival[0] = 32; // -- Parse -- int arg_errors = arg_parse(argc, argv, argtable); if(help->count > 0) { printf("Usage: %s", argv[0]); arg_print_syntax(stdout, argtable, "\n"); arg_print_glossary(stdout, argtable," %-25s 
%s\n"); return EXIT_SUCCESS; } if(arg_errors > 0) { arg_print_errors(stderr, end, argv[0]); fprintf(stderr, "Try '%s --help' for more information.\n", argv[0]); return EXIT_FAILURE; } cvflann::log_verbosity(verbosity->ival[0]); std::cout << "Loading features '" << train_file->filename[0] << "' ..." << std::flush; auto train = load(train_file->filename[0]); cv::Mat_<float> mat = cv::Mat_<float>::zeros(train.data.size(), train.dim); boost::dynamic_bitset<> train_class_set; std::vector<size_t> train_class_hist; for(size_t i = 0; i < train.data.size(); ++i) { size_t const c = train.data[i].first; if(c >= train_class_set.size()) { train_class_set.resize(c+1); train_class_hist.resize(c+1, 0); } train_class_set.set(c); train_class_hist[c] += 1; for(auto p : train.data[i].second) mat(i, p.first-1) = p.second; } std::cout << " OK\n" "\tdata : " << train.data.size() << 'x' << train.dim << ", " << train_class_set.count() << " classes\n"; if(hist->count > 0) { std::cout << "\thistogram :\n"; for(size_t i = 0; i < train_class_set.size(); ++i) std::cout << '\t' << i << " : " << train_class_hist[i] << " (" << (100.*train_class_hist[i]/train.data.size()) << "%)\n"; } // -- Index -- cv::flann::Index index; if(index_file->count > 0) { std::cout << "Loading index '" << index_file->filename[0] << "' ..." << std::flush; if(!index.load(mat, index_file->filename[0])) { fprintf(stderr, "Can't load index '%s'.\n", index_file->filename[0]); return EXIT_SUCCESS; } } else { std::cout << "Building index ..." 
<< std::flush; // Parameters std::unique_ptr<cv::flann::IndexParams> params; switch(index_type->ival[0]) { case 0 : // linear brute force search params = std::make_unique<cv::flann::LinearIndexParams>(); break; case 1 : // k-d tree params = std::make_unique<cv::flann::KDTreeIndexParams>( kd_tree_count->ival[0]); break; case 2 : // k-means params = std::make_unique<cv::flann::KMeansIndexParams>( km_branching ->ival[0], km_iterations->ival[0], static_cast<cvflann::flann_centers_init_t>(km_centers->ival[0]), km_index->dval[0]); break; case 3 : // k-d tree + k-means params = std::make_unique<cv::flann::CompositeIndexParams>( kd_tree_count->ival[0], km_branching ->ival[0], km_iterations->ival[0], static_cast<cvflann::flann_centers_init_t>(km_centers->ival[0]), km_index->dval[0]); break; case 4 : // lsh if((lsh_table_count->count == 0) || (lsh_key_size == 0) || (lsh_probe_level == 0)) { std::cerr << "For t=4, lsh-table-count, lsh-key-size and lsh-probe-level must be set.\n"; return EXIT_FAILURE; } params = std::make_unique<cv::flann::LshIndexParams>( lsh_table_count->ival[0], lsh_key_size ->ival[0], lsh_probe_level->ival[0]); break; case 5 : // autotuned index params = std::make_unique<cv::flann::AutotunedIndexParams>( auto_precision ->dval[0], auto_build_weight ->dval[0], auto_memory_weight ->dval[0], auto_sample_fraction->dval[0]); break; default : std::cerr << "Unknown index type " << index_type->ival[0] << std::endl; return EXIT_FAILURE; } index.build(mat, *params, static_cast<cvflann::flann_distance_t>(distance->ival[0])); } std::cout << " OK\n"; if(output_index->count > 0) { std::cout << "Saving index '" << output_index->filename[0] << "' ..." << std::flush; index.save(output_index->filename[0]); std::cout << " OK\n"; } // -- Query -- if(input->count > 0) { std::cout << "Loading query '" << input->filename[0] << "' ..." 
<< std::flush; auto test = load(input->filename[0]); boost::dynamic_bitset<> test_class_set(train_class_set.size()); std::vector<size_t> test_class_hist(train_class_set.size()); for(size_t i = 0; i < test.data.size(); ++i) { size_t const c = test.data[i].first; if(c >= test_class_set.size()) { test_class_set.resize(c+1); test_class_hist.resize(c+1, 0); } test_class_set.set(c); test_class_hist[c] += 1; } std::cout << " OK\n" "\tdata : " << test.data.size() << 'x' << test.dim << ", " << test_class_set.count() << " classes\n"; if(!test_class_set.is_subset_of(train_class_set)) std::cout << "\t!!! " << (test_class_set-train_class_set).count() << " test classes not in training data\n"; //std::cout << "\thistogram :\n"; //for(size_t i = 0; i < test_class_set.size(); ++i) // std::cout << '\t' << i << " : " << test_class_hist[i] << " (" << (100.*test_class_hist[i]/test.data.size()) << "%)\n"; std::cout << "Searching ..." << std::flush; cv::flann::SearchParams const params{checks->ival[0]}; std::ofstream file; if(output->count > 0) { file.open(output->filename[0]); if(!file) { fprintf(stderr, "Can't open output file '%s'\n", output->filename[0]); return EXIT_FAILURE; } } auto const n = neighbors->ival[0]; std::vector<size_t> match_counts(n, 0); std::vector<size_t> cumulative_match_counts(n, 0); std::vector<size_t> match_hist(n+1, 0); namespace acc = boost::accumulators; std::vector<acc::accumulator_set<double, acc::stats< acc::tag::mean, acc::tag::min, acc::tag::max>>> class_matches(test_class_set.size()); for(size_t i = 0; i < test.data.size(); ++i) { std::vector<float> query(test.dim, 0); std::vector<int > indices(n); std::vector<float> dists(n); for(auto p : test.data[i].second) query[p.first-1] = p.second; if(radius->count > 0) index.radiusSearch(query, indices, dists, radius->dval[0], n, params); else index.knnSearch(query, indices, dists, n, params); unsigned matching_neighbors = 0; bool found = false; file << test.data[i].first; for(int j = 0; j < n; ++j) { file << ' 
' << indices[j] << ':' << train.data[indices[j]].first; bool ok = abs(test.data[i].first - train.data[indices[j]].first) < 0.1; if(ok) { ++matching_neighbors; ++match_counts[j]; } found = found || ok; if(found) ++cumulative_match_counts[j]; } file << ' ' << matching_neighbors << std::endl; ++match_hist[matching_neighbors]; class_matches[test.data[i].first](matching_neighbors); } std::cout << " OK\n"; auto const count = test.data.size(); for(int i = 0; i < n; ++i) { std::cout << i << " : " << match_counts[i] << ", total " << cumulative_match_counts[i] << " (" << (100.*match_counts[i]/count) << "%, " << (100.*cumulative_match_counts[i]/count) << "%)\n"; } for(int i = 0; i < (n+1); ++i) { std::cout << i << " : " << match_hist[i] << " (" << (100.*match_hist[i]/count) << "%)\n"; } std::cout << "Class matches :\n"; for(size_t i = 0; i < class_matches.size(); ++i) { auto const cnt = acc::count(class_matches[i]); if(cnt > 0) { auto const mean = acc::mean(class_matches[i]); std::cout << i << " : " << cnt << " (" << (100.*cnt/count) << "%) - " << mean << " (" << (100.*mean/n) << "%) in [" << acc::min(class_matches[i]) << ',' << acc::max(class_matches[i]) << "]\n"; } } } return EXIT_SUCCESS; }
bool Model::parseMeshesOld(bgfx::VertexDecl global_vertex_decl, FS::IFile& file, FileVersion version, u32 global_flags) { int object_count = 0; file.read(&object_count, sizeof(object_count)); if (object_count <= 0) return false; m_meshes.reserve(object_count); char model_dir[MAX_PATH_LENGTH]; PathUtils::getDir(model_dir, MAX_PATH_LENGTH, getPath().c_str()); struct Offsets { i32 attribute_array_offset; i32 attribute_array_size; i32 indices_offset; i32 mesh_tri_count; }; Array<Offsets> mesh_offsets(m_allocator); for (int i = 0; i < object_count; ++i) { i32 str_size; file.read(&str_size, sizeof(str_size)); char material_name[MAX_PATH_LENGTH]; file.read(material_name, str_size); if (str_size >= MAX_PATH_LENGTH) return false; material_name[str_size] = 0; char material_path[MAX_PATH_LENGTH]; copyString(material_path, model_dir); catString(material_path, material_name); catString(material_path, ".mat"); auto* material_manager = m_resource_manager.getOwner().get(Material::TYPE); Material* material = static_cast<Material*>(material_manager->load(Path(material_path))); Offsets& offsets = mesh_offsets.emplace(); file.read(&offsets.attribute_array_offset, sizeof(offsets.attribute_array_offset)); file.read(&offsets.attribute_array_size, sizeof(offsets.attribute_array_size)); file.read(&offsets.indices_offset, sizeof(offsets.indices_offset)); file.read(&offsets.mesh_tri_count, sizeof(offsets.mesh_tri_count)); file.read(&str_size, sizeof(str_size)); if (str_size >= MAX_PATH_LENGTH) { material_manager->unload(*material); return false; } char mesh_name[MAX_PATH_LENGTH]; mesh_name[str_size] = 0; file.read(mesh_name, str_size); bgfx::VertexDecl vertex_decl = global_vertex_decl; if (version <= FileVersion::SINGLE_VERTEX_DECL) { parseVertexDecl(file, &vertex_decl); if (i != 0 && global_vertex_decl.m_hash != vertex_decl.m_hash) { g_log_error.log("Renderer") << "Model " << getPath().c_str() << " contains meshes with different vertex declarations."; } if(i == 0) global_vertex_decl = 
vertex_decl; } m_meshes.emplace(material, vertex_decl, mesh_name, m_allocator); addDependency(*material); } i32 indices_count = 0; file.read(&indices_count, sizeof(indices_count)); if (indices_count <= 0) return false; u32 INDICES_16BIT_FLAG = 1; int index_size = global_flags & INDICES_16BIT_FLAG ? 2 : 4; Array<u8> indices(m_allocator); indices.resize(indices_count * index_size); file.read(&indices[0], indices.size()); i32 vertices_size = 0; file.read(&vertices_size, sizeof(vertices_size)); if (vertices_size <= 0) return false; Array<u8> vertices(m_allocator); vertices.resize(vertices_size); file.read(&vertices[0], vertices.size()); int vertex_count = 0; for (const Offsets& offsets : mesh_offsets) { vertex_count += offsets.attribute_array_size / global_vertex_decl.getStride(); } if (version > FileVersion::BOUNDING_SHAPES_PRECOMPUTED) { file.read(&m_bounding_radius, sizeof(m_bounding_radius)); file.read(&m_aabb, sizeof(m_aabb)); } float bounding_radius_squared = 0; Vec3 min_vertex(0, 0, 0); Vec3 max_vertex(0, 0, 0); int vertex_size = global_vertex_decl.getStride(); int position_attribute_offset = global_vertex_decl.getOffset(bgfx::Attrib::Position); int uv_attribute_offset = global_vertex_decl.getOffset(bgfx::Attrib::TexCoord0); int weights_attribute_offset = global_vertex_decl.getOffset(bgfx::Attrib::Weight); int bone_indices_attribute_offset = global_vertex_decl.getOffset(bgfx::Attrib::Indices); bool keep_skin = global_vertex_decl.has(bgfx::Attrib::Weight) && global_vertex_decl.has(bgfx::Attrib::Indices); for (int i = 0; i < m_meshes.size(); ++i) { Offsets& offsets = mesh_offsets[i]; Mesh& mesh = m_meshes[i]; mesh.indices_count = offsets.mesh_tri_count * 3; mesh.indices.resize(mesh.indices_count * index_size); copyMemory(&mesh.indices[0], &indices[offsets.indices_offset * index_size], mesh.indices_count * index_size); int mesh_vertex_count = offsets.attribute_array_size / global_vertex_decl.getStride(); int mesh_attributes_array_offset = 
offsets.attribute_array_offset; mesh.vertices.resize(mesh_vertex_count); mesh.uvs.resize(mesh_vertex_count); if (keep_skin) mesh.skin.resize(mesh_vertex_count); for (int j = 0; j < mesh_vertex_count; ++j) { int offset = mesh_attributes_array_offset + j * vertex_size; if (keep_skin) { mesh.skin[j].weights = *(const Vec4*)&vertices[offset + weights_attribute_offset]; copyMemory(mesh.skin[j].indices, &vertices[offset + bone_indices_attribute_offset], sizeof(mesh.skin[j].indices)); } mesh.vertices[j] = *(const Vec3*)&vertices[offset + position_attribute_offset]; mesh.uvs[j] = *(const Vec2*)&vertices[offset + uv_attribute_offset]; float sq_len = mesh.vertices[j].squaredLength(); bounding_radius_squared = Math::maximum(bounding_radius_squared, sq_len > 0 ? sq_len : 0); min_vertex.x = Math::minimum(min_vertex.x, mesh.vertices[j].x); min_vertex.y = Math::minimum(min_vertex.y, mesh.vertices[j].y); min_vertex.z = Math::minimum(min_vertex.z, mesh.vertices[j].z); max_vertex.x = Math::maximum(max_vertex.x, mesh.vertices[j].x); max_vertex.y = Math::maximum(max_vertex.y, mesh.vertices[j].y); max_vertex.z = Math::maximum(max_vertex.z, mesh.vertices[j].z); } } if (version <= FileVersion::BOUNDING_SHAPES_PRECOMPUTED) { m_bounding_radius = sqrt(bounding_radius_squared); m_aabb = AABB(min_vertex, max_vertex); } for (int i = 0; i < m_meshes.size(); ++i) { Mesh& mesh = m_meshes[i]; Offsets offsets = mesh_offsets[i]; ASSERT(!bgfx::isValid(mesh.index_buffer_handle)); if (global_flags & INDICES_16BIT_FLAG) { mesh.flags.set(Mesh::Flags::INDICES_16_BIT); } int indices_size = index_size * mesh.indices_count; const bgfx::Memory* mem = bgfx::copy(&indices[offsets.indices_offset * index_size], indices_size); mesh.index_buffer_handle = bgfx::createIndexBuffer(mem, index_size == 4 ? 
BGFX_BUFFER_INDEX32 : 0); if (!bgfx::isValid(mesh.index_buffer_handle)) return false; ASSERT(!bgfx::isValid(mesh.vertex_buffer_handle)); const bgfx::Memory* vertices_mem = bgfx::copy(&vertices[offsets.attribute_array_offset], offsets.attribute_array_size); mesh.vertex_buffer_handle = bgfx::createVertexBuffer(vertices_mem, mesh.vertex_decl); if (!bgfx::isValid(mesh.vertex_buffer_handle)) return false; } return true; }
bool CameraRenderable::buildFrustumMesh() { bool result = false; if(m_FrustumMesh) { delete m_FrustumMesh; m_FrustumMesh = nullptr; } Camera* parent = dynamic_cast<Camera*>(m_Parent); if(parent) { Graphics::VertexBuffer* vb = OcularGraphics->createVertexBuffer(); Graphics::IndexBuffer* ib = OcularGraphics->createIndexBuffer(); //------------------------------------------------------------ // Get the corners that comprise the frustum Math::Frustum frustum = parent->getFrustum(); frustum.rebuild(); auto nearCorners = frustum.getNearClipCorners(); auto farCorners = frustum.getFarClipCorners(); //------------------------------------------------------------ // Build the vertex buffer std::vector<Graphics::Vertex> vertices(8); vertices[0].position = nearCorners[0]; // Near bottom left vertices[1].position = nearCorners[1]; // Near bottom right vertices[2].position = nearCorners[2]; // Near top right vertices[3].position = nearCorners[3]; // Near top left vertices[4].position = farCorners[0]; // Far bottom left vertices[5].position = farCorners[1]; // Far bottom right vertices[6].position = farCorners[2]; // Far top right vertices[7].position = farCorners[3]; // Far top left vb->addVertices(vertices); if(!vb->build()) { result = false; } //-------------------------------------------------------- // Build the index buffer std::vector<uint32_t> indices(24); indices[0] = 0; indices[1] = 1; // Line from near bottom left to near bottom right indices[2] = 1; indices[3] = 2; // Line from near bottom right to near top right indices[4] = 2; indices[5] = 3; // Line from near top right to near top left indices[6] = 3; indices[7] = 0; // Line from near top left to near bottom left indices[8] = 4; indices[9] = 5; // Line from far bottom left to far bottom right indices[10] = 5; indices[11] = 6; // Line from far bottom right to far top right indices[12] = 6; indices[13] = 7; // Line from far top right to far top left indices[14] = 7; indices[15] = 4; // Line from far top left to far 
bottom left indices[16] = 0; indices[17] = 4; // Line from near bottom left to far bottom left indices[18] = 1; indices[19] = 5; // Line from near bottom right to far bottom right indices[20] = 2; indices[21] = 6; // Line from near top right to far top right indices[22] = 3; indices[23] = 7; // Line from near top left to far top left ib->addIndices(indices); if(!ib->build()) { result = false; } //-------------------------------------------------------- // Build the mesh m_FrustumMesh = new Graphics::Mesh(); m_FrustumMesh->addSubMesh(); m_FrustumMesh->setVertexBuffer(vb); m_FrustumMesh->setIndexBuffer(ib); } return result; }