/**
 * @brief GetPermutationMatrices
 * @details
 * See PermTestingShared.h for information
 */
arma::cube TwoSampleGetPermutationMatrices(int nPermutations, int N, int nGroup1)
{
    arma::arma_rng::set_seed_random();  // set the seed to a random value
    arma::cube permutationMatrices(nPermutations, N, 2, arma::fill::zeros);
    arma::mat permutationMatrix1(nPermutations, N, arma::fill::zeros);
    arma::mat permutationMatrix2 = arma::ones(nPermutations, N);
    arma::mat indexList = arma::linspace<arma::mat>(0, N - 1, N);

    for (int i = 0; i < nPermutations; i++)
    {
        indexList = arma::shuffle(indexList);
        for (int j = 0; j < nGroup1; j++)
        {
            permutationMatrix1(i, static_cast<arma::uword>(indexList(j))) = 1;
        }
    }

    // group 2 membership is the complement of group 1
    permutationMatrix2 = permutationMatrix2 - permutationMatrix1;
    permutationMatrices.slice(0) = permutationMatrix1;
    permutationMatrices.slice(1) = permutationMatrix2;
    return permutationMatrices;
}
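// Usage sketch (an assumption, not part of the original file; requires
// <armadillo>): given a data matrix with one row per subject, the two cube
// slices act as group selectors, so all permuted group means come from two
// matrix products. The function and variable names below are hypothetical.
arma::mat TwoSamplePermutedMeanDifferences(const arma::mat& data, int nPermutations, int nGroup1)
{
    int N = data.n_rows;  // data is N subjects x V variables
    arma::cube P = TwoSampleGetPermutationMatrices(nPermutations, N, nGroup1);
    // each row of P.slice(0) selects the subjects assigned to group 1
    arma::mat mean1 = (P.slice(0) * data) / static_cast<double>(nGroup1);
    arma::mat mean2 = (P.slice(1) * data) / static_cast<double>(N - nGroup1);
    return mean1 - mean2;  // nPermutations x V matrix of permuted mean differences
}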
GraphType* FinleyDomain::createTrilinosGraph(bool reducedOrder) const
{
    index_t myNumTargets;
    index_t numTargets;
    const index_t* target;
    const_TrilinosMap_ptr rowMap;
    const_TrilinosMap_ptr colMap;
    if (reducedOrder) {
        myNumTargets = m_nodes->getNumReducedDegreesOfFreedom();
        numTargets = m_nodes->getNumReducedDegreesOfFreedomTargets();
        target = m_nodes->borrowTargetReducedDegreesOfFreedom();
        rowMap = m_nodes->trilinosReducedRowMap;
        colMap = m_nodes->trilinosReducedColMap;
    } else {
        myNumTargets = m_nodes->getNumDegreesOfFreedom();
        numTargets = m_nodes->getNumDegreesOfFreedomTargets();
        target = m_nodes->borrowTargetDegreesOfFreedom();
        rowMap = m_nodes->trilinosRowMap;
        colMap = m_nodes->trilinosColMap;
    }

    boost::scoped_array<IndexList> indexList(new IndexList[numTargets]);

#pragma omp parallel
    {
        // insert contributions from element matrices into columns in
        // index list
        IndexList_insertElements(indexList.get(), m_elements, reducedOrder,
                                 target, reducedOrder, target);
        IndexList_insertElements(indexList.get(), m_faceElements, reducedOrder,
                                 target, reducedOrder, target);
        IndexList_insertElements(indexList.get(), m_contactElements,
                                 reducedOrder, target, reducedOrder, target);
        IndexList_insertElements(indexList.get(), m_points, reducedOrder,
                                 target, reducedOrder, target);
    }

    // build the CRS row pointers from the per-row column counts
    Teuchos::ArrayRCP<size_t> rowPtr(myNumTargets + 1);
    for (size_t i = 0; i < myNumTargets; i++) {
        rowPtr[i + 1] = rowPtr[i] + indexList[i].count(0, numTargets);
    }

    Teuchos::ArrayRCP<LO> colInd(rowPtr[myNumTargets]);

#pragma omp parallel for
    for (index_t i = 0; i < myNumTargets; i++) {
        indexList[i].toArray(&colInd[rowPtr[i]], 0, numTargets, 0);
        std::sort(&colInd[rowPtr[i]], &colInd[rowPtr[i + 1]]);
    }

    GraphType* graph = new GraphType(rowMap, colMap, rowPtr, colInd);

    Teuchos::RCP<Teuchos::ParameterList> params = Teuchos::parameterList();
    params->set("Optimize Storage", true);
    graph->fillComplete(rowMap, rowMap, params);
    return graph;
}
ManualObject* OgreNewtonMesh::CreateEntity(const String& name) const
{
    ManualObject* const object = new ManualObject(name);

    int pointCount = GetPointCount();
    int indexCount = GetTotalIndexCount();

    dNewtonScopeBuffer<int> indexList(indexCount);
    dNewtonScopeBuffer<int> remapIndex(indexCount);
    dNewtonScopeBuffer<dNewtonMesh::dPoint> posits(pointCount);
    dNewtonScopeBuffer<dNewtonMesh::dPoint> normals(pointCount);
    dNewtonScopeBuffer<dNewtonMesh::dUV> uv0(pointCount);
    dNewtonScopeBuffer<dNewtonMesh::dUV> uv1(pointCount);
    GetVertexStreams(&posits[0], &normals[0], &uv0[0], &uv1[0]);

    void* const materialsHandle = BeginMaterialHandle();
    for (int handle = GetMaterialIndex(materialsHandle); handle != -1;
         handle = GetNextMaterialIndex(materialsHandle, handle)) {
        int materialIndex = MaterialGetMaterial(materialsHandle, handle);
        // number of indices used by this material only (this used to shadow
        // the outer indexCount, so it has been renamed)
        int materialIndexCount = MaterialGetIndexCount(materialsHandle, handle);
        MaterialGetIndexStream(materialsHandle, handle, &indexList[0]);

        MaterialMap::const_iterator materialItr = m_materialMap.find(materialIndex);

        //object->begin("BaseWhiteNoLighting", Ogre::RenderOperation::OT_TRIANGLE_LIST);
        object->begin(materialItr->second->getName(), Ogre::RenderOperation::OT_TRIANGLE_LIST);

        // Ogre does not support shared vertices for submeshes, so we have to
        // remap the vertex data per material
        int vertexCount = 0;
        // reset the whole remap buffer, not just this material's share of it
        memset(&remapIndex[0], 0xff, indexCount * sizeof(remapIndex[0]));
        for (int i = 0; i < materialIndexCount; i++) {
            int index = indexList[i];
            if (remapIndex[index] == -1) {
                remapIndex[index] = vertexCount;
                object->position(posits[index].m_x, posits[index].m_y, posits[index].m_z);
                object->normal(normals[index].m_x, normals[index].m_y, normals[index].m_z);
                object->textureCoord(uv0[index].m_u, uv0[index].m_v);
                vertexCount++;
            }
            indexList[i] = remapIndex[index];
        }

        for (int i = 0; i < materialIndexCount; i += 3) {
            object->triangle(indexList[i + 0], indexList[i + 1], indexList[i + 2]);
        }
        object->end();
    }
    EndMaterialHandle(materialsHandle);

    return object;
}
/** Index of list data for model index. */
int indexListId(const QModelIndex &index) const
{
    if (!index.isValid())
        return -1;

    const List *l = indexList(index);
    for (int i = 0; i < _lists.size(); ++i)
        if (&_lists[i] == l)
            return i;

    qDebug() << l->_module->name();
    Q_ASSERT(false);
    return -1;
}
arma::mat OneSampleGetPermutationMatrix(int nPermutations, int N)
{
    arma::arma_rng::set_seed_random();  // set the seed to a random value
    arma::mat permutationMatrix(nPermutations, N, arma::fill::ones);
    arma::mat indexList = arma::linspace<arma::mat>(0, N - 1, N);
    indexList = arma::shuffle(indexList);

    int cutoff;
    for (int i = 0; i < nPermutations; i++)
    {
        // use the first entry of the previous shuffle as the number of
        // sign flips for this permutation
        cutoff = static_cast<int>(indexList(0, 0));
        indexList = arma::shuffle(indexList);
        for (int j = 0; j < cutoff; j++)
        {
            permutationMatrix(i, static_cast<arma::uword>(indexList(j, 0))) = -1;
        }
    }
    return permutationMatrix;
}
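// Usage sketch (hypothetical, following the same pattern as the two-sample
// case above; requires <armadillo>): each row holds +1/-1 sign flips, so all
// permuted means of a one-sample test come from a single matrix product.
arma::mat OneSamplePermutedMeans(const arma::mat& data, int nPermutations)
{
    int N = data.n_rows;  // data is N subjects x V variables
    arma::mat P = OneSampleGetPermutationMatrix(nPermutations, N);
    return (P * data) / static_cast<double>(N);  // nPermutations x V permuted means
}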
CSwordVerseKey indexToVerseKey(const QModelIndex &index) const
{
    Q_ASSERT(indexDepth(index) == 2);

    const List *l = indexList(index);

    QMutexLocker locker(&BtMiniSwordMutex);

    CSwordVerseKey key(l->_module);
    key.setIntros(true);
    key.setIndex(l->_hasScope ? l->_scopeMap[index.row()] : index.row() + l->_firstEntry);
    return key;
}
void ContextMenuActionProvider::addIrcUserActions(QMenu *menu, const QModelIndex &index)
{
    // this can be called: a) as a nicklist context menu (index has IrcUserItemType)
    //                     b) as a query buffer context menu (index has BufferItemType and is a QueryBufferItem)
    //                     c) right-click in a query chatview (same as b, index will be the corresponding QueryBufferItem)
    //                     d) right-click on some nickname (_contextItem will be non-null, _filter -> chatview, index -> message buffer)

    if (contextItem().isNull()) {
        // cases a, b, c
        bool haveQuery = indexList().count() == 1 && findQueryBuffer(index).isValid();
        NetworkModel::ItemType itemType = static_cast<NetworkModel::ItemType>(index.data(NetworkModel::ItemTypeRole).toInt());
        addAction(_nickModeMenuAction, menu, itemType == NetworkModel::IrcUserItemType);
        addAction(_nickCtcpMenuAction, menu);

        IrcUser *ircUser = qobject_cast<IrcUser *>(index.data(NetworkModel::IrcUserRole).value<QObject *>());
        if (ircUser) {
            Network *network = ircUser->network();
            // only show entries for usermode +h if the server supports it
            if (network && network->prefixModes().contains('h')) {
                action(NickHalfop)->setVisible(true);
                action(NickDehalfop)->setVisible(true);
            }
            else {
                action(NickHalfop)->setVisible(false);
                action(NickDehalfop)->setVisible(false);
            }

            // ignore list stuff
            QString bufferName;
            BufferInfo bufferInfo = index.data(NetworkModel::BufferInfoRole).value<BufferInfo>();
            if (bufferInfo.type() == BufferInfo::ChannelBuffer)
                bufferName = bufferInfo.bufferName();
            QMap<QString, bool> ignoreMap = Client::ignoreListManager()->matchingRulesForHostmask(ircUser->hostmask(), ircUser->network()->networkName(), bufferName);
            addIgnoreMenu(menu, ircUser->hostmask(), ignoreMap);
            // end of ignore list stuff
        }

        menu->addSeparator();
        addAction(NickQuery, menu, itemType == NetworkModel::IrcUserItemType && !haveQuery && indexList().count() == 1);
        addAction(NickSwitchTo, menu, itemType == NetworkModel::IrcUserItemType && haveQuery);
        menu->addSeparator();
        addAction(NickWhois, menu, true);
    }
    else if (!contextItem().isEmpty() && messageFilter()) {
        // case d
        // TODO
    }
}
void Cube::init()
{
    std::vector<Vertex> vertexList(8);
    vertexList[0] = Vertex(-1.0,  1.0,  1.0, 1.0);
    vertexList[1] = Vertex(-1.0, -1.0,  1.0, 1.0);
    vertexList[2] = Vertex( 1.0, -1.0,  1.0, 1.0);
    vertexList[3] = Vertex( 1.0,  1.0,  1.0, 1.0);
    vertexList[4] = Vertex(-1.0,  1.0, -1.0, 1.0);
    vertexList[5] = Vertex(-1.0, -1.0, -1.0, 1.0);
    vertexList[6] = Vertex( 1.0, -1.0, -1.0, 1.0);
    vertexList[7] = Vertex( 1.0,  1.0, -1.0, 1.0);
    setVertexList(vertexList);

    // 24 indices: six quad faces, four indices each
    std::vector<GLuint> indexList(24);
    indexList[0]  = 0; indexList[1]  = 1; indexList[2]  = 2; indexList[3]  = 3;  // front  (z = +1)
    indexList[4]  = 3; indexList[5]  = 2; indexList[6]  = 6; indexList[7]  = 7;  // right  (x = +1)
    indexList[8]  = 7; indexList[9]  = 6; indexList[10] = 5; indexList[11] = 4;  // back   (z = -1)
    indexList[12] = 4; indexList[13] = 5; indexList[14] = 1; indexList[15] = 0;  // left   (x = -1)
    indexList[16] = 0; indexList[17] = 3; indexList[18] = 7; indexList[19] = 4;  // top    (y = +1)
    indexList[20] = 1; indexList[21] = 2; indexList[22] = 6; indexList[23] = 5;  // bottom (y = -1)
    setIndexList(indexList);
}
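// Draw sketch (hypothetical; the base class owning setVertexList/setIndexList
// is not shown, and this assumes a legacy OpenGL compatibility context with
// vertex data already submitted via glVertexPointer). Requires <GL/gl.h> and
// <vector>. Since the list holds six quads, GL_QUADS consumes it directly.
void drawCubeQuads(const std::vector<GLuint>& indexList)
{
    glDrawElements(GL_QUADS, static_cast<GLsizei>(indexList.size()),
                   GL_UNSIGNED_INT, indexList.data());
}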
void ContextMenuActionProvider::addIrcUserActions(QMenu *menu, const QModelIndex &index)
{
    // this can be called: a) as a nicklist context menu (index has IrcUserItemType)
    //                     b) as a query buffer context menu (index has BufferItemType and is a QueryBufferItem)
    //                     c) right-click in a query chatview (same as b, index will be the corresponding QueryBufferItem)
    //                     d) right-click on some nickname (_contextItem will be non-null, _filter -> chatview, index -> message buffer)

    if(contextItem().isNull()) {
        // cases a, b, c
        bool haveQuery = indexList().count() == 1 && findQueryBuffer(index).isValid();
        NetworkModel::ItemType itemType = static_cast<NetworkModel::ItemType>(index.data(NetworkModel::ItemTypeRole).toInt());
        addAction(_nickModeMenuAction, menu, itemType == NetworkModel::IrcUserItemType);
        addAction(_nickCtcpMenuAction, menu);
        menu->addSeparator();
        addAction(NickQuery, menu, itemType == NetworkModel::IrcUserItemType && !haveQuery && indexList().count() == 1);
        addAction(NickSwitchTo, menu, itemType == NetworkModel::IrcUserItemType && haveQuery);
        menu->addSeparator();
        addAction(NickWhois, menu, true);
    }
    else if(!contextItem().isEmpty() && messageFilter()) {
        // case d
        // TODO
    }
}
dgMeshEffect* dgMeshEffect::CreateVoronoiConvexDecomposition (dgMemoryAllocator* const allocator, dgInt32 pointCount, dgInt32 pointStrideInBytes, const dgFloat32* const pointCloud, dgInt32 materialId, const dgMatrix& textureProjectionMatrix)
{
    dgFloat32 normalAngleInRadians = 30.0f * 3.1416f / 180.0f;

    dgStack<dgBigVector> buffer (pointCount + 16);
    dgBigVector* const pool = &buffer[0];
    dgInt32 count = 0;
    dgFloat64 quantizeFactor = dgFloat64 (16.0f);
    dgFloat64 invQuantizeFactor = dgFloat64 (1.0f) / quantizeFactor;
    dgInt32 stride = pointStrideInBytes / sizeof (dgFloat32);

    dgBigVector pMin (dgFloat32 (1.0e10f), dgFloat32 (1.0e10f), dgFloat32 (1.0e10f), dgFloat32 (0.0f));
    dgBigVector pMax (dgFloat32 (-1.0e10f), dgFloat32 (-1.0e10f), dgFloat32 (-1.0e10f), dgFloat32 (0.0f));
    for (dgInt32 i = 0; i < pointCount; i ++) {
        dgFloat64 x = pointCloud[i * stride + 0];
        dgFloat64 y = pointCloud[i * stride + 1];
        dgFloat64 z = pointCloud[i * stride + 2];
        // quantize the seed points so near-duplicates can be merged below
        x = floor (x * quantizeFactor) * invQuantizeFactor;
        y = floor (y * quantizeFactor) * invQuantizeFactor;
        z = floor (z * quantizeFactor) * invQuantizeFactor;
        dgBigVector p (x, y, z, dgFloat64 (0.0f));
        pMin = dgBigVector (dgMin (x, pMin.m_x), dgMin (y, pMin.m_y), dgMin (z, pMin.m_z), dgFloat64 (0.0f));
        pMax = dgBigVector (dgMax (x, pMax.m_x), dgMax (y, pMax.m_y), dgMax (z, pMax.m_z), dgFloat64 (0.0f));
        pool[count] = p;
        count ++;
    }

    // add the bbox corners as a barrier
    pool[count + 0] = dgBigVector (pMin.m_x, pMin.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 1] = dgBigVector (pMax.m_x, pMin.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 2] = dgBigVector (pMin.m_x, pMax.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 3] = dgBigVector (pMax.m_x, pMax.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 4] = dgBigVector (pMin.m_x, pMin.m_y, pMax.m_z, dgFloat64 (0.0f));
    pool[count + 5] = dgBigVector (pMax.m_x, pMin.m_y, pMax.m_z, dgFloat64 (0.0f));
    pool[count + 6] = dgBigVector (pMin.m_x, pMax.m_y, pMax.m_z, dgFloat64 (0.0f));
    pool[count + 7] = dgBigVector (pMax.m_x, pMax.m_y, pMax.m_z, dgFloat64 (0.0f));
    count += 8;

    dgStack<dgInt32> indexList (count);
    count = dgVertexListToIndexList (&pool[0].m_x, sizeof (dgBigVector), 3, count, &indexList[0], dgFloat64 (5.0e-2f));
    dgAssert (count >= 8);

    dgFloat64 maxSize = dgMax (pMax.m_x - pMin.m_x, pMax.m_y - pMin.m_y, pMax.m_z - pMin.m_z);
    pMin -= dgBigVector (maxSize, maxSize, maxSize, dgFloat64 (0.0f));
    pMax += dgBigVector (maxSize, maxSize, maxSize, dgFloat64 (0.0f));

    // add a guard zone so that we do not have to clip
    dgInt32 guadVertexKey = count;
    pool[count + 0] = dgBigVector (pMin.m_x, pMin.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 1] = dgBigVector (pMax.m_x, pMin.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 2] = dgBigVector (pMin.m_x, pMax.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 3] = dgBigVector (pMax.m_x, pMax.m_y, pMin.m_z, dgFloat64 (0.0f));
    pool[count + 4] = dgBigVector (pMin.m_x, pMin.m_y, pMax.m_z, dgFloat64 (0.0f));
    pool[count + 5] = dgBigVector (pMax.m_x, pMin.m_y, pMax.m_z, dgFloat64 (0.0f));
    pool[count + 6] = dgBigVector (pMin.m_x, pMax.m_y, pMax.m_z, dgFloat64 (0.0f));
    pool[count + 7] = dgBigVector (pMax.m_x, pMax.m_y, pMax.m_z, dgFloat64 (0.0f));
    count += 8;

    dgDelaunayTetrahedralization delaunayTetrahedras (allocator, &pool[0].m_x, count, sizeof (dgBigVector), dgFloat32 (0.0f));
    delaunayTetrahedras.RemoveUpperHull ();

//  delaunayTetrahedras.Save("xxx0.txt");
    dgInt32 tetraCount = delaunayTetrahedras.GetCount();
    dgStack<dgBigVector> voronoiPoints (tetraCount + 32);
    dgStack<dgDelaunayTetrahedralization::dgListNode*> tetradrumNode (tetraCount);
    dgTree<dgList<dgInt32>, dgInt32> delanayNodes (allocator);

    dgInt32 index = 0;
    const dgHullVector* const delanayPoints = delaunayTetrahedras.GetHullVertexArray();
    for (dgDelaunayTetrahedralization::dgListNode* node = delaunayTetrahedras.GetFirst(); node; node = node->GetNext()) {
        dgConvexHull4dTetraherum& tetra = node->GetInfo();
        voronoiPoints[index] = tetra.CircumSphereCenter (delanayPoints);
        tetradrumNode[index] = node;

        for (dgInt32 i = 0; i < 4; i ++) {
            dgTree<dgList<dgInt32>, dgInt32>::dgTreeNode* header = delanayNodes.Find (tetra.m_faces[0].m_index[i]);
            if (!header) {
                dgList<dgInt32> list (allocator);
                header = delanayNodes.Insert (list, tetra.m_faces[0].m_index[i]);
            }
            header->GetInfo().Append (index);
        }
        index ++;
    }

    dgMeshEffect* const voronoiPartition = new (allocator) dgMeshEffect (allocator);
    voronoiPartition->BeginPolygon();
    dgFloat64 layer = dgFloat64 (0.0f);

    dgTree<dgList<dgInt32>, dgInt32>::Iterator iter (delanayNodes);
    for (iter.Begin(); iter; iter ++) {
        dgTree<dgList<dgInt32>, dgInt32>::dgTreeNode* const nodeNode = iter.GetNode();
        const dgList<dgInt32>& list = nodeNode->GetInfo();
        dgInt32 key = nodeNode->GetKey();

        // skip the guard vertices; only real seed points produce cells
        if (key < guadVertexKey) {
            dgBigVector pointArray[512];
            dgInt32 indexArray[512];

            dgInt32 count = 0;
            for (dgList<dgInt32>::dgListNode* ptr = list.GetFirst(); ptr; ptr = ptr->GetNext()) {
                dgInt32 i = ptr->GetInfo();
                pointArray[count] = voronoiPoints[i];
                count ++;
                dgAssert (count < dgInt32 (sizeof (pointArray) / sizeof (pointArray[0])));
            }

            count = dgVertexListToIndexList (&pointArray[0].m_x, sizeof (dgBigVector), 3, count, &indexArray[0], dgFloat64 (1.0e-3f));
            if (count >= 4) {
                dgMeshEffect convexMesh (allocator, &pointArray[0].m_x, count, sizeof (dgBigVector), dgFloat64 (0.0f));
                if (convexMesh.GetCount()) {
                    convexMesh.CalculateNormals (normalAngleInRadians);
                    convexMesh.UniformBoxMapping (materialId, textureProjectionMatrix);

                    // tag every vertex and attribute of this cell with its layer id
                    for (dgInt32 i = 0; i < convexMesh.m_pointCount; i ++) {
                        convexMesh.m_points[i].m_w = layer;
                    }
                    for (dgInt32 i = 0; i < convexMesh.m_atribCount; i ++) {
                        convexMesh.m_attrib[i].m_vertex.m_w = layer;
                    }

                    voronoiPartition->MergeFaces (&convexMesh);
                    layer += dgFloat64 (1.0f);
                }
            }
        }
    }
    voronoiPartition->EndPolygon (dgFloat64 (1.0e-8f), false);

//  voronoiPartition->SaveOFF("xxx0.off");
//  voronoiPartition->ConvertToPolygons();
    return voronoiPartition;
}
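// Call sketch (hypothetical; the surrounding Newton setup is not shown here):
// the input is a raw float array of xyz seed points, one convex cell is
// produced per seed, and each cell is tagged through the vertex w component
// with its layer id, as done in the loop above. The names cloud, n,
// materialId and textureMatrix are assumptions.
//     dgInt32 n = ...;                               // number of seed points
//     const dgFloat32* const cloud = ...;            // n xyz triples
//     dgMeshEffect* const parts = mesh->CreateVoronoiConvexDecomposition (
//         allocator, n, 3 * sizeof (dgFloat32), cloud, materialId, textureMatrix);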
// add a list of actions sensible for the current item(s)
void ContextMenuActionProvider::addActions(QMenu *menu, const QList<QModelIndex> &indexList_, MessageFilter *filter_, const QString &contextItem_, QObject *receiver_, const char *method_, bool isCustomBufferView)
{
    if (!indexList_.count())
        return;

    setIndexList(indexList_);
    setMessageFilter(filter_);
    setContextItem(contextItem_);
    setSlot(receiver_, method_);

    if (!messageFilter()) {
        // this means we are in a BufferView (or NickView) rather than a ChatView

        // first index in list determines the menu type (just in case we have both buffers and networks selected, for example)
        QModelIndex index = indexList().at(0);
        NetworkModel::ItemType itemType = static_cast<NetworkModel::ItemType>(index.data(NetworkModel::ItemTypeRole).toInt());

        switch (itemType) {
        case NetworkModel::NetworkItemType:
            addNetworkItemActions(menu, index);
            break;
        case NetworkModel::BufferItemType:
            addBufferItemActions(menu, index, isCustomBufferView);
            break;
        case NetworkModel::IrcUserItemType:
            addIrcUserActions(menu, index);
            break;
        default:
            return;
        }
    }
    else {
        // ChatView actions
        if (contextItem().isEmpty()) {
            // a) query buffer: handle like ircuser
            // b) general chatview: handle like channel iff it displays a single buffer
            // NOTE stuff probably breaks with merged buffers, need to rework a lot around here then
            if (messageFilter()->containedBuffers().count() == 1) {
                // we can handle this like a single bufferItem
                QModelIndex index = Client::networkModel()->bufferIndex(messageFilter()->containedBuffers().values().at(0));
                setIndexList(index);
                addBufferItemActions(menu, index);
                return;
            }
            else {
                // TODO: actions for merged buffers... _indexList contains the index of the message we clicked on
            }
        }
        else {
            // context item is a channel or nick; _indexList is the buffer where the clicked message originated
            if (isChannelName(contextItem())) {
                QModelIndex msgIdx = indexList().at(0);
                if (!msgIdx.isValid())
                    return;
                NetworkId networkId = msgIdx.data(NetworkModel::NetworkIdRole).value<NetworkId>();
                BufferId bufId = Client::networkModel()->bufferId(networkId, contextItem());
                if (bufId.isValid()) {
                    QModelIndex targetIdx = Client::networkModel()->bufferIndex(bufId);
                    setIndexList(targetIdx);
                    addAction(BufferJoin, menu, targetIdx, InactiveState);
                    addAction(BufferSwitchTo, menu, targetIdx, ActiveState);
                }
                else
                    addAction(JoinChannel, menu);
            }
            else {
                // TODO: actions for a nick
            }
        }
    }
}
// Triangulate a non-convex polygon
void glc::triangulatePolygon(QList<GLuint>* pIndexList, const QList<float>& bulkList)
{
    int size = pIndexList->size();
    if (polygonIsConvex(pIndexList, bulkList))
    {
        // convex case: emit a simple triangle fan
        QList<GLuint> indexList(*pIndexList);
        pIndexList->clear();
        for (int i = 0; i < size - 2; ++i)
        {
            pIndexList->append(indexList.at(0));
            pIndexList->append(indexList.at(i + 1));
            pIndexList->append(indexList.at(i + 2));
        }
    }
    else
    {
        // Get the polygon vertices
        QList<GLC_Point3d> originPoints;
        QHash<int, int> indexMap;

        QList<int> face;
        GLC_Point3d currentPoint;
        int delta = 0;
        for (int i = 0; i < size; ++i)
        {
            const int currentIndex = pIndexList->at(i);
            currentPoint = GLC_Point3d(bulkList.at(currentIndex * 3), bulkList.at(currentIndex * 3 + 1), bulkList.at(currentIndex * 3 + 2));
            if (!originPoints.contains(currentPoint))
            {
                originPoints.append(GLC_Point3d(bulkList.at(currentIndex * 3), bulkList.at(currentIndex * 3 + 1), bulkList.at(currentIndex * 3 + 2)));
                indexMap.insert(i - delta, currentIndex);
                face.append(i - delta);
            }
            else
            {
                qDebug() << "Multi points";
                ++delta;
            }
        }
        // The content of pIndexList must be reset
        pIndexList->clear();
        // Update size
        size = size - delta;
        // Check new size
        if (size < 3) return;

        //-------------- Change frame to match the polygon plane
        // Compute the face normal
        const GLC_Point3d point1(originPoints[0]);
        const GLC_Point3d point2(originPoints[1]);
        const GLC_Point3d point3(originPoints[2]);

        const GLC_Vector3d edge1(point2 - point1);
        const GLC_Vector3d edge2(point3 - point2);

        GLC_Vector3d polygonPlaneNormal(edge1 ^ edge2);
        polygonPlaneNormal.normalize();

        // Create the transformation matrix
        GLC_Matrix4x4 transformation;

        GLC_Vector3d rotationAxis(polygonPlaneNormal ^ Z_AXIS);
        if (!rotationAxis.isNull())
        {
            const double angle = acos(polygonPlaneNormal * Z_AXIS);
            transformation.setMatRot(rotationAxis, angle);
        }

        QList<GLC_Point2d> polygon;
        // Transform the polygon vertices
        for (int i = 0; i < size; ++i)
        {
            originPoints[i] = transformation * originPoints[i];
            // Create the 2d vector
            polygon << originPoints[i].toVector2d(Z_AXIS);
        }
        // Create the index
        QList<int> index = face;

        const bool faceIsCounterclockwise = isCounterclockwiseOrdered(polygon);

        if (!faceIsCounterclockwise)
        {
            //qDebug() << "face Is Not Counterclockwise";
            const int max = size / 2;
            for (int i = 0; i < max; ++i)
            {
                polygon.swap(i, size - 1 - i);
                int temp = face[i];
                face[i] = face[size - 1 - i];
                face[size - 1 - i] = temp;
            }
        }

        QList<int> tList;
        triangulate(polygon, index, tList);
        size = tList.size();
        for (int i = 0; i < size; i += 3)
        {
            // Avoid normal problems
            if (faceIsCounterclockwise)
            {
                pIndexList->append(indexMap.value(face[tList[i]]));
                pIndexList->append(indexMap.value(face[tList[i + 1]]));
                pIndexList->append(indexMap.value(face[tList[i + 2]]));
            }
            else
            {
                pIndexList->append(indexMap.value(face[tList[i + 2]]));
                pIndexList->append(indexMap.value(face[tList[i + 1]]));
                pIndexList->append(indexMap.value(face[tList[i]]));
            }
        }
        Q_ASSERT(size == pIndexList->size());
    }
}
bool BernoulliRBM::train_(MatrixFloat &data){

    const UINT numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    numOutputDimensions = numHiddenUnits;
    numVisibleUnits = numInputDimensions;

    trainingLog << "NumInputDimensions: " << numInputDimensions << std::endl;
    trainingLog << "NumOutputDimensions: " << numOutputDimensions << std::endl;

    if( randomizeWeightsForTraining ){

        //Init the weights matrix
        weightsMatrix.resize(numHiddenUnits, numVisibleUnits);

        Float a = 1.0 / numVisibleUnits;
        for(UINT i=0; i<numHiddenUnits; i++) {
            for(UINT j=0; j<numVisibleUnits; j++) {
                weightsMatrix[i][j] = rand.getRandomNumberUniform(-a, a);
            }
        }

        //Init the bias units
        visibleLayerBias.resize( numVisibleUnits );
        hiddenLayerBias.resize( numHiddenUnits );
        std::fill(visibleLayerBias.begin(),visibleLayerBias.end(),0);
        std::fill(hiddenLayerBias.begin(),hiddenLayerBias.end(),0);

    }else{
        if( weightsMatrix.getNumRows() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix row size does not match the number of hidden units!" << std::endl;
            return false;
        }
        if( weightsMatrix.getNumCols() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix column size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( visibleLayerBias.size() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Visible layer bias size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( hiddenLayerBias.size() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Hidden layer bias size does not match the number of hidden units!" << std::endl;
            return false;
        }
    }

    //Flag that the model has been trained in case the user wants to save the model during a training iteration using an observer
    trained = true;

    //Make sure the data is scaled between [0 1]
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = grt_scale(data[i][j], ranges[j].minValue, ranges[j].maxValue, 0.0, 1.0);
            }
        }
    }

    const UINT numBatches = static_cast<UINT>( ceil( Float(numTrainingSamples)/batchSize ) );

    //Setup the batch indices
    Vector< BatchIndexs > batchIndexs( numBatches );
    UINT startIndex = 0;
    for(UINT i=0; i<numBatches; i++){
        batchIndexs[i].startIndex = startIndex;
        batchIndexs[i].endIndex = startIndex + batchSize;

        //Make sure the last batch end index is not larger than the number of training examples
        if( batchIndexs[i].endIndex >= numTrainingSamples ){
            batchIndexs[i].endIndex = numTrainingSamples;
        }

        //Get the batch size
        batchIndexs[i].batchSize = batchIndexs[i].endIndex - batchIndexs[i].startIndex;

        //Set the start index for the next batch
        startIndex = batchIndexs[i].endIndex;
    }

    Timer timer;
    UINT i,j,n,epoch,noChangeCounter = 0;
    Float startTime = 0;
    Float alpha = learningRate;
    Float error = 0;
    Float err = 0;
    Float delta = 0;
    Float lastError = 0;
    Vector< UINT > indexList(numTrainingSamples);
    TrainingResult trainingResult;
    MatrixFloat wT( numVisibleUnits, numHiddenUnits );       //Stores a transposed copy of the weights vector
    MatrixFloat vW( numHiddenUnits, numVisibleUnits );       //Stores the weight velocity updates
    MatrixFloat tmpW( numHiddenUnits, numVisibleUnits );     //Stores the weight values that will be used to update the main weights matrix at each batch update
    MatrixFloat v1( batchSize, numVisibleUnits );            //Stores the real batch data during a batch update
    MatrixFloat v2( batchSize, numVisibleUnits );            //Stores the sampled batch data during a batch update
    MatrixFloat h1( batchSize, numHiddenUnits );             //Stores the hidden states given v1 and the current weightsMatrix
    MatrixFloat h2( batchSize, numHiddenUnits );             //Stores the sampled hidden states given v2 and the current weightsMatrix
    MatrixFloat c1( numHiddenUnits, numVisibleUnits );       //Stores h1' * v1
    MatrixFloat c2( numHiddenUnits, numVisibleUnits );       //Stores h2' * v2
    MatrixFloat vDiff( batchSize, numVisibleUnits );         //Stores the difference between v1-v2
    MatrixFloat hDiff( batchSize, numHiddenUnits );          //Stores the difference between h1-h2 (fixed: was sized with numVisibleUnits)
    MatrixFloat cDiff( numHiddenUnits, numVisibleUnits );    //Stores the difference between c1-c2
    VectorFloat vDiffSum( numVisibleUnits );                 //Stores the column sum of vDiff
    VectorFloat hDiffSum( numHiddenUnits );                  //Stores the column sum of hDiff
    VectorFloat visibleLayerBiasVelocity( numVisibleUnits ); //Stores the velocity update of the visibleLayerBias
    VectorFloat hiddenLayerBiasVelocity( numHiddenUnits );   //Stores the velocity update of the hiddenLayerBias

    //Set all the velocity weights to zero
    vW.setAllValues( 0 );
    std::fill(visibleLayerBiasVelocity.begin(),visibleLayerBiasVelocity.end(),0);
    std::fill(hiddenLayerBiasVelocity.begin(),hiddenLayerBiasVelocity.end(),0);

    //Randomize the order that the training samples will be used in
    for(UINT i=0; i<numTrainingSamples; i++) indexList[i] = i;
    if( randomiseTrainingOrder ){
        std::random_shuffle(indexList.begin(), indexList.end());
    }

    //Start the main training loop
    timer.start();
    for(epoch=0; epoch<maxNumEpochs; epoch++) {
        startTime = timer.getMilliSeconds();
        error = 0;

        //Randomize the batch order
        std::random_shuffle(batchIndexs.begin(),batchIndexs.end());

        //Run each of the batch updates
        for(UINT k=0; k<numBatches; k+=batchStepSize){

            //Resize the data matrices, the matrices will only be resized if the rows/cols are different
            v1.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h1.resize( batchIndexs[k].batchSize, numHiddenUnits );
            v2.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h2.resize( batchIndexs[k].batchSize, numHiddenUnits );

            //Setup the data pointers, using data pointers saves a few ms on large matrix updates
            Float **w_p = weightsMatrix.getDataPointer();
            Float **wT_p = wT.getDataPointer();
            Float **vW_p = vW.getDataPointer();
            Float **data_p = data.getDataPointer();
            Float **v1_p = v1.getDataPointer();
            Float **v2_p = v2.getDataPointer();
            Float **h1_p = h1.getDataPointer();
            Float **h2_p = h2.getDataPointer();
            Float *vlb_p = &visibleLayerBias[0];
            Float *hlb_p = &hiddenLayerBias[0];

            //Get the batch data
            UINT index = 0;
            for(i=batchIndexs[k].startIndex; i<batchIndexs[k].endIndex; i++){
                for(j=0; j<numVisibleUnits; j++){
                    v1_p[index][j] = data_p[ indexList[i] ][j];
                }
                index++;
            }

            //Copy a transposed version of the weights matrix, this is used to compute h1 and h2
            for(i=0; i<numHiddenUnits; i++)
                for(j=0; j<numVisibleUnits; j++)
                    wT_p[j][i] = w_p[i][j];

            //Compute h1
            h1.multiple(v1, wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h1_p[n][i] = sigmoidRandom( h1_p[n][i] + hlb_p[i] );
                }
            }

            //Compute v2
            v2.multiple(h1, weightsMatrix);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numVisibleUnits; i++){
                    v2_p[n][i] = sigmoidRandom( v2_p[n][i] + vlb_p[i] );
                }
            }

            //Compute h2
            h2.multiple(v2, wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h2_p[n][i] = grt_sigmoid( h2_p[n][i] + hlb_p[i] );
                }
            }

            //Compute c1, c2 and the difference between v1-v2
            c1.multiple(h1, v1, true);
            c2.multiple(h2, v2, true);
            vDiff.subtract(v1, v2);

            //Compute the column sums of vDiff
            for(j=0; j<numVisibleUnits; j++){
                vDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    vDiffSum[j] += vDiff[i][j];
                }
            }

            //Compute the difference between h1 and h2
            hDiff.subtract(h1, h2);
            for(j=0; j<numHiddenUnits; j++){
                hDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    hDiffSum[j] += hDiff[i][j];
                }
            }

            //Compute the difference between c1 and c2
            cDiff.subtract(c1, c2);

            //Update the weight velocities
            for(i=0; i<numHiddenUnits; i++){
                for(j=0; j<numVisibleUnits; j++){
                    vW_p[i][j] = ((momentum * vW_p[i][j]) + (alpha * cDiff[i][j])) / batchIndexs[k].batchSize;
                }
            }
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBiasVelocity[i] = ((momentum * visibleLayerBiasVelocity[i]) + (alpha * vDiffSum[i])) / batchIndexs[k].batchSize;
            }
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBiasVelocity[i] = ((momentum * hiddenLayerBiasVelocity[i]) + (alpha * hDiffSum[i])) / batchIndexs[k].batchSize;
            }

            //Update the weights
            weightsMatrix.add( vW );

            //Update the bias for the visible layer
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBias[i] += visibleLayerBiasVelocity[i];
            }

            //Update the bias for the hidden layer
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBias[i] += hiddenLayerBiasVelocity[i];
            }

            //Compute the reconstruction error
            err = 0;
            for(i=0; i<batchIndexs[k].batchSize; i++){
                for(j=0; j<numVisibleUnits; j++){
                    err += SQR( v1[i][j] - v2[i][j] );
                }
            }
            error += err / batchIndexs[k].batchSize;
        }
        error /= numBatches;
        delta = lastError - error;
        lastError = error;

        trainingLog << "Epoch: " << epoch+1 << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Learning rate: " << alpha;
        trainingLog << " Momentum: " << momentum;
        trainingLog << " Average reconstruction error: " << error;
        trainingLog << " Delta: " << delta << std::endl;

        //Update the learning rate
        alpha *= learningRateUpdate;

        trainingResult.setClassificationResult(epoch, error, this);
        trainingResults.push_back(trainingResult);
        trainingResultsObserverManager.notifyObservers( trainingResult );

        //Check for convergence
        if( fabs(delta) < minChange ){
            if( ++noChangeCounter >= minNumEpochs ){
                trainingLog << "Stopping training. MinChange limit reached!" << std::endl;
                break;
            }
        }else noChangeCounter = 0;
    }

    trainingLog << "Training complete after " << epoch << " epochs. Total training time: " << timer.getMilliSeconds()/1000.0 << " seconds" << std::endl;

    trained = true;

    return true;
}
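// Note on the update rule implemented above: the inner batch loop is
// single-step contrastive divergence (CD-1). With mini-batch size B,
// learning rate alpha and momentum m, the weight velocity update is
//     vW <- ( m * vW + alpha * (h1' * v1 - h2' * v2) ) / B
// and the bias velocities use the column sums of (v1 - v2) and (h1 - h2)
// in the same way, after which the velocities are added to the parameters.
//
// Minimal usage sketch (hypothetical; the exact setter names are not shown
// in this file and are an assumption):
//     GRT::MatrixFloat data( numSamples, numDims );  // fill with training data
//     GRT::BernoulliRBM rbm;                         // runs this train_() internally
//     rbm.setNumHiddenUnits( 64 );                   // assumed setter
//     bool ok = rbm.train_( data );                  // rescales to [0 1] if useScaling is set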