// Builds the GLSL source fragment that evaluates the loaded neural network:
// one "float vLLLLNNNN = ..." statement per neuron, reading coefficients from
// the weights[] uniform array in declaration order.
// On success *ret receives a malloc'd NUL-terminated string and the return
// value is its length (excluding the terminator).
static int createEQ(char **ret) {
    int len = 0;
    int i, j, k;
    int lastlen;
    // BUG FIX: 'parts' was uninitialized. If neither loop below ever calls
    // addPart() (e.g. a degenerate layer configuration), the drain loop at the
    // bottom would read indeterminate memory. Start with an empty list.
    Part *parts = NULL;
    Part **eoParts = &parts;
    int weightIndex = 0;
    int starti = 1;     // first layer computed with activate(); RBF handles layer 1 itself
    char *dest;
    char *str;

    if (nnData.isRBF) {
        // RBF networks: layer 1 is produced by rbf() from the two inputs,
        // consuming three weights (center x, center y, width) per neuron.
        starti++;
        for (j = 0; j < nnData.layerSizes[1]; j++) {
            len += lastlen = asprintf(&dest,
                " float v0001%04x = rbf(vec2(v00000000, v00000001), vec2(weights[%d], weights[%d]), weights[%d]);\n",
                j, weightIndex, weightIndex + 1, weightIndex + 2);
            weightIndex += 3;
            addPart(dest, lastlen, &eoParts);
        }
    }

    // Remaining layers: each neuron is activate(bias + sum(weight * prev)).
    // The final layer's single output is named "val".
    for (i = starti; i < nnData.layers; i++) {
        for (j = 0; j < nnData.layerSizes[i]; j++) {
            if (i == nnData.layers - 1)
                len += lastlen = asprintf(&dest, " float val = activate(weights[%d]", weightIndex++);
            else
                len += lastlen = asprintf(&dest, " float v%04x%04x = activate(weights[%d]", i, j, weightIndex++);
            addPart(dest, lastlen, &eoParts);

            for (k = 0; k < nnData.layerSizes[i - 1]; k++) {
                char *source;
                len += lastlen = asprintf(&source, " + weights[%d] * v%04x%04x", weightIndex++, i - 1, k);
                addPart(source, lastlen, &eoParts);
            }

            len += lastlen = 3;  /* strlen(");\n") */
            addPart(strdup(");\n"), lastlen, &eoParts);
        }
    }

    // Concatenate all parts into one buffer, freeing the list as we go.
    str = (char *)malloc(len + 1);
    *ret = str;
    while (parts != NULL) {
        Part *next;
        memcpy(str, parts->text, parts->len);
        str += parts->len;
        next = parts->next;
        free(parts->text);
        free(parts);
        parts = next;
    }
    *str = 0;
    return len;
}
int Branch::makePart(bool isHandler, QJsonArray *arr) { for (int i=0; i < arr->count(); i++) { if (arr->at(i).isObject() != true) return -1; QJsonObject obj = arr->at(i); if (obj.value("Type").isString() != true) return -1; if (obj.value("Options").isObject() != true) return -1; IConnector *part; if (isHandler) { part = HandlerFact::MakeHandler(obj.value("Type").toString(), obj.value("Options").toObject()); } else { part = TransportFact::makeTransport(obj.value("Type").toString(), obj.value("Options").toObject()); } if (part == NULL) return -1; addPart(part); } return 0; }
// Appends a "limit" part (the closing counterpart of a start part) and links
// the start part to it: the start part at index 'start' records the index the
// limit part is about to occupy, so the pair can be matched when walking parts.
void MessagePattern::addLimitPart(int32_t start, UMessagePatternPartType type,
                                  int32_t index, int32_t length, int32_t value,
                                  UErrorCode &errorCode) {
    // partsLength is the index the new part will get inside addPart().
    partsList->a[start].limitPartIndex=partsLength;
    addPart(type, index, length, value, errorCode);
}
// Takes as arguments a ciphertext-part p relative to s' and a key-switching // matrix W = W[s'->s], uses W to switch p relative to (1,s), and adds the // result to *this. // It is assumed that the part p does not include any of the special primes, // and that if *this is not an empty ciphertext then its primeSet is // p.getIndexSet() \union context.specialPrimes void Ctxt::keySwitchPart(const CtxtPart& p, const KeySwitch& W) { FHE_TIMER_START; // no special primes in the input part assert(context.specialPrimes.disjointFrom(p.getIndexSet())); // For parts p that point to 1 or s, only scale and add if (p.skHandle.isOne() || p.skHandle.isBase(W.toKeyID)) { CtxtPart pp = p; pp.addPrimesAndScale(context.specialPrimes); addPart(pp, /*matchPrimeSet=*/true); return; } // some sanity checks assert(W.fromKey == p.skHandle); // the handles must match // Compute the number of digits that we need and the esitmated // added noise from switching this ciphertext part. long nDigits; NTL::xdouble addedNoise; std::tie(nDigits,addedNoise)= keySwitchNoise(p, pubKey, W.ptxtSpace); // Break the ciphertext part into digits, if needed, and scale up these // digits using the special primes. This is the most expensive operation // during homormophic evaluation, so it should be thoroughly optimized. vector<DoubleCRT> polyDigits; p.breakIntoDigits(polyDigits, nDigits); // Finally we multiply the vector of digits by the key-switching matrix keySwitchDigits(W, polyDigits); noiseVar += addedNoise; // update the noise estimate } // restore random state upon destruction of the RandomState, see NumbTh.h
// Splits the dotted DNS name into labels, in place: every '.' becomes a NUL
// terminator and each resulting label is added as a part.
// Returns the resulting number of parts.
int Samurai::IO::Net::DNS::Name::split() {
    char* labelStart = name;
    for (size_t pos = 0; pos < size; pos++) {
        if (name[pos] != '.')
            continue;
        name[pos] = 0;
        addPart(new Label((const char*) labelStart, (uint8_t) strlen(labelStart)));
        labelStart = &name[pos + 1];
    }
    // Trailing label, present unless the name ended with a dot.
    if (strlen(labelStart))
        addPart(new Label((const char*) labelStart, (uint8_t) strlen(labelStart)));
    return countParts();
}
// Constructs a ship of the given length on the given sea: every hull segment
// is tagged with the ship's id and registered as a part.
Ship::Ship(int size, Sea *sea, int id)
    : size(size), I(0), sea(sea), id(id)
{
    std::cout << "creat statek" << std::endl;
    for (int segment = 0; segment < size; ++segment) {
        tab[segment].setId(id);
        addPart(&tab[segment]);
    }
}
bool FstReader::addTextures(const QFileInfo& texdir) { QStringList filter; filter << "*.png" << "*.tif" << "*.jpg" << "*.jpeg"; QFileInfoList list = QDir(texdir.filePath()).entryInfoList(filter, QDir::Files | QDir::AllDirs | QDir::NoDotAndDotDot | QDir::NoSymLinks); foreach (QFileInfo info, list) { if (info.isFile()) { // Compress and copy if (!compressFile(info.filePath(), _zipDir.path() + "/" + info.fileName())) { return false; } _totalSize += info.size(); if (!addPart(_zipDir.path() + "/" + info.fileName(), QString("texture%1").arg(++_texturesCount))) { return false; } } else if (info.isDir()) { if (!addTextures(info)) { return false; } } else { qDebug() << "[DEBUG] Invalid file type : " << info.filePath(); } } return true; }
int QgsVectorLayerEditUtils::addPart( const QList<QgsPoint> &points, QgsFeatureId featureId ) { QgsPointSequenceV2 l; for ( QList<QgsPoint>::const_iterator it = points.constBegin(); it != points.constEnd(); ++it ) { l << QgsPointV2( *it ); } return addPart( l, featureId ); }
// Convenience overload: converts the QgsPointXY list into a QgsPointSequence
// and delegates to the sequence-based addPart().
QgsGeometry::OperationResult QgsVectorLayerEditUtils::addPart( const QList<QgsPointXY> &points, QgsFeatureId featureId )
{
  QgsPointSequence ring;
  for ( int i = 0; i < points.size(); ++i )
  {
    ring << QgsPoint( points.at( i ) );
  }
  return addPart( ring, featureId );
}
// Assembles the gun tower from two parts (static base + rotating turret,
// added in that turret-then-base order) and arms its weapon.
GunTower::GunTower():
    m_target(NULL)
{
    // Indestructible base
    Part basePart(BASE_ID);
    basePart.setDestructible(false);
    basePart.setTexture(Resources::getTexture("entities/guntower-base.png"));

    // Turret pivoting around its texture center
    Part turretPart(CANON_ID, 16);
    const sf::Texture& turretTex = Resources::getTexture("entities/guntower-turret.png");
    turretPart.setTexture(turretTex);
    turretPart.setOrigin(turretTex.getSize().x / 2, turretTex.getSize().y / 2);

    addPart(turretPart, turretTex.getSize().x / 2, turretTex.getSize().y / 2);
    addPart(basePart, 0, BASE_OFFSET);

    m_weapon.init("laser-pink");
    m_weapon.setOwner(this);
    m_weapon.setPosition(turretTex.getSize().x / 2.f, turretTex.getSize().y / 2.f);
}
void Snake::stealParts(int pos, Snake* other){ for(int i = pos; i < other->getCount(); i++){ SnakePart* temp = other->getPart(i); addPart(temp); other->removePart(other->indexOfPart(temp)); other->substract( temp->getValue() ); addPoints( temp->getValue() ); i--; } }
/**
 * Creates a new document part.
 * The document is created through the configured editor component and
 * registered with this manager before being handed back.
 * @return A pointer to the new document
 */
KTextEditor::Document* EditorManager::add()
{
    KTextEditor::Document* doc = KTextEditor::EditorChooser::createDocument(this);
    addPart(doc);
    return doc;
}
// Deep-copies another skin: bind shape, root joint, the full joint list and all
// mesh-skin parts. context.cloneMap is consulted so that nodes already cloned
// elsewhere in the scene graph are reused instead of being duplicated.
void C3DMeshSkin::copyFrom(C3DMeshSkin* skin, C3DNode::CloneContext& context)
{
    _bindShape = skin->_bindShape;

    // Reuse the already-cloned root joint if the clone map has it; otherwise
    // clone it now.
    std::map<const C3DNode*, C3DNode*>::iterator it = context.cloneMap.find(skin->_rootJoint);
    C3DBone* rootbone = NULL;
    if (it == context.cloneMap.end())
        rootbone = (C3DBone*)skin->_rootJoint->clone(context);
    else
        rootbone = (C3DBone*)it->second;

    setJointCount(skin->getJointCount());
    setRootJoint(rootbone);

    size_t i;
    for (i = 0; i < skin->_joints.size(); i++)
    {
        std::string strid = skin->_joints[i]->getId();
        // strid += context.idSuffix;
        //C3DBone* bone = (strid == rootbone->getId() ? rootbone : (C3DBone*)rootbone->findNode(strid.c_str()));
        //C3DBone* bone = (C3DBone*)context.cloneMap[skin->_joints[i]];
        ///....
        // Look up (or lazily clone and register) each joint through the map,
        // so joints shared with other clones stay shared.
        std::map<const C3DNode*, C3DNode*>::iterator itr = context.cloneMap.find(skin->_joints[i]);
        C3DBone* bone = NULL;
        if (itr != context.cloneMap.end())
        {
            bone = static_cast<C3DBone*>(itr->second);
        }
        else
        {
            C3DBone* newNode = static_cast<C3DBone*>(skin->_joints[i]->clone(context));
            if (newNode)
            {
                context.cloneMap[skin->_joints[i]] = newNode;
                bone = newNode;
            }
        }
        //.....
        setJoint(bone, i);
        //bone->release();
    }

    // Copy every skin part along with its index data.
    for (i = 0; i < skin->_partCount; i++)
    {
        addPart(skin->_parts[i]->_batchID, skin->_parts[i]->_offsetVertexIndex, skin->_parts[i]->_numVertexIndex);
        _parts[i]->setIndexData(&skin->_parts[i]->_indices[0], skin->_parts[i]->_indices.size());
    }
    setBonePartIndex(skin->getBonePartIndex());
}
//-------------------------------------------------------------- void testApp::setup(){ ofEnableSmoothing(); ofBackground(0); bFill = true; setupColores(); myMesh.clear(); puntos.clear(); vels.clear(); // poner puntos en circulo nPuntosBase=0; zentro = ofVec2f(ofGetWidth()/2.0, ofGetHeight()/2.0); radio = ofGetHeight()/2.0*0.8; for(int i=0; i<60; i++) { float ang=(float)TWO_PI/60.0*i; puntos.push_back(ofPoint(zentro.x+radio*cos(ang), zentro.y+radio*sin(ang))); vels.push_back(ofPoint(0,0)); nPuntosBase++; } // poner cuadricula de puntos int nLado = 11; float lado = 2*radio; for(int j=0; j<nLado; j++) { for(int i=0; i<nLado; i++) { ofPoint pTmp = ofPoint(-lado/2+i*lado/nLado,-lado/2+j*lado/nLado); if(pTmp.length()<radio) { pTmp+=zentro; puntos.push_back(pTmp); vels.push_back(ofPoint(0,0)); nPuntosBase++; } } } // int nPtsMoviles = 1; // for(int i=0; i<nPtsMoviles; i++) { addPart(); // } // radio1 = ofGetHeight()/2.0*0.25; // for(int i=0; i<60; i++) { // float ang=(float)TWO_PI/60.0*i; // puntos.push_back(ofPoint(zentro.x+radio1*cos(ang), zentro.y+radio1*sin(ang))); // vels.push_back(ofPoint(ofRandom(-2,2), ofRandom(-2,2)) ); // } triangulation.addPoints(puntos); triangulation.triangulate(); addColors(); }
//-------------------------------------------------------------------------------------------------- /// //-------------------------------------------------------------------------------------------------- void ModelBasicTreeNode::mergeParts(double maxExtent, uint minimumPrimitiveCount) { uint numChildren = childCount(); std::vector<ModelBasicTreeNode*> childrenToBeRemoved; uint i; for (i = 0; i < numChildren; i++) { ModelBasicTreeNode* c = child(i); CVF_ASSERT(c); c->mergeParts(maxExtent, minimumPrimitiveCount); // Devour any child node being too small. // Any candidate child node for being devoured is a leaf node because we have already recursed downwards. // Check if this is a leaf node if (c->childCount() == 0) { size_t primCount = c->primitiveCount(); if (primCount < minimumPrimitiveCount) { size_t numChildParts = 0; if (c->m_partList.notNull()) { numChildParts = c->m_partList->partCount(); } size_t j; for (j = 0; j < numChildParts; j++) { addPart(c->m_partList->part(j)); } childrenToBeRemoved.push_back(c); } } } if (childrenToBeRemoved.size() > 0) { // Remove children from last to first index to make sure the indices are valid std::vector<ModelBasicTreeNode*>::iterator it; for (it = childrenToBeRemoved.begin(); it != childrenToBeRemoved.end(); it++) { removeChild(*it); } } // Merge too small parts in own list of parts. Any child node being devoured by the above code is // in own list of parts. They are too small, but will be merged also by the statement below. m_partList->mergeParts(maxExtent, minimumPrimitiveCount); }
// Rebuilds the mesh occupancy grid from scratch: zeroes every cell, then
// projects each particle of the watched type (wlmtype) onto the mesh.
void Mesh::meshFill(long npart, int wlmtype, std::vector<Particle >* pvec) {
    const int cellCount = dim[0] * dim[1];
    for (int cell = 0; cell < cellCount; cell++) {
        data[cell] = 0;
    }
    for (int i = 0; i < npart; i++) {
        /* calculate position of particle on mesh and add it to all where it belongs */
        if ((*pvec)[i].type == wlmtype)
            addPart((*pvec)[i].pos.x, (*pvec)[i].pos.y);
    }
}
// Add a constant polynomial void Ctxt::addConstant(const DoubleCRT& dcrt, double size) { FHE_TIMER_START; // If the size is not given, we use the default value phi(m)*(ptxtSpace/2)^2 if (size < 0.0) { // WARNING: the following line is written just so to prevent overflow size = ((double) context.zMStar.getPhiM()) * ptxtSpace*ptxtSpace /4.0; } // Scale the constant, then add it to the part that points to one long f = (ptxtSpace>2)? rem(context.productOfPrimes(primeSet),ptxtSpace): 1; if (f!=1) { DoubleCRT tmp = dcrt; tmp *= f; addPart(tmp, SKHandle(0,1,0)); } else addPart(dcrt, SKHandle(0,1,0)); noiseVar += (size*f)*f; FHE_TIMER_STOP; }
// Attaches a MIME part to this mail. If the mail is still single-part, its
// body is first demoted into a part of its own and the headers are switched
// to Multipart/Mixed with a fresh boundary. The boundary is then validated
// (regenerated until testBoundary() accepts it) before the new part is added.
// Always returns true.
bool LHMail::attachPart(LHMailBase* mp) {
    setSendDataValid( FALSE );

    if (isSinglePart()) {
        // First we need to change the body into one of the parts.
        LHMailPart* mpb = new LHMailPart(this);
        mpb->setEncoding(encoding());
        mpb->setString(messageBody());
        addPart(mpb);
        header().setData("Content-Transfer-Encoding", "7bit");
        header().setData("Content-Type", "Multipart/Mixed");
        header().removeParameter("Content-Type", "charset");
        header().setParameter("Content-Type", "boundary", LHMime::getBoundaryString());
    }

    // Check the boundary: keep regenerating until it passes testBoundary().
    // BUG FIX: this loop was written as while(0), so its body never ran and
    // the boundary was never validated against the message content.
    while (true) {
        QString bound = header().getParameter("Content-Type", "boundary");
        if (testBoundary(bound)) {
            break;
        } else {
            header().setParameter("Content-Type", "boundary", LHMime::getBoundaryString());
        }
    }

    // Then add the file part.
    addPart(mp);
    return true;
}
// Add a constant polynomial void Ctxt::addConstant(const DoubleCRT& dcrt, double size) { // If the size is not given, we use the default value phi(m)*(ptxtSpace/2)^2 if (size < 0.0) { // WARNING: the following line is written to prevent integer overflow size = ((double) context.zMStar.getPhiM()) * ptxtSpace*ptxtSpace /4.0; } // Scale the constant, then add it to the part that points to one long f = (ptxtSpace>2)? rem(context.productOfPrimes(primeSet),ptxtSpace): 1; noiseVar += (size*f)*f; IndexSet delta = dcrt.getIndexSet() / primeSet; // set minus if (f==1 && empty(delta)) { // just add it addPart(dcrt, SKHandle(0,1,0)); return; } // work with a local copy DoubleCRT tmp = dcrt; if (!empty(delta)) tmp.removePrimes(delta); if (f!=1) tmp *= f; addPart(tmp, SKHandle(0,1,0)); }
void Core::isOnFood() { t_pos head; head = getHead(); if (head.x == this->_food.x && head.y == this->_food.y) { this->_lib->soundEat(); _isEaten = true; addPart(); addItem(FOOD); this->_score += SCORE; } }
// Parses a simple argument style: the text up to the matching closing brace,
// honoring nested {..} pairs and apostrophe-quoted literal text.
// @param index position just after the style-introducing ',' inside msg
// @return the index of the style's closing '}' on success, 0 on error
int32_t MessagePattern::parseSimpleStyle(int32_t index, UParseError *parseError, UErrorCode &errorCode) {
    if(U_FAILURE(errorCode)) {
        return 0;
    }
    int32_t start=index;
    int32_t nestedBraces=0;
    while(index<msg.length()) {
        UChar c=msg.charAt(index++);
        if(c==u_apos) {
            // Treat apostrophe as quoting but include it in the style part.
            // Find the end of the quoted literal text.
            index=msg.indexOf(u_apos, index);
            if(index<0) {
                // Quoted literal argument style text reaches to the end of the message.
                setParseError(parseError, start);
                errorCode=U_PATTERN_SYNTAX_ERROR;
                return 0;
            }
            // skip the quote-ending apostrophe
            ++index;
        } else if(c==u_leftCurlyBrace) {
            ++nestedBraces;
        } else if(c==u_rightCurlyBrace) {
            if(nestedBraces>0) {
                --nestedBraces;
            } else {
                // Matching outer '}': record the whole style as one part.
                int32_t length=--index-start;
                if(length>Part::MAX_LENGTH) {
                    setParseError(parseError, start);  // Argument style text too long.
                    errorCode=U_INDEX_OUTOFBOUNDS_ERROR;
                    return 0;
                }
                addPart(UMSGPAT_PART_TYPE_ARG_STYLE, start, length, 0, errorCode);
                return index;
            }
        }  // c is part of literal text
    }
    setParseError(parseError, 0);  // Unmatched '{' braces in message.
    errorCode=U_UNMATCHED_BRACES;
    return 0;
}
// Completion handler for an async part read: on success the received part is
// traced and stored; on failure the session is marked as unreadable.
void DownloadSession::readHandler(const boost::system::error_code &err, std::size_t bytes)
{
    if (err) {
        std::string str("RECEIVE FALSE: ");
        str += std::to_string(m_receivedPart.m_partSize) + " "
             + std::to_string(m_receivedPart.m_partHash) + " "
             + std::to_string(m_receivedPart.m_partNumber);
        display(str);
        setEnd(StatusValue::notRead);
        return;
    }

    // Trace only the part number on the success path.
    display(std::to_string(m_receivedPart.m_partNumber));
    addPart(m_receivedPart);
}
// Appends an ARG_DOUBLE part. A Part can only carry an int32 value, so the
// double itself is stored in the side list numericValuesList and the part
// records the index into that list.
void MessagePattern::addArgDoublePart(double numericValue, int32_t start, int32_t length, UErrorCode &errorCode) {
    if(U_FAILURE(errorCode)) {
        return;
    }
    int32_t numericIndex=numericValuesLength;
    if(numericValuesList==NULL) {
        // Lazily allocate the side list of doubles.
        numericValuesList=new MessagePatternDoubleList();
        if(numericValuesList==NULL) {
            errorCode=U_MEMORY_ALLOCATION_ERROR;
            return;
        }
    } else if(!numericValuesList->ensureCapacityForOneMore(numericValuesLength, errorCode)) {
        return;
    } else {
        // The part's value field cannot represent an index beyond MAX_VALUE.
        if(numericIndex>Part::MAX_VALUE) {
            errorCode=U_INDEX_OUTOFBOUNDS_ERROR;
            return;
        }
    }
    numericValuesList->a[numericValuesLength++]=numericValue;
    addPart(UMSGPAT_PART_TYPE_ARG_DOUBLE, start, length, numericIndex, errorCode);
}
void ArticulatedModel::loadHeightfield(const Specification& specification) { Part* part = addPart("root"); Geometry* geom = addGeometry("geom"); Mesh* mesh = addMesh("mesh", part, geom); mesh->material = UniversalMaterial::create(); shared_ptr<Image1> im = Image1::fromFile(specification.filename); geom->cpuVertexArray.hasTangent = false; geom->cpuVertexArray.hasTexCoord0 = true; const bool spaceCentered = true; const bool generateBackFaces = specification.heightfieldOptions.generateBackfaces; const Vector2& textureScale = specification.heightfieldOptions.textureScale; Array<Point3> vertex; Array<Point2> texCoord; MeshAlg::generateGrid (vertex, texCoord, mesh->cpuIndexArray, im->width(), im->height(), textureScale, spaceCentered, generateBackFaces, CFrame(Matrix4::scale((float)im->width(), 1.0, (float)im->height()).upper3x3()), im); // Copy the vertex data into the mesh geom->cpuVertexArray.vertex.resize(vertex.size()); CPUVertexArray::Vertex* vertexPtr = geom->cpuVertexArray.vertex.getCArray(); for (uint32 i = 0; i < (uint32)vertex.size(); ++i) { CPUVertexArray::Vertex& v = vertexPtr[i]; v.position = vertex[i]; v.texCoord0 = texCoord[i]; v.tangent.x = v.normal.x = fnan(); } // for }
// Lets the user pick a .fst model description, then compresses the .fst file
// and every asset it references (fbx, textures, LODs) into the zip staging
// directory while building the multipart upload. Returns false on any missing
// file, failed compression, or when the accumulated size exceeds MAX_SIZE.
bool FstReader::zip() {
    // File Dialog
    QString filename = QFileDialog::getOpenFileName(NULL, "Select your .fst file ...",
        QStandardPaths::writableLocation(QStandardPaths::DownloadLocation), "*.fst");

    // First we check the FST file
    QFile fst(filename);
    if (!fst.open(QFile::ReadOnly | QFile::Text)) {
        qDebug() << "[ERROR] Could not open FST file : " << fst.fileName();
        return false;
    }

    // Compress and copy the fst
    if (!compressFile(QFileInfo(fst).filePath(), _zipDir.path() + "/" + QFileInfo(fst).fileName())) {
        return false;
    }
    _totalSize += QFileInfo(fst).size();
    if (!addPart(_zipDir.path() + "/" + QFileInfo(fst).fileName(), QString("fst"))) {
        return false;
    }
    qDebug() << "Reading FST file : " << QFileInfo(fst).filePath();

    // Let's read through the FST file; each line is "key = value".
    QTextStream stream(&fst);
    QList<QString> line;
    while (!stream.atEnd()) {
        line = stream.readLine().split(QRegExp("[ =]"), QString::SkipEmptyParts);
        if (line.isEmpty()) {
            continue;
        }

        // Abort as soon as the accumulated upload exceeds the cap.
        if (_totalSize > MAX_SIZE) {
            qDebug() << "[ERROR] Model too big, over " << MAX_SIZE << " Bytes.";
            return false;
        }

        // according to what is read, we modify the command
        if (line.first() == NAME_FIELD) {
            // Model name becomes a plain form field of the multipart request.
            QHttpPart textPart;
            textPart.setHeader(QNetworkRequest::ContentDispositionHeader, "form-data;"
                               " name=\"model_name\"");
            textPart.setBody(line[1].toUtf8());
            _dataMultiPart->append(textPart);
        } else if (line.first() == FILENAME_FIELD) {
            QFileInfo fbx(QFileInfo(fst).path() + "/" + line[1]);
            if (!fbx.exists() || !fbx.isFile()) { // Check existence
                qDebug() << "[ERROR] FBX file " << fbx.absoluteFilePath() << " doesn't exist.";
                return false;
            }
            // Compress and copy
            if (!compressFile(fbx.filePath(), _zipDir.path() + "/" + line[1])) {
                return false;
            }
            _totalSize += fbx.size();
            if (!addPart(_zipDir.path() + "/" + line[1], "fbx")) {
                return false;
            }
        } else if (line.first() == TEXDIR_FIELD) { // Check existence
            QFileInfo texdir(QFileInfo(fst).path() + "/" + line[1]);
            if (!texdir.exists() || !texdir.isDir()) {
                qDebug() << "[ERROR] Texture directory " << texdir.absolutePath() << " doesn't exist.";
                return false;
            }
            if (!addTextures(texdir)) { // Recursive compress and copy
                return false;
            }
        } else if (line.first() == LOD_FIELD) {
            QFileInfo lod(QFileInfo(fst).path() + "/" + line[1]);
            if (!lod.exists() || !lod.isFile()) { // Check existence
                qDebug() << "[ERROR] FBX file " << lod.absoluteFilePath() << " doesn't exist.";
                return false;
            }
            // Compress and copy
            if (!compressFile(lod.filePath(), _zipDir.path() + "/" + line[1])) {
                return false;
            }
            _totalSize += lod.size();
            if (!addPart(_zipDir.path() + "/" + line[1], QString("lod%1").arg(++_lodCount))) {
                return false;
            }
        }
    }
    _readyToSend = true;
    return true;
}
// Takes as arguments a ciphertext-part p relative to s' and a key-switching
// matrix W = W[s'->s], uses W to switch p relative to (1,s), and adds the
// result to *this.
// It is assumed that the part p does not include any of the special primes,
// and that if *this is not an empty ciphertext then its primeSet is
// p.getIndexSet() \union context.specialPrimes
void Ctxt::keySwitchPart(const CtxtPart& p, const KeySwitch& W)
{
  FHE_TIMER_START;

  // no special primes in the input part
  assert(context.specialPrimes.disjointFrom(p.getIndexSet()));

  // For parts p that point to 1 or s, only scale and add --
  // no actual key switching is needed in that case.
  if (p.skHandle.isOne() || p.skHandle.isBase(W.toKeyID)) {
    CtxtPart pp = p;
    pp.addPrimesAndScale(context.specialPrimes);
    addPart(pp, /*matchPrimeSet=*/true);
    return;
  }

  // some sanity checks
  assert(W.fromKey == p.skHandle); // the handles must match

  // Compute the number of digits that we need and the estimated added noise
  // from switching this ciphertext part.
  long pSpace = W.ptxtSpace;
  long nDigits = 0;
  xdouble addedNoise = to_xdouble(0.0);
  double sizeLeft = context.logOfProduct(p.getIndexSet());
  for (size_t i=0; i<context.digits.size() && sizeLeft>0.0; i++) {
    nDigits++;

    double digitSize = context.logOfProduct(context.digits[i]);
    if (sizeLeft<digitSize) digitSize=sizeLeft; // need only part of this digit

    // Added noise due to this digit is phi(m) * sigma^2 * pSpace^2 * |Di|^2/4,
    // where |Di| is the magnitude of the digit

    // WARNING: the following line is written just so to prevent overflow
    addedNoise += to_xdouble(context.zMStar.getPhiM()) * pSpace*pSpace
                  * xexp(2*digitSize) * context.stdev*context.stdev / 4.0;

    sizeLeft -= digitSize;
  }

  // Sanity-check: make sure that the added noise is not more than the special
  // primes can handle: After dividing the added noise by the product of all
  // the special primes, it should be smaller than the added noise term due
  // to modulus switching, i.e., keyWeight * phi(m) * pSpace^2 / 12
  long keyWeight = pubKey.getSKeyWeight(p.skHandle.getSecretKeyID());
  double phim = context.zMStar.getPhiM();
  double logModSwitchNoise = log((double)keyWeight)
                             +2*log((double)pSpace) +log(phim) -log(12.0);
  double logKeySwitchNoise = log(addedNoise)
                             -2*context.logOfProduct(context.specialPrimes);
  assert(logKeySwitchNoise < logModSwitchNoise);

  // Break the ciphertext part into digits, if needed, and scale up these
  // digits using the special primes. This is the most expensive operation
  // during homomorphic evaluation, so it should be thoroughly optimized.
  vector<DoubleCRT> polyDigits;
  p.breakIntoDigits(polyDigits, nDigits);

  // Finally we multiply the vector of digits by the key-switching matrix

  // An object to hold the pseudorandom ai's, note that it must be defined
  // with the maximum number of levels, else the PRG will go out of synch.
  // FIXME: This is a bug waiting to happen.
  DoubleCRT ai(context);

  // Set the first ai using the seed, subsequent ai's (if any) will
  // use the evolving RNG state (NOTE: this is not thread-safe)
  RandomState state;
  SetSeed(W.prgSeed);

  // Add the columns in, one by one
  DoubleCRT tmp(context, IndexSet::emptySet());

  for (unsigned long i=0; i<polyDigits.size(); i++) {
    ai.randomize();
    tmp = polyDigits[i]; // The operations below all use the IndexSet of tmp

    // add part*a[i] with a handle pointing to base of W.toKeyID
    tmp.Mul(ai, /*matchIndexSet=*/false);
    addPart(tmp, SKHandle(1,1,W.toKeyID), /*matchPrimeSet=*/true);

    // add part*b[i] with a handle pointing to one
    polyDigits[i].Mul(W.b[i], /*matchIndexSet=*/false);
    addPart(polyDigits[i], SKHandle(), /*matchPrimeSet=*/true);
  }
  noiseVar += addedNoise; // update the noise estimate
} // restore random state upon destruction of the RandomState, see NumbTh.h
// Loads a PLY file into a single part/mesh: positions only (normals flagged as
// undefined), with faces tessellated as convex polygons or decoded tristrips.
void ArticulatedModel::loadPLY(const Specification& specification) {

    // Read the data in
    name = FilePath::base(specification.filename);
    Part* part = addPart(name);
    Mesh* mesh = addMesh("mesh", part);
    mesh->material = Material::create();

    ParsePLY parseData;
    {
        BinaryInput bi(specification.filename, G3D_LITTLE_ENDIAN);
        parseData.parse(bi);
    }

    // Convert the format: positions only, no tangents or texture coordinates.
    part->cpuVertexArray.vertex.resize(parseData.numVertices);
    part->cpuVertexArray.hasTangent = false;
    part->cpuVertexArray.hasTexCoord0 = false;
    part->m_hasTexCoord0 = false;

    // The PLY format is technically completely flexible, so we have
    // to search for the location of the X, Y, and Z fields within each
    // vertex.
    int axisIndex[3];
    const std::string axisName[3] = {"x", "y", "z"};
    const int numVertexProperties = parseData.vertexProperty.size();
    for (int a = 0; a < 3; ++a) {
        axisIndex[a] = 0;
        for (int p = 0; p < numVertexProperties; ++p) {
            if (parseData.vertexProperty[p].name == axisName[a]) {
                axisIndex[a] = p;
                break;
            }
        }
    }

    for (int v = 0; v < parseData.numVertices; ++v) {
        CPUVertexArray::Vertex& vertex = part->cpuVertexArray.vertex[v];

        // Read the position
        for (int a = 0; a < 3; ++a) {
            vertex.position[a] = parseData.vertexData[v * numVertexProperties + axisIndex[a]];
        }

        // Flag the normal as undefined
        vertex.normal.x = fnan();
    }

    if (parseData.numFaces > 0) {
        // Read faces
        for (int f = 0; f < parseData.numFaces; ++f) {
            const ParsePLY::Face& face = parseData.faceArray[f];

            // Read and tessellate into triangles, assuming convex polygons
            for (int i = 2; i < face.size(); ++i) {
                mesh->cpuIndexArray.append(face[0], face[1], face[i]);
            }
        }
    } else {
        // Read tristrips
        for (int f = 0; f < parseData.numTriStrips; ++f) {
            const ParsePLY::TriStrip& triStrip = parseData.triStripArray[f];

            // Convert into an indexed triangle list and append to the end of the
            // index array. A -1 index restarts the strip; winding alternates per
            // triangle, so vertex order is flipped on every other face.
            bool clockwise = false;
            for (int i = 2; i < triStrip.size(); ++i) {
                if (triStrip[i] == -1) {
                    // Restart
                    clockwise = false;
                    // Skip not only this element, but the next two
                    i += 2;
                } else if (clockwise) {
                    // clockwise face
                    debugAssert(triStrip[i - 1] >= 0 && triStrip[i - 2] >= 0 && triStrip[i] >= 0);
                    mesh->cpuIndexArray.append(triStrip[i - 1], triStrip[i - 2], triStrip[i]);
                    clockwise = ! clockwise;
                } else {
                    // counter-clockwise face
                    debugAssert(triStrip[i - 1] >= 0 && triStrip[i - 2] >= 0 && triStrip[i] >= 0);
                    mesh->cpuIndexArray.append(triStrip[i - 2], triStrip[i - 1], triStrip[i]);
                    clockwise = ! clockwise;
                }
            }
        }
    }
}
// There is no "ParseOFF" because OFF parsing is trivial--it has no subparts or materials,
// and is directly an indexed format.
void ArticulatedModel::loadOFF(const Specification& specification) {

    Part* part = addPart(m_name);
    Geometry* geom = addGeometry("geom");
    Mesh* mesh = addMesh("mesh", part, geom);
    mesh->material = UniversalMaterial::create();

    TextInput::Settings s;
    s.cppBlockComments = false;
    s.cppLineComments = false;
    s.otherCommentCharacter = '#';

    TextInput ti(specification.filename, s);

    ///////////////////////////////////////////////////////////////
    // Parse header: the keyword is a concatenation of optional flags
    // ("ST" texcoords, "C" colors, "N" normals, "4" homogeneous,
    // "n" high-dimension) followed by "OFF".
    std::string header = ti.readSymbol();
    bool hasTexCoords = false;
    bool hasColors = false;
    bool hasNormals = false;
    bool hasHomogeneous = false;
    bool hasHighDimension = false;
    if (beginsWith(header, "ST")) {
        hasTexCoords = true;
        header = header.substr(2);
    }
    if (beginsWith(header, "C")) {
        hasColors = true;
        header = header.substr(1);
    }
    if (beginsWith(header, "N")) {
        hasNormals = true;
        header = header.substr(1);
    }
    if (beginsWith(header, "4")) {
        hasHomogeneous = true;
        header = header.substr(1);
    }
    if (beginsWith(header, "n")) {
        hasHighDimension = true;
        header = header.substr(1);
    }
    geom->cpuVertexArray.hasTexCoord0 = hasTexCoords;
    geom->cpuVertexArray.hasTangent = false;

    // Remaining header should be "OFF", but is not required according to the spec
    Token t = ti.peek();
    if ((t.type() == Token::SYMBOL) && (t.string() == "BINARY")) {
        throw std::string("BINARY OFF files are not supported by this version of G3D::ArticulatedModel");
    }

    int ndim = 3;
    if (hasHighDimension) {
        ndim = int(ti.readNumber());
    }
    if (hasHomogeneous) {
        ++ndim;
    }
    if (ndim < 3) {
        throw std::string("OFF files must contain at least 3 dimensions");
    }

    // Counts: vertices, faces, edges (the edge count is unused).
    int nV = iFloor(ti.readNumber());
    int nF = iFloor(ti.readNumber());
    int nE = iFloor(ti.readNumber());
    (void)nE;

    ///////////////////////////////////////////////////

    Array<int>& index = mesh->cpuIndexArray;
    geom->cpuVertexArray.vertex.resize(nV);

    // Read the per-vertex data
    for (int v = 0; v < nV; ++v) {
        CPUVertexArray::Vertex& vertex = geom->cpuVertexArray.vertex[v];

        // Position
        for (int i = 0; i < 3; ++i) {
            vertex.position[i] = float(ti.readNumber());
        }

        // Ignore higher dimensions
        for (int i = 3; i < ndim; ++i) {
            (void)ti.readNumber();
        }

        if (hasNormals) {
            // Normal (assume always 3 components)
            for (int i = 0; i < 3; ++i) {
                vertex.normal[i] = float(ti.readNumber());
            }
        } else {
            vertex.normal.x = fnan();
        }

        if (hasColors) {
            // Color (assume always 3 components); the values are discarded.
            for (int i = 0; i < 3; ++i) {
                ti.readNumber();
            }
        }

        if (hasTexCoords) {
            // Texcoords (assume always 2 components)
            for (int i = 0; i < 2; ++i) {
                vertex.texCoord0[i] = float(ti.readNumber());
            }
        }
        // Skip to the end of the line. If the file was corrupt we'll at least get the next vertex right
        ti.readUntilNewlineAsString();
    }

    // Faces
    // Convert arbitrary triangle fans to triangles
    Array<int> poly;
    for (int i = 0; i < nF; ++i) {
        poly.fastClear();
        int polySize = iFloor(ti.readNumber());
        debugAssert(polySize > 2);

        if (polySize == 3) {
            // Triangle (common case)
            for (int j = 0; j < 3; ++j) {
                index.append(iFloor(ti.readNumber()));
            }
        } else {
            poly.resize(polySize);
            for (int j = 0; j < polySize; ++j) {
                poly[j] = iFloor(ti.readNumber());
                debugAssertM(poly[j] < nV,
                             "OFF file contained an index greater than the number of vertices.");
            }

            // Expand the poly into triangles
            MeshAlg::toIndexedTriList(poly, PrimitiveType::TRIANGLE_FAN, index);
        }

        // Trim to the end of the line, except on the last line of the
        // file (where it doesn't matter)
        if (i != nF - 1) {
            // Ignore per-face colors
            ti.readUntilNewlineAsString();
        }
    }
}
void Snake::addPart(int i){ SnakePart* t = new SnakePart(); t->setValue(i); addPart(t); }
// Loads a 3DS file: each 3DS object becomes a root-level Part whose vertices
// are transformed into object space; geometry is split into one Mesh per
// face-material (or a single mesh when the object has no materials).
void ArticulatedModel::load3DS(const Specification& specification) {
    // During loading, we make no attempt to optimize the mesh. We leave that until the
    // Parts have been created. The vertex arrays are therefore much larger than they
    // need to be.
    Stopwatch timer;

    Parse3DS parseData;
    {
        BinaryInput bi(specification.filename, G3D_LITTLE_ENDIAN);
        timer.after(" open file");
        parseData.parse(bi);
        timer.after(" parse");
    }

    name = FilePath::base(specification.filename);
    const std::string& path = FilePath::parent(specification.filename);
    /*
    if (specification.stripMaterials) {
        stripMaterials(parseData);
    }

    if (specification.mergeMeshesByMaterial) {
        mergeGroupsAndMeshesByMaterial(parseData);
    }*/

    for (int p = 0; p < parseData.objectArray.size(); ++p) {
        Parse3DS::Object& object = parseData.objectArray[p];

        // Create a unique name for this part
        std::string name = object.name;
        int count = 0;
        while (this->part(name) != NULL) {
            ++count;
            name = object.name + format("_#%d", count);
        }

        // Create the new part
        // All 3DS parts are promoted to the root in the current implementation.
        Part* part = addPart(name);

        // Process geometry
        part->cpuVertexArray.vertex.resize(object.vertexArray.size());
        part->cframe = object.keyframe.approxCoordinateFrame();
        debugAssert(isFinite(part->cframe.rotation.determinant()));
        debugAssert(part->cframe.rotation.isOrthonormal());

        if (! part->cframe.rotation.isRightHanded()) {
            // TODO: how will this impact other code? I think we can't just force it like this -- Morgan
            part->cframe.rotation.setColumn(0, -part->cframe.rotation.column(0));
        }

        debugAssert(part->cframe.rotation.isRightHanded());

        //debugPrintf("%s %d %d\n", object.name.c_str(), object.hierarchyIndex, object.nodeID);

        if (part->cpuVertexArray.vertex.size() > 0) {
            // Convert vertices to object space (there is no surface normal data at this point)
            Matrix4 netXForm = part->cframe.inverse().toMatrix4();

            debugAssertM(netXForm.row(3) == Vector4(0,0,0,1),
                        "3DS file loading requires that the last row of the xform matrix be 0, 0, 0, 1");

            if (object.texCoordArray.size() > 0) {
                part->m_hasTexCoord0 = true;
                part->cpuVertexArray.hasTexCoord0 = true;
            }

            const Matrix3& S = netXForm.upper3x3();
            const Vector3& T = netXForm.column(3).xyz();

            for (int v = 0; v < part->cpuVertexArray.vertex.size(); ++v) {
#               ifdef G3D_DEBUG
                {
                    const Vector3& vec = object.vertexArray[v];
                    debugAssert(vec.isFinite());
                }
#               endif

                CPUVertexArray::Vertex& vertex = part->cpuVertexArray.vertex[v];
                vertex.position = S * object.vertexArray[v] + T;
                // Tangents and normals are not present in the file; mark them
                // as undefined for later computation.
                vertex.tangent = Vector4::nan();
                vertex.normal = Vector3::nan();

                if (part->m_hasTexCoord0) {
                    vertex.texCoord0 = object.texCoordArray[v];
                }

#               ifdef G3D_DEBUG
                {
                    const Vector3& vec = vertex.position;
                    debugAssert(vec.isFinite());
                }
#               endif
            }

            if (object.faceMatArray.size() == 0) {
                // Merge all geometry into one mesh since there are no materials
                Mesh* mesh = addMesh("mesh", part);
                mesh->cpuIndexArray = object.indexArray;
                debugAssert(mesh->cpuIndexArray.size() % 3 == 0);
            } else {
                for (int m = 0; m < object.faceMatArray.size(); ++m) {
                    const Parse3DS::FaceMat& faceMat = object.faceMatArray[m];

                    if (faceMat.faceIndexArray.size() > 0) {
                        Material::Ref mat;
                        bool twoSided = false;

                        const std::string& materialName = faceMat.materialName;
                        if (parseData.materialNameToIndex.containsKey(materialName)) {
                            int i = parseData.materialNameToIndex[materialName];
                            const Parse3DS::Material& material = parseData.materialArray[i];

                            //if (! materialSubstitution.get(material.texture1.filename, mat)) {
                                const Material::Specification& spec = compute3DSMaterial(&material, path, specification);
                                mat = Material::create(spec);
                            //}
                            twoSided = material.twoSided || mat->hasAlphaMask();
                        } else {
                            mat = Material::create();
                            logPrintf("Referenced unknown material '%s'\n", materialName.c_str());
                        }

                        Mesh* mesh = addMesh(materialName, part);
                        debugAssert(isValidHeapPointer(mesh));
                        mesh->material = mat;
                        mesh->twoSided = twoSided;

                        // Construct an index array for this part
                        for (int i = 0; i < faceMat.faceIndexArray.size(); ++i) {
                            // 3*f is an index into object.indexArray
                            int f = faceMat.faceIndexArray[i];
                            debugAssert(f >= 0);
                            for (int v = 0; v < 3; ++v) {
                                mesh->cpuIndexArray.append(object.indexArray[3 * f + v]);
                            }
                        }
                        debugAssert(mesh->cpuIndexArray.size() > 0);
                        debugAssert(mesh->cpuIndexArray.size() % 3 == 0);
                    }
                } // for m
            } // if has materials
        }
    }
    timer.after(" convert");
}