void defiNonDefault::print(FILE* f) const
{
  int i;

  fprintf(f, "nondefaultrule %s\n", name());
  fprintf(f, "%d layers %d vias %d viarules %d mincuts\n",
          numLayers(), numVias(), numViaRules(), numMinCuts());
  for (i = 0; i < numLayers(); i++) {
    fprintf(f, " Layer %s\n", layerName(i));
    fprintf(f, " WIDTH %g\n", layerWidth(i));
    if (hasLayerDiagWidth(i))
      fprintf(f, " DIAGWIDTH %g\n", layerDiagWidth(i));
    if (hasLayerSpacing(i))
      fprintf(f, " SPACING %g\n", layerSpacing(i));
    if (hasLayerWireExt(i))
      fprintf(f, " WIREEXT %g\n", layerWireExt(i));
  }
  for (i = 0; i < numVias(); i++) {
    fprintf(f, " VIA %s\n", viaName(i));
  }
  for (i = 0; i < numViaRules(); i++) {
    fprintf(f, " VIARULE %s\n", viaRuleName(i));
  }
  for (i = 0; i < numMinCuts(); i++) {
    fprintf(f, " MINCUTS %s %d\n", cutLayerName(i), numCuts(i));
  }
}

bool LayeredConstruction_Impl::insertLayer(unsigned layerIndex, const Material& material)
{
  OS_ASSERT(material.model() == model());
  layerIndex = mf_clearNullLayers(layerIndex);

  unsigned n = numLayers();
  MaterialVector layers = this->layers();
  MaterialVector::iterator layersBegin = layers.begin();
  MaterialVector::iterator layersEnd = layers.end();
  MaterialVector::iterator insertAtIt = layersBegin;
  while ((static_cast<unsigned>(insertAtIt - layersBegin) < layerIndex) &&
         (insertAtIt != layersEnd))
  {
    ++insertAtIt;
  }
  layers.insert(insertAtIt, material);
  OS_ASSERT(layers.size() == ++n);

  if ((model().strictnessLevel() < StrictnessLevel::Final) ||
      LayeredConstruction::layersAreValid(layers))
  {
    IdfExtensibleGroup idfGroup = insertExtensibleGroup(layerIndex, StringVector());
    OS_ASSERT(!idfGroup.empty());
    ModelExtensibleGroup group = idfGroup.cast<ModelExtensibleGroup>();
    bool ok = group.setPointer(0, material.handle());
    OS_ASSERT(ok);
    return true;
  }
  return false;
}

K3b::Msf K3b::Device::DiskInfo::firstLayerSize() const
{
    if( numLayers() > 1 )
        return d->firstLayerSize;
    else
        return size();
}

void NN_File::addStructure(TiXmlElement * pRoot)
{
    for(int i=0; i<numLayers(); i++)
    {
        TiXmlElement * pLayer = new TiXmlElement("layer");
        pLayer->SetAttribute("neuron", size(i));
        pRoot->LinkEndChild(pLayer);
    }
}

bool LayeredConstruction_Impl::setLayer(unsigned layerIndex, const Material& material)
{
  OS_ASSERT(material.model() == model());
  layerIndex = mf_clearNullLayers(layerIndex);
  if (layerIndex >= numLayers()) {
    LOG(Info, "Asked to change the Material at layer " << layerIndex << " in "
              << briefDescription() << ", but there are only " << numLayers() << " layers.");
    return false;
  }

  MaterialVector layers = this->layers();
  layers[layerIndex] = material;
  if ((model().strictnessLevel() < StrictnessLevel::Final) ||
      LayeredConstruction::layersAreValid(layers))
  {
    ModelExtensibleGroup group = getExtensibleGroup(layerIndex).cast<ModelExtensibleGroup>();
    OS_ASSERT(!group.empty());
    bool ok = group.setPointer(0, material.handle());
    OS_ASSERT(ok);
    return true;
  }
  return false;
}

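// Hypothetical usage sketch (not part of the original sources): exercising the public
// LayeredConstruction interface that the two _Impl methods above back. The class names
// (Model, Construction, StandardOpaqueMaterial) follow the OpenStudio model API, but the
// header paths and constructor arguments are assumptions; adjust to the installed SDK.
#include <model/Model.hpp>
#include <model/Construction.hpp>
#include <model/StandardOpaqueMaterial.hpp>

using namespace openstudio::model;

void layeredConstructionSketch()
{
    Model model;
    Construction construction(model);

    StandardOpaqueMaterial brick(model);      // outermost layer
    StandardOpaqueMaterial insulation(model);
    StandardOpaqueMaterial gypsum(model);     // innermost layer

    construction.insertLayer(0, brick);       // forwards to LayeredConstruction_Impl::insertLayer
    construction.insertLayer(1, gypsum);
    construction.insertLayer(1, insulation);  // insert between the two existing layers

    construction.setLayer(2, gypsum);         // forwards to LayeredConstruction_Impl::setLayer
}
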
void K3b::Device::DiskInfo::debug() const
{
    kDebug() << "DiskInfo:" << endl
             << "Mediatype: " << K3b::Device::mediaTypeString( mediaType() ) << endl
             << "Current Profile: " << K3b::Device::mediaTypeString( currentProfile() ) << endl
             << "Disk state: " << ( diskState() == K3b::Device::STATE_EMPTY
                                    ? "empty"
                                    : ( diskState() == K3b::Device::STATE_INCOMPLETE
                                        ? "incomplete"
                                        : ( diskState() == K3b::Device::STATE_COMPLETE
                                            ? "complete"
                                            : ( diskState() == K3b::Device::STATE_NO_MEDIA
                                                ? "no media"
                                                : "unknown" ) ) ) ) << endl
             << "Empty: " << empty() << endl
             << "Rewritable: " << rewritable() << endl
             << "Appendable: " << appendable() << endl
             << "Sessions: " << numSessions() << endl
             << "Tracks: " << numTracks() << endl
             << "Layers: " << numLayers() << endl
             << "Capacity: " << capacity()
             << " (LBA " << capacity().lba()
             << ") (" << capacity().mode1Bytes() << " Bytes)" << endl
             << "Remaining size: " << remainingSize()
             << " (LBA " << remainingSize().lba()
             << ") (" << remainingSize().mode1Bytes() << " Bytes)" << endl
             << "Used Size: " << size()
             << " (LBA " << size().lba()
             << ") (" << size().mode1Bytes() << " Bytes)" << endl;

    if( mediaType() == K3b::Device::MEDIA_DVD_PLUS_RW )
        kDebug() << "Bg Format: " << ( bgFormatState() == BG_FORMAT_NONE
                                       ? "none"
                                       : ( bgFormatState() == BG_FORMAT_INCOMPLETE
                                           ? "incomplete"
                                           : ( bgFormatState() == BG_FORMAT_IN_PROGRESS
                                               ? "in progress"
                                               : ( bgFormatState() == BG_FORMAT_COMPLETE
                                                   ? "complete"
                                                   : "unknown" ) ) ) ) << endl;
}

void NN_File::addWeights(TiXmlElement * pRoot)
{
    TiXmlElement * pWeights = new TiXmlElement("weights");
    for(int l=1; l<numLayers(); l++)
    {
        TiXmlElement * pLayer = new TiXmlElement("layer");
        pLayer->SetAttribute("num", l);

        int neurons = size(l);
        int prevLayerNeurons = size(l-1);

        // Neuron weights
        for(int i=0; i<neurons; i++)              // current layer neuron
        {
            TiXmlElement * pNeuron = new TiXmlElement("neuron");
            for(int j=0; j<prevLayerNeurons; j++) // j is each neuron of the previous layer
            {
                TiXmlElement * pW = new TiXmlElement("w");
                float weight = w(l, j, i);
                TiXmlText * pText = new TiXmlText( toString(weight) );
                pW->LinkEndChild(pText);
                pNeuron->LinkEndChild(pW);
            }

            // Bias
            TiXmlElement * pBias = new TiXmlElement("bias");
            float bias = b(l, i);
            TiXmlText * pText = new TiXmlText( toString(bias) );
            pBias->LinkEndChild(pText);
            pNeuron->LinkEndChild(pBias);

            // Link the neuron to its layer
            pLayer->LinkEndChild(pNeuron);
        }
        pWeights->LinkEndChild(pLayer);
    }
    pRoot->LinkEndChild(pWeights);
}

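// Hypothetical helper (not in the original sources): shows how addStructure() and
// addWeights() above can be combined into a complete TinyXML document and written to disk.
// The method name saveXml() and its placement on NN_File are assumptions; only the two
// calls in the middle come from the code above.
#include <tinyxml.h>

void NN_File::saveXml(const char * path)
{
    TiXmlDocument doc;
    doc.LinkEndChild( new TiXmlDeclaration("1.0", "", "") );

    TiXmlElement * pRoot = new TiXmlElement("network");
    doc.LinkEndChild(pRoot);

    addStructure(pRoot);   // one <layer neuron="..."/> element per layer
    addWeights(pRoot);     // <weights> tree with per-neuron <w> and <bias> values

    doc.SaveFile(path);    // the document owns and frees the linked elements
}
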
bool NeuralNet::loadNet( const char * filename )
{
    // refuse to load over an already-constructed network
    if( numLayers() != 0 )
    {
        return false;
    }

    std::ifstream inp;
    inp.open(filename, std::ios::in);
    assert(inp);    // check the stream after opening, not before

    // load network parameters
    unsigned int loadLayers = 0;
    double learnRate = 0.0;
    double mom = 0.0;
    double decay = 0.0;
    unsigned int outType = 0;
    inp >> loadLayers;
    inp >> learnRate;
    inp >> mom;
    inp >> decay;
    inp >> outType;
    setParams(learnRate, mom, decay, outType);

    // construct network
    for(unsigned int m=0; m<loadLayers; ++m)
    {
        unsigned int type = 0;
        inp >> type;
        unsigned int inputs = 0;
        inp >> inputs;
        unsigned int nodes = 0;
        inp >> nodes;

        // read (inputs+1) x nodes weight values, matching the layout written by saveNet()
        std::vector<std::vector<double> > weights(inputs+1, std::vector<double>(nodes, 0.0));
        for(unsigned int i=0; i<inputs+1; ++i)
        {
            for(unsigned int j=0; j<nodes; ++j)
            {
                inp >> weights[i][j];
            }
        }

        NeuralLayer * pHiddenLayer = nullptr;
        switch(type)
        {
            case(LINEAR):  pHiddenLayer = new NeuralLinearLayer(inputs, nodes);  break;
            case(TANH):    pHiddenLayer = new NeuralTanhLayer(inputs, nodes);    break;
            case(SIGMOID): pHiddenLayer = new NeuralSigmoidLayer(inputs, nodes); break;
            case(SOFTMAX): pHiddenLayer = new NeuralSoftmaxLayer(inputs, nodes); break;
        }
        if( pHiddenLayer == nullptr )   // unrecognized layer type in the file
        {
            inp.close();
            return false;
        }
        pHiddenLayer->loadWeights(weights);
        addLayer( pHiddenLayer );
    }
    inp.close();
    return true;
}

// save network
bool NeuralNet::saveNet( const char * filename )
{
    unsigned int nLayers = numLayers();

    // don't save an empty network
    if( nLayers == 0 )
    {
        return false;
    }

    std::ofstream outp;
    std::string temp;
    if( !filename )
    {
        // no name given: generate a timestamped default
        time_t now = time(0);
        struct tm* localnow = localtime(&now);
        std::ostringstream fname;
        fname << "netsave_" << localnow->tm_mon+1 << "-" << localnow->tm_mday << "-"
              << localnow->tm_year+1900 << "_";
        fname << localnow->tm_hour << "-" << localnow->tm_min << "-" << localnow->tm_sec;
        fname << ".data";
        temp = fname.str();
    }
    else
    {
        temp = filename;
    }
    outp.open(temp, std::ios::out);
    assert(outp);   // check the stream after opening, not before

    // save info about the network
    outp << nLayers << "\t";
    outp << m_learningRate << "\t";
    outp << m_momentum << "\t";
    outp << m_weightDecay << "\t";
    outp << m_outType << "\t";
    outp << std::endl << std::endl;

    // save the individual layers
    for(unsigned int m=0; m<nLayers; ++m)
    {
        outp << m_layers[m]->getType() << "\t";
        outp << m_layers[m]->numInputs() << "\t";
        outp << m_layers[m]->numNodes() << std::endl;

        auto weights = m_layers[m]->retrieveWeights();
        for(auto i=weights.begin(); i<weights.end(); ++i)
        {
            for(auto j=i->begin(); j<(i->end()); ++j)
            {
                outp << (*j) << "\t\t";
            }
            outp << std::endl;
        }
        outp << std::endl;
    }

    outp.close();
    return true;
}
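
// Hypothetical driver (not part of the original sources): builds a small network with the
// layer types and calls referenced in loadNet()/saveNet() above, saves it, and reloads it
// into a fresh object. The setParams() values and the meaning of the output-type flag are
// assumptions; only the function names and constructors come from the code above.
int roundTripSketch()
{
    NeuralNet net;
    net.setParams(0.01, 0.9, 0.0, 0);             // learning rate, momentum, weight decay, output type
    net.addLayer( new NeuralTanhLayer(2, 4) );    // 2 inputs -> 4 hidden nodes
    net.addLayer( new NeuralSigmoidLayer(4, 1) ); // 4 inputs -> 1 output node

    if( !net.saveNet("netsave_example.data") )    // passing nullptr would generate a timestamped name
        return 1;

    NeuralNet restored;                           // loadNet() refuses to fill a non-empty network
    return restored.loadNet("netsave_example.data") ? 0 : 1;
}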