void LEGACY_NETLIST_READER::LoadNetlist() throw ( IO_ERROR, PARSE_ERROR, boost::bad_pointer )
{
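    // 'state' tracks the parenthesis nesting depth of the current line:
    // 1 = netlist body, 2 = a component line, 3 or more = that component's pad/net entries.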
    int state            = 0;
    bool is_comment      = false;
    COMPONENT* component = NULL;

    while( m_lineReader->ReadLine() )
    {
        char* line = StrPurge( m_lineReader->Line() );

        if( is_comment ) // Comments in progress
        {
            // Test for end of the current comment
            if( ( line = strchr( line, '}' ) ) == NULL )
                continue;

            is_comment = false;
        }

        if( *line == '{' ) // Start Comment or Pcbnew info section
        {
            is_comment = true;

            if( m_loadFootprintFilters && state == 0
              && (strnicmp( line, "{ Allowed footprints", 20 ) == 0) )
            {
                loadFootprintFilters();
                continue;
            }

            if( ( line = strchr( line, '}' ) ) == NULL )
                continue;
        }

        if( *line == '(' )
            state++;

        if( *line == ')' )
            state--;

        if( state == 2 )
        {
            component = loadComponent( line );
            continue;
        }

        if( state >= 3 ) // Pad descriptions are read here.
        {
            wxASSERT( component != NULL );

            loadNet( line, component );
            state--;
        }
    }

    if( m_footprintReader )
    {
        m_footprintReader->Load( m_netlist );
    }
}
void TokenLoader::loadUnitSideAttributes(UnitToken* token, Json unitTokenParameters) {
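  // Pull the melee/ranged sub-objects and the armor/net string arrays out of the unit's JSON parameters.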
  Json meleeParameters = unitTokenParameters.getObject("melee"); // TEST for default
  Json rangedParameters = unitTokenParameters.getObject("ranged");
  loadMelee(token, meleeParameters);
  loadRanged(token, rangedParameters);
  loadShield(token, unitTokenParameters.getStringArray("armor"));
  loadNet(token, unitTokenParameters.getStringArray("net"));
}
Example #3
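// Store the requested version and run the loader that matches it.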
void SportsLayer::Vesion(int vesion){
	vsion=vesion;
	if(vesion==1){
		loadbasic();
	}else if(vesion==2){
		loadNet();
	}
}
Example #4
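// Rebuild a network from a saved file: read the layer sizes and counters,
// allocate the neurons and work buffers, then delegate the rest to loadNet().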
CNeuralNet::CNeuralNet(char *filename) {

#ifdef FSTREAM_BINARY
	std::fstream file_op(filename, std::ios::binary | std::ios::in);
#else
	std::fstream file_op(filename,std::ios::in);
#endif

	file_op.precision(std::numeric_limits<real>::digits10);
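	// Read the layer sizes and the game/training counters from the start of the file.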
	file_op >> m_numOfInputs;
	file_op >> m_numOfHiddenNeurons;
	file_op >> m_numOfOutputs;
	file_op >> m_gameCounter;
	file_op >> m_trainingCounter;
	m_hiddenNeurons = new CNeuron[m_numOfHiddenNeurons];
	if (m_hiddenNeurons) {
		for (int i = 0; i < m_numOfHiddenNeurons; ++i)
			m_hiddenNeurons[i].setNumOfWeights(m_numOfInputs);
	} else {
		std::cout << "Uh oh, no memory for " << m_numOfHiddenNeurons
				<< " neurons!" << std::endl;
		return;
	}

	m_outNeurons = new CNeuron[m_numOfOutputs];
	if (m_outNeurons) {
		for (int i = 0; i < m_numOfOutputs; ++i)
			m_outNeurons[i].setNumOfWeights(m_numOfHiddenNeurons);
	} else {
		std::cout << "Uh oh, no memory for " << m_numOfOutputs << " neurons!"
				<< std::endl;
		return;
	}
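	// Work buffers sized for the hidden layer and the output layer.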
	inp2 = new real[m_numOfHiddenNeurons];
	out = new real[m_numOfOutputs];
	file_op.close();
	loadNet(filename);
}
Example #5
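// Extract fc7 features from a pretrained net, then train a linear SVM or logistic regression on them.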
cv::Ptr<cv::ml::StatModel>
train(const string& protoFile, const string& modelFile,
      const vector<cv::Mat>& trainData, const vector<int>& trainLabel,
      const string& type, int kFold) {
  cv::Ptr<cv::dnn::Net> net = loadNet(protoFile, modelFile);
  const cv::dnn::Blob input = cv::dnn::Blob(trainData);
  cout << input.shape() << endl;
  net->setBlob(".data", input);
  net->forward();
  // Extract the output of the fully connected layer fc7 (InnerProduct) as the feature vectors
  cout << "extract feature" << endl;
  const cv::dnn::Blob blob = net->getBlob("fc7");
  net.release();
  const cv::Mat feature = blob.matRefConst();
  cout << feature.size() << endl;
  cout << "train model" << endl;
  // Train with a linear SVM or logistic regression
  cv::Ptr<cv::ml::TrainData> data =
    cv::ml::TrainData::create(feature, cv::ml::ROW_SAMPLE, cv::Mat(trainLabel, false));
  if(type == "SVM") {
    cv::Ptr<cv::ml::SVM> clf = cv::ml::SVM::create();
    clf->setType(cv::ml::SVM::C_SVC);
    clf->setKernel(cv::ml::SVM::LINEAR);
    clf->trainAuto(data, kFold); // k-fold cross validation
    return clf;
  } else {
    cv::Ptr<cv::ml::LogisticRegression> clf =
      cv::ml::LogisticRegression::create();
    clf->setLearningRate(0.01);
    clf->setIterations(1000);
    clf->setRegularization(cv::ml::LogisticRegression::REG_L2);
    clf->setTrainMethod(cv::ml::LogisticRegression::MINI_BATCH);
    clf->setMiniBatchSize(10);
    clf->train(data);
    return clf;
  }
}