// adds just a constant value c to the input
void eval(RealMatrix const& patterns, RealMatrix& output, State& state) const {
	output.resize(patterns.size1(), m_dim);
	for(std::size_t p = 0; p != patterns.size1(); ++p){
		for(std::size_t i = 0; i != m_dim; ++i)
			output(p, i) = patterns(p, i) + m_c;
	}
}
// returns zero error and a zero gradient
double evalDerivative(
	RealMatrix const&, RealMatrix const& prediction, RealMatrix& gradient
) const {
	gradient.resize(prediction.size1(), prediction.size2());
	gradient.clear();
	return 0;
}
void CMACMap::eval(RealMatrix const& patterns, RealMatrix& output) const {
	SIZE_CHECK(patterns.size2() == m_inputSize);
	std::size_t numPatterns = patterns.size1();
	output.resize(numPatterns, m_outputSize);
	output.clear();
	for(std::size_t i = 0; i != numPatterns; ++i){
		// look up the active tile of every tiling for this pattern
		std::vector<std::size_t> indizes = getIndizes(row(patterns, i));
		// every output is the sum of the parameters of its active tiles
		for(std::size_t o = 0; o != m_outputSize; ++o){
			for(std::size_t j = 0; j != m_tilings; ++j){
				output(i, o) += m_parameters(indizes[j] + o * m_parametersPerTiling);
			}
		}
	}
}
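For intuition, here is a minimal standalone sketch of the flat-vector lookup performed above. It is not the Shark CMACMap API and all names and sizes are hypothetical; it only illustrates the pattern that each tiling contributes one active tile index, the offset o * parametersPerBlock shifts the lookup into output o's block of the parameter vector, and the prediction is the sum of one weight per tiling.

#include <cstddef>
#include <iostream>
#include <vector>

int main(){
	// hypothetical sizes, chosen only for illustration
	std::size_t tilings = 2;
	std::size_t outputs = 2;
	std::size_t parametersPerBlock = 8; // weights per output: one per tile, over all tilings
	std::vector<double> parameters(outputs * parametersPerBlock, 0.1);

	// pretend the tile lookup returned these active tiles, one per tiling
	std::vector<std::size_t> activeTiles = {1, 6};

	// each output is the sum of the weights of its active tiles
	for(std::size_t o = 0; o != outputs; ++o){
		double out = 0.0;
		for(std::size_t j = 0; j != tilings; ++j)
			out += parameters[activeTiles[j] + o * parametersPerBlock];
		std::cout << "output " << o << ": " << out << "\n";
	}
}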
void SVMLightDataModel::save(std::string filePath) {
	std::cout << "Saving model to " << filePath << std::endl;

	// create new datastream
	std::ofstream ofs;
	ofs.open(filePath.c_str());
	if(!ofs)
		throw(SHARKSVMEXCEPTION("[export_SVMLight] file cannot be opened for writing"));

	// create header first
	saveHeader(ofs);

	RealMatrix preparedAlphas = container->m_alphas;

	// prepare for binary case
	switch(container->m_svmType) {
		case SVMTypes::CSVC: {
			// nothing to prepare for CSVC
			break;
		}
		case SVMTypes::Pegasos: {
			// FIXME: multiclass
			// throw away the alphas we do not need
			preparedAlphas.resize(container->m_alphas.size1(), 1);
			// FIXME: better copy...
			for(std::size_t j = 0; j < container->m_alphas.size1(); ++j) {
				preparedAlphas(j, 0) = container->m_alphas(j, 1);
			}
			break;
		}
		default: {
			throw(SHARKSVMEXCEPTION("[SVMLightDataModel::save] Unsupported SVM type."));
		}
	}

	// then save data in SVMLight format
	container->saveSparseLabelAndData(ofs);
	ofs.close();
}
void OnlineRNNet::eval(RealMatrix const& pattern, RealMatrix& output){
	SIZE_CHECK(pattern.size1() == 1);//we can only process a single input at a time.
	SIZE_CHECK(pattern.size2() == inputSize());
	std::size_t numUnits = mpe_structure->numberOfUnits();
	if(m_lastActivation.size() != numUnits){
		m_activation.resize(numUnits);
		m_lastActivation.resize(numUnits);
		zero(m_activation);
		zero(m_lastActivation);
	}
	swap(m_lastActivation, m_activation);

	//we want to treat input and bias neurons exactly like hidden or output neurons, so we copy the current
	//pattern to the beginning of the last activation vector and set the bias neuron to 1
	//so m_lastActivation has the format (input|1|lastNeuronActivation)
	noalias(subrange(m_lastActivation, 0, mpe_structure->inputs())) = row(pattern, 0);
	m_lastActivation(mpe_structure->bias()) = 1;
	m_activation(mpe_structure->bias()) = 1;

	//activation of the hidden neurons is now just a matrix-vector multiplication
	fast_prod(
		mpe_structure->weights(),
		m_lastActivation,
		subrange(m_activation, inputSize()+1, numUnits)
	);

	//now apply the sigmoid function
	for(std::size_t i = inputSize()+1; i != numUnits; i++){
		m_activation(i) = mpe_structure->neuron(m_activation(i));
	}

	//copy the result to the output
	output.resize(1, outputSize());
	noalias(row(output, 0)) = subrange(m_activation, numUnits - outputSize(), numUnits);
}
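The structure of a single recurrent step can be illustrated with a minimal self-contained sketch. This is plain C++ with hypothetical sizes and weights, not the Shark OnlineRNNet/RecurrentStructure API: the state vector is laid out as (input | bias | neuron activations), the new neuron activations are a sigmoid of a weight-matrix product with the previous state, and the output is read from the last entries.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

int main(){
	// hypothetical layout: 2 inputs | 1 bias | 3 neurons, the last neuron is the output
	std::size_t inputs = 2, numUnits = 6, outputs = 1;
	std::vector<double> lastActivation(numUnits, 0.0), activation(numUnits, 0.0);
	// one weight row per neuron, one column per unit (inputs, bias and neurons)
	std::vector<std::vector<double>> weights(numUnits - inputs - 1, std::vector<double>(numUnits, 0.1));

	std::vector<double> pattern = {0.5, -1.0}; // the single input pattern of this step

	// copy the input into the front of the previous state and set the bias unit to 1
	for(std::size_t i = 0; i != inputs; ++i) lastActivation[i] = pattern[i];
	lastActivation[inputs] = 1.0;

	// neuron activations: matrix-vector product with the previous state, then the sigmoid
	for(std::size_t i = inputs + 1; i != numUnits; ++i){
		double sum = 0.0;
		for(std::size_t j = 0; j != numUnits; ++j)
			sum += weights[i - inputs - 1][j] * lastActivation[j];
		activation[i] = 1.0 / (1.0 + std::exp(-sum));
	}

	// the network output is the activation of the last `outputs` units
	for(std::size_t o = 0; o != outputs; ++o)
		std::cout << activation[numUnits - outputs + o] << "\n";
}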
void CSpace::allocate_coordinates(RealMatrix& coordinates) const {
	coordinates.resize(nb_states(), element_type().dimension());
}
void LibSVMDataModel::save(std::string filePath) {
	BOOST_LOG_TRIVIAL(debug) << "Saving model to " << filePath;

	// create new datastream
	std::ofstream ofs;
	ofs.open(filePath.c_str());
	if(!ofs)
		throw(SHARKSVMEXCEPTION("File cannot be opened for writing!"));

	// create header first
	saveHeader(ofs);

	RealMatrix preparedAlphas = container->m_alphas;

	// prepare for binary case
	switch(container->m_svmType) {
		case SVMTypes::CSVC: {
			// nothing to prepare for CSVC
			break;
		}
		// FIXME: can we apply binary data to multiclass, and if so, what happens to the data model?
		case SVMTypes::MCSVMCS:
		case SVMTypes::MCSVMLLW:
		case SVMTypes::MCSVMMMR:
		case SVMTypes::MCSVMOVA:
		case SVMTypes::MCSVMADM:
		case SVMTypes::MCSVMATM:
		case SVMTypes::MCSVMATS:
		case SVMTypes::MCSVMWW: {
			// nothing to prepare
			break;
		}
		case SVMTypes::SVRG:
		case SVMTypes::BSGD:
		case SVMTypes::Pegasos: {
			BOOST_LOG_TRIVIAL(debug) << "Preparing alpha coefficients from Pegasos/BSGD to LibSVM...";
			// in the binary case we need to throw away the alpha column we do not need
			if(container->m_alphas.size2() == 2) {
				BOOST_LOG_TRIVIAL(debug) << "Found two alphas, so one of them is not needed, removing it.";
				preparedAlphas.resize(container->m_alphas.size1(), 1);
				for(std::size_t j = 0; j < container->m_alphas.size1(); ++j) {
					preparedAlphas(j, 0) = container->m_alphas(j, 1);
				}
			}
			break;
		}
		default: {
			throw(SHARKSVMEXCEPTION("Unsupported SVM type!"));
		}
	}

	// copy over our prepared alphas
	container->m_alphas = preparedAlphas;

	// sanity check
	if(container->m_alphas.size2() == 2)
		throw SHARKSVMEXCEPTION("Removing extra alpha coefficients in binary case failed!");

	// then save data in libsvm format
	container->saveSparseData(ofs);
	ofs.close();
}
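The binary-case preparation in both save routines boils down to collapsing an n-by-2 coefficient matrix to a single column before export. A minimal standalone sketch of that reduction, using plain containers and made-up values rather than the SHARK-SVM model classes:

#include <cstddef>
#include <iostream>
#include <vector>

int main(){
	// hypothetical binary-case coefficients: one row per support vector, two columns
	std::vector<std::vector<double>> alphas = {{-0.3, 0.3}, {0.7, -0.7}, {-0.1, 0.1}};

	// keep only the second column, mirroring the copy loops in the save routines above
	std::vector<double> prepared(alphas.size());
	for(std::size_t j = 0; j < alphas.size(); ++j)
		prepared[j] = alphas[j][1];

	for(double a : prepared) std::cout << a << "\n";
}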