////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// evaluate
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector MultiLayerPerceptron::evaluate( const RealVector& x )
{
   /// Assert validity of input: with bias nodes enabled, m_input carries one extra element.
   assert( ( !m_useBiasNodes && x.size() == m_input.size() ) ||
           (  m_useBiasNodes && x.size() == m_input.size() - 1 ) );

   /// Set input.
   for ( size_t i = 0; i < x.size(); ++i )
   {
      m_input[ i ] = x[ i ];
   }

   /// Simple forward propagation when there are no hidden layers.
   if ( m_y.size() == 0 )
   {
      propagateForward( m_input, m_output, m_weights[ 0 ] );
   }
   else
   {
      /// Propagate from input nodes to first hidden layer.
      propagateForward( m_input, m_y[ 0 ], m_weights[ 0 ] );
      applyActivationFunc( m_y[ 0 ], m_x[ 0 ] );

      /// Propagate through the remaining hidden layers.
      for ( size_t iHiddenLayer = 0; iHiddenLayer < m_y.size() - 1; ++iHiddenLayer )
      {
         propagateForward( m_x[ iHiddenLayer ], m_y[ iHiddenLayer + 1 ], m_weights[ iHiddenLayer + 1 ] );
         applyActivationFunc( m_y[ iHiddenLayer + 1 ], m_x[ iHiddenLayer + 1 ] );
      }

      /// Propagate to output nodes.
      propagateForward( m_x.back(), m_output, m_weights.back() );
   }

   return m_output;
}
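////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// forwardStepSketch
///
/// Illustrative only: a minimal, self-contained sketch of what one propagateForward + applyActivationFunc pair is
/// assumed to compute above -- a dense weighted sum followed by an element-wise nonlinearity. The row-per-node
/// weight layout and the sigmoid are assumptions of this sketch; the real propagateForward and applyActivationFunc
/// bodies are not part of this excerpt.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cmath>
#include <vector>

static std::vector< double > forwardStepSketch( const std::vector< double >& x,
                                                const std::vector< std::vector< double > >& w )
{
   std::vector< double > y( w.size(), 0.0 );
   for ( size_t i = 0; i < w.size(); ++i )
   {
      for ( size_t j = 0; j < x.size(); ++j )
      {
         y[ i ] += w[ i ][ j ] * x[ j ];          /// Weighted sum into node i (the propagateForward step).
      }
      y[ i ] = 1 / ( 1 + std::exp( -y[ i ] ) );   /// Element-wise sigmoid (the applyActivationFunc step).
   }
   return y;
}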
double RecurrentNN::nodeDifference(MNIndividual *ind) {
    RecurrentNN *other = dynamic_cast<RecurrentNN *>(ind);
    if (!other)
        return INFINITY;

    long length = (long)std::max(other->nodes.size(), nodes.size());
    double d = 0;
    for (long i = 0; i < length; i++) {
        // Probe each node's activation at -1, 0 and 1; the paired first differences
        // below combine into a second finite difference around 0.
        double d11 = 0;
        double d21 = 0;
        double d12 = 0;
        double d22 = 0;
        if (i < (long)nodes.size()) {
            d11 = applyActivationFunc(nodes[i], 0) - applyActivationFunc(nodes[i], 1);
            d21 = applyActivationFunc(nodes[i], -1) - applyActivationFunc(nodes[i], 0);
        }
        if (i < (long)other->nodes.size()) {
            d12 = applyActivationFunc(other->nodes[i], 0) - applyActivationFunc(other->nodes[i], 1);
            d22 = applyActivationFunc(other->nodes[i], -1) - applyActivationFunc(other->nodes[i], 0);
        }
        // Accumulate the absolute difference between corresponding nodes' curvatures;
        // a node missing from one network contributes the other's full term.
        d += fabs((d11 - d21)/2 - (d12 - d22)/2);
    }
    return d;
}
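// Illustrative only: the (d11 - d21)/2 term above equals ( 2 f(0) - f(1) - f(-1) ) / 2 for a node with
// activation f, i.e. a second finite difference measuring how f bends around 0, so nodeDifference sums how
// differently the two networks' corresponding activations curve. A minimal sketch, with std::function standing
// in for applyActivationFunc (an assumption; the real nodes carry their own activation state):
#include <functional>

static double activationCurvatureSketch(const std::function<double(double)> &f) {
    double d1 = f(0) - f(1);
    double d2 = f(-1) - f(0);
    return (d1 - d2) / 2; // == ( 2 f(0) - f(1) - f(-1) ) / 2
}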
std::vector<double> RecurrentNN::nodeOutputsForInputs(std::vector<double> inputs, std::vector<std::vector<double> > &memory) {
    std::vector<double> newOutputs(nodes.size());

    std::deque<long> toVisitNodes;
    for (long i = 0; i < (long)nodes.size(); i++)
        toVisitNodes.push_back(i);
    std::set<long> resolvedNodes;

    int runs = 0;
    while (toVisitNodes.size() > 0) {
        long n = toVisitNodes.front();
        bool isInput = (inputNodes.find(n) != inputNodes.end());

        std::vector<DelayEdge> inputEdges = inputsToNode(n);
        double inputSum = nodes[n].bias;

        bool tryAgain = false;
        for (long j = 0; j < (long)inputEdges.size(); j++) {
            DelayEdge &e = inputEdges[j];
            long from = e.nodeFrom;
            long delay = e.delay;
            std::vector<double> &history = memory[from];

            if (from == n) {
                // A self-loop is only readable with a delay; otherwise this tick's
                // output would depend on itself.
                assert(delay > 0);
            }

            bool longEnoughHistory = (history.size() > (size_t)delay);
            if (longEnoughHistory) {
                if (resolvedNodes.count(from) > 0 || delay > 0) {
                    // The needed value is in memory: either the source node was already
                    // resolved this tick, or we are reading a past tick anyway.
                    std::vector<double>::iterator valuePos = history.end() - (delay + 1);
                    assert(!isUnreasonable(*valuePos));
                    inputSum += *valuePos;
                } else {
                    // A delay-0 edge whose source hasn't produced this tick's output
                    // yet -- reschedule this node for later.
                    tryAgain = true;
                    break;
                }
            }
        }

        if (!tryAgain) {
            // All input nodes resolved.
            resolvedNodes.insert(n);
            if (isInput) {
                inputSum += inputs[n]; // the first [inputNodes.size()] nodes are the inputs
            }
            double output = applyActivationFunc(nodes[n], inputSum);
            assert(!isUnreasonable(output));
            newOutputs[n] = output;
            std::vector<double> &thisMemory = memory[n];
            thisMemory.push_back(output);
        } else {
            // save this for later
            toVisitNodes.push_back(n);
        }
        toVisitNodes.pop_front();
        runs++;
    }
    return newOutputs;
}
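// Illustrative only: the memory indexing used above, isolated. memory[from] gets one value appended per tick,
// so an edge with delay d reads the element d steps back from the end; delay == 0 is this tick's value. The
// function name here is hypothetical.
#include <cassert>
#include <vector>

static double delayedValueSketch(const std::vector<double> &history, long delay) {
    assert(history.size() > (size_t)delay); // the longEnoughHistory check from above
    return *(history.end() - (delay + 1)); // delay == 0 -> history.back(), i.e. this tick's output
}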