void RNNet::weightedParameterDerivative(
	BatchInputType const& patterns,
	BatchInputType const& coefficients,
	State const& state,
	RealVector& gradient
)const{
	//SIZE_CHECK(patterns.size() == coefficients.size());
	InternalState const& s = state.toState<InternalState>();
	gradient.resize(numberOfParameters());
	gradient.clear();

	std::size_t numUnits = mpe_structure->numberOfUnits();
	std::size_t numNeurons = mpe_structure->numberOfNeurons();
	std::size_t warmUpLength = m_warmUpSequence.size();
	for(std::size_t b = 0; b != patterns.size(); ++b){
		Sequence const& sequence = s.timeActivation[b];
		std::size_t sequenceLength = sequence.size();
		RealMatrix errorDerivative(sequenceLength,numNeurons);
		errorDerivative.clear();

		//copy the output errors; warm-up steps carry no error
		for (std::size_t t = warmUpLength+1; t != sequenceLength; ++t)
			for(std::size_t i = 0; i != outputSize(); ++i)
				errorDerivative(t,i+numNeurons-outputSize()) = coefficients[b][t-warmUpLength-1](i);

		//backpropagation through time: scale by the neuron derivatives
		//at step t, then propagate the error one step back through the
		//transposed recurrent weight matrix
		for (std::size_t t = sequence.size()-1; t > 0; --t){
			for (std::size_t j = 0; j != numNeurons; ++j){
				double derivative = mpe_structure->neuronDerivative(sequence[t](j+mpe_structure->inputs()+1));
				errorDerivative(t,j) *= derivative;
			}
			noalias(row(errorDerivative,t-1)) += prod(
				trans(columns(mpe_structure->weights(), inputSize()+1,numUnits)),
				row(errorDerivative,t)
			);
		}

		//update the gradient for batch element b
		std::size_t param = 0;
		for (std::size_t i = 0; i != numNeurons; ++i){
			for (std::size_t j = 0; j != numUnits; ++j){
				if(!mpe_structure->connection(i,j)) continue;

				for(std::size_t t = 1; t != sequence.size(); ++t)
					gradient(param) += errorDerivative(t,i) * sequence[t-1](j);
				++param;
			}
		}
		//sanity check
		SIZE_CHECK(param == mpe_structure->parameters());
	}
}
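// A minimal, self-contained sketch of the backprop-through-time recurrence the
// function above implements, using plain std::vector matrices instead of
// Shark's RealMatrix and an assumed tanh activation (so phi'(y) = 1 - y*y in
// terms of the neuron output y). The fully connected weight layout and all
// names are illustrative, not RNNet's actual structure.
#include <cstddef>
#include <vector>

void bpttGradientSketch(
	std::vector<std::vector<double>> const& W,      //recurrent weights, numNeurons x numNeurons
	std::vector<std::vector<double>> const& states, //neuron outputs, one vector per time step
	std::vector<std::vector<double>>& deltas,       //seeded with the output error coefficients
	std::vector<std::vector<double>>& gradient      //dE/dW, same shape as W
){
	std::size_t T = states.size();
	std::size_t n = W.size();
	for (std::size_t t = T-1; t > 0; --t){
		//scale by the activation derivative at step t
		for (std::size_t j = 0; j != n; ++j)
			deltas[t][j] *= 1.0 - states[t][j]*states[t][j];
		//propagate one step back: delta_{t-1} += W^T * delta_t
		for (std::size_t i = 0; i != n; ++i)
			for (std::size_t j = 0; j != n; ++j)
				deltas[t-1][j] += W[i][j] * deltas[t][i];
	}
	//accumulate dE/dW(i,j) = sum_t delta_t(i) * state_{t-1}(j)
	for (std::size_t i = 0; i != n; ++i)
		for (std::size_t j = 0; j != n; ++j)
			for (std::size_t t = 1; t != T; ++t)
				gradient[i][j] += deltas[t][i] * states[t-1][j];
}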
void CMACMap::weightedParameterDerivative(
	RealMatrix const& patterns,
	RealMatrix const& coefficients,
	State const&,//not needed
	RealVector &gradient
) const {
	SIZE_CHECK(patterns.size2() == m_inputSize);
	SIZE_CHECK(coefficients.size2() == m_outputSize);
	SIZE_CHECK(coefficients.size1() == patterns.size1());
	std::size_t numPatterns = patterns.size1();

	gradient.resize(m_parameters.size());
	gradient.clear();
	for(std::size_t i = 0; i != numPatterns; ++i) {
		//the output is linear in the active weights, so the derivative
		//w.r.t. each active parameter is just the coefficient itself
		std::vector<std::size_t> indizes = getIndizes(row(patterns,i));
		for (std::size_t o = 0; o != m_outputSize; ++o) {
			for (std::size_t j = 0; j != m_tilings; ++j) {
				gradient(indizes[j] + o*m_parametersPerTiling) += coefficients(i,o);
			}
		}
	}
}
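// Sketch of the forward map that the gradient above differentiates, assuming
// the usual CMAC layout: getIndizes returns one active cell per tiling, and
// each output o reads its weights from a block offset by o*parametersPerTiling.
// Because the output is a plain sum of active weights, d(output_o)/d(w_k) is 1
// for every active index k, which is why weightedParameterDerivative only
// scatters the coefficients into the active slots. All names here are
// illustrative, not CMACMap's private members.
#include <cstddef>
#include <vector>

std::vector<double> cmacEvalSketch(
	std::vector<double> const& weights,
	std::vector<std::size_t> const& indizes, //one active index per tiling
	std::size_t outputSize,
	std::size_t parametersPerTiling
){
	std::vector<double> output(outputSize, 0.0);
	for (std::size_t o = 0; o != outputSize; ++o)
		for (std::size_t j = 0; j != indizes.size(); ++j)
			output[o] += weights[indizes[j] + o*parametersPerTiling];
	return output;
}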
void GreensFunction3DRadInf::makeRnTable(RealVector& RnTable, Real r, Real t) const
{
    RnTable.clear();

    const Real sigma(getSigma());
    const Real D(getD());
    const Real kf(getkf());

    {
        // First, estimate the size of p_corr, and if it's small enough,
        // we don't need to calculate it in the first place.
        const Real pirr(p_irr(r, t, r0, kf, D, sigma));
        const Real ipfree_max(ip_free(M_PI, r, t) * 2 * M_PI * r * r);

        if (fabs((pirr - ipfree_max) / ipfree_max) < 1e-8)
        {
            return;
        }
    }

    const Real pfreemax(p_free_max(r, r0, t, D));

    gsl_integration_workspace* workspace(gsl_integration_workspace_alloc(2000));

    Real Rn_prev(0.0);
    const Real RnFactor(1.0 / (4.0 * M_PI * sqrt(r * r0)));

    const Real integrationTolerance(pfreemax / RnFactor * THETA_TOLERANCE);
    const Real truncationTolerance(pfreemax * THETA_TOLERANCE * 1e-1);

    unsigned int n(0);
    for (;;)
    {
        const Real Rn(this->Rn(n, r, t, workspace, integrationTolerance));

        RnTable.push_back(Rn);

        // truncate when converged enough.
        const Real absRn(fabs(Rn));
        if (absRn * RnFactor < truncationTolerance && absRn < Rn_prev)
        {
            break;
        }

        if (n >= this->MAX_ORDER)
        {
            log_.info("GreensFunction3DRadInf: Rn didn't converge");
            break;
        }

        Rn_prev = absRn;
        ++n;
    }

    gsl_integration_workspace_free(workspace);
}
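// Sketch of the truncation rule used in makeRnTable, pulled out of context:
// keep appending series terms until a term is both below the scaled tolerance
// and smaller in magnitude than its predecessor (so a single small term near a
// sign change does not end the series early), with a hard order cap as a
// fallback. term(n) stands in for the Rn integral; all names are illustrative.
#include <cmath>
#include <cstddef>
#include <vector>

template<class TermFn>
std::vector<double> buildSeriesTable(TermFn term, double factor,
                                     double tolerance, std::size_t maxOrder)
{
    std::vector<double> table;
    double prev(0.0);
    for (std::size_t n(0); ; ++n)
    {
        const double value(term(n));
        table.push_back(value);

        const double absValue(std::fabs(value));
        if (absValue * factor < tolerance && absValue < prev)
        {
            break; // converged: small and still shrinking
        }
        if (n >= maxOrder)
        {
            break; // give up; the caller may log a warning
        }
        prev = absValue;
    }
    return table;
}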