bool OME::interpolateResponse(const RealVec &freqs)
{
    if (freqs.empty())
    {
        LOUDNESS_ERROR("OME: Input vector has no elements.");
        return false;
    }
    else
    {
        response_.resize(freqs.size(), 0.0);

        //load the data into vectors
        getData();

        //the spline
        spline s;

        //middle ear
        if (middleEarType_ != NONE)
        {
            s.set_points(middleEarFreqPoints_, middleEardB_);

            for (uint i = 0; i < freqs.size(); i++)
            {
                if ((freqs[i] <= 75) && (middleEarType_ == ANSIS342007_MIDDLE_EAR_HPF))
                    response_[i] = -14.6; //Post HPF correction
                else if (freqs[i] >= middleEarFreqPoints_[40])
                    response_[i] = middleEardB_[40];
                else
                    response_[i] = s(freqs[i]);
            }
        }
        else
        {
            LOUDNESS_WARNING("OME: No middle ear filter used.");
        }

        //outer ear
        if (outerEarType_ != NONE)
        {
            Real lastFreq = outerEarFreqPoints_.back();
            Real lastDataPoint = outerEardB_.back();
            s.set_points(outerEarFreqPoints_, outerEardB_);

            for (uint i = 0; i < freqs.size(); i++)
            {
                if (freqs[i] >= lastFreq)
                    response_[i] += lastDataPoint;
                else
                    response_[i] += s(freqs[i]);
            }
        }
        else
        {
            LOUDNESS_WARNING("OME: No outer ear filter used.");
        }

        return true;
    }
}
//Window functions:
void Window::hann(RealVec &window, bool periodic)
{
    unsigned int N = window.size();
    int denom = N - 1; //produces zeros on both sides
    if (periodic) //Harris (1978) Eq 27b
        denom = N;
    for (uint i = 0; i < window.size(); i++)
        window[i] = 0.5 - 0.5 * cos(2.0 * PI * i / denom);
}
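// A minimal standalone sketch (not part of the library) illustrating the
// symmetric vs. periodic Hann variants computed above. It assumes only the
// standard library and uses plain std::vector<double> instead of RealVec.
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<double> hannSketch(unsigned N, bool periodic)
{
    const double pi = 3.14159265358979323846;
    // symmetric form uses N-1 in the denominator, periodic form uses N (Harris 1978, Eq 27b)
    double denom = periodic ? N : N - 1;
    std::vector<double> w(N);
    for (unsigned i = 0; i < N; i++)
        w[i] = 0.5 - 0.5 * std::cos(2.0 * pi * i / denom);
    return w;
}

// Usage: the symmetric window is zero at both ends, the periodic one only at i = 0.
// std::vector<double> sym = hannSketch(8, false), per = hannSketch(8, true);
// std::printf("%f %f\n", sym.back(), per.back()); // prints 0.000000 and 0.146447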
// return a vector of the top k indices of a
void topk(RealVec a, vector<size_t> & indtop)
{
    multimap<real, size_t> m; // mapping from value to its index
    RealVecItr it;
    for (it = a.begin(); it != a.end(); ++it)
        m.insert(make_pair(*it, it - a.begin()));

    multimap<real, size_t>::reverse_iterator itm; // iterate from the largest value downwards
    size_t indx = 0;
    for (itm = m.rbegin(); itm != m.rend(); ++itm)
    {
        //cout << itm->first << " , " << itm->second << endl;
        if (indx >= indtop.size()) break; // guard against an indtop smaller than a
        indtop[indx] = itm->second;
        indx++;
    }
}
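// A standalone sketch (not part of the library) of the same idea: indices of the
// k largest entries, using plain std::vector<double> and std::partial_sort instead
// of a multimap. Assumes only the standard library.
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

static std::vector<std::size_t> topkSketch(const std::vector<double>& a, std::size_t k)
{
    std::vector<std::size_t> idx(a.size());
    std::iota(idx.begin(), idx.end(), 0);          // 0, 1, ..., n-1
    k = std::min(k, idx.size());
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [&a](std::size_t i, std::size_t j) { return a[i] > a[j]; });
    idx.resize(k);                                  // keep only the k best indices
    return idx;
}

// Usage: topkSketch({0.2, 0.9, 0.4}, 2) returns {1, 2}.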
double ReferenceIntegrateRPMDStepKernel::computeKineticEnergy(ContextImpl& context, const RPMDIntegrator& integrator) {
    const System& system = context.getSystem();
    int numParticles = system.getNumParticles();
    vector<RealVec>& velData = extractVelocities(context);
    double energy = 0.0;
    for (int i = 0; i < numParticles; ++i) {
        double mass = system.getParticleMass(i);
        if (mass > 0) {
            RealVec v = velData[i];
            energy += mass*(v.dot(v));
        }
    }
    return 0.5*energy;
}
void Window::normaliseWindow(RealVec &window, const Normalisation& normalisation)
{
    if (normalisation != NONE)
    {
        double x = 0.0;
        double sum = 0.0, sumSquares = 0.0;
        double normFactor = 1.0;
        uint wSize = window.size();

        for (uint i = 0; i < wSize; i++)
        {
            x = window[i];
            sum += x;
            sumSquares += x*x;
        }

        switch (normalisation)
        {
            case (ENERGY):
                normFactor = sqrt(wSize/sumSquares);
                LOUDNESS_DEBUG(name_ << ": Normalising for energy.");
                break;
            case (AMPLITUDE):
                normFactor = wSize/sum;
                LOUDNESS_DEBUG(name_ << ": Normalising for amplitude.");
                break;
            default:
                normFactor = sqrt(wSize/sumSquares);
        }

        LOUDNESS_DEBUG(name_ << ": Normalising window using factor: " << normFactor);
        for (uint i = 0; i < wSize; i++)
            window[i] *= normFactor;
    }
}
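// A standalone sanity check (not part of the library) of the two factors used above:
// after AMPLITUDE normalisation the window mean is 1, and after ENERGY normalisation
// the window mean square is 1. Uses plain std::vector<double> and a Hann window as input.
#include <cassert>
#include <cmath>
#include <vector>

static void normalisationSketch()
{
    const double pi = 3.14159265358979323846;
    const unsigned N = 64;
    std::vector<double> w(N);
    for (unsigned i = 0; i < N; i++)
        w[i] = 0.5 - 0.5 * std::cos(2.0 * pi * i / (N - 1));

    double sum = 0.0, sumSquares = 0.0;
    for (double x : w) { sum += x; sumSquares += x * x; }

    double amplitudeFactor = N / sum;                 // window mean becomes 1
    double energyFactor = std::sqrt(N / sumSquares);  // window mean square becomes 1

    double meanAfterAmp = 0.0, meanSqAfterEnergy = 0.0;
    for (double x : w) {
        meanAfterAmp += x * amplitudeFactor / N;
        meanSqAfterEnergy += (x * energyFactor) * (x * energyFactor) / N;
    }
    assert(std::fabs(meanAfterAmp - 1.0) < 1e-12);
    assert(std::fabs(meanSqAfterEnergy - 1.0) < 1e-12);
}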
template <typename Element>
MatrixXd Scalar<Element>::computeSourceTerm(const double time, const PetscInt time_idx)
{
    mSource.setZero();
    for (auto &source : Element::Sources())
    {
        RealVec pnt;
        if (Element::NumDim() == 2)
        {
            pnt.resize(2);
            pnt << source->LocR(), source->LocS();
        }
        if (Element::NumDim() == 3)
        {
            pnt.resize(3);
            pnt << source->LocR(), source->LocS(), source->LocT();
        }
        mSource += (Element::getDeltaFunctionCoefficients(pnt) * source->fire(time, time_idx));
    }
    return Element::applyTestAndIntegrate(mSource);
}
void MCMCGRDiagnosticPSRF::outputResults(
        const std::string& filenameGRScalars,
        const RealVec& sampledInd,
        int precData) const
{
    /* sampledInd may contain more values than we monitored */
    RealVec::const_iterator it = sampledInd.begin();
    advance(it, scalars.size());
    RealVec sampledIndTmp(sampledInd.begin(), it);

    std::vector < std::string > colNames;
    getScalarColNames(colNames);
    colNames.push_back("W");
    colNames.push_back("B");
    colNames.push_back("estVarV");
    colNames.push_back("rhat");
    colNames.push_back("rhatFlag");
    colNames.push_back("sampled?");

    std::vector < const RealVec* > data;
    addDataPtrs(data, scalars);
    data.push_back(&Ws);
    data.push_back(&Bs);
    data.push_back(&estVarV);
    data.push_back(&rhat);
    data.push_back(&rhatFlag);
    data.push_back(&sampledIndTmp);

    outputToFileVertical(data, colNames, filenameGRScalars, precData);
}
void TriangleFunction::apply( RealVec& inputs, RealVec& outputs )
{
    // --- out <- amplitude*( 1.0 - |(x-c)/a - floor((x-c)/a+0.5)| )
    for ( int i = 0; i < (int)inputs.size(); i++ )
    {
        Real sawtooth = (inputs[i]-phasev)/spanv - floor( (inputs[i]-phasev)/spanv + 0.5 );
        outputs[i] = amplitudev*( 1.0 - fabs( sawtooth ) );
    }
}
void PowerSpectrumAndSpatialDetection::hannWindow(RealVec &w, int fftSize)
{
    int windowSize = (int)w.size();
    //0.375 is the mean square of a Hann window; 2e-5 Pa (20 micropascals) is the
    //reference sound pressure used for the dB SPL scaling
    Real norm = sqrt(2.0/(fftSize*windowSize*0.375*2e-5*2e-5));
    for (int i = 0; i < windowSize; i++)
        w[i] = norm*(0.5+0.5*cos(2*PI*(i-0.5*(windowSize-1))/windowSize));
}
// fill a vector with probabilities as reals
cxsc::real EquiProbProposal::fillNodeProposalProbs(const size_t nLeaf,
                                                   const size_t nCherry,
                                                   RealVec& probs) const
{
    cxsc::real retSum = 0.0;
    probs.reserve(nLeaf + nCherry);

    if (nLeaf + nCherry > 0)
    {
        cxsc::real pNode = 1.0/(nLeaf + nCherry);
        retSum += pNode*(1.0*nLeaf + 1.0*nCherry);
        probs.assign(nLeaf + nCherry, pNode);
    }
    return retSum;
}
// fill a vector with probabilities as reals
cxsc::real UniformSSMProposal::fillNodeProposalProbs(const size_t nLeaf,
                                                     const size_t nCherry,
                                                     RealVec& probs) const
{
    cxsc::real retSum = 0.0;
    probs.reserve(nLeaf + nCherry);

    if (nLeaf > 0)
    {
        cxsc::real pLeaf = probSplitMerge/nLeaf;
        retSum += (1.0*nLeaf * pLeaf);
        probs.assign(nLeaf, pLeaf);
    }
    if (nCherry > 0)
    {
        cxsc::real pCherry = probSplitMerge/nCherry;
        retSum += (1.0*nCherry * pCherry);
        probs.insert(probs.end(), nCherry, pCherry);
    }
    return retSum;
}
void ReferenceIntegrateDrudeSCFStepKernel::minimize(ContextImpl& context, double tolerance) {
    // Record the initial positions and determine a normalization constant for scaling the tolerance.

    vector<RealVec>& pos = extractPositions(context);
    int numDrudeParticles = drudeParticles.size();
    double norm = 0.0;
    for (int i = 0; i < numDrudeParticles; i++) {
        RealVec p = pos[drudeParticles[i]];
        minimizerPos[3*i] = p[0];
        minimizerPos[3*i+1] = p[1];
        minimizerPos[3*i+2] = p[2];
        norm += p.dot(p);
    }
    norm /= numDrudeParticles;
    norm = (norm < 1 ? 1 : sqrt(norm));
    minimizerParams.epsilon = tolerance/norm;

    // Perform the minimization.

    lbfgsfloatval_t fx;
    MinimizerData data(context, drudeParticles);
    lbfgs(numDrudeParticles*3, minimizerPos, &fx, evaluate, NULL, &data, &minimizerParams);
}
int MCMCGRDiagnosticPSRF::calcDiagnosticsForLoop()
{
    if (keepLogs) {
        // store the current runningSumAllChains as well
        runningSumOverall.push_back(runningSumAllChains);
    }

    // convergence diagnostics calculations for v's

    // the Ws: average, over chains, of sample variance of scalar value
    cxsc::real thisW = sumOfSampleVariancesOverChains/(chains * 1.0);

    #ifdef MYDEBUG_CALCS_EXTRA
        cout << "and thisW = " << thisW << endl;
    #endif

    Ws.push_back(thisW);

    // the Bs
    size_t statesForCalcs = states - statesNotInCalcs;

    cxsc::real thisB = (1.0/( (chains - 1) * statesForCalcs )
            * ( sumOfSquaresOfRunningSums
                - (runningSumAllChains * runningSumAllChains/(chains * 1.0)) ) );

    Bs.push_back(thisB);

    #ifdef MYDEBUG_CALCS
        //check thisB is correct, doing it the long way
        // runningSumPtr has one running sum for each chain
        RealVec chainAverages;
        cxsc::real accRunningSums(0.0);
        for (RealVecItr it = runningSum.begin(); it < runningSum.end(); ++it) {
            cxsc::real thisChainRunningSum = (*it);
            cxsc::real thisChainAv = thisChainRunningSum/(statesForCalcs * 1.0);
            chainAverages.push_back(thisChainAv);
            accRunningSums += thisChainRunningSum;
        }
        cxsc::real overallAv = accRunningSums/(statesForCalcs * chains * 1.0);
        cxsc::dotprecision accDiffs(0.0);
        for (RealVecItr it = chainAverages.begin(); it < chainAverages.end(); ++it) {
            cxsc::real thisDiff = (*it) - overallAv;
            // sum up the squares of the differences compared to overall average
            cxsc::accumulate(accDiffs, thisDiff, thisDiff);
        }
        cxsc::real altB = rnd(accDiffs)*( statesForCalcs/(chains - 1.0) );
        cout << "\nthisB for v's is\t" << thisB << endl;
        cout << "altB for v's is\t" << altB << endl;
        //assert(thisB == altB);
    #endif

    // the estimated var(v)
    cxsc::real thisVarV(0.0);
    if (statesForCalcs > 1) {
        thisVarV = statesForCalcs/(statesForCalcs-1.0) * thisW
            + (1.0/statesForCalcs)*thisB;
    }
    #ifndef OLDCALCMETHOD
        if (statesForCalcs > 1) thisVarV += thisB/(1.0*chains*statesForCalcs);
    #endif

    estVarV.push_back(thisVarV);

    // the rhats
    cxsc::real thisRhat(0.0);
    // allow division by 0 if w == 0 when var does not
    if (thisW > 0.0 || thisVarV > 0.0) {
        thisRhat = thisVarV/thisW;
    }
    rhat.push_back(thisRhat);

    #ifdef MYDEBUG_CALCS_EXTRA
        cout << "thisRhat = " << thisRhat << " - press any key " << endl;
        getchar();
    #endif

    if ( (thisRhat <= 1.0 + tol) && (thisRhat >= 1.0 - tol) ) {
        // if we have not been converged before on this scalar value
        if (!rhatDiagnosticFlag) {
            #ifdef MYDEBUG
                cout << "\n" << getScalarsName()
                     << " convergence test satisfied in state "
                     << states << " (states in calcs = "
                     << statesForCalcs << ")" << endl;
            #endif
            // set the flag for this scalar value
            rhatDiagnosticFlag = 1;
        }
    }
    else { // not converged on this scalar value
        // if we were okay on this scalar value before
        if (rhatDiagnosticFlag) {
            #ifdef MYDEBUG
                cout << "\n--------- Note: " << getScalarsName()
                     << " convergence test now NOT satisfied in state "
                     << states << endl;
            #endif
            rhatDiagnosticFlag = 0; // update the flag
        }
    }

    if (keepLogs) {
        // store the flag as well, as a real, which is a fudge...
        rhatFlag.push_back(rhatDiagnosticFlag);
    }

    // end of checking diagnostic for v's
    return rhatDiagnosticFlag;
} // end calculations
void SawtoothFunction::apply( RealVec& inputs, RealVec& outputs )
{
    // --- out <- amplitude*( (x-c)/a - floor((x-c)/a+0.5) )
    for ( int i = 0; i < (int)inputs.size(); i++ )
        outputs[i] = amplitudev*( (inputs[i]-phasev)/spanv - floor( (inputs[i]-phasev)/spanv + 0.5 ) );
}
void PseudoGaussFunction::apply( RealVec& inputs, RealVec& outputs )
{
    for ( int i = 0; i < (int)inputs.size(); i++ )
        outputs[i] = 0.5*amplitudev*( sin( 2.0*PI_GRECO*((inputs[i]-phasev)/spanv + 0.25) ) + 1.0 );
}
void SinFunction::apply( RealVec& inputs, RealVec& outputs )
{
    for ( int i = 0; i < (int)inputs.size(); i++ )
        outputs[i] = amplitudev*sin( 2.0*PI_GRECO*(inputs[i]/spanv) - PI_GRECO*phasev );
}
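// Added note (not from the original sources), using the same symbols as the code
// (A = amplitudev, a = spanv, c = phasev): the four periodic functions above are,
// for positive A,
//   Sawtooth:    y = A*( (x-c)/a - floor((x-c)/a + 0.5) ),          range [-A/2, A/2)
//   Triangle:    y = A*( 1 - |(x-c)/a - floor((x-c)/a + 0.5)| ),    range [A/2, A]
//   PseudoGauss: y = (A/2)*( sin(2*pi*((x-c)/a + 0.25)) + 1 ),      range [0, A]
//   Sin:         y = A*sin(2*pi*x/a - pi*c)
// All have period a; note that SinFunction applies the phase c directly inside the
// sine argument rather than shifting x as the others do.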
double CpuCalcNonbondedForceKernel::execute(ContextImpl& context, bool includeForces, bool includeEnergy, bool includeDirect, bool includeReciprocal) {
    if (!hasInitializedPme) {
        hasInitializedPme = true;
        useOptimizedPme = false;
        if (nonbondedMethod == PME) {
            // If available, use the optimized PME implementation.

            try {
                optimizedPme = getPlatform().createKernel(CalcPmeReciprocalForceKernel::Name(), context);
                optimizedPme.getAs<CalcPmeReciprocalForceKernel>().initialize(gridSize[0], gridSize[1], gridSize[2], numParticles, ewaldAlpha);
                useOptimizedPme = true;
            }
            catch (OpenMMException& ex) {
                // The CPU PME plugin isn't available.
            }
        }
    }
    AlignedArray<float>& posq = data.posq;
    vector<RealVec>& posData = extractPositions(context);
    vector<RealVec>& forceData = extractForces(context);
    RealVec boxSize = extractBoxSize(context);
    float floatBoxSize[3] = {(float) boxSize[0], (float) boxSize[1], (float) boxSize[2]};
    double energy = (includeReciprocal ? ewaldSelfEnergy : 0.0);
    bool ewald = (nonbondedMethod == Ewald);
    bool pme = (nonbondedMethod == PME);
    if (nonbondedMethod != NoCutoff) {
        // Determine whether we need to recompute the neighbor list.

        double padding = 0.15*nonbondedCutoff;
        bool needRecompute = false;
        double closeCutoff2 = 0.25*padding*padding;
        double farCutoff2 = 0.5*padding*padding;
        int maxNumMoved = numParticles/10;
        vector<int> moved;
        for (int i = 0; i < numParticles; i++) {
            RealVec delta = posData[i]-lastPositions[i];
            double dist2 = delta.dot(delta);
            if (dist2 > closeCutoff2) {
                moved.push_back(i);
                if (dist2 > farCutoff2 || moved.size() > maxNumMoved) {
                    needRecompute = true;
                    break;
                }
            }
        }
        if (!needRecompute && moved.size() > 0) {
            // Some particles have moved further than half the padding distance.  Look for pairs
            // that are missing from the neighbor list.

            int numMoved = moved.size();
            double cutoff2 = nonbondedCutoff*nonbondedCutoff;
            double paddedCutoff2 = (nonbondedCutoff+padding)*(nonbondedCutoff+padding);
            for (int i = 1; i < numMoved && !needRecompute; i++)
                for (int j = 0; j < i; j++) {
                    RealVec delta = posData[moved[i]]-posData[moved[j]];
                    if (delta.dot(delta) < cutoff2) {
                        // These particles should interact.  See if they are in the neighbor list.

                        RealVec oldDelta = lastPositions[moved[i]]-lastPositions[moved[j]];
                        if (oldDelta.dot(oldDelta) > paddedCutoff2) {
                            needRecompute = true;
                            break;
                        }
                    }
                }
        }
        if (needRecompute) {
            neighborList->computeNeighborList(numParticles, posq, exclusions, floatBoxSize, data.isPeriodic, nonbondedCutoff+padding, data.threads);
            lastPositions = posData;
        }
        nonbonded->setUseCutoff(nonbondedCutoff, *neighborList, rfDielectric);
    }
    if (data.isPeriodic) {
        double minAllowedSize = 1.999999*nonbondedCutoff;
        if (boxSize[0] < minAllowedSize || boxSize[1] < minAllowedSize || boxSize[2] < minAllowedSize)
            throw OpenMMException("The periodic box size has decreased to less than twice the nonbonded cutoff.");
        nonbonded->setPeriodic(floatBoxSize);
    }
    if (ewald)
        nonbonded->setUseEwald(ewaldAlpha, kmax[0], kmax[1], kmax[2]);
    if (pme)
        nonbonded->setUsePME(ewaldAlpha, gridSize);
    if (useSwitchingFunction)
        nonbonded->setUseSwitchingFunction(switchingDistance);
    double nonbondedEnergy = 0;
    if (includeDirect)
        nonbonded->calculateDirectIxn(numParticles, &posq[0], posData, particleParams, exclusions, data.threadForce, includeEnergy ? &nonbondedEnergy : NULL, data.threads);
    if (includeReciprocal) {
        if (useOptimizedPme) {
            PmeIO io(&posq[0], &data.threadForce[0][0], numParticles);
            Vec3 periodicBoxSize(boxSize[0], boxSize[1], boxSize[2]);
            optimizedPme.getAs<CalcPmeReciprocalForceKernel>().beginComputation(io, periodicBoxSize, includeEnergy);
            nonbondedEnergy += optimizedPme.getAs<CalcPmeReciprocalForceKernel>().finishComputation(io);
        }
        else
            nonbonded->calculateReciprocalIxn(numParticles, &posq[0], posData, particleParams, exclusions, forceData, includeEnergy ? &nonbondedEnergy : NULL);
    }
    energy += nonbondedEnergy;
    if (includeDirect) {
        ReferenceBondForce refBondForce;
        ReferenceLJCoulomb14 nonbonded14;
        refBondForce.calculateForce(num14, bonded14IndexArray, posData, bonded14ParamArray, forceData, includeEnergy ? &energy : NULL, nonbonded14);
        if (data.isPeriodic)
            energy += dispersionCoefficient/(boxSize[0]*boxSize[1]*boxSize[2]);
    }
    return energy;
}
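// Added note on the rebuild heuristic above: the list is built with an enlarged
// cutoff r_c + p, where p = 0.15*r_c is the padding. A pair absent from the list
// was more than r_c + p apart when the list was built, so it can only come within
// r_c if the particles' relative displacement exceeds p. Hence: if every particle
// has moved less than p/2 (dist2 <= closeCutoff2 = (p/2)^2) the list is still valid;
// if any particle has moved more than p/sqrt(2) (farCutoff2 = p^2/2), or more than
// a tenth of the particles have moved beyond p/2, the list is rebuilt outright;
// otherwise only the pairs among the "moved" particles are checked explicitly.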
double ReferenceCalcDrudeForceKernel::execute(ContextImpl& context, bool includeForces, bool includeEnergy) {
    vector<RealVec>& pos = extractPositions(context);
    vector<RealVec>& force = extractForces(context);
    int numParticles = particle.size();
    double energy = 0;

    // Compute the interactions from the harmonic springs.

    for (int i = 0; i < numParticles; i++) {
        int p = particle[i];
        int p1 = particle1[i];
        int p2 = particle2[i];
        int p3 = particle3[i];
        int p4 = particle4[i];
        RealOpenMM a1 = (p2 == -1 ? 1 : aniso12[i]);
        RealOpenMM a2 = (p3 == -1 || p4 == -1 ? 1 : aniso34[i]);
        RealOpenMM a3 = 3-a1-a2;
        RealOpenMM k3 = charge[i]*charge[i]/(polarizability[i]*a3);
        RealOpenMM k1 = charge[i]*charge[i]/(polarizability[i]*a1) - k3;
        RealOpenMM k2 = charge[i]*charge[i]/(polarizability[i]*a2) - k3;

        // Compute the isotropic force.

        RealVec delta = pos[p]-pos[p1];
        RealOpenMM r2 = delta.dot(delta);
        energy += 0.5*k3*r2;
        force[p] -= delta*k3;
        force[p1] += delta*k3;

        // Compute the first anisotropic force.

        if (p2 != -1) {
            RealVec dir = pos[p1]-pos[p2];
            RealOpenMM invDist = 1.0/sqrt(dir.dot(dir));
            dir *= invDist;
            RealOpenMM rprime = dir.dot(delta);
            energy += 0.5*k1*rprime*rprime;
            RealVec f1 = dir*(k1*rprime);
            RealVec f2 = (delta-dir*rprime)*(k1*rprime*invDist);
            force[p] -= f1;
            force[p1] += f1-f2;
            force[p2] += f2;
        }

        // Compute the second anisotropic force.

        if (p3 != -1 && p4 != -1) {
            RealVec dir = pos[p3]-pos[p4];
            RealOpenMM invDist = 1.0/sqrt(dir.dot(dir));
            dir *= invDist;
            RealOpenMM rprime = dir.dot(delta);
            energy += 0.5*k2*rprime*rprime;
            RealVec f1 = dir*(k2*rprime);
            RealVec f2 = (delta-dir*rprime)*(k2*rprime*invDist);
            force[p] -= f1;
            force[p1] += f1;
            force[p3] -= f2;
            force[p4] += f2;
        }
    }

    // Compute the screened interaction between bonded dipoles.

    int numPairs = pair1.size();
    for (int i = 0; i < numPairs; i++) {
        int dipole1 = pair1[i];
        int dipole2 = pair2[i];
        int dipole1Particles[] = {particle[dipole1], particle1[dipole1]};
        int dipole2Particles[] = {particle[dipole2], particle1[dipole2]};
        for (int j = 0; j < 2; j++)
            for (int k = 0; k < 2; k++) {
                int p1 = dipole1Particles[j];
                int p2 = dipole2Particles[k];
                RealOpenMM chargeProduct = charge[dipole1]*charge[dipole2]*(j == k ? 1 : -1);
                RealVec delta = pos[p1]-pos[p2];
                RealOpenMM r = sqrt(delta.dot(delta));
                RealOpenMM u = r*pairThole[i]/pow(polarizability[dipole1]*polarizability[dipole2], 1.0/6.0);
                RealOpenMM screening = 1.0 - (1.0+0.5*u)*exp(-u);
                energy += ONE_4PI_EPS0*chargeProduct*screening/r;
                RealVec f = delta*(ONE_4PI_EPS0*chargeProduct*screening/(r*r*r));
                force[p1] += f;
                force[p2] -= f;
            }
    }
    return energy;
}
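// Added note on the spring constants above: for a Drude oscillator the isotropic
// polarizability is alpha = q^2/k, so the spring constant is k = q^2/alpha. With
// anisotropy scale factors a1, a2 and a3 = 3 - a1 - a2, the stiffness along the two
// anisotropy axes is q^2/(alpha*a1) and q^2/(alpha*a2). Since the isotropic term k3
// already acts along every direction, the code applies only the differences
// k1 = q^2/(alpha*a1) - k3 and k2 = q^2/(alpha*a2) - k3 to the projected displacements.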
bool CompressSpectrum::initializeInternal(const SignalBank &input)
{
    LOUDNESS_ASSERT(input.getNChannels() > 1, name_ << ": Insufficient number of channels.");

    /*
     * This code is sloppy due to a long time spent figuring out how
     * to implement the damn thing.
     * It's currently in two parts: one that searches for the limits of each
     * summation range in order to satisfy the summation criterion,
     * and another that finds the average centre frequency per compressed band.
     */
    int nChannels = input.getNChannels();
    int i=0, binIdxPrev = 0;
    Real dif = hertzToCam(input.getCentreFreq(1)) - hertzToCam(input.getCentreFreq(0));
    int groupSize = max(2.0, std::floor(alpha_/(dif)));
    int groupSizePrev = groupSize;
    vector<int> groupSizeStore, binIdx;

    while (i < nChannels-1)
    {
        //compute the difference between adjacent bins on the Cam scale
        dif = hertzToCam(input.getCentreFreq(i+1)) - hertzToCam(input.getCentreFreq(i));

        //check if we can sum bins in groups of groupSize
        if (dif < (alpha_/double(groupSize)))
        {
            /*
             * from here we can group bins in groupSize
             * whilst maintaining alpha spacing
             */

            //check we have the zero idx
            if ((binIdx.size() < 1) && (i>0))
            {
                binIdx.push_back(0);
                groupSizeStore.push_back(1);
            }

            /*
             * This line ensures the next group starts at the next multiple of the previous
             * groupSize above the previous starting position.
             * This is why you sometimes get finer resolution than the criterion.
             */
            int store = ceil((i-binIdxPrev)/double(groupSizePrev))*groupSizePrev+binIdxPrev;

            /*
             * This line is cheeky; it re-evaluates the groupSize at the new multiple
             * in an attempt to maintain alpha spacing. I'm not 100% sure, but the algorithm
             * seems to satisfy various criteria.
             */
            if ((store > 0) && (store < nChannels))
            {
                dif = hertzToCam(input.getCentreFreq(store)) - hertzToCam(input.getCentreFreq(store-1));
                groupSize = max((double)groupSize, std::floor(alpha_/dif));
            }

            //fill variables
            groupSizePrev = groupSize;
            binIdxPrev = store;

            //storage
            binIdx.push_back(store);
            groupSizeStore.push_back(groupSize);
            //print "Bin: %d, Binnew: %d, composite bin size: %d" % (i, store, groupSize)

            //Move i along
            i = store+groupSize;

            //increment groupSize for wider group
            groupSize += 1;
        }
        else
            i += 1;
    }

    //add the final frequency
    if (binIdx[binIdx.size()-1] < nChannels)
        binIdx.push_back(nChannels);

    //PART 2
    //compressed spectrum
    RealVec cfs;
    Real fa = 0;
    int count = 0;
    int j = 0;
    i = 0;
    while (i < nChannels)
    {
        //bounds check out?
        if (i < binIdx[j+1])
        {
            fa += input.getCentreFreq(i);
            count++;

            if (count == groupSizeStore[j])
            {
                //upper limit
                upperBandIdx_.push_back(i+1); //+1 for < conditional

                //set the output frequency
                cfs.push_back(fa/count);

                count = 0;
                fa = 0;
            }
            i++;
        }
        else
            j++;
    }

    //add the final component if it didn't make it
    if (count > 0)
    {
        cfs.push_back(fa/count);
        upperBandIdx_.push_back(i);
    }

    //check
    #if defined(DEBUG)
    Real freqLimit = 0.0;
    for (unsigned int i=0; i<cfs.size()-1; i++)
    {
        if ((hertzToCam(cfs[i+1]) - hertzToCam(cfs[i])) > alpha_)
            freqLimit = cfs[i];
    }
    LOUDNESS_DEBUG("CompressSpectrum: Criterion satisfied above " << freqLimit << " Hz.");
    #endif

    //set output SignalBank
    output_.initialize(input.getNSources(), input.getNEars(), cfs.size(), 1, input.getFs());
    output_.setCentreFreqs(cfs);
    output_.setFrameRate(input.getFrameRate());
    LOUDNESS_DEBUG(name_ << ": Number of bins comprising the compressed spectrum: " << output_.getNChannels());

    return true;
}
void ReferenceIntegrateDrudeLangevinStepKernel::execute(ContextImpl& context, const DrudeLangevinIntegrator& integrator) {
    vector<RealVec>& pos = extractPositions(context);
    vector<RealVec>& vel = extractVelocities(context);
    vector<RealVec>& force = extractForces(context);

    // Update velocities of ordinary particles.

    const RealOpenMM vscale = exp(-integrator.getStepSize()*integrator.getFriction());
    const RealOpenMM fscale = (1-vscale)/integrator.getFriction();
    const RealOpenMM kT = BOLTZ*integrator.getTemperature();
    const RealOpenMM noisescale = sqrt(2*kT*integrator.getFriction())*sqrt(0.5*(1-vscale*vscale)/integrator.getFriction());
    for (int i = 0; i < (int) normalParticles.size(); i++) {
        int index = normalParticles[i];
        RealOpenMM invMass = particleInvMass[index];
        if (invMass != 0.0) {
            RealOpenMM sqrtInvMass = sqrt(invMass);
            for (int j = 0; j < 3; j++)
                vel[index][j] = vscale*vel[index][j] + fscale*invMass*force[index][j] + noisescale*sqrtInvMass*SimTKOpenMMUtilities::getNormallyDistributedRandomNumber();
        }
    }

    // Update velocities of Drude particle pairs.

    const RealOpenMM vscaleDrude = exp(-integrator.getStepSize()*integrator.getDrudeFriction());
    const RealOpenMM fscaleDrude = (1-vscaleDrude)/integrator.getDrudeFriction();
    const RealOpenMM kTDrude = BOLTZ*integrator.getDrudeTemperature();
    const RealOpenMM noisescaleDrude = sqrt(2*kTDrude*integrator.getDrudeFriction())*sqrt(0.5*(1-vscaleDrude*vscaleDrude)/integrator.getDrudeFriction());
    for (int i = 0; i < (int) pairParticles.size(); i++) {
        int p1 = pairParticles[i].first;
        int p2 = pairParticles[i].second;
        RealOpenMM mass1fract = pairInvTotalMass[i]/particleInvMass[p1];
        RealOpenMM mass2fract = pairInvTotalMass[i]/particleInvMass[p2];
        RealOpenMM sqrtInvTotalMass = sqrt(pairInvTotalMass[i]);
        RealOpenMM sqrtInvReducedMass = sqrt(pairInvReducedMass[i]);
        RealVec cmVel = vel[p1]*mass1fract+vel[p2]*mass2fract;
        RealVec relVel = vel[p2]-vel[p1];
        RealVec cmForce = force[p1]+force[p2];
        RealVec relForce = force[p2]*mass1fract - force[p1]*mass2fract;
        for (int j = 0; j < 3; j++) {
            cmVel[j] = vscale*cmVel[j] + fscale*pairInvTotalMass[i]*cmForce[j] + noisescale*sqrtInvTotalMass*SimTKOpenMMUtilities::getNormallyDistributedRandomNumber();
            relVel[j] = vscaleDrude*relVel[j] + fscaleDrude*pairInvReducedMass[i]*relForce[j] + noisescaleDrude*sqrtInvReducedMass*SimTKOpenMMUtilities::getNormallyDistributedRandomNumber();
        }
        vel[p1] = cmVel-relVel*mass2fract;
        vel[p2] = cmVel+relVel*mass1fract;
    }

    // Update the particle positions.

    int numParticles = particleInvMass.size();
    vector<RealVec> xPrime(numParticles);
    RealOpenMM dt = integrator.getStepSize();
    for (int i = 0; i < numParticles; i++)
        if (particleInvMass[i] != 0.0)
            xPrime[i] = pos[i]+vel[i]*dt;

    // Apply constraints.

    extractConstraints(context).apply(pos, xPrime, particleInvMass, integrator.getConstraintTolerance());

    // Record the constrained positions and velocities.

    RealOpenMM dtInv = 1.0/dt;
    for (int i = 0; i < numParticles; i++) {
        if (particleInvMass[i] != 0.0) {
            vel[i] = (xPrime[i]-pos[i])*dtInv;
            pos[i] = xPrime[i];
        }
    }

    // Apply hard wall constraints.

    const RealOpenMM maxDrudeDistance = integrator.getMaxDrudeDistance();
    if (maxDrudeDistance > 0) {
        const RealOpenMM hardwallscaleDrude = sqrt(kTDrude);
        for (int i = 0; i < (int) pairParticles.size(); i++) {
            int p1 = pairParticles[i].first;
            int p2 = pairParticles[i].second;
            RealVec delta = pos[p1]-pos[p2];
            RealOpenMM r = sqrt(delta.dot(delta));
            RealOpenMM rInv = 1/r;
            if (rInv*maxDrudeDistance < 1.0) {
                // The constraint has been violated, so make the inter-particle distance "bounce"
                // off the hard wall.

                if (rInv*maxDrudeDistance < 0.5)
                    throw OpenMMException("Drude particle moved too far beyond hard wall constraint");
                RealVec bondDir = delta*rInv;
                RealVec vel1 = vel[p1];
                RealVec vel2 = vel[p2];
                RealOpenMM mass1 = particleMass[p1];
                RealOpenMM mass2 = particleMass[p2];
                RealOpenMM deltaR = r-maxDrudeDistance;
                RealOpenMM deltaT = dt;
                RealOpenMM dotvr1 = vel1.dot(bondDir);
                RealVec vb1 = bondDir*dotvr1;
                RealVec vp1 = vel1-vb1;
                if (mass2 == 0) {
                    // The parent particle is massless, so move only the Drude particle.

                    if (dotvr1 != 0.0)
                        deltaT = deltaR/abs(dotvr1);
                    if (deltaT > dt)
                        deltaT = dt;
                    dotvr1 = -dotvr1*hardwallscaleDrude/(abs(dotvr1)*sqrt(mass1));
                    RealOpenMM dr = -deltaR + deltaT*dotvr1;
                    pos[p1] += bondDir*dr;
                    vel[p1] = vp1 + bondDir*dotvr1;
                }
                else {
                    // Move both particles.

                    RealOpenMM invTotalMass = pairInvTotalMass[i];
                    RealOpenMM dotvr2 = vel2.dot(bondDir);
                    RealVec vb2 = bondDir*dotvr2;
                    RealVec vp2 = vel2-vb2;
                    RealOpenMM vbCMass = (mass1*dotvr1 + mass2*dotvr2)*invTotalMass;
                    dotvr1 -= vbCMass;
                    dotvr2 -= vbCMass;
                    if (dotvr1 != dotvr2)
                        deltaT = deltaR/abs(dotvr1-dotvr2);
                    if (deltaT > dt)
                        deltaT = dt;
                    RealOpenMM vBond = hardwallscaleDrude/sqrt(mass1);
                    dotvr1 = -dotvr1*vBond*mass2*invTotalMass/abs(dotvr1);
                    dotvr2 = -dotvr2*vBond*mass1*invTotalMass/abs(dotvr2);
                    RealOpenMM dr1 = -deltaR*mass2*invTotalMass + deltaT*dotvr1;
                    RealOpenMM dr2 = deltaR*mass1*invTotalMass + deltaT*dotvr2;
                    dotvr1 += vbCMass;
                    dotvr2 += vbCMass;
                    pos[p1] += bondDir*dr1;
                    pos[p2] += bondDir*dr2;
                    vel[p1] = vp1 + bondDir*dotvr1;
                    vel[p2] = vp2 + bondDir*dotvr2;
                }
            }
        }
    }
    ReferenceVirtualSites::computePositions(context.getSystem(), pos);
    data.time += integrator.getStepSize();
    data.stepCount++;
}
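// Added note on the velocity update above: with friction gamma, step dt and
// vscale = exp(-gamma*dt), each velocity component is updated as
//   v <- vscale*v + (1 - vscale)/(gamma*m) * F + sqrt(kT*(1 - vscale^2)/m) * xi,
// where xi is a standard normal variate; noisescale*sqrt(invMass) equals
// sqrt(2*kT*gamma)*sqrt(0.5*(1 - vscale^2)/gamma)/sqrt(m) = sqrt(kT*(1 - vscale^2)/m).
// The same form is applied to the pair centre-of-mass motion at the system temperature
// and to the internal Drude coordinate at the (usually much lower) Drude temperature.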
//-----------------------------------------------------------------------
// optimizing over smoothing parameter Temp -
// getting best histogram over parameter in [t_lo, t_hi] by maximising/minimising
// a CV based score
//-----------------------------------------------------------------------
// this is a slower and generic K-fold CV method -- USE Leave1Out for Histograms!!!
// CAUTION: held out Lkl is used as an example and should be replaced
// with the appropriate scoring rule for maximization over parameter in [t_lo, t_hi]
bool selectPriorByLlkCV (RVecData & transformedData, size_t K,
                         double t_lo, double t_hi,
                         int LocalMaxTempIterations, int MaxTempIterations,
                         RealVec & AvgHeldOutLkls, vector<double> & Temperatures,
                         double & t_opt, double & AvgHeldOutLkls_opt,
                         size_t minPoints, int chooseStarts, int keep,
                         bool stopOnMaxPosterior, string postFileName,
                         string checkPostFileNameBase, int precPQ,
                         unsigned long int seedStarts)
{
    RealVec AvgHeldOutEmpiricalDeviations;

    // first get a root box containing all points
    AdaptiveHistogram adhA0; //main hist object
    bool successfulInsertion = false;
    successfulInsertion = adhA0.insertFromRVec(transformedData); //insert transformed data
    if (!successfulInsertion) throw std::runtime_error("Failed to insert transformed data");
    //transformedData.clear(); //keep the transformed data!!

    size_t n = adhA0.getRootCounter();
    size_t d = adhA0.getDimensions ();
    //cout << "transformed data: n = " << n << endl; getchar();

    bool success=false;
    const size_t N = adhA0.getRootCounter(); //size of successfully inserted transformed data
    const size_t KofN = N/K;
    //cout << KofN << endl; getchar();
    size_t nTrain;
    adhA0.clearAllHistData(); //clear the transformed data to make space during CV
    RVecData transformedDataT; //container to keep the transformed Training data from first burst
    RVecData transformedDataV; //container to keep the transformed Validation data from first burst

    // set up for permutations
    const gsl_rng_type * T;
    gsl_rng * r;
    gsl_permutation * p = gsl_permutation_alloc (N);
    gsl_permutation * q = gsl_permutation_alloc (N);
    gsl_rng_env_setup();
    T = gsl_rng_default;
    r = gsl_rng_alloc (T);
    //printf ("initial permutation:");
    gsl_permutation_init (p);
    //gsl_permutation_fprintf (stdout, p, " %u"); printf ("\n"); getchar();

    ////////////////////////////////////////////////////////////////////////////////////////////////
    int TempIterations=0;
    std::vector<double> LocalTemperatures;
    do{ // outer temp iterations
        if (TempIterations!=0){
            std::vector<size_t> indtop(AvgHeldOutLkls.size());
            topk(AvgHeldOutLkls, indtop);
            double tBest = Temperatures[indtop[0]];
            double t2ndBest = Temperatures[indtop[1]];
            double t3rdBest = Temperatures[indtop[2]];
            double tWorst = max(abs(tBest-t2ndBest),abs(tBest-t3rdBest));
            t_lo = max(0.00001,tBest-tWorst);
            t_hi = tBest+tWorst;
            // cout << t_lo << " , " << t_hi << " : " << tBest << " , " << t2ndBest << " , " << t3rdBest << endl; getchar();
            if (t_hi-t_lo<0.001 || abs(AvgHeldOutLkls[indtop[0]]-AvgHeldOutLkls[indtop[1]])<0.01) {
                cout << "reaching temp values < 0.001 or likl diff < 1.0" << endl; getchar();
                break;
            }
        }
        double t_Delta=(t_hi-t_lo)/double(LocalMaxTempIterations-1);
        LocalTemperatures.clear();
        for(int i=0; i<LocalMaxTempIterations; i++){
            Temperatures.push_back(t_lo+(double(i))*t_Delta);
            LocalTemperatures.push_back(t_lo+(double(i))*t_Delta);
        }
        int LocalTempIterations=0;
        do {
            //LogTemperaturePrior logPrior(Temperatures[TempIterations]); //t_lo);
            LogTemperaturePrior logPrior(LocalTemperatures[LocalTempIterations]); //t_lo);
            seedStarts += TempIterations;

            real HeldOutLkl = 0.0;
            real HeldOutEmpiricalDeviation = 0.0;

            // a container for our histograms at various temperatures
            AdaptiveHistogram adhA0cv(adhA0.getRootBox()); // make adh for CV with root box from adhA0

            for (int cvI=1; cvI<K; cvI++) //K-fold CV loop
            {
                gsl_ran_shuffle (r, p->data, N, sizeof(size_t));
                successfulInsertion = false;
                std::vector< subpavings::AdaptiveHistogram* > histsT;
                adhA0cv.clearAllHistData(); //clear the cv histogram before insertion
                adhA0cv.mergeUp(); //Merge the possibly multileaf cv histogram up to just root.
                transformedDataV.clear();
                transformedDataT.clear(); //clear CV containers
                for(size_t i=0; i<KofN; i++)
                    transformedDataV.push_back(transformedData[gsl_permutation_get(p,i)]);
                for(size_t i=KofN; i<N; i++)
                    transformedDataT.push_back(transformedData[gsl_permutation_get(p,i)]);
                successfulInsertion = adhA0cv.insertFromRVec(transformedDataT); //insert transformed data
                if (!successfulInsertion) throw std::runtime_error("Failed to insert transformed data");

                /* some guesses for max points in a node to stop posterior queue */
                nTrain = adhA0cv.getRootCounter();
                //cout << "nTrain = " << nTrain << endl; getchar();
                size_t critSEB = static_cast<size_t>(std::log(static_cast<double>(nTrain))); //can be as low as 1

                /* some guesses for maximum leaves we'll let SEB queue go to */
                size_t maxLeavesSEB = nTrain/2; // / critSEB; // integer division
                size_t maxLeavesCarving = maxLeavesSEB / 3; // integer division

                SPSNodeMeasureVolMassMinus compCarving(nTrain);
                AdaptiveHistogram::PrioritySplitQueueEvaluator evaluatorCarving(compCarving, maxLeavesCarving);
                SPSNodeMeasureCount compSEB;
                AdaptiveHistogram::PrioritySplitQueueEvaluator evaluatorSEB(compSEB, critSEB, maxLeavesSEB);
                CarverSEB::findStartingPointsBest(adhA0cv, histsT, evaluatorCarving, evaluatorSEB, logPrior,
                                                  minPoints, chooseStarts, keep, stopOnMaxPosterior,
                                                  postFileName, checkPostFileNameBase, precPQ, seedStarts);

                PiecewiseConstantFunction pcfT(*histsT[0]);
                pcfT.smearZeroValues(0.0000001);
                //assert (pcfT.getTotalIntegral() == cxsc::real(1.0));
                histsT[0]->clearAllHistData(); //clear the data from training big burst
                histsT[0]->insertFromRVec(transformedDataV); //insert validation data
                HeldOutLkl += pcfT.getLogLikelihood(*histsT[0]);
                PiecewiseConstantFunction pcfV(*histsT[0]);
                //if(pcfT.getTotalIntegral() != cxsc::real(1.0)) {cout << "433!!!"; getchar();}
                //histsT[0]->clearAllHistData(); //clear the data from validation big burst
                //histsT[0]->mergeUp(); //merge up to root
                HeldOutEmpiricalDeviation += pcfT.getL1Distance(pcfV);
                //cout << HeldOutLkl << '\t' << HeldOutEmpiricalDeviation << endl; getchar();

                //to free all the contents of histsT at the end
                for (size_t i = 0; i < histsT.size(); ++i)
                {
                    if (NULL != histsT[i]) delete histsT[i];
                    histsT[i] = NULL;
                }
            }
            //cout << "Avg HeldOutLkl = " << HeldOutLkl/double(K) << '\n'
            //     << "Avg HeldOutEmpiricalDeviation = " << HeldOutEmpiricalDeviation/double(K) << endl; getchar();
            AvgHeldOutLkls.push_back(HeldOutLkl/double(K));
            AvgHeldOutEmpiricalDeviations.push_back(HeldOutEmpiricalDeviation/double(K));
            LocalTempIterations++;
        } while (LocalTempIterations<LocalMaxTempIterations); // && CVgain>0.1);
        TempIterations++;
    } while (TempIterations<MaxTempIterations); // && CVgain>0.1);
    ////////////////////////////////////////////////////////////////////////////////////////////////
    cout << Temperatures << endl;
    cout << AvgHeldOutLkls << endl << AvgHeldOutEmpiricalDeviations << endl;
    cout << "Temp Iteration Number " << TempIterations << endl; getchar();
    cout << "MaxTempIterations = " << MaxTempIterations << endl; getchar();

    vector<size_t> indtop(AvgHeldOutLkls.size());
    topk(AvgHeldOutLkls, indtop);
    t_opt = _double(Temperatures[indtop[0]]);
    AvgHeldOutLkls_opt = _double(AvgHeldOutLkls[indtop[0]]);

    gsl_permutation_free (p);
    gsl_permutation_free (q); // q is allocated above but never used; free it to avoid a leak
    gsl_rng_free (r);
    success=true;
    return success;
}
//-----------------------------------------------------------------------
// optimizing over smoothing parameter Temp -
// getting best histogram over parameter in [t_lo, t_hi] by maximising/minimising
// a CV based score
//-----------------------------------------------------------------------
bool selectPriorByLv1OutCV (RVecData & Data, double t_lo, double t_hi,
                            int LocalMaxTempIterations, int MaxTempIterations,
                            RealVec & Lv1OutCVScores, vector<double> & Temperatures,
                            double & t_opt, double & Lv1OutCVScores_opt,
                            size_t minPoints, double minVolume,
                            int chooseStarts, int keep, bool stopOnMaxPosterior,
                            string postFileName, string checkPostFileNameBase,
                            string burstsFileBaseName, bool printHist, int precPQ,
                            bool CarvingMaxPosterior, unsigned long int seedStarts)
{
    bool success=false;
    int TempIterations=0;
    std::vector<double> LocalTemperatures;
    do{ // outer temp iterations
        if (TempIterations!=0){
            std::vector<size_t> indtop(Lv1OutCVScores.size());
            topk(Lv1OutCVScores, indtop);
            double tBest = Temperatures[indtop[0]];
            double t2ndBest = Temperatures[indtop[1]];
            double t3rdBest = Temperatures[indtop[2]];
            double tWorst = max(abs(tBest-t2ndBest),abs(tBest-t3rdBest));
            t_lo = max(t_lo,tBest-tWorst);
            t_hi = tBest+tWorst;
            // cout << t_lo << " , " << t_hi << " : " << tBest << " , " << t2ndBest << " , " << t3rdBest << endl; getchar();
            if (t_hi-t_lo<0.001){ //|| abs(Lv1OutCVScores[indtop[0]]-Lv1OutCVScores[indtop[1]])<0.00001) {
                cout << "reaching temp values < 0.001 or Lv1OutCVScores diff < 0.00001" << endl; getchar();
                break;
            }
        }
        double t_Delta=(t_hi-t_lo)/double(LocalMaxTempIterations-1);
        LocalTemperatures.clear();
        for(int i=0; i<LocalMaxTempIterations; i++){
            Temperatures.push_back(t_lo+(double(i))*t_Delta);
            LocalTemperatures.push_back(t_lo+(double(i))*t_Delta);
        }
        int LocalTempIterations=0;
        do {
            //LogTemperaturePrior logPrior(Temperatures[TempIterations]); //t_lo);
            //LogTemperaturePrior logPrior(LocalTemperatures[LocalTempIterations]); //t_lo);
            double temperatureNow = LocalTemperatures[LocalTempIterations];
            seedStarts += TempIterations;

            // a container for our histograms
            std::vector< subpavings::AdaptiveHistogram* > hists;
            // a container for our PCFs of histograms
            std::vector< subpavings::PiecewiseConstantFunction* > pcfs;

            bool succPQMCopt = false;
            //ostringstream strs; strs << temperatureNow;
            //string burstsFileBaseNameNow = burstsFileBaseName+"_"+strs.str();
            succPQMCopt = optPQMCAdapHist (Data, temperatureNow, hists, pcfs,
                                           minPoints, minVolume, chooseStarts, keep,
                                           stopOnMaxPosterior, postFileName,
                                           checkPostFileNameBase, burstsFileBaseName,
                                           printHist, precPQ, CarvingMaxPosterior,
                                           seedStarts);

            real lv1outCVScore = pcfs[0]->getLeave1OutCVScore(*hists[0]);
            Lv1OutCVScores.push_back(-1.0*lv1outCVScore); // -1.0* so that maximising this value minimises the CV score
            LocalTempIterations++;

            //to free all the contents of pcfs at the end
            for (size_t i = 0; i < pcfs.size(); ++i) {
                if (NULL != pcfs[i]) delete pcfs[i];
                pcfs[i] = NULL;
            }
            //to free all the contents of hists at the end
            if(CarvingMaxPosterior) {
                for (size_t i = 0; i < hists.size(); ++i) {
                    if (NULL != hists[i]) delete hists[i];
                    hists[i] = NULL;
                }
            }
        } while (LocalTempIterations<LocalMaxTempIterations); // && CVgain>0.1);
        TempIterations++;
    } while (TempIterations<MaxTempIterations); // && CVgain>0.1);
    ////////////////////////////////////////////////////////////////////////////////////////////////
    cout << "Temperatures:" << endl;
    cout << Temperatures << endl;
    cout << "- Lv1OutCVScores: (looking for maximum of -1.0*Lv1OutCVScores)" << endl;
    cout << Lv1OutCVScores << endl << endl;
    cout << "Temp Iteration Number " << TempIterations << endl;
    //getchar();
    cout << "MaxTempIterations = " << MaxTempIterations << endl;
    //getchar();

    vector<size_t> indtop(Lv1OutCVScores.size());
    topk(Lv1OutCVScores, indtop);
    t_opt = _double(Temperatures[indtop[0]]);
    Lv1OutCVScores_opt = _double(Lv1OutCVScores[indtop[0]]);

    success=true;
    return success;
}