void CState::randuVelocity(double mean, double vmax, long *seed) { mat velocities(nAtoms, 3); rowvec momentum(nAtoms); // generating random, uniform velocities for (int i = 0 ; i < nAtoms; i++) { for (int j = 0; j < 3; j++) { velocities(i, j) = (ran2(seed)-0.5)*vmax + mean; } } // removing any linear momentum from the system momentum = sum(velocities)/nAtoms; for(int i = 0; i < nAtoms; i++) { velocities.row(i) -= momentum; } // sending the velocities to the atoms for (int i = 0; i < nAtoms; i++) { atoms[i]->setVelocity(velocities.row(i).t()); } }
void killMomentum(void) {
    // Cancel the system's total linear momentum by distributing the opposite
    // momentum evenly across all particles.
    Vec3 P = momentum();
    int n = numParticles();
    // BUG FIX: the original used scale(P, -1/n); with an int n this is
    // integer division, which truncates to 0 for any n > 1, so no momentum
    // was ever removed. Force floating-point division.
    Vec3 Pcorr = scale(P, -1.0/n);
    forEveryParticleD(&addMomentum, (void*)&Pcorr);
}
Real SodProblem::energyTotal(Real t, const Point &p) {
    // Total energy density at (t, p): internal energy p/(gamma - 1) plus
    // kinetic energy |rho*u|^2 / (2*rho) expressed via the momentum vector.
    const RealVectorValue mom(momentumX(t, p), momentumY(t, p), momentumZ(t, p));
    const Real rho = density(t, p);
    const Real pres = pressure(t, p);

    const Real internal = pres / (_gamma - 1);
    const Real kinetic = 0.5 * mom.size_sq() / rho;
    return internal + kinetic;
}
bool physicsCheck(void) { Vec3 P = momentum(); double PPP = length(P) / numParticles(); //printf("%le\n",PPP); if (PPP > 1e-20) { fprintf(stderr, "\nMOMENTUM CONSERVATION VIOLATED! " "Momentum per particle: |P| = %le\n", PPP); return false; } return true; }
StatusCode HepMCHistograms::execute() {
    // Fill kinematic histograms (eta, pT) for each generated particle and
    // position histograms (d0, z0) for each vertex of the current event.
    auto evt = m_hepmchandle.get();
    info() << "Processing event with " << evt->particles_size() << " particles" << endmsg;

    for (auto p = evt->particles_begin(); p != evt->particles_end(); ++p) {
        const auto& mom = (*p)->momentum();
        m_eta->Fill(mom.eta());
        m_pt->Fill(mom.perp());
    }

    for (auto v = evt->vertices_begin(); v != evt->vertices_end(); ++v) {
        const auto& pos = (*v)->position();
        m_d0->Fill(pos.perp());
        m_z0->Fill(pos.z());
    }

    return StatusCode::SUCCESS;
}
// Assemble the element-internal force vector for the Taylor-Hood (Tr21)
// Stokes element: a 12-component momentum-balance part (quadratic velocity
// interpolation, 6 nodes x 2 dofs) and a 3-component mass-conservation part
// (linear pressure interpolation), gathered into the 15-entry answer via the
// element's dof ordering.
void Tr21Stokes :: computeInternalForcesVector(FloatArray &answer, TimeStep *tStep)
{
    IntegrationRule *iRule = integrationRulesArray [ 0 ];
    FluidDynamicMaterial *mat = ( FluidDynamicMaterial * ) this->domain->giveMaterial(this->material);
    FloatArray a_pressure, a_velocity, devStress, epsp, BTs, Nh, dNv(12);
    double r_vol, pressure;
    FloatMatrix dN, B(3, 12);
    B.zero();

    // Current solution vectors: velocities (momentum balance) and pressures
    // (conservation equation).
    this->computeVectorOf(EID_MomentumBalance, VM_Total, tStep, a_velocity);
    this->computeVectorOf(EID_ConservationEquation, VM_Total, tStep, a_pressure);

    FloatArray momentum(12), conservation(3);
    momentum.zero();
    conservation.zero();
    GaussPoint *gp;

    for ( int i = 0; i < iRule->getNumberOfIntegrationPoints(); i++ ) {
        gp = iRule->getIntegrationPoint(i);
        FloatArray *lcoords = gp->giveCoordinates();

        double detJ = fabs(this->interpolation_quad.giveTransformationJacobian(* lcoords, FEIElementGeometryWrapper(this)));
        this->interpolation_quad.evaldNdx(dN, * lcoords, FEIElementGeometryWrapper(this));
        this->interpolation_lin.evalN(Nh, * lcoords, FEIElementGeometryWrapper(this));
        double dA = detJ * gp->giveWeight();

        // Build the strain-rate matrix B (3 x 12) and the divergence
        // operator dNv in one pass; note B(2,*) holds the shear row.
        for ( int j = 0, k = 0; j < 6; j++, k += 2 ) {
            dNv(k) = B(0, k) = B(2, k + 1) = dN(j, 0);
            dNv(k + 1) = B(1, k + 1) = B(2, k) = dN(j, 1);
        }

        pressure = Nh.dotProduct(a_pressure);
        epsp.beProductOf(B, a_velocity);
        // Deviatoric stress and volumetric strain rate from the material.
        mat->computeDeviatoricStressVector(devStress, r_vol, gp, epsp, pressure, tStep);
        BTs.beTProductOf(B, devStress);

        // Accumulate: B^T s * dA  -  p * div(N) * dA  (momentum) and
        // volumetric term * dA (conservation).
        momentum.add(dA, BTs);
        momentum.add(-pressure*dA, dNv);
        conservation.add(r_vol*dA, Nh);
    }

    // Pack momentum (entries 1..12) and conservation (13..15), then scatter
    // into answer using the element ordering.
    FloatArray temp(15);
    temp.zero();
    temp.addSubVector(momentum, 1);
    temp.addSubVector(conservation, 13);

    answer.resize(15);
    answer.zero();
    answer.assemble(temp, this->ordering);
}
// unsure about this // Will it give me hard collision by default? // I doubt it... if not just bring circbuf back bool AABB::collide(AABB& other) { if (overlaps(other)) { float Msum_1 = 1.f / (M + other.M); // denominator float Cr = 0.5f * (E + other.E); // coef of rest vec2 P1 = momentum(), P2 = other.momentum(); vec2 A = P1 + P2, B = Cr * (P1 - P2); vec2 v1 = (A - B) * Msum_1; vec2 v2 = (A + B) * Msum_1; V = v1; other.V = v2; return true; } return false; }
void GravityOrb::init() { name("Earth"); position(Vector3()); // position(Vector3()); velocity(Vector3()); // velocity(Vector3()); mass(60.0f); // mass(6e24); Vector3 temp = velocity(); momentum(temp.scalarMult(mass())); // momentum(Vector3()); width(12.8f); // width(12.8e6/100); kfriction(0.0f); sFriction(0.0f); r(0.0f); g(0.0f); b(1.0f); }
string Particle::toString() const {
    // Render the particle's kinematics, identification and impact-parameter
    // data as a table of fixed-width (30-character) columns, alternating
    // label rows and value rows.
    const int w = 30;
    stringstream ss;

    ss << "Particle information" << "\n";
    ss << setw(w) << "energy" << setw(w) << "px" << setw(w) << "py" << setw(w) << "pz" << "\n";
    ss << setw(w) << energy() << setw(w) << px() << setw(w) << py() << setw(w) << pz() << "\n";
    ss << setw(w) << "phi" << setw(w) << "eta" << setw(w) << "theta" << setw(w) << " " << "\n";
    ss << setw(w) << phi() << setw(w) << eta() << setw(w) << theta() << setw(w) << " " << "\n";
    ss << setw(w) << "momentum" << setw(w) << "E_T" << setw(w) << "p_T" << setw(w) << " " << "\n";
    ss << setw(w) << momentum() << setw(w) << et() << setw(w) << pt() << setw(w) << " " << "\n";
    ss << setw(w) << "m_dyn" << setw(w) << "m_fix" << setw(w) << "charge" << setw(w) << " " << "\n";
    ss << setw(w) << massFromEnergyAndMomentum() << setw(w) << mass() << setw(w) << charge() << setw(w) << " " << "\n";
    ss << setw(w) << "d0 =" << setw(w) << "d0_bs" << setw(w) << " " << setw(w) << " " << "\n";
    ss << setw(w) << d0() << setw(w) << d0_wrtBeamSpot() << setw(w) << " " << setw(w) << " " << "\n";

    return ss.str();
}
void GravityOrb::init(std::string n, Vector3 pos, Vector3 vel, long double m, long double w, long double kF, long double sF, float red, float green, float blue) { name(n); position(pos); velocity(vel); mass(m); Vector3 temp = velocity(); momentum(temp.scalarMult(mass())); width(w); kfriction(kF); sFriction(sF); r(red); g(green); b(blue); }
void unittest::LineOnEllipseIntersection(){ Point3D centre(13,5,0); double a = 3; double b = 2; ellipse e(centre, a, b); Point3D position(1,3.005,0); Vector3D momentum(1,0,0); Photon testphoton; testphoton.SetPosition(position); testphoton.SetMomentum(momentum); e.LineOnEllipseIntersection(testphoton); }
// One Navier-Stokes solver step: runs the momentum update followed by the
// divergence step. The commented-out calls are alternative surface-tension
// models and debug dumps kept for reference.
void NS(void)
{
    //surface_tension_geo();
    //surface_tension();
    // if(iteration%ioutput==0)
    // {
    // print_2D_array("GeoSurfx.dat",iteration,"GeoSurfx",surfx[0],l-1,m-1,dx);
    // print_2D_array("GeoSurfy.dat",iteration,"GeoSurfy",surfy[0],l-1,m-1,dx);
    // }
    /* surface_tension(); if(iteration%ioutput==0) { print_2D_array("FreeSurfx.dat",iteration,"FreeSurfx",surfx[0],l-1,m-1,dx); print_2D_array("FreeSurfy.dat",iteration,"FreeSurfy",surfy[0],l-1,m-1,dx); } */

    momentum();    // momentum equation update
    divergence();  // divergence computation / incompressibility step

    // print_2D_array("surfx",iteration,"surfx",surfx[0],l-1,m-1,dx);
    // print_2D_array("surfy",iteration,"surfy",surfy[0],l-1,m-1,dx);
    // print_2D_array("sigma",iteration,"sigma",sigma[0],l+1,m+1,dx);
}
void unittest::Ellipse_Points3D(){ Point3D centre(13,5,0); double a = 3; double b = 2; ellipse e(centre, a, b); Point3D position(2,2,0); Vector3D momentum(11,3,-1); Photon testphoton; testphoton.SetPosition(position); testphoton.SetMomentum(momentum); e.points3D(testphoton); Test reader; reader.PrintPoint(e.GetStorage().GetPoint()); reader.PrintPoint(e.GetStorage().GetPoint2()); }
// Extract the D -> pi semileptonic form factors fP (f+), fM (f-), f0 and fT
// for one (light, charm) mass combination, via two routes: ratio-based
// ("_r") and analytic/numeric correlator methods ("_a"/"_n"). Results are
// returned through the ext_* output references (the analytic set is what is
// exported for fP/fM/f0/fT). Writes many xmgrace plot files under plots/.
//
// Parameters:
//   ext_EP, ext_ED  : out, fitted pion / D energies per theta
//   ext_Q2          : out, squared four-momentum transfer per theta
//   ext_fP..ext_fT  : out, form factors per theta (analytic method)
//   il_sea, il, ic  : sea-light, valence-light and charm mass indices
void extr(jvec &ext_EP,jvec &ext_ED,jvec &ext_Q2,jvec &ext_fP,jvec &ext_fM,jvec &ext_f0,jvec &ext_fT,int il_sea,int il,int ic)
{
  ////////////////////////////////////////// R0 //////////////////////////////////////

  jvec R0_corr;
  jack R0(njack);

  //load standing (zero-momentum) three-point correlators
  jvec ll0_st=load_3pts("V0",il,il,0,RE,ODD,1);
  jvec lc0_st=load_3pts("V0",ic,il,0,RE,ODD,1);
  jvec cc0_st=load_3pts("V0",ic,ic,0,RE,ODD,1);

  //build R0: double ratio of light-charm over charm-charm * light-light
  R0_corr=lc0_st*lc0_st.simmetric()/(cc0_st*ll0_st);

  //fit and plot
  R0=constant_fit(R0_corr,TH-tmax,tmax,combine("plots/R0_il_%d_ic_%d.xmg",il,ic).c_str());

  //////////////////////////////////////////// R2 ////////////////////////////////////

  jvec R2_corr[nth];
  jvec RT_corr[nth];
  jvec R2(nth,njack);
  jvec RT(nth,njack);
  ofstream out_R2(combine("plots/R2_il_%d_ic_%d.xmg",il,ic).c_str());
  ofstream out_RT(combine("plots/RT_il_%d_ic_%d.xmg",il,ic).c_str());
  jvec lcK_th[nth],lc0_th[nth],lcT_th[nth];
  for(int ith=0;ith<nth;ith++)
    {
      //load corrs (spatial, temporal and tensor vector-current insertions)
      lcK_th[ith]=load_3pts("VK",ic,il,ith,IM,EVN,-1)/(6*th_P[ith]);
      lc0_th[ith]=load_3pts("V0",ic,il,ith,RE,ODD,1);
      lcT_th[ith]=load_3pts("VTK",ic,il,ith,IM,ODD,1)/(6*th_P[ith]);

      //build ratios
      R2_corr[ith]=lcK_th[ith]/lc0_th[ith];
      RT_corr[ith]=lcT_th[ith]/lcK_th[ith];

      //fit
      R2[ith]=constant_fit(R2_corr[ith],tmin,tmax);
      RT[ith]=constant_fit(RT_corr[ith],tmin,tmax);

      //plot
      out_R2<<write_constant_fit_plot(R2_corr[ith],R2[ith],tmin,tmax);
      out_RT<<write_constant_fit_plot(RT_corr[ith],RT[ith],tmin,tmax);
    }

  ////////////////////////////////////////// R1 //////////////////////////////////////

  jvec R1_corr[nth];
  jvec R1(nth,njack);
  ofstream out_P(combine("plots/out_P_il_%d_ic_%d.xmg",il,ic).c_str());
  out_P<<"@type xydy"<<endl;
  ofstream out_D(combine("plots/out_D_il_%d_ic_%d.xmg",il,ic).c_str());
  out_D<<"@type xydy"<<endl;
  ofstream out_R1(combine("plots/out_R1_il_%d_ic_%d.xmg",il,ic).c_str());
  out_R1<<"@type xydy"<<endl;

  //load Pi and D two-point correlators and fit their energies
  jvec P_corr[nth],D_corr[nth];
  jvec ED(nth,njack),EP(nth,njack);
  for(int ith=0;ith<nth;ith++)
    {
      //load moving pion
      P_corr[ith]=load_2pts("2pts_P5P5.dat",il_sea,il,ith);
      out_P<<"@type xydy"<<endl;
      EP[ith]=constant_fit(effective_mass(P_corr[ith]),tmin_P,TH,combine("plots/P_eff_mass_il_%d_ic_%d_ith_%d.xmg", il,ic,ith).c_str());
      out_P<<write_constant_fit_plot(effective_mass(P_corr[ith]),EP[ith],tmin_P,TH);
      out_P<<"&"<<endl;

      //recompute EP and ED from standing one via the lattice dispersion
      //relation (overrides the direct fit for moving frames)
      if(ith)
        {
          ED[ith]=latt_en(ED[0],th_P[ith]);
          EP[ith]=latt_en(EP[0],th_P[ith]);
        }

      //load moving D
      D_corr[ith]=load_2pts("2pts_P5P5.dat",il,ic,ith);
      out_D<<"@type xydy"<<endl;
      ED[ith]=constant_fit(effective_mass(D_corr[ith]),tmin_D,TH,combine("plots/D_eff_mass_il_%d_ic_%d_ith_%d.xmg", il,ic,ith).c_str());
      out_D<<write_constant_fit_plot(effective_mass(D_corr[ith]),ED[ith],tmin_D,TH);
      out_D<<"&"<<endl;

      //build the ratio, removing the leading time dependence point by point
      R1_corr[ith]=lc0_th[ith]/lc0_th[0];
      for(int t=0;t<TH;t++)
        {
          int E_fit_reco_flag=1; //1: reconstruct from dispersion relation
          jack Dt(njack),Pt(njack);
          if(E_fit_reco_flag==0)
            {
              Dt=D_corr[0][t]/D_corr[ith][t];
              Pt=P_corr[0][TH-t]/P_corr[ith][TH-t];
            }
          else
            {
              jack ED_th=latt_en(ED[0],th_P[ith]),EP_th=latt_en(EP[0],th_P[ith]);
              Dt=exp(-(ED[0]-ED_th)*t)*ED_th/ED[0];
              Pt=exp(-(EP[0]-EP_th)*(TH-t))*EP_th/EP[0];
            }
          R1_corr[ith][t]*=Dt*Pt;
        }

      //fit
      R1[ith]=constant_fit(R1_corr[ith],tmin,tmax);

      //plot
      out_R1<<write_constant_fit_plot(R1_corr[ith],R1[ith],tmin,tmax);
    }

  //////////////////////////////////////// solve the ratios //////////////////////////////

  //compute f0[q2max] from R0 at zero recoil
  jvec f0_r(nth,njack),fP_r(nth,njack),fT_r(nth,njack);
  f0_r[0]=sqrt(R0*4*ED[0]*EP[0])/(ED[0]+EP[0]);
  cout<<"f0_r[q2max]: "<<f0_r[0]<<endl;

  //compute QK and Q2: P = initial+final, Q = initial-final four-momenta;
  //the "_r" variants use dispersion-relation-reconstructed energies
  double mom[nth];
  jvec PK(nth,njack),QK(nth,njack);
  jvec P0(nth,njack),Q0(nth,njack),Q2(nth,njack),P2(nth,njack);
  jvec P0_r(nth,njack),Q0_r(nth,njack),Q2_r(nth,njack),P2_r(nth,njack);
  for(int ith=0;ith<nth;ith++)
    {
      P0[ith]=ED[ith]+EP[ith]; //P=initial+final
      Q0[ith]=ED[ith]-EP[ith]; //Q=initial-final
      P0_r[ith]=latt_en(ED[0],th_P[ith])+latt_en(EP[0],th_P[ith]);
      Q0_r[ith]=latt_en(ED[0],th_P[ith])-latt_en(EP[0],th_P[ith]);

      //we are describing the process D->Pi: D moves with -p, pion with +p
      mom[ith]=momentum(th_P[ith]);
      double P_D=-mom[ith];
      double P_Pi=mom[ith];

      PK[ith]=P_D+P_Pi;
      QK[ith]=P_D-P_Pi;

      P2[ith]=sqr(P0[ith])-3*sqr(PK[ith]);
      Q2[ith]=sqr(Q0[ith])-3*sqr(QK[ith]);

      //reconstruct Q2
      P2_r[ith]=sqr(P0_r[ith])-3*sqr(PK[ith]);
      Q2_r[ith]=sqr(Q0_r[ith])-3*sqr(QK[ith]);
    }

  //checking Pion dispertion relation (measured vs continuum vs lattice)
  ofstream out_disp_P(combine("plots/Pion_disp_rel_il_%d_ic_%d.xmg",il,ic).c_str());
  out_disp_P<<"@type xydy"<<endl;
  for(int ith=0;ith<nth;ith++) out_disp_P<<3*sqr(mom[ith])<<" "<<sqr(EP[ith])<<endl;
  out_disp_P<<"&"<<endl;
  for(int ith=0;ith<nth;ith++) out_disp_P<<3*sqr(mom[ith])<<" "<<sqr(cont_en(EP[0],th_P[ith]))<<endl;
  out_disp_P<<"&"<<endl;
  for(int ith=0;ith<nth;ith++) out_disp_P<<3*sqr(mom[ith])<<" "<<sqr(latt_en(EP[0],th_P[ith]))<<endl;
  out_disp_P<<"&"<<endl;

  //checking D dispertion relation
  ofstream out_disp_D(combine("plots/D_disp_rel_il_%d_ic_%d.xmg",il,ic).c_str());
  out_disp_D<<"@type xydy"<<endl;
  for(int ith=0;ith<nth;ith++) out_disp_D<<3*sqr(mom[ith])<<" "<<sqr(ED[ith])<<endl;
  out_disp_D<<"&"<<endl;
  for(int ith=0;ith<nth;ith++) out_disp_D<<3*sqr(mom[ith])<<" "<<sqr(cont_en(ED[0],th_P[ith]))<<endl;
  out_disp_D<<"&"<<endl;
  for(int ith=0;ith<nth;ith++) out_disp_D<<3*sqr(mom[ith])<<" "<<sqr(latt_en(ED[0],th_P[ith]))<<endl;
  out_disp_D<<"&"<<endl;

  //compute xi = fM/fP from R2
  jvec xi(nth,njack);
  for(int ith=1;ith<nth;ith++)
    {
      int E_fit_reco_flag=0; //it makes no diff
      jack P0_th=E_fit_reco_flag?P0_r[ith]:P0[ith];
      jack Q0_th=E_fit_reco_flag?Q0_r[ith]:Q0[ith];

      xi[ith]=R2[ith]*P0_th;
      xi[ith]/=QK[ith]-R2[ith]*Q0_th;
    }

  //compute fP from R1, xi and f0(q2max)
  ofstream out_fP_r(combine("plots/fP_r_il_%d_ic_%d.xmg",il,ic).c_str());
  out_fP_r<<"@type xydy"<<endl;
  for(int ith=1;ith<nth;ith++)
    {
      int E_fit_reco_flag=1; //it makes no diff
      jack P0_th=E_fit_reco_flag?P0_r[ith]:P0[ith];
      jack Q0_th=E_fit_reco_flag?Q0_r[ith]:Q0[ith];

      jack c=P0_th/(ED[0]+EP[0])*(1+xi[ith]*Q0_th/P0_th);
      fP_r[ith]=R1[ith]/c*f0_r[0];
      out_fP_r<<Q2[ith].med()<<" "<<fP_r[ith]<<endl;
    }

  //compute f0 and fT
  ofstream out_f0_r(combine("plots/f0_r_il_%d_ic_%d.xmg",il,ic).c_str());
  ofstream out_fT_r(combine("plots/fT_r_il_%d_ic_%d.xmg",il,ic).c_str());;
  out_f0_r<<"@type xydy"<<endl;
  out_f0_r<<Q2[0].med()<<" "<<f0_r[0]<<endl;
  out_fT_r<<"@type xydy"<<endl;
  for(int ith=1;ith<nth;ith++)
    {
      //it seems better here to solve using reconstructed energies
      int E_fit_reco_flag=0;

      jack EP_th=E_fit_reco_flag?latt_en(EP[0],th_P[ith]):EP[ith];
      jack ED_th=E_fit_reco_flag?latt_en(ED[0],th_P[ith]):ED[ith];
      jack Q2_th=E_fit_reco_flag?Q2_r[ith]:Q2[ith];

      jack fM_r=xi[ith]*fP_r[ith]; //checked
      // NOTE(review): fM_r is a single jack, yet it is indexed with [ith]
      // below (jackknife-sample index, not theta) -- confirm this is
      // intentional and not meant to be plain fM_r.
      f0_r[ith]=fP_r[ith]+fM_r[ith]*Q2_th/(sqr(ED_th)-sqr(EP_th));
      out_f0_r<<Q2[ith].med()<<" "<<f0_r[ith]<<endl;

      fT_r[ith]=fM_r[ith]*RT[ith]*Zt_med[ibeta]/Zv_med[ibeta]*(EP[0]+ED[0])/(ED[ith]+EP[ith]); //ADD
      out_fT_r<<Q2[ith].med()<<" "<<fT_r[ith]<<endl;
    }

  //////////////////////////////////////// analytic method /////////////////////////////

  jvec fP_a(nth,njack),fM_a(nth,njack),f0_a(nth,njack),fT_a(nth,njack);
  jvec fP_n(nth,njack),fM_n(nth,njack),f0_n(nth,njack),fT_n(nth,njack);

  //determine M and Z for pion and D from two-point fits
  jvec ZP(nth,njack),ZD(nth,njack);
  for(int ith=0;ith<nth;ith++)
    {
      jack E,Z2;
      two_pts_fit(E,Z2,P_corr[ith],tmin_P,TH);
      ZP[ith]=sqrt(Z2);
      two_pts_fit(E,Z2,D_corr[ith],tmin_D,TH);
      ZD[ith]=sqrt(Z2);
    }

  //compute matrix elements V0, VK, TK (analytic "_a" vs numeric "_n"
  //removal of the time dependence)
  jvec VK_a(nth,njack),V0_a(nth,njack),TK_a(nth,njack);
  jvec VK_n(nth,njack),V0_n(nth,njack),TK_n(nth,njack);
  for(int ith=0;ith<nth;ith++)
    {
      ofstream out_V0(combine("plots/V0_il_%d_ic_%d_ith_%d_analytic_numeric.xmg",il,ic,ith).c_str());
      out_V0<<"@type xydy"<<endl;
      ofstream out_VK(combine("plots/VK_il_%d_ic_%d_ith_%d_analytic_numeric.xmg",il,ic,ith).c_str());
      out_VK<<"@type xydy"<<endl;
      ofstream out_TK(combine("plots/TK_il_%d_ic_%d_ith_%d_analytic_numeric.xmg",il,ic,ith).c_str());
      out_TK<<"@type xydy"<<endl;
      ofstream out_dt(combine("plots/dt_il_%d_ic_%d_ith_%d.xmg",il,ic,ith).c_str());
      out_dt<<"@type xydy"<<endl;

      //computing time dependance
      jvec dt_a(TH+1,njack),dt_n(TH+1,njack);
      {
        //it seems better here to use fitted energies
        int E_fit_reco_flag=1;

        jack EP_th=E_fit_reco_flag?latt_en(EP[0],th_P[ith]):EP[ith];
        jack ED_th=E_fit_reco_flag?latt_en(ED[0],th_P[ith]):ED[ith];
        for(int t=0;t<=TH;t++)
          {
            dt_a[t]=exp(-(ED_th*t+EP_th*(TH-t)))*ZP[0]*ZD[0]/(4*EP_th*ED_th);
            dt_n[t]=D_corr[ith][t]*P_corr[ith][TH-t]/(ZD[0]*ZP[0]);
          }
      }

      //remove time dependance using analytic or numeric expression
      jvec VK_corr_a=Zv_med[ibeta]*lcK_th[ith]/dt_a,V0_corr_a=Zv_med[ibeta]*lc0_th[ith]/dt_a;
      jvec VK_corr_n=Zv_med[ibeta]*lcK_th[ith]/dt_n,V0_corr_n=Zv_med[ibeta]*lc0_th[ith]/dt_n;
      jvec TK_corr_n=Zt_med[ibeta]*lcT_th[ith]/dt_n,TK_corr_a=Zt_med[ibeta]*lcT_th[ith]/dt_a;

      //fit V0
      V0_a[ith]=constant_fit(V0_corr_a,tmin,tmax);
      V0_n[ith]=constant_fit(V0_corr_n,tmin,tmax);
      out_V0<<write_constant_fit_plot(V0_corr_a,V0_a[ith],tmin,tmax)<<"&"<<endl;
      out_V0<<write_constant_fit_plot(V0_corr_n,V0_n[ith],tmin,tmax)<<"&"<<endl;

      //fit VK
      VK_a[ith]=constant_fit(VK_corr_a,tmin,tmax);
      VK_n[ith]=constant_fit(VK_corr_n,tmin,tmax);
      out_VK<<write_constant_fit_plot(VK_corr_a,VK_a[ith],tmin,tmax)<<"&"<<endl;
      out_VK<<write_constant_fit_plot(VK_corr_n,VK_n[ith],tmin,tmax)<<"&"<<endl;

      //fit TK
      TK_a[ith]=constant_fit(TK_corr_a,tmin,tmax);
      TK_n[ith]=constant_fit(TK_corr_n,tmin,tmax);
      out_TK<<write_constant_fit_plot(TK_corr_a,TK_a[ith],tmin,tmax)<<"&"<<endl;
      out_TK<<write_constant_fit_plot(TK_corr_n,TK_n[ith],tmin,tmax)<<"&"<<endl;
    }

  //compute f0(q2max)
  f0_a[0]=V0_a[0]/(ED[0]+EP[0]);
  f0_n[0]=V0_n[0]/(ED[0]+EP[0]);
  cout<<"f0_a["<<Q2[0].med()<<"]: "<<f0_a[0]<<endl;
  cout<<"f0_n["<<Q2[0].med()<<"]: "<<f0_n[0]<<endl;

  //solve for fP and f0: 2x2 linear system in (fP, fM) via Cramer's rule
  for(int ith=1;ith<nth;ith++)
    {
      jack delta=P0[ith]*QK[ith]-Q0[ith]*PK[ith];

      //solve using analytic fit
      jack deltaP_a=V0_a[ith]*QK[ith]-Q0[ith]*VK_a[ith];
      jack deltaM_a=P0[ith]*VK_a[ith]-V0_a[ith]*PK[ith];
      fP_a[ith]=deltaP_a/delta;
      fM_a[ith]=deltaM_a/delta;

      //solve using numeric fit
      jack deltaP_n=V0_n[ith]*QK[ith]-Q0[ith]*VK_n[ith];
      jack deltaM_n=P0[ith]*VK_n[ith]-V0_n[ith]*PK[ith];
      fP_n[ith]=deltaP_n/delta;
      fM_n[ith]=deltaM_n/delta;

      //compute f0
      f0_a[ith]=fP_a[ith]+fM_a[ith]*Q2[ith]/(ED[0]*ED[0]-EP[0]*EP[0]);
      f0_n[ith]=fP_n[ith]+fM_n[ith]*Q2[ith]/(ED[0]*ED[0]-EP[0]*EP[0]);

      //solve fT
      fT_a[ith]=-TK_a[ith]*(EP[0]+ED[0])/(2*(ED[ith]+EP[ith]))/mom[ith];
      fT_n[ith]=-TK_n[ith]*(EP[0]+ED[0])/(2*(ED[ith]+EP[ith]))/mom[ith];
    }

  //write analytic and numeric plot of fP and f0
  ofstream out_fP_a("plots/fP_a.xmg"),out_fP_n("plots/fP_n.xmg");
  ofstream out_fM_a("plots/fM_a.xmg"),out_fM_n("plots/fM_n.xmg");
  ofstream out_f0_a("plots/f0_a.xmg"),out_f0_n("plots/f0_n.xmg");
  ofstream out_fT_a("plots/fT_a.xmg"),out_fT_n("plots/fT_n.xmg");
  out_fP_a<<"@type xydy"<<endl;
  out_fP_n<<"@type xydy"<<endl;
  out_f0_a<<"@type xydy"<<endl;
  out_f0_n<<"@type xydy"<<endl;
  out_fM_a<<"@type xydy"<<endl;
  out_fM_n<<"@type xydy"<<endl;
  out_fT_a<<"@type xydy"<<endl;
  out_fT_n<<"@type xydy"<<endl;
  out_f0_a<<Q2[0].med()<<" "<<f0_a[0]<<endl;
  out_f0_n<<Q2[0].med()<<" "<<f0_n[0]<<endl;
  for(int ith=1;ith<nth;ith++)
    {
      out_fP_a<<Q2[ith].med()<<" "<<fP_a[ith]<<endl;
      out_fP_n<<Q2[ith].med()<<" "<<fP_n[ith]<<endl;
      out_fM_a<<Q2[ith].med()<<" "<<fM_a[ith]<<endl;
      out_fM_n<<Q2[ith].med()<<" "<<fM_n[ith]<<endl;
      out_f0_a<<Q2[ith].med()<<" "<<f0_a[ith]<<endl;
      out_f0_n<<Q2[ith].med()<<" "<<f0_n[ith]<<endl;
      out_fT_a<<Q2[ith].med()<<" "<<fT_a[ith]<<endl;
      out_fT_n<<Q2[ith].med()<<" "<<fT_n[ith]<<endl;
    }

  //export: the analytic-method results are the ones returned
  ext_EP=EP;
  ext_ED=ED;
  ext_Q2=Q2;
  ext_fP=fP_a;
  ext_fM=fM_a;
  ext_f0=f0_a;
  ext_fT=fT_a;
}
// Continuum dispersion relation: E(p) = sqrt(M^2 + 3*p^2), where p is the
// per-axis momentum induced by the twist angle th.
jack cont_en(jack M,double th)
{
  const double p = momentum(th);
  return sqrt(M*M + 3*p*p);
}
// Lattice dispersion relation: sinh^2(E/2) = 3*sin^2(p/2) + sinh^2(M/2),
// solved for the energy E via asinh.
jack latt_en(jack M,double th)
{
  const double sp = sin(momentum(th)/2);
  jack sm = sinh(M/2);
  return 2*asinh(sqrt(3*sp*sp + sm*sm));
}
// Form the inertia contribution of the 8-node BbarBrick element: accumulates
// rho * N^T * a into the resid vector and, when tangFlag == 1, the
// consistent mass matrix into mass. Shape functions are evaluated once at
// all 2x2x2 Gauss points and cached in static scratch arrays.
// NOTE(review): the static scratch buffers (shp, Shape, gaussPoint,
// momentum) make this method non-reentrant -- confirm single-threaded use.
void BbarBrick::formInertiaTerms( int tangFlag )
{
  static const int ndm = 3 ;
  static const int ndf = 3 ;
  static const int numberNodes = 8 ;
  static const int numberGauss = 8 ;
  static const int nShape = 4 ;
  static const int massIndex = nShape - 1 ; // last shape-fn row: N itself

  double xsj ;  // determinant jacaobian matrix
  double dvol[numberGauss] ; //volume element
  static double shp[nShape][numberNodes] ;  //shape functions at a gauss point
  static double Shape[nShape][numberNodes][numberGauss] ; //all the shape functions
  static double gaussPoint[ndm] ;
  static Vector momentum(ndf) ;
  int i, j, k, p, q ;
  int jj, kk ;
  double temp, rho, massJK ;

  //zero mass
  mass.Zero( ) ;

  //compute basis vectors and local nodal coordinates
  computeBasis( ) ;

  //gauss loop to compute and save shape functions
  int count = 0 ;
  for ( i = 0; i < 2; i++ ) {
    for ( j = 0; j < 2; j++ ) {
      for ( k = 0; k < 2; k++ ) {

        gaussPoint[0] = sg[i] ;
        gaussPoint[1] = sg[j] ;
        gaussPoint[2] = sg[k] ;

        //get shape functions
        shp3d( gaussPoint, xsj, shp, xl ) ;

        //save shape functions
        for ( p = 0; p < nShape; p++ ) {
          for ( q = 0; q < numberNodes; q++ )
            Shape[p][q][count] = shp[p][q] ;
        } // end for p

        //volume element to also be saved
        dvol[count] = wg[count] * xsj ;

        count++ ;
      } //end for k
    } //end for j
  } // end for i

  //gauss loop
  for ( i = 0; i < numberGauss; i++ ) {

    //extract shape functions from saved array
    for ( p = 0; p < nShape; p++ ) {
      for ( q = 0; q < numberNodes; q++ )
        shp[p][q] = Shape[p][q][i] ;
    } // end for p

    //node loop to compute acceleration
    momentum.Zero( ) ;
    for ( j = 0; j < numberNodes; j++ )
      //momentum += shp[massIndex][j] * ( nodePointers[j]->getTrialAccel() ) ;
      momentum.addVector( 1.0, nodePointers[j]->getTrialAccel(), shp[massIndex][j] ) ;

    //density
    rho = materialPointers[i]->getRho() ;

    //multiply acceleration by density to form momentum
    momentum *= rho ;

    //residual and tangent calculations node loops
    jj = 0 ;
    for ( j = 0; j < numberNodes; j++ ) {

      temp = shp[massIndex][j] * dvol[i] ;

      for ( p = 0; p < ndf; p++ )
        resid( jj+p ) += ( temp * momentum(p) ) ;

      if ( tangFlag == 1 ) {

        //multiply by density
        temp *= rho ;

        //node-node mass
        kk = 0 ;
        for ( k = 0; k < numberNodes; k++ ) {

          massJK = temp * shp[massIndex][k] ;

          for ( p = 0; p < ndf; p++ )
            mass( jj+p, kk+p ) += massJK ;

          kk += ndf ;
        } // end for k loop

      } // end if tang_flag

      jj += ndf ;
    } // end for j loop

  } //end for i gauss loop
}
phys::momentum phys::momentum::calcMomentum(velocity v, float m)
{
    // p = m * v, applied component-wise.
    const vector3D scaled = v * m;
    return momentum(scaled.x, scaled.y, scaled.z);
}
void doc_manager::populate(void) { add_class_descriptor(ml::k_base); add_class_descriptors(ml::k_base, { ml::k_classification, ml::k_regression }); add_class_descriptors(ml::k_regression, { ml::k_ann, ml::k_linreg, ml::k_logreg }); add_class_descriptors(ml::k_classification, { ml::k_svm, ml::k_adaboost, ml::k_anbc, ml::k_dtw, ml::k_hmmc, ml::k_softmax, ml::k_randforest, ml::k_mindist, ml::k_knn, ml::k_gmm, ml::k_dtree }); add_class_descriptors(ml::k_feature_extraction, { ml::k_peak, ml::k_minmax, ml::k_zerox }); descriptors[ml::k_ann].desc("Artificial Neural Network").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/MLP"); descriptors[ml::k_linreg].desc("Linear Regression").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/LinearRegression"); descriptors[ml::k_logreg].desc("Logistic Regression").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/LogisticRegression"); descriptors[ml::k_peak].desc("Peak Detection").url("").num_outlets(1); descriptors[ml::k_minmax].desc("Minimum / Maximum Detection").url("").num_outlets(1); descriptors[ml::k_zerox].desc("Zero Crossings Detection").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/ZeroCrossingCounter"); descriptors[ml::k_svm].desc("Support Vector Machine").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/SVM"); descriptors[ml::k_adaboost].desc("Adaptive Boosting").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/AdaBoost"); descriptors[ml::k_anbc].desc("Adaptive Naive Bayes Classifier").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/ANBC"); descriptors[ml::k_dtw].desc("Dynamic Time Warping").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/DTW"); descriptors[ml::k_hmmc].desc("Continuous Hidden Markov Model").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/HMM"); descriptors[ml::k_softmax].desc("Softmax Classifier").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/Softmax"); descriptors[ml::k_randforest].desc("Random 
Forests").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/RandomForests"); descriptors[ml::k_mindist].desc("Minimum Distance").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/MinDist"); descriptors[ml::k_knn].desc("K Nearest Neighbour").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/KNN"); descriptors[ml::k_gmm].desc("Gaussian Mixture Model").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/GMMClassifier"); descriptors[ml::k_dtree].desc("Decision Trees").url("http://www.nickgillian.com/wiki/pmwiki.php/GRT/DecisionTree"); for (auto& desc : {&descriptors[ml::k_hmmc], &descriptors[ml::k_dtw]}) { desc->notes( "add and map messages for time series should be delimited with record messages, e.g. record 1, add 1 40 50, add 1 41 50, record 0" ); } // base descriptor message_descriptor add( "add", "list comprising a class id followed by n features, <class> <feature 1> <feature 2> etc", "1 0.2 0.7 0.3 0.1" ); message_descriptor train( "train", "train the model based on vectors added with 'add'" ); message_descriptor map( "map", "generate the output value(s) for the input feature vector", "0.2 0.7 0.3 0.1" ); message_descriptor write( "write", "write training data and / or model, first argument gives path to write file", "/path/to/my_ml-lib_data" ); message_descriptor read( "read", "read training data and / or model, first argument gives path to the read file", "/path/to/my_ml-lib_data" ); message_descriptor clear( "clear", "clear the stored training data and model" ); message_descriptor help( "help", "post usage statement to the console" ); valued_message_descriptor<int> scaling( "scaling", "sets whether values are automatically scaled", {0, 1}, 1 ); valued_message_descriptor<int> record( "record", "start or stop time series recording for a single example of a given class", {0, 1}, 0 ); ranged_message_descriptor<float> training_rate( "training_rate", "set the learning rate, used to update the weights at each step of learning algorithms such as stochastic 
gradient descent.", 0.01, 1.0, 0.1 ); ranged_message_descriptor<float> min_change( "min_change", "set the minimum change that must be achieved between two training epochs for the training to continue", 0.0, 1.0, 1.0e-5 ); ranged_message_descriptor<int> max_iterations( "max_iterations", "set the maximum number of training iterations", 0, 1000, 100 ); record.insert_before = "add"; descriptors[ml::k_base].add_message_descriptor(add, write, read, train, clear, map, help, scaling, training_rate, min_change, max_iterations); // generic classification descriptor valued_message_descriptor<bool> null_rejection( "null_rejection", "toggle NULL rejection off or on, when 'on' classification results below the NULL-rejection threshold will be discarded", {false, true}, true ); ranged_message_descriptor<float> null_rejection_coeff( "null_rejection_coeff", "set a multiplier for the NULL-rejection threshold ", 0.1, 1.0, 0.9 ); valued_message_descriptor<int> probs( "probs", "determines whether probabilities are sent from the right outlet", {0, 1}, 0 ); descriptors[ml::k_classification].add_message_descriptor(null_rejection_coeff, probs, null_rejection); // generic feature extraction descriptor // descriptors[ml::k_feature_extraction].add_message_descriptor(null_rejection_coeff, null_rejection); // generic regression descriptor // descriptors[ml::k_regression].add_message_descriptor(training_rate, min_change, max_iterations); // Object-specific descriptors //-- Regressifiers //---- ann valued_message_descriptor<ml::data_type> mode("mode", "set the mode of the ANN, " + std::to_string(ml::LABELLED_CLASSIFICATION) + " for classification, " + std::to_string(ml::LABELLED_REGRESSION) + " for regression", {ml::LABELLED_CLASSIFICATION, ml::LABELLED_REGRESSION, ml::LABELLED_TIME_SERIES_CLASSIFICATION}, ml::defaults::data_type ); message_descriptor add_ann( "add", "class id followed by n features, <class> <feature 1> <feature 2> etc when in classification mode or N output values followed by M 
input values when in regression mode (N = num_outputs)", "1 0.2 0.7 0.3 0.1" ); ranged_message_descriptor<int> num_outputs( "num_outputs", "set the number of neurons in the output layer", 1, 1000, ml::defaults::num_output_dimensions ); ranged_message_descriptor<int> num_hidden( "num_hidden", "set the number of neurons in the hidden layer", 1, 1000, ml::defaults::num_hidden_neurons ); ranged_message_descriptor<int> min_epochs( "min_epochs", "setting the minimum number of training iterations", 1, 1000, 10 ); // TODO: check if the "epochs" are still needed or if we can use "iterations" as inherited from ml_regression ranged_message_descriptor<int> max_epochs( "max_epochs", "setting the maximum number of training iterations", 1, 10000, 100 ); ranged_message_descriptor<float> momentum( "momentum", "set the momentum", 0.0, 1.0, 0.5 ); ranged_message_descriptor<float> gamma( "gamma", "set the gamma", 0.0, 10.0, 2.0 ); // TODO: have optional value_labels for value labels valued_message_descriptor<int> input_activation_function( "input_activation_function", "set the activation function for the input layer, 0:LINEAR, 1:SIGMOID, 2:BIPOLAR_SIGMOID", {0, 1, 2}, 0 ); valued_message_descriptor<int> hidden_activation_function( "hidden_activation_function", "set the activation function for the hidden layer, 0:LINEAR, 1:SIGMOID, 2:BIPOLAR_SIGMOID", {0, 1, 2}, 0 ); valued_message_descriptor<int> output_activation_function( "output_activation_function", "set the activation function for the output layer, 0:LINEAR, 1:SIGMOID, 2:BIPOLAR_SIGMOID", {0, 1, 2}, 0 ); ranged_message_descriptor<int> rand_training_iterations( "rand_training_iterations", "set the number of random training iterations", 0, 1000, 10 ); valued_message_descriptor<bool> use_validation_set( "use_validation_set", "set whether to use a validation training set", {false, true}, true ); ranged_message_descriptor<int> validation_set_size( "validation_set_size", "set the size of the validation set", 1, 100, 20 ); 
valued_message_descriptor<bool> randomize_training_order( "randomize_training_order", "sets whether to randomize the training order", {false, true}, false ); descriptors[ml::k_ann].add_message_descriptor(add_ann, probs, mode, null_rejection, null_rejection_coeff, num_outputs, num_hidden, min_epochs, max_epochs, momentum, gamma, input_activation_function, hidden_activation_function, output_activation_function, rand_training_iterations, use_validation_set, validation_set_size, randomize_training_order); //-- Classifiers //---- ml.svm ranged_message_descriptor<int> type( "type", "set SVM type," " 0:C-SVC (multi-class)," " 1:nu-SVC (multi-class)," " 2:one-class SVM," // " 3:epsilon-SVR (regression)," // " 4:nu-SVR (regression)" , 0, 2, 0 // " 0 -- C-SVC (multi-class classification)\n" // " 1 -- nu-SVC (multi-class classification)\n" // " 2 -- one-class SVM\n" // " 3 -- epsilon-SVR (regression)\n" // " 4 -- nu-SVR (regression)\n" ); ranged_message_descriptor<int> kernel( "kernel", "set type of kernel function, " "0:linear, " // (u'*v)," "1:polynomial, " // (gamma*u'*v + coef0)^degree," "2:radial basis function, " //: exp(-gamma*|u-v|^2)," "3:sigmoid, " // tanh(gamma*u'*v + coef0)," "4:precomputed kernel (kernel values in training_set_file)", 0, 4, 0 // " 0 -- linear: u'*v\n" // " 1 -- polynomial: (gamma*u'*v + coef0)^degree\n" // " 2 -- radial basis function: exp(-gamma*|u-v|^2)\n" // " 3 -- sigmoid: tanh(gamma*u'*v + coef0)\n" // " 4 -- precomputed kernel (kernel values in training_set_file)\n" ); ranged_message_descriptor<float> degree( "degree", "set degree in kernel function", 0, 20, 3 ); ranged_message_descriptor<float> svm_gamma( "gamma", "set gamma in kernel function", 0.0, 1.0, 0.5 ); ranged_message_descriptor<float> coef0( "coef0", "coef0 in kernel function", INFINITY * -1.f, INFINITY, 0.0 ); ranged_message_descriptor<float> cost( "cost", "set the parameter C of C-SVC, epsilon-SVR, and nu-SVR", INFINITY * -1.f, INFINITY, 1.0 ); ranged_message_descriptor<float> 
nu( "nu", "set the parameter nu of nu-SVC, one-class SVM, and nu-SVR", INFINITY * -1.f, INFINITY, 0.5 ); message_descriptor cross_validation( "cross_validation", "perform cross validation" ); ranged_message_descriptor<int> num_folds( "num_folds", "set the number of folds used for cross validation", 1, 100, 10 ); descriptors[ml::k_svm].add_message_descriptor(cross_validation, num_folds, type, kernel, degree, svm_gamma, coef0, cost, nu); //---- ml.adaboost ranged_message_descriptor<int> num_boosting_iterations( "num_boosting_iterations", "set the number of boosting iterations that should be used when training the model", 0, 200, 20 ); valued_message_descriptor<int> prediction_method( "prediction_method", "set the Adaboost prediction method, 0:MAX_VALUE, 1:MAX_POSITIVE_VALUE", {GRT::AdaBoost::MAX_VALUE, GRT::AdaBoost::MAX_POSITIVE_VALUE}, GRT::AdaBoost::MAX_VALUE ); valued_message_descriptor<int> set_weak_classifier( "set_weak_classifier", "sets the weak classifier to be used by Adaboost, 0:DECISION_STUMP, 1:RADIAL_BASIS_FUNCTION", {ml::weak_classifiers::DECISION_STUMP, ml::weak_classifiers::RADIAL_BASIS_FUNCTION}, ml::weak_classifiers::DECISION_STUMP ); valued_message_descriptor<int> add_weak_classifier( "add_weak_classifier", "add a weak classifier to the list of classifiers used by Adaboost", {ml::weak_classifiers::DECISION_STUMP, ml::weak_classifiers::RADIAL_BASIS_FUNCTION}, ml::weak_classifiers::DECISION_STUMP ); descriptors[ml::k_adaboost].add_message_descriptor(num_boosting_iterations, prediction_method, set_weak_classifier, add_weak_classifier); //---- ml.anbc message_descriptor weights("weights", "vector of 1 integer and N floating point values where the integer is a class label and the floats are the weights for that class. 
Sending weights with a vector size of zero clears all weights" ); descriptors[ml::k_anbc].add_message_descriptor(weights); //---- ml.dtw valued_message_descriptor<int> rejection_mode( "rejection_mode", "sets the method used for null rejection, 0:TEMPLATE_THRESHOLDS, 1:CLASS_LIKELIHOODS, 2:THRESHOLDS_AND_LIKELIHOODS", {GRT::DTW::TEMPLATE_THRESHOLDS, GRT::DTW::CLASS_LIKELIHOODS, GRT::DTW::THRESHOLDS_AND_LIKELIHOODS}, GRT::DTW::TEMPLATE_THRESHOLDS ); ranged_message_descriptor<float> warping_radius( "warping_radius", "sets the radius of the warping path, which is used if the constrain_warping_path is set to 1", 0.0, 1.0, 0.2 ); valued_message_descriptor<bool> offset_time_series( "offset_time_series", "set if each timeseries should be offset by the first sample in the time series", {false, true}, false ); valued_message_descriptor<bool> constrain_warping_path( "constrain_warping_path", "sets the warping path should be constrained to within a specific radius from the main diagonal of the cost matrix", {false, true}, true ); valued_message_descriptor<bool> enable_z_normalization( "enable_z_normalization", "turn z-normalization on or off for training and prediction", {false, true}, false ); valued_message_descriptor<bool> enable_trim_training_data( "enable_trim_training_data", "enabling data trimming prior to training", {false, true}, false ); descriptors[ml::k_dtw].insert_message_descriptor(record); descriptors[ml::k_dtw].add_message_descriptor(rejection_mode, warping_radius, offset_time_series, constrain_warping_path, enable_z_normalization, enable_trim_training_data); //---- ml.hmmc valued_message_descriptor<int> model_type( "model_type", "set the model type used, 0:ERGODIC, 1:LEFTRIGHT", {HMM_ERGODIC, HMM_LEFTRIGHT}, HMM_LEFTRIGHT ); ranged_message_descriptor<int> delta( "delta", "control how many states a model can transition to if the LEFTRIGHT model type is used", 1, 100, 11 ); ranged_message_descriptor<int> max_num_iterations( "max_num_iterations", "set the maximum 
number of training iterations", 1, 1000, 100 ); ranged_message_descriptor<int> committee_size( "committee_size", "set the committee size for the number of votes combined to make a prediction", 1, 1000, 5 ); ranged_message_descriptor<int> downsample_factor( "downsample_factor", "set the downsample factor for the resampling of each training time series. A factor of 5 will result in each time series being resized (smaller) by a factor of 5", 1, 1000, 5 ); descriptors[ml::k_hmmc].insert_message_descriptor(record); descriptors[ml::k_hmmc].add_message_descriptor(model_type, delta, max_num_iterations, committee_size, downsample_factor); //---- ml.softmax //---- ml.randforest ranged_message_descriptor<int> num_random_splits( "num_random_splits", "set the number of steps that will be used to search for the best spliting value for each node", 1, 1000, 100 ); ranged_message_descriptor<int> min_samples_per_node2( "min_samples_per_node", "set the minimum number of samples that are allowed per node", 1, 100, 5 ); ranged_message_descriptor<int> max_depth( "max_depth", "sets the maximum depth of the tree, any node that reaches this depth will automatically become a leaf node", 1, 100, 10 ); descriptors[ml::k_randforest].add_message_descriptor(num_random_splits, min_samples_per_node2, max_depth); //----ml.mindist ranged_message_descriptor<int> num_clusters( "num_clusters", "set how many clusters each model will try to find during the training phase", 1, 100, 10 ); descriptors[ml::k_mindist].add_message_descriptor(num_clusters); //---- ml.knn // "best_k_value_search:\tbool (0 or 1) set whether k value search is enabled or not (default 0)\n"; ranged_message_descriptor<int> k( "k", "sets the K nearest neighbours that will be searched for by the algorithm during prediction", 1, 500, 10 ); ranged_message_descriptor<int> min_k_search_value( "min_k_search_value", "set the minimum K value to use when searching for the best K value", 1, 500, 1 ); ranged_message_descriptor<int> 
max_k_search_value( "max_k_search_value", "set the maximum K value to use when searching for the best K value", 1, 500, 10 ); valued_message_descriptor<bool> best_k_value_search( "best_k_value_search", "set whether k value search is enabled or not", {false, true}, false ); descriptors[ml::k_knn].add_message_descriptor(k, min_k_search_value, max_k_search_value, best_k_value_search); //---- ml.gmm ranged_message_descriptor<int> num_mixture_models( "num_mixture_models", "sets the number of mixture models used for class", 1, 20, 2 ); descriptors[ml::k_gmm].add_message_descriptor(num_mixture_models); //---- ml.dtree valued_message_descriptor<bool> training_mode( "training_mode", "set the training mode", {GRT::Tree::BEST_ITERATIVE_SPILT, GRT::Tree::BEST_RANDOM_SPLIT}, GRT::Tree::BEST_ITERATIVE_SPILT ); ranged_message_descriptor<int> num_splitting_steps( "num_splitting_steps", "set the number of steps that will be used to search for the best spliting value for each node", 1, 500, 100 ); ranged_message_descriptor<int> min_samples_per_node( "min_samples_per_node", "sets the minimum number of samples that are allowed per node, if the number of samples at a node is below this value then the node will automatically become a leaf node", 1, 100, 5 ); ranged_message_descriptor<int> dtree_max_depth( "max_depth", "sets the maximum depth of the tree, any node that reaches this depth will automatically become a leaf node", 1, 100, 10 ); valued_message_descriptor<bool> remove_features_at_each_split( "remove_features_at_each_split", "set if a feature is removed at each spilt so it can not be used again", {false, true}, false ); descriptors[ml::k_dtree].add_message_descriptor(training_mode, num_splitting_steps, min_samples_per_node, dtree_max_depth, remove_features_at_each_split); //-- Feature extraction //---- ml.peak ranged_message_descriptor<int> search_window_size( "search_window_size", "set the search window size in values", 1, 500, 5 ); ranged_message_descriptor<float> peak( 
"float", "set the current value of the peak detector, a bang will be output when a peak is detected", INFINITY * -1.f, INFINITY, 1 ); message_descriptor reset( "reset", "reset the peak detector" ); message_descriptor peak_help( "help", "post usage statement to the console" ); descriptors[ml::k_peak].add_message_descriptor(peak, reset, search_window_size, peak_help); //---- ml.minmax message_descriptor input( "list", "list of float values in which to find minima and maxima", "0.1 0.5 -0.3 0.1 0.2 -0.1 0.7 0.1 0.3" ); ranged_message_descriptor<float> minmax_delta( "delta", "setting the minmax delta. Input values will be considered to be peaks if they are greater than the previous and next value by at least the delta value", 0, 1, 0.1 ); descriptors[ml::k_minmax].add_message_descriptor(input, minmax_delta); //---- ml.zerox valued_message_descriptor<float> zerox_map( "map", "a stream of input values in which to detect zero crossings", 0.5 ); ranged_message_descriptor<float> dead_zone_threshold( "dead_zone_threshold", "set the dead zone threshold", 0.f, 1.f, 0.01f ); ranged_message_descriptor<int> zerox_search_window_size( "search_window_size", "set the search window size in values", 1, 500, 20 ); descriptors[ml::k_zerox].add_message_descriptor(zerox_map, dead_zone_threshold, zerox_search_window_size); }
void ConstantPressureVolumeQuad::formInertiaTerms( int tangFlag ) { static const int ndm = 2 ; static const int ndf = 2 ; static const int numberNodes = 4 ; static const int numberGauss = 4 ; static const int nShape = 3 ; static const int massIndex = nShape - 1 ; double xsj ; // determinant jacaobian matrix double dvol ; //volume element static double shp[nShape][numberNodes] ; //shape functions at a gauss point static Vector momentum(ndf) ; static Matrix sx(ndm,ndm) ; int i, j, k, p ; int jj, kk ; double temp, rho, massJK ; //zero mass mass.Zero( ) ; //gauss loop for ( i = 0; i < numberGauss; i++ ) { //get shape functions shape2d( sg[i], tg[i], xl, shp, xsj, sx ) ; //volume element dvol = wg[i] * xsj ; //node loop to compute acceleration momentum.Zero( ) ; for ( j = 0; j < numberNodes; j++ ) //momentum += shp[massIndex][j] * ( nodePointers[j]->getTrialAccel() ) ; momentum.addVector( 1.0, nodePointers[j]->getTrialAccel(), shp[massIndex][j] ) ; //density rho = materialPointers[i]->getRho() ; //multiply acceleration by density to form momentum momentum *= rho ; //residual and tangent calculations node loops jj = 0 ; for ( j = 0; j < numberNodes; j++ ) { temp = shp[massIndex][j] * dvol ; if ( tangFlag == 1 ) { //multiply by density temp *= rho ; //node-node mass kk = 0 ; for ( k = 0; k < numberNodes; k++ ) { massJK = temp * shp[massIndex][k] ; for ( p = 0; p < ndf; p++ ) mass( jj+p, kk+p ) += massJK ; kk += ndf ; } // end for k loop } // end if tang_flag else for ( p = 0; p < ndf; p++ ) resid( jj+p ) += ( temp * momentum(p) ) ; jj += ndf ; } // end for j loop } //end for i gauss loop }
// Form the inertia terms for the nine-node mixed quad:
// always accumulates the inertial residual (N^T * rho * accel * dvol)
// into `resid`; when tangFlag == 1 also assembles the consistent mass
// matrix into `mass`. Uses a 3x3 gauss rule; `count` walks the material
// points in the same (p,q) order as the gauss points.
void NineNodeMixedQuad::formInertiaTerms( int tangFlag )
{
  static const int ndm = 2 ;
  static const int ndf = 2 ;
  static const int numberNodes = 9 ;
  // static const int numberGauss = 9 ;
  static const int nShape = 3 ;
  static const int massIndex = nShape - 1 ;  // row of shp holding N itself

  double xsj ;  // determinant of the jacobian matrix
  double dvol ; // volume element

  static double shp[nShape][numberNodes] ;  // shape functions at a gauss point

  static Vector momentum(ndf) ;
  static Matrix sx(ndm,ndm) ;
  static double GaussPoint[2] ;

  int j, k, p, q, r ;
  int jj, kk ;
  double temp, rho, massJK ;

  //zero mass
  mass.Zero( ) ;

  //node coordinates
  computeBasis() ;

  //gauss loop
  int count = 0 ;
  for ( p=0; p<3; p++ ) {
    for ( q=0; q<3; q++ ) {

      GaussPoint[0] = sg[p] ;
      GaussPoint[1] = sg[q] ;

      //get shape functions
      shape2dNine( GaussPoint, xl, shp, xsj ) ;

      //volume element
      dvol = ( wg[p] * wg[q] ) * xsj ;

      //node loop to compute acceleration at this gauss point
      momentum.Zero( ) ;
      for ( j = 0; j < numberNodes; j++ )
        //momentum += shp[massIndex][j] * ( nodePointers[j]->getTrialAccel() ) ;
        momentum.addVector( 1.0, nodePointers[j]->getTrialAccel(), shp[massIndex][j] ) ;

      //density
      rho = materialPointers[count]->getRho() ;

      //multiply acceleration by density to form momentum
      momentum *= rho ;

      //residual and tangent calculations node loops
      for ( jj=0, j=0; j<numberNodes; j++, jj+=ndf ) {

        temp = shp[massIndex][j] * dvol ;

        // inertial residual contribution: N_j * momentum * dvol
        for ( r=0; r<ndf; r++ )
          resid( jj+r ) += ( temp * momentum(r) ) ;

        if ( tangFlag == 1 ) {

          //multiply by density
          temp *= rho ;

          //node-node mass (identical value on each dof of the block diagonal)
          for ( kk=0, k=0; k<numberNodes; k++, kk+=ndf ) {
            massJK = temp * shp[massIndex][k] ;
            for ( r=0; r<ndf; r++ )
              mass( jj+r, kk+r ) += massJK ;
          } // end for k loop

        } // end if tang_flag

      } // end for j loop

      count++ ;
    }//end for q gauss loop
  } //end for p gauss loop
}
void ShellMITC9::formInertiaTerms( int tangFlag ) { //translational mass only //rotational inertia terms are neglected static const int ndf = 6 ; static const int numberNodes = 9 ; static const int numberGauss = 9 ; static const int nShape = 3 ; static const int massIndex = nShape - 1 ; double xsj ; // determinant jacaobian matrix double dvol ; //volume element static double shp[nShape][numberNodes] ; //shape functions at a gauss point static Vector momentum(ndf) ; int i, j, k, p; int jj, kk ; double temp, rhoH, massJK ; //zero mass mass.Zero( ) ; //gauss loop for ( i = 0; i < numberGauss; i++ ) { //get shape functions shape2d( sg[i], tg[i], xl, shp, xsj ) ; //volume element to also be saved dvol = wg[i] * xsj ; //node loop to compute accelerations momentum.Zero( ) ; for ( j = 0; j < numberNodes; j++ ) //momentum += ( shp[massIndex][j] * nodePointers[j]->getTrialAccel() ) ; momentum.addVector(1.0, nodePointers[j]->getTrialAccel(), shp[massIndex][j] ) ; //density rhoH = materialPointers[i]->getRho() ; //multiply acceleration by density to form momentum momentum *= rhoH ; //residual and tangent calculations node loops //jj = 0 ; for ( j=0, jj=0; j<numberNodes; j++, jj+=ndf ) { temp = shp[massIndex][j] * dvol ; for ( p = 0; p < 3; p++ ) resid( jj+p ) += ( temp * momentum(p) ) ; if ( tangFlag == 1 && rhoH != 0.0) { //multiply by density temp *= rhoH ; //node-node translational mass //kk = 0 ; for ( k=0, kk=0; k<numberNodes; k++, kk+=ndf ) { massJK = temp * shp[massIndex][k] ; for ( p = 0; p < 3; p++ ) mass( jj+p, kk+p ) += massJK ; } // end for k loop } // end if tang_flag } // end for j loop } //end for i gauss loop }