void StokesFOImplicitThicknessUpdateResid<EvalT, Traits>::
evaluateFields(typename Traits::EvalData workset)
{
  typedef Intrepid::FunctionSpaceTools FST;

  // Initialize residual to 0.0
  Kokkos::deep_copy(Residual.get_kokkos_view(), ScalarT(0.0));

  Intrepid::FieldContainer<ScalarT> res(numNodes,3);
  double rho_g = rho*g;

  for (std::size_t cell=0; cell < workset.numCells; ++cell) {
    for (int i = 0; i < res.size(); i++)
      res(i) = 0.0;

    for (std::size_t qp=0; qp < numQPs; ++qp) {
      ScalarT dHdiffdx = 0; //Ugrad(cell,qp,2,0);
      ScalarT dHdiffdy = 0; //Ugrad(cell,qp,2,1);
      for (std::size_t node=0; node < numNodes; ++node) {
        dHdiffdx += (H(cell,node)-H0(cell,node)) * gradBF(cell,node,qp,0);
        dHdiffdy += (H(cell,node)-H0(cell,node)) * gradBF(cell,node,qp,1);
      }

      for (std::size_t node=0; node < numNodes; ++node) {
        res(node,0) += rho_g*dHdiffdx*wBF(cell,node,qp);
        res(node,1) += rho_g*dHdiffdy*wBF(cell,node,qp);
      }
    }

    for (std::size_t node=0; node < numNodes; ++node) {
      Residual(cell,node,0) = res(node,0);
      Residual(cell,node,1) = res(node,1);
    }
  }
}
void UpdateZCoordinateMovingTop<EvalT, Traits>::
evaluateFields(typename Traits::EvalData workset)
{
  Teuchos::RCP<const Tpetra_Vector> xT = workset.xT;
  Teuchos::ArrayRCP<const ST> xT_constView = xT->get1dView();

  const Albany::LayeredMeshNumbering<LO>& layeredMeshNumbering = *workset.disc->getLayeredMeshNumbering();
  const Albany::NodalDOFManager& solDOFManager = workset.disc->getOverlapDOFManager("ordinary_solution");

  int numLayers = layeredMeshNumbering.numLayers;
  const Teuchos::ArrayRCP<Teuchos::ArrayRCP<GO> >& wsElNodeID = workset.disc->getWsElNodeID()[workset.wsIndex];
  const Teuchos::ArrayRCP<double>& layers_ratio = layeredMeshNumbering.layers_ratio;

  // Cumulative sigma levels built from the layer thickness ratios
  Teuchos::ArrayRCP<double> sigmaLevel(numLayers+1);
  sigmaLevel[0] = 0.;
  sigmaLevel[numLayers] = 1.;
  for (int i=1; i<numLayers; ++i)
    sigmaLevel[i] = sigmaLevel[i-1] + layers_ratio[i-1];

  for (std::size_t cell=0; cell < workset.numCells; ++cell) {
    const Teuchos::ArrayRCP<GO>& elNodeID = wsElNodeID[cell];
    const Teuchos::ArrayRCP<Teuchos::ArrayRCP<int> >& nodeID = workset.wsElNodeEqID[cell];
    const int neq = nodeID[0].size();
    const std::size_t num_dof = neq * this->numNodes;

    for (std::size_t node = 0; node < this->numNodes; ++node) {
      LO lnodeId = workset.disc->getOverlapNodeMapT()->getLocalElement(elNodeID[node]);
      LO base_id, ilevel;
      layeredMeshNumbering.getIndices(lnodeId, base_id, ilevel);

      MeshScalarT h   = H0(cell,node) + dH(cell,node);
      MeshScalarT bed = topSurface(cell,node) - H0(cell,node);

      for (std::size_t icomp=0; icomp < numDims; icomp++) {
        typename PHAL::Ref<MeshScalarT>::type val = coordVecOut(cell,node,icomp);
        val = (icomp==2)
                ? ((h>minH) ? MeshScalarT(bed + sigmaLevel[ilevel]*h)
                            : MeshScalarT(bed + sigmaLevel[ilevel]*minH))
                : coordVecIn(cell,node,icomp);
      }
    }
  }
}
Matrix doubleTouchThread::findH0(skinContact &sc)
{
    // Set the proper orientation for the touching end-effector
    Matrix H0(4,4);
    Vector x(3,0.0), z(3,0.0), y(3,0.0);

    x = sc.getNormalDir();
    // Note: the division below assumes the contact normal has a nonzero x component
    z[0] = -x[2]/x[0];
    z[2] = 1;
    y = -1*(cross(x,z));

    // Let's make them unitary vectors:
    x = x / norm(x);
    y = y / norm(y);
    z = z / norm(z);

    H0.zero();
    H0(3,3) = 1;
    H0.setSubcol(x,0,0);
    H0.setSubcol(y,0,1);
    H0.setSubcol(z,0,2);
    H0.setSubcol(sc.getGeoCenter(),0,3);

    return H0;
}
double SQuIDS::GetExpectationValueD(const SU_vector& op, unsigned int nrh, double xi,
                                    SQuIDS::expectationValueDBuffer& buf,
                                    double scale, std::vector<bool>& avr) const{
  //find bracketing state entries
  auto xit=std::lower_bound(x.begin(),x.end(),xi);
  if(xit==x.end())
    throw std::runtime_error("SQUIDS::GetExpectationValueD : x value not in the array.");
  if(xit!=x.begin())
    xit--;
  size_t xid=std::distance(x.begin(),xit);
  //linearly interpolate between the two states
  double f2=((xi-x[xid])/(x[xid+1]-x[xid]));
  double f1=1-f2;
  buf.state =f1*state[xid].rho[nrh];
  buf.state+=f2*state[xid+1].rho[nrh];
  //compute the evolved operator
  std::unique_ptr<double[]> evol_buf(new double[H0(xi,nrh).GetEvolveBufferSize()]);
  H0(xi,nrh).PrepareEvolve(evol_buf.get(),t-t_ini,scale,avr);
  buf.op=op.Evolve(evol_buf.get());
  //apply operator to state
  return (buf.op*state[xid].rho[nrh])*f1 + (buf.op*state[xid+1].rho[nrh])*f2;
  //return buf.state*buf.op;
}
void PhotonDINT1D::process(Candidate *candidate) const {
    if (candidate->current.getId() != 22)
        return;

    // Initialize the spectrum
    Spectrum inputSpectrum;
    NewSpectrum(&inputSpectrum, NUM_MAIN_BINS);
    double criticalEnergy = candidate->current.getEnergy() / (eV * ELECTRON_MASS); // units of dint
    int maxBin = (int) ((log10(criticalEnergy * ELECTRON_MASS) - MAX_ENERGY_EXP) * BINS_PER_DECADE + NUM_MAIN_BINS);
    inputSpectrum.spectrum[PHOTON][maxBin] = 1.;

    // Initialize the bField
    dCVector bField;
    New_dCVector(&bField, 1);

    // Initialize output spectrum
    Spectrum outputSpectrum;
    NewSpectrum(&outputSpectrum, NUM_MAIN_BINS);

    double h = H0() * Mpc / 1000;
    double ol = omegaL();
    double om = omegaM();

    double showerPropDistance = candidate->current.getPosition().getR() / Mpc;
    double z = candidate->getRedshift();
    if (z == 0) {
        //TODO: use z value for distance calculation
    }

    prop_second(showerPropDistance, &bField, &impl->energyGrid, &impl->energyWidth,
            &inputSpectrum, &outputSpectrum, dataPath, IRFlag, Zmax, RadioFlag,
            h, om, ol, Cutcascade_Magfield);

#pragma omp critical
    {
        impl->saveSpectrum(&outputSpectrum);
    }

    DeleteSpectrum(&outputSpectrum);
    DeleteSpectrum(&inputSpectrum);
    Delete_dCVector(&bField);

    candidate->setActive(false);
}
double SQuIDS::GetExpectationValueD(const SU_vector& op, unsigned int nrh, double xi,
                                    SQuIDS::expectationValueDBuffer& buf) const{
  //find bracketing state entries
  auto xit=std::lower_bound(x.begin(),x.end(),xi);
  if(xit==x.end())
    throw std::runtime_error("SQUIDS::GetExpectationValueD : x value not in the array.");
  if(xit!=x.begin())
    xit--;
  size_t xid=std::distance(x.begin(),xit);
  //linearly interpolate between the two states
  double f2=((xi-x[xid])/(x[xid+1]-x[xid]));
  double f1=1-f2;
  buf.state =f1*state[xid].rho[nrh];
  buf.state+=f2*state[xid+1].rho[nrh];
  //compute the evolved operator
  buf.op=op.Evolve(H0(xi,nrh),t-t_ini);
  //apply operator to state
  return buf.state*buf.op;
}
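A minimal usage sketch for the buffered overloads above: the point of passing an expectationValueDBuffer is that its scratch SU_vectors are allocated once and reused across many sample points. The wrapper below is illustrative only; `MySQuIDS` (a concrete subclass defining H0) and the buffer constructor taking the dimension are assumptions, not taken from the SQuIDS snippets themselves.

// Hypothetical helper, for illustration only. Assumes a concrete SQuIDS
// subclass `MySQuIDS` and an expectationValueDBuffer constructor that takes
// the Hilbert-space dimension; neither is confirmed by the code above.
std::vector<double> SampleExpectation(MySQuIDS& squid, const SU_vector& op,
                                      unsigned int dim,
                                      double x_first, double x_last, double dx)
{
  SQuIDS::expectationValueDBuffer buf(dim);   // scratch SU_vectors, allocated once and reused
  std::vector<double> values;
  for (double xi = x_first; xi <= x_last; xi += dx)
    values.push_back(squid.GetExpectationValueD(op, 0, xi, buf));
  return values;
}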
Matrix doubleTouchThread::findH0(skinContact &sc)
{
    // Set the proper orientation for the touching end-effector
    Matrix H0(4,4);
    Vector x(3,0.0), z(3,0.0), y(3,0.0);

    x = sc.getNormalDir();
    if (x[0] == 0.0)
    {
        x[0] = 0.00000001;    // Avoid the division by 0
    }

    if (curTaskType!="LHtoR" && curTaskType!="RHtoL")
    {
        z[0] = -x[2]/x[0];
        z[2] = 1;
        y = -1*(cross(x,z));
    }
    else
    {
        // When x[0] == +-1, we can exploit an easier rule:
        z[1] = x[2];
        y = -1*(cross(x,z));
    }

    // Let's make them unitary vectors:
    x = x / norm(x);
    y = y / norm(y);
    z = z / norm(z);

    H0 = eye(4);
    H0.setSubcol(x,0,0);
    H0.setSubcol(y,0,1);
    H0.setSubcol(z,0,2);
    H0.setSubcol(sc.getCoP(),0,3);

    return H0;
}
/*****************************************************************************
 * Help:
 *****************************************************************************/
static void Help( x264_param_t *defaults, int b_longhelp )
{
#define H0 printf
#define H1 if(b_longhelp) printf
    H0( "x264 core:%d%s\n"
        "Syntax: x264 [options] -o outfile infile [widthxheight]\n"
        "\n"
        "Infile can be raw YUV 4:2:0 (in which case resolution is required),\n"
        " or YUV4MPEG 4:2:0 (*.y4m),\n"
        " or AVI or Avisynth if compiled with AVIS support (%s).\n"
        "Outfile type is selected by filename:\n"
        " .264 -> Raw bytestream\n"
        " .mkv -> Matroska\n"
        " .mp4 -> MP4 if compiled with GPAC support (%s)\n"
        "\n"
        "Options:\n"
        "\n"
        " -h, --help List the more commonly used options\n"
        " --longhelp List all options\n"
        "\n",
        X264_BUILD, X264_VERSION,
#ifdef AVIS_INPUT
        "yes",
#else
        "no",
#endif
#ifdef MP4_OUTPUT
        "yes"
#else
        "no"
#endif
      );
    H0( "Frame-type options:\n" );
    H0( "\n" );
    H0( " -I, --keyint <integer> Maximum GOP size [%d]\n", defaults->i_keyint_max );
    H1( " -i, --min-keyint <integer> Minimum GOP size [%d]\n", defaults->i_keyint_min );
    H1( " --scenecut <integer> How aggressively to insert extra I-frames [%d]\n", defaults->i_scenecut_threshold );
    H1( " --pre-scenecut Faster, less precise scenecut detection.\n"
        " Required and implied by multi-threading.\n" );
    H0( " -b, --bframes <integer> Number of B-frames between I and P [%d]\n", defaults->i_bframe );
    H1( " --b-adapt Adaptive B-frame decision method [%d]\n"
        " Higher values may lower threading efficiency.\n"
        " - 0: Disabled\n"
        " - 1: Fast\n"
        " - 2: Optimal (slow with high --bframes)\n", defaults->i_bframe_adaptive );
    H1( " --b-bias <integer> Influences how often B-frames are used [%d]\n", defaults->i_bframe_bias );
    H0( " --b-pyramid Keep some B-frames as references\n" );
    H0( " --no-cabac Disable CABAC\n" );
    H0( " -r, --ref <integer> Number of reference frames [%d]\n", defaults->i_frame_reference );
    H1( " --no-deblock Disable loop filter\n" );
    H0( " -f, --deblock <alpha:beta> Loop filter AlphaC0 and Beta parameters [%d:%d]\n", defaults->i_deblocking_filter_alphac0, defaults->i_deblocking_filter_beta );
    H0( " --interlaced Enable pure-interlaced mode\n" );
    H0( "\n" );
    H0( "Ratecontrol:\n" );
    H0( "\n" );
    H0( " -q, --qp <integer> Set QP (0=lossless) [%d]\n", defaults->rc.i_qp_constant );
    H0( " -B, --bitrate <integer> Set bitrate (kbit/s)\n" );
    H0( " --crf <float> Quality-based VBR (nominal QP)\n" );
    H1( " --vbv-maxrate <integer> Max local bitrate (kbit/s) [%d]\n", defaults->rc.i_vbv_max_bitrate );
    H0( " --vbv-bufsize <integer> Enable CBR and set size of the VBV buffer (kbit) [%d]\n", defaults->rc.i_vbv_buffer_size );
    H1( " --vbv-init <float> Initial VBV buffer occupancy [%.1f]\n", defaults->rc.f_vbv_buffer_init );
    H1( " --qpmin <integer> Set min QP [%d]\n", defaults->rc.i_qp_min );
    H1( " --qpmax <integer> Set max QP [%d]\n", defaults->rc.i_qp_max );
    H1( " --qpstep <integer> Set max QP step [%d]\n", defaults->rc.i_qp_step );
    H0( " --ratetol <float> Allowed variance of average bitrate [%.1f]\n", defaults->rc.f_rate_tolerance );
    H0( " --ipratio <float> QP factor between I and P [%.2f]\n", defaults->rc.f_ip_factor );
    H0( " --pbratio <float> QP factor between P and B [%.2f]\n", defaults->rc.f_pb_factor );
    H1( " --chroma-qp-offset <integer> QP difference between chroma and luma [%d]\n", defaults->analyse.i_chroma_qp_offset );
    H1( " --aq-mode <integer> AQ method [%d]\n"
        " - 0: Disabled\n"
        " - 1: Variance AQ (complexity mask)\n", defaults->rc.i_aq_mode );
    H0( " --aq-strength <float> Reduces blocking and blurring in flat and\n"
        " textured areas. [%.1f]\n"
        " - 0.5: weak AQ\n"
        " - 1.5: strong AQ\n", defaults->rc.f_aq_strength );
    H0( "\n" );
    H0( " -p, --pass <1|2|3> Enable multipass ratecontrol\n"
        " - 1: First pass, creates stats file\n"
        " - 2: Last pass, does not overwrite stats file\n"
        " - 3: Nth pass, overwrites stats file\n" );
    H0( " --stats <string> Filename for 2 pass stats [\"%s\"]\n", defaults->rc.psz_stat_out );
    H0( " --qcomp <float> QP curve compression: 0.0 => CBR, 1.0 => CQP [%.2f]\n", defaults->rc.f_qcompress );
    H1( " --cplxblur <float> Reduce fluctuations in QP (before curve compression) [%.1f]\n", defaults->rc.f_complexity_blur );
    H1( " --qblur <float> Reduce fluctuations in QP (after curve compression) [%.1f]\n", defaults->rc.f_qblur );
    H0( " --zones <zone0>/<zone1>/... Tweak the bitrate of some regions of the video\n" );
    H1( " Each zone is of the form\n"
        " <start frame>,<end frame>,<option>\n"
        " where <option> is either\n"
        " q=<integer> (force QP)\n"
        " or b=<float> (bitrate multiplier)\n" );
    H1( " --qpfile <string> Force frametypes and QPs for some or all frames\n"
        " Format of each line: framenumber frametype QP\n"
        " QP of -1 lets x264 choose. Frametypes: I,i,P,B,b.\n" );
    H0( "\n" );
    H0( "Analysis:\n" );
    H0( "\n" );
    H0( " -A, --partitions <string> Partitions to consider [\"p8x8,b8x8,i8x8,i4x4\"]\n"
        " - p8x8, p4x4, b8x8, i8x8, i4x4\n"
        " - none, all\n"
        " (p4x4 requires p8x8. i8x8 requires --8x8dct.)\n" );
    H0( " --direct <string> Direct MV prediction mode [\"%s\"]\n"
        " - none, spatial, temporal, auto\n", strtable_lookup( x264_direct_pred_names, defaults->analyse.i_direct_mv_pred ) );
    H0( " -w, --weightb Weighted prediction for B-frames\n" );
    H0( " --me <string> Integer pixel motion estimation method [\"%s\"]\n", strtable_lookup( x264_motion_est_names, defaults->analyse.i_me_method ) );
    H1( " - dia: diamond search, radius 1 (fast)\n"
        " - hex: hexagonal search, radius 2\n"
        " - umh: uneven multi-hexagon search\n"
        " - esa: exhaustive search\n"
        " - tesa: hadamard exhaustive search (slow)\n" );
    else H0( " - dia, hex, umh\n" );
void ProgValidationNonTilt::run()
{
    //Clustering Tendency and Cluster Validity Stephen D. Scott
    randomize_random_generator();
    MetaData md,mdGallery,mdOut,mdOut2,mdSort;
    MDRow row;
    FileName fnOut,fnOut2, fnGallery;
    fnOut = fnDir+"/clusteringTendency.xmd";
    fnGallery = fnDir+"/gallery.doc";
    fnOut2 = fnDir+"/validation.xmd";
    size_t nSamplesRandom = 500;

    md.read(fnParticles);
    mdGallery.read(fnGallery);
    mdSort.sort(md,MDL_IMAGE_IDX,true,-1,0);

    size_t maxNImg;
    size_t sz = md.size();

    if (useSignificant)
        mdSort.getValue(MDL_IMAGE_IDX,maxNImg,sz);
    else
    {
        mdSort.getValue(MDL_ITEM_ID,maxNImg,sz);
    }

    String expression;
    MDRow rowP,row2;
    SymList SL;
    int symmetry, sym_order;
    SL.readSymmetryFile(fnSym.c_str());
    SL.isSymmetryGroup(fnSym.c_str(), symmetry, sym_order);
    /*
    double non_reduntant_area_of_sphere = SL.nonRedundantProjectionSphere(symmetry,sym_order);
    double area_of_sphere_no_symmetry = 4.*PI;
    double correction = std::sqrt(non_reduntant_area_of_sphere/area_of_sphere_no_symmetry);
    */
    double correction = 1;
    double validation = 0;
    double num_images = 0;
    MetaData tempMd;
    std::vector<double> sum_u(nSamplesRandom);
    double sum_w=0;
    std::vector<double> H0(nSamplesRandom);
    std::vector<double> H(nSamplesRandom);
    std::vector<double> p(nSamplesRandom);

    if (rank==0)
        init_progress_bar(maxNImg);

    for (size_t idx=0; idx<=maxNImg; idx++)
    {
        if ((idx)%Nprocessors==rank)
        {
            if (useSignificant)
                expression = formatString("imageIndex == %lu",idx);
            else
                expression = formatString("itemId == %lu",idx);

            tempMd.importObjects(md, MDExpression(expression));

            if (tempMd.size()==0)
                continue;

            //compute H_0 from noise
            obtainSumU_2(mdGallery, tempMd,sum_u,H0);
            //compute H from experimental
            obtainSumW(tempMd,sum_w,sum_u,H,correction);

            std::sort(H0.begin(),H0.end());
            std::sort(H.begin(),H.end());

            double P = 0;
            for (size_t j=0; j<sum_u.size(); j++)
            {
                //P += H0.at(j)/H.at(j);
                P += H0.at(size_t((1-significance_noise)*nSamplesRandom))/H.at(j);
                p.at(j) = H0.at(j)/H.at(j);
            }
            P /= (nSamplesRandom);

            if (useSignificant)
                rowP.setValue(MDL_IMAGE_IDX,idx);
            else
                rowP.setValue(MDL_ITEM_ID,idx);

            rowP.setValue(MDL_WEIGHT,P);
            mdPartial.addRow(rowP);
            tempMd.clear();

            if (rank==0)
                progress_bar(idx+1);
        }
    }

    if (rank==0)
        progress_bar(maxNImg);

    synchronize();
    gatherClusterability();

    if (rank == 0)
    {
        mdPartial.write(fnOut);
        std::vector<double> P;
        mdPartial.getColumnValues(MDL_WEIGHT,P);

        for (size_t idx=0; idx< P.size(); idx++)
        {
            if (P[idx] > 1)
                validation += 1.;
            num_images += 1.;
        }
        validation /= (num_images);

        row2.setValue(MDL_IMAGE,fnInit);
        row2.setValue(MDL_WEIGHT,validation);
        mdOut2.addRow(row2);
        mdOut2.write(fnOut2);
    }
}
static int get_hash_0(int index) { H0(crypt_out[index]); }
static int binary_hash_0(void *binary) { H0((char *)binary); }
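The get_hash_0/binary_hash_0 hooks shown in this collection have no visible return statement because H0 there is conventionally a helper macro that expands to one. The definition below only illustrates that pattern; the exact cast and mask are assumptions, not the plugin's actual macro.

/* Illustration only: a return-style hash-bucket macro in the spirit of the
 * H0 used by the John the Ripper hooks above. The cast and the 0xF mask are
 * assumptions; the real format plugin defines its own macro and mask. */
#define H0(binary) return ((unsigned int *)(binary))[0] & 0xF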
void ProgValidationNonTilt::run()
{
    //Clustering Tendency and Cluster Validity Stephen D. Scott
    randomize_random_generator();
    //char buffer[400];
    //sprintf(buffer, "xmipp_reconstruct_significant -i %s --initvolumes %s --odir %s --sym %s --iter 1 --alpha0 %f --angularSampling %f",fnIn.c_str(), fnInit.c_str(),fnDir.c_str(),fnSym.c_str(),alpha0,angularSampling);
    //system(buffer);
    MetaData md,mdOut,mdOut2;
    FileName fnMd,fnOut,fnOut2;
    fnMd = fnDir+"/angles_iter001_00.xmd";
    fnOut = fnDir+"/clusteringTendency.xmd";
    fnOut2 = fnDir+"/validation.xmd";
    size_t nSamplesRandom = 250;

    md.read(fnMd);
    size_t maxNImg;
    size_t sz = md.size();
    md.getValue(MDL_IMAGE_IDX,maxNImg,sz);

    String expression;
    MDRow rowP,row2;
    SymList SL;
    int symmetry, sym_order;
    SL.readSymmetryFile(fnSym.c_str());
    SL.isSymmetryGroup(fnSym.c_str(), symmetry, sym_order);
    double non_reduntant_area_of_sphere = SL.nonRedundantProjectionSphere(symmetry,sym_order);
    double area_of_sphere_no_symmetry = 4.*PI;
    double correction = std::sqrt(non_reduntant_area_of_sphere/area_of_sphere_no_symmetry);
    double validation = 0;

    MetaData tempMd;
    std::vector<double> sum_u(nSamplesRandom);
    //std::vector<double> sum_w(nSamplesRandom);
    double sum_w=0;
    std::vector<double> H0(nSamplesRandom);
    std::vector<double> H(nSamplesRandom);

    if (rank==0)
        init_progress_bar(maxNImg);

    for (size_t idx=0; idx<=maxNImg; idx++)
    {
        if ((idx+1)%Nprocessors==rank)
        {
            expression = formatString("imageIndex == %lu",idx);
            tempMd.importObjects(md, MDExpression(expression));

            if (tempMd.size()==0)
                continue;

            //compute H_0 from noise
            obtainSumU(tempMd,sum_u,H0);
            //compute H from experimental
            obtainSumW(tempMd,sum_w,sum_u,H,correction);

            std::sort(H0.begin(),H0.end());
            std::sort(H.begin(),H.end());

            double P = 0;
            for (size_t j=0; j<sum_u.size(); j++)
                P += H0.at(j)/H.at(j);
            P /= (nSamplesRandom);
            rowP.setValue(MDL_IMAGE_IDX,idx);
            rowP.setValue(MDL_WEIGHT,P);
            mdPartial.addRow(rowP);

            //sum_u.clear();
            //sum_w.clear();
            //H0.clear();
            //H.clear();
            tempMd.clear();

            if (rank==0)
                progress_bar(idx+1);
        }
    }

    if (rank==0)
        progress_bar(maxNImg);

    synchronize();
    gatherClusterability();

    if (rank == 0)
    {
        mdPartial.write(fnOut);
        std::vector<double> P;
        mdPartial.getColumnValues(MDL_WEIGHT,P);

        for (size_t idx=0; idx< P.size(); idx++)
        {
            if (P[idx] > 1)
                validation += 1;
        }
        validation /= (maxNImg+1);
    }

    row2.setValue(MDL_IMAGE,fnInit);
    row2.setValue(MDL_WEIGHT,validation);
    mdOut2.addRow(row2);
    mdOut2.write(fnOut2);
}
///////////////////////////////////////////////////////////////////////////
// Drawing Firework ///////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
void DrawFirework(int no)
{
    /* Display each firework particle.
     * The trail of each particle is calculated and displayed here. */
    int i, j, k, u;
    float interpolated_pt[2];
    float Color[4];

    for (i=0; i< fire[no].total_no; i++)
    {
        // if the particle is still "alive"
        if (!fire[no].particle[i].death)
        {
            glLineWidth(1.5);
            glBegin(GL_LINE_STRIP);

            // for each particle, generate its spline curve
            Color[0] = fire[no].particle[i].col[0];
            Color[1] = fire[no].particle[i].col[1];
            Color[2] = fire[no].particle[i].col[2];
            Color[3] = fire[no].particle[i].col[3];
            for (k=0; k<3; k++)
                Color[k] = 1.0;

            // print the initial position
            glColor4fv(Color);
            glVertex2fv(fire[no].init_pos);

            // interpolation
            for (u=1; u<=10; u++)
            {
                // use Hermite spline interpolation to find the interpolated points
                for (j=0; j<2; j++)
                    interpolated_pt[j] = fire[no].init_pos[j]*H0(u/10.0)
                                       + fire[no].particle[i].pos[j]*H1(u/10.0)
                                       + fire[no].particle[i].init_vel[j]*fire[no].t*H2(u/10.0)
                                       + fire[no].particle[i].vel[j]*fire[no].t*H3(u/10.0);

                // print the interpolated points
                for (k=0; k<3; k++)
                    Color[k] = (1-fire[no].particle[i].col[k])*0.49 + fire[no].particle[i].col[k];
                glColor4fv(Color);
                glVertex2fv(interpolated_pt);
            }

            // print the current point
            for (k=0; k<3; k++)
                Color[k] = (1-fire[no].particle[i].col[k])*0.4 + fire[no].particle[i].col[k];
            glColor4fv(Color);
            glVertex2fv(fire[no].particle[i].pos);

            glEnd();
        }
    }
}
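DrawFirework above (and SolveTCB further down) combine H0..H3 with a start point, an end point, and two scaled tangents, which is the standard cubic Hermite blend. For reference, here is a self-contained sketch of the textbook basis; that the project's H0..H3 match these exact polynomials is an assumption.

/* Standard cubic Hermite basis, shown for reference only. It is an assumption
 * that the H0..H3 used in this project follow these textbook polynomials.
 * p(u) = P0*H0(u) + P1*H1(u) + T0*H2(u) + T1*H3(u), with u in [0,1]. */
static float hermite_h0(float u) { return  2*u*u*u - 3*u*u + 1; } /* weights the start point   */
static float hermite_h1(float u) { return -2*u*u*u + 3*u*u;     } /* weights the end point     */
static float hermite_h2(float u) { return    u*u*u - 2*u*u + u; } /* weights the start tangent */
static float hermite_h3(float u) { return    u*u*u -   u*u;     } /* weights the end tangent   */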
double SQuIDS::GetExpectationValue(SU_vector op, unsigned int nrh, unsigned int i) const{
  SU_vector h0=H0(x[i],nrh);
  return state[i].rho[nrh]*op.Evolve(h0,t-t_ini);
}
// Hermite (TCB) interpolation function
void SolveTCB ( float t, int *x, int *y, int *z)
{
    // Keyframes used for the current segment
    Key *NextKey, *NextNextKey, *CurKey, *PrevKey;
    // Loop index
    int i;
    // Size of the keyframe array
    const int NumKeys = ((double)sizeof(TabKey))/((double)sizeof(Key));
    const int NumKeysMinusOne = NumKeys-1;

    // Walk through the keyframes
    for (i = 0; i < NumKeys; i++)
    {
        NextKey = &TabKey[i];
        if (t<NextKey->t)
        {
            if (i==0)
            {
                CurKey = &TabKey[NumKeysMinusOne];
                PrevKey = &TabKey[NumKeysMinusOne-1];
                NextNextKey = &TabKey[1];
            }
            else if (i==1)
            {
                CurKey = &TabKey[0];
                PrevKey = &TabKey[NumKeysMinusOne];
                NextNextKey = &TabKey[2];
            }
            else if (i==NumKeysMinusOne)
            {
                CurKey = &TabKey[i-1];
                PrevKey = &TabKey[i-2];
                NextNextKey = &TabKey[0];
            }
            else
            {
                CurKey = &TabKey[i-1];
                PrevKey = &TabKey[i-2];
                NextNextKey = &TabKey[i+1];
            }

            // Normalized parameter within the current segment
            float u = ((t-CurKey->t) / (NextKey->t - CurKey->t));

            // Compute tangents
            // current
            float ctx,cty,ctz;
            float ntx,nty,ntz;
            ctx = Tn(CurKey->tension,CurKey->bias,CurKey->continuity,PrevKey->pos.x,CurKey->pos.x);
            cty = Tn(CurKey->tension,CurKey->bias,CurKey->continuity,PrevKey->pos.y,CurKey->pos.y);
            ctz = Tn(CurKey->tension,CurKey->bias,CurKey->continuity,PrevKey->pos.z,CurKey->pos.z);
            //printf("ctx -- > %lf \n",ctx);

            // next
            ntx = Tn1(CurKey->tension,CurKey->bias,CurKey->continuity,PrevKey->pos.x,CurKey->pos.x,NextKey->pos.x,NextNextKey->pos.x);
            nty = Tn1(CurKey->tension,CurKey->bias,CurKey->continuity,PrevKey->pos.y,CurKey->pos.y,NextKey->pos.y,NextNextKey->pos.y);
            ntz = Tn1(CurKey->tension,CurKey->bias,CurKey->continuity,PrevKey->pos.z,CurKey->pos.z,NextKey->pos.z,NextNextKey->pos.z);
            //printf(" ntx -- > %lf \n",ntx);

            // Update the positions
            *x = (int) (H0(u)*CurKey->pos.x + H1(u)*NextKey->pos.x + H2(u)*ctx + H3(u)*ntx);
            *y = (int) (H0(u)*CurKey->pos.y + H1(u)*NextKey->pos.y + H2(u)*cty + H3(u)*nty);
            *z = (int) (H0(u)*CurKey->pos.z + H1(u)*NextKey->pos.z + H2(u)*ctz + H3(u)*ntz);

            printf("\n%lf\n",( CurKey->pos.x ));
            printf("\n%lf, %lf\n",H1(t),( NextKey->pos.x ));
            printf("\n%lf\n",( H2(t)*CurKey->tension ));
            printf("\n%lf\n",( H3(t)*NextKey->tension ));
            //printf(" time -> %f X -- > %d \n",t,*x);
            //printf(" H0(t) = %f , H1(t) = %f , H2(t) = %f , H3(t) = %f \n",H0(t),H1(t),H2(t),H3(t));
            //if(*x < (-8000) || *x > 8000)
            //exit(0);
            //break;
            return;
        }
    }
    time = 0;
    return;
}
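SolveTCB above feeds per-key tension/bias/continuity into Tn/Tn1 to obtain the segment tangents. For reference, the textbook Kochanek-Bartels tangent formulas are sketched below; they are not claimed to be what Tn/Tn1 actually compute (notably, Tn above is called with only two control points).

/* Textbook Kochanek-Bartels tangents for the segment p1 -> p2, with tension t,
 * bias b and continuity c. Shown for reference only; whether the project's
 * Tn/Tn1 match these expressions is an assumption. */
static float kb_out_tangent(float t, float b, float c,
                            float p0, float p1, float p2)  /* outgoing tangent at p1 */
{
    return 0.5f*(1-t)*(1+b)*(1+c)*(p1-p0)
         + 0.5f*(1-t)*(1-b)*(1-c)*(p2-p1);
}
static float kb_in_tangent(float t, float b, float c,
                           float p1, float p2, float p3)   /* incoming tangent at p2 */
{
    return 0.5f*(1-t)*(1+b)*(1-c)*(p2-p1)
         + 0.5f*(1-t)*(1-b)*(1+c)*(p3-p2);
}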
static int get_hash_0(int index) { H0(buffer[index].out); }
double SQuIDS::GetExpectationValue(SU_vector op, unsigned int nrh, unsigned int i,
                                   double scale, std::vector<bool>& avr) const
{
  SU_vector h0=H0(x[i],nrh);
  std::unique_ptr<double[]> evol_buf(new double[h0.GetEvolveBufferSize()]);
  h0.PrepareEvolve(evol_buf.get(),t-t_ini,scale,avr);
  return state[i].rho[nrh]*op.Evolve(evol_buf.get());
}