int QCDBlitzTunedVersion(BenchmarkExt<int>& bench)
{
    bench.beginImplementation("Blitz++ (tuned)");

    while (!bench.doneImplementationBenchmark())
    {
        int length = bench.getParameter();
        int iters = (int)bench.getIterations();

        Array<latticeUnit,1> lattice(length);
        initializeRandomDouble((double*)lattice.data(),
            length * sizeof(latticeUnit) / sizeof(double));

        bench.start();
        long i;
        for (i=0; i < iters; ++i)
        {
            // use a distinct inner index so it does not shadow the iteration counter
            for (int j=0; j < length; ++j)
                lattice(j).two = lattice(j).gauge * lattice(j).two;
        }
        bench.stop();

        // Time overhead
        bench.startOverhead();
        for (i=0; i < iters; ++i)
        {
        }
        bench.stopOverhead();
    }

    bench.endImplementation();
    return 0;
}
// "Integer" turbulence function.  Takes coordinates in the range
// [0:1] expressed as a fraction of 2^32 (works with 64 bit ints too;
// it just doesn't use the whole range).  The output range is
// guaranteed to be within [-1:1], with a typical output range of +/-
// 0.6 or so.
float Turbulence::iturb(unsigned int x, unsigned int y)
{
    float amplitude = 0.5;   // start here, so it all sums to ~1.0
    float total = 0;
    int wrapmax = 2;
    int startgen = _gens - MEANINGFUL_GENS;
    for(int g=startgen; g<_gens; g++) {
        int xl = x >> (32 - g);            // lattice coordinates
        int yl = y >> (32 - g);

        float xfrac = i2fu(x << g);        // interpolation fractions
        float yfrac = i2fu(y << g);

        xfrac = xfrac*xfrac*(3 - 2*xfrac); // ... as cubics
        yfrac = yfrac*yfrac*(3 - 2*yfrac);

        float p00 = lattice(xl,   yl);     // lattice values
        float p01 = lattice(xl,   yl+1);
        float p10 = lattice(xl+1, yl);
        float p11 = lattice(xl+1, yl+1);

        float p0 = p00 * (1-yfrac) + p01 * yfrac;
        float p1 = p10 * (1-yfrac) + p11 * yfrac;
        float p  = p0 * (1-xfrac) + p1 * xfrac;

        total += p * amplitude;
        amplitude *= 0.5;
        wrapmax *= 2;
    }
    return total;
}
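// The helper i2fu is used above but not shown. A minimal sketch consistent with
// the comments (mapping an unsigned 32-bit value onto [0,1)) is given below; the
// name and exact behaviour are assumptions inferred from the call sites, not the
// original definition.

// Assumed helper: map a 32-bit unsigned value onto [0, 1).
static inline float i2fu(unsigned int u)
{
    return (float)u * (1.0f / 4294967296.0f);   // u / 2^32
}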
void test_staggered()
{
    mdp << "START TESTING STAGGERED ACTIONS\n";
    int box[]={64,6,6,6}, nc=3;
    generic_lattice lattice(4,box,default_partitioning<0>, torus_topology, 0, 3);
    gauge_field U(lattice,nc);
    gauge_field V(lattice,nc);
    staggered_field psi(lattice, nc);
    staggered_field chi1(lattice, nc);
    staggered_field chi2(lattice, nc);
    coefficients coeff;
    coeff["mass"]=1.0;
    double t0, t1;
    inversion_stats stats;
    set_hot(U);
    set_random(psi);
    mdp << "ATTENTION: need to adjust asqtad coefficients\n";

    default_staggered_action=StaggeredAsqtadActionFast::mul_Q;
    default_staggered_inverter=MinimumResidueInverter<staggered_field,gauge_field>;
    t0=mpi.time();
    stats=mul_invQ(chi2,psi,U,coeff);
    t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps;
    cout << "Staggered Min Res TIME=" << t1 << endl;

    default_staggered_inverter=BiConjugateGradientStabilizedInverter<staggered_field,gauge_field>;
    t0=mpi.time();
    stats=mul_invQ(chi2,psi,U,coeff);
    t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps;
    cout << "Staggered BiCGStab TIME=" << t1 << endl;

    default_staggered_inverter=StaggeredBiCGUML::inverter;
    t0=mpi.time();
    stats=mul_invQ(chi2,psi,U,coeff);
    t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps;
    cout << "Staggered SSE BiCGStabUML TIME=" << t1 << endl;

    default_staggered_action=StaggeredAsqtadActionSSE2::mul_Q;
    default_staggered_inverter=MinimumResidueInverter<staggered_field,gauge_field>;
    t0=mpi.time();
    stats=mul_invQ(chi2,psi,U,coeff);
    t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps;
    cout << "Staggered SSE Min Res TIME=" << t1 << endl;

    default_staggered_inverter=BiConjugateGradientStabilizedInverter<staggered_field,gauge_field>;
    t0=mpi.time();
    stats=mul_invQ(chi2,psi,U,coeff);
    t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps;
    cout << "Staggered SSE BiCGStab TIME=" << t1 << endl;

    default_staggered_inverter=StaggeredBiCGUML::inverter;
    t0=mpi.time();
    stats=mul_invQ(chi2,psi,U,coeff);
    t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps;
    cout << "Staggered SSE BiCGStabUML TIME=" << t1 << endl;
}
nodeDescriptor createNetwork::streamInLattice(int sizex, int sizey, string s)
{
    nodeBlueprint *n = new nodeVirtualEdges<streamInNode>(s);
    nodeDescriptor ret = lattice(sizex, sizey, 1, n, stdEdge);
    delete n;
    return ret;
}
int main(int argc, char** argv) { mdp.open_wormholes(argc,argv); define_base_matrices("FERMILAB"); int nc=3; int box[]={8,4,4,4}; mdp_lattice lattice(4,box); gauge_field U(lattice,nc); fermi_field source(lattice,nc); fermi_field sink(lattice,nc); mdp_site x(lattice); mdp_matrix c2(box[0],1); for(int t=0; t<c2.size(); t++) c2(t)=0; coefficients quark; quark["kappa"]=1.1; quark["c_{sw}"]=0.3; U.load(argv[1]); compute_em_field(U); for(int alpha=0; alpha<4; alpha++) for(int i=0; i<nc; i++) { source=0; forallsites(x) source(x,alpha,i)=1; mul_invQ(sink,source,U,quark); for(int beta=0; beta<4; beta++) for(int j=0; j<nc; j++) { forallsites(x) c2(x(TIME))+=real(pow(sink(x,beta,j),2)); } } mdp.add(c2); for(int t=0; t<c2.size(); t++) mdp << t << "\t" << c2(t) << endl; mdp.close_wormholes(); return 0; }
void testOnLattice(
    BiLexicon& biLexicon,
    const std::string& word,
    const std::string& lexeme,
    const std::string& cat,
    const std::list<std::string>& expectedEntries)
{
    AnnotationItemManager aim;
    Lattice lattice(aim, word);

    AnnotationItem item(cat, StringFrag(lexeme));
    LayerTagCollection tags = lattice.getLayerTagManager().createTagCollectionFromList(
        boost::assign::list_of("lexeme")("fake"));
    Lattice::EdgeDescriptor edge = lattice.addEdge(
        lattice.getFirstVertex(),
        lattice.getLastVertex(),
        item,
        tags);

    biLexicon.processEdge(lattice, edge);

    std::list<std::string> tagList = boost::assign::list_of("bilexicon");
    Lattice::EdgesSortedBySourceIterator it(
        lattice,
        lattice.getLayerTagManager().getMask(tagList));

    BOOST_FOREACH(const std::string& expectedEntry, expectedEntries) {
        BOOST_REQUIRE(it.hasNext());
        Lattice::EdgeDescriptor equivEdge = it.next();
        BOOST_CHECK_EQUAL(lattice.getAnnotationText(equivEdge), expectedEntry);
    }
}
void ising_run(int L) { std::cout << "starting fsc experiment with L = " << L << std::endl; boost::timer::cpu_timer timer; /* * parameters */ int Tsteps, nsteps; double Tbegin, Tend; Tbegin = 4.5; Tend = 5; Tsteps = 100; nsteps = 200; /* * setup */ drng rdg; irng rig(0, L); Lattice lattice(L, 1, rig); csp::ising::Ising<Lattice, drng> ising(lattice, Tbegin, rdg); //thermalization if(Tbegin > 0) { for(int i = 0; i < nsteps; ++i) { ising.step(); } } /* * run and plot */ std::stringstream fname; fname << "ising" << L << ".dat"; std::ofstream file(fname.str().c_str()); csp::algorithm::print_ plot(file); typedef boost::multi_array<double, 2> plot_type; plot_type plot_array = ising.T_run(Tbegin, Tend, Tsteps, nsteps); csp::iterate::stencil_iterate<2>(plot_array, plot); std::cout << "L = " << L << " spin flips: " << (double) (1.0 * Tsteps * nsteps * L*L*L + (Tbegin ? nsteps*L*L*L : 0)) << " t = " << timer.format() << std::endl; }
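// The Ising type used above is assumed to perform a standard single-spin-flip
// Metropolis sweep inside step()/T_run(). The sketch below only illustrates that
// assumed update rule on a periodic L*L*L lattice of +/-1 spins; the names
// (spins, beta, metropolis_sweep) are placeholders, not the csp::ising API.
#include <cmath>
#include <random>
#include <vector>

void metropolis_sweep(std::vector<int>& spins, int L, double beta, std::mt19937& rng)
{
    std::uniform_real_distribution<double> uni(0.0, 1.0);
    auto idx = [L](int x, int y, int z) {
        return ((x % L + L) % L) + L * (((y % L + L) % L) + L * ((z % L + L) % L));
    };
    for (int x = 0; x < L; ++x)
        for (int y = 0; y < L; ++y)
            for (int z = 0; z < L; ++z) {
                int s  = spins[idx(x, y, z)];
                int nb = spins[idx(x+1,y,z)] + spins[idx(x-1,y,z)]
                       + spins[idx(x,y+1,z)] + spins[idx(x,y-1,z)]
                       + spins[idx(x,y,z+1)] + spins[idx(x,y,z-1)];
                double dE = 2.0 * s * nb;              // energy change of flipping s
                if (dE <= 0.0 || uni(rng) < std::exp(-beta * dE))
                    spins[idx(x, y, z)] = -s;          // accept the flip
            }
}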
void test_clover() { mdp << "START TESTING CLOVER ACTIONS\n"; int box[]={64,6,6,6}, nc=3; generic_lattice lattice(4,box); gauge_field U(lattice,nc); fermi_field psi(lattice, nc); fermi_field chi2(lattice, nc); coefficients coeff; coeff["kappa_s"]=0.1; coeff["kappa_t"]=0.1; coeff["c_{sw}"]=1.00; set_hot(U); compute_em_field(U); set_random(psi); double t0,t1; inversion_stats stats; default_fermi_action=FermiCloverActionFast::mul_Q; default_fermi_inverter=MinimumResidueInverter<fermi_field,gauge_field>; t0=mpi.time(); stats=mul_invQ(chi2,psi,U,coeff); t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps; cout << "Clover Min Res TIME=" << t1 << endl; default_fermi_inverter=BiConjugateGradientStabilizedInverter<fermi_field,gauge_field>; t0=mpi.time(); stats=mul_invQ(chi2,psi,U,coeff); t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps; cout << "Clover BiCGStab TIME=" << t1 << endl; default_fermi_action=FermiCloverActionSSE2::mul_Q; default_fermi_inverter=MinimumResidueInverter<fermi_field,gauge_field>; t0=mpi.time(); stats=mul_invQ(chi2,psi,U,coeff); t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps; cout << "Clover SSE Min Res TIME=" << t1 << endl; default_fermi_inverter=BiConjugateGradientStabilizedInverter<fermi_field,gauge_field>; t0=mpi.time(); stats=mul_invQ(chi2,psi,U,coeff); t1=(mpi.time()-t0)/lattice.nvol_gl/stats.steps; cout << "Clover SSE BiCGStab TIME=" << t1 << endl; }
int main(int argc, char** argv) { mdp.open_wormholes(argc,argv); // START string gauge_filename=argv[1]; int size1=atoi(argv[2]); int size2=atoi(argv[3]); mdp_field_file_header header; double result=0.0; // read file metadata if(is_file(gauge_filename)) header=get_info(gauge_filename); else error("Unable to access gauge configuration\n"); if(header.ndim!=4) error("sorry, only in 4D"); int nc=(int) sqrt((double) header.bytes_per_site/(4*sizeof(mdp_complex))); int *L=header.box; // lattice size // create lattice and read it in mdp_lattice lattice(4,L); // make a 4D lattice gauge_field U(lattice,nc); // make a gauge field U U.load(gauge_filename); int length=2*size1+2*size2; int path[length][2]; // make a generic path for(int i=0; i<size1; i++) { path[i][0]=+1; path[i+size1+size2][0]=-1; } for(int i=size1; i<size1+size2; i++) { path[i][0]=+1; path[i+size1+size2][0]=-1; } // loop over all possible paths for(int mu=1; mu<4; mu++) for(int nu=mu+1; nu<4; nu++) { // build each path for(int i=0;i<size1;i++) path[i][1]=path[i+size1+size2][1]=mu; for(int i=size1;i<size1+size2;i++) path[i][1]=path[i+size1+size2][1]=nu; result+=real(average_path(U,length,path))/6; } cout << "average loop " << size1 << "x" << size2 << " = " << result << endl; mdp.close_wormholes(); // STOP return 0; }
void test_gauge(int nt, int nx, char* filename)
{
    int box[]={nt,nx,nx,nx}, nc=3;
    generic_lattice lattice(4, box, default_partitioning0, torus_topology, 0, 1, false);
    gauge_field U(lattice,nc);
    char filename2[200];
    U.load(filename);
    // U.switch_endianess_4bytes();
    for(int k=0; k<=20; k+=5) {
        sprintf(filename2, "%s.topological_charge_%i.vtk", filename, k);
        float tc=topological_charge_vtk(U, filename2, 0);
        mdp << "topological_charge=" << tc << endl;
        ApeSmearing::smear(U, 0.7, 5, 10);
    }
}
float PerlinGenerator::noise(QVector3D& point) { int ix = MathHelper::toInt(floor(point.x())); float fx0 = point.x() - ix; float fx1 = fx0 - 1.0f; float wx = smooth(fx0); int iy = MathHelper::toInt(floor(point.y())); float fy0 = point.y() - iy; float fy1 = fy0 - 1.0f; float wy = smooth(fy0); int iz = MathHelper::toInt(floor(point.z())); float fz0 = point.z() - iz; float fz1 = fz0 - 1.0f; float wz = smooth(fz0); float vx0 = lattice(ix, iy, iz, QVector3D(fx0, fy0, fz0)); float vx1 = lattice(ix + 1, iy, iz, QVector3D(fx1, fy0, fz0)); float vy0 = lerp(QVector3D(wx, vx0, vx1)); vx0 = lattice(ix, iy + 1, iz, QVector3D(fx0, fy1, fz0)); vx1 = lattice(ix + 1, iy + 1, iz, QVector3D(fx1, fy1, fz0)); float vy1 = lerp(QVector3D(wx, vx0, vx1)); float vz0 = lerp(QVector3D(wy, vy0, vy1)); vx0 = lattice(ix, iy, iz + 1, QVector3D(fx0, fy0, fz1)); vx1 = lattice(ix + 1, iy, iz + 1, QVector3D(fx1, fy0, fz1)); vy0 = lerp(QVector3D(wx, vx0, vx1)); vx0 = lattice(ix, iy + 1, iz + 1, QVector3D(fx0, fy1, fz1)); vx1 = lattice(ix + 1, iy + 1, iz + 1, QVector3D(fx1, fy1, fz1)); vy1 = lerp(QVector3D(wx, vx0, vx1)); float vz1 = lerp(QVector3D(wy, vy0, vy1)); return lerp(QVector3D(wz, vz0, vz1)); }
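// The smooth and lerp helpers called above are not shown. Minimal sketches
// consistent with classic Perlin noise are given below: the smoothstep cubic and
// a linear interpolation whose arguments are packed as (t, a, b) in a QVector3D,
// as the call sites suggest. In the original they may be PerlinGenerator members;
// these are assumptions, not the original definitions.
#include <QVector3D>

// Smoothstep weighting used by classic Perlin noise.
inline float smooth(float t)
{
    return t * t * (3.0f - 2.0f * t);
}

// Linear interpolation with the arguments packed as (t, a, b) into a QVector3D.
inline float lerp(const QVector3D& v)
{
    return v.y() + v.x() * (v.z() - v.y());   // a + t * (b - a)
}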
void testBracketPrinter(std::string pattern, std::string output)
{
    AnnotationItemManager aim;
    Lattice lattice(aim, "a");
    lattice.addSymbols(lattice.getFirstVertex(), lattice.getLastVertex());
    Lattice::EdgeDescriptor edge = lattice.firstOutEdge(
        lattice.getFirstVertex(),
        lattice.getLayerTagManager().anyTag());

    std::vector<std::string> vs = boost::assign::list_of(pattern);
    BracketPrinter bp(vs, ",", ",", "=");

    std::set<std::string> tags = boost::assign::list_of("symbol")("token")("segment");
    std::map<std::string, std::string> avMap =
        boost::assign::map_list_of("case", "Nominative")("number", "singular");
    EdgeData edgeData(lattice, edge, tags, "Noun-Phrase", "Żółta jaźń", avMap, -1.5, "#");
    std::set<EdgeData> edgeDataSet;
    edgeDataSet.insert(edgeData);

    std::set<EdgePrintData> printed = bp.print(edgeDataSet);
    BOOST_FOREACH(EdgePrintData epd, printed) {
        BOOST_CHECK_EQUAL(epd.printedElements[0], output);
    }
}
void Source:: doSourceH(CalculationPartition & cp, long timestep) { float val; InterleavedLattice& lattice(cp.lattice()); mFieldInput.startHalfTimestepH(timestep, cp.dt()*(timestep+0.5)); for (int xyz = 0; xyz < 3; xyz++) { mFieldInput.restartMaskPointer(xyz); if (mFields.whichH()[xyz] != 0) for (unsigned int rr = 0; rr < mRegions.size(); rr++) { Rect3i rect = mRegions[rr].yeeCells(); { Vector3i xx; if (!mIsSoft) { for (xx[2] = rect.p1[2]; xx[2] <= rect.p2[2]; xx[2]++) for (xx[1] = rect.p1[1]; xx[1] <= rect.p2[1]; xx[1]++) for (xx[0] = rect.p1[0]; xx[0] <= rect.p2[0]; xx[0]++) { val = mFieldInput.getFieldH(xyz); lattice.setH(xyz, xx, val); } } else { for (xx[2] = rect.p1[2]; xx[2] <= rect.p2[2]; xx[2]++) for (xx[1] = rect.p1[1]; xx[1] <= rect.p2[1]; xx[1]++) for (xx[0] = rect.p1[0]; xx[0] <= rect.p2[0]; xx[0]++) { val = mFieldInput.getFieldH(xyz); lattice.setH(xyz, xx, lattice.getH(xyz, xx) + val); } } } } } }
int main(int argc, char** argv) { int nIterations = 10000; if (argc > 1) { nIterations = atoi(argv[1]); } Lattice lattice(4, 8, 5.5, 1.0, 1.0, 1.0, 0, 10, 0, 1, 4, -1); vector<complex<double> > boundaryConditions(4, complex<double>(1.0, 0.0)); DWF linop(0.4, 1.8, 4, pyQCD::wilson, boundaryConditions, &lattice); VectorXcd psi = VectorXcd::Zero(4 * 12 * 4 * 4 * 4 * 8); psi(0) = 1.0; std::cout << "Performing " << nIterations << " matrix-vector products." << std::endl; boost::timer::cpu_timer timer; for (int i = 0; i < nIterations; ++i) { VectorXcd eta = linop.apply(psi); } boost::timer::cpu_times const elapsedTimes(timer.elapsed()); boost::timer::nanosecond_type const elapsed(elapsedTimes.system + elapsedTimes.user); boost::timer::nanosecond_type const walltime(elapsedTimes.wall); std::cout << "Total CPU time = " << elapsed / 1.0e9 << " s" << endl; std::cout << "CPU time per iteration = " << elapsed / 1.0e9 / nIterations << " s" << endl; std::cout << "Walltime = " << walltime / 1.0e9 << " s" << endl; std::cout << "Walltime per iteration = " << walltime / 1.0e9 / nIterations << " s" << endl; std::cout << "Performance: " << linop.getNumFlops() << " floating point operations; " << (double) linop.getNumFlops() / elapsed * 1000.0 << " MFlops / thread" << endl; return 0; }
int main(int argc, char *argv[])
{
    ParametersEngineType engineParams;
    ParametersModelType mp;
    Dmrg::SimpleReader reader(argv[1]);
    std::cout << "Loading" << std::endl;
    reader.load(engineParams);
    reader.load(mp);
    std::cout << "Initialize Lattice" << std::endl;
    LatticeType lattice(engineParams);
    std::cout << "Initialize MF Parameters" << std::endl;
    MFParamsType mfParams(engineParams, mp);
    EngineType engine(engineParams, mp, mfParams, lattice);
    engine.run();
}
// Generates the graphs used to tune the heuristics
vector<vector<nodo>> generar_grafos() {
    vector<vector<nodo>> grafos;
    int initial = 1;
    int max = 10;
    int increment = 1;

    // Lattice family
    for(int m = initial; m <= max; m += increment)
        for(int n = initial; n <= max; n += increment) {
            grafos.push_back(lattice(m, n));
        }

    // (K_n U Claw_m)^c family
    for(int m = initial; m <= max; m += increment)
        for(int n = initial; n <= max; n += increment) {
            grafos.push_back(kn_union_claw_m_complemento(m, n));
        }

    // Lollipop family
    for(int m = initial; m <= max; m += increment)
        for(int n = initial; n <= max; n += increment) {
            grafos.push_back(lollipop(m, n));
        }

    // Fan family
    for(int m = initial; m <= max; m += increment)
        for(int n = initial; n <= max; n += increment) {
            grafos.push_back(fan(m, n));
        }

    // Ninja family
    for(int n = initial; n <= max; n += increment) {
        grafos.push_back(ninja(n));
    }

    return grafos;
}
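// The lattice(m, n) generator called above is not shown. A minimal sketch of an
// m-by-n grid-graph builder is given below, assuming nodo is an adjacency list
// (std::vector<int>); both that assumption and the exact edge layout are guesses
// based on the name, not the original code.
#include <vector>
using namespace std;

typedef vector<int> nodo;   // assumed: list of neighbour indices

// Sketch: vertex (i, j) is index i*n + j, connected to its right and lower
// neighbours, with each edge stored in both adjacency lists.
vector<nodo> lattice(int m, int n) {
    vector<nodo> g(m * n);
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j) {
            int v = i * n + j;
            if (j + 1 < n) { g[v].push_back(v + 1); g[v + 1].push_back(v); }
            if (i + 1 < m) { g[v].push_back(v + n); g[v + n].push_back(v); }
        }
    return g;
}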
int main(int argc, char** argv)
{
    mdp.open_wormholes(argc,argv); // mpirun
    int L[]={4,10,10,10};
    mdp_lattice lattice(4,L);
    gauge_field U(lattice,3);
    coefficients gauge;
    gauge["beta"]=5.0;
    // mdp_field<float> Q(lattice);
    set_cold(U);
    for(int k=0; k<10; k++) {
        cout << k << endl;
        WilsonGaugeAction::heatbath(U,gauge,1);
    }
    topological_charge_vtk(U,"top_charge_simple.vtk");
    mdp.close_wormholes();
    return 0;
}
int main(void)
{
    gettime(&_time1);

    // Define variables
    array lattice(3, LATTICE_SIZE, LATTICE_SIZE, LATTICE_SIZE);
    int i, j, k, counter;
    double coeff_1_6 = 1.0/6.0;

    // Perform the desired number of iterations
    for( counter = 0; counter < LOOP; counter++) {
        // Relax the lattice
        for( i = 1; i < LATTICE_SIZE - 1; i++)
            for( j = 1; j < LATTICE_SIZE - 1; j++)
                for( k = 1; k < LATTICE_SIZE - 1; k++) {
                    lattice.Set( coeff_1_6*( lattice.Val(i+1,j,k) + lattice.Val(i-1,j,k) +
                                             lattice.Val(i,j+1,k) + lattice.Val(i,j-1,k) +
                                             lattice.Val(i,j,k-1) + lattice.Val(i,j,k+1) ),
                                 i, j, k);
                }
    }

    gettime(&_time2);
    int temp = (_time2.ti_hour - _time1.ti_hour)*3600 +
               (_time2.ti_min  - _time1.ti_min)*60 +
               (_time2.ti_sec  - _time1.ti_sec);
    printf("The time was %d secs", temp);
    return 0;
}
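// The array class used above (constructed with a rank followed by the extents and
// accessed through Val/Set) is not shown. A minimal 3-D sketch with that interface
// follows; the storage layout and the meaning of the first constructor argument
// are assumptions, not the original class.
#include <vector>

class array {
public:
    array(int rank, int nx, int ny, int nz)
        : nx_(nx), ny_(ny), nz_(nz), data_(nx * ny * nz, 0.0) { (void)rank; }

    double Val(int i, int j, int k) const { return data_[(i * ny_ + j) * nz_ + k]; }
    void   Set(double v, int i, int j, int k) { data_[(i * ny_ + j) * nz_ + k] = v; }

private:
    int nx_, ny_, nz_;
    std::vector<double> data_;
};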
int main(int argc, char* argv[]) { param_t params; /* struct to hold parameter values */ speed_t* cells = NULL; /* grid containing fluid densities */ speed_t* tmp_cells = NULL; /* scratch space */ int* obstacles = NULL; /* grid indicating which cells are blocked */ int* rowsSetup = NULL; int* accelgrid = NULL; float* av_vels = NULL; /* a record of the av. velocity computed for each timestep */ int ii, rank, size, tag=0, jj; /* generic counter */ struct timeval timstr; /* structure to hold elapsed time */ struct rusage ru; /* structure to hold CPU time--system and user */ double tic,toc; /* floating point numbers to calculate elapsed wallclock time */ double usrtim; /* floating point number to record elapsed user CPU time */ double systim; /* floating point number to record elapsed system CPU time */ int halorow; int buff; int extra; int start_row; int end_row; MPI_Status status; accel_area_t accel_area; /* initialise our data structures and load values from file */ initialise(argv[1], &accel_area, ¶ms, &cells, &tmp_cells, &obstacles, &av_vels, &accelgrid); // Initialize MPI environment. MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); extra = params.ny%size; halorow = (rank<extra) ? (params.ny/size + 1) * params.nx : (params.ny/size) * params.nx; calc_row_setup(&rowsSetup, rank, halorow, extra, params); buff = rowsSetup[0]; start_row = rowsSetup[1]; end_row = rowsSetup[2]; /* iterate for max_iters timesteps */ gettimeofday(&timstr,NULL); tic=timstr.tv_sec+(timstr.tv_usec/1000000.0); for(ii=0; ii<params.max_iters; ii++) { accelerate_flow(params,accel_area,cells,start_row, end_row, accelgrid); lattice(params,cells,tmp_cells,obstacles, av_vels, start_row, end_row, ii); } gettimeofday(&timstr,NULL); toc=timstr.tv_sec+(timstr.tv_usec/1000000.0); getrusage(RUSAGE_SELF, &ru); timstr=ru.ru_utime; usrtim=timstr.tv_sec+(timstr.tv_usec/1000000.0); timstr=ru.ru_stime; systim=timstr.tv_sec+(timstr.tv_usec/1000000.0); float* buffer = malloc(buff * 9 * sizeof(float)); if(rank != 0) { for(ii=0; ii<halorow; ii++) { buffer[9*ii] = cells[start_row+ii].speeds[0]; buffer[9*ii+1] = cells[start_row+ii].speeds[1]; buffer[9*ii+2] = cells[start_row+ii].speeds[2]; buffer[9*ii+3] = cells[start_row+ii].speeds[3]; buffer[9*ii+4] = cells[start_row+ii].speeds[4]; buffer[9*ii+5] = cells[start_row+ii].speeds[5]; buffer[9*ii+6] = cells[start_row+ii].speeds[6]; buffer[9*ii+7] = cells[start_row+ii].speeds[7]; buffer[9*ii+8] = cells[start_row+ii].speeds[8]; } MPI_Send(buffer, 9*buff, MPI_FLOAT, 0, tag, MPI_COMM_WORLD); } else { if(extra == 0) { for(ii=1; ii<size; ii++) { MPI_Recv(buffer, 9*buff, MPI_FLOAT, ii, tag, MPI_COMM_WORLD, &status); for(jj=0; jj<halorow; jj++) { cells[halorow*ii+jj].speeds[0] = buffer[9*jj]; cells[halorow*ii+jj].speeds[1] = buffer[9*jj+1]; cells[halorow*ii+jj].speeds[2] = buffer[9*jj+2]; cells[halorow*ii+jj].speeds[3] = buffer[9*jj+3]; cells[halorow*ii+jj].speeds[4] = buffer[9*jj+4]; cells[halorow*ii+jj].speeds[5] = buffer[9*jj+5]; cells[halorow*ii+jj].speeds[6] = buffer[9*jj+6]; cells[halorow*ii+jj].speeds[7] = buffer[9*jj+7]; cells[halorow*ii+jj].speeds[8] = buffer[9*jj+8]; } } } else { for(ii=1; ii<extra; ii++) { MPI_Recv(buffer, 9*buff, MPI_FLOAT, ii, tag, MPI_COMM_WORLD, &status); for(jj=0; jj<halorow; jj++) { cells[halorow*ii+jj].speeds[0] = buffer[9*jj]; cells[halorow*ii+jj].speeds[1] = buffer[9*jj+1]; cells[halorow*ii+jj].speeds[2] = buffer[9*jj+2]; cells[halorow*ii+jj].speeds[3] = buffer[9*jj+3]; cells[halorow*ii+jj].speeds[4] = 
buffer[9*jj+4]; cells[halorow*ii+jj].speeds[5] = buffer[9*jj+5]; cells[halorow*ii+jj].speeds[6] = buffer[9*jj+6]; cells[halorow*ii+jj].speeds[7] = buffer[9*jj+7]; cells[halorow*ii+jj].speeds[8] = buffer[9*jj+8]; } } for(ii=extra; ii<size; ii++) { MPI_Recv(buffer, 9*buff, MPI_FLOAT, ii, tag, MPI_COMM_WORLD, &status); for(jj=0; jj<(params.ny/size) * params.nx; jj++) { int local_extra = halorow * extra + (halorow-params.nx)* (ii-extra); cells[local_extra + jj].speeds[0] = buffer[9*jj]; cells[local_extra + jj].speeds[1] = buffer[9*jj+1]; cells[local_extra + jj].speeds[2] = buffer[9*jj+2]; cells[local_extra + jj].speeds[3] = buffer[9*jj+3]; cells[local_extra + jj].speeds[4] = buffer[9*jj+4]; cells[local_extra + jj].speeds[5] = buffer[9*jj+5]; cells[local_extra + jj].speeds[6] = buffer[9*jj+6]; cells[local_extra + jj].speeds[7] = buffer[9*jj+7]; cells[local_extra + jj].speeds[8] = buffer[9*jj+8]; } } } free(buffer); buffer = NULL; } MPI_Finalize(); /*Finilize MPI*/ if(rank == 0) { printf("==done==\n"); printf("Reynolds number:\t\t%.12E\n",calc_reynolds(params,cells,obstacles,av_vels[params.max_iters-1])); printf("Elapsed time:\t\t\t%.6lf (s)\n", toc-tic); printf("Elapsed user CPU time:\t\t%.6lf (s)\n", usrtim); printf("Elapsed system CPU time:\t%.6lf (s)\n", systim); write_values(params,cells,obstacles,av_vels); finalise(¶ms, &cells, &tmp_cells, &obstacles, &av_vels, &accelgrid, &rowsSetup); } return EXIT_SUCCESS; }
/* main routine */ int main(int argc, char *argv[]){ FILELog::ReportingLevel() = logINFO; VARIABLES vars; vars = commandline_input(argc, argv); clock_t init, final; int number_of_particles = vars.num; double timestep = vars.dt; bool use_T2 = vars.use_T2; // = false (no T2 decay), = true (T2 decay) int num_of_repeat = vars.num_of_repeat; //Number of times to repeat simulation. For every repeat all data is flushed and we start the simulation again. int number_of_timesteps; number_of_timesteps = (int)ceil(vars.gs/timestep); double permeability = 0.0; double radius = .00535; double d = sqrt( 2.0*PI*radius*radius/( sqrt(3.0)*.79 ) ); //double lattice_size = 0.00601922026995422789215053328651; double grad_duration = 4.5; double D_extra = 2.5E-6; double D_intra = 1.0E-6; double T2_e = 200; double T2_i = 200; FILE_LOG(logINFO) << "f = " << 2.0*PI*radius*radius/(sqrt(3.0)*d*d) << std::endl; Vector3 xhat(1.0,0.0,0.0); Vector3 yhat(0.0,1.0,0.0); Vector3 zhat(0.0,0.0,1.0); Lattice<Cylinder_XY> lattice(D_extra, T2_e, permeability); lattice.setLatticeVectors(d,2.0*d*0.86602540378443864676372317075294,d,xhat,yhat,zhat); lattice.addBasis(Cylinder_XY(d/2.0, 0.0, radius, T2_i, D_intra, 1)); lattice.addBasis(Cylinder_XY(0.0, d*0.86602540378443864676372317075294, radius, T2_i, D_intra, 2)); lattice.addBasis(Cylinder_XY(d/2.0, 2.0*d*0.86602540378443864676372317075294, radius, T2_i, D_intra, 3)); lattice.addBasis(Cylinder_XY(d, d*0.86602540378443864676372317075294, radius, T2_i, D_intra, 4)); double gspacings [] = { 8.0 , 10.5 , 14.0 , 18.5 , 24.5 , 32.5 , 42.5 , 56.5 }; double bvals [] = {108780, 154720, 219040, 301730, 411980, 558990, 742420, 1000000 }; double G [9]; for (int kk = 0; kk < num_of_repeat; kk++) { vector<PGSE> measurements_x; vector<PGSE> measurements_y; vector<PGSE> measurements_z; for (int i = 0; i < 8; i++){ double echo_time = 2.0*grad_duration + gspacings[i]; G[0] = 0.0; G[i+1] = sqrt(bvals[i]/(GAMMA*GAMMA*grad_duration*grad_duration*(grad_duration + gspacings[i] - (grad_duration/3.0)))); measurements_x.push_back(PGSE(grad_duration,gspacings[i], timestep, G[0], echo_time, number_of_particles, xhat)); measurements_y.push_back(PGSE(grad_duration,gspacings[i], timestep, G[0], echo_time, number_of_particles, yhat)); measurements_z.push_back(PGSE(grad_duration,gspacings[i], timestep, G[0], echo_time, number_of_particles, zhat)); measurements_x.push_back(PGSE(grad_duration,gspacings[i], timestep, G[i+1], echo_time, number_of_particles, xhat)); measurements_y.push_back(PGSE(grad_duration,gspacings[i], timestep, G[i+1], echo_time, number_of_particles, yhat)); measurements_z.push_back(PGSE(grad_duration,gspacings[i], timestep, G[i+1], echo_time, number_of_particles, zhat)); } vector<double> lnsignal(2); vector<double> b(2); cout << " trial = " << kk << endl; Particles ensemble(number_of_particles,timestep, use_T2); lattice.initializeUniformly(ensemble.getGenerator() , ensemble.getEnsemble() ); for (int k = 0; k < measurements_x.size();k++){ measurements_x[k].updatePhase(ensemble.getEnsemble(), 0.0); measurements_y[k].updatePhase(ensemble.getEnsemble(), 0.0); measurements_z[k].updatePhase(ensemble.getEnsemble(), 0.0); } for (int i = 1; i <= number_of_timesteps; i++){ ensemble.updateposition(lattice); for (int k = 0; k < measurements_x.size();k++){ measurements_x[k].updatePhase(ensemble.getEnsemble(), i*timestep); measurements_y[k].updatePhase(ensemble.getEnsemble(), i*timestep); measurements_z[k].updatePhase(ensemble.getEnsemble(), i*timestep); } } for (int i = 0; i < 8*2; i+=2){ double ADCx, ADCy, 
ADCz, diff_time; lnsignal[0] = log(measurements_x[i].get_signal()); b[0] = measurements_x[i].get_b(); lnsignal[1] = log(measurements_x[i+1].get_signal()); b[1] = measurements_x[i+1].get_b(); ADCx = -1.0*linear_regression(lnsignal,b); //std::cout << b[0] << " " << lnsignal[0] << " " << b[1] << " " << lnsignal[1] << " "; lnsignal[0] = log(measurements_y[i].get_signal()); b[0] = measurements_y[i].get_b(); lnsignal[1] = log(measurements_y[i+1].get_signal()); b[1] = measurements_y[i+1].get_b(); ADCy = -1.0*linear_regression(lnsignal,b); //std::cout << b[0] << " " << lnsignal[0] << " " << b[1] << " " << lnsignal[1] << " "; lnsignal[0] = log(measurements_z[i].get_signal()); b[0] = measurements_z[i].get_b(); lnsignal[1] = log(measurements_z[i+1].get_signal()); b[1] = measurements_z[i+1].get_b(); ADCz = -1.0*linear_regression(lnsignal,b); //std::cout << b[0] << " " << lnsignal[0] << " " << b[1] << " " << lnsignal[1] << std::endl; diff_time = measurements_x[i].get_DT(); std::cout << i << " " << diff_time << " " << ADCx << " " << ADCy << " " << ADCz << " " << b[1] << " " << G[1] << std::endl; } } final= clock()-init; //final time - intial time
void runGenTest(RunParameters &r) { // Define variables Vector J, expJ; std::vector<int> cons; std::vector<double> weight; if (r.useGI) { epsilonP2_ptr=&epsilonP2_GI; epsilonC_ptr=&epsilonC_GI; getMaxError_ptr=&getMaxError_GI; } else { epsilonP2_ptr=&epsilonP2; epsilonC_ptr=&epsilonC; getMaxError_ptr=&getMaxError; } // Get reference sequence from file FILE *consIn = fopen(r.getConsensusInfile().c_str(),"r"); if (consIn!=NULL) getConsensus(consIn,cons); else { printf("Error reading input from file %s\n\n",r.getConsensusInfile().c_str()); exit(1); } fclose(consIn); if (r.useVerbose) { printf("Reference sequence: "); for (int i=0;i<cons.size();i++) printf(" %d",cons[i]); printf("\n\n"); } // Retrieve couplings from file FILE *dataIn=fopen(r.getInfile().c_str(),"r"); if (dataIn!=NULL) getCouplings(dataIn,J); else { printf("Error reading input from file %s",r.getInfile().c_str()); exit(1); } fclose(dataIn); // Resize expJ for (int i=0;i<J.size();i++) expJ.push_back(std::vector<double>(J[i].size(),0)); for (int i=0;i<J.size();i++) { for (int j=0;j<J[i].size();j++) expJ[i][j] = exp(J[i][j]); } // Declare 2-point correlations, 3-point correlations, P(k) and magnetisations bool ThreePoints = (r.p3red || r.p3); int N = sizetolength(J.size()); // System size double alpha = 0.01; // Field regularization multiplier double gamma = 0; // Regularization strength (L2, set below) if (r.useGamma) { if (r.gamma==0) gamma=1/(r.sampleB); else gamma=r.gamma; } Vector p(J.size(),std::vector<double>()); // MC magnetisations and 2-point correlations Vector cc(J.size(),std::vector<double>()); // MC connected 2-point correlations Vector q(J.size(),std::vector<double>()); // MSA magnetisations and 2-point correlations Vector qcc(J.size(),std::vector<double>()); // MSA connected 2-point correlations std::vector<std::vector<std::vector<std::vector<double> > > > p3(N); // MC 3-point correlations std::vector<std::vector<std::vector<std::vector<double> > > > c3(N); // MC connected 3-point correlations std::vector<std::vector<std::vector<std::vector<double> > > > q3(N); // MSA 3-point correlations std::vector<std::vector<std::vector<std::vector<double> > > > qc3(N); // MSA connected 3-point correlations std::vector<double> pk(N+1,0); // MC mutation probability std::vector<double> qk(N+1,0); // MSA mutation probability std::vector<double> absErr(2,0); // Absolute errors on magnetisation and 2-point correlations for (int i=0;i<J.size();i++) { cc[i].resize(J[i].size(),0); p[i].resize(J[i].size(),0); qcc[i].resize(J[i].size(),0); q[i].resize(J[i].size(),0); } if (ThreePoints) { for (int i=0;i<N;i++) { p3[i].resize(N); c3[i].resize(N); q3[i].resize(N); qc3[i].resize(N); for (int j=0;j<N;j++) { p3[i][j].resize(N); c3[i][j].resize(N); q3[i][j].resize(N); qc3[i][j].resize(N); for (int k=0;k<N;k++) { p3[i][j][k].resize(p[i].size()*p[j].size()*p[k].size(),0); c3[i][j][k].resize(p[i].size()*p[j].size()*p[k].size(),0); q3[i][j][k].resize(p[i].size()*p[j].size()*p[k].size(),0); qc3[i][j][k].resize(p[i].size()*p[j].size()*p[k].size(),0); } } } } // Get sequences from MSA file and compute correlations FILE *alIn=fopen(r.getInfileAl().c_str(),"r"); FILE *weightIn=fopen(r.getWeights().c_str(),"r"); if (alIn!=NULL){ if (ThreePoints) getAlignment(alIn,weightIn,J,q,q3,qk,cons); else getAlignment(alIn,weightIn,J,q,qk,cons); } else { printf("Error reading input from file %s\n\n",r.getInfileAl().c_str()); exit(1); } fclose(alIn); if (weightIn!=NULL) fclose(weightIn); if (r.useVerbose) printf("Got N=%d, len(h[0])=%d\n",N,(int)J[0].size()); // Get 
default starting configuration, if nontrivial std::vector<int> lattice(N); if (r.useStart) { FILE *startIn=fopen(r.getStartInfile().c_str(),"r"); for (int i=0;i<N;i++) fscanf(startIn,"%d",&lattice[i]); } else { for (int i=0;i<N;i++) lattice[i]=(int) p[i].size(); } // Prepare to simulate srand((unsigned)time(0)); // Run MC and get correlations if (ThreePoints) getErrorGenTest(J, expJ, r.sampleB, r.b, r.runs, p, lattice, pk, p3, cons); // compute errors on P P2 and MAX else getErrorGenTest(J, expJ, r.sampleB, r.b, r.runs, p, lattice, pk, cons); // compute errors on P P2 and MAX //Compute connected correlations double Neff = 0; double NJeff = 0; // estimate the threshold for correlations to print out double meanq = 0; for (int i=0;i<lattice.size();i++) { for (int a=0;a<p[i].size();a++) { Neff++; meanq+=q[i][a]; absErr[0] += (p[i][a] - q[i][a]) * (p[i][a] - q[i][a]); for (int j=i+1;j<lattice.size();j++) { for (int b=0;b<p[j].size();b++) { NJeff++; int idx = index(i,j,lattice.size()); int sab = sindex(a,b,J[i].size(),J[j].size()); absErr[1] += (p[idx][sab] - q[idx][sab]) * (p[idx][sab] - q[idx][sab]); cc[idx][sab] = p[idx][sab] - (p[i][a] * p[j][b]); qcc[idx][sab] = q[idx][sab] - (q[i][a] * q[j][b]); if (ThreePoints) { for (int k=j+1;k<lattice.size();k++) { for (int c=0;c<p[k].size();c++) { int ijx = idx; int ikx = index(i,k,lattice.size()); int jkx = index(j,k,lattice.size()); int sac = sindex(a,c,J[i].size(),J[k].size()); int sbc = sindex(b,c,J[j].size(),J[k].size()); int sabc = sindex3(a,b,c,J[i].size(),J[j].size(),J[k].size()); c3[i][j][k][sabc] = p3[i][j][k][sabc] - (p[i][a]*p[jkx][sbc]) - (p[j][b]*p[ikx][sac]) - (p[k][c]*p[ijx][sab]) + (2*(p[i][a]*p[j][b]*p[k][c])); qc3[i][j][k][sabc] = q3[i][j][k][sabc] - (q[i][a]*q[jkx][sbc]) - (q[j][b]*q[ikx][sac]) - (q[k][c]*q[ijx][sab]) + (2*(q[i][a]*q[j][b]*q[k][c])); } } } } } } } absErr[0] = sqrt(absErr[0]/Neff); absErr[1] = sqrt(absErr[1]/NJeff); meanq=meanq/Neff; // Print out errors double maxPrecision=1/(r.sampleB); double ep1 = epsilonP(q, p, N, maxPrecision, J, gamma, alpha); double ep2 = (*epsilonP2_ptr)(q, p, N, maxPrecision, J, gamma); double em = (*getMaxError_ptr)( q, p, maxPrecision, J, gamma, alpha); printf("\nRelative errors: P %f, P2 %f MAX %f gamma %f\n",ep1,ep2,em,gamma); printf("Absolute errors: P %f, P2 %f \n\n",absErr[0],absErr[1]); //Print results for comparison FILE *mOut = fopen(r.getMOutfile().c_str(),"w"); FILE *pOut = fopen(r.getP2Outfile().c_str(),"w"); FILE *ccOut = fopen(r.getCCOutfile().c_str(),"w"); FILE *pkOut = fopen(r.getPKOutfile().c_str(),"w"); printMagnetisations(mOut, q, p); double num=0; printCorrelations(ccOut, qcc, cc,pOut, q, p); if (ThreePoints){ FILE *p3Out = fopen(r.getP3Outfile().c_str(),"w"); FILE *c3Out = fopen(r.getC3Outfile().c_str(),"w"); if (r.p3red) num=meanq*meanq*meanq; if (r.p3) num=0; print3points(c3Out, qc3, c3,p3Out, q3, p3, num); } for (int sit=0;sit<N;sit++) fprintf(pkOut,"%d %le %le\n",sit,qk[sit],pk[sit]); fflush(pkOut); }
void BinomialVanillaEngine<T>::calculate() const { DayCounter rfdc = process_->riskFreeRate()->dayCounter(); DayCounter divdc = process_->dividendYield()->dayCounter(); DayCounter voldc = process_->blackVolatility()->dayCounter(); Calendar volcal = process_->blackVolatility()->calendar(); Real s0 = process_->stateVariable()->value(); QL_REQUIRE(s0 > 0.0, "negative or null underlying given"); Volatility v = process_->blackVolatility()->blackVol( arguments_.exercise->lastDate(), s0); Date maturityDate = arguments_.exercise->lastDate(); Rate r = process_->riskFreeRate()->zeroRate(maturityDate, rfdc, Continuous, NoFrequency); Rate q = process_->dividendYield()->zeroRate(maturityDate, divdc, Continuous, NoFrequency); Date referenceDate = process_->riskFreeRate()->referenceDate(); // binomial trees with constant coefficient Handle<YieldTermStructure> flatRiskFree( boost::shared_ptr<YieldTermStructure>( new FlatForward(referenceDate, r, rfdc))); Handle<YieldTermStructure> flatDividends( boost::shared_ptr<YieldTermStructure>( new FlatForward(referenceDate, q, divdc))); Handle<BlackVolTermStructure> flatVol( boost::shared_ptr<BlackVolTermStructure>( new BlackConstantVol(referenceDate, volcal, v, voldc))); boost::shared_ptr<PlainVanillaPayoff> payoff = boost::dynamic_pointer_cast<PlainVanillaPayoff>(arguments_.payoff); QL_REQUIRE(payoff, "non-plain payoff given"); Time maturity = rfdc.yearFraction(referenceDate, maturityDate); boost::shared_ptr<StochasticProcess1D> bs( new GeneralizedBlackScholesProcess( process_->stateVariable(), flatDividends, flatRiskFree, flatVol)); TimeGrid grid(maturity, timeSteps_); boost::shared_ptr<T> tree(new T(bs, maturity, timeSteps_, payoff->strike())); boost::shared_ptr<BlackScholesLattice<T> > lattice( new BlackScholesLattice<T>(tree, r, maturity, timeSteps_)); DiscretizedVanillaOption option(arguments_, *process_, grid); option.initialize(lattice, maturity); // Partial derivatives calculated from various points in the // binomial tree (Odegaard) // Rollback to third-last step, and get underlying price (s2) & // option values (p2) at this point option.rollback(grid[2]); Array va2(option.values()); QL_ENSURE(va2.size() == 3, "Expect 3 nodes in grid at second step"); Real p2h = va2[2]; // high-price Real s2 = lattice->underlying(2, 2); // high price // Rollback to second-last step, and get option value (p1) at // this point option.rollback(grid[1]); Array va(option.values()); QL_ENSURE(va.size() == 2, "Expect 2 nodes in grid at first step"); Real p1 = va[1]; // Finally, rollback to t=0 option.rollback(0.0); Real p0 = option.presentValue(); Real s1 = lattice->underlying(1, 1); // Calculate partial derivatives Real delta0 = (p1-p0)/(s1-s0); // dp/ds Real delta1 = (p2h-p1)/(s2-s1); // dp/ds // Store results results_.value = p0; results_.delta = delta0; results_.gamma = 2.0*(delta1-delta0)/(s2-s0); //d(delta)/ds results_.theta = blackScholesTheta(process_, results_.value, results_.delta, results_.gamma); }
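// A minimal usage sketch for the engine above, assuming the standard QuantLib
// setup types (quotes, flat curves, a Black-Scholes process) and the boost
// smart-pointer style used in the source. The market values and dates are
// illustrative only, not taken from the source.
#include <ql/quantlib.hpp>
#include <iostream>
using namespace QuantLib;

int main() {
    Date today(15, May, 2015);
    Settings::instance().evaluationDate() = today;
    DayCounter dc = Actual365Fixed();

    Handle<Quote> spot(boost::shared_ptr<Quote>(new SimpleQuote(100.0)));
    Handle<YieldTermStructure> rTS(boost::shared_ptr<YieldTermStructure>(
        new FlatForward(today, 0.05, dc)));
    Handle<YieldTermStructure> qTS(boost::shared_ptr<YieldTermStructure>(
        new FlatForward(today, 0.01, dc)));
    Handle<BlackVolTermStructure> volTS(boost::shared_ptr<BlackVolTermStructure>(
        new BlackConstantVol(today, TARGET(), 0.20, dc)));

    boost::shared_ptr<GeneralizedBlackScholesProcess> process(
        new GeneralizedBlackScholesProcess(spot, qTS, rTS, volTS));

    boost::shared_ptr<StrikedTypePayoff> payoff(
        new PlainVanillaPayoff(Option::Call, 100.0));
    boost::shared_ptr<Exercise> exercise(
        new EuropeanExercise(today + Period(1, Years)));
    VanillaOption option(payoff, exercise);

    // 200 binomial steps with a Cox-Ross-Rubinstein tree.
    option.setPricingEngine(boost::shared_ptr<PricingEngine>(
        new BinomialVanillaEngine<CoxRossRubinstein>(process, 200)));

    std::cout << "NPV   = " << option.NPV()   << "\n"
              << "delta = " << option.delta() << "\n"
              << "gamma = " << option.gamma() << std::endl;
    return 0;
}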
void PWOrbitalBuilder::transform2GridData(PWBasis::GIndex_t& nG, int spinIndex, PWOrbitalSet& pwFunc) { ostringstream splineTag; splineTag << "eigenstates_"<<nG[0]<<"_"<<nG[1]<<"_"<<nG[2]; herr_t status = H5Eset_auto(NULL, NULL); app_log() << " splineTag " << splineTag.str() << endl; hid_t es_grp_id; status = H5Gget_objinfo (hfileID, splineTag.str().c_str(), 0, NULL); if(status) { es_grp_id = H5Gcreate(hfileID,splineTag.str().c_str(),0); HDFAttribIO<PWBasis::GIndex_t> t(nG); t.write(es_grp_id,"grid"); } else { es_grp_id = H5Gopen(hfileID,splineTag.str().c_str()); } string tname=myParam->getTwistName(); hid_t twist_grp_id; status = H5Gget_objinfo (es_grp_id, tname.c_str(), 0, NULL); if(status) twist_grp_id = H5Gcreate(es_grp_id,tname.c_str(),0); else twist_grp_id = H5Gopen(es_grp_id,tname.c_str()); HDFAttribIO<PosType> hdfobj_twist(TwistAngle); hdfobj_twist.write(twist_grp_id,"twist_angle"); ParticleSet::ParticleLayout_t& lattice(targetPtcl.Lattice); RealType dx=1.0/static_cast<RealType>(nG[0]-1); RealType dy=1.0/static_cast<RealType>(nG[1]-1); RealType dz=1.0/static_cast<RealType>(nG[2]-1); #if defined(VERYTINYMEMORY) typedef Array<ValueType,3> StorageType; StorageType inData(nG[0],nG[1],nG[2]); int ib=0; while(ib<myParam->numBands) { string bname(myParam->getBandName(ib)); status = H5Gget_objinfo (twist_grp_id, bname.c_str(), 0, NULL); hid_t band_grp_id, spin_grp_id=-1; if(status) { band_grp_id = H5Gcreate(twist_grp_id,bname.c_str(),0); } else { band_grp_id = H5Gopen(twist_grp_id,bname.c_str()); } hid_t parent_id=band_grp_id; if(myParam->hasSpin) { bname=myParam->getSpinName(spinIndex); status = H5Gget_objinfo (band_grp_id, bname.c_str(), 0, NULL); if(status) { spin_grp_id = H5Gcreate(band_grp_id,bname.c_str(),0); } else { spin_grp_id = H5Gopen(band_grp_id,bname.c_str()); } parent_id=spin_grp_id; } for(int ig=0; ig<nG[0]; ig++) { RealType x=ig*dx; for(int jg=0; jg<nG[1]; jg++) { RealType y=jg*dy; for(int kg=0; kg<nG[2]; kg++) { inData(ig,jg,kg)= pwFunc.evaluate(ib,lattice.toCart(PosType(x,y,kg*dz))); } } } app_log() << " Add spline data " << ib << " h5path=" << tname << "/eigvector" << endl; HDFAttribIO<StorageType> t(inData); t.write(parent_id,myParam->eigvecTag.c_str()); if(spin_grp_id>=0) H5Gclose(spin_grp_id); H5Gclose(band_grp_id); ++ib; } #else typedef Array<ValueType,3> StorageType; vector<StorageType*> inData; int nb=myParam->numBands; for(int ib=0; ib<nb; ib++) inData.push_back(new StorageType(nG[0],nG[1],nG[2])); PosType tAngle=targetPtcl.Lattice.k_cart(TwistAngle); PWOrbitalSet::ValueVector_t phi(nb); for(int ig=0; ig<nG[0]; ig++) { RealType x=ig*dx; for(int jg=0; jg<nG[1]; jg++) { RealType y=jg*dy; for(int kg=0; kg<nG[2]; kg++) { targetPtcl.R[0]=lattice.toCart(PosType(x,y,kg*dz)); pwFunc.evaluate(targetPtcl,0,phi); RealType x(dot(targetPtcl.R[0],tAngle)); ValueType phase(std::cos(x),-std::sin(x)); for(int ib=0; ib<nb; ib++) (*inData[ib])(ig,jg,kg)=phase*phi[ib]; } } } for(int ib=0; ib<nb; ib++) { string bname(myParam->getBandName(ib)); status = H5Gget_objinfo (twist_grp_id, bname.c_str(), 0, NULL); hid_t band_grp_id, spin_grp_id=-1; if(status) { band_grp_id = H5Gcreate(twist_grp_id,bname.c_str(),0); } else { band_grp_id = H5Gopen(twist_grp_id,bname.c_str()); } hid_t parent_id=band_grp_id; if(myParam->hasSpin) { bname=myParam->getSpinName(spinIndex); status = H5Gget_objinfo (band_grp_id, bname.c_str(), 0, NULL); if(status) { spin_grp_id = H5Gcreate(band_grp_id,bname.c_str(),0); } else { spin_grp_id = H5Gopen(band_grp_id,bname.c_str()); } parent_id=spin_grp_id; } app_log() << " 
Add spline data " << ib << " h5path=" << tname << "/eigvector" << endl; HDFAttribIO<StorageType> t(*(inData[ib])); t.write(parent_id,myParam->eigvecTag.c_str()); if(spin_grp_id>=0) H5Gclose(spin_grp_id); H5Gclose(band_grp_id); } for(int ib=0; ib<nb; ib++) delete inData[ib]; #endif H5Gclose(twist_grp_id); H5Gclose(es_grp_id); }
void AlgTcharge::run() { Lattice& lattice( AlgLattice() ); Float tmat[nfunc][nfunc]; for (int f1(0);f1<nfunc;f1++) for (int f2(0);f2<nfunc;f2++) tmat[f1][f2] = 0; // sum over lattice Site nloop; while ( nloop.LoopsOverNode() ) { // Array of imaginary parts of the plaquettes // at a given site // plaqs[0] = F_01 // plaqs[1] = F_02 // plaqs[2] = F_03 // plaqs[3] = F_12 // plaqs[4] = F_13 // plaqs[5] = F_23 Matrix plaqs[nfunc][6]; // // fill plaqs with the full plaquettes // - then zero the real parts // int mu; int nu; int index(0); for (mu=0;mu<3;++mu) { for (nu=mu+1;nu<4;nu++) { for (int f(0);f<nfunc;f++) { (*(leaf_map[f]))( lattice, plaqs[f][index], nloop.pos(), mu, nu ); ZeroReal(plaqs[f][index]); } index++; } } for (int f1(0);f1<nfunc;f1++) { for (int f2(f1);f2<nfunc;f2++) { tmat[f1][f2] += MkTop(plaqs[f1],plaqs[f2]).real(); } } } // global sum the approximations for (int f1(0);f1<nfunc;f1++) { for (int f2(f1);f2<nfunc;f2++) { glb_sum( &tmat[f1][f2] ); } } // Print out results //---------------------------------------------------------------- if(common_arg->filename != 0) { char *fname = "alg_tcharge()"; FILE *fp; if( (fp = Fopen(common_arg->filename, "a")) == NULL ) { ERR.FileA(cname,fname,common_arg->filename); } Fprintf(fp,"AlgTcharge:\n"); Fprintf(fp,"nleaf : %i\n",nfunc); for (int f(0);f<nfunc;f++) Fprintf(fp," %i : %s\n",f,names[f]); for (int f1(0);f1<nfunc;f1++) { for (int f2(f1);f2<nfunc;f2++) { Fprintf(fp,"%i %i : %15e\n",f1,f2,tmat[f1][f2]); } } Fclose(fp); } }
void BinomialBarrierEngine<T,D>::calculate() const { DayCounter rfdc = process_->riskFreeRate()->dayCounter(); DayCounter divdc = process_->dividendYield()->dayCounter(); DayCounter voldc = process_->blackVolatility()->dayCounter(); Calendar volcal = process_->blackVolatility()->calendar(); Real s0 = process_->stateVariable()->value(); QL_REQUIRE(s0 > 0.0, "negative or null underlying given"); Volatility v = process_->blackVolatility()->blackVol( arguments_.exercise->lastDate(), s0); Date maturityDate = arguments_.exercise->lastDate(); Rate r = process_->riskFreeRate()->zeroRate(maturityDate, rfdc, Continuous, NoFrequency); Rate q = process_->dividendYield()->zeroRate(maturityDate, divdc, Continuous, NoFrequency); Date referenceDate = process_->riskFreeRate()->referenceDate(); // binomial trees with constant coefficient Handle<YieldTermStructure> flatRiskFree( boost::shared_ptr<YieldTermStructure>( new FlatForward(referenceDate, r, rfdc))); Handle<YieldTermStructure> flatDividends( boost::shared_ptr<YieldTermStructure>( new FlatForward(referenceDate, q, divdc))); Handle<BlackVolTermStructure> flatVol( boost::shared_ptr<BlackVolTermStructure>( new BlackConstantVol(referenceDate, volcal, v, voldc))); boost::shared_ptr<StrikedTypePayoff> payoff = boost::dynamic_pointer_cast<StrikedTypePayoff>(arguments_.payoff); QL_REQUIRE(payoff, "non-striked payoff given"); Time maturity = rfdc.yearFraction(referenceDate, maturityDate); boost::shared_ptr<StochasticProcess1D> bs( new GeneralizedBlackScholesProcess( process_->stateVariable(), flatDividends, flatRiskFree, flatVol)); // correct timesteps to ensure a (local) minimum, using Boyle and Lau // approach. See Journal of Derivatives, 1/1994, // "Bumping up against the barrier with the binomial method" // Note: this approach works only for CoxRossRubinstein lattices, so // is disabled if T is not a CoxRossRubinstein or derived from it. 
Size optimum_steps = timeSteps_; if (boost::is_base_of<CoxRossRubinstein, T>::value && maxTimeSteps_ > timeSteps_ && s0 > 0 && arguments_.barrier > 0) { Real divisor; if (s0 > arguments_.barrier) divisor = std::pow(std::log(s0 / arguments_.barrier), 2); else divisor = std::pow(std::log(arguments_.barrier / s0), 2); if (!close(divisor,0)) { for (Size i=1; i < timeSteps_ ; ++i) { Size optimum = Size(( i*i * v*v * maturity) / divisor); if (timeSteps_ < optimum) { optimum_steps = optimum; break; // found first minimum with iterations>=timesteps } } } if (optimum_steps > maxTimeSteps_) optimum_steps = maxTimeSteps_; // too high, limit } TimeGrid grid(maturity, optimum_steps); boost::shared_ptr<T> tree(new T(bs, maturity, optimum_steps, payoff->strike())); boost::shared_ptr<BlackScholesLattice<T> > lattice( new BlackScholesLattice<T>(tree, r, maturity, optimum_steps)); D option(arguments_, *process_, grid); option.initialize(lattice, maturity); // Partial derivatives calculated from various points in the // binomial tree // (see J.C.Hull, "Options, Futures and other derivatives", 6th edition, pp 397/398) // Rollback to third-last step, and get underlying prices (s2) & // option values (p2) at this point option.rollback(grid[2]); Array va2(option.values()); QL_ENSURE(va2.size() == 3, "Expect 3 nodes in grid at second step"); Real p2u = va2[2]; // up Real p2m = va2[1]; // mid Real p2d = va2[0]; // down (low) Real s2u = lattice->underlying(2, 2); // up price Real s2m = lattice->underlying(2, 1); // middle price Real s2d = lattice->underlying(2, 0); // down (low) price // calculate gamma by taking the first derivate of the two deltas Real delta2u = (p2u - p2m)/(s2u-s2m); Real delta2d = (p2m-p2d)/(s2m-s2d); Real gamma = (delta2u - delta2d) / ((s2u-s2d)/2); // Rollback to second-last step, and get option values (p1) at // this point option.rollback(grid[1]); Array va(option.values()); QL_ENSURE(va.size() == 2, "Expect 2 nodes in grid at first step"); Real p1u = va[1]; Real p1d = va[0]; Real s1u = lattice->underlying(1, 1); // up (high) price Real s1d = lattice->underlying(1, 0); // down (low) price Real delta = (p1u - p1d) / (s1u - s1d); // Finally, rollback to t=0 option.rollback(0.0); Real p0 = option.presentValue(); // Store results results_.value = p0; results_.delta = delta; results_.gamma = gamma; // theta can be approximated by calculating the numerical derivative // between mid value at third-last step and at t0. The underlying price // is the same, only time varies. results_.theta = (p2m - p0) / grid[2]; }