void _initQuda() {
  if( quda_initialized )
    return;

  if( g_debug_level > 0 )
    if(g_proc_id == 0)
      printf("\n# QUDA: Detected QUDA version %d.%d.%d\n\n",
             QUDA_VERSION_MAJOR, QUDA_VERSION_MINOR, QUDA_VERSION_SUBMINOR);

  if( QUDA_VERSION_MAJOR == 0 && QUDA_VERSION_MINOR < 7) {
    fprintf(stderr, "Error: minimum QUDA version required is 0.7.0 (for support of chiral basis and removal of bug in mass normalization with preconditioning).\n");
    exit(-2);
  }

  gauge_param = newQudaGaugeParam();
  inv_param = newQudaInvertParam();

  // *** QUDA parameters begin here (sloppy prec. will be adjusted in invert)
  QudaPrecision cpu_prec = QUDA_DOUBLE_PRECISION;
  QudaPrecision cuda_prec = QUDA_DOUBLE_PRECISION;
  QudaPrecision cuda_prec_sloppy = QUDA_SINGLE_PRECISION;
  QudaPrecision cuda_prec_precondition = QUDA_HALF_PRECISION;
  QudaTune tune = QUDA_TUNE_YES;

  // *** the remainder should not be changed for this application

  // local lattice size
#if USE_LZ_LY_LX_T
  gauge_param.X[0] = LZ;
  gauge_param.X[1] = LY;
  gauge_param.X[2] = LX;
  gauge_param.X[3] = T;
#else
  gauge_param.X[0] = LX;
  gauge_param.X[1] = LY;
  gauge_param.X[2] = LZ;
  gauge_param.X[3] = T;
#endif

  inv_param.Ls = 1;

  gauge_param.anisotropy = 1.0;
  gauge_param.type = QUDA_WILSON_LINKS;
  gauge_param.gauge_order = QUDA_QDP_GAUGE_ORDER;

  gauge_param.cpu_prec = cpu_prec;
  gauge_param.cuda_prec = cuda_prec;
  gauge_param.reconstruct = 18;
  gauge_param.cuda_prec_sloppy = cuda_prec_sloppy;
  gauge_param.reconstruct_sloppy = 18;
  gauge_param.cuda_prec_precondition = cuda_prec_precondition;
  gauge_param.reconstruct_precondition = 18;
  gauge_param.gauge_fix = QUDA_GAUGE_FIXED_NO;

  inv_param.dagger = QUDA_DAG_NO;
  inv_param.mass_normalization = QUDA_KAPPA_NORMALIZATION;
  inv_param.solver_normalization = QUDA_DEFAULT_NORMALIZATION;

  inv_param.pipeline = 0;
  inv_param.gcrNkrylov = 10;

  // require both L2 relative and heavy quark residual to determine convergence
  // inv_param.residual_type = (QudaResidualType)(QUDA_L2_RELATIVE_RESIDUAL | QUDA_HEAVY_QUARK_RESIDUAL);
  inv_param.tol_hq = 1.0; //1e-3; // specify a tolerance for the residual for heavy quark residual
  inv_param.reliable_delta = 1e-2; // ignored by multi-shift solver

  // domain decomposition preconditioner parameters
  inv_param.inv_type_precondition = QUDA_CG_INVERTER;
  inv_param.schwarz_type = QUDA_ADDITIVE_SCHWARZ;
  inv_param.precondition_cycle = 1;
  inv_param.tol_precondition = 1e-1;
  inv_param.maxiter_precondition = 10;
  inv_param.verbosity_precondition = QUDA_SILENT;
  inv_param.cuda_prec_precondition = cuda_prec_precondition;
  inv_param.omega = 1.0;

  inv_param.cpu_prec = cpu_prec;
  inv_param.cuda_prec = cuda_prec;
  inv_param.cuda_prec_sloppy = cuda_prec_sloppy;

  inv_param.clover_cpu_prec = cpu_prec;
  inv_param.clover_cuda_prec = cuda_prec;
  inv_param.clover_cuda_prec_sloppy = cuda_prec_sloppy;
  inv_param.clover_cuda_prec_precondition = cuda_prec_precondition;

  inv_param.preserve_source = QUDA_PRESERVE_SOURCE_YES;
  inv_param.gamma_basis = QUDA_CHIRAL_GAMMA_BASIS;
  inv_param.dirac_order = QUDA_DIRAC_ORDER;

  inv_param.input_location = QUDA_CPU_FIELD_LOCATION;
  inv_param.output_location = QUDA_CPU_FIELD_LOCATION;

  inv_param.tune = tune ? QUDA_TUNE_YES : QUDA_TUNE_NO;

  gauge_param.ga_pad = 0; // 24*24*24/2;
  inv_param.sp_pad = 0;   // 24*24*24/2;
  inv_param.cl_pad = 0;   // 24*24*24/2;

  // For multi-GPU, ga_pad must be large enough to store a time-slice
  int x_face_size = gauge_param.X[1]*gauge_param.X[2]*gauge_param.X[3]/2;
  int y_face_size = gauge_param.X[0]*gauge_param.X[2]*gauge_param.X[3]/2;
  int z_face_size = gauge_param.X[0]*gauge_param.X[1]*gauge_param.X[3]/2;
  int t_face_size = gauge_param.X[0]*gauge_param.X[1]*gauge_param.X[2]/2;
  int pad_size = MAX(x_face_size, y_face_size);
  pad_size = MAX(pad_size, z_face_size);
  pad_size = MAX(pad_size, t_face_size);
  gauge_param.ga_pad = pad_size;

  // solver verbosity
  if( g_debug_level == 0 )
    inv_param.verbosity = QUDA_SILENT;
  else if( g_debug_level == 1 )
    inv_param.verbosity = QUDA_SUMMARIZE;
  else
    inv_param.verbosity = QUDA_VERBOSE;

  // general verbosity
  setVerbosityQuda( QUDA_SUMMARIZE, "# QUDA: ", stdout);

  // declare the grid mapping used for communications in a multi-GPU grid
#if USE_LZ_LY_LX_T
  int grid[4] = {g_nproc_z, g_nproc_y, g_nproc_x, g_nproc_t};
#else
  int grid[4] = {g_nproc_x, g_nproc_y, g_nproc_z, g_nproc_t};
#endif

  initCommsGridQuda(4, grid, commsMap, NULL);

  // alloc gauge_quda
  size_t gSize = (gauge_param.cpu_prec == QUDA_DOUBLE_PRECISION) ? sizeof(double) : sizeof(float);

  for (int dir = 0; dir < 4; dir++) {
    gauge_quda[dir] = (double*) malloc(VOLUME*18*gSize);
    if(gauge_quda[dir] == NULL) {
      fprintf(stderr, "_initQuda: malloc for gauge_quda[dir] failed");
      exit(-2);
    }
  }

  // alloc space for a temp. spinor, used throughout this module
  tempSpinor = (double*)malloc( 2*VOLUME*24*sizeof(double) ); /* factor 2 for doublet */
  if(tempSpinor == NULL) {
    fprintf(stderr, "_initQuda: malloc for tempSpinor failed");
    exit(-2);
  }

  // initialize the QUDA library
#ifdef MPI
  initQuda(-1); // sets device numbers automatically
#else
  initQuda(0);  // scalar build: use device 0
#endif
  quda_initialized = 1;
}
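
// Illustrative sketch (not part of the original interface): the node-mapping
// function 'commsMap' handed to initCommsGridQuda() above is defined elsewhere
// in this module.  Assuming the QudaCommsMap signature from quda.h, a minimal
// lexicographic mapping could look like the function below; the 'fdata' argument
// is assumed to carry the process-grid dimensions (i.e. 'grid' would be passed
// instead of NULL).  The actual mapping must match the MPI Cartesian layout used
// by the rest of the code.
static int commsMap_sketch(const int *coords, void *fdata) {
  const int *dims = (const int *) fdata;  // process grid; coords[0] runs fastest
  int rank = coords[3];
  for (int i = 2; i >= 0; i--)
    rank = dims[i] * rank + coords[i];    // lexicographic rank
  return rank;
}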
int invert_test(void) {
  QudaGaugeParam gaugeParam = newQudaGaugeParam();
  QudaInvertParam inv_param = newQudaInvertParam();

  double mass = 0.5;

  set_params(&gaugeParam, &inv_param,
             xdim, ydim, zdim, tdim,
             cpu_prec, prec, prec_sloppy,
             link_recon, link_recon_sloppy, mass, tol, 500, 1e-3,
             0.8);

  // declare the dimensions of the communication grid
  initCommsGridQuda(4, gridsize_from_cmdline, NULL, NULL);

  // this must be before the FaceBuffer is created (this is because it allocates pinned memory - FIXME)
  initQuda(device);

  setDims(gaugeParam.X);
  setSpinorSiteSize(6);

  size_t gSize = (gaugeParam.cpu_prec == QUDA_DOUBLE_PRECISION) ? sizeof(double) : sizeof(float);

  for (int dir = 0; dir < 4; dir++) {
    qdp_fatlink[dir] = malloc(V*gaugeSiteSize*gSize);
    qdp_longlink[dir] = malloc(V*gaugeSiteSize*gSize);
  }
  fatlink = malloc(4*V*gaugeSiteSize*gSize);
  longlink = malloc(4*V*gaugeSiteSize*gSize);

  construct_fat_long_gauge_field(qdp_fatlink, qdp_longlink, 1, gaugeParam.cpu_prec, &gaugeParam);

  const double cos_pi_3 = 0.5;        // Cos(pi/3)
  const double sin_pi_3 = sqrt(0.75); // Sin(pi/3)
  for(int dir=0; dir<4; ++dir){
    for(int i=0; i<V; ++i){
      for(int j=0; j<gaugeSiteSize; ++j){
        if(gaugeParam.cpu_prec == QUDA_DOUBLE_PRECISION){
          ((double*)qdp_fatlink[dir])[i*gaugeSiteSize + j] = 0.5*rand()/RAND_MAX;
          if(link_recon != QUDA_RECONSTRUCT_8 && link_recon != QUDA_RECONSTRUCT_12){
            // incorporate non-trivial phase into long links
            if(j%2 == 0){
              const double real = ((double*)qdp_longlink[dir])[i*gaugeSiteSize + j];
              const double imag = ((double*)qdp_longlink[dir])[i*gaugeSiteSize + j + 1];
              ((double*)qdp_longlink[dir])[i*gaugeSiteSize + j]     = real*cos_pi_3 - imag*sin_pi_3;
              ((double*)qdp_longlink[dir])[i*gaugeSiteSize + j + 1] = real*sin_pi_3 + imag*cos_pi_3;
            }
          }
          ((double*)fatlink)[(i*4 + dir)*gaugeSiteSize + j]  = ((double*)qdp_fatlink[dir])[i*gaugeSiteSize + j];
          ((double*)longlink)[(i*4 + dir)*gaugeSiteSize + j] = ((double*)qdp_longlink[dir])[i*gaugeSiteSize + j];
        }else{
          ((float*)qdp_fatlink[dir])[i*gaugeSiteSize + j] = 0.5*rand()/RAND_MAX;
          if(link_recon != QUDA_RECONSTRUCT_8 && link_recon != QUDA_RECONSTRUCT_12){
            // incorporate non-trivial phase into long links
            if(j%2 == 0){
              const float real = ((float*)qdp_longlink[dir])[i*gaugeSiteSize + j];
              const float imag = ((float*)qdp_longlink[dir])[i*gaugeSiteSize + j + 1];
              ((float*)qdp_longlink[dir])[i*gaugeSiteSize + j]     = real*cos_pi_3 - imag*sin_pi_3;
              ((float*)qdp_longlink[dir])[i*gaugeSiteSize + j + 1] = real*sin_pi_3 + imag*cos_pi_3;
            }
          }
          ((float*)fatlink)[(i*4 + dir)*gaugeSiteSize + j]  = ((float*)qdp_fatlink[dir])[i*gaugeSiteSize + j];
          ((float*)longlink)[(i*4 + dir)*gaugeSiteSize + j] = ((float*)qdp_longlink[dir])[i*gaugeSiteSize + j];
        }
      }
    }
  }

  ColorSpinorParam csParam;
  csParam.nColor = 3;
  csParam.nSpin = 1;
  csParam.nDim = 4;
  for(int d = 0; d < 4; d++) {
    csParam.x[d] = gaugeParam.X[d];
  }
  csParam.x[0] /= 2;

  csParam.precision = inv_param.cpu_prec;
  csParam.pad = 0;
  csParam.siteSubset = QUDA_PARITY_SITE_SUBSET;
  csParam.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
  csParam.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
  csParam.gammaBasis = inv_param.gamma_basis;
  csParam.create = QUDA_ZERO_FIELD_CREATE;
  in  = new cpuColorSpinorField(csParam);
  out = new cpuColorSpinorField(csParam);
  ref = new cpuColorSpinorField(csParam);
  tmp = new cpuColorSpinorField(csParam);

  if (inv_param.cpu_prec == QUDA_SINGLE_PRECISION){
    constructSpinorField((float*)in->V());
  }else{
    constructSpinorField((double*)in->V());
  }

#ifdef MULTI_GPU
  int tmp_value = MAX(ydim*zdim*tdim/2, xdim*zdim*tdim/2);
  tmp_value = MAX(tmp_value, xdim*ydim*tdim/2);
  tmp_value = MAX(tmp_value, xdim*ydim*zdim/2);

  int fat_pad  = tmp_value;
  int link_pad = 3*tmp_value;

  gaugeParam.type = QUDA_ASQTAD_FAT_LINKS;
  gaugeParam.reconstruct = QUDA_RECONSTRUCT_NO;
  GaugeFieldParam cpuFatParam(fatlink, gaugeParam);
  cpuFat = new cpuGaugeField(cpuFatParam);
  ghost_fatlink = (void**)cpuFat->Ghost();

  gaugeParam.type = QUDA_ASQTAD_LONG_LINKS;
  GaugeFieldParam cpuLongParam(longlink, gaugeParam);
  cpuLong = new cpuGaugeField(cpuLongParam);
  ghost_longlink = (void**)cpuLong->Ghost();

  gaugeParam.type = QUDA_ASQTAD_FAT_LINKS;
  gaugeParam.ga_pad = fat_pad;
  gaugeParam.reconstruct = gaugeParam.reconstruct_sloppy = QUDA_RECONSTRUCT_NO;
  loadGaugeQuda(fatlink, &gaugeParam);

  gaugeParam.type = QUDA_ASQTAD_LONG_LINKS;
  gaugeParam.ga_pad = link_pad;
  gaugeParam.reconstruct = link_recon;
  gaugeParam.reconstruct_sloppy = link_recon_sloppy;
  loadGaugeQuda(longlink, &gaugeParam);
#else
  gaugeParam.type = QUDA_ASQTAD_FAT_LINKS;
  gaugeParam.reconstruct = gaugeParam.reconstruct_sloppy = QUDA_RECONSTRUCT_NO;
  loadGaugeQuda(fatlink, &gaugeParam);

  gaugeParam.type = QUDA_ASQTAD_LONG_LINKS;
  gaugeParam.reconstruct = link_recon;
  gaugeParam.reconstruct_sloppy = link_recon_sloppy;
  loadGaugeQuda(longlink, &gaugeParam);
#endif

  double time0 = -((double)clock()); // Start the timer

  double nrm2 = 0;
  double src2 = 0;
  int ret = 0;

  switch(test_type){
  case 0: // even
    inv_param.matpc_type = QUDA_MATPC_EVEN_EVEN;

    invertQuda(out->V(), in->V(), &inv_param);

    time0 += clock();
    time0 /= CLOCKS_PER_SEC;

#ifdef MULTI_GPU
    matdagmat_mg4dir(ref, qdp_fatlink, qdp_longlink, ghost_fatlink, ghost_longlink,
                     out, mass, 0, inv_param.cpu_prec, gaugeParam.cpu_prec, tmp, QUDA_EVEN_PARITY);
#else
    matdagmat(ref->V(), qdp_fatlink, qdp_longlink, out->V(), mass, 0,
              inv_param.cpu_prec, gaugeParam.cpu_prec, tmp->V(), QUDA_EVEN_PARITY);
#endif

    mxpy(in->V(), ref->V(), Vh*mySpinorSiteSize, inv_param.cpu_prec);
    nrm2 = norm_2(ref->V(), Vh*mySpinorSiteSize, inv_param.cpu_prec);
    src2 = norm_2(in->V(), Vh*mySpinorSiteSize, inv_param.cpu_prec);

    break;

  case 1: // odd
    inv_param.matpc_type = QUDA_MATPC_ODD_ODD;

    invertQuda(out->V(), in->V(), &inv_param);

    time0 += clock(); // stop the timer
    time0 /= CLOCKS_PER_SEC;

#ifdef MULTI_GPU
    matdagmat_mg4dir(ref, qdp_fatlink, qdp_longlink, ghost_fatlink, ghost_longlink,
                     out, mass, 0, inv_param.cpu_prec, gaugeParam.cpu_prec, tmp, QUDA_ODD_PARITY);
#else
    matdagmat(ref->V(), qdp_fatlink, qdp_longlink, out->V(), mass, 0,
              inv_param.cpu_prec, gaugeParam.cpu_prec, tmp->V(), QUDA_ODD_PARITY);
#endif

    mxpy(in->V(), ref->V(), Vh*mySpinorSiteSize, inv_param.cpu_prec);
    nrm2 = norm_2(ref->V(), Vh*mySpinorSiteSize, inv_param.cpu_prec);
    src2 = norm_2(in->V(), Vh*mySpinorSiteSize, inv_param.cpu_prec);

    break;

  case 2: // full spinor
    errorQuda("full spinor not supported\n");
    break;

  case 3: // multi mass CG, even
  case 4:
#define NUM_OFFSETS 12
    {
      double masses[NUM_OFFSETS] = {0.002, 0.0021, 0.0064, 0.070, 0.077, 0.081,
                                    0.1, 0.11, 0.12, 0.13, 0.14, 0.205};
      inv_param.num_offset = NUM_OFFSETS;
      // these can be set independently
      for (int i = 0; i < inv_param.num_offset; i++) {
        inv_param.tol_offset[i] = inv_param.tol;
        inv_param.tol_hq_offset[i] = inv_param.tol_hq;
      }
      void* outArray[NUM_OFFSETS];
      int len;

      cpuColorSpinorField* spinorOutArray[NUM_OFFSETS];
      spinorOutArray[0] = out;
      for(int i=1; i < inv_param.num_offset; i++){
        spinorOutArray[i] = new cpuColorSpinorField(csParam);
      }

      for(int i=0; i < inv_param.num_offset; i++){
        outArray[i] = spinorOutArray[i]->V();
        inv_param.offset[i] = 4*masses[i]*masses[i];
      }

      len = Vh;

      if (test_type == 3) {
        inv_param.matpc_type = QUDA_MATPC_EVEN_EVEN;
      } else {
        inv_param.matpc_type = QUDA_MATPC_ODD_ODD;
      }

      invertMultiShiftQuda(outArray, in->V(), &inv_param);

      cudaDeviceSynchronize();
      time0 += clock(); // stop the timer
      time0 /= CLOCKS_PER_SEC;

      printfQuda("done: total time = %g secs, compute time = %g, %i iter / %g secs = %g gflops\n",
                 time0, inv_param.secs, inv_param.iter, inv_param.secs,
                 inv_param.gflops/inv_param.secs);

      printfQuda("checking the solution\n");
      QudaParity parity = QUDA_INVALID_PARITY;
      if (inv_param.solve_type == QUDA_NORMOP_SOLVE){
        //parity = QUDA_EVENODD_PARITY;
        errorQuda("full parity not supported\n");
      }else if (inv_param.matpc_type == QUDA_MATPC_EVEN_EVEN){
        parity = QUDA_EVEN_PARITY;
      }else if (inv_param.matpc_type == QUDA_MATPC_ODD_ODD){
        parity = QUDA_ODD_PARITY;
      }else{
        errorQuda("ERROR: invalid spinor parity \n");
        exit(1);
      }

      for(int i=0; i < inv_param.num_offset; i++){
        printfQuda("%dth solution: mass=%f, ", i, masses[i]);
#ifdef MULTI_GPU
        matdagmat_mg4dir(ref, qdp_fatlink, qdp_longlink, ghost_fatlink, ghost_longlink,
                         spinorOutArray[i], masses[i], 0, inv_param.cpu_prec,
                         gaugeParam.cpu_prec, tmp, parity);
#else
        matdagmat(ref->V(), qdp_fatlink, qdp_longlink, outArray[i], masses[i], 0,
                  inv_param.cpu_prec, gaugeParam.cpu_prec, tmp->V(), parity);
#endif

        mxpy(in->V(), ref->V(), len*mySpinorSiteSize, inv_param.cpu_prec);
        double nrm2 = norm_2(ref->V(), len*mySpinorSiteSize, inv_param.cpu_prec);
        double src2 = norm_2(in->V(), len*mySpinorSiteSize, inv_param.cpu_prec);
        double hqr = sqrt(HeavyQuarkResidualNormCpu(*spinorOutArray[i], *ref).z);
        double l2r = sqrt(nrm2/src2);

        printfQuda("Shift %d residuals: (L2 relative) tol %g, QUDA = %g, host = %g; (heavy-quark) tol %g, QUDA = %g, host = %g\n",
                   i, inv_param.tol_offset[i], inv_param.true_res_offset[i], l2r,
                   inv_param.tol_hq_offset[i], inv_param.true_res_hq_offset[i], hqr);

        // empirical: if the cpu residual is more than one order of magnitude above the target accuracy, the solve failed to converge
        if (sqrt(nrm2/src2) > 10*inv_param.tol_offset[i]){
          ret |= 1;
        }
      }

      for(int i=1; i < inv_param.num_offset; i++) delete spinorOutArray[i];
    }
    break;

  default:
    errorQuda("Unsupported test type");

  } // switch

  if (test_type <= 2){

    double hqr = sqrt(HeavyQuarkResidualNormCpu(*out, *ref).z);
    double l2r = sqrt(nrm2/src2);

    printfQuda("Residuals: (L2 relative) tol %g, QUDA = %g, host = %g; (heavy-quark) tol %g, QUDA = %g, host = %g\n",
               inv_param.tol, inv_param.true_res, l2r, inv_param.tol_hq,
               inv_param.true_res_hq, hqr);

    printfQuda("done: total time = %g secs, compute time = %g secs, %i iter / %g secs = %g gflops, \n",
               time0, inv_param.secs, inv_param.iter, inv_param.secs,
               inv_param.gflops/inv_param.secs);
  }

  end();
  return ret;
}
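
// Illustrative sketch (not shown in this section): constructSpinorField() fills the
// source vector 'in' with random entries.  Given setSpinorSiteSize(6) and the
// QUDA_SPACE_SPIN_COLOR_FIELD_ORDER parity-field layout used above (3 colors x 1 spin
// x 2 reals per site), a minimal version could look as follows; 'Vh' is the half
// volume defined by setDims().  The name and template form are placeholders.
template <typename Float>
static void constructSpinorField_sketch(Float *res) {
  for (int i = 0; i < Vh; i++) {                          // parity (even/odd) sites
    for (int m = 0; m < 3; m++) {                         // color index
      res[(i*3 + m)*2 + 0] = rand() / (Float)RAND_MAX;    // real part
      res[(i*3 + m)*2 + 1] = rand() / (Float)RAND_MAX;    // imaginary part
    }
  }
}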