void IntensityTransformer::Apply(cv::Mat &mat)
{
    auto seed = GetSeed();
    auto rng = m_rngs.pop_or_create([seed]() { return std::make_unique<std::mt19937>(seed); });

    // Using single precision as EigVal and EigVec matrices are single precision.
    std::normal_distribution<float> d(0, (float)m_curStdDev);
    cv::Mat alphas(1, 3, CV_32FC1);
    assert(m_eigVal.rows == 1 && m_eigVal.cols == 3);
    alphas.at<float>(0) = d(*rng) * m_eigVal.at<float>(0);
    alphas.at<float>(1) = d(*rng) * m_eigVal.at<float>(1);
    alphas.at<float>(2) = d(*rng) * m_eigVal.at<float>(2);
    m_rngs.push(std::move(rng));

    assert(m_eigVec.rows == 3 && m_eigVec.cols == 3);

    cv::Mat shifts = m_eigVec * alphas.t();

    // For multi-channel images the data is in BGR format.
    size_t cdst = mat.rows * mat.cols * mat.channels();
    ElemType* pdstBase = reinterpret_cast<ElemType*>(mat.data);
    for (ElemType* pdst = pdstBase; pdst < pdstBase + cdst;)
    {
        for (int c = 0; c < mat.channels(); c++)
        {
            float shift = shifts.at<float>(mat.channels() - c - 1);
            *pdst = std::min(std::max(*pdst + shift, (ElemType)0), (ElemType)255);
            pdst++;
        }
    }
}
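This is the AlexNet-style PCA color jitter: alphas holds per-eigenchannel Gaussian noise scaled by the eigenvalues, and m_eigVec * alphas.t() maps it back to per-channel (BGR) shifts. A minimal sketch of how m_eigVal and m_eigVec could be derived from a pixel sample with OpenCV's cv::PCA; this helper is hypothetical and not part of the transformer:

#include <opencv2/core.hpp>

// Hypothetical helper: derive the eigen-decomposition that m_eigVal (1x3)
// and m_eigVec (3x3, eigenvectors as columns) are assumed to hold, from a
// sample of pixels stacked as an N x 3 CV_32F matrix (one row per pixel).
static void computeColorPca(const cv::Mat& pixels, cv::Mat& eigVal, cv::Mat& eigVec)
{
    cv::PCA pca(pixels, cv::noArray(), cv::PCA::DATA_AS_ROW);
    eigVal = pca.eigenvalues.reshape(1, 1); // row vector of channel variances
    eigVec = pca.eigenvectors.t();          // columns are the eigenvectors
}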
Mat PcaModel::drawSample(float sigma /*= 1.0f*/)
{
    // TODO: the c'tor takes the std. deviation (not the variance); update all the documentation!
    std::normal_distribution<float> distribution(0.0f, sigma);

    vector<float> alphas(getNumberOfPrincipalComponents());
    for (auto& a : alphas) {
        a = distribution(engine);
    }

    return drawSample(alphas);

    /* Without calling drawSample(alphas) (maybe if we add noise):
    Mat alphas = Mat::zeros(getNumberOfPrincipalComponents(), 1, CV_32FC1);
    for (int row = 0; row < alphas.rows; ++row) {
        alphas.at<float>(row, 0) = distribution(engine);
    }
    */

    /* With noise (does the noise make sense in drawSample(vector<float> coefficients)?):
    unsigned int vsize = mean.size();
    vector epsilon = Utils::generateNormalVector(vectorSize) * sqrt(m_noiseVariance);
    return m_mean + m_pcaBasisMatrix * coefficients + epsilon;
    */
}
void write_tasks(transport::repository<>& repo, transport::step_mpi<>* model)
{
    const double M_P      = 1.0;
    const double m        = 1E-5 * M_P;
    const double c        = 0.0018;
    const double d        = 0.022 * M_P;
    const double phi0     = 14.84 * M_P;
    const double phi_init = 16.5 * M_P;

    const double N_init = 0.0;
    const double N_pre  = 6.0;
    const double N_max  = 50.1;

    transport::parameters<> params(M_P, { m, c, d, phi0 }, model);
    transport::initial_conditions<> ics("step", params, { phi_init }, N_init, N_pre);

    transport::basic_range<> times(N_init, N_max, 100, transport::spacing::linear);
    transport::basic_range<> ks(exp(7.0), exp(11.5), 1000, transport::spacing::log_bottom);
    transport::basic_range<> alphas(0.0, 0.0, 0, transport::spacing::linear);
    transport::basic_range<> betas(1.0/3.0, 1.0/3.0, 0, transport::spacing::linear);

    // construct a threepf task
    transport::threepf_alphabeta_task<> tk3("step.threepf", ics, times, ks, alphas, betas);
    tk3.set_collect_initial_conditions(true).set_adaptive_ics_efolds(5.0);

    transport::zeta_threepf_task<> ztk3("step.threepf-zeta", tk3);

    repo.commit(ztk3);
}
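A hedged sketch of a driver that could invoke write_tasks(). The CppTransport runtime calls used here (task_manager, create_model, add_generator, process) follow the usual generated-model workflow but are assumptions that may differ between releases, as is the header name:

#include "step_mpi.h" // assumed name of the translator-generated model header

int main(int argc, char* argv[])
{
    transport::task_manager<> mgr(argc, argv);

    // the manager owns the model instance; write_tasks() only borrows it
    auto model = mgr.create_model< transport::step_mpi<> >();

    // register a generator that writes the tasks into a repository on demand
    mgr.add_generator([=](transport::repository<>& repo) { write_tasks(repo, model); });

    mgr.process();
    return 0;
}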
/**
 * Compute the search direction based on the current (inverse) Hessian
 * approximation and the given gradient.
 *
 * @param[out] pk The negative product of the inverse Hessian and the
 *                gradient direction gk.
 * @param[in]  gk Gradient direction.
 **/
inline void search_direction(VectorT &pk, const VectorT &gk) const
{
    std::vector<Scalar> alphas(_buf.size());
    typename boost::circular_buffer<UpdateT>::const_reverse_iterator buf_rit;
    typename boost::circular_buffer<UpdateT>::const_iterator buf_it;
    typename std::vector<Scalar>::const_iterator alpha_it;
    typename std::vector<Scalar>::reverse_iterator alpha_rit;

    pk.noalias() = -gk;

    // First loop of the two-loop recursion (runs backwards over the stored updates).
    for (buf_rit = _buf.rbegin(), alpha_rit = alphas.rbegin();
         buf_rit != _buf.rend();
         buf_rit++, alpha_rit++)
    {
        Scalar alpha;
        const Scalar &rhoi(boost::get<0>(*buf_rit));
        const VectorT &yi(boost::get<1>(*buf_rit));
        const VectorT &si(boost::get<2>(*buf_rit));

        alpha = rhoi * si.dot(pk);
        pk -= alpha * yi;
        *alpha_rit = alpha;
    }

    // Apply the initial Hessian approximation H0 = _gammak * I.
    pk *= _gammak;

    // Second loop (runs forwards over the stored updates).
    for (buf_it = _buf.begin(), alpha_it = alphas.begin();
         buf_it != _buf.end();
         buf_it++, alpha_it++)
    {
        Scalar beta;
        const Scalar &rhoi(boost::get<0>(*buf_it));
        const VectorT &yi(boost::get<1>(*buf_it));
        const VectorT &si(boost::get<2>(*buf_it));

        beta = rhoi * yi.dot(pk);
        pk += (*alpha_it - beta) * si;
    }
}
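This is the standard L-BFGS two-loop recursion. A self-contained restatement in plain Eigen may make the data flow easier to follow; the container, the names, and the gammak scaling convention here are illustrative assumptions, not the class's actual storage:

#include <Eigen/Dense>
#include <deque>
#include <tuple>
#include <vector>

using Vec = Eigen::VectorXd;
// One stored correction pair (rho_i, y_i, s_i), with rho_i = 1 / (y_i . s_i).
using Update = std::tuple<double, Vec, Vec>;

// Standalone restatement of the two-loop recursion above. Assumes the most
// recent update sits at the back of the buffer and that gammak is the usual
// s'y / y'y scaling of the initial inverse-Hessian guess H0 = gammak * I.
Vec lbfgsDirection(const Vec& g, const std::deque<Update>& buf, double gammak)
{
    std::vector<double> alpha(buf.size());
    Vec p = -g;
    for (std::size_t i = buf.size(); i-- > 0; ) {   // newest to oldest
        const auto& [rho, y, s] = buf[i];
        alpha[i] = rho * s.dot(p);
        p -= alpha[i] * y;
    }
    p *= gammak;                                    // apply H0
    for (std::size_t i = 0; i < buf.size(); ++i) {  // oldest to newest
        const auto& [rho, y, s] = buf[i];
        p += (alpha[i] - rho * y.dot(p)) * s;
    }
    return p;                                       // approximates -H * g
}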
Mat PcaModel::drawSample(vector<float> coefficients)
{
    Mat alphas(coefficients);

    Mat sqrtOfEigenvalues = eigenvalues.clone();
    for (int i = 0; i < eigenvalues.rows; ++i) {
        sqrtOfEigenvalues.at<float>(i) = std::sqrt(eigenvalues.at<float>(i));
    }

    //Mat smallBasis = pcaBasis(cv::Rect(0, 0, 55, 100));
    //Mat smallMean = mean(cv::Rect(0, 0, 1, 100));

    Mat modelSample = mean + pcaBasis * alphas.mul(sqrtOfEigenvalues); // Surr
    //Mat modelSample = mean + pcaBasis * alphas; // Bsl .h5 old

    return modelSample;
}
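The sampling rule is sample = mean + U (alpha ∘ sqrt(lambda)), i.e. the coefficients are measured in standard deviations along each principal axis. A self-contained restatement with assumed shapes (mean d x 1, basis d x n with one column per component, eigenvalues and alphas n x 1, all CV_32F); the names are illustrative, not the class's members:

#include <opencv2/core.hpp>
#include <cmath>

// sample = mean + basis * (alphas .* sqrt(eigenvalues))
cv::Mat pcaSample(const cv::Mat& mean, const cv::Mat& basis,
                  const cv::Mat& eigenvalues, const cv::Mat& alphas)
{
    cv::Mat scaled(alphas.size(), CV_32FC1);
    for (int i = 0; i < alphas.rows; ++i)
        scaled.at<float>(i) = alphas.at<float>(i) * std::sqrt(eigenvalues.at<float>(i));
    return mean + basis * scaled;
}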
double bin_sugeno::operator()(const p_dna& d)
{
    fuzzy::Function* y_function = fuzzy::function3(fuzzy::coord(0, 0),
                                                   fuzzy::coord(0.5, 1.0),
                                                   fuzzy::coord(1.0, 0));
    fuzzy::MyuFunctions mf = { fuzzy::fvector(0), y_function };
    const int x_count = m_cdata->x_count();

    int i = 0;
    for (int k = 0; k < m_numbers.size(); ++k)
        for (int x = 0; x < x_count; ++x) {
            fuzzy::ExpFunction* ff = new fuzzy::ExpFunction(d->get(i), d->get(i + 1));
            mf.x_funcs.push_back(ff);
            i += 2;
        }

    fuzzy::rule_vector rules = fuzzy::make_rules(*m_cdata, mf, m_numbers);

    dvector errors; // error on the new (held-out) data
    for (int i = 0; i < m_numbers.size(); ++i) {
        dvector alphas(rules.size());
        dvector y_1(rules.size());
        // compute the output of each rule for the X values of the current sample
        for (int j = 0; j < rules.size(); ++j) {
            dvector myu_x(x_count);
            for (int x = 0; x < x_count; ++x) {
                double x_value = (*m_cdata)[m_numbers[i]]->at(x);
                myu_x[x] = rules[j](x_value, x);
            }
            alphas[j] = *std::min_element(myu_x.begin(), myu_x.end());
            y_1[j] = rules[j].y();
        }
        // The inference step itself: the algorithm from the "black book".
        dvector up_mul = alphas * y_1;
        double up = std::accumulate(up_mul.begin(), up_mul.end(), 0.0);
        double down = std::accumulate(alphas.begin(), alphas.end(), 0.0);
        double etalon = m_cdata->y_for_xp(m_numbers[i]);
        double out = up / down; // output for the current sample
        double delta = fabs(out - etalon);
        errors.push_back(delta);
    }
    // Average error.
    return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}
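For reference, the inner loops implement standard zero-order Sugeno (weighted-average) inference, with each rule's activation taken as the minimum membership over the inputs:

\alpha_j = \min_{x} \mu_j(x_s), \qquad
\hat{y}_s = \frac{\sum_j \alpha_j\, y_j}{\sum_j \alpha_j}, \qquad
E = \frac{1}{|S|} \sum_{s \in S} \bigl|\hat{y}_s - y_s\bigr|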
void write_tasks(transport::repository<>& repo, transport::new_axion_mpi<>* model)
{
    const double M_Planck = 1.0;
    const double g        = 1e-10;
    const double f        = M_Planck;
    const double Lambda   = std::pow(g, 1.0/4.0) * std::pow(25.0/(2*M_PI), 1.0/2.0) * M_Planck;

    const double phi_init = 23.5 * M_Planck;
    const double chi_init = f/2.0 - 0.001*M_Planck;

    const double N_init = 0.0;
    const double N_pre  = 10.0;
    const double N_max  = 68.0;

    transport::parameters<> params(M_Planck, { g, Lambda, f, M_PI }, model);
    transport::initial_conditions<> ics("axion_cfs", params, { phi_init, chi_init }, N_init, N_pre);

    transport::basic_range<> times(N_init, N_max, 500, transport::spacing::linear);
    transport::basic_range<> ks(exp(5.0), exp(5.0), 0, transport::spacing::linear);
    transport::basic_range<> alphas(0.0, 0.0, 0, transport::spacing::linear);
    transport::basic_range<> beta_equi(1.0/3.0, 1.0/3.0, 0, transport::spacing::linear);
    transport::basic_range<> beta_sq(0.95, 0.95, 0, transport::spacing::linear);

    // construct a threepf task for the equilateral mode
    transport::threepf_alphabeta_task<> tk3_equi("axion_cfs.equi.threepf", ics, times, ks, alphas, beta_equi);
    tk3_equi.set_adaptive_ics_efolds(4.0);
    tk3_equi.set_collect_initial_conditions(true);

    transport::zeta_threepf_task<> ztk3_equi("axion_cfs.equi.threepf-zeta", tk3_equi);
    ztk3_equi.set_paired(true);

    // construct a threepf task for the squeezed mode
    transport::threepf_alphabeta_task<> tk3_sq("axion_cfs.sq.threepf", ics, times, ks, alphas, beta_sq);
    tk3_sq.set_adaptive_ics_efolds(4.0);
    tk3_sq.set_collect_initial_conditions(true);

    transport::zeta_threepf_task<> ztk3_sq("axion_cfs.sq.threepf-zeta", tk3_sq);
    ztk3_sq.set_paired(true);

    repo.commit(ztk3_equi);
    repo.commit(ztk3_sq);
}
PlaneSet::PlaneSet(Material& in_material, int in_nnormal, double* in_normal,
                   int in_noffset, double* in_offset)
    : TriangleSet(in_material, true, false/* true */),
      nPlanes(max(in_nnormal, in_noffset)),
      normal(in_nnormal, in_normal),
      offset(in_noffset, in_offset)
{
    /* We'll set up 4 triangles per plane, in case we need to render a hexagon.
       Each triangle has 3 vertices (so 12 for the plane), and each vertex
       gets 3 color components and 1 alpha component. */
    ARRAY<int> colors(36*nPlanes);
    ARRAY<double> alphas(12*nPlanes);

    if (material.colors.getLength() > 1) {
        material.colors.recycle(nPlanes);
        for (int i = 0; i < nPlanes; i++) {
            Color color = material.colors.getColor(i);
            for (int j = 0; j < 12; j++) {
                colors.ptr[36*i + 3*j + 0] = color.getRedub();
                colors.ptr[36*i + 3*j + 1] = color.getGreenub();
                colors.ptr[36*i + 3*j + 2] = color.getBlueub();
                alphas.ptr[12*i + j]       = color.getAlphaf();
            }
        }
        material.colors.set(12*nPlanes, colors.ptr, 12*nPlanes, alphas.ptr);
        material.colorPerVertex(true, 12*nPlanes);
    }

    ARRAY<double> vertices(36*nPlanes), normals(36*nPlanes);
    for (int i = 0; i < vertices.size(); i++)
        vertices.ptr[i] = NA_REAL;
    for (int i = 0; i < nPlanes; i++)
        for (int j = 0; j < 12; j++) {
            normals.ptr[36*i + 3*j + 0] = normal.getRecycled(i).x;
            normals.ptr[36*i + 3*j + 1] = normal.getRecycled(i).y;
            normals.ptr[36*i + 3*j + 2] = normal.getRecycled(i).z;
        }
    initFaceSet(12*nPlanes, vertices.ptr, normals.ptr, NULL);
}
void assemble_cortical(const Geometry& geo, Matrix& mat, const Head2EEGMat& M,
                       const std::string& domain_name, const unsigned gauss_order,
                       double alpha, double beta, const std::string& filename)
{
    // Following the article: M. Clerc, J. Kybic, "Cortical mapping by Laplace-Cauchy
    // transmission using a boundary element method".
    // Assumptions:
    // - domain_name: the domain containing the sources is an innermost domain
    //   (defined as the interior of only one interface, called Cortex)
    // - the Cortex interface is composed of one mesh only (no shared vertices)
    // TODO: check the order of the MxM products for efficiency ... delete intermediate matrices

    const Domain& SourceDomain = geo.domain(domain_name);
    const Interface& Cortex    = SourceDomain.begin()->interface();
    const Mesh& cortex         = Cortex.begin()->mesh();

    // test the assumptions
    assert(SourceDomain.size() == 1);
    assert(Cortex.size() == 1);

    // shape of the new matrix:
    unsigned Nl = geo.size() - geo.outermost_interface().nb_triangles() - Cortex.nb_vertices() - Cortex.nb_triangles();
    unsigned Nc = geo.size() - geo.outermost_interface().nb_triangles();

    std::fstream f(filename.c_str());
    Matrix P;
    if ( !f ) {
        // build the HeadMat:
        // The following is the same as assemble_HM, except that N_11, D_11 and S_11 are not computed.
        SymMatrix mat_temp(Nc);
        mat_temp.set(0.0);
        double K = 1.0 / (4.0 * M_PI);

        // We iterate over the meshes (or pairs of domains) to fill the lower half
        // of the HeadMat (since it is symmetric).
        for ( Geometry::const_iterator mit1 = geo.begin(); mit1 != geo.end(); ++mit1) {
            for ( Geometry::const_iterator mit2 = geo.begin(); mit2 != (mit1+1); ++mit2) {
                // if mit1 and mit2 communicate, i.e. they are used in the definition of a common domain
                const int orientation = geo.oriented(*mit1, *mit2);
                // equals  0 if they don't have any domain in common
                // equals  1 if they are both oriented toward the same domain
                // equals -1 if they are not
                if ( orientation != 0 ) {
                    double Scoeff =  orientation * geo.sigma_inv(*mit1, *mit2) * K;
                    double Dcoeff = -orientation * geo.indicator(*mit1, *mit2) * K;
                    double Ncoeff;
                    if ( !(mit1->outermost() || mit2->outermost()) && ( (*mit1 != *mit2) || (*mit1 != cortex) ) ) {
                        // Compute the S block first because it is needed for the corresponding N block
                        operatorS(*mit1, *mit2, mat_temp, Scoeff, gauss_order);
                        Ncoeff = geo.sigma(*mit1, *mit2) / geo.sigma_inv(*mit1, *mit2);
                    } else {
                        Ncoeff = orientation * geo.sigma(*mit1, *mit2) * K;
                    }
                    if ( !mit1->outermost() && ( (*mit1 != *mit2) || (*mit1 != cortex) ) ) {
                        // Compute the D block
                        operatorD(*mit1, *mit2, mat_temp, Dcoeff, gauss_order);
                    }
                    if ( (*mit1 != *mit2) && !mit2->outermost() ) {
                        // Compute the D* block
                        operatorD(*mit1, *mit2, mat_temp, Dcoeff, gauss_order, true);
                    }
                    // Compute the N block
                    if ( (*mit1 != *mit2) || (*mit1 != cortex) ) {
                        operatorN(*mit1, *mit2, mat_temp, Ncoeff, gauss_order);
                    }
                }
            }
        }
        // Deflate the diagonal block (N33) of 'mat' (in order to have a zero-mean
        // potential on the outermost interface).
        const Interface i = geo.outermost_interface();
        unsigned i_first = (*i.begin()->mesh().vertex_begin())->index();
        deflat(mat_temp, i, mat_temp(i_first, i_first) / geo.outermost_interface().nb_vertices());

        mat = Matrix(Nl, Nc);
        mat.set(0.0);
        // copy mat_temp into mat, except the rows for the cortex vertices
        // [i_vb_c, i_ve_c] and the cortex triangles [i_tb_c, i_te_c].
        unsigned iNl = 0;
        unsigned i_vb_c = (*cortex.vertex_begin())->index();
        unsigned i_ve_c = (*cortex.vertex_rbegin())->index();
        unsigned i_tb_c = cortex.begin()->index();
        unsigned i_te_c = cortex.rbegin()->index();
        for ( unsigned i = 0; i < Nc; ++i) {
            if ( !(i_vb_c <= i && i <= i_ve_c) && !(i_tb_c <= i && i <= i_te_c) ) {
                mat.setlin(iNl, mat_temp.getlin(i));
                ++iNl;
            }
        }

        // ** Construct P: the null-space projector **
        Matrix W;
        {
            Matrix U, s;
            mat.svd(U, s, W);
        }
        SparseMatrix S(Nc, Nc);
        // we set S to 0 everywhere, except in the last part of the diagonal:
        for ( unsigned i = Nl; i < Nc; ++i) {
            S(i, i) = 1.0;
        }
        P = (W * S) * W.transpose(); // P is a projector: P^2 = P and mat*P*X = 0

        if ( filename.length() != 0 ) {
            std::cout << "Saving projector P (" << filename << ")." << std::endl;
            P.save(filename);
        }
    } else {
        std::cout << "Loading projector P (" << filename << ")." << std::endl;
        P.load(filename);
    }

    // ** Get the gradient of the P1 & P0 elements on the meshes **
    Matrix MM(M.transpose() * M);
    SymMatrix RR(Nc, Nc);
    RR.set(0.);
    for ( Geometry::const_iterator mit = geo.begin(); mit != geo.end(); ++mit) {
        mit->gradient_norm2(RR);
    }

    // ** Choose the regularization parameters **
    SparseMatrix alphas(Nc, Nc); // diagonal matrix
    Matrix Z;
    if ( alpha < 0 ) { // try an automatic method... TODO find a better estimate
        double nRR_v = RR.submat(0, geo.nb_vertices(), 0, geo.nb_vertices()).frobenius_norm();
        alphas.set(0.);
        alpha = MM.frobenius_norm() / (1.e3 * nRR_v);
        beta  = alpha * 50000.;
        for ( Vertices::const_iterator vit = geo.vertex_begin(); vit != geo.vertex_end(); ++vit) {
            alphas(vit->index(), vit->index()) = alpha;
        }
        for ( Meshes::const_iterator mit = geo.begin(); mit != geo.end(); ++mit) {
            if ( !mit->outermost() ) {
                for ( Mesh::const_iterator tit = mit->begin(); tit != mit->end(); ++tit) {
                    alphas(tit->index(), tit->index()) = beta;
                }
            }
        }
        std::cout << "AUTOMATIC alphas = " << alpha << "\tbeta = " << beta << std::endl;
    } else {
        for ( Vertices::const_iterator vit = geo.vertex_begin(); vit != geo.vertex_end(); ++vit) {
            alphas(vit->index(), vit->index()) = alpha;
        }
        for ( Meshes::const_iterator mit = geo.begin(); mit != geo.end(); ++mit) {
            if ( !mit->outermost() ) {
                for ( Mesh::const_iterator tit = mit->begin(); tit != mit->end(); ++tit) {
                    alphas(tit->index(), tit->index()) = beta;
                }
            }
        }
        std::cout << "alphas = " << alpha << "\tbeta = " << beta << std::endl;
    }
    Z = P.transpose() * (MM + alphas*RR) * P;

    // ** Pseudo-inverse and return **
    // X = P * { (M*P)' * (M*P) + (R*P)' * (R*P) }^(-1) * (M*P)' * m
    //   = P * { P'*M'*M*P + P'*R'*R*P }^(-1) * P'*M' * m
    //   = P * { P'*(MM + a*RR)*P }^(-1) * P'*M' * m
    //   = P * Z^(-1) * P' * M' * m
    Matrix rhs = P.transpose() * M.transpose();
    mat = P * Z.pinverse() * rhs;
}
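In display form, the solution operator assembled above is the projected, Tikhonov-regularized pseudo-inverse from the final comment, with A = diag(alphas) carrying the per-vertex weight alpha and per-triangle weight beta, RR playing the role of R^T R, and + denoting the pseudo-inverse computed by Z.pinverse():

X = P \left( P^{\mathsf{T}} \bigl( M^{\mathsf{T}} M + A\,\mathit{RR} \bigr) P \right)^{+} P^{\mathsf{T}} M^{\mathsf{T}} m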
void TPZArtDiff::Bornhaus(int dim, TPZFMatrix<REAL> &jacinv, TPZVec<T> &sol,
                          TPZVec<TPZDiffMatrix<T> > &Ai, TPZVec<TPZDiffMatrix<T> > &Tau)
{
#ifdef FASTESTDIFF
    int i, j;
    int nstate = Ai[0].Rows();
    TPZDiffMatrix<T> RTM, RMi, Y, Yi, Temp, Temp2,
                     BornhausTau(nstate, nstate), LambdaBornhaus;
    T us, c;
    TPZVec<REAL> alphas(dim, 0.);

    TPZEulerConsLaw::uRes(sol, us);
    TPZEulerConsLaw::cSpeed(sol, 1.4, c);

    RMMatrix(sol, us, fGamma, RTM, RMi);

    BornhausTau.Redim(nstate, nstate);

    for (i = 0; i < dim; i++) {
        for (j = 0; j < dim; j++)
            alphas[j] = jacinv(i, j);
        ContributeBornhaus(sol, us, c, fGamma, alphas, BornhausTau);
    }

    RTM.Multiply(BornhausTau, Temp);
    Temp.Multiply(RMi, BornhausTau);

    for (i = 0; i < dim; i++) {
        Ai[i].Multiply(BornhausTau, Tau[i]);
    }
#else
    int i, j;
    int nstate = Ai[0].Rows();
    TPZDiffMatrix<T> Rot, RotT, Y, Yi, M, Mi, Temp, Temp2,
                     BornhausTau(nstate, nstate), LambdaBornhaus;
    T us, c;
    TPZVec<REAL> alphas(dim, 0.);

    TPZEulerConsLaw::uRes(sol, us);
    TPZEulerConsLaw::cSpeed(sol, 1.4, c);

    RotMatrix(sol, us, Rot, RotT);
    MMatrix(sol, us, fGamma, M, Mi);

    for (i = 0; i < dim; i++) {
        for (j = 0; j < dim; j++)
            alphas[j] = jacinv(i, j);
        EigenSystemBornhaus(sol, us, c, fGamma, alphas, Y, Yi, LambdaBornhaus);
        Y.Multiply(LambdaBornhaus, Temp);
        Temp.Multiply(Yi, Temp2);
        BornhausTau.Add(Temp2);
    }

    RotT.Multiply(M, Temp);
    Temp.Multiply(BornhausTau, Temp2);
    Temp2.Multiply(Mi, Temp);
    Temp.Multiply(Rot, BornhausTau);

    BornhausTau.Inverse();

    for (i = 0; i < dim; i++) {
        Ai[i].Multiply(BornhausTau, Tau[i]);
    }
#endif
}
int main(int argc, char *argv[])
{
    if (argc < 2) {
        cout << "\nUsage: " << argv[0] << " L [c=2 w=64 k=80 d=1]" << endl;
        cout << "  L is the number of levels\n";
        cout << "  optional c is the number of columns in the key-switching matrices (default=2)\n";
        cout << "  optional w is the Hamming weight of the secret key (default=64)\n";
        cout << "  optional k is the security parameter (default=80)\n";
        cout << "  optional d specifies GF(2^d) arithmetic (default=1, must be <=16)\n";
        // cout << "  k is the security parameter\n";
        // cout << "  m determines the ring mod Phi_m(X)" << endl;
        cout << endl;
        exit(0);
    }
    cout.unsetf(ios::floatfield);
    cout.precision(4);

    long L = atoi(argv[1]);
    long c = 2;
    long w = 64;
    long k = 80;
    long d = 1;
    if (argc > 2) c = atoi(argv[2]);
    if (argc > 3) w = atoi(argv[3]);
    if (argc > 4) k = atoi(argv[4]);
    if (argc > 5) d = atoi(argv[5]);
    if (d > 16) Error("d cannot be larger than 16\n");

    cout << "\nTesting FHE with parameters L=" << L << ", c=" << c
         << ", w=" << w << ", k=" << k << ", d=" << d << endl;

    // get a lower bound on the parameter N=phi(m):
    // 1. Empirically, we use ~20-bit small primes in the modulus chain (the main
    //    constraint is that 2m must divide p-1 for every prime p). The first
    //    prime is larger, a 40-bit prime. (If this is a 32-bit machine then we
    //    use two 20-bit primes instead.)
    // 2. With L levels, the largest modulus for "fresh ciphertexts" has size
    //    q0 ~ p0 * p^{L} ~ 2^{40+20L}
    // 3. We break each ciphertext into up to c digits, so each digit is as large
    //    as D=2^{(40+20L)/c}
    // 4. The added noise variance term from the key-switching operation is
    //    c*N*sigma^2*D^2, and this must be mod-switched down to w*N (so it is
    //    on par with the added noise from modulus switching). Hence the ratio
    //    P that we use for mod-switching must satisfy c*N*sigma^2*D^2/P^2 < w*N,
    //    or P > sqrt(c/w) * sigma * 2^{(40+20L)/c}
    // 5. With this extra P factor, the key-switching matrices are defined
    //    relative to a modulus of size
    //       Q0 = q0*P ~ sqrt{c/w} sigma 2^{(40+20L)(1+1/c)}
    // 6. To get k-bit security we need N > log(Q0/sigma)(k+110)/7.2, i.e.
    //    roughly N > (40+20L)(1+1/c)(k+110) / 7.2

    long ptxtSpace = 2;
    double cc = 1.0 + (1.0 / (double)c);
    long N = (long) ceil((pSize*L + p0Size) * cc * (k+110) / 7.2);
    cout << "  bounding phi(m) > " << N << endl;

#if 0
    // A small m for debugging purposes
    long m = 15;
#else
    // pre-computed values of [phi(m), m, ord(2), c_m*1000]
    long ms[][4] = {
        //phi(m)    m  ord(2) c_m*1000
        {  1176,  1247, 28,  3736},
        {  1936,  2047, 11,  3870},
        {  2880,  3133, 24,  3254},
        {  4096,  4369, 16,  3422},
        {  5292,  5461, 14,  4160},
        {  5760,  8435, 24,  8935},
        {  8190,  8191, 13,  1273},
        { 10584, 16383, 14,  8358},
        { 10752, 11441, 48,  3607},
        { 12000, 13981, 20,  2467},
        { 11520, 15665, 24, 14916},
        { 14112, 18415, 28, 11278},
        { 15004, 15709, 22,  3867},
        { 15360, 20485, 24, 12767},
     // { 16384, 21845, 16, 12798},
        { 17208, 21931, 24, 18387},
        { 18000, 18631, 25,  4208},
        { 18816, 24295, 28, 16360},
        { 19200, 21607, 40, 35633},
        { 21168, 27305, 28, 15407},
        { 23040, 23377, 48,  5292},
        { 24576, 24929, 48,  5612},
        { 27000, 32767, 15, 20021},
        { 31104, 31609, 71,  5149},
        { 42336, 42799, 21,  5952},
        { 46080, 53261, 24, 33409},
        { 49140, 57337, 39,  2608},
        { 51840, 59527, 72, 21128},
        { 61680, 61681, 40,  1273},
        { 65536, 65537, 32,  1273},
        { 75264, 82603, 56, 36484},
        { 84672, 92837, 56, 38520}
    };

#if 0
    for (long i = 0; i < 25; i++) {
        long m = ms[i][1];
        PAlgebra alg(m);
        alg.printout();
        cout << "\n";
        // compute phi(m) directly
        long phim = 0;
        for (long j = 0; j < m; j++)
            if (GCD(j, m) == 1) phim++;
        if (phim != alg.phiM()) cout << "ERROR\n";
    }
    exit(0);
#endif

    // find the first m satisfying phi(m)>=N and d | ord(2) in Z_m^*
    long m = 0;
    for (unsigned i = 0; i < sizeof(ms)/sizeof(long[4]); i++)
        if (ms[i][0] >= N && (ms[i][2] % d) == 0) {
            m = ms[i][1];
            c_m = 0.001 * (double) ms[i][3];
            break;
        }
    if (m == 0) Error("Cannot support this L,d combination");
#endif
    // m = 257;

    FHEcontext context(m);
#if 0
    context.stdev = to_xdouble(0.5); // very low error
#endif
    activeContext = &context; // Mark this as the "current" context

    context.zMstar.printout();
    cout << endl;

    // Set the modulus chain
#if 1
    // The first 1-2 primes of total p0Size bits
#if (NTL_SP_NBITS > p0Size)
    AddPrimesByNumber(context, 1, 1UL << p0Size);     // add a single prime
#else
    AddPrimesByNumber(context, 2, 1UL << (p0Size/2)); // add two primes
#endif
#endif

    // The next L primes, as small as possible
    AddPrimesByNumber(context, L);

    ZZ productOfCtxtPrimes = context.productOfPrimes(context.ctxtPrimes);
    double productSize = context.logOfProduct(context.ctxtPrimes);

    // might as well test that the answer is roughly correct
    cout << "  context.logOfProduct(...)-log(context.productOfPrimes(...)) = "
         << productSize - log(productOfCtxtPrimes) << endl;

    // calculate the size of the digits
    context.digits.resize(c);
    IndexSet s1;
#if 0
    for (long i = 0; i < c-1; i++)
        context.digits[i] = IndexSet(i, i);
    context.digits[c-1] = context.ctxtPrimes / IndexSet(0, c-2);
    AddPrimesByNumber(context, 2, 1, true);
#else
    double sizeSoFar = 0.0;
    double maxDigitSize = 0.0;
    if (c > 1) { // break each ciphertext into a few digits
        double dsize = productSize / c;     // initial estimate
        double target = dsize - (pSize/3.0);
        long idx = context.ctxtPrimes.first();
        for (long i = 0; i < c-1; i++) { // compute the next digit
            IndexSet s;
            while (idx <= context.ctxtPrimes.last() && sizeSoFar < target) {
                s.insert(idx);
                sizeSoFar += log((double)context.ithPrime(idx));
                idx = context.ctxtPrimes.next(idx);
            }
            context.digits[i] = s;
            s1.insert(s);
            double thisDigitSize = context.logOfProduct(s);
            if (maxDigitSize < thisDigitSize) maxDigitSize = thisDigitSize;
            cout << "  digit #" << i+1 << " " << s << ": size " << thisDigitSize << endl;
            target += dsize;
        }
        IndexSet s = context.ctxtPrimes / s1; // all the remaining primes
        context.digits[c-1] = s;
        double thisDigitSize = context.logOfProduct(s);
        if (maxDigitSize < thisDigitSize) maxDigitSize = thisDigitSize;
        cout << "  digit #" << c << " " << s << ": size " << thisDigitSize << endl;
    } else {
        maxDigitSize = context.logOfProduct(context.ctxtPrimes);
        context.digits[0] = context.ctxtPrimes;
    }

    // Add primes to the chain for the P factor of key-switching
    double sizeOfSpecialPrimes = maxDigitSize + log(c/(double)w)/2 + log(context.stdev * 2);
    AddPrimesBySize(context, sizeOfSpecialPrimes, true);
#endif

    cout << "* ctxtPrimes: " << context.ctxtPrimes
         << ", log(q0)=" << context.logOfProduct(context.ctxtPrimes) << endl;
    cout << "* specialPrimes: " << context.specialPrimes
         << ", log(P)=" << context.logOfProduct(context.specialPrimes) << endl;

    for (long i = 0; i < context.numPrimes(); i++) {
        cout << "  modulus #" << i << " " << context.ithPrime(i) << endl;
    }
    cout << endl;

    setTimersOn();
    const ZZX& PhimX = context.zMstar.PhimX(); // The polynomial Phi_m(X)
    long phim = context.zMstar.phiM();         // The integer phi(m)
    FHESecKey secretKey(context);
    const FHEPubKey& publicKey = secretKey;

#if 0
    // Debug mode: use sk=1,2
    DoubleCRT newSk(to_ZZX(2), context);
    long id1 = secretKey.ImportSecKey(newSk, 64, ptxtSpace);
    newSk -= 1;
    long id2 = secretKey.ImportSecKey(newSk, 64, ptxtSpace);
#else
    long id1 = secretKey.GenSecKey(w, ptxtSpace); // A Hamming-weight-w secret key
    long id2 = secretKey.GenSecKey(w, ptxtSpace); // A second Hamming-weight-w secret key
#endif

    ZZX zero = to_ZZX(0);
    // Ctxt zeroCtxt(publicKey);

    /******************************************************************/
    /**                     TESTS BEGIN HERE                        ***/
    /******************************************************************/
    cout << "ptxtSpace = " << ptxtSpace << endl;

    GF2X G; // G is the AES polynomial, G(X) = X^8 + X^4 + X^3 + X + 1
    SetCoeff(G, 8); SetCoeff(G, 4); SetCoeff(G, 3); SetCoeff(G, 1); SetCoeff(G, 0);
    GF2X X;
    SetX(X);

#if 1
    // code for rotations...
    {
        GF2X::HexOutput = 1;

        const PAlgebra& al = context.zMstar;
        const PAlgebraModTwo& al2 = context.modTwo;

        long ngens = al.numOfGens();
        long nslots = al.NSlots();
        DoubleCRT tmp(context);

        vector< vector< DoubleCRT > > maskTable;
        maskTable.resize(ngens);
        for (long i = 0; i < ngens; i++) {
            if (i == 0 && al.SameOrd(i)) continue;
            long ord = al.OrderOf(i);
            maskTable[i].resize(ord+1, tmp);
            for (long j = 0; j <= ord; j++) {
                // initialize the mask that is 1 whenever
                // the ith coordinate is at least j
                vector<GF2X> maps, alphas, betas;

                al2.mapToSlots(maps, G); // Change G to X to get bits in the slots
                alphas.resize(nslots);
                for (long k = 0; k < nslots; k++)
                    if (coordinate(al, i, k) >= j)
                        alphas[k] = 1;
                    else
                        alphas[k] = 0;

                GF2X ptxt;
                al2.embedInSlots(ptxt, alphas, maps);

                // Sanity check: make sure that encode/decode works as expected
                al2.decodePlaintext(betas, ptxt, G, maps);
                for (long k = 0; k < nslots; k++) {
                    if (alphas[k] != betas[k]) {
                        cout << " Mask computation failed, i=" << i << ", j=" << j << "\n";
                        return 0;
                    }
                }
                maskTable[i][j] = to_ZZX(ptxt);
            }
        }

        vector<GF2X> maps;
        al2.mapToSlots(maps, G);

        vector<GF2X> alphas(nslots);
        for (long i = 0; i < nslots; i++)
            random(alphas[i], 8); // random degree-7 polynomial mod 2

        for (long amt = 0; amt < 20; amt++) {
            cout << ".";

            GF2X ptxt;
            al2.embedInSlots(ptxt, alphas, maps);

            DoubleCRT pp(context);
            pp = to_ZZX(ptxt);

            rotate(pp, amt, maskTable);

            GF2X ptxt1 = to_GF2X(to_ZZX(pp));

            vector<GF2X> betas;
            al2.decodePlaintext(betas, ptxt1, G, maps);

            for (long i = 0; i < nslots; i++) {
                if (alphas[i] != betas[(i+amt) % nslots]) {
                    cout << " amt=" << amt << " oops\n";
                    return 0;
                }
            }
        }
        cout << "\n";

#if 0
        long ord0 = al.OrderOf(0);
        for (long i = 0; i < nslots; i++) {
            cout << alphas[i] << " ";
            if ((i+1) % (nslots/ord0) == 0) cout << "\n";
        }
        cout << "\n\n";
        cout << betas.size() << "\n";
        for (long i = 0; i < nslots; i++) {
            cout << betas[i] << " ";
            if ((i+1) % (nslots/ord0) == 0) cout << "\n";
        }
#endif
        return 0;
    }
#endif

    // an initial sanity check on noise estimates,
    // comparing the estimated variance to the actual average
    cout << "pk:";
    checkCiphertext(publicKey.pubEncrKey, zero, secretKey);

    ZZX ptxt[6]; // first four are plaintexts, last two are constants
    std::vector<Ctxt> ctxt(4, Ctxt(publicKey));

    // Initialize the plaintexts and constants to random 0-1 polynomials
    for (size_t j = 0; j < 6; j++) {
        ptxt[j].rep.SetLength(phim);
        for (long i = 0; i < phim; i++)
            ptxt[j].rep[i] = RandomBnd(ptxtSpace);
        ptxt[j].normalize();

        if (j < 4) {
            publicKey.Encrypt(ctxt[j], ptxt[j], ptxtSpace);
            cout << "c" << j << ":";
            checkCiphertext(ctxt[j], ptxt[j], secretKey);
        }
    }

    // perform up to 2L levels of computation, each level computing:
    //  1. c0 += c1
    //  2. c1 *= c2            // L1' = max(L1,L2)+1
    //  3. c1.reLinearize
    //  4. c2 *= p4
    //  5. c2.automorph(k)     // k is the first generator of Zm^* /(2)
    //  6. c2.reLinearize
    //  7. c3 += p5
    //  8. c3 *= c0            // L3' = max(L3,L0,L1)+1
    //  9. c2 *= c3            // L2' = max(L2,L0+1,L1+1,L3+1)+1
    // 10. c0 *= c0            // L0' = max(L0,L1)+1
    // 11. c0.reLinearize
    // 12. c2.reLinearize
    // 13. c3.reLinearize
    //
    // The levels of the four ciphertexts behave as follows:
    //   0, 0, 0, 0  =>  1, 1, 2, 1  =>  2, 3, 3, 2
    //               =>  4, 4, 5, 4  =>  5, 6, 6, 5
    //               =>  7, 7, 8, 7  =>  8, 9, 9, 8  =>  [...]
    //
    // We perform the same operations on the plaintexts, and after each
    // operation we check that decryption still works, and print the current
    // modulus and noise estimate. We stop when we get the first decryption
    // error, or when we reach 2L levels (which really should not happen).
    zz_pContext zzpc;
    zz_p::init(ptxtSpace);
    zzpc.save();
    const zz_pXModulus F = to_zz_pX(PhimX);
    long g = context.zMstar.ZmStarGen(0); // the first generator in Zm*
    zz_pX x2g(g, 1);                      // the monomial X^g
    zz_pX p2;

    // generate a key-switching matrix from s(X^g) to s(X)
    secretKey.GenKeySWmatrix(/*powerOfS= */ 1,
                             /*powerOfX= */ g,
                             0, 0,
                             /*ptxtSpace=*/ ptxtSpace);

    // generate a key-switching matrix from s^2 to s
    secretKey.GenKeySWmatrix(/*powerOfS= */ 2,
                             /*powerOfX= */ 1,
                             0, 0,
                             /*ptxtSpace=*/ ptxtSpace);

    // generate a key-switching matrix from s^3 to s
    secretKey.GenKeySWmatrix(/*powerOfS= */ 3,
                             /*powerOfX= */ 1,
                             0, 0,
                             /*ptxtSpace=*/ ptxtSpace);

    for (long lvl = 0; lvl < 2*L; lvl++) {
        cout << "=======================================================\n";
        ctxt[0] += ctxt[1];
        ptxt[0] += ptxt[1];
        PolyRed(ptxt[0], ptxtSpace, true);
        cout << "c0+=c1: ";
        checkCiphertext(ctxt[0], ptxt[0], secretKey);

        ctxt[1].multiplyBy(ctxt[2]);
        ptxt[1] = (ptxt[1] * ptxt[2]) % PhimX;
        PolyRed(ptxt[1], ptxtSpace, true);
        cout << "c1*=c2: ";
        checkCiphertext(ctxt[1], ptxt[1], secretKey);

        ctxt[2].multByConstant(ptxt[4]);
        ptxt[2] = (ptxt[2] * ptxt[4]) % PhimX;
        PolyRed(ptxt[2], ptxtSpace, true);
        cout << "c2*=p4: ";
        checkCiphertext(ctxt[2], ptxt[2], secretKey);

        ctxt[2] >>= g;
        zzpc.restore();
        p2 = to_zz_pX(ptxt[2]);
        CompMod(p2, p2, x2g, F);
        ptxt[2] = to_ZZX(p2);
        cout << "c2>>=" << g << ":";
        checkCiphertext(ctxt[2], ptxt[2], secretKey);

        ctxt[2].reLinearize();
        cout << "c2.relin:";
        checkCiphertext(ctxt[2], ptxt[2], secretKey);

        ctxt[3].addConstant(ptxt[5]);
        ptxt[3] += ptxt[5];
        PolyRed(ptxt[3], ptxtSpace, true);
        cout << "c3+=p5: ";
        checkCiphertext(ctxt[3], ptxt[3], secretKey);

        ctxt[3].multiplyBy(ctxt[0]);
        ptxt[3] = (ptxt[3] * ptxt[0]) % PhimX;
        PolyRed(ptxt[3], ptxtSpace, true);
        cout << "c3*=c0: ";
        checkCiphertext(ctxt[3], ptxt[3], secretKey);

        ctxt[0].square();
        ptxt[0] = (ptxt[0] * ptxt[0]) % PhimX;
        PolyRed(ptxt[0], ptxtSpace, true);
        cout << "c0*=c0: ";
        checkCiphertext(ctxt[0], ptxt[0], secretKey);

        ctxt[2].multiplyBy(ctxt[3]);
        ptxt[2] = (ptxt[2] * ptxt[3]) % PhimX;
        PolyRed(ptxt[2], ptxtSpace, true);
        cout << "c2*=c3: ";
        checkCiphertext(ctxt[2], ptxt[2], secretKey);
    }
    /******************************************************************/
    /**                      TESTS END HERE                         ***/
    /******************************************************************/
    cout << endl;
    return 0;
}
SwaptionVolCube1::Cube
SwaptionVolCube1::sabrCalibration(const Cube& marketVolCube) const
{
    const std::vector<Time>& optionTimes = marketVolCube.optionTimes();
    const std::vector<Time>& swapLengths = marketVolCube.swapLengths();
    const std::vector<Date>& optionDates = marketVolCube.optionDates();
    const std::vector<Period>& swapTenors = marketVolCube.swapTenors();
    Matrix alphas(optionTimes.size(), swapLengths.size(), 0.);
    Matrix betas(alphas);
    Matrix nus(alphas);
    Matrix rhos(alphas);
    Matrix forwards(alphas);
    Matrix errors(alphas);
    Matrix maxErrors(alphas);
    Matrix endCriteria(alphas);

    const std::vector<Matrix>& tmpMarketVolCube = marketVolCube.points();

    std::vector<Real> strikes(strikeSpreads_.size());
    std::vector<Real> volatilities(strikeSpreads_.size());

    for (Size j=0; j<optionTimes.size(); j++) {
        for (Size k=0; k<swapLengths.size(); k++) {
            Rate atmForward = atmStrike(optionDates[j], swapTenors[k]);
            strikes.clear();
            volatilities.clear();
            for (Size i=0; i<nStrikes_; i++) {
                Real strike = atmForward + strikeSpreads_[i];
                if (strike >= MINSTRIKE) {
                    strikes.push_back(strike);
                    volatilities.push_back(tmpMarketVolCube[i][j][k]);
                }
            }

            const std::vector<Real>& guess =
                parametersGuess_(optionTimes[j], swapLengths[k]);

            const boost::shared_ptr<SABRInterpolation> sabrInterpolation =
                boost::shared_ptr<SABRInterpolation>(new SABRInterpolation(
                    strikes.begin(), strikes.end(), volatilities.begin(),
                    optionTimes[j], atmForward,
                    guess[0], guess[1], guess[2], guess[3],
                    isParameterFixed_[0], isParameterFixed_[1],
                    isParameterFixed_[2], isParameterFixed_[3],
                    vegaWeightedSmileFit_, endCriteria_, optMethod_,
                    errorAccept_, useMaxError_, maxGuesses_));

            sabrInterpolation->update();
            Real rmsError = sabrInterpolation->rmsError();
            Real maxError = sabrInterpolation->maxError();
            alphas     [j][k] = sabrInterpolation->alpha();
            betas      [j][k] = sabrInterpolation->beta();
            nus        [j][k] = sabrInterpolation->nu();
            rhos       [j][k] = sabrInterpolation->rho();
            forwards   [j][k] = atmForward;
            errors     [j][k] = rmsError;
            maxErrors  [j][k] = maxError;
            endCriteria[j][k] = sabrInterpolation->endCriteria();

            QL_ENSURE(endCriteria[j][k] != EndCriteria::MaxIterations,
                      "global swaptions calibration failed: "
                      "MaxIterations reached: " << "\n"
                      << "option maturity = " << optionDates[j] << ", \n"
                      << "swap tenor = " << swapTenors[k] << ", \n"
                      << "error = " << io::rate(errors[j][k]) << ", \n"
                      << "max error = " << io::rate(maxErrors[j][k]) << ", \n"
                      << "   alpha = " << alphas[j][k] << "\n"
                      << "   beta = " << betas[j][k] << "\n"
                      << "   nu = " << nus[j][k] << "\n"
                      << "   rho = " << rhos[j][k] << "\n");

            QL_ENSURE((useMaxError_ ? maxError : rmsError) < maxErrorTolerance_,
                      "global swaptions calibration failed: "
                      "option tenor " << optionDates[j]
                      << ", swap tenor " << swapTenors[k]
                      << (useMaxError_ ? ": max error " : ": error ")
                      << (useMaxError_ ? maxError : rmsError)
                      << "   alpha = " << alphas[j][k] << "\n"
                      << "   beta = " << betas[j][k] << "\n"
                      << "   nu = " << nus[j][k] << "\n"
                      << "   rho = " << rhos[j][k] << "\n"
                      << (useMaxError_ ? ": error " : ": max error ")
                      << (useMaxError_ ? rmsError : maxError));
        }
    }
    Cube sabrParametersCube(optionDates, swapTenors, optionTimes, swapLengths, 8);
    sabrParametersCube.setLayer(0, alphas);
    sabrParametersCube.setLayer(1, betas);
    sabrParametersCube.setLayer(2, nus);
    sabrParametersCube.setLayer(3, rhos);
    sabrParametersCube.setLayer(4, forwards);
    sabrParametersCube.setLayer(5, errors);
    sabrParametersCube.setLayer(6, maxErrors);
    sabrParametersCube.setLayer(7, endCriteria);

    return sabrParametersCube;
}
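For reference, each (option time, swap length) cell is fitted to the standard SABR dynamics, whose four parameters fill layers 0-3 of the returned cube:

dF_t = \alpha_t\, F_t^{\beta}\, dW_t^{(1)}, \qquad
d\alpha_t = \nu\, \alpha_t\, dW_t^{(2)}, \qquad
d\langle W^{(1)}, W^{(2)} \rangle_t = \rho\, dt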