void Comp3Mod(zz_pX& x1, zz_pX& x2, zz_pX& x3,
              const zz_pX& g1, const zz_pX& g2, const zz_pX& g3,
              const zz_pX& h, const zz_pXModulus& F)
{
   long m = SqrRoot(g1.rep.length() + g2.rep.length() + g3.rep.length());

   if (m == 0) {
      clear(x1);
      clear(x2);
      clear(x3);
      return;
   }

   zz_pXArgument A;

   build(A, h, F, m);

   zz_pX xx1, xx2, xx3;

   CompMod(xx1, g1, A, F);
   CompMod(xx2, g2, A, F);
   CompMod(xx3, g3, A, F);

   x1 = xx1;
   x2 = xx2;
   x3 = xx3;
}
void MinPolyMod(zz_pX& hh, const zz_pX& g, const zz_pXModulus& F, long m)
{
   zz_pX h, h1;
   long n = F.n;
   if (m < 1 || m > n) Error("MinPoly: bad args");

   /* probabilistically compute min-poly */

   ProbMinPolyMod(h, g, F, m);
   if (deg(h) == m) { hh = h; return; }
   CompMod(h1, h, g, F);
   if (IsZero(h1)) { hh = h; return; }

   /* not completely successful...must iterate */

   long i;

   zz_pX h2, h3;
   zz_pXMultiplier H1;
   vec_zz_p R(INIT_SIZE, n);

   for (;;) {
      R.SetLength(n);
      for (i = 0; i < n; i++) random(R[i]);
      build(H1, h1, F);
      UpdateMap(R, R, H1, F);
      DoMinPolyMod(h2, g, F, m-deg(h), R);

      mul(h, h, h2);
      if (deg(h) == m) { hh = h; return; }
      CompMod(h3, h2, g, F);
      MulMod(h1, h3, H1, F);
      if (IsZero(h1)) { hh = h; return; }
   }
}
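// Illustrative usage sketch (not from the library source above): how the
// MinPolyMod routine is typically called.  The modulus 17, the degrees, and
// the helper name MinPolyModDemo are arbitrary choices made for this example.
#include <NTL/lzz_pX.h>
using namespace NTL;

void MinPolyModDemo()
{
   zz_p::init(17);                  // work over GF(17)

   zz_pX f, g, h;
   random(f, 10);  SetCoeff(f, 10); // random monic modulus of degree 10
   random(g, 10);                   // random g of degree < 10

   zz_pXModulus F;
   build(F, f);                     // precompute reduction data for f

   MinPolyMod(h, g, F, deg(f));     // h = minimal polynomial of g modulo f,
                                    // so h(g) = 0 mod f and deg(h) <= deg(f)
}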
void split(ZZ_pEX& f1, ZZ_pEX& g1, ZZ_pEX& f2, ZZ_pEX& g2,
           const ZZ_pEX& f, const ZZ_pEX& g,
           const vec_ZZ_pE& roots, long lo, long mid)
{
   long r = mid-lo+1;

   ZZ_pEXModulus F;
   build(F, f);

   vec_ZZ_pE lroots(INIT_SIZE, r);
   long i;

   for (i = 0; i < r; i++)
      lroots[i] = roots[lo+i];

   ZZ_pEX h, a, d;
   BuildFromRoots(h, lroots);
   CompMod(a, h, g, F);

   GCD(f1, a, f);
   div(f2, f, f1);

   rem(g1, g, f1);
   rem(g2, g, f2);
}
template<> void PAlgebraModTmpl<zz_pX,vec_zz_pX,zz_pXModulus>::mapToFt(zz_pX& r, const zz_pX& G,unsigned t,const zz_pX* rF1) const { int i = zmStar.indexOfRep(t); if (i < 0) { r=zz_pX::zero(); return; } if (rF1==NULL) { // Compute the representation "from scratch" zz_pE::init(factors[i]); // work with the extension field GF_2[X]/Ft(X) zz_pEX Ga=to_zz_pEX((zz_pX&)G);// G is polynomial over the extension field r=rep(FindRoot(Ga)); // Find a root of G in this field return; } // if rF1 is set, then use it instead, setting r = rF1(X^t) mod Ft(X) zz_pXModulus Ft(factors[i]); // long tInv = InvMod(t,m); zz_pX X2t = PowerXMod(t,Ft); // X2t = X^t mod Ft r = CompMod(*rF1,X2t,Ft); // r = F1(X2t) mod Ft /* Debugging sanity-check: G(r)=0 in the extension field (Z/2Z)[X]/Ft(X) zz_pE::init(factors[i]); zz_pEX Ga=to_zz_pEX((zz_pX&)G);// G as a polynomial over the extension field zz_pE ra =to_zz_pE(r); // r is an element in the extension field eval(ra,Ga,ra); // ra = Ga(ra) if (!IsZero(ra)) {// check that Ga(r)=0 in this extension field cout << "rF1(X^t) mod Ft(X) != root of G mod Ft, t=" << t << endl; exit(0); }*******************************************************************/ }
void PAlgebraModDerived<type>::embedInAllSlots(RX& H, const RX& alpha,
                                               const MappingData<type>& mappingData) const
{
  if (isDryRun()) {
    H = RX::zero();
    return;
  }
  FHE_TIMER_START;
  long nSlots = zMStar.getNSlots();
  vector<RX> crt(nSlots); // allocate space for CRT components

  // The i'th CRT component is (H mod F_t) = alpha(maps[i]) mod F_t,
  // where t = T[i].

  if (IsX(mappingData.G) || deg(alpha) <= 0) {
    // special case... no need for CompMod, which is not optimized for this case
    for (long i=0; i<nSlots; i++)   // crt[i] = alpha(maps[i]) mod Ft
      crt[i] = ConstTerm(alpha);
  }
  else {
    // general case...
    for (long i=0; i<nSlots; i++)   // crt[i] = alpha(maps[i]) mod Ft
      CompMod(crt[i], alpha, mappingData.maps[i], factors[i]);
  }
  CRT_reconstruct(H,crt); // interpolate to get H
  FHE_TIMER_STOP;
}
void PAlgebraModDerived<type>::embedInSlots(RX& H, const vector<RX>& alphas,
                                            const MappingData<type>& mappingData) const
{
  long nSlots = zMStar.getNSlots();
  assert(lsize(alphas) == nSlots);

  for (long i = 0; i < nSlots; i++)
    assert(deg(alphas[i]) < mappingData.degG);

  vector<RX> crt(nSlots); // allocate space for CRT components

  // The i'th CRT component is (H mod F_t) = alphas[i](maps[i]) mod F_t,
  // where t = T[i].

  if (IsX(mappingData.G)) {
    // special case... no need for CompMod, which is not optimized for zero
    for (long i=0; i<nSlots; i++)   // crt[i] = alphas[i](maps[i]) mod Ft
      crt[i] = ConstTerm(alphas[i]);
  }
  else {
    // general case...
    for (long i=0; i<nSlots; i++)   // crt[i] = alphas[i](maps[i]) mod Ft
      CompMod(crt[i], alphas[i], mappingData.maps[i], factors[i]);
  }
  CRT_reconstruct(H,crt); // interpolate to get H
}
NTL_CLIENT

int main()
{
   ZZ_p::init(conv<ZZ>(17)); // define GF(17)

   ZZ_pX P;
   BuildIrred(P, 10);        // generate an irreducible polynomial P
                             // of degree 10 over GF(17)

   ZZ_pE::init(P);           // define GF(17^10)

   ZZ_pEX f, g, h;           // declare polynomials over GF(17^10)

   random(f, 20);            // f is a random, monic polynomial of degree 20
   SetCoeff(f, 20);

   random(h, 20);            // h is a random polynomial of degree less than 20

   g = MinPolyMod(h, f);     // compute the minimum polynomial of h modulo f

   if (g == 0) Error("oops (1)");     // check that g != 0

   if (CompMod(g, h, f) != 0)         // check that g(h) = 0 mod f
      Error("oops (2)");
}
void PAlgebraModDerived<type>::mapToFt(RX& w, const RX& G,unsigned long t,const RX* rF1) const { if (isDryRun()) { w = RX::zero(); return; } long i = zMStar.indexOfRep(t); if (i < 0) { clear(w); return; } if (rF1==NULL) { // Compute the representation "from scratch" // special case if (G == factors[i]) { SetX(w); return; } //special case if (deg(G) == 1) { w = -ConstTerm(G); return; } // the general case: currently only works when r == 1 assert(r == 1); REBak bak; bak.save(); RE::init(factors[i]); // work with the extension field GF_p[X]/Ft(X) REX Ga; conv(Ga, G); // G as a polynomial over the extension field vec_RE roots; FindRoots(roots, Ga); // Find roots of G in this field RE* first = &roots[0]; RE* last = first + roots.length(); RE* smallest = min_element(first, last); // make a canonical choice w=rep(*smallest); return; } // if rF1 is set, then use it instead, setting w = rF1(X^t) mod Ft(X) RXModulus Ft(factors[i]); // long tInv = InvMod(t,m); RX X2t = PowerXMod(t,Ft); // X2t = X^t mod Ft w = CompMod(*rF1,X2t,Ft); // w = F1(X2t) mod Ft /* Debugging sanity-check: G(w)=0 in the extension field (Z/2Z)[X]/Ft(X) RE::init(factors[i]); REX Ga; conv(Ga, G); // G as a polynomial over the extension field RE ra; conv(ra, w); // w is an element in the extension field eval(ra,Ga,ra); // ra = Ga(ra) if (!IsZero(ra)) {// check that Ga(w)=0 in this extension field cout << "rF1(X^t) mod Ft(X) != root of G mod Ft, t=" << t << endl; exit(0); }*******************************************************************/ }
int Directory::Compare( const void* ptr1, const void* ptr2 )
/**********************************************************/
{
    const cv_dir_entry* cvDirEntry1 = ( cv_dir_entry * ) ptr1;
    const cv_dir_entry* cvDirEntry2 = ( cv_dir_entry * ) ptr2;
    int retVal;

    if ( IsModuleBasis(cvDirEntry1->subsection,cvDirEntry2->subsection) ) {
        retVal = CompMod(cvDirEntry1->iMod,cvDirEntry2->iMod);
        return retVal ? retVal
                      : CompSub(cvDirEntry1->subsection,cvDirEntry2->subsection);
    }

    retVal = CompSub(cvDirEntry1->subsection,cvDirEntry2->subsection);
    if ( retVal ) {
        return retVal;
    }

    retVal = CompMod(cvDirEntry1->iMod,cvDirEntry2->iMod);
    return retVal ? retVal : 0;
}
void ComposeFrobeniusMap(GF2EX& y, const GF2EXModulus& F) { long d = GF2E::degree(); long n = deg(F); long i; i = 1; while (i <= d) i = i << 1; i = i >> 1; GF2EX z(INIT_SIZE, n), z1(INIT_SIZE, n); i = i >> 1; long m = 1; if (n == 2) { SetX(z); SqrMod(z, z, F); } else { while (i) { long m1 = 2*m; if (i & d) m1++; if (m1 >= NTL_BITS_PER_LONG-1 || (1L << m1) >= n) break; m = m1; i = i >> 1; } clear(z); SetCoeff(z, 1L << m); } while (i) { z1 = z; long j, k, dz; dz = deg(z); for (j = 0; j <= dz; j++) for (k = 0; k < m; k++) sqr(z1.rep[j], z1.rep[j]); CompMod(z, z1, z, F); m = 2*m; if (d & i) { SqrMod(z, z, F); m++; } i = i >> 1; } y = z; }
static void GenerateBabySteps(GF2EX& h1, const GF2EX& f, const GF2EX& h, long k, long verbose) { double t; if (verbose) { cerr << "generating baby steps..."; t = GetTime(); } GF2EXModulus F; build(F, f); GF2EXArgument H; #if 0 double n2 = sqrt(double(F.n)); double n4 = sqrt(n2); double n34 = n2*n4; long sz = long(ceil(n34/sqrt(sqrt(2.0)))); #else long sz = 2*SqrRoot(F.n); #endif build(H, h, F, sz); h1 = h; long i; long HexOutput = GF2X::HexOutput; GF2X::HexOutput = 1; if (!use_files) { BabyStepFile.kill(); BabyStepFile.SetLength(k-1); } for (i = 1; i <= k-1; i++) { if (use_files) { ofstream s; OpenWrite(s, FileName(GF2EX_stem, "baby", i)); s << h1 << "\n"; s.close(); } else BabyStepFile(i) = h1; CompMod(h1, h1, H, F); if (verbose) cerr << "+"; } if (verbose) cerr << (GetTime()-t) << "\n"; GF2X::HexOutput = HexOutput; }
void PowerCompose(ZZ_pEX& y, const ZZ_pEX& h, long q, const ZZ_pEXModulus& F)
{
   if (q < 0) LogicError("PowerCompose: bad args");

   ZZ_pEX z(INIT_SIZE, F.n);
   long sw;

   z = h;
   SetX(y);

   while (q) {
      sw = 0;

      if (q > 1) sw = 2;
      if (q & 1) {
         if (IsX(y))
            y = z;
         else
            sw = sw | 1;
      }

      switch (sw) {
      case 0:
         break;

      case 1:
         CompMod(y, y, z, F);
         break;

      case 2:
         CompMod(z, z, z, F);
         break;

      case 3:
         Comp2Mod(y, z, y, z, z, F);
         break;
      }

      q = q >> 1;
   }
}
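// Illustrative sketch (assumptions noted): PowerCompose above computes the
// q-fold composition of h with itself modulo f.  In the common case where
// h = X^{|ZZ_pE|} mod f (as produced by FrobeniusMap), the result is
// X^{|ZZ_pE|^q} mod f.  The helper name PowerComposeDemo is hypothetical.
#include <NTL/ZZ_pEXFactoring.h>
using namespace NTL;

void PowerComposeDemo(const ZZ_pEX& f, long q)
{
   ZZ_pEXModulus F;
   build(F, f);

   ZZ_pEX h;
   FrobeniusMap(h, F);        // h = X^{|ZZ_pE|} mod f

   ZZ_pEX y;
   PowerCompose(y, h, q, F);  // y = X^{|ZZ_pE|^q} mod f
}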
NTL_CLIENT

int main()
{
   zz_p::init(17);

   zz_pX P;
   BuildIrred(P, 10);

   zz_pE::init(P);

   zz_pEX f, g, h;

   random(f, 20);
   SetCoeff(f, 20);

   random(h, 20);

   g = MinPolyMod(h, f);

   if (deg(g) < 0) Error("bad zz_pEXTest (1)");

   if (CompMod(g, h, f) != 0)
      Error("bad zz_pEXTest (2)");

   vec_pair_zz_pEX_long v;

   long i;
   for (i = 0; i < 5; i++) {
      long n = RandomBnd(20)+1;
      cerr << n << " ";

      random(f, n);
      SetCoeff(f, n);

      v = CanZass(f);

      g = mul(v);
      if (f != g) cerr << "oops1\n";

      long i;
      for (i = 0; i < v.length(); i++)
         if (!DetIrredTest(v[i].a))
            Error("bad zz_pEXTest (3)");
   }

   cerr << "\n";

   cerr << "zz_pEXTest OK\n";
}
static void GenerateBabySteps(ZZ_pEX& h1, const ZZ_pEX& f, const ZZ_pEX& h, long k, FileList& flist, long verbose) { double t; if (verbose) { cerr << "generating baby steps..."; t = GetTime(); } ZZ_pEXModulus F; build(F, f); ZZ_pEXArgument H; #if 0 double n2 = sqrt(double(F.n)); double n4 = sqrt(n2); double n34 = n2*n4; long sz = long(ceil(n34/sqrt(sqrt(2.0)))); #else long sz = 2*SqrRoot(F.n); #endif build(H, h, F, sz); h1 = h; long i; if (!use_files) { (*BabyStepFile).SetLength(k-1); } for (i = 1; i <= k-1; i++) { if (use_files) { ofstream s; OpenWrite(s, FileName("baby", i), flist); s << h1 << "\n"; CloseWrite(s); } else (*BabyStepFile)(i) = h1; CompMod(h1, h1, H, F); if (verbose) cerr << "+"; } if (verbose) cerr << (GetTime()-t) << "\n"; }
static void GenerateGiantSteps(const ZZ_pX& f, const ZZ_pX& h, long l, long verbose) { double t; if (verbose) { cerr << "generating giant steps..."; t = GetTime(); } ZZ_pXModulus F; build(F, f); ZZ_pXArgument H; build(H, h, F, 2*SqrRoot(F.n)); ZZ_pX h1; h1 = h; long i; if (!use_files) { GiantStepFile.kill(); GiantStepFile.SetLength(l); } for (i = 1; i <= l-1; i++) { if (use_files) { ofstream s; OpenWrite(s, FileName(ZZ_pX_stem, "giant", i)); s << h1 << "\n"; s.close(); } else GiantStepFile(i) = h1; CompMod(h1, h1, H, F); if (verbose) cerr << "+"; } if (use_files) { ofstream s; OpenWrite(s, FileName(ZZ_pX_stem, "giant", i)); s << h1 << "\n"; s.close(); } else GiantStepFile(i) = h1; if (verbose) cerr << (GetTime()-t) << "\n"; }
void Comp2Mod(ZZ_pX& x1, ZZ_pX& x2, const ZZ_pX& g1, const ZZ_pX& g2,
              const ZZ_pX& h, const ZZ_pXModulus& F)
{
   long m = SqrRoot(g1.rep.length() + g2.rep.length());

   if (m == 0) {
      clear(x1);
      clear(x2);
      return;
   }

   ZZ_pXArgument A;

   build(A, h, F, m);

   ZZ_pX xx1, xx2;

   CompMod(xx1, g1, A, F);
   CompMod(xx2, g2, A, F);

   x1 = xx1;
   x2 = xx2;
}
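// Illustrative sketch: Comp2Mod above evaluates two polynomials at the same
// point h modulo f while sharing one baby-step table.  The modulus 101 and
// the helper name Comp2ModDemo are arbitrary choices for this example.
#include <NTL/ZZ_pX.h>
using namespace NTL;

void Comp2ModDemo()
{
   ZZ_p::init(conv<ZZ>(101));

   ZZ_pX f, g1, g2, h, x1, x2;
   random(f, 12);  SetCoeff(f, 12);  // monic modulus of degree 12
   random(g1, 12);
   random(g2, 12);
   random(h, 12);

   ZZ_pXModulus F;
   build(F, f);

   Comp2Mod(x1, x2, g1, g2, h, F);   // x1 = g1(h) mod f, x2 = g2(h) mod f
}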
void PAlgebraModTmpl<RX,vec_RX,RXM>::embedInAllSlots(RX& p, const RX& alpha,
                                                     const vector<RX>& maps) const
{
  unsigned nSlots = zmStar.NSlots();
  if (nSlots==0 || maps.size()!=nSlots) { p=RX::zero(); return; }

  vector<RX> crt(nSlots); // allocate space for CRT components

  // The i'th CRT component is (p mod F_t) = alpha(maps[i]) mod F_t,
  // where t = T[i].
  for (unsigned i=0; i<nSlots; i++)   // crt[i] = alpha(maps[i]) mod Ft
    CompMod(crt[i], alpha, maps[i], factors[i]);

  CRT_reconstruct(p,crt); // interpolate to get p
}
void applyLinPoly(GF2E& beta, const vec_GF2E& C, const GF2E& alpha, long p)
{
  long d = GF2E::degree();
  assert(d == C.length());

  GF2E gamma, res;

  gamma = to_GF2E(GF2X(1, 1));
  res = C[0]*alpha;
  for (long i = 1; i < d; i++) {
    gamma = power(gamma, p);
    res += C[i]*to_GF2E(CompMod(rep(alpha), rep(gamma), GF2E::modulus()));
  }

  beta = res;
}
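// Hypothetical usage sketch for applyLinPoly above: apply the linearized
// polynomial sum_i C[i]*alpha^{2^i} to a random element of GF(2^8).  The
// field degree 8 and the helper name applyLinPolyDemo are example choices.
#include <NTL/GF2E.h>
#include <NTL/GF2XFactoring.h>
#include <NTL/vec_GF2E.h>
using namespace NTL;

void applyLinPolyDemo()
{
  GF2X P = BuildIrred_GF2X(8);   // define GF(2^8)
  GF2E::init(P);

  long d = GF2E::degree();
  vec_GF2E C;
  C.SetLength(d);
  for (long i = 0; i < d; i++)
    random(C[i]);                // random coefficients of the linearized poly

  GF2E alpha = random_GF2E();
  GF2E beta;
  applyLinPoly(beta, C, alpha, /*p=*/2); // beta = sum_i C[i]*alpha^{2^i}
}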
void CompMod(zz_pX& x, const zz_pX& g, const zz_pX& h, const zz_pXModulus& F)
   // x = g(h) mod f
{
   long m = SqrRoot(g.rep.length());

   if (m == 0) {
      clear(x);
      return;
   }

   zz_pXArgument A;

   build(A, h, F, m);

   CompMod(x, g, A, F);
}
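// Illustrative sketch of the basic CompMod call above: modular composition
// x = g(h) mod f.  The modulus 17 and the helper name CompModDemo are
// arbitrary choices for this example.
#include <NTL/lzz_pX.h>
using namespace NTL;

void CompModDemo()
{
   zz_p::init(17);

   zz_pX f, g, h, x;
   random(f, 8);  SetCoeff(f, 8);  // monic modulus of degree 8
   random(g, 8);
   random(h, 8);

   zz_pXModulus F;
   build(F, f);                    // precompute reduction data for f

   CompMod(x, g, h, F);            // x = g(h) mod f
}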
void PAlgebraModDerived<type>::embedInSlots(RX& H, const vector<RX>& alphas,
                                            const MappingData<type>& mappingData) const
{
  if (isDryRun()) {
    H = RX::zero();
    return;
  }
  FHE_TIMER_START;
  long nSlots = zMStar.getNSlots();
  assert(lsize(alphas) == nSlots);

  for (long i = 0; i < nSlots; i++)
    assert(deg(alphas[i]) < mappingData.degG);

  vector<RX> crt(nSlots); // allocate space for CRT components

  // The i'th CRT component is (H mod F_t) = alphas[i](maps[i]) mod F_t,
  // where t = T[i].

  if (IsX(mappingData.G)) {
    // special case... no need for CompMod, which is not optimized for this case
    for (long i=0; i<nSlots; i++)   // crt[i] = alphas[i](maps[i]) mod Ft
      crt[i] = ConstTerm(alphas[i]);
  }
  else {
    // general case... still try to avoid CompMod when possible,
    // which is the common case for encoding masks
    for (long i=0; i<nSlots; i++) { // crt[i] = alphas[i](maps[i]) mod Ft
      if (deg(alphas[i]) <= 0)
        crt[i] = alphas[i];
      else
        CompMod(crt[i], alphas[i], mappingData.maps[i], factors[i]);
    }
  }
  CRT_reconstruct(H,crt); // interpolate to get H
  FHE_TIMER_STOP;
}
static void GenerateBabySteps(ZZ_pX& h1, const ZZ_pX& f, const ZZ_pX& h, long k, FileList& flist, long verbose) { double t; if (verbose) { cerr << "generating baby steps..."; t = GetTime(); } ZZ_pXModulus F; build(F, f); ZZ_pXArgument H; build(H, h, F, 2*SqrRoot(F.n)); h1 = h; long i; if (!use_files) { (*BabyStepFile).SetLength(k-1); } for (i = 1; i <= k-1; i++) { if (use_files) { ofstream s; OpenWrite(s, FileName("baby", i), flist); s << h1 << "\n"; CloseWrite(s); } else (*BabyStepFile)(i) = h1; CompMod(h1, h1, H, F); if (verbose) cerr << "+"; } if (verbose) cerr << (GetTime()-t) << "\n"; }
void TraceMap(GF2X& w, const GF2X& a, long d, const GF2XModulus& F, const GF2X& b)
{
  if (d < 0)
    throw helib::InvalidArgument("TraceMap: d is negative");

  GF2X y, z, t;

  z = b;
  y = a;
  clear(w);

  while (d) {
    if (d == 1) {
      if (IsZero(w))
        w = y;
      else {
        CompMod(w, w, z, F);
        add(w, w, y);
      }
    }
    else if ((d & 1) == 0) {
      Comp2Mod(z, t, z, y, z, F);
      add(y, t, y);
    }
    else if (IsZero(w)) {
      w = y;
      Comp2Mod(z, t, z, y, z, F);
      add(y, t, y);
    }
    else {
      Comp3Mod(z, t, w, z, y, w, z, F);
      add(w, w, y);
      add(y, t, y);
    }

    d = d >> 1;
  }
}
void TraceMap(ZZ_pEX& w, const ZZ_pEX& a, long d, const ZZ_pEXModulus& F,
              const ZZ_pEX& b)
{
   if (d < 0) LogicError("TraceMap: bad args");

   ZZ_pEX y, z, t;

   z = b;
   y = a;
   clear(w);

   while (d) {
      if (d == 1) {
         if (IsZero(w))
            w = y;
         else {
            CompMod(w, w, z, F);
            add(w, w, y);
         }
      }
      else if ((d & 1) == 0) {
         Comp2Mod(z, t, z, y, z, F);
         add(y, t, y);
      }
      else if (IsZero(w)) {
         w = y;
         Comp2Mod(z, t, z, y, z, F);
         add(y, t, y);
      }
      else {
         Comp3Mod(z, t, w, z, y, w, z, F);
         add(w, w, y);
         add(y, t, y);
      }

      d = d >> 1;
   }
}
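// Illustrative sketch (assumptions noted): the TraceMap routine above expects
// b = X^q mod f, which FrobeniusMap provides; with d = deg(f) it then yields
// w = a + a^q + ... + a^{q^{d-1}} mod f.  The helper name TraceMapDemo is
// hypothetical.
#include <NTL/ZZ_pEXFactoring.h>
using namespace NTL;

void TraceMapDemo(const ZZ_pEX& f, const ZZ_pEX& a)
{
   ZZ_pEXModulus F;
   build(F, f);

   ZZ_pEX b;
   FrobeniusMap(b, F);             // b = X^q mod f, q = |ZZ_pE|

   ZZ_pEX w;
   TraceMap(w, a, deg(f), F, b);   // w = a + a^q + ... + a^{q^{deg(f)-1}} mod f
}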
int main(int argc, char *argv[]) { argmap_t argmap; argmap["m"] = "0"; argmap["p"] = "2"; argmap["r"] = "1"; if (!parseArgs(argc, argv, argmap)) usage(); unsigned long m = atoi(argmap["m"]); if (!m) usage(); unsigned long p = atoi(argmap["p"]); unsigned long r = atoi(argmap["r"]); cout << "m = " << m << ", p = " << p << ", r = " << r << endl; vector<long> f; factorize(f,m); cout << "factoring "<<m<<" gives ["; for (unsigned long i=0; i<f.size(); i++) cout << f[i] << " "; cout << "]\n"; PAlgebra al(m, p); al.printout(); cout << "\n"; PAlgebraMod almod(al, r); almod.genMaskTable(); FHEcontext context(m, p, r); buildModChain(context, 5, 2); stringstream s1; writeContextBase(s1, context); s1 << context; string s2 = s1.str(); cout << s2 << endl; stringstream s3(s2); unsigned long m1, p1, r1; readContextBase(s3, m1, p1, r1); FHEcontext c1(m1, p1, r1); s3 >> c1; if (context == c1) cout << "equal\n"; else cout << "not equal\n"; return 0; #if 0 PAlgebraModTwo al2(al); PAlgebraMod2r al2r(r,al2); if (false) { long nslots = al.NSlots(); long ngens = al.numOfGens(); for (long i = 0; i < nslots; i++) { const int* v = al.dLog(al.ith_rep(i)); cout << i << " "; for (long j = 0; j < ngens; j++) cout << v[j] << " "; cout << "\n"; } return 0; } // GF2X::HexOutput = 1; // compact hexadecimal printout // cout << "Phi_m(X) = " << al.PhimX() << endl; // vec_GF2X Fs = al2.Factors(); // cout << "Factors = " << Fs << endl; // GF2X Q = Fs[0]; // for (long i=1; i<Fs.length(); i++) Q *= Fs[i]; // cout << "mult of factors = " << Q << endl; GF2X G; // G is the AES polynomial, G(X)= X^8 +X^4 +X^3 +X +1 SetCoeff(G,8); SetCoeff(G,4); SetCoeff(G,3); SetCoeff(G,1); SetCoeff(G,0); // GF2X G; SetCoeff(G,4); SetCoeff(G,1); SetCoeff(G,0); // X^4 +X +1 // cout << "G = " << G << ", deg(G)=" << deg(G) << endl; vector<GF2X> maps; al2.mapToSlots(maps, G); // cout << "maps = ["; // for (unsigned long i=0; i<maps.size(); i++) // cout << maps[i] << " "; // cout << "]\n"; GF2X X; SetX(X); // The polynomial X GF2X ptxt; // plaintext has X in all the slots cout << "embedding X in plaintext slots... "; al2.embedInAllSlots(ptxt, X, maps); cout << "done\n"; // cout << "ptxt = " << ptxt << endl; // Debugging printout: p modulo all the factors // vector<GF2X> crt; // al2.CRT_decompose(crt,ptxt); // cout << "ptxt mod factors = ["; // for (unsigned long i=0; i<crt.size(); i++) cout << crt[i] << " "; // cout << "]\n"; // Decode the plaintext back to a vector of elements, // and check that they are all equal to X vector<GF2X> alphas; cout << "decoding plaintext slots... "; al2.decodePlaintext(alphas, ptxt, G, maps); cout << "done\n"; // cout << "alphas = ["; // for (unsigned long i=0; i<alphas.size(); i++) // cout << alphas[i] << " "; // cout << "]\n"; cout << "comparing " << alphas.size() << " plaintext slots to X... "; for (unsigned long i=0; i<alphas.size(); i++) if (alphas[i] != X) { cout << "\n alphas["<<i<<"] = "<<alphas[i]<<" != X\n\n"; exit(0); } cout << "all tests completed successfully\n\n"; // encode and decode random polynomials for (unsigned long i=0; i<alphas.size(); i++) random(alphas[i], 8); // random degree-7 polynomial mod 2 cout << "embedding random GF(2^8) elements in plaintext slots... "; al2.embedInSlots(ptxt, alphas, maps); cout << "done\n"; // Compute p^2 mod Phi_m(X) and also p(X^2) mod Phi_m(X) and // verify that they both decode to a vector of X^2 in all the slots cout << "squaring and decoding plaintext slots... 
"; X *= X; // X^2 // GF2X ptxt2; // SqrMod(ptxt2,ptxt,al2.PhimXMod()); // ptxt2 = ptxt^2 mod Phi_m(X) CompMod(ptxt, ptxt, X, al2.PhimXMod()); // ptxt = ptxt(X^2) mod Phi_m(X) // // sanity chack: these should be the same mod 2 (but not mod 2^r) // if (ptxt != ptxt2) cout << "ptxt^2 != ptxt(X^2) mod Phi_m(X)\n"; vector<GF2X> betas; al2.decodePlaintext(betas, ptxt, G, maps); cout << "done\n"; if (alphas.size() != betas.size()) Error("wrong number of slots decoded"); cout << "comparing decoded plaintext slots... "; for (unsigned long i=0; i<alphas.size(); i++) { SqrMod(alphas[i],alphas[i],G); // should get alpha[i]^2 mod G if (alphas[i] != betas[i]) { cout << "\n alphas["<<i<<"] = "<<alphas[i] <<" != " << "betas["<<i<<"] = " << betas[i] << "\n\n"; exit(0); } } cout << "all tests completed successfully\n\n"; // return 0; al2r.restoreContext(); vector<zz_pX> maps1; zz_pX X1; SetX(X1); cerr << "HERE1\n"; al2r.mapToSlots(maps1, X1); cerr << "HERE1a\n"; vector<zz_pX> alphas1; alphas1.resize(maps.size()); for (long i = 0; i < lsize(alphas1); i++) random(alphas1[i], 1); zz_pX ptxt1; cerr << "HERE2\n"; al2r.embedInSlots(ptxt1, alphas1, maps1); cerr << "HERE3\n"; vector<zz_pX> betas1; al2r.decodePlaintext(betas1, ptxt1, X1, maps1); assert(alphas1 == betas1); return 0; #endif }
int main(int argc, char *argv[]) { if (argc<2) { cout << "\nUsage: " << argv[0] << " L [c=2 w=64 k=80 d=1]" << endl; cout << " L is the number of levels\n"; cout << " optional c is number of columns in the key-switching matrices (default=2)\n"; cout << " optional w is Hamming weight of the secret key (default=64)\n"; cout << " optional k is the security parameter (default=80)\n"; cout << " optional d specifies GF(2^d) arithmetic (default=1, must be <=16)\n"; // cout << " k is the security parameter\n"; // cout << " m determines the ring mod Phi_m(X)" << endl; cout << endl; exit(0); } cout.unsetf(ios::floatfield); cout.precision(4); long L = atoi(argv[1]); long c = 2; long w = 64; long k = 80; long d = 1; if (argc>2) c = atoi(argv[2]); if (argc>3) w = atoi(argv[3]); if (argc>4) k = atoi(argv[4]); if (argc>5) d = atoi(argv[5]); if (d>16) Error("d cannot be larger than 16\n"); cout << "\nTesting FHE with parameters L="<<L << ", c="<<c<<", w="<<w<<", k="<<k<<", d="<<d<< endl; // get a lower-bound on the parameter N=phi(m): // 1. Empirically, we use ~20-bit small primes in the modulus chain (the main // constraints is that 2m must divide p-1 for every prime p). The first // prime is larger, a 40-bit prime. (If this is a 32-bit machine then we // use two 20-bit primes instead.) // 2. With L levels, the largest modulus for "fresh ciphertexts" has size // q0 ~ p0 * p^{L} ~ 2^{40+20L} // 3. We break each ciphertext into upto c digits, do each digit is as large // as D=2^{(40+20L)/c} // 4. The added noise variance term from the key-switching operation is // c*N*sigma^2*D^2, and this must be mod-switched down to w*N (so it is // on part with the added noise from modulus-switching). Hence the ratio // P that we use for mod-switching must satisfy c*N*sigma^2*D^2/P^2<w*N, // or P > sqrt(c/w) * sigma * 2^{(40+20L)/c} // 5. With this extra P factor, the key-switching matrices are defined // relative to a modulus of size // Q0 = q0*P ~ sqrt{c/w} sigma 2^{(40+20L)(1+1/c)} // 6. To get k-bit security we need N>log(Q0/sigma)(k+110)/7.2, i.e. 
roughly // N > (40+20L)(1+1/c)(k+110) / 7.2 long ptxtSpace = 2; double cc = 1.0+(1.0/(double)c); long N = (long) ceil((pSize*L+p0Size)*cc*(k+110)/7.2); cout << " bounding phi(m) > " << N << endl; #if 0 // A small m for debugging purposes long m = 15; #else // pre-computed values of [phi(m),m,d] long ms[][4] = { //phi(m) m ord(2) c_m*1000 { 1176, 1247, 28, 3736}, { 1936, 2047, 11, 3870}, { 2880, 3133, 24, 3254}, { 4096, 4369, 16, 3422}, { 5292, 5461, 14, 4160}, { 5760, 8435, 24, 8935}, { 8190, 8191, 13, 1273}, {10584, 16383, 14, 8358}, {10752, 11441, 48, 3607}, {12000, 13981, 20, 2467}, {11520, 15665, 24, 14916}, {14112, 18415, 28, 11278}, {15004, 15709, 22, 3867}, {15360, 20485, 24, 12767}, // {16384, 21845, 16, 12798}, {17208 ,21931, 24, 18387}, {18000, 18631, 25, 4208}, {18816, 24295, 28, 16360}, {19200, 21607, 40, 35633}, {21168, 27305, 28, 15407}, {23040, 23377, 48, 5292}, {24576, 24929, 48, 5612}, {27000, 32767, 15, 20021}, {31104, 31609, 71, 5149}, {42336, 42799, 21, 5952}, {46080, 53261, 24, 33409}, {49140, 57337, 39, 2608}, {51840, 59527, 72, 21128}, {61680, 61681, 40, 1273}, {65536, 65537, 32, 1273}, {75264, 82603, 56, 36484}, {84672, 92837, 56, 38520} }; #if 0 for (long i = 0; i < 25; i++) { long m = ms[i][1]; PAlgebra alg(m); alg.printout(); cout << "\n"; // compute phi(m) directly long phim = 0; for (long j = 0; j < m; j++) if (GCD(j, m) == 1) phim++; if (phim != alg.phiM()) cout << "ERROR\n"; } exit(0); #endif // find the first m satisfying phi(m)>=N and d | ord(2) in Z_m^* long m = 0; for (unsigned i=0; i<sizeof(ms)/sizeof(long[3]); i++) if (ms[i][0]>=N && (ms[i][2] % d) == 0) { m = ms[i][1]; c_m = 0.001 * (double) ms[i][3]; break; } if (m==0) Error("Cannot support this L,d combination"); #endif // m = 257; FHEcontext context(m); #if 0 context.stdev = to_xdouble(0.5); // very low error #endif activeContext = &context; // Mark this as the "current" context context.zMstar.printout(); cout << endl; // Set the modulus chain #if 1 // The first 1-2 primes of total p0size bits #if (NTL_SP_NBITS > p0Size) AddPrimesByNumber(context, 1, 1UL<<p0Size); // add a single prime #else AddPrimesByNumber(context, 2, 1UL<<(p0Size/2)); // add two primes #endif #endif // The next L primes, as small as possible AddPrimesByNumber(context, L); ZZ productOfCtxtPrimes = context.productOfPrimes(context.ctxtPrimes); double productSize = context.logOfProduct(context.ctxtPrimes); // might as well test that the answer is roughly correct cout << " context.logOfProduct(...)-log(context.productOfPrimes(...)) = " << productSize-log(productOfCtxtPrimes) << endl; // calculate the size of the digits context.digits.resize(c); IndexSet s1; #if 0 for (long i=0; i<c-1; i++) context.digits[i] = IndexSet(i,i); context.digits[c-1] = context.ctxtPrimes / IndexSet(0,c-2); AddPrimesByNumber(context, 2, 1, true); #else double sizeSoFar = 0.0; double maxDigitSize = 0.0; if (c>1) { // break ciphetext into a few digits double dsize = productSize/c; // initial estimate double target = dsize-(pSize/3.0); long idx = context.ctxtPrimes.first(); for (long i=0; i<c-1; i++) { // compute next digit IndexSet s; while (idx <= context.ctxtPrimes.last() && sizeSoFar < target) { s.insert(idx); sizeSoFar += log((double)context.ithPrime(idx)); idx = context.ctxtPrimes.next(idx); } context.digits[i] = s; s1.insert(s); double thisDigitSize = context.logOfProduct(s); if (maxDigitSize < thisDigitSize) maxDigitSize = thisDigitSize; cout << " digit #"<<i+1<< " " <<s << ": size " << thisDigitSize << endl; target += dsize; } IndexSet s = 
context.ctxtPrimes / s1; // all the remaining primes context.digits[c-1] = s; double thisDigitSize = context.logOfProduct(s); if (maxDigitSize < thisDigitSize) maxDigitSize = thisDigitSize; cout << " digit #"<<c<< " " <<s << ": size " << thisDigitSize << endl; } else { maxDigitSize = context.logOfProduct(context.ctxtPrimes); context.digits[0] = context.ctxtPrimes; } // Add primes to the chain for the P factor of key-switching double sizeOfSpecialPrimes = maxDigitSize + log(c/(double)w)/2 + log(context.stdev *2); AddPrimesBySize(context, sizeOfSpecialPrimes, true); #endif cout << "* ctxtPrimes: " << context.ctxtPrimes << ", log(q0)=" << context.logOfProduct(context.ctxtPrimes) << endl; cout << "* specialPrimes: " << context.specialPrimes << ", log(P)=" << context.logOfProduct(context.specialPrimes) << endl; for (long i=0; i<context.numPrimes(); i++) { cout << " modulus #" << i << " " << context.ithPrime(i) << endl; } cout << endl; setTimersOn(); const ZZX& PhimX = context.zMstar.PhimX(); // The polynomial Phi_m(X) long phim = context.zMstar.phiM(); // The integer phi(m) FHESecKey secretKey(context); const FHEPubKey& publicKey = secretKey; #if 0 // Debug mode: use sk=1,2 DoubleCRT newSk(to_ZZX(2), context); long id1 = secretKey.ImportSecKey(newSk, 64, ptxtSpace); newSk -= 1; long id2 = secretKey.ImportSecKey(newSk, 64, ptxtSpace); #else long id1 = secretKey.GenSecKey(w,ptxtSpace); // A Hamming-weight-w secret key long id2 = secretKey.GenSecKey(w,ptxtSpace); // A second Hamming-weight-w secret key #endif ZZX zero = to_ZZX(0); // Ctxt zeroCtxt(publicKey); /******************************************************************/ /** TESTS BEGIN HERE ***/ /******************************************************************/ cout << "ptxtSpace = " << ptxtSpace << endl; GF2X G; // G is the AES polynomial, G(X)= X^8 +X^4 +X^3 +X +1 SetCoeff(G,8); SetCoeff(G,4); SetCoeff(G,3); SetCoeff(G,1); SetCoeff(G,0); GF2X X; SetX(X); #if 1 // code for rotations... 
{ GF2X::HexOutput = 1; const PAlgebra& al = context.zMstar; const PAlgebraModTwo& al2 = context.modTwo; long ngens = al.numOfGens(); long nslots = al.NSlots(); DoubleCRT tmp(context); vector< vector< DoubleCRT > > maskTable; maskTable.resize(ngens); for (long i = 0; i < ngens; i++) { if (i==0 && al.SameOrd(i)) continue; long ord = al.OrderOf(i); maskTable[i].resize(ord+1, tmp); for (long j = 0; j <= ord; j++) { // initialize the mask that is 1 whenever // the ith coordinate is at least j vector<GF2X> maps, alphas, betas; al2.mapToSlots(maps, G); // Change G to X to get bits in the slots alphas.resize(nslots); for (long k = 0; k < nslots; k++) if (coordinate(al, i, k) >= j) alphas[k] = 1; else alphas[k] = 0; GF2X ptxt; al2.embedInSlots(ptxt, alphas, maps); // Sanity-check, make sure that encode/decode works as expected al2.decodePlaintext(betas, ptxt, G, maps); for (long k = 0; k < nslots; k++) { if (alphas[k] != betas[k]) { cout << " Mask computation failed, i="<<i<<", j="<<j<<"\n"; return 0; } } maskTable[i][j] = to_ZZX(ptxt); } } vector<GF2X> maps; al2.mapToSlots(maps, G); vector<GF2X> alphas(nslots); for (long i=0; i < nslots; i++) random(alphas[i], 8); // random degree-7 polynomial mod 2 for (long amt = 0; amt < 20; amt++) { cout << "."; GF2X ptxt; al2.embedInSlots(ptxt, alphas, maps); DoubleCRT pp(context); pp = to_ZZX(ptxt); rotate(pp, amt, maskTable); GF2X ptxt1 = to_GF2X(to_ZZX(pp)); vector<GF2X> betas; al2.decodePlaintext(betas, ptxt1, G, maps); for (long i = 0; i < nslots; i++) { if (alphas[i] != betas[(i+amt)%nslots]) { cout << " amt="<<amt<<" oops\n"; return 0; } } } cout << "\n"; #if 0 long ord0 = al.OrderOf(0); for (long i = 0; i < nslots; i++) { cout << alphas[i] << " "; if ((i+1) % (nslots/ord0) == 0) cout << "\n"; } cout << "\n\n"; cout << betas.size() << "\n"; for (long i = 0; i < nslots; i++) { cout << betas[i] << " "; if ((i+1) % (nslots/ord0) == 0) cout << "\n"; } #endif return 0; } #endif // an initial sanity check on noise estimates, // comparing the estimated variance to the actual average cout << "pk:"; checkCiphertext(publicKey.pubEncrKey, zero, secretKey); ZZX ptxt[6]; // first four are plaintext, last two are constants std::vector<Ctxt> ctxt(4, Ctxt(publicKey)); // Initialize the plaintext and constants to random 0-1 polynomials for (size_t j=0; j<6; j++) { ptxt[j].rep.SetLength(phim); for (long i = 0; i < phim; i++) ptxt[j].rep[i] = RandomBnd(ptxtSpace); ptxt[j].normalize(); if (j<4) { publicKey.Encrypt(ctxt[j], ptxt[j], ptxtSpace); cout << "c"<<j<<":"; checkCiphertext(ctxt[j], ptxt[j], secretKey); } } // perform upto 2L levels of computation, each level computing: // 1. c0 += c1 // 2. c1 *= c2 // L1' = max(L1,L2)+1 // 3. c1.reLinearlize // 4. c2 *= p4 // 5. c2.automorph(k) // k is the first generator of Zm^* /(2) // 6. c2.reLinearlize // 7. c3 += p5 // 8. c3 *= c0 // L3' = max(L3,L0,L1)+1 // 9. c2 *= c3 // L2' = max(L2,L0+1,L1+1,L3+1)+1 // 10. c0 *= c0 // L0' = max(L0,L1)+1 // 11. c0.reLinearlize // 12. c2.reLinearlize // 13. c3.reLinearlize // // The levels of the four ciphertexts behave as follows: // 0, 0, 0, 0 => 1, 1, 2, 1 => 2, 3, 3, 2 // => 4, 4, 5, 4 => 5, 6, 6, 5 // => 7, 7, 8, 7 => 8,,9, 9, 10 => [...] // // We perform the same operations on the plaintext, and after each operation // we check that decryption still works, and print the curretn modulus and // noise estimate. We stop when we get the first decryption error, or when // we reach 2L levels (which really should not happen). 
zz_pContext zzpc; zz_p::init(ptxtSpace); zzpc.save(); const zz_pXModulus F = to_zz_pX(PhimX); long g = context.zMstar.ZmStarGen(0); // the first generator in Zm* zz_pX x2g(g, 1); zz_pX p2; // generate a key-switching matrix from s(X^g) to s(X) secretKey.GenKeySWmatrix(/*powerOfS= */ 1, /*powerOfX= */ g, 0, 0, /*ptxtSpace=*/ ptxtSpace); // generate a key-switching matrix from s^2 to s secretKey.GenKeySWmatrix(/*powerOfS= */ 2, /*powerOfX= */ 1, 0, 0, /*ptxtSpace=*/ ptxtSpace); // generate a key-switching matrix from s^3 to s secretKey.GenKeySWmatrix(/*powerOfS= */ 3, /*powerOfX= */ 1, 0, 0, /*ptxtSpace=*/ ptxtSpace); for (long lvl=0; lvl<2*L; lvl++) { cout << "=======================================================\n"; ctxt[0] += ctxt[1]; ptxt[0] += ptxt[1]; PolyRed(ptxt[0], ptxtSpace, true); cout << "c0+=c1: "; checkCiphertext(ctxt[0], ptxt[0], secretKey); ctxt[1].multiplyBy(ctxt[2]); ptxt[1] = (ptxt[1] * ptxt[2]) % PhimX; PolyRed(ptxt[1], ptxtSpace, true); cout << "c1*=c2: "; checkCiphertext(ctxt[1], ptxt[1], secretKey); ctxt[2].multByConstant(ptxt[4]); ptxt[2] = (ptxt[2] * ptxt[4]) % PhimX; PolyRed(ptxt[2], ptxtSpace, true); cout << "c2*=p4: "; checkCiphertext(ctxt[2], ptxt[2], secretKey); ctxt[2] >>= g; zzpc.restore(); p2 = to_zz_pX(ptxt[2]); CompMod(p2, p2, x2g, F); ptxt[2] = to_ZZX(p2); cout << "c2>>="<<g<<":"; checkCiphertext(ctxt[2], ptxt[2], secretKey); ctxt[2].reLinearize(); cout << "c2.relin:"; checkCiphertext(ctxt[2], ptxt[2], secretKey); ctxt[3].addConstant(ptxt[5]); ptxt[3] += ptxt[5]; PolyRed(ptxt[3], ptxtSpace, true); cout << "c3+=p5: "; checkCiphertext(ctxt[3], ptxt[3], secretKey); ctxt[3].multiplyBy(ctxt[0]); ptxt[3] = (ptxt[3] * ptxt[0]) % PhimX; PolyRed(ptxt[3], ptxtSpace, true); cout << "c3*=c0: "; checkCiphertext(ctxt[3], ptxt[3], secretKey); ctxt[0].square(); ptxt[0] = (ptxt[0] * ptxt[0]) % PhimX; PolyRed(ptxt[0], ptxtSpace, true); cout << "c0*=c0: "; checkCiphertext(ctxt[0], ptxt[0], secretKey); ctxt[2].multiplyBy(ctxt[3]); ptxt[2] = (ptxt[2] * ptxt[3]) % PhimX; PolyRed(ptxt[2], ptxtSpace, true); cout << "c2*=c3: "; checkCiphertext(ctxt[2], ptxt[2], secretKey); } /******************************************************************/ /** TESTS END HERE ***/ /******************************************************************/ cout << endl; return 0; }
long IterIrredTest(const ZZ_pEX& f)
{
   if (deg(f) <= 0) return 0;
   if (deg(f) == 1) return 1;

   ZZ_pEXModulus F;

   build(F, f);

   ZZ_pEX h;

   FrobeniusMap(h, F);

   long CompTableSize = 2*SqrRoot(deg(f));

   ZZ_pEXArgument H;

   build(H, h, F, CompTableSize);

   long i, d, limit, limit_sqr;
   ZZ_pEX g, X, t, prod;

   SetX(X);

   i = 0;
   g = h;
   d = 1;
   limit = 2;
   limit_sqr = limit*limit;

   set(prod);

   while (2*d <= deg(f)) {
      sub(t, g, X);
      MulMod(prod, prod, t, F);
      i++;
      if (i == limit_sqr) {
         GCD(t, f, prod);
         if (!IsOne(t)) return 0;

         set(prod);
         limit++;
         limit_sqr = limit*limit;
         i = 0;
      }

      d = d + 1;
      if (2*d <= deg(f)) {
         CompMod(g, g, H, F);
      }
   }

   if (i > 0) {
      GCD(t, f, prod);
      if (!IsOne(t)) return 0;
   }

   return 1;
}
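// Illustrative sketch (hedged): exercising IterIrredTest on a polynomial that
// BuildIrred constructs to be monic and irreducible over ZZ_pE.  The moduli,
// degrees, and the helper name IterIrredTestDemo are example choices.
#include <NTL/ZZ_pEXFactoring.h>
using namespace NTL;

long IterIrredTestDemo()
{
   ZZ_p::init(conv<ZZ>(17));

   ZZ_pX P;
   BuildIrred(P, 5);        // irreducible of degree 5 over GF(17)
   ZZ_pE::init(P);          // define GF(17^5)

   ZZ_pEX f;
   BuildIrred(f, 12);       // irreducible of degree 12 over GF(17^5)

   return IterIrredTest(f); // expected to return 1
}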
void TandemPowerCompose(ZZ_pEX& y1, ZZ_pEX& y2, const ZZ_pEX& h,
                        long q1, long q2, const ZZ_pEXModulus& F)
{
   ZZ_pEX z(INIT_SIZE, F.n);
   long sw;

   z = h;
   SetX(y1);
   SetX(y2);

   while (q1 || q2) {
      sw = 0;

      if (q1 > 1 || q2 > 1) sw = 4;

      if (q1 & 1) {
         if (IsX(y1))
            y1 = z;
         else
            sw = sw | 2;
      }

      if (q2 & 1) {
         if (IsX(y2))
            y2 = z;
         else
            sw = sw | 1;
      }

      switch (sw) {
      case 0:
         break;

      case 1:
         CompMod(y2, y2, z, F);
         break;

      case 2:
         CompMod(y1, y1, z, F);
         break;

      case 3:
         Comp2Mod(y1, y2, y1, y2, z, F);
         break;

      case 4:
         CompMod(z, z, z, F);
         break;

      case 5:
         Comp2Mod(z, y2, z, y2, z, F);
         break;

      case 6:
         Comp2Mod(z, y1, z, y1, z, F);
         break;

      case 7:
         Comp3Mod(z, y1, y2, z, y1, y2, z, F);
         break;
      }

      q1 = q1 >> 1;
      q2 = q2 >> 1;
   }
}
static void GenerateGiantSteps(const ZZ_pEX& f, const ZZ_pEX& h, long l, long verbose) { double t; if (verbose) { cerr << "generating giant steps..."; t = GetTime(); } ZZ_pEXModulus F; build(F, f); ZZ_pEXArgument H; #if 0 double n2 = sqrt(double(F.n)); double n4 = sqrt(n2); double n34 = n2*n4; long sz = long(ceil(n34/sqrt(sqrt(2.0)))); #else long sz = 2*SqrRoot(F.n); #endif build(H, h, F, sz); ZZ_pEX h1; h1 = h; long i; if (!use_files) { GiantStepFile.kill(); GiantStepFile.SetLength(l); } for (i = 1; i <= l-1; i++) { if (use_files) { ofstream s; OpenWrite(s, FileName(ZZ_pEX_stem, "giant", i)); s << h1 << "\n"; s.close(); } else GiantStepFile(i) = h1; CompMod(h1, h1, H, F); if (verbose) cerr << "+"; } if (use_files) { ofstream s; OpenWrite(s, FileName(ZZ_pEX_stem, "giant", i)); s << h1 << "\n"; s.close(); } else GiantStepFile(i) = h1; if (verbose) cerr << (GetTime()-t) << "\n"; }
void DDF(vec_pair_ZZ_pX_long& factors, const ZZ_pX& ff, const ZZ_pX& hh,
         long verbose)
{
   ZZ_pX f = ff;
   ZZ_pX h = hh;

   if (!IsOne(LeadCoeff(f)))
      Error("DDF: bad args");

   factors.SetLength(0);

   if (deg(f) == 0)
      return;

   if (deg(f) == 1) {
      AddFactor(factors, f, 1, verbose);
      return;
   }

   long CompTableSize = 2*SqrRoot(deg(f));

   long GCDTableSize = ZZ_pX_BlockingFactor;

   ZZ_pXModulus F;
   build(F, f);

   ZZ_pXArgument H;
   build(H, h, F, min(CompTableSize, deg(f)));

   long i, d, limit, old_n;
   ZZ_pX g, X;

   vec_ZZ_pX tbl(INIT_SIZE, GCDTableSize);

   SetX(X);

   i = 0;
   g = h;
   d = 1;
   limit = GCDTableSize;

   while (2*d <= deg(f)) {
      old_n = deg(f);
      sub(tbl[i], g, X);
      i++;
      if (i == limit) {
         ProcessTable(f, factors, F, i, tbl, d, verbose);
         i = 0;
      }

      d = d + 1;
      if (2*d <= deg(f)) {
         // we need to go further

         if (deg(f) < old_n) {
            // f has changed

            build(F, f);
            rem(h, h, f);
            rem(g, g, f);
            build(H, h, F, min(CompTableSize, deg(f)));
         }

         CompMod(g, g, H, F);
      }
   }

   ProcessTable(f, factors, F, i, tbl, d-1, verbose);

   if (!IsOne(f)) AddFactor(factors, f, deg(f), verbose);
}
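// Illustrative sketch (assumptions noted): DDF above expects a monic,
// squarefree f together with h = X^p mod f, which PowerXMod provides.  The
// helper name DDFDemo is hypothetical.
#include <NTL/ZZ_pXFactoring.h>
using namespace NTL;

void DDFDemo(const ZZ_pX& f)   // f assumed monic and squarefree
{
   ZZ_pXModulus F;
   build(F, f);

   ZZ_pX h = PowerXMod(ZZ_p::modulus(), F);  // h = X^p mod f

   vec_pair_ZZ_pX_long factors;
   DDF(factors, f, h, /*verbose=*/0);
   // factors[i].a = product of the irreducible factors of degree factors[i].b
}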