void rotate(DoubleCRT& d, long amt,
            const vector< vector< DoubleCRT > >& maskTable)
// rotate d by amt, combining rotations along the individual dimensions
// (generators) of the slot hypercube
{
  const FHEcontext& context = d.getContext();
  const PAlgebra& al = context.zMstar;
  // const PAlgebraModTwo& al2 = context.modTwo;

  long ngens = al.numOfGens();
  long nslots = al.NSlots();

  // reduce the rotation amount modulo the number of slots
  amt = amt % nslots;
  if (amt < 0) amt += nslots;
  if (amt == 0) return;

  long i, v;

  // rotate the last dimension by its coordinate of amt
  i = ngens-1;
  v = coordinate(al, i, amt);
  rotate1D(d, i, v, maskTable);
  if (i == 0) return;

  DoubleCRT mask(context, d.getIndexSet());
  mask.SetZero();
  mask.Add(maskTable[i][v], false);

  DoubleCRT tmp(context, d.getIndexSet());

  // handle the remaining dimensions, using the mask to propagate the "carry":
  // the masked part of d is rotated by v, the rest by v+1
  for (i--; i >= 0; i--) {
    v = coordinate(al, i, amt);

    tmp.SetZero();
    tmp += d;
    tmp *= mask;   // tmp = the masked part of d
    d -= tmp;      // d   = the unmasked part of d

    rotate1D(d, i, v+1, maskTable);
    rotate1D(tmp, i, v, maskTable);
    d += tmp;      // recombine the two parts

    if (i > 0) {   // update the mask for the next dimension
      tmp.SetZero();
      tmp.Add(maskTable[i][v], false);
      tmp.Sub(maskTable[i][v+1], false);
      mask *= tmp;
      mask.Add(maskTable[i][v+1], false);
    }
  }
}
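// ---------------------------------------------------------------------------
// Standalone sketch (compile separately; not HElib code): a plain-integer
// analogue of the mask-and-carry decomposition used by rotate() above.  The
// slots are modeled as a D0 x D1 hypercube with dimension 1 varying fastest
// (the exact slot ordering in HElib is an assumption here).  The carry rule
// is the point: slots whose dimension-1 coordinate wraps around must move one
// extra step along dimension 0, which is the kind of split the masks perform.
#include <algorithm>
#include <cassert>
#include <vector>

// rotate a length-n vector right by k (0 <= k < n)
static std::vector<int> rot(const std::vector<int>& v, long k)
{
  long n = v.size();
  std::vector<int> out(n);
  for (long j = 0; j < n; j++) out[(j + k) % n] = v[j];
  return out;
}

int main()
{
  const long D0 = 3, D1 = 4, n = D0 * D1;
  std::vector<int> slots(n);
  for (long j = 0; j < n; j++) slots[j] = j;  // slot j holds the value j

  long amt = 5;                              // global rotation amount (< n)
  long v1 = amt % D1, v0 = (amt / D1) % D0;  // per-dimension coordinates

  // step 1: rotate every dimension-1 "row" by v1
  std::vector<int> d(n);
  for (long a = 0; a < D0; a++) {
    std::vector<int> row(slots.begin() + a*D1, slots.begin() + (a+1)*D1);
    row = rot(row, v1);
    std::copy(row.begin(), row.end(), d.begin() + a*D1);
  }

  // step 2: slots that wrapped around in dimension 1 (the first v1 entries of
  // each row after step 1) get rotated by v0+1 along dimension 0, the rest by
  // v0 -- the same kind of two-way split that rotate() implements with masks
  std::vector<int> out(n);
  for (long a = 0; a < D0; a++)
    for (long b = 0; b < D1; b++) {
      long shift0 = v0 + (b < v1 ? 1 : 0);
      out[((a + shift0) % D0)*D1 + b] = d[a*D1 + b];
    }

  // check against a direct rotation of the linear slot order
  assert(out == rot(slots, amt));
  return 0;
}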
void rotate1D(DoubleCRT& d, long i, long amt,
              const vector< vector< DoubleCRT > >& maskTable)
// rotate d in dimension i by amt
{
  const FHEcontext& context = d.getContext();
  const PAlgebra& al = context.zMstar;
  // const PAlgebraModTwo& al2 = context.modTwo;

  long ngens = al.numOfGens();
  // long nslots = al.NSlots();

  assert(i >= 0 && i < ngens);

  long ord = al.OrderOf(i);

  // reduce the rotation amount modulo the order of this dimension
  amt = amt % ord;
  if (amt < 0) amt += ord;
  if (amt == 0) return;

  if (al.SameOrd(i)) { // "native" rotation: a single automorphism suffices
    long val = PowerMod(al.ZmStarGen(i), amt, al.M());
    d.automorph(val);
  }
  else { // more expensive "non-native" rotation
    assert(maskTable[i].size() > 0);

    long val = PowerMod(al.ZmStarGen(i), amt, al.M());
    // long ival = InvMod(val, al.M());
    long ival = PowerMod(al.ZmStarGen(i), amt-ord, al.M());

    // split d into the part that wraps around and the rest, apply a
    // different automorphism to each piece, then recombine
    const DoubleCRT& m1 = maskTable[i].at(ord-amt);
    DoubleCRT d1(d);
    d1.Mul(m1, false);  // d1 = m1*d, without mod-up

    d -= d1;            // d = (1-m1)*d
    d.automorph(val);   // shift this part by amt
    d1.automorph(ival); // shift the other part by amt-ord
    d += d1;
  }
}
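// ---------------------------------------------------------------------------
// Standalone sketch (compile separately; not HElib code): a plain-integer
// analogue of the "non-native" branch of rotate1D() above.  The automorphism
// is modeled here as a shift that does not wrap around correctly; in the real
// scheme the wrapped slots are mangled rather than zeroed, but the repair is
// the same idea: mask off the slots that would wrap, shift them by amt-ord
// (a negative shift), shift the rest by amt, and add the two pieces.
#include <cassert>
#include <vector>

// stand-in for the automorphism: shift by k with no wrap-around
static std::vector<int> shiftNoWrap(const std::vector<int>& v, long k)
{
  long n = v.size();
  std::vector<int> out(n, 0);
  for (long j = 0; j < n; j++) {
    long t = j + k;
    if (t >= 0 && t < n) out[t] = v[j];
  }
  return out;
}

int main()
{
  const long ord = 7;   // size of the dimension
  const long amt = 3;   // rotation amount, 0 < amt < ord
  std::vector<int> d(ord);
  for (long j = 0; j < ord; j++) d[j] = j + 1;

  // split d at position ord-amt, the role of maskTable[i].at(ord-amt) above
  std::vector<int> d1(ord, 0), rest(ord, 0);
  for (long j = 0; j < ord; j++)
    (j >= ord - amt ? d1 : rest)[j] = d[j];

  // shift the non-wrapping part by amt and the wrapping part by amt-ord,
  // then recombine -- mirroring d.automorph(val), d1.automorph(ival), d += d1
  std::vector<int> out = shiftNoWrap(rest, amt);
  std::vector<int> wrapped = shiftNoWrap(d1, amt - ord);
  for (long j = 0; j < ord; j++) out[j] += wrapped[j];

  // compare against a genuine rotation by amt
  std::vector<int> ref(ord);
  for (long j = 0; j < ord; j++) ref[(j + amt) % ord] = d[j];
  assert(out == ref);
  return 0;
}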
// Choose random c0,c1 such that c0+s*c1 = p*e for a short e
void RLWE(DoubleCRT& c0, DoubleCRT& c1, const DoubleCRT& s, long p, ZZ* prgSeed)
{
  assert(p > 0); // Can be used with p=1, but we always use it with p>=2

  // choose c1 at random (using prgSeed if not NULL)
  c1.randomize(prgSeed);

  // choose a short error e, set c0 = p*e - c1*s
  c0.sampleGaussian();
  c0 *= p;

  // It is assumed that c0,c1 are defined with respect to the same set of
  // primes, but s may be defined relative to a different set. Either way,
  // the primes of c0,c1 are unchanged.
  DoubleCRT tmp(c1);
  tmp.Mul(s, /*matchIndexSets=*/false); // multiply but don't mod-up
  c0 -= tmp;
}
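// ---------------------------------------------------------------------------
// Standalone sketch (compile separately; not HElib code): a scalar sanity
// check of the identity that RLWE() above arranges, c0 + s*c1 = p*e (mod q),
// with plain integers standing in for ring elements and toy parameters.
#include <cassert>
#include <cstdlib>

int main()
{
  const long q = 1048583;  // toy "ciphertext modulus"
  const long p = 2;        // plaintext modulus
  const long s = 3;        // small "secret key"
  const long e = 5;        // short error

  long c1 = std::rand() % q;            // uniformly random part
  long c0 = ((p*e - c1*s) % q + q) % q; // c0 = p*e - c1*s (mod q)

  // decryption-style check: c0 + s*c1 reduces to p*e, which vanishes mod p
  long dec = (c0 + s*c1) % q;
  assert(dec == p*e);      // holds because p*e is small compared to q
  assert(dec % p == 0);
  return 0;
}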
// Add/subtract a ciphertext part to a ciphertext.
// With negative=true we subtract, otherwise we add.
void Ctxt::addPart(const DoubleCRT& part, const SKHandle& handle,
                   bool matchPrimeSet, bool negative)
{
  FHE_TIMER_START;
  assert(&part.getContext() == &context);

  if (parts.size()==0) { // inserting the 1st part
    primeSet = part.getIndexSet();
    parts.push_back(CtxtPart(part,handle));
    if (negative) parts.back().Negate(); // not thread-safe??
  }
  else { // adding to a ciphertext with existing parts
    if (!(part.getIndexSet() <= primeSet)) {
      // add to the prime-set of *this, if needed (this is expensive)
      if (matchPrimeSet) {
        IndexSet setDiff = part.getIndexSet() / primeSet; // set minus
        for (size_t i=0; i<parts.size(); i++) parts[i].addPrimes(setDiff);
        primeSet.insert(setDiff);
      }
      else // this should never happen
        throw std::logic_error("part has too many primes and matchPrimeSet==false");
    }

    DoubleCRT tmp(context, IndexSet::emptySet());
    const DoubleCRT* ptr = &part;

    // mod-UP the part if needed
    IndexSet s = primeSet / part.getIndexSet();
    if (!empty(s)) { // if we need to mod-UP, do it on a temporary copy
      tmp = part;
      tmp.addPrimesAndScale(s);
      ptr = &tmp;
    }

    long j = getPartIndexByHandle(handle);
    if (j >= 0) { // found a matching part, add them up
      if (negative) parts[j] -= *ptr;
      else          parts[j] += *ptr;
    }
    else { // no matching part found, just append this part
      parts.push_back(CtxtPart(*ptr,handle));
      if (negative) parts.back().Negate(); // not thread-safe??
    }
  }
}
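// ---------------------------------------------------------------------------
// Standalone sketch (compile separately; not HElib code): the prime-set
// bookkeeping in addPart() above, with std::set<long> standing in for
// IndexSet.  In the code above, "/" is set difference and "<=" is the
// subset test.
#include <algorithm>
#include <cassert>
#include <iterator>
#include <set>

static std::set<long> setMinus(const std::set<long>& a, const std::set<long>& b)
{
  std::set<long> out;
  std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                      std::inserter(out, out.begin()));
  return out;
}

int main()
{
  std::set<long> primeSet   = {0, 1, 2, 3}; // primes of the ciphertext
  std::set<long> partPrimes = {1, 2};       // primes of the incoming part

  // part.getIndexSet() <= primeSet : no need to grow the ciphertext
  bool subset = std::includes(primeSet.begin(), primeSet.end(),
                              partPrimes.begin(), partPrimes.end());
  assert(subset);

  // s = primeSet / part.getIndexSet() : the primes that the part is missing,
  // i.e. the primes it must be "mod-UP"ed to before it can be added
  std::set<long> s = setMinus(primeSet, partPrimes);
  assert((s == std::set<long>{0, 3}));
  return 0;
}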
// Add a constant polynomial
void Ctxt::addConstant(const DoubleCRT& dcrt, double size)
{
  // If the size is not given, we use the default value phi(m)*(ptxtSpace/2)^2
  if (size < 0.0) {
    // WARNING: the following line is written to prevent integer overflow
    size = ((double) context.zMStar.getPhiM()) * ptxtSpace*ptxtSpace / 4.0;
  }

  // Scale the constant, then add it to the part that points to one
  long f = (ptxtSpace>2)? rem(context.productOfPrimes(primeSet),ptxtSpace) : 1;
  noiseVar += (size*f)*f;

  IndexSet delta = dcrt.getIndexSet() / primeSet; // set minus
  if (f==1 && empty(delta)) { // just add it
    addPart(dcrt, SKHandle(0,1,0));
    return;
  }

  // work with a local copy
  DoubleCRT tmp = dcrt;
  if (!empty(delta)) tmp.removePrimes(delta);
  if (f!=1)          tmp *= f;
  addPart(tmp, SKHandle(0,1,0));
}
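// ---------------------------------------------------------------------------
// Standalone sketch (compile separately; not HElib code, toy moduli): the
// scaling factor f above is just the product of the ciphertext primes in
// primeSet, reduced modulo the plaintext space.
#include <cassert>

int main()
{
  long primes[] = {17, 19, 23}; // toy "ciphertext primes" in primeSet
  long ptxtSpace = 5;           // toy plaintext modulus (> 2)

  long f = 1;
  for (long q : primes) f = (f * q) % ptxtSpace; // productOfPrimes mod p

  assert(f == 4);               // 17*19*23 = 7429, and 7429 mod 5 = 4
  return 0;
}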
void PowerfulDCRT::dcrtToPowerful(Vec<ZZ>& out, const DoubleCRT& dcrt) const
{
  const IndexSet& set = dcrt.getIndexSet();
  if (empty(set)) { // sanity check
    clear(out);
    return;
  }
  zz_pBak bak; bak.save(); // backup NTL's current modulus

  ZZ product = conv<ZZ>(1L);
  for (long i = set.first(); i <= set.last(); i = set.next(i)) {
    pConvVec[i].restoreModulus();
    zz_pX oneRowPoly;
    long newPrime = dcrt.getOneRow(oneRowPoly,i);

    HyperCube<zz_p> oneRowPwrfl(indexes.shortSig);
    pConvVec[i].polyToPowerful(oneRowPwrfl, oneRowPoly);
    if (i == set.first()) // just copy
      conv(out, oneRowPwrfl.getData());
    else                  // CRT
      intVecCRT(out, product, oneRowPwrfl.getData(), newPrime); // in NumbTh
    product *= newPrime;
  }
}
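// ---------------------------------------------------------------------------
// Standalone sketch (compile separately; not HElib code): a scalar version of
// the incremental CRT step that intVecCRT performs above.  The lifting
// formula below is an assumption about what intVecCRT does, based on how it
// is used here: after each step, out is the residue of the target value
// modulo the product of the primes processed so far.
#include <cassert>

// modular inverse of a mod m (extended Euclid), assuming gcd(a,m)=1
static long invMod(long a, long m)
{
  long t = 0, newt = 1, r = m, newr = a % m;
  while (newr != 0) {
    long q = r / newr;
    long tmp = t - q*newt; t = newt; newt = tmp;
    tmp = r - q*newr;      r = newr; newr = tmp;
  }
  return (t % m + m) % m;
}

int main()
{
  long x = 1234;                   // value to reconstruct from its residues
  long primes[] = {101, 103, 107}; // toy CRT primes

  long out = x % primes[0];        // "just copy" on the first prime
  long product = primes[0];
  for (int i = 1; i < 3; i++) {
    long p = primes[i], r = x % p; // residue coming from the next prime
    // lift: add a multiple of `product` so that out == r (mod p), without
    // disturbing out modulo the primes already processed
    long delta = ((r - out) % p + p) % p;
    out += product * ((delta * invMod(product % p, p)) % p);
    product *= p;
  }
  assert(out == x); // x < 101*103*107, so it is recovered exactly
  return 0;
}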
// Copy only the primes in s \intersect other.getIndexSet()
void DoubleCRT::partialCopy(const DoubleCRT& other, const IndexSet& _s)
{
  if (&context != &other.context)
    Error("DoubleCRT::partialCopy: incompatible contexts");

  // set the primes of *this to s \intersect other.getIndexSet()
  IndexSet s = _s;
  s.retain(other.getIndexSet());
  map.remove(getIndexSet() / s);
  map.insert(s / getIndexSet());

  long phim = context.zMStar.getPhiM();
  for (long i = s.first(); i <= s.last(); i = s.next(i)) {
    vec_long& row = map[i];
    const vec_long& other_row = other.map[i];
    for (long j = 0; j < phim; j++)
      row[j] = other_row[j];
  }
}