// Wrapper around NTL's LLL with transformation matrix: reduces the rows of
// x in-place, stores the unimodular transformation in U, heap-allocates
// *det to receive the squared lattice determinant, and returns the rank.
// The reduction parameter is delta = a/b.
static CYTHON_INLINE long mat_ZZ_LLL_U(struct ZZ **det, mat_ZZ *x, mat_ZZ *U,
                                       long a, long b, long verbose)
{
    *det = new ZZ();
    return LLL(**det, *x, *U, a, b, verbose);
}
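// --- Usage sketch (hypothetical, not part of the wrapper source) ---
// Shows one way the wrapper might be called directly from C++ with NTL's
// default reduction quality a/b = 3/4. The basis entries are made up for
// illustration, and the caller owns the heap-allocated determinant.
#include <NTL/LLL.h>
using namespace NTL;

void lll_u_example()
{
    mat_ZZ B;                  // lattice basis, one generator per row
    B.SetDims(2, 2);
    B(1,1) = 1; B(1,2) = 5;
    B(2,1) = 0; B(2,2) = 3;

    mat_ZZ U;                  // receives the unimodular transformation,
                               // with U * B_old == B_new after the call
    ZZ *det = 0;               // receives the squared lattice determinant
    long rank = mat_ZZ_LLL_U(&det, &B, &U, 3, 4, 0);
    // rank is the dimension of the lattice spanned by the rows of B
    delete det;
}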
template<typename F>
Int ZDependenceSearch
( const Matrix<F>& z,
        Base<F> NSqrt,
        Matrix<F>& B,
        Matrix<F>& U,
  const LLLCtrl<Base<F>>& ctrl )
{
    DEBUG_CSE
    if( z.Width() != 1 )
        LogicError("z was assumed to be a column vector");

    const Int n = z.Height();
    const Int m = n+1;

    // Form the lattice basis [I; NSqrt*z^T], whose short reduced columns
    // reveal integer dependences among the entries of z
    Identity( B, m, n );
    auto bLastRow = B( IR(m-1), ALL );
    Transpose( z, bLastRow );
    Scale( NSqrt, bLastRow );

    Matrix<F> R;
    auto info = LLL( B, U, R, ctrl );

    return info.nullity;
}

#define PROTO(F) \
  template Int ZDependenceSearch \
  ( const Matrix<F>& z, \
    Base<F> NSqrt, \
    Matrix<F>& B, \
    Matrix<F>& U, \
    const LLLCtrl<Base<F>>& ctrl );

#define EL_NO_INT_PROTO
#define EL_ENABLE_DOUBLEDOUBLE
#define EL_ENABLE_QUADDOUBLE
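// --- Usage sketch (hypothetical driver, not from the source) ---
// Searches for an integer relation among 1, phi, and phi^2, where phi is
// the golden ratio, so phi^2 - phi - 1 = 0. Because the top n rows of the
// basis start as the identity, the top n rows of the reduced B (equally,
// the leading columns of U) encode candidate relations; the scale
// NSqrt = 10^6 is an arbitrary choice that penalizes non-relations.
#include <El.hpp>
using namespace El;

int main( int argc, char* argv[] )
{
    Environment env( argc, argv );
    typedef double Real;

    Matrix<Real> z;
    Zeros( z, 3, 1 );
    const Real phi = (1+Sqrt(Real(5)))/2;
    z.Set( 0, 0, Real(1) );
    z.Set( 1, 0, phi );
    z.Set( 2, 0, phi*phi );

    Matrix<Real> B, U;
    LLLCtrl<Real> ctrl;
    const Int nullity = ZDependenceSearch( z, Real(1e6), B, U, ctrl );
    Output( "nullity = ", nullity );
    Print( U, "U" );  // expect a leading column proportional to (-1,-1,1)
    return 0;
}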
// Attempt to express y as an integer linear combination of the columns of B
template<typename F>
bool LatticeCoordinates
( const Matrix<F>& B,
  const Matrix<F>& y,
        Matrix<F>& x )
{
    DEBUG_CSE
    typedef Base<F> Real;
    const Int m = B.Height();
    const Int n = B.Width();
    if( y.Height() != m || y.Width() != 1 )
        LogicError("y should have been an ",m," x 1 vector");

    if( FrobeniusNorm(y) == Real(0) )
    {
        Zeros( x, n, 1 );
        return true;
    }

    Matrix<F> BRed( B );
    Matrix<F> UB, RB;
    auto infoB = LLL( BRed, UB, RB );
    auto MB = BRed( ALL, IR(0,infoB.rank) );

    Matrix<F> A;
    Zeros( A, m, infoB.rank+1 );
    {
        auto AL = A( ALL, IR(0,infoB.rank) );
        auto aR = A( ALL, IR(infoB.rank) );
        AL = MB;
        aR = y;
    }
    // Reduce A in-place
    Matrix<F> UA, RA;
    auto infoA = LLL( A, UA, RA );
    if( infoA.nullity != 1 )
        return false;

    // Solve for x_M such that M_B x_M = y
    // NOTE: The last column of U_A should hold the coordinates of the single
    //       member of the null-space of (the original) A
    Matrix<F> xM;
    xM = UA( IR(0,infoA.rank), IR(infoB.rank) );
    const F gamma = UA(infoA.rank,infoB.rank);
    if( Abs(gamma) != Real(1) )
        LogicError("Invalid member of null space");
    else
        xM *= -Conj(gamma);

    // Map xM back to the original coordinates using the portion of the
    // unimodular transformation of B (U_B) which produced the image of B
    auto UBM = UB( ALL, IR(0,infoB.rank) );
    Zeros( x, n, 1 );
    Gemv( NORMAL, F(1), UBM, xM, F(0), x );

    /*
    if( infoB.nullity != 0 )
    {
        Matrix<F> C;
        Zeros( C, m, infoB.nullity+1 );
        auto cL = C( ALL, IR(infoB.rank-1) );
        auto CR = C( ALL, IR(infoB.rank,END) );

        // Reduce the kernel of B
        CR = UB( ALL, IR(infoB.rank,END) );
        LLL( CR );

        // Attempt to reduce the (reduced) kernel out of the coordinates
        // TODO: Which column to grab from the result?!?
        cL = x;
        LLL( C );
        x = cL;
    }
    */

    return true;
}
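// --- Usage sketch (hypothetical, not from the source) ---
// Recovers the integer coordinates of a vector known to lie in the lattice
// spanned by the columns of B. With y = B*(3,-2)^T, LatticeCoordinates
// should succeed and return x = (3,-2)^T; the matrix entries are made up.
#include <El.hpp>
using namespace El;

int main( int argc, char* argv[] )
{
    Environment env( argc, argv );
    typedef double F;

    Matrix<F> B;
    Zeros( B, 2, 2 );
    B.Set(0,0,F(2)); B.Set(0,1,F(1));
    B.Set(1,0,F(0)); B.Set(1,1,F(3));

    Matrix<F> xTrue;
    Zeros( xTrue, 2, 1 );
    xTrue.Set(0,0,F(3)); xTrue.Set(1,0,F(-2));

    Matrix<F> y;
    Gemv( NORMAL, F(1), B, xTrue, y );  // y = B * xTrue

    Matrix<F> x;
    const bool inLattice = LatticeCoordinates( B, y, x );
    Output( "inLattice = ", inLattice );
    Print( x, "x" );
    return 0;
}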
long LatticeSolve(vec_ZZ& x, const mat_ZZ& A, const vec_ZZ& y, long reduce)
{
   long n = A.NumRows();
   long m = A.NumCols();

   if (y.length() != m)
      Error("LatticeSolve: dimension mismatch");

   if (reduce < 0 || reduce > 2)
      Error("LatticeSolve: bad reduce parameter");

   if (IsZero(y)) {
      x.SetLength(n);
      clear(x);
      return 1;
   }

   mat_ZZ A1, U1;
   ZZ det2;
   long im_rank, ker_rank;

   // Compute the image of A; the last im_rank rows of A1 form a basis of
   // the image, and the first ker_rank rows of U1 span the kernel
   A1 = A;
   im_rank = image(det2, A1, U1);
   ker_rank = n - im_rank;

   // Append y to the image basis and recompute the image: y lies in the
   // lattice iff the rank does not grow and the resulting kernel vector
   // has a unit coefficient on y
   mat_ZZ A2, U2;
   long new_rank;
   long i;

   A2.SetDims(im_rank + 1, m);
   for (i = 1; i <= im_rank; i++)
      A2(i) = A1(ker_rank + i);
   A2(im_rank + 1) = y;

   new_rank = image(det2, A2, U2);

   if (new_rank != im_rank ||
       (U2(1)(im_rank+1) != 1 && U2(1)(im_rank+1) != -1))
      return 0;

   // Extract the coefficients expressing y in the image basis
   vec_ZZ x1;
   x1.SetLength(im_rank);
   for (i = 1; i <= im_rank; i++)
      x1(i) = U2(1)(i);

   if (U2(1)(im_rank+1) == 1)
      negate(x1, x1);

   // Pull the solution back to coordinates relative to the rows of A
   vec_ZZ x2, tmp;
   x2.SetLength(n);
   clear(x2);
   tmp.SetLength(n);
   for (i = 1; i <= im_rank; i++) {
      mul(tmp, U1(ker_rank+i), x1(i));
      add(x2, x2, tmp);
   }

   if (reduce == 0) {
      x = x2;
      return 1;
   }
   else if (reduce == 1) {
      // Reduce the solution modulo the kernel via an image computation
      U1.SetDims(ker_rank+1, n);
      U1(ker_rank+1) = x2;
      image(det2, U1);

      x = U1(ker_rank + 1);
      return 1;
   }
   else if (reduce == 2) {
      // LLL-reduce the kernel first to obtain a shorter representative
      U1.SetDims(ker_rank, n);
      LLL(det2, U1);

      U1.SetDims(ker_rank+1, n);
      U1(ker_rank+1) = x2;
      image(det2, U1);

      x = U1(ker_rank + 1);
      return 1;
   }

   return 0;
}
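// --- Usage sketch (hypothetical driver, not part of NTL) ---
// Solves x*A = y over the integers, where the rows of A generate the
// lattice. Here y = 2*row1 - row2 = (2,3,2), so x = (2,-1) is a solution;
// with reduce == 2 the answer is reduced against an LLL-reduced kernel.
#include <NTL/LLL.h>
#include <iostream>
using namespace NTL;

int main()
{
    mat_ZZ A;
    A.SetDims(2, 3);
    A(1,1) = 1; A(1,2) = 2; A(1,3) = 3;
    A(2,1) = 0; A(2,2) = 1; A(2,3) = 4;

    vec_ZZ y;
    y.SetLength(3);
    y(1) = 2; y(2) = 3; y(3) = 2;

    vec_ZZ x;
    if (LatticeSolve(x, A, y, 2))
        std::cout << "x = " << x << "\n";
    else
        std::cout << "y is not in the row lattice of A\n";
    return 0;
}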