from fpylll import *
from fpylll.algorithms.bkz2 import BKZReduction

load("../framework/load_strategies.sage")
load("../framework/DBDD_generic.sage")
load("../framework/proba_utils.sage")

# Restoration Type
SUBSTITUTION = 0
QMODULUS_SUBSTITUTION = 1


class DBDD(DBDD_generic):
    """
    This class defines all the elements of a DBDD instance, together with
    the full basis computations.
    """

    def __init__(self, B, S, mu, u=None, verbosity=1, float_type="ld", **kwargs):
        """Constructor that builds a DBDD instance from a lattice, mean,
        covariance and a target.
        :min_dim: Number of coordinates to recover for the problem to be considered solved
        :B: Basis of the lattice
        :S: The covariance matrix (Sigma) of the uSVP solution
        :mu: The expectation of the uSVP solution
        :u: The unique vector to be found (optional, for verification purposes)
        :float_type: Floating point type to use in FPLLL ("d", "ld", "dd", "qd")
        """
        self.verbosity = verbosity
        self.B = B  # The lattice basis
        self.D = kwargs.get('D', None)  # The dual basis (only B or D is active)
        assert self.D.T * self.B == identity_matrix(B.nrows())
        self._dim = B.nrows()
        self._maintains_basis = False
        self.S = S
        self.PP = 0 * S  # Span of the projections so far (orthonormal)
        self.mu = mu
        self.u = u
        self.u_original = u
        self.expected_length = RR(sqrt(self.S.trace()) + 1)
        self.projections = 0
        self.save = {"save": None}
        self.float_type = float_type
        self.estimate_attack(silent=True)

        self.Pi = identity_matrix(self._dim)     # Reduction matrix
        self.Gamma = identity_matrix(self._dim)  # Substitution matrix
        self._restoration_instructions = []

    def dim(self):
        return self._dim

    def S_diag(self):
        return [self.S[i, i] for i in range(self.S.nrows())]

    @without_dependencies
    def volumes(self):
        if self.B is not None:
            Bvol = logdet(self.B * self.B.T) / 2
        else:
            Bvol = -logdet(self.D * self.D.T) / 2

        S = self.S + self.mu.T * self.mu
        Svol = degen_logdet(S)
        dvol = Bvol - Svol / 2.
        return (Bvol, Svol, dvol)

    @without_dependencies
    def test_primitive_dual(self, V, action):
        if self.B is None:
            self.B = dual_basis(self.D)

        W = V * self.B.T
        den = lcm([x.denominator() for x in W[0]])
        num = gcd([x for x in W[0] * den])
        assert den == 1

        if num == 1:
            return True
        if action == "warn":
            logging("non-primitive (factor %d)." % num,
                    style="WARNING", newline=False)
            return True
        elif action == "reject":
            raise RejectedHint("non-primitive (factor %d)." % num)

        raise InvalidHint("non-primitive (factor %d)." % num)
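    # Bookkeeping of the dimension-reducing hints (summary of the methods
    # below): self.Gamma accumulates the substitution matrices, so a hint
    # vector expressed in the original coordinates is mapped into the current
    # reduced space by V * self.Gamma (see get_reduced_hint_vector), while
    # self.Pi accumulates the reduction maps used to bring full-dimensional
    # vectors (e.g. short-vector hints) down to the reduced space.  attack()
    # replays the recorded _restoration_instructions in reverse order to lift
    # the solution back to the original coordinates.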
    def get_reduced_hint_vector(self, V):
        V = V * self.Gamma
        if V == 0:
            raise RejectedHint("Redundant hint")
        return V

    def add_restoration_instruction(self, type, Gamma, **data):
        self.Gamma *= Gamma
        last_instruction = (self._restoration_instructions[-1]
                            if len(self._restoration_instructions) > 0
                            else (None, None, None))
        if type == SUBSTITUTION and last_instruction is not None \
           and last_instruction[0] == SUBSTITUTION:
            self._restoration_instructions[-1] = \
                (SUBSTITUTION, last_instruction[1] * Gamma, {})
        else:
            self._restoration_instructions.append((type, Gamma, data))

    @not_after_projections
    @hint_integration_wrapper(force=True, requires=["dual"],
                              invalidates=["primal"])
    def integrate_perfect_hint(self, v, l, reduction=False):
        V = concatenate(v, -l)
        V = self.get_reduced_hint_vector(V)
        VS = V * self.S
        den = scal(VS * V.T)

        self.D = lattice_orthogonal_section(self.D, V, self._maintains_basis)
        self._dim -= 1

        num = self.mu * V.T
        self.mu -= (num / den) * VS
        num = VS.T * VS
        self.S -= num / den

        ## Dimension Reduction
        Gamma, data = build_standard_substitution_matrix(V, output_data=True)
        normalization_matrix = data['normalization_matrix']
        normalized_Gamma = Gamma * normalization_matrix

        self.D = self.D * Gamma
        self.mu = self.mu * normalized_Gamma
        self.S = normalized_Gamma.T * self.S * normalized_Gamma
        self.PP = 0 * self.S

        self.Pi = normalized_Gamma.T * self.Pi
        self.add_restoration_instruction(SUBSTITUTION, Gamma)

    @not_after_projections
    @hint_integration_wrapper(force=True, requires=["dual"],
                              invalidates=["primal"])
    def integrate_modular_hint(self, v, l, k, smooth=True):
        V = concatenate(v, -l)
        V = self.get_reduced_hint_vector(V)
        VS = V * self.S
        den = scal(VS * V.T)
        if den == 0:
            raise NotImplementedError('Normally, useless condition')
            # raise RejectedHint("Redundant hint")

        if not smooth:
            raise NotImplementedError()

        self.D = lattice_modular_intersection(self.D, V, k,
                                              self._maintains_basis)

    @not_after_projections
    @hint_integration_wrapper(force=True, requires=["dual"],
                              invalidates=["primal"])
    def integrate_q_modular_hint(self, v, l, q):
        V = concatenate(v, -l)
        V = self.get_reduced_hint_vector(V)

        V = V % q
        if V == 0:
            raise RejectedHint("Redundant hint")

        # Warning: the pivot entry is non-zero over F_q; this is why
        # "V = V % q" is applied first.
        _, pivot = V.nonzero_positions()[0]
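        # Illustrative note (the numeric values are hypothetical, and recenter
        # is assumed to map each entry to its smallest-magnitude representative
        # modulo q): the next two lines rescale V so that its pivot entry
        # becomes 1 modulo q, then recenter every entry.  For instance, with
        # q = 11 and V = (0, 4, 7), pivot = 1: multiplying by 4^-1 = 3 mod 11
        # gives (0, 1, 10), which recenters to (0, 1, -1).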
        V = V * int(mod(V[0, pivot], q)**(-1)) % q
        V = V.apply_map(lambda x: recenter(x, q))
        W = q * canonical_vec(self._dim, pivot)
        Gamma = build_standard_substitution_matrix(V, pivot=pivot)

        assert scal(V * W.T) / q == 1, f'<V, W>/q = {scal(V * W.T) / q} != 1'

        self.D = lattice_modular_intersection(self.D, V, q,
                                              self._maintains_basis)
        # So, V/q is a dual vector, and we hope it is a primitive one.

        ## Let's build the reduction matrix \Pi
        # B_ = block_matrix(QQ, [[Gamma.T], [W]])
        # Pi = dual_basis(B_)[:self._dim-1]
        dim = self._dim
        Pi = block_matrix(QQ, [
            [identity_matrix(pivot),
             zero_matrix(pivot, 1),
             zero_matrix(pivot, dim - pivot - 1)],
            [zero_matrix(dim - pivot - 1, pivot),
             zero_matrix(dim - pivot - 1, 1),
             identity_matrix(dim - pivot - 1)],
        ])
        assert Pi.dimensions() == (dim - 1, dim), \
            r'Error in the dimension of \Pi'
        assert Pi * Gamma == identity_matrix(dim - 1), \
            r'Error in the calculation of \Pi: \Pi\Gamma != I'
        assert Pi * W.T == 0, \
            r'Error in the calculation of \Pi: \Pi W != 0'

        self._dim -= 1
        # Even if we had a formula for the primal basis, it would be incorrect
        # here because "lattice_modular_intersection" is not applied to self.B.
        if self.D is not None:
            self.D = self.D * Gamma
        self.mu = self.mu * Pi.T
        self.S = Pi * self.S * Pi.T
        self.PP = 0 * self.S

        self.Pi = Pi * self.Pi
        self.add_restoration_instruction(QMODULUS_SUBSTITUTION, Gamma,
                                         w=W, v=V, q=q)

    @not_after_projections
    @hint_integration_wrapper(force=True)
    def integrate_approx_hint(self, v, l, variance, aposteriori=False):
        if variance < 0:
            raise InvalidHint("variance must be non-negative !")
        if variance == 0:
            raise InvalidHint("variance=0 : must use perfect hint !")

        if not aposteriori:
            V = concatenate(v, -l)
            V = self.get_reduced_hint_vector(V)
            VS = V * self.S
            d = scal(VS * V.T)
            center = scal(self.mu * V.T)
            coeff = (- center / (variance + d))
            self.mu += coeff * VS
            self.S -= (1 / (variance + d) * VS.T) * VS
        else:
            V = concatenate(v, 0)
            V = self.get_reduced_hint_vector(V)
            VS = V * self.S
            # Test whether V is an eigenvector of Sigma
            # if not scal(VS * V.T)**2 == scal(VS * VS.T) * scal(V * V.T):
            #     raise RejectedHint("Not an eigenvector of Σ,")
            if not scal(VS * VS.T):
                raise RejectedHint("0-Eigenvector of Σ forbidden,")
            # New formula
            den = scal(VS * V.T)
            self.mu += ((l - scal(self.mu * V.T)) / den) * VS
            self.S += (((variance - den) / den**2) * VS.T) * VS

    @not_after_projections
    @hint_integration_wrapper()
    def integrate_approx_hint_fulldim(self, center,
                                      covariance, aposteriori=False):
        # Using http://www.cs.columbia.edu/~liulp/pdf/linear_normal_dist.pdf
        # with A = Id
        if not aposteriori:
            d = self.S.nrows() - 1
            if self.S.rank() != d or covariance.rank() != d:
                raise InvalidHint("Covariances not full dimensional")

            zero = vec(d * [0])
            F = (self.S + block4(covariance, zero.T, zero, vec([1]))).inverse()
            F[-1, -1] = 0
            C = concatenate(center, 1)
            self.mu += ((C - self.mu) * F) * self.S
            self.S -= self.S * F * self.S
        else:
            raise NotImplementedError()

    @hint_integration_wrapper(force=False, requires=["primal"],
                              invalidates=["dual"])
    def integrate_short_vector_hint(self, v):
        V = concatenate(v, 0)

        if V.dimensions()[1] > self.Pi.nrows():  # Cannot use self._dim here
            V = V * self.Pi.T  # Reduction
        assert V.dimensions()[1] == self.Pi.nrows()

        V -= V * self.PP
        if scal((V * self.S) * V.T) == 0:
            raise InvalidHint("Projects to 0")

        self.projections += 1
        PV = identity_matrix(V.ncols()) - projection_matrix(V)
        try:
            self.B = lattice_project_against(self.B, V, self._maintains_basis)
            self._dim -= 1
        except ValueError:
            raise InvalidHint("Not in Λ")

        self.mu = self.mu * PV
        self.u = self.u * (self.Pi.T * PV * self.Gamma.T)
        self.S = PV.T * self.S * PV
        self.PP += V.T * (V / scal(V * V.T))
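    # Illustrative sketch of the projection step above (it assumes that the
    # helper projection_matrix(V) returns the orthogonal projector
    # V.T * V / scal(V * V.T), consistent with the update of self.PP): for
    # V = (1, 1, 0),
    #
    #     projection_matrix(V) = [[1/2, 1/2, 0],
    #                             [1/2, 1/2, 0],
    #                             [ 0 ,  0 , 0]]
    #
    # and PV = identity_matrix(3) - projection_matrix(V) removes the component
    # along V from mu, S and u, while self.PP accumulates the projectors of
    # all short-vector hints so that later hints are first reduced by
    # V -= V * self.PP.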
    @without_dependencies
    def attack(self, beta_max=None, beta_pre=None, randomize=False, tours=1):
        """
        Run the lattice reduction to solve the DBDD instance.
        Return the (blocksize, solution) of a successful attack,
        or (None, None) on failure.
        """
        self.logging(" Running the Attack ", style="HEADER")

        if self.B is None:
            self.B = dual_basis(self.D)

        # Apply adequate distortion
        denom = lcm([x.denominator() for x in self.B.list()])
        B = self.B
        d = B.nrows()
        S = self.S + self.mu.T * self.mu
        L, Linv = square_root_inverse_degen(S, self.B)
        M = B * Linv

        # Make the matrix integral
        denom = lcm([x.denominator() for x in M.list()])
        M = matrix(ZZ, M * denom)

        # Build the BKZ object
        G = None
        try:
            G = GSO.Mat(IntegerMatrix.from_matrix(M),
                        float_type=self.float_type)
        except ValueError:
            G = GSO.Mat(IntegerMatrix.from_matrix(M))
        bkz = BKZReduction(G)
        if randomize:
            bkz.lll_obj()
            bkz.randomize_block(0, d, density=d / 4)
            bkz.lll_obj()

        u_den = lcm([x.denominator() for x in self.u.list()])

        if beta_pre is not None:
            self.logging("\rRunning BKZ-%d (until convergence)" % beta_pre,
                         newline=False)
            bkz.lll_obj()
            par = BKZ.Param(block_size=beta_pre, strategies=strategies)
            bkz(par)
            bkz.lll_obj()
        else:
            beta_pre = 2

        # Run BKZ tours with progressively increasing blocksizes
        for beta in range(beta_pre, B.nrows() + 1):
            self.logging("\rRunning BKZ-%d" % beta, newline=False)
            if beta_max is not None:
                if beta > beta_max:
                    self.logging("Failure ... (reached beta_max)",
                                 style="FAILURE")
                    self.logging("")
                    return None, None

            if beta == 2:
                bkz.lll_obj()
            else:
                par = BKZ.Param(block_size=beta,
                                strategies=strategies, max_loops=tours)
                bkz(par)
                bkz.lll_obj()

            # Recover the tentative solution,
            # undo the distortion and scaling, and test it
            v = vec(bkz.A[0])
            v = u_den * v * L / denom
            solution = matrix(ZZ, v.apply_map(round)) / u_den

            self.reduced_solution = solution

            # Undo the dimension-reducing substitutions, most recent first
            for instruc_type, Gamma, data in self._restoration_instructions[::-1]:
                solution = solution * Gamma.T
                if instruc_type == SUBSTITUTION:
                    pass
                elif instruc_type == QMODULUS_SUBSTITUTION:
                    w, v, q = data['w'], data['v'], data['q']
                    w_norm2 = w[0].inner_product(w[0])
                    projection_of_s = solution - \
                        w * (solution[0].inner_product(w[0]) / w_norm2)
                    solution += w * \
                        round(projection_of_s[0].inner_product(v[0]) / q)
                else:
                    w, func = data['w'], data['func']
                    w_norm2 = w[0].inner_product(w[0])
                    projection_of_s = solution - \
                        w * (solution[0].inner_product(w[0]) / w_norm2)
                    solution += w * func(projection_of_s)

            if not self.check_solution(solution):
                continue

            self.logging("Success !", style="SUCCESS")
            self.logging("")
            return beta, solution

        self.logging("Failure ...", style="FAILURE")
        self.logging("")
        return None, None
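# Usage sketch (illustrative only: the instance construction and the hint
# values below are hypothetical, and would normally come from the surrounding
# framework):
#
#     dbdd = DBDD(B, S, mu, u)
#     dbdd.integrate_perfect_hint(v0, l0)           # <v0, s> = l0 exactly
#     dbdd.integrate_modular_hint(v1, l1, k)        # <v1, s> = l1 mod k
#     dbdd.integrate_approx_hint(v2, l2, variance)  # <v2, s> ~ l2
#     dbdd.integrate_short_vector_hint(c)           # c: short vector of the lattice
#     beta, solution = dbdd.attack(beta_max=60)
#
# Each integration updates (B or D, mu, S); the dimension-reducing hints also
# record a restoration instruction, which attack() replays in reverse order
# to lift the reduced solution back to the original coordinates.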