Derr = build_centered_binomial_law(6)
modulus = 11
saving_results = True
results_filename = "results.csv"

try:
    N_tests = int(sys.argv[1])

    vv += v(randint(qvec_donttouch, d - 1))
    return vv
def qrandv():
    vv = randint(1, q-1) * v(randint(qvec_donttouch, d - 1))
    vv -= randint(1, q-1) * v(randint(qvec_donttouch, d - 1))
    vv += randint(1, q-1) * v(randint(qvec_donttouch, d - 1))
    vv -= randint(1, q-1) * v(randint(qvec_donttouch, d - 1))
    vv += randint(1, q-1) * v(randint(qvec_donttouch, d - 1))
    return vv
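# Unlike randv(), each summand above is scaled by a random coefficient in
# [1, q-1], which is nonzero mod q (and invertible whenever q is prime), so
# the pivot normalization performed by integrate_q_modular_hint() applies to
# the resulting hint vector.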
def one_experiment(id, aargs):
    (N_hints, T_hints) = aargs

        verbosity=0)

    for j in range(N_hints):
        vv = randv()
        print(vv)
        if T_hints == "Perfect":
            dbdd.integrate_perfect_hint(vv, dbdd.leak(vv), estimate=False)
            dbdd_p.integrate_perfect_hint(vv, dbdd_p.leak(vv), estimate=False)
        if T_hints == "Modular":
            dbdd.integrate_modular_hint(vv, dbdd.leak(vv) % modulus,
                                        modulus, smooth=True, estimate=False)
            dbdd_p.integrate_modular_hint(vv, dbdd_p.leak(vv) % modulus,
                                          modulus, smooth=True, estimate=False)
        if T_hints == "Q-Modular":
            vv = qrandv()
            dbdd.integrate_q_modular_hint(vv, dbdd.leak(vv) % q,
                                          q, estimate=False)
            dbdd_p.integrate_q_modular_hint(vv, dbdd_p.leak(vv) % q,
                                            q, estimate=False)
    avg = RR(sum(data)) / N_tests
    var = abs(RR(sum([r**2 for r in data])) / N_tests - avg**2)
    return (avg, var)
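    # Worked check: data = [2, 4] with N_tests = 2 gives avg = 3
    # and var = (4 + 16)/2 - 3**2 = 1.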
def save_results(*args):
    with open(results_filename, 'a') as _file:
        _file.write(';'.join([str(arg) for arg in args]) + '\n')
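# For example, save_results("Perfect", 10, 50) appends the line
# "Perfect;10;50" to results.csv (semicolon-separated, one row per call).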
def validation_prediction(N_tests, N_hints, T_hints):
    # Estimation

    print("Time:", datetime.datetime.now() - ttt)
    if saving_results:
        save_results(
            T_hints, N_hints, N_tests, datetime.datetime.now() - ttt,
            beta_real, beta_pred_full, beta_pred_light,
            vbeta_real, vbeta_pred_full, vbeta_pred_light,
        )
    return beta_pred_full
d = m + n

print("\n \n None")
print("hints,\t real,\t pred_full, \t pred_light,")
beta_pred = validation_prediction(N_tests, 0, "None")
| print("\n \n Perfect") | |||||
| print("hints,\t real,\t pred_full, \t pred_light,") | |||||
| for h in range(1, 100): | |||||
| beta_pred = validation_prediction(N_tests, h, "Perfect") # Line 0 | |||||
| if beta_pred < 3: | |||||
| break | |||||
| print("\n \n Modular") | |||||
| print("hints,\t real,\t pred_full, \t pred_light,") | |||||
| for h in range(2, 200, 2): | |||||
| beta_pred = validation_prediction(N_tests, h, "Modular") # Line 0 | |||||
| if beta_pred < 3: | |||||
| break | |||||
| print("\n \n Q-Modular") | |||||
| print("hints,\t real,\t pred_full, \t pred_light,") | |||||
| for h in range(1, 100): | |||||
| beta_pred = validation_prediction(N_tests, h, "Q-Modular") # Line 0 | |||||
| if beta_pred < 3: | |||||
| break | |||||
| print("\n \n Approx") | |||||
| print("hints,\t real,\t pred_full, \t pred_light,") | |||||
| for h in range(4, 200, 4): | |||||
| beta_pred = validation_prediction(N_tests, h, "Approx") # Line 0 | |||||
| if beta_pred < 3: | |||||
| break | |||||
| for T_hints in ["Perfect", "Modular", "Q-Modular", "Approx"]: | |||||
| hint_range = None | |||||
| if T_hints == "Perfect": | |||||
| hint_range = range(1, 100) | |||||
| elif T_hints == "Modular": | |||||
| hint_range = range(2, 200, 2) | |||||
| elif T_hints == "Q-Modular": | |||||
| hint_range = range(1, 100) | |||||
| elif T_hints == "Approx": | |||||
| hint_range = range(4, 200, 4) | |||||
| print(f"\n \n {T_hints}") | |||||
| print("hints,\t real,\t pred_full, \t pred_light,") | |||||
| for h in hint_range: | |||||
| beta_pred = validation_prediction(N_tests, h, T_hints) # Line 0 | |||||
| if beta_pred < 3: | |||||
| break |
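# Illustrative invocation (the script name is an assumption; N_tests comes
# from argv[1]):
#   sage validation.sage 50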
        self.D = kwargs.get('D', None)  # The dual basis (only B or D is active)
        assert self.D.T * self.B == identity_matrix(B.nrows())
        self._dim = B.nrows()
        #self._keep_basis = False
        self._maintains_basis = True
        self.S = S
        self.PP = 0 * S  # Span of the projections so far (orthonormal)
        self.mu = mu
        self.float_type = float_type
        self.estimate_attack(silent=True)
        self.Pi = identity_matrix(self._dim)     # Reduction matrix
        self.Gamma = identity_matrix(self._dim)  # Substitution matrix
        self._restoration_instructions = []

        else:
            self._restoration_instructions.append((type, Gamma, data))
    #@not_after_projections
    #@hint_integration_wrapper(force=True, requires=["dual"], invalidates=["primal"])
    #def reduce_dimension(self, Gamma, normalization_matrix=None):
    #    if normalization_matrix is None:
    #        normalization_matrix = (Gamma.T * Gamma).inverse()
    #    normalized_Gamma = Gamma * normalization_matrix
    #
    #    self.D = self.D * Gamma
    #    self.mu = self.mu * normalized_Gamma
    #    self.S = normalized_Gamma.T * self.S * normalized_Gamma
    #    self.PP = 0 * self.S
    #
    #    #self.Pi *= normalized_Gamma
    #    self.add_restoration_instruction(SUBSTITUTION, Gamma)
    @not_after_projections
    @hint_integration_wrapper(force=True, requires=["dual"],
                              invalidates=["primal"])

        VS = V * self.S
        den = scal(VS * V.T)
        if den == 0:
            # The covariance along v already vanishes, so the hint is redundant.
            raise NotImplementedError('Normally, useless condition')
            #raise RejectedHint("Redundant hint")

        self.D = lattice_orthogonal_section(self.D, V, self._maintains_basis)
        self._dim -= 1
        num = self.mu * V.T

        if not smooth:
            raise NotImplementedError()
        self.D = lattice_modular_intersection(self.D, V, k, self._maintains_basis)
    @not_after_projections
    @hint_integration_wrapper(force=True, requires=["dual"], invalidates=["primal"])
    def integrate_q_modular_hint(self, v, l, q):
        V = concatenate(v, -l)
        V = self.get_reduced_hint_vector(V)
        V = V % q
        if V == 0:
            raise RejectedHint("Redundant hint")
        # Warning: "nonzero" is meant over F_q here, which is why V was
        # reduced mod q above.
        _, pivot = V.nonzero_positions()[0]
        V = V * int(mod(V[0, pivot], q)**(-1)) % q   # normalize so that V[pivot] = 1 mod q
        V = V.apply_map(lambda x: recenter(x, q))
        W = q * canonical_vec(self._dim, pivot)
        Gamma = build_standard_substitution_matrix(V, pivot=pivot)
        assert scal(V * W.T)/q == 1, f'<V, W> = {scal(V * W.T)/q} != 1'

        self.D = lattice_modular_intersection(self.D, V, q, self._maintains_basis)
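        # Worked toy example of the normalization above (illustrative): with
        # q = 11 and V = (0, 4, 7), the pivot is position 1; multiplying by
        # 4^(-1) = 3 mod 11 gives (0, 1, 21) = (0, 1, 10) mod 11, and
        # recentering maps 10 to -1, so V becomes (0, 1, -1).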
        # So V/q is a dual vector, and we hope it is a primitive one
        ## Let us build the reduction matrix \Pi

        self.projections += 1
        PV = identity_matrix(V.ncols()) - projection_matrix(V)
        try:
            self.B = lattice_project_against(self.B, V, self._maintains_basis)
            self._dim -= 1
        except ValueError:
            raise InvalidHint("Not in Λ")
def remove_linear_dependencies(B, dim=None):
    nrows = B.nrows()
    if dim is None or nrows > dim:
        # Determine the number of dependencies
        K, r = None, None
        if dim is None:
            # Assume the cost of left_kernel is negligible compared to LLL
            K = B.left_kernel().basis_matrix()
            r = K.dimensions()[0]
        else:
            r = nrows - dim

        if r == 1:
            # Find a linear dependency
            if K is None:
                K = B.left_kernel().basis_matrix()
            assert K.dimensions()[0] == 1
            combinaison = K[0]
            # Detect the redundant vector
            pivot, pivot_value = None, None
            for ind, value in enumerate(combinaison):
                if abs(value) > 0 and (pivot is None or abs(value) < abs(pivot_value)):
                    pivot, pivot_value = ind, value
            B = B[[i for i in range(B.dimensions()[0]) if i != pivot]]
        else:
            B = B.LLL()
            B = B[r:]
    return B
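# Illustrative doctest-style sketch (assuming B is an integer Sage matrix):
#
#   sage: B = matrix(ZZ, [[1, 0], [0, 1], [1, 1]])   # row 2 = row 0 + row 1
#   sage: remove_linear_dependencies(B).nrows()      # one dependency removed
#   2
#   sage: remove_linear_dependencies(B, dim=2).nrows()
#   2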
def lattice_orthogonal_section(D, V, maintains_basis=True):
    """
    Compute the intersection of the lattice L(B)
    with the hyperplane orthogonal to Span(V).
    """

    PV = projection_matrix(V)
    D = D - D * PV
    # Eliminate linear dependencies
    if maintains_basis:
        D = remove_linear_dependencies(D)
    # Go back to the primal
    return D
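# Illustrative sketch (assuming projection_matrix and the body shown above):
#
#   sage: D = identity_matrix(QQ, 2)        # dual basis of ZZ^2
#   sage: V = matrix(QQ, [[1, 0]])
#   sage: lattice_orthogonal_section(D, V)  # only the e2 direction survives
#   [0 1]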
def lattice_project_against(B, V, maintains_basis=True):
    """
    Compute the projection of the lattice L(B) orthogonally to Span(V). All vectors of V
    (or at least their projections on Span(B)) must belong to L(B).
    """

    B = B - B * PV
    # Eliminate linear dependencies
    if maintains_basis:
        B = remove_linear_dependencies(B)
    # Go back to the primal
    return B
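# Illustrative sketch (same assumptions; V = (1, 1) belongs to ZZ^2 as the
# docstring requires):
#
#   sage: B = identity_matrix(QQ, 2)
#   sage: V = matrix(QQ, [[1, 1]])
#   sage: lattice_project_against(B, V)
#   [-1/2 1/2]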
def lattice_modular_intersection(D, V, k, maintains_basis=True):
    """
    Compute the intersection of the lattice L(B) with
    the lattice {x | x*V = 0 mod k}
    """

    # Append the equation to the dual
    V /= k
    D = D.stack(V)
    # Eliminate linear dependencies
    if maintains_basis:
        D = remove_linear_dependencies(D)
    # Go back to the primal
    return D
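# Illustrative sketch: intersecting the dual of ZZ^2 with the equation
# x1 + x2 = 0 mod 2 appends V/2 = (1/2, 1/2) to the dual basis:
#
#   sage: D = identity_matrix(QQ, 2)
#   sage: V = matrix(QQ, [[1, 1]])
#   sage: lattice_modular_intersection(D, V, 2)
#   [  0   1]
#   [1/2 1/2]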