- from numpy.linalg import inv as np_inv
- from numpy.linalg import slogdet   # needed by logdet() below
- from numpy.linalg import cholesky  # assumed to be the cholesky() used in square_root_inverse_degen()
- from numpy import array
- import numpy as np
-
-
- def dual_basis(B):
- """
- Compute the dual basis of B
- """
- return B.pseudoinverse().transpose()
-
-
- def projection_matrix(A):
- """
- Construct the matrix of the orthogonal projection onto Span(A) (the row span of A)
- """
- S = A * A.T
- return A.T * S.inverse() * A
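-
- # Illustrative sanity check (added for exposition, not part of the original
- # framework): the helper name and the toy matrix are arbitrary. Run it by hand,
- # e.g. from a Sage shell after loading this file.
- def _demo_projection_matrix():
-     A = matrix(QQ, [[1, 0, 0], [0, 1, 1]])
-     P = projection_matrix(A)
-     assert A * P == A      # rows of A are fixed: P projects onto Span(A)
-     assert P * P == P      # idempotent
-     assert P.T == P        # symmetric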
-
-
- def project_against(v, X):
- """ Project matrix X orthonally to vector v"""
- # Pv = projection_matrix(v)
- # return X - X * Pv
- Z = (X * v.T) * v / scal(v * v.T)
- return X - Z
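-
- # Illustrative sanity check (added): after project_against, every row of the
- # result is orthogonal to v. Relies on the framework helper scal(), just like
- # project_against itself; the helper name and the toy data are arbitrary.
- def _demo_project_against():
-     v = matrix(QQ, [[1, 1, 0]])
-     X = matrix(QQ, [[2, 0, 0], [0, 0, 3]])
-     Y = project_against(v, X)
-     assert (Y * v.T).is_zero()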
-
-
- # def make_primitive(B, v):
- # assert False
- # # Project and scale the v's in V so that each v
- # # is in the lattice, and primitive in it.
- # # Note: this does not make V primitive as a set of vectors!
- # # (e.g. linear dependencies are not eliminated)
- # PB = projection_matrix(B)
- # DT = dual_basis(B).T
- # v = vec(v) * PB
- # w = v * DT
- # den = lcm([x.denominator() for x in w[0]])
- # num = gcd([x for x in w[0] * den])
- # if num==0:
- # return None
- # v *= den/num
- # return v
-
-
- def vol(B):
- return sqrt(det(B * B.T))
-
-
- def project_and_eliminate_dep(B, W):
- # Project the rows of W onto Span(B)
- PB = projection_matrix(B)
- V = W * PB
- rank_loss = V.nrows() - V.rank()
-
- if rank_loss > 0:
- print("WARNING: their were %d linear dependencies out of %d " %
- (rank_loss, V.nrows()))
- V = V.LLL()
- V = V[rank_loss:]
-
- return V
-
-
- def is_cannonical_direction(v):
- v = vec(v)
- return sum([x != 0 for x in v[0]]) == 1
-
-
- def cannonical_param(v):
- v = vec(v)
- assert is_cannonical_direction(v)
- i = [x != 0 for x in v[0]].index(True)
- return i, v[0, i]
-
- def xgcd_of_list(a):
- """
- Extended gcd of a list: return (g, bezouts) such that
- g = gcd(a) and sum(bezouts[i] * a[i]) == g.
- """
- (g, s, t) = xgcd(a[0], a[1])
- bezouts = [s, t]
-
- for v in a[2:]:
- (g, s_, t_) = xgcd(g, v)
- bezouts = [b*s_ for b in bezouts]
- bezouts.append(t_)
-
- return (g, bezouts)
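-
- # Illustrative sanity check (added): the Bezout coefficients returned by
- # xgcd_of_list recombine to the gcd of the list. Toy input chosen arbitrarily.
- def _demo_xgcd_of_list():
-     a = [12, 18, 10]
-     g, bezouts = xgcd_of_list(a)
-     assert g == gcd(a)
-     assert sum(b * x for b, x in zip(bezouts, a)) == g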
-
- def remove_linear_dependencies(B, dim=None):
- nrows = B.nrows()
- if dim is None or nrows > dim:
- # Determine the number of dependencies
- K, r = None, None
- if dim is None:
- K = B.left_kernel().basis_matrix() # assuming the cost of "left_kernel" is negligible compared to "LLL"
- r = K.dimensions()[0]
- else:
- r = nrows-dim
-
- if r == 1 and False:  # NOTE: this single-dependency shortcut is deliberately disabled; the LLL fallback below is always used
- print("Use Better Algo")
- # Find a linear dependency
- if K is None:
- K = B.left_kernel().basis_matrix()
- assert K.dimensions()[0] == 1
- combinaison = K[0]
- combinaison *= lcm([v.denominator() for v in combinaison])
- print(combinaison)
-
- pivot, pivot_value = None, None
- for ind, value in enumerate(combinaison):
- if abs(value) == 1:
- pivot, pivot_value = ind, value
- break
-
- if pivot_value is None:
- print('Complex case')
- for ind, value in enumerate(combinaison):
- if abs(value) > 0 and gcd([v for i,v in enumerate(combinaison) if i!=ind]) == 1:
- if pivot is None or abs(value)<abs(pivot_value):
- pivot, pivot_value = ind, value
-
- if pivot_value < 0:
- combinaison = vector(ZZ, [-v for v in combinaison])
-
- _, bezouts = xgcd_of_list([v for i,v in enumerate(combinaison) if i!=pivot])
-
- factor = combinaison[pivot]-1
- for i in range(len(combinaison)):
- ind = i if i < pivot else i-1
- if i != pivot:
- B[i] += factor*bezouts[ind]*B[pivot]
- combinaison[pivot] += -factor
- assert (combinaison*B).is_zero(), 'It is not a linear dependency anymore !'
-
- assert abs(combinaison[pivot]) == 1, f'Error abs(combinaison[pivot]) == {abs(combinaison[pivot])} != 1'
- B = B[[i for i in range(B.dimensions()[0]) if i != pivot]]
-
- else:
- B = B.LLL()
- B = B[r:]
- return B
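-
- # Illustrative sanity check (added): three generators of ZZ^2 with one
- # redundancy are reduced to a genuine basis (2 rows, determinant +-1).
- # The helper name and the toy matrix are arbitrary.
- def _demo_remove_linear_dependencies():
-     B = matrix(ZZ, [[1, 0], [0, 1], [1, 1]])
-     Br = remove_linear_dependencies(B)
-     assert Br.nrows() == 2
-     assert abs(Br.det()) == 1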
-
- def lattice_orthogonal_section(D, V, maintains_basis=True):
- """
- Compute the intersection of the lattice L(B), given here by its dual basis D,
- with the hyperplane orthogonal to Span(V).
- (V can be either a vector or a matrix)
- INPUT AND OUTPUT ARE GIVEN AS DUAL BASES.
- Algorithm:
- - project V onto Span(B)
- - project the dual basis onto orth(V)
- - eliminate linear dependencies (LLL)
- - go back to the primal.
- """
-
- #V = project_and_eliminate_dep(D, V) ## No need because D is full-rank
- #r = V.nrows()
-
- # Project the dual basis orthogonally to v
- PV = projection_matrix(V)
- D = D - D * PV
-
- # Eliminate linear dependencies
- if maintains_basis:
- D = remove_linear_dependencies(D)
-
- # Return the result, still expressed in the dual
- return D
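-
- # Illustrative sanity check (added): with D = identity (dual basis of ZZ^3),
- # the section orthogonal to V yields dual generators orthogonal to V.
- # maintains_basis=False keeps the example independent of LLL; toy data arbitrary.
- def _demo_lattice_orthogonal_section():
-     D = identity_matrix(QQ, 3)
-     V = matrix(QQ, [[0, 0, 1]])
-     Ds = lattice_orthogonal_section(D, V, maintains_basis=False)
-     assert (Ds * V.T).is_zero()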
-
-
- def lattice_project_against(B, V, maintains_basis=True):
- """
- Compute the projection of the lattice L(B) orthogonally to Span(V). All vectors of V
- (or at least their projections onto Span(B)) must belong to L(B).
- Algorithm:
- - project V onto Span(B)
- - project the basis onto orth(V)
- - eliminate linear dependencies (LLL)
- """
- # Project v on Span(B)
- #V = project_and_eliminate_dep(B, V) ## No need because D is full-rank
- #r = V.nrows() # Useless
-
- # Check that V belongs to L(B)
- D = dual_basis(B)
- M = D * V.T
- if not lcm([x.denominator() for x in M.list()]) == 1:
- raise ValueError("Not in the lattice")
-
- # Project the basis orthogonally to v
- PV = projection_matrix(V)
- B = B - B * PV
-
- # Eliminate linear dependencies
- if maintains_basis:
- B = remove_linear_dependencies(B)
-
- # Return the projected (primal) basis
- return B
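-
- # Illustrative sanity check (added): projecting ZZ^3 against a lattice vector
- # gives basis vectors orthogonal to it. maintains_basis=False keeps the
- # example independent of LLL; the toy data is arbitrary.
- def _demo_lattice_project_against():
-     B = identity_matrix(QQ, 3)
-     V = matrix(QQ, [[0, 0, 1]])
-     Bp = lattice_project_against(B, V, maintains_basis=False)
-     assert (Bp * V.T).is_zero()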
-
-
- def lattice_modular_intersection(D, V, k, maintains_basis=True):
- """
- Compute the intersection of the lattice L(B), given here by its dual basis D,
- with the lattice {x | x * V^T = 0 mod k}
- (V can be either a vector or a matrix)
- Algorithm:
- - project V onto Span(B)
- - append the equations in the dual
- - eliminate linear dependencies (LLL)
- - go back to the primal.
- """
- # Project v on Span(B)
- #V = project_and_eliminate_dep(D, V) ## No need because D is full-rank
- #r = V.nrows() # Useless
-
- # append the equation in the dual
- V /= k
- D = D.stack(V)
-
- # Eliminate linear dependencies
- if maintains_basis:
- D = remove_linear_dependencies(D)
-
- # Return the result, still expressed in the dual
- return D
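-
- # Illustrative worked example (added): with D = identity (dual basis of ZZ^2),
- # V = (1, 1) and k = 3, the new dual generators are those of ZZ^2 plus V/3,
- # so a primal vector such as (1, 2) (with 1 + 2 = 0 mod 3) has integer inner
- # products with all of them. Toy data chosen arbitrarily.
- def _demo_lattice_modular_intersection():
-     D = identity_matrix(QQ, 2)
-     V = matrix(QQ, [[1, 1]])
-     Dm = lattice_modular_intersection(D, V, 3, maintains_basis=False)
-     x = vector(QQ, [1, 2])
-     assert all((x * d).denominator() == 1 for d in Dm.rows())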
-
-
- def is_diagonal(M):
- if M.nrows() != M.ncols():
- return False
- A = M.numpy()
- return np.all(A == np.diag(np.diagonal(A)))
-
-
- def logdet(M, exact=False):
- """
- Compute the log of the determinant of a large rational matrix,
- trying to avoid overflows.
- """
- if not exact:
- MM = array(M, dtype=float)
- _, l = slogdet(MM)
- return l
-
- a = abs(M.det())
- l = 0
-
- while a > 2**32:
- l += RR(32 * ln(2))
- a /= 2**32
-
- l += ln(RR(a))
- return l
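-
- # Illustrative sanity check (added): both the floating-point and the exact
- # paths of logdet agree with log(det) on a small diagonal matrix.
- def _demo_logdet():
-     M = diagonal_matrix(QQ, [2, 8])
-     assert abs(logdet(M) - log(16.0)) < 1e-6
-     assert abs(logdet(M, exact=True) - log(16.0)) < 1e-6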
-
-
- def degen_inverse(S, B=None):
- """ Compute the inverse of a symmetric matrix restricted
- to its span
- """
- # Get an orthogonal basis for the Span of B
-
- if B is None:
- # Get an orthogonal basis for the Span of B
- V = S.echelon_form()
- V = V[:V.rank()]
- P = projection_matrix(V)
- else:
- P = projection_matrix(B)
-
- # make S non-degenerate by adding the complement of span(B)
- C = identity_matrix(S.ncols()) - P
- Sinv = (S + C).inverse() - C
-
- assert S * Sinv == P, "Consistency failed (probably not your fault)."
- assert P * Sinv == Sinv, "Consistency failed (probably not your fault)."
-
- return Sinv
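-
- # Illustrative sanity check (added): on a rank-deficient PSD matrix, the
- # degenerate inverse acts as a pseudo-inverse on the span. Toy matrix arbitrary.
- def _demo_degen_inverse():
-     S = diagonal_matrix(QQ, [1, 2, 0])
-     Sinv = degen_inverse(S)
-     assert S * Sinv * S == S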
-
-
- def degen_logdet(S, B=None):
- """ Compute the determinant of a symmetric matrix
- sigma (m x m) restricted to the span of the full-rank
- rectangular (k x m, k <= m) matrix V
- """
- # Get an orthogonal basis for the Span of B
-
- if B is None:
- # Get an orthogonal basis for the Span of B
- V = S.echelon_form()
- V = V[:V.rank()]
- P = projection_matrix(V)
- else:
- P = projection_matrix(B)
-
- # Check that S is indeed supported by span(B)
- #assert S == P.T * S * P
- assert (S - P.T * S * P).norm() <= 1e-10
-
- # make S non-degenerate by adding the complement of span(B)
- C = identity_matrix(S.ncols()) - P
- l3 = logdet(S + C)
- return l3
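-
- # Illustrative sanity check (added): for S = diag(1, 2, 0), the determinant
- # restricted to the span is 1 * 2 = 2, so degen_logdet should return ~log(2).
- # Uses the floating-point path of logdet().
- def _demo_degen_logdet():
-     S = diagonal_matrix(QQ, [1, 2, 0])
-     assert abs(degen_logdet(S) - log(2.0)) < 1e-6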
-
-
- def square_root_inverse_degen(S, B=None):
- """ Compute the determinant of a symmetric matrix
- sigma (m x m) restricted to the span of the full-rank
- rectangular (k x m, k <= m) matrix V
- """
- if B is None:
- # Get an orthogonal basis for the Span of B
- V = S.echelon_form()
- V = V[:V.rank()]
- P = projection_matrix(V)
- else:
- P = projection_matrix(B)
-
- # make S non-degenerate by adding the complement of span(B)
- C = identity_matrix(S.ncols()) - P
- S_inv = np_inv(array((S + C), dtype=float))
- S_inv = array(S_inv, dtype=float)
- L_inv = cholesky(S_inv)
- L_inv = round_matrix_to_rational(L_inv)
- L = L_inv.inverse()
-
- return L, L_inv
-
-
- def build_standard_substitution_matrix(V, pivot=None, data=None, output_data=False):
- """
- Build the (dim x (dim-1)) substitution matrix Gamma associated with the
- linear equation V * x = 0: the pivot coordinate is eliminated, and the
- columns of Gamma span the hyperplane {x : V * x = 0} (so that V * Gamma = 0).
- Optionally compute normalization data as well ('det' and the associated
- normalization matrix).
- """
- _, pivot = V.nonzero_positions()[0] if (pivot is None) else (None, pivot)
- assert V[0,pivot] != 0, 'The value of the pivot must be non-zero.'
- dim = V.ncols()
-
- V1 = - V[0,:pivot] / V[0,pivot]
- V2 = - V[0,pivot+1:] / V[0,pivot]
-
- Gamma = zero_matrix(QQ, dim,dim-1)
- Gamma[:pivot,:pivot] = identity_matrix(pivot)
- Gamma[pivot,:pivot] = V1
- Gamma[pivot,pivot:] = V2
- Gamma[pivot+1:,pivot:] = identity_matrix(dim-pivot-1)
-
- data = data if not output_data else {}
- if data is not None:
- norm2 = 1 + scal(V1*V1.T) + scal(V2*V2.T)
- normalization_matrix = zero_matrix(QQ, dim-1,dim-1)
- normalization_matrix[:pivot,:pivot] = identity_matrix(pivot) - V1.T*V1 / norm2
- normalization_matrix[pivot:,pivot:] = identity_matrix(dim-pivot-1) - V2.T*V2 / norm2
- normalization_matrix[:pivot,pivot:] = - V1.T*V2 / norm2
- normalization_matrix[pivot:,:pivot] = - V2.T*V1 / norm2
-
- data['det'] = norm2
- data['normalization_matrix'] = normalization_matrix
-
- if output_data:
- return Gamma, data
- else:
- return Gamma
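-
- # Illustrative sanity check (added): the columns of Gamma span the hyperplane
- # {x : V * x = 0}, i.e. V * Gamma = 0 and Gamma has full column rank.
- # The toy equation is arbitrary (the pivot is the second coordinate here).
- def _demo_build_standard_substitution_matrix():
-     V = matrix(QQ, [[0, 1, 2]])
-     Gamma = build_standard_substitution_matrix(V)
-     assert (V * Gamma).is_zero()
-     assert Gamma.rank() == V.ncols() - 1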