1"""
2Linear Algebra - Matrices, Eigenvalues, and Decompositions
3
4This script demonstrates:
5- Matrix operations (addition, multiplication, transpose)
6- Determinants and matrix inverse
7- Eigenvalues and eigenvectors
8- Matrix diagonalization
9- Singular Value Decomposition (SVD)
10- Solving linear systems
11- Matrix exponential
12"""
13
import math

import numpy as np
15
16try:
17 import matplotlib.pyplot as plt
18 HAS_MATPLOTLIB = True
19except ImportError:
20 HAS_MATPLOTLIB = False
21 print("Warning: matplotlib not available, skipping visualizations")
22
23
def matrix_determinant_2x2(A):
    """Return the determinant of a 2x2 matrix via the ad - bc formula.

    Raises ValueError if A is not 2x2.
    """
    if A.shape != (2, 2):
        raise ValueError("Matrix must be 2x2")
    a, b = A[0]
    c, d = A[1]
    return a * d - b * c
29
30
def matrix_inverse_2x2(A):
    """Invert a 2x2 matrix using the adjugate formula A^-1 = adj(A)/det(A).

    Raises ValueError when |det(A)| < 1e-10 (numerically singular).
    """
    det = matrix_determinant_2x2(A)
    if abs(det) < 1e-10:
        raise ValueError("Matrix is singular")
    # Swap the diagonal, negate the off-diagonal, then scale by 1/det.
    adjugate = np.array([[A[1, 1], -A[0, 1]],
                         [-A[1, 0], A[0, 0]]])
    return adjugate / det
38
39
def power_iteration(A, num_iterations=100, tol=None):
    """Find the dominant eigenvalue and eigenvector of A by power iteration.

    Parameters
    ----------
    A : (n, n) array_like
        Square matrix.
    num_iterations : int
        Maximum number of iterations (default 100, as before).
    tol : float or None
        Optional early-stopping tolerance: stop when the unit-norm iterate
        changes by less than ``tol`` between steps. ``None`` (default)
        reproduces the original fixed-iteration behavior.

    Returns
    -------
    (eigenvalue, eigenvector)
        Rayleigh-quotient estimate of the dominant eigenvalue and the
        corresponding unit-norm eigenvector estimate.

    Raises
    ------
    ValueError
        If the iterate collapses to (numerically) zero, e.g. when A is
        nilpotent — the original code divided by zero here.
    """
    n = A.shape[0]
    # Random start: the probability of landing exactly orthogonal to the
    # dominant eigenvector is zero, so convergence is (a.s.) guaranteed
    # when a unique dominant eigenvalue exists.
    b = np.random.rand(n)

    for _ in range(num_iterations):
        b_new = A @ b
        norm = np.linalg.norm(b_new)
        if norm < 1e-300:
            raise ValueError("Power iteration collapsed to zero vector")
        b_new = b_new / norm
        if tol is not None and np.linalg.norm(b_new - b) < tol:
            b = b_new
            break
        b = b_new

    # Rayleigh quotient; b is unit-norm so the denominator is ~1 but is
    # kept for robustness against rounding.
    eigenvalue = (b @ (A @ b)) / (b @ b)
    return eigenvalue, b
52
53
def gram_schmidt(A):
    """Orthonormalize the columns of A using modified Gram-Schmidt.

    Parameters
    ----------
    A : (m, n) array_like
        Matrix whose columns are to be orthonormalized. Must have
        linearly independent columns.

    Returns
    -------
    Q : (m, n) ndarray
        Matrix with orthonormal columns spanning the same column space,
        with Q[:, 0] parallel to A[:, 0] (same as the original routine).

    Raises
    ------
    ValueError
        If a column is (numerically) linearly dependent on the previous
        ones — the original code silently divided by ~0 producing NaNs.
    """
    Q = np.zeros_like(A, dtype=float)

    for i in range(A.shape[1]):
        q = A[:, i].astype(float)  # copy, so A is never mutated
        # Modified Gram-Schmidt: project each previous direction out of the
        # *current residual* q rather than the original column A[:, i].
        # Mathematically identical, but much more stable in floating point.
        for j in range(i):
            q -= (Q[:, j] @ q) * Q[:, j]
        norm = np.linalg.norm(q)
        if norm < 1e-12:
            raise ValueError(
                f"Column {i} is linearly dependent on previous columns")
        Q[:, i] = q / norm

    return Q
65
66
# ============================================================================
# MAIN DEMONSTRATIONS
# ============================================================================

print("=" * 70)
print("LINEAR ALGEBRA - MATRICES, EIGENVALUES, AND DECOMPOSITIONS")
print("=" * 70)

# Test 1: Matrix operations
print("\n1. MATRIX OPERATIONS")
print("-" * 70)
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])
print("Matrix A:")
print(A)
print("\nMatrix B:")
print(B)
print(f"\nA + B:\n{A + B}")
# NOTE: '*' is the Hadamard (element-wise) product; '@' is the true
# matrix product -- a classic NumPy gotcha worth demonstrating side by side.
print(f"\nA * B (element-wise):\n{A * B}")
print(f"\nA @ B (matrix multiplication):\n{A @ B}")
print(f"\nA^T (transpose):\n{A.T}")
print(f"\nTrace(A): {np.trace(A)}")

# Test 2: Determinants
print("\n2. DETERMINANTS")
print("-" * 70)
A = np.array([[1, 2], [3, 4]])
# Cross-check the hand-rolled ad - bc formula against numpy's det
# (LU-based, hence the float output needing :.6f formatting).
det_manual = matrix_determinant_2x2(A)
det_numpy = np.linalg.det(A)
print(f"Matrix A:\n{A}")
print(f"det(A) manual: {det_manual}")
print(f"det(A) numpy: {det_numpy:.6f}")

A3 = np.array([[1, 2, 3], [0, 4, 5], [1, 0, 6]])
print(f"\nMatrix A (3x3):\n{A3}")
print(f"det(A): {np.linalg.det(A3):.6f}")

# Test 3: Matrix inverse
print("\n3. MATRIX INVERSE")
print("-" * 70)
A = np.array([[1, 2], [3, 4]], dtype=float)
# Adjugate-formula inverse vs numpy's; A @ A^(-1) should print ~identity.
A_inv_manual = matrix_inverse_2x2(A)
A_inv_numpy = np.linalg.inv(A)
print(f"Matrix A:\n{A}")
print(f"\nA^(-1) manual:\n{A_inv_manual}")
print(f"\nA^(-1) numpy:\n{A_inv_numpy}")
print(f"\nVerification A @ A^(-1):\n{A @ A_inv_numpy}")
114
# Test 4: Solving linear systems
print("\n4. SOLVING LINEAR SYSTEMS Ax = b")
print("-" * 70)
A = np.array([[3, 1], [1, 2]], dtype=float)
b = np.array([9, 8], dtype=float)
print(f"Matrix A:\n{A}")
print(f"Vector b: {b}")

# Method 1: Using inverse (shown for comparison only; forming an explicit
# inverse is slower and less accurate than a direct solve)
x_inv = np.linalg.inv(A) @ b
print(f"\nSolution (using inverse): x = {x_inv}")

# Method 2: Using solve (preferred: LU factorization under the hood)
x_solve = np.linalg.solve(A, b)
print(f"Solution (using solve): x = {x_solve}")

# Verification
print(f"Verification Ax = {A @ x_solve}")
print(f"Expected b = {b}")

# Test 5: Eigenvalues and eigenvectors
print("\n5. EIGENVALUES AND EIGENVECTORS")
print("-" * 70)
A = np.array([[4, 1], [2, 3]], dtype=float)
eigenvalues, eigenvectors = np.linalg.eig(A)
print(f"Matrix A:\n{A}")
print(f"\nEigenvalues: {eigenvalues}")
print(f"\nEigenvectors:")
print(eigenvectors)

# Verification: A v = λ v  (eigenvectors are the COLUMNS of the matrix
# returned by np.linalg.eig, hence eigenvectors[:, i])
for i in range(len(eigenvalues)):
    v = eigenvectors[:, i]
    lam = eigenvalues[i]
    Av = A @ v
    lam_v = lam * v
    print(f"\nEigenvalue λ_{i+1} = {lam:.4f}")
    print(f"A v_{i+1} = {Av}")
    print(f"λ_{i+1} v_{i+1} = {lam_v}")

# Test 6: Power iteration -- compare the iterative estimate of the
# dominant eigenvalue with numpy's exact eigvals output.
print("\n6. POWER ITERATION FOR DOMINANT EIGENVALUE")
print("-" * 70)
A = np.array([[4, 1], [2, 3]], dtype=float)
lam_power, v_power = power_iteration(A, 50)
print(f"Matrix A:\n{A}")
print(f"\nDominant eigenvalue (power iteration): {lam_power:.6f}")
print(f"Dominant eigenvector: {v_power}")
print(f"\nNumPy eigenvalues: {np.linalg.eigvals(A)}")
164
# Test 7: Matrix diagonalization
print("\n7. MATRIX DIAGONALIZATION")
print("-" * 70)
A = np.array([[4, 1], [2, 3]], dtype=float)
# A = P D P^(-1): P's columns are the eigenvectors, D holds the eigenvalues.
eigenvalues, P = np.linalg.eig(A)
D = np.diag(eigenvalues)
print(f"Matrix A:\n{A}")
print(f"\nDiagonal matrix D:\n{D}")
print(f"\nEigenvector matrix P:\n{P}")
print(f"\nReconstruction P @ D @ P^(-1):\n{P @ D @ np.linalg.inv(P)}")

# Test 8: Singular Value Decomposition
print("\n8. SINGULAR VALUE DECOMPOSITION (SVD)")
print("-" * 70)
A = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
U, s, Vt = np.linalg.svd(A)
print(f"Matrix A ({A.shape[0]}x{A.shape[1]}):")
print(A)
print(f"\nLeft singular vectors U ({U.shape[0]}x{U.shape[1]}):")
print(U)
print(f"\nSingular values: {s}")
print(f"\nRight singular vectors V^T ({Vt.shape[0]}x{Vt.shape[1]}):")
print(Vt)

# Reconstruct A: np.linalg.svd returns s as a 1-D vector of singular
# values, so embed it into a rectangular S of A's shape before U @ S @ V^T.
S = np.zeros((A.shape[0], A.shape[1]))
S[:min(A.shape), :min(A.shape)] = np.diag(s)
A_reconstructed = U @ S @ Vt
print(f"\nReconstructed A = U @ S @ V^T:")
print(A_reconstructed)

# Test 9: Matrix rank and condition number
print("\n9. MATRIX RANK AND CONDITION NUMBER")
print("-" * 70)
A_full = np.array([[1, 2], [3, 4]], dtype=float)
# Rank-deficient on purpose: row 2 = 2 * row 1.
A_singular = np.array([[1, 2], [2, 4]], dtype=float)

print(f"Full rank matrix:\n{A_full}")
print(f"Rank: {np.linalg.matrix_rank(A_full)}")
print(f"Condition number: {np.linalg.cond(A_full):.4f}")

print(f"\nRank-deficient matrix:\n{A_singular}")
print(f"Rank: {np.linalg.matrix_rank(A_singular)}")
print(f"Condition number: {np.linalg.cond(A_singular):.4e}")

# Test 10: Gram-Schmidt orthogonalization -- Q^T Q printing as the
# identity confirms the columns of Q are orthonormal.
print("\n10. GRAM-SCHMIDT ORTHOGONALIZATION")
print("-" * 70)
A = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=float)
Q = gram_schmidt(A)
print(f"Original matrix A:")
print(A)
print(f"\nOrthogonalized matrix Q:")
print(Q)
print(f"\nQ^T @ Q (should be identity):")
print(Q.T @ Q)
221
# Test 11: Matrix exponential
print("\n11. MATRIX EXPONENTIAL")
print("-" * 70)
# exp(t A) for this A is a rotation matrix; here t = 1 radian.
A = np.array([[0, 1], [-1, 0]], dtype=float)
print(f"Matrix A (rotation matrix generator):")
print(A)

# Compute using scipy if available
try:
    from scipy.linalg import expm
    exp_A = expm(A)
    print(f"\nexp(A) using scipy:")
    print(exp_A)
    print(f"\nThis is a rotation by 1 radian:")
    print(f"cos(1) = {np.cos(1):.6f}, sin(1) = {np.sin(1):.6f}")
except ImportError:
    # Manual computation using the Taylor series exp(A) = sum_n A^n / n!
    # FIX: np.math was removed in NumPy 2.0 (deprecated since 1.25);
    # use the stdlib math.factorial instead of np.math.factorial.
    exp_A = np.eye(2)
    A_power = np.eye(2)
    for n in range(1, 20):
        A_power = A_power @ A
        exp_A += A_power / math.factorial(n)
    print(f"\nexp(A) using Taylor series (20 terms):")
    print(exp_A)
246
# Visualization
if HAS_MATPLOTLIB:
    print("\n" + "=" * 70)
    print("VISUALIZATIONS")
    print("=" * 70)

    # 2x3 grid of panels; each panel below grabs its axes via axes[row, col].
    fig, axes = plt.subplots(2, 3, figsize=(14, 9))

    # Plot 1: Eigenvector visualization
    ax = axes[0, 0]
    A = np.array([[3, 1], [1, 3]], dtype=float)
    eigenvalues, eigenvectors = np.linalg.eig(A)

    # Draw each eigenvector (solid) and its image A v (faded dashes):
    # for an eigenvector, A v lies along the same direction, scaled by λ.
    origin = [0, 0]
    for i in range(2):
        v = eigenvectors[:, i]
        ax.arrow(0, 0, v[0], v[1], head_width=0.1, head_length=0.1,
                 fc=f'C{i}', ec=f'C{i}', label=f'v{i+1} (λ={eigenvalues[i]:.1f})')
        # Show transformation
        Av = A @ v
        ax.arrow(0, 0, Av[0], Av[1], head_width=0.1, head_length=0.1,
                 fc=f'C{i}', ec=f'C{i}', alpha=0.3, linestyle='--')

    ax.set_xlim(-5, 5)
    ax.set_ylim(-5, 5)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('Eigenvectors and Av')
    ax.legend()
    ax.grid(True, alpha=0.3)
    ax.axhline(y=0, color='k', linewidth=0.5)
    ax.axvline(x=0, color='k', linewidth=0.5)
    ax.set_aspect('equal')

    # Plot 2: SVD visualization -- A maps the unit circle to an ellipse
    # whose semi-axes have lengths s[i] along the left singular directions.
    ax = axes[0, 1]
    A = np.array([[3, 1], [1, 2]], dtype=float)
    U, s, Vt = np.linalg.svd(A)

    theta = np.linspace(0, 2*np.pi, 100)
    circle = np.array([np.cos(theta), np.sin(theta)])
    ellipse = A @ circle

    ax.plot(circle[0], circle[1], 'b-', label='Unit circle', linewidth=2)
    ax.plot(ellipse[0], ellipse[1], 'r-', label='Transformed', linewidth=2)

    # Draw singular vectors: right singular vectors v_i (rows of Vt, blue)
    # and scaled left singular vectors s_i * u_i (red, ellipse semi-axes).
    for i in range(2):
        v = Vt[i, :]
        ax.arrow(0, 0, v[0], v[1], head_width=0.1, head_length=0.1,
                 fc='blue', ec='blue', alpha=0.5)
        u = U[:, i] * s[i]
        ax.arrow(0, 0, u[0], u[1], head_width=0.1, head_length=0.1,
                 fc='red', ec='red', alpha=0.5)

    ax.set_xlim(-4, 4)
    ax.set_ylim(-4, 4)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('SVD: Circle to Ellipse')
    ax.legend()
    ax.grid(True, alpha=0.3)
    ax.set_aspect('equal')
311
    # Plot 3: Matrix condition number effect
    ax = axes[0, 2]
    cond_numbers = []
    errors = []

    # Shrink epsilon toward 0 so A approaches singularity: the condition
    # number blows up and the error of the computed solution grows with it.
    for epsilon in np.logspace(-8, -1, 20):
        A = np.array([[1, 1], [1, 1+epsilon]], dtype=float)
        b = np.array([2, 2+epsilon], dtype=float)
        x = np.linalg.solve(A, b)

        # True solution is [1, 1]
        error = np.linalg.norm(x - np.array([1, 1]))
        cond = np.linalg.cond(A)

        cond_numbers.append(cond)
        errors.append(error)

    ax.loglog(cond_numbers, errors, 'bo-', markersize=4)
    ax.set_xlabel('Condition number')
    ax.set_ylabel('Solution error')
    ax.set_title('Condition Number vs Error')
    ax.grid(True, alpha=0.3, which='both')

    # Plot 4: Power iteration convergence -- record the Rayleigh-quotient
    # estimate at every step and compare with numpy's dominant eigenvalue.
    ax = axes[1, 0]
    A = np.array([[4, 1], [2, 3]], dtype=float)

    b = np.random.rand(2)
    eigenvalues_history = []

    for i in range(50):
        b = A @ b
        b = b / np.linalg.norm(b)
        lam = (b @ (A @ b)) / (b @ b)
        eigenvalues_history.append(lam)

    true_eigenvalue = max(np.linalg.eigvals(A))

    ax.plot(eigenvalues_history, 'b-', linewidth=2, label='Power iteration')
    ax.axhline(y=true_eigenvalue, color='r', linestyle='--', label=f'True λ = {true_eigenvalue:.4f}')
    ax.set_xlabel('Iteration')
    ax.set_ylabel('Eigenvalue estimate')
    ax.set_title('Power Iteration Convergence')
    ax.legend()
    ax.grid(True, alpha=0.3)
357
    # Plot 5: Gram-Schmidt visualization
    ax = axes[1, 1]
    v1 = np.array([3, 1])
    v2 = np.array([1, 2])

    # Original vectors
    ax.arrow(0, 0, v1[0], v1[1], head_width=0.2, head_length=0.2,
             fc='blue', ec='blue', label='v1', linewidth=2)
    ax.arrow(0, 0, v2[0], v2[1], head_width=0.2, head_length=0.2,
             fc='red', ec='red', label='v2', linewidth=2, alpha=0.5)

    # Orthogonalized vectors: q1 keeps v1's direction (just normalized);
    # q2 is v2 with its component along q1 removed, then normalized.
    A = np.column_stack([v1, v2])
    Q = gram_schmidt(A)
    q1, q2 = Q[:, 0], Q[:, 1]

    ax.arrow(0, 0, q1[0], q1[1], head_width=0.2, head_length=0.2,
             fc='blue', ec='blue', linestyle='--', linewidth=2, alpha=0.7)
    ax.arrow(0, 0, q2[0], q2[1], head_width=0.2, head_length=0.2,
             fc='green', ec='green', label='q2 (orthogonal)', linewidth=2)

    ax.set_xlim(-1, 4)
    ax.set_ylim(-1, 3)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('Gram-Schmidt Orthogonalization')
    ax.legend()
    ax.grid(True, alpha=0.3)
    ax.set_aspect('equal')
387
388 # Plot 6: Matrix exponential for rotation
389 ax = axes[1, 2]
390 theta_vals = np.linspace(0, 2*np.pi, 8, endpoint=False)
391
392 for theta in theta_vals:
393 A = np.array([[0, -theta], [theta, 0]])
394 try:
395 from scipy.linalg import expm
396 R = expm(A)
397 except ImportError:
398 # Manual exponential
399 R = np.eye(2)
400 A_power = np.eye(2)
401 for n in range(1, 20):
402 A_power = A_power @ A
403 R += A_power / np.math.factorial(n)
404
405 v = np.array([1, 0])
406 v_rotated = R @ v
407 ax.arrow(0, 0, v_rotated[0], v_rotated[1], head_width=0.1,
408 head_length=0.1, fc='blue', ec='blue', alpha=0.6)
409
410 circle_theta = np.linspace(0, 2*np.pi, 100)
411 ax.plot(np.cos(circle_theta), np.sin(circle_theta), 'r--', alpha=0.3)
412
413 ax.set_xlim(-1.5, 1.5)
414 ax.set_ylim(-1.5, 1.5)
415 ax.set_xlabel('x')
416 ax.set_ylabel('y')
417 ax.set_title('Matrix Exponential Rotations')
418 ax.grid(True, alpha=0.3)
419 ax.set_aspect('equal')
420
421 plt.tight_layout()
422 plt.savefig('/opt/projects/01_Personal/03_Study/examples/Mathematical_Methods/03_linear_algebra.png', dpi=150)
423 print("Saved visualization: 03_linear_algebra.png")
424 plt.close()
425
426print("\n" + "=" * 70)
427print("DEMONSTRATION COMPLETE")
428print("=" * 70)