Exercises Notebook
Converted from exercises.ipynb for web reading.
Singular Value Decomposition - Exercises
This notebook contains 10 progressive exercises for 02-Singular-Value-Decomposition. Each exercise has a learner workspace followed by a complete reference solution. Use the solution cells after making a serious attempt.
Difficulty grows from direct computation to AI-facing interpretation. Formulas use LaTeX-in-Markdown with $...$ for inline math and $$...$$ for display math.
Code cell 2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False
mpl.rcParams.update({
    "figure.figsize": (10, 6),
    "figure.dpi": 120,
    "font.size": 13,
    "axes.titlesize": 15,
    "axes.labelsize": 13,
    "xtick.labelsize": 11,
    "ytick.labelsize": 11,
    "legend.fontsize": 11,
    "legend.framealpha": 0.85,
    "lines.linewidth": 2.0,
    "axes.spines.top": False,
    "axes.spines.right": False,
    "savefig.bbox": "tight",
    "savefig.dpi": 150,
})
np.random.seed(42)
print("Plot setup complete.")
Code cell 3
import numpy as np
import numpy.linalg as la
import scipy.linalg as sla
from scipy import stats
np.set_printoptions(precision=8, suppress=True)
np.random.seed(42)
COLORS = {
    "primary": "#0077BB",
    "secondary": "#EE7733",
    "tertiary": "#009988",
    "error": "#CC3311",
    "neutral": "#555555",
    "highlight": "#EE3377",
}

def header(title):
    print("\n" + "=" * len(title))
    print(title)
    print("=" * len(title))

def check_true(name, cond):
    ok = bool(cond)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    return ok

def check_close(name, got, expected, tol=1e-8):
    ok = np.allclose(got, expected, atol=tol, rtol=tol)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        print(" got =", got)
        print(" expected=", expected)
    return ok

def softmax(z, axis=-1):
    # Numerically stable softmax: shift by the max before exponentiating.
    z = np.asarray(z, dtype=float)
    z = z - np.max(z, axis=axis, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=axis, keepdims=True)

def gram_schmidt_columns(A, tol=1e-12):
    # Orthonormalize the columns of A, dropping near-dependent columns.
    A = np.asarray(A, dtype=float)
    Q = []
    for j in range(A.shape[1]):
        v = A[:, j].copy()
        for q in Q:
            v -= (q @ v) * q
        n = la.norm(v)
        if n > tol:
            Q.append(v / n)
    return np.column_stack(Q) if Q else np.empty((A.shape[0], 0))

def projection_matrix(A):
    # Orthogonal projector onto the column space of A.
    Q = gram_schmidt_columns(A)
    return Q @ Q.T

def numerical_rank(A, tol=1e-10):
    # Number of singular values above the tolerance.
    return int(np.sum(la.svd(np.asarray(A, dtype=float), compute_uv=False) > tol))

def stable_rank(A):
    # Stable rank ||A||_F^2 / ||A||_2^2, a smooth surrogate for the rank.
    s = la.svd(np.asarray(A, dtype=float), compute_uv=False)
    return float(np.sum(s**2) / (s[0]**2 + 1e-15))

def make_spd(n, seed=0, ridge=0.5):
    # Random symmetric positive-definite matrix built as A^T A plus a ridge term.
    rng = np.random.default_rng(seed)
    A = rng.normal(size=(n, n))
    return A.T @ A + ridge * np.eye(n)
print("Chapter 03 helper setup complete.")
Exercise 1: SVD Reconstruction
Compute the SVD $A = U \Sigma V^T$ and verify orthogonality of the factors plus exact reconstruction.
Code cell 5
# Your Solution
# Exercise 1 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 1.")
Code cell 6
# Solution
# Exercise 1 - SVD Reconstruction
header("Exercise 1: SVD reconstruction")
A = np.array([[3.0, 1.0], [0.0, 2.0], [1.0, 0.0]])
U, s, Vt = la.svd(A, full_matrices=False)
Ahat = U @ np.diag(s) @ Vt
print("singular values:", s)
check_close("U^T U = I", U.T @ U, np.eye(len(s)))
check_close("V V^T = I", Vt @ Vt.T, np.eye(len(s)))
check_close("reconstruction", Ahat, A)
Exercise 2: SVD from $A^T A$
Recover the right singular vectors and singular values from the eigendecomposition of $A^T A$.
Code cell 8
# Your Solution
# Exercise 2 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 2.")
Code cell 9
# Solution
# Exercise 2 - SVD from $A^T A$
header("Exercise 2: SVD from AtA")
A = np.array([[2.0, 0.0], [0.0, 1.0], [2.0, 1.0]])
vals, V = la.eigh(A.T @ A)
order = np.argsort(vals)[::-1]
vals, V = vals[order], V[:, order]
s = np.sqrt(np.maximum(vals, 0))
U = A @ V / s
print("singular values:", s)
check_close("matches numpy singular values", s, la.svd(A, compute_uv=False), tol=1e-10)
check_close("reconstruction", U @ np.diag(s) @ V.T, A)
Exercise 3: Four Fundamental Subspaces
Use the SVD to read off the dimensions of the column space, row space, null space, and left null space.
Code cell 11
# Your Solution
# Exercise 3 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 3.")
Code cell 12
# Solution
# Exercise 3 - Four Fundamental Subspaces
header("Exercise 3: four subspaces")
A = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])
U, s, Vt = la.svd(A, full_matrices=True)
r = numerical_rank(A)
print("rank:", r, "singular values:", s)
print("col dim", r, "row dim", r, "null dim", A.shape[1]-r, "left-null dim", A.shape[0]-r)
check_true("rank-nullity domain", r + (A.shape[1]-r) == A.shape[1])
check_true("codomain split", r + (A.shape[0]-r) == A.shape[0])
Exercise 4: Best Rank-$k$ Approximation
Compare truncated SVD error to the discarded singular values.
Code cell 14
# Your Solution
# Exercise 4 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 4.")
Code cell 15
# Solution
# Exercise 4 - Best Rank-$k$ Approximation
header("Exercise 4: Eckart-Young")
A = np.array([[5.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])
U, s, Vt = la.svd(A, full_matrices=False)
k = 1
Ak = U[:, :k] @ np.diag(s[:k]) @ Vt[:k]
err = la.norm(A - Ak, 'fro')
print("singular values:", s)
print("rank-1 Frobenius error:", err)
check_close("discarded-energy formula", err, np.sqrt(np.sum(s[k:]**2)))
Exercise 5: Pseudoinverse Least Squares
Solve an overdetermined system with the pseudoinverse $A^+ = V \Sigma^{+} U^T$ and compare to lstsq.
Code cell 17
# Your Solution
# Exercise 5 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 5.")
Code cell 18
# Solution
# Exercise 5 - Pseudoinverse Least Squares
header("Exercise 5: pseudoinverse")
A = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
b = np.array([1.0, 2.0, 2.2, 3.8])
U, s, Vt = la.svd(A, full_matrices=False)
Ap = Vt.T @ np.diag(1/s) @ U.T
x = Ap @ b
x_ref = la.lstsq(A, b, rcond=None)[0]
print("solution:", x)
check_close("pseudoinverse matches lstsq", x, x_ref)
check_close("normal residual orthogonal", A.T @ (A @ x - b), np.zeros(A.shape[1]), tol=1e-10)
Exercise 6: Condition Number
Compute $\kappa(A) = \sigma_{\max}/\sigma_{\min}$ and observe sensitivity.
Code cell 20
# Your Solution
# Exercise 6 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 6.")
Code cell 21
# Solution
# Exercise 6 - Condition Number
header("Exercise 6: condition number")
A = np.array([[1.0, 0.999], [0.999, 0.998]])
s = la.svd(A, compute_uv=False)
cond = s[0] / s[-1]
print("singular values:", s, "condition:", cond)
check_close("matches numpy cond", cond, la.cond(A))
check_true("ill-conditioned", cond > 1e5)
Exercise 7: Randomized SVD Sketch
Use a random test matrix to approximate the dominant singular subspace.
Code cell 23
# Your Solution
# Exercise 7 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 7.")
Code cell 24
# Solution
# Exercise 7 - Randomized SVD Sketch
header("Exercise 7: randomized SVD")
rng = np.random.default_rng(0)
A = rng.normal(size=(40, 5)) @ rng.normal(size=(5, 30))
Omega = rng.normal(size=(30, 8))
Q, _ = la.qr(A @ Omega, mode='reduced')
B = Q.T @ A
_, s_sketch, _ = la.svd(B, full_matrices=False)
s_full = la.svd(A, compute_uv=False)
print("full top-5:", s_full[:5])
print("sketch top-5:", s_sketch[:5])
check_true("top singular value close", abs(s_sketch[0] - s_full[0]) / s_full[0] < 0.05)
Exercise 8: LoRA Rank Update
Verify that a LoRA update has rank at most the adapter rank.
Code cell 26
# Your Solution
# Exercise 8 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 8.")
Code cell 27
# Solution
# Exercise 8 - LoRA Rank Update
header("Exercise 8: LoRA rank")
rng = np.random.default_rng(1)
d_out, d_in, r = 12, 10, 3
B = rng.normal(size=(d_out, r))
A = rng.normal(size=(r, d_in))
Delta = B @ A
print("rank Delta:", la.matrix_rank(Delta))
check_true("rank bound", la.matrix_rank(Delta) <= r)
print("Takeaway: LoRA constrains updates to a low-rank tangent family.")
Exercise 9: Nuclear Norm and Stable Rank
Compute the spectral, Frobenius, and nuclear norms plus the stable rank from the singular values.
Code cell 29
# Your Solution
# Exercise 9 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 9.")
Code cell 30
# Solution
# Exercise 9 - Nuclear Norm and Stable Rank
header("Exercise 9: singular-value summaries")
A = np.diag([5.0, 2.0, 0.5, 0.1])
s = la.svd(A, compute_uv=False)
print("spectral", s[0], "fro", la.norm(s), "nuclear", s.sum(), "stable", stable_rank(A))
check_close("Frobenius from singular values", la.norm(A, 'fro'), la.norm(s))
check_true("stable rank <= exact rank", stable_rank(A) <= la.matrix_rank(A))
Exercise 10: Latent Semantic Approximation
Approximate a term-document matrix by a rank-2 semantic representation.
Code cell 32
# Your Solution
# Exercise 10 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 10.")
Code cell 33
# Solution
# Exercise 10 - Latent Semantic Approximation
header("Exercise 10: latent semantic approximation")
X = np.array([[3, 3, 0, 0], [2, 2, 0, 0], [0, 0, 3, 2], [0, 0, 2, 3], [1, 1, 1, 1]], dtype=float)
U, s, Vt = la.svd(X, full_matrices=False)
X2 = U[:, :2] @ np.diag(s[:2]) @ Vt[:2]
energy = np.sum(s[:2]**2) / np.sum(s**2)
print("rank-2 energy:", energy)
check_true("rank-2 captures most energy", energy > 0.9)
check_true("rank is at most 2", la.matrix_rank(X2) <= 2)