Theory Notebook
Converted from theory.ipynb for web reading.
Gradient Descent - Theory Notebook
Executable derivations and diagnostics for Chapter 8.
Code cell 2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False
mpl.rcParams.update({
"figure.figsize": (10, 6),
"figure.dpi": 120,
"font.size": 13,
"axes.titlesize": 15,
"axes.labelsize": 13,
"xtick.labelsize": 11,
"ytick.labelsize": 11,
"legend.fontsize": 11,
"legend.framealpha": 0.85,
"lines.linewidth": 2.0,
"axes.spines.top": False,
"axes.spines.right": False,
"savefig.bbox": "tight",
"savefig.dpi": 150,
})
np.random.seed(42)
print("Plot setup complete.")
Code cell 3
def header(title):
    print("\n" + "=" * 78)
    print(title)
    print("=" * 78)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)
COLORS = {
"primary": "#0077BB",
"secondary": "#EE7733",
"tertiary": "#009988",
"error": "#CC3311",
"neutral": "#555555",
"highlight": "#EE3377",
}
print("Helper functions ready.")
Demo 1: Gradient Direction
This cell checks a small numerical fact connected to gradient direction.
Code cell 5
header("Demo 1: Gradient Direction")
x = np.linspace(-3, 3, 200)
a = 1
freq = 1
loss = 0.5 * a * x**2 + 0.1 * np.sin(freq * x)
grad = a * x + 0.1 * freq * np.cos(freq * x)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, loss, color=COLORS["primary"], label="loss")
ax.plot(x, grad, color=COLORS["secondary"], linestyle="--", label="gradient")
ax.set_title("Gradient Descent: gradient direction diagnostic")
ax.set_xlabel("Parameter $\\theta$")
ax.set_ylabel("Value")
ax.legend(loc="best")
fig.tight_layout()
plt.show()
plt.close(fig)
check_true("finite loss curve", np.all(np.isfinite(loss)))
print("Takeaway: plotting the objective and gradient makes gradient direction visible before training a large model.")
Demo 2: Descent Lemma
This cell checks a small numerical fact connected to descent lemma.
Code cell 7
header("Demo 2: update-size computation")
theta = np.array([1.5, -0.5, 0.25], dtype=float)
H = np.diag([1.0, 3.0, 5.0])
grad = H @ theta
eta = 0.1 / (2)
step = -eta * grad
new_theta = theta + step
old_loss = 0.5 * theta @ H @ theta
new_loss = 0.5 * new_theta @ H @ new_theta
print("old_loss =", round(float(old_loss), 6))
print("new_loss =", round(float(new_loss), 6))
print("relative_update =", round(float(np.linalg.norm(step) / max(np.linalg.norm(theta), 1e-12)), 6))
check_true("descent on this quadratic", new_loss < old_loss)
print("Takeaway: descent lemma should be checked through both loss change and update magnitude.")
Demo 3: Constant Step Size
This cell checks a small numerical fact connected to constant step size.
Code cell 9
header("Demo 3: stochastic estimate")
rng = np.random.default_rng(42 + 2)
samples = rng.normal(loc=0.0, scale=1.0, size=(256, 3))
theta = np.array([0.2, -0.4, 0.6])
full_grad = samples.T @ (samples @ theta) / len(samples)
batch = samples[:32]
batch_grad = batch.T @ (batch @ theta) / len(batch)
gap = np.linalg.norm(batch_grad - full_grad)
print("full_grad =", np.round(full_grad, 5))
print("batch_grad =", np.round(batch_grad, 5))
print("gradient_gap =", round(float(gap), 6))
check_true("finite stochastic estimate", np.isfinite(gap))
print("Takeaway: even when the section is not stochastic, minibatch estimates affect how constant step size appears in practice.")
Demo 4: Exact Line Search
This cell checks a small numerical fact connected to exact line search.
Code cell 11
header("Demo 4: closed-form verification")
values = np.array([4.0, 5.0, 7.0])
mean_value = float(values.mean())
centered = values - mean_value
energy = float(np.dot(centered, centered))
manual = float(sum((v - mean_value) ** 2 for v in values))
print("values =", values)
print("centered_energy =", round(energy, 6))
check_close("manual equals vectorized computation", energy, manual)
print("Takeaway: small closed-form checks prevent conceptual drift when implementing exact line search.")
Demo 5: Backtracking Line Search
This cell checks a small numerical fact connected to backtracking line search.
Code cell 13
header("Demo 5: Backtracking Line Search")
x = np.linspace(-3, 3, 200)
a = 5
freq = 5
loss = 0.5 * a * x**2 + 0.1 * np.sin(freq * x)
grad = a * x + 0.1 * freq * np.cos(freq * x)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, loss, color=COLORS["primary"], label="loss")
ax.plot(x, grad, color=COLORS["secondary"], linestyle="--", label="gradient")
ax.set_title("Gradient Descent: backtracking line search diagnostic")
ax.set_xlabel("Parameter $\\theta$")
ax.set_ylabel("Value")
ax.legend(loc="best")
fig.tight_layout()
plt.show()
plt.close(fig)
check_true("finite loss curve", np.all(np.isfinite(loss)))
print("Takeaway: plotting the objective and gradient makes backtracking line search visible before training a large model.")
Demo 6: Armijo Condition
This cell checks a small numerical fact connected to Armijo condition.
Code cell 15
header("Demo 6: update-size computation")
theta = np.array([1.5, -0.5, 0.25], dtype=float)
H = np.diag([1.0, 3.0, 4.0])
grad = H @ theta
eta = 0.1 / (3)
step = -eta * grad
new_theta = theta + step
old_loss = 0.5 * theta @ H @ theta
new_loss = 0.5 * new_theta @ H @ new_theta
print("old_loss =", round(float(old_loss), 6))
print("new_loss =", round(float(new_loss), 6))
print("relative_update =", round(float(np.linalg.norm(step) / max(np.linalg.norm(theta), 1e-12)), 6))
check_true("descent on this quadratic", new_loss < old_loss)
print("Takeaway: Armijo condition should be checked through both loss change and update magnitude.")
Demo 7: Wolfe Conditions
This cell checks a small numerical fact connected to Wolfe conditions.
Code cell 17
header("Demo 7: stochastic estimate")
rng = np.random.default_rng(42 + 6)
samples = rng.normal(loc=0.0, scale=1.0, size=(256, 3))
theta = np.array([0.2, -0.4, 0.6])
full_grad = samples.T @ (samples @ theta) / len(samples)
batch = samples[:32]
batch_grad = batch.T @ (batch @ theta) / len(batch)
gap = np.linalg.norm(batch_grad - full_grad)
print("full_grad =", np.round(full_grad, 5))
print("batch_grad =", np.round(batch_grad, 5))
print("gradient_gap =", round(float(gap), 6))
check_true("finite stochastic estimate", np.isfinite(gap))
print("Takeaway: even when the section is not stochastic, minibatch estimates affect how Wolfe conditions appears in practice.")
Demo 8: Convex Convergence
This cell checks a small numerical fact connected to convex convergence.
Code cell 19
header("Demo 8: closed-form verification")
values = np.array([8.0, 9.0, 11.0])
mean_value = float(values.mean())
centered = values - mean_value
energy = float(np.dot(centered, centered))
manual = float(sum((v - mean_value) ** 2 for v in values))
print("values =", values)
print("centered_energy =", round(energy, 6))
check_close("manual equals vectorized computation", energy, manual)
print("Takeaway: small closed-form checks prevent conceptual drift when implementing convex convergence.")
Demo 9: Strongly Convex Convergence
This cell checks a small numerical fact connected to strongly convex convergence.
Code cell 21
header("Demo 9: Strongly Convex Convergence")
x = np.linspace(-3, 3, 200)
a = 4
freq = 9
loss = 0.5 * a * x**2 + 0.1 * np.sin(freq * x)
grad = a * x + 0.1 * freq * np.cos(freq * x)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, loss, color=COLORS["primary"], label="loss")
ax.plot(x, grad, color=COLORS["secondary"], linestyle="--", label="gradient")
ax.set_title("Gradient Descent: strongly convex convergence diagnostic")
ax.set_xlabel("Parameter $\\theta$")
ax.set_ylabel("Value")
ax.legend(loc="best")
fig.tight_layout()
plt.show()
plt.close(fig)
check_true("finite loss curve", np.all(np.isfinite(loss)))
print("Takeaway: plotting the objective and gradient makes strongly convex convergence visible before training a large model.")
Demo 10: Nonconvex Stationarity
This cell checks a small numerical fact connected to nonconvex stationarity.
Code cell 23
header("Demo 10: update-size computation")
theta = np.array([1.5, -0.5, 0.25], dtype=float)
H = np.diag([1.0, 3.0, 8.0])
grad = H @ theta
eta = 0.1 / (1)
step = -eta * grad
new_theta = theta + step
old_loss = 0.5 * theta @ H @ theta
new_loss = 0.5 * new_theta @ H @ new_theta
print("old_loss =", round(float(old_loss), 6))
print("new_loss =", round(float(new_loss), 6))
print("relative_update =", round(float(np.linalg.norm(step) / max(np.linalg.norm(theta), 1e-12)), 6))
check_true("descent on this quadratic", new_loss < old_loss)
print("Takeaway: nonconvex stationarity should be checked through both loss change and update magnitude.")
Demo 11: PL Condition
This cell checks a small numerical fact connected to PL condition.
Code cell 25
header("Demo 11: stochastic estimate")
rng = np.random.default_rng(42 + 10)
samples = rng.normal(loc=0.0, scale=1.0, size=(256, 3))
theta = np.array([0.2, -0.4, 0.6])
full_grad = samples.T @ (samples @ theta) / len(samples)
batch = samples[:32]
batch_grad = batch.T @ (batch @ theta) / len(batch)
gap = np.linalg.norm(batch_grad - full_grad)
print("full_grad =", np.round(full_grad, 5))
print("batch_grad =", np.round(batch_grad, 5))
print("gradient_gap =", round(float(gap), 6))
check_true("finite stochastic estimate", np.isfinite(gap))
print("Takeaway: even when the section is not stochastic, minibatch estimates affect how PL condition appears in practice.")
Demo 12: Condition Number
This cell checks a small numerical fact connected to condition number.
Code cell 27
header("Demo 12: closed-form verification")
values = np.array([12.0, 13.0, 15.0])
mean_value = float(values.mean())
centered = values - mean_value
energy = float(np.dot(centered, centered))
manual = float(sum((v - mean_value) ** 2 for v in values))
print("values =", values)
print("centered_energy =", round(energy, 6))
check_close("manual equals vectorized computation", energy, manual)
print("Takeaway: small closed-form checks prevent conceptual drift when implementing condition number.")
Demo 13: Polyak Momentum
This cell checks a small numerical fact connected to Polyak momentum.
Code cell 29
header("Demo 13: Polyak Momentum")
x = np.linspace(-3, 3, 200)
a = 3
freq = 13
loss = 0.5 * a * x**2 + 0.1 * np.sin(freq * x)
grad = a * x + 0.1 * freq * np.cos(freq * x)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, loss, color=COLORS["primary"], label="loss")
ax.plot(x, grad, color=COLORS["secondary"], linestyle="--", label="gradient")
ax.set_title("Gradient Descent: Polyak momentum diagnostic")
ax.set_xlabel("Parameter $\\theta$")
ax.set_ylabel("Value")
ax.legend(loc="best")
fig.tight_layout()
plt.show()
plt.close(fig)
check_true("finite loss curve", np.all(np.isfinite(loss)))
print("Takeaway: plotting the objective and gradient makes Polyak momentum visible before training a large model.")
Demo 14: Nesterov Acceleration
This cell checks a small numerical fact connected to Nesterov acceleration.
Code cell 31
header("Demo 14: update-size computation")
theta = np.array([1.5, -0.5, 0.25], dtype=float)
H = np.diag([1.0, 3.0, 7.0])
grad = H @ theta
eta = 0.1 / (2)
step = -eta * grad
new_theta = theta + step
old_loss = 0.5 * theta @ H @ theta
new_loss = 0.5 * new_theta @ H @ new_theta
print("old_loss =", round(float(old_loss), 6))
print("new_loss =", round(float(new_loss), 6))
print("relative_update =", round(float(np.linalg.norm(step) / max(np.linalg.norm(theta), 1e-12)), 6))
check_true("descent on this quadratic", new_loss < old_loss)
print("Takeaway: Nesterov acceleration should be checked through both loss change and update magnitude.")
Demo 15: Gradient Flow
This cell checks a small numerical fact connected to gradient flow.
Code cell 33
header("Demo 15: stochastic estimate")
rng = np.random.default_rng(42 + 14)
samples = rng.normal(loc=0.0, scale=1.0, size=(256, 3))
theta = np.array([0.2, -0.4, 0.6])
full_grad = samples.T @ (samples @ theta) / len(samples)
batch = samples[:32]
batch_grad = batch.T @ (batch @ theta) / len(batch)
gap = np.linalg.norm(batch_grad - full_grad)
print("full_grad =", np.round(full_grad, 5))
print("batch_grad =", np.round(batch_grad, 5))
print("gradient_gap =", round(float(gap), 6))
check_true("finite stochastic estimate", np.isfinite(gap))
print("Takeaway: even when the section is not stochastic, minibatch estimates affect how gradient flow appears in practice.")
Demo 16: Oscillation
This cell checks a small numerical fact connected to oscillation.
Code cell 35
header("Demo 16: closed-form verification")
values = np.array([16.0, 17.0, 19.0])
mean_value = float(values.mean())
centered = values - mean_value
energy = float(np.dot(centered, centered))
manual = float(sum((v - mean_value) ** 2 for v in values))
print("values =", values)
print("centered_energy =", round(energy, 6))
check_close("manual equals vectorized computation", energy, manual)
print("Takeaway: small closed-form checks prevent conceptual drift when implementing oscillation.")
Demo 17: Edge Of Stability Preview
This cell checks a small numerical fact connected to edge of stability preview.
Code cell 37
header("Demo 17: Edge Of Stability Preview")
x = np.linspace(-3, 3, 200)
a = 2
freq = 17
loss = 0.5 * a * x**2 + 0.1 * np.sin(freq * x)
grad = a * x + 0.1 * freq * np.cos(freq * x)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, loss, color=COLORS["primary"], label="loss")
ax.plot(x, grad, color=COLORS["secondary"], linestyle="--", label="gradient")
ax.set_title("Gradient Descent: edge of stability preview diagnostic")
ax.set_xlabel("Parameter $\\theta$")
ax.set_ylabel("Value")
ax.legend(loc="best")
fig.tight_layout()
plt.show()
plt.close(fig)
check_true("finite loss curve", np.all(np.isfinite(loss)))
print("Takeaway: plotting the objective and gradient makes edge of stability preview visible before training a large model.")
Demo 18: Gradient Clipping Preview
This cell checks a small numerical fact connected to gradient clipping preview.
Code cell 39
header("Demo 18: update-size computation")
theta = np.array([1.5, -0.5, 0.25], dtype=float)
H = np.diag([1.0, 3.0, 6.0])
grad = H @ theta
eta = 0.1 / (3)
step = -eta * grad
new_theta = theta + step
old_loss = 0.5 * theta @ H @ theta
new_loss = 0.5 * new_theta @ H @ new_theta
print("old_loss =", round(float(old_loss), 6))
print("new_loss =", round(float(new_loss), 6))
print("relative_update =", round(float(np.linalg.norm(step) / max(np.linalg.norm(theta), 1e-12)), 6))
check_true("descent on this quadratic", new_loss < old_loss)
print("Takeaway: gradient clipping preview should be checked through both loss change and update magnitude.")
Demo 19: Linear Regression by GD
This cell checks a small numerical fact connected to linear regression by GD.
Code cell 41
header("Demo 19: stochastic estimate")
rng = np.random.default_rng(42 + 18)
samples = rng.normal(loc=0.0, scale=1.0, size=(256, 3))
theta = np.array([0.2, -0.4, 0.6])
full_grad = samples.T @ (samples @ theta) / len(samples)
batch = samples[:32]
batch_grad = batch.T @ (batch @ theta) / len(batch)
gap = np.linalg.norm(batch_grad - full_grad)
print("full_grad =", np.round(full_grad, 5))
print("batch_grad =", np.round(batch_grad, 5))
print("gradient_gap =", round(float(gap), 6))
check_true("finite stochastic estimate", np.isfinite(gap))
print("Takeaway: even when the section is not stochastic, minibatch estimates affect how linear regression by GD appears in practice.")
Demo 20: Logistic Regression by GD
This cell checks a small numerical fact connected to logistic regression by GD.
Code cell 43
header("Demo 20: closed-form verification")
values = np.array([20.0, 21.0, 23.0])
mean_value = float(values.mean())
centered = values - mean_value
energy = float(np.dot(centered, centered))
manual = float(sum((v - mean_value) ** 2 for v in values))
print("values =", values)
print("centered_energy =", round(energy, 6))
check_close("manual equals vectorized computation", energy, manual)
print("Takeaway: small closed-form checks prevent conceptual drift when implementing logistic regression by GD.")
Demo 21: Learning-Rate Diagnostics
This cell checks a small numerical fact connected to learning-rate diagnostics.
Code cell 45
header("Demo 21: Learning-Rate Diagnostics")
x = np.linspace(-3, 3, 200)
a = 1
freq = 21
loss = 0.5 * a * x**2 + 0.1 * np.sin(freq * x)
grad = a * x + 0.1 * freq * np.cos(freq * x)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x, loss, color=COLORS["primary"], label="loss")
ax.plot(x, grad, color=COLORS["secondary"], linestyle="--", label="gradient")
ax.set_title("Gradient Descent: learning-rate diagnostics diagnostic")
ax.set_xlabel("Parameter $\\theta$")
ax.set_ylabel("Value")
ax.legend(loc="best")
fig.tight_layout()
plt.show()
plt.close(fig)
check_true("finite loss curve", np.all(np.isfinite(loss)))
print("Takeaway: plotting the objective and gradient makes learning-rate diagnostics visible before training a large model.")
Demo 22: Optimization Loop Design
This cell checks a small numerical fact connected to optimization loop design.
Code cell 47
header("Demo 22: update-size computation")
theta = np.array([1.5, -0.5, 0.25], dtype=float)
H = np.diag([1.0, 3.0, 5.0])
grad = H @ theta
eta = 0.1 / (1)
step = -eta * grad
new_theta = theta + step
old_loss = 0.5 * theta @ H @ theta
new_loss = 0.5 * new_theta @ H @ new_theta
print("old_loss =", round(float(old_loss), 6))
print("new_loss =", round(float(new_loss), 6))
print("relative_update =", round(float(np.linalg.norm(step) / max(np.linalg.norm(theta), 1e-12)), 6))
check_true("descent on this quadratic", new_loss < old_loss)
print("Takeaway: optimization loop design should be checked through both loss change and update magnitude.")
Demo 23: Gradient Direction
This cell checks a small numerical fact connected to gradient direction.
Code cell 49
header("Demo 23: stochastic estimate")
rng = np.random.default_rng(42 + 22)
samples = rng.normal(loc=0.0, scale=1.0, size=(256, 3))
theta = np.array([0.2, -0.4, 0.6])
full_grad = samples.T @ (samples @ theta) / len(samples)
batch = samples[:32]
batch_grad = batch.T @ (batch @ theta) / len(batch)
gap = np.linalg.norm(batch_grad - full_grad)
print("full_grad =", np.round(full_grad, 5))
print("batch_grad =", np.round(batch_grad, 5))
print("gradient_gap =", round(float(gap), 6))
check_true("finite stochastic estimate", np.isfinite(gap))
print("Takeaway: even when the section is not stochastic, minibatch estimates affect how gradient direction appears in practice.")
Demo 24: Descent Lemma
This cell checks a small numerical fact connected to descent lemma.
Code cell 51
header("Demo 24: closed-form verification")
values = np.array([24.0, 25.0, 27.0])
mean_value = float(values.mean())
centered = values - mean_value
energy = float(np.dot(centered, centered))
manual = float(sum((v - mean_value) ** 2 for v in values))
print("values =", values)
print("centered_energy =", round(energy, 6))
check_close("manual equals vectorized computation", energy, manual)
print("Takeaway: small closed-form checks prevent conceptual drift when implementing descent lemma.")