Exercises Notebook
Converted from exercises.ipynb for web reading.
Stochastic Optimization - Exercises
Ten graded exercises. Each exercise has a problem, scaffold, and solution cell.
Code cell 2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

# Prefer seaborn's colorblind-friendly theme; fall back to the matplotlib
# port of the same style when seaborn is not installed.
try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False

# Shared figure defaults applied to every plot produced by this notebook.
_RC_DEFAULTS = {
    "figure.figsize": (10, 6),
    "figure.dpi": 120,
    "font.size": 13,
    "axes.titlesize": 15,
    "axes.labelsize": 13,
    "xtick.labelsize": 11,
    "ytick.labelsize": 11,
    "legend.fontsize": 11,
    "legend.framealpha": 0.85,
    "lines.linewidth": 2.0,
    "axes.spines.top": False,
    "axes.spines.right": False,
    "savefig.bbox": "tight",
    "savefig.dpi": 150,
}
mpl.rcParams.update(_RC_DEFAULTS)

# Seed the legacy global generator so sampling is reproducible across runs.
np.random.seed(42)
print("Plot setup complete.")
Exercise 1 [*]: Population Risk
- State the relevant definition for population risk.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 4
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 1 scaffold: fill in the missing computation for population risk.")
print("answer =", answer)
Code cell 5
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 1: Population Risk")
vector = np.array([1.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 4.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: population risk is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 2 [*]: Gradient Variance
- State the relevant definition for gradient variance.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 7
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 2 scaffold: fill in the missing computation for gradient variance.")
print("answer =", answer)
Code cell 8
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 2: Gradient Variance")
vector = np.array([2.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 7.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: gradient variance is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 3 [*]: Batch-Size Scaling
- State the relevant definition for batch-size scaling.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 10
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 3 scaffold: fill in the missing computation for batch-size scaling.")
print("answer =", answer)
Code cell 11
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 3: Batch-Size Scaling")
vector = np.array([3.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 12.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: batch-size scaling is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 4 [**]: Robbins-Monro Schedule
- State the relevant definition for Robbins-Monro schedule.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 13
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 4 scaffold: fill in the missing computation for Robbins-Monro schedule.")
print("answer =", answer)
Code cell 14
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 4: Robbins-Monro Schedule")
vector = np.array([4.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 19.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: Robbins-Monro schedule is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 5 [**]: Strongly Convex SGD
- State the relevant definition for strongly convex SGD.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 16
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 5 scaffold: fill in the missing computation for strongly convex SGD.")
print("answer =", answer)
Code cell 17
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 5: Strongly Convex Sgd")
vector = np.array([5.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 28.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: strongly convex SGD is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 6 [**]: Gradient Noise Scale
- State the relevant definition for gradient noise scale.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 19
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 6 scaffold: fill in the missing computation for gradient noise scale.")
print("answer =", answer)
Code cell 20
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 6: Gradient Noise Scale")
vector = np.array([6.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 39.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: gradient noise scale is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 7 [**]: SAGA
- State the relevant definition for SAGA.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 22
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 7 scaffold: fill in the missing computation for SAGA.")
print("answer =", answer)
Code cell 23
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 7: Saga")
vector = np.array([7.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 52.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: SAGA is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 8 [***]: Polyak Averaging
- State the relevant definition for Polyak averaging.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 25
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 8 scaffold: fill in the missing computation for Polyak averaging.")
print("answer =", answer)
Code cell 26
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 8: Polyak Averaging")
vector = np.array([8.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 67.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: Polyak averaging is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 9 [***]: Gradient Accumulation
- State the relevant definition for gradient accumulation.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 28
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 9 scaffold: fill in the missing computation for gradient accumulation.")
print("answer =", answer)
Code cell 29
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 9: Gradient Accumulation")
vector = np.array([9.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 84.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: gradient accumulation is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")
Exercise 10 [***]: Federated Averaging
- State the relevant definition for federated averaging.
- Compute the requested toy quantity.
- Explain the optimization diagnostic you would log in a real model-training run.
Code cell 31
# Your Solution
# TODO: replace the placeholder below with the requested computation.
answer = None
print("Exercise 10 scaffold: fill in the missing computation for federated averaging.")
print("answer =", answer)
Code cell 32
# Solution
import numpy as np

_RULE = "=" * 72

def header(title):
    """Print a banner separating this exercise's console output."""
    print("\n" + _RULE)
    print(title)
    print(_RULE)

def check_close(name, value, target, tol=1e-8):
    """Report PASS/FAIL for |value - target| <= tol; raise AssertionError on FAIL."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Report PASS/FAIL for a boolean condition; raise AssertionError on FAIL."""
    ok = bool(condition)
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 10: Federated Averaging")
vector = np.array([10.0, 1.0, -1.0])
# Toy quantity: square of the first component plus a constant offset of 3.
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 103.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: federated averaging is interpreted through the objective, update, or diagnostic in Stochastic Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")