Converted from exercises.ipynb for web reading.

Hyperparameter Optimization - Exercises

Ten graded exercises. Each exercise has a problem statement, a scaffold cell, and a solution cell.

Code cell 2

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False

mpl.rcParams.update({
    "figure.figsize":    (10, 6),
    "figure.dpi":         120,
    "font.size":           13,
    "axes.titlesize":      15,
    "axes.labelsize":      13,
    "xtick.labelsize":     11,
    "ytick.labelsize":     11,
    "legend.fontsize":     11,
    "legend.framealpha":   0.85,
    "lines.linewidth":      2.0,
    "axes.spines.top":     False,
    "axes.spines.right":   False,
    "savefig.bbox":       "tight",
    "savefig.dpi":         150,
})
np.random.seed(42)
print("Plot setup complete.")

Exercise 1 [*]: Log-Uniform Sampling

  1. State the relevant definition for log-uniform sampling.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 4

# Your Solution
print("Exercise 1 scaffold: fill in the missing computation for log-uniform sampling.")
answer = None
print("answer =", answer)

Code cell 5

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 1: Log-Uniform Sampling")
vector = np.array([1.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 4.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: log-uniform sampling is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 2 [*]: Random Search

  1. State the relevant definition for random search.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 7

# Your Solution
print("Exercise 2 scaffold: fill in the missing computation for random search.")
answer = None
print("answer =", answer)

Code cell 8

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 2: Random Search")
vector = np.array([2.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 7.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: random search is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 3 [*]: Surrogate Model

  1. State the relevant definition for surrogate model.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 10

# Your Solution
print("Exercise 3 scaffold: fill in the missing computation for surrogate model.")
answer = None
print("answer =", answer)

Code cell 11

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 3: Surrogate Model")
vector = np.array([3.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 12.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: surrogate model is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 4 [**]: Expected Improvement

  1. State the relevant definition for expected improvement.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 13

# Your Solution
print("Exercise 4 scaffold: fill in the missing computation for expected improvement.")
answer = None
print("answer =", answer)

Code cell 14

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 4: Expected Improvement")
vector = np.array([4.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 19.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: expected improvement is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 5 [**]: Thompson Sampling

  1. State the relevant definition for Thompson sampling.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 16

# Your Solution
print("Exercise 5 scaffold: fill in the missing computation for Thompson sampling.")
answer = None
print("answer =", answer)

Code cell 17

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 5: Thompson Sampling")
vector = np.array([5.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 28.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: Thompson sampling is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 6 [**]: Successive Halving

  1. State the relevant definition for successive halving.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 19

# Your Solution
print("Exercise 6 scaffold: fill in the missing computation for successive halving.")
answer = None
print("answer =", answer)

Code cell 20

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 6: Successive Halving")
vector = np.array([6.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 39.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: successive halving is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 7 [**]: ASHA

  1. State the relevant definition for ASHA.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 22

# Your Solution
print("Exercise 7 scaffold: fill in the missing computation for ASHA.")
answer = None
print("answer =", answer)

Code cell 23

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 7: Asha")
vector = np.array([7.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 52.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: ASHA is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 8 [***]: Population-Based Training

  1. State the relevant definition for population-based training.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 25

# Your Solution
print("Exercise 8 scaffold: fill in the missing computation for population-based training.")
answer = None
print("answer =", answer)

Code cell 26

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 8: Population-Based Training")
vector = np.array([8.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 67.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: population-based training is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 9 [***]: Pareto Frontier

  1. State the relevant definition for Pareto frontier.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 28

# Your Solution
print("Exercise 9 scaffold: fill in the missing computation for Pareto frontier.")
answer = None
print("answer =", answer)

Code cell 29

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 9: Pareto Frontier")
vector = np.array([9.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 84.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: Pareto frontier is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 10 [***]: Nested Validation

  1. State the relevant definition for nested validation.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 31

# Your Solution
print("Exercise 10 scaffold: fill in the missing computation for nested validation.")
answer = None
print("answer =", answer)

Code cell 32

# Solution
import numpy as np

def header(title):
    print("\n" + "=" * 72)
    print(title)
    print("=" * 72)

def check_close(name, value, target, tol=1e-8):
    ok = abs(float(value) - float(target)) <= tol
    print(f"{'PASS' if ok else 'FAIL'} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    ok = bool(condition)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        raise AssertionError(name)

header("Exercise 10: Nested Validation")
vector = np.array([10.0, 1.0, -1.0])
answer = float(vector[0] ** 2 + 3.0)
check_close("toy scalar computation", answer, 103.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: nested validation is interpreted through the objective, update, or diagnostic in Hyperparameter Optimization.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")