Exercises Notebook — Math for LLMs

Learning Rate Schedules

Optimization / Learning Rate Schedules

Run notebook
Exercises Notebook

Exercises Notebook

Converted from exercises.ipynb for web reading.

Learning Rate Schedules - Exercises

Ten graded exercises. Each exercise has a problem, scaffold, and solution cell.

Code cell 2

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

# Prefer seaborn's colorblind-friendly theme; fall back to the bundled
# matplotlib equivalent when seaborn is not installed.
try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False

# Shared rcParams overrides for every figure in this notebook.
_RC_OVERRIDES = {
    "figure.figsize": (10, 6),
    "figure.dpi": 120,
    "font.size": 13,
    "axes.titlesize": 15,
    "axes.labelsize": 13,
    "xtick.labelsize": 11,
    "ytick.labelsize": 11,
    "legend.fontsize": 11,
    "legend.framealpha": 0.85,
    "lines.linewidth": 2.0,
    "axes.spines.top": False,
    "axes.spines.right": False,
    "savefig.bbox": "tight",
    "savefig.dpi": 150,
}
mpl.rcParams.update(_RC_OVERRIDES)

np.random.seed(42)  # reproducible randomness across reruns
print("Plot setup complete.")

Exercise 1 [*]: Step Decay

  1. State the relevant definition for step decay.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 4

# Your Solution
# TODO: replace `answer = None` with the requested step-decay computation.
scaffold_note = "Exercise 1 scaffold: fill in the missing computation for step decay."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 5

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 1: Step Decay")
# Toy quantity: first component squared plus a constant offset (1^2 + 3 = 4).
vector = np.array([1.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 4.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: step decay is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 2 [*]: Polynomial Decay

  1. State the relevant definition for polynomial decay.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 7

# Your Solution
# TODO: replace `answer = None` with the requested polynomial-decay computation.
scaffold_note = "Exercise 2 scaffold: fill in the missing computation for polynomial decay."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 8

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 2: Polynomial Decay")
# Toy quantity: first component squared plus a constant offset (2^2 + 3 = 7).
vector = np.array([2.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 7.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: polynomial decay is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 3 [*]: Warmup Ratio

  1. State the relevant definition for warmup ratio.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 10

# Your Solution
# TODO: replace `answer = None` with the requested warmup-ratio computation.
scaffold_note = "Exercise 3 scaffold: fill in the missing computation for warmup ratio."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 11

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 3: Warmup Ratio")
# Toy quantity: first component squared plus a constant offset (3^2 + 3 = 12).
vector = np.array([3.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 12.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: warmup ratio is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 4 [**]: Cosine With Restarts

  1. State the relevant definition for cosine with restarts.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 13

# Your Solution
# TODO: replace `answer = None` with the requested cosine-with-restarts computation.
scaffold_note = "Exercise 4 scaffold: fill in the missing computation for cosine with restarts."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 14

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 4: Cosine With Restarts")
# Toy quantity: first component squared plus a constant offset (4^2 + 3 = 19).
vector = np.array([4.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 19.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: cosine with restarts is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 5 [**]: One-Cycle Policy

  1. State the relevant definition for one-cycle policy.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 16

# Your Solution
# TODO: replace `answer = None` with the requested one-cycle-policy computation.
scaffold_note = "Exercise 5 scaffold: fill in the missing computation for one-cycle policy."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 17

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 5: One-Cycle Policy")
# Toy quantity: first component squared plus a constant offset (5^2 + 3 = 28).
vector = np.array([5.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 28.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: one-cycle policy is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 6 [**]: Inverse-Square-Root Decay

  1. State the relevant definition for inverse-square-root decay.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 19

# Your Solution
# TODO: replace `answer = None` with the requested inverse-square-root-decay computation.
scaffold_note = "Exercise 6 scaffold: fill in the missing computation for inverse-square-root decay."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 20

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 6: Inverse-Square-Root Decay")
# Toy quantity: first component squared plus a constant offset (6^2 + 3 = 39).
vector = np.array([6.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 39.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: inverse-square-root decay is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 7 [**]: Cooldown

  1. State the relevant definition for cooldown.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 22

# Your Solution
# TODO: replace `answer = None` with the requested cooldown computation.
scaffold_note = "Exercise 7 scaffold: fill in the missing computation for cooldown."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 23

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 7: Cooldown")
# Toy quantity: first component squared plus a constant offset (7^2 + 3 = 52).
vector = np.array([7.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 52.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: cooldown is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 8 [***]: Batch-Size Scaling

  1. State the relevant definition for batch-size scaling.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 25

# Your Solution
# TODO: replace `answer = None` with the requested batch-size-scaling computation.
scaffold_note = "Exercise 8 scaffold: fill in the missing computation for batch-size scaling."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 26

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 8: Batch-Size Scaling")
# Toy quantity: first component squared plus a constant offset (8^2 + 3 = 67).
vector = np.array([8.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 67.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: batch-size scaling is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 9 [***]: Token-Budget Scheduling

  1. State the relevant definition for token-budget scheduling.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 28

# Your Solution
# TODO: replace `answer = None` with the requested token-budget-scheduling computation.
scaffold_note = "Exercise 9 scaffold: fill in the missing computation for token-budget scheduling."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 29

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 9: Token-Budget Scheduling")
# Toy quantity: first component squared plus a constant offset (9^2 + 3 = 84).
vector = np.array([9.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 84.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: token-budget scheduling is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")

Exercise 10 [***]: LLM Pretraining Schedule Design

  1. State the relevant definition for LLM pretraining schedule design.
  2. Compute the requested toy quantity.
  3. Explain the optimization diagnostic you would log in a real model-training run.

Code cell 31

# Your Solution
# TODO: replace `answer = None` with the requested schedule-design computation.
scaffold_note = "Exercise 10 scaffold: fill in the missing computation for LLM pretraining schedule design."
print(scaffold_note)
answer = None
print("answer =", answer)

Code cell 32

# Solution
import numpy as np

RULE = "=" * 72  # banner width shared by every header

def header(title):
    """Print a banner delimiting this exercise's output."""
    print(f"\n{RULE}\n{title}\n{RULE}")

def check_close(name, value, target, tol=1e-8):
    """Verify `value` is within `tol` of `target`; raise AssertionError otherwise."""
    ok = abs(float(value) - float(target)) <= tol
    status = "PASS" if ok else "FAIL"
    print(f"{status} - {name}: value={value:.8f}, target={target:.8f}")
    if not ok:
        raise AssertionError(name)

def check_true(name, condition):
    """Verify a boolean condition holds; raise AssertionError otherwise."""
    if bool(condition):
        print(f"PASS - {name}")
    else:
        print(f"FAIL - {name}")
        raise AssertionError(name)

header("Exercise 10: Llm Pretraining Schedule Design")
# Toy quantity: first component squared plus a constant offset (10^2 + 3 = 103).
vector = np.array([10.0, 1.0, -1.0])
answer = float(vector[0] * vector[0] + 3.0)
check_close("toy scalar computation", answer, 103.0)
check_true("finite answer", np.isfinite(answer))
print("Definition anchor: LLM pretraining schedule design is interpreted through the objective, update, or diagnostic in Learning Rate Schedules.")
print("\nTakeaway: a tiny verified computation is the fastest way to test intuition before scaling an optimizer experiment.")