Numerical Integration — Exercises

The only way to learn mathematics is to do mathematics.

Companion: notes.md | theory.ipynb

Difficulty: ★ = Straightforward | ★★ = Moderate | ★★★ = Challenging

Code cell 2

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False

mpl.rcParams.update({
    "figure.figsize":    (10, 6),
    "figure.dpi":         120,
    "font.size":           13,
    "axes.titlesize":      15,
    "axes.labelsize":      13,
    "xtick.labelsize":     11,
    "ytick.labelsize":     11,
    "legend.fontsize":     11,
    "legend.framealpha":   0.85,
    "lines.linewidth":      2.0,
    "axes.spines.top":     False,
    "axes.spines.right":   False,
    "savefig.bbox":       "tight",
    "savefig.dpi":         150,
})
np.random.seed(42)
print("Plot setup complete.")

Code cell 3

import numpy as np
import numpy.linalg as nla
from scipy import stats
import scipy.linalg as la
from scipy import integrate

try:
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    plt.style.use('seaborn-v0_8-whitegrid')
    mpl.rcParams.update({'figure.figsize': (10,6), 'figure.dpi':120,
                         'axes.spines.top':False, 'axes.spines.right':False})
    HAS_MPL = True
except ImportError:
    HAS_MPL = False

COLORS = {'primary':'#0077BB','secondary':'#EE7733','error':'#CC3311','neutral':'#555555'}
np.set_printoptions(precision=8, suppress=True)
np.random.seed(42)
print('Setup complete.')

print("Chapter helper setup complete.")

def header(title):
    print("\n" + "=" * len(title))
    print(title)
    print("=" * len(title))

def check_true(name, cond):
    print(f"{'PASS' if cond else 'FAIL'} - {name}")
    return bool(cond)

def check_close(name, got, expected, tol=1e-8):
    ok = np.allclose(got, expected, atol=tol, rtol=tol)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        print('  expected:', expected)
        print('  got     :', got)
    return bool(ok)

print("Chapter helper setup complete.")

Exercise 1 ★ — Newton-Cotes Rules

(a) Implement composite_midpoint(f, a, b, m), composite_trap(f, a, b, m), and composite_simpson(f, a, b, m) from scratch.

(b) Test all three on $\int_0^1 e^x\,dx = e - 1$ for $m = 10, 100, 1000$. Print the errors and verify $O(h^2)$ convergence for midpoint/trapezoidal and $O(h^4)$ for Simpson's rule.

(c) Show that the midpoint rule has half the error of the trapezoidal rule (same order, smaller constant). Use a convex function like $f(x) = e^x$.
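
A hint for part (c): the standard single-panel error expansions, for a panel of width $h$ and some $\xi$ inside it, are

$$\int - M = \frac{h^3}{24} f''(\xi), \qquad \int - T = -\frac{h^3}{12} f''(\xi),$$

so for a convex $f$ (where $f'' > 0$) the two rules err in opposite directions and the midpoint constant is half the trapezoidal one.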

Code cell 5

# Your Solution
# Exercise 1 — Scaffold

def composite_midpoint(f, a, b, m):
    """Composite midpoint rule with m subintervals."""
    pass

def composite_trap(f, a, b, m):
    """Composite trapezoidal rule with m subintervals."""
    pass

def composite_simpson(f, a, b, m):
    """Composite Simpson's rule with m subintervals (m must be even)."""
    pass

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 6

# Solution
def midpt_sol(f, a, b, m):
    h = (b-a)/m
    x = a + h*(np.arange(m)+0.5)
    return h * f(x).sum()

def trap_sol(f, a, b, m):
    h = (b-a)/m
    x = np.linspace(a,b,m+1)
    y = f(x)
    return h*(y[0]/2 + y[1:-1].sum() + y[-1]/2)

def simp_sol(f, a, b, m):
    assert m%2==0
    h = (b-a)/m
    x = np.linspace(a,b,m+1); y = f(x)
    return h/3*(y[0]+4*y[1:-1:2].sum()+2*y[2:-2:2].sum()+y[-1])

exact = np.e - 1
print(f"{'m':>6}  {'Midpt err':>12}  {'Trap err':>12}  {'Simp err':>12}")
for m in [10,100,1000]:
    em = abs(midpt_sol(np.exp,0,1,m)-exact)
    et = abs(trap_sol(np.exp,0,1,m)-exact)
    es = abs(simp_sol(np.exp,0,1,m)-exact)
    print(f'{m:>6}  {em:>12.4e}  {et:>12.4e}  {es:>12.4e}')

# (c) Midpoint vs trapezoidal ratio
ratio = abs(midpt_sol(np.exp,0,1,100)-exact)/abs(trap_sol(np.exp,0,1,100)-exact)
print(f'\nMidpt/Trap error ratio: {ratio:.4f} (expected ~0.5 for convex f)')
ok = ratio < 0.55
print('PASS - midpoint has the smaller constant' if ok else 'FAIL - ratio larger than expected')

Exercise 2 ★ — Romberg Integration

(a) Implement romberg(f, a, b, max_k) building the triangular table $R_{k,j}$.

(b) Apply it to $\int_0^1 \sin(x^2)\,dx$ and compare the convergence of the diagonal $R_{k,k}$ to the composite trapezoidal rule at the same number of function evaluations.

(c) Show super-geometric convergence: for $f = e^x$, the number of correct digits approximately doubles with each Romberg level.
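
For reference, the recurrence behind the table is Richardson extrapolation of the trapezoidal rule: $R_{k,0}$ is the composite trapezoidal value with $2^k$ panels, and

$$R_{k,j} = \frac{4^j R_{k,j-1} - R_{k-1,j-1}}{4^j - 1}, \qquad 1 \le j \le k,$$

which cancels the $O(h^{2j})$ term of the Euler-Maclaurin expansion at each level.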

Code cell 8

# Your Solution
# Exercise 2 — Scaffold

def romberg(f, a, b, max_k=8, tol=1e-12):
    """
    Romberg's method.
    Returns: (result, levels_used)
    """

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 9

# Solution
def romberg_sol(f, a, b, max_k=8, tol=1e-12):
    R = np.zeros((max_k, max_k))
    h = b-a
    R[0,0] = h/2*(f(a)+f(b))
    for k in range(1, max_k):
        h /= 2
        x_new = a+h*(2*np.arange(1,2**(k-1)+1)-1)
        R[k,0] = R[k-1,0]/2 + h*f(x_new).sum()
        for j in range(1, k+1):
            R[k,j] = (4**j*R[k,j-1]-R[k-1,j-1])/(4**j-1)
        if abs(R[k,k]-R[k-1,k-1]) < tol:
            return R[k,k], k
    return R[max_k-1,max_k-1], max_k

# (b)
exact_s, _ = integrate.quad(lambda x: np.sin(x**2), 0, 1)
result_r, levels = romberg_sol(lambda x: np.sin(x**2), 0, 1)
print(f'Romberg result: {result_r:.10f}, error: {abs(result_r-exact_s):.2e}, levels: {levels}')
print(f'Trap (same evals={2**levels+1}): error: {abs(trap_sol(lambda x:np.sin(x**2),0,1,2**levels)-exact_s):.2e}')

# (c)
print('\nRomberg convergence for exp:')
exact_e = np.e-1
R2 = np.zeros((8,8)); h2 = 1.0; R2[0,0] = 0.5*(np.exp(0)+np.exp(1))
for k in range(1,8):
    h2/=2; x_n=h2*(2*np.arange(1,2**(k-1)+1)-1)
    R2[k,0]=R2[k-1,0]/2+h2*np.exp(x_n).sum()
    for j in range(1,k+1): R2[k,j]=(4**j*R2[k,j-1]-R2[k-1,j-1])/(4**j-1)
    err = abs(R2[k,k]-exact_e)
    digits = -np.log10(err) if err > 0 else 16
    print(f'  k={k}: error={err:.2e}, digits={digits:.1f}')

Exercise 3 ★ — Gauss-Legendre Quadrature

(a) Implement gauss_legendre(n) using the Golub-Welsch algorithm (eigendecomposition of the Jacobi tridiagonal matrix).

(b) Verify: the $n$-point GL rule integrates polynomials $x^k$ exactly for $k \leq 2n-1$ but fails for $k = 2n$.

(c) Compare GL-5 (5 evaluations) with composite Simpson's (101 evaluations) and composite trapezoidal (1001 evaluations) for $\int_0^1 e^x\,dx$.
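
In the Golub-Welsch construction for Legendre polynomials, the symmetric tridiagonal Jacobi matrix has zero diagonal and off-diagonal entries

$$\beta_k = \frac{k}{\sqrt{4k^2 - 1}}, \qquad k = 1, \ldots, n-1;$$

its eigenvalues are the quadrature nodes and the weights are $w_i = 2\, v_{0,i}^2$, where $v_{0,i}$ is the first component of the $i$-th normalized eigenvector.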

Code cell 11

# Your Solution
# Exercise 3 — Scaffold

def gauss_legendre(n):
    """
    Returns (nodes, weights) for n-point GL rule on [-1,1].
    Use np.linalg.eigh on the Jacobi tridiagonal matrix.
    """

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 12

# Solution
def gl_sol(n):
    k = np.arange(1,n); beta = k/np.sqrt(4*k**2-1)
    J = np.diag(beta,-1)+np.diag(beta,1)
    ev, evec = np.linalg.eigh(J)
    idx = np.argsort(ev)
    return ev[idx], 2*evec[0,idx]**2

def gl_integrate_sol(f, a, b, n):
    x, w = gl_sol(n)
    xm = (a+b)/2 + (b-a)/2*x
    return (b-a)/2 * np.dot(w, f(xm))

# (b)
n = 4
print(f'GL-{n} degree of exactness (exact for deg ≤ {2*n-1}):')
for deg in range(0, 2*n+2):
    f_p = lambda x,d=deg: x**d
    exact_v = (1-(-1)**(deg+1))/(deg+1)
    gl_v = gl_integrate_sol(f_p, -1, 1, n)
    ok_str = 'exact' if abs(gl_v-exact_v)<1e-10 else 'FAILS'
    print(f'  deg {deg}: {ok_str} (error={abs(gl_v-exact_v):.2e})')

# (c)
exact = np.e-1
print(f'\nEfficiency comparison for exp on [0,1]:')
print(f'  GL-5 (5 evals):            error = {abs(gl_integrate_sol(np.exp,0,1,5)-exact):.2e}')
print(f'  Simpson m=100 (101 evals): error = {abs(simp_sol(np.exp,0,1,100)-exact):.2e}')
print(f'  Trap m=1000 (1001 evals):  error = {abs(trap_sol(np.exp,0,1,1000)-exact):.2e}')
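
As a quick cross-check (assuming the solution cell above has run, so gl_sol is defined), the nodes and weights can be compared against NumPy's built-in Gauss-Legendre routine; both return nodes in ascending order:

x_ref, w_ref = np.polynomial.legendre.leggauss(5)
x_gw, w_gw = gl_sol(5)
print('max |node diff|  :', np.max(np.abs(x_gw - x_ref)))   # expect ~1e-15
print('max |weight diff|:', np.max(np.abs(w_gw - w_ref)))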

Exercise 4 ★★ — Adaptive Quadrature

(a) Implement adaptive_simpson(f, a, b, tol) using recursive bisection: estimate the error by comparing 1-panel Simpson's vs 2-panel Simpson's.

(b) Test on $\int_0^1 \sqrt{x}\,dx = 2/3$ (smooth but with a singular derivative at 0) and $\int_0^\pi \sin(50x)\,dx = 0$ (highly oscillatory).

(c) Use scipy.integrate.quad with full_output=True to inspect how many subintervals it uses for each integrand. Compare to your implementation.
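
The $/15$ in the error estimate of part (a) comes from the Simpson error expansion: if $S_1$ is the one-panel value and $S_2$ the two-panel value on the same interval, then $I - S_1 \approx C h^5$ and $I - S_2 \approx C h^5 / 16$, so

$$S_2 - S_1 \approx 15\,(I - S_2) \quad\Longrightarrow\quad |I - S_2| \approx \frac{|S_2 - S_1|}{15}.$$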

Code cell 14

# Your Solution
# Exercise 4 — Scaffold

def adaptive_simpson(f, a, b, tol, depth=0, max_depth=50):
    """
    Adaptive Simpson's rule via recursive bisection.
    Error estimate: |S(a,b,2) - S(a,b,1)| / 15
    (where S(a,b,2) uses 2 panels)
    """
    def s1(a, b):
        """1-panel Simpson (3 evaluations)."""
        m = (a+b)/2
        return (b-a)/6*(f(a)+4*f(m)+f(b))

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 15

# Solution
def adaptive_simp_sol(f, a, b, tol, depth=0, max_depth=50):
    m = (a+b)/2
    def s(l, r):
        c = (l+r)/2
        return (r-l)/6*(f(l)+4*f(c)+f(r))
    S_whole = s(a, b)
    S_left = s(a, m); S_right = s(m, b)
    err_est = abs(S_left+S_right-S_whole)/15
    if err_est < tol or depth >= max_depth:
        return S_left+S_right
    return (adaptive_simp_sol(f, a, m, tol/2, depth+1) +
            adaptive_simp_sol(f, m, b, tol/2, depth+1))

# (b)
f1 = lambda x: np.sqrt(np.maximum(x, 1e-300))  # sqrt(x), avoid x=0
r1 = adaptive_simp_sol(f1, 1e-10, 1, tol=1e-8)  # Avoid exact 0
print(f'sqrt(x) from ~0 to 1: {r1:.8f}, exact: {2/3:.8f}, err: {abs(r1-2/3):.2e}')

f2 = lambda x: np.sin(50*x)
r2 = adaptive_simp_sol(f2, 0, np.pi, tol=1e-8)
print(f'sin(50x) from 0 to pi: {r2:.8f}, exact: 0, err: {abs(r2):.2e}')

# (c) scipy comparison
r_scipy, err, info = integrate.quad(f2, 0, np.pi, full_output=True)
print(f'scipy.quad: result={r_scipy:.2e}, neval={info["neval"]}')

Exercise 5 ★★ — Monte Carlo Integration

(a) Estimate $\int_{[0,1]^2} \sqrt{x^2+y^2}\, dx\, dy$ using plain Monte Carlo for $n = 10^3, 10^4, 10^5$. Show the $O(n^{-1/2})$ convergence and compute 95% confidence intervals.

(b) Use importance sampling to estimate $\int_0^\infty x e^{-x^2/2}\, dx = 1$. Compare the variance when sampling from $\text{Uniform}(0, 5)$ vs from $q(x) \propto e^{-x/2}$ (an exponential proposal).

(c) Implement antithetic variates for the 2D integral and measure the variance reduction factor compared to plain MC.
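
The importance-sampling identity used in part (b): for any proposal density $q$ that is positive wherever the integrand is nonzero,

$$\int f(x)\,dx = \mathbb{E}_{X \sim q}\!\left[\frac{f(X)}{q(X)}\right] \approx \frac{1}{n}\sum_{i=1}^n \frac{f(X_i)}{q(X_i)},$$

and the estimator's variance depends on how well $q$ matches the shape of $|f|$.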

Code cell 17

# Your Solution
# Exercise 5 — Scaffold

np.random.seed(42)

# Exact value via scipy
exact_2d, _ = integrate.dblquad(lambda y,x: np.sqrt(x**2+y**2), 0,1, 0,1)
print(f'Exact 2D integral: {exact_2d:.8f}')

# (a) Plain MC

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 18

# Solution
np.random.seed(42)
exact_2d, _ = integrate.dblquad(lambda y, x: np.sqrt(x**2+y**2), 0, 1, 0, 1)  # ~0.7651957
f_2d = lambda X: np.sqrt(X[:,0]**2+X[:,1]**2)

# (a)
print('Plain MC for sqrt(x^2+y^2) on [0,1]^2:')
for n in [1000, 10000, 100000]:
    X = np.random.rand(n, 2)
    fvals = f_2d(X)
    est = fvals.mean()
    se = fvals.std()/np.sqrt(n)
    ci = 1.96*se
    print(f'  n={n:>7}: est={est:.5f} ± {ci:.5f} (95% CI), err={abs(est-exact_2d):.4f}')

# (b) Importance sampling for int x*exp(-x^2/2) dx = 1
exact_is = 1.0
f_is = lambda x: x * np.exp(-x**2/2)

# Uniform(0,5): E_uniform[f(X) * 5] -- IS weight = 5
u_unif = np.random.uniform(0, 5, 100000)
est_unif = (f_is(u_unif) * 5).mean()  # multiply by interval length
var_unif = (f_is(u_unif)*5).var()

# Exponential(0.5): q(x)=0.5*exp(-x/2), IS weight = f/q = x*e^(-x^2/2)/(0.5*e^(-x/2))
u_exp = np.random.exponential(2.0, 100000)  # rate=0.5 -> mean=2
w_exp = f_is(u_exp) / (0.5*np.exp(-u_exp/2))  # likelihood ratio
est_exp = w_exp.mean()
var_exp = w_exp.var()

print(f'\nImportance sampling for E[x*exp(-x^2/2)]:')
print(f'  Uniform(0,5):    est={est_unif:.4f}, variance={var_unif:.4f}')
print(f'  Exponential:     est={est_exp:.4f}, variance={var_exp:.4f}')
print(f'  Variance reduction: {var_unif/max(var_exp,1e-10):.1f}x')

# (c) Antithetic
n_anti = 50000
u1 = np.random.rand(n_anti, 2)
u2 = 1 - u1  # Antithetic
anti_vals = (f_2d(u1)+f_2d(u2))/2
plain_vals = f_2d(np.random.rand(2*n_anti, 2))
print(f'\nAntithetic variates:')
print(f'  Plain SE:     {plain_vals.std()/np.sqrt(2*n_anti):.6f}')
print(f'  Antithetic SE: {anti_vals.std()/np.sqrt(n_anti):.6f}')
print(f'  Variance reduction: {(plain_vals.std()/anti_vals.std())**2:.2f}x')

Exercise 6 ★★ — Euler-Maclaurin and Spectral Accuracy

(a) Numerically verify the Euler-Maclaurin formula for $\int_0^1 x^4\,dx = 1/5$: show that the trapezoidal error at $m=10$ matches $\frac{h^2}{12}[f'(1)-f'(0)] + O(h^4)$.

(b) Demonstrate spectral accuracy for periodic functions: for $f(x) = e^{\sin(2\pi x)}$ on $[0,1]$, show the trapezoidal rule achieves $< 10^{-10}$ error with just 20 points.

(c) Show why the DFT is exact: the trapezoidal rule applied to $e^{2\pi ikx}$ on $[0,1]$ returns the exact Fourier coefficient ($=1$ if $k=0$, $=0$ otherwise) for any $m > |k|$.
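
For part (a), the relevant form of the Euler-Maclaurin expansion for the composite trapezoidal rule $T_m$ with $h = (b-a)/m$ is

$$T_m - \int_a^b f(x)\,dx = \frac{h^2}{12}\left[f'(b) - f'(a)\right] - \frac{h^4}{720}\left[f'''(b) - f'''(a)\right] + \cdots,$$

so when all odd-order derivatives match at the endpoints (as for a smooth periodic $f$ over a full period), every correction term vanishes.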

Code cell 20

# Your Solution
# Exercise 6 — Scaffold

# (a) Euler-Maclaurin for x^4
f_em = lambda x: x**4
f_em_prime = lambda x: 4*x**3
exact_em = 1/5

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 21

# Solution
# (a) Euler-Maclaurin
m = 10; h = 1/m
f_em = lambda x: x**4; exact_em = 1/5
trap_val = trap_sol(f_em, 0, 1, m)
trap_err = trap_val - exact_em
em_leading = h**2/12*(4*1**3 - 4*0**3)  # h^2/12 * (f'(1)-f'(0))
print(f'Trapezoidal error: {trap_err:.8e}')
print(f'EM leading term:   {em_leading:.8e}')
print(f'Ratio error/EM:    {trap_err/em_leading:.4f} (should be ~1)')

# (b) Periodic
f_per = lambda x: np.exp(np.sin(2*np.pi*x))
exact_per, _ = integrate.quad(f_per, 0, 1)
for m_p in [5, 10, 20, 30]:
    err_p = abs(trap_sol(f_per, 0, 1, m_p) - exact_per)
    print(f'm={m_p:>3}: trap error = {err_p:.2e}')

# (c) DFT exactness
k = 3; m_dft = 10
x = np.linspace(0, 1, m_dft, endpoint=False)
h_dft = 1/m_dft
integrand = np.exp(2j*np.pi*k*x)
trap_complex = h_dft * integrand.sum()  # for a periodic integrand, the trapezoid rule reduces to an equal-weight point sum
print(f'\nDFT: integral of e^(2pi*i*{k}*x) on [0,1]:')
print(f'  Trapezoidal result: {trap_complex:.2e} (should be 0 for k≠0)')
print(f'  |result|: {abs(trap_complex):.2e}')
ok = abs(trap_complex) < 1e-14
print(f'  PASS: trapezoidal gives exact DFT' if ok else f'  FAIL: |result|={abs(trap_complex):.2e}')

Exercise 7 ★★ — Gauss-Hermite and the ELBO

(a) Implement gauss_hermite_expect(f, mu, sigma, n) that computes $\mathbb{E}_{x \sim \mathcal{N}(\mu, \sigma^2)}[f(x)]$ via $n$-point Gauss-Hermite quadrature.

(b) Compute the KL divergence $\text{KL}(\mathcal{N}(\mu, \sigma^2) \,\|\, \mathcal{N}(0,1))$ using GH quadrature and verify against the analytic formula $\frac{1}{2}(\mu^2 + \sigma^2 - 1 - \log \sigma^2)$.

(c) Compute the reconstruction term $\mathbb{E}_q[\log p(x|z)]$ for a Gaussian likelihood: $\log p(x|z) = -\frac{1}{2}(x-z)^2 - \frac{1}{2}\log(2\pi)$. Compare GH-20 accuracy against 1-sample and 10-sample Monte Carlo.
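
The change of variables behind part (a): substituting $x = \sqrt{2}\,\sigma t + \mu$ into the Gaussian expectation gives

$$\mathbb{E}_{x \sim \mathcal{N}(\mu,\sigma^2)}[f(x)] = \frac{1}{\sqrt{\pi}} \int_{-\infty}^{\infty} e^{-t^2} f(\sqrt{2}\,\sigma t + \mu)\,dt \approx \frac{1}{\sqrt{\pi}} \sum_{i=1}^n w_i\, f(\sqrt{2}\,\sigma t_i + \mu),$$

with $(t_i, w_i)$ from hermgauss, whose weight function is exactly $e^{-t^2}$.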

Code cell 23

# Your Solution
# Exercise 7 — Scaffold

def gauss_hermite_expect(f, mu, sigma, n=20):
    """
    E_{x~N(mu,sigma^2)}[f(x)] via n-point GH quadrature.
    Nodes/weights from np.polynomial.hermite.hermgauss(n).
    Transform: x = sqrt(2)*sigma*t + mu.
    """

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 24

# Solution
def gh_expect_sol(f, mu, sigma, n=20):
    t, w = np.polynomial.hermite.hermgauss(n)
    return np.dot(w, f(np.sqrt(2)*sigma*t+mu)) / np.sqrt(np.pi)

# (b) KL divergence
mu, sigma = 1.5, 0.5
log_ratio = lambda z: (-0.5*(z-mu)**2/sigma**2 + 0.5*z**2 - np.log(sigma))  # log q(z) - log p(z)
kl_gh = gh_expect_sol(log_ratio, mu, sigma, n=20)
kl_analytic = 0.5*(mu**2+sigma**2-1-2*np.log(sigma))
print(f'KL(N({mu},{sigma}^2)||N(0,1)):')
print(f'  GH-20:    {kl_gh:.8f}')
print(f'  Analytic: {kl_analytic:.8f}')
print(f'  Error:    {abs(kl_gh-kl_analytic):.2e}')

# (c) Reconstruction
x_obs = 2.0
log_p = lambda z: -0.5*(x_obs-z)**2 - 0.5*np.log(2*np.pi)
recon_gh = gh_expect_sol(log_p, mu, sigma, n=20)
np.random.seed(42)
recon_mc1 = log_p(np.random.normal(mu, sigma, 1)).mean()
recon_mc10 = log_p(np.random.normal(mu, sigma, 10)).mean()
print(f'\nE_q[log p(x|z)] for x={x_obs}, q=N({mu},{sigma}^2):')
print(f'  GH-20 (reference):    {recon_gh:.8f}')
print(f'  MC-1 sample:          {recon_mc1:.4f}')
print(f'  MC-10 samples:        {recon_mc10:.4f}')

Exercise 8 ★★★ — Quasi-Monte Carlo and Dimension Scaling

(a) Implement a simple Halton sequence generator for $d$ dimensions using prime bases $p_1, p_2, \ldots, p_d$ (2, 3, 5, 7, 11, ...):

$$h_n^{(p)} = \sum_{k} a_k \, p^{-k-1},$$

where $(a_0, a_1, a_2, \ldots)$ are the digits of $n$ in base $p$.

(b) Compare plain MC vs Halton QMC for $I = \int_{[0,1]^d} \prod_{i=1}^d \sin(\pi x_i)\,d\mathbf{x} = (2/\pi)^d$ for $d = 2, 5, 10$ with $n = 1000$ samples. Show that QMC wins for small $d$.

(c) For $d = 2$, plot the MC and QMC points and the 2D integrand. Observe how QMC fills the space more uniformly.
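
For cross-checking part (a): SciPy ships a Halton generator in scipy.stats.qmc (available since SciPy 1.7). Note that indexing conventions (whether the sequence starts at $n=0$ or $n=1$) may differ from your implementation by one point:

from scipy.stats import qmc
sampler = qmc.Halton(d=2, scramble=False)  # scramble=False gives the plain Halton sequence
X_ref = sampler.random(64)                 # shape (64, 2), points in [0,1)^2
print(X_ref[:4])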

Code cell 26

# Your Solution
# Exercise 8 — Scaffold

def halton(n, d):
    """
    Generate n Halton sequence points in d dimensions.
    Uses the first d primes as bases.
    Returns: array of shape (n, d) in [0,1]^d
    """
    primes = [2,3,5,7,11,13,17,19,23,29]
    assert d <= len(primes), f'Need d <= {len(primes)}'
    X = np.zeros((n, d))
    for j, p in enumerate(primes[:d]):
        pass
    return X

print("Learner scaffold loaded; write your solution here, then compare below.")

Code cell 27

# Solution
def halton_sol(n, d):
    primes = [2,3,5,7,11,13,17,19,23,29]
    X = np.zeros((n, d))
    for j, p in enumerate(primes[:d]):
        for k in range(n):
            f_val, r, q = 1.0, 0.0, k
            while q > 0:
                f_val /= p
                r += f_val * (q % p)
                q //= p
            X[k, j] = r
    return X

# (b)
f_prod = lambda X: np.prod(np.sin(np.pi*X), axis=1)
np.random.seed(42)
print('MC vs Halton QMC:')
print(f"  {'d':>3}  {'Exact':>8}  {'MC err':>10}  {'QMC err':>10}")
for d in [2, 5, 10]:
    exact_d = (2/np.pi)**d
    n = 1000
    X_mc = np.random.rand(n, d)
    X_qmc = halton_sol(n, d)
    err_mc = abs(f_prod(X_mc).mean()-exact_d)
    err_qmc = abs(f_prod(X_qmc).mean()-exact_d)
    print(f'  {d:>3}  {exact_d:>8.5f}  {err_mc:>10.4e}  {err_qmc:>10.4e}')

# (c) 2D plot
if HAS_MPL:
    fig, axes = plt.subplots(1, 2, figsize=(12, 5))
    n_plot = 200
    X_mc2 = np.random.rand(n_plot, 2)
    X_qmc2 = halton_sol(n_plot, 2)
    for ax, X, title in [(axes[0], X_mc2, 'MC (random)'), (axes[1], X_qmc2, 'Halton QMC')]:
        ax.scatter(X[:,0], X[:,1], s=10, alpha=0.5, color='#0077BB')
        ax.set_xlim(0,1); ax.set_ylim(0,1)
        ax.set_title(f'{title} ({n_plot} points)')
        ax.set_xlabel('x1'); ax.set_ylabel('x2')
    plt.tight_layout(); plt.show()
    print('Plot displayed: observe QMC fills space more uniformly')

Exercise 9 ★★★ — Importance Sampling for Tail Expectations

Estimate $P(Z>3)$ for $Z \sim \mathcal{N}(0,1)$ using naive Monte Carlo and shifted-proposal importance sampling.
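
The estimator used below: with proposal $q = \mathcal{N}(3,1)$ and target density $\varphi = \mathcal{N}(0,1)$,

$$P(Z > 3) = \mathbb{E}_{Y \sim q}\!\left[\mathbf{1}\{Y > 3\}\,\frac{\varphi(Y)}{q(Y)}\right],$$

which places roughly half the samples past the threshold instead of the $\approx 0.13\%$ that naive sampling produces.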

Code cell 29

# Your Solution
print("Write your solution here, then compare with the reference solution below.")

Code cell 30

# Solution
from scipy import stats
n=50000
z=np.random.randn(n); naive=(z>3).mean()
y=np.random.normal(3,1,n)
w=stats.norm.pdf(y,0,1)/stats.norm.pdf(y,3,1)
is_est=np.mean((y>3)*w)
truth=stats.norm.sf(3)
header('Exercise 9: Importance Sampling')
print(f'truth={truth:.6f}, naive={naive:.6f}, IS={is_est:.6f}')
check_true('importance estimate is close to truth', abs(is_est-truth)<5e-4)
print('Takeaway: changing the sampling distribution can make rare-event integrals tractable.')

Exercise 10 ★★★ — Antithetic Variates

Estimate $E[e^U]$ for $U \sim \operatorname{Uniform}(0,1)$ with standard Monte Carlo and antithetic pairs.
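
Why this works: for the pair average $\bar{f} = \tfrac{1}{2}\big(f(U) + f(1-U)\big)$,

$$\operatorname{Var}(\bar{f}) = \frac{1}{2}\Big[\operatorname{Var}\big(f(U)\big) + \operatorname{Cov}\big(f(U), f(1-U)\big)\Big],$$

and for a monotone $f$ such as $e^u$ the covariance is negative, so each antithetic pair beats two independent draws.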

Code cell 32

# Your Solution
print("Write your solution here, then compare with the reference solution below.")

Code cell 33

# Solution
n=50000
u=np.random.rand(n); est=np.exp(u).mean()
v=np.random.rand(n//2); anti=0.5*(np.exp(v)+np.exp(1-v)); est_a=anti.mean()
truth=np.e-1
header('Exercise 10: Antithetic Variates')
print(f'truth={truth:.6f}, MC={est:.6f}, antithetic={est_a:.6f}')
print(f'var MC={np.var(np.exp(u))/n:.3e}, var anti={np.var(anti)/(n//2):.3e}')
check_true('antithetic variance is lower', np.var(anti)/(n//2) < np.var(np.exp(u))/n)
print('Takeaway: variance reduction improves integration without changing the target integral.')