# 05_monte_carlo.py
"""
Monte Carlo Simulation (๋ชฌํ…Œ์นด๋ฅผ๋กœ ์‹œ๋ฎฌ๋ ˆ์ด์…˜)

Numerical computation and simulation using probabilistic (random-sampling) methods.
"""
  7
import numpy as np
import matplotlib.pyplot as plt
from typing import Callable, Tuple
 11
 12
# =============================================================================
# 1. Estimating ฯ€ (area of the unit circle)
# =============================================================================
 16def estimate_pi(n_samples: int) -> Tuple[float, float]:
 17    """
 18    ๋‹จ์œ„ ์›์„ ์‚ฌ์šฉํ•œ ฯ€ ์ถ”์ •
 19
 20    ์ •์‚ฌ๊ฐํ˜• [-1, 1] x [-1, 1] ๋‚ด์— ๋ฌด์ž‘์œ„ ์ ์„ ๋ฟŒ๋ฆฌ๊ณ 
 21    ๋‹จ์œ„ ์› ๋‚ด๋ถ€์— ๋–จ์–ด์ง€๋Š” ๋น„์œจ์„ ๊ณ„์‚ฐ
 22
 23    ์›์˜ ๋„“์ด / ์ •์‚ฌ๊ฐํ˜• ๋„“์ด = ฯ€ / 4
 24    โ†’ ฯ€ = 4 * (์› ๋‚ด๋ถ€ ์  / ์ „์ฒด ์ )
 25
 26    ์˜ค์ฐจ: O(1/โˆšn)
 27    """
 28    # ๊ท ์ผ ๋ถ„ํฌ๋กœ ์  ์ƒ์„ฑ
 29    x = np.random.uniform(-1, 1, n_samples)
 30    y = np.random.uniform(-1, 1, n_samples)
 31
 32    # ์› ๋‚ด๋ถ€์— ์žˆ๋Š” ์  ๊ฐœ์ˆ˜
 33    inside = np.sum(x**2 + y**2 <= 1)
 34
 35    pi_estimate = 4 * inside / n_samples
 36    std_error = 4 * np.sqrt(inside * (n_samples - inside) / n_samples**3)
 37
 38    return pi_estimate, std_error
 39
 40
 41def estimate_pi_convergence(max_samples: int = 100000) -> Tuple[np.ndarray, np.ndarray]:
 42    """ฯ€ ์ถ”์ •์˜ ์ˆ˜๋ ด ๊ณผ์ •"""
 43    x = np.random.uniform(-1, 1, max_samples)
 44    y = np.random.uniform(-1, 1, max_samples)
 45    inside = (x**2 + y**2 <= 1).cumsum()
 46    n = np.arange(1, max_samples + 1)
 47    return n, 4 * inside / n
 48
 49
# =============================================================================
# 2. Monte Carlo integration
# =============================================================================
 53def monte_carlo_integrate(
 54    f: Callable[[np.ndarray], np.ndarray],
 55    a: float,
 56    b: float,
 57    n_samples: int
 58) -> Tuple[float, float]:
 59    """
 60    1D ๋ชฌํ…Œ์นด๋ฅผ๋กœ ์ ๋ถ„
 61
 62    โˆซ[a,b] f(x)dx โ‰ˆ (b-a) * (1/n) * ฮฃf(x_i)
 63
 64    Args:
 65        f: ํ”ผ์ ๋ถ„ ํ•จ์ˆ˜
 66        a, b: ์ ๋ถ„ ๊ตฌ๊ฐ„
 67        n_samples: ์ƒ˜ํ”Œ ์ˆ˜
 68
 69    Returns:
 70        (์ ๋ถ„๊ฐ’ ์ถ”์ •, ํ‘œ์ค€ ์˜ค์ฐจ)
 71    """
 72    x = np.random.uniform(a, b, n_samples)
 73    fx = f(x)
 74
 75    integral = (b - a) * np.mean(fx)
 76    variance = np.var(fx)
 77    std_error = (b - a) * np.sqrt(variance / n_samples)
 78
 79    return integral, std_error
 80
 81
 82def monte_carlo_integrate_nd(
 83    f: Callable[[np.ndarray], float],
 84    bounds: list,
 85    n_samples: int
 86) -> Tuple[float, float]:
 87    """
 88    ๋‹ค์ฐจ์› ๋ชฌํ…Œ์นด๋ฅผ๋กœ ์ ๋ถ„
 89
 90    Args:
 91        f: ๋‹ค๋ณ€์ˆ˜ ํ•จ์ˆ˜ f(x) where x is array
 92        bounds: [(a1,b1), (a2,b2), ...] ๊ฐ ์ฐจ์›์˜ ๋ฒ”์œ„
 93        n_samples: ์ƒ˜ํ”Œ ์ˆ˜
 94    """
 95    dim = len(bounds)
 96    volume = np.prod([b - a for a, b in bounds])
 97
 98    # ๊ฐ ์ฐจ์›์—์„œ ๊ท ์ผ ์ƒ˜ํ”Œ๋ง
 99    samples = np.array([
100        np.random.uniform(a, b, n_samples)
101        for a, b in bounds
102    ]).T  # shape: (n_samples, dim)
103
104    values = np.array([f(x) for x in samples])
105
106    integral = volume * np.mean(values)
107    std_error = volume * np.std(values) / np.sqrt(n_samples)
108
109    return integral, std_error
110
111
# =============================================================================
# 3. Importance sampling
# =============================================================================
def importance_sampling(
    f: Callable[[np.ndarray], np.ndarray],
    g: Callable[[np.ndarray], np.ndarray],
    sample_g: Callable[[int], np.ndarray],
    n_samples: int
) -> Tuple[float, float]:
    """Estimate โˆซf(x)dx using importance sampling.

    Rewriting the integral against a proposal density g:
    โˆซf(x)dx = โˆซ(f(x)/g(x)) g(x)dx = E_g[f(x)/g(x)],
    so the mean of the ratios f(x_i)/g(x_i), x_i ~ g, is an unbiased
    estimator.  Variance is reduced when g is shaped like f.

    Args:
        f: integrand (times the original density, if any).
        g: PDF of the proposal distribution.
        sample_g: function drawing k samples from g.
        n_samples: number of samples.

    Returns:
        (integral estimate, standard error)
    """
    draws = sample_g(n_samples)
    ratios = f(draws) / g(draws)
    return np.mean(ratios), np.std(ratios) / np.sqrt(n_samples)
144
145
# =============================================================================
# 4. Random walk simulation
# =============================================================================
def random_walk_1d(n_steps: int, n_walks: int = 1) -> np.ndarray:
    """Simulate symmetric 1D random walks.

    Each step moves by +1 or -1 with equal probability.

    Args:
        n_steps: number of steps per walk.
        n_walks: number of independent walks.

    Returns:
        Array of shape (n_walks, n_steps) holding cumulative positions.
    """
    increments = np.random.choice([-1, 1], size=(n_walks, n_steps))
    return np.cumsum(increments, axis=1)
158
159
def random_walk_2d(n_steps: int) -> Tuple[np.ndarray, np.ndarray]:
    """Simulate a 2D random walk with unit-length steps in uniform directions.

    Returns:
        (x positions, y positions) after each of the n_steps steps.
    """
    headings = np.random.uniform(0, 2 * np.pi, n_steps)
    # Each step has unit length; accumulate its cartesian components.
    return np.cumsum(np.cos(headings)), np.cumsum(np.sin(headings))
168
169
# =============================================================================
# 5. Option pricing (Black-Scholes Monte Carlo)
# =============================================================================
def black_scholes_mc(
    S0: float,
    K: float,
    T: float,
    r: float,
    sigma: float,
    n_simulations: int,
    option_type: str = 'call'
) -> Tuple[float, float]:
    """Monte Carlo price of a European option under geometric Brownian motion.

    Terminal price: S_T = S_0 * exp((r - ฯƒยฒ/2)T + ฯƒโˆšT * Z), Z ~ N(0, 1).

    Args:
        S0: current stock price.
        K: strike price.
        T: time to maturity (years).
        r: risk-free interest rate.
        sigma: volatility.
        n_simulations: number of simulated paths.
        option_type: 'call' or 'put'.

    Returns:
        (option price, standard error)
    """
    # Simulate terminal stock prices.
    noise = np.random.standard_normal(n_simulations)
    drift = (r - 0.5 * sigma**2) * T
    terminal = S0 * np.exp(drift + sigma * np.sqrt(T) * noise)

    # Payoff at maturity.
    if option_type == 'call':
        payoff = np.maximum(terminal - K, 0)
    else:  # put
        payoff = np.maximum(K - terminal, 0)

    # Discount the expected payoff back to today.
    discount = np.exp(-r * T)
    return discount * np.mean(payoff), discount * np.std(payoff) / np.sqrt(n_simulations)
212
213
def black_scholes_analytical(
    S0: float,
    K: float,
    T: float,
    r: float,
    sigma: float,
    option_type: str = 'call'
) -> float:
    """Closed-form Black-Scholes price of a European option (for comparison).

    Args:
        S0: current stock price.
        K: strike price.
        T: time to maturity (years).
        r: risk-free interest rate.
        sigma: volatility.
        option_type: 'call' or 'put'.

    Returns:
        The analytical option price.
    """
    from scipy.stats import norm

    sqrt_t = np.sqrt(T)
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma**2) * T) / (sigma * sqrt_t)
    d2 = d1 - sigma * sqrt_t
    discounted_strike = K * np.exp(-r * T)

    if option_type == 'call':
        return S0 * norm.cdf(d1) - discounted_strike * norm.cdf(d2)
    return discounted_strike * norm.cdf(-d2) - S0 * norm.cdf(-d1)
234
235
# =============================================================================
# 6. Buffon's Needle
# =============================================================================
def buffon_needle(
    needle_length: float,
    line_spacing: float,
    n_drops: int
) -> Tuple[float, float]:
    """Estimate ฯ€ with Buffon's needle experiment.

    Dropping a needle of length L onto a plane ruled with parallel lines
    spaced D apart (L โ‰ค D), the crossing probability is 2L / (ฯ€D), hence
    ฯ€ โ‰ˆ 2L * n_drops / (D * crossings).

    Args:
        needle_length: needle length L.
        line_spacing: distance D between lines, with L โ‰ค D.
        n_drops: number of needle drops.

    Returns:
        (ฯ€ estimate, rough error scale); (inf, inf) when nothing crossed.

    Raises:
        ValueError: if the needle is longer than the line spacing.
    """
    if needle_length > line_spacing:
        raise ValueError("๋ฐ”๋Š˜ ๊ธธ์ด๋Š” ์„  ๊ฐ„๊ฒฉ๋ณด๋‹ค ์ž‘์•„์•ผ ํ•ฉ๋‹ˆ๋‹ค")

    # Distance from the needle's midpoint to the nearest line (0 .. D/2).
    center_offset = np.random.uniform(0, line_spacing / 2, n_drops)

    # Needle orientation relative to the lines (0 .. ฯ€).
    angle = np.random.uniform(0, np.pi, n_drops)

    # The needle crosses a line when its half-projection (L/2)ยทsin(ฮธ)
    # reaches the nearest line.
    hits = np.sum(center_offset <= (needle_length / 2) * np.sin(angle))

    if hits == 0:
        return float('inf'), float('inf')

    pi_hat = 2 * needle_length * n_drops / (line_spacing * hits)
    return pi_hat, 1 / np.sqrt(hits)  # rough error estimate
277
278
# =============================================================================
# Visualization
# =============================================================================
def plot_monte_carlo_examples(
    save_path: str = '/opt/projects/01_Personal/03_Study/Numerical_Simulation/examples/monte_carlo.png'
):
    """Render the four Monte Carlo demo panels and save them as a PNG.

    Panels: ฯ€ estimation scatter, ฯ€ convergence, 2D random walks, and
    call-option price convergence versus the analytical Black-Scholes value.

    Args:
        save_path: output PNG path.  Generalized from the previously
            hard-coded location; the default preserves the old behavior.
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))

    # 1. ฯ€ estimation: points inside (blue) / outside (red) the unit circle.
    ax = axes[0, 0]
    n_points = 10000
    x = np.random.uniform(-1, 1, n_points)
    y = np.random.uniform(-1, 1, n_points)
    inside = x**2 + y**2 <= 1

    ax.scatter(x[inside], y[inside], c='blue', s=1, alpha=0.5)
    ax.scatter(x[~inside], y[~inside], c='red', s=1, alpha=0.5)
    theta = np.linspace(0, 2*np.pi, 100)
    ax.plot(np.cos(theta), np.sin(theta), 'k-', linewidth=2)
    ax.set_xlim(-1.1, 1.1)
    ax.set_ylim(-1.1, 1.1)
    ax.set_aspect('equal')
    pi_est = 4 * np.sum(inside) / n_points
    ax.set_title(f'ฯ€ ์ถ”์ •: {pi_est:.4f} (n={n_points})')

    # 2. Convergence of the running ฯ€ estimate.
    ax = axes[0, 1]
    counts, running = estimate_pi_convergence(50000)
    ax.semilogx(counts, running, 'b-', alpha=0.7)
    ax.axhline(y=np.pi, color='r', linestyle='--', label=f'ฯ€ = {np.pi:.6f}')
    ax.set_xlabel('์ƒ˜ํ”Œ ์ˆ˜')
    ax.set_ylabel('ฯ€ ์ถ”์ •๊ฐ’')
    ax.set_title('ฯ€ ์ถ”์ • ์ˆ˜๋ ด')
    ax.legend()
    ax.grid(True)

    # 3. Five independent 2D random-walk paths from the origin.
    ax = axes[1, 0]
    for _ in range(5):
        walk_x, walk_y = random_walk_2d(1000)
        ax.plot(np.concatenate([[0], walk_x]), np.concatenate([[0], walk_y]), alpha=0.7)
    ax.plot(0, 0, 'ko', markersize=10)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('2D ๋žœ๋ค ์›Œํฌ (5๊ฐœ ๊ฒฝ๋กœ)')
    ax.axis('equal')
    ax.grid(True)

    # 4. Monte Carlo call-option price converging to the analytical value.
    ax = axes[1, 1]
    S0, K, T, r, sigma = 100, 100, 1, 0.05, 0.2
    exact = black_scholes_analytical(S0, K, T, r, sigma)

    sim_counts = np.logspace(2, 5, 20).astype(int)
    prices = []
    price_errs = []
    for count in sim_counts:
        price, err = black_scholes_mc(S0, K, T, r, sigma, count)
        prices.append(price)
        price_errs.append(err)

    ax.errorbar(sim_counts, prices, yerr=price_errs, fmt='o-', capsize=3)
    ax.axhline(y=exact, color='r', linestyle='--', label=f'ํ•ด์„ํ•ด: {exact:.4f}')
    ax.set_xscale('log')
    ax.set_xlabel('์‹œ๋ฎฌ๋ ˆ์ด์…˜ ํšŸ์ˆ˜')
    ax.set_ylabel('์˜ต์…˜ ๊ฐ€๊ฒฉ')
    ax.set_title('์ฝœ์˜ต์…˜ ๊ฐ€๊ฒฉ ์ˆ˜๋ ด')
    ax.legend()
    ax.grid(True)

    plt.tight_layout()
    plt.savefig(save_path, dpi=150)
    plt.close()
    # With the default path this prints "monte_carlo.png", as before.
    print(f"๊ทธ๋ž˜ํ”„ ์ €์žฅ: {save_path.rsplit('/', 1)[-1]}")
352
353
# =============================================================================
# Demo driver
# =============================================================================
def main():
    """Run every Monte Carlo demo in sequence and print the results.

    Uses a fixed seed (42) so the console output is reproducible.
    """
    print("=" * 60)
    print("๋ชฌํ…Œ์นด๋ฅผ๋กœ ์‹œ๋ฎฌ๋ ˆ์ด์…˜ (Monte Carlo)")
    print("=" * 60)

    np.random.seed(42)

    # 1. Estimating ฯ€ from the area of the unit circle.
    print("\n[1] ฯ€ ์ถ”์ • (์›์˜ ๋„“์ด)")
    print("-" * 40)
    for n in [1000, 10000, 100000, 1000000]:
        pi_est, std_err = estimate_pi(n)
        print(f"n={n:>8}: ฯ€ โ‰ˆ {pi_est:.6f} ยฑ {std_err:.6f}, "
              f"์˜ค์ฐจ: {abs(pi_est - np.pi):.6f}")

    # 2. Monte Carlo integration in 1D.
    print("\n[2] ๋ชฌํ…Œ์นด๋ฅผ๋กœ ์ ๋ถ„")
    print("-" * 40)

    # โˆซ[0,1] xยฒ dx = 1/3
    f1 = lambda x: x**2
    integral, error = monte_carlo_integrate(f1, 0, 1, 100000)
    print(f"โˆซxยฒdx [0,1] = {integral:.6f} ยฑ {error:.6f} (์ •ํ™•: 0.333333)")

    # โˆซ[0,ฯ€] sin(x) dx = 2
    f2 = lambda x: np.sin(x)
    integral, error = monte_carlo_integrate(f2, 0, np.pi, 100000)
    print(f"โˆซsin(x)dx [0,ฯ€] = {integral:.6f} ยฑ {error:.6f} (์ •ํ™•: 2.0)")

    # 3. Multidimensional integration.
    print("\n[3] ๋‹ค์ฐจ์› ์ ๋ถ„")
    print("-" * 40)

    # Volume of the unit sphere: 4ฯ€/3 โ‰ˆ 4.189
    def sphere_indicator(x):
        return 1 if np.sum(x**2) <= 1 else 0

    volume, error = monte_carlo_integrate_nd(sphere_indicator, [(-1,1)]*3, 100000)
    print(f"๋‹จ์œ„ ๊ตฌ ๋ถ€ํ”ผ = {volume:.4f} ยฑ {error:.4f} (์ •ํ™•: {4*np.pi/3:.4f})")

    # 4. European option pricing, Monte Carlo vs. analytical.
    print("\n[4] ์œ ๋Ÿฝํ˜• ์ฝœ์˜ต์…˜ ๊ฐ€๊ฒฉ")
    print("-" * 40)

    S0, K, T, r, sigma = 100, 100, 1, 0.05, 0.2

    exact = black_scholes_analytical(S0, K, T, r, sigma)
    mc_price, mc_error = black_scholes_mc(S0, K, T, r, sigma, 100000)

    print(f"Black-Scholes ํ•ด์„ํ•ด: {exact:.4f}")
    print(f"Monte Carlo (n=100000): {mc_price:.4f} ยฑ {mc_error:.4f}")
    print(f"์˜ค์ฐจ: {abs(mc_price - exact):.4f}")

    # 5. Buffon's needle.
    print("\n[5] ๋ฒ„ํ• ๋ฐ”๋Š˜ ๋ฌธ์ œ")
    print("-" * 40)

    pi_est, _ = buffon_needle(1, 2, 100000)
    print(f"ฯ€ ์ถ”์ • (L=1, D=2, n=100000): {pi_est:.6f}")

    # 6. 1D random-walk statistics (mean 0, std โˆšn expected).
    print("\n[6] 1D ๋žœ๋ค ์›Œํฌ ํ†ต๊ณ„")
    print("-" * 40)

    n_steps = 1000
    n_walks = 10000
    positions = random_walk_1d(n_steps, n_walks)
    final_positions = positions[:, -1]

    print(f"{n_steps}์Šคํ… ํ›„ ({n_walks}๊ฐœ ์›Œํฌ):")
    print(f"  ํ‰๊ท  ์œ„์น˜: {np.mean(final_positions):.2f} (์ด๋ก : 0)")
    print(f"  ํ‘œ์ค€ํŽธ์ฐจ: {np.std(final_positions):.2f} (์ด๋ก : {np.sqrt(n_steps):.2f})")

    # Visualization is best-effort: a plotting failure must not abort the demo.
    try:
        plot_monte_carlo_examples()
    except Exception as e:
        print(f"๊ทธ๋ž˜ํ”„ ์ƒ์„ฑ ์‹คํŒจ: {e}")

    print("\n" + "=" * 60)
    print("๋ชฌํ…Œ์นด๋ฅผ๋กœ ๋ฐฉ๋ฒ• ์ •๋ฆฌ")
    print("=" * 60)
    print("""
    ์žฅ์ :
    - ๊ณ ์ฐจ์› ๋ฌธ์ œ์—์„œ๋„ ์ˆ˜๋ ด ์†๋„ ์œ ์ง€ (์ฐจ์›์˜ ์ €์ฃผ ํšŒํ”ผ)
    - ๋ณต์žกํ•œ ์˜์—ญ/์กฐ๊ฑด์— ์ ์šฉ ์šฉ์ด
    - ๊ตฌํ˜„์ด ๋‹จ์ˆœ

    ๋‹จ์ :
    - ์ˆ˜๋ ด ์†๋„๊ฐ€ ๋А๋ฆผ: O(1/โˆšn)
    - ํ™•๋ฅ ์  โ†’ ๊ฒฐ๊ณผ์— ๋ถˆํ™•์‹ค์„ฑ
    - ๋งŽ์€ ์ƒ˜ํ”Œ ํ•„์š”

    ๋ถ„์‚ฐ ๊ฐ์†Œ ๊ธฐ๋ฒ•:
    - ์ค‘์š”๋„ ์ƒ˜ํ”Œ๋ง (Importance Sampling)
    - ๋Œ€์กฐ ๋ณ€๋Ÿ‰ (Control Variates)
    - ์•ˆํ‹ฐํ…Œํ‹ฑ ๋ณ€๋Ÿ‰ (Antithetic Variates)
    - ์ธตํ™” ์ƒ˜ํ”Œ๋ง (Stratified Sampling)
    - ์ค€๋‚œ์ˆ˜ (Quasi-random / Low-discrepancy sequences)

    ์‘์šฉ:
    - ๊ธˆ์œต: ํŒŒ์ƒ์ƒํ’ˆ ๊ฐ€๊ฒฉ ๊ฒฐ์ •, ๋ฆฌ์Šคํฌ ๋ถ„์„
    - ๋ฌผ๋ฆฌ: ํ†ต๊ณ„์—ญํ•™, ์ž…์ž ์‹œ๋ฎฌ๋ ˆ์ด์…˜
    - ์ปดํ“จํ„ฐ ๊ทธ๋ž˜ํ”ฝ: ๊ฒฝ๋กœ ์ถ”์  ๋ Œ๋”๋ง
    - ์ตœ์ ํ™”: ์‹œ๋ฎฌ๋ ˆ์ดํ‹ฐ๋“œ ์–ด๋‹๋ง, MCMC
    """)
463
464
# Run the full demo suite when executed as a script.
if __name__ == "__main__":
    main()