1"""
2๋ชฌํ
์นด๋ฅผ๋ก ์๋ฎฌ๋ ์ด์
(Monte Carlo Simulation)
3Monte Carlo Methods
4
5ํ๋ฅ ์ ๋ฐฉ๋ฒ์ ์ฌ์ฉํ ์์น ๊ณ์ฐ ๋ฐ ์๋ฎฌ๋ ์ด์
์
๋๋ค.
6"""
7
8import numpy as np
9import matplotlib.pyplot as plt
10from typing import Callable, Tuple
11
12
# =============================================================================
# 1. Estimating pi (area of a circle)
# =============================================================================
def estimate_pi(n_samples: int) -> Tuple[float, float]:
    """
    Estimate pi by uniform sampling of the square [-1, 1] x [-1, 1].

    Since (unit-circle area) / (square area) = pi / 4, the fraction of
    random points landing inside the unit circle approximates pi / 4:
        pi ~ 4 * (points inside circle / total points)

    Convergence error is O(1/sqrt(n)).

    Args:
        n_samples: number of random points to draw.

    Returns:
        (pi estimate, standard error of the estimate)
    """
    xs = np.random.uniform(-1, 1, n_samples)
    ys = np.random.uniform(-1, 1, n_samples)

    # Count draws whose distance from the origin is at most 1.
    hits = int(np.count_nonzero(xs**2 + ys**2 <= 1))

    estimate = 4 * hits / n_samples
    # Binomial standard error of the hit fraction, scaled by the factor 4.
    stderr = 4 * np.sqrt(hits * (n_samples - hits) / n_samples**3)
    return estimate, stderr
39
40
def estimate_pi_convergence(max_samples: int = 100000) -> Tuple[np.ndarray, np.ndarray]:
    """Return (sample counts, running pi estimates) for one experiment."""
    xs = np.random.uniform(-1, 1, max_samples)
    ys = np.random.uniform(-1, 1, max_samples)
    # Running count of points inside the unit circle after each draw.
    hits_so_far = (xs**2 + ys**2 <= 1).cumsum()
    counts = np.arange(1, max_samples + 1)
    return counts, 4 * hits_so_far / counts
48
49
# =============================================================================
# 2. Monte Carlo integration
# =============================================================================
def monte_carlo_integrate(
    f: Callable[[np.ndarray], np.ndarray],
    a: float,
    b: float,
    n_samples: int
) -> Tuple[float, float]:
    """
    One-dimensional Monte Carlo integration.

    Approximates the integral of f over [a, b] as
        (b - a) * mean(f(x_i)),  x_i ~ Uniform(a, b)

    Args:
        f: integrand, vectorized over a numpy array.
        a: lower integration limit.
        b: upper integration limit.
        n_samples: number of sample points.

    Returns:
        (integral estimate, standard error)
    """
    points = np.random.uniform(a, b, n_samples)
    values = f(points)

    width = b - a
    estimate = width * np.mean(values)
    # Standard error of the sample mean, scaled by the interval width.
    stderr = width * np.sqrt(np.var(values) / n_samples)
    return estimate, stderr
80
81
def monte_carlo_integrate_nd(
    f: Callable[[np.ndarray], float],
    bounds: list,
    n_samples: int
) -> Tuple[float, float]:
    """
    Monte Carlo integration over an axis-aligned box in d dimensions.

    Args:
        f: scalar function of a point; receives a 1-D array of length d.
        bounds: [(a1, b1), (a2, b2), ...] interval per dimension.
        n_samples: number of sample points.

    Returns:
        (integral estimate, standard error)
    """
    box_volume = np.prod([hi - lo for lo, hi in bounds])

    # One uniform stream per dimension, transposed to shape (n_samples, d).
    points = np.array([
        np.random.uniform(lo, hi, n_samples)
        for lo, hi in bounds
    ]).T

    evaluations = np.array([f(p) for p in points])

    estimate = box_volume * np.mean(evaluations)
    stderr = box_volume * np.std(evaluations) / np.sqrt(n_samples)
    return estimate, stderr
110
111
# =============================================================================
# 3. Importance sampling
# =============================================================================
def importance_sampling(
    f: Callable[[np.ndarray], np.ndarray],
    g: Callable[[np.ndarray], np.ndarray],
    sample_g: Callable[[int], np.ndarray],
    n_samples: int
) -> Tuple[float, float]:
    """
    Importance-sampling estimate of the integral of f.

    Rewrites the target integral as an expectation under g:
        integral f(x) dx = integral (f(x)/g(x)) g(x) dx = E_g[f(x)/g(x)]
    Variance shrinks when g is close in shape to f.

    Args:
        f: integrand (target function, possibly times the base density).
        g: pdf of the importance (proposal) distribution.
        sample_g: callable drawing n samples from g.
        n_samples: number of samples.

    Returns:
        (integral estimate, standard error)
    """
    draws = sample_g(n_samples)
    ratios = f(draws) / g(draws)

    estimate = np.mean(ratios)
    stderr = np.std(ratios) / np.sqrt(n_samples)
    return estimate, stderr
144
145
# =============================================================================
# 4. Random walk simulation
# =============================================================================
def random_walk_1d(n_steps: int, n_walks: int = 1) -> np.ndarray:
    """
    Simulate symmetric 1-D random walks (each step is +1 or -1).

    Args:
        n_steps: number of steps per walk.
        n_walks: number of independent walks.

    Returns:
        Array of shape (n_walks, n_steps) with the position after each
        step; the implicit starting position 0 is not included.
    """
    increments = np.random.choice([-1, 1], size=(n_walks, n_steps))
    return np.cumsum(increments, axis=1)
158
159
def random_walk_2d(n_steps: int) -> Tuple[np.ndarray, np.ndarray]:
    """2-D random walk with unit-length steps in uniformly random directions."""
    headings = np.random.uniform(0, 2 * np.pi, n_steps)
    # Each step moves one unit along a random heading; accumulate per axis.
    xs = np.cumsum(np.cos(headings))
    ys = np.cumsum(np.sin(headings))
    return xs, ys
168
169
# =============================================================================
# 5. Option pricing (Black-Scholes Monte Carlo)
# =============================================================================
def black_scholes_mc(
    S0: float,
    K: float,
    T: float,
    r: float,
    sigma: float,
    n_simulations: int,
    option_type: str = 'call'
) -> Tuple[float, float]:
    """
    Monte Carlo price of a European option under Black-Scholes dynamics.

    Terminal price from geometric Brownian motion:
        S_T = S_0 * exp((r - sigma^2/2) T + sigma sqrt(T) Z),  Z ~ N(0, 1)

    Args:
        S0: current spot price.
        K: strike price.
        T: time to maturity (years).
        r: risk-free interest rate.
        sigma: volatility.
        n_simulations: number of simulated paths.
        option_type: 'call' or 'put'.

    Returns:
        (discounted price estimate, standard error)
    """
    # Simulate all terminal stock prices in a single vectorized draw.
    Z = np.random.standard_normal(n_simulations)
    drift = (r - 0.5 * sigma**2) * T
    ST = S0 * np.exp(drift + sigma * np.sqrt(T) * Z)

    # Payoff at maturity.
    if option_type == 'call':
        payoffs = np.maximum(ST - K, 0)
    else:  # put
        payoffs = np.maximum(K - ST, 0)

    # Discount the expected payoff back to present value.
    discount = np.exp(-r * T)
    price = discount * np.mean(payoffs)
    stderr = discount * np.std(payoffs) / np.sqrt(n_simulations)
    return price, stderr
212
213
def black_scholes_analytical(
    S0: float,
    K: float,
    T: float,
    r: float,
    sigma: float,
    option_type: str = 'call'
) -> float:
    """
    Closed-form Black-Scholes price of a European option (for comparison).

    Args:
        S0: current spot price.
        K: strike price.
        T: time to maturity (years).
        r: risk-free interest rate.
        sigma: volatility.
        option_type: 'call' or 'put'.

    Returns:
        The analytical option price.
    """
    from scipy.stats import norm

    vol_sqrt_t = sigma * np.sqrt(T)
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma**2) * T) / vol_sqrt_t
    d2 = d1 - vol_sqrt_t

    discounted_strike = K * np.exp(-r * T)
    if option_type == 'call':
        return S0 * norm.cdf(d1) - discounted_strike * norm.cdf(d2)
    return discounted_strike * norm.cdf(-d2) - S0 * norm.cdf(-d1)
234
235
# =============================================================================
# 6. Buffon's needle
# =============================================================================
def buffon_needle(
    needle_length: float,
    line_spacing: float,
    n_drops: int
) -> Tuple[float, float]:
    """
    Estimate pi with Buffon's needle experiment.

    Dropping a needle of length L onto a floor ruled with parallel lines
    spaced D apart (L <= D), the probability of crossing a line is
    2L / (pi * D), which rearranges to:
        pi ~ 2 * L * n_drops / (D * crossings)

    Args:
        needle_length: needle length L.
        line_spacing: distance D between lines (requires L <= D).
        n_drops: number of needle drops.

    Returns:
        (pi estimate, rough standard error); both are inf when no
        needle crossed a line.

    Raises:
        ValueError: if the needle is longer than the line spacing.
    """
    if needle_length > line_spacing:
        raise ValueError("๋ฐ๋ ๊ธธ์ด๋ ์ ๊ฐ๊ฒฉ๋ณด๋ค ์์์ผ ํฉ๋๋ค")

    # Distance from the needle's center to the nearest line (0 .. D/2).
    centers = np.random.uniform(0, line_spacing / 2, n_drops)
    # Needle orientation angle (0 .. pi).
    angles = np.random.uniform(0, np.pi, n_drops)

    # A needle crosses a line when the vertical reach of its half-length,
    # (L/2) * sin(theta), covers the distance to that line.
    half_reach = (needle_length / 2) * np.sin(angles)
    n_crossings = np.sum(centers <= half_reach)

    if n_crossings == 0:
        return float('inf'), float('inf')

    pi_estimate = 2 * needle_length * n_drops / (line_spacing * n_crossings)
    # Crude error scale ~ 1/sqrt(crossings).
    return pi_estimate, 1 / np.sqrt(n_crossings)
277
278
# =============================================================================
# Visualization
# =============================================================================
def plot_monte_carlo_examples():
    """Draw a 2x2 grid of Monte Carlo demos and save it as monte_carlo.png.

    Panels: (1) pi-estimation scatter, (2) running pi-estimate
    convergence, (3) five 2-D random-walk paths, (4) Monte Carlo option
    price vs. simulation count. Side effects only: writes a PNG to a
    hard-coded absolute path and prints a confirmation message.
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))

    # 1. pi estimation: color points by whether they fall in the unit circle
    ax = axes[0, 0]
    n = 10000
    x = np.random.uniform(-1, 1, n)
    y = np.random.uniform(-1, 1, n)
    inside = x**2 + y**2 <= 1

    ax.scatter(x[inside], y[inside], c='blue', s=1, alpha=0.5)
    ax.scatter(x[~inside], y[~inside], c='red', s=1, alpha=0.5)
    # Outline of the unit circle for reference.
    theta = np.linspace(0, 2*np.pi, 100)
    ax.plot(np.cos(theta), np.sin(theta), 'k-', linewidth=2)
    ax.set_xlim(-1.1, 1.1)
    ax.set_ylim(-1.1, 1.1)
    ax.set_aspect('equal')
    pi_est = 4 * np.sum(inside) / n
    ax.set_title(f'ฯ ์ถ์ : {pi_est:.4f} (n={n})')

    # 2. convergence of the running pi estimate (log-scaled sample axis)
    ax = axes[0, 1]
    n, estimates = estimate_pi_convergence(50000)
    ax.semilogx(n, estimates, 'b-', alpha=0.7)
    ax.axhline(y=np.pi, color='r', linestyle='--', label=f'ฯ = {np.pi:.6f}')
    ax.set_xlabel('์ํ ์')
    ax.set_ylabel('ฯ ์ถ์ ๊ฐ')
    ax.set_title('ฯ ์ถ์ ์๋ ด')
    ax.legend()
    ax.grid(True)

    # 3. five independent 2-D random-walk paths starting from the origin
    ax = axes[1, 0]
    for _ in range(5):
        x, y = random_walk_2d(1000)
        # Prepend the origin so each plotted path visibly starts at (0, 0).
        ax.plot(np.concatenate([[0], x]), np.concatenate([[0], y]), alpha=0.7)
    ax.plot(0, 0, 'ko', markersize=10)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('2D ๋๋ค ์ํฌ (5๊ฐ ๊ฒฝ๋ก)')
    ax.axis('equal')
    ax.grid(True)

    # 4. Monte Carlo option price vs. simulation count, with error bars
    ax = axes[1, 1]
    S0, K, T, r, sigma = 100, 100, 1, 0.05, 0.2
    exact = black_scholes_analytical(S0, K, T, r, sigma)

    # Sample counts spaced logarithmically from 10^2 to 10^5.
    ns = np.logspace(2, 5, 20).astype(int)
    estimates = []
    errors = []
    for n in ns:
        est, err = black_scholes_mc(S0, K, T, r, sigma, n)
        estimates.append(est)
        errors.append(err)

    ax.errorbar(ns, estimates, yerr=errors, fmt='o-', capsize=3)
    ax.axhline(y=exact, color='r', linestyle='--', label=f'ํด์ํด: {exact:.4f}')
    ax.set_xscale('log')
    ax.set_xlabel('์๋ฎฌ๋ ์ด์ ํ์')
    ax.set_ylabel('์ต์ ๊ฐ๊ฒฉ')
    ax.set_title('์ฝ์ต์ ๊ฐ๊ฒฉ ์๋ ด')
    ax.legend()
    ax.grid(True)

    plt.tight_layout()
    # NOTE(review): hard-coded absolute output path — confirm it exists and
    # is writable on the machine running this demo.
    plt.savefig('/opt/projects/01_Personal/03_Study/Numerical_Simulation/examples/monte_carlo.png', dpi=150)
    plt.close()
    print("๊ทธ๋ํ ์ ์ฅ: monte_carlo.png")
352
353
# =============================================================================
# Demo / self-test
# =============================================================================
def main():
    """Run every Monte Carlo demo in sequence and print results (Korean labels)."""
    print("=" * 60)
    print("๋ชฌํ ์นด๋ฅผ๋ก ์๋ฎฌ๋ ์ด์ (Monte Carlo)")
    print("=" * 60)

    # Fixed seed so the demo output is reproducible run to run.
    np.random.seed(42)

    # 1. pi estimation at increasing sample sizes
    print("\n[1] ฯ ์ถ์ (์์ ๋์ด)")
    print("-" * 40)
    for n in [1000, 10000, 100000, 1000000]:
        pi_est, std_err = estimate_pi(n)
        print(f"n={n:>8}: ฯ โ {pi_est:.6f} ยฑ {std_err:.6f}, "
              f"์ค์ฐจ: {abs(pi_est - np.pi):.6f}")

    # 2. 1-D Monte Carlo integration against known integrals
    print("\n[2] ๋ชฌํ ์นด๋ฅผ๋ก ์ ๋ถ")
    print("-" * 40)

    # Integral of x^2 over [0, 1] = 1/3
    f1 = lambda x: x**2
    integral, error = monte_carlo_integrate(f1, 0, 1, 100000)
    print(f"โซxยฒdx [0,1] = {integral:.6f} ยฑ {error:.6f} (์ ํ: 0.333333)")

    # Integral of sin(x) over [0, pi] = 2
    f2 = lambda x: np.sin(x)
    integral, error = monte_carlo_integrate(f2, 0, np.pi, 100000)
    print(f"โซsin(x)dx [0,ฯ] = {integral:.6f} ยฑ {error:.6f} (์ ํ: 2.0)")

    # 3. multidimensional integration: volume of the unit sphere
    print("\n[3] ๋ค์ฐจ์ ์ ๋ถ")
    print("-" * 40)

    # Indicator of the unit ball; its integral over [-1,1]^3 is 4*pi/3 ~ 4.189
    def sphere_indicator(x):
        return 1 if np.sum(x**2) <= 1 else 0

    volume, error = monte_carlo_integrate_nd(sphere_indicator, [(-1,1)]*3, 100000)
    print(f"๋จ์ ๊ตฌ ๋ถํผ = {volume:.4f} ยฑ {error:.4f} (์ ํ: {4*np.pi/3:.4f})")

    # 4. European call option: Monte Carlo vs. analytical price
    print("\n[4] ์ ๋ฝํ ์ฝ์ต์ ๊ฐ๊ฒฉ")
    print("-" * 40)

    S0, K, T, r, sigma = 100, 100, 1, 0.05, 0.2

    exact = black_scholes_analytical(S0, K, T, r, sigma)
    mc_price, mc_error = black_scholes_mc(S0, K, T, r, sigma, 100000)

    print(f"Black-Scholes ํด์ํด: {exact:.4f}")
    print(f"Monte Carlo (n=100000): {mc_price:.4f} ยฑ {mc_error:.4f}")
    print(f"์ค์ฐจ: {abs(mc_price - exact):.4f}")

    # 5. Buffon's needle estimate of pi
    print("\n[5] ๋ฒํ ๋ฐ๋ ๋ฌธ์ ")
    print("-" * 40)

    pi_est, _ = buffon_needle(1, 2, 100000)
    print(f"ฯ ์ถ์ (L=1, D=2, n=100000): {pi_est:.6f}")

    # 6. 1-D random walk statistics vs. theory (mean 0, std sqrt(n_steps))
    print("\n[6] 1D ๋๋ค ์ํฌ ํต๊ณ")
    print("-" * 40)

    n_steps = 1000
    n_walks = 10000
    positions = random_walk_1d(n_steps, n_walks)
    # Final displacement of every walk (last column).
    final_positions = positions[:, -1]

    print(f"{n_steps}์คํ ํ ({n_walks}๊ฐ ์ํฌ):")
    print(f"  ํ๊ท ์์น: {np.mean(final_positions):.2f} (์ด๋ก : 0)")
    print(f"  ํ์คํธ์ฐจ: {np.std(final_positions):.2f} (์ด๋ก : {np.sqrt(n_steps):.2f})")

    # Plotting is best-effort: keep the text demo usable on machines with
    # no display or no writable output path.
    try:
        plot_monte_carlo_examples()
    except Exception as e:
        print(f"๊ทธ๋ํ ์์ฑ ์คํจ: {e}")

    print("\n" + "=" * 60)
    print("๋ชฌํ ์นด๋ฅผ๋ก ๋ฐฉ๋ฒ ์ ๋ฆฌ")
    print("=" * 60)
    print("""
    ์ฅ์ :
    - ๊ณ ์ฐจ์ ๋ฌธ์ ์์๋ ์๋ ด ์๋ ์ ์ง (์ฐจ์์ ์ ์ฃผ ํํผ)
    - ๋ณต์กํ ์์ญ/์กฐ๊ฑด์ ์ ์ฉ ์ฉ์ด
    - ๊ตฌํ์ด ๋จ์

    ๋จ์ :
    - ์๋ ด ์๋๊ฐ ๋๋ฆผ: O(1/โn)
    - ํ๋ฅ ์ โ ๊ฒฐ๊ณผ์ ๋ถํ์ค์ฑ
    - ๋ง์ ์ํ ํ์

    ๋ถ์ฐ ๊ฐ์ ๊ธฐ๋ฒ:
    - ์ค์๋ ์ํ๋ง (Importance Sampling)
    - ๋์กฐ ๋ณ๋ (Control Variates)
    - ์ํฐํ ํฑ ๋ณ๋ (Antithetic Variates)
    - ์ธตํ ์ํ๋ง (Stratified Sampling)
    - ์ค๋์ (Quasi-random / Low-discrepancy sequences)

    ์์ฉ:
    - ๊ธ์ต: ํ์์ํ ๊ฐ๊ฒฉ ๊ฒฐ์ , ๋ฆฌ์คํฌ ๋ถ์
    - ๋ฌผ๋ฆฌ: ํต๊ณ์ญํ, ์ ์ ์๋ฎฌ๋ ์ด์
    - ์ปดํจํฐ ๊ทธ๋ํฝ: ๊ฒฝ๋ก ์ถ์ ๋ ๋๋ง
    - ์ต์ ํ: ์๋ฎฌ๋ ์ดํฐ๋ ์ด๋๋ง, MCMC
    """)
463
464
465if __name__ == "__main__":
466 main()