1"""
220. ์ค์ ํ๋ก์ ํธ
3- ๋ฌธ์ ์ค์บ๋
4- ์ฐจ๋ ๋ฒํธํ ์ธ์
5- ์ค์๊ฐ ๊ฐ์ฒด ์ถ์
6- ์ด๋ฏธ์ง ํ๋
ธ๋ผ๋ง
7"""
8
9import cv2
10import numpy as np
11
12
13# ============================================================
14# ํ๋ก์ ํธ 1: ๋ฌธ์ ์ค์บ๋
15# ============================================================
16
def document_scanner():
    """Document scanner demo.

    Builds a synthetic photo of a tilted document, detects its quadrilateral
    outline, deskews it with a perspective transform, and binarizes the
    result for a "scanned paper" look.

    Side effects: writes scanner_input.jpg, scanner_contour.jpg,
    scanner_warped.jpg and scanner_result.jpg to the working directory.
    """
    print("=" * 60)
    print("ํ๋ก์ ํธ 1: ๋ฌธ์ ์ค์บ๋")
    print("=" * 60)

    # Create a simulated document photo.
    # A real application would use a camera capture instead.
    img = np.zeros((600, 800, 3), dtype=np.uint8)
    img[:] = [150, 150, 150]  # gray background

    # Tilted document (trapezoid) filled white.
    doc_pts = np.array([[150, 100], [650, 80], [700, 520], [100, 550]], np.int32)
    cv2.fillPoly(img, [doc_pts], (255, 255, 255))

    # Simulated document content: a title plus a few "text" lines.
    cv2.putText(img, 'DOCUMENT TITLE', (220, 200),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    cv2.line(img, (200, 250), (600, 240), (100, 100, 100), 2)
    cv2.line(img, (200, 300), (600, 290), (100, 100, 100), 2)
    cv2.line(img, (200, 350), (550, 340), (100, 100, 100), 2)

    cv2.imwrite('scanner_input.jpg', img)

    # 1. Grayscale + Gaussian blur (suppress noise before edge detection).
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)

    # 2. Canny edge detection.
    edges = cv2.Canny(blurred, 50, 150)

    # 3. External contours only; simple chain approximation suffices here.
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # 4. Pick the largest contour that approximates to a quadrilateral.
    doc_contour = None
    max_area = 0

    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 10000:  # minimum-size filter to skip small blobs
            peri = cv2.arcLength(cnt, True)
            # 2% of the perimeter is the customary epsilon for corner fitting.
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)

            if len(approx) == 4 and area > max_area:
                doc_contour = approx
                max_area = area

    if doc_contour is not None:
        # Order corners as [TL, TR, BR, BL] for the perspective transform.
        pts = doc_contour.reshape(4, 2)
        rect = order_points(pts)

        # Visualize the detected contour and its ordered corners.
        result_contour = img.copy()
        cv2.drawContours(result_contour, [doc_contour], -1, (0, 255, 0), 3)
        for pt in rect:
            cv2.circle(result_contour, tuple(pt.astype(int)), 10, (0, 0, 255), -1)

        cv2.imwrite('scanner_contour.jpg', result_contour)

        # 5. Perspective transform to a fixed portrait output size.
        width, height = 500, 700  # output size in pixels
        dst = np.array([
            [0, 0],
            [width - 1, 0],
            [width - 1, height - 1],
            [0, height - 1]
        ], dtype=np.float32)

        M = cv2.getPerspectiveTransform(rect, dst)
        warped = cv2.warpPerspective(img, M, (width, height))

        # 6. Adaptive threshold for the scanned-document effect.
        warped_gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        scanned = cv2.adaptiveThreshold(
            warped_gray, 255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            cv2.THRESH_BINARY, 11, 2
        )

        cv2.imwrite('scanner_warped.jpg', warped)
        cv2.imwrite('scanner_result.jpg', scanned)

        print("๋ฌธ์ ์ค์บ๋ ์๋ฃ!")
        print(" - scanner_input.jpg: ์๋ณธ")
        print(" - scanner_contour.jpg: ๋ฌธ์ ๊ฒ์ถ")
        print(" - scanner_warped.jpg: ์๊ทผ ๋ณด์ ")
        print(" - scanner_result.jpg: ์ต์ข ์ค์บ")
    else:
        print("๋ฌธ์๋ฅผ ์ฐพ์ ์ ์์ต๋๋ค.")

    print("\n์ฒ๋ฆฌ ํ์ดํ๋ผ์ธ:")
    print(" 1. ๊ทธ๋ ์ด์ค์ผ์ผ + ๋ธ๋ฌ")
    print(" 2. Canny ์ฃ์ง ๊ฒ์ถ")
    print(" 3. ์ปจํฌ์ด ๊ฒ์ถ ๋ฐ ๊ทผ์ฌํ")
    print(" 4. 4๊ฐํ ๋ฌธ์ ์ ํ")
    print(" 5. ์๊ทผ ๋ณํ")
    print(" 6. ์ด์งํ (์ ํ)")
116
117
def order_points(pts):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left].

    Uses the classic sum/difference heuristic for an axis-aligned-ish quad:
    the top-left corner has the smallest x+y, the bottom-right the largest;
    the top-right has the smallest y-x, the bottom-left the largest.

    Generalized to accept any array-like (list, tuple, int or float ndarray)
    of shape (4, 2); the input is converted to float32 up front, which is
    also what cv2.getPerspectiveTransform expects downstream.

    Args:
        pts: array-like of shape (4, 2) holding (x, y) coordinates.

    Returns:
        np.float32 ndarray of shape (4, 2) in [TL, TR, BR, BL] order.
    """
    pts = np.asarray(pts, dtype=np.float32)  # accept lists/tuples/int arrays
    rect = np.zeros((4, 2), dtype=np.float32)

    # x+y: smallest -> top-left, largest -> bottom-right.
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # y-x: smallest -> top-right, largest -> bottom-left.
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    return rect
135
136
# ============================================================
# Project 2: License plate recognition (concept)
# ============================================================
140
def license_plate_recognition():
    """License plate recognition demo (concept only).

    Draws a synthetic car-with-plate image and saves it to lpr_input.jpg,
    then prints the typical LPR pipeline and an example pytesseract
    snippet. No actual detection or OCR is performed here.
    """
    print("\n" + "=" * 60)
    print("ํ๋ก์ ํธ 2: ์ฐจ๋ ๋ฒํธํ ์ธ์")
    print("=" * 60)

    # Simulated plate image for the demo.
    img = np.zeros((400, 600, 3), dtype=np.uint8)
    img[:] = [200, 200, 200]

    # Car body shapes (two dark rectangles).
    cv2.rectangle(img, (100, 100), (500, 350), (80, 80, 80), -1)
    cv2.rectangle(img, (120, 120), (480, 250), (60, 60, 60), -1)

    # License plate: white box, black border, plate text.
    plate_x, plate_y = 200, 280
    plate_w, plate_h = 200, 50
    cv2.rectangle(img, (plate_x, plate_y), (plate_x+plate_w, plate_y+plate_h),
                  (255, 255, 255), -1)
    cv2.rectangle(img, (plate_x, plate_y), (plate_x+plate_w, plate_y+plate_h),
                  (0, 0, 0), 2)
    cv2.putText(img, '12AB3456', (plate_x+20, plate_y+35),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)

    cv2.imwrite('lpr_input.jpg', img)

    print("\n๋ฒํธํ ์ธ์ ํ์ดํ๋ผ์ธ:")
    print("""
1. ๋ฒํธํ ๊ฒ์ถ (Plate Detection)
 - Haar Cascade (ํ์ต๋ ๋ถ๋ฅ๊ธฐ)
 - DNN (YOLO, SSD)
 - ์ฃ์ง ๊ธฐ๋ฐ ๊ฒ์ถ

2. ๋ฒํธํ ์์ญ ์ถ์ถ
 - ์ปจํฌ์ด ๊ฒ์ถ
 - ์๊ทผ ๋ณด์ 

3. ๋ฌธ์ ๋ถํ  (Character Segmentation)
 - ์ด์งํ
 - ์ปจํฌ์ด๋ก ๊ฐ ๋ฌธ์ ๋ถ๋ฆฌ
 - ์ฐ๊ฒฐ ์์ ๋ถ์

4. ๋ฌธ์ ์ธ์ (OCR)
 - Tesseract OCR
 - DNN ๊ธฐ๋ฐ ์ธ์
 - ํํ๋ฆฟ ๋งค์นญ

5. ํ์ฒ๋ฆฌ
 - ํ์ ๊ฒ์ฆ
 - ๋ธ์ด์ฆ ์ ๊ฑฐ
""")

    # Example code is printed as text, not executed (pytesseract is
    # an optional external dependency).
    code = '''
# ๋ฒํธํ ์ธ์ ์ฝ๋ ์์
import cv2
import pytesseract

# 1. ๋ฒํธํ ๊ฒ์ถ
plate_cascade = cv2.CascadeClassifier('haarcascade_plate.xml')
plates = plate_cascade.detectMultiScale(gray, 1.1, 5)

for (x, y, w, h) in plates:
    # 2. ๋ฒํธํ ์์ญ ์ถ์ถ
    plate_img = gray[y:y+h, x:x+w]

    # 3. ์ ์ฒ๋ฆฌ
    plate_img = cv2.resize(plate_img, None, fx=2, fy=2)
    _, thresh = cv2.threshold(plate_img, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # 4. OCR
    text = pytesseract.image_to_string(thresh, config='--psm 7')
    print(f"๋ฒํธํ: {text.strip()}")
'''
    print(code)

    print("\nํ์ ๋ผ์ด๋ธ๋ฌ๋ฆฌ:")
    print(" - pytesseract: pip install pytesseract")
    print(" - Tesseract-OCR: ์์คํ ์ค์น ํ์")
220
221
222# ============================================================
223# ํ๋ก์ ํธ 3: ์ค์๊ฐ ๊ฐ์ฒด ์ถ์
224# ============================================================
225
def object_tracking_project():
    """Real-time object tracking demo (simulation).

    Generates a 30-frame synthetic sequence with one moving circle and
    one static rectangle, saves tracking_init.jpg / tracking_result.jpg,
    and prints a complete cv2 Tracker usage example plus a comparison of
    tracker types. No real tracker is instantiated here.
    """
    print("\n" + "=" * 60)
    print("ํ๋ก์ ํธ 3: ์ค์๊ฐ ๊ฐ์ฒด ์ถ์ ")
    print("=" * 60)

    # Build a simulated 640x480 frame sequence.
    frames = []
    for i in range(30):
        frame = np.zeros((480, 640, 3), dtype=np.uint8)
        frame[:] = [50, 50, 50]

        # Moving object: circle drifting right along a sine wave.
        x = 100 + i * 15
        y = 240 + int(50 * np.sin(i * 0.3))
        cv2.circle(frame, (x, y), 40, (0, 200, 0), -1)

        # Static object (blue rectangle in BGR).
        cv2.rectangle(frame, (400, 100), (500, 200), (200, 0, 0), -1)

        frames.append(frame)

    # Select the tracking target on the first frame.
    first_frame = frames[0].copy()
    bbox = (60, 200, 80, 80)  # x, y, w, h
    cv2.rectangle(first_frame, (bbox[0], bbox[1]),
                  (bbox[0]+bbox[2], bbox[1]+bbox[3]), (0, 255, 0), 2)
    cv2.imwrite('tracking_init.jpg', first_frame)

    # Tracking "simulation": the motion is known analytically.
    print("\n์ถ์ ์๋ฎฌ๋ ์ด์ (KCF Tracker ๊ฐ๋)")

    # Visualize the tracked position at frame 15 using the motion formula.
    result_frame = frames[15].copy()
    new_x = 100 + 15 * 15
    new_y = 240 + int(50 * np.sin(15 * 0.3))
    cv2.rectangle(result_frame, (new_x-40, new_y-40),
                  (new_x+40, new_y+40), (0, 255, 0), 2)
    cv2.putText(result_frame, 'Tracking', (new_x-30, new_y-50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    cv2.imwrite('tracking_result.jpg', result_frame)

    # Reference implementation printed as text (needs a camera/video).
    print("\n์์ฑ๋ ์ถ์ ์ฝ๋:")
    code = '''
import cv2

# ๋น๋์ค ์บก์ฒ
cap = cv2.VideoCapture(0) # ๋๋ 'video.mp4'

# ์ฒซ ํ๋ ์ ์ฝ๊ธฐ
ret, frame = cap.read()

# ROI ์ ํ (๋ง์ฐ์ค๋ก ๋๋๊ทธ)
bbox = cv2.selectROI("Select Object", frame, fromCenter=False)
cv2.destroyAllWindows()

# ์ถ์ ๊ธฐ ์์ฑ (์ฌ๋ฌ ์ต์)
# tracker = cv2.TrackerBoosting_create()
# tracker = cv2.TrackerMIL_create()
tracker = cv2.TrackerKCF_create()
# tracker = cv2.TrackerCSRT_create() # ๋ ์ ํ

# ์ด๊ธฐํ
tracker.init(frame, bbox)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # ์ถ์ ์๋ฐ์ดํธ
    success, bbox = tracker.update(frame)

    if success:
        x, y, w, h = [int(v) for v in bbox]
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame, "Tracking", (x, y-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    else:
        cv2.putText(frame, "Lost", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    cv2.imshow('Tracking', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
'''
    print(code)

    # Quick qualitative comparison of the classic OpenCV trackers.
    print("\n์ถ์ ๊ธฐ ๋น๊ต:")
    trackers = [
        ('KCF', '๋น ๋ฆ, ์ผ๋ฐ์ ์ฑ๋ฅ'),
        ('CSRT', '์ ํ, ๋ค์ ๋๋ฆผ'),
        ('MOSSE', '๋งค์ฐ ๋น ๋ฆ, ๋ฎ์ ์ ํ๋'),
        ('MedianFlow', '์์ธก ๊ฐ๋ฅํ ์์ง์'),
    ]
    for name, desc in trackers:
        print(f" {name}: {desc}")
326
327
# ============================================================
# Project 4: Image panorama stitching
# ============================================================
331
def panorama_stitching():
    """Image panorama stitching demo.

    Creates two overlapping crops of a synthetic scene, matches ORB
    features between them, estimates a homography with RANSAC, and warps
    the second crop into the first one's frame to form a panorama.

    Side effects: writes panorama_img1.jpg, panorama_img2.jpg and (on
    success) panorama_result.jpg to the working directory.
    """
    print("\n" + "=" * 60)
    print("ํ๋ก์ ํธ 4: ์ด๋ฏธ์ง ํ๋ธ๋ผ๋ง")
    print("=" * 60)

    # Build a synthetic scene to split into two overlapping views.
    full_scene = np.zeros((300, 800, 3), dtype=np.uint8)
    full_scene[:] = [200, 200, 200]

    # Place distinctive objects so ORB has corners to latch onto.
    cv2.circle(full_scene, (100, 150), 50, (0, 0, 150), -1)
    cv2.rectangle(full_scene, (250, 100), (350, 200), (0, 150, 0), -1)
    cv2.circle(full_scene, (500, 150), 60, (150, 0, 0), -1)
    cv2.rectangle(full_scene, (650, 80), (750, 220), (150, 150, 0), -1)

    # Two crops sharing columns 300..450 (the overlap region).
    img1 = full_scene[:, :450].copy()
    img2 = full_scene[:, 300:].copy()

    cv2.imwrite('panorama_img1.jpg', img1)
    cv2.imwrite('panorama_img2.jpg', img2)

    print("์คํฐ์นญํ ์ด๋ฏธ์ง ์์ฑ ์๋ฃ")

    # Feature detection on grayscale versions.
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # ORB keypoints + binary descriptors (matched with Hamming distance).
    orb = cv2.ORB_create(nfeatures=500)
    kp1, des1 = orb.detectAndCompute(gray1, None)
    kp2, des2 = orb.detectAndCompute(gray2, None)

    if des1 is not None and des2 is not None:
        # kNN matching; crossCheck must stay False when using knnMatch.
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
        matches = bf.knnMatch(des1, des2, k=2)

        # Lowe's ratio test. BUGFIX: knnMatch may return fewer than two
        # neighbors for a query descriptor, so guard the unpack instead of
        # assuming every entry is a pair (the old `for m, n in matches`
        # raised ValueError in that case).
        good = []
        for pair in matches:
            if len(pair) == 2:
                m, n = pair
                if m.distance < 0.75 * n.distance:
                    good.append(m)

        print(f"์ข์ ๋งค์นญ: {len(good)}")

        if len(good) >= 4:
            # Homography mapping img2 coordinates into img1's frame.
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)

            if H is not None:
                # Compose the panorama.
                h1, w1 = img1.shape[:2]
                h2, w2 = img2.shape[:2]

                # Project img2's corners to find the overall output extent.
                corners = np.float32([[0, 0], [w2, 0], [w2, h2], [0, h2]]).reshape(-1, 1, 2)
                transformed = cv2.perspectiveTransform(corners, H)

                all_corners = np.concatenate([
                    np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]]).reshape(-1, 1, 2),
                    transformed
                ])

                x_min, y_min = np.int32(all_corners.min(axis=0).ravel())
                x_max, y_max = np.int32(all_corners.max(axis=0).ravel())

                # Translation so every warped pixel lands at non-negative
                # coordinates (x_min/y_min are <= 0 because img1's own
                # corner (0, 0) is included above).
                translation = np.array([
                    [1, 0, -x_min],
                    [0, 1, -y_min],
                    [0, 0, 1]
                ])

                # Warp img2 onto the canvas, then paste img1 into its slot.
                result_width = x_max - x_min
                result_height = y_max - y_min

                result = cv2.warpPerspective(img2, translation @ H,
                                             (result_width, result_height))
                result[-y_min:-y_min+h1, -x_min:-x_min+w1] = img1

                cv2.imwrite('panorama_result.jpg', result)
                print("ํ๋ธ๋ผ๋ง ์์ฑ ์๋ฃ: panorama_result.jpg")

    print("\nํ๋ธ๋ผ๋ง ์์ฑ ํ์ดํ๋ผ์ธ:")
    print(" 1. ํน์ง์ ๊ฒ์ถ (SIFT/ORB)")
    print(" 2. ํน์ง์ ๋งค์นญ")
    print(" 3. ํธ๋ชจ๊ทธ๋ํผ ๊ณ์ฐ")
    print(" 4. ์ด๋ฏธ์ง ์ํ")
    print(" 5. ๋ธ๋ ๋ฉ (๊ฒฝ๊ณ ๋ถ๋๋ฝ๊ฒ)")

    # The one-call alternative: OpenCV's high-level Stitcher class.
    print("\nOpenCV Stitcher ์ฌ์ฉ:")
    code = '''
# ๊ฐ๋จํ ๋ฐฉ๋ฒ: cv2.Stitcher
stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
status, panorama = stitcher.stitch([img1, img2, img3])

if status == cv2.Stitcher_OK:
    cv2.imwrite('panorama.jpg', panorama)
else:
    print(f"์คํฐ์นญ ์คํจ: {status}")
'''
    print(code)
441
442
443# ============================================================
444# ํ๋ก์ ํธ 5: ์ฆ๊ฐ ํ์ค ๋ง์ปค
445# ============================================================
446
def ar_marker_project():
    """Augmented-reality marker demo (concept only).

    Draws a simplified marker pattern (not a real ArUco code), saves it
    to ar_marker.jpg, and prints an ArUco generate/detect/pose-estimation
    example as text. No detection is actually run.
    """
    print("\n" + "=" * 60)
    print("ํ๋ก์ ํธ 5: AR ๋ง์ปค ๊ธฐ๋ฐ ์ฆ๊ฐ ํ์ค")
    print("=" * 60)

    # Marker side length in pixels (simulation only).
    marker_size = 200

    # White canvas for the simulated marker.
    marker = np.zeros((marker_size, marker_size), dtype=np.uint8)
    marker[:] = 255

    # Simple pattern (a real ArUco marker encodes an ID and is more complex).
    cv2.rectangle(marker, (10, 10), (190, 190), 0, 10)
    cv2.rectangle(marker, (40, 40), (80, 80), 0, -1)
    cv2.rectangle(marker, (120, 40), (160, 80), 0, -1)
    cv2.rectangle(marker, (40, 120), (80, 160), 0, -1)
    cv2.rectangle(marker, (120, 120), (160, 160), 0, -1)
    cv2.rectangle(marker, (80, 80), (120, 120), 0, -1)

    cv2.imwrite('ar_marker.jpg', marker)

    print("\nArUco ๋ง์ปค:")
    print(" - OpenCV์ ๋ด์ฅ๋ ๋ง์ปค ์์คํ")
    print(" - ์๋ ๊ฒ์ถ ๋ฐ ID ์ธ์")
    print(" - 4๊ฐ ์ฝ๋๋ก ํฌ์ฆ ์ถ์ ")

    # Reference snippet printed as text (cv2.aruco API, OpenCV >= 4.7 style).
    code = '''
# ArUco ๋ง์ปค ์์ฑ
import cv2

# ๋์๋๋ฆฌ ์ ํ
aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)

# ๋ง์ปค ์์ฑ (ID=42, ํฌ๊ธฐ=200x200)
marker = cv2.aruco.generateImageMarker(aruco_dict, 42, 200)
cv2.imwrite('marker_42.png', marker)

# ๋ง์ปค ๊ฒ์ถ
detector = cv2.aruco.ArucoDetector(aruco_dict)
corners, ids, rejected = detector.detectMarkers(gray)

# ๋ง์ปค ๊ทธ๋ฆฌ๊ธฐ
cv2.aruco.drawDetectedMarkers(image, corners, ids)

# ํฌ์ฆ ์ถ์  (์นด๋ฉ๋ผ ์บ๋ฆฌ๋ธ๋ ์ด์ ํ์)
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
    corners, marker_length, camera_matrix, dist_coeffs
)

# ์ขํ์ถ ๊ทธ๋ฆฌ๊ธฐ
for rvec, tvec in zip(rvecs, tvecs):
    cv2.drawFrameAxes(image, camera_matrix, dist_coeffs, rvec, tvec, 0.1)
'''
    print(code)

    print("\nAR ์์ฉ:")
    print(" - 3D ๊ฐ์ฒด ์ค๋ฒ๋ ์ด")
    print(" - ๊ฐ์ ๊ฐ๊ตฌ ๋ฐฐ์น")
    print(" - ๊ฒ์/๊ต์ก")
508
509
510# ============================================================
511# ํ๋ก์ ํธ ๊ตฌ์กฐ ๊ฐ์ด๋
512# ============================================================
513
def project_structure_guide():
    """Print a recommended directory layout and tips for CV projects.

    Pure console output; no files are written.
    """
    print("\n" + "=" * 60)
    print("์ปดํจํฐ ๋น์ ํ๋ก์ ํธ ๊ตฌ์กฐ ๊ฐ์ด๋")
    print("=" * 60)

    # The directory tree is emitted as a single literal block.
    print("""
๊ถ์ฅ ํ๋ก์ ํธ ๊ตฌ์กฐ:

project/
โโโ main.py # ๋ฉ์ธ ์คํ ํ์ผ
โโโ config.py # ์ค์ (๊ฒฝ๋ก, ํ๋ผ๋ฏธํฐ)
โโโ requirements.txt # ์์กด์ฑ
โโโ README.md
โ
โโโ src/
โ โโโ __init__.py
โ โโโ detection.py # ๊ฐ์ฒด ๊ฒ์ถ
โ โโโ preprocessing.py # ์ ์ฒ๋ฆฌ
โ โโโ tracking.py # ์ถ์ 
โ โโโ utils.py # ์ ํธ๋ฆฌํฐ
โ
โโโ models/ # ํ์ต๋ ๋ชจ๋ธ ํ์ผ
โ โโโ yolov3.weights
โ โโโ yolov3.cfg
โ โโโ ...
โ
โโโ data/ # ์๋ ฅ ๋ฐ์ดํฐ
โ โโโ images/
โ โโโ videos/
โ
โโโ output/ # ๊ฒฐ๊ณผ ์ ์ฅ
โ โโโ results/
โ โโโ logs/
โ
โโโ tests/ # ํ์คํธ ์ฝ๋
 โโโ test_detection.py
""")

    print("\n๊ฐ๋ฐ ํ:")
    print(" 1. ๋ชจ๋ํ: ๊ธฐ๋ฅ๋ณ๋ก ๋ถ๋ฆฌ")
    print(" 2. ์ค์ ํ์ผ: ํ๋์ฝ๋ฉ ํผํ๊ธฐ")
    print(" 3. ๋ก๊น: ๋๋ฒ๊น ์ฉ์ด")
    print(" 4. ํ์คํธ: ๋จ์ ํ์คํธ ์์ฑ")
    print(" 5. ๋ฌธ์ํ: ํจ์/ํด๋์ค docstring")
559
560
def main():
    """Run all five demo projects, then the project-structure guide."""
    # Each demo prints its own banner and writes its own output images;
    # they are executed in presentation order.
    demos = (
        document_scanner,           # project 1: document scanner
        license_plate_recognition,  # project 2: license plate recognition
        object_tracking_project,    # project 3: object tracking
        panorama_stitching,         # project 4: panorama stitching
        ar_marker_project,          # project 5: AR markers
        project_structure_guide,    # closing: project layout guide
    )
    for run_demo in demos:
        run_demo()

    print("\n" + "=" * 60)
    print("์ค์ ํ๋ก์ ํธ ๋ฐ๋ชจ ์๋ฃ!")
    print("=" * 60)
584
585
# Script entry point: run all demos when executed directly.
if __name__ == '__main__':
    main()