1#!/usr/bin/env python3
2"""
3ONNX Runtime ๊ธฐ๋ฐ Edge AI ์ถ๋ก
4์ด๋ฏธ์ง ๋ถ๋ฅ ๋ฐ ๊ฐ์ฒด ๊ฒ์ถ ์์
5
6์ฐธ๊ณ : content/ko/IoT_Embedded/09_Edge_AI_ONNX.md
7"""
8
9import numpy as np
10import time
11from typing import Optional, Tuple, List, Dict
12import os
13
# Optional dependency: ONNX Runtime
try:
    import onnxruntime as ort
    HAS_ONNX = True
except ImportError:
    HAS_ONNX = False
    print("Warning: onnxruntime is not installed.")
    print("Install: pip install onnxruntime")

# Optional dependency: OpenCV
try:
    import cv2
    HAS_CV2 = True
except ImportError:
    HAS_CV2 = False
    print("Warning: opencv-python is not installed.")
    print("Install: pip install opencv-python")

# Optional dependency: Pillow
try:
    from PIL import Image
    HAS_PIL = True
except ImportError:
    HAS_PIL = False
    print("Warning: Pillow is not installed.")
    print("Install: pip install Pillow")
40
41
# === ONNX model wrapper ===
43
class ONNXModel:
    """Thin wrapper around an ONNX Runtime inference session.

    Loads a model, auto-selects an execution provider (CUDA when
    available, otherwise CPU), and exposes simple predict/benchmark
    helpers.
    """

    def __init__(self, model_path: str, providers: Optional[List[str]] = None):
        """Create an inference session for *model_path*.

        Args:
            model_path: Path to the .onnx model file.
            providers: Explicit execution-provider list; auto-detected
                when None.

        Raises:
            ImportError: onnxruntime is not installed.
            FileNotFoundError: the model file does not exist.
        """
        if not HAS_ONNX:
            raise ImportError("onnxruntime is required")

        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model file not found: {model_path}")

        if providers is None:
            # Auto-select: prefer the CUDA provider when available.
            available = ort.get_available_providers()
            if 'CUDAExecutionProvider' in available:
                providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
            else:
                providers = ['CPUExecutionProvider']

        # Session options: full graph optimization, bounded CPU threads.
        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        sess_options.intra_op_num_threads = 4  # CPU thread count

        # Create the inference session.
        self.session = ort.InferenceSession(
            model_path,
            sess_options=sess_options,
            providers=providers
        )

        # Cache metadata of the first input and output tensors.
        self.input_name = self.session.get_inputs()[0].name
        self.input_shape = self.session.get_inputs()[0].shape
        self.input_type = self.session.get_inputs()[0].type
        self.output_name = self.session.get_outputs()[0].name

        print(f"Model loaded: {model_path}")
        print(f"  Providers: {self.session.get_providers()}")
        print(f"  Input: {self.input_name} {self.input_shape}")
        print(f"  Output: {self.output_name}")

    def get_input_shape(self) -> list:
        """Return the model input shape (may contain symbolic dims)."""
        return self.input_shape

    def predict(self, input_data: np.ndarray) -> np.ndarray:
        """Run inference and return the first output tensor."""
        outputs = self.session.run(
            [self.output_name],
            {self.input_name: input_data}
        )
        return outputs[0]

    def benchmark(self, num_iterations: int = 100) -> Dict[str, float]:
        """Measure inference latency with a random input tensor.

        Args:
            num_iterations: Number of timed inference runs.

        Returns:
            Dict with mean/std/min/max latency in milliseconds and FPS.
        """
        # Replace any dynamic dimension (symbolic string such as
        # 'batch', or None) with 1 so a concrete dummy tensor can be
        # built; the original only recognized 'batch'/'N'/None.
        dummy_shape = [x if isinstance(x, int) else 1 for x in self.input_shape]
        dummy_input = np.random.randn(*dummy_shape).astype(np.float32)

        # Warm-up runs (excluded from timing).
        for _ in range(10):
            self.predict(dummy_input)

        # Timed runs.
        times = []
        for _ in range(num_iterations):
            start = time.perf_counter()
            self.predict(dummy_input)
            times.append((time.perf_counter() - start) * 1000)  # ms

        times = np.array(times)
        mean_ms = float(np.mean(times))

        return {
            "mean_ms": mean_ms,
            "std_ms": float(np.std(times)),
            "min_ms": float(np.min(times)),
            "max_ms": float(np.max(times)),
            "fps": 1000.0 / mean_ms,
        }
127
128
# === Image classification model ===
130
class ImageClassifier(ONNXModel):
    """ONNX image-classification model with ImageNet-style preprocessing."""

    # ImageNet classes (only the first 10 shown as an example).
    IMAGENET_CLASSES = [
        'tench', 'goldfish', 'great_white_shark', 'tiger_shark',
        'hammerhead', 'electric_ray', 'stingray', 'cock', 'hen', 'ostrich'
        # ... the real list has 1000 classes
    ]

    def __init__(self, model_path: str, labels_path: Optional[str] = None):
        """Load the model and, optionally, a label file (one label per line).

        Args:
            model_path: Path to the .onnx classification model.
            labels_path: Optional text file with one class label per line;
                falls back to the built-in sample list.
        """
        super().__init__(model_path)

        # Load labels when a file is provided.
        if labels_path and os.path.exists(labels_path):
            with open(labels_path, 'r', encoding='utf-8') as f:
                self.labels = [line.strip() for line in f]
        else:
            self.labels = self.IMAGENET_CLASSES

        # Input size from the model shape (NCHW layout assumed).
        # Fall back to 224 when the dimension is missing or symbolic —
        # a symbolic string here would crash the later resize call.
        shape = self.input_shape
        self.input_height = shape[2] if len(shape) > 2 and isinstance(shape[2], int) else 224
        self.input_width = shape[3] if len(shape) > 3 and isinstance(shape[3], int) else 224

    def preprocess_image(self, image_path: str) -> np.ndarray:
        """Load an image and convert it to a normalized NCHW float32 batch.

        Raises:
            ImportError: Pillow is not installed.
        """
        if not HAS_PIL:
            raise ImportError("Pillow is required")

        # Load and resize.
        image = Image.open(image_path).convert('RGB')
        image = image.resize((self.input_width, self.input_height))

        img_array = np.array(image).astype(np.float32)

        # ImageNet normalization (mean/std defined for 0-1 inputs,
        # scaled here to the 0-255 range). Cast back to float32 —
        # the float64 mean/std would otherwise promote the tensor and
        # break models exported with float32 inputs.
        mean = np.array([0.485, 0.456, 0.406]) * 255
        std = np.array([0.229, 0.224, 0.225]) * 255
        img_array = ((img_array - mean) / std).astype(np.float32)

        # HWC -> CHW, then add the batch dimension.
        img_array = img_array.transpose(2, 0, 1)
        img_array = np.expand_dims(img_array, axis=0)

        return img_array

    def classify(self, image_path: str, top_k: int = 5) -> List[Tuple[str, float]]:
        """Classify an image and return the top-k (label, probability) pairs."""
        input_data = self.preprocess_image(image_path)

        # Timed inference.
        start = time.perf_counter()
        output = self.predict(input_data)
        inference_time = (time.perf_counter() - start) * 1000

        # Logits -> probabilities.
        probs = self._softmax(output[0])

        # Indices of the top-k probabilities, highest first.
        top_indices = np.argsort(probs)[-top_k:][::-1]

        results = []
        for idx in top_indices:
            label = self.labels[idx] if idx < len(self.labels) else f"class_{idx}"
            results.append((label, float(probs[idx])))

        print(f"Inference time: {inference_time:.2f}ms")

        return results

    @staticmethod
    def _softmax(x: np.ndarray) -> np.ndarray:
        """Numerically stable softmax (shifts by the max before exp)."""
        exp_x = np.exp(x - np.max(x))
        return exp_x / exp_x.sum()
212
213
# === Object detection model (YOLO) ===
215
class YOLODetector:
    """YOLO ONNX object detector (COCO classes).

    When the model file is missing, the detector runs in a simulation
    mode that produces random detections so the surrounding pipeline
    can still be exercised.
    """

    # 80 classes of the COCO dataset.
    COCO_CLASSES = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]

    def __init__(self, model_path: str, conf_threshold: float = 0.5,
                 iou_threshold: float = 0.45):
        """Load a YOLO ONNX model, or fall back to simulation mode.

        Args:
            model_path: Path to the .onnx model file.
            conf_threshold: Minimum confidence to keep a detection.
            iou_threshold: IoU threshold for non-maximum suppression.

        Raises:
            ImportError: onnxruntime is not installed.
        """
        if not HAS_ONNX:
            raise ImportError("onnxruntime is required")

        # Set thresholds in both modes (the original only set them on
        # the real-model path, leaving them undefined in simulation).
        self.conf_threshold = conf_threshold
        self.iou_threshold = iou_threshold

        if not os.path.exists(model_path):
            # Missing model: switch to simulation mode.
            print(f"Warning: model file not found: {model_path}")
            print("Running in simulation mode.")
            self.simulation_mode = True
            self.input_height = 640
            self.input_width = 640
            return

        self.simulation_mode = False

        # Create the ONNX session (CPU provider, full graph optimization).
        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        self.session = ort.InferenceSession(
            model_path,
            sess_options=sess_options,
            providers=['CPUExecutionProvider']
        )

        # Input tensor metadata (NCHW layout assumed).
        input_info = self.session.get_inputs()[0]
        self.input_name = input_info.name
        self.input_shape = input_info.shape
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]

        print(f"YOLO model loaded")
        print(f"  Input size: {self.input_width}x{self.input_height}")

    def preprocess(self, image: np.ndarray) -> Tuple[np.ndarray, Tuple[float, float]]:
        """Resize/normalize a BGR image to an NCHW float32 tensor.

        Returns:
            (tensor, scale) where scale is (orig_w/input_w, orig_h/input_h),
            used to map boxes back onto the original image.
        """
        orig_height, orig_width = image.shape[:2]

        # Resize to the model input size.
        resized = cv2.resize(image, (self.input_width, self.input_height))

        # BGR -> RGB, HWC -> CHW.
        input_data = resized[:, :, ::-1].transpose(2, 0, 1)

        # Normalize to 0-1.
        input_data = input_data.astype(np.float32) / 255.0

        # Add the batch dimension.
        input_data = np.expand_dims(input_data, axis=0)

        # Remember the scale factors for postprocessing.
        scale = (orig_width / self.input_width, orig_height / self.input_height)

        return input_data, scale

    def detect(self, image: np.ndarray) -> List[Dict]:
        """Detect objects in a BGR image; return a list of detection dicts.

        Raises:
            ImportError: opencv-python is missing (real-model mode only).
        """
        if self.simulation_mode:
            # Simulation: return random detections.
            print("Simulation mode: generating random detections.")
            return self._simulate_detection(image)

        if not HAS_CV2:
            raise ImportError("opencv-python is required")

        input_data, scale = self.preprocess(image)

        # Timed inference.
        start = time.perf_counter()
        outputs = self.session.run(None, {self.input_name: input_data})
        inference_time = (time.perf_counter() - start) * 1000

        detections = self.postprocess(outputs[0], scale)

        print(f"Inference time: {inference_time:.2f}ms")
        print(f"Detected objects: {len(detections)}")

        return detections

    def postprocess(self, output: np.ndarray, scale: Tuple[float, float]) -> List[Dict]:
        """Convert raw YOLO output to scaled boxes and apply NMS."""
        if not HAS_CV2:
            return []

        predictions = output[0]

        boxes = []
        scores = []
        class_ids = []

        for pred in predictions:
            confidence = pred[4]  # objectness score

            if confidence > self.conf_threshold:
                class_probs = pred[5:]
                class_id = np.argmax(class_probs)
                class_score = class_probs[class_id]

                if class_score > self.conf_threshold:
                    # Box layout: (center_x, center_y, width, height).
                    cx, cy, w, h = pred[:4]

                    # Map back to original-image coordinates.
                    x1 = int((cx - w / 2) * scale[0])
                    y1 = int((cy - h / 2) * scale[1])
                    x2 = int((cx + w / 2) * scale[0])
                    y2 = int((cy + h / 2) * scale[1])

                    boxes.append([x1, y1, x2, y2])
                    scores.append(float(confidence * class_score))
                    class_ids.append(int(class_id))

        # Non-Maximum Suppression.
        if boxes:
            indices = cv2.dnn.NMSBoxes(
                boxes, scores, self.conf_threshold, self.iou_threshold
            )

            results = []
            for i in indices:
                # Older OpenCV returns nested arrays; newer returns scalars.
                idx = i[0] if isinstance(i, (list, np.ndarray)) else i
                results.append({
                    'box': boxes[idx],
                    'score': scores[idx],
                    'class_id': class_ids[idx],
                    'class_name': self.COCO_CLASSES[class_ids[idx]]
                })

            return results

        return []

    def _simulate_detection(self, image: np.ndarray) -> List[Dict]:
        """Simulation: generate 1-4 random, in-bounds detections."""
        height, width = image.shape[:2]

        num_detections = np.random.randint(1, 5)
        detections = []

        for _ in range(num_detections):
            x1 = np.random.randint(0, max(1, width // 2))
            y1 = np.random.randint(0, max(1, height // 2))
            # Clamp the lower bound so randint never gets an empty range
            # on small images (the original raised for width < x1 + 51).
            x2 = np.random.randint(min(x1 + 50, width - 1), width) if width > 1 else x1
            y2 = np.random.randint(min(y1 + 50, height - 1), height) if height > 1 else y1

            class_id = np.random.randint(0, len(self.COCO_CLASSES))

            detections.append({
                'box': [x1, y1, x2, y2],
                'score': np.random.uniform(0.5, 0.95),
                'class_id': class_id,
                'class_name': self.COCO_CLASSES[class_id]
            })

        return detections

    def draw_detections(self, image: np.ndarray, detections: List[Dict]) -> np.ndarray:
        """Draw boxes and labels on a copy of *image* and return it."""
        if not HAS_CV2:
            print("Warning: opencv-python is missing; skipping visualization.")
            return image

        result = image.copy()

        for det in detections:
            x1, y1, x2, y2 = det['box']
            label = f"{det['class_name']}: {det['score']:.2f}"

            # Bounding box.
            cv2.rectangle(result, (x1, y1), (x2, y2), (0, 255, 0), 2)

            # Label background.
            (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(result, (x1, y1 - 20), (x1 + w, y1), (0, 255, 0), -1)

            # Label text.
            cv2.putText(result, label, (x1, y1 - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

        return result
423
424
# === Usage examples ===
426
def example_basic_inference():
    """Basic ONNX inference example (builds a dummy input tensor only)."""
    print("\n=== Basic ONNX inference example ===")

    if not HAS_ONNX:
        print("onnxruntime is not installed; cannot run this example.")
        return

    # Simulation: no model file, just demonstrate the input format.
    print("Simulation mode: testing with a dummy input.")

    # Dummy NCHW data.
    batch_size = 1
    channels = 3
    height = 224
    width = 224

    dummy_input = np.random.randn(batch_size, channels, height, width).astype(np.float32)

    print(f"Input shape: {dummy_input.shape}")
    print(f"Input value range: [{dummy_input.min():.2f}, {dummy_input.max():.2f}]")
448
449
def example_image_classification():
    """Image classification example (requires a local resnet18.onnx)."""
    print("\n=== Image classification example ===")

    if not HAS_ONNX:
        print("onnxruntime is not installed; cannot run this example.")
        return

    # Example model path.
    model_path = "resnet18.onnx"

    if not os.path.exists(model_path):
        print(f"Model file not found: {model_path}")
        print("Export example from PyTorch:")
        print("  import torch")
        print("  model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)")
        print("  dummy_input = torch.randn(1, 3, 224, 224)")
        print("  torch.onnx.export(model, dummy_input, 'resnet18.onnx')")
        return

    # Build the classifier.
    classifier = ImageClassifier(model_path)

    # Benchmark.
    print("\nPerformance benchmark:")
    results = classifier.benchmark(num_iterations=50)
    print(f"  Mean: {results['mean_ms']:.2f}ms")
    print(f"  FPS: {results['fps']:.1f}")
478
479
def example_object_detection():
    """Object detection example (simulation mode when no model file)."""
    print("\n=== Object detection example (simulation) ===")

    # Runs in simulation mode when yolov5s.onnx is absent.
    detector = YOLODetector("yolov5s.onnx")

    # Dummy BGR image.
    dummy_image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)

    # Detect.
    detections = detector.detect(dummy_image)

    # Report.
    print("\nDetection results:")
    for i, det in enumerate(detections):
        print(f"  {i+1}. {det['class_name']}: {det['score']:.2f}")
        print(f"     box: {det['box']}")

    # Visualize when OpenCV is available.
    if HAS_CV2:
        result_image = detector.draw_detections(dummy_image, detections)
        print("\nResult image created")
503
504
def example_performance_comparison():
    """Throughput comparison across batch sizes (simulated workload)."""
    print("\n=== Performance comparison example ===")

    if not HAS_ONNX:
        print("onnxruntime is not installed; cannot run this example.")
        return

    print("Throughput by batch size (simulation)")

    input_shape = (1, 3, 224, 224)

    for batch_size in [1, 4, 8, 16]:
        data = np.random.randn(batch_size, *input_shape[1:]).astype(np.float32)

        start = time.perf_counter()
        # Simulated inference: a cheap reduction over the spatial axes.
        _ = np.mean(data, axis=(2, 3))
        elapsed = time.perf_counter() - start

        # Guard against a zero timer delta on very fast runs.
        throughput = batch_size / elapsed if elapsed > 0 else float('inf')
        print(f"Batch size {batch_size:2d}: {throughput:.1f} samples/sec")
527
528
# === Main ===
530
if __name__ == "__main__":
    print("=" * 60)
    print("ONNX Runtime Edge AI inference examples")
    print("=" * 60)

    # Report ONNX Runtime availability.
    if HAS_ONNX:
        print(f"\nONNX Runtime version: {ort.__version__}")
        print(f"Available providers: {ort.get_available_providers()}")
    else:
        print("\nWarning: ONNX Runtime is not installed.")
        print("Install: pip install onnxruntime")

    # Run all examples.
    example_basic_inference()
    example_image_classification()
    example_object_detection()
    example_performance_comparison()

    print("\n" + "=" * 60)
    print("All examples finished")
    print("=" * 60)