# 19_dnn_module.py
  1"""
  219. OpenCV DNN ๋ชจ๋“ˆ
  3- ๋”ฅ๋Ÿฌ๋‹ ๋ชจ๋ธ ๋กœ๋“œ
  4- ์ด๋ฏธ์ง€ ๋ถ„๋ฅ˜
  5- ๊ฐ์ฒด ๊ฒ€์ถœ (YOLO, SSD)
  6- ์‹œ๋งจํ‹ฑ ์„ธ๊ทธ๋ฉ˜ํ…Œ์ด์…˜
  7"""
  8
  9import cv2
 10import numpy as np
 11
 12
 13def dnn_module_overview():
 14    """DNN ๋ชจ๋“ˆ ๊ฐœ์š”"""
 15    print("=" * 50)
 16    print("OpenCV DNN ๋ชจ๋“ˆ ๊ฐœ์š”")
 17    print("=" * 50)
 18
 19    print("\n1. ์ง€์› ํ”„๋ ˆ์ž„์›Œํฌ:")
 20    frameworks = [
 21        ('Caffe', '.caffemodel, .prototxt'),
 22        ('TensorFlow', '.pb, .pbtxt'),
 23        ('Darknet', '.weights, .cfg'),
 24        ('ONNX', '.onnx'),
 25        ('Torch', '.t7, .net'),
 26    ]
 27
 28    for name, files in frameworks:
 29        print(f"   {name}: {files}")
 30
 31    print("\n2. ๋ชจ๋ธ ๋กœ๋“œ ํ•จ์ˆ˜:")
 32    print("   cv2.dnn.readNet(model, config)")
 33    print("   cv2.dnn.readNetFromCaffe(prototxt, caffemodel)")
 34    print("   cv2.dnn.readNetFromTensorflow(model, config)")
 35    print("   cv2.dnn.readNetFromDarknet(cfg, weights)")
 36    print("   cv2.dnn.readNetFromONNX(onnx)")
 37
 38    print("\n3. ๋ฐฑ์—”๋“œ ๋ฐ ํƒ€๊ฒŸ:")
 39    print("   ๋ฐฑ์—”๋“œ: DNN_BACKEND_OPENCV, DNN_BACKEND_CUDA")
 40    print("   ํƒ€๊ฒŸ: DNN_TARGET_CPU, DNN_TARGET_CUDA")
 41
 42
 43def blob_creation_demo():
 44    """Blob ์ƒ์„ฑ ๋ฐ๋ชจ"""
 45    print("\n" + "=" * 50)
 46    print("Blob ์ƒ์„ฑ")
 47    print("=" * 50)
 48
 49    # ํ…Œ์ŠคํŠธ ์ด๋ฏธ์ง€
 50    img = np.zeros((480, 640, 3), dtype=np.uint8)
 51    img[:] = [150, 150, 150]
 52    cv2.circle(img, (320, 240), 100, (0, 200, 0), -1)
 53
 54    # Blob ์ƒ์„ฑ
 55    # scalefactor: ํ”ฝ์…€ ๊ฐ’ ์Šค์ผ€์ผ๋ง (๋ณดํ†ต 1/255)
 56    # size: ๋„คํŠธ์›Œํฌ ์ž…๋ ฅ ํฌ๊ธฐ
 57    # mean: ํ‰๊ท  ๊ฐ’ ๋นผ๊ธฐ (BGR ์ˆœ์„œ)
 58    # swapRB: BGR -> RGB ๋ณ€ํ™˜
 59    # crop: ํฌ๊ธฐ ์กฐ์ • ์‹œ ํฌ๋กญ ์—ฌ๋ถ€
 60
 61    blob = cv2.dnn.blobFromImage(
 62        img,
 63        scalefactor=1/255.0,
 64        size=(224, 224),
 65        mean=(0, 0, 0),
 66        swapRB=True,
 67        crop=False
 68    )
 69
 70    print(f"์›๋ณธ ์ด๋ฏธ์ง€: {img.shape}")
 71    print(f"Blob shape: {blob.shape}")
 72    print(f"Blob dtype: {blob.dtype}")
 73
 74    print("\nblobFromImage ํŒŒ๋ผ๋ฏธํ„ฐ:")
 75    print("  scalefactor: ๋ณดํ†ต 1/255.0 (0-1 ์ •๊ทœํ™”)")
 76    print("  size: ๋„คํŠธ์›Œํฌ ์ž…๋ ฅ ํฌ๊ธฐ (224x224, 416x416 ๋“ฑ)")
 77    print("  mean: ImageNet ํ‰๊ท  (104.0, 117.0, 123.0)")
 78    print("  swapRB: OpenCV BGR -> ๋ชจ๋ธ RGB")
 79    print("  crop: True๋ฉด ํฌ๋กญ, False๋ฉด ๋ฆฌ์‚ฌ์ด์ฆˆ๋งŒ")
 80
 81    # ์—ฌ๋Ÿฌ ์ด๋ฏธ์ง€ ์ฒ˜๋ฆฌ
 82    images = [img, img.copy()]
 83    blob_batch = cv2.dnn.blobFromImages(
 84        images,
 85        scalefactor=1/255.0,
 86        size=(224, 224),
 87        mean=(0, 0, 0),
 88        swapRB=True
 89    )
 90    print(f"\nBatch blob shape: {blob_batch.shape}")
 91
 92    cv2.imwrite('dnn_input.jpg', img)
 93
 94
 95def image_classification_demo():
 96    """์ด๋ฏธ์ง€ ๋ถ„๋ฅ˜ ๋ฐ๋ชจ (๊ฐœ๋…)"""
 97    print("\n" + "=" * 50)
 98    print("์ด๋ฏธ์ง€ ๋ถ„๋ฅ˜ (Image Classification)")
 99    print("=" * 50)
100
101    print("\n๋ชจ๋ธ ์˜ˆ์‹œ:")
102    models = [
103        ('ResNet', 'Residual Networks, ๊นŠ์€ ๋„คํŠธ์›Œํฌ'),
104        ('VGG', 'Visual Geometry Group, ๋‹จ์ˆœ ๊ตฌ์กฐ'),
105        ('MobileNet', '๊ฒฝ๋Ÿ‰ํ™”, ๋ชจ๋ฐ”์ผ์šฉ'),
106        ('EfficientNet', 'ํšจ์œจ์  ์Šค์ผ€์ผ๋ง'),
107        ('GoogLeNet', 'Inception ๋ชจ๋“ˆ'),
108    ]
109
110    for name, desc in models:
111        print(f"   {name}: {desc}")
112
113    code = '''
114# ์ด๋ฏธ์ง€ ๋ถ„๋ฅ˜ ์ฝ”๋“œ ํ…œํ”Œ๋ฆฟ
115import cv2
116
117# ๋ชจ๋ธ ๋กœ๋“œ (์˜ˆ: MobileNet)
118net = cv2.dnn.readNetFromCaffe(
119    'deploy.prototxt',
120    'mobilenet.caffemodel'
121)
122
123# ์ด๋ฏธ์ง€ ์ „์ฒ˜๋ฆฌ
124img = cv2.imread('image.jpg')
125blob = cv2.dnn.blobFromImage(
126    img, 1/255.0, (224, 224), (104, 117, 123), swapRB=True
127)
128
129# ์ถ”๋ก 
130net.setInput(blob)
131output = net.forward()
132
133# ๊ฒฐ๊ณผ ํ•ด์„
134class_id = np.argmax(output)
135confidence = output[0][class_id]
136print(f"Class: {class_id}, Confidence: {confidence:.2f}")
137'''
138    print(code)
139
140    print("\n์ฐธ๊ณ : ์‹ค์ œ ์‹คํ–‰์—๋Š” ๋ชจ๋ธ ํŒŒ์ผ์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค.")
141    print("  MobileNet: https://github.com/shicai/MobileNet-Caffe")
142    print("  ONNX Models: https://github.com/onnx/models")
143
144
def object_detection_yolo_demo():
    """Print an overview of YOLO object detection with a code template.

    Fix: the previous template's visualization step referenced an
    undefined `classes` list; the template now loads the class names
    from 'coco.names' right after the network is loaded.
    """
    print("\n" + "=" * 50)
    print("객체 검출 - YOLO")
    print("=" * 50)

    print("\nYOLO (You Only Look Once):")
    print("  - 실시간 객체 검출")
    print("  - 단일 네트워크로 검출 + 분류")
    print("  - 버전: YOLOv3, YOLOv4, YOLOv5, YOLOv8")

    code = '''
# YOLO 객체 검출 코드
import cv2
import numpy as np

# 모델 로드 (Darknet)
net = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')
classes = [c.strip() for c in open('coco.names')]

# 출력 레이어 이름
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]

# 이미지 전처리
img = cv2.imread('image.jpg')
blob = cv2.dnn.blobFromImage(
    img, 1/255.0, (416, 416), (0, 0, 0), swapRB=True, crop=False
)

# 추론
net.setInput(blob)
outputs = net.forward(output_layers)

# 결과 처리
boxes = []
confidences = []
class_ids = []

for output in outputs:
    for detection in output:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]

        if confidence > 0.5:
            # 바운딩 박스 좌표
            center_x = int(detection[0] * img.shape[1])
            center_y = int(detection[1] * img.shape[0])
            w = int(detection[2] * img.shape[1])
            h = int(detection[3] * img.shape[0])

            x = int(center_x - w / 2)
            y = int(center_y - h / 2)

            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            class_ids.append(class_id)

# NMS (Non-Maximum Suppression)
indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

# 결과 시각화
for i in indices.flatten():
    x, y, w, h = boxes[i]
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    label = f"{classes[class_ids[i]]}: {confidences[i]:.2f}"
    cv2.putText(img, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
'''
    print(code)

    print("\n모델 다운로드:")
    print("  YOLOv3: https://pjreddie.com/darknet/yolo/")
    print("  YOLOv4: https://github.com/AlexeyAB/darknet")
218
219
def object_detection_ssd_demo():
    """Print an overview of SSD object detection plus a TensorFlow-based
    code template (concept only; model files are required to run it)."""
    divider = "=" * 50
    print("\n" + divider)
    print("객체 검출 - SSD")
    print(divider)

    print("\nSSD (Single Shot Detector):")
    for bullet in (
        "  - 다중 스케일 특징 맵 사용",
        "  - 빠른 속도",
        "  - MobileNet + SSD 조합 인기",
    ):
        print(bullet)

    template = '''
# SSD 객체 검출 코드
import cv2

# 모델 로드 (TensorFlow)
net = cv2.dnn.readNetFromTensorflow(
    'frozen_inference_graph.pb',
    'ssd_mobilenet_v2_coco.pbtxt'
)

# 이미지 전처리
img = cv2.imread('image.jpg')
blob = cv2.dnn.blobFromImage(
    img, size=(300, 300), mean=(127.5, 127.5, 127.5),
    scalefactor=1/127.5, swapRB=True
)

# 추론
net.setInput(blob)
detections = net.forward()

# 결과 처리
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]

    if confidence > 0.5:
        class_id = int(detections[0, 0, i, 1])
        x1 = int(detections[0, 0, i, 3] * img.shape[1])
        y1 = int(detections[0, 0, i, 4] * img.shape[0])
        x2 = int(detections[0, 0, i, 5] * img.shape[1])
        y2 = int(detections[0, 0, i, 6] * img.shape[0])

        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
'''
    print(template)

    print("\n모델 다운로드:")
    print("  TensorFlow Model Zoo:")
    print("  https://github.com/tensorflow/models/blob/master/research/object_detection/")
270
271
def face_detection_dnn_demo():
    """Print an overview of the OpenCV DNN face detector with a template.

    Fix: the previous template used np.array() but only imported cv2;
    it now imports numpy as np as well.
    """
    print("\n" + "=" * 50)
    print("DNN 얼굴 검출")
    print("=" * 50)

    print("\nOpenCV DNN 얼굴 검출기:")
    print("  - Caffe 기반 SSD")
    print("  - 300x300 입력")
    print("  - Haar Cascade보다 정확")

    code = '''
# DNN 얼굴 검출
import cv2
import numpy as np

# 모델 로드
model_file = "res10_300x300_ssd_iter_140000.caffemodel"
config_file = "deploy.prototxt"
net = cv2.dnn.readNetFromCaffe(config_file, model_file)

# 이미지 전처리
img = cv2.imread('image.jpg')
h, w = img.shape[:2]
blob = cv2.dnn.blobFromImage(
    img, 1.0, (300, 300), (104.0, 177.0, 123.0)
)

# 추론
net.setInput(blob)
detections = net.forward()

# 결과 처리
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]

    if confidence > 0.5:
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        x1, y1, x2, y2 = box.astype(int)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f"{confidence:.2f}"
        cv2.putText(img, label, (x1, y1-10),
                   cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
'''
    print(code)

    print("\n모델 다운로드:")
    print("  https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector")
319
320
def semantic_segmentation_demo():
    """Print an overview of semantic segmentation with a code template.

    Fix: the previous template used `num_classes` without defining it;
    it is now derived from the network output's channel axis, which is
    consistent with the template's own argmax over axis 0 of output[0].
    """
    print("\n" + "=" * 50)
    print("시맨틱 세그멘테이션")
    print("=" * 50)

    print("\n세그멘테이션 유형:")
    print("  - Semantic: 픽셀 단위 클래스 분류")
    print("  - Instance: 개별 객체 구분")
    print("  - Panoptic: Semantic + Instance")

    print("\n주요 모델:")
    models = [
        ('FCN', 'Fully Convolutional Network'),
        ('U-Net', '의료 이미지용'),
        ('DeepLab', 'Atrous convolution'),
        ('SegNet', '인코더-디코더 구조'),
        ('PSPNet', 'Pyramid Pooling'),
    ]

    for name, desc in models:
        print(f"   {name}: {desc}")

    code = '''
# 시맨틱 세그멘테이션 코드
import cv2
import numpy as np

# 모델 로드 (예: ENet)
net = cv2.dnn.readNet('enet-model.net')

# 이미지 전처리
img = cv2.imread('image.jpg')
blob = cv2.dnn.blobFromImage(
    img, 1/255.0, (1024, 512), (0, 0, 0), swapRB=True
)

# 추론
net.setInput(blob)
output = net.forward()

# 결과 처리 (클래스 맵)
class_map = np.argmax(output[0], axis=0)

# 컬러 맵 적용
num_classes = output.shape[1]
colors = np.random.randint(0, 255, (num_classes, 3))
segmentation = colors[class_map]
'''
    print(code)
370
371
def pose_estimation_dnn_demo():
    """Print an overview of DNN-based pose estimation.

    Lists 2D/3D estimation, popular models, and the 18 COCO keypoints.
    """
    divider = "=" * 50
    print("\n" + divider)
    print("포즈 추정 (Pose Estimation)")
    print(divider)

    print("\n포즈 추정 유형:")
    print("  - 2D: 이미지상의 관절 위치")
    print("  - 3D: 3차원 공간의 관절 위치")

    print("\n주요 모델:")
    for model_name, summary in (
        ('OpenPose', 'Bottom-up 방식, 다중 인원'),
        ('PoseNet', '경량화, 실시간'),
        ('HRNet', '고해상도, 정확'),
        ('MediaPipe', 'Google, 모바일 최적화'),
    ):
        print(f"   {model_name}: {summary}")

    print("\n관절 포인트 (COCO 데이터셋):")
    # 18-point ordering used by OpenPose-style COCO models.
    coco_keypoints = (
        "0: nose", "1: neck",
        "2: right_shoulder", "3: right_elbow", "4: right_wrist",
        "5: left_shoulder", "6: left_elbow", "7: left_wrist",
        "8: right_hip", "9: right_knee", "10: right_ankle",
        "11: left_hip", "12: left_knee", "13: left_ankle",
        "14: right_eye", "15: left_eye",
        "16: right_ear", "17: left_ear",
    )
    for point in coco_keypoints:
        print(f"   {point}")
405
406
def dnn_performance_tips():
    """Print a checklist of DNN inference optimizations plus a timing snippet."""
    divider = "=" * 50
    print("\n" + divider)
    print("DNN 성능 최적화")
    print(divider)

    tips_text = """
1. GPU 가속 사용
   net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
   net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

2. 입력 크기 조정
   - 작은 입력 = 빠른 추론
   - 정확도와 속도 트레이드오프

3. 모델 최적화
   - INT8 양자화
   - 모델 프루닝
   - 지식 증류

4. 배치 처리
   - 여러 이미지 동시 처리
   - blobFromImages() 사용

5. 비동기 추론
   - net.forwardAsync()
   - 추론 중 다른 작업 수행

6. 모델 선택
   - 속도 중시: MobileNet, EfficientNet-Lite
   - 정확도 중시: ResNet, EfficientNet

7. 추론 시간 측정
"""
    print(tips_text)

    # Warm-up/measure snippet (shown as text, not executed here).
    print("추론 시간 측정:")
    timing_snippet = '''
import time

# 워밍업
for _ in range(10):
    net.forward()

# 측정
times = []
for _ in range(100):
    start = time.time()
    net.forward()
    times.append(time.time() - start)

print(f"평균: {np.mean(times)*1000:.2f}ms")
print(f"FPS: {1/np.mean(times):.2f}")
'''
    print(timing_snippet)
462
463
def model_download_guide():
    """Print download locations for the pretrained models used in the demos."""
    divider = "=" * 50
    print("\n" + divider)
    print("모델 다운로드 가이드")
    print(divider)

    guide_text = """
1. YOLO
   - 공식: https://pjreddie.com/darknet/yolo/
   - v4: https://github.com/AlexeyAB/darknet
   - v5+: https://github.com/ultralytics/yolov5

2. SSD MobileNet
   - TensorFlow Model Zoo
   - https://github.com/tensorflow/models/

3. 얼굴 검출
   - OpenCV DNN Face Detector
   - https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector

4. 포즈 추정
   - OpenPose: https://github.com/CMU-Perceptual-Computing-Lab/openpose
   - 경량 버전: https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch

5. 세그멘테이션
   - ENet: https://github.com/e-lab/ENet-training
   - DeepLab: https://github.com/tensorflow/models/tree/master/research/deeplab

6. ONNX Model Zoo
   - https://github.com/onnx/models
   - 다양한 사전 학습 모델

7. OpenVINO Model Zoo
   - https://github.com/openvinotoolkit/open_model_zoo
   - Intel 최적화 모델
"""
    print(guide_text)
500
501
def main():
    """Run every DNN demo in sequence, then print a completion message."""
    demos = (
        dnn_module_overview,        # module overview
        blob_creation_demo,         # blob preprocessing
        image_classification_demo,  # classification
        object_detection_yolo_demo, # YOLO detection
        object_detection_ssd_demo,  # SSD detection
        face_detection_dnn_demo,    # face detection
        semantic_segmentation_demo, # segmentation
        pose_estimation_dnn_demo,   # pose estimation
        dnn_performance_tips,       # performance tips
        model_download_guide,       # model downloads
    )
    for demo in demos:
        demo()

    print("\nDNN 모듈 데모 완료!")
535
536
# Run all demos only when executed as a script, not on import.
if __name__ == '__main__':
    main()