๋ชจ๋ธ ํ‰๊ฐ€ (Model Evaluation)

๋ชจ๋ธ ํ‰๊ฐ€ (Model Evaluation)

๊ฐœ์š”

๋ชจ๋ธ ํ‰๊ฐ€๋Š” ํ•™์Šต๋œ ๋ชจ๋ธ์˜ ์„ฑ๋Šฅ์„ ๊ฐ๊ด€์ ์œผ๋กœ ์ธก์ •ํ•˜๋Š” ๊ณผ์ •์ž…๋‹ˆ๋‹ค. ๋ถ„๋ฅ˜์™€ ํšŒ๊ท€ ๋ฌธ์ œ์— ๋”ฐ๋ผ ๋‹ค๋ฅธ ํ‰๊ฐ€ ์ง€ํ‘œ๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.


1. ๋ถ„๋ฅ˜ ํ‰๊ฐ€ ์ง€ํ‘œ

1.1 ํ˜ผ๋™ ํ–‰๋ ฌ (Confusion Matrix)

import numpy as np
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt

# ์˜ˆ์‹œ ๋ฐ์ดํ„ฐ
y_true = [1, 0, 1, 1, 0, 1, 0, 0, 1, 1]
y_pred = [1, 0, 1, 0, 0, 1, 1, 0, 1, 1]

# ํ˜ผ๋™ ํ–‰๋ ฌ
cm = confusion_matrix(y_true, y_pred)
print("ํ˜ผ๋™ ํ–‰๋ ฌ:")
print(cm)

# ์‹œ๊ฐํ™”
fig, ax = plt.subplots(figsize=(6, 5))
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Negative', 'Positive'])
disp.plot(ax=ax, cmap='Blues')
plt.title('Confusion Matrix')
plt.show()

# ํ˜ผ๋™ ํ–‰๋ ฌ ์š”์†Œ
tn, fp, fn, tp = cm.ravel()
print(f"\nTN (True Negative): {tn}")
print(f"FP (False Positive): {fp} - Type I Error")
print(f"FN (False Negative): {fn} - Type II Error")
print(f"TP (True Positive): {tp}")

1.2 ์ •ํ™•๋„ (Accuracy)

from sklearn.metrics import accuracy_score

# Accuracy = (TP + TN) / (TP + TN + FP + FN)
accuracy = accuracy_score(y_true, y_pred)
print(f"์ •ํ™•๋„: {accuracy:.4f}")

# ์ˆ˜๋™ ๊ณ„์‚ฐ
accuracy_manual = (tp + tn) / (tp + tn + fp + fn)
print(f"์ •ํ™•๋„ (์ˆ˜๋™): {accuracy_manual:.4f}")

# ์ฃผ์˜: ๋ถˆ๊ท ํ˜• ๋ฐ์ดํ„ฐ์—์„œ๋Š” ์ •ํ™•๋„๋งŒ์œผ๋กœ ํ‰๊ฐ€ ๋ถ€์ ์ ˆ
# ์˜ˆ: 99% negative โ†’ ๋ชจ๋‘ negative ์˜ˆ์ธกํ•ด๋„ 99% ์ •ํ™•๋„

1.3 ์ •๋ฐ€๋„, ์žฌํ˜„์œจ, F1-score

from sklearn.metrics import precision_score, recall_score, f1_score

# Precision = TP / (TP + FP)
# "์–‘์„ฑ์œผ๋กœ ์˜ˆ์ธกํ•œ ๊ฒƒ ์ค‘ ์‹ค์ œ ์–‘์„ฑ์˜ ๋น„์œจ"
precision = precision_score(y_true, y_pred)
print(f"์ •๋ฐ€๋„ (Precision): {precision:.4f}")

# Recall (Sensitivity) = TP / (TP + FN)
# "์‹ค์ œ ์–‘์„ฑ ์ค‘ ์–‘์„ฑ์œผ๋กœ ์˜ˆ์ธกํ•œ ๋น„์œจ"
recall = recall_score(y_true, y_pred)
print(f"์žฌํ˜„์œจ (Recall): {recall:.4f}")

# F1-Score = 2 * (Precision * Recall) / (Precision + Recall)
# ์ •๋ฐ€๋„์™€ ์žฌํ˜„์œจ์˜ ์กฐํ™” ํ‰๊ท 
f1 = f1_score(y_true, y_pred)
print(f"F1-Score: {f1:.4f}")

# ์ˆ˜๋™ ๊ณ„์‚ฐ
precision_manual = tp / (tp + fp)
recall_manual = tp / (tp + fn)
f1_manual = 2 * precision_manual * recall_manual / (precision_manual + recall_manual)
print(f"\n์ˆ˜๋™ ๊ณ„์‚ฐ:")
print(f"Precision: {precision_manual:.4f}")
print(f"Recall: {recall_manual:.4f}")
print(f"F1: {f1_manual:.4f}")

1.4 ๋ถ„๋ฅ˜ ๋ฆฌํฌํŠธ

from sklearn.metrics import classification_report

y_true = [0, 0, 0, 1, 1, 1, 2, 2, 2]
y_pred = [0, 0, 1, 1, 1, 2, 2, 2, 0]

# Per-class precision/recall/F1 plus macro/weighted averages
report = classification_report(y_true, y_pred, target_names=['Class A', 'Class B', 'Class C'])
print("분류 리포트:")
print(report)

# As a dictionary for programmatic access.
# BUG FIX: without target_names the dict keys are the raw labels ('0', '1', '2'),
# so report_dict['Class B'] raised KeyError. Pass the same target_names as above
# so the keys match the displayed class names.
report_dict = classification_report(y_true, y_pred,
                                    target_names=['Class A', 'Class B', 'Class C'],
                                    output_dict=True)
print(f"\nClass B의 F1-score: {report_dict['Class B']['f1-score']:.4f}")

1.5 ROC ๊ณก์„ ๊ณผ AUC

from sklearn.metrics import roc_curve, roc_auc_score, auc
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Prepare data (binary classification task)
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, test_size=0.2, random_state=42
)

# Train the model
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)

# Predicted probability of the positive class (column 1 of predict_proba)
y_proba = model.predict_proba(X_test)[:, 1]

# ROC curve: (FPR, TPR) pairs across all decision thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_proba)
roc_auc = auc(fpr, tpr)

# Visualization; the diagonal is the expected curve of a random classifier
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, 'b-', linewidth=2, label=f'ROC Curve (AUC = {roc_auc:.4f})')
plt.plot([0, 1], [0, 1], 'r--', linewidth=1, label='Random Classifier')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.title('ROC Curve')
plt.legend(loc='lower right')
plt.grid(True, alpha=0.3)
plt.show()

print(f"AUC Score: {roc_auc:.4f}")
print(f"AUC Score (sklearn): {roc_auc_score(y_test, y_proba):.4f}")

1.6 PR ๊ณก์„  (Precision-Recall)

from sklearn.metrics import precision_recall_curve, average_precision_score

# Precision/recall pairs across all thresholds (reuses y_test / y_proba from above)
precision, recall, thresholds = precision_recall_curve(y_test, y_proba)
ap = average_precision_score(y_test, y_proba)

# Visualization
plt.figure(figsize=(8, 6))
plt.plot(recall, precision, 'b-', linewidth=2, label=f'PR Curve (AP = {ap:.4f})')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curve')
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()

print(f"Average Precision: {ap:.4f}")

# ROC vs PR
# - ROC: stable even on imbalanced data, but FPR can look deceptively low when positives are rare
# - PR: more sensitive on imbalanced data; focuses on positive-class prediction quality

2. ๋‹ค์ค‘ ๋ถ„๋ฅ˜ ํ‰๊ฐ€

2.1 ๋‹ค์ค‘ ๋ถ„๋ฅ˜ ์ง€ํ‘œ

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42
)

model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

# ์ •ํ™•๋„
print(f"์ •ํ™•๋„: {accuracy_score(y_test, y_pred):.4f}")

# F1-score (๋‹ค์–‘ํ•œ ํ‰๊ท  ๋ฐฉ๋ฒ•)
print(f"\nF1-Score (macro): {f1_score(y_test, y_pred, average='macro'):.4f}")
print(f"F1-Score (weighted): {f1_score(y_test, y_pred, average='weighted'):.4f}")
print(f"F1-Score (micro): {f1_score(y_test, y_pred, average='micro'):.4f}")

# macro: ๊ฐ ํด๋ž˜์Šค์˜ F1์„ ๋‹จ์ˆœ ํ‰๊ท 
# weighted: ๊ฐ ํด๋ž˜์Šค์˜ ์ƒ˜ํ”Œ ์ˆ˜๋กœ ๊ฐ€์ค‘ ํ‰๊ท 
# micro: ์ „์ฒด TP, FP, FN์„ ํ•ฉ์‚ฐํ•˜์—ฌ ๊ณ„์‚ฐ

2.2 ๋‹ค์ค‘ ํด๋ž˜์Šค ROC

from sklearn.preprocessing import label_binarize

# One-vs-rest binarization: each class gets its own 0/1 column for a separate ROC
y_test_bin = label_binarize(y_test, classes=[0, 1, 2])
y_proba = model.predict_proba(X_test)

# One ROC curve per class (one-vs-rest)
plt.figure(figsize=(10, 6))
colors = ['blue', 'red', 'green']

for i, (color, name) in enumerate(zip(colors, iris.target_names)):
    fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_proba[:, i])
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, color=color, linewidth=2,
             label=f'{name} (AUC = {roc_auc:.4f})')

plt.plot([0, 1], [0, 1], 'k--', linewidth=1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Multi-class ROC Curves')
plt.legend(loc='lower right')
plt.grid(True, alpha=0.3)
plt.show()

3. ํšŒ๊ท€ ํ‰๊ฐ€ ์ง€ํ‘œ

import numpy as np
from sklearn.metrics import (
    mean_absolute_error,
    mean_squared_error,
    r2_score,
    mean_absolute_percentage_error
)

# ์˜ˆ์‹œ ๋ฐ์ดํ„ฐ
y_true = np.array([3, -0.5, 2, 7, 4.5])
y_pred = np.array([2.5, 0.0, 2, 8, 4.0])

# MAE (Mean Absolute Error)
mae = mean_absolute_error(y_true, y_pred)
print(f"MAE: {mae:.4f}")
# ํ•ด์„: ํ‰๊ท ์ ์œผ๋กœ ์˜ˆ์ธก์ด ์‹ค์ œ๊ฐ’์—์„œ {mae} ๋งŒํผ ๋ฒ—์–ด๋‚จ

# MSE (Mean Squared Error)
mse = mean_squared_error(y_true, y_pred)
print(f"MSE: {mse:.4f}")
# ํŠน์ง•: ํฐ ์˜ค์ฐจ์— ๋” ํฐ ํŒจ๋„ํ‹ฐ

# RMSE (Root Mean Squared Error)
rmse = np.sqrt(mse)
print(f"RMSE: {rmse:.4f}")
# ํ•ด์„: ํƒ€๊ฒŸ๊ณผ ๊ฐ™์€ ๋‹จ์œ„๋กœ ํ•ด์„ ๊ฐ€๋Šฅ

# Rยฒ (๊ฒฐ์ •๊ณ„์ˆ˜)
r2 = r2_score(y_true, y_pred)
print(f"Rยฒ: {r2:.4f}")
# ํ•ด์„: 0~1, 1์— ๊ฐ€๊นŒ์šธ์ˆ˜๋ก ์ข‹์Œ, ๋ชจ๋ธ์ด ๋ถ„์‚ฐ์˜ ๋ช‡ %๋ฅผ ์„ค๋ช…ํ•˜๋Š”์ง€

# MAPE (Mean Absolute Percentage Error)
mape = mean_absolute_percentage_error(y_true, y_pred)
print(f"MAPE: {mape:.4f}")
# ์ฃผ์˜: y_true๊ฐ€ 0์— ๊ฐ€๊นŒ์šฐ๋ฉด ๋ถˆ์•ˆ์ •

# ์ˆ˜๋™ ๊ณ„์‚ฐ
print("\n=== ์ˆ˜๋™ ๊ณ„์‚ฐ ===")
print(f"MAE: {np.mean(np.abs(y_true - y_pred)):.4f}")
print(f"MSE: {np.mean((y_true - y_pred)**2):.4f}")
print(f"Rยฒ: {1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2):.4f}")

3.1 Rยฒ Score ํ•ด์„

from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression

diabetes = load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(
    diabetes.data, diabetes.target, test_size=0.2, random_state=42
)

model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

r2 = r2_score(y_test, y_pred)
print(f"Rยฒ Score: {r2:.4f}")
print(f"ํ•ด์„: ๋ชจ๋ธ์ด ํƒ€๊ฒŸ ๋ถ„์‚ฐ์˜ {r2*100:.1f}%๋ฅผ ์„ค๋ช…ํ•ฉ๋‹ˆ๋‹ค.")

# ์‹œ๊ฐํ™”
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred, alpha=0.7)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--', linewidth=2)
plt.xlabel('์‹ค์ œ๊ฐ’')
plt.ylabel('์˜ˆ์ธก๊ฐ’')
plt.title(f'์‹ค์ œ๊ฐ’ vs ์˜ˆ์ธก๊ฐ’ (Rยฒ = {r2:.4f})')
plt.grid(True, alpha=0.3)
plt.show()

4. ํ‰๊ฐ€ ์ง€ํ‘œ ์„ ํƒ ๊ฐ€์ด๋“œ

"""
๋ถ„๋ฅ˜ ๋ฌธ์ œ:

1. ๊ท ํ˜• ๋ฐ์ดํ„ฐ
   - Accuracy, F1-score

2. ๋ถˆ๊ท ํ˜• ๋ฐ์ดํ„ฐ
   - Precision, Recall, F1-score, PR-AUC
   - ์–‘์„ฑ ํด๋ž˜์Šค๊ฐ€ ์ค‘์š”: Recall ์ค‘์‹œ
   - ์˜คํƒ์ด ๋น„์šฉ: Precision ์ค‘์‹œ

3. ํ™•๋ฅ  ์˜ˆ์ธก ํ’ˆ์งˆ
   - ROC-AUC, PR-AUC, Log Loss

4. ๋‹ค์ค‘ ๋ถ„๋ฅ˜
   - Macro F1: ํด๋ž˜์Šค ๊ท ๋“ฑ ์ค‘์š”
   - Weighted F1: ์ƒ˜ํ”Œ ์ˆ˜ ๋น„๋ก€ ์ค‘์š”
   - Micro F1: ์ „์ฒด ์ •ํ™•๋„์™€ ์œ ์‚ฌ


ํšŒ๊ท€ ๋ฌธ์ œ:

1. ๊ธฐ๋ณธ
   - MSE, RMSE, MAE

2. ์ด์ƒ์น˜ ๋ฏผ๊ฐ
   - MAE (robust), MSE (sensitive)

3. ์ƒ๋Œ€์  ์˜ค์ฐจ
   - MAPE, Rยฒ

4. ๋ชจ๋ธ ๋น„๊ต
   - Rยฒ (0~1 ๋ฒ”์œ„๋กœ ์ •๊ทœํ™”๋จ)
"""

# ํ‰๊ฐ€ ์ง€ํ‘œ ๋น„๊ต ํ•จ์ˆ˜
def evaluate_classification(y_true, y_pred, y_proba=None):
    """Print a summary of standard classification metrics.

    Args:
        y_true: ground-truth labels.
        y_pred: predicted labels.
        y_proba: positive-class scores; when given, ROC-AUC is printed as well
            (assumes binary labels for roc_auc_score — TODO confirm for multi-class).
    """
    rows = [
        ("Accuracy:  ", accuracy_score(y_true, y_pred)),
        ("Precision: ", precision_score(y_true, y_pred, average='weighted')),
        ("Recall:    ", recall_score(y_true, y_pred, average='weighted')),
        ("F1-Score:  ", f1_score(y_true, y_pred, average='weighted')),
    ]
    if y_proba is not None:
        rows.append(("ROC-AUC:   ", roc_auc_score(y_true, y_proba)))
    print("=== 분류 평가 결과 ===")
    for label, value in rows:
        print(f"{label}{value:.4f}")

def evaluate_regression(y_true, y_pred):
    """Print MAE, MSE, RMSE and R² for a regression model's predictions."""
    mae = mean_absolute_error(y_true, y_pred)
    mse = mean_squared_error(y_true, y_pred)
    r2 = r2_score(y_true, y_pred)
    print("=== 회귀 평가 결과 ===")
    print(f"MAE:  {mae:.4f}")
    print(f"MSE:  {mse:.4f}")
    # RMSE is just the square root of MSE, in the target's own units
    print(f"RMSE: {np.sqrt(mse):.4f}")
    print(f"R²:   {r2:.4f}")

5. ํ•™์Šต ๊ณก์„ ๊ณผ ๊ฒ€์ฆ ๊ณก์„ 

5.1 ํ•™์Šต ๊ณก์„  (Learning Curve)

from sklearn.model_selection import learning_curve

# Prepare data
cancer = load_breast_cancer()
X, y = cancer.data, cancer.target

# Cross-validated scores at 10 training-set sizes from 10% to 100% of the data
train_sizes, train_scores, val_scores = learning_curve(
    LogisticRegression(max_iter=1000),
    X, y,
    train_sizes=np.linspace(0.1, 1.0, 10),
    cv=5,
    scoring='accuracy'
)

# Mean and standard deviation across the 5 CV folds
train_mean = train_scores.mean(axis=1)
train_std = train_scores.std(axis=1)
val_mean = val_scores.mean(axis=1)
val_std = val_scores.std(axis=1)

# Visualization: shaded bands show +/- one standard deviation around each mean
plt.figure(figsize=(10, 6))
plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.1, color='blue')
plt.fill_between(train_sizes, val_mean - val_std, val_mean + val_std, alpha=0.1, color='orange')
plt.plot(train_sizes, train_mean, 'o-', color='blue', label='Training Score')
plt.plot(train_sizes, val_mean, 'o-', color='orange', label='Validation Score')
plt.xlabel('Training Set Size')
plt.ylabel('Accuracy')
plt.title('Learning Curve')
plt.legend(loc='best')
plt.grid(True, alpha=0.3)
plt.show()

# How to read the curves:
# - both curves low              -> underfitting
# - train high, validation low   -> overfitting
# - curves converge              -> good fit

5.2 ๊ฒ€์ฆ ๊ณก์„  (Validation Curve)

from sklearn.model_selection import validation_curve

# Hyperparameter range: 10 values of C log-spaced between 1e-4 and 1e2
param_range = np.logspace(-4, 2, 10)

# Cross-validated scores as a function of the regularization strength C
train_scores, val_scores = validation_curve(
    LogisticRegression(max_iter=1000),
    X, y,
    param_name='C',
    param_range=param_range,
    cv=5,
    scoring='accuracy'
)

train_mean = train_scores.mean(axis=1)
train_std = train_scores.std(axis=1)
val_mean = val_scores.mean(axis=1)
val_std = val_scores.std(axis=1)

# Visualization (log-scaled x-axis to match the log-spaced C values)
plt.figure(figsize=(10, 6))
plt.semilogx(param_range, train_mean, 'o-', color='blue', label='Training Score')
plt.semilogx(param_range, val_mean, 'o-', color='orange', label='Validation Score')
plt.fill_between(param_range, train_mean - train_std, train_mean + train_std, alpha=0.1, color='blue')
plt.fill_between(param_range, val_mean - val_std, val_mean + val_std, alpha=0.1, color='orange')
plt.xlabel('C (Regularization Parameter)')
plt.ylabel('Accuracy')
plt.title('Validation Curve')
plt.legend(loc='best')
plt.grid(True, alpha=0.3)
plt.show()

์—ฐ์Šต ๋ฌธ์ œ

๋ฌธ์ œ 1: ๋ถ„๋ฅ˜ ํ‰๊ฐ€

ํ˜ผ๋™ ํ–‰๋ ฌ์—์„œ Precision, Recall, F1-score๋ฅผ ๊ณ„์‚ฐํ•˜์„ธ์š”.

# TN=50, FP=10, FN=5, TP=35

# Solution: derive the three metrics straight from the confusion-matrix counts
tn, fp, fn, tp = 50, 10, 5, 35

predicted_positive = tp + fp   # everything the model called positive
actual_positive = tp + fn      # everything that truly is positive

precision = tp / predicted_positive
recall = tp / actual_positive
# F1 is the harmonic mean of precision and recall
f1 = 2 * precision * recall / (precision + recall)

for label, value in (("Precision", precision), ("Recall", recall), ("F1-Score", f1)):
    print(f"{label}: {value:.4f}")

๋ฌธ์ œ 2: ํšŒ๊ท€ ํ‰๊ฐ€

์˜ˆ์ธก๊ฐ’๊ณผ ์‹ค์ œ๊ฐ’์œผ๋กœ Rยฒ๋ฅผ ๊ณ„์‚ฐํ•˜์„ธ์š”.

y_true = [100, 150, 200, 250, 300]
y_pred = [110, 140, 210, 240, 290]

# ํ’€์ด
from sklearn.metrics import r2_score
print(f"Rยฒ Score: {r2_score(y_true, y_pred):.4f}")

์š”์•ฝ

| 지표 | 분류/회귀 | 범위 | 설명 |
|-----------|-----------|--------|--------------------------------|
| Accuracy | 분류 | 0~1 | 전체 정답 비율 |
| Precision | 분류 | 0~1 | 양성 예측 중 실제 양성 비율 |
| Recall | 분류 | 0~1 | 실제 양성 중 양성 예측 비율 |
| F1-Score | 분류 | 0~1 | Precision/Recall의 조화평균 |
| ROC-AUC | 분류 | 0~1 | 분류기의 전반적 성능 |
| MAE | 회귀 | 0~∞ | 평균 절대 오차 |
| MSE | 회귀 | 0~∞ | 평균 제곱 오차 |
| R² | 회귀 | -∞~1 | 설명된 분산 비율 |