MLflow ๊ณ ๊ธ‰

MLflow ๊ณ ๊ธ‰

1. MLflow Projects

MLflow Projects๋Š” ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ML ์ฝ”๋“œ ํŒจํ‚ค์ง• ํ˜•์‹์ž…๋‹ˆ๋‹ค.

1.1 ํ”„๋กœ์ ํŠธ ๊ตฌ์กฐ

my_ml_project/
โ”œโ”€โ”€ MLproject              # ํ”„๋กœ์ ํŠธ ์ •์˜ ํŒŒ์ผ
โ”œโ”€โ”€ conda.yaml             # Conda ํ™˜๊ฒฝ ์ •์˜
โ”œโ”€โ”€ requirements.txt       # pip ์˜์กด์„ฑ (์„ ํƒ)
โ”œโ”€โ”€ train.py               # ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ
โ”œโ”€โ”€ evaluate.py            # ํ‰๊ฐ€ ์Šคํฌ๋ฆฝํŠธ
โ””โ”€โ”€ data/
    โ””โ”€โ”€ sample_data.csv

1.2 MLproject ํŒŒ์ผ

# MLproject
name: churn-prediction

# ํ™˜๊ฒฝ ์ •์˜ (3๊ฐ€์ง€ ์˜ต์…˜)
# ์˜ต์…˜ 1: Conda
conda_env: conda.yaml

# ์˜ต์…˜ 2: Docker
# docker_env:
#   image: my-docker-image:latest

# ์˜ต์…˜ 3: System (ํ˜„์žฌ ํ™˜๊ฒฝ ์‚ฌ์šฉ)
# python_env: python_env.yaml

# ์—”ํŠธ๋ฆฌ ํฌ์ธํŠธ
entry_points:
  main:
    parameters:
      data_path: {type: str, default: "data/train.csv"}
      n_estimators: {type: int, default: 100}
      max_depth: {type: int, default: 5}
      learning_rate: {type: float, default: 0.1}
    command: "python train.py --data-path {data_path} --n-estimators {n_estimators} --max-depth {max_depth} --learning-rate {learning_rate}"

  evaluate:
    parameters:
      model_path: {type: str}
      test_data: {type: str}
    command: "python evaluate.py --model-path {model_path} --test-data {test_data}"

  hyperparameter_search:
    parameters:
      n_trials: {type: int, default: 50}
    command: "python hyperparam_search.py --n-trials {n_trials}"

1.3 conda.yaml

# conda.yaml
name: churn-prediction-env
channels:
  - conda-forge
  - defaults
dependencies:
  - python=3.9
  - pip
  - scikit-learn=1.2.0
  - pandas=1.5.0
  - numpy=1.23.0
  - pip:
    - mlflow>=2.0
    - xgboost>=1.7

1.4 ํ”„๋กœ์ ํŠธ ์‹คํ–‰

# ๋กœ์ปฌ ์‹คํ–‰
mlflow run . -P n_estimators=200 -P max_depth=10

# Git์—์„œ ์ง์ ‘ ์‹คํ–‰
mlflow run https://github.com/user/ml-project.git -P data_path=s3://bucket/data.csv

# ํŠน์ • ๋ธŒ๋žœ์น˜/ํƒœ๊ทธ
mlflow run https://github.com/user/ml-project.git --version main

# Docker ํ™˜๊ฒฝ์—์„œ ์‹คํ–‰
mlflow run . --env-manager docker

# ํŠน์ • ์—”ํŠธ๋ฆฌ ํฌ์ธํŠธ ์‹คํ–‰
mlflow run . -e evaluate -P model_path=models/model.pkl -P test_data=data/test.csv

# ์‹คํ—˜ ์ง€์ •
mlflow run . --experiment-name "production-training"

1.5 train.py ์˜ˆ์‹œ

"""
train.py - MLflow Project ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ
"""

import argparse
import mlflow
import mlflow.sklearn
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

def main(args):
    """Train a GradientBoosting classifier and log everything to MLflow.

    Expects a CSV at ``args.data_path`` containing a ``target`` column;
    every other column is treated as a feature.
    """
    # Enable scikit-learn autologging (captures params, metrics, model).
    mlflow.sklearn.autolog()

    with mlflow.start_run():
        # Load the dataset and split features from the label column.
        frame = pd.read_csv(args.data_path)
        features = frame.drop("target", axis=1)
        labels = frame["target"]

        train_x, test_x, train_y, test_y = train_test_split(
            features, labels, test_size=0.2, random_state=42
        )

        # Record extra parameters that autolog does not capture.
        mlflow.log_param("data_path", args.data_path)
        mlflow.log_param("train_size", len(train_x))

        # Fit the model with the CLI-supplied hyperparameters.
        clf = GradientBoostingClassifier(
            n_estimators=args.n_estimators,
            max_depth=args.max_depth,
            learning_rate=args.learning_rate,
            random_state=42
        )
        clf.fit(train_x, train_y)

        # Evaluate on the held-out split; macro averaging over classes.
        predicted = clf.predict(test_x)
        metrics = {
            "accuracy": accuracy_score(test_y, predicted),
            "precision": precision_score(test_y, predicted, average='macro'),
            "recall": recall_score(test_y, predicted, average='macro'),
            "f1": f1_score(test_y, predicted, average='macro')
        }

        for metric_name, metric_value in metrics.items():
            mlflow.log_metric(metric_name, metric_value)

        print(f"Model trained with accuracy: {metrics['accuracy']:.4f}")

if __name__ == "__main__":
    # CLI flags mirror the parameters declared in the MLproject entry point.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--data-path", type=str, required=True)
    arg_parser.add_argument("--n-estimators", type=int, default=100)
    arg_parser.add_argument("--max-depth", type=int, default=5)
    arg_parser.add_argument("--learning-rate", type=float, default=0.1)
    main(arg_parser.parse_args())

2. MLflow Models

2.1 ๋ชจ๋ธ ํ”Œ๋ ˆ์ด๋ฒ„ (Flavors)

"""
MLflow ๋ชจ๋ธ ํ”Œ๋ ˆ์ด๋ฒ„
"""

import mlflow

# ์ง€์›๋˜๋Š” ํ”Œ๋ ˆ์ด๋ฒ„
# Built-in MLflow model flavors mapped to short descriptions.
flavors = dict(
    sklearn="scikit-learn 모델",
    pytorch="PyTorch 모델",
    tensorflow="TensorFlow/Keras 모델",
    xgboost="XGBoost 모델",
    lightgbm="LightGBM 모델",
    catboost="CatBoost 모델",
    transformers="HuggingFace Transformers",
    langchain="LangChain 모델",
    onnx="ONNX 모델",
    pyfunc="Python 함수 (커스텀)",
)

2.2 ๋ชจ๋ธ ์‹œ๊ทธ๋‹ˆ์ฒ˜

"""
๋ชจ๋ธ ์‹œ๊ทธ๋‹ˆ์ฒ˜ ์ •์˜
"""

import mlflow
from mlflow.models.signature import ModelSignature, infer_signature
from mlflow.types.schema import Schema, ColSpec

# ๋ฐฉ๋ฒ• 1: ์ž๋™ ์ถ”๋ก 
signature = infer_signature(X_train, model.predict(X_train))

# ๋ฐฉ๋ฒ• 2: ๋ช…์‹œ์  ์ •์˜
input_schema = Schema([
    ColSpec("double", "feature_1"),
    ColSpec("double", "feature_2"),
    ColSpec("string", "category")
])
output_schema = Schema([ColSpec("long", "prediction")])
signature = ModelSignature(inputs=input_schema, outputs=output_schema)

# ๋ชจ๋ธ ์ €์žฅ ์‹œ ์‹œ๊ทธ๋‹ˆ์ฒ˜ ํฌํ•จ
mlflow.sklearn.log_model(
    model,
    "model",
    signature=signature,
    input_example=X_train[:5]  # ์ž…๋ ฅ ์˜ˆ์‹œ
)

2.3 ์ปค์Šคํ…€ ๋ชจ๋ธ (pyfunc)

"""
์ปค์Šคํ…€ MLflow ๋ชจ๋ธ (pyfunc)
"""

import mlflow
import mlflow.pyfunc
import pandas as pd

class CustomModel(mlflow.pyfunc.PythonModel):
    """Custom pyfunc wrapper combining a preprocessor and a classifier.

    ``predict`` applies the preprocessor, scores with ``predict_proba``,
    and thresholds the positive-class probability into a 0/1 label.
    """

    def __init__(self, preprocessor, model, threshold=0.5):
        self.preprocessor = preprocessor
        self.model = model
        self.threshold = threshold

    def load_context(self, context):
        """Hook for loading extra files from ``context.artifacts``."""
        import joblib
        # Nothing is loaded here; kept as an extension point.
        pass

    def predict(self, context, model_input: pd.DataFrame) -> pd.DataFrame:
        """Return a frame with a binary ``prediction`` and its ``probability``."""
        # Preprocessing step.
        transformed = self.preprocessor.transform(model_input)

        # Positive-class probability from the wrapped classifier.
        positive_proba = self.model.predict_proba(transformed)[:, 1]

        # Post-processing: apply the decision threshold.
        hard_labels = (positive_proba >= self.threshold).astype(int)

        return pd.DataFrame({
            "prediction": hard_labels,
            "probability": positive_proba
        })


# ์ปค์Šคํ…€ ๋ชจ๋ธ ์ €์žฅ
custom_model = CustomModel(preprocessor, trained_model, threshold=0.6)

# Conda ํ™˜๊ฒฝ ์ •์˜
conda_env = {
    "channels": ["conda-forge"],
    "dependencies": [
        "python=3.9",
        "pip",
        {"pip": ["mlflow", "scikit-learn", "pandas"]}
    ],
    "name": "custom_model_env"
}

with mlflow.start_run():
    mlflow.pyfunc.log_model(
        artifact_path="model",
        python_model=custom_model,
        conda_env=conda_env,
        artifacts={
            "preprocessor": "artifacts/preprocessor.pkl",
            "config": "artifacts/config.yaml"
        },
        signature=signature,
        input_example=sample_input
    )

2.4 ๋ชจ๋ธ ํฌ๋งท ์ €์žฅ

model/
โ”œโ”€โ”€ MLmodel                    # ๋ชจ๋ธ ๋ฉ”ํƒ€๋ฐ์ดํ„ฐ
โ”œโ”€โ”€ model.pkl                  # ์ง๋ ฌํ™”๋œ ๋ชจ๋ธ
โ”œโ”€โ”€ conda.yaml                 # Conda ํ™˜๊ฒฝ
โ”œโ”€โ”€ python_env.yaml            # Python ํ™˜๊ฒฝ
โ”œโ”€โ”€ requirements.txt           # pip ์˜์กด์„ฑ
โ”œโ”€โ”€ input_example.json         # ์ž…๋ ฅ ์˜ˆ์‹œ
โ””โ”€โ”€ registered_model_meta      # ๋ ˆ์ง€์ŠคํŠธ๋ฆฌ ๋ฉ”ํƒ€๋ฐ์ดํ„ฐ
# MLmodel ํŒŒ์ผ ๋‚ด์šฉ
artifact_path: model
flavors:
  python_function:
    env:
      conda: conda.yaml
      virtualenv: python_env.yaml
    loader_module: mlflow.sklearn
    model_path: model.pkl
    predict_fn: predict
    python_version: 3.9.0
  sklearn:
    code: null
    pickled_model: model.pkl
    serialization_format: cloudpickle
    sklearn_version: 1.2.0
mlflow_version: 2.8.0
model_uuid: a1b2c3d4-e5f6-7890-abcd-ef1234567890
signature:
  inputs: '[{"name": "feature_1", "type": "double"}, ...]'
  outputs: '[{"type": "long"}]'

3. Model Registry

3.1 ๋ชจ๋ธ ๋“ฑ๋ก

"""
Model Registry ์‚ฌ์šฉ๋ฒ•
"""

import mlflow
from mlflow.tracking import MlflowClient

# ๋ฐฉ๋ฒ• 1: log_model ์‹œ ์ง์ ‘ ๋“ฑ๋ก
with mlflow.start_run():
    # ํ•™์Šต...
    mlflow.sklearn.log_model(
        model,
        "model",
        registered_model_name="ChurnPredictionModel"  # ์ž๋™ ๋“ฑ๋ก
    )

# ๋ฐฉ๋ฒ• 2: ๊ธฐ์กด run์—์„œ ๋“ฑ๋ก
result = mlflow.register_model(
    model_uri="runs:/RUN_ID/model",
    name="ChurnPredictionModel"
)
print(f"Version: {result.version}")

# ๋ฐฉ๋ฒ• 3: MlflowClient ์‚ฌ์šฉ
client = MlflowClient()
client.create_registered_model(
    name="ChurnPredictionModel",
    description="Customer churn prediction model",
    tags={"team": "ML", "project": "retention"}
)

# ๋ฒ„์ „ ์ถ”๊ฐ€
client.create_model_version(
    name="ChurnPredictionModel",
    source="runs:/RUN_ID/model",
    run_id="RUN_ID",
    description="Initial version with RF"
)

3.2 ๋ชจ๋ธ ์Šคํ…Œ์ด์ง€ ๊ด€๋ฆฌ

"""
๋ชจ๋ธ ์Šคํ…Œ์ด์ง€ ์ „ํ™˜
"""

from mlflow.tracking import MlflowClient

client = MlflowClient()

# ์Šคํ…Œ์ด์ง€ ์ข…๋ฅ˜: None, Staging, Production, Archived

# Staging์œผ๋กœ ์ „ํ™˜
client.transition_model_version_stage(
    name="ChurnPredictionModel",
    version=1,
    stage="Staging",
    archive_existing_versions=False
)

# Production์œผ๋กœ ์Šน๊ฒฉ
client.transition_model_version_stage(
    name="ChurnPredictionModel",
    version=1,
    stage="Production",
    archive_existing_versions=True  # ๊ธฐ์กด Production ๋ฒ„์ „ ์ž๋™ ์•„์นด์ด๋ธŒ
)

# ๋ชจ๋ธ ๋กœ๋“œ (์Šคํ…Œ์ด์ง€๋ณ„)
staging_model = mlflow.pyfunc.load_model("models:/ChurnPredictionModel/Staging")
prod_model = mlflow.pyfunc.load_model("models:/ChurnPredictionModel/Production")

# ํŠน์ • ๋ฒ„์ „ ๋กœ๋“œ
model_v1 = mlflow.pyfunc.load_model("models:/ChurnPredictionModel/1")

3.3 ๋ชจ๋ธ ๋ฉ”ํƒ€๋ฐ์ดํ„ฐ ๊ด€๋ฆฌ

"""
๋ชจ๋ธ ๋ฒ„์ „ ๋ฉ”ํƒ€๋ฐ์ดํ„ฐ
"""

from mlflow.tracking import MlflowClient

client = MlflowClient()

# ๋ชจ๋ธ ์„ค๋ช… ์—…๋ฐ์ดํŠธ
client.update_registered_model(
    name="ChurnPredictionModel",
    description="Updated description"
)

# ๋ฒ„์ „ ์„ค๋ช… ์—…๋ฐ์ดํŠธ
client.update_model_version(
    name="ChurnPredictionModel",
    version=1,
    description="Improved feature engineering"
)

# ํƒœ๊ทธ ์ถ”๊ฐ€
client.set_registered_model_tag(
    name="ChurnPredictionModel",
    key="task",
    value="binary_classification"
)

client.set_model_version_tag(
    name="ChurnPredictionModel",
    version=1,
    key="validated",
    value="true"
)

# ๋ชจ๋ธ ์ •๋ณด ์กฐํšŒ
model = client.get_registered_model("ChurnPredictionModel")
print(f"Name: {model.name}")
print(f"Description: {model.description}")
print(f"Latest versions: {model.latest_versions}")

# ๋ฒ„์ „ ์ •๋ณด ์กฐํšŒ
version = client.get_model_version("ChurnPredictionModel", 1)
print(f"Version: {version.version}")
print(f"Stage: {version.current_stage}")
print(f"Source: {version.source}")

3.4 ๋ชจ๋ธ ๊ฒ€์ƒ‰

"""
๋“ฑ๋ก๋œ ๋ชจ๋ธ ๊ฒ€์ƒ‰
"""

from mlflow.tracking import MlflowClient

client = MlflowClient()

# ๋ชจ๋“  ๋ชจ๋ธ ์กฐํšŒ
models = client.search_registered_models()
for m in models:
    print(f"Model: {m.name}, Latest: {m.latest_versions}")

# ํ•„ํ„ฐ๋ง ๊ฒ€์ƒ‰
models = client.search_registered_models(
    filter_string="name LIKE '%Churn%'"
)

# ๋ฒ„์ „ ๊ฒ€์ƒ‰
versions = client.search_model_versions(
    filter_string="name='ChurnPredictionModel' and current_stage='Production'"
)
for v in versions:
    print(f"Version {v.version}: {v.current_stage}")

4. MLflow Serving

4.1 ๋กœ์ปฌ ์„œ๋น™

# ๋ชจ๋ธ ์„œ๋น™ (run ID ๊ธฐ๋ฐ˜)
mlflow models serve -m "runs:/RUN_ID/model" -p 5001 --no-conda

# ๋ชจ๋ธ ์„œ๋น™ (Registry ๊ธฐ๋ฐ˜)
mlflow models serve -m "models:/ChurnPredictionModel/Production" -p 5001

# ํ™˜๊ฒฝ ์˜ต์…˜
mlflow models serve -m "models:/MyModel/1" \
    --env-manager local \
    --host 0.0.0.0 \
    --port 5001

4.2 REST API ํ˜ธ์ถœ

"""
MLflow ์„œ๋น™ API ํ˜ธ์ถœ
"""

import requests
import json

# ์—”๋“œํฌ์ธํŠธ
url = "http://localhost:5001/invocations"

# ์ž…๋ ฅ ๋ฐ์ดํ„ฐ (์—ฌ๋Ÿฌ ํ˜•์‹ ์ง€์›)
# ํ˜•์‹ 1: split orientation
data_split = {
    "dataframe_split": {
        "columns": ["feature_1", "feature_2", "feature_3"],
        "data": [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    }
}

# ํ˜•์‹ 2: records orientation
data_records = {
    "dataframe_records": [
        {"feature_1": 1.0, "feature_2": 2.0, "feature_3": 3.0},
        {"feature_1": 4.0, "feature_2": 5.0, "feature_3": 6.0}
    ]
}

# ํ˜•์‹ 3: instances (TensorFlow Serving ํ˜ธํ™˜)
data_instances = {
    "instances": [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
}

# API ํ˜ธ์ถœ
response = requests.post(
    url,
    headers={"Content-Type": "application/json"},
    data=json.dumps(data_split)
)

print(f"Status: {response.status_code}")
print(f"Predictions: {response.json()}")

4.3 Docker ์ด๋ฏธ์ง€ ๋นŒ๋“œ

# Docker ์ด๋ฏธ์ง€ ์ƒ์„ฑ
mlflow models build-docker \
    -m "models:/ChurnPredictionModel/Production" \
    -n "churn-model:latest"

# ์ด๋ฏธ์ง€ ์‹คํ–‰
docker run -p 5001:8080 churn-model:latest

# Dockerfile ์ง์ ‘ ์ƒ์„ฑ
mlflow models generate-dockerfile \
    -m "models:/ChurnPredictionModel/Production" \
    -d ./docker-build

4.4 ๋ฐฐ์น˜ ์ถ”๋ก 

"""
๋ฐฐ์น˜ ์ถ”๋ก  ์ˆ˜ํ–‰
"""

import mlflow
import pandas as pd

# ๋ชจ๋ธ ๋กœ๋“œ
model = mlflow.pyfunc.load_model("models:/ChurnPredictionModel/Production")

# ๋Œ€๋Ÿ‰ ๋ฐ์ดํ„ฐ ๋กœ๋“œ
batch_data = pd.read_parquet("s3://bucket/batch_data.parquet")

# ๋ฐฐ์น˜ ์˜ˆ์ธก
predictions = model.predict(batch_data)

# ๊ฒฐ๊ณผ ์ €์žฅ
results = batch_data.copy()
results["prediction"] = predictions
results.to_parquet("s3://bucket/predictions.parquet")

5. ๊ณ ๊ธ‰ ์„ค์ •

5.1 ์›๊ฒฉ Tracking Server

# PostgreSQL ๋ฐฑ์—”๋“œ + S3 ์•„ํ‹ฐํŒฉํŠธ ์ €์žฅ์†Œ
mlflow server \
    --backend-store-uri postgresql://user:password@host:5432/mlflow \
    --default-artifact-root s3://mlflow-artifacts/ \
    --host 0.0.0.0 \
    --port 5000

# ํ™˜๊ฒฝ ๋ณ€์ˆ˜ ์„ค์ •
export MLFLOW_TRACKING_URI=http://mlflow-server:5000
export AWS_ACCESS_KEY_ID=your-key
export AWS_SECRET_ACCESS_KEY=your-secret

5.2 ์ธ์ฆ ์„ค์ •

"""
MLflow ์ธ์ฆ ์„ค์ •
"""

import os
import mlflow

# ๊ธฐ๋ณธ ์ธ์ฆ
os.environ["MLFLOW_TRACKING_USERNAME"] = "user"
os.environ["MLFLOW_TRACKING_PASSWORD"] = "password"

# ํ† ํฐ ๊ธฐ๋ฐ˜ ์ธ์ฆ
os.environ["MLFLOW_TRACKING_TOKEN"] = "your-token"

# Azure ML ํ†ตํ•ฉ
os.environ["AZURE_TENANT_ID"] = "tenant-id"
os.environ["AZURE_CLIENT_ID"] = "client-id"
os.environ["AZURE_CLIENT_SECRET"] = "client-secret"

5.3 ํ”Œ๋Ÿฌ๊ทธ์ธ ์‚ฌ์šฉ

"""
MLflow ํ”Œ๋Ÿฌ๊ทธ์ธ ์˜ˆ์‹œ
"""

# Databricks ํ”Œ๋Ÿฌ๊ทธ์ธ
# pip install databricks-cli

import mlflow
mlflow.set_tracking_uri("databricks")
mlflow.set_experiment("/Users/user@email.com/my-experiment")

# Google Cloud ํ”Œ๋Ÿฌ๊ทธ์ธ
# pip install mlflow[google-cloud]
mlflow.set_tracking_uri("gs://bucket/mlflow")

6. ์‹ค์ „ ์›Œํฌํ”Œ๋กœ์šฐ

"""
์ „์ฒด MLflow ์›Œํฌํ”Œ๋กœ์šฐ ์˜ˆ์‹œ
"""

import mlflow
from mlflow.tracking import MlflowClient

# 1. ์„ค์ •
mlflow.set_tracking_uri("http://mlflow-server:5000")
mlflow.set_experiment("production-churn-model")
client = MlflowClient()

# 2. ํ•™์Šต ๋ฐ ์‹คํ—˜
with mlflow.start_run(run_name="rf-optimized") as run:
    # ํ•™์Šต ์ฝ”๋“œ...
    mlflow.sklearn.log_model(model, "model", signature=signature)
    run_id = run.info.run_id

# 3. ๋ชจ๋ธ ๋“ฑ๋ก
model_version = mlflow.register_model(
    f"runs:/{run_id}/model",
    "ChurnPredictionModel"
)

# 4. Staging ์ „ํ™˜
client.transition_model_version_stage(
    name="ChurnPredictionModel",
    version=model_version.version,
    stage="Staging"
)

# 5. ํ…Œ์ŠคํŠธ (Staging์—์„œ)
staging_model = mlflow.pyfunc.load_model("models:/ChurnPredictionModel/Staging")
test_results = evaluate_model(staging_model, test_data)

# 6. Production ์Šน๊ฒฉ
if test_results["accuracy"] > 0.9:
    client.transition_model_version_stage(
        name="ChurnPredictionModel",
        version=model_version.version,
        stage="Production",
        archive_existing_versions=True
    )
    print(f"Model v{model_version.version} promoted to Production!")

์—ฐ์Šต ๋ฌธ์ œ

๋ฌธ์ œ 1: MLflow Project ์ƒ์„ฑ

์™„์ „ํ•œ MLflow Project๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ๋กœ์ปฌ์—์„œ ์‹คํ–‰ํ•˜์„ธ์š”.

๋ฌธ์ œ 2: ์ปค์Šคํ…€ pyfunc ๋ชจ๋ธ

์ „์ฒ˜๋ฆฌ์™€ ํ›„์ฒ˜๋ฆฌ๋ฅผ ํฌํ•จํ•˜๋Š” ์ปค์Šคํ…€ pyfunc ๋ชจ๋ธ์„ ์ž‘์„ฑํ•˜์„ธ์š”.

๋ฌธ์ œ 3: Model Registry ์›Œํฌํ”Œ๋กœ์šฐ

๋ชจ๋ธ์„ ๋“ฑ๋กํ•˜๊ณ  Staging -> Production ์ „ํ™˜์„ ์ž๋™ํ™”ํ•˜์„ธ์š”.


์š”์•ฝ

๊ธฐ๋Šฅ ์„ค๋ช…
MLflow Projects ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ์ฝ”๋“œ ํŒจํ‚ค์ง•
MLflow Models ํ‘œ์ค€ํ™”๋œ ๋ชจ๋ธ ํฌ๋งท
Model Registry ๋ชจ๋ธ ๋ฒ„์ „ ๋ฐ ์Šคํ…Œ์ด์ง€ ๊ด€๋ฆฌ
MLflow Serving REST API ์„œ๋น™
pyfunc ์ปค์Šคํ…€ ๋ชจ๋ธ ๋ž˜ํผ

์ฐธ๊ณ  ์ž๋ฃŒ

to navigate between lessons