15. LLM ์์ด์ ํธ (LLM Agents)
ํ์ต ๋ชฉํ
- ์์ด์ ํธ ๊ฐ๋
๊ณผ ์ํคํ
์ฒ ์ดํด
- ReAct ํจํด ๊ตฌํ
- ๋๊ตฌ ์ฌ์ฉ (Tool Use) ๊ธฐ๋ฒ
- LangChain Agent ํ์ฉ
- ์์จ ์์ด์ ํธ ์์คํ
(AutoGPT ๋ฑ)
1. LLM ์์ด์ ํธ ๊ฐ์
์์ด์ ํธ๋?
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ LLM ์์ด์ ํธ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโค
โ โ
โ โโโโโโโโโโโโโโโ โ
โ โ LLM โ โโโ ๋๋ (์์ฌ๊ฒฐ์ ) โ
โ โ (Brain) โ โ
โ โโโโโโโโฌโโโโโโโ โ
โ โ โ
โ โผ โ
โ โโโโโโโโโโโโโโโ โ
โ โ Planning โ โโโ ๊ณํ ์๋ฆฝ โ
โ โโโโโโโโฌโโโโโโโ โ
โ โ โ
โ โผ โ
โ โโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโ โ
โ โ Tools โ โ Memory โ โโโ ๋๊ตฌ + ๊ธฐ์ต โ
โ โ (๊ฒ์, ๊ณ์ฐ, โ โ (๋ํ ์ด๋ ฅ, โ โ
โ โ ์ฝ๋์คํ) โ โ ์ง์ ๋ฒ ์ด์ค)โ โ
โ โโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโ โ
โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
์์ด์ ํธ vs ์ฑ๋ด
| ํญ๋ชฉ |
์ฑ๋ด |
์์ด์ ํธ |
| ์๋ต ๋ฐฉ์ |
๋จ์ผ ์๋ต |
๋ค๋จ๊ณ ์ถ๋ก |
| ๋๊ตฌ ์ฌ์ฉ |
์ ํ์ |
๋ค์ํ ๋๊ตฌ |
| ์์จ์ฑ |
๋ฎ์ |
๋์ |
| ๊ณํ ์๋ฆฝ |
์์ |
์์ |
| ์์ |
๊ณ ๊ฐ ์ง์ ๋ด |
AutoGPT, Copilot |
2. ReAct (Reasoning + Acting)
ReAct ํจํด
Thought: ๋ฌธ์ ๋ฅผ ๋ถ์ํ๊ณ ๋ค์ ํ๋ ๊ฒฐ์
Action: ๋๊ตฌ ์ ํ ๋ฐ ์
๋ ฅ ๊ฒฐ์
Observation: ๋๊ตฌ ์คํ ๊ฒฐ๊ณผ
... (๋ฐ๋ณต)
Final Answer: ์ต์ข
๋ต๋ณ
ReAct ๊ตฌํ
from openai import OpenAI
client = OpenAI()
# ๋๊ตฌ ์ ์
# Demo tool registry: tool name -> callable.
# WARNING: eval() executes arbitrary Python supplied by the model.
# Fine for a local demo, but use ast.literal_eval or a real math
# parser before exposing this to untrusted input.
tools = {
    "calculator": lambda expr: eval(expr),
    "search": lambda query: f"검색 결과: {query}에 대한 정보...",
    "get_weather": lambda city: f"{city}의 날씨: 맑음, 25도",
}
def react_agent(question, max_steps=5):
    """Minimal ReAct loop.

    The model alternates Thought/Action steps; each tool result is fed
    back as an Observation until the model emits "Final Answer:" or
    max_steps is exhausted.

    Args:
        question: the user's task.
        max_steps: hard cap on model calls (loop-prevention).

    Returns:
        The final answer string, or a failure message.
    """
    system_prompt = """당신은 문제를 단계별로 해결하는 에이전트입니다.
사용 가능한 도구:
- calculator: 수학 계산 (예: "2 + 3 * 4")
- search: 정보 검색 (예: "파이썬 창시자")
- get_weather: 날씨 조회 (예: "서울")
다음 형식을 따르세요:
Thought: [현재 상황 분석 및 다음 행동 계획]
Action: [도구 이름]
Action Input: [도구 입력]
도구 결과를 받으면:
Observation: [결과]
최종 답변이 준비되면:
Final Answer: [답변]
"""
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": question},
    ]

    for step in range(max_steps):
        response = client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            temperature=0,  # deterministic tool selection
        )
        assistant_message = response.choices[0].message.content
        messages.append({"role": "assistant", "content": assistant_message})
        print(f"=== Step {step + 1} ===")
        print(assistant_message)

        # Terminal state: the model produced its final answer.
        if "Final Answer:" in assistant_message:
            return assistant_message.split("Final Answer:")[-1].strip()

        # Parse the requested tool call, if any.
        if "Action:" in assistant_message and "Action Input:" in assistant_message:
            action_line = assistant_message.split("Action:")[-1].split("\n")[0].strip()
            input_line = assistant_message.split("Action Input:")[-1].split("\n")[0].strip()
            if action_line in tools:
                try:
                    observation = tools[action_line](input_line)
                except Exception as e:
                    observation = f"Error: {str(e)}"
                observation_message = f"Observation: {observation}"
                messages.append({"role": "user", "content": observation_message})
                print(observation_message)
            else:
                messages.append({"role": "user", "content": f"Error: Unknown tool '{action_line}'"})
        else:
            # FIX: a malformed reply (neither Action nor Final Answer)
            # previously added no feedback, so the loop could repeat the
            # same broken output until max_steps; nudge the model back
            # to the required format instead.
            messages.append({"role": "user", "content": "Observation: 형식에 맞게 Action 또는 Final Answer를 출력하세요."})

    return "최대 단계 도달, 답변 실패"

# Usage example
answer = react_agent("서울의 날씨를 확인하고, 기온을 섭씨에서 화씨로 변환해주세요.")
print(f"\n최종 답변: {answer}")
Function Calling (OpenAI)
from openai import OpenAI
import json
client = OpenAI()
# ๋๊ตฌ ์ ์
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "ํน์ ๋์์ ํ์ฌ ๋ ์จ ์ ๋ณด๋ฅผ ๊ฐ์ ธ์ต๋๋ค.",
"parameters": {
"type": "object",
"properties": {
"city": {
"type": "string",
"description": "๋์ ์ด๋ฆ (์: Seoul, Tokyo)"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "์จ๋ ๋จ์"
}
},
"required": ["city"]
}
}
},
{
"type": "function",
"function": {
"name": "search_web",
"description": "์น์์ ์ ๋ณด๋ฅผ ๊ฒ์ํฉ๋๋ค.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "๊ฒ์์ด"
}
},
"required": ["query"]
}
}
}
]
# ๋๊ตฌ ๊ตฌํ
def get_weather(city, unit="celsius"):
# ์ค์ ๋ก๋ API ํธ์ถ
weather_data = {
"Seoul": {"temp": 25, "condition": "Sunny"},
"Tokyo": {"temp": 28, "condition": "Cloudy"},
}
data = weather_data.get(city, {"temp": 20, "condition": "Unknown"})
if unit == "fahrenheit":
data["temp"] = data["temp"] * 9/5 + 32
return json.dumps(data)
def search_web(query):
return json.dumps({"results": f"'{query}'์ ๋ํ ๊ฒ์ ๊ฒฐ๊ณผ..."})
tool_implementations = {
"get_weather": get_weather,
"search_web": search_web,
}
def agent_with_tools(user_message):
    """Single-round function-calling agent.

    Sends the user message with tool schemas, executes any tool calls
    the model requests, then asks the model once more to compose the
    final answer from the tool output.

    Returns:
        The model's final text answer.
    """
    messages = [{"role": "user", "content": user_message}]

    response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        tools=tools,
        tool_choice="auto",  # let the model decide whether to call a tool
    )
    assistant_message = response.choices[0].message

    # Did the model request any tool calls?
    if assistant_message.tool_calls:
        messages.append(assistant_message)

        # Execute each requested tool and append its result.
        for tool_call in assistant_message.tool_calls:
            function_name = tool_call.function.name
            function_args = json.loads(tool_call.function.arguments)

            # FIX: a hallucinated tool name previously raised KeyError;
            # report the error back to the model instead of crashing.
            impl = tool_implementations.get(function_name)
            if impl is None:
                function_response = json.dumps({"error": f"Unknown tool: {function_name}"})
            else:
                function_response = impl(**function_args)

            messages.append({
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": function_name,
                "content": function_response,
            })

        # Second round: compose the final answer from tool output.
        final_response = client.chat.completions.create(
            model="gpt-4",
            messages=messages,
        )
        return final_response.choices[0].message.content

    # No tool needed: return the direct answer.
    return assistant_message.content

# Usage example
result = agent_with_tools("서울과 도쿄의 날씨를 비교해주세요.")
print(result)
์ฝ๋ ์คํ ๋๊ตฌ
import subprocess
import sys
import tempfile
import os

def execute_python(code):
    """Execute Python code in a subprocess with a timeout.

    Writes *code* to a temp file, runs it with the current interpreter,
    and returns {"success": bool, "output": str} (stdout on success,
    stderr on failure). NOTE: this is process isolation only, not a
    sandbox — the child has full filesystem/network access.
    """
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write(code)
        temp_path = f.name
    try:
        # FIX: 'python' may not be on PATH (or may resolve to a different
        # interpreter); sys.executable always points at the running one.
        result = subprocess.run(
            [sys.executable, temp_path],
            capture_output=True,
            text=True,
            timeout=10,  # kill runaway code
        )
        output = result.stdout if result.returncode == 0 else result.stderr
        return {"success": result.returncode == 0, "output": output}
    except subprocess.TimeoutExpired:
        return {"success": False, "output": "Timeout"}
    finally:
        # Always remove the temp file, even on timeout/error.
        os.unlink(temp_path)
# ์ฝ๋ ์คํ ๋๊ตฌ ์ ์
code_tool = {
"type": "function",
"function": {
"name": "execute_python",
"description": "Python ์ฝ๋๋ฅผ ์คํํฉ๋๋ค.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "์คํํ Python ์ฝ๋"
}
},
"required": ["code"]
}
}
}
4. LangChain Agent
๊ธฐ๋ณธ ์์ด์ ํธ
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools import Tool, tool
from langchain_community.tools import DuckDuckGoSearchRun

# LLM
llm = ChatOpenAI(model="gpt-4", temperature=0)

# Tools: live web search plus two local helpers.
search = DuckDuckGoSearchRun()

@tool
def calculator(expression: str) -> str:
    """수학 계산을 수행합니다. 입력: 수학 표현식 (예: '2 + 3 * 4')"""
    # WARNING: eval() executes arbitrary code; replace with a real math
    # parser (e.g. ast-based) before accepting untrusted expressions.
    try:
        return str(eval(expression))
    except Exception:  # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt
        return "계산 오류"

@tool
def get_current_time() -> str:
    """현재 시간을 반환합니다."""
    from datetime import datetime
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

tools = [
    Tool(name="Search", func=search.run, description="웹 검색"),
    calculator,
    get_current_time,
]

# Prompt: agent_scratchpad is where intermediate tool steps are injected.
prompt = ChatPromptTemplate.from_messages([
    ("system", "당신은 도움이 되는 AI 어시스턴트입니다. 도구를 사용하여 질문에 답하세요."),
    MessagesPlaceholder(variable_name="chat_history", optional=True),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

# Build and run the agent.
agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

result = agent_executor.invoke({"input": "현재 시간과 오늘의 주요 뉴스를 알려주세요."})
print(result["output"])
ReAct Agent (LangChain)
from langchain.agents import create_react_agent, AgentExecutor
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4", temperature=0)

# Classic ReAct prompt: {tools}/{tool_names} are filled by
# create_react_agent; {agent_scratchpad} accumulates prior
# Thought/Action/Observation steps each iteration.
react_prompt = PromptTemplate.from_template("""Answer the following questions as best you can. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
Question: {input}
Thought:{agent_scratchpad}""")

# Build the agent; handle_parsing_errors=True feeds malformed LLM output
# back as an observation instead of raising, keeping the loop alive.
react_agent = create_react_agent(llm, tools, react_prompt)
agent_executor = AgentExecutor(agent=react_agent, tools=tools, verbose=True, handle_parsing_errors=True)

# Run a search-and-summarize task.
result = agent_executor.invoke({"input": "2024년 미국 대통령 선거 결과를 검색하고 요약해주세요."})
๋ฉ๋ชจ๋ฆฌ๊ฐ ์๋ ์์ด์ ํธ
from langchain.memory import ConversationBufferMemory
from langchain.agents import AgentExecutor, create_openai_tools_agent

# Conversation memory; memory_key must match the MessagesPlaceholder
# variable_name in the prompt below.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# Prompt with a chat-history slot so past turns are replayed on each call.
prompt = ChatPromptTemplate.from_messages([
    ("system", "당신은 도움이 되는 AI 어시스턴트입니다."),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

# Agent wired to the shared memory object via AgentExecutor.
agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)

# Two-turn conversation: the second query relies on remembered state.
agent_executor.invoke({"input": "내 이름은 김철수야."})
agent_executor.invoke({"input": "내 이름이 뭐라고 했지?"})
5. ์์จ ์์ด์ ํธ ์์คํ
Plan-and-Execute
# FIX: plan-and-execute was removed from langchain core
# (`langchain.experimental` no longer exists); it now lives in the
# separate `langchain_experimental` package.
from langchain_experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4", temperature=0)

# Planner decomposes the goal into steps; executor runs each step with tools.
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools, verbose=True)

# Plan-and-Execute agent
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)

# Run a complex multi-step task.
result = agent.run("파이썬의 역사에 대해 조사하고, 주요 버전별 특징을 요약한 마크다운 문서를 작성해주세요.")
AutoGPT ์คํ์ผ ์์ด์ ํธ
class AutoGPTAgent:
    """Autonomous goal-driven agent (AutoGPT style).

    Repeatedly asks the LLM to plan the next task as JSON, executes the
    chosen tool, and feeds results back until the model declares
    COMPLETE or max_iterations is reached.
    """

    def __init__(self, llm, tools, goals):
        self.llm = llm
        self.tools = {t.name: t for t in tools}  # name -> tool lookup
        self.goals = goals
        self.memory = []           # history of {"task": ..., "result": ...}
        self.completed_tasks = []  # descriptions of successful tasks

    def plan(self):
        """Ask the LLM for the next task (or COMPLETE) as a JSON object."""
        prompt = f"""당신은 자율 AI 에이전트입니다.
목표: {self.goals}
완료된 작업:
{self.completed_tasks}
이전 작업 결과:
{self.memory[-5:] if self.memory else "없음"}
사용 가능한 도구:
{list(self.tools.keys())}
다음 작업을 JSON 형식으로 출력하세요:
{{"task": "작업 설명", "tool": "사용할 도구", "input": "도구 입력"}}
모든 목표가 달성되었다면:
{{"task": "COMPLETE", "summary": "결과 요약"}}
"""
        response = self.llm.invoke(prompt)
        # FIX: models frequently wrap JSON in markdown fences, which made
        # json.loads raise; strip a leading ```/```json fence if present.
        text = response.content.strip()
        if text.startswith("```"):
            first_newline = text.find("\n")
            text = text[first_newline + 1:] if first_newline != -1 else ""
            if text.endswith("```"):
                text = text[:-3]
        return json.loads(text)

    def execute(self, task):
        """Run one planned task; return a status dict."""
        if task["task"] == "COMPLETE":
            return {"status": "complete", "summary": task["summary"]}
        tool = self.tools.get(task["tool"])
        if tool:
            result = tool.run(task["input"])
            return {"status": "success", "result": result}
        return {"status": "error", "message": f"Unknown tool: {task['tool']}"}

    def run(self, max_iterations=10):
        """Main loop: plan -> execute -> record, up to max_iterations."""
        for i in range(max_iterations):
            print(f"\n=== Iteration {i+1} ===")
            task = self.plan()
            print(f"Task: {task}")
            # Terminal state declared by the planner.
            if task.get("task") == "COMPLETE":
                print(f"Goals achieved: {task['summary']}")
                return task["summary"]
            result = self.execute(task)
            print(f"Result: {result}")
            # Record history so the next plan() call sees recent outcomes.
            self.memory.append({"task": task, "result": result})
            if result["status"] == "success":
                self.completed_tasks.append(task["task"])
        return "Max iterations reached"
# ์ฌ์ฉ
agent = AutoGPTAgent(
llm=ChatOpenAI(model="gpt-4"),
tools=tools,
goals=["์์ธ์ ์ธ๊ตฌ ์กฐ์ฌ", "์ธ๊ตฌ ํต๊ณ ๋ถ์", "๋ณด๊ณ ์ ์์ฑ"]
)
result = agent.run()
6. ๋ฉํฐ ์์ด์ ํธ ์์คํ
์์ด์ ํธ ๊ฐ ํ์
class ResearcherAgent:
    """Gathers and summarizes key information on a topic via the LLM."""

    def __init__(self, llm):
        self.llm = llm

    def research(self, topic):
        """Return the LLM's research summary for *topic*."""
        request = f"'{topic}'에 대해 조사하고 핵심 정보를 정리해주세요."
        reply = self.llm.invoke(request)
        return reply.content
class WriterAgent:
    """Drafts a document from research notes in a requested style."""

    def __init__(self, llm):
        self.llm = llm

    def write(self, research_results, style="formal"):
        """Return a document drafted from *research_results*."""
        request = f"다음 정보를 바탕으로 {style} 스타일의 문서를 작성해주세요:\n{research_results}"
        reply = self.llm.invoke(request)
        return reply.content
class ReviewerAgent:
    """Reviews a document and suggests improvements via the LLM."""

    def __init__(self, llm):
        self.llm = llm

    def review(self, document):
        """Return review feedback for *document*."""
        request = f"다음 문서를 검토하고 개선점을 제안해주세요:\n{document}"
        reply = self.llm.invoke(request)
        return reply.content
class MultiAgentSystem:
    """Research -> write -> review pipeline built from three role agents."""

    def __init__(self, llm):
        self.researcher = ResearcherAgent(llm)
        self.writer = WriterAgent(llm)
        self.reviewer = ReviewerAgent(llm)

    def create_document(self, topic, max_revisions=2):
        """Produce a reviewed document on *topic*.

        Revises up to *max_revisions* times unless the reviewer replies
        that no changes are needed.
        """
        # Stage 1: research
        print("=== 연구 단계 ===")
        research = self.researcher.research(topic)
        print(research[:200] + "...")

        # Stage 2: initial draft
        print("\n=== 작성 단계 ===")
        document = self.writer.write(research)
        print(document[:200] + "...")

        # Stage 3: review/revise loop
        for i in range(max_revisions):
            print(f"\n=== 검토 {i+1} ===")
            review = self.reviewer.review(document)
            print(review[:200] + "...")
            # Reviewer sign-off short-circuits further revisions.
            if "수정 필요 없음" in review:
                break
            document = self.writer.write(f"원본:\n{document}\n\n검토:\n{review}", style="revised")

        return document
# ์ฌ์ฉ
llm = ChatOpenAI(model="gpt-4")
system = MultiAgentSystem(llm)
final_doc = system.create_document("์ธ๊ณต์ง๋ฅ์ ๋ฏธ๋")
7. ์์ด์ ํธ ํ๊ฐ
๋๊ตฌ ์ ํ ์ ํ๋
def evaluate_tool_selection(agent, test_cases):
    """Measure how often the agent picks the expected tool.

    Each test case is {"query": ..., "expected_tool": ...}; the agent's
    plan(query) result is checked against expected_tool.

    Returns:
        Accuracy in [0.0, 1.0].
    """
    if not test_cases:  # FIX: empty suite used to raise ZeroDivisionError
        print("Tool Selection Accuracy: n/a (no test cases)")
        return 0.0
    correct = 0
    total = len(test_cases)
    for case in test_cases:
        query = case["query"]
        expected_tool = case["expected_tool"]
        # Run only the planning step (no tool execution needed).
        result = agent.plan(query)
        selected_tool = result.get("tool")
        if selected_tool == expected_tool:
            correct += 1
            print(f"[CORRECT] Query: {query}, Tool: {selected_tool}")
        else:
            print(f"[WRONG] Query: {query}, Expected: {expected_tool}, Got: {selected_tool}")
    accuracy = correct / total
    print(f"\nTool Selection Accuracy: {accuracy:.2%}")
    return accuracy
# ํ
์คํธ ์ผ์ด์ค
test_cases = [
{"query": "2 + 3 * 4๋ฅผ ๊ณ์ฐํด์ค", "expected_tool": "calculator"},
{"query": "์ค๋ ์์ธ ๋ ์จ ์ด๋?", "expected_tool": "get_weather"},
{"query": "ํ์ด์ฌ ์ฐฝ์์๊ฐ ๋๊ตฌ์ผ?", "expected_tool": "search"},
]
# ํ๊ฐ
evaluate_tool_selection(agent, test_cases)
์์
์๋ฃ์จ
def evaluate_task_completion(agent, tasks):
    """Run each task through the agent and score completion.

    Each task dict needs "description", "input", and a "validator"
    callable that judges the agent's output. Agent exceptions are
    recorded as failures instead of aborting the whole evaluation.

    Returns:
        List of per-task result dicts.
    """
    if not tasks:  # FIX: empty task list used to raise ZeroDivisionError
        print("Task Completion Rate: n/a (no tasks)")
        return []
    results = []
    for task in tasks:
        try:
            result = agent.run(task["input"])
            success = bool(task["validator"](result))
            results.append({
                "task": task["description"],
                "success": success,
                "result": result,
            })
        except Exception as e:
            results.append({
                "task": task["description"],
                "success": False,
                "error": str(e),
            })
    completion_rate = sum(r["success"] for r in results) / len(results)
    print(f"Task Completion Rate: {completion_rate:.2%}")
    return results
# ์์
์ ์
tasks = [
{
"description": "๋ ์จ ์กฐํ ๋ฐ ์ท์ฐจ๋ฆผ ์ถ์ฒ",
"input": "์์ธ ๋ ์จ๋ฅผ ํ์ธํ๊ณ ์ค๋ ์ท์ฐจ๋ฆผ์ ์ถ์ฒํด์ค",
"validator": lambda r: "์์ธ" in r and ("์ท" in r or "์๋ฅ" in r)
},
{
"description": "์ํ ๊ณ์ฐ",
"input": "123 * 456์ ๊ฒฐ๊ณผ๋?",
"validator": lambda r: "56088" in r
},
]
์ ๋ฆฌ
์์ด์ ํธ ์ํคํ
์ฒ ๋น๊ต
| ์ํคํ
์ฒ |
ํน์ง |
์ฌ์ฉ ์์ |
| ReAct |
์ถ๋ก -ํ๋ ๋ฐ๋ณต |
๋จ๊ณ๋ณ ๋ฌธ์ ํด๊ฒฐ |
| Function Calling |
๊ตฌ์กฐํ๋ ๋๊ตฌ ํธ์ถ |
API ์ฐ๋ |
| Plan-and-Execute |
๊ณํ ํ ์คํ |
๋ณต์กํ ์์
|
| AutoGPT |
์์จ ๋ชฉํ ๋ฌ์ฑ |
์ฅ๊ธฐ ์์
|
| Multi-Agent |
์ญํ ๋ถ๋ด ํ์
|
์ ๋ฌธ์ฑ ํ์ |
ํต์ฌ ์ฝ๋
# ReAct ํจํด
Thought: ๋ฌธ์ ๋ถ์
Action: ๋๊ตฌ ์ ํ
Observation: ๊ฒฐ๊ณผ ํ์ธ
Final Answer: ์ต์ข
๋ต๋ณ
# Function Calling (OpenAI)
response = client.chat.completions.create(
model="gpt-4",
messages=messages,
tools=tools,
tool_choice="auto"
)
# LangChain Agent
agent = create_openai_tools_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)
result = executor.invoke({"input": query})
์์ด์ ํธ ์ค๊ณ ์ฒดํฌ๋ฆฌ์คํธ
โก ๋ช
ํํ ๋๊ตฌ ์ ์ (์ด๋ฆ, ์ค๋ช
, ํ๋ผ๋ฏธํฐ)
โก ์๋ฌ ์ฒ๋ฆฌ (๋๊ตฌ ์คํจ, ํ์ฑ ์ค๋ฅ)
โก ๋ฉ๋ชจ๋ฆฌ ๊ด๋ฆฌ (๋ํ ์ด๋ ฅ, ์ปจํ
์คํธ)
โก ๋ฃจํ ๋ฐฉ์ง (์ต๋ ๋ฐ๋ณต ํ์)
โก ์์ ์ฅ์น (์ํํ ์์
์ ํ)
โก ๋ก๊น
๋ฐ ๋ชจ๋ํฐ๋ง
๋ค์ ๋จ๊ณ
16_Evaluation_Metrics.md์์ LLM ํ๊ฐ ์งํ์ ๋ฒค์น๋งํฌ๋ฅผ ํ์ตํฉ๋๋ค.