10. LangChain ๊ธฐ์ด
10. LangChain ๊ธฐ์ด¶
버전 정보: 이 레슨은 LangChain 0.2+ (2024년~) 기준으로 작성되었습니다.
LangChain์ ๋น ๋ฅด๊ฒ ๋ฐ์ ํ๋ ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๋๋ค. ์ฃผ์ ๋ณ๊ฒฝ์ฌํญ: - LCEL (LangChain Expression Language): ๊ถ์ฅ ์ฒด์ธ ๊ตฌ์ฑ ๋ฐฉ์ - langchain-core, langchain-community: ํจํค์ง ๋ถ๋ฆฌ - ConversationChain ๋์ RunnableWithMessageHistory ๊ถ์ฅ
์ต์ ๋ฌธ์: https://python.langchain.com/docs/
ํ์ต ๋ชฉํ¶
- LangChain ํต์ฌ ๊ฐ๋
- LLM ๋ํผ์ ํ๋กฌํํธ
- ์ฒด์ธ๊ณผ ์์ด์ ํธ
- ๋ฉ๋ชจ๋ฆฌ ์์คํ
- LCEL (LangChain Expression Language) ์ฌํ
- LangGraph ๊ธฐ์ด
1. LangChain ๊ฐ์¶
์ค์น¶
# LangChain 0.2+
pip install langchain langchain-openai langchain-community
ํต์ฌ ๊ตฌ์ฑ์์¶
LangChain
├── Models       # LLM 래퍼
├── Prompts      # 프롬프트 템플릿
├── Chains       # 순차적 호출
├── Agents       # 도구 사용 에이전트
├── Memory       # 대화 기록
├── Retrievers   # 문서 검색
└── Callbacks    # 모니터링
2. LLM ๋ํผ¶
ChatOpenAI¶
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
model="gpt-3.5-turbo",
temperature=0.7,
max_tokens=500
)
# ๊ฐ๋จํ ํธ์ถ
response = llm.invoke("What is the capital of France?")
print(response.content)
๋ค์ํ LLM¶
# OpenAI
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4")
# Anthropic
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-opus-20240229")
# HuggingFace
from langchain_huggingface import HuggingFaceEndpoint
llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.1")
# Ollama (๋ก์ปฌ)
from langchain_community.llms import Ollama
llm = Ollama(model="llama2")
๋ฉ์์ง ํ์ ¶
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
messages = [
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="What is 2+2?"),
]
response = llm.invoke(messages)
print(response.content)
3. ํ๋กฌํํธ ํ ํ๋ฆฟ¶
๊ธฐ๋ณธ ํ ํ๋ฆฟ¶
from langchain_core.prompts import PromptTemplate
template = PromptTemplate(
input_variables=["topic"],
template="Write a short poem about {topic}."
)
prompt = template.format(topic="spring")
response = llm.invoke(prompt)
Chat ํ๋กฌํํธ¶
from langchain_core.prompts import ChatPromptTemplate
template = ChatPromptTemplate.from_messages([
("system", "You are a helpful assistant that translates {input_language} to {output_language}."),
("human", "{text}")
])
messages = template.format_messages(
input_language="English",
output_language="Korean",
text="Hello, how are you?"
)
response = llm.invoke(messages)
Few-shot ํ๋กฌํํธ¶
from langchain_core.prompts import FewShotPromptTemplate
examples = [
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
{"input": "hot", "output": "cold"},
]
example_template = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}"
)
few_shot_prompt = FewShotPromptTemplate(
examples=examples,
example_prompt=example_template,
prefix="Give the antonym of every input:",
suffix="Input: {word}\nOutput:",
input_variables=["word"]
)
prompt = few_shot_prompt.format(word="big")
4. ์ฒด์ธ (Chains)¶
LCEL (LangChain Expression Language)¶
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
# ์ฒด์ธ ๊ตฌ์ฑ
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
llm = ChatOpenAI(model="gpt-3.5-turbo")
output_parser = StrOutputParser()
# ํ์ดํ ์ฐ์ฐ์๋ก ์ฐ๊ฒฐ
chain = prompt | llm | output_parser
# ์คํ
result = chain.invoke({"topic": "programmers"})
print(result)
์์ฐจ ์ฒด์ธ¶
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
# ์ฒซ ๋ฒ์งธ ์ฒด์ธ: ์ฃผ์ ์์ฑ
topic_prompt = ChatPromptTemplate.from_template(
"Generate a random topic for a story."
)
# ๋ ๋ฒ์งธ ์ฒด์ธ: ์คํ ๋ฆฌ ์์ฑ
story_prompt = ChatPromptTemplate.from_template(
"Write a short story about: {topic}"
)
# ์ฒด์ธ ์ฐ๊ฒฐ
chain = (
{"topic": topic_prompt | llm | StrOutputParser()}
| story_prompt
| llm
| StrOutputParser()
)
result = chain.invoke({})
๋ณ๋ ฌ ์ฒด์ธ¶
from langchain_core.runnables import RunnableParallel
# ๋ณ๋ ฌ ์คํ
parallel_chain = RunnableParallel(
summary=summary_chain,
keywords=keyword_chain,
sentiment=sentiment_chain
)
results = parallel_chain.invoke({"text": "Long article here..."})
# {'summary': '...', 'keywords': '...', 'sentiment': '...'}
5. ์ถ๋ ฅ ํ์¶
String Parser¶
from langchain_core.output_parsers import StrOutputParser
parser = StrOutputParser()
chain = prompt | llm | parser # ๋ฌธ์์ด๋ก ๋ณํ
JSON Parser¶
from langchain_core.output_parsers import JsonOutputParser
from pydantic import BaseModel, Field
class Person(BaseModel):
    """Extraction schema consumed by JsonOutputParser (see parser below)."""
    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")
parser = JsonOutputParser(pydantic_object=Person)
prompt = ChatPromptTemplate.from_messages([
("system", "Extract person info. {format_instructions}"),
("human", "{text}")
]).partial(format_instructions=parser.get_format_instructions())
chain = prompt | llm | parser
result = chain.invoke({"text": "John is 25 years old"})
# {'name': 'John', 'age': 25}
๊ตฌ์กฐํ๋ ์ถ๋ ฅ¶
from langchain_core.output_parsers import PydanticOutputParser
class MovieReview(BaseModel):
    """Target schema for PydanticOutputParser structured output."""
    title: str
    rating: int
    summary: str
parser = PydanticOutputParser(pydantic_object=MovieReview)
6. ์์ด์ ํธ (Agents)¶
๊ธฐ๋ณธ ์์ด์ ํธ¶
from langchain.agents import create_react_agent, AgentExecutor
from langchain import hub
from langchain_community.tools import DuckDuckGoSearchRun
# ๋๊ตฌ ์ ์
search = DuckDuckGoSearchRun()
tools = [search]
# ReAct ํ๋กฌํํธ ๋ก๋
prompt = hub.pull("hwchase17/react")
# ์์ด์ ํธ ์์ฑ
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# ์คํ
result = agent_executor.invoke({"input": "What is the weather in Seoul?"})
์ปค์คํ ๋๊ตฌ¶
from langchain.tools import tool
@tool
def calculate(expression: str) -> str:
    """Calculate a mathematical expression."""
    # SECURITY NOTE: eval() executes arbitrary Python code. Acceptable for a
    # local demo, but never expose this tool to untrusted input without a
    # real expression parser (e.g. ast.literal_eval or a math grammar).
    try:
        return str(eval(expression))
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # Exception is the widest net a tool implementation should cast.
        return "Error in calculation"
@tool
def get_current_time() -> str:
    """Get the current time."""
    # Local import keeps this tutorial snippet self-contained.
    from datetime import datetime
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
tools = [calculate, get_current_time]
Tool ํด๋์ค¶
from langchain.tools import BaseTool
from typing import Optional
from pydantic import Field
class SearchTool(BaseTool):
    # Metadata surfaced to the agent/LLM when it selects a tool.
    name: str = "search"
    description: str = "Search for information on the internet"

    def _run(self, query: str) -> str:
        """Synchronous entry point — replace the stub with real search logic."""
        # Search logic (stubbed for the example).
        return f"Search results for: {query}"

    async def _arun(self, query: str) -> str:
        """Async entry point; delegates to the sync implementation."""
        return self._run(query)
7. ๋ฉ๋ชจ๋ฆฌ (Memory)¶
권장 방식 변경: LangChain 0.2+에서는 ConversationChain, ConversationBufferMemory 등이 deprecated 되었습니다. 새 프로젝트에서는 RunnableWithMessageHistory (아래 참조)를 사용하세요.
(Legacy) ๋ํ ๋ฒํผ ๋ฉ๋ชจ๋ฆฌ¶
โ ๏ธ Deprecated: ์๋ "LCEL์์ ๋ฉ๋ชจ๋ฆฌ" ์น์ ์
RunnableWithMessageHistory์ฌ์ฉ ๊ถ์ฅ
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
memory = ConversationBufferMemory()
conversation = ConversationChain(
llm=llm,
memory=memory,
verbose=True
)
# ๋ํ
response1 = conversation.predict(input="Hi, I'm John")
response2 = conversation.predict(input="What's my name?")
# "Your name is John"
(Legacy) ์์ฝ ๋ฉ๋ชจ๋ฆฌ¶
from langchain.memory import ConversationSummaryMemory
memory = ConversationSummaryMemory(llm=llm)
# ๊ธด ๋ํ๋ฅผ ์์ฝํ์ฌ ์ ์ฅ
(Legacy) ์๋์ฐ ๋ฉ๋ชจ๋ฆฌ¶
from langchain.memory import ConversationBufferWindowMemory
# ์ต๊ทผ k๊ฐ์ ๋ํ๋ง ์ ์ง
memory = ConversationBufferWindowMemory(k=5)
LCEL์์ ๋ฉ๋ชจ๋ฆฌ (๊ถ์ฅ)¶
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory
# In-memory registry of per-session chat histories.
store = {}

def get_session_history(session_id: str):
    """Return the message history for *session_id*, creating it on first use."""
    try:
        return store[session_id]
    except KeyError:
        history = ChatMessageHistory()
        store[session_id] = history
        return history
chain_with_history = RunnableWithMessageHistory(
chain,
get_session_history,
input_messages_key="input",
history_messages_key="history"
)
# ์ฌ์ฉ
response = chain_with_history.invoke(
{"input": "What is my name?"},
config={"configurable": {"session_id": "user123"}}
)
8. RAG with LangChain¶
๋ฌธ์ ๋ก๋¶
from langchain_community.document_loaders import (
TextLoader,
PyPDFLoader,
WebBaseLoader
)
# 텍스트 파일
loader = TextLoader("document.txt")
docs = loader.load()
# PDF
loader = PyPDFLoader("document.pdf")
docs = loader.load()
# ์นํ์ด์ง
loader = WebBaseLoader("https://example.com")
docs = loader.load()
ํ ์คํธ ๋ถํ ¶
from langchain.text_splitter import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50
)
chunks = splitter.split_documents(docs)
๋ฒกํฐ ์คํ ์ด¶
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(
documents=chunks,
embedding=embeddings,
persist_directory="./chroma_db"
)
# ๊ฒ์
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
docs = retriever.invoke("What is machine learning?")
RAG ์ฒด์ธ¶
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
template = """Answer based on the context:
Context: {context}
Question: {question}
Answer:"""
prompt = ChatPromptTemplate.from_template(template)
def format_docs(docs):
    """Join each retrieved document's page_content with a blank line between."""
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)
rag_chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
result = rag_chain.invoke("What is machine learning?")
9. ์คํธ๋ฆฌ๋ฐ¶
# ์คํธ๋ฆฌ๋ฐ ์ถ๋ ฅ
for chunk in chain.stream({"topic": "AI"}):
print(chunk, end="", flush=True)
# ๋น๋๊ธฐ ์คํธ๋ฆฌ๋ฐ
async for chunk in chain.astream({"topic": "AI"}):
print(chunk, end="", flush=True)
10. LCEL (LangChain Expression Language) ์ฌํ¶
LCEL์ LangChain 0.2+์์ ์ฒด์ธ์ ๊ตฌ์ถํ๋ ๊ถ์ฅ ๋ฐฉ์์ ๋๋ค. ๋ณต์กํ LLM ์ ํ๋ฆฌ์ผ์ด์ ์ ๊ตฌ์ถํ๊ธฐ ์ํ ์ ์ธ์ ์ด๊ณ ์กฐํฉ ๊ฐ๋ฅํ ๋ฌธ๋ฒ์ ์ ๊ณตํฉ๋๋ค.
ํ์ดํ ์ฐ์ฐ์๋ฅผ ํตํ ์ฒด์ธ ๊ตฌ์ฑ¶
ํ์ดํ ์ฐ์ฐ์(|)๋ ์ปดํฌ๋ํธ๋ฅผ ์ผ์ชฝ์์ ์ค๋ฅธ์ชฝ์ผ๋ก ์ฐ๊ฒฐํฉ๋๋ค:
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
# ๊ฐ ์ปดํฌ๋ํธ๋ "Runnable"
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
llm = ChatOpenAI(model="gpt-3.5-turbo")
output_parser = StrOutputParser()
# ํ์ดํ ์ฐ์ฐ์๋ก ๊ตฌ์ฑ
chain = prompt | llm | output_parser
# ์คํ
result = chain.invoke({"topic": "programmers"})
ํต์ฌ Runnable ์ปดํฌ๋ํธ¶
RunnablePassthrough¶
์ ๋ ฅ์ ๊ทธ๋๋ก ์ ๋ฌํ๋ฉฐ, ๋ฐ์ดํฐ ๋ผ์ฐํ ์ ์ ์ฉํฉ๋๋ค:
from langchain_core.runnables import RunnablePassthrough
# 전체 입력 전달
chain = RunnablePassthrough() | llm
# ํน์ ํ๋ ์ ๋ฌ
chain = {"text": RunnablePassthrough()} | prompt | llm
RunnableParallel¶
์ฌ๋ฌ ์ฒด์ธ์ ๋ณ๋ ฌ๋ก ์คํํฉ๋๋ค:
from langchain_core.runnables import RunnableParallel
summary_chain = summary_prompt | llm | StrOutputParser()
keyword_chain = keyword_prompt | llm | StrOutputParser()
sentiment_chain = sentiment_prompt | llm | StrOutputParser()
# ์ธ ์ฒด์ธ์ ๋ณ๋ ฌ๋ก ์คํ
parallel_chain = RunnableParallel(
summary=summary_chain,
keywords=keyword_chain,
sentiment=sentiment_chain
)
results = parallel_chain.invoke({"text": "Long article text here..."})
# {'summary': '...', 'keywords': [...], 'sentiment': 'positive'}
RunnableLambda¶
์์์ ํจ์๋ฅผ Runnable๋ก ๋ํํฉ๋๋ค:
from langchain_core.runnables import RunnableLambda
def extract_text(data):
    """Pull the "text" field out of the input mapping and upper-case it."""
    text = data["text"]
    return text.upper()
chain = RunnableLambda(extract_text) | llm
result = chain.invoke({"text": "hello world"})
LCEL์์ ์คํธ๋ฆฌ๋ฐ¶
LCEL์ ์ฌ๋ฌ ์คํธ๋ฆฌ๋ฐ ๋ชจ๋๋ฅผ ์ง์ํฉ๋๋ค:
# ๋๊ธฐ ์คํธ๋ฆฌ๋ฐ
for chunk in chain.stream({"topic": "AI"}):
print(chunk, end="", flush=True)
# ๋น๋๊ธฐ ์คํธ๋ฆฌ๋ฐ
async for chunk in chain.astream({"topic": "AI"}):
print(chunk, end="", flush=True)
# ์ด๋ฒคํธ ์คํธ๋ฆฌ๋ฐ (์์ธ ์คํธ๋ฆฌ๋ฐ)
async for event in chain.astream_events({"topic": "AI"}, version="v1"):
kind = event["event"]
if kind == "on_chat_model_stream":
print(event["data"]["chunk"].content, end="", flush=True)
๋น๊ต: ๊ตฌ์ ์ฒด์ธ ์คํ์ผ vs LCEL¶
๊ตฌ์ ์คํ์ผ (Deprecated)¶
from langchain.chains import LLMChain
# ๊ตฌ์ ๋ฐฉ์
chain = LLMChain(llm=llm, prompt=prompt)
result = chain.run(topic="AI")
LCEL ์คํ์ผ (๊ถ์ฅ)¶
# LCEL ๋ฐฉ์
chain = prompt | llm | StrOutputParser()
result = chain.invoke({"topic": "AI"})
LCEL์ ์ฅ์ : - ์กฐํฉ์ฑ: ์ปดํฌ๋ํธ๋ฅผ ์ฝ๊ฒ ๊ฒฐํฉํ๊ณ ์ฌ์ฌ์ฉ - ์คํธ๋ฆฌ๋ฐ: ์คํธ๋ฆฌ๋ฐ ์ถ๋ ฅ ๊ธฐ๋ณธ ์ง์ - ๋น๋๊ธฐ: 1๊ธ ๋น๋๊ธฐ ์ง์ - ๋ณ๋ ฌํ: ๊ฐ๋ฅํ ๊ฒฝ์ฐ ์๋ ๋ณ๋ ฌ ์คํ - ํ์ ์์ ์ฑ: ๋ ๋์ IDE ์ง์ ๋ฐ ์๋ฌ ๋ฉ์์ง
์์ : LCEL์ ์ฌ์ฉํ RAG ์ฒด์ธ¶
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
# ์ค์
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
# 프롬프트 템플릿
template = """Answer the question based on the following context:
Context: {context}
Question: {question}
Answer:"""
prompt = ChatPromptTemplate.from_template(template)
# ํฌํผ ํจ์
def format_docs(docs):
    """Concatenate the documents' contents, separated by blank lines."""
    separator = "\n\n"
    return separator.join(entry.page_content for entry in docs)
# LCEL ์คํ์ผ RAG ์ฒด์ธ
rag_chain = (
RunnableParallel(
context=retriever | format_docs,
question=RunnablePassthrough()
)
| prompt
| llm
| StrOutputParser()
)
# ์คํ
answer = rag_chain.invoke("What is machine learning?")
# ๋ต๋ณ ์คํธ๋ฆฌ๋ฐ
for chunk in rag_chain.stream("What is deep learning?"):
print(chunk, end="", flush=True)
์ฌํ: ๋ถ๊ธฐ์ ๋ผ์ฐํ ¶
from langchain_core.runnables import RunnableBranch
# 입력에 따라 라우팅
branch = RunnableBranch(
(lambda x: "code" in x["topic"], code_chain),
(lambda x: "math" in x["topic"], math_chain),
default_chain # ๊ธฐ๋ณธ๊ฐ
)
chain = {"topic": RunnablePassthrough()} | branch | llm
11. LangGraph ๊ธฐ์ด¶
LangGraph๋ LLM์ ์ฌ์ฉํ์ฌ ์ํ ์ ์งํ ๋ค์ค ์์ด์ ํธ ์ ํ๋ฆฌ์ผ์ด์ ์ ๊ตฌ์ถํ๊ธฐ ์ํ ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๋๋ค. ๊ทธ๋ํ ๊ธฐ๋ฐ ์ํฌํ๋ก์ฐ๋ก LangChain์ ํ์ฅํฉ๋๋ค.
LangGraph๋?¶
LangGraph๋ฅผ ์ฌ์ฉํ๋ฉด ์ ํ๋ฆฌ์ผ์ด์ ์ ๊ทธ๋ํ๋ก ์ ์ํ ์ ์์ต๋๋ค: - ๋ ธ๋๋ ํจ์ (LLM ํธ์ถ, ๋๊ตฌ ์ฌ์ฉ, ์ปค์คํ ๋ก์ง) - ์ฃ์ง๋ ๋ ธ๋ ๊ฐ์ ํ๋ฆ ์ ์ - ์ํ๋ ๊ทธ๋ํ ์คํ ์ ์ฒด์์ ์ ์ง๋จ
LangGraph๋ฅผ ์ฌ์ฉํด์ผ ํ๋ ๊ฒฝ์ฐ (vs ์ฒด์ธ):
| ์ฒด์ธ(LCEL) ์ฌ์ฉ | LangGraph ์ฌ์ฉ |
|---|---|
| ์ ํ ์ํฌํ๋ก์ฐ | ์ฌ์ดํด, ๋ฃจํ |
| ๊ฐ๋จํ ๋ถ๊ธฐ | ๋ณต์กํ ๋ผ์ฐํ |
| ์ํ ์์ | ์ํ ์ ์ง ์์ด์ ํธ |
| ๋จ์ผ ์์ด์ ํธ | ๋ค์ค ์์ด์ ํธ ์์คํ |
์ค์น¶
pip install langgraph
StateGraph ๊ฐ๋ ¶
LangGraph는 노드를 통과하면서 상태를 유지하는 StateGraph를 사용합니다:
from typing import TypedDict, Annotated, Sequence
from langchain_core.messages import BaseMessage
from langgraph.graph import StateGraph, END
# ์ํ ์คํค๋ง ์ ์
class AgentState(TypedDict):
    """State schema shared by all graph nodes; each node returns a partial update."""
    messages: Annotated[Sequence[BaseMessage], "The messages in the conversation"]
    # Name of the next node to execute.
    next: str
# ๊ทธ๋ํ ์์ฑ
graph = StateGraph(AgentState)
๋ ธ๋์ ์ฃ์ง¶
from langchain_core.messages import HumanMessage, AIMessage
def agent_node(state: AgentState):
    """Agent decision node: ask the LLM for the next step."""
    messages = state["messages"]
    response = llm.invoke(messages)
    # Append the LLM reply and hand off to the tool node.
    return {"messages": messages + [response], "next": "tool"}
def tool_node(state: AgentState):
    """Tool execution node: run the tool, record its output, end the graph."""
    # Tool execution (stubbed result for the example).
    result = "Tool result here"
    return {"messages": state["messages"] + [AIMessage(content=result)], "next": END}
# 노드 추가
graph.add_node("agent", agent_node)
graph.add_node("tool", tool_node)
# ์ฃ์ง ์ถ๊ฐ
graph.add_edge("agent", "tool")
graph.add_edge("tool", END)
# 진입점 설정
graph.set_entry_point("agent")
# ์ปดํ์ผ
app = graph.compile()
# ์คํ
result = app.invoke({"messages": [HumanMessage(content="Hello")]})
๋๊ตฌ ์ฌ์ฉ์ด ์๋ ๊ฐ๋จํ ์์ด์ ํธ¶
from langgraph.graph import StateGraph, END
from langchain.tools import tool
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from typing import TypedDict, Annotated, Sequence
from langchain_core.messages import BaseMessage
# ๋๊ตฌ ์ ์
@tool
def search(query: str) -> str:
    """Search for information."""
    # Stubbed search result for the example.
    return f"Search results for: {query}"
tools = [search]
llm_with_tools = llm.bind_tools(tools)
# ์ํ
class AgentState(TypedDict):
    """Conversation state threaded through the graph."""
    messages: Annotated[Sequence[BaseMessage], "The messages"]
# 에이전트 노드
def call_agent(state: AgentState):
    """Run the tool-bound LLM over the conversation and append its reply."""
    history = state["messages"]
    reply = llm_with_tools.invoke(history)
    return {"messages": history + [reply]}
# 도구 노드
def call_tool(state: AgentState):
    """Execute every tool call requested by the last message and record results."""
    history = state["messages"]
    outputs = []
    # The last message is the AI turn that carries the tool-call requests.
    for call in history[-1].tool_calls:
        result = search.invoke(call["args"])
        outputs.append(ToolMessage(content=result, tool_call_id=call["id"]))
    return {"messages": history + outputs}
# ๊ทธ๋ํ ๊ตฌ์ถ
graph = StateGraph(AgentState)
graph.add_node("agent", call_agent)
graph.add_node("tools", call_tool)
# ์กฐ๊ฑด๋ถ ๋ผ์ฐํ
def should_continue(state: AgentState):
    """Route to the tool node while the agent still requests tool calls."""
    pending = state["messages"][-1].tool_calls
    return "tools" if pending else END
graph.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
graph.add_edge("tools", "agent")
graph.set_entry_point("agent")
# ์ปดํ์ผ ๋ฐ ์คํ
app = graph.compile()
result = app.invoke({"messages": [HumanMessage(content="Search for LangChain news")]})
# ๋ํ ์ถ๋ ฅ
for msg in result["messages"]:
print(f"{msg.__class__.__name__}: {msg.content}")
์กฐ๊ฑด๋ถ ๋ผ์ฐํ ¶
LangGraph๋ ๋์ ๋ผ์ฐํ ์ ์ํ ์กฐ๊ฑด๋ถ ์ฃ์ง๋ฅผ ์ง์ํฉ๋๋ค:
def route_decision(state: AgentState):
    """Choose the next node from flags on the state (guard-clause style)."""
    if state.get("error"):
        return "error_handler"
    if state.get("needs_review"):
        return "review"
    return "complete"
graph.add_conditional_edges(
"process",
route_decision,
{
"error_handler": "error_handler",
"review": "review",
"complete": END
}
)
์๊ฐํ¶
LangGraph๋ ๊ทธ๋ํ๋ฅผ ์๊ฐํํ ์ ์์ต๋๋ค:
from IPython.display import Image, display
# ๊ทธ๋ํ ๊ตฌ์กฐ ์๊ฐํ
display(Image(app.get_graph().draw_mermaid_png()))
๋ค์ค ์์ด์ ํธ ์์ ¶
from langgraph.graph import StateGraph, END
class MultiAgentState(TypedDict):
    """State shared by the researcher → writer → reviewer pipeline."""
    # Running conversation visible to every agent.
    messages: Sequence[BaseMessage]
    # Which agent should act next.
    current_agent: str

def researcher(state: MultiAgentState):
    """Research agent (stub): gathers material, then hands off to the writer."""
    # Research agent
    return {"messages": [...], "current_agent": "writer"}

def writer(state: MultiAgentState):
    """Writing agent (stub): drafts content, then hands off to the reviewer."""
    # Writing agent
    return {"messages": [...], "current_agent": "reviewer"}

def reviewer(state: MultiAgentState):
    """Review agent (stub): final check; END terminates the graph."""
    # Review agent
    return {"messages": [...], "current_agent": END}
# ๋ค์ค ์์ด์ ํธ ๊ทธ๋ํ ๊ตฌ์ถ
graph = StateGraph(MultiAgentState)
graph.add_node("researcher", researcher)
graph.add_node("writer", writer)
graph.add_node("reviewer", reviewer)
graph.add_edge("researcher", "writer")
graph.add_edge("writer", "reviewer")
graph.add_edge("reviewer", END)
graph.set_entry_point("researcher")
app = graph.compile()
์ฃผ์ LangGraph ๊ฐ๋ ¶
- ์ฒดํฌํฌ์ธํ : ์ธ์ ๋ ์ง ์ํ ์ ์ฅ/๋ณต์
- Human-in-the-loop: ๊ณ์ํ๊ธฐ ์ ์ฌ๋์ ์น์ธ์ ์ํด ์ผ์ ์ค์ง
- ํ์ ํธ๋๋ธ: ๋ชจ๋ ์ฒดํฌํฌ์ธํธ์์ ์ฌ์
- ์์์ฑ: ๋ํ ์ํ๋ฅผ ๋ฐ์ดํฐ๋ฒ ์ด์ค์ ์ ์ฅ
์ ๋ฆฌ¶
ํต์ฌ ํจํด¶
# ๊ธฐ๋ณธ LCEL ์ฒด์ธ
chain = prompt | llm | output_parser
# LCEL์ ์ฌ์ฉํ RAG ์ฒด์ธ
rag_chain = (
RunnableParallel(context=retriever, question=RunnablePassthrough())
| prompt | llm | parser
)
# ์์ด์ ํธ (์ ํต์ ๋ฐฉ์)
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)
# ์์ด์ ํธ (LangGraph)
graph = StateGraph(AgentState)
graph.add_node("agent", call_agent)
graph.add_conditional_edges("agent", should_continue)
app = graph.compile()
์ปดํฌ๋ํธ ์ ํ ๊ฐ์ด๋¶
| ์ํฉ | ์ปดํฌ๋ํธ |
|---|---|
| ๋จ์ ํธ์ถ | LLM + Prompt |
| ์์ฐจ ์ฒ๋ฆฌ | Chain (LCEL) |
| ๋ณ๋ ฌ ์คํ | RunnableParallel |
| ๋ฌธ์ ๊ธฐ๋ฐ Q&A | RAG Chain (LCEL) |
| ๊ฐ๋จํ ๋๊ตฌ ์ฌ์ฉ | Agent (ReAct) |
| ๋ณต์กํ ์ํฌํ๋ก์ฐ | LangGraph |
| ๋ค์ค ์์ด์ ํธ ์์คํ | LangGraph |
| ์ํ ์ ์ง ์์ด์ ํธ | LangGraph |
| ๋ํ ์ ์ง | RunnableWithMessageHistory |
LCEL vs LangGraph¶
| ๊ธฐ๋ฅ | LCEL | LangGraph |
|---|---|---|
| ์ฌ์ฉ ์ฌ๋ก | ์ ํ/๊ฐ๋จํ ๋ถ๊ธฐ | ์ฌ์ดํด, ๋ณต์กํ ๋ผ์ฐํ |
| ์ํ | ์ํ ์์ | ์ํ ์ ์ง |
| ๋ฌธ๋ฒ | ํ์ดํ ์ฐ์ฐ์ (\|) |
StateGraph |
| ๋ณต์ก๋ | ๊ฐ๋จ~์ค๊ฐ | ์ค๊ฐ~๋ณต์ก |
| ์ต์ ์ฉ๋ | RAG, ๊ฐ๋จํ ์์ด์ ํธ | ๋ค์ค ์์ด์ ํธ, human-in-loop |
๋ค์ ๋จ๊ณ¶
11_Vector_Databases.md์์ ๋ฒกํฐ ๋ฐ์ดํฐ๋ฒ ์ด์ค๋ฅผ ํ์ตํฉ๋๋ค.