"""
10. LangChain basics

An introductory example of building LLM applications with LangChain.
"""

print("=" * 60)
print("LangChain Basics")
print("=" * 60)

# ============================================
# 1. LangChain structure (code example)
# ============================================
print("\n[1] LangChain basic structure")
print("-" * 40)

# Reference snippet shown to the reader; kept as a string because running it
# requires the langchain packages and API credentials.
langchain_basic = '''
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# LLM
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)

# Prompt template
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")

# Output parser
parser = StrOutputParser()

# Chain composition (LCEL)
chain = prompt | llm | parser

# Run
result = chain.invoke({"topic": "programming"})
print(result)
'''
print(langchain_basic)


# ============================================
# 2. Prompt templates
# ============================================
print("\n[2] Prompt template examples")
print("-" * 40)

try:
    from langchain_core.prompts import PromptTemplate, ChatPromptTemplate

    # Basic (single-string) template
    template = PromptTemplate(
        input_variables=["product"],
        template="Write a marketing slogan for {product}."
    )
    print(f"Basic template: {template.format(product='smartphone')}")

    # Chat template: a list of (role, content) message tuples
    chat_template = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful assistant."),
        ("human", "{question}")
    ])
    messages = chat_template.format_messages(question="What is Python?")
    print(f"\nChat template: {messages}")

except ImportError:
    # Keep the tutorial runnable even without langchain installed.
    print("langchain not installed (pip install langchain langchain-core)")


# ============================================
# 3. Few-shot prompts
# ============================================
print("\n[3] Few-shot prompts")
print("-" * 40)

# Reference snippet (not executed): builds a prompt from worked examples.
fewshot_code = '''
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate

examples = [
    {"word": "happy", "antonym": "sad"},
    {"word": "tall", "antonym": "short"},
]

example_template = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\\nAntonym: {antonym}"
)

few_shot_prompt = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_template,
    prefix="Give the antonym of each word:",
    suffix="Word: {input}\\nAntonym:",
    input_variables=["input"]
)

prompt = few_shot_prompt.format(input="big")
'''
print(fewshot_code)


# ============================================
# 4. Output parsers
# ============================================
print("\n[4] Output parsers")
print("-" * 40)

# Reference snippet (not executed): structured output via a pydantic schema.
parser_code = '''
from langchain_core.output_parsers import JsonOutputParser
from pydantic import BaseModel, Field

class Person(BaseModel):
    name: str = Field(description="Name")
    age: int = Field(description="Age")

parser = JsonOutputParser(pydantic_object=Person)

# Add the format instructions to the prompt
format_instructions = parser.get_format_instructions()

prompt = ChatPromptTemplate.from_messages([
    ("system", "Extract person info. {format_instructions}"),
    ("human", "{text}")
]).partial(format_instructions=format_instructions)

chain = prompt | llm | parser
result = chain.invoke({"text": "John is 25 years old"})
# {'name': 'John', 'age': 25}
'''
print(parser_code)


# ============================================
# 5. Chains (LCEL)
# ============================================
print("\n[5] LCEL chains")
print("-" * 40)

# Reference snippet (not executed): the three common LCEL compositions.
lcel_code = '''
from langchain_core.runnables import RunnablePassthrough, RunnableParallel

# Sequential chain
chain = prompt | llm | parser

# Parallel chain
parallel = RunnableParallel(
    summary=summary_chain,
    keywords=keyword_chain
)

# Mapping/branching chain
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | parser
)

# Run
result = chain.invoke({"question": "What is AI?"})
'''
print(lcel_code)


# ============================================
# 6. RAG chain
# ============================================
print("\n[6] RAG chain")
print("-" * 40)

# Reference snippet (not executed): retrieval-augmented generation pipeline.
rag_chain_code = '''
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

# Vector store
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(texts, embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

# RAG prompt
template = """Answer based on context:
Context: {context}
Question: {question}
Answer:"""
prompt = ChatPromptTemplate.from_template(template)

# Document formatting
def format_docs(docs):
    return "\\n\\n".join(doc.page_content for doc in docs)

# RAG chain
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

# Run
answer = rag_chain.invoke("What is machine learning?")
'''
print(rag_chain_code)


# ============================================
# 7. Agents
# ============================================
print("\n[7] Agents")
print("-" * 40)

# Reference snippet (not executed): ReAct agent with custom tools.
# NOTE(review): the snippet uses eval() for brevity — unsafe on untrusted
# input; a real tool should use a safe expression evaluator.
agent_code = '''
from langchain.agents import create_react_agent, AgentExecutor
from langchain import hub
from langchain.tools import tool

# Custom tools
@tool
def calculate(expression: str) -> str:
    """Calculate a math expression."""
    return str(eval(expression))

@tool
def search(query: str) -> str:
    """Search the web."""
    return f"Search results for: {query}"

tools = [calculate, search]

# ReAct agent
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run
result = executor.invoke({"input": "What is 2 + 2?"})
'''
print(agent_code)


# ============================================
# 8. Memory
# ============================================
print("\n[8] Conversation memory")
print("-" * 40)

# Reference snippet (not executed): classic memory classes plus the
# LCEL-style RunnableWithMessageHistory replacement.
memory_code = '''
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

# Memory
memory = ConversationBufferMemory()

# Conversation chain
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

# Conversation
response1 = conversation.predict(input="Hi, I'm John")
response2 = conversation.predict(input="What's my name?")
# "Your name is John"

# LCEL memory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory

store = {}

def get_session_history(session_id):
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

chain_with_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history"
)
'''
print(memory_code)


# ============================================
# 9. Simple runnable example
# ============================================
print("\n[9] Runnable example")
print("-" * 40)

try:
    from langchain_core.prompts import PromptTemplate

    # Exercise only the prompt template — no LLM call required.
    template = PromptTemplate.from_template(
        "Translate '{text}' to {language}."
    )

    # Formatting
    prompt = template.format(text="Hello", language="Korean")
    print(f"Formatted prompt: {prompt}")

    # Input variables
    print(f"Input variables: {template.input_variables}")

except ImportError:
    print("langchain-core not installed")


# ============================================
# Summary
# ============================================
print("\n" + "=" * 60)
print("LangChain Summary")
print("=" * 60)

summary = """
Core patterns:
    # Basic chain
    chain = prompt | llm | output_parser

    # RAG chain
    rag = {"context": retriever, "question": RunnablePassthrough()} | prompt | llm

    # Agent
    agent = create_react_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools)

Key components:
    - PromptTemplate: prompt construction
    - ChatOpenAI: LLM wrapper
    - OutputParser: output parsing
    - Retriever: document retrieval
    - Memory: conversation history
    - Agent: tool use
"""
print(summary)