1"""
212. ์ค์ ์ฑ๋ด ์์
3
4RAG ๊ธฐ๋ฐ ๋ํํ AI ์์คํ
5"""
6
7print("=" * 60)
8print("์ค์ ์ฑ๋ด")
9print("=" * 60)
10
11
12# ============================================
13# 1. ๊ฐ๋จํ ๋ํ ์ฑ๋ด (๋ฉ๋ชจ๋ฆฌ)
14# ============================================
15print("\n[1] ๊ฐ๋จํ ๋ํ ์ฑ๋ด")
16print("-" * 40)
17
18class SimpleChatbot:
19 """ํ์คํ ๋ฆฌ๋ฅผ ์ ์งํ๋ ๊ฐ๋จํ ์ฑ๋ด"""
20
21 def __init__(self, system_prompt="You are a helpful assistant."):
22 self.system_prompt = system_prompt
23 self.history = []
24
25 def chat(self, user_message):
26 """์ฌ์ฉ์ ๋ฉ์์ง ์ฒ๋ฆฌ (LLM ํธ์ถ ์๋ฎฌ๋ ์ด์
)"""
27 # ํ์คํ ๋ฆฌ์ ์ถ๊ฐ
28 self.history.append({"role": "user", "content": user_message})
29
30 # ์ค์ ๋ก๋ LLM ํธ์ถ
31 # response = llm.invoke(messages)
32 response = f"[์๋ต] {user_message}์ ๋ํ ๋ต๋ณ์
๋๋ค."
33
34 self.history.append({"role": "assistant", "content": response})
35 return response
36
37 def get_messages(self):
38 """์ ์ฒด ๋ฉ์์ง ๊ตฌ์ฑ"""
39 messages = [{"role": "system", "content": self.system_prompt}]
40 messages.extend(self.history)
41 return messages
42
43 def clear_history(self):
44 self.history = []
45
46# ํ
์คํธ
47bot = SimpleChatbot()
48print(bot.chat("์๋
ํ์ธ์"))
49print(bot.chat("์ค๋ ๋ ์จ ์ด๋์?"))
50print(f"ํ์คํ ๋ฆฌ ๊ธธ์ด: {len(bot.history)}")
51
52
53# ============================================
54# 2. RAG ์ฑ๋ด
55# ============================================
56print("\n[2] RAG ์ฑ๋ด")
57print("-" * 40)
58
59import numpy as np
60
61class RAGChatbot:
62 """๋ฌธ์ ๊ธฐ๋ฐ RAG ์ฑ๋ด"""
63
64 def __init__(self, documents):
65 self.documents = documents
66 self.history = []
67 # ๊ฐ์ ์๋ฒ ๋ฉ (์ค์ ๋ก๋ ๋ชจ๋ธ ์ฌ์ฉ)
68 self.embeddings = np.random.randn(len(documents), 128)
69
70 def retrieve(self, query, top_k=2):
71 """๊ด๋ จ ๋ฌธ์ ๊ฒ์"""
72 query_emb = np.random.randn(128)
73 similarities = np.dot(self.embeddings, query_emb) / (
74 np.linalg.norm(self.embeddings, axis=1) * np.linalg.norm(query_emb)
75 )
76 top_indices = np.argsort(similarities)[-top_k:][::-1]
77 return [self.documents[i] for i in top_indices]
78
79 def chat(self, question):
80 """RAG ๊ธฐ๋ฐ ๋ต๋ณ ์์ฑ"""
81 # ๊ฒ์
82 relevant_docs = self.retrieve(question)
83 context = "\n".join(relevant_docs)
84
85 # ํ๋กฌํํธ ๊ตฌ์ฑ
86 prompt = f"""Context:
87{context}
88
89History:
90{self._format_history()}
91
92Question: {question}
93
94Answer:"""
95
96 # ์ค์ ๋ก๋ LLM ํธ์ถ
97 response = f"[์ปจํ
์คํธ ๊ธฐ๋ฐ ์๋ต] {relevant_docs[0][:50]}..."
98
99 # ํ์คํ ๋ฆฌ ์
๋ฐ์ดํธ
100 self.history.append({"role": "user", "content": question})
101 self.history.append({"role": "assistant", "content": response})
102
103 return response
104
105 def _format_history(self, max_turns=3):
106 recent = self.history[-max_turns*2:]
107 return "\n".join([f"{m['role']}: {m['content']}" for m in recent])
108
109# ํ
์คํธ
110documents = [
111 "Python is a programming language created by Guido van Rossum.",
112 "Machine learning is a type of artificial intelligence.",
113 "Deep learning uses neural networks with many layers."
114]
115
116rag_bot = RAGChatbot(documents)
117print(rag_bot.chat("What is Python?"))
118print(rag_bot.chat("Tell me more about it"))
119
120
121# ============================================
122# 3. ์๋ ๋ถ๋ฅ
123# ============================================
124print("\n[3] ์๋ ๋ถ๋ฅ")
125print("-" * 40)
126
127class IntentClassifier:
128 """๊ท์น ๊ธฐ๋ฐ ์๋ ๋ถ๋ฅ (์ค์ ๋ก๋ LLM ์ฌ์ฉ)"""
129
130 def __init__(self):
131 self.intents = {
132 "greeting": ["hello", "hi", "hey", "์๋
"],
133 "goodbye": ["bye", "goodbye", "์๊ฐ"],
134 "help": ["help", "๋์", "how do i"],
135 "question": ["what", "why", "how", "when", "๋ฌด์", "์"]
136 }
137
138 def classify(self, text):
139 text_lower = text.lower()
140 for intent, keywords in self.intents.items():
141 if any(kw in text_lower for kw in keywords):
142 return intent
143 return "general"
144
145classifier = IntentClassifier()
146test_texts = ["Hello!", "What is AI?", "Goodbye", "Help me please"]
147for text in test_texts:
148 intent = classifier.classify(text)
149 print(f" [{intent}] {text}")
150
151
152# ============================================
153# 4. ๋ํ ์ํ ๊ด๋ฆฌ
154# ============================================
155print("\n[4] ๋ํ ์ํ ๊ด๋ฆฌ")
156print("-" * 40)
157
158from enum import Enum
159from dataclasses import dataclass, field
160from typing import Dict, List, Any
161
162class State(Enum):
163 GREETING = "greeting"
164 COLLECTING = "collecting"
165 CONFIRMING = "confirming"
166 DONE = "done"
167
168@dataclass
169class ConversationState:
170 state: State = State.GREETING
171 slots: Dict[str, Any] = field(default_factory=dict)
172
173class StatefulBot:
174 def __init__(self):
175 self.context = ConversationState()
176 self.required_slots = ["name", "email"]
177
178 def process(self, message):
179 if self.context.state == State.GREETING:
180 self.context.state = State.COLLECTING
181 return "์๋
ํ์ธ์! ์ด๋ฆ์ ์๋ ค์ฃผ์ธ์."
182
183 elif self.context.state == State.COLLECTING:
184 # ์ฌ๋กฏ ์ถ์ถ (๊ฐ๋จํ ์์)
185 if "name" not in self.context.slots:
186 self.context.slots["name"] = message
187 return "์ด๋ฉ์ผ ์ฃผ์๋ฅผ ์๋ ค์ฃผ์ธ์."
188 elif "email" not in self.context.slots:
189 self.context.slots["email"] = message
190 self.context.state = State.CONFIRMING
191 return f"ํ์ธ: {self.context.slots}. ๋ง์ต๋๊น? (์/์๋์ค)"
192
193 elif self.context.state == State.CONFIRMING:
194 if "์" in message.lower() or "yes" in message.lower():
195 self.context.state = State.DONE
196 return "๊ฐ์ฌํฉ๋๋ค! ์ฒ๋ฆฌ ์๋ฃ๋์์ต๋๋ค."
197 else:
198 self.context = ConversationState()
199 return "์ฒ์๋ถํฐ ๋ค์ ์์ํฉ๋๋ค. ์ด๋ฆ์ ์๋ ค์ฃผ์ธ์."
200
201 return "๋ฌด์์ ๋์๋๋ฆด๊น์?"
202
203# ํ
์คํธ
204stateful_bot = StatefulBot()
205print(stateful_bot.process("์์"))
206print(stateful_bot.process("ํ๊ธธ๋"))
207print(stateful_bot.process("hong@example.com"))
208print(stateful_bot.process("์"))
209
210
211# ============================================
212# 5. OpenAI ์ฑ๋ด (์ฝ๋ ์์)
213# ============================================
214print("\n[5] OpenAI ์ฑ๋ด (์ฝ๋)")
215print("-" * 40)
216
217openai_bot_code = '''
218from openai import OpenAI
219
220class OpenAIChatbot:
221 def __init__(self, system_prompt="You are a helpful assistant."):
222 self.client = OpenAI()
223 self.system_prompt = system_prompt
224 self.history = []
225
226 def chat(self, message):
227 # ๋ฉ์์ง ๊ตฌ์ฑ
228 messages = [{"role": "system", "content": self.system_prompt}]
229 messages.extend(self.history)
230 messages.append({"role": "user", "content": message})
231
232 # API ํธ์ถ
233 response = self.client.chat.completions.create(
234 model="gpt-3.5-turbo",
235 messages=messages,
236 temperature=0.7
237 )
238
239 assistant_msg = response.choices[0].message.content
240
241 # ํ์คํ ๋ฆฌ ์
๋ฐ์ดํธ
242 self.history.append({"role": "user", "content": message})
243 self.history.append({"role": "assistant", "content": assistant_msg})
244
245 return assistant_msg
246
247 def chat_stream(self, message):
248 """์คํธ๋ฆฌ๋ฐ ์๋ต"""
249 messages = [{"role": "system", "content": self.system_prompt}]
250 messages.extend(self.history)
251 messages.append({"role": "user", "content": message})
252
253 stream = self.client.chat.completions.create(
254 model="gpt-3.5-turbo",
255 messages=messages,
256 stream=True
257 )
258
259 full_response = ""
260 for chunk in stream:
261 if chunk.choices[0].delta.content:
262 content = chunk.choices[0].delta.content
263 full_response += content
264 yield content
265
266 self.history.append({"role": "user", "content": message})
267 self.history.append({"role": "assistant", "content": full_response})
268'''
269print(openai_bot_code)
270
271
272# ============================================
273# 6. FastAPI ์๋ฒ (์ฝ๋)
274# ============================================
275print("\n[6] FastAPI ์๋ฒ (์ฝ๋)")
276print("-" * 40)
277
278fastapi_code = '''
279from fastapi import FastAPI
280from pydantic import BaseModel
281
282app = FastAPI()
283sessions = {}
284
285class ChatRequest(BaseModel):
286 session_id: str
287 message: str
288
289@app.post("/chat")
290async def chat(request: ChatRequest):
291 if request.session_id not in sessions:
292 sessions[request.session_id] = OpenAIChatbot()
293
294 bot = sessions[request.session_id]
295 response = bot.chat(request.message)
296
297 return {"response": response}
298
299@app.delete("/session/{session_id}")
300async def clear_session(session_id: str):
301 if session_id in sessions:
302 del sessions[session_id]
303 return {"status": "cleared"}
304
305# ์คํ: uvicorn main:app --reload
306'''
307print(fastapi_code)
308
309
310# ============================================
311# 7. Gradio UI (์ฝ๋)
312# ============================================
313print("\n[7] Gradio UI (์ฝ๋)")
314print("-" * 40)
315
316gradio_code = '''
317import gradio as gr
318
319def respond(message, history):
320 # ์ฑ๋ด ์๋ต ์์ฑ
321 response = bot.chat(message)
322 return response
323
324demo = gr.ChatInterface(
325 fn=respond,
326 title="AI Chatbot",
327 description="Ask me anything!",
328 examples=["Hello!", "What is AI?"],
329 theme="soft"
330)
331
332demo.launch()
333'''
334print(gradio_code)
335
336
337# ============================================
338# ์ ๋ฆฌ
339# ============================================
340print("\n" + "=" * 60)
341print("์ฑ๋ด ์ ๋ฆฌ")
342print("=" * 60)
343
344summary = """
345์ฑ๋ด ๊ตฌ์ฑ์์:
346 1. ๋ํ ํ์คํ ๋ฆฌ ๊ด๋ฆฌ
347 2. ์๋ ๋ถ๋ฅ
348 3. ์ฌ๋กฏ ์ถ์ถ
349 4. ์ํ ๊ด๋ฆฌ
350 5. RAG (๋ฌธ์ ๊ธฐ๋ฐ)
351 6. LLM ํธ์ถ
352
353ํต์ฌ ํจํด:
354 # ๊ธฐ๋ณธ ๋ํ
355 messages = [system] + history + [user_message]
356 response = llm.invoke(messages)
357
358 # RAG
359 context = retrieve(query)
360 response = llm.invoke(context + query)
361
362 # ์คํธ๋ฆฌ๋ฐ
363 for chunk in llm.stream(messages):
364 yield chunk
365
366๋ฐฐํฌ:
367 - FastAPI: REST API ์๋ฒ
368 - Gradio: ๋น ๋ฅธ UI ํ๋กํ ํ์
369 - Streamlit: ๋์๋ณด๋ ์คํ์ผ
370"""
371print(summary)