1"""
2Foundation Models - Prompt Engineering Techniques
3
4Demonstrates various prompt engineering patterns and strategies.
5Shows zero-shot, few-shot, chain-of-thought, and self-consistency approaches.
6Focuses on prompt structure and format - no actual LLM API calls.
7
8No external dependencies.
9"""
10
11import json
12import re
13from typing import List, Dict, Any
14
15
class PromptTemplate:
    """Base class for prompt templates.

    Holds a ``str.format``-style template string; placeholders are
    substituted with the keyword arguments supplied to :meth:`format`.
    """

    def __init__(self, template: str):
        # Raw template text with {placeholder} fields left unfilled.
        self.template = template

    def format(self, **kwargs) -> str:
        """Substitute *kwargs* into the template and return the result."""
        return self.template.format_map(kwargs)
25
26
class ZeroShotPrompt(PromptTemplate):
    """Zero-shot prompt: direct instruction without examples."""

    def __init__(self, task_description: str):
        # Only the {input} placeholder survives into the template; the
        # task description itself is baked in verbatim.
        parts = (task_description, "\n\nInput: {input}\n\nOutput:")
        super().__init__("".join(parts))
33
34
class FewShotPrompt(PromptTemplate):
    """Few-shot prompt: instruction followed by worked examples.

    Each example is a dict with ``input`` and ``output`` keys.  Literal
    braces in example text are escaped (doubled) so they cannot be
    mistaken for format placeholders when the template is rendered —
    without this, an example containing ``{`` or ``}`` makes
    :meth:`format` raise.
    """

    def __init__(self, task_description: str, examples: List[Dict[str, str]]):
        self.task_description = task_description
        self.examples = examples

        def esc(text: str) -> str:
            # Double literal braces so str.format() treats them as
            # literal characters rather than replacement fields.
            return text.replace("{", "{{").replace("}", "}}")

        # Build template: task, numbered examples, then the live input slot.
        template_parts = [task_description, "\n"]

        for i, ex in enumerate(examples, 1):
            template_parts.append(f"Example {i}:\n")
            template_parts.append(f"Input: {esc(ex['input'])}\n")
            template_parts.append(f"Output: {esc(ex['output'])}\n\n")

        template_parts.append("Now solve this:\n")
        template_parts.append("Input: {input}\n\nOutput:")

        super().__init__(''.join(template_parts))
54
55
class ChainOfThoughtPrompt(PromptTemplate):
    """Chain-of-thought: encourages step-by-step reasoning.

    Each example is a dict with ``question``, ``reasoning`` and
    ``answer`` keys.  Literal braces in example text are escaped so the
    final ``str.format`` call only substitutes the ``{question}``
    placeholder — without escaping, braces in an example would make
    :meth:`format` raise.
    """

    def __init__(self, task_description: str, examples: List[Dict[str, Any]]):
        self.task_description = task_description
        self.examples = examples

        def esc(value: Any) -> str:
            # Stringify (answers may be non-str) and double literal
            # braces so str.format() leaves them untouched.
            return str(value).replace("{", "{{").replace("}", "}}")

        template_parts = [task_description, "\n"]

        for i, ex in enumerate(examples, 1):
            template_parts.append(f"Example {i}:\n")
            template_parts.append(f"Q: {esc(ex['question'])}\n")
            template_parts.append("A: Let's think step by step.\n")
            template_parts.append(f"{esc(ex['reasoning'])}\n")
            template_parts.append(f"Therefore, the answer is {esc(ex['answer'])}.\n\n")

        template_parts.append("Now solve this:\n")
        template_parts.append("Q: {question}\n")
        template_parts.append("A: Let's think step by step.\n")

        super().__init__(''.join(template_parts))
77
78
class StructuredOutputPrompt(PromptTemplate):
    """Prompt for generating structured output (JSON).

    The schema is rendered as pretty-printed JSON inside the template.
    Braces in that JSON are doubled so ``str.format`` does not parse
    them as replacement fields.
    """

    def __init__(self, task_description: str, schema: Dict[str, str]):
        self.schema = schema

        # BUG FIX: json.dumps emits literal '{' and '}', which the later
        # .format(input=...) call would try to parse as replacement
        # fields and raise ValueError/KeyError.  Double the braces so
        # only the {input} placeholder is substituted.
        schema_json = json.dumps(schema, indent=2).replace("{", "{{").replace("}", "}}")

        template_parts = [
            task_description,
            "\n\nOutput format (JSON):\n",
            schema_json,
            "\n\nInput: {input}\n\nOutput JSON:\n"
        ]

        super().__init__(''.join(template_parts))
93
94
class RolePrompt(PromptTemplate):
    """Role-based prompt: assign a persona to the model."""

    def __init__(self, role: str, task: str):
        # Blank entries become the blank lines separating each section.
        lines = [
            f"You are {role}.",
            "",
            task,
            "",
            "Input: {input}",
            "",
            "Response:",
        ]
        super().__init__("\n".join(lines))
105
106
class SelfConsistencyPrompt:
    """
    Self-consistency: generate multiple reasoning paths and vote.
    (Demonstrates structure; actual sampling would need an LLM API.)
    """

    def __init__(self, base_prompt: ChainOfThoughtPrompt, num_samples: int = 3):
        self.base_prompt = base_prompt
        self.num_samples = num_samples

    def format(self, **kwargs) -> List[str]:
        """Return ``num_samples`` labelled copies of the base prompt.

        In practice each copy would be sampled with a different random
        seed or temperature to obtain diverse reasoning paths.
        """
        base = self.base_prompt.format(**kwargs)
        return [f"[Sample {i+1}]\n{base}" for i in range(self.num_samples)]

    @staticmethod
    def aggregate_answers(answers: List[str]) -> str:
        """Return the most common answer (simple majority vote).

        Answers are whitespace-stripped before counting.

        Raises:
            ValueError: if *answers* is empty (no votes to count).
        """
        from collections import Counter

        if not answers:
            # Explicit guard: most_common(1)[0] on an empty Counter
            # would raise an opaque IndexError.
            raise ValueError("aggregate_answers() requires at least one answer")

        # Extract final answers (simplified) and tally the votes.
        vote = Counter(a.strip() for a in answers)
        return vote.most_common(1)[0][0]
138
139
140# ============================================================
141# Demonstrations
142# ============================================================
143
def demo_zero_shot():
    """Demonstrate zero-shot prompting."""
    divider = "=" * 60
    print(divider)
    print("DEMO 1: Zero-Shot Prompting")
    print(divider)

    sentiment_prompt = ZeroShotPrompt(
        "Classify the sentiment of the following text as Positive, Negative, or Neutral."
    )

    samples = (
        "This product is amazing! Highly recommend.",
        "Terrible experience, would not buy again.",
        "The item arrived on time and matches the description.",
    )

    # Render the same instruction against each sample input.
    for text in samples:
        print(f"\n{sentiment_prompt.format(input=text)}\n")
        print("-" * 60)
164
165
def demo_few_shot():
    """Demonstrate few-shot prompting."""
    print("\n" + "=" * 60)
    print("DEMO 2: Few-Shot Prompting")
    print("=" * 60)

    shots = [
        {"input": "I love this!", "output": "Positive"},
        {"input": "This is awful.", "output": "Negative"},
        {"input": "It's okay.", "output": "Neutral"},
    ]

    sentiment_prompt = FewShotPrompt(
        "Classify the sentiment of the text as Positive, Negative, or Neutral.",
        shots,
    )

    rendered = sentiment_prompt.format(input="Great value for money!")
    print(f"\n{rendered}\n")
187
188
def demo_chain_of_thought():
    """Demonstrate chain-of-thought prompting."""
    print("\n" + "=" * 60)
    print("DEMO 3: Chain-of-Thought Prompting")
    print("=" * 60)

    worked_examples = [
        {
            "question": "If a train travels 60 miles in 1 hour, how far does it travel in 2.5 hours?",
            "reasoning": "The train travels 60 miles per hour. For 2.5 hours, we multiply: 60 × 2.5 = 150 miles.",
            "answer": "150 miles",
        },
        {
            "question": "A store sells apples for $2 each. If I buy 3 apples and pay with a $10 bill, how much change do I get?",
            "reasoning": "Cost of 3 apples = 3 × $2 = $6. Change = $10 - $6 = $4.",
            "answer": "$4",
        },
    ]

    cot = ChainOfThoughtPrompt(
        "Solve the following math word problems.",
        worked_examples,
    )

    question = "A book costs $15. If I buy 4 books and have a 20% discount, what is the total cost?"
    print(f"\n{cot.format(question=question)}\n")
217
218
def demo_structured_output():
    """Demonstrate structured output prompting."""
    print("\n" + "=" * 60)
    print("DEMO 4: Structured Output Prompting")
    print("=" * 60)

    output_schema = {
        "entity_name": "string",
        "entity_type": "person|organization|location",
        "sentiment": "positive|negative|neutral",
        "confidence": "float (0-1)",
    }

    extractor = StructuredOutputPrompt(
        "Extract entities from the text and analyze their sentiment.",
        output_schema,
    )

    text = "Apple Inc. released a great new product yesterday in California."
    print(f"\n{extractor.format(input=text)}\n")
241
242
def demo_role_based():
    """Demonstrate role-based prompting."""
    print("\n" + "=" * 60)
    print("DEMO 5: Role-Based Prompting")
    print("=" * 60)

    personas = (
        ("a helpful AI assistant", "Answer the following question concisely."),
        ("a technical expert in machine learning", "Explain the following concept in detail."),
        ("a friendly teacher explaining to a 10-year-old", "Explain the following concept simply."),
    )

    question = "What is neural network?"

    # Same question, rendered under each persona/instruction pair.
    for persona, instruction in personas:
        rendered = RolePrompt(persona, instruction).format(input=question)
        print(f"\n{rendered}\n")
        print("-" * 60)
263
264
def demo_self_consistency():
    """Demonstrate self-consistency approach."""
    print("\n" + "=" * 60)
    print("DEMO 6: Self-Consistency")
    print("=" * 60)

    cot_examples = [
        {
            "question": "Is 17 a prime number?",
            "reasoning": "A prime number is only divisible by 1 and itself. Let's check divisors of 17: 2, 3, 5, 7, 11, 13 don't divide 17 evenly. Only 1 and 17 divide it.",
            "answer": "Yes",
        }
    ]

    reasoning_prompt = ChainOfThoughtPrompt(
        "Determine if the number is prime.",
        cot_examples,
    )

    sampler = SelfConsistencyPrompt(reasoning_prompt, num_samples=3)
    rendered_prompts = sampler.format(question="Is 21 a prime number?")

    print("\nGenerating multiple reasoning paths:")
    print("=" * 60)

    for rendered in rendered_prompts:
        print(f"\n{rendered}\n")
        print("-" * 60)

    # Simulate different answers
    simulated_answers = ["No", "No", "Yes"]  # Majority: No

    winner = SelfConsistencyPrompt.aggregate_answers(simulated_answers)
    print(f"\nSimulated answers: {simulated_answers}")
    print(f"Aggregated answer (majority vote): {winner}")
302
303
def demo_instruction_optimization():
    """Demonstrate instruction clarity impact."""
    print("\n" + "=" * 60)
    print("DEMO 7: Instruction Optimization")
    print("=" * 60)

    review = "The movie was boring and too long."

    # Three versions of the same task, from vague to precise.
    vague = "Sentiment?"
    print(f"Poor instruction:\n{vague}\nInput: {review}\n")

    clearer = "What is the sentiment of this text? Answer with one word: Positive, Negative, or Neutral."
    print(f"\nBetter instruction:\n{clearer}\nInput: {review}\n")

    detailed = """Classify the sentiment of the following movie review.

Instructions:
1. Read the review carefully
2. Identify emotional tone and opinion
3. Classify as: Positive, Negative, or Neutral
4. Respond with only the classification label

Review: {input}

Classification:"""

    print(f"\nBest instruction (detailed):\n{detailed.format(input=review)}\n")
334
335
def demo_constraint_specification():
    """Demonstrate specifying output constraints."""
    print("\n" + "=" * 60)
    print("DEMO 8: Output Constraint Specification")
    print("=" * 60)

    task = "Summarize the following text."
    passage = "Artificial intelligence has transformed many industries. Machine learning algorithms can now perform complex tasks that previously required human intelligence. Deep learning, a subset of machine learning, uses neural networks with multiple layers to learn representations of data."

    # Baseline: no constraints at all.
    unconstrained = f"{task}\n\nText: {passage}\n\nSummary:"
    print(f"Without constraints:\n{unconstrained}\n")

    # Each variant appends a constraint sentence to the task; the
    # format variant also seeds the output with a leading bullet.
    variants = (
        ("length", "Use exactly one sentence, maximum 20 words.", ""),
        ("format", "Format as bullet points (max 3 points).", "\n-"),
        ("style", "Explain like I'm 5 years old.", ""),
    )

    for label, constraint, seed in variants:
        constrained = f"{task} {constraint}\n\nText: {passage}\n\nSummary:{seed}"
        print(f"\nWith {label} constraint:\n{constrained}\n")
360
361
def demo_prompt_chaining():
    """Demonstrate multi-step prompt chaining."""
    print("\n" + "=" * 60)
    print("DEMO 9: Prompt Chaining")
    print("=" * 60)

    customer_message = "I bought a laptop yesterday but the screen is broken. I want a refund."

    # Step 1: classify the message into one of a fixed set of intents.
    intent_prompt = f"""Identify the customer's intent from this message.
Possible intents: refund_request, product_inquiry, complaint, compliment

Message: {customer_message}

Intent:"""

    print(f"Step 1 - Intent Detection:\n{intent_prompt}\n")

    # Simulated model output from step 1.
    detected_intent = "refund_request"

    # Step 2: pull out the entities needed to act on that intent.
    entity_prompt = f"""Extract relevant entities for a {detected_intent}.
Required entities: product, issue, timeframe

Message: {customer_message}

Entities (JSON):"""

    print(f"\nStep 2 - Entity Extraction:\n{entity_prompt}\n")

    # Simulated model output from step 2.
    extracted = {"product": "laptop", "issue": "broken screen", "timeframe": "yesterday"}

    # Step 3: combine the outputs of steps 1 and 2 into a reply prompt.
    reply_prompt = f"""Generate a customer service response for a {detected_intent}.

Details:
- Product: {extracted['product']}
- Issue: {extracted['issue']}
- Purchase time: {extracted['timeframe']}

Response:"""

    print(f"\nStep 3 - Response Generation:\n{reply_prompt}\n")
407
408
if __name__ == "__main__":
    banner = "=" * 60
    print("\n" + banner)
    print("Foundation Models: Prompt Engineering")
    print(banner)

    # Run every demo in presentation order.
    for demo in (
        demo_zero_shot,
        demo_few_shot,
        demo_chain_of_thought,
        demo_structured_output,
        demo_role_based,
        demo_self_consistency,
        demo_instruction_optimization,
        demo_constraint_specification,
        demo_prompt_chaining,
    ):
        demo()

    print("\n" + banner)
    print("Key Takeaways:")
    print(banner)
    for takeaway in (
        "1. Zero-shot: Direct instruction (simple tasks)",
        "2. Few-shot: Add examples (better accuracy)",
        "3. Chain-of-thought: Step-by-step reasoning (complex problems)",
        "4. Structured output: Specify exact format (JSON, etc.)",
        "5. Role-based: Set persona for appropriate tone/style",
        "6. Self-consistency: Multiple samples + voting (robustness)",
        "7. Clear instructions > vague instructions",
        "8. Constraints: Length, format, style specifications",
        "9. Chaining: Break complex tasks into steps",
    ):
        print(takeaway)
    print(banner)