-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_eval.py
More file actions
executable file
·363 lines (295 loc) · 12.7 KB
/
test_eval.py
File metadata and controls
executable file
·363 lines (295 loc) · 12.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
#!/usr/bin/env python3
"""
Test script to verify KnowledgeOps AI evaluation system
"""
import os
import sys
from pathlib import Path
def test_eval_imports():
    """Smoke-test that all evaluation components and their third-party
    dependencies can be imported.

    Returns:
        bool: True when every import succeeds, False otherwise.
    """
    print("📦 Testing Evaluation Imports...")
    try:
        # Project-local evaluation classes.
        from scripts.eval import RAGEvaluator, EvaluationResult
        print(" ✅ Evaluation classes imported")

        # Third-party libraries the evaluator relies on at runtime.
        import pandas as pd
        import numpy as np
        import matplotlib.pyplot as plt
        import seaborn as sns
        from jinja2 import Template
        print(" ✅ Additional dependencies imported")

        print("✅ All evaluation imports successful")
        return True
    except Exception as e:
        # Any failed import (missing package, syntax error) lands here.
        print(f"❌ Import error: {e}")
        return False
def test_eval_classes():
    """Exercise the evaluation class definitions.

    Constructs an EvaluationResult, instantiates a RAGEvaluator, and
    verifies that the expected public methods exist on the class.

    Returns:
        bool: True when construction works and every method check passes.
    """
    print("\n🎯 Testing Evaluation Classes...")
    try:
        from scripts.eval import RAGEvaluator, EvaluationResult

        # A fully-populated instance proves the dataclass signature.
        sample = EvaluationResult(
            question="What is machine learning?",
            gold_doc_id="doc_001",
            gold_text="Machine learning is a subset of AI",
            answer="Machine learning is a subset of artificial intelligence",
            faithfulness_score=0.85,
            processing_time=2.5,
        )
        print(" ✅ EvaluationResult dataclass works")

        # Constructor accepts the full keyword configuration.
        evaluator = RAGEvaluator(
            api_base_url="http://localhost:8000",
            openai_api_key="fake-key",
            org_id="test-org",
        )
        print(" ✅ RAGEvaluator initialization works")

        # Each expected method must be present on the class itself.
        checks = [
            ("RAGEvaluator methods", hasattr(RAGEvaluator, 'load_evaluation_data')),
            ("RAGEvaluator compute_relevance", hasattr(RAGEvaluator, 'compute_relevance_scores')),
            ("RAGEvaluator compute_faithfulness", hasattr(RAGEvaluator, 'compute_faithfulness_score')),
            ("RAGEvaluator generate_report", hasattr(RAGEvaluator, 'generate_html_report')),
        ]
        for check_name, ok in checks:
            print(f" ✅ {check_name}" if ok else f" ❌ {check_name}")

        failures = [name for name, ok in checks if not ok]
        if not failures:
            print("✅ All evaluation classes are properly defined")
            return True
        print(f"❌ {len(failures)} class checks failed")
        return False
    except Exception as e:
        print(f"❌ Class test error: {e}")
        return False
def test_csv_loading():
    """Test CSV data loading functionality.

    Uses scripts/sample_eval_data.csv when present; otherwise writes a
    temporary test_eval_data.csv fixture, loads it, and validates the
    structure of the first row.

    Returns:
        bool: True when loading and structure validation succeed.
    """
    print("\n📁 Testing CSV Loading...")
    temp_csv = None  # path of the throwaway fixture, if we had to create one
    try:
        from scripts.eval import RAGEvaluator

        evaluator = RAGEvaluator()

        sample_csv = Path("scripts/sample_eval_data.csv")
        if not sample_csv.exists():
            print(" ⚠️ Sample CSV not found, creating test data")
            # Minimal two-row fixture with the expected columns.
            test_csv_content = """question,gold_doc_id,gold_text
"What is AI?",doc_001,"Artificial intelligence is the simulation of human intelligence"
"How does ML work?",doc_002,"Machine learning uses algorithms to learn from data"
"""
            with open("test_eval_data.csv", "w") as f:
                f.write(test_csv_content)
            temp_csv = Path("test_eval_data.csv")
            sample_csv = temp_csv

        data = evaluator.load_evaluation_data(str(sample_csv))
        print(f" ✅ Loaded {len(data)} examples from CSV")

        # Validate the structure of the first loaded example, if any.
        if data:
            first_example = data[0]
            required_fields = ['question']
            optional_fields = ['gold_doc_id', 'gold_text']
            for field in required_fields:
                if field not in first_example:
                    print(f" ❌ Missing required field: {field}")
                    return False
            # At least one gold reference (doc id or text) must be present.
            has_optional = any(field in first_example for field in optional_fields)
            if not has_optional:
                print(" ❌ Missing optional fields (gold_doc_id or gold_text)")
                return False
            print(" ✅ CSV structure validation passed")

        print("✅ CSV loading functionality works")
        return True
    except Exception as e:
        print(f"❌ CSV loading error: {e}")
        return False
    finally:
        # BUGFIX: the temporary fixture previously leaked on every early
        # `return False` and on the exception path (cleanup ran only on
        # the straight-line success path). Remove it on every exit.
        if temp_csv is not None and temp_csv.exists():
            temp_csv.unlink()
def test_relevance_computation():
    """Test relevance score computation.

    Computes relevance scores against a fixed set of retrieved chunks by
    gold document id and by gold text, then checks relevance@k aggregation.

    Returns:
        bool: True when every relevance computation behaves as expected.
    """
    print("\n📊 Testing Relevance Computation...")
    try:
        from scripts.eval import RAGEvaluator

        evaluator = RAGEvaluator()

        query = "What is machine learning?"
        chunks = [
            {"document_id": "doc_001", "text": "Machine learning is a subset of AI"},
            {"document_id": "doc_002", "text": "Neural networks are computing systems"},
            {"document_id": "doc_003", "text": "Deep learning uses multiple layers"},
        ]

        # Matching against a gold document id must score every chunk.
        scores = evaluator.compute_relevance_scores(
            query, chunks, gold_doc_id="doc_001"
        )
        if len(scores) != 3:
            print(" ❌ Incorrect number of relevance scores")
            return False
        print(" ✅ Relevance scores computed for document ID matching")

        # Matching against gold text must also score every chunk.
        scores = evaluator.compute_relevance_scores(
            query, chunks, gold_text="Machine learning is a subset of artificial intelligence"
        )
        if len(scores) != 3:
            print(" ❌ Incorrect number of relevance scores")
            return False
        print(" ✅ Relevance scores computed for text similarity")

        # Relevance@k must produce an entry for each requested k.
        k_values = [1, 3, 5]
        at_k = evaluator.compute_relevance_at_k(scores, k_values)
        if all(k in at_k for k in k_values):
            print(" ✅ Relevance@k computation works")
        else:
            print(" ❌ Relevance@k computation failed")
            return False

        print("✅ Relevance computation functionality works")
        return True
    except Exception as e:
        print(f"❌ Relevance computation error: {e}")
        return False
def test_faithfulness_computation():
    """Test faithfulness score computation.

    Scores a sample answer against its retrieved context and checks the
    result is a valid probability in [0, 1].

    Returns:
        bool: True when the computed score is within range.
    """
    print("\n🎯 Testing Faithfulness Computation...")
    try:
        from scripts.eval import RAGEvaluator

        evaluator = RAGEvaluator()

        query = "What is machine learning?"
        generated = "Machine learning is a subset of artificial intelligence that enables computers to learn from data."
        context = [
            {"text": "Machine learning is a subset of artificial intelligence that enables computers to learn and make decisions without being explicitly programmed."}
        ]

        # Without an OpenAI key this is expected to fall back to a default;
        # either way the score must be a valid probability.
        score = evaluator.compute_faithfulness_score(query, generated, context)
        if not 0.0 <= score <= 1.0:
            print(f" ❌ Invalid faithfulness score: {score}")
            return False
        print(f" ✅ Faithfulness score computed: {score}")

        print("✅ Faithfulness computation functionality works")
        return True
    except Exception as e:
        print(f"❌ Faithfulness computation error: {e}")
        return False
def test_html_report_generation():
    """Test HTML report generation.

    Generates a report from two synthetic EvaluationResults, verifies the
    output file exists and is a plausible size, and removes the file on
    every exit path.

    Returns:
        bool: True when the report is generated successfully.
    """
    print("\n📄 Testing HTML Report Generation...")
    output_path = "test_evaluation_report.html"
    try:
        from scripts.eval import RAGEvaluator, EvaluationResult

        evaluator = RAGEvaluator()

        # Two synthetic results with per-chunk scores and @k aggregates.
        results = [
            EvaluationResult(
                question="What is machine learning?",
                gold_doc_id="doc_001",
                answer="Machine learning is a subset of AI",
                relevance_scores=[1.0, 0.3, 0.1],
                relevance_at_k={1: 1.0, 3: 0.47, 5: 0.28},
                faithfulness_score=0.85,
                processing_time=2.5,
            ),
            EvaluationResult(
                question="How does neural networks work?",
                gold_doc_id="doc_002",
                answer="Neural networks are computing systems",
                relevance_scores=[0.8, 0.6, 0.2],
                relevance_at_k={1: 0.8, 3: 0.53, 5: 0.32},
                faithfulness_score=0.92,
                processing_time=3.1,
            ),
        ]

        evaluator.generate_html_report(results, output_path)

        if not os.path.exists(output_path):
            print(" ❌ HTML report not generated")
            return False
        print(" ✅ HTML report generated successfully")

        # A real report should be substantial; warn (don't fail) if tiny.
        file_size = os.path.getsize(output_path)
        if file_size > 1000:
            print(f" ✅ HTML report size: {file_size} bytes")
        else:
            print(f" ⚠️ HTML report seems small: {file_size} bytes")

        print("✅ HTML report generation functionality works")
        return True
    except Exception as e:
        print(f"❌ HTML report generation error: {e}")
        return False
    finally:
        # BUGFIX: the generated report previously leaked if an exception
        # fired after generation; clean it up on every exit path.
        if os.path.exists(output_path):
            os.remove(output_path)
            print(" ✅ Test file cleaned up")
def test_api_integration():
    """Test API integration configuration.

    Verifies that a RAGEvaluator constructed with an API base URL stores
    that URL on the instance.

    Returns:
        bool: True when the URL round-trips through the constructor.
    """
    print("\n🌐 Testing API Integration...")
    try:
        from scripts.eval import RAGEvaluator

        base_url = "http://localhost:8000"
        evaluator = RAGEvaluator(api_base_url=base_url)

        # The configured URL must be readable back off the instance.
        if evaluator.api_base_url != base_url:
            print(" ❌ API URL configuration failed")
            return False
        print(" ✅ API URL configuration works")

        print("✅ API integration configuration works")
        return True
    except Exception as e:
        print(f"❌ API integration error: {e}")
        return False
def main():
    """Run every evaluation test in order and report a summary.

    Exits with status 1 when any test fails; prints follow-up usage
    instructions when all tests pass.
    """
    print("🚀 Testing KnowledgeOps AI Evaluation System")
    print("=" * 50)

    suite = [
        ("Evaluation Imports", test_eval_imports),
        ("Evaluation Classes", test_eval_classes),
        ("CSV Loading", test_csv_loading),
        ("Relevance Computation", test_relevance_computation),
        ("Faithfulness Computation", test_faithfulness_computation),
        ("HTML Report Generation", test_html_report_generation),
        ("API Integration", test_api_integration),
    ]

    passed = 0
    for test_name, test_func in suite:
        print(f"\n{'='*20} {test_name} {'='*20}")
        if test_func():
            passed += 1
        else:
            print(f" ❌ {test_name} failed")

    total = len(suite)
    print("\n" + "=" * 50)
    print(f"📊 Results: {passed}/{total} tests passed")

    # Guard clause: bail out early with a non-zero status on any failure.
    if passed != total:
        print("⚠️ Some tests failed. Please check the issues above.")
        sys.exit(1)

    print("🎉 All evaluation tests passed! RAG evaluation system is ready.")
    print("\n🚀 Next steps:")
    print("1. Ensure the KnowledgeOps AI API is running:")
    print(" python run.py")
    print("\n2. Run evaluation on sample data:")
    print(" python scripts/eval.py scripts/sample_eval_data.csv --openai-key YOUR_KEY")
    print("\n3. View the generated HTML report:")
    print(" open evaluation_report.html")
    print("\n📊 Features:")
    print(" - Relevance@k scoring (k=1,3,5,10)")
    print(" - Faithfulness scoring with LLM-as-judge")
    print(" - Comprehensive HTML reports with charts")
    print(" - Processing time tracking")
    print(" - Error handling and reporting")
# Script entry point: run the full evaluation test suite.
if __name__ == "__main__":
    main()