  1. """MCP Server Evaluation Harness
  2. This script evaluates MCP servers by running test questions against them using Claude.
  3. """
  4. import argparse
  5. import asyncio
  6. import json
  7. import re
  8. import sys
  9. import time
  10. import traceback
  11. import xml.etree.ElementTree as ET
  12. from pathlib import Path
  13. from typing import Any
  14. from anthropic import Anthropic
  15. from connections import create_connection

EVALUATION_PROMPT = """You are an AI assistant with access to tools.

When given a task, you MUST:
1. Use the available tools to complete the task
2. Provide a summary of each step in your approach, wrapped in <summary> tags
3. Provide feedback on the tools provided, wrapped in <feedback> tags
4. Provide your final response, wrapped in <response> tags

Summary Requirements:
- In your <summary> tags, you must explain:
  - The steps you took to complete the task
  - Which tools you used, in what order, and why
  - The inputs you provided to each tool
  - The outputs you received from each tool
  - A summary of how you arrived at the response

Feedback Requirements:
- In your <feedback> tags, provide constructive feedback on the tools:
  - Comment on tool names: Are they clear and descriptive?
  - Comment on input parameters: Are they well-documented? Are required vs. optional parameters clear?
  - Comment on descriptions: Do they accurately describe what the tool does?
  - Comment on any errors encountered during tool usage: Did the tool fail to execute? Did the tool return too many tokens?
- Identify specific areas for improvement and explain WHY they would help
- Be specific and actionable in your suggestions

Response Requirements:
- Your response should be concise and directly address what was asked
- Always wrap your final response in <response> tags
- If you cannot solve the task, return <response>NOT_FOUND</response>
- For numeric responses, provide just the number
- For IDs, provide just the ID
- For names or text, provide the exact text requested
- Your response should come last"""
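
# parse_evaluation_file() below expects the evaluation file to contain <qa_pair>
# elements, each with <question> and <answer> children. A minimal illustrative file
# (the root element name is arbitrary, since the parser searches the whole tree):
#
#   <evaluation>
#     <qa_pair>
#       <question>How many open tickets are assigned to the on-call engineer?</question>
#       <answer>7</answer>
#     </qa_pair>
#   </evaluation>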

def parse_evaluation_file(file_path: Path) -> list[dict[str, Any]]:
    """Parse XML evaluation file with qa_pair elements."""
    try:
        tree = ET.parse(file_path)
        root = tree.getroot()
        evaluations = []
        for qa_pair in root.findall(".//qa_pair"):
            question_elem = qa_pair.find("question")
            answer_elem = qa_pair.find("answer")
            if question_elem is not None and answer_elem is not None:
                evaluations.append({
                    "question": (question_elem.text or "").strip(),
                    "answer": (answer_elem.text or "").strip(),
                })
        return evaluations
    except Exception as e:
        print(f"Error parsing evaluation file {file_path}: {e}")
        return []

def extract_xml_content(text: str, tag: str) -> str | None:
    """Extract content from XML tags."""
    pattern = rf"<{tag}>(.*?)</{tag}>"
    matches = re.findall(pattern, text, re.DOTALL)
    return matches[-1].strip() if matches else None
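
# Example: extract_xml_content("done <response>42</response>", "response") -> "42".
# If the model emits the same tag more than once, the last occurrence wins.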

async def agent_loop(
    client: Anthropic,
    model: str,
    question: str,
    tools: list[dict[str, Any]],
    connection: Any,
) -> tuple[str | None, dict[str, Any]]:
    """Run the agent loop with MCP tools."""
    messages: list[dict[str, Any]] = [{"role": "user", "content": question}]
    response = await asyncio.to_thread(
        client.messages.create,
        model=model,
        max_tokens=4096,
        system=EVALUATION_PROMPT,
        messages=messages,
        tools=tools,
    )
    messages.append({"role": "assistant", "content": response.content})
    tool_metrics: dict[str, dict[str, Any]] = {}
    while response.stop_reason == "tool_use":
        # Answer every tool_use block in the turn so each one gets a matching tool_result.
        tool_results = []
        for tool_use in (block for block in response.content if block.type == "tool_use"):
            tool_name = tool_use.name
            tool_input = tool_use.input
            tool_start_ts = time.time()
            try:
                tool_result = await connection.call_tool(tool_name, tool_input)
                tool_response = json.dumps(tool_result) if isinstance(tool_result, (dict, list)) else str(tool_result)
            except Exception as e:
                tool_response = f"Error executing tool {tool_name}: {str(e)}\n"
                tool_response += traceback.format_exc()
            tool_duration = time.time() - tool_start_ts
            # Record per-tool call counts and latencies for the report.
            metrics = tool_metrics.setdefault(tool_name, {"count": 0, "durations": []})
            metrics["count"] += 1
            metrics["durations"].append(tool_duration)
            tool_results.append({
                "type": "tool_result",
                "tool_use_id": tool_use.id,
                "content": tool_response,
            })
        messages.append({"role": "user", "content": tool_results})
        response = await asyncio.to_thread(
            client.messages.create,
            model=model,
            max_tokens=4096,
            system=EVALUATION_PROMPT,
            messages=messages,
            tools=tools,
        )
        messages.append({"role": "assistant", "content": response.content})
    # The final turn may contain no text block, in which case None is returned.
    response_text = next(
        (block.text for block in response.content if hasattr(block, "text")),
        None,
    )
    return response_text, tool_metrics

async def evaluate_single_task(
    client: Anthropic,
    model: str,
    qa_pair: dict[str, Any],
    tools: list[dict[str, Any]],
    connection: Any,
    task_index: int,
) -> dict[str, Any]:
    """Evaluate a single QA pair with the given tools."""
    start_time = time.time()
    print(f"Task {task_index + 1}: Running task with question: {qa_pair['question']}")
    response, tool_metrics = await agent_loop(client, model, qa_pair["question"], tools, connection)
    # The model may return no text block at all; treat that as an empty response.
    response = response or ""
    response_value = extract_xml_content(response, "response")
    summary = extract_xml_content(response, "summary")
    feedback = extract_xml_content(response, "feedback")
    duration_seconds = time.time() - start_time
    return {
        "question": qa_pair["question"],
        "expected": qa_pair["answer"],
        "actual": response_value,
        # Exact string match against the ground-truth answer.
        "score": int(response_value == qa_pair["answer"]) if response_value else 0,
        "total_duration": duration_seconds,
        "tool_calls": tool_metrics,
        "num_tool_calls": sum(len(metrics["durations"]) for metrics in tool_metrics.values()),
        "summary": summary,
        "feedback": feedback,
    }

REPORT_HEADER = """
# Evaluation Report

## Summary

- **Accuracy**: {correct}/{total} ({accuracy:.1f}%)
- **Average Task Duration**: {average_duration_s:.2f}s
- **Average Tool Calls per Task**: {average_tool_calls:.2f}
- **Total Tool Calls**: {total_tool_calls}

---
"""

TASK_TEMPLATE = """
### Task {task_num}

**Question**: {question}

**Ground Truth Answer**: `{expected_answer}`

**Actual Answer**: `{actual_answer}`

**Correct**: {correct_indicator}

**Duration**: {total_duration:.2f}s

**Tool Calls**: {tool_calls}

**Summary**
{summary}

**Feedback**
{feedback}

---
"""

async def run_evaluation(
    eval_path: Path,
    connection: Any,
    model: str = "claude-3-7-sonnet-20250219",
) -> str:
    """Run evaluation with MCP server tools."""
    print("🚀 Starting Evaluation")
    client = Anthropic()
    tools = await connection.list_tools()
    print(f"📋 Loaded {len(tools)} tools from MCP server")
    qa_pairs = parse_evaluation_file(eval_path)
    print(f"📋 Loaded {len(qa_pairs)} evaluation tasks")
    results = []
    for i, qa_pair in enumerate(qa_pairs):
        print(f"Processing task {i + 1}/{len(qa_pairs)}")
        result = await evaluate_single_task(client, model, qa_pair, tools, connection, i)
        results.append(result)
    correct = sum(r["score"] for r in results)
    accuracy = (correct / len(results)) * 100 if results else 0
    average_duration_s = sum(r["total_duration"] for r in results) / len(results) if results else 0
    average_tool_calls = sum(r["num_tool_calls"] for r in results) / len(results) if results else 0
    total_tool_calls = sum(r["num_tool_calls"] for r in results)
    report = REPORT_HEADER.format(
        correct=correct,
        total=len(results),
        accuracy=accuracy,
        average_duration_s=average_duration_s,
        average_tool_calls=average_tool_calls,
        total_tool_calls=total_tool_calls,
    )
    report += "".join([
        TASK_TEMPLATE.format(
            task_num=i + 1,
            question=qa_pair["question"],
            expected_answer=qa_pair["answer"],
            actual_answer=result["actual"] or "N/A",
            correct_indicator="✅" if result["score"] else "❌",
            total_duration=result["total_duration"],
            tool_calls=json.dumps(result["tool_calls"], indent=2),
            summary=result["summary"] or "N/A",
            feedback=result["feedback"] or "N/A",
        )
        for i, (qa_pair, result) in enumerate(zip(qa_pairs, results))
    ])
    return report
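
# Programmatic use (a sketch, assuming create_connection from connections.py accepts
# the same keyword arguments used in main() below):
#
#   connection = create_connection(transport="stdio", command="python", args=["my_server.py"])
#   async with connection:
#       report = await run_evaluation(Path("eval.xml"), connection)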

def parse_headers(header_list: list[str]) -> dict[str, str]:
    """Parse header strings in format 'Key: Value' into a dictionary."""
    headers = {}
    if not header_list:
        return headers
    for header in header_list:
        if ":" in header:
            key, value = header.split(":", 1)
            headers[key.strip()] = value.strip()
        else:
            print(f"Warning: Ignoring malformed header: {header}")
    return headers


def parse_env_vars(env_list: list[str]) -> dict[str, str]:
    """Parse environment variable strings in format 'KEY=VALUE' into a dictionary."""
    env = {}
    if not env_list:
        return env
    for env_var in env_list:
        if "=" in env_var:
            key, value = env_var.split("=", 1)
            env[key.strip()] = value.strip()
        else:
            print(f"Warning: Ignoring malformed environment variable: {env_var}")
    return env
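
# Examples (values are illustrative):
#   parse_headers(["Authorization: Bearer abc123"]) -> {"Authorization": "Bearer abc123"}
#   parse_env_vars(["API_KEY=abc123"]) -> {"API_KEY": "abc123"}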

async def main():
    parser = argparse.ArgumentParser(
        description="Evaluate MCP servers using test questions",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Evaluate a local stdio MCP server
  python evaluation.py -t stdio -c python -a my_server.py eval.xml

  # Evaluate an SSE MCP server
  python evaluation.py -t sse -u https://example.com/mcp -H "Authorization: Bearer token" eval.xml

  # Evaluate an HTTP MCP server with a custom model
  python evaluation.py -t http -u https://example.com/mcp -m claude-3-5-sonnet-20241022 eval.xml
""",
    )
    parser.add_argument("eval_file", type=Path, help="Path to evaluation XML file")
    parser.add_argument("-t", "--transport", choices=["stdio", "sse", "http"], default="stdio", help="Transport type (default: stdio)")
    parser.add_argument("-m", "--model", default="claude-3-7-sonnet-20250219", help="Claude model to use (default: claude-3-7-sonnet-20250219)")

    stdio_group = parser.add_argument_group("stdio options")
    stdio_group.add_argument("-c", "--command", help="Command to run MCP server (stdio only)")
    stdio_group.add_argument("-a", "--args", nargs="+", help="Arguments for the command (stdio only)")
    stdio_group.add_argument("-e", "--env", nargs="+", help="Environment variables in KEY=VALUE format (stdio only)")

    remote_group = parser.add_argument_group("sse/http options")
    remote_group.add_argument("-u", "--url", help="MCP server URL (sse/http only)")
    remote_group.add_argument("-H", "--header", nargs="+", dest="headers", help="HTTP headers in 'Key: Value' format (sse/http only)")

    parser.add_argument("-o", "--output", type=Path, help="Output file for evaluation report (default: stdout)")
    args = parser.parse_args()

    if not args.eval_file.exists():
        print(f"Error: Evaluation file not found: {args.eval_file}")
        sys.exit(1)

    headers = parse_headers(args.headers) if args.headers else None
    env_vars = parse_env_vars(args.env) if args.env else None

    try:
        connection = create_connection(
            transport=args.transport,
            command=args.command,
            args=args.args,
            env=env_vars,
            url=args.url,
            headers=headers,
        )
    except ValueError as e:
        print(f"Error: {e}")
        sys.exit(1)

    print(f"🔗 Connecting to MCP server via {args.transport}...")
    async with connection:
        print("✅ Connected successfully")
        report = await run_evaluation(args.eval_file, connection, args.model)

    if args.output:
        args.output.write_text(report)
        print(f"\n✅ Report saved to {args.output}")
    else:
        print("\n" + report)


if __name__ == "__main__":
    asyncio.run(main())