feat: Add Trillium note search and creation via ETAPI
- Added `trillium.py` for searching and creating notes with Trillium's ETAPI.
- Implemented `search_notes` and `create_note` functions with appropriate error handling and validation.

feat: Add web search functionality using DuckDuckGo
- Introduced `web_search.py` for performing web searches without API keys.
- Implemented `search_web` function with result handling and validation.

feat: Create provider-agnostic function caller for iterative tool calling
- Developed `function_caller.py` to manage LLM interactions with tools.
- Implemented iterative calling logic with error handling and tool execution.

feat: Establish a tool registry for managing available tools
- Created `registry.py` to define and manage tool availability and execution.
- Integrated feature flags for enabling and disabling tools based on environment variables.

feat: Implement event streaming for tool calling processes
- Added `stream_events.py` to manage Server-Sent Events (SSE) for tool calling.
- Enabled real-time updates during tool execution for an enhanced user experience.

test: Add tests for tool calling system components
- Created `test_tools.py` to validate the code execution, web search, and tool registry components.
- Implemented asynchronous tests to ensure proper execution and result handling.

chore: Add Dockerfile for sandbox environment setup
- Created `Dockerfile` to set up a Python environment with the dependencies needed for code execution.

chore: Add debug regex script for testing XML parsing
- Introduced `debug_regex.py` to validate regex patterns against XML tool calls.

chore: Add HTML template for displaying thinking stream events
- Created `test_thinking_stream.html` for visualizing tool calling events in a user-friendly format.

test: Add tests for OllamaAdapter XML parsing
- Developed `test_ollama_parser.py` to validate XML parsing with various test cases, including malformed XML.
"""
|
|
OpenAI adapter for tool calling using native function calling API.
|
|
|
|
This adapter converts Lyra tool definitions to OpenAI's function calling
|
|
format and parses OpenAI responses back to Lyra's standardized format.
|
|
"""
|
|
|
|
import json
|
|
from typing import Dict, List, Optional
|
|
from .base import ToolAdapter
|
|
|
|
|
|
class OpenAIAdapter(ToolAdapter):
|
|
"""OpenAI-specific adapter using native function calling.
|
|
|
|
OpenAI supports function calling natively through the 'tools' parameter
|
|
in chat completions. This adapter leverages that capability.
|
|
"""
|
|
|
|
    async def prepare_request(
        self,
        messages: List[Dict],
        tools: List[Dict],
        tool_choice: Optional[str] = None
    ) -> Dict:
        """Convert Lyra tools to OpenAI function calling format.

        Args:
            messages: Conversation history
            tools: Lyra tool definitions
            tool_choice: "auto", "required", "none", or None

        Returns:
            dict: Request payload with OpenAI-formatted tools
        """
        # Convert Lyra tools → OpenAI function calling format
        openai_tools = []
        for tool in tools:
            openai_tools.append({
                "type": "function",
                "function": {
                    "name": tool["name"],
                    "description": tool["description"],
                    "parameters": {
                        "type": "object",
                        "properties": tool["parameters"],
                        "required": tool.get("required", [])
                    }
                }
            })

        payload = {
            "messages": messages,
            "tools": openai_tools
        }

        # Add tool_choice if specified
        if tool_choice:
            if tool_choice == "required":
                payload["tool_choice"] = "required"
            elif tool_choice == "none":
                payload["tool_choice"] = "none"
            else:  # "auto" or default
                payload["tool_choice"] = "auto"

        return payload

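    # Illustrative sketch only: "search_web" below is a hypothetical Lyra-style
    # tool definition (assumed shape, not taken from this file) showing the
    # mapping prepare_request() performs. The input
    #     {"name": "search_web",
    #      "description": "Search the web via DuckDuckGo",
    #      "parameters": {"query": {"type": "string", "description": "Search terms"}},
    #      "required": ["query"]}
    # becomes the OpenAI tools entry
    #     {"type": "function",
    #      "function": {"name": "search_web",
    #                   "description": "Search the web via DuckDuckGo",
    #                   "parameters": {"type": "object",
    #                                  "properties": {"query": {"type": "string",
    #                                                           "description": "Search terms"}},
    #                                  "required": ["query"]}}}
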
    async def parse_response(self, response) -> Dict:
        """Extract tool calls from OpenAI response.

        Args:
            response: OpenAI ChatCompletion response object

        Returns:
            dict: Standardized Lyra format with content and tool_calls
        """
        message = response.choices[0].message
        content = message.content if message.content else ""
        tool_calls = []

        # Check if response contains tool calls
        if hasattr(message, 'tool_calls') and message.tool_calls:
            for tc in message.tool_calls:
                try:
                    # Parse arguments (may be a JSON string)
                    args = tc.function.arguments
                    if isinstance(args, str):
                        args = json.loads(args)

                    tool_calls.append({
                        "id": tc.id,
                        "name": tc.function.name,
                        "arguments": args
                    })
                except json.JSONDecodeError as e:
                    # If arguments can't be parsed, include the error
                    tool_calls.append({
                        "id": tc.id,
                        "name": tc.function.name,
                        "arguments": {},
                        "error": f"Failed to parse arguments: {str(e)}"
                    })

        return {
            "content": content,
            "tool_calls": tool_calls if tool_calls else None
        }

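    # Sketch of the standardized return value (shape follows the code above;
    # the concrete values are made up for illustration): a reply that calls
    # one tool parses to
    #     {"content": "",
    #      "tool_calls": [{"id": "call_abc123",
    #                      "name": "search_web",
    #                      "arguments": {"query": "python asyncio"}}]}
    # while a plain text reply parses to {"content": "...", "tool_calls": None}.
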
    def format_tool_result(
        self,
        tool_call_id: str,
        tool_name: str,
        result: Dict
    ) -> Dict:
        """Format tool result as OpenAI tool message.

        Args:
            tool_call_id: ID from the original tool call
            tool_name: Name of the executed tool
            result: Tool execution result

        Returns:
            dict: Message in OpenAI tool message format
        """
        return {
            "role": "tool",
            "tool_call_id": tool_call_id,
            "name": tool_name,
            "content": json.dumps(result, ensure_ascii=False)
        }
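

# --- Usage sketch (illustrative, not part of the adapter) -------------------
# A minimal example of how this adapter could be driven with the official
# `openai` client. The model name, the user prompt, and the stubbed tool
# result are assumptions made for illustration; in this repo the real loop
# lives in the provider-agnostic function caller.
async def _example_round_trip(adapter: OpenAIAdapter, tools: List[Dict]) -> List[Dict]:
    from openai import AsyncOpenAI  # deferred import: only needed for this sketch

    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    messages: List[Dict] = [
        {"role": "user", "content": "Search the web for Python asyncio tutorials"}
    ]

    # 1. Convert Lyra tool definitions and build the request payload.
    payload = await adapter.prepare_request(messages, tools, tool_choice="auto")

    # 2. Call OpenAI with the prepared payload (model name is an assumption).
    response = await client.chat.completions.create(model="gpt-4o-mini", **payload)

    # 3. Parse any tool calls back into Lyra's standardized format.
    parsed = await adapter.parse_response(response)

    # 4. Execute each tool (stubbed here) and append the formatted tool messages.
    for call in parsed["tool_calls"] or []:
        result = {"status": "ok", "note": f"stub result for {call['name']}"}
        messages.append(adapter.format_tool_result(call["id"], call["name"], result))

    return messages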