Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
335 changes: 227 additions & 108 deletions lib/crewai/src/crewai/agents/crew_agent_executor.py

Large diffs are not rendered by default.

476 changes: 291 additions & 185 deletions lib/crewai/src/crewai/experimental/agent_executor.py

Large diffs are not rendered by default.

174 changes: 174 additions & 0 deletions lib/crewai/tests/agents/test_agent_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
flow methods, routing logic, and error handling.
"""

import time
from unittest.mock import Mock, patch

import pytest
Expand Down Expand Up @@ -462,3 +463,176 @@ def mock_kickoff_side_effect():

assert result == {"output": "Done"}
assert len(executor.state.messages) >= 2


class TestNativeToolExecution:
    """Behavioral tests for the executor's native (function-calling) tool path.

    Covers three scenarios: parallel dispatch of multiple pending tool calls,
    sequential fallback when a tool is flagged ``result_as_answer``, and
    short-circuiting of the remaining calls once a final answer is produced.
    """

    @staticmethod
    def _stub_with(attrs):
        # Build a Mock and assign attributes afterwards; ``name`` cannot be
        # passed to the Mock constructor (it has special meaning there).
        stub = Mock()
        for field, value in attrs.items():
            setattr(stub, field, value)
        return stub

    @staticmethod
    def _pending_calls(*specs):
        # Each spec is (call_id, function_name); arguments are always empty JSON.
        return [
            {"id": call_id, "function": {"name": fn_name, "arguments": "{}"}}
            for call_id, fn_name in specs
        ]

    @staticmethod
    def _final_answer_tool(tool_name):
        # A tool stub whose result should become the agent's final answer.
        return TestNativeToolExecution._stub_with(
            {
                "name": tool_name,
                "result_as_answer": True,
                "max_usage_count": None,
                "current_usage_count": 0,
            }
        )

    @pytest.fixture
    def mock_dependencies(self):
        """Keyword arguments sufficient to construct an AgentExecutor."""
        llm = Mock()
        llm.supports_stop_words.return_value = True

        return {
            "llm": llm,
            "task": self._stub_with(
                {
                    "name": "Test Task",
                    "description": "Test",
                    "human_input": False,
                    "response_model": None,
                }
            ),
            "crew": self._stub_with(
                {"_memory": None, "verbose": False, "_train": False}
            ),
            "agent": self._stub_with(
                {
                    "id": "test-agent-id",
                    "role": "Test Agent",
                    "verbose": False,
                    "key": "test-key",
                }
            ),
            "prompt": {"prompt": "Test {input} {tool_names} {tools}"},
            "max_iter": 10,
            "tools": [],
            "tools_names": "",
            "stop_words": [],
            "tools_description": "",
            "tools_handler": self._stub_with({"cache": None}),
        }

    def test_execute_native_tool_runs_parallel_for_multiple_calls(
        self, mock_dependencies
    ):
        """Two 0.2s tools dispatched together should overlap, not run serially."""
        executor = AgentExecutor(**mock_dependencies)

        def delayed(value):
            def tool() -> str:
                time.sleep(0.2)
                return value

            return tool

        executor._available_functions = {
            "slow_one": delayed("one"),
            "slow_two": delayed("two"),
        }
        executor.state.pending_tool_calls = self._pending_calls(
            ("call_1", "slow_one"), ("call_2", "slow_two")
        )

        t0 = time.perf_counter()
        outcome = executor.execute_native_tool()
        duration = time.perf_counter() - t0

        assert outcome == "native_tool_completed"
        # Well under 2 * 0.2s proves the calls overlapped.
        assert duration < 0.5
        tool_messages = [
            msg for msg in executor.state.messages if msg.get("role") == "tool"
        ]
        assert len(tool_messages) == 2
        assert [msg["tool_call_id"] for msg in tool_messages] == ["call_1", "call_2"]

    def test_execute_native_tool_falls_back_to_sequential_for_result_as_answer(
        self, mock_dependencies
    ):
        """A result_as_answer tool forces sequential execution and a final answer."""
        executor = AgentExecutor(**mock_dependencies)

        def delayed(value):
            def tool() -> str:
                time.sleep(0.2)
                return value

            return tool

        executor.original_tools = [self._final_answer_tool("slow_one")]
        executor._available_functions = {
            "slow_one": delayed("one"),
            "slow_two": delayed("two"),
        }
        executor.state.pending_tool_calls = self._pending_calls(
            ("call_1", "slow_one"), ("call_2", "slow_two")
        )

        t0 = time.perf_counter()
        outcome = executor.execute_native_tool()
        duration = time.perf_counter() - t0

        assert outcome == "tool_result_is_final"
        # At least one full 0.2s sleep ran, but not an unbounded amount of work.
        assert duration >= 0.2
        assert duration < 0.8
        assert isinstance(executor.state.current_answer, AgentFinish)
        assert executor.state.current_answer.output == "one"

    def test_execute_native_tool_result_as_answer_short_circuits_remaining_calls(
        self, mock_dependencies
    ):
        """Once the final-answer tool runs, later pending calls are skipped."""
        executor = AgentExecutor(**mock_dependencies)
        invocations = {"slow_one": 0, "slow_two": 0}

        def counting(tool_name, value):
            def tool() -> str:
                invocations[tool_name] += 1
                time.sleep(0.2)
                return value

            return tool

        executor.original_tools = [self._final_answer_tool("slow_one")]
        executor._available_functions = {
            "slow_one": counting("slow_one", "one"),
            "slow_two": counting("slow_two", "two"),
        }
        executor.state.pending_tool_calls = self._pending_calls(
            ("call_1", "slow_one"), ("call_2", "slow_two")
        )

        t0 = time.perf_counter()
        outcome = executor.execute_native_tool()
        duration = time.perf_counter() - t0

        assert outcome == "tool_result_is_final"
        assert isinstance(executor.state.current_answer, AgentFinish)
        assert executor.state.current_answer.output == "one"
        # The second tool never ran: short-circuited after the final answer.
        assert invocations["slow_one"] == 1
        assert invocations["slow_two"] == 0
        assert duration < 0.5

        tool_messages = [
            msg for msg in executor.state.messages if msg.get("role") == "tool"
        ]
        assert len(tool_messages) == 1
        assert tool_messages[0]["tool_call_id"] == "call_1"
Loading