"""
CrewAI Instantiation Performance Evaluation
===========================================

Demonstrates agent instantiation benchmarking with CrewAI.
"""

from typing import Literal

from agno.eval.performance import PerformanceEval
from crewai.agent import Agent
from crewai.tools import tool


# ---------------------------------------------------------------------------
# Create Benchmark Tool
# ---------------------------------------------------------------------------
@tool("Get Weather")
def get_weather(city: Literal["nyc", "sf"]) -> str:
    """Use this to get weather information."""
    if city == "nyc":
        return "It might be cloudy in nyc"
    elif city == "sf":
        return "It's always sunny in sf"
    else:
        raise ValueError(f"Unknown city: {city}")


tools = [get_weather]
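# The tools list is passed at construction time, so any tool registration
# CrewAI performs while building the Agent is included in the measurement.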


# ---------------------------------------------------------------------------
# Create Benchmark Function
# ---------------------------------------------------------------------------
def instantiate_agent():
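    # Construct-only: this never sends a request to the model, so the
    # benchmark isolates agent object creation overhead.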
    return Agent(
        llm="gpt-4o",
        role="Test Agent",
        goal="Be concise, reply with one sentence.",
        tools=tools,
        backstory="Test",
    )


# ---------------------------------------------------------------------------
# Create Evaluation
# ---------------------------------------------------------------------------
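# PerformanceEval calls the target function num_iterations times and
# reports runtime and memory statistics across the runs.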
crew_instantiation = PerformanceEval(func=instantiate_agent, num_iterations=1000)

# ---------------------------------------------------------------------------
# Run Evaluation
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    crew_instantiation.run(print_results=True, print_summary=True)
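
This script lives in the cookbook's comparison directory, so the same harness can be pointed at other frameworks. As a minimal sketch of the equivalent Agno benchmark (assuming Agno's public Agent and OpenAIChat APIs, with a hypothetical instantiate_agno_agent helper; this is not the cookbook's own comparison script):

from agno.agent import Agent as AgnoAgent
from agno.eval.performance import PerformanceEval
from agno.models.openai import OpenAIChat  # assumed model wrapper


def instantiate_agno_agent():
    # Construct-only, mirroring instantiate_agent() above (tools omitted,
    # since get_weather is wrapped in a CrewAI-specific decorator).
    return AgnoAgent(
        model=OpenAIChat(id="gpt-4o"),
        instructions="Be concise, reply with one sentence.",
    )


agno_instantiation = PerformanceEval(func=instantiate_agno_agent, num_iterations=1000)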

Run the Example

# Clone and setup repo
git clone https://github.com/agno-agi/agno.git
cd agno/cookbook/09_evals/performance/comparison

# Create and activate virtual environment
./scripts/demo_setup.sh
source .venvs/demo/bin/activate

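# Run the evaluation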
python crewai_instantiation.py
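
# Dependencies (assumed, if you skip the setup script):
# pip install agno crewai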