"""
Please first install litellm[proxy] by running: uv pip install 'litellm[proxy]'

Before running this script, you need to start the LiteLLM server:

litellm --model gpt-4o-audio-preview --host 127.0.0.1 --port 4000
"""

import requests
from agno.agent import Agent, RunResponse  # noqa
from agno.media import Audio
from agno.models.litellm import LiteLLMOpenAI

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------

# Fetch the QA audio file as raw bytes
url = "https://agno-public.s3.us-east-1.amazonaws.com/demo_data/QA-01.mp3"
response = requests.get(url)
response.raise_for_status()
mp3_data = response.content
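
# Note: instead of downloading the bytes yourself, Agno's Audio object can
# likely reference the source directly. A minimal sketch, assuming the Audio
# class accepts a url argument (not shown in this example):
# audio_from_url = Audio(url=url, format="mp3")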

# Note: audio input requires an audio-enabled model such as gpt-4o-audio-preview
agent = Agent(
    model=LiteLLMOpenAI(id="gpt-4o-audio-preview"),
    markdown=True,
)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------

if __name__ == "__main__":
    # Provide the agent with the audio bytes and stream the result as text
    agent.print_response(
        "What is in this audio?",
        audio=[Audio(content=mp3_data, format="mp3")],
        stream=True,
    )
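
    # Alternative: capture the output instead of printing it. A minimal
    # sketch, assuming Agent.run accepts the same audio parameter as
    # print_response (this is what the RunResponse import above is for):
    # response: RunResponse = agent.run(
    #     "What is in this audio?", audio=[Audio(content=mp3_data, format="mp3")]
    # )
    # print(response.content)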

Run the Example

# Clone the repo and set up the environment
git clone https://github.com/agno-agi/agno.git
cd agno/cookbook/90_models/litellm_openai

# Create and activate virtual environment
./scripts/demo_setup.sh
source .venvs/demo/bin/activate
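
# Install the LiteLLM proxy extra
uv pip install 'litellm[proxy]'

# Start the LiteLLM server in a separate terminal (see the docstring above)
litellm --model gpt-4o-audio-preview --host 127.0.0.1 --port 4000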

python audio_input_agent.py