import requests

from agno.agent import Agent
from agno.media import Audio
from agno.models.openai import OpenAIChat
from agno.utils.audio import write_audio_to_file

# Fetch the sample audio file and keep the raw WAV bytes
url = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
response = requests.get(url)
response.raise_for_status()
wav_data = response.content

agent = Agent(
    model=OpenAIChat(
        id="gpt-4o-audio-preview",
        modalities=["text", "audio"],
        audio={"voice": "alloy", "format": "wav"},
    ),
    markdown=True,
)

agent.run(
    "What's in this recording?",
    audio=[Audio(content=wav_data, format="wav")],
)

if agent.run_response.response_audio is not None:
    write_audio_to_file(
        audio=agent.run_response.response_audio.content, filename="tmp/result.wav"
    )
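The response audio is written to tmp/result.wav relative to your working directory. If write_audio_to_file does not create that directory for you, create it up front; a minimal sketch using only the standard library:

from pathlib import Path

# Make sure the output directory exists before the agent writes the WAV file
Path("tmp").mkdir(parents=True, exist_ok=True)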
Create a virtual environment
Open the Terminal and create a python virtual environment.
python3 -m venv .venv
source .venv/bin/activate
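To confirm the virtual environment is active, you can check Python's prefix; a quick sanity check using only the standard library:

import sys

# Inside a virtual environment, sys.prefix differs from the base interpreter's prefix
print("venv active:", sys.prefix != sys.base_prefix)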
Set your API key
export OPENAI_API_KEY=xxx
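If the agent later fails with an authentication error, first verify the key is visible to Python in the same shell; a quick check, assuming the variable was exported as above:

import os

# Prints True if OPENAI_API_KEY is set in this environment
print("OPENAI_API_KEY set:", bool(os.environ.get("OPENAI_API_KEY")))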
Install libraries
pip install -U openai agno
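To confirm both packages installed into the active environment, you can query their versions; a small sketch using the standard library's importlib.metadata:

from importlib.metadata import version

# Print the installed versions of the two packages this example needs
for pkg in ("openai", "agno"):
    print(pkg, version(pkg))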
Run Agent
python cookbook/agent_concepts/multimodal/audio_input_output.py
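After the run completes, tmp/result.wav should contain the spoken response. Assuming the model returned PCM WAV as requested, you can inspect the file with the standard library's wave module to confirm it was written correctly:

import wave

# Open the generated file and report its basic properties
with wave.open("tmp/result.wav", "rb") as f:
    frames = f.getnframes()
    rate = f.getframerate()
    print(f"channels={f.getnchannels()} rate={rate} duration={frames / rate:.2f}s")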