from pathlib import Path

from agno.agent import Agent, RunResponse
from agno.media import Image
from agno.models.openai import OpenAIChat
from agno.utils.audio import write_audio_to_file
from rich import print
from rich.text import Text

# Agent that looks at the image and writes a short story about it
image_agent = Agent(model=OpenAIChat(id="gpt-4o"))

image_path = Path(__file__).parent.joinpath("sample.jpg")

image_story: RunResponse = image_agent.run(
    "Write a 3 sentence fiction story about the image",
    images=[Image(filepath=image_path)],
)

# Print the story with rich formatting
formatted_text = Text.from_markup(
    f":sparkles: [bold magenta]Story:[/bold magenta] {image_story.content} :sparkles:"
)
print(formatted_text)

# Agent that narrates the story using OpenAI's audio-capable model
audio_agent = Agent(
    model=OpenAIChat(
        id="gpt-4o-audio-preview",
        modalities=["text", "audio"],
        audio={"voice": "alloy", "format": "wav"},
    ),
)

audio_story: RunResponse = audio_agent.run(
    f"Narrate the story with flair: {image_story.content}"
)

# Save the generated narration to a WAV file
if audio_story.response_audio is not None:
    write_audio_to_file(
        audio=audio_story.response_audio.content, filename="tmp/sample_story.wav"
    )
Create a virtual environment
Open the Terminal and create a python virtual environment.
python3 -m venv .venv
source .venv/bin/activate
Set your API key
export OPENAI_API_KEY=xxx
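To confirm the key is actually visible to Python before running the agent, a quick check using only the standard library is:

import os

assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY is not set in this shell"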
Install libraries
pip install -U openai rich agno
Run Agent
python cookbook/agent_concepts/multimodal/image_to_audio.py
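The script prints the story to the terminal and saves the narration as tmp/sample_story.wav relative to where you run the command. Make sure a sample.jpg sits next to the script; creating the output directory up front is a safe precaution in case the helper does not create it for you:
mkdir -p tmp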