This example demonstrates three ways to handle streaming responses from an async agent: iterating over the stream manually, using aprint_response, and using apprint_run_response.

Code

cookbook/agents/async/streaming.py
import asyncio

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.utils.pprint import apprint_run_response

agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
)


async def streaming():
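    # Iterate over the streamed events manually and print each content chunk as it arrives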
    async for response in agent.arun(input="Tell me a joke.", stream=True):
        print(response.content, end="", flush=True)


async def streaming_print():
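    # Let the agent print the streamed response directly to the terminal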
    await agent.aprint_response(input="Tell me a joke.", stream=True)


async def streaming_pprint():
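    # Pretty-print the streamed response by passing the stream to apprint_run_response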
    await apprint_run_response(agent.arun(input="Tell me a joke.", stream=True))


if __name__ == "__main__":
    asyncio.run(streaming())
    # OR
    asyncio.run(streaming_print())
    # OR
    asyncio.run(streaming_pprint())
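
If you want the full text after streaming finishes, you can also accumulate the chunks while printing them. Below is a minimal sketch based on the manual-iteration pattern above; it assumes each streamed event exposes a content attribute, which may be empty for events that carry no text.

import asyncio

from agno.agent import Agent
from agno.models.openai import OpenAIChat

agent = Agent(model=OpenAIChat(id="gpt-5-mini"))


async def streaming_accumulate():
    # Collect streamed chunks into a single string while printing them live
    chunks = []
    async for response in agent.arun(input="Tell me a joke.", stream=True):
        if response.content:  # skip events that carry no text content
            print(response.content, end="", flush=True)
            chunks.append(response.content)
    full_text = "".join(chunks)
    print(f"\n\nAssembled {len(full_text)} characters from the stream.")


if __name__ == "__main__":
    asyncio.run(streaming_accumulate())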

Usage

1. Create a virtual environment

Open your terminal and create a Python virtual environment.

python3 -m venv .venv
source .venv/bin/activate
2. Install libraries

pip install -U agno openai
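
The OpenAI model reads your API key from the OPENAI_API_KEY environment variable, so make sure it is set before running (the value below is a placeholder):

export OPENAI_API_KEY=your-api-key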
3. Run Agent

python cookbook/agents/async/streaming.py