import asyncio

from agno.agent import Agent, RunOutput  # noqa
from agno.models.meta import Llama

agent = Agent(
    model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
    markdown=True,
)

# Get the response in a variable
# run: RunOutput = asyncio.run(agent.arun("Share a 2 sentence horror story"))
# print(run.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))