from agno.agent import Agent
from agno.db.base import SessionType
from agno.db.postgres import PostgresDb
from agno.models.meta import Llama
from rich.pretty import pprint
# Postgres connection used to persist user memories and session data.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)

# Build the agent. The attached db provides persistence, and the two
# enable_* flags turn on automatic user-memory creation and session
# summarization after each run.
agent = Agent(
    model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
    user_id="test_user",
    session_id="test_session",
    db=db,
    enable_user_memories=True,
    enable_session_summaries=True,
    # Debug logs make the memory-creation steps visible in the console.
    debug_mode=True,
)
# -*- Share personal information
agent.print_response("My name is John Billings", stream=True)
# -*- Print memories and session summary
if agent.db:
    # Read memories through agent.db for consistency with the second
    # inspection block below (the original mixed agent.get_user_memories
    # and agent.db.get_user_memories).
    pprint(agent.db.get_user_memories(user_id="test_user"))
    # get_session may return None if the session has not been written yet;
    # guard before reading .summary instead of relying on a type: ignore.
    session = agent.db.get_session(
        session_id="test_session", session_type=SessionType.AGENT
    )
    pprint(session.summary if session else None)
# -*- Share personal information
agent.print_response("I live in NYC", stream=True)
# -*- Print memories and session summary
if agent.db:
    pprint(agent.db.get_user_memories(user_id="test_user"))
    # get_session may return None if the session has not been written yet;
    # guard before reading .summary instead of relying on a type: ignore.
    session = agent.db.get_session(
        session_id="test_session", session_type=SessionType.AGENT
    )
    pprint(session.summary if session else None)
# Ask the agent to recall details from the conversation so far.
agent.print_response("What have we been talking about, do you know my name?", stream=True)
Create a virtual environment

Open the Terminal and create a Python virtual environment:

python3 -m venv .venv
source .venv/bin/activate
Set your LLAMA API key
export LLAMA_API_KEY=YOUR_API_KEY
Install libraries
pip install openai sqlalchemy psycopg pgvector llama-api-client
Run the Agent

python cookbook/models/meta/llama/memory.py