This example demonstrates traditional RAG with a PgVector vector database and OpenAI embeddings: the knowledge context is added to the user prompt automatically (add_knowledge_to_context=True) instead of the agent searching the knowledge base on demand (search_knowledge=False).
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.vectordb.pgvector import PgVector, SearchType

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

knowledge = Knowledge(
    # Use PgVector as the vector database and store embeddings in the `ai.recipes` table
    vector_db=PgVector(
        table_name="recipes",
        db_url=db_url,
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
)

knowledge.add_content(
    url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"
)

agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    knowledge=knowledge,
    # Enable RAG by adding context from the `knowledge` to the user prompt.
    add_knowledge_to_context=True,
    # Set as False because Agents default to `search_knowledge=True`
    search_knowledge=False,
    markdown=True,
)

agent.print_response(
    "How do I make chicken and galangal in coconut milk soup", stream=True
)
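For comparison, the same knowledge base can also be used with agentic retrieval, which is the Agent's default behaviour. The sketch below is illustrative rather than part of the original example: it reuses the knowledge object defined above (and the same imports) and simply flips the two flags, so the agent searches the knowledge base through a tool call instead of having the retrieved context injected into every prompt. Both variants assume a running Postgres instance with the pgvector extension reachable at localhost:5532 and an OpenAI API key available in the environment.

# Illustrative sketch (continues the script above): agentic RAG with the
# same `knowledge` object, relying on the Agent's default behaviour.
agentic_agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    knowledge=knowledge,
    # Let the agent decide when to query the knowledge base (the default),
    # rather than adding the knowledge context to every prompt.
    search_knowledge=True,
    add_knowledge_to_context=False,
    markdown=True,
)

agentic_agent.print_response(
    "How do I make chicken and galangal in coconut milk soup", stream=True
)

Adding the knowledge context directly keeps every response grounded in the loaded documents at the cost of sending retrieved chunks with each request, while agentic search leaves it to the model to decide when retrieval is needed.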