"""This example demonstrates how to implement traditional RAG using LanceDB
vector database, where knowledge is added to context rather than searched
dynamically by the agent.

1. Run: `pip install openai lancedb tantivy pypdf sqlalchemy agno` to install the dependencies
2. Run: `python cookbook/rag/03_traditional_rag_lancedb.py` to run the agent
"""

from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.vectordb.lancedb import LanceDb, SearchType

knowledge = Knowledge(
    # Use LanceDB as the vector database and store embeddings in the `recipes` table
    vector_db=LanceDb(
        table_name="recipes",
        uri="tmp/lancedb",
        search_type=SearchType.vector,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
)

# Download and embed the recipes PDF into the knowledge base.
knowledge.add_content(
    name="Recipes",
    url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf",
)

agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    knowledge=knowledge,
    # Enable RAG by adding references from Knowledge to the user prompt.
    add_knowledge_to_context=True,
    # Set as False because Agents default to `search_knowledge=True`
    search_knowledge=False,
    markdown=True,
)

agent.print_response(
    "How do I make chicken and galangal in coconut milk soup", stream=True
)