Integrate Zentry with Agno, a Python framework for building autonomous agents. This integration enables Agno agents to access persistent memory across conversations, enhancing context retention and personalization.
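At its core, the integration relies on two `MemoryClient` calls that also appear in the full example below: `add` to persist conversation messages for a user and `search` to retrieve memories relevant to the current input. The following is a minimal sketch of that flow, assuming the Zentry credentials for `MemoryClient` are already configured; the user ID and message text are purely illustrative:

```python
from Zentry import MemoryClient

# Assumes Zentry credentials are already configured for MemoryClient
client = MemoryClient()

# Persist a conversation turn for a given user (illustrative content)
client.add(
    [{"role": "user", "content": "I prefer vegetarian restaurants."}],
    user_id="user_123",
)

# Later, retrieve memories relevant to a new query and print them
memories = client.search("Where should we eat tonight?", user_id="user_123")
for m in memories:
    print(m["memory"])
```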
The following example demonstrates how to create an Agno agent with Zentry memory integration, including support for image processing:
```python
import base64
from pathlib import Path
from typing import Optional

from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIChat
from Zentry import MemoryClient

# Initialize the Zentry client
client = MemoryClient()

# Define the agent
agent = Agent(
    name="Personal Agent",
    model=OpenAIChat(id="gpt-4"),
    description=(
        "You are a helpful personal agent that helps me with day-to-day activities. "
        "You can process both text and images."
    ),
    markdown=True,
)


def chat_user(
    user_input: Optional[str] = None,
    user_id: str = "user_123",
    image_path: Optional[str] = None,
) -> str:
    """
    Handle user input with memory integration, supporting both text and images.

    Args:
        user_input: The user's text input
        user_id: Unique identifier for the user
        image_path: Path to an image file if provided

    Returns:
        The agent's response as a string
    """
    if image_path:
        # Convert the image to base64
        with open(image_path, "rb") as image_file:
            base64_image = base64.b64encode(image_file.read()).decode("utf-8")

        # Create message objects for the text and image
        messages = []
        if user_input:
            messages.append({
                "role": "user",
                "content": user_input,
            })
        messages.append({
            "role": "user",
            "content": {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{base64_image}"
                },
            },
        })

        # Store the messages in memory
        client.add(messages, user_id=user_id)
        print("✅ Image and text stored in memory.")

    if user_input:
        # Search for relevant memories
        memories = client.search(user_input, user_id=user_id)
        memory_context = "\n".join(f"- {m['memory']}" for m in memories)

        # Construct the prompt
        prompt = f"""You are a helpful personal assistant who helps users with their day-to-day activities and keeps track of everything.

Your task is to:
1. Analyze the given image (if present) and extract meaningful details to answer the user's question.
2. Use your past memory of the user to personalize your answer.
3. Combine the image content and memory to generate a helpful, context-aware response.

Here is what I remember about the user:
{memory_context}

User question:
{user_input}"""

        # Get a response from the agent
        if image_path:
            response = agent.run(prompt, images=[Image(filepath=Path(image_path))])
        else:
            response = agent.run(prompt)

        # Store the interaction in memory
        client.add(
            f"User: {user_input}\nAssistant: {response.content}",
            user_id=user_id,
        )
        return response.content

    return "No user input or image provided."


# Example usage
if __name__ == "__main__":
    response = chat_user(
        "This is the picture of what I brought with me in the trip to Bahamas",
        image_path="travel_items.jpeg",
        user_id="user_123",
    )
    print(response)
```
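Once the first call has stored the trip details, later text-only turns can draw on that memory. A sketch of a hypothetical follow-up call, reusing the `chat_user` helper and `user_id` from the example above (the question itself is illustrative):

```python
# Later conversation turn: no image, relying purely on stored memories.
# chat_user searches Zentry for relevant memories and injects them into the
# prompt, so the agent can answer using the packing details stored earlier.
follow_up = chat_user(
    "What did I pack for my last trip?",
    user_id="user_123",
)
print(follow_up)
```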