Integrations
Connect Engram with your favorite frameworks and tools
Engram integrates with popular AI frameworks, databases, and development tools. This page provides guides for the most common integration scenarios.
AI Frameworks
LangChain Integration
Engram can serve as the memory backend for LangChain agents:
from engram.langchain import EngramMemory
from langchain.agents import initialize_agent
from langchain.llms import OpenAI
# Initialize Engram memory
memory = EngramMemory(
    api_key="your_api_key",
    user_id="user123"
)
# Use with LangChain agent
llm = OpenAI(temperature=0)
agent = initialize_agent(
    tools=your_tools,
    llm=llm,
    memory=memory,
    verbose=True
)
# The agent will automatically store and retrieve memories
response = agent.run("What did we discuss about machine learning?")
LlamaIndex Integration
Store and retrieve documents with LlamaIndex:
from engram.llamaindex import EngramVectorStore
from llama_index import VectorStoreIndex
# Create Engram vector store
vector_store = EngramVectorStore(
    api_key="your_api_key",
    collection_name="documents"
)
# Build index
index = VectorStoreIndex.from_vector_store(vector_store)
# Query the index
query_engine = index.as_query_engine()
response = query_engine.query("Tell me about neural networks")
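The same index can also be used to add documents to the collection; a minimal sketch, assuming the Document class from the same llama_index release (the sample texts are placeholders):
from llama_index import Document

# Wrap raw text in Document objects and insert them so they are embedded
# and persisted in the Engram-backed vector store.
docs = [
    Document(text="Neural networks are layered function approximators."),
    Document(text="Transformers rely on self-attention to model long-range context."),
]
for doc in docs:
    index.insert(doc)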
Haystack Integration
Use Engram as a document store in Haystack pipelines:
from engram.haystack import EngramDocumentStore
from haystack import Pipeline
from haystack.nodes import BM25Retriever
# Initialize document store
document_store = EngramDocumentStore(
    api_key="your_api_key"
)
# Create retrieval pipeline
retriever = BM25Retriever(document_store=document_store)
pipeline = Pipeline()
pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
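Querying then goes through the standard Haystack v1 run call; a minimal sketch (the query text and top_k value are placeholders):
# Retrieve the top matching documents for a query through the pipeline.
results = pipeline.run(
    query="How does Engram store memories?",
    params={"Retriever": {"top_k": 5}},
)
for doc in results["documents"]:
    print(doc.content)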
Web Frameworks
FastAPI Integration
Build APIs with automatic memory storage:
from fastapi import FastAPI, Depends
from engram import EngramClient
from engram.middleware import EngramMiddleware
app = FastAPI()
# Add Engram middleware
app.add_middleware(
    EngramMiddleware,
    api_key="your_api_key",
    auto_store_requests=True
)

@app.post("/chat")
async def chat(
    message: str,
    engram: EngramClient = Depends()
):
    # Search relevant memories
    memories = await engram.search(query=message, limit=5)
    # Process with your AI model
    response = your_ai_model(message, context=memories)
    # Store the conversation
    await engram.store(
        content=f"User: {message}\nAssistant: {response}",
        metadata={"type": "conversation"}
    )
    return {"response": response}
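The endpoint can be exercised like any other FastAPI route; a minimal sketch, assuming the app above is running locally on port 8000 (message is sent as a query parameter, matching the handler signature):
import httpx

# Call the /chat endpoint; the handler searches Engram for context and
# stores the exchange before returning the response.
resp = httpx.post(
    "http://localhost:8000/chat",
    params={"message": "What did we discuss about machine learning?"},
)
print(resp.json()["response"])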
Express.js Integration
Node.js Express middleware for automatic memory management:
import express from 'express';
import { EngramClient, engramMiddleware } from '@engram/sdk';
const app = express();
app.use(express.json());  // needed so req.body is parsed as JSON

// Add Engram middleware
app.use(engramMiddleware({
  apiKey: process.env.ENGRAM_API_KEY,
  autoStore: true
}));

app.post('/chat', async (req, res) => {
  const { message } = req.body;
  const engram = req.engram;

  // Search relevant memories
  const memories = await engram.search({
    query: message,
    limit: 5
  });

  // Generate response with context
  const response = await generateResponse(message, memories);

  // The conversation is stored automatically by the middleware
  res.json({ response });
});
Database Integrations
PostgreSQL
Store additional metadata in PostgreSQL while keeping vectors in Engram:
import psycopg2
from engram import EngramClient
class HybridMemoryStore:
    def __init__(self, postgres_url, engram_api_key):
        self.pg_conn = psycopg2.connect(postgres_url)
        self.engram = EngramClient(api_key=engram_api_key)

    async def store_memory(self, content, metadata):
        # Store in Engram for semantic search
        memory = await self.engram.store(
            content=content,
            metadata=metadata
        )
        # Store additional metadata in PostgreSQL
        cursor = self.pg_conn.cursor()
        cursor.execute(
            "INSERT INTO memories (engram_id, user_id, created_at) VALUES (%s, %s, %s)",
            (memory.id, metadata.get('user_id'), memory.created_at)
        )
        self.pg_conn.commit()
        return memory
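Reads go the other way: semantic search in Engram first, then a lookup of the relational rows by the returned IDs. A minimal sketch, assuming the memories table used above (search_with_rows is a hypothetical helper, not part of the SDK):
async def search_with_rows(store: HybridMemoryStore, query: str, limit: int = 5):
    # Semantic search in Engram, then join against PostgreSQL rows keyed by engram_id.
    results = await store.engram.search(query=query, limit=limit)
    cursor = store.pg_conn.cursor()
    rows = []
    for memory in results.memories:
        cursor.execute(
            "SELECT engram_id, user_id, created_at FROM memories WHERE engram_id = %s",
            (memory.id,),
        )
        rows.append(cursor.fetchone())
    return results, rows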
MongoDB
Combine structured data in MongoDB with semantic search in Engram:
import { MongoClient } from 'mongodb';
import { EngramClient } from '@engram/sdk';
class MongoEngramStore {
  constructor(mongoUrl, engramApiKey) {
    this.mongo = new MongoClient(mongoUrl);
    this.engram = new EngramClient({ apiKey: engramApiKey });
  }

  async storeDocument(document) {
    // Store structured data in MongoDB
    const mongoResult = await this.mongo
      .db('myapp')
      .collection('documents')
      .insertOne(document);

    // Store content in Engram for semantic search
    const memory = await this.engram.store({
      content: document.content,
      metadata: {
        mongo_id: mongoResult.insertedId.toString(),
        type: document.type,
        user_id: document.user_id
      }
    });

    return { mongoResult, memory };
  }
}
Cloud Platforms
AWS Lambda
Use Engram in serverless functions:
import json
import os

from engram import EngramClient

def lambda_handler(event, context):
    engram = EngramClient(
        api_key=os.environ['ENGRAM_API_KEY']
    )
    # Process incoming data
    data = json.loads(event['body'])
    # Store in Engram
    memory = engram.store(
        content=data['content'],
        metadata={'source': 'lambda', 'user_id': data['user_id']}
    )
    return {
        'statusCode': 200,
        'body': json.dumps({'memory_id': memory.id})
    }
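You can smoke-test the handler locally before deploying; a minimal sketch with a hand-built API Gateway-style event (the payload values are placeholders):
# Invoke the handler directly with a synthetic event and no real context object.
test_event = {
    "body": json.dumps({
        "content": "User asked about pricing tiers.",
        "user_id": "user123",
    })
}
print(lambda_handler(test_event, None))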
Google Cloud Functions
Deploy Engram-powered functions on GCP:
import os

from flask import Request
import functions_framework
from engram import EngramClient

@functions_framework.http
def engram_search(request: Request):
    engram = EngramClient(api_key=os.environ['ENGRAM_API_KEY'])
    query = request.json.get('query')
    results = engram.search(query=query, limit=10)
    return {
        'memories': [memory.dict() for memory in results.memories],
        'total': results.total
    }
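For a quick local check without deploying, the function can be called with a synthetic Flask request; a minimal sketch (the query string is a placeholder):
import flask

# Build a fake HTTP request carrying a JSON body and call the function directly.
local_app = flask.Flask(__name__)
with local_app.test_request_context(json={"query": "neural networks"}):
    print(engram_search(flask.request))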
Vercel Edge Functions
Build globally distributed AI applications:
import { EngramClient } from '@engram/sdk';
import { NextRequest } from 'next/server';
export const config = {
  runtime: 'edge',
};

export default async function handler(req: NextRequest) {
  const engram = new EngramClient({
    apiKey: process.env.ENGRAM_API_KEY!
  });

  const { query } = await req.json();

  const results = await engram.search({
    query,
    limit: 5
  });

  return new Response(JSON.stringify(results), {
    headers: { 'Content-Type': 'application/json' }
  });
}
Development Tools
GitHub Actions
Automatically process and store documentation:
name: Store Documentation in Engram

on:
  push:
    paths: ['docs/**']

jobs:
  store-docs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Store documentation
        uses: engram/github-action@v1
        with:
          api-key: ${{ secrets.ENGRAM_API_KEY }}
          docs-path: './docs'
          metadata: |
            {
              "repository": "${{ github.repository }}",
              "branch": "${{ github.ref_name }}",
              "commit": "${{ github.sha }}"
            }
Jupyter Notebooks
Use Engram in data science workflows:
# Cell 1: Install and setup
!pip install engram-sdk
from engram import EngramClient
engram = EngramClient(api_key="your_api_key")
# Cell 2: Store research findings
research_finding = """
Experiment XYZ showed that model A outperformed model B
by 15% on the test dataset when using hyperparameter set C.
"""
memory = engram.store(
    content=research_finding,
    metadata={
        "experiment_id": "XYZ",
        "performance_gain": 0.15,
        "notebook": "experiment_analysis.ipynb"
    }
)
# Cell 3: Retrieve related research
related = engram.search(
    query="model performance comparison experiments",
    limit=10
)
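Stored metadata can also narrow a search; a minimal sketch, assuming the filters parameter shown in the Slack example later on this page is also accepted by the synchronous client:
# Cell 4: Restrict retrieval to memories tagged with a specific experiment
xyz_results = engram.search(
    query="hyperparameter sweep results",
    filters={"experiment_id": "XYZ"},
    limit=5
)
for memory in xyz_results.memories:
    print(memory.content)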
Webhook Integrations
Slack Bot
Create a Slack bot that learns from conversations:
from slack_bolt.async_app import AsyncApp  # async listeners require AsyncApp
from engram import EngramClient

app = AsyncApp(token="your_slack_bot_token")
engram = EngramClient(api_key="your_api_key")

@app.message("help")
async def help_command(message, say):
    # Search for relevant help information
    results = await engram.search(
        query=message['text'],
        filters={"type": "help_doc"},
        limit=3
    )
    if results.memories:
        response = "Here's what I found:\n"
        for memory in results.memories[:3]:
            response += f"• {memory.content[:100]}...\n"
    else:
        response = "I couldn't find any relevant information."
    await say(response)

# Store all messages for learning
@app.event("message")
async def store_message(event):
    if not event.get('bot_id'):  # Don't store bot messages
        await engram.store(
            content=event['text'],
            metadata={
                "channel": event['channel'],
                "user": event['user'],
                "type": "slack_message"
            }
        )
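The listeners only fire once the app is started; a minimal sketch using Socket Mode via slack_bolt's aiohttp adapter (the app-level token is a placeholder):
import asyncio
from slack_bolt.adapter.socket_mode.aiohttp import AsyncSocketModeHandler

async def main():
    # Connect over Socket Mode so no public HTTP endpoint is required.
    handler = AsyncSocketModeHandler(app, "your_slack_app_token")
    await handler.start_async()

asyncio.run(main())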
Discord Bot
Build a Discord bot with memory:
import { Client, GatewayIntentBits } from 'discord.js';
import { EngramClient } from '@engram/sdk';
const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent  // required to read message.content
  ]
});

const engram = new EngramClient({
  apiKey: process.env.ENGRAM_API_KEY
});

client.on('messageCreate', async (message) => {
  if (message.author.bot) return;

  // Store message in Engram
  await engram.store({
    content: message.content,
    metadata: {
      discord_user_id: message.author.id,
      channel_id: message.channel.id,
      guild_id: message.guild?.id,
      type: 'discord_message'
    }
  });

  // Respond to mentions
  if (message.mentions.has(client.user)) {
    const query = message.content.replace(`<@${client.user.id}>`, '').trim();
    const results = await engram.search({ query, limit: 3 });

    if (results.memories.length > 0) {
      const response = `I found this relevant information:\n${results.memories[0].content}`;
      await message.reply(response);
    }
  }
});

// Bot token from your environment
client.login(process.env.DISCORD_BOT_TOKEN);
Next Steps
- Explore our API Reference for detailed endpoint documentation
- Check out example applications on GitHub
- Join our developer community for integration support
Need help with a specific integration? Contact our support team for assistance.