Integration Patterns
Learn how to integrate the Memory Scope API with popular AI frameworks and tools. These patterns help you build personalized AI experiences.
OpenAI Integration
Use Memory Scope API with OpenAI to create personalized AI assistants that remember user preferences and constraints.
OpenAI Integration
from memory_scope import MemoryScopeClient
from memory_scope.exceptions import PolicyDeniedError
import openai
# Initialize the Memory Scope and OpenAI clients once at module level;
# get_personalized_response below reads from both.
memory_client = MemoryScopeClient(api_key="your-memory-api-key")
openai_client = openai.OpenAI(api_key="your-openai-key")
def get_personalized_response(user_id: str, user_message: str) -> str:
"""Generate a personalized AI response using stored memories"""
# Read user preferences
try:
preferences = memory_client.read_memory(
user_id=user_id,
scope="preferences",
domain=None,
purpose="generate personalized AI response"
)
prefs_context = f"User Preferences: {preferences.summary_struct}"
except PolicyDeniedError:
prefs_context = "No preferences available"
# Read communication preferences
try:
communication = memory_client.read_memory(
user_id=user_id,
scope="communication",
domain=None,
purpose="generate personalized AI response"
)
comm_context = f"Communication Style: {communication.summary_struct}"
except PolicyDeniedError:
comm_context = "Use default communication style"
# Build system prompt with context
system_prompt = f"""You are a helpful assistant.
{prefs_context}
{comm_context}
Adapt your responses to match the user's preferences and communication style."""
# Generate response
response = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_message}
]
)
return response.choices[0].message.contentRecommendation System Integration
Build a recommendation system that uses preferences and constraints to filter and rank recommendations.
Recommendation System
from memory_scope import MemoryScopeClient
from memory_scope.exceptions import PolicyDeniedError
# Client shared by the recommendation helper below
memory_client = MemoryScopeClient(api_key="your-api-key")
def get_recommendations(user_id: str, category: str, all_items: list):
"""Get personalized recommendations"""
# Read preferences
try:
preferences = memory_client.read_memory(
user_id=user_id,
scope="preferences",
domain=category,
purpose="generate recommendations"
)
likes = preferences.summary_struct.get("likes", [])
dislikes = preferences.summary_struct.get("dislikes", [])
except PolicyDeniedError:
likes = []
dislikes = []
# Read constraints
try:
constraints = memory_client.read_memory(
user_id=user_id,
scope="constraints",
domain=category,
purpose="filter recommendations"
)
rules = constraints.summary_struct.get("rules", [])
except PolicyDeniedError:
rules = []
# Filter and score items
recommendations = []
for item in all_items:
# Skip disliked items
if item in dislikes:
continue
# Check constraints
if violates_constraints(item, rules):
continue
# Score based on preferences
score = 0
if item in likes:
score += 10
# Add other scoring logic
recommendations.append((item, score))
# Sort by score
recommendations.sort(key=lambda x: x[1], reverse=True)
return [item for item, score in recommendations[:10]]Content Generation Integration
Generate personalized content that matches user communication preferences and accessibility requirements.
Content Generation
from memory_scope import MemoryScopeClient
# PolicyDeniedError is caught below; without this import the except
# clause would raise a NameError at runtime.
from memory_scope.exceptions import PolicyDeniedError
import openai

# Clients shared by the content-generation helper below
memory_client = MemoryScopeClient(api_key="your-memory-api-key")
openai_client = openai.OpenAI(api_key="your-openai-key")
def generate_personalized_content(user_id: str, content_type: str, topic: str) -> str:
"""Generate content personalized to user's communication preferences"""
# Read communication preferences
try:
communication = memory_client.read_memory(
user_id=user_id,
scope="communication",
domain=None,
purpose="generate personalized content"
)
tone = communication.summary_struct.get("preferred_tone", "professional")
use_emojis = communication.summary_struct.get("use_emojis", False)
except PolicyDeniedError:
tone = "professional"
use_emojis = False
# Build style guide
style_guide = f"""
Tone: {tone}
Use emojis: {use_emojis}
"""
# Generate content
response = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": f"You are a content generator. Follow these style guidelines: {style_guide}"
},
{
"role": "user",
"content": f"Generate {content_type} about {topic}"
}
]
)
return response.choices[0].message.contentCustom Integration Pattern
Create your own integration pattern. This example shows a general pattern for using memories in any AI application.
Custom Integration Pattern
from memory_scope import MemoryScopeClient
from memory_scope.exceptions import PolicyDeniedError
# Client shared by get_user_context below
memory_client = MemoryScopeClient(api_key="your-api-key")
def get_user_context(user_id: str, scopes: list, purpose: str) -> dict:
    """Collect ``summary_struct`` data for each requested memory scope.

    Returns a dict mapping scope name to its summary structure, or to
    ``None`` for scopes the policy does not allow reading for ``purpose``.
    """
    context: dict = {}
    for scope_name in scopes:
        try:
            memory = memory_client.read_memory(
                user_id=user_id,
                scope=scope_name,
                domain=None,
                purpose=purpose
            )
        except PolicyDeniedError:
            # Scope not accessible for this purpose — record the gap.
            context[scope_name] = None
        else:
            context[scope_name] = memory.summary_struct
    return context
# Example: fetch several scopes at once for a single downstream purpose.
user_context = get_user_context(
    user_id="user123",
    scopes=["preferences", "constraints", "communication"],
    purpose="generate personalized response"
)
# user_context now maps each scope name to its summary_struct
# (or None when access was denied); feed it into your AI model.
# ... your AI logic here ...
Integration Best Practices
- Always handle PolicyDeniedError gracefully
- Use appropriate purposes for each read operation
- Store revocation tokens securely
- Cache responses when appropriate
- Respect user privacy and provide revocation UI
Related Documentation