Overview
Build conversational AI systems that maintain context across turns, handle multi-turn dialogs, and integrate with various chat platforms. This example demonstrates conversation management, context tracking, and integration patterns for chatbots.

Architecture
Copy
User Message → Context Retrieval → AI Response → Context Update → Reply
Complete Chatbot
Copy
name: conversational-chatbot
description: Contextual chatbot with memory

# Shared state carried between members of the flow.
state:
  schema:
    conversationHistory: array   # recent turns loaded from D1
    userContext: object          # per-user context loaded from KV
    lastIntent: string           # set by classify-intent

flow:
  # Load the 10 most recent conversation turns for this user from D1.
  - member: load-history
    type: Data
    config:
      storage: d1
      operation: query
      query: |
        SELECT * FROM conversations
        WHERE user_id = ?
        ORDER BY created_at DESC
        LIMIT 10
    input:
      params: [${input.userId}]
    state:
      set: [conversationHistory]

  # Load longer-lived user context (profile, preferences) from KV.
  - member: load-user-context
    type: Data
    config:
      storage: kv
      operation: get
      binding: CACHE
    input:
      key: "user:${input.userId}:context"
    state:
      set: [userContext]

  # Classify the intent of the incoming message with a cheap model.
  # FIX: the original declared `state:` twice on this member (duplicate
  # mapping key is invalid YAML) — `use` and `set` are merged into one map.
  - member: classify-intent
    type: Think
    config:
      provider: openai
      model: gpt-4o-mini
      temperature: 0.2
    state:
      use: [conversationHistory]
      set: [lastIntent]
    input:
      prompt: |
        Classify the intent of this user message.

        Recent conversation:
        ${state.conversationHistory.slice(0, 5).map(m => `${m.role}: ${m.content}`).join('\n')}

        Current message: ${input.message}

        Intent categories: question, command, feedback, chitchat, other

  # Retrieve relevant knowledge (RAG) — only runs for questions.
  - member: search-knowledge
    condition: ${classify-intent.output.intent === 'question'}
    type: RAG
    config:
      vectorizeBinding: "VECTORIZE"
      indexName: "knowledge-base"
      operation: query
    input:
      query: ${input.message}
      topK: 3
      scoreThreshold: 0.7

  # Generate the assistant reply using history, context, and any
  # retrieved knowledge.
  - member: generate-response
    type: Think
    config:
      provider: anthropic
      model: claude-3-5-sonnet-20241022
      temperature: 0.7
      maxTokens: 500
      systemPrompt: |
        You are a helpful AI assistant.

        Guidelines:
        - Be conversational and friendly
        - Reference previous messages when relevant
        - Use retrieved knowledge when available
        - Ask clarifying questions if needed
        - Keep responses concise (2-3 sentences usually)
    state:
      use: [conversationHistory, userContext, lastIntent]
    input:
      prompt: |
        User context:
        ${JSON.stringify(state.userContext, null, 2)}

        Recent conversation:
        ${state.conversationHistory.slice(0, 5).map(m => `${m.role}: ${m.content}`).join('\n')}

        ${search-knowledge.success ? `Relevant knowledge:\n${search-knowledge.output.results.map(r => r.text).join('\n\n')}` : ''}

        User: ${input.message}
        Assistant:

  # Score the reply with an LLM judge; retry generation up to twice
  # when the score falls below the threshold.
  - member: validate-response
    type: Validate
    scoring:
      evaluator: validate
      evaluatorConfig:
        type: judge
        model: gpt-4o-mini
        criteria:
          helpful: "Response is helpful and relevant"
          safe: "Response is safe and appropriate"
          coherent: "Response is coherent and well-structured"
      thresholds:
        minimum: 0.8
      onFailure: retry
      retryLimit: 2

  # Persist both sides of the turn in a single INSERT.
  - member: save-turn
    type: Data
    config:
      storage: d1
      operation: query
      query: |
        INSERT INTO conversations (user_id, role, content, intent, created_at)
        VALUES (?, 'user', ?, ?, CURRENT_TIMESTAMP),
               (?, 'assistant', ?, NULL, CURRENT_TIMESTAMP)
    input:
      params:
        - ${input.userId}
        - ${input.message}
        - ${classify-intent.output.intent}
        - ${input.userId}
        - ${generate-response.output.text}

  # Derive an updated user context object from this turn.
  - member: update-context
    type: Function
    state:
      use: [userContext]
      set: [userContext]
    input:
      currentContext: ${state.userContext}
      newMessage: ${input.message}
      intent: ${classify-intent.output.intent}
      response: ${generate-response.output.text}

  # Write the refreshed context back to KV with a 24h TTL.
  - member: save-context
    type: Data
    config:
      storage: kv
      operation: put
      binding: CACHE
    input:
      key: "user:${input.userId}:context"
      value: ${state.userContext}
      expirationTtl: 86400 # 24 hours

output:
  response: ${generate-response.output.text}
  intent: ${classify-intent.output.intent}
  knowledge_used: ${search-knowledge.success}
  quality_score: ${validate-response.output.score}
Slack Integration
Copy
// Slack bot handler
import { Conductor } from '@ensemble-edge/conductor';

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    // FIX: Slack signs the RAW request body — read it as text BEFORE
    // parsing so the signature check sees the exact bytes Slack sent.
    // (The original verified against the parsed JSON object, which can
    // never match the signature.)
    const rawBody = await request.text();
    const signature = request.headers.get('X-Slack-Signature');
    // NOTE(review): a complete check also folds in X-Slack-Request-Timestamp
    // and rejects stale requests — confirm verifySlackSignature does this.
    if (!verifySlackSignature(rawBody, signature, env.SLACK_SIGNING_SECRET)) {
      return new Response('Invalid signature', { status: 401 });
    }

    const body = JSON.parse(rawBody);

    // One-time URL verification handshake when the endpoint is registered.
    if (body.type === 'url_verification') {
      return Response.json({ challenge: body.challenge });
    }

    // FIX: guard `event` (non-event payloads have none) and skip bot or
    // system messages — replying to our own chat.postMessage events would
    // create an infinite loop.
    if (body.event?.type === 'message' && !body.event.bot_id && !body.event.subtype) {
      const conductor = new Conductor({ env });

      // Process the user message through the chatbot ensemble.
      const result = await conductor.executeEnsemble('conversational-chatbot', {
        userId: body.event.user,
        message: body.event.text,
        channel: body.event.channel
      });

      // Post the reply back to the originating channel.
      await fetch('https://slack.com/api/chat.postMessage', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${env.SLACK_BOT_TOKEN}`,
          'Content-Type': 'application/json'
        },
        body: JSON.stringify({
          channel: body.event.channel,
          text: result.output.response
        })
      });
    }

    return Response.json({ ok: true });
  }
};
Discord Integration
Copy
// Discord bot handler
// NOTE(review): Discord REQUIRES Ed25519 verification of the
// X-Signature-Ed25519 / X-Signature-Timestamp headers against the raw
// body; endpoints that skip it fail Discord's validation ping. Add that
// check before parsing in production.
import { Conductor } from '@ensemble-edge/conductor';

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    const body = await request.json();

    // PING — must be answered with PONG (type 1).
    if (body.type === 1) {
      return Response.json({ type: 1 });
    }

    // APPLICATION_COMMAND — a slash-command invocation.
    if (body.type === 2) {
      const conductor = new Conductor({ env });

      // FIX: commands registered without options would throw on
      // `body.data.options[0].value`; default to an empty message instead.
      const message = body.data?.options?.[0]?.value ?? '';

      const result = await conductor.executeEnsemble('conversational-chatbot', {
        userId: body.member.user.id,
        message,
        channel: body.channel_id
      });

      return Response.json({
        type: 4, // CHANNEL_MESSAGE_WITH_SOURCE
        data: {
          content: result.output.response
        }
      });
    }

    return Response.json({ ok: true });
  }
};
WhatsApp Integration
Copy
// WhatsApp webhook handler
export default {
async fetch(request: Request, env: Env): Promise<Response> {
const body = await request.json();
// Verify webhook
if (request.method === 'GET') {
const mode = new URL(request.url).searchParams.get('hub.mode');
if (mode === 'subscribe') {
return new Response(
new URL(request.url).searchParams.get('hub.challenge')
);
}
}
// Handle messages
const message = body.entry[0].changes[0].value.messages[0];
if (message.type === 'text') {
const conductor = new Conductor({ env });
const result = await conductor.executeEnsemble('conversational-chatbot', {
userId: message.from,
message: message.text.body
});
// Send response via WhatsApp Business API
await fetch(
`https://graph.facebook.com/v18.0/${env.WHATSAPP_PHONE_ID}/messages`,
{
method: 'POST',
headers: {
'Authorization': `Bearer ${env.WHATSAPP_TOKEN}`,
'Content-Type': 'application/json'
},
body: JSON.stringify({
messaging_product: 'whatsapp',
to: message.from,
text: { body: result.output.response }
})
}
);
}
return Response.json({ ok: true });
}
};
Advanced Features
Multi-Turn Context
Copy
# Trim the rolling conversation window to the last `maxTurns` turns.
- member: maintain-context
  type: Function
  input:
    history: ${state.conversationHistory}
    newTurn:
      user: ${input.message}
      assistant: ${generate-response.output.text}
    maxTurns: 10
Intent-Based Routing
Copy
flow:
  - member: classify-intent

  # Route to a type-specific handler based on the classified intent;
  # only the member whose condition matches will run.
  - member: handle-question
    condition: ${classify-intent.output.intent === 'question'}
    type: Think
  - member: handle-command
    condition: ${classify-intent.output.intent === 'command'}
    type: Function
  - member: handle-feedback
    condition: ${classify-intent.output.intent === 'feedback'}
    type: Data
Conversation Analytics
Copy
# Record one analytics row per conversation turn.
- member: log-analytics
  type: Data
  config:
    storage: d1
    operation: query
    query: |
      INSERT INTO conversation_analytics
        (user_id, intent, sentiment, response_time, timestamp)
      VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)
  input:
    params:
      - ${input.userId}
      - ${classify-intent.output.intent}
      # NOTE(review): requires an 'analyze-sentiment' member earlier in
      # the flow — no such member is defined in the examples on this page.
      - ${analyze-sentiment.output.sentiment}
      - ${generate-response.duration}
Proactive Messaging
Copy
name: proactive-message
description: Send proactive message based on trigger

flow:
  # Decide whether this trigger warrants contacting the user.
  - member: check-trigger
    type: Function
    input:
      trigger: ${input.trigger}
      userId: ${input.userId}

  # Draft the outbound message only when the trigger check passed.
  - member: generate-message
    condition: ${check-trigger.output.shouldSend}
    type: Think
    input:
      context: ${check-trigger.output.context}

  # Deliver via the external messaging API only on successful generation.
  - member: send-message
    condition: ${generate-message.success}
    type: API
    config:
      url: "${env.MESSAGING_API}"
      method: POST
Testing
Copy
import { describe, it, expect } from 'vitest';
import { TestConductor } from '@ensemble-edge/conductor/testing';

describe('conversational-chatbot', () => {
  it('should handle user message', async () => {
    // Mock storage and both AI members so the test is deterministic.
    const conductor = await TestConductor.create({
      mocks: {
        db: {
          conversations: []
        },
        ai: {
          responses: {
            'classify-intent': { intent: 'question' },
            'generate-response': { text: 'Here is my response.' }
          }
        }
      }
    });

    const result = await conductor.executeEnsemble('conversational-chatbot', {
      userId: '123',
      message: 'What is Conductor?'
    });

    expect(result).toBeSuccessful();
    expect(result.output.response).toBeDefined();
    expect(result.output.intent).toBe('question');
  });

  it('should maintain conversation context', async () => {
    // NOTE(review): no mocks here, so this exercises live model output and
    // the 'Alice' assertion is non-deterministic — mock 'generate-response'
    // (as in the first test) for a stable check.
    const conductor = await TestConductor.create();

    // First turn establishes the fact to be remembered.
    await conductor.executeEnsemble('conversational-chatbot', {
      userId: '123',
      message: 'My name is Alice'
    });

    // Second turn should recall it from the stored history.
    const result = await conductor.executeEnsemble('conversational-chatbot', {
      userId: '123',
      message: 'What is my name?'
    });

    expect(result.output.response).toContain('Alice');
  });
});
Best Practices
- Maintain context - Store conversation history
- Classify intents - Route based on user intent
- Use RAG - Retrieve relevant knowledge
- Validate responses - Check quality before sending
- Handle errors gracefully - Friendly error messages
- Rate limit - Prevent abuse
- Monitor conversations - Track analytics
- Test thoroughly - Verify conversation flows

