Use Cases
- Content moderation (multiple checks)
- Document analysis (extract different aspects)
- Code review (security, performance, style)
- Market research (multiple sources)
- Sentiment analysis (different models/prompts)
Basic Pattern
```yaml
ensemble: multi-agent-analysis

agents:
  # All run in parallel
  - name: agent-1
    operation: think
    config:
      prompt: "Analyze aspect 1 of: ${input.content}"

  - name: agent-2
    operation: think
    config:
      prompt: "Analyze aspect 2 of: ${input.content}"

  - name: agent-3
    operation: think
    config:
      prompt: "Analyze aspect 3 of: ${input.content}"

  # Aggregate results
  - name: aggregate
    operation: code
    config:
      script: scripts/aggregate-analysis-results
    input:
      aspect1: ${agent-1.output}
      aspect2: ${agent-2.output}
      aspect3: ${agent-3.output}
```
```typescript
import { createEnsemble, step, parallel } from '@ensemble-edge/conductor'

const multiAgentAnalysis = createEnsemble('multi-agent-analysis')
  // All run in parallel using the parallel() primitive
  .addStep(
    parallel('analyze-aspects')
      .steps(
        step('agent-1')
          .operation('think')
          .config({ prompt: 'Analyze aspect 1 of: ${input.content}' }),
        step('agent-2')
          .operation('think')
          .config({ prompt: 'Analyze aspect 2 of: ${input.content}' }),
        step('agent-3')
          .operation('think')
          .config({ prompt: 'Analyze aspect 3 of: ${input.content}' })
      )
  )
  // Aggregate results
  .addStep(
    step('aggregate')
      .operation('code')
      .config({ script: 'scripts/aggregate-analysis-results' })
      .input({
        aspect1: '${agent-1.output}',
        aspect2: '${agent-2.output}',
        aspect3: '${agent-3.output}'
      })
  )
  .build()

export default multiAgentAnalysis
```
```typescript
// scripts/aggregate-analysis-results.ts
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function aggregateAnalysisResults(context: AgentExecutionContext) {
  // Merge the three parallel analyses into a single result object
  const { aspect1, aspect2, aspect3 } = context.input
  return {
    aspect1,
    aspect2,
    aspect3
  }
}
```
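Conceptually, the pattern is fan-out/fan-in: independent steps start together, and the aggregate step runs only after every branch has settled. As a rough plain-TypeScript analogy (this is not Conductor's actual runtime, and `analyze` is a stand-in for a think step):

```typescript
// Fan-out/fan-in sketch in plain TypeScript. An analogy for the execution
// model, not Conductor's runtime; analyze() stands in for a think step.
async function runMultiAgentAnalysis(
  content: string,
  analyze: (aspect: number, content: string) => Promise<string>
) {
  // Fan out: all three "agents" start concurrently
  const [aspect1, aspect2, aspect3] = await Promise.all([
    analyze(1, content),
    analyze(2, content),
    analyze(3, content)
  ])
  // Fan in: aggregation happens only after all branches finish
  return { aspect1, aspect2, aspect3 }
}
```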
Content Moderation
```yaml
ensemble: moderate-content

inputs:
  content:
    type: string
    required: true

agents:
  # Run all checks in parallel
  - name: check-explicit
    operation: think
    config:
      provider: openai
      model: gpt-4o-mini
      temperature: 0
      prompt: |
        Check if this content contains explicit material.
        Return JSON: { "explicit": boolean, "confidence": number, "reason": string }
        Content: ${input.content}

  - name: check-hate-speech
    operation: think
    config:
      provider: openai
      model: gpt-4o-mini
      temperature: 0
      prompt: |
        Check if this content contains hate speech.
        Return JSON: { "hate_speech": boolean, "confidence": number, "reason": string }
        Content: ${input.content}

  - name: check-spam
    operation: think
    config:
      provider: openai
      model: gpt-4o-mini
      temperature: 0
      prompt: |
        Check if this content is spam.
        Return JSON: { "spam": boolean, "confidence": number, "reason": string }
        Content: ${input.content}

  - name: check-violence
    operation: think
    config:
      provider: openai
      model: gpt-4o-mini
      temperature: 0
      prompt: |
        Check if this content contains violence.
        Return JSON: { "violence": boolean, "confidence": number, "reason": string }
        Content: ${input.content}

  # Aggregate all checks
  - name: aggregate
    operation: code
    config:
      script: scripts/aggregate-moderation-checks
    input:
      explicit_check: ${check-explicit.output}
      hate_speech_check: ${check-hate-speech.output}
      spam_check: ${check-spam.output}
      violence_check: ${check-violence.output}

output:
  moderation: ${aggregate.output}
```
```typescript
// scripts/aggregate-moderation-checks.ts
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function aggregateModerationChecks(context: AgentExecutionContext) {
  const { explicit_check, hate_speech_check, spam_check, violence_check } = context.input

  const checks = {
    explicit: JSON.parse(explicit_check),
    hate_speech: JSON.parse(hate_speech_check),
    spam: JSON.parse(spam_check),
    violence: JSON.parse(violence_check)
  }

  // A check is flagged when its boolean field (the first key in its JSON) is true
  const flags = Object.entries(checks)
    .filter(([_, v]) => (v as any)[Object.keys(v as any)[0]])
    .map(([k, v]) => ({ type: k, ...(v as any) }))

  return {
    safe: flags.length === 0,
    flags,
    checks
  }
}
```
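One caveat with the bare `JSON.parse` calls above: models sometimes wrap their answer in a Markdown code fence or add prose around the JSON, which makes the parse throw and the whole aggregate step fail. A defensive helper, sketched here under the hypothetical name `parseModelJson` (not part of Conductor), trades strictness for resilience:

```typescript
// Hypothetical defensive parser for model output; not a Conductor API.
function parseModelJson<T>(raw: string): T {
  // Strip a surrounding Markdown code fence, if any
  const cleaned = raw.trim().replace(/^```(?:json)?\s*/, '').replace(/\s*```$/, '')
  try {
    return JSON.parse(cleaned) as T
  } catch {
    // Fall back to the first {...} span in the text
    const match = cleaned.match(/\{[\s\S]*\}/)
    if (match) return JSON.parse(match[0]) as T
    throw new Error(`Model output is not valid JSON: ${raw.slice(0, 80)}...`)
  }
}
```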
Code Review
````yaml
ensemble: code-review

inputs:
  code:
    type: string
    required: true
  language:
    type: string
    required: true

agents:
  # Parallel reviews
  - name: security-review
    operation: think
    config:
      provider: openai
      model: gpt-4o
      prompt: |
        Review this ${input.language} code for security vulnerabilities:
        - SQL injection
        - XSS
        - Authentication issues
        - Data exposure
        Code:
        ```${input.language}
        ${input.code}
        ```
        Return JSON: {
          "issues": [{ "severity": "high|medium|low", "issue": string, "line": number }],
          "score": number
        }

  - name: performance-review
    operation: think
    config:
      provider: openai
      model: gpt-4o
      prompt: |
        Review this ${input.language} code for performance issues:
        - Inefficient algorithms
        - Memory leaks
        - Unnecessary computations
        - Database query optimization
        Code:
        ```${input.language}
        ${input.code}
        ```
        Return JSON: {
          "issues": [{ "severity": "high|medium|low", "issue": string, "line": number }],
          "score": number
        }

  - name: style-review
    operation: think
    config:
      provider: openai
      model: gpt-4o-mini
      prompt: |
        Review this ${input.language} code for style issues:
        - Naming conventions
        - Code organization
        - Comments and documentation
        - Best practices
        Code:
        ```${input.language}
        ${input.code}
        ```
        Return JSON: {
          "issues": [{ "severity": "high|medium|low", "issue": string, "line": number }],
          "score": number
        }

  - name: test-coverage-review
    operation: think
    config:
      provider: openai
      model: gpt-4o
      prompt: |
        Review this ${input.language} code for test coverage:
        - Missing test cases
        - Edge cases
        - Error handling
        Code:
        ```${input.language}
        ${input.code}
        ```
        Return JSON: {
          "missing_tests": [string],
          "score": number
        }

  # Aggregate reviews
  - name: aggregate
    operation: code
    config:
      script: scripts/aggregate-code-reviews
    input:
      security_review: ${security-review.output}
      performance_review: ${performance-review.output}
      style_review: ${style-review.output}
      test_coverage_review: ${test-coverage-review.output}

output:
  review: ${aggregate.output}
````
```typescript
// scripts/aggregate-code-reviews.ts
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function aggregateCodeReviews(context: AgentExecutionContext) {
  const { security_review, performance_review, style_review, test_coverage_review } = context.input

  const security = JSON.parse(security_review)
  const performance = JSON.parse(performance_review)
  const style = JSON.parse(style_review)
  const tests = JSON.parse(test_coverage_review)

  // Tag each issue with its category so the combined list stays traceable
  const allIssues = [
    ...security.issues.map((i: any) => ({ ...i, category: 'security' })),
    ...performance.issues.map((i: any) => ({ ...i, category: 'performance' })),
    ...style.issues.map((i: any) => ({ ...i, category: 'style' }))
  ]

  const overallScore = Math.round(
    (security.score + performance.score + style.score + tests.score) / 4
  )

  return {
    score: overallScore,
    // Highest-severity issues first
    issues: allIssues.sort((a: any, b: any) => {
      const severityOrder: Record<string, number> = { high: 0, medium: 1, low: 2 }
      return severityOrder[a.severity] - severityOrder[b.severity]
    }),
    security: security.score,
    performance: performance.score,
    style: style.score,
    test_coverage: tests.score,
    missing_tests: tests.missing_tests
  }
}
```
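A quick way to sanity-check the aggregator is to invoke it directly with mocked review outputs. The cast below is for illustration only; in production the context object is constructed by the Conductor runtime:

```typescript
// Local smoke test with mocked review outputs (illustrative only)
const mockContext = {
  input: {
    security_review: JSON.stringify({
      issues: [{ severity: 'high', issue: 'SQL injection in query builder', line: 42 }],
      score: 60
    }),
    performance_review: JSON.stringify({ issues: [], score: 90 }),
    style_review: JSON.stringify({
      issues: [{ severity: 'low', issue: 'Inconsistent naming', line: 7 }],
      score: 85
    }),
    test_coverage_review: JSON.stringify({ missing_tests: ['error paths'], score: 70 })
  }
} as unknown as AgentExecutionContext

// Expected shape: { score: 76, issues: [the high issue, then the low one], security: 60, ... }
console.log(aggregateCodeReviews(mockContext))
```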
Document Analysis
```yaml
ensemble: analyze-document

agents:
  # Extract different aspects in parallel
  - name: extract-entities
    operation: think
    config:
      prompt: |
        Extract named entities (people, organizations, locations, dates).
        Return JSON array.
        Document: ${input.document}

  - name: extract-topics
    operation: think
    config:
      prompt: |
        Extract main topics and themes.
        Return JSON array of topics with confidence scores.
        Document: ${input.document}

  - name: sentiment-analysis
    operation: think
    config:
      prompt: |
        Analyze sentiment: positive, negative, neutral.
        Include confidence score.
        Return JSON.
        Document: ${input.document}

  - name: extract-key-points
    operation: think
    config:
      prompt: |
        Extract key points and action items.
        Return JSON array.
        Document: ${input.document}

  - name: summarize
    operation: think
    config:
      prompt: |
        Provide a 2-3 sentence summary.
        Document: ${input.document}

  # Combine all analyses
  - name: combine
    operation: code
    config:
      script: scripts/combine-document-analyses
    input:
      summary: ${summarize.output}
      entities: ${extract-entities.output}
      topics: ${extract-topics.output}
      sentiment: ${sentiment-analysis.output}
      key_points: ${extract-key-points.output}
```
```typescript
// scripts/combine-document-analyses.ts
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function combineDocumentAnalyses(context: AgentExecutionContext) {
  const { summary, entities, topics, sentiment, key_points } = context.input
  return {
    summary,
    entities: JSON.parse(entities),
    topics: JSON.parse(topics),
    sentiment: JSON.parse(sentiment),
    key_points: JSON.parse(key_points)
  }
}
```
Market Research
```yaml
ensemble: market-research

agents:
  # Scrape multiple sources in parallel
  - name: scrape-competitor-1
    operation: scrape
    inputs:
      url: ${input.competitor_1_url}

  - name: scrape-competitor-2
    operation: scrape
    inputs:
      url: ${input.competitor_2_url}

  - name: scrape-competitor-3
    operation: scrape
    inputs:
      url: ${input.competitor_3_url}

  # Analyze each in parallel
  - name: analyze-1
    operation: think
    config:
      prompt: |
        Analyze this competitor website:
        - Pricing
        - Features
        - Positioning
        - Target market
        Content: ${scrape-competitor-1.output.text}

  - name: analyze-2
    operation: think
    config:
      prompt: |
        Analyze this competitor website:
        - Pricing
        - Features
        - Positioning
        - Target market
        Content: ${scrape-competitor-2.output.text}

  - name: analyze-3
    operation: think
    config:
      prompt: |
        Analyze this competitor website:
        - Pricing
        - Features
        - Positioning
        - Target market
        Content: ${scrape-competitor-3.output.text}

  # Comparative analysis
  - name: compare
    operation: think
    config:
      prompt: |
        Compare these three competitors:
        Competitor 1: ${analyze-1.output}
        Competitor 2: ${analyze-2.output}
        Competitor 3: ${analyze-3.output}
        Provide:
        - Competitive advantages
        - Market gaps
        - Recommendations
```
Ensemble of Ensembles
```yaml
ensemble: comprehensive-analysis

agents:
  # Run multiple ensembles in parallel
  - name: content-check
    ensemble: moderate-content
    inputs:
      content: ${input.content}

  - name: sentiment-check
    ensemble: analyze-sentiment
    inputs:
      text: ${input.content}

  - name: quality-check
    ensemble: check-quality
    inputs:
      text: ${input.content}

  # Meta-analysis
  - name: meta-analyze
    operation: think
    config:
      prompt: |
        Based on these analyses, provide an overall assessment:
        Moderation: ${content-check.output}
        Sentiment: ${sentiment-check.output}
        Quality: ${quality-check.output}
        Should we approve, reject, or flag for review?
```
Best Practices
1. Parallel Execution
```yaml
# All these run simultaneously
agents:
  - name: check-1
    operation: think
  - name: check-2
    operation: think
  - name: check-3
    operation: think

  # Aggregate waits for all of them
  - name: aggregate
    operation: code
```
2. Handle Timeouts
```yaml
agents:
  - name: slow-agent
    operation: think
    timeout: 30000

  - name: aggregate
    condition: ${slow-agent.success || slow-agent.timeout}
    operation: code
    config:
      script: scripts/aggregate-with-timeout
    input:
      agent_success: ${slow-agent.success}
      agent_output: ${slow-agent.output}
```
```typescript
// scripts/aggregate-with-timeout.ts
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function aggregateWithTimeout(context: AgentExecutionContext) {
  const { agent_success, agent_output } = context.input
  return {
    result: agent_success ? agent_output : 'timeout'
  }
}
```
3. Fail Gracefully
```yaml
agents:
  - name: agent-1
    operation: think
  # agent-2 and agent-3 defined the same way

  - name: aggregate
    operation: code
    config:
      script: scripts/aggregate-successful-results
    input:
      agent1_success: ${agent-1.success}
      agent1_output: ${agent-1.output}
      agent2_success: ${agent-2.success}
      agent2_output: ${agent-2.output}
      agent3_success: ${agent-3.success}
      agent3_output: ${agent-3.output}
```
```typescript
// scripts/aggregate-successful-results.ts
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function aggregateSuccessfulResults(context: AgentExecutionContext) {
  const {
    agent1_success, agent1_output,
    agent2_success, agent2_output,
    agent3_success, agent3_output
  } = context.input

  // Keep only the outputs of agents that completed successfully
  const results = []
  if (agent1_success) results.push(agent1_output)
  if (agent2_success) results.push(agent2_output)
  if (agent3_success) results.push(agent3_output)

  return { results }
}
```
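Hard-coding three success/output pairs does not scale as the fan-out grows. A generic variant, assuming the ensemble keeps the `agentN_success`/`agentN_output` naming convention used above, collects whatever succeeded:

```typescript
// Generic sketch: keep the output of every agent whose *_success flag is true.
// Assumes inputs follow the agentN_success / agentN_output convention above.
import type { AgentExecutionContext } from '@ensemble-edge/conductor'

export default function aggregateSuccessfulResults(context: AgentExecutionContext) {
  const input = context.input as Record<string, unknown>
  const results = Object.keys(input)
    .filter(key => key.endsWith('_success') && input[key])
    .map(key => input[key.replace(/_success$/, '_output')])
  return { results }
}
```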
4. Optimize Costs
```yaml
# Use cheaper models for non-critical checks
agents:
  - name: critical-check
    operation: think
    config:
      model: gpt-4o

  - name: simple-check
    operation: think
    config:
      model: gpt-4o-mini  # Cheaper
```
Performance Tips
- Limit parallelism: cap fan-out at roughly 10 concurrent agents (see the sketch below)
- Use caching: cache results for repeated or similar analyses
- Set timeouts appropriately: don't let one slow agent block the fan-in forever
- Fail gracefully: partial results beat no results
- Monitor costs: every parallel agent is a separate LLM call
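The parallelism cap can also be enforced outside the framework, for example when a step fans out over an arbitrary list of calls. A minimal, framework-agnostic promise pool in plain TypeScript (not a Conductor API):

```typescript
// Minimal promise pool: run tasks with at most `limit` in flight at once.
// Plain TypeScript, independent of Conductor.
async function runWithLimit<T>(tasks: Array<() => Promise<T>>, limit: number): Promise<T[]> {
  const results: T[] = new Array(tasks.length)
  let next = 0
  async function worker(): Promise<void> {
    while (next < tasks.length) {
      const i = next++ // claim the next task index
      results[i] = await tasks[i]()
    }
  }
  // Spawn up to `limit` workers that drain the task queue
  await Promise.all(Array.from({ length: Math.min(limit, tasks.length) }, worker))
  return results
}

// Usage: cap 25 moderation calls at 10 concurrent requests
// const outputs = await runWithLimit(calls, 10)
```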

