Implement complete AI debate system with real-time updates

- Add Mistral AI integration for agent responses
  - Create mistralClient service with agent-specific prompts
  - Support for architect, backend engineer, frontend engineer, and designer roles
  - Automatic JSON response parsing and validation

- Implement WebSocket real-time communication
  - Update orchestrator with WebSocket broadcasting
  - Add client subscription system for debate updates
  - Real-time agent response streaming

- Add consensus and voting system
  - Calculate weighted consensus with architect priority
  - Confidence-based voting mechanism
  - Auto-complete debates when consensus reached

- Integrate Mermaid diagram rendering
  - Support for embedded diagrams in agent responses
  - Client-side Mermaid.js integration
  - Auto-render diagrams on response updates

- Update frontend for real-time experience
  - WebSocket composable for reactive updates
  - Live status messages and response streaming
  - Loading states and consensus display

Augustin ROUX, 2025-10-17 11:50:56 +02:00
commit 188395464e (parent b65d42aaf0)
7 changed files with 578 additions and 13 deletions

@@ -5,8 +5,10 @@ import dotenv from 'dotenv';
import { createServer } from 'http';
import { WebSocketServer } from 'ws';
import rateLimit from 'express-rate-limit';
import { parse } from 'url';
import db from './db/schema.js';
import debateRoutes from './routes/debate.js';
import orchestrator from './services/orchestrator.js';
dotenv.config();
@@ -32,15 +34,44 @@ const limiter = rateLimit({
app.use('/api', limiter);
// WebSocket connection handling
wss.on('connection', (ws) => {
console.log('New WebSocket connection established');
wss.on('connection', (ws, req) => {
const { query } = parse(req.url, true);
const debateId = query.debateId ? parseInt(query.debateId) : null;
console.log('New WebSocket connection established', debateId ? `for debate ${debateId}` : '');
if (debateId) {
orchestrator.registerWSClient(debateId, ws);
ws.send(JSON.stringify({
type: 'connected',
debateId,
message: 'Connected to debate updates'
}));
}
ws.on('message', (message) => {
console.log('Received:', message.toString());
// Handle incoming messages
try {
const data = JSON.parse(message.toString());
console.log('Received:', data);
// Handle subscribe to debate
if (data.type === 'subscribe' && data.debateId) {
orchestrator.registerWSClient(parseInt(data.debateId), ws);
ws.send(JSON.stringify({
type: 'subscribed',
debateId: data.debateId
}));
}
} catch (error) {
console.error('WebSocket message error:', error);
}
});
ws.on('close', () => {
if (debateId) {
orchestrator.unregisterWSClient(debateId, ws);
}
console.log('WebSocket connection closed');
});
});
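
A minimal client sketch for this handler (values are illustrative; the project's real client is the useWebSocket composable further down): it can pass the debate id as a query parameter at connect time, or send a subscribe message afterwards.

const ws = new WebSocket('ws://localhost:3000?debateId=42'); // 42 is a hypothetical debate id
ws.onmessage = (event) => {
  const msg = JSON.parse(event.data);
  // msg.type is one of: connected, subscribed, debate_start, agent_response, debate_complete, debate_error
  console.log(msg.type, msg);
};
// Alternative to the query parameter: subscribe explicitly after connecting
ws.onopen = () => ws.send(JSON.stringify({ type: 'subscribe', debateId: 42 }));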

@@ -5,7 +5,7 @@ const router = express.Router();
/**
* POST /api/debate
* Create a new debate
* Create a new debate and start AI discussion
*/
router.post('/', async (req, res) => {
try {
@@ -18,12 +18,19 @@ router.post('/', async (req, res) => {
const debateId = orchestrator.createDebate(prompt);
const agents = orchestrator.selectAgents(prompt);
// Send immediate response
res.json({
debateId,
prompt,
agents,
status: 'ongoing'
});
// Start debate asynchronously (don't wait for response)
orchestrator.startDebate(debateId, agents).catch(error => {
console.error('Debate failed:', error);
});
} catch (error) {
console.error('Error creating debate:', error);
res.status(500).json({ error: 'Failed to create debate' });
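
For reference, a hedged sketch of how a client kicks off a debate through this route (URL and prompt are illustrative); the HTTP response returns immediately while the agents run in the background:

const res = await fetch('http://localhost:3000/api/debate', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ prompt: 'Build a recipe-sharing web app' })
});
// Immediate payload: { debateId, prompt, agents, status: 'ongoing' }
const { debateId } = await res.json();
// The client then opens a WebSocket with ?debateId=<debateId> to stream agent responses.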

@@ -0,0 +1,168 @@
import dotenv from 'dotenv';
dotenv.config();
const MISTRAL_API_KEY = process.env.MISTRAL_API_KEY;
const MISTRAL_API_URL = 'https://api.mistral.ai/v1/chat/completions';
/**
* Agent role system prompts
*/
const AGENT_PROMPTS = {
architect: `You are a Software Architect AI. Your role is to:
- Design high-level system architecture
- Make technology stack decisions
- Define project structure and modules
- Consider scalability and maintainability
- Provide clear technical justifications
Output format: JSON with fields {proposal, justification, confidence (0-1), dependencies: []}`,
backend_engineer: `You are a Backend Engineer AI. Your role is to:
- Design API endpoints and data models
- Suggest backend technologies and frameworks
- Plan database schema
- Consider performance and security
- Provide implementation guidelines
Output format: JSON with fields {proposal, justification, confidence (0-1), dependencies: []}`,
frontend_engineer: `You are a Frontend Engineer AI. Your role is to:
- Design user interface structure
- Suggest frontend frameworks and libraries
- Plan component architecture
- Consider UX and performance
- Provide implementation guidelines
Output format: JSON with fields {proposal, justification, confidence (0-1), dependencies: []}`,
designer: `You are a UI/UX Designer AI. Your role is to:
- Design user experience flows
- Suggest UI patterns and layouts
- Consider accessibility and usability
- Provide visual design guidelines
- Think about user interactions
Output format: JSON with fields {proposal, justification, confidence (0-1), dependencies: []}`
};
/**
* Call Mistral AI API
*/
async function callMistralAPI(messages, options = {}) {
const response = await fetch(MISTRAL_API_URL, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${MISTRAL_API_KEY}`
},
body: JSON.stringify({
model: options.model || 'mistral-small-latest',
messages,
temperature: options.temperature || 0.7,
max_tokens: options.maxTokens || 2048,
...options
})
});
if (!response.ok) {
const error = await response.text();
throw new Error(`Mistral API error: ${error}`);
}
return await response.json();
}
/**
* Generate agent response for a debate
*/
export async function generateAgentResponse(agentRole, prompt, context = []) {
const systemPrompt = AGENT_PROMPTS[agentRole] || AGENT_PROMPTS.architect;
const messages = [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: `Project prompt: ${prompt}` }
];
// Add context from previous responses
if (context.length > 0) {
const contextStr = context
.slice(-3) // Last 3 responses to avoid token bloat
.map(r => `${r.agent_role}: ${JSON.stringify(r.content)}`)
.join('\n');
messages.push({
role: 'user',
content: `Previous discussion:\n${contextStr}\n\nProvide your analysis and proposal.`
});
}
try {
const result = await callMistralAPI(messages, {
temperature: 0.7,
maxTokens: 2048
});
const content = result.choices[0].message.content;
// Try to parse as JSON
let parsedContent;
try {
// Extract JSON from markdown code blocks if present
const jsonMatch = content.match(/```(?:json)?\s*(\{[\s\S]*\})\s*```/) ||
content.match(/(\{[\s\S]*\})/);
if (jsonMatch) {
parsedContent = JSON.parse(jsonMatch[1]);
} else {
parsedContent = JSON.parse(content);
}
} catch (parseError) {
// If not valid JSON, create structured response
parsedContent = {
proposal: content,
justification: `Analysis from ${agentRole}`,
confidence: 0.7,
dependencies: []
};
}
// Ensure required fields
return {
proposal: parsedContent.proposal || content,
justification: parsedContent.justification || '',
confidence: parsedContent.confidence || 0.7,
dependencies: parsedContent.dependencies || [],
mermaid: parsedContent.mermaid || null
};
} catch (error) {
console.error(`Error generating response for ${agentRole}:`, error);
// Return a fallback error response so the debate can continue
return {
proposal: `Error generating response: ${error.message}`,
justification: 'Failed to get AI response',
confidence: 0.5,
dependencies: [],
error: true
};
}
}
/**
* Generate responses from multiple agents in parallel
*/
export async function generateMultiAgentResponses(agents, prompt, context = []) {
const promises = agents.map(agent =>
generateAgentResponse(agent, prompt, context)
.then(response => ({ agent, response }))
);
return await Promise.all(promises);
}
export default {
generateAgentResponse,
generateMultiAgentResponses
};
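
To make the contract concrete, a hypothetical example (not taken from the diff) of the JSON an agent is expected to produce, including the optional mermaid field that the frontend renders:

// Hypothetical architect reply matching the {proposal, justification, confidence, dependencies} contract
const exampleArchitectResponse = {
  proposal: 'Node.js/Express backend with a Vue 3 frontend and SQLite for persistence',
  justification: 'Keeps the stack lightweight and consistent with the existing tooling',
  confidence: 0.85,
  dependencies: ['express', 'vue', 'better-sqlite3'],
  // Optional: a Mermaid definition, rendered client-side by the debate view
  mermaid: 'graph TD; Client[Vue SPA] -->|REST + WS| API[Express API]; API --> DB[(SQLite)]'
};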

@@ -1,8 +1,43 @@
import db from '../db/schema.js';
import { generateMultiAgentResponses } from './mistralClient.js';
class Orchestrator {
constructor() {
this.activeDebates = new Map();
this.wsClients = new Map(); // debateId -> Set of WebSocket clients
}
/**
* Register WebSocket client for a debate
*/
registerWSClient(debateId, ws) {
if (!this.wsClients.has(debateId)) {
this.wsClients.set(debateId, new Set());
}
this.wsClients.get(debateId).add(ws);
}
/**
* Unregister WebSocket client
*/
unregisterWSClient(debateId, ws) {
if (this.wsClients.has(debateId)) {
this.wsClients.get(debateId).delete(ws);
}
}
/**
* Broadcast message to all clients watching a debate
*/
broadcast(debateId, message) {
if (this.wsClients.has(debateId)) {
const data = JSON.stringify(message);
this.wsClients.get(debateId).forEach(ws => {
if (ws.readyState === 1) { // OPEN
ws.send(data);
}
});
}
}
/**
@@ -102,6 +137,100 @@ class Orchestrator {
return agents;
}
/**
* Start AI debate - trigger agents and collect responses
*/
async startDebate(debateId, agents) {
try {
const debate = this.getDebate(debateId);
if (!debate) {
throw new Error('Debate not found');
}
const prompt = debate.prompt;
const context = this.getDebateResponses(debateId);
// Broadcast debate start
this.broadcast(debateId, {
type: 'debate_start',
debateId,
agents,
message: 'AI agents are analyzing your project...'
});
// Generate responses from all agents in parallel
const agentResponses = await generateMultiAgentResponses(agents, prompt, context);
// Store responses and broadcast each one
for (const { agent, response } of agentResponses) {
const responseId = this.addResponse(debateId, agent, response);
this.broadcast(debateId, {
type: 'agent_response',
debateId,
responseId,
agent,
response
});
}
// Calculate consensus
const consensus = this.calculateConsensus(agentResponses);
// Complete debate
this.completeDebate(debateId);
this.broadcast(debateId, {
type: 'debate_complete',
debateId,
consensus,
message: 'Debate completed successfully'
});
return {
responses: agentResponses,
consensus
};
} catch (error) {
console.error('Error in debate:', error);
this.failDebate(debateId);
this.broadcast(debateId, {
type: 'debate_error',
debateId,
error: error.message
});
throw error;
}
}
/**
* Calculate consensus from agent responses
*/
calculateConsensus(agentResponses) {
const proposals = agentResponses.map(({ agent, response }) => ({
agent,
proposal: response.proposal,
confidence: response.confidence || 0.5
}));
// Confidence-weighted average; the architect's vote carries 1.5x weight
const weightSum = proposals.reduce((sum, p) => sum + (p.agent === 'architect' ? 1.5 : 1.0), 0);
const weightedSum = proposals.reduce((sum, p) => {
const weight = p.agent === 'architect' ? 1.5 : 1.0;
return sum + (p.confidence * weight);
}, 0);
const avgConfidence = weightSum > 0 ? weightedSum / weightSum : 0;
return {
proposals,
averageConfidence: avgConfidence,
status: avgConfidence >= 0.6 ? 'consensus_reached' : 'needs_discussion'
};
}
}
export default new Orchestrator();
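
As a rough illustration of the weighting (hypothetical numbers, assuming the backend's ./services/orchestrator.js path):

import orchestrator from './services/orchestrator.js';

const sample = [
  { agent: 'architect', response: { proposal: 'Monorepo with a REST API', confidence: 0.9 } },
  { agent: 'backend_engineer', response: { proposal: 'Express + SQLite', confidence: 0.7 } },
  { agent: 'frontend_engineer', response: { proposal: 'Vue 3 SPA', confidence: 0.6 } }
];
// weightedSum = 0.9*1.5 + 0.7*1.0 + 0.6*1.0 = 2.65; weightSum = 3.5
// averageConfidence ≈ 0.76, above the 0.6 threshold, so status is 'consensus_reached'
console.log(orchestrator.calculateConsensus(sample));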

@@ -1 +1,2 @@
VITE_API_URL=http://localhost:3000
VITE_WS_URL=ws://localhost:3000

@@ -2,7 +2,7 @@
<div class="debate-thread">
<div class="debate-header">
<h2>Debate #{{ debate.debateId }}</h2>
<span class="status" :class="debate.status">{{ debate.status }}</span>
<span class="status" :class="currentStatus">{{ currentStatus }}</span>
</div>
<div class="prompt-display">
@@ -24,31 +24,69 @@
</div>
</div>
<div class="responses" v-if="debate.responses && debate.responses.length > 0">
<!-- Status messages -->
<div v-if="statusMessage" class="status-message">
{{ statusMessage }}
</div>
<!-- Responses -->
<div class="responses" v-if="allResponses.length > 0">
<h3>Debate Responses</h3>
<div
v-for="(response, index) in debate.responses"
v-for="(response, index) in allResponses"
:key="index"
class="response-card"
:class="getAgentClass(response.agent_role)"
:class="getAgentClass(response.agent || response.agent_role)"
>
<div class="response-header">
<span class="agent-name">{{ formatAgentName(response.agent_role) }}</span>
<span class="agent-name">{{ formatAgentName(response.agent || response.agent_role) }}</span>
<span class="timestamp">{{ formatTimestamp(response.timestamp) }}</span>
</div>
<div class="response-content">
{{ typeof response.content === 'string' ? response.content : JSON.stringify(response.content, null, 2) }}
<div v-if="response.response">
<p><strong>Proposal:</strong> {{ response.response.proposal }}</p>
<p v-if="response.response.justification"><strong>Justification:</strong> {{ response.response.justification }}</p>
<p v-if="response.response.confidence"><strong>Confidence:</strong> {{ Math.round(response.response.confidence * 100) }}%</p>
<div v-if="response.response.dependencies && response.response.dependencies.length">
<strong>Dependencies:</strong>
<ul>
<li v-for="dep in response.response.dependencies" :key="dep">{{ dep }}</li>
</ul>
</div>
<!-- Mermaid diagram rendering -->
<div v-if="response.response.mermaid" class="mermaid-container" :ref="`mermaid-${index}`">
<pre class="mermaid">{{ response.response.mermaid.code || response.response.mermaid }}</pre>
</div>
</div>
<div v-else>
{{ typeof response.content === 'string' ? response.content : JSON.stringify(response.content, null, 2) }}
</div>
</div>
</div>
</div>
<div v-else class="no-responses">
<p>Waiting for AI agents to respond...</p>
<div class="loading-spinner"></div>
<p>AI agents are analyzing your project...</p>
</div>
<!-- Consensus section -->
<div v-if="consensus" class="consensus-section">
<h3>Consensus</h3>
<div class="consensus-card">
<p><strong>Status:</strong> {{ consensus.status }}</p>
<p><strong>Average Confidence:</strong> {{ Math.round(consensus.averageConfidence * 100) }}%</p>
</div>
</div>
</div>
</template>
<script setup>
import { ref, computed, watch, onMounted, nextTick } from 'vue'
import { useWebSocket } from '../composables/useWebSocket'
import mermaid from 'mermaid'
const props = defineProps({
debate: {
type: Object,
@@ -56,7 +94,61 @@ const props = defineProps({
}
})
const allResponses = ref([...(props.debate.responses || [])])
const currentStatus = ref(props.debate.status || 'ongoing')
const statusMessage = ref(null)
const consensus = ref(null)
// Initialize Mermaid
mermaid.initialize({
startOnLoad: true,
theme: 'default',
securityLevel: 'loose'
})
// WebSocket setup
const { messages, connect } = useWebSocket(props.debate.debateId)
onMounted(() => {
connect()
renderMermaidDiagrams()
})
// Watch for WebSocket messages
watch(messages, (newMessages) => {
const latestMessage = newMessages[newMessages.length - 1]
if (!latestMessage) return
switch (latestMessage.type) {
case 'debate_start':
statusMessage.value = latestMessage.message
currentStatus.value = 'ongoing'
break
case 'agent_response':
allResponses.value.push({
agent: latestMessage.agent,
response: latestMessage.response,
timestamp: new Date().toISOString()
})
nextTick(() => renderMermaidDiagrams())
break
case 'debate_complete':
currentStatus.value = 'completed'
statusMessage.value = latestMessage.message
consensus.value = latestMessage.consensus
break
case 'debate_error':
currentStatus.value = 'failed'
statusMessage.value = `Error: ${latestMessage.error}`
break
}
}, { deep: true })
function formatAgentName(agent) {
if (!agent) return ''
return agent
.split('_')
.map(word => word.charAt(0).toUpperCase() + word.slice(1))
@@ -78,6 +170,17 @@ function formatTimestamp(timestamp) {
const date = new Date(timestamp)
return date.toLocaleTimeString()
}
async function renderMermaidDiagrams() {
await nextTick()
try {
await mermaid.run({
querySelector: '.mermaid'
})
} catch (error) {
console.error('Mermaid rendering error:', error)
}
}
</script>
<style scoped>
@@ -167,6 +270,15 @@ function formatTimestamp(timestamp) {
background-color: #95a5a6;
}
.status-message {
background-color: #e8f4f8;
padding: 1rem;
border-radius: 8px;
margin-bottom: 1.5rem;
color: #2c3e50;
text-align: center;
}
.responses h3 {
margin-bottom: 1rem;
}
@@ -198,7 +310,22 @@ function formatTimestamp(timestamp) {
.response-content {
line-height: 1.6;
white-space: pre-wrap;
}
.response-content p {
margin-bottom: 0.5rem;
}
.response-content ul {
margin-left: 1.5rem;
}
.mermaid-container {
margin-top: 1rem;
padding: 1rem;
background-color: #f8f9fa;
border-radius: 8px;
overflow-x: auto;
}
.no-responses {
@@ -206,4 +333,35 @@ function formatTimestamp(timestamp) {
padding: 3rem;
color: #7f8c8d;
}
.loading-spinner {
margin: 0 auto 1rem;
width: 40px;
height: 40px;
border: 4px solid #e0e0e0;
border-top-color: #667eea;
border-radius: 50%;
animation: spin 1s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
.consensus-section {
margin-top: 2rem;
padding-top: 2rem;
border-top: 2px solid #e0e0e0;
}
.consensus-card {
background-color: #e8f5e9;
padding: 1.5rem;
border-radius: 8px;
border-left: 4px solid #2ecc71;
}
.consensus-card p {
margin: 0.5rem 0;
}
</style>

@@ -0,0 +1,71 @@
import { ref, onUnmounted } from 'vue'
export function useWebSocket(debateId) {
const ws = ref(null)
const connected = ref(false)
const messages = ref([])
const WS_URL = import.meta.env.VITE_WS_URL || 'ws://localhost:3000'
function connect() {
const url = debateId ? `${WS_URL}?debateId=${debateId}` : WS_URL
ws.value = new WebSocket(url)
ws.value.onopen = () => {
connected.value = true
console.log('WebSocket connected')
}
ws.value.onmessage = (event) => {
try {
const data = JSON.parse(event.data)
messages.value.push(data)
} catch (error) {
console.error('WebSocket message parse error:', error)
}
}
ws.value.onerror = (error) => {
console.error('WebSocket error:', error)
}
ws.value.onclose = () => {
connected.value = false
console.log('WebSocket disconnected')
}
}
function disconnect() {
if (ws.value) {
ws.value.close()
ws.value = null
}
}
function send(data) {
if (ws.value && connected.value) {
ws.value.send(JSON.stringify(data))
}
}
function subscribe(newDebateId) {
send({
type: 'subscribe',
debateId: newDebateId
})
}
onUnmounted(() => {
disconnect()
})
return {
ws,
connected,
messages,
connect,
disconnect,
send,
subscribe
}
}
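
A minimal usage sketch for the composable in another component (ids and the second debate are illustrative):

import { onMounted, watch } from 'vue'
import { useWebSocket } from '../composables/useWebSocket'

const { connected, messages, connect, subscribe } = useWebSocket(42)

onMounted(() => connect())

// Once connected, additional debates can be followed over the same socket
watch(connected, (isUp) => {
  if (isUp) subscribe(43)
})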