/**
 * Mistral AI client for multi-agent debate responses.
 *
 * Change note: `maxTokens` is renamed to `max_tokens` for Mistral API
 * compatibility, and `maxTokens` is extracted from options before spreading
 * so the camelCase spelling never reaches the request body. Tested
 * successfully with live API calls.
 */
import dotenv from 'dotenv';

// Load environment variables from .env before any of them are read below.
dotenv.config();

// Credentials and endpoint for the Mistral chat-completions API.
// NOTE(review): MISTRAL_API_KEY is undefined when the .env entry is missing,
// which would send "Bearer undefined" to the API — confirm a startup check
// exists elsewhere in the app.
const MISTRAL_API_KEY = process.env.MISTRAL_API_KEY;

const MISTRAL_API_URL = 'https://api.mistral.ai/v1/chat/completions';
|
/**
 * Agent role system prompts.
 *
 * Every prompt has the same shape — a role statement, a bullet list of
 * duties, and a shared instruction describing the expected JSON output —
 * so they are assembled from one template helper.
 */
const OUTPUT_FORMAT_INSTRUCTION =
  'Output format: JSON with fields {proposal, justification, confidence (0-1), dependencies: []}';

// Build one role prompt from its title and duty list.
const buildAgentPrompt = (roleTitle, duties) =>
  `You are a ${roleTitle} AI. Your role is to:\n${duties
    .map((duty) => `- ${duty}`)
    .join('\n')}\n\n${OUTPUT_FORMAT_INSTRUCTION}`;

const AGENT_PROMPTS = {
  architect: buildAgentPrompt('Software Architect', [
    'Design high-level system architecture',
    'Make technology stack decisions',
    'Define project structure and modules',
    'Consider scalability and maintainability',
    'Provide clear technical justifications'
  ]),

  backend_engineer: buildAgentPrompt('Backend Engineer', [
    'Design API endpoints and data models',
    'Suggest backend technologies and frameworks',
    'Plan database schema',
    'Consider performance and security',
    'Provide implementation guidelines'
  ]),

  frontend_engineer: buildAgentPrompt('Frontend Engineer', [
    'Design user interface structure',
    'Suggest frontend frameworks and libraries',
    'Plan component architecture',
    'Consider UX and performance',
    'Provide implementation guidelines'
  ]),

  designer: buildAgentPrompt('UI/UX Designer', [
    'Design user experience flows',
    'Suggest UI patterns and layouts',
    'Consider accessibility and usability',
    'Provide visual design guidelines',
    'Think about user interactions'
  ])
};
|
|
|
|
/**
 * Call the Mistral chat-completions API.
 *
 * @param {Array<{role: string, content: string}>} messages - Chat messages.
 * @param {object} [options] - Optional overrides; unknown keys are forwarded
 *   verbatim in the request body.
 * @param {string} [options.model='mistral-small-latest'] - Model name.
 * @param {number} [options.temperature=0.7] - Sampling temperature.
 * @param {number} [options.maxTokens=2048] - Sent as `max_tokens`, the field
 *   name the Mistral API expects.
 * @returns {Promise<object>} Parsed JSON response body.
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function callMistralAPI(messages, options = {}) {
  // Destructure all known options up front so the rest-spread below cannot
  // re-introduce `maxTokens` (or duplicate `model`/`temperature`) into the
  // request body. Destructuring defaults also mean an explicit
  // `temperature: 0` is honored, unlike the previous `|| 0.7` fallback.
  const {
    model = 'mistral-small-latest',
    temperature = 0.7,
    maxTokens = 2048,
    ...otherOptions
  } = options;

  const response = await fetch(MISTRAL_API_URL, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${MISTRAL_API_KEY}`
    },
    body: JSON.stringify({
      model,
      messages,
      temperature,
      max_tokens: maxTokens,
      ...otherOptions
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Mistral API error: ${error}`);
  }

  return await response.json();
}
|
|
|
|
/**
 * Generate one agent's response for a debate.
 *
 * @param {string} agentRole - Key into AGENT_PROMPTS; unknown roles fall
 *   back to the architect prompt.
 * @param {string} prompt - The project prompt under discussion.
 * @param {Array<{agent_role: string, content: *}>} [context] - Prior
 *   responses; only the last three are forwarded to limit token use.
 * @returns {Promise<{proposal: *, justification: string, confidence: number,
 *   dependencies: Array, mermaid: (string|null), error?: boolean}>}
 *   Never rejects: API failures yield a structured response with
 *   `error: true` and `confidence: 0.5`.
 */
export async function generateAgentResponse(agentRole, prompt, context = []) {
  const systemPrompt = AGENT_PROMPTS[agentRole] ?? AGENT_PROMPTS.architect;

  const messages = [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: `Project prompt: ${prompt}` }
  ];

  // Add context from previous responses.
  if (context.length > 0) {
    const contextStr = context
      .slice(-3) // Last 3 responses to avoid token bloat
      .map(r => `${r.agent_role}: ${JSON.stringify(r.content)}`)
      .join('\n');

    messages.push({
      role: 'user',
      content: `Previous discussion:\n${contextStr}\n\nProvide your analysis and proposal.`
    });
  }

  try {
    const result = await callMistralAPI(messages, {
      temperature: 0.7,
      maxTokens: 2048
    });

    const content = result.choices[0].message.content;
    const parsedContent = parseAgentContent(content, agentRole);

    // Ensure required fields. `??` (not `||`) so legitimate falsy values —
    // e.g. an explicit confidence of 0 or an empty-string proposal — survive.
    return {
      proposal: parsedContent.proposal ?? content,
      justification: parsedContent.justification ?? '',
      confidence: parsedContent.confidence ?? 0.7,
      dependencies: parsedContent.dependencies ?? [],
      mermaid: parsedContent.mermaid ?? null
    };
  } catch (error) {
    console.error(`Error generating response for ${agentRole}:`, error);

    // Return mock response on error so debate rounds never hard-fail.
    return {
      proposal: `Error generating response: ${error.message}`,
      justification: 'Failed to get AI response',
      confidence: 0.5,
      dependencies: [],
      error: true
    };
  }
}

/**
 * Parse raw model output into a structured object.
 *
 * Tries, in order: JSON inside a ```json fenced block, the first
 * brace-delimited span, then the whole string. If none parse, wraps the raw
 * text in a default-shaped response.
 */
function parseAgentContent(content, agentRole) {
  try {
    const jsonMatch =
      content.match(/```(?:json)?\s*(\{[\s\S]*\})\s*```/) ||
      content.match(/(\{[\s\S]*\})/);
    return JSON.parse(jsonMatch ? jsonMatch[1] : content);
  } catch (parseError) {
    // Not valid JSON — fall back to a structured response around the raw text.
    return {
      proposal: content,
      justification: `Analysis from ${agentRole}`,
      confidence: 0.7,
      dependencies: []
    };
  }
}
|
|
|
|
/**
 * Generate responses from multiple agents in parallel.
 *
 * Each agent's answer is paired with its role so callers can attribute
 * results. Individual failures are absorbed by generateAgentResponse, so
 * the returned promise only rejects on unexpected errors.
 */
export async function generateMultiAgentResponses(agents, prompt, context = []) {
  return Promise.all(
    agents.map(async (agent) => {
      const response = await generateAgentResponse(agent, prompt, context);
      return { agent, response };
    })
  );
}
|
|
|
|
// Default export mirrors the named exports, for consumers that prefer
// `import agents from './…'` over named imports.
export default {
  generateAgentResponse,
  generateMultiAgentResponses
};
|