From f9c4cf11ff5e09b69cdcb8e26cdfec305bb0db48 Mon Sep 17 00:00:00 2001 From: Augustin Date: Thu, 23 Apr 2026 21:50:06 +0200 Subject: [PATCH] feat(shell): dedicated System Analyst AI, no code execution, analyze system MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - New ShellConvStore with persistent history (shell_conversation.json) - 100k token limit — input grays out, must /clear to continue - Commands limited to /clear and /help only - Shell AI has NO tools — read-only analysis, never executes code - "Analyste Système" panel with system analysis button - System analysis uses Studio AI to write system_analysis.md, prepended as context on every conversation start - Code blocks show "Copier" and "Terminal" buttons to copy or send code directly to the active terminal via WebSocket - Token bar shows usage with warning at 80% 💘 Generated with Crush Assisted-by: GLM-5.1 via Crush --- internal/api/handlers_shell_chat.go | 313 +++++++++++++++++----------- internal/api/server.go | 5 + internal/api/shell_conversation.go | 121 +++++++++++ web/src/api/client.js | 24 +-- web/src/components/Config.jsx | 94 +++++++-- web/src/components/Shell.jsx | 202 ++++++++++++++---- web/src/components/Studio.jsx | 24 ++- web/src/i18n/fr.js | 4 +- web/src/styles/global.css | 68 +++++- 9 files changed, 634 insertions(+), 221 deletions(-) create mode 100644 internal/api/shell_conversation.go diff --git a/internal/api/handlers_shell_chat.go b/internal/api/handlers_shell_chat.go index e3c633a..2ac14b3 100644 --- a/internal/api/handlers_shell_chat.go +++ b/internal/api/handlers_shell_chat.go @@ -1,53 +1,24 @@ package api import ( - "context" "encoding/json" + "fmt" "net/http" + "os" + "os/exec" + "runtime" "strings" + "github.com/muyue/muyue/internal/agent" "github.com/muyue/muyue/internal/orchestrator" ) -const maxShellToolIterations = 10 - type ShellChatRequest struct { - Message string `json:"message"` - Context string `json:"context,omitempty"` - 
History []string `json:"history,omitempty"` - Cwd string `json:"cwd,omitempty"` - Platform string `json:"platform,omitempty"` - Stream bool `json:"stream"` -} - -type ShellChatResponse struct { - Content string `json:"content,omitempty"` - ToolCalls []ToolCallInfo `json:"tool_calls,omitempty"` - Error string `json:"error,omitempty"` -} - -type ToolCallInfo struct { - ID string `json:"id"` - Name string `json:"name"` - Args map[string]interface{} `json:"args"` - Result *toolResponseData `json:"result,omitempty"` - Error string `json:"error,omitempty"` -} - -func toString(v interface{}) string { - if v == nil { - return "" - } - s, _ := v.(string) - return s -} - -func toBool(v interface{}) bool { - if v == nil { - return false - } - b, _ := v.(bool) - return b + Message string `json:"message"` + Context string `json:"context,omitempty"` + Cwd string `json:"cwd,omitempty"` + Platform string `json:"platform,omitempty"` + Stream bool `json:"stream"` } func (s *Server) handleShellChat(w http.ResponseWriter, r *http.Request) { @@ -56,6 +27,11 @@ func (s *Server) handleShellChat(w http.ResponseWriter, r *http.Request) { return } + if s.shellConvStore.AtLimit() { + writeError(w, "context limit reached, use /clear", http.StatusBadRequest) + return + } + var req ShellChatRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, err.Error(), http.StatusBadRequest) @@ -67,142 +43,237 @@ func (s *Server) handleShellChat(w http.ResponseWriter, r *http.Request) { return } + s.shellConvStore.Add("user", req.Message) + orb, err := orchestrator.New(s.config) if err != nil { writeError(w, err.Error(), http.StatusServiceUnavailable) return } - orb.SetSystemPrompt(s.buildShellSystemPrompt(req)) - orb.SetTools(s.agentToolsJSON) + orb.SetSystemPrompt(s.buildShellSystemPromptV2(req)) if req.Stream { - s.handleShellChatStream(w, orb, req) + s.handleShellChatStreamV2(w, orb) } else { - s.handleShellChatNonStream(w, orb, req) + s.handleShellChatNonStreamV2(w, orb) } 
} -func (s *Server) buildShellSystemPrompt(req ShellChatRequest) string { +func (s *Server) buildShellSystemPromptV2(_ ShellChatRequest) string { var sb strings.Builder - sb.WriteString(`Tu es l'assistant Shell de Muyue. Tu as accès à un terminal et peux aider l'utilisateur avec: -- Exécuter des commandes shell -- Expliquer des erreurs de commandes -- Suggérer des commandes appropriées pour la tâche demandée -- Lire et explorer des fichiers -- Configurer l'environnement de développement + sb.WriteString(`Tu es l'Analyste Système de Muyue. Tu es un expert en administration système et développement. +Tu aides l'utilisateur à comprendre son système, diagnostiquer des problèmes, et optimiser son environnement. -Tu peux appeler des outils pour exécuter des commandes, lire des fichiers, etc. Sois précis et concis dans tes réponses. +RÈGLES STRICTES: +- Tu ne peux JAMAIS exécuter de commande ou de code +- Tu ne peux que analyser, expliquer, et proposer des solutions +- Quand tu proposes du code ou des commandes, mets-les dans des blocs de code markdown avec le langage spécifié +- L'utilisateur pourra les copier ou les envoyer directement au terminal depuis les boutons `) - if req.Cwd != "" { - sb.WriteString("Répertoire courant: " + req.Cwd + "\n") + analysis := LoadSystemAnalysis() + if analysis != "" { + sb.WriteString("=== ANALYSE SYSTÈME ACTUELLE ===\n") + sb.WriteString(analysis) + sb.WriteString("\n=== FIN DE L'ANALYSE ===\n\n") } - if req.Platform != "" { - sb.WriteString("Plateforme: " + req.Platform + "\n") - } - if req.Context != "" { - sb.WriteString("\nContexte du terminal:\n" + req.Context + "\n") - } - if len(req.History) > 0 { - sb.WriteString("\nDernières commandes exécutées:\n") - for _, h := range req.History { - sb.WriteString(" " + h + "\n") - } + + sb.WriteString(fmt.Sprintf("OS: %s/%s\n", runtime.GOOS, runtime.GOARCH)) + if hostname, err := os.Hostname(); err == nil { + sb.WriteString("Hostname: " + hostname + "\n") } return sb.String() } -func (s 
*Server) handleShellChatStream(w http.ResponseWriter, orb *orchestrator.Orchestrator, req ShellChatRequest) { +func (s *Server) handleShellChatStreamV2(w http.ResponseWriter, orb *orchestrator.Orchestrator) { SetupSSEHeaders(w) flusher, canFlush := w.(http.Flusher) sseWriter := NewSSEWriter(w) - ctx := context.Background() - messages := []orchestrator.Message{ - {Role: "user", Content: req.Message}, + // Rebuild history into orchestrator + history := s.shellConvStore.Get() + for _, m := range history[:len(history)-1] { // all except last user msg + if m.Role == "system" { + continue + } + // Pre-load orchestrator history + orb.AppendHistory(orchestrator.Message{Role: m.Role, Content: m.Content}) } - engine := NewChatEngine(orb, s.agentRegistry, s.agentToolsJSON) + lastUserMsg := history[len(history)-1].Content - var toolCalls []ToolCallInfo - engine.OnChunk(func(data map[string]interface{}) { - if data == nil { - return - } - sseWriter.Write(data) + var finalContent string + result, err := orb.SendStream(lastUserMsg, func(chunk string) { + finalContent = chunk + sseWriter.Write(map[string]interface{}{"content": chunk}) if canFlush { flusher.Flush() } - if tc, ok := data["tool_call"].(map[string]interface{}); ok { - argsMap := make(map[string]interface{}) - if args, ok := tc["args"].(string); ok { - json.Unmarshal([]byte(args), &argsMap) - } - toolCalls = append(toolCalls, ToolCallInfo{ - ID: toString(tc["tool_call_id"]), - Name: toString(tc["name"]), - Args: argsMap, - }) - } - if tr, ok := data["tool_result"].(map[string]interface{}); ok { - tcID := toString(tr["tool_call_id"]) - for i := range toolCalls { - if toolCalls[i].ID == tcID { - if err, ok := tr["is_error"].(bool); ok && err { - toolCalls[i].Error = toString(tr["content"]) - } else { - toolCalls[i].Result = &toolResponseData{ - Content: toString(tr["content"]), - IsError: toBool(tr["is_error"]), - } - } - break - } - } - } }) - finalContent, _, _, err := engine.RunWithTools(ctx, messages) if err != nil { 
sseWriter.Write(map[string]interface{}{"error": err.Error()}) return } - if finalContent == "" && len(toolCalls) > 0 { - finalContent = "(opérations terminées)" + content := result + if content == "" { + content = finalContent } - writeJSONResp, _ := json.Marshal(ShellChatResponse{ - Content: finalContent, - ToolCalls: toolCalls, + s.shellConvStore.Add("assistant", cleanThinkingTags(content)) + + sseWriter.Write(map[string]interface{}{ + "done": "true", + "tokens": s.shellConvStore.ApproxTokens(), }) - sseWriter.Write(map[string]interface{}{"done": true, "response": string(writeJSONResp)}) } -func (s *Server) handleShellChatNonStream(w http.ResponseWriter, orb *orchestrator.Orchestrator, req ShellChatRequest) { - ctx := context.Background() - messages := []orchestrator.Message{ - {Role: "user", Content: req.Message}, +func (s *Server) handleShellChatNonStreamV2(w http.ResponseWriter, orb *orchestrator.Orchestrator) { + history := s.shellConvStore.Get() + for _, m := range history[:len(history)-1] { + if m.Role == "system" { + continue + } + orb.AppendHistory(orchestrator.Message{Role: m.Role, Content: m.Content}) } - engine := NewChatEngine(orb, s.agentRegistry, s.agentToolsJSON) + lastUserMsg := history[len(history)-1].Content - finalContent, err := engine.RunNonStream(ctx, messages) + result, err := orb.Send(lastUserMsg) if err != nil { writeError(w, err.Error(), http.StatusInternalServerError) return } - if finalContent == "" { - finalContent = "(tool calls completed, no text response)" + s.shellConvStore.Add("assistant", cleanThinkingTags(result)) + writeJSON(w, map[string]interface{}{ + "content": result, + "tokens": s.shellConvStore.ApproxTokens(), + }) +} + +func (s *Server) handleShellChatHistory(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + writeError(w, "GET only", http.StatusMethodNotAllowed) + return + } + messages := s.shellConvStore.Get() + writeJSON(w, map[string]interface{}{ + "messages": messages, + "tokens": 
s.shellConvStore.ApproxTokens(), + "max_tokens": shellMaxTokens, + "at_limit": s.shellConvStore.AtLimit(), + }) +} + +func (s *Server) handleShellChatClear(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + writeError(w, "POST only", http.StatusMethodNotAllowed) + return + } + s.shellConvStore.Clear() + writeJSON(w, map[string]interface{}{ + "status": "ok", + "tokens": 0, + }) +} + +func (s *Server) handleShellAnalyze(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + writeError(w, "POST only", http.StatusMethodNotAllowed) + return } - writeJSON(w, ShellChatResponse{ - Content: finalContent, - ToolCalls: nil, + var sysInfo strings.Builder + sysInfo.WriteString("=== INFORMATIONS SYSTÈME ===\n") + sysInfo.WriteString(fmt.Sprintf("OS: %s/%s\n", runtime.GOOS, runtime.GOARCH)) + if hostname, err := os.Hostname(); err == nil { + sysInfo.WriteString("Hostname: " + hostname + "\n") + } + if user := os.Getenv("USER"); user != "" { + sysInfo.WriteString("User: " + user + "\n") + } + + if data, err := os.ReadFile("/proc/cpuinfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + if strings.HasPrefix(line, "model name") { + sysInfo.WriteString("CPU: " + strings.SplitN(line, ":", 2)[1] + "\n") + break + } + } + } + + if data, err := os.ReadFile("/proc/meminfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + if strings.HasPrefix(line, "MemTotal:") || strings.HasPrefix(line, "MemAvailable:") { + sysInfo.WriteString(strings.TrimSpace(line) + "\n") + } + } + } + + if out, err := exec.Command("df", "-h", "/").Output(); err == nil { + lines := strings.Split(string(out), "\n") + if len(lines) >= 2 { + sysInfo.WriteString("Disk: " + strings.TrimSpace(lines[1]) + "\n") + } + } + + if out, err := exec.Command("ps", "aux", "--sort=-pcpu").Output(); err == nil { + lines := strings.Split(string(out), "\n") + sysInfo.WriteString(fmt.Sprintf("\nProcessus actifs (%d total):\n", len(lines)-1)) + for i := 
1; i < len(lines) && i <= 10; i++ { + fields := strings.Fields(lines[i]) + if len(fields) >= 11 { + sysInfo.WriteString(fmt.Sprintf(" %-20s CPU:%-6s MEM:%-6s %s\n", fields[10], fields[2]+"%", fields[3]+"%", fields[0])) + } + } + } + + if s.scanResult != nil { + sysInfo.WriteString("\nOutils installés:\n") + for _, t := range s.scanResult.Tools { + status := "✗" + if t.Installed { + status = "✓" + } + sysInfo.WriteString(fmt.Sprintf(" %s %s %s\n", status, t.Name, t.Version)) + } + } + + orb, err := orchestrator.New(s.config) + if err != nil { + writeError(w, err.Error(), http.StatusServiceUnavailable) + return + } + orb.SetSystemPrompt(agent.StudioSystemPrompt()) + + analysisPrompt := `Tu es un expert en administration système. Analyse les informations suivantes sur le système de l'utilisateur. +Génère un rapport d'analyse concis et structuré en markdown qui inclut: +1. Un résumé de l'état du système +2. Les points d'attention (performance, sécurité, configuration) +3. Des recommandations spécifiques d'optimisation +4. Les outils manquants qui pourraient être utiles +5. L'état du réseau et des connexions + +Sois concret et technique. Le rapport sera utilisé comme contexte pour un assistant terminal. 
+ +` + sysInfo.String() + + result, err := orb.Send(analysisPrompt) + if err != nil { + writeError(w, "analysis failed: "+err.Error(), http.StatusInternalServerError) + return + } + + SaveSystemAnalysis(result) + + writeJSON(w, map[string]interface{}{ + "status": "ok", + "analysis": result, }) -} \ No newline at end of file +} diff --git a/internal/api/server.go b/internal/api/server.go index 2e6f9fb..3ec9b2e 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -17,6 +17,7 @@ type Server struct { scanResult *scanner.ScanResult mux *http.ServeMux convStore *ConversationStore + shellConvStore *ShellConvStore agentRegistry *agent.Registry agentToolsJSON json.RawMessage workflowEngine *workflow.Engine @@ -46,6 +47,7 @@ func NewServer(cfg *config.MuyueConfig) *Server { s.config = cfg s.scanResult = scanner.ScanSystem() s.convStore = NewConversationStore() + s.shellConvStore = NewShellConvStore() s.agentRegistry = agent.DefaultRegistry() tools := s.agentRegistry.OpenAITools() toolsJSON, _ := json.Marshal(tools) @@ -89,6 +91,9 @@ func (s *Server) routes() { s.mux.HandleFunc("/api/tool/call", s.handleToolCall) s.mux.HandleFunc("/api/tools/list", s.handleToolList) s.mux.HandleFunc("/api/shell/chat", s.handleShellChat) + s.mux.HandleFunc("/api/shell/chat/history", s.handleShellChatHistory) + s.mux.HandleFunc("/api/shell/chat/clear", s.handleShellChatClear) + s.mux.HandleFunc("/api/shell/analyze", s.handleShellAnalyze) s.mux.HandleFunc("/api/workflow", s.handleWorkflowCreate) s.mux.HandleFunc("/api/workflow/list", s.handleWorkflowList) s.mux.HandleFunc("/api/workflow/", s.handleWorkflowGet) diff --git a/internal/api/shell_conversation.go b/internal/api/shell_conversation.go new file mode 100644 index 0000000..657cfee --- /dev/null +++ b/internal/api/shell_conversation.go @@ -0,0 +1,121 @@ +package api + +import ( + "encoding/json" + "os" + "path/filepath" + "sync" + "time" + "unicode/utf8" + + "github.com/muyue/muyue/internal/config" +) + +const shellMaxTokens 
= 100000 +const shellCharsPerToken = 4 + +type ShellMessage struct { + ID string `json:"id"` + Role string `json:"role"` + Content string `json:"content"` + Time string `json:"time"` +} + +type ShellConvStore struct { + mu sync.RWMutex + path string + msgs []ShellMessage +} + +func NewShellConvStore() *ShellConvStore { + dir, err := config.ConfigDir() + if err != nil { + dir = "/tmp/muyue" + } + path := filepath.Join(dir, "shell_conversation.json") + s := &ShellConvStore{path: path} + s.load() + return s +} + +func (s *ShellConvStore) load() { + data, err := os.ReadFile(s.path) + if err != nil { + s.msgs = []ShellMessage{} + return + } + json.Unmarshal(data, &s.msgs) + if s.msgs == nil { + s.msgs = []ShellMessage{} + } +} + +func (s *ShellConvStore) save() { + data, _ := json.MarshalIndent(s.msgs, "", " ") + os.MkdirAll(filepath.Dir(s.path), 0755) + os.WriteFile(s.path, data, 0600) +} + +func (s *ShellConvStore) Get() []ShellMessage { + s.mu.RLock() + defer s.mu.RUnlock() + out := make([]ShellMessage, len(s.msgs)) + copy(out, s.msgs) + return out +} + +func (s *ShellConvStore) Add(role, content string) ShellMessage { + s.mu.Lock() + defer s.mu.Unlock() + msg := ShellMessage{ + ID: time.Now().Format("20060102150405.000"), + Role: role, + Content: content, + Time: time.Now().Format(time.RFC3339), + } + s.msgs = append(s.msgs, msg) + s.save() + return msg +} + +func (s *ShellConvStore) Clear() { + s.mu.Lock() + defer s.mu.Unlock() + s.msgs = []ShellMessage{} + s.save() +} + +func (s *ShellConvStore) ApproxTokens() int { + s.mu.RLock() + defer s.mu.RUnlock() + total := 0 + for _, m := range s.msgs { + total += utf8.RuneCountInString(m.Content) / shellCharsPerToken + } + return total +} + +func (s *ShellConvStore) AtLimit() bool { + return s.ApproxTokens() >= shellMaxTokens +} + +func LoadSystemAnalysis() string { + dir, err := config.ConfigDir() + if err != nil { + return "" + } + data, err := os.ReadFile(filepath.Join(dir, "system_analysis.md")) + if err != nil { + 
return "" + } + return string(data) +} + +func SaveSystemAnalysis(content string) error { + dir, err := config.ConfigDir() + if err != nil { + return err + } + os.MkdirAll(dir, 0755) + return os.WriteFile(filepath.Join(dir, "system_analysis.md"), []byte(content), 0644) +} diff --git a/web/src/api/client.js b/web/src/api/client.js index 6536001..4bd9be6 100644 --- a/web/src/api/client.js +++ b/web/src/api/client.js @@ -57,6 +57,9 @@ const api = { getChatHistory: () => request('/chat/history'), clearChat: () => request('/chat/clear', { method: 'POST' }), summarizeChat: () => request('/chat/summarize', { method: 'POST' }), + getShellChatHistory: () => request('/shell/chat/history'), + clearShellChat: () => request('/shell/chat/clear', { method: 'POST' }), + analyzeSystem: () => request('/shell/analyze', { method: 'POST' }), sendChat: (message, stream = true, onChunk, signal) => { if (!stream) { return request('/chat', { method: 'POST', body: JSON.stringify({ message, stream: false }) }) @@ -104,8 +107,6 @@ const api = { sendShellChat: (message, context = {}, stream = true, onChunk) => { const payload = { message, - context: context.context || '', - history: context.history || [], cwd: context.cwd || '', platform: context.platform || '', stream, @@ -127,7 +128,6 @@ const api = { const reader = res.body.getReader() const decoder = new TextDecoder() let full = '' - let toolCalls = [] while (true) { const { done, value } = await reader.read() if (done) break @@ -137,27 +137,15 @@ const api = { try { const data = JSON.parse(line.slice(6)) if (data.error) { reject(new Error(data.error)); return } - if (data.done) { - resolve({ content: full, tool_calls: toolCalls }) - return - } + if (data.done) { resolve({ content: full, tokens: data.tokens }); return } if (data.content) { - full += data.content + full = data.content if (onChunk) onChunk(full, data) - } else if (data.tool_call) { - toolCalls.push(data.tool_call) - if (onChunk) onChunk(full, data, toolCalls) - } else if 
(data.tool_result) { - const idx = toolCalls.findIndex(tc => tc.tool_call_id === data.tool_result.id) - if (idx >= 0) { - toolCalls[idx].result = data.tool_result - } - if (onChunk) onChunk(full, data, toolCalls) } } catch {} } } - resolve({ content: full, tool_calls: toolCalls }) + resolve({ content: full }) }).catch(reject) }) }, diff --git a/web/src/components/Config.jsx b/web/src/components/Config.jsx index 9f4eb94..1e711e4 100644 --- a/web/src/components/Config.jsx +++ b/web/src/components/Config.jsx @@ -564,30 +564,82 @@ function PanelLocale({ language, keyboard, layouts, api, t }) { } function PanelSkills({ skillList, t }) { + const [selected, setSelected] = useState(null) + + if (skillList.length === 0) { + return
{t('config.noSkills')}
+ } + return ( -
- {skillList.length === 0 ? ( -
- {t('config.noSkills')} - {t('config.runSkillsInit')} -
- ) : ( - skillList.map((s, i) => ( -
- {s.name} - {s.target || 'both'} - {s.version && {s.version}} - {s.category && {s.category}} - {s.description} - {s.dependencies && s.dependencies.length > 0 && ( -
- deps: {s.dependencies.map(d => d.name).join(', ')} -
- )} + <> +
+ {skillList.map((s, i) => ( +
setSelected(s)}> +
{s.name}
+
{s.description}
+
+ {s.target && {s.target}} + {s.version && {s.version}} + {s.category && {s.category}} +
- )) + ))} +
+ {selected && ( +
setSelected(null)}> +
e.stopPropagation()}> +
+ {selected.name} + +
+
+
+
Description
+
{selected.description}
+
+
+
Métadonnées
+
+ {selected.target && {selected.target}} + {selected.version && {selected.version}} + {selected.category && {selected.category}} + {selected.author && {selected.author}} + {selected.languages && selected.languages.map(l => {l})} +
+
+ {selected.tags && selected.tags.length > 0 && ( +
+
Tags
+
+ {selected.tags.map(tag => {tag})} +
+
+ )} + {selected.content && ( +
+
Contenu
+
{selected.content}
+
+ )} + {selected.dependencies && selected.dependencies.length > 0 && ( +
+
Dépendances
+
+ {selected.dependencies.map((d, i) => ( +
+ {d.type} + {d.name} + {d.required === false && optionnel} +
+ ))} +
+
+ )} +
+
+
)} -
+ ) } diff --git a/web/src/components/Shell.jsx b/web/src/components/Shell.jsx index df1beef..230f573 100644 --- a/web/src/components/Shell.jsx +++ b/web/src/components/Shell.jsx @@ -2,11 +2,12 @@ import { useState, useRef, useEffect, useCallback } from 'react' import { Terminal as XTerm } from '@xterm/xterm' import { FitAddon } from '@xterm/addon-fit' import { WebLinksAddon } from '@xterm/addon-web-links' -import { Plus, X, Monitor, Globe, ChevronDown, Pencil, Trash2 } from 'lucide-react' +import { Plus, X, Monitor, Globe, ChevronDown, Pencil, Trash2, Search, Copy, Send } from 'lucide-react' import '@xterm/xterm/css/xterm.css' import { useI18n } from '../i18n' const MAX_TABS = 7 +const SHELL_MAX_TOKENS = 100000 const THEMES = { default: { @@ -163,17 +164,35 @@ export default function Shell({ api }) { name: '', host: '', port: 22, user: '', key_path: '', }) - const [aiMessages, setAiMessages] = useState([ - { role: 'ai', content: t('shell.aiWelcome') } - ]) + const [aiMessages, setAiMessages] = useState([]) const [aiInput, setAiInput] = useState('') const [aiLoading, setAiLoading] = useState(false) + const [aiTokens, setAiTokens] = useState(0) + const [aiAtLimit, setAiAtLimit] = useState(false) + const [analyzing, setAnalyzing] = useState(false) const aiMessagesRef = useRef(null) + const aiLoadedRef = useRef(false) useEffect(() => { aiMessagesRef.current?.scrollTo(0, aiMessagesRef.current.scrollHeight) }, [aiMessages]) + useEffect(() => { + if (aiLoadedRef.current) return + aiLoadedRef.current = true + api.getShellChatHistory().then(d => { + if (d.messages && d.messages.length > 0) { + setAiMessages(d.messages) + } else { + setAiMessages([{ role: 'assistant', content: t('shell.aiWelcome') || 'Système Analyste prêt. Tapez /help pour les commandes.' }]) + } + setAiTokens(d.tokens || 0) + setAiAtLimit(d.at_limit || false) + }).catch(() => { + setAiMessages([{ role: 'assistant', content: 'Système Analyste prêt.' 
}]) + }) + }, []) + useEffect(() => { api.getTerminalSessions().then(d => { setSshConnections(d.ssh || []) @@ -372,57 +391,83 @@ export default function Shell({ api }) { } } - const handleAiSend = async () => { - if (!aiInput.trim() || aiLoading) return - const text = aiInput.trim() - setAiMessages(prev => [...prev, { role: 'user', content: text }]) - setAiInput('') - setAiLoading(true) + const sendToTerminal = useCallback((code) => { + const tab = tabs.find(t => t.id === activeTab) + if (!tab) return + const entry = tabsRef.current[tab.id] + if (!entry?.ws || entry.ws.readyState !== WebSocket.OPEN) return + entry.ws.send(JSON.stringify({ type: 'input', data: code + '\r' })) + }, [tabs, activeTab]) - const currentTab = tabs.find(t => t.id === activeTab) - const context = { - cwd: currentTab?.cwd || '', - platform: navigator.platform || '', + const handleAiSend = async () => { + if (!aiInput.trim() || aiLoading || aiAtLimit) return + const text = aiInput.trim() + setAiInput('') + + if (text === '/clear') { + try { + await api.clearShellChat() + setAiMessages([{ role: 'assistant', content: t('shell.aiWelcome') || 'Contexte effacé. Prêt.' }]) + setAiTokens(0) + setAiAtLimit(false) + } catch {} + return } + if (text === '/help') { + setAiMessages(prev => [...prev, + { role: 'user', content: text }, + { role: 'assistant', content: 'Commandes disponibles:\n• /clear — Effacer la conversation\n• /help — Afficher l\'aide\n\nJe ne peux pas exécuter de code. Les blocs de code proposés peuvent être copiés ou envoyés directement au terminal actif.' } + ]) + return + } + + setAiMessages(prev => [...prev, { role: 'user', content: text }]) + setAiLoading(true) + try { let accumulated = '' - await api.sendShellChat(text, context, true, (partial, event) => { - if (event && event.tool_call) { - setAiMessages(prev => [...prev, { - role: 'tool', - content: `${t('shell.toolLaunched')}: ${event.tool_call.name || 'tool'}`, - args: event.tool_call.args ? 
JSON.stringify(event.tool_call.args).slice(0, 100) : '', - }]) - return - } - if (event && event.tool_result) { - const resultText = event.tool_result.result?.content || event.tool_result.error || 'completed' - setAiMessages(prev => [...prev, { - role: 'tool_result', - content: resultText, - isError: event.tool_result.result?.is_error, - }]) - return - } - if (event && event.done) return + await api.sendShellChat(text, {}, true, (partial) => { accumulated = partial setAiMessages(prev => { const filtered = prev.filter(m => !m._streaming) - return [...filtered, { role: 'ai', content: partial, _streaming: true }] + return [...filtered, { role: 'assistant', content: partial, _streaming: true }] }) }) - setAiMessages(prev => prev.filter(m => !m._streaming)) - if (accumulated) { - setAiMessages(prev => [...prev.filter(m => !m._streaming), { role: 'ai', content: accumulated }]) - } + setAiMessages(prev => { + const filtered = prev.filter(m => !m._streaming) + return [...filtered, { role: 'assistant', content: accumulated }] + }) + // Refresh token count + api.getShellChatHistory().then(d => { + setAiTokens(d.tokens || 0) + setAiAtLimit(d.at_limit || false) + }).catch(() => {}) } catch (err) { - setAiMessages(prev => [...prev.filter(m => !m._streaming), { role: 'ai', content: `${t('shell.error')}: ${err.message}` }]) + if (err.message.includes('context limit')) { + setAiAtLimit(true) + } + setAiMessages(prev => [...prev.filter(m => !m._streaming), { role: 'assistant', content: `Erreur: ${err.message}` }]) } setAiLoading(false) } + const handleAnalyze = async () => { + setAnalyzing(true) + setAiMessages(prev => [...prev, { role: 'system', content: 'Analyse du système en cours...' }]) + try { + const d = await api.analyzeSystem() + setAiMessages(prev => [...prev.filter(m => m.content !== 'Analyse du système en cours...'), { + role: 'system', + content: 'Analyse système terminée et sauvegardée. Le contexte système est maintenant disponible.' 
+ }]) + } catch (err) { + setAiMessages(prev => prev.filter(m => m.content !== 'Analyse du système en cours...')) + } + setAnalyzing(false) + } + return (
@@ -538,13 +583,30 @@ export default function Shell({ api }) {
-
{t('shell.aiAssistant')}
+
+ Analyste Système + +
+
+
+
= SHELL_MAX_TOKENS * 0.8 ? 'warn' : ''}`} + style={{ width: `${Math.min(100, (aiTokens / SHELL_MAX_TOKENS) * 100)}%` }} + /> +
+ {Math.round(aiTokens / 1000)}k/{Math.round(SHELL_MAX_TOKENS / 1000)}k +
{aiMessages.map((msg, i) => ( -
- {msg.content} - {msg.args &&
{msg.args}
} -
+ ))} {aiLoading &&
}
@@ -553,9 +615,10 @@ export default function Shell({ api }) { value={aiInput} onChange={e => setAiInput(e.target.value)} onKeyDown={e => e.key === 'Enter' && handleAiSend()} - placeholder={t('shell.askAi')} + placeholder={aiAtLimit ? '/clear pour continuer' : t('shell.askAi')} + disabled={aiAtLimit && aiInput !== '/clear'} /> - +
@@ -611,3 +674,50 @@ export default function Shell({ api }) {
) } + +function ShellAIMessage({ msg, sendToTerminal }) { + const role = msg.role === 'user' ? 'user' : msg.role === 'system' ? 'system' : 'assistant' + const parts = parseMarkdown(msg.content || '') + + return ( +
+ {parts.map((part, i) => { + if (part.type === 'code') { + return ( +
+ {part.lang &&
{part.lang}
} +
{part.code}
+
+ + +
+
+ ) + } + return {part.text} + })} +
+ ) +} + +function parseMarkdown(text) { + const parts = [] + const regex = /```(\w*)\n([\s\S]*?)```/g + let last = 0 + let match + while ((match = regex.exec(text)) !== null) { + if (match.index > last) { + parts.push({ type: 'text', text: text.slice(last, match.index) }) + } + parts.push({ type: 'code', lang: match[1] || '', code: match[2].replace(/\n$/, '') }) + last = match.index + match[0].length + } + if (last < text.length) { + parts.push({ type: 'text', text: text.slice(last) }) + } + return parts.length > 0 ? parts : [{ type: 'text', text }] +} diff --git a/web/src/components/Studio.jsx b/web/src/components/Studio.jsx index eed1e38..2f13929 100644 --- a/web/src/components/Studio.jsx +++ b/web/src/components/Studio.jsx @@ -1,4 +1,4 @@ -import { useState, useRef, useEffect, useCallback } from 'react' +import { useState, useRef, useEffect, useCallback, useMemo } from 'react' import { useI18n } from '../i18n' const RANKS = { @@ -76,7 +76,7 @@ function formatText(text) { return html } -function ThinkingBlock({ content, done }) { +function ThinkingBlock({ content, done, raw }) { return (
@@ -86,7 +86,9 @@ function ThinkingBlock({ content, done }) { Reflexion {!done && }
-
{content}
+
+ {raw ? : content} +
) } @@ -200,7 +202,7 @@ function FeedItem({ msg }) { {rank.label} {timeStr && {timeStr}}
- {msg.thinking && } + {msg.thinking && } {parsedToolCalls && parsedToolCalls.map((tc, i) => { const resultData = parsedToolResults ? parsedToolResults.find(r => r.tool_call_id === tc.tool_call_id) @@ -234,6 +236,16 @@ function StreamingItem({ content, thinking, toolCalls }) { const cleanContent = content.replace(/]*>[\s\S]*?<\/think>/gi, '') const hasToolCalls = toolCalls && toolCalls.length > 0 + const renderedContent = useMemo(() => { + if (!cleanContent) return [] + return renderContent(cleanContent) + }, [cleanContent]) + + const formattedThinking = useMemo(() => { + if (!thinking) return '' + return formatText(thinking) + }, [thinking]) + return (
@@ -246,7 +258,7 @@ function StreamingItem({ content, thinking, toolCalls }) { {rank.label}
- {thinking && } + {thinking && } {hasToolCalls && toolCalls.map((tc, i) => ( ))} @@ -257,7 +269,7 @@ function StreamingItem({ content, thinking, toolCalls }) { )} {cleanContent && (
- {renderContent(cleanContent).map((part, i) => + {renderedContent.map((part, i) => part.type === 'code' ? (
{part.lang &&
{part.lang}
} diff --git a/web/src/i18n/fr.js b/web/src/i18n/fr.js index f54f3aa..fc580e3 100644 --- a/web/src/i18n/fr.js +++ b/web/src/i18n/fr.js @@ -136,7 +136,7 @@ const fr = { terminal: 'Terminal', updates: 'Mises \u00e0 jour', locale: 'Langue & Clavier', - skills: 'Comp\u00e9ENCES', + skills: 'Compétences', system: 'Syst\u00e8me', }, profile: 'Profil', @@ -160,7 +160,7 @@ const fr = { save: 'Enregistrer', saved: 'Enregistr\u00e9 !', error: 'Erreur', - skills: 'Comp\u00e9ENCES', + skills: 'Compétences', noSkills: 'Aucune comp\u00e9tence install\u00e9e.', runSkillsInit: 'Ex\u00e9cutez muyue skills init', language: 'Langue', diff --git a/web/src/styles/global.css b/web/src/styles/global.css index a67bc43..df6735a 100644 --- a/web/src/styles/global.css +++ b/web/src/styles/global.css @@ -393,11 +393,26 @@ input::placeholder { color: var(--text-disabled); } .connection-dot.off { background: var(--error); } .shell-ai-col { width: 320px; border-left: 1px solid var(--border); background: var(--bg-surface); display: flex; flex-direction: column; flex-shrink: 0; } -.ai-panel-header { padding: 12px 16px; border-bottom: 1px solid var(--border); font-weight: 700; font-size: 13px; color: var(--accent); } +.ai-panel-header { padding: 12px 16px; border-bottom: 1px solid var(--border); font-weight: 700; font-size: 13px; color: var(--accent); display: flex; align-items: center; justify-content: space-between; } +.shell-analyze-btn { + display: flex; align-items: center; gap: 4px; + padding: 4px 10px; border-radius: var(--radius); + background: transparent; border: 1px solid var(--accent-dim); + color: var(--accent); font-size: 11px; font-weight: 600; + cursor: pointer; transition: all 0.15s; +} +.shell-analyze-btn:hover:not(:disabled) { background: var(--accent-bg); } +.shell-analyze-btn:disabled { opacity: 0.4; cursor: not-allowed; } +.shell-ai-token-bar { display: flex; align-items: center; gap: 8px; padding: 6px 12px; border-bottom: 1px solid var(--border); } +.shell-ai-token-track { 
flex: 1; height: 3px; background: var(--bg-input); border-radius: 2px; overflow: hidden; }
+.shell-ai-token-fill { height: 100%; background: var(--accent); border-radius: 2px; transition: width 0.4s, background 0.3s; }
+.shell-ai-token-fill.warn { background: var(--warning); }
+.shell-ai-token-text { font-size: 10px; font-family: var(--font-mono); color: var(--text-tertiary); white-space: nowrap; }
 .ai-panel-messages { flex: 1; overflow-y: auto; padding: 12px; display: flex; flex-direction: column; gap: 8px; }
 .ai-message { padding: 8px 12px; border-radius: var(--radius); font-size: 13px; line-height: 1.5; word-break: break-word; }
-.ai-message.ai { background: var(--bg-card); border-left: 3px solid var(--accent); }
-.ai-message.user { background: var(--accent-bg); border-left: 3px solid var(--accent-muted); }
+.ai-message.assistant { background: var(--bg-card); border-left: 3px solid var(--accent); }
+.ai-message.system { background: var(--bg-elevated); border-left: 3px solid var(--info); font-style: italic; color: var(--text-tertiary); font-size: 12px; }
+.ai-message.user { background: var(--accent-bg); border-left: 3px solid var(--accent-muted); }
 .ai-message.thinking { background: var(--bg-elevated); border-left: 3px solid var(--info); font-style: italic; color: var(--text-tertiary); }
 .ai-message.tool { background: var(--bg-elevated); border-left: 3px solid var(--warning); }
 .ai-message.tool .tool-name { font-weight: 700; color: var(--warning); }
@@ -405,6 +420,31 @@ input::placeholder { color: var(--text-disabled); }
 .ai-panel-input { display: flex; gap: 6px; padding: 10px 12px; border-top: 1px solid var(--border); }
 .ai-panel-input input { flex: 1; font-size: 13px; padding: 6px 10px; }
 
+.shell-code-block {
+  background: var(--bg); border: 1px solid var(--border); border-radius: var(--radius);
+  margin: 8px 0 4px; overflow: hidden;
+}
+.shell-code-block pre {
+  padding: 10px 12px; font-family: var(--font-mono); font-size: 12px; line-height: 1.5;
+  overflow-x: 
auto; color: var(--text-primary); margin: 0; +} +.shell-code-lang { + padding: 3px 10px; font-size: 10px; font-weight: 600; color: var(--text-tertiary); + background: var(--bg-surface); border-bottom: 1px solid var(--border); + text-transform: uppercase; letter-spacing: 0.5px; +} +.shell-code-actions { + display: flex; border-top: 1px solid var(--border); background: var(--bg-surface); +} +.shell-code-actions button { + flex: 1; display: flex; align-items: center; justify-content: center; gap: 4px; + padding: 5px 0; background: transparent; border: none; border-right: 1px solid var(--border); + color: var(--text-tertiary); font-size: 11px; cursor: pointer; transition: all 0.1s; + font-family: var(--font-sans); +} +.shell-code-actions button:last-child { border-right: none; } +.shell-code-actions button:hover { background: var(--accent-bg); color: var(--accent); } + .shell-modal-overlay { position: fixed; inset: 0; background: rgba(0,0,0,0.6); display: flex; align-items: center; justify-content: center; z-index: 1000; @@ -505,10 +545,24 @@ input::placeholder { color: var(--text-disabled); } .config-update-name { color: var(--text-primary); font-weight: 600; font-size: 13px; min-width: 100px; } .config-update-versions { color: var(--text-tertiary); font-size: 12px; font-family: var(--font-mono); } -.config-skill-row { display: flex; align-items: center; gap: 10px; padding: 8px 0; border-bottom: 1px solid var(--border); font-size: 13px; } -.config-skill-row:last-child { border-bottom: none; } -.config-skill-name { color: var(--text-primary); font-weight: 600; min-width: 120px; } -.config-skill-desc { color: var(--text-tertiary); flex: 1; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; } +.skill-tiles { display: grid; grid-template-columns: repeat(auto-fill, minmax(200px, 1fr)); gap: 12px; } +.skill-tile { background: var(--bg-card); border: 1px solid var(--border); border-radius: var(--radius); padding: 14px; cursor: pointer; transition: border-color 
0.15s; } +.skill-tile:hover { border-color: var(--accent-dim); } +.skill-tile-name { font-weight: 600; color: var(--text-primary); font-size: 14px; margin-bottom: 6px; } +.skill-tile-desc { font-size: 12px; color: var(--text-tertiary); overflow: hidden; text-overflow: ellipsis; display: -webkit-box; -webkit-line-clamp: 2; -webkit-box-orient: vertical; } +.skill-tile-tags { display: flex; gap: 6px; flex-wrap: wrap; margin-top: 10px; } +.skill-detail-overlay { position: fixed; inset: 0; background: rgba(0,0,0,0.7); z-index: 50; display: flex; align-items: center; justify-content: center; } +.skill-detail-panel { background: var(--bg-elevated); border: 1px solid var(--border); border-radius: var(--radius-lg); width: 90%; max-width: 600px; max-height: 80vh; display: flex; flex-direction: column; overflow: hidden; } +.skill-detail-header { display: flex; align-items: center; justify-content: space-between; padding: 16px 20px; border-bottom: 1px solid var(--border); } +.skill-detail-name { font-weight: 600; font-size: 16px; color: var(--text-primary); } +.skill-detail-body { flex: 1; overflow-y: auto; padding: 20px; } +.skill-detail-section { margin-bottom: 16px; } +.skill-detail-label { font-size: 11px; font-weight: 700; color: var(--accent); text-transform: uppercase; letter-spacing: 0.5px; margin-bottom: 6px; } +.skill-detail-meta { display: flex; gap: 8px; flex-wrap: wrap; } +.skill-detail-content { font-family: var(--font-mono); font-size: 12px; color: var(--text-secondary); white-space: pre-wrap; background: var(--bg-card); border: 1px solid var(--border); border-radius: var(--radius); padding: 12px; line-height: 1.6; max-height: 300px; overflow-y: auto; } +.skill-detail-deps { display: flex; flex-direction: column; gap: 6px; } +.skill-detail-dep { font-size: 12px; color: var(--text-tertiary); display: flex; align-items: center; gap: 8px; } +.skill-detail-dep .badge { font-size: 10px; } .chip-row { display: flex; gap: 8px; flex-wrap: wrap; } .config-toast {