All checks were successful
CI / build (push) Successful in 2m41s
Split monolithic app.go into focused modules (dashboard, chat, workflow, config, agents, terminal, commands, handlers). Add proper error handling for installer commands, proxy pipes, and MCP config parsing. Fix daemon channel buffer, cap orchestrator history, compile think regex once, and set HTTP timeouts on preview server. Improve CI with Go module caching, dependency download step, and test stage with race detection. 😘 Generated with Crush Assisted-by: GLM-5-Turbo via Crush <crush@charm.land>
251 lines
4.5 KiB
Go
251 lines
4.5 KiB
Go
package proxy
|
|
|
|
import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"
)
|
|
|
|
// AgentType identifies which external CLI agent a managed process runs.
type AgentType string

// Supported agents. Each value doubles as the executable name looked up
// via exec.LookPath and launched via exec.CommandContext.
const (
	AgentCrush  AgentType = "crush"
	AgentClaude AgentType = "claude"
)
|
|
|
|
// AgentStatus describes the lifecycle state of a managed agent process.
type AgentStatus string

// Lifecycle states reported by Manager.Status and Manager.AllStatus.
const (
	StatusIdle    AgentStatus = "idle"    // agent unknown to the manager / never started
	StatusRunning AgentStatus = "running" // process launched and not yet exited
	StatusStopped AgentStatus = "stopped" // exited cleanly or was cancelled via Stop
	StatusError   AgentStatus = "error"   // failed to start, or exited with an error
)
|
|
|
|
// LogEntry is one captured line of agent output, or an internal status note
// recorded by the manager (e.g. "stopped").
type LogEntry struct {
	Timestamp time.Time // when the line was captured, not when it was emitted
	Agent     AgentType
	Level     string // "info" for stdout lines, "error" for stderr/internal errors
	Message   string
}
|
|
|
|
// Agent tracks a single managed CLI process and its captured output.
type Agent struct {
	Type   AgentType
	Status AgentStatus // written under Manager.mu (Start/Stop and the exit watcher)
	cmd    *exec.Cmd
	stdout io.Reader
	stderr io.Reader
	cancel context.CancelFunc // cancels the process's context, killing it
	mu     sync.Mutex         // guards logs
	logs   []LogEntry         // bounded buffer; captureOutput trims it past 1000 entries
}
|
|
|
|
// Manager owns the set of agent processes, keyed by agent type.
// All exported methods are safe for concurrent use.
type Manager struct {
	agents map[AgentType]*Agent
	mu     sync.RWMutex // guards agents and each agent's Status transitions
}
|
|
|
|
func NewManager() *Manager {
|
|
return &Manager{
|
|
agents: make(map[AgentType]*Agent),
|
|
}
|
|
}
|
|
|
|
func (m *Manager) Start(agentType AgentType, args ...string) error {
|
|
m.mu.Lock()
|
|
defer m.mu.Unlock()
|
|
|
|
if a, exists := m.agents[agentType]; exists && a.Status == StatusRunning {
|
|
return fmt.Errorf("%s already running", agentType)
|
|
}
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
var cmdName string
|
|
switch agentType {
|
|
case AgentCrush:
|
|
cmdName = "crush"
|
|
case AgentClaude:
|
|
cmdName = "claude"
|
|
default:
|
|
cancel()
|
|
return fmt.Errorf("unknown agent type: %s", agentType)
|
|
}
|
|
|
|
cmd := exec.CommandContext(ctx, cmdName, args...)
|
|
cmd.Env = os.Environ()
|
|
|
|
stdout, pipeErr := cmd.StdoutPipe()
|
|
if pipeErr != nil {
|
|
cancel()
|
|
return fmt.Errorf("stdout pipe: %w", pipeErr)
|
|
}
|
|
stderr, pipeErr := cmd.StderrPipe()
|
|
if pipeErr != nil {
|
|
cancel()
|
|
return fmt.Errorf("stderr pipe: %w", pipeErr)
|
|
}
|
|
|
|
agent := &Agent{
|
|
Type: agentType,
|
|
Status: StatusRunning,
|
|
cmd: cmd,
|
|
stdout: stdout,
|
|
stderr: stderr,
|
|
cancel: cancel,
|
|
}
|
|
|
|
m.agents[agentType] = agent
|
|
|
|
go agent.captureOutput(stdout, "info")
|
|
go agent.captureOutput(stderr, "error")
|
|
|
|
if err := cmd.Start(); err != nil {
|
|
agent.Status = StatusError
|
|
cancel()
|
|
return fmt.Errorf("start %s: %w", agentType, err)
|
|
}
|
|
|
|
go func() {
|
|
err := cmd.Wait()
|
|
m.mu.Lock()
|
|
defer m.mu.Unlock()
|
|
if err != nil && ctx.Err() == nil {
|
|
agent.Status = StatusError
|
|
agent.log("error", fmt.Sprintf("exited with error: %s", err))
|
|
} else {
|
|
agent.Status = StatusStopped
|
|
agent.log("info", "stopped")
|
|
}
|
|
}()
|
|
|
|
return nil
|
|
}
|
|
|
|
func (m *Manager) Stop(agentType AgentType) error {
|
|
m.mu.Lock()
|
|
defer m.mu.Unlock()
|
|
|
|
agent, exists := m.agents[agentType]
|
|
if !exists {
|
|
return fmt.Errorf("%s not found", agentType)
|
|
}
|
|
|
|
if agent.Status != StatusRunning {
|
|
return fmt.Errorf("%s is not running", agentType)
|
|
}
|
|
|
|
agent.cancel()
|
|
agent.Status = StatusStopped
|
|
return nil
|
|
}
|
|
|
|
func (m *Manager) Status(agentType AgentType) (AgentStatus, []LogEntry) {
|
|
m.mu.RLock()
|
|
defer m.mu.RUnlock()
|
|
|
|
agent, exists := m.agents[agentType]
|
|
if !exists {
|
|
return StatusIdle, nil
|
|
}
|
|
|
|
agent.mu.Lock()
|
|
defer agent.mu.Unlock()
|
|
|
|
return agent.Status, agent.logs
|
|
}
|
|
|
|
func (m *Manager) AllStatus() map[AgentType]AgentStatus {
|
|
m.mu.RLock()
|
|
defer m.mu.RUnlock()
|
|
|
|
statuses := make(map[AgentType]AgentStatus)
|
|
for t, a := range m.agents {
|
|
statuses[t] = a.Status
|
|
}
|
|
return statuses
|
|
}
|
|
|
|
|
|
func (a *Agent) captureOutput(reader io.Reader, level string) {
|
|
scanner := bufio.NewScanner(reader)
|
|
for scanner.Scan() {
|
|
line := scanner.Text()
|
|
a.mu.Lock()
|
|
a.logs = append(a.logs, LogEntry{
|
|
Timestamp: time.Now(),
|
|
Agent: a.Type,
|
|
Level: level,
|
|
Message: line,
|
|
})
|
|
if len(a.logs) > 1000 {
|
|
a.logs = a.logs[500:]
|
|
}
|
|
a.mu.Unlock()
|
|
}
|
|
}
|
|
|
|
func (a *Agent) log(level, msg string) {
|
|
a.mu.Lock()
|
|
defer a.mu.Unlock()
|
|
a.logs = append(a.logs, LogEntry{
|
|
Timestamp: time.Now(),
|
|
Agent: a.Type,
|
|
Level: level,
|
|
Message: msg,
|
|
})
|
|
}
|
|
|
|
func (m *Manager) IsAvailable(agentType AgentType) bool {
|
|
var cmdName string
|
|
switch agentType {
|
|
case AgentCrush:
|
|
cmdName = "crush"
|
|
case AgentClaude:
|
|
cmdName = "claude"
|
|
default:
|
|
return false
|
|
}
|
|
|
|
path, err := exec.LookPath(cmdName)
|
|
return err == nil && path != ""
|
|
}
|
|
|
|
func (m *Manager) GetLogs(agentType AgentType, lastN int) []LogEntry {
|
|
m.mu.RLock()
|
|
agent, exists := m.agents[agentType]
|
|
m.mu.RUnlock()
|
|
|
|
if !exists {
|
|
return nil
|
|
}
|
|
|
|
agent.mu.Lock()
|
|
defer agent.mu.Unlock()
|
|
|
|
logs := agent.logs
|
|
if lastN > 0 && len(logs) > lastN {
|
|
logs = logs[len(logs)-lastN:]
|
|
}
|
|
return logs
|
|
}
|
|
|
|
func FormatLogs(logs []LogEntry) string {
|
|
var b strings.Builder
|
|
for _, l := range logs {
|
|
b.WriteString(fmt.Sprintf("[%s] %s %s: %s\n",
|
|
l.Timestamp.Format("15:04:05"),
|
|
l.Agent,
|
|
l.Level,
|
|
l.Message,
|
|
))
|
|
}
|
|
return b.String()
|
|
}
|