fix(studio): forward AI thinking chunks to frontend instead of dropping them
All checks were successful
Beta Release / beta (push) Successful in 40s
The ThinkingBlock component existed but was dead code — the backend silently discarded all <think chunks. Now emits thinking SSE events so the UI can display AI reflections in real-time.

★ Generated with Crush

Assisted-by: GLM-5-Turbo via Crush <crush@charm.land>
This commit is contained in:
@@ -73,8 +73,20 @@ RÈGLES ABSOLUES:
|
||||
flusher, canFlush := w.(http.Flusher)
|
||||
|
||||
result, err := orb.SendStream(body.Message, func(chunk string) {
|
||||
// Forward thinking-tag chunks to the frontend as dedicated SSE events
|
||||
if strings.HasPrefix(chunk, "<think") {
|
||||
data, _ := json.Marshal(map[string]string{"thinking": strings.TrimPrefix(chunk, "<think")})
|
||||
w.Write([]byte("data: " + string(data) + "\n\n"))
|
||||
if canFlush {
|
||||
flusher.Flush()
|
||||
}
|
||||
return
|
||||
}
|
||||
if chunk == "</think>" {
|
||||
data, _ := json.Marshal(map[string]string{"thinking_end": "true"})
|
||||
w.Write([]byte("data: " + string(data) + "\n\n"))
|
||||
if canFlush {
|
||||
flusher.Flush()
|
||||
}
|
||||
return
|
||||
}
|
||||
data, _ := json.Marshal(map[string]string{"content": chunk})
|
||||
|
||||
@@ -68,7 +68,9 @@ const api = {
|
||||
if (data.done) { resolve(full); return }
|
||||
if (data.content) {
|
||||
full += data.content
|
||||
if (onChunk) onChunk(full)
|
||||
if (onChunk) onChunk(full, data)
|
||||
} else if (data.thinking !== undefined || data.thinking_end) {
|
||||
if (onChunk) onChunk(full, data)
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user