feat: Ollama/gemma4 compat — /init flow, stream filter, safety fixes

provider/openai:
- Fix doubled tool-call args (argsComplete flag): Ollama sends the complete
  args in the first streaming chunk and then repeats them as a delta,
  causing doubled JSON and 400 errors in elfs (see sketch below)
- Handle fs: prefix (gemma4 uses fs:grep instead of fs.grep)
- Add Reasoning field support for Ollama thinking output
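
A sketch of the shape of both provider fixes; the type and helper names
here are illustrative, only argsComplete appears in the actual change:

    package sketch // illustrative; not the provider's real types

    import "strings"

    // toolCallAccum captures the argsComplete idea: once a chunk delivers
    // complete arguments, a repeated "delta" carrying the same JSON is
    // dropped instead of appended (appending is what doubled the JSON).
    type toolCallAccum struct {
        args         strings.Builder
        argsComplete bool
    }

    func (a *toolCallAccum) addArgs(delta string, completeInThisChunk bool) {
        if a.argsComplete {
            return // Ollama re-sent the full args as a delta; ignore
        }
        a.args.WriteString(delta)
        a.argsComplete = completeInThisChunk
    }

    // normalizeToolName maps gemma4's "fs:grep" spelling onto the
    // registered "fs.grep" tool name.
    func normalizeToolName(name string) string {
        if strings.HasPrefix(name, "fs:") {
            return "fs." + strings.TrimPrefix(name, "fs:")
        }
        return name
    }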

cmd/gnoma:
- Early TTY detection so logger is created with correct destination
  before any component gets a reference to it (fixes slog WARN bleed
  into TUI textarea)
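
Roughly the following; the log file path and handler options are
illustrative, the point is that the destination is decided up front:

    package main // sketch of the ordering fix

    import (
        "log/slog"
        "os"

        "golang.org/x/term"
    )

    // newLogger picks the log destination before anything else runs, so
    // every component that later captures the logger writes to the right
    // place from the start.
    func newLogger() *slog.Logger {
        w := os.Stderr
        if term.IsTerminal(int(os.Stderr.Fd())) {
            // Interactive session: the TUI owns the terminal, so WARN
            // lines on stderr would bleed into the textarea. Log to a
            // file instead.
            if f, err := os.OpenFile("gnoma.log",
                os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644); err == nil {
                w = f
            }
        }
        return slog.New(slog.NewTextHandler(w, nil))
    }

    func main() {
        logger := newLogger() // before any component grabs a reference
        slog.SetDefault(logger)
    }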

permission:
- Exempt spawn_elfs and agent tools from safety scanner: elf prompt
  text may legitimately mention .env/.ssh/credentials patterns and
  should not be blocked
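
A minimal sketch of the exemption; the tool names come from this commit,
the function names around them are illustrative:

    package sketch // exemption sketch; scanner internals elided

    // safetyExempt lists tools whose arguments are instructions for a
    // sub-agent, not paths to read: prompts may mention
    // .env/.ssh/credentials legitimately.
    var safetyExempt = map[string]bool{
        "spawn_elfs": true,
        "agent":      true,
    }

    func checkToolCall(name, args string) error {
        if safetyExempt[name] {
            return nil // prompt text is not scanned for sensitive patterns
        }
        return scanForSensitivePatterns(args)
    }

    // scanForSensitivePatterns stands in for the real scanner here.
    func scanForSensitivePatterns(args string) error { return nil }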

tui/app:
- /init retry chain: no-tool-calls → spawn_elfs nudge → write nudge
  (ask for plain text output) → TUI fallback write from streamBuf
- looksLikeAgentsMD + extractMarkdownDoc: validate and clean fallback
  content before writing (reject refusals, strip narrative preambles;
  see sketch below)
- Collapse thinking output to 3 lines; ctrl+o to expand (live stream
  and committed messages)
- Stream-level filter for model pseudo-tool-call blocks: suppresses
  <<tool_code>>...<</tool_code>> and <<function_call>>...<</function_call>>
  spans from entering streamBuf, even when markers are split across chunk
  boundaries (see sketch below)
- sanitizeAssistantText regex covers both block formats
- Reset streamFilterClose at every turn start
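
The validation and cleanup, sketched with illustrative heuristics (the
exact checks in the commit may differ):

    package sketch // illustrative heuristics; real checks may differ

    import "strings"

    // looksLikeAgentsMD rejects fallback content that reads like a
    // refusal or has no markdown structure at all.
    func looksLikeAgentsMD(s string) bool {
        t := strings.TrimSpace(s)
        if len(t) < 80 || !strings.Contains(t, "#") {
            return false
        }
        low := strings.ToLower(t)
        for _, refusal := range []string{"i cannot", "i can't", "as an ai"} {
            if strings.Contains(low, refusal) {
                return false
            }
        }
        return true
    }

    // extractMarkdownDoc drops narrative preamble before the first heading.
    func extractMarkdownDoc(s string) string {
        s = strings.TrimSpace(s)
        if strings.HasPrefix(s, "#") {
            return s
        }
        if i := strings.Index(s, "\n#"); i >= 0 {
            return strings.TrimSpace(s[i+1:])
        }
        return s
    }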
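
And the cross-chunk suppression idea, shown for a single marker pair (the
real filter covers both formats); marker spelling as reconstructed above:

    package sketch // cross-chunk suppressor sketch

    import "strings"

    const (
        openTag  = "<<tool_code>>"
        closeTag = "<</tool_code>>"
    )

    // streamFilter drops pseudo-tool-call blocks before text reaches
    // streamBuf. It carries a marker-sized tail between Feed calls so a
    // marker split across chunk boundaries is still caught. Reset to the
    // zero value at every turn start.
    type streamFilter struct {
        inBlock bool
        carry   string
    }

    func (f *streamFilter) Feed(chunk string) string {
        s := f.carry + chunk
        f.carry = ""
        var out strings.Builder
        for {
            if f.inBlock {
                i := strings.Index(s, closeTag)
                if i < 0 {
                    f.carry = tail(s, len(closeTag)-1) // closer may be split
                    return out.String()
                }
                s = s[i+len(closeTag):]
                f.inBlock = false
            }
            i := strings.Index(s, openTag)
            if i < 0 {
                keep := len(s) - (len(openTag) - 1) // opener may be split
                if keep < 0 {
                    keep = 0
                }
                out.WriteString(s[:keep])
                f.carry = s[keep:]
                return out.String()
            }
            out.WriteString(s[:i])
            s = s[i+len(openTag):]
            f.inBlock = true
        }
    }

    // Flush emits any held tail at turn end (nothing if a block never closed).
    func (f *streamFilter) Flush() string {
        c := f.carry
        f.carry = ""
        if f.inBlock {
            return ""
        }
        return c
    }

    func tail(s string, n int) string {
        if len(s) <= n {
            return s
        }
        return s[len(s)-n:]
    }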

commit cb2d63d06f (parent 14b88cadcc)
Date: 2026-04-05 19:24:51 +02:00

51 changed files with 2855 additions and 353 deletions

@@ -39,7 +39,7 @@ var batchSchema = json.RawMessage(`{
 		},
 		"max_turns": {
 			"type": "integer",
-			"description": "Maximum tool-calling rounds per elf (default 30)"
+			"description": "Maximum tool-calling rounds per elf (0 or omit = unlimited)"
 		}
 	},
 	"required": ["tasks"]
@@ -62,9 +62,8 @@ func (t *BatchTool) SetProgressCh(ch chan<- elf.Progress) {
 func (t *BatchTool) Name() string { return "spawn_elfs" }
 func (t *BatchTool) Description() string { return "Spawn multiple elfs (sub-agents) in parallel. Use this when you need to run 2+ independent tasks concurrently. Each elf gets its own conversation and tools. All elfs run simultaneously and results are collected when all complete." }
 func (t *BatchTool) Parameters() json.RawMessage { return batchSchema }
-func (t *BatchTool) IsReadOnly() bool { return true }
-func (t *BatchTool) IsDestructive() bool { return false }
+func (t *BatchTool) ShouldDefer() bool { return true }
+func (t *BatchTool) IsReadOnly() bool { return true }
+func (t *BatchTool) IsDestructive() bool { return false }
 
 type batchArgs struct {
 	Tasks []batchTask `json:"tasks"`
@@ -89,9 +88,6 @@ func (t *BatchTool) Execute(ctx context.Context, args json.RawMessage) (tool.Res
 	}
 	maxTurns := a.MaxTurns
-	if maxTurns <= 0 {
-		maxTurns = 30
-	}
 	systemPrompt := "You are an elf — a focused sub-agent of gnoma. Complete the given task thoroughly and concisely. Use tools as needed."
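
With the default gone, a zero or omitted max_turns has to mean unlimited
further down; presumably the elf run loop's condition reads roughly like
this (illustrative fragment, not the actual loop):

    // illustrative: maxTurns == 0 is treated as "no cap"
    for turn := 0; maxTurns == 0 || turn < maxTurns; turn++ {
        // one tool-calling round
    }
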
@@ -116,7 +112,7 @@ func (t *BatchTool) Execute(ctx context.Context, args json.RawMessage) (tool.Res
 		}
 	}
-	taskType := parseTaskType(task.TaskType)
+	taskType := parseTaskType(task.TaskType, task.Prompt)
 	e, err := t.manager.Spawn(ctx, taskType, task.Prompt, systemPrompt, maxTurns)
 	if err != nil {
 		for _, entry := range elfs {