Files
vessel/frontend/src/lib/ollama/modelfile-parser.test.ts
vikingowl d81430e1aa test: extend test coverage for backend and frontend
Backend:
- Add fetcher_test.go (HTML stripping, URL fetching utilities)
- Add model_registry_test.go (parsing, size ranges, model matching)
- Add database_test.go (CRUD operations, migrations)
- Add tests for geolocation, search, tools, version handlers

Frontend unit tests (469 total):
- OllamaClient: 22 tests for API methods with mocked fetch
- Memory/RAG: tokenizer, chunker, summarizer, embeddings, vector-store
- Services: prompt-resolution, conversation-summary
- Components: Skeleton, BranchNavigator, ConfirmDialog, ThinkingBlock
- Utils: export, import, file-processor, keyboard
- Tools: builtin math parser (44 tests)

E2E tests (28 total):
- Set up Playwright with Chromium
- App loading, sidebar navigation, settings page
- Chat interface, responsive design, accessibility
- Import dialog, project modal interactions

Config changes:
- Add browser conditions to vitest.config.ts for Svelte 5 components
- Add playwright.config.ts for E2E testing
- Add test:e2e scripts to package.json
- Update .gitignore to exclude test artifacts

Closes #8
2026-01-22 11:05:49 +01:00

174 lines
4.5 KiB
TypeScript

/**
* Modelfile parser tests
*
* Tests parsing of Ollama Modelfile format directives
*/
import { describe, it, expect } from 'vitest';
import {
parseSystemPromptFromModelfile,
parseTemplateFromModelfile,
parseParametersFromModelfile,
hasSystemPrompt
} from './modelfile-parser';
describe('parseSystemPromptFromModelfile', () => {
  it('returns null for empty input', () => {
    // Both an empty string and a null-ish value should be rejected gracefully.
    expect(parseSystemPromptFromModelfile('')).toBeNull();
    expect(parseSystemPromptFromModelfile(null as unknown as string)).toBeNull();
  });

  it('parses triple double quoted system prompt', () => {
    // Multi-line """...""" block surrounded by other directives.
    const source = `FROM llama3
SYSTEM """
You are a helpful assistant.
Be concise and clear.
"""
PARAMETER temperature 0.7`;
    expect(parseSystemPromptFromModelfile(source)).toBe(
      'You are a helpful assistant.\nBe concise and clear.'
    );
  });

  it('parses triple single quoted system prompt', () => {
    const source = `FROM llama3
SYSTEM '''
You are a coding assistant.
'''`;
    expect(parseSystemPromptFromModelfile(source)).toBe('You are a coding assistant.');
  });

  it('parses double quoted single-line system prompt', () => {
    const source = `FROM llama3
SYSTEM "You are a helpful assistant."`;
    expect(parseSystemPromptFromModelfile(source)).toBe('You are a helpful assistant.');
  });

  it('parses single quoted single-line system prompt', () => {
    const source = `FROM mistral
SYSTEM 'Be brief and accurate.'`;
    expect(parseSystemPromptFromModelfile(source)).toBe('Be brief and accurate.');
  });

  it('parses unquoted system prompt', () => {
    // A bare SYSTEM line with no quoting at all.
    const source = `FROM llama3
SYSTEM You are a helpful AI`;
    expect(parseSystemPromptFromModelfile(source)).toBe('You are a helpful AI');
  });

  it('returns null when no system directive', () => {
    const source = `FROM llama3
PARAMETER temperature 0.8`;
    expect(parseSystemPromptFromModelfile(source)).toBeNull();
  });

  it('is case insensitive', () => {
    // Lowercase "system" must be treated the same as "SYSTEM".
    expect(parseSystemPromptFromModelfile(`system "Lower case works too"`)).toBe(
      'Lower case works too'
    );
  });
});
describe('parseTemplateFromModelfile', () => {
  it('returns null for empty input', () => {
    expect(parseTemplateFromModelfile('')).toBeNull();
  });

  it('parses triple quoted template', () => {
    // TEMPLATE content is a Go-template string; inner newlines are preserved.
    const source = `FROM llama3
TEMPLATE """{{ .System }}
{{ .Prompt }}"""`;
    expect(parseTemplateFromModelfile(source)).toBe('{{ .System }}\n{{ .Prompt }}');
  });

  it('parses single-line template', () => {
    const source = `FROM mistral
TEMPLATE "{{ .Prompt }}"`;
    expect(parseTemplateFromModelfile(source)).toBe('{{ .Prompt }}');
  });

  it('returns null when no template', () => {
    // A SYSTEM directive alone must not be mistaken for a template.
    const source = `FROM llama3
SYSTEM "Hello"`;
    expect(parseTemplateFromModelfile(source)).toBeNull();
  });
});
describe('parseParametersFromModelfile', () => {
  it('returns empty object for empty input', () => {
    expect(parseParametersFromModelfile('')).toEqual({});
  });

  it('parses single parameter', () => {
    const source = `FROM llama3
PARAMETER temperature 0.7`;
    // Values are kept as raw strings, not converted to numbers.
    expect(parseParametersFromModelfile(source)).toEqual({ temperature: '0.7' });
  });

  it('parses multiple parameters', () => {
    const source = `FROM llama3
PARAMETER temperature 0.8
PARAMETER top_k 40
PARAMETER top_p 0.9
PARAMETER num_ctx 4096`;
    expect(parseParametersFromModelfile(source)).toEqual({
      temperature: '0.8',
      top_k: '40',
      top_p: '0.9',
      num_ctx: '4096'
    });
  });

  it('normalizes parameter names to lowercase', () => {
    // Mixed-case and upper-case names fold to lower-case keys.
    const source = `PARAMETER Temperature 0.5
PARAMETER TOP_K 50`;
    const parsed = parseParametersFromModelfile(source);
    expect(parsed.temperature).toBe('0.5');
    expect(parsed.top_k).toBe('50');
  });

  it('handles mixed content', () => {
    // PARAMETER lines interleaved with SYSTEM/TEMPLATE directives are
    // still collected; everything else is ignored.
    const source = `FROM mistral
SYSTEM "Be helpful"
PARAMETER temperature 0.7
TEMPLATE "{{ .Prompt }}"
PARAMETER num_ctx 8192`;
    expect(parseParametersFromModelfile(source)).toEqual({
      temperature: '0.7',
      num_ctx: '8192'
    });
  });
});
describe('hasSystemPrompt', () => {
  it('returns true when system prompt exists', () => {
    // Both single-line and triple-quoted SYSTEM directives count.
    for (const source of ['SYSTEM "Hello"', 'SYSTEM """Multi\nline"""']) {
      expect(hasSystemPrompt(source)).toBe(true);
    }
  });

  it('returns false when no system prompt', () => {
    for (const source of ['FROM llama3', '']) {
      expect(hasSystemPrompt(source)).toBe(false);
    }
  });
});