diff --git a/.env b/.env index 7674f0f4..40eab26a 100644 --- a/.env +++ b/.env @@ -37,3 +37,4 @@ AWS_S3_BUCKET_NAME= #SERVER_TIMEOUT=120000 #SERVER_HEADERS_TIMEOUT=60000 #SERVER_KEEP_ALIVE_TIMEOUT=5000 +OLLAMA_BASE_URL=http://localhost:11434 diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index 43f7bbc0..e3977402 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -33,6 +33,9 @@ jobs: - name: Run acceptance tests run: npm run test:acceptance + - name: Wait for services to be ready with migrations + run: sleep 30 + - name: Run e2e tests run: npm run test:e2e diff --git a/docker-compose.yml b/docker-compose.yml index b99aa76b..65bb9bf7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -16,10 +16,13 @@ services: APP_FRONTEND_URL: ${APP_FRONTEND_URL} BODY_PARSER_JSON_LIMIT: ${BODY_PARSER_JSON_LIMIT} ELASTIC_URL: ${ELASTIC_URL} + OLLAMA_BASE_URL: http://host.docker.internal:11434 ports: - "${APP_PORT}:3000" expose: - "${APP_PORT}" + extra_hosts: + - host.docker.internal:host-gateway depends_on: postgres: condition: service_healthy diff --git a/package-lock.json b/package-lock.json index 7a8a8c91..7d9360dc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -34,6 +34,7 @@ "ldapts": "^7.1.0", "looks-same": "^9.0.0", "odiff-bin": "^2.6.1", + "ollama": "^0.6.3", "passport": "^0.6.0", "passport-jwt": "^4.0.1", "passport-local": "^1.0.0", @@ -45,7 +46,8 @@ "rimraf": "^5.0.1", "rxjs": "^7.8.2", "swagger-ui-express": "^4.6.3", - "uuid-apikey": "^1.5.3" + "uuid-apikey": "^1.5.3", + "zod": "^4.2.1" }, "devDependencies": { "@darraghor/eslint-plugin-nestjs-typed": "^6.9.3", @@ -11581,6 +11583,15 @@ "devOptional": true, "license": "MIT" }, + "node_modules/ollama": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.6.3.tgz", + "integrity": "sha512-KEWEhIqE5wtfzEIZbDCLH51VFZ6Z3ZSa6sIOg/E/tBV8S51flyqBOXi+bRxlOYKDf8i327zG9eSTb8IJxvm3Zg==", + "license": "MIT", + "dependencies": { + "whatwg-fetch": "^3.6.20" + } + }, "node_modules/on-finished": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", @@ -14686,6 +14697,12 @@ "url": "https://opencollective.com/webpack" } }, + "node_modules/whatwg-fetch": { + "version": "3.6.20", + "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", + "license": "MIT" + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -14963,6 +14980,15 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zod": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.2.1.tgz", + "integrity": "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } } } } diff --git a/package.json b/package.json index d5c743d3..e5b1cbe9 100644 --- a/package.json +++ b/package.json @@ -52,6 +52,7 @@ "ldapts": "^7.1.0", "looks-same": "^9.0.0", "odiff-bin": "^2.6.1", + "ollama": "^0.6.3", "passport": "^0.6.0", "passport-jwt": "^4.0.1", "passport-local": "^1.0.0", @@ -63,7 +64,8 @@ "rimraf": "^5.0.1", "rxjs": "^7.8.2", "swagger-ui-express": "^4.6.3", - "uuid-apikey": "^1.5.3" + "uuid-apikey": "^1.5.3", + "zod": "^4.2.1" }, "devDependencies": { 
"@darraghor/eslint-plugin-nestjs-typed": "^6.9.3", diff --git a/prisma/Dockerfile b/prisma/Dockerfile index f9ca9969..cbf33d39 100644 --- a/prisma/Dockerfile +++ b/prisma/Dockerfile @@ -12,6 +12,4 @@ RUN npm ci --verbose RUN chmod +x /app/wait-for-it.sh RUN chmod +x /app/entrypoint.sh -ENTRYPOINT ["/app/entrypoint.sh"] - -CMD ["sh"] \ No newline at end of file +ENTRYPOINT ["/app/entrypoint.sh"] \ No newline at end of file diff --git a/prisma/migrations/20251209181002_add_vlm_support/migration.sql b/prisma/migrations/20251209181002_add_vlm_support/migration.sql new file mode 100644 index 00000000..bed01119 --- /dev/null +++ b/prisma/migrations/20251209181002_add_vlm_support/migration.sql @@ -0,0 +1,6 @@ +-- AlterEnum +ALTER TYPE "ImageComparison" ADD VALUE 'vlm'; + +-- AlterTable +ALTER TABLE "TestRun" ADD COLUMN "vlmDescription" TEXT; + diff --git a/prisma/migrations/migration_lock.toml b/prisma/migrations/migration_lock.toml index fbffa92c..044d57cd 100644 --- a/prisma/migrations/migration_lock.toml +++ b/prisma/migrations/migration_lock.toml @@ -1,3 +1,3 @@ # Please do not edit this file manually -# It should be added in your version-control system (i.e. Git) -provider = "postgresql" \ No newline at end of file +# It should be added in your version-control system (e.g., Git) +provider = "postgresql" diff --git a/prisma/schema.prisma b/prisma/schema.prisma index 014ea95a..040e758f 100644 --- a/prisma/schema.prisma +++ b/prisma/schema.prisma @@ -69,6 +69,7 @@ model TestRun { baselineBranchName String? ignoreAreas String @default("[]") tempIgnoreAreas String @default("[]") + vlmDescription String? baseline Baseline? build Build @relation(fields: [buildId], references: [id]) project Project? @relation(fields: [projectId], references: [id]) @@ -138,6 +139,7 @@ enum ImageComparison { pixelmatch lookSame odiff + vlm } enum Role { diff --git a/src/_data_/index.ts b/src/_data_/index.ts index 3c45ad77..ba6ce65a 100644 --- a/src/_data_/index.ts +++ b/src/_data_/index.ts @@ -96,6 +96,7 @@ export const generateTestRun = (testRun?: Partial): TestRun => { baselineBranchName: 'master', branchName: 'develop', merge: false, + vlmDescription: null, ...testRun, }; }; diff --git a/src/compare/compare.module.ts b/src/compare/compare.module.ts index c4c622c8..f7e9a9b8 100644 --- a/src/compare/compare.module.ts +++ b/src/compare/compare.module.ts @@ -3,10 +3,14 @@ import { CompareService } from './compare.service'; import { LookSameService } from './libs/looks-same/looks-same.service'; import { OdiffService } from './libs/odiff/odiff.service'; import { PixelmatchService } from './libs/pixelmatch/pixelmatch.service'; +import { VlmService } from './libs/vlm/vlm.service'; +import { OllamaController } from './libs/vlm/ollama.controller'; +import { OllamaService } from './libs/vlm/ollama.service'; import { StaticModule } from '../static/static.module'; @Module({ - providers: [CompareService, PixelmatchService, LookSameService, OdiffService], + controllers: [OllamaController], + providers: [CompareService, PixelmatchService, LookSameService, OdiffService, VlmService, OllamaService], imports: [StaticModule], exports: [CompareService], }) diff --git a/src/compare/compare.service.spec.ts b/src/compare/compare.service.spec.ts index 9940b176..3ba6f821 100644 --- a/src/compare/compare.service.spec.ts +++ b/src/compare/compare.service.spec.ts @@ -1,9 +1,12 @@ import { Test, TestingModule } from '@nestjs/testing'; +import { ConfigService } from '@nestjs/config'; import { PrismaService } from '../prisma/prisma.service'; 
import { CompareService } from './compare.service'; import { LookSameService } from './libs/looks-same/looks-same.service'; import { OdiffService } from './libs/odiff/odiff.service'; import { PixelmatchService } from './libs/pixelmatch/pixelmatch.service'; +import { VlmService } from './libs/vlm/vlm.service'; +import { OllamaService } from './libs/vlm/ollama.service'; import { StaticModule } from '../static/static.module'; import { ImageComparison } from '@prisma/client'; import * as utils from '../static/utils'; @@ -16,7 +19,21 @@ describe('CompareService', () => { beforeEach(async () => { const module: TestingModule = await Test.createTestingModule({ - providers: [CompareService, OdiffService, PixelmatchService, LookSameService, PrismaService], + providers: [ + CompareService, + OdiffService, + PixelmatchService, + LookSameService, + VlmService, + OllamaService, + PrismaService, + { + provide: ConfigService, + useValue: { + getOrThrow: jest.fn().mockReturnValue('http://localhost:11434'), + }, + }, + ], imports: [StaticModule], }).compile(); diff --git a/src/compare/compare.service.ts b/src/compare/compare.service.ts index 6bb144a0..f7895e9f 100644 --- a/src/compare/compare.service.ts +++ b/src/compare/compare.service.ts @@ -7,6 +7,7 @@ import { PrismaService } from '../prisma/prisma.service'; import { DiffResult } from '../test-runs/diffResult'; import { LookSameService } from './libs/looks-same/looks-same.service'; import { OdiffService } from './libs/odiff/odiff.service'; +import { VlmService } from './libs/vlm/vlm.service'; import { isHddStaticServiceConfigured } from '../static/utils'; @Injectable() @@ -17,6 +18,7 @@ export class CompareService { private readonly pixelmatchService: PixelmatchService, private readonly lookSameService: LookSameService, private readonly odiffService: OdiffService, + private readonly vlmService: VlmService, private readonly prismaService: PrismaService ) {} @@ -44,6 +46,9 @@ export class CompareService { return this.odiffService; } + case ImageComparison.vlm: { + return this.vlmService; + } default: { this.logger.warn(`Unknown ImageComparison value: ${imageComparison}. Falling back to pixelmatch.`); return this.pixelmatchService; diff --git a/src/compare/libs/image-comparator.interface.ts b/src/compare/libs/image-comparator.interface.ts index a186199b..96e213f6 100644 --- a/src/compare/libs/image-comparator.interface.ts +++ b/src/compare/libs/image-comparator.interface.ts @@ -3,8 +3,9 @@ import { ImageCompareInput } from './ImageCompareInput'; import { LooksSameConfig } from './looks-same/looks-same.types'; import { OdiffConfig } from './odiff/odiff.types'; import { PixelmatchConfig } from './pixelmatch/pixelmatch.types'; +import { VlmConfig } from './vlm/vlm.types'; -export type ImageCompareConfig = PixelmatchConfig | LooksSameConfig | OdiffConfig; +export type ImageCompareConfig = PixelmatchConfig | LooksSameConfig | OdiffConfig | VlmConfig; export interface ImageComparator { getDiff(data: ImageCompareInput, config: ImageCompareConfig): Promise; diff --git a/src/compare/libs/vlm/README.md b/src/compare/libs/vlm/README.md new file mode 100644 index 00000000..f0a1b19a --- /dev/null +++ b/src/compare/libs/vlm/README.md @@ -0,0 +1,102 @@ +# VLM (Vision Language Model) Image Comparison + +Hybrid image comparison combining pixelmatch for objective difference detection and Vision Language Models (via Ollama) for human-noticeability analysis. 
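Editor's note: as a rough sketch of the contract this hybrid flow relies on, the snippet below mirrors the result shape the VLM is asked to return (`VlmComparisonResult` in `ollama.types.ts` of this PR) and the status-override rule implemented in `vlm.service.ts`. The helper name `decideStatus` and the simplified `Status` type are illustrative only, not part of the PR.

```ts
// Sketch only: the VLM returns this shape; its verdict can override pixelmatch's status.
interface VlmComparisonResult {
  identical: boolean; // true = differences not noticeable to a human
  description: string; // explanation stored as TestRun.vlmDescription
}

type Status = 'ok' | 'unresolved';

// Hypothetical helper illustrating the override rule: pixelmatch finds a diff,
// the VLM then decides whether that diff actually matters.
function decideStatus(pixelmatchFoundDiff: boolean, vlm?: VlmComparisonResult): Status {
  if (!pixelmatchFoundDiff) return 'ok'; // the VLM is never called in this case
  if (vlm?.identical) return 'ok'; // VLM says the change is not noticeable
  return 'unresolved'; // noticeable change (or no VLM verdict)
}

console.log(decideStatus(true, { identical: true, description: 'Minor antialiasing only' })); // 'ok'
```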
+
+## Architecture Flow
+
+```text
+VLM Comparison Request
+        │
+        ▼
+Run Pixelmatch Comparison
+        │
+        ├─→ No Differences Found → Return OK Status
+        │
+        └─→ Differences Found
+                │
+                ▼
+        Save Diff Image
+                │
+                ▼
+        Run VLM with 3 Images:
+        (Baseline, Comparison, Diff)
+                │
+                ├─→ Not Noticeable → Override: Return OK Status
+                │
+                └─→ Noticeable → Return Unresolved with VLM Description
+```
+
+## Quick Start
+
+### 1. Install & Start Ollama
+
+```bash
+# Install (macOS)
+brew install ollama
+
+# Start Ollama
+ollama serve
+```
+
+### 2. Download a Model
+
+```bash
+# Recommended for accuracy
+ollama pull gemma3:12b
+
+# Note: smaller models do not produce reliable results (see Recommended Models below)
+```
+
+### 3. Configure Backend
+
+Add to `.env`:
+```bash
+OLLAMA_BASE_URL=http://localhost:11434
+```
+
+### 4. Use VLM in Project
+
+Set the project's image comparison to `vlm` with config:
+```json
+{
+  "model": "gemma3:12b",
+  "temperature": 0.1
+}
+```
+
+Optional custom prompt (replaces the default system prompt):
+```json
+{
+  "model": "gemma3:12b",
+  "prompt": "Focus on button colors and text changes",
+  "temperature": 0.1
+}
+```
+
+**Note:** The `prompt` field replaces the entire system prompt. If omitted, a default system prompt is used that analyzes the diff image to determine whether the highlighted differences are noticeable to humans.
+
+## Recommended Models
+
+| Model | Size |
+|-------|------|
+| `gemma3:12b` (**recommended**) | ~12GB |
+
+**Note:** Models smaller than the default (`gemma3:12b`) have been tested and do not produce reliable results: they fail to follow the structured output format consistently and may return incorrect or contradictory responses. For production use, stick to `gemma3:12b` or `llava:13b`.
+
+## Configuration
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `model` | string | `gemma3:12b` | Ollama vision model name |
+| `prompt` | string | Built-in system prompt | Custom prompt for image comparison (replaces the default entirely) |
+| `temperature` | number | `0.1` | Lower = more consistent results (0.0-1.0) |
+
+## API Endpoints
+
+```bash
+# List available models
+GET /ollama/models
+
+# Compare two images (for testing)
+POST /ollama/compare?model=gemma3:12b&prompt=&temperature=0.1
+```
diff --git a/src/compare/libs/vlm/ollama.controller.ts b/src/compare/libs/vlm/ollama.controller.ts
new file mode 100644
index 00000000..1bf0db11
--- /dev/null
+++ b/src/compare/libs/vlm/ollama.controller.ts
@@ -0,0 +1,64 @@
+import {
+  Controller,
+  Get,
+  Post,
+  Query,
+  HttpException,
+  HttpStatus,
+  UseInterceptors,
+  UploadedFiles,
+} from '@nestjs/common';
+import { FilesInterceptor } from '@nestjs/platform-express';
+import { ApiTags, ApiConsumes, ApiBody } from '@nestjs/swagger';
+import { OllamaService } from './ollama.service';
+
+@ApiTags('Ollama')
+@Controller('ollama')
+export class OllamaController {
+  constructor(private readonly ollamaService: OllamaService) {}
+
+  @Get('models')
+  async listModels() {
+    return { models: await this.ollamaService.listModels() };
+  }
+
+  @Post('compare')
+  @ApiConsumes('multipart/form-data')
+  @ApiBody({
+    schema: {
+      type: 'object',
+      required: ['images'],
+      properties: {
+        images: {
+          type: 'array',
+          items: { type: 'string', format: 'binary' },
+          description: 'Two images to compare (baseline and comparison)',
+        },
+      },
+    },
+  })
+  @UseInterceptors(FilesInterceptor('images', 2))
+  async compareImages(
+    @UploadedFiles() files: Express.Multer.File[],
+    @Query('model') model: string,
+    @Query('prompt') prompt: string,
+    @Query('temperature')
temperature: string + ) { + if (files?.length !== 2) { + throw new HttpException('Two images required', HttpStatus.BAD_REQUEST); + } + + return this.ollamaService.generate({ + model, + messages: [ + { + role: 'user', + content: prompt, + images: files.map((f) => new Uint8Array(f.buffer)), + }, + ], + format: 'json', + options: { temperature: Number(temperature) }, + }); + } +} diff --git a/src/compare/libs/vlm/ollama.service.spec.ts b/src/compare/libs/vlm/ollama.service.spec.ts new file mode 100644 index 00000000..f128839e --- /dev/null +++ b/src/compare/libs/vlm/ollama.service.spec.ts @@ -0,0 +1,217 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { ConfigService } from '@nestjs/config'; +import { OllamaService } from './ollama.service'; + +// Mock the ollama module +const mockChat = jest.fn(); +const mockList = jest.fn(); + +jest.mock('ollama', () => { + const MockOllama = jest.fn().mockImplementation(() => ({ + chat: mockChat, + list: mockList, + })); + return { + Ollama: MockOllama, + }; +}); + +describe('OllamaService', () => { + let service: OllamaService; + + beforeEach(async () => { + // Reset mocks + jest.clearAllMocks(); + + const module: TestingModule = await Test.createTestingModule({ + providers: [ + OllamaService, + { + provide: ConfigService, + useValue: { + getOrThrow: jest.fn().mockReturnValue('http://localhost:11434'), + }, + }, + ], + }).compile(); + + service = module.get(OllamaService); + }); + + describe('generate', () => { + it('should call Ollama SDK with correct parameters for Uint8Array', async () => { + const mockResponse = { + model: 'llava', + created_at: new Date(), + message: { content: 'YES', role: 'assistant' }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }; + mockChat.mockResolvedValue(mockResponse); + + const testBytes = new Uint8Array([1, 2, 3, 4]); + const result = await service.generate({ + model: 'llava', + messages: [ + { + role: 'user', + content: 'Test prompt', + images: [testBytes], + }, + ], + }); + + expect(mockChat).toHaveBeenCalledWith({ + model: 'llava', + messages: [ + { + role: 'user', + content: 'Test prompt', + images: [testBytes], + }, + ], + stream: false, + format: undefined, + options: undefined, + }); + expect(result.message.content).toBe('YES'); + expect(result.done).toBe(true); + }); + + it('should call Ollama SDK with correct parameters for base64 strings', async () => { + const mockResponse = { + model: 'llava', + created_at: new Date(), + message: { content: 'YES', role: 'assistant' }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }; + mockChat.mockResolvedValue(mockResponse); + + // Use a longer base64 string + const longBase64 = + 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=='; + const result = await service.generate({ + model: 'llava', + messages: [ + { + role: 'user', + content: 'Test prompt', + images: [longBase64], // base64 string - passed through as-is + }, + ], + }); + + expect(mockChat).toHaveBeenCalledWith({ + model: 'llava', + messages: [ + { + role: 'user', + content: 'Test prompt', + images: [longBase64], + }, + ], + stream: false, + format: undefined, + options: undefined, + }); + expect(result.message.content).toBe('YES'); + expect(result.done).toBe(true); + }); + + it('should throw 
error when SDK call fails', async () => { + mockChat.mockRejectedValue(new Error('Connection refused')); + + await expect( + service.generate({ + model: 'llava', + messages: [{ role: 'user', content: 'Test' }], + }) + ).rejects.toThrow('Connection refused'); + }); + + it('should throw error when OLLAMA_BASE_URL is not configured', async () => { + const mockConfigService = { + getOrThrow: jest.fn().mockImplementation(() => { + throw new Error('Configuration key "OLLAMA_BASE_URL" does not exist'); + }), + } as any; + const newService = new OllamaService(mockConfigService); + + await expect( + newService.generate({ + model: 'llava', + messages: [{ role: 'user', content: 'Test' }], + }) + ).rejects.toThrow('OLLAMA_BASE_URL'); + }); + }); + + describe('listModels', () => { + it('should return list of models', async () => { + const mockDate = new Date('2024-01-01'); + const mockResponse = { + models: [ + { + name: 'llava:7b', + model: 'llava:7b', + size: 1000, + digest: 'abc123', + modified_at: mockDate, + expires_at: mockDate, + size_vram: 500, + details: { + parent_model: '', + format: 'gguf', + family: 'llama', + families: ['llama'], + parameter_size: '7B', + quantization_level: 'Q4_0', + }, + }, + { + name: 'moondream', + model: 'moondream', + size: 2000, + digest: 'def456', + modified_at: mockDate, + expires_at: mockDate, + size_vram: 1000, + details: { + parent_model: '', + format: 'gguf', + family: 'moondream', + families: ['moondream'], + parameter_size: '1.6B', + quantization_level: 'Q4_0', + }, + }, + ], + }; + mockList.mockResolvedValue(mockResponse); + + const result = await service.listModels(); + + expect(mockList).toHaveBeenCalled(); + expect(result).toEqual(mockResponse.models); + }); + + it('should throw error when API fails', async () => { + mockList.mockRejectedValue(new Error('Service Unavailable')); + + await expect(service.listModels()).rejects.toThrow('Service Unavailable'); + }); + }); +}); diff --git a/src/compare/libs/vlm/ollama.service.ts b/src/compare/libs/vlm/ollama.service.ts new file mode 100644 index 00000000..7fad0106 --- /dev/null +++ b/src/compare/libs/vlm/ollama.service.ts @@ -0,0 +1,46 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Ollama, ChatRequest, ChatResponse, ListResponse, ModelResponse } from 'ollama'; + +@Injectable() +export class OllamaService { + private readonly logger: Logger = new Logger(OllamaService.name); + private ollamaClient: Ollama | null = null; + + constructor(private readonly configService: ConfigService) {} + + private getOllamaClient(): Ollama { + if (!this.ollamaClient) { + const baseUrl = this.configService.getOrThrow('OLLAMA_BASE_URL'); + this.ollamaClient = new Ollama({ host: baseUrl }); + } + return this.ollamaClient; + } + + async generate(request: ChatRequest): Promise { + const client = this.getOllamaClient(); + + try { + const response = await client.chat({ + ...request, + stream: false, + }); + + return response; + } catch (error) { + this.logger.error(`Ollama generate request failed: ${error.message}`); + throw error; + } + } + + async listModels(): Promise { + const client = this.getOllamaClient(); + try { + const response: ListResponse = await client.list(); + return response.models; + } catch (error) { + this.logger.error(`Failed to list models: ${error.message}`); + throw error; + } + } +} diff --git a/src/compare/libs/vlm/ollama.types.ts b/src/compare/libs/vlm/ollama.types.ts new file mode 100644 index 00000000..f4b61053 --- /dev/null +++ 
b/src/compare/libs/vlm/ollama.types.ts @@ -0,0 +1,4 @@ +export interface VlmComparisonResult { + identical: boolean; + description: string; +} diff --git a/src/compare/libs/vlm/vlm.service.spec.ts b/src/compare/libs/vlm/vlm.service.spec.ts new file mode 100644 index 00000000..cdf3fb9e --- /dev/null +++ b/src/compare/libs/vlm/vlm.service.spec.ts @@ -0,0 +1,389 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { TestStatus } from '@prisma/client'; +import { PNG } from 'pngjs'; +import { z } from 'zod'; +import { StaticService } from '../../../static/static.service'; +import { NO_BASELINE_RESULT, EQUAL_RESULT } from '../consts'; +import { DEFAULT_CONFIG, VlmService } from './vlm.service'; +import { OllamaService } from './ollama.service'; +import { PixelmatchService } from '../pixelmatch/pixelmatch.service'; +import { DiffResult } from '../../../test-runs/diffResult'; + +const initService = async ({ + getImageMock = jest.fn(), + ollamaGenerateMock = jest.fn(), + pixelmatchGetDiffMock = jest.fn(), +}) => { + const module: TestingModule = await Test.createTestingModule({ + providers: [ + VlmService, + { + provide: StaticService, + useValue: { + getImage: getImageMock, + }, + }, + { + provide: OllamaService, + useValue: { + generate: ollamaGenerateMock, + }, + }, + { + provide: PixelmatchService, + useValue: { + getDiff: pixelmatchGetDiffMock, + }, + }, + ], + }).compile(); + + return module.get(VlmService); +}; + +describe('VlmService', () => { + const image = new PNG({ width: 20, height: 20 }); + const diffImage = new PNG({ width: 20, height: 20 }); + + it('should return NO_BASELINE_RESULT when pixelmatch returns no baseline', async () => { + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(NO_BASELINE_RESULT); + const service = await initService({ pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: null, image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: true }, + DEFAULT_CONFIG + ); + + expect(result).toStrictEqual(NO_BASELINE_RESULT); + expect(pixelmatchGetDiffMock).toHaveBeenCalled(); + }); + + it('should return OK immediately when pixelmatch finds no differences (VLM not called)', async () => { + const pixelmatchResult: DiffResult = { + ...EQUAL_RESULT, + status: TestStatus.ok, + pixelMisMatchCount: 0, + diffPercent: 0, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const ollamaGenerateMock = jest.fn(); + const service = await initService({ pixelmatchGetDiffMock, ollamaGenerateMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + DEFAULT_CONFIG + ); + + expect(result.status).toBe(TestStatus.ok); + expect(result.pixelMisMatchCount).toBe(0); + expect(result.diffPercent).toBe(0); + expect(ollamaGenerateMock).not.toHaveBeenCalled(); // VLM should not be called + }); + + it('should override to OK when pixelmatch finds differences but VLM says not noticeable', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + diffName: 'diff.png', + pixelMisMatchCount: 100, + diffPercent: 2.5, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest + .fn() + .mockReturnValueOnce(image) // baseline + .mockReturnValueOnce(image) // comparison + .mockReturnValueOnce(diffImage); // diff + const ollamaGenerateMock = jest.fn().mockResolvedValue({ + model: 'llava:7b', + 
created_at: new Date(), + message: { + content: + '{"identical": true, "description": "Differences are minor rendering artifacts, not noticeable to humans."}', + role: 'assistant', + }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + DEFAULT_CONFIG + ); + + expect(result.status).toBe(TestStatus.ok); // Overridden by VLM + expect(result.vlmDescription).toBe('Differences are minor rendering artifacts, not noticeable to humans.'); + expect(result.pixelMisMatchCount).toBe(100); // Preserved from pixelmatch + expect(result.diffPercent).toBe(2.5); // Preserved from pixelmatch + expect(result.diffName).toBe('diff.png'); // Preserved from pixelmatch + expect(ollamaGenerateMock).toHaveBeenCalledWith( + expect.objectContaining({ + messages: [ + expect.objectContaining({ + images: expect.arrayContaining([expect.any(Uint8Array), expect.any(Uint8Array), expect.any(Uint8Array)]), + }), + ], + }) + ); + }); + + it('should keep unresolved when pixelmatch finds differences and VLM confirms noticeable', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + diffName: 'diff.png', + pixelMisMatchCount: 500, + diffPercent: 12.5, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest + .fn() + .mockReturnValueOnce(image) // baseline + .mockReturnValueOnce(image) // comparison + .mockReturnValueOnce(diffImage); // diff + const ollamaGenerateMock = jest.fn().mockResolvedValue({ + model: 'llava:7b', + created_at: new Date(), + message: { + content: + '{"identical": false, "description": "Button text changed from Submit to Send, and user count changed from 12 to 15."}', + role: 'assistant', + }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: true }, + DEFAULT_CONFIG + ); + + expect(result.status).toBe(TestStatus.unresolved); // Kept as unresolved + expect(result.vlmDescription).toBe( + 'Button text changed from Submit to Send, and user count changed from 12 to 15.' 
+ ); + expect(result.pixelMisMatchCount).toBe(500); // Preserved from pixelmatch + expect(result.diffPercent).toBe(12.5); // Preserved from pixelmatch + expect(result.diffName).toBe('diff.png'); // Preserved from pixelmatch + }); + + it('should handle invalid JSON response as error and return pixelmatch result', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + diffName: 'diff.png', + pixelMisMatchCount: 200, + diffPercent: 5.0, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest + .fn() + .mockReturnValueOnce(image) // baseline + .mockReturnValueOnce(image) // comparison + .mockReturnValueOnce(diffImage); // diff + const ollamaGenerateMock = jest.fn().mockResolvedValue({ + model: 'llava:7b', + created_at: new Date(), + message: { content: 'Invalid JSON response from model', role: 'assistant' }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + DEFAULT_CONFIG + ); + + expect(result.status).toBe(TestStatus.unresolved); // From pixelmatch + expect(result.vlmDescription).toContain('VLM analysis failed'); + expect(result.pixelMisMatchCount).toBe(200); // Preserved from pixelmatch + expect(result.diffPercent).toBe(5.0); // Preserved from pixelmatch + }); + + it('should use custom model and temperature from config', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + diffName: 'diff.png', + pixelMisMatchCount: 150, + diffPercent: 3.75, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest + .fn() + .mockReturnValueOnce(image) // baseline + .mockReturnValueOnce(image) // comparison + .mockReturnValueOnce(diffImage); // diff + const ollamaGenerateMock = jest.fn().mockResolvedValue({ + model: 'llava:13b', + created_at: new Date(), + message: { content: '{"identical": true, "description": "No differences."}', role: 'assistant' }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + { model: 'llava:13b', prompt: 'Custom context', temperature: 0.2 } + ); + + const VlmComparisonResultSchema = z.object({ + identical: z.boolean(), + description: z.string(), + }); + const expectedJsonSchema = z.toJSONSchema(VlmComparisonResultSchema); + + expect(ollamaGenerateMock).toHaveBeenCalledWith({ + model: 'llava:13b', + messages: [ + { + role: 'user', + content: 'Custom context', + images: expect.arrayContaining([expect.any(Uint8Array), expect.any(Uint8Array), expect.any(Uint8Array)]), + }, + ], + format: expectedJsonSchema, + options: { temperature: 0.2 }, + }); + }); + + it('should handle API errors gracefully and return pixelmatch result', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + 
diffName: 'diff.png', + pixelMisMatchCount: 300, + diffPercent: 7.5, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest + .fn() + .mockReturnValueOnce(image) // baseline + .mockReturnValueOnce(image) // comparison + .mockReturnValueOnce(diffImage); // diff + const ollamaGenerateMock = jest.fn().mockRejectedValue(new Error('Connection refused')); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + DEFAULT_CONFIG + ); + + expect(result.status).toBe(TestStatus.unresolved); // From pixelmatch + expect(result.vlmDescription).toContain('VLM analysis failed'); + expect(result.pixelMisMatchCount).toBe(300); // Preserved from pixelmatch + expect(result.diffPercent).toBe(7.5); // Preserved from pixelmatch + expect(result.diffName).toBe('diff.png'); // Preserved from pixelmatch + }); + + it('should use thinking field when useThinking is true', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + diffName: 'diff.png', + pixelMisMatchCount: 80, + diffPercent: 2.0, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest + .fn() + .mockReturnValueOnce(image) // baseline + .mockReturnValueOnce(image) // comparison + .mockReturnValueOnce(diffImage); // diff + const ollamaGenerateMock = jest.fn().mockResolvedValue({ + model: 'llava:7b', + created_at: new Date(), + message: { + content: '{"identical": false, "description": "Content field"}', + thinking: '{"identical": true, "description": "Thinking field"}', + role: 'assistant', + }, + done: true, + done_reason: 'stop', + total_duration: 1000, + load_duration: 100, + prompt_eval_count: 10, + prompt_eval_duration: 200, + eval_count: 5, + eval_duration: 300, + }); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + { ...DEFAULT_CONFIG, useThinking: true } + ); + + expect(result.status).toBe(TestStatus.ok); // Overridden by VLM + expect(result.vlmDescription).toBe('Thinking field'); + }); + + it('should handle missing diff image gracefully', async () => { + const pixelmatchResult: DiffResult = { + status: TestStatus.unresolved, + diffName: null, // No diff saved + pixelMisMatchCount: 100, + diffPercent: 2.5, + isSameDimension: true, + }; + const pixelmatchGetDiffMock = jest.fn().mockResolvedValue(pixelmatchResult); + const getImageMock = jest.fn().mockReturnValueOnce(image).mockReturnValueOnce(image).mockReturnValueOnce(null); // diff image missing + const ollamaGenerateMock = jest.fn(); + const service = await initService({ getImageMock, ollamaGenerateMock, pixelmatchGetDiffMock }); + + const result = await service.getDiff( + { baseline: 'baseline', image: 'image', diffTollerancePercent: 0.1, ignoreAreas: [], saveDiffAsFile: false }, + DEFAULT_CONFIG + ); + + expect(result).toEqual(pixelmatchResult); // Should return pixelmatch result as-is + expect(ollamaGenerateMock).not.toHaveBeenCalled(); // VLM should not be called + }); + + it.each([ + ['empty string', '', DEFAULT_CONFIG], + ['invalid JSON', 'invalid', DEFAULT_CONFIG], + ['partial config', 
'{"model":"llava:7b"}', { model: 'llava:7b' }],
+    [
+      'full config',
+      '{"model":"llava:13b","prompt":"Custom prompt","temperature":0.2,"useThinking":true}',
+      {
+        model: 'llava:13b',
+        prompt: 'Custom prompt',
+        temperature: 0.2,
+        useThinking: true,
+      },
+    ],
+  ])('should parse config: %s', async (_, configJson, expected) => {
+    const service = await initService({});
+    expect(service.parseConfig(configJson)).toEqual(expected);
+  });
+});
diff --git a/src/compare/libs/vlm/vlm.service.ts b/src/compare/libs/vlm/vlm.service.ts
new file mode 100644
index 00000000..0d93257b
--- /dev/null
+++ b/src/compare/libs/vlm/vlm.service.ts
@@ -0,0 +1,153 @@
+import { Injectable, Logger } from '@nestjs/common';
+import { TestStatus } from '@prisma/client';
+import { StaticService } from '../../../static/static.service';
+import { DiffResult } from '../../../test-runs/diffResult';
+import { parseConfig } from '../../utils';
+import { ImageComparator } from '../image-comparator.interface';
+import { ImageCompareInput } from '../ImageCompareInput';
+import { VlmConfig } from './vlm.types';
+import { OllamaService } from './ollama.service';
+import { PixelmatchService, DEFAULT_CONFIG as PIXELMATCH_DEFAULT_CONFIG } from '../pixelmatch/pixelmatch.service';
+import { PNG } from 'pngjs';
+import { z } from 'zod';
+
+export const DEFAULT_PROMPT = `You are provided with three images:
+1. First image: baseline screenshot
+2. Second image: new version screenshot
+3. Diff image
+
+Spot any difference in text, color, shape and position of elements; treat even a slight change as a difference.
+Ignore minor rendering artifacts that are imperceptible to users, such as antialiasing.
+Describe the difference in about 100 words.`;
+
+const VlmComparisonResultSchema: z.ZodObject<{
+  identical: z.ZodBoolean;
+  description: z.ZodString;
+}> = z.object({
+  identical: z.boolean(),
+  description: z.string(),
+});
+
+export const DEFAULT_CONFIG: VlmConfig = {
+  model: 'gemma3:12b',
+  prompt: DEFAULT_PROMPT,
+  temperature: 0.1,
+  useThinking: false,
+};
+
+@Injectable()
+export class VlmService implements ImageComparator {
+  private readonly logger: Logger = new Logger(VlmService.name);
+
+  constructor(
+    private readonly staticService: StaticService,
+    private readonly ollamaService: OllamaService,
+    private readonly pixelmatchService: PixelmatchService
+  ) {}
+
+  parseConfig(configJson: string): VlmConfig {
+    return parseConfig(configJson, DEFAULT_CONFIG, this.logger);
+  }
+
+  async getDiff(data: ImageCompareInput, config: VlmConfig): Promise<DiffResult> {
+    const pixelmatchResult = await this.pixelmatchService.getDiff(
+      {
+        ...data,
+        saveDiffAsFile: true,
+      },
+      PIXELMATCH_DEFAULT_CONFIG
+    );
+
+    if (pixelmatchResult.status === TestStatus.new) {
+      return pixelmatchResult;
+    }
+
+    if (pixelmatchResult.status === TestStatus.ok) {
+      return pixelmatchResult;
+    }
+
+    this.logger.debug('Pixel diff is being sent to VLM');
+    try {
+      const baseline = await this.staticService.getImage(data.baseline);
+      const image = await this.staticService.getImage(data.image);
+      const diffImage = pixelmatchResult.diffName ?
await this.staticService.getImage(pixelmatchResult.diffName) : null; + + if (!baseline || !image || !diffImage) { + this.logger.warn('Missing images for VLM analysis, returning pixelmatch result'); + return pixelmatchResult; + } + + const baselineBytes = new Uint8Array(PNG.sync.write(baseline)); + const imageBytes = new Uint8Array(PNG.sync.write(image)); + const diffBytes = new Uint8Array(PNG.sync.write(diffImage)); + + const { pass, description } = await this.compareImagesWithVLM(baselineBytes, imageBytes, diffBytes, config); + + // Build result from pixelmatch, but override status based on VLM analysis + const result: DiffResult = { + ...pixelmatchResult, + vlmDescription: description, + }; + + if (pass) { + result.status = TestStatus.ok; + } else { + result.status = TestStatus.unresolved; + } + + return result; + } catch (error) { + this.logger.error(`VLM comparison failed: ${error.message}`, error.stack); + return { + ...pixelmatchResult, + vlmDescription: `VLM analysis failed: ${error.message}`, + }; + } + } + + private async compareImagesWithVLM( + baselineBytes: Uint8Array, + imageBytes: Uint8Array, + diffBytes: Uint8Array, + config: VlmConfig + ): Promise<{ pass: boolean; description: string }> { + const data = await this.ollamaService.generate({ + model: config.model, + messages: [ + { + role: 'user', + content: config.prompt, + images: [baselineBytes, imageBytes, diffBytes], + }, + ], + format: z.toJSONSchema(VlmComparisonResultSchema), + options: { + temperature: config.temperature, + }, + }); + + // Some models return result in thinking field instead of response + const preferred = config.useThinking ? data.message.thinking : data.message.content; + const fallback = config.useThinking ? data.message.content : data.message.thinking; + const content = preferred || fallback; + + this.logger.debug(`${JSON.stringify(data)}`); + this.logger.debug(`VLM Response: ${content}`); + + if (!content) { + throw new Error('Empty response from model'); + } + + return this.parseVlmResponse(content); + } + + private parseVlmResponse(response: string): { pass: boolean; description: string } { + const parsed = JSON.parse(response); + const validated = VlmComparisonResultSchema.parse(parsed); + + return { + pass: validated.identical, + description: validated.description || 'No description provided', + }; + } +} diff --git a/src/compare/libs/vlm/vlm.types.ts b/src/compare/libs/vlm/vlm.types.ts new file mode 100644 index 00000000..b53992ff --- /dev/null +++ b/src/compare/libs/vlm/vlm.types.ts @@ -0,0 +1,26 @@ +export interface VlmConfig { + /** + * Ollama vision model to use for image comparison. + * @default "gemma3:12b" + */ + model: string; + + /** + * Custom prompt for image comparison. + */ + prompt: string; + + /** + * Temperature parameter controlling response randomness (0.0-1.0). + * Lower values = more consistent results. + * @default 0.1 + */ + temperature: number; + + /** + * Whether to prefer thinking field over content field for response. + * Some models return result in thinking field instead of response. 
+   * @default false
+   */
+  useThinking?: boolean;
+}
diff --git a/src/compare/utils/index.ts b/src/compare/utils/index.ts
index 79e593b1..ad0c3666 100644
--- a/src/compare/utils/index.ts
+++ b/src/compare/utils/index.ts
@@ -2,6 +2,11 @@ import { Logger } from '@nestjs/common';
 import { PNG } from 'pngjs';
 import { IgnoreAreaDto } from 'src/test-runs/dto/ignore-area.dto';
 
+export function pngToBase64(png: PNG): string {
+  const buffer = PNG.sync.write(png);
+  return buffer.toString('base64');
+}
+
 export function scaleImageToSize(image: PNG, width: number, height: number): PNG {
   if (width > image.width || height > image.height) {
     const preparedImage = new PNG({ width, height, fill: true });
diff --git a/src/test-runs/diffResult.ts b/src/test-runs/diffResult.ts
index 66b7462c..b4219595 100644
--- a/src/test-runs/diffResult.ts
+++ b/src/test-runs/diffResult.ts
@@ -6,4 +6,10 @@ export interface DiffResult {
   pixelMisMatchCount: number;
   diffPercent: number;
   isSameDimension: boolean;
+  /**
+   * Optional human-readable analysis (e.g., from a VLM or other AI service)
+   * describing the detected differences and whether they are noticeable.
+   * Can be displayed alongside the pixel-based metrics in the UI.
+   */
+  vlmDescription?: string;
 }
diff --git a/src/test-runs/dto/testRun.dto.spec.ts b/src/test-runs/dto/testRun.dto.spec.ts
new file mode 100644
index 00000000..738bac04
--- /dev/null
+++ b/src/test-runs/dto/testRun.dto.spec.ts
@@ -0,0 +1,38 @@
+import { TestRun } from '@prisma/client';
+import { generateTestRun } from '../../_data_';
+import { TestRunDto } from './testRun.dto';
+
+describe('TestRunDto', () => {
+  it('should map all fields correctly including vlmDescription', () => {
+    const testRun: TestRun = generateTestRun({
+      vlmDescription: 'VLM analysis result',
+    });
+
+    const result = new TestRunDto(testRun);
+
+    expect(result).toMatchObject({
+      id: testRun.id,
+      buildId: testRun.buildId,
+      imageName: testRun.imageName,
+      diffName: testRun.diffName,
+      diffPercent: testRun.diffPercent,
+      diffTollerancePercent: testRun.diffTollerancePercent,
+      status: testRun.status,
+      testVariationId: testRun.testVariationId,
+      name: testRun.name,
+      baselineName: testRun.baselineName,
+      os: testRun.os,
+      browser: testRun.browser,
+      viewport: testRun.viewport,
+      device: testRun.device,
+      customTags: testRun.customTags,
+      ignoreAreas: testRun.ignoreAreas,
+      tempIgnoreAreas: testRun.tempIgnoreAreas,
+      comment: testRun.comment,
+      branchName: testRun.branchName,
+      baselineBranchName: testRun.baselineBranchName,
+      merge: testRun.merge,
+      vlmDescription: testRun.vlmDescription,
+    });
+  });
+});
diff --git a/src/test-runs/dto/testRun.dto.ts b/src/test-runs/dto/testRun.dto.ts
index 748d9e22..4aa53168 100644
--- a/src/test-runs/dto/testRun.dto.ts
+++ b/src/test-runs/dto/testRun.dto.ts
@@ -44,6 +44,8 @@ export class TestRunDto {
   baselineBranchName: string;
   @ApiProperty()
   merge: boolean;
+  @ApiPropertyOptional()
+  vlmDescription?: string;
 
   constructor(testRun: TestRun) {
     this.id = testRun.id;
@@ -67,5 +69,6 @@ export class TestRunDto {
     this.branchName = testRun.branchName;
     this.baselineBranchName = testRun.baselineBranchName;
     this.merge = testRun.merge;
+    this.vlmDescription = testRun.vlmDescription;
   }
 }
diff --git a/src/test-runs/dto/testRunResult.dto.ts b/src/test-runs/dto/testRunResult.dto.ts
index a4364424..bde5cd04 100644
--- a/src/test-runs/dto/testRunResult.dto.ts
+++ b/src/test-runs/dto/testRunResult.dto.ts
@@ -5,6 +5,8 @@ import { TestRunDto } from './testRun.dto';
 
 export class TestRunResultDto extends TestRunDto {
   @ApiPropertyOptional()
pixelMisMatchCount?: number; + @ApiPropertyOptional() + vlmDescription?: string; @ApiProperty() url: string; @ApiProperty() @@ -14,6 +16,7 @@ export class TestRunResultDto extends TestRunDto { super(testRun); this.baselineName = testVariation.baselineName; this.pixelMisMatchCount = testRun.pixelMisMatchCount; + this.vlmDescription = testRun.vlmDescription; this.url = `${process.env.APP_FRONTEND_URL}/${testVariation.projectId}?buildId=${testRun.buildId}&testId=${testRun.id}`; } } diff --git a/src/test-runs/test-runs.service.spec.ts b/src/test-runs/test-runs.service.spec.ts index ae3c8991..2f2c9d9f 100644 --- a/src/test-runs/test-runs.service.spec.ts +++ b/src/test-runs/test-runs.service.spec.ts @@ -345,6 +345,7 @@ describe('TestRunsService', () => { diffName: null, pixelMisMatchCount: null, diffPercent: null, + vlmDescription: null, }, }); expect(eventTestRunUpdatedMock).toHaveBeenCalledWith(testRun); @@ -357,6 +358,7 @@ describe('TestRunsService', () => { pixelMisMatchCount: 11, diffPercent: 22, isSameDimension: true, + vlmDescription: 'VLM detected significant color differences in the header section', }; const id = 'some id'; const testRunUpdateMock = jest.fn().mockResolvedValueOnce(testRun); @@ -375,6 +377,7 @@ describe('TestRunsService', () => { diffName: diff.diffName, pixelMisMatchCount: diff.pixelMisMatchCount, diffPercent: diff.diffPercent, + vlmDescription: diff.vlmDescription, }, }); expect(eventTestRunUpdatedMock).toHaveBeenCalledWith(testRun); @@ -383,7 +386,9 @@ describe('TestRunsService', () => { it('findMany', async () => { const buildId = 'some id'; - const testRun: TestRun = generateTestRun(); + const testRun: TestRun = generateTestRun({ + vlmDescription: 'VLM analysis completed', + }); const testRunFindManyMock = jest.fn().mockResolvedValueOnce([testRun]); service = await initService({ testRunFindManyMock, diff --git a/src/test-runs/test-runs.service.ts b/src/test-runs/test-runs.service.ts index 40e7a254..674893c8 100644 --- a/src/test-runs/test-runs.service.ts +++ b/src/test-runs/test-runs.service.ts @@ -173,6 +173,7 @@ export class TestRunsService { pixelMisMatchCount: diffResult && diffResult.pixelMisMatchCount, diffPercent: diffResult && diffResult.diffPercent, status: diffResult ? diffResult.status : TestStatus.new, + vlmDescription: diffResult && diffResult?.vlmDescription, }, }) .then((testRun) => {