From 9e2cfaa6d3a3032b0540d171728ad3410dc41aa7 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Sun, 7 Dec 2025 22:18:20 +0100 Subject: [PATCH 1/6] feat(ollama): add ollama types and meta --- .../ai-ollama/src/meta/model-meta-athene.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-aya.ts | 73 +++++ .../src/meta/model-meta-codegemma.ts | 77 +++++ .../src/meta/model-meta-codellama.ts | 105 +++++++ .../src/meta/model-meta-command-r-plus.ts | 63 ++++ .../src/meta/model-meta-command-r.ts | 63 ++++ .../src/meta/model-meta-command-r7b.ts | 63 ++++ .../src/meta/model-meta-deepseek-coder-v2.ts | 77 +++++ .../src/meta/model-meta-deepseek-r1.ts | 133 +++++++++ .../src/meta/model-meta-deepseek-v3.1.ts | 78 +++++ .../ai-ollama/src/meta/model-meta-devstral.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-dolphin3.ts | 60 ++++ .../src/meta/model-meta-exaone3.5.ts | 91 ++++++ .../ai-ollama/src/meta/model-meta-falcon2.ts | 60 ++++ .../ai-ollama/src/meta/model-meta-falcon3.ts | 105 +++++++ .../src/meta/model-meta-firefunction-v2.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-gemma.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-gemma2.ts | 91 ++++++ .../ai-ollama/src/meta/model-meta-gemma3.ts | 119 ++++++++ .../src/meta/model-meta-granite3-dense.ts | 77 +++++ .../src/meta/model-meta-granite3-guardian.ts | 77 +++++ .../src/meta/model-meta-granite3-moe.ts | 77 +++++ .../src/meta/model-meta-granite3.1-dense.ts | 77 +++++ .../src/meta/model-meta-granite3.1-moe.ts | 77 +++++ .../src/meta/model-meta-llama-guard3.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-llama2.ts | 91 ++++++ .../src/meta/model-meta-llama3-chatqa.ts | 77 +++++ .../src/meta/model-meta-llama3-gradient.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-llama3.1.ts | 91 ++++++ .../src/meta/model-meta-llama3.2-vision.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-llama3.2.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-llama3.3.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-llama3.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-llama4.ts | 77 +++++ .../src/meta/model-meta-llava-llama3.ts | 63 ++++ .../src/meta/model-meta-llava-phi3.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-llava.ts | 91 ++++++ .../ai-ollama/src/meta/model-meta-marco-o1.ts | 60 ++++ .../src/meta/model-meta-mistral-large.ts | 63 ++++ .../src/meta/model-meta-mistral-nemo.ts | 63 ++++ .../src/meta/model-meta-mistral-small.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-mistral.ts | 60 ++++ .../ai-ollama/src/meta/model-meta-mixtral.ts | 77 +++++ .../src/meta/model-meta-moondream.ts | 63 ++++ .../src/meta/model-meta-nemotron-mini.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-nemotron.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-olmo2.ts | 77 +++++ .../src/meta/model-meta-opencoder.ts | 77 +++++ .../src/meta/model-meta-openhermes.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-phi3.ts | 77 +++++ .../ai-ollama/src/meta/model-meta-phi4.ts | 60 ++++ .../ai-ollama/src/meta/model-meta-qwen.ts | 161 +++++++++++ .../src/meta/model-meta-qwen2.5-coder.ts | 132 +++++++++ .../ai-ollama/src/meta/model-meta-qwen2.5.ts | 133 +++++++++ .../ai-ollama/src/meta/model-meta-qwen2.ts | 105 +++++++ .../ai-ollama/src/meta/model-meta-qwen3.ts | 161 +++++++++++ .../ai-ollama/src/meta/model-meta-qwq.ts | 60 ++++ .../ai-ollama/src/meta/model-meta-sailor2.ts | 90 ++++++ .../src/meta/model-meta-shieldgemma.ts | 91 ++++++ .../src/meta/model-meta-smalltinker.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-smollm.ts | 91 ++++++ .../src/meta/model-meta-tinyllama.ts | 63 ++++ .../ai-ollama/src/meta/model-meta-tulu3.ts 
| 77 +++++ .../typescript/ai-ollama/src/model-meta.ts | 273 ++++++++++++++++++ .../ai-ollama/src/ollama-adapter.ts | 56 +--- packages/typescript/ai-ollama/tsconfig.json | 2 +- 66 files changed, 5372 insertions(+), 53 deletions(-) create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-athene.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-aya.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llava.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts 
create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts create mode 100644 packages/typescript/ai-ollama/src/model-meta.ts diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts new file mode 100644 index 00000000..9442873c --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const ATHENE_V2_LATEST = { + name: 'athene-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies ModelMeta + +const ATHENE_V2_72b = { + name: 'athene-v2:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies ModelMeta + +export const ATHENE_MODELS = [ + ATHENE_V2_LATEST.name, + ATHENE_V2_72b.name, +] as const + +const ATHENE_IMAGE_MODELS = [] as const + +export const ATHENE_EMBEDDING_MODELS = [] as const + +const ATHENE_AUDIO_MODELS = [] as const + +const ATHENE_VIDEO_MODELS = [] as const + +// export type AtheneChatModels = (typeof ATHENE_MODELS)[number] + +// Manual type map for per-model provider options +export type AtheneChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [ATHENE_V2_LATEST.name]: ChatRequest + [ATHENE_V2_72b.name]: ChatRequest +} + +export type 
AtheneModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input + [ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts new file mode 100644 index 00000000..9b58bdd9 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -0,0 +1,73 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const AYA_LATEST = { + name: 'aya:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 8_000, +} as const satisfies ModelMeta + +const AYA_8b = { + name: 'aya:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 8_000, +} as const satisfies ModelMeta + +const AYA_35b = { + name: 'aya:35b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '20gb', + context: 8_000, +} as const satisfies ModelMeta + +export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const + +const AYA_IMAGE_MODELS = [] as const + +export const AYA_EMBEDDING_MODELS = [] as const + +const AYA_AUDIO_MODELS = [] as const + +const AYA_VIDEO_MODELS = [] as const + +// export type AyaChatModels = (typeof AYA_MODELS)[number] + +// Manual type map for per-model provider options +export type AyaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [AYA_LATEST.name]: ChatRequest + [AYA_8b.name]: ChatRequest + [AYA_35b.name]: ChatRequest +} + +export type AyaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [AYA_LATEST.name]: typeof AYA_LATEST.supports.input + [AYA_8b.name]: typeof AYA_8b.supports.input + [AYA_35b.name]: typeof AYA_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts new file mode 100644 index 00000000..b75d5b88 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const CODEGEMMA_LATEST = { + name: 'codegemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies ModelMeta + +const CODEGEMMA_8b = { + name: 'codegemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.65gb', + context: 8_000, +} as const satisfies ModelMeta + +const CODEGEMMA_35b = { + name: 'codegemma:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies ModelMeta + +export const CODEGEMMA_MODELS = [ + CODEGEMMA_LATEST.name, + CODEGEMMA_8b.name, + CODEGEMMA_35b.name, +] as const + +const 
CODEGEMMA_IMAGE_MODELS = [] as const + +export const CODEGEMMA_EMBEDDING_MODELS = [] as const + +const CODEGEMMA_AUDIO_MODELS = [] as const + +const CODEGEMMA_VIDEO_MODELS = [] as const + +// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type CodegemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [CODEGEMMA_LATEST.name]: ChatRequest + [CODEGEMMA_8b.name]: ChatRequest + [CODEGEMMA_35b.name]: ChatRequest +} + +export type CodegemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input + [CODEGEMMA_8b.name]: typeof CODEGEMMA_8b.supports.input + [CODEGEMMA_35b.name]: typeof CODEGEMMA_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts new file mode 100644 index 00000000..22badae9 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -0,0 +1,105 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const CODELLAMA_LATEST = { + name: 'codellama:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 16_000, +} as const satisfies ModelMeta + +const CODELLAMA_7b = { + name: 'codellama:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 16_000, +} as const satisfies ModelMeta + +const CODELLAMA_13b = { + name: 'codellama:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.4gb', + context: 16_000, +} as const satisfies ModelMeta + +const CODELLAMA_34b = { + name: 'codellama:34b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '19gb', + context: 16_000, +} as const satisfies ModelMeta + +const CODELLAMA_70b = { + name: 'codellama:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '39gb', + context: 2_000, +} as const satisfies ModelMeta + +export const CODELLAMA_MODELS = [ + CODELLAMA_LATEST.name, + CODELLAMA_7b.name, + CODELLAMA_13b.name, + CODELLAMA_34b.name, + CODELLAMA_70b.name, +] as const + +const CODELLAMA_IMAGE_MODELS = [] as const + +export const CODELLAMA_EMBEDDING_MODELS = [] as const + +const CODELLAMA_AUDIO_MODELS = [] as const + +const CODELLAMA_VIDEO_MODELS = [] as const + +// export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number] + +// Manual type map for per-model provider options +export type CodellamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [CODELLAMA_LATEST.name]: ChatRequest + [CODELLAMA_7b.name]: ChatRequest + [CODELLAMA_13b.name]: ChatRequest + [CODELLAMA_34b.name]: ChatRequest + [CODELLAMA_70b.name]: ChatRequest +} + +export type CodellamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [CODELLAMA_LATEST.name]: typeof CODELLAMA_LATEST.supports.input + [CODELLAMA_7b.name]: typeof CODELLAMA_7b.supports.input + [CODELLAMA_13b.name]: typeof CODELLAMA_13b.supports.input + [CODELLAMA_34b.name]: 
typeof CODELLAMA_34b.supports.input + [CODELLAMA_70b.name]: typeof CODELLAMA_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts new file mode 100644 index 00000000..364e7f99 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const COMMAND_R_PLUS_LATEST = { + name: 'command-r-plus:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '59gb', + context: 128_000, +} as const satisfies ModelMeta + +const COMMAND_R_PLUS_104b = { + name: 'command-r-plus:104b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '59gb', + context: 128_000, +} as const satisfies ModelMeta + +export const COMMAND_R_PLUS_MODELS = [ + COMMAND_R_PLUS_LATEST.name, + COMMAND_R_PLUS_104b.name, +] as const + +const COMMAND_R_PLUS_IMAGE_MODELS = [] as const + +export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const + +const COMMAND_R_PLUS_AUDIO_MODELS = [] as const + +const COMMAND_R_PLUS_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R_PLUS_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandRPlusChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_PLUS_LATEST.name]: ChatRequest + [COMMAND_R_PLUS_104b.name]: ChatRequest +} + +export type CommandRPlusModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_PLUS_LATEST.name]: typeof COMMAND_R_PLUS_LATEST.supports.input + [COMMAND_R_PLUS_104b.name]: typeof COMMAND_R_PLUS_104b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts new file mode 100644 index 00000000..dbac57f7 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const COMMAND_R_LATEST = { + name: 'command-r:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '19gb', + context: 128_000, +} as const satisfies ModelMeta + +const COMMAND_R_35b = { + name: 'command-r:35b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '19gb', + context: 128_000, +} as const satisfies ModelMeta + +export const COMMAND_R_MODELS = [ + COMMAND_R_LATEST.name, + COMMAND_R_35b.name, +] as const + +const COMMAND_R_IMAGE_MODELS = [] as const + +export const COMMAND_R_EMBEDDING_MODELS = [] as const + +const COMMAND_R_AUDIO_MODELS = [] as const + +const COMMAND_R_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R_MODELS)[number] + +// Manual type map for per-model provider 
options +export type CommandRChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_LATEST.name]: ChatRequest + [COMMAND_R_35b.name]: ChatRequest +} + +export type CommandRModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_LATEST.name]: typeof COMMAND_R_LATEST.supports.input + [COMMAND_R_35b.name]: typeof COMMAND_R_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts new file mode 100644 index 00000000..848e5891 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const COMMAND_R_7b_LATEST = { + name: 'command-r7b:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5.1gb', + context: 8_000, +} as const satisfies ModelMeta + +const COMMAND_R_7b_7b = { + name: 'command-r7b:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5.1gb', + context: 8_000, +} as const satisfies ModelMeta + +export const COMMAND_R_7b_MODELS = [ + COMMAND_R_7b_LATEST.name, + COMMAND_R_7b_7b.name, +] as const + +const COMMAND_R_7b_IMAGE_MODELS = [] as const + +export const COMMAND_R_7b_EMBEDDING_MODELS = [] as const + +const COMMAND_R_7b_AUDIO_MODELS = [] as const + +const COMMAND_R_7b_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R7b_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandR7bChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_7b_LATEST.name]: ChatRequest + [COMMAND_R_7b_7b.name]: ChatRequest +} + +export type CommandR7bModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_7b_LATEST.name]: typeof COMMAND_R_7b_LATEST.supports.input + [COMMAND_R_7b_7b.name]: typeof COMMAND_R_7b_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts new file mode 100644 index 00000000..280391a4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const DEEPSEEK_CODER_V2_LATEST = { + name: 'deepseek-coder-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 160_900, +} as const satisfies ModelMeta + +const DEEPSEEK_CODER_V2_16b = { + name: 'deepseek-coder-v2:16b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.9gb', + context: 160_000, +} as const satisfies ModelMeta + +const DEEPSEEK_CODER_V2_236b = { + name: 'deepseek-coder-v2:236b', + supports: { + input: 
['text'], + output: ['text'], + capabilities: [], + }, + size: '133gb', + context: 4_000, +} as const satisfies ModelMeta + +export const DEEPSEEK_CODER_V2_MODELS = [ + DEEPSEEK_CODER_V2_LATEST.name, + DEEPSEEK_CODER_V2_16b.name, + DEEPSEEK_CODER_V2_236b.name, +] as const + +const DEEPSEEK_CODER_V2_IMAGE_MODELS = [] as const + +export const DEEPSEEK_CODER_V2_EMBEDDING_MODELS = [] as const + +const DEEPSEEK_CODER_V2_AUDIO_MODELS = [] as const + +const DEEPSEEK_CODER_V2_VIDEO_MODELS = [] as const + +// export type DeepseekCoderV2ChatModels = (typeof DEEPSEEK_CODER_V2_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekCoderV2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_CODER_V2_LATEST.name]: ChatRequest + [DEEPSEEK_CODER_V2_16b.name]: ChatRequest + [DEEPSEEK_CODER_V2_236b.name]: ChatRequest +} + +export type DeepseekCoderV2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_CODER_V2_LATEST.name]: typeof DEEPSEEK_CODER_V2_LATEST.supports.input + [DEEPSEEK_CODER_V2_16b.name]: typeof DEEPSEEK_CODER_V2_16b.supports.input + [DEEPSEEK_CODER_V2_236b.name]: typeof DEEPSEEK_CODER_V2_236b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts new file mode 100644 index 00000000..00642c79 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts @@ -0,0 +1,133 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const DEEPSEEK_R1_LATEST = { + name: 'deepseek-r1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEEPSEEK_R1_1_5b = { + name: 'deepseek-r1:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '1.1gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEEPSEEK_R1_7b = { + name: 'deepseek-r1:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '4.7gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEEPSEEK_R1_8b = { + name: 'deepseek-r1:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEEPSEEK_R1_32b = { + name: 'deepseek-r1:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '20gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEEPSEEK_R1_70b = { + name: 'deepseek-r1:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEEPSEEK_R1_671b = { + name: 'deepseek-r1:671b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 128_000, +} as const satisfies ModelMeta + +export const DEEPSEEK_R1_MODELS = [ + DEEPSEEK_R1_LATEST.name, + DEEPSEEK_R1_1_5b.name, + 
DEEPSEEK_R1_7b.name, + DEEPSEEK_R1_8b.name, + DEEPSEEK_R1_32b.name, + DEEPSEEK_R1_70b.name, + DEEPSEEK_R1_671b.name, +] as const + +const DEEPSEEK_R1_IMAGE_MODELS = [] as const + +export const DEEPSEEK_R1_EMBEDDING_MODELS = [] as const + +const DEEPSEEK_R1_AUDIO_MODELS = [] as const + +const DEEPSEEK_R1_VIDEO_MODELS = [] as const + +// export type DeepseekChatModels = (typeof DEEPSEEK_R1_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekR1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_R1_LATEST.name]: ChatRequest + [DEEPSEEK_R1_1_5b.name]: ChatRequest + [DEEPSEEK_R1_7b.name]: ChatRequest + [DEEPSEEK_R1_8b.name]: ChatRequest + [DEEPSEEK_R1_32b.name]: ChatRequest + [DEEPSEEK_R1_70b.name]: ChatRequest + [DEEPSEEK_R1_671b.name]: ChatRequest +} + +export type DeepseekR1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_R1_LATEST.name]: typeof DEEPSEEK_R1_LATEST.supports.input + [DEEPSEEK_R1_1_5b.name]: typeof DEEPSEEK_R1_1_5b.supports.input + [DEEPSEEK_R1_7b.name]: typeof DEEPSEEK_R1_7b.supports.input + [DEEPSEEK_R1_8b.name]: typeof DEEPSEEK_R1_8b.supports.input + [DEEPSEEK_R1_32b.name]: typeof DEEPSEEK_R1_32b.supports.input + [DEEPSEEK_R1_70b.name]: typeof DEEPSEEK_R1_70b.supports.input + [DEEPSEEK_R1_671b.name]: typeof DEEPSEEK_R1_671b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts new file mode 100644 index 00000000..a1424384 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts @@ -0,0 +1,78 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const DEEPSEEK_V3_1_LATEST = { + name: 'deepseek-v3.1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 160_000, +} as const satisfies ModelMeta + +const DEEPSEEK_V3_1_671b = { + name: 'deepseek-v3.1:671b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + + size: '404gb', + context: 160_000, +} as const satisfies ModelMeta + +const DEEPSEEK_V3_1_671b_cloud = { + name: 'deepseek-v3.1:671b-cloud', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 160_000, +} as const satisfies ModelMeta + +export const DEEPSEEK_V3_1_MODELS = [ + DEEPSEEK_V3_1_LATEST.name, + DEEPSEEK_V3_1_671b.name, + DEEPSEEK_V3_1_671b_cloud.name, +] as const + +const DEEPSEEK_V3_1_IMAGE_MODELS = [] as const + +export const DEEPSEEK_V3_1_EMBEDDING_MODELS = [] as const + +const DEEPSEEK_V3_1_AUDIO_MODELS = [] as const + +const DEEPSEEK_V3_1_VIDEO_MODELS = [] as const + +// export type DeepseekV3_1ChatModels = (typeof DEEPSEEK_V3_1__MODELS)[number] + +// Manual type map for per-model provider options +export type Deepseekv3_1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_V3_1_LATEST.name]: ChatRequest + [DEEPSEEK_V3_1_671b.name]: ChatRequest + [DEEPSEEK_V3_1_671b_cloud.name]: ChatRequest +} + +export type Deepseekv3_1ModelInputModalitiesByName = { + //
Models with text, image, audio, video (no document) + [DEEPSEEK_V3_1_LATEST.name]: typeof DEEPSEEK_V3_1_LATEST.supports.input + [DEEPSEEK_V3_1_671b.name]: typeof DEEPSEEK_V3_1_671b.supports.input + [DEEPSEEK_V3_1_671b_cloud.name]: typeof DEEPSEEK_V3_1_671b_cloud.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts new file mode 100644 index 00000000..063c7dad --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const DEVSTRAL_LATEST = { + name: 'devstral:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 128_000, +} as const satisfies ModelMeta + +const DEVSTRAL_24b = { + name: 'devstral:24b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 128_000, +} as const satisfies ModelMeta + +export const DEVSTRAL_MODELS = [ + DEVSTRAL_LATEST.name, + DEVSTRAL_24b.name, +] as const + +const DEVSTRAL_IMAGE_MODELS = [] as const + +export const DEVSTRAL_EMBEDDING_MODELS = [] as const + +const DEVSTRAL_AUDIO_MODELS = [] as const + +const DEVSTRAL_VIDEO_MODELS = [] as const + +// export type DevstralChatModels = (typeof DEVSTRAL_MODELS)[number] + +// Manual type map for per-model provider options +export type DevstralChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEVSTRAL_LATEST.name]: ChatRequest + [DEVSTRAL_24b.name]: ChatRequest +} + +export type DevstralModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEVSTRAL_LATEST.name]: typeof DEVSTRAL_LATEST.supports.input + [DEVSTRAL_24b.name]: typeof DEVSTRAL_24b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts new file mode 100644 index 00000000..18be8d21 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts @@ -0,0 +1,60 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const DOLPHIN3_LATEST = { + name: 'dolphin3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies ModelMeta + +const DOLPHIN3_8b = { + name: 'dolphin3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies ModelMeta + +export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const + +const DOLPHIN3_IMAGE_MODELS = [] as const + +export const DOLPHIN3_EMBEDDING_MODELS = [] as const + +const DOLPHIN3_AUDIO_MODELS = [] as const + +const DOLPHIN3_VIDEO_MODELS = [] as const + +// export type Dolphin3ChatModels = (typeof DOLPHIN3_MODELS)[number] + +// Manual type map for per-model 
provider options +export type Dolphin3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DOLPHIN3_LATEST.name]: ChatRequest + [DOLPHIN3_8b.name]: ChatRequest +} + +export type Dolphin3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DOLPHIN3_LATEST.name]: typeof DOLPHIN3_LATEST.supports.input + [DOLPHIN3_8b.name]: typeof DOLPHIN3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts new file mode 100644 index 00000000..581f9a13 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts @@ -0,0 +1,91 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const EXAONE3_5_LATEST = { + name: 'exaone3.5:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 32_000, +} as const satisfies ModelMeta + +const EXAONE3_5_2_4b = { + name: 'exaone3.5:2.4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 32_000, +} as const satisfies ModelMeta + +const EXAONE3_5_7_1b = { + name: 'exaone3.5:7.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 32_000, +} as const satisfies ModelMeta + +const EXAONE3_5_32b = { + name: 'exaone3.5:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '19gb', + context: 32_000, +} as const satisfies ModelMeta + +export const EXAONE3_5MODELS = [ + EXAONE3_5_LATEST.name, + EXAONE3_5_2_4b.name, + EXAONE3_5_7_1b.name, + EXAONE3_5_32b.name, +] as const + +const EXAONE3_5IMAGE_MODELS = [] as const + +export const EXAONE3_5EMBEDDING_MODELS = [] as const + +const EXAONE3_5AUDIO_MODELS = [] as const + +const EXAONE3_5VIDEO_MODELS = [] as const + +// export type AyaChatModels = (typeof EXAONE3_5MODELS)[number] + +// Manual type map for per-model provider options +export type Exaone3_5ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [EXAONE3_5_LATEST.name]: ChatRequest + [EXAONE3_5_2_4b.name]: ChatRequest + [EXAONE3_5_7_1b.name]: ChatRequest + [EXAONE3_5_32b.name]: ChatRequest +} + +export type Exaone3_5ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [EXAONE3_5_LATEST.name]: typeof EXAONE3_5_LATEST.supports.input + [EXAONE3_5_2_4b.name]: typeof EXAONE3_5_2_4b.supports.input + [EXAONE3_5_7_1b.name]: typeof EXAONE3_5_7_1b.supports.input + [EXAONE3_5_32b.name]: typeof EXAONE3_5_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts new file mode 100644 index 00000000..89ef695d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts @@ -0,0 +1,60 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: 
number +} + +const FALCON2_LATEST = { + name: 'falcon2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.4gb', + context: 2_000, +} as const satisfies ModelMeta + +const FALCON2_11b = { + name: 'falcon2:11b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.4gb', + context: 2_000, +} as const satisfies ModelMeta + +export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const + +const FALCON2_IMAGE_MODELS = [] as const + +export const FALCON2_EMBEDDING_MODELS = [] as const + +const FALCON2_AUDIO_MODELS = [] as const + +const FALCON2_VIDEO_MODELS = [] as const + +// export type Falcon2ChatModels = (typeof FALCON2_MODELS)[number] + +// Manual type map for per-model provider options +export type Falcon2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FALCON2_LATEST.name]: ChatRequest + [FALCON2_11b.name]: ChatRequest +} + +export type Falcon2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FALCON2_LATEST.name]: typeof FALCON2_LATEST.supports.input + [FALCON2_11b.name]: typeof FALCON2_11b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts new file mode 100644 index 00000000..8aed89b1 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts @@ -0,0 +1,105 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const FALCON3_LATEST = { + name: 'falcon3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.6gb', + context: 32_000, +} as const satisfies ModelMeta + +const FALCON3_1b = { + name: 'falcon3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.8gb', + context: 8_000, +} as const satisfies ModelMeta + +const FALCON3_3b = { + name: 'falcon3:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2gb', + context: 32_000, +} as const satisfies ModelMeta + +const FALCON3_7b = { + name: 'falcon3:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.6gb', + context: 32_000, +} as const satisfies ModelMeta + +const FALCON3_10b = { + name: 'falcon3:10b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.3gb', + context: 32_000, +} as const satisfies ModelMeta + +export const FALCON3_MODELS = [ + FALCON3_LATEST.name, + FALCON3_1b.name, + FALCON3_3b.name, + FALCON3_7b.name, + FALCON3_10b.name, +] as const + +const FALCON3_IMAGE_MODELS = [] as const + +export const FALCON3_EMBEDDING_MODELS = [] as const + +const FALCON3_AUDIO_MODELS = [] as const + +const FALCON3_VIDEO_MODELS = [] as const + +// export type Falcon3ChatModels = (typeof FALCON3_MODELS)[number] + +// Manual type map for per-model provider options +export type Falcon3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FALCON3_LATEST.name]: ChatRequest + [FALCON3_1b.name]: ChatRequest + [FALCON3_3b.name]: ChatRequest + [FALCON3_7b.name]: ChatRequest + [FALCON3_10b.name]: ChatRequest +} + +export type 
Falcon3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FALCON3_LATEST.name]: typeof FALCON3_LATEST.supports.input + [FALCON3_1b.name]: typeof FALCON3_1b.supports.input + [FALCON3_3b.name]: typeof FALCON3_3b.supports.input + [FALCON3_7b.name]: typeof FALCON3_7b.supports.input + [FALCON3_10b.name]: typeof FALCON3_10b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts new file mode 100644 index 00000000..537b5c29 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const FIREFUNCTION_V2_LATEST = { + name: 'firefunction-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies ModelMeta + +const FIREFUNCTION_V2_70b = { + name: 'firefunction-v2:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies ModelMeta + +export const FIREFUNCTION_V2_MODELS = [ + FIREFUNCTION_V2_LATEST.name, + FIREFUNCTION_V2_70b.name, +] as const + +const FIREFUNCTION_V2_IMAGE_MODELS = [] as const + +export const FIREFUNCTION_V2_EMBEDDING_MODELS = [] as const + +const FIREFUNCTION_V2_AUDIO_MODELS = [] as const + +const FIREFUNCTION_V2_VIDEO_MODELS = [] as const + +// export type Firefunction_V2ChatModels = (typeof FIREFUNCTION_V2_MODELS)[number] + +// Manual type map for per-model provider options +export type Firefunction_V2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FIREFUNCTION_V2_LATEST.name]: ChatRequest + [FIREFUNCTION_V2_70b.name]: ChatRequest +} + +export type Firefunction_V2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FIREFUNCTION_V2_LATEST.name]: typeof FIREFUNCTION_V2_LATEST.supports.input + [FIREFUNCTION_V2_70b.name]: typeof FIREFUNCTION_V2_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts new file mode 100644 index 00000000..a0d633d4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GEMMA_LATEST = { + name: 'gemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies ModelMeta + +const GEMMA_2b = { + name: 'gemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.7gb', + context: 8_000, +} as const satisfies ModelMeta + +const GEMMA_7b = { + name: 'gemma:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const 
satisfies ModelMeta + +export const GEMMA_MODELS = [ + GEMMA_LATEST.name, + GEMMA_2b.name, + GEMMA_7b.name, +] as const + +const GEMMA_IMAGE_MODELS = [] as const + +export const GEMMA_EMBEDDING_MODELS = [] as const + +const GEMMA_AUDIO_MODELS = [] as const + +const GEMMA_VIDEO_MODELS = [] as const + +// export type GemmaChatModels = (typeof GEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type GemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA_LATEST.name]: ChatRequest + [GEMMA_2b.name]: ChatRequest + [GEMMA_7b.name]: ChatRequest +} + +export type GemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA_LATEST.name]: typeof GEMMA_LATEST.supports.input + [GEMMA_2b.name]: typeof GEMMA_2b.supports.input + [GEMMA_7b.name]: typeof GEMMA_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts new file mode 100644 index 00000000..fe8a4ee1 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts @@ -0,0 +1,91 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GEMMA2_LATEST = { + name: 'gemma2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.4gb', + context: 8_000, +} as const satisfies ModelMeta + +const GEMMA2_2b = { + name: 'gemma2:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 8_000, +} as const satisfies ModelMeta + +const GEMMA2_9b = { + name: 'gemma2:9b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.4gb', + context: 8_000, +} as const satisfies ModelMeta + +const GEMMA2_27b = { + name: 'gemma2:27b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '16gb', + context: 8_000, +} as const satisfies ModelMeta + +export const GEMMA2_MODELS = [ + GEMMA2_LATEST.name, + GEMMA2_2b.name, + GEMMA2_9b.name, + GEMMA2_27b.name, +] as const + +const GEMMA2_IMAGE_MODELS = [] as const + +export const GEMMA2_EMBEDDING_MODELS = [] as const + +const GEMMA2_AUDIO_MODELS = [] as const + +const GEMMA2_VIDEO_MODELS = [] as const + +// export type Gemma2ChatModels = (typeof GEMMA2_MODELS)[number] + +// Manual type map for per-model provider options +export type Gemma2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA2_LATEST.name]: ChatRequest + [GEMMA2_2b.name]: ChatRequest + [GEMMA2_9b.name]: ChatRequest + [GEMMA2_27b.name]: ChatRequest +} + +export type Gemma2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA2_LATEST.name]: typeof GEMMA2_LATEST.supports.input + [GEMMA2_2b.name]: typeof GEMMA2_2b.supports.input + [GEMMA2_9b.name]: typeof GEMMA2_9b.supports.input + [GEMMA2_27b.name]: typeof GEMMA2_27b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts new file mode 100644 index 00000000..3c5e29a6 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts @@ -0,0 +1,119 
@@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GEMMA3_LATEST = { + name: 'gemma3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '3.3gb', + context: 128_000, +} as const satisfies ModelMeta + +const GEMMA3_270m = { + name: 'gemma3:270m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '298mb', + context: 32_000, +} as const satisfies ModelMeta + +const GEMMA3_1b = { + name: 'gemma3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '815mb', + context: 32_000, +} as const satisfies ModelMeta + +const GEMMA3_4b = { + name: 'gemma3:4b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '3.3gb', + context: 128_000, +} as const satisfies ModelMeta + +const GEMMA3_12b = { + name: 'gemma3:12b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '8.1gb', + context: 128_000, +} as const satisfies ModelMeta + +const GEMMA3_27b = { + name: 'gemma3:27b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '17gb', + context: 128_000, +} as const satisfies ModelMeta + +export const GEMMA3_MODELS = [ + GEMMA3_LATEST.name, + GEMMA3_270m.name, + GEMMA3_1b.name, + GEMMA3_4b.name, + GEMMA3_12b.name, + GEMMA3_27b.name, +] as const + +const GEMMA3_IMAGE_MODELS = [] as const + +export const GEMMA3_EMBEDDING_MODELS = [] as const + +const GEMMA3_AUDIO_MODELS = [] as const + +const GEMMA3_VIDEO_MODELS = [] as const + +// export type Gemma3ChatModels = (typeof GEMMA3_MODELS)[number] + +// Manual type map for per-model provider options +export type Gemma3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA3_LATEST.name]: ChatRequest + [GEMMA3_270m.name]: ChatRequest + [GEMMA3_1b.name]: ChatRequest + [GEMMA3_4b.name]: ChatRequest + [GEMMA3_12b.name]: ChatRequest + [GEMMA3_27b.name]: ChatRequest +} + +export type Gemma3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA3_LATEST.name]: typeof GEMMA3_LATEST.supports.input + [GEMMA3_270m.name]: typeof GEMMA3_270m.supports.input + [GEMMA3_1b.name]: typeof GEMMA3_1b.supports.input + [GEMMA3_4b.name]: typeof GEMMA3_4b.supports.input + [GEMMA3_12b.name]: typeof GEMMA3_12b.supports.input + [GEMMA3_27b.name]: typeof GEMMA3_27b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts new file mode 100644 index 00000000..fff202dc --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GRANITE3_DENSE_LATEST = { + name: 'granite3-dense:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', 
+ context: 4_000, +} as const satisfies ModelMeta + +const GRANITE3_DENSE_2b = { + name: 'granite3-dense:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 4_000, +} as const satisfies ModelMeta + +const GRANITE3_DENSE_8b = { + name: 'granite3-dense:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 4_000, +} as const satisfies ModelMeta + +export const GRANITE3_DENSE_MODELS = [ + GRANITE3_DENSE_LATEST.name, + GRANITE3_DENSE_2b.name, + GRANITE3_DENSE_8b.name, +] as const + +const GRANITE3_DENSE_IMAGE_MODELS = [] as const + +export const GRANITE3_DENSE_EMBEDDING_MODELS = [] as const + +const GRANITE3_DENSE_AUDIO_MODELS = [] as const + +const GRANITE3_DENSE_VIDEO_MODELS = [] as const + +// export type Granite3Dense3ChatModels = (typeof GRANITE3_DENSE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3DenseChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_DENSE_LATEST.name]: ChatRequest + [GRANITE3_DENSE_2b.name]: ChatRequest + [GRANITE3_DENSE_8b.name]: ChatRequest +} + +export type Granite3DenseModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_DENSE_LATEST.name]: typeof GRANITE3_DENSE_LATEST.supports.input + [GRANITE3_DENSE_2b.name]: typeof GRANITE3_DENSE_2b.supports.input + [GRANITE3_DENSE_8b.name]: typeof GRANITE3_DENSE_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts new file mode 100644 index 00000000..56ccb2df --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GRANITE3_GUARDIAN_LATEST = { + name: 'granite3-guardian:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.7gb', + context: 8_000, +} as const satisfies ModelMeta + +const GRANITE3_GUARDIAN_2b = { + name: 'granite3-guardian:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.7gb', + context: 8_000, +} as const satisfies ModelMeta + +const GRANITE3_GUARDIAN_8b = { + name: 'granite3-guardian:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies ModelMeta + +export const GRANITE3_GUARDIAN_MODELS = [ + GRANITE3_GUARDIAN_LATEST.name, + GRANITE3_GUARDIAN_2b.name, + GRANITE3_GUARDIAN_8b.name, +] as const + +const GRANITE3_GUARDIAN_IMAGE_MODELS = [] as const + +export const GRANITE3_GUARDIAN_EMBEDDING_MODELS = [] as const + +const GRANITE3_GUARDIAN_AUDIO_MODELS = [] as const + +const GRANITE3_GUARDIAN_VIDEO_MODELS = [] as const + +// export type GraniteGuardian3ChatModels = (typeof GRANITE3_GUARDIAN_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3GuardianChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_GUARDIAN_LATEST.name]: ChatRequest + [GRANITE3_GUARDIAN_2b.name]: ChatRequest + 
[GRANITE3_GUARDIAN_8b.name]: ChatRequest +} + +export type Granite3GuardianModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_GUARDIAN_LATEST.name]: typeof GRANITE3_GUARDIAN_LATEST.supports.input + [GRANITE3_GUARDIAN_2b.name]: typeof GRANITE3_GUARDIAN_2b.supports.input + [GRANITE3_GUARDIAN_8b.name]: typeof GRANITE3_GUARDIAN_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts new file mode 100644 index 00000000..fb681555 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GRANITE3_MOE_LATEST = { + name: 'granite3-moe:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '822mb', + context: 4_000, +} as const satisfies ModelMeta + +const GRANITE3_MOE_1b = { + name: 'granite3-moe:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '822mb', + context: 4_000, +} as const satisfies ModelMeta + +const GRANITE3_MOE_3b = { + name: 'granite3-moe:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.1gb', + context: 4_000, +} as const satisfies ModelMeta + +export const GRANITE3_MOE_MODELS = [ + GRANITE3_MOE_LATEST.name, + GRANITE3_MOE_1b.name, + GRANITE3_MOE_3b.name, +] as const + +const GRANITE3_MOE_IMAGE_MODELS = [] as const + +export const GRANITE3_MOE_EMBEDDING_MODELS = [] as const + +const GRANITE3_MOE_AUDIO_MODELS = [] as const + +const GRANITE3_MOE_VIDEO_MODELS = [] as const + +// export type GraniteMoe3ChatModels = (typeof GRANITE3_MOE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3MoeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_MOE_LATEST.name]: ChatRequest + [GRANITE3_MOE_1b.name]: ChatRequest + [GRANITE3_MOE_3b.name]: ChatRequest +} + +export type Granite3MoeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_MOE_LATEST.name]: typeof GRANITE3_MOE_LATEST.supports.input + [GRANITE3_MOE_1b.name]: typeof GRANITE3_MOE_1b.supports.input + [GRANITE3_MOE_3b.name]: typeof GRANITE3_MOE_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts new file mode 100644 index 00000000..6c4d598d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GRANITE3_1_DENSE_LATEST = { + name: 'granite3.1-dense:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5gb', + context: 128_000, +} as const satisfies ModelMeta +
+const GRANITE3_1_DENSE_2b = { + name: 'granite3.1-dense:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.6gb', + context: 128_000, +} as const satisfies ModelMeta + +const GRANITE3_1_DENSE_8b = { + name: 'granite3.1-dense:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5gb', + context: 128_000, +} as const satisfies ModelMeta + +export const GRANITE3_1_DENSE_MODELS = [ + GRANITE3_1_DENSE_LATEST.name, + GRANITE3_1_DENSE_2b.name, + GRANITE3_1_DENSE_8b.name, +] as const + +const GRANITE3_1_DENSE_IMAGE_MODELS = [] as const + +export const GRANITE3_1_DENSE_EMBEDDING_MODELS = [] as const + +const GRANITE3_1_DENSE_AUDIO_MODELS = [] as const + +const GRANITE3_1_DENSE_VIDEO_MODELS = [] as const + +// export type Granite3_1Dense3ChatModels = (typeof GRANITE3_1_DENSE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3_1DenseChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_1_DENSE_LATEST.name]: ChatRequest + [GRANITE3_1_DENSE_2b.name]: ChatRequest + [GRANITE3_1_DENSE_8b.name]: ChatRequest +} + +export type Granite3_1DenseModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_1_DENSE_LATEST.name]: typeof GRANITE3_1_DENSE_LATEST.supports.input + [GRANITE3_1_DENSE_2b.name]: typeof GRANITE3_1_DENSE_2b.supports.input + [GRANITE3_1_DENSE_8b.name]: typeof GRANITE3_1_DENSE_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts new file mode 100644 index 00000000..b91d129d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const GRANITE3_1_MOE_LATEST = { + name: 'granite3.1-moe:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2gb', + context: 128_000, +} as const satisfies ModelMeta + +const GRANITE3_1_MOE_1b = { + name: 'granite3.1-moe:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.4gb', + context: 128_000, +} as const satisfies ModelMeta + +const GRANITE3_1_MOE_3b = { + name: 'granite3.1-moe:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2gb', + context: 128_000, +} as const satisfies ModelMeta + +export const GRANITE3_1_MOE_MODELS = [ + GRANITE3_1_MOE_LATEST.name, + GRANITE3_1_MOE_1b.name, + GRANITE3_1_MOE_3b.name, +] as const + +const GRANITE3_1_MOE_IMAGE_MODELS = [] as const + +export const GRANITE3_1_MOE_EMBEDDING_MODELS = [] as const + +const GRANITE3_1_MOE_AUDIO_MODELS = [] as const + +const GRANITE3_1_MOE_VIDEO_MODELS = [] as const + +// export type Granite3_1MoeChatModels = (typeof GRANITE3_1_MOE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3_1MoeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_1_MOE_LATEST.name]: ChatRequest + [GRANITE3_1_MOE_1b.name]: ChatRequest + [GRANITE3_1_MOE_3b.name]: ChatRequest +} + 
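+// Usage sketch (illustrative only, assuming a caller narrows by a tag from
+// GRANITE3_1_MOE_MODELS): indexing the map with a model-name literal recovers
+// the provider-options type for that tag.
+//
+//   type MoeOptions =
+//     Granite3_1MoeChatModelProviderOptionsByName['granite3.1-moe:latest']
+//   // MoeOptions resolves to Ollama's ChatRequest
+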
+export type Granite3_1MoeModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [GRANITE3_1_MOE_LATEST.name]: typeof GRANITE3_1_MOE_LATEST.supports.input
+  [GRANITE3_1_MOE_1b.name]: typeof GRANITE3_1_MOE_1b.supports.input
+  [GRANITE3_1_MOE_3b.name]: typeof GRANITE3_1_MOE_3b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
new file mode 100644
index 00000000..81a2d7c8
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
@@ -0,0 +1,77 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA_GUARD3_LATEST = {
+  name: 'llama-guard3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.9gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA_GUARD3_1b = {
+  name: 'llama-guard3:1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '1.6gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA_GUARD3_8b = {
+  name: 'llama-guard3:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.9gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+export const LLAMA_GUARD3_MODELS = [
+  LLAMA_GUARD3_LATEST.name,
+  LLAMA_GUARD3_1b.name,
+  LLAMA_GUARD3_8b.name,
+] as const
+
+const LLAMA_GUARD3_IMAGE_MODELS = [] as const
+
+export const LLAMA_GUARD3_EMBEDDING_MODELS = [] as const
+
+const LLAMA_GUARD3_AUDIO_MODELS = [] as const
+
+const LLAMA_GUARD3_VIDEO_MODELS = [] as const
+
+// export type LlamaGuard3ChatModels = (typeof LLAMA_GUARD3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type LlamaGuard3ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [LLAMA_GUARD3_LATEST.name]: ChatRequest
+  [LLAMA_GUARD3_1b.name]: ChatRequest
+  [LLAMA_GUARD3_8b.name]: ChatRequest
+}
+
+export type LlamaGuard3ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [LLAMA_GUARD3_LATEST.name]: typeof LLAMA_GUARD3_LATEST.supports.input
+  [LLAMA_GUARD3_1b.name]: typeof LLAMA_GUARD3_1b.supports.input
+  [LLAMA_GUARD3_8b.name]: typeof LLAMA_GUARD3_8b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
new file mode 100644
index 00000000..bf7b3c23
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
@@ -0,0 +1,91 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA2_LATEST = {
+  name: 'llama2:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '3.8gb',
+  context: 4_000,
+} as const satisfies ModelMeta
+
+const LLAMA2_7b = {
+  name: 'llama2:7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '3.8gb',
+  context: 4_000,
+} as
const satisfies ModelMeta + +const LLAMA2_13b = { + name: 'llama2:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.4gb', + context: 4_000, +} as const satisfies ModelMeta + +const LLAMA2_70b = { + name: 'llama2:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '39gb', + context: 4_000, +} as const satisfies ModelMeta + +export const LLAMA2_MODELS = [ + LLAMA2_LATEST.name, + LLAMA2_7b.name, + LLAMA2_13b.name, + LLAMA2_70b.name, +] as const + +const LLAMA2_IMAGE_MODELS = [] as const + +export const LLAMA2_EMBEDDING_MODELS = [] as const + +const LLAMA2_AUDIO_MODELS = [] as const + +const LLAMA2_VIDEO_MODELS = [] as const + +// export type Llama2ChatModels = (typeof LLAMA2_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA2_LATEST.name]: ChatRequest + [LLAMA2_7b.name]: ChatRequest + [LLAMA2_13b.name]: ChatRequest + [LLAMA2_70b.name]: ChatRequest +} + +export type Llama2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA2_LATEST.name]: typeof LLAMA2_LATEST.supports.input + [LLAMA2_7b.name]: typeof LLAMA2_7b.supports.input + [LLAMA2_13b.name]: typeof LLAMA2_13b.supports.input + [LLAMA2_70b.name]: typeof LLAMA2_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts new file mode 100644 index 00000000..94318789 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const LLAMA3_CHATQA_LATEST = { + name: 'llama3-chatqa:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7b', + context: 8_000, +} as const satisfies ModelMeta + +const LLAMA3_CHATQA_8b = { + name: 'llama3-chatqa:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies ModelMeta + +const LLAMA3_CHATQA_70b = { + name: 'llama3-chatqa:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies ModelMeta + +export const LLAMA3_CHATQA_MODELS = [ + LLAMA3_CHATQA_LATEST.name, + LLAMA3_CHATQA_8b.name, + LLAMA3_CHATQA_70b.name, +] as const + +const LLAMA3_CHATQA_IMAGE_MODELS = [] as const + +export const LLAMA3_CHATQA_EMBEDDING_MODELS = [] as const + +const LLAMA3_CHATQA_AUDIO_MODELS = [] as const + +const LLAMA3_CHATQA_VIDEO_MODELS = [] as const + +// export type Llama3ChatQaChatModels = (typeof LLAMA3_CHATQA_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3ChatQaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_CHATQA_LATEST.name]: ChatRequest + [LLAMA3_CHATQA_8b.name]: ChatRequest + [LLAMA3_CHATQA_70b.name]: ChatRequest +} + +export type Llama3ChatQaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_CHATQA_LATEST.name]: typeof 
LLAMA3_CHATQA_LATEST.supports.input + [LLAMA3_CHATQA_8b.name]: typeof LLAMA3_CHATQA_8b.supports.input + [LLAMA3_CHATQA_70b.name]: typeof LLAMA3_CHATQA_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts new file mode 100644 index 00000000..ebd33a0a --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const LLAMA3_GRADIENT_LATEST = { + name: 'llama3-gradient:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7b', + context: 1_000_000, +} as const satisfies ModelMeta + +const LLAMA3_GRADIENT_8b = { + name: 'llama3-gradient:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 1_000_000, +} as const satisfies ModelMeta + +const LLAMA3_GRADIENT_70b = { + name: 'llama3-gradient:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 1_000_000, +} as const satisfies ModelMeta + +export const LLAMA3_GRADIENT_MODELS = [ + LLAMA3_GRADIENT_LATEST.name, + LLAMA3_GRADIENT_8b.name, + LLAMA3_GRADIENT_70b.name, +] as const + +const LLAMA3_GRADIENT_IMAGE_MODELS = [] as const + +export const LLAMA3_GRADIENT_EMBEDDING_MODELS = [] as const + +const LLAMA3_GRADIENT_AUDIO_MODELS = [] as const + +const LLAMA3_GRADIENT_VIDEO_MODELS = [] as const + +// export type Llama3GradientChatModels = (typeof LLAMA3_GRADIENT_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3GradientChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_GRADIENT_LATEST.name]: ChatRequest + [LLAMA3_GRADIENT_8b.name]: ChatRequest + [LLAMA3_GRADIENT_70b.name]: ChatRequest +} + +export type Llama3GradientModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_GRADIENT_LATEST.name]: typeof LLAMA3_GRADIENT_LATEST.supports.input + [LLAMA3_GRADIENT_8b.name]: typeof LLAMA3_GRADIENT_8b.supports.input + [LLAMA3_GRADIENT_70b.name]: typeof LLAMA3_GRADIENT_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts new file mode 100644 index 00000000..9c22fe1d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts @@ -0,0 +1,91 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const LLAMA3_1_LATEST = { + name: 'llama3.1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.9b', + context: 128_000, +} as const satisfies ModelMeta + +const LLAMA3_1_8b = { + name: 'llama3.1:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.9gb', + context: 128_000, +} as const 
satisfies ModelMeta
+
+const LLAMA3_1_70b = {
+  name: 'llama3.1:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '43gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_1_405b = {
+  name: 'llama3.1:405b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '243gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+export const LLAMA3_1_MODELS = [
+  LLAMA3_1_LATEST.name,
+  LLAMA3_1_8b.name,
+  LLAMA3_1_70b.name,
+  LLAMA3_1_405b.name,
+] as const
+
+const LLAMA3_1_IMAGE_MODELS = [] as const
+
+export const LLAMA3_1_EMBEDDING_MODELS = [] as const
+
+const LLAMA3_1_AUDIO_MODELS = [] as const
+
+const LLAMA3_1_VIDEO_MODELS = [] as const
+
+// export type Llama3_1ChatModels = (typeof LLAMA3_1_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_1ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [LLAMA3_1_LATEST.name]: ChatRequest
+  [LLAMA3_1_8b.name]: ChatRequest
+  [LLAMA3_1_70b.name]: ChatRequest
+  [LLAMA3_1_405b.name]: ChatRequest
+}
+
+export type Llama3_1ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [LLAMA3_1_LATEST.name]: typeof LLAMA3_1_LATEST.supports.input
+  [LLAMA3_1_8b.name]: typeof LLAMA3_1_8b.supports.input
+  [LLAMA3_1_70b.name]: typeof LLAMA3_1_70b.supports.input
+  [LLAMA3_1_405b.name]: typeof LLAMA3_1_405b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
new file mode 100644
index 00000000..d68ef0b3
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
@@ -0,0 +1,77 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA3_2_VISION_LATEST = {
+  name: 'llama3.2-vision:latest',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '7.8gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_2_VISION_11b = {
+  name: 'llama3.2-vision:11b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '7.8gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_2_VISION_90b = {
+  name: 'llama3.2-vision:90b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '55gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+export const LLAMA3_2_VISION_MODELS = [
+  LLAMA3_2_VISION_LATEST.name,
+  LLAMA3_2_VISION_11b.name,
+  LLAMA3_2_VISION_90b.name,
+] as const
+
+export const LLAMA3_2_VISION_IMAGE_MODELS = [] as const
+
+export const LLAMA3_2_VISION_EMBEDDING_MODELS = [] as const
+
+const LLAMA3_2_VISION_AUDIO_MODELS = [] as const
+
+const LLAMA3_2_VISION_VIDEO_MODELS = [] as const
+
+// export type Llama3_2VisionChatModels = (typeof LLAMA3_2_VISION_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_2VisionChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [LLAMA3_2_VISION_LATEST.name]: ChatRequest
+  [LLAMA3_2_VISION_11b.name]: ChatRequest
+  [LLAMA3_2_VISION_90b.name]: ChatRequest
+}
+
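+// Usage sketch (illustrative only): the same indexing pattern narrows the
+// provider options for a vision tag, e.g. a hypothetical helper could
+// constrain its options argument with:
+//
+//   type VisionOptions =
+//     Llama3_2VisionChatModelProviderOptionsByName['llama3.2-vision:11b']
+//   // VisionOptions resolves to Ollama's ChatRequest
+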
+export type Llama3_2VisionModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [LLAMA3_2_VISION_LATEST.name]: typeof LLAMA3_2_VISION_LATEST.supports.input
+  [LLAMA3_2_VISION_11b.name]: typeof LLAMA3_2_VISION_11b.supports.input
+  [LLAMA3_2_VISION_90b.name]: typeof LLAMA3_2_VISION_90b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
new file mode 100644
index 00000000..cd8e2810
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
@@ -0,0 +1,77 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA3_2_LATEST = {
+  name: 'llama3.2:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '2gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_2_1b = {
+  name: 'llama3.2:1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '1.3gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_2_3b = {
+  name: 'llama3.2:3b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '2gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+export const LLAMA3_2_MODELS = [
+  LLAMA3_2_LATEST.name,
+  LLAMA3_2_1b.name,
+  LLAMA3_2_3b.name,
+] as const
+
+const LLAMA3_2_IMAGE_MODELS = [] as const
+
+export const LLAMA3_2_EMBEDDING_MODELS = [] as const
+
+const LLAMA3_2_AUDIO_MODELS = [] as const
+
+const LLAMA3_2_VIDEO_MODELS = [] as const
+
+// export type Llama3_2ChatModels = (typeof LLAMA3_2_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_2ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [LLAMA3_2_LATEST.name]: ChatRequest
+  [LLAMA3_2_1b.name]: ChatRequest
+  [LLAMA3_2_3b.name]: ChatRequest
+}
+
+export type Llama3_2ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [LLAMA3_2_LATEST.name]: typeof LLAMA3_2_LATEST.supports.input
+  [LLAMA3_2_1b.name]: typeof LLAMA3_2_1b.supports.input
+  [LLAMA3_2_3b.name]: typeof LLAMA3_2_3b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
new file mode 100644
index 00000000..d2efe4c7
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
@@ -0,0 +1,63 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA3_3_LATEST = {
+  name: 'llama3.3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '43gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_3_70b = {
+  name: 'llama3.3:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '43gb',
+  context: 128_000,
+} as const satisfies ModelMeta
+
+export const LLAMA3_3_MODELS = [
+  LLAMA3_3_LATEST.name,
+  LLAMA3_3_70b.name,
+] as const
+
+const LLAMA3_3_IMAGE_MODELS = [] as const
+
+export const LLAMA3_3_EMBEDDING_MODELS = [] as const
+
+const LLAMA3_3_AUDIO_MODELS = [] as const
+
+const LLAMA3_3_VIDEO_MODELS = [] as const
+
+// export type Llama3_3ChatModels = (typeof LLAMA3_3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_3ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [LLAMA3_3_LATEST.name]: ChatRequest
+  [LLAMA3_3_70b.name]: ChatRequest
+}
+
+export type Llama3_3ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [LLAMA3_3_LATEST.name]: typeof LLAMA3_3_LATEST.supports.input
+  [LLAMA3_3_70b.name]: typeof LLAMA3_3_70b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
new file mode 100644
index 00000000..562e1c12
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
@@ -0,0 +1,77 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA3_LATEST = {
+  name: 'llama3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 8_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_8b = {
+  name: 'llama3:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 8_000,
+} as const satisfies ModelMeta
+
+const LLAMA3_70b = {
+  name: 'llama3:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '40gb',
+  context: 8_000,
+} as const satisfies ModelMeta
+
+export const LLAMA3_MODELS = [
+  LLAMA3_LATEST.name,
+  LLAMA3_8b.name,
+  LLAMA3_70b.name,
+] as const
+
+const LLAMA3_IMAGE_MODELS = [] as const
+
+export const LLAMA3_EMBEDDING_MODELS = [] as const
+
+const LLAMA3_AUDIO_MODELS = [] as const
+
+const LLAMA3_VIDEO_MODELS = [] as const
+
+// export type Llama3ChatModels = (typeof LLAMA3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [LLAMA3_LATEST.name]: ChatRequest
+  [LLAMA3_8b.name]: ChatRequest
+  [LLAMA3_70b.name]: ChatRequest
+}
+
+export type Llama3ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [LLAMA3_LATEST.name]: typeof LLAMA3_LATEST.supports.input
+  [LLAMA3_8b.name]: typeof LLAMA3_8b.supports.input
+  [LLAMA3_70b.name]: typeof LLAMA3_70b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
new file mode 100644
index 00000000..4d9b0ae9
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
@@ -0,0 +1,77 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const LLAMA4_LATEST = {
+
name: 'llama4:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['tools', 'vision'], + }, + size: '67b', + context: 10_000_000, +} as const satisfies ModelMeta + +const LLAMA4_16X17b = { + name: 'llama4:16x17b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['tools', 'vision'], + }, + size: '67gb', + context: 10_000_000, +} as const satisfies ModelMeta + +const LLAMA4_128X17b = { + name: 'llama4:128x17b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['tools', 'vision'], + }, + size: '245gb', + context: 1_000_000, +} as const satisfies ModelMeta + +export const LLAMA4_MODELS = [ + LLAMA4_LATEST.name, + LLAMA4_16X17b.name, + LLAMA4_128X17b.name, +] as const + +const LLAMA4_IMAGE_MODELS = [] as const + +export const LLAMA4_EMBEDDING_MODELS = [] as const + +const LLAMA4_AUDIO_MODELS = [] as const + +const LLAMA4_VIDEO_MODELS = [] as const + +// export type Llama3_4ChatModels = (typeof LLAMA4_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3_4ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA4_LATEST.name]: ChatRequest + [LLAMA4_16X17b.name]: ChatRequest + [LLAMA4_128X17b.name]: ChatRequest +} + +export type Llama3_4ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA4_LATEST.name]: typeof LLAMA4_LATEST.supports.input + [LLAMA4_16X17b.name]: typeof LLAMA4_16X17b.supports.input + [LLAMA4_128X17b.name]: typeof LLAMA4_128X17b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts new file mode 100644 index 00000000..0425b1ad --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const LLAVA_LLAMA3_LATEST = { + name: 'llava-llama3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '5.5b', + context: 8_000, +} as const satisfies ModelMeta + +const LLAVA_LLAMA3_8b = { + name: 'llava-llama3:8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '5.5gb', + context: 8_000, +} as const satisfies ModelMeta + +export const LLAVA_LLAMA3_MODELS = [ + LLAVA_LLAMA3_LATEST.name, + LLAVA_LLAMA3_8b.name, +] as const + +const LLAVA_LLAMA3_IMAGE_MODELS = [] as const + +export const LLAVA_LLAMA3_EMBEDDING_MODELS = [] as const + +const LLAVA_LLAMA3_AUDIO_MODELS = [] as const + +const LLAVA_LLAMA3_VIDEO_MODELS = [] as const + +// export type LlavaLlamaChatModels = (typeof LLAVA_LLAMA3_MODELS)[number] + +// Manual type map for per-model provider options +export type LlavaLlamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAVA_LLAMA3_LATEST.name]: ChatRequest + [LLAVA_LLAMA3_8b.name]: ChatRequest +} + +export type LlavaLlamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAVA_LLAMA3_LATEST.name]: typeof LLAVA_LLAMA3_LATEST.supports.input + [LLAVA_LLAMA3_8b.name]: typeof LLAVA_LLAMA3_8b.supports.input 
+} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts new file mode 100644 index 00000000..3104c9d4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const LLAVA_PHI3_LATEST = { + name: 'llava-phi3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '2.9b', + context: 4_000, +} as const satisfies ModelMeta + +const LLAVA_PHI3_8b = { + name: 'llava-phi3:8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '2.9gb', + context: 4_000, +} as const satisfies ModelMeta + +export const LLAVA_PHI3_MODELS = [ + LLAVA_PHI3_LATEST.name, + LLAVA_PHI3_8b.name, +] as const + +const LLAVA_PHI3_IMAGE_MODELS = [] as const + +export const LLAVA_PHI3_EMBEDDING_MODELS = [] as const + +const LLAVA_PHI3_AUDIO_MODELS = [] as const + +const LLAVA_PHI3_VIDEO_MODELS = [] as const + +// export type LlavaPhi3ChatModels = (typeof LLAVA_PHI3_MODELS)[number] + +// Manual type map for per-model provider options +export type LlavaPhi3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAVA_PHI3_LATEST.name]: ChatRequest + [LLAVA_PHI3_8b.name]: ChatRequest +} + +export type LlavaPhi3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAVA_PHI3_LATEST.name]: typeof LLAVA_PHI3_LATEST.supports.input + [LLAVA_PHI3_8b.name]: typeof LLAVA_PHI3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts new file mode 100644 index 00000000..1d5a2013 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts @@ -0,0 +1,91 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const LLAVA_LATEST = { + name: 'llava:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7b', + context: 32_000, +} as const satisfies ModelMeta + +const LLAVA_7b = { + name: 'llava:7b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +const LLAVA_13b = { + name: 'llava:13b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '8gb', + context: 4_000, +} as const satisfies ModelMeta + +const LLAVA_34b = { + name: 'llava:34b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '20gb', + context: 4_000, +} as const satisfies ModelMeta + +export const LLAVA_MODELS = [ + LLAVA_LATEST.name, + LLAVA_7b.name, + LLAVA_13b.name, + LLAVA_34b.name, +] as const + +const LLAVA_IMAGE_MODELS = [] as const + +export const LLAVA_EMBEDDING_MODELS = [] as 
const + +const LLAVA_AUDIO_MODELS = [] as const + +const LLAVA_VIDEO_MODELS = [] as const + +// export type llavaChatModels = (typeof LLAVA_MODELS)[number] + +// Manual type map for per-model provider options +export type llavaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAVA_LATEST.name]: ChatRequest + [LLAVA_7b.name]: ChatRequest + [LLAVA_13b.name]: ChatRequest + [LLAVA_34b.name]: ChatRequest +} + +export type llavaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAVA_LATEST.name]: typeof LLAVA_LATEST.supports.input + [LLAVA_7b.name]: typeof LLAVA_7b.supports.input + [LLAVA_13b.name]: typeof LLAVA_13b.supports.input + [LLAVA_34b.name]: typeof LLAVA_34b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts new file mode 100644 index 00000000..949c5d8f --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts @@ -0,0 +1,60 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const MARCO_O1_LATEST = { + name: 'marco-o1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +const MARCO_O1_7b = { + name: 'marco-o1:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const + +const MARCO_O1_IMAGE_MODELS = [] as const + +export const MARCO_O1_EMBEDDING_MODELS = [] as const + +const MARCO_O1_AUDIO_MODELS = [] as const + +const MARCO_O1_VIDEO_MODELS = [] as const + +// export type MarcoO1ChatModels = (typeof MARCO_O1_MODELS)[number] + +// Manual type map for per-model provider options +export type MarcoO1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MARCO_O1_LATEST.name]: ChatRequest + [MARCO_O1_7b.name]: ChatRequest +} + +export type MarcoO1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MARCO_O1_LATEST.name]: typeof MARCO_O1_LATEST.supports.input + [MARCO_O1_7b.name]: typeof MARCO_O1_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts new file mode 100644 index 00000000..1f924872 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const MISTRAL_LARGE_LATEST = { + name: 'mistral-large:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '73gb', + context: 128_000, +} as const satisfies ModelMeta + +const MISTRAL_LARGE_123b = { + name: 'mistral-large:123b', + supports: { + input: 
['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '73gb', + context: 128_000, +} as const satisfies ModelMeta + +export const MISTRAL_LARGE_MODELS = [ + MISTRAL_LARGE_LATEST.name, + MISTRAL_LARGE_123b.name, +] as const + +const MISTRAL_LARGE_IMAGE_MODELS = [] as const + +export const MISTRAL_LARGE_EMBEDDING_MODELS = [] as const + +const MISTRAL_LARGE_AUDIO_MODELS = [] as const + +const MISTRAL_LARGE_VIDEO_MODELS = [] as const + +// export type MistralLargeChatModels = (typeof MISTRAL_LARGE_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralLargeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_LARGE_LATEST.name]: ChatRequest + [MISTRAL_LARGE_123b.name]: ChatRequest +} + +export type MistralLargeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_LARGE_LATEST.name]: typeof MISTRAL_LARGE_LATEST.supports.input + [MISTRAL_LARGE_123b.name]: typeof MISTRAL_LARGE_123b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts new file mode 100644 index 00000000..62376d15 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const MISTRAL_NEMO_LATEST = { + name: 'mistral-nemo:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '7.1gb', + context: 1_000, +} as const satisfies ModelMeta + +const MISTRAL_NEMO_12b = { + name: 'mistral-nemo:12b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '7.1gb', + context: 1_000, +} as const satisfies ModelMeta + +export const MISTRAL_NEMO_MODELS = [ + MISTRAL_NEMO_LATEST.name, + MISTRAL_NEMO_12b.name, +] as const + +const MISTRAL_NEMO_IMAGE_MODELS = [] as const + +export const MISTRAL_NEMO_EMBEDDING_MODELS = [] as const + +const MISTRAL_NEMO_AUDIO_MODELS = [] as const + +const MISTRAL_NEMO_VIDEO_MODELS = [] as const + +// export type MistralNemoChatModels = (typeof MISTRAL_NEMO_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralNemoChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_NEMO_LATEST.name]: ChatRequest + [MISTRAL_NEMO_12b.name]: ChatRequest +} + +export type MistralNemoModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_NEMO_LATEST.name]: typeof MISTRAL_NEMO_LATEST.supports.input + [MISTRAL_NEMO_12b.name]: typeof MISTRAL_NEMO_12b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts new file mode 100644 index 00000000..15d4f45f --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: 
Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const MISTRAL_SMALL_LATEST = {
+  name: 'mistral-small:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '14gb',
+  context: 32_000,
+} as const satisfies ModelMeta
+
+const MISTRAL_SMALL_22b = {
+  name: 'mistral-small:22b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '13gb',
+  context: 32_000,
+} as const satisfies ModelMeta
+
+const MISTRAL_SMALL_24b = {
+  name: 'mistral-small:24b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '14gb',
+  context: 32_000,
+} as const satisfies ModelMeta
+
+export const MISTRAL_SMALL_MODELS = [
+  MISTRAL_SMALL_LATEST.name,
+  MISTRAL_SMALL_22b.name,
+  MISTRAL_SMALL_24b.name,
+] as const
+
+const MISTRAL_SMALL_IMAGE_MODELS = [] as const
+
+export const MISTRAL_SMALL_EMBEDDING_MODELS = [] as const
+
+const MISTRAL_SMALL_AUDIO_MODELS = [] as const
+
+const MISTRAL_SMALL_VIDEO_MODELS = [] as const
+
+// export type MistralSmallChatModels = (typeof MISTRAL_SMALL_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type MistralSmallChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [MISTRAL_SMALL_LATEST.name]: ChatRequest
+  [MISTRAL_SMALL_22b.name]: ChatRequest
+  [MISTRAL_SMALL_24b.name]: ChatRequest
+}
+
+export type MistralSmallModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [MISTRAL_SMALL_LATEST.name]: typeof MISTRAL_SMALL_LATEST.supports.input
+  [MISTRAL_SMALL_22b.name]: typeof MISTRAL_SMALL_22b.supports.input
+  [MISTRAL_SMALL_24b.name]: typeof MISTRAL_SMALL_24b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
new file mode 100644
index 00000000..276420f3
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
@@ -0,0 +1,60 @@
+import type { ChatRequest } from 'ollama'
+
+interface ModelMeta {
+  name: string
+  providerOptions?: TProviderOptions
+  supports?: {
+    input?: Array<'text' | 'image' | 'video'>
+    output?: Array<'text' | 'image' | 'video'>
+    capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'>
+  }
+  size?: string
+  context?: number
+}
+
+const MISTRAL_LATEST = {
+  name: 'mistral:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.1gb',
+  context: 32_000,
+} as const satisfies ModelMeta
+
+const MISTRAL_7b = {
+  name: 'mistral:7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.1gb',
+  context: 32_000,
+} as const satisfies ModelMeta
+
+export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const
+
+const MISTRAL_IMAGE_MODELS = [] as const
+
+export const MISTRAL_EMBEDDING_MODELS = [] as const
+
+const MISTRAL_AUDIO_MODELS = [] as const
+
+const MISTRAL_VIDEO_MODELS = [] as const
+
+// export type MistralChatModels = (typeof MISTRAL_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type MistralChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [MISTRAL_LATEST.name]: ChatRequest
+  [MISTRAL_7b.name]: ChatRequest
+}
+
+export type MistralModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [MISTRAL_LATEST.name]: typeof
MISTRAL_LATEST.supports.input + [MISTRAL_7b.name]: typeof MISTRAL_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts new file mode 100644 index 00000000..d7d7bede --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const MIXTRAL_LATEST = { + name: 'mixtral:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '26gb', + context: 32_000, +} as const satisfies ModelMeta + +const MIXTRAL_8X7b = { + name: 'mixtral:8x7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '26gb', + context: 32_000, +} as const satisfies ModelMeta + +const MIXTRAL_8X22b = { + name: 'mixtral:8x22b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '80gb', + context: 64_000, +} as const satisfies ModelMeta + +export const MIXTRAL_MODELS = [ + MIXTRAL_LATEST.name, + MIXTRAL_8X7b.name, + MIXTRAL_8X22b.name, +] as const + +const MIXTRAL_IMAGE_MODELS = [] as const + +export const MIXTRAL_EMBEDDING_MODELS = [] as const + +const MIXTRAL_AUDIO_MODELS = [] as const + +const MIXTRAL_VIDEO_MODELS = [] as const + +// export type MixtralChatModels = (typeof MIXTRAL_MODELS)[number] + +// Manual type map for per-model provider options +export type MixtralChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MIXTRAL_LATEST.name]: ChatRequest + [MIXTRAL_8X7b.name]: ChatRequest + [MIXTRAL_8X22b.name]: ChatRequest +} + +export type MixtralModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MIXTRAL_LATEST.name]: typeof MIXTRAL_LATEST.supports.input + [MIXTRAL_8X7b.name]: typeof MIXTRAL_8X7b.supports.input + [MIXTRAL_8X22b.name]: typeof MIXTRAL_8X22b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts new file mode 100644 index 00000000..a2dd1e15 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const MOONDREAM_LATEST = { + name: 'moondream:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '1.7gb', + context: 2_000, +} as const satisfies ModelMeta + +const MOONDREAM_1_8b = { + name: 'moondream:1.8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '1.7gb', + context: 2_000, +} as const satisfies ModelMeta + +export const MOONDREAM_MODELS = [ + MOONDREAM_LATEST.name, + MOONDREAM_1_8b.name, +] as const + +const MOONDREAM_IMAGE_MODELS = [] as const + +export const MOONDREAM_EMBEDDING_MODELS = [] as const + +const MOONDREAM_AUDIO_MODELS = [] as 
const + +const MOONDREAM_VIDEO_MODELS = [] as const + +// export type MoondreamChatModels = (typeof MOONDREAM_MODELS)[number] + +// Manual type map for per-model provider options +export type MoondreamChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MOONDREAM_LATEST.name]: ChatRequest + [MOONDREAM_1_8b.name]: ChatRequest +} + +export type MoondreamModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MOONDREAM_LATEST.name]: typeof MOONDREAM_LATEST.supports.input + [MOONDREAM_1_8b.name]: typeof MOONDREAM_1_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts new file mode 100644 index 00000000..5abf9631 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const NEMOTRON_MINI_LATEST = { + name: 'nemotron-mini:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.7gb', + context: 4_000, +} as const satisfies ModelMeta + +const NEMOTRON_MINI_4b = { + name: 'nemotron-mini:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.7gb', + context: 4_000, +} as const satisfies ModelMeta + +export const NEMOTRON_MINI_MODELS = [ + NEMOTRON_MINI_LATEST.name, + NEMOTRON_MINI_4b.name, +] as const + +const NEMOTRON_MINI_IMAGE_MODELS = [] as const + +export const NEMOTRON_MINI_EMBEDDING_MODELS = [] as const + +const NEMOTRON_MINI_AUDIO_MODELS = [] as const + +const NEMOTRON_MINI_VIDEO_MODELS = [] as const + +// export type NemotronMiniChatModels = (typeof NEMOTRON_MINI_MODELS)[number] + +// Manual type map for per-model provider options +export type NemotronMiniChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [NEMOTRON_MINI_LATEST.name]: ChatRequest + [NEMOTRON_MINI_4b.name]: ChatRequest +} + +export type NemotronMiniModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [NEMOTRON_MINI_LATEST.name]: typeof NEMOTRON_MINI_LATEST.supports.input + [NEMOTRON_MINI_4b.name]: typeof NEMOTRON_MINI_4b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts new file mode 100644 index 00000000..7ae7364e --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const NEMOTRON_LATEST = { + name: 'nemotron:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies ModelMeta + +const NEMOTRON_70b = { + name: 'nemotron:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + 
size: '43gb', + context: 128_000, +} as const satisfies ModelMeta + +export const NEMOTRON_MODELS = [ + NEMOTRON_LATEST.name, + NEMOTRON_70b.name, +] as const + +const NEMOTRON_IMAGE_MODELS = [] as const + +export const NEMOTRON_EMBEDDING_MODELS = [] as const + +const NEMOTRON_AUDIO_MODELS = [] as const + +const NEMOTRON_VIDEO_MODELS = [] as const + +// export type NemotronChatModels = (typeof NEMOTRON_MODELS)[number] + +// Manual type map for per-model provider options +export type NemotronChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [NEMOTRON_LATEST.name]: ChatRequest + [NEMOTRON_70b.name]: ChatRequest +} + +export type NemotronModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [NEMOTRON_LATEST.name]: typeof NEMOTRON_LATEST.supports.input + [NEMOTRON_70b.name]: typeof NEMOTRON_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts new file mode 100644 index 00000000..10b2aa33 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const OLMO2_LATEST = { + name: 'olmo2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 4_000, +} as const satisfies ModelMeta + +const OLMO2_7b = { + name: 'olmo2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 4_000, +} as const satisfies ModelMeta + +const OLMO2_13b = { + name: 'olmo2:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.4gb', + context: 4_000, +} as const satisfies ModelMeta + +export const OLMO2_MODELS = [ + OLMO2_LATEST.name, + OLMO2_7b.name, + OLMO2_13b.name, +] as const + +const OLMO2_IMAGE_MODELS = [] as const + +export const OLMO2_EMBEDDING_MODELS = [] as const + +const OLMO2_AUDIO_MODELS = [] as const + +const OLMO2_VIDEO_MODELS = [] as const + +// export type Olmo2ChatModels = (typeof OLMO2_MODELS)[number] + +// Manual type map for per-model provider options +export type Olmo2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OLMO2_LATEST.name]: ChatRequest + [OLMO2_7b.name]: ChatRequest + [OLMO2_13b.name]: ChatRequest +} + +export type Olmo2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OLMO2_LATEST.name]: typeof OLMO2_LATEST.supports.input + [OLMO2_7b.name]: typeof OLMO2_7b.supports.input + [OLMO2_13b.name]: typeof OLMO2_13b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts new file mode 100644 index 00000000..39639016 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: 
string + context?: number +} + +const OPENCODER_LATEST = { + name: 'opencoder:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies ModelMeta + +const OPENCODER_1_5b = { + name: 'opencoder:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.4gb', + context: 4_000, +} as const satisfies ModelMeta + +const OPENCODER_8b = { + name: 'opencoder:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies ModelMeta + +export const OPENCODER_MODELS = [ + OPENCODER_LATEST.name, + OPENCODER_1_5b.name, + OPENCODER_8b.name, +] as const + +const OPENCODER_IMAGE_MODELS = [] as const + +export const OPENCODER_EMBEDDING_MODELS = [] as const + +const OPENCODER_AUDIO_MODELS = [] as const + +const OPENCODER_VIDEO_MODELS = [] as const + +// export type OpencoderChatModels = (typeof OPENCODER_MODELS)[number] + +// Manual type map for per-model provider options +export type OpencoderChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OPENCODER_LATEST.name]: ChatRequest + [OPENCODER_1_5b.name]: ChatRequest + [OPENCODER_8b.name]: ChatRequest +} + +export type OpencoderModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OPENCODER_LATEST.name]: typeof OPENCODER_LATEST.supports.input + [OPENCODER_1_5b.name]: typeof OPENCODER_1_5b.supports.input + [OPENCODER_8b.name]: typeof OPENCODER_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts new file mode 100644 index 00000000..591593a4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const OPENHERMES_LATEST = { + name: 'openhermes:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies ModelMeta + +const OPENHERMES_V2 = { + name: 'openhermes:v2', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies ModelMeta + +const OPENHERMES_V2_5 = { + name: 'openhermes:v2.5', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies ModelMeta + +export const OPENHERMES_MODELS = [ + OPENHERMES_LATEST.name, + OPENHERMES_V2.name, + OPENHERMES_V2_5.name, +] as const + +const OPENHERMES_IMAGE_MODELS = [] as const + +export const OPENHERMES_EMBEDDING_MODELS = [] as const + +const OPENHERMES_AUDIO_MODELS = [] as const + +const OPENHERMES_VIDEO_MODELS = [] as const + +// export type OpenhermesChatModels = (typeof OPENHERMES_MODELS)[number] + +// Manual type map for per-model provider options +export type OpenhermesChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OPENHERMES_LATEST.name]: ChatRequest + [OPENHERMES_V2.name]: ChatRequest + [OPENHERMES_V2_5.name]: ChatRequest +} + +export type 
OpenhermesModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OPENHERMES_LATEST.name]: typeof OPENHERMES_LATEST.supports.input + [OPENHERMES_V2.name]: typeof OPENHERMES_V2.supports.input + [OPENHERMES_V2_5.name]: typeof OPENHERMES_V2_5.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts new file mode 100644 index 00000000..6affa650 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const PHI3_LATEST = { + name: 'phi3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.2gb', + context: 128_000, +} as const satisfies ModelMeta + +const PHI3_3_8b = { + name: 'phi3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.2gb', + context: 128_000, +} as const satisfies ModelMeta + +const PHI3_14b = { + name: 'phi3:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.9gb', + context: 128_000, +} as const satisfies ModelMeta + +export const PHI3_MODELS = [ + PHI3_LATEST.name, + PHI3_3_8b.name, + PHI3_14b.name, +] as const + +const PHI3_IMAGE_MODELS = [] as const + +export const PHI3_EMBEDDING_MODELS = [] as const + +const PHI3_AUDIO_MODELS = [] as const + +const PHI3_VIDEO_MODELS = [] as const + +// export type Phi3ChatModels = (typeof PHI3_MODELS)[number] + +// Manual type map for per-model provider options +export type Phi3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [PHI3_LATEST.name]: ChatRequest + [PHI3_3_8b.name]: ChatRequest + [PHI3_14b.name]: ChatRequest +} + +export type Phi3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [PHI3_LATEST.name]: typeof PHI3_LATEST.supports.input + [PHI3_3_8b.name]: typeof PHI3_3_8b.supports.input + [PHI3_14b.name]: typeof PHI3_14b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts new file mode 100644 index 00000000..de6d0dad --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts @@ -0,0 +1,60 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const PHI4_LATEST = { + name: 'phi4:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '9.1gb', + context: 16_000, +} as const satisfies ModelMeta + +const PHI4_14b = { + name: 'phi4:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '9.1gb', + context: 16_000, +} as const satisfies ModelMeta + +export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const + +const PHI4_IMAGE_MODELS = [] as const + +export const PHI4_EMBEDDING_MODELS = [] as const + +const PHI4_AUDIO_MODELS = [] as const + +const 
PHI4_VIDEO_MODELS = [] as const + +// export type Phi4ChatModels = (typeof PHI4_MODELS)[number] + +// Manual type map for per-model provider options +export type Phi4ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [PHI4_LATEST.name]: ChatRequest + [PHI4_14b.name]: ChatRequest +} + +export type Phi4ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [PHI4_LATEST.name]: typeof PHI4_LATEST.supports.input + [PHI4_14b.name]: typeof PHI4_14b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts new file mode 100644 index 00000000..eea05791 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts @@ -0,0 +1,161 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const QWEN_LATEST = { + name: 'qwen:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.3gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_0_5b = { + name: 'qwen:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '395mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_1_8b = { + name: 'qwen:1.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.1gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_4b = { + name: 'qwen:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.3gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_7b = { + name: 'qwen:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_14b = { + name: 'qwen:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.2gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_32b = { + name: 'qwen:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '18gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_72b = { + name: 'qwen:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '41gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN_110b = { + name: 'qwen:110b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '63gb', + context: 32_000, +} as const satisfies ModelMeta + +export const QWEN_MODELS = [ + QWEN_LATEST.name, + QWEN_0_5b.name, + QWEN_1_8b.name, + QWEN_4b.name, + QWEN_7b.name, + QWEN_14b.name, + QWEN_32b.name, + QWEN_72b.name, + QWEN_110b.name, +] as const + +const QWEN_IMAGE_MODELS = [] as const + +export const QWEN_EMBEDDING_MODELS = [] as const + +const QWEN_AUDIO_MODELS = [] as const + +const QWEN_VIDEO_MODELS = [] as const + +// export type QwenChatModels = (typeof QWEN_MODELS)[number] + +// Manual type map for per-model provider options +export type QwenChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN_LATEST.name]: ChatRequest + [QWEN_0_5b.name]: ChatRequest + [QWEN_1_8b.name]: ChatRequest + 
[QWEN_4b.name]: ChatRequest + [QWEN_7b.name]: ChatRequest + [QWEN_14b.name]: ChatRequest + [QWEN_32b.name]: ChatRequest + [QWEN_72b.name]: ChatRequest + [QWEN_110b.name]: ChatRequest +} + +export type QwenModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN_LATEST.name]: typeof QWEN_LATEST.supports.input + [QWEN_0_5b.name]: typeof QWEN_0_5b.supports.input + [QWEN_1_8b.name]: typeof QWEN_1_8b.supports.input + [QWEN_4b.name]: typeof QWEN_4b.supports.input + [QWEN_7b.name]: typeof QWEN_7b.supports.input + [QWEN_14b.name]: typeof QWEN_14b.supports.input + [QWEN_32b.name]: typeof QWEN_32b.supports.input + [QWEN_72b.name]: typeof QWEN_72b.supports.input + [QWEN_110b.name]: typeof QWEN_110b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts new file mode 100644 index 00000000..7a7a71e5 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts @@ -0,0 +1,132 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const QWEN2_5_CODER_LATEST = { + name: 'qwen2.5-coder:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_CODER_0_5b = { + name: 'qwen2.5-coder:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '398mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_CODER_1_5b = { + name: 'qwen2.5-coder:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '986mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_CODER_3b = { + name: 'qwen2.5-coder:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.9gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_CODER_7b = { + name: 'qwen2.5-coder:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_CODER_14b = { + name: 'qwen2.5-coder:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '9gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_CODER_32b = { + name: 'qwen2.5-coder:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 32_000, +} as const satisfies ModelMeta + +export const QWEN2_5_CODER_MODELS = [ + QWEN2_5_CODER_LATEST.name, + QWEN2_5_CODER_0_5b.name, + QWEN2_5_CODER_1_5b.name, + QWEN2_5_CODER_7b.name, + QWEN2_5_CODER_14b.name, + QWEN2_5_CODER_32b.name, +] as const + +const QWEN2_5_CODER_IMAGE_MODELS = [] as const + +export const QWEN2_5_CODER_EMBEDDING_MODELS = [] as const + +const QWEN2_5_CODER_AUDIO_MODELS = [] as const + +const QWEN2_5_CODER_VIDEO_MODELS = [] as const + +// export type Qwen2_5CoderChatModels = (typeof QWEN2_5_CODER_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen2_5CoderChatModelProviderOptionsByName = { + // Models with thinking and structured output 
support + [QWEN2_5_CODER_LATEST.name]: ChatRequest + [QWEN2_5_CODER_0_5b.name]: ChatRequest + [QWEN2_5_CODER_1_5b.name]: ChatRequest + [QWEN2_5_CODER_3b.name]: ChatRequest + [QWEN2_5_CODER_7b.name]: ChatRequest + [QWEN2_5_CODER_14b.name]: ChatRequest + [QWEN2_5_CODER_32b.name]: ChatRequest +} + +export type Qwen2_5CoderModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_5_CODER_LATEST.name]: typeof QWEN2_5_CODER_LATEST.supports.input + [QWEN2_5_CODER_0_5b.name]: typeof QWEN2_5_CODER_0_5b.supports.input + [QWEN2_5_CODER_1_5b.name]: typeof QWEN2_5_CODER_1_5b.supports.input + [QWEN2_5_CODER_3b.name]: typeof QWEN2_5_CODER_3b.supports.input + [QWEN2_5_CODER_7b.name]: typeof QWEN2_5_CODER_7b.supports.input + [QWEN2_5_CODER_14b.name]: typeof QWEN2_5_CODER_14b.supports.input + [QWEN2_5_CODER_32b.name]: typeof QWEN2_5_CODER_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts new file mode 100644 index 00000000..e0272058 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts @@ -0,0 +1,133 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const QWEN2_5_LATEST = { + name: 'qwen2.5:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_0_5b = { + name: 'qwen2.5:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '398mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_1_5b = { + name: 'qwen2.5:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '986mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_3b = { + name: 'qwen2.5:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.9gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_7b = { + name: 'qwen2.5:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_32b = { + name: 'qwen2.5:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_5_72b = { + name: 'qwen2.5:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies ModelMeta + +export const QWEN2_5_MODELS = [ + QWEN2_5_LATEST.name, + QWEN2_5_0_5b.name, + QWEN2_5_1_5b.name, + QWEN2_5_3b.name, + QWEN2_5_7b.name, + QWEN2_5_32b.name, + QWEN2_5_72b.name, +] as const + +const QWEN2_5_IMAGE_MODELS = [] as const + +export const QWEN2_5_EMBEDDING_MODELS = [] as const + +const QWEN2_5_AUDIO_MODELS = [] as const + +const QWEN2_5_VIDEO_MODELS = [] as const + +// export type Qwen2_5ChatModels = (typeof QWEN2_5_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen2_5ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + 
[QWEN2_5_LATEST.name]: ChatRequest + [QWEN2_5_0_5b.name]: ChatRequest + [QWEN2_5_1_5b.name]: ChatRequest + [QWEN2_5_3b.name]: ChatRequest + [QWEN2_5_7b.name]: ChatRequest + [QWEN2_5_32b.name]: ChatRequest + [QWEN2_5_72b.name]: ChatRequest +} + +export type Qwen2_5ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_5_LATEST.name]: typeof QWEN2_5_LATEST.supports.input + [QWEN2_5_0_5b.name]: typeof QWEN2_5_0_5b.supports.input + [QWEN2_5_1_5b.name]: typeof QWEN2_5_1_5b.supports.input + [QWEN2_5_3b.name]: typeof QWEN2_5_3b.supports.input + [QWEN2_5_7b.name]: typeof QWEN2_5_7b.supports.input + [QWEN2_5_32b.name]: typeof QWEN2_5_32b.supports.input + [QWEN2_5_72b.name]: typeof QWEN2_5_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts new file mode 100644 index 00000000..0fadb2be --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts @@ -0,0 +1,105 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const QWEN2_LATEST = { + name: 'qwen2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.4gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_0_5b = { + name: 'qwen2:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '352mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_1_5b = { + name: 'qwen2:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '935mb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_7b = { + name: 'qwen2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.4gb', + context: 32_000, +} as const satisfies ModelMeta + +const QWEN2_72b = { + name: 'qwen2:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '41gb', + context: 32_000, +} as const satisfies ModelMeta + +export const QWEN2_MODELS = [ + QWEN2_LATEST.name, + QWEN2_0_5b.name, + QWEN2_1_5b.name, + QWEN2_7b.name, + QWEN2_72b.name, +] as const + +const QWEN2_IMAGE_MODELS = [] as const + +export const QWEN2_EMBEDDING_MODELS = [] as const + +const QWEN2_AUDIO_MODELS = [] as const + +const QWEN2_VIDEO_MODELS = [] as const + +// export type Qwen2ChatModels = (typeof QWEN2_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN2_LATEST.name]: ChatRequest + [QWEN2_0_5b.name]: ChatRequest + [QWEN2_1_5b.name]: ChatRequest + [QWEN2_7b.name]: ChatRequest + [QWEN2_72b.name]: ChatRequest +} + +export type Qwen2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_LATEST.name]: typeof QWEN2_LATEST.supports.input + [QWEN2_0_5b.name]: typeof QWEN2_0_5b.supports.input + [QWEN2_1_5b.name]: typeof QWEN2_1_5b.supports.input + [QWEN2_7b.name]: typeof QWEN2_7b.supports.input + [QWEN2_72b.name]: typeof QWEN2_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts 
b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts new file mode 100644 index 00000000..1403179d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts @@ -0,0 +1,161 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const QWEN3_LATEST = { + name: 'qwen3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 40_000, +} as const satisfies ModelMeta + +const QWEN3_0_6b = { + name: 'qwen3:0.6b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '523mb', + context: 40_000, +} as const satisfies ModelMeta + +const QWEN3_1_7b = { + name: 'qwen3:1.7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '1.4gb', + context: 40_000, +} as const satisfies ModelMeta + +const QWEN3_4b = { + name: 'qwen3:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '2.5gb', + context: 256_000, +} as const satisfies ModelMeta + +const QWEN3_8b = { + name: 'qwen3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 40_000, +} as const satisfies ModelMeta + +const QWEN3_14b = { + name: 'qwen3:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '9.3gb', + context: 40_000, +} as const satisfies ModelMeta + +const QWEN3_30b = { + name: 'qwen3:30b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '19gb', + context: 256_000, +} as const satisfies ModelMeta + +const QWEN3_32b = { + name: 'qwen3:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies ModelMeta + +const QWEN3_235b = { + name: 'qwen3:235b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '142gb', + context: 256_000, +} as const satisfies ModelMeta + +export const QWEN3_MODELS = [ + QWEN3_LATEST.name, + QWEN3_0_6b.name, + QWEN3_1_7b.name, + QWEN3_4b.name, + QWEN3_8b.name, + QWEN3_14b.name, + QWEN3_30b.name, + QWEN3_32b.name, + QWEN3_235b.name, +] as const + +const QWEN3_IMAGE_MODELS = [] as const + +export const QWEN3_EMBEDDING_MODELS = [] as const + +const QWEN3_AUDIO_MODELS = [] as const + +const QWEN3_VIDEO_MODELS = [] as const + +// export type Qwen3ChatModels = (typeof QWEN3_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN3_LATEST.name]: ChatRequest + [QWEN3_0_6b.name]: ChatRequest + [QWEN3_1_7b.name]: ChatRequest + [QWEN3_4b.name]: ChatRequest + [QWEN3_8b.name]: ChatRequest + [QWEN3_14b.name]: ChatRequest + [QWEN3_30b.name]: ChatRequest + [QWEN3_32b.name]: ChatRequest + [QWEN3_235b.name]: ChatRequest +} + +export type Qwen3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN3_LATEST.name]: typeof QWEN3_LATEST.supports.input + [QWEN3_0_6b.name]: typeof 
QWEN3_0_6b.supports.input + [QWEN3_1_7b.name]: typeof QWEN3_1_7b.supports.input + [QWEN3_4b.name]: typeof QWEN3_4b.supports.input + [QWEN3_8b.name]: typeof QWEN3_8b.supports.input + [QWEN3_14b.name]: typeof QWEN3_14b.supports.input + [QWEN3_30b.name]: typeof QWEN3_30b.supports.input + [QWEN3_32b.name]: typeof QWEN3_32b.supports.input + [QWEN3_235b.name]: typeof QWEN3_235b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts new file mode 100644 index 00000000..8a7f1094 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts @@ -0,0 +1,60 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const QWQ_LATEST = { + name: 'qwq:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies ModelMeta + +const QWQ_32b = { + name: 'qwq:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies ModelMeta + +export const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const + +const QWQ_IMAGE_MODELS = [] as const + +export const QWQ_EMBEDDING_MODELS = [] as const + +const QWQ_AUDIO_MODELS = [] as const + +const QWQ_VIDEO_MODELS = [] as const + +// export type QwqChatModels = (typeof QWQ_MODELS)[number] + +// Manual type map for per-model provider options +export type QwqChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWQ_LATEST.name]: ChatRequest + [QWQ_32b.name]: ChatRequest +} + +export type QwqModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWQ_LATEST.name]: typeof QWQ_LATEST.supports.input + [QWQ_32b.name]: typeof QWQ_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts new file mode 100644 index 00000000..4d413fa1 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts @@ -0,0 +1,90 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const SAILOR2_LATEST = { + name: 'sailor2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.2gb', + context: 32_000, +} as const satisfies ModelMeta + +const SAILOR2_1b = { + name: 'sailor2:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.1gb', + context: 32_000, +} as const satisfies ModelMeta + +const SAILOR2_8b = { + name: 'sailor2:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.2gb', + context: 32_000, +} as const satisfies ModelMeta + +const SAILOR2_20b = { + name: 'sailor2:20b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '12gb', + context: 32_000, +} as const satisfies ModelMeta + +export const SAILOR2_MODELS = 
[ + SAILOR2_LATEST.name, + SAILOR2_8b.name, + SAILOR2_20b.name, +] as const + +const SAILOR2_IMAGE_MODELS = [] as const + +export const SAILOR2_EMBEDDING_MODELS = [] as const + +const SAILOR2_AUDIO_MODELS = [] as const + +const SAILOR2_VIDEO_MODELS = [] as const + +// export type Sailor2ChatModels = (typeof SAILOR2_MODELS)[number] + +// Manual type map for per-model provider options +export type Sailor2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SAILOR2_LATEST.name]: ChatRequest + [SAILOR2_1b.name]: ChatRequest + [SAILOR2_8b.name]: ChatRequest + [SAILOR2_20b.name]: ChatRequest +} + +export type Sailor2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SAILOR2_LATEST.name]: typeof SAILOR2_LATEST.supports.input + [SAILOR2_1b.name]: typeof SAILOR2_1b.supports.input + [SAILOR2_8b.name]: typeof SAILOR2_8b.supports.input + [SAILOR2_20b.name]: typeof SAILOR2_20b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts new file mode 100644 index 00000000..def7958d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts @@ -0,0 +1,91 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const SHIELDGEMMA_LATEST = { + name: 'shieldgemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies ModelMeta + +const SHIELDGEMMA_2b = { + name: 'shieldgemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.7gb', + context: 8_000, +} as const satisfies ModelMeta + +const SHIELDGEMMA_9b = { + name: 'shieldgemma:9b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies ModelMeta + +const SHIELDGEMMA_27b = { + name: 'shieldgemma:27b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '17gb', + context: 8_000, +} as const satisfies ModelMeta + +export const SHIELDGEMMA_MODELS = [ + SHIELDGEMMA_LATEST.name, + SHIELDGEMMA_2b.name, + SHIELDGEMMA_9b.name, + SHIELDGEMMA_27b.name, +] as const + +const SHIELDGEMMA_IMAGE_MODELS = [] as const + +export const SHIELDGEMMA_EMBEDDING_MODELS = [] as const + +const SHIELDGEMMA_AUDIO_MODELS = [] as const + +const SHIELDGEMMA_VIDEO_MODELS = [] as const + +// export type ShieldgemmaChatModels = (typeof SHIELDGEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type ShieldgemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SHIELDGEMMA_LATEST.name]: ChatRequest + [SHIELDGEMMA_2b.name]: ChatRequest + [SHIELDGEMMA_9b.name]: ChatRequest + [SHIELDGEMMA_27b.name]: ChatRequest +} + +export type ShieldgemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SHIELDGEMMA_LATEST.name]: typeof SHIELDGEMMA_LATEST.supports.input + [SHIELDGEMMA_2b.name]: typeof SHIELDGEMMA_2b.supports.input + [SHIELDGEMMA_9b.name]: typeof SHIELDGEMMA_9b.supports.input + [SHIELDGEMMA_27b.name]: typeof SHIELDGEMMA_27b.supports.input +} diff --git 
a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts new file mode 100644 index 00000000..5a25f511 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const SMALLTINKER_LATEST = { + name: 'smalltinker:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.6gb', + context: 32_000, +} as const satisfies ModelMeta + +const SMALLTINKER_3b = { + name: 'smalltinker:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.6gb', + context: 32_000, +} as const satisfies ModelMeta + +export const SMALLTINKER_MODELS = [ + SMALLTINKER_LATEST.name, + SMALLTINKER_3b.name, +] as const + +const SMALLTINKER_IMAGE_MODELS = [] as const + +export const SMALLTINKER_EMBEDDING_MODELS = [] as const + +const SMALLTINKER_AUDIO_MODELS = [] as const + +const SMALLTINKER_VIDEO_MODELS = [] as const + +// export type SmalltinkerChatModels = (typeof SMALLTINKER_MODELS)[number] + +// Manual type map for per-model provider options +export type SmalltinkerChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SMALLTINKER_LATEST.name]: ChatRequest + [SMALLTINKER_3b.name]: ChatRequest +} + +export type SmalltinkerModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SMALLTINKER_LATEST.name]: typeof SMALLTINKER_LATEST.supports.input + [SMALLTINKER_3b.name]: typeof SMALLTINKER_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts new file mode 100644 index 00000000..f90782f3 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts @@ -0,0 +1,91 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const SMOLLM_LATEST = { + name: 'smollm:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '991mb', + context: 2_000, +} as const satisfies ModelMeta + +const SMOLLM_135m = { + name: 'smollm:135m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '92mb', + context: 2_000, +} as const satisfies ModelMeta + +const SMOLLM_360m = { + name: 'smollm:360m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '229mb', + context: 2_000, +} as const satisfies ModelMeta + +const SMOLLM_1_7b = { + name: 'smollm:1.7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '991mb', + context: 2_000, +} as const satisfies ModelMeta + +export const SMOLLM_MODELS = [ + SMOLLM_LATEST.name, + SMOLLM_135m.name, + SMOLLM_360m.name, + SMOLLM_1_7b.name, +] as const + +const SMOLLM_IMAGE_MODELS = [] as const + +export const SMOLLM_EMBEDDING_MODELS = [] as const + +const SMOLLM_AUDIO_MODELS = [] 
as const + +const SMOLLM_VIDEO_MODELS = [] as const + +// export type SmollmChatModels = (typeof SMOLLM_MODELS)[number] + +// Manual type map for per-model provider options +export type SmollmChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SMOLLM_LATEST.name]: ChatRequest + [SMOLLM_135m.name]: ChatRequest + [SMOLLM_360m.name]: ChatRequest + [SMOLLM_1_7b.name]: ChatRequest +} + +export type SmollmModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SMOLLM_LATEST.name]: typeof SMOLLM_LATEST.supports.input + [SMOLLM_135m.name]: typeof SMOLLM_135m.supports.input + [SMOLLM_360m.name]: typeof SMOLLM_360m.supports.input + [SMOLLM_1_7b.name]: typeof SMOLLM_1_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts new file mode 100644 index 00000000..4ad0529f --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts @@ -0,0 +1,63 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const TINNYLLAMA_LATEST = { + name: 'tinnyllama:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '638mb', + context: 2_000, +} as const satisfies ModelMeta + +const TINNYLLAMA_1_1b = { + name: 'tinnyllama:1.1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '638mb', + context: 2_000, +} as const satisfies ModelMeta + +export const TINNYLLAMA_MODELS = [ + TINNYLLAMA_LATEST.name, + TINNYLLAMA_1_1b.name, +] as const + +const TINNYLLAMA_IMAGE_MODELS = [] as const + +export const TINNYLLAMA_EMBEDDING_MODELS = [] as const + +const TINNYLLAMA_AUDIO_MODELS = [] as const + +const TINNYLLAMA_VIDEO_MODELS = [] as const + +// export type TinnyllamaChatModels = (typeof TINNYLLAMA_MODELS)[number] + +// Manual type map for per-model provider options +export type TinnyllamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [TINNYLLAMA_LATEST.name]: ChatRequest + [TINNYLLAMA_1_1b.name]: ChatRequest +} + +export type TinnyllamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [TINNYLLAMA_LATEST.name]: typeof TINNYLLAMA_LATEST.supports.input + [TINNYLLAMA_1_1b.name]: typeof TINNYLLAMA_1_1b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts new file mode 100644 index 00000000..b2a3c275 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts @@ -0,0 +1,77 @@ +import type { ChatRequest } from 'ollama' + +interface ModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +const TULU3_LATEST = { + name: 'tulu3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies ModelMeta + +const TULU3_8b = { + name: 'tulu3:8b', + supports: { + input: ['text'], + 
output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies ModelMeta + +const TULU3_70b = { + name: 'tulu3:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '43gb', + context: 128_000, +} as const satisfies ModelMeta + +export const TULU3_MODELS = [ + TULU3_LATEST.name, + TULU3_8b.name, + TULU3_70b.name, +] as const + +const TULU3_IMAGE_MODELS = [] as const + +export const TULU3_EMBEDDING_MODELS = [] as const + +const TULU3_AUDIO_MODELS = [] as const + +const TULU3_VIDEO_MODELS = [] as const + +// export type Tulu3ChatModels = (typeof TULU3_MODELS)[number] + +// Manual type map for per-model provider options +export type Tulu3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [TULU3_LATEST.name]: ChatRequest + [TULU3_8b.name]: ChatRequest + [TULU3_70b.name]: ChatRequest +} + +export type Tulu3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [TULU3_LATEST.name]: typeof TULU3_LATEST.supports.input + [TULU3_8b.name]: typeof TULU3_8b.supports.input + [TULU3_70b.name]: typeof TULU3_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts new file mode 100644 index 00000000..3ff0d5fd --- /dev/null +++ b/packages/typescript/ai-ollama/src/model-meta.ts @@ -0,0 +1,273 @@ +// constants +import { ATHENE_MODELS } from './meta/model-meta-athene' +import { AYA_MODELS } from './meta/model-meta-aya' +import { CODEGEMMA_MODELS } from './meta/model-meta-codegemma' +import { CODELLAMA_MODELS } from './meta/model-meta-codellama' +import { COMMAND_R_MODELS } from './meta/model-meta-command-r' +import { COMMAND_R_PLUS_MODELS } from './meta/model-meta-command-r-plus' +import { COMMAND_R_7b_MODELS } from './meta/model-meta-command-r7b' +import { DEEPSEEK_CODER_V2_MODELS } from './meta/model-meta-deepseek-coder-v2' +import { DEEPSEEK_R1_MODELS } from './meta/model-meta-deepseek-r1' +import { DEEPSEEK_V3_1_MODELS } from './meta/model-meta-deepseek-v3.1' +import { DEVSTRAL_MODELS } from './meta/model-meta-devstral' +import { DOLPHIN3_MODELS } from './meta/model-meta-dolphin3' +import { EXAONE3_5MODELS } from './meta/model-meta-exaone3.5' +import { FALCON2_MODELS } from './meta/model-meta-falcon2' +import { FALCON3_MODELS } from './meta/model-meta-falcon3' +import { FIREFUNCTION_V2_MODELS } from './meta/model-meta-firefunction-v2' +import { GEMMA_MODELS } from './meta/model-meta-gemma' +import { GEMMA2_MODELS } from './meta/model-meta-gemma2' +import { GEMMA3_MODELS } from './meta/model-meta-gemma3' +import { GRANITE3_DENSE_MODELS } from './meta/model-meta-granite3-dense' +import { GRANITE3_GUARDIAN_MODELS } from './meta/model-meta-granite3-guardian' +import { GRANITE3_MOE_MODELS } from './meta/model-meta-granite3-moe' +import { GRANITE3_1_DENSE_MODELS } from './meta/model-meta-granite3.1-dense' +import { GRANITE3_1_MOE_MODELS } from './meta/model-meta-granite3.1-moe' +import { LLAMA_GUARD3_MODELS } from './meta/model-meta-llama-guard3' +import { LLAMA2_MODELS } from './meta/model-meta-llama2' +import { LLAMA3_MODELS } from './meta/model-meta-llama3' +import { LLAMA3_CHATQA_MODELS } from './meta/model-meta-llama3-chatqa' +import { LLAMA3_GRADIENT_MODELS } from './meta/model-meta-llama3-gradient' +import { LLAMA3_1_MODELS } from './meta/model-meta-llama3.1' +import { LLAMA3_2_MODELS } from './meta/model-meta-llama3.2' +import { LLAMA3_2_VISION_MODELS } from 
'./meta/model-meta-llama3.2-vision' +import { LLAMA3_3_MODELS } from './meta/model-meta-llama3.3' +import { LLAMA4_MODELS } from './meta/model-meta-llama4' +import { LLAVA_MODELS } from './meta/model-meta-llava' +import { LLAVA_LLAMA3_MODELS } from './meta/model-meta-llava-llama3' +import { LLAVA_PHI3_MODELS } from './meta/model-meta-llava-phi3' +import { MARCO_O1_MODELS } from './meta/model-meta-marco-o1' +import { MISTRAL_MODELS } from './meta/model-meta-mistral' +import { MISTRAL_LARGE_MODELS } from './meta/model-meta-mistral-large' +import { MISTRAL_NEMO_MODELS } from './meta/model-meta-mistral-nemo' +import { MISTRAL_SMALL_MODELS } from './meta/model-meta-mistral-small' +import { MIXTRAL_MODELS } from './meta/model-meta-mixtral' +import { MOONDREAM_MODELS } from './meta/model-meta-moondream' +import { NEMOTRON_MODELS } from './meta/model-meta-nemotron' +import { NEMOTRON_MINI_MODELS } from './meta/model-meta-nemotron-mini' +import { OLMO2_MODELS } from './meta/model-meta-olmo2' +import { OPENCODER_MODELS } from './meta/model-meta-opencoder' +import { OPENHERMES_MODELS } from './meta/model-meta-openhermes' +import { PHI3_MODELS } from './meta/model-meta-phi3' +import { PHI4_MODELS } from './meta/model-meta-phi4' +import { QWEN_MODELS } from './meta/model-meta-qwen' +import { QWEN2_MODELS } from './meta/model-meta-qwen2' +import { QWEN2_5_MODELS } from './meta/model-meta-qwen2.5' +import { QWEN2_5_CODER_MODELS } from './meta/model-meta-qwen2.5-coder' +import { QWEN3_MODELS } from './meta/model-meta-qwen3' +import { QWQ_MODELS } from './meta/model-meta-qwq' +import { SAILOR2_MODELS } from './meta/model-meta-sailor2' +import { SHIELDGEMMA_MODELS } from './meta/model-meta-shieldgemma' +import { SMALLTINKER_MODELS } from './meta/model-meta-smalltinker' +import { SMOLLM_MODELS } from './meta/model-meta-smollm' +import { TINNYLLAMA_MODELS } from './meta/model-meta-tinyllama' +import { TULU3_MODELS } from './meta/model-meta-tulu3' + +// types +import type { AtheneModelInputModalitiesByName } from './meta/model-meta-athene' +import type { AyaModelInputModalitiesByName } from './meta/model-meta-aya' +import type { CodegemmaModelInputModalitiesByName } from './meta/model-meta-codegemma' +import type { CodellamaModelInputModalitiesByName } from './meta/model-meta-codellama' +import type { CommandRModelInputModalitiesByName } from './meta/model-meta-command-r' +import type { CommandRPlusModelInputModalitiesByName } from './meta/model-meta-command-r-plus' +import type { CommandR7bModelInputModalitiesByName } from './meta/model-meta-command-r7b' +import type { DeepseekCoderV2ModelInputModalitiesByName } from './meta/model-meta-deepseek-coder-v2' +import type { DeepseekR1ModelInputModalitiesByName } from './meta/model-meta-deepseek-r1' +import type { Deepseekv3_1ModelInputModalitiesByName } from './meta/model-meta-deepseek-v3.1' +import type { DevstralModelInputModalitiesByName } from './meta/model-meta-devstral' +import type { Dolphin3ModelInputModalitiesByName } from './meta/model-meta-dolphin3' +import type { Exaone3_5ModelInputModalitiesByName } from './meta/model-meta-exaone3.5' +import type { Falcon2ModelInputModalitiesByName } from './meta/model-meta-falcon2' +import type { Falcon3ModelInputModalitiesByName } from './meta/model-meta-falcon3' +import type { Firefunction_V2ModelInputModalitiesByName } from './meta/model-meta-firefunction-v2' +import type { GemmaModelInputModalitiesByName } from './meta/model-meta-gemma' +import type { Gemma2ModelInputModalitiesByName } from 
'./meta/model-meta-gemma2' +import type { Gemma3ModelInputModalitiesByName } from './meta/model-meta-gemma3' +import type { Granite3DenseModelInputModalitiesByName } from './meta/model-meta-granite3-dense' +import type { Granite3GuardianModelInputModalitiesByName } from './meta/model-meta-granite3-guardian' +import type { Granite3MoeModelInputModalitiesByName } from './meta/model-meta-granite3-moe' +import type { Granite3_1DenseModelInputModalitiesByName } from './meta/model-meta-granite3.1-dense' +import type { Granite3_1MoeModelInputModalitiesByName } from './meta/model-meta-granite3.1-moe' +import type { LlamaGuard3ModelInputModalitiesByName } from './meta/model-meta-llama-guard3' +import type { Llama2ModelInputModalitiesByName } from './meta/model-meta-llama2' +import type { Llama3ModelInputModalitiesByName } from './meta/model-meta-llama3' +import type { Llama3ChatQaModelInputModalitiesByName } from './meta/model-meta-llama3-chatqa' +import type { Llama3GradientModelInputModalitiesByName } from './meta/model-meta-llama3-gradient' +import type { Llama3_1ModelInputModalitiesByName } from './meta/model-meta-llama3.1' +import type { Llama3_2ModelInputModalitiesByName } from './meta/model-meta-llama3.2' +import type { Llama3_2VisionModelInputModalitiesByName } from './meta/model-meta-llama3.2-vision' +import type { Llama3_3ModelInputModalitiesByName } from './meta/model-meta-llama3.3' +import type { Llama3_4ModelInputModalitiesByName } from './meta/model-meta-llama4' +import type { llavaModelInputModalitiesByName } from './meta/model-meta-llava' +import type { LlavaLlamaModelInputModalitiesByName } from './meta/model-meta-llava-llama3' +import type { LlavaPhi3ModelInputModalitiesByName } from './meta/model-meta-llava-phi3' +import type { MarcoO1ModelInputModalitiesByName } from './meta/model-meta-marco-o1' +import type { MistralModelInputModalitiesByName } from './meta/model-meta-mistral' +import type { MistralLargeModelInputModalitiesByName } from './meta/model-meta-mistral-large' +import type { MistralNemoModelInputModalitiesByName } from './meta/model-meta-mistral-nemo' +import type { MistralSmallModelInputModalitiesByName } from './meta/model-meta-mistral-small' +import type { MixtralModelInputModalitiesByName } from './meta/model-meta-mixtral' +import type { MoondreamModelInputModalitiesByName } from './meta/model-meta-moondream' +import type { NemotronModelInputModalitiesByName } from './meta/model-meta-nemotron' +import type { NemotronMiniModelInputModalitiesByName } from './meta/model-meta-nemotron-mini' +import type { Olmo2ModelInputModalitiesByName } from './meta/model-meta-olmo2' +import type { OpencoderModelInputModalitiesByName } from './meta/model-meta-opencoder' +import type { OpenhermesModelInputModalitiesByName } from './meta/model-meta-openhermes' +import type { Phi3ModelInputModalitiesByName } from './meta/model-meta-phi3' +import type { Phi4ModelInputModalitiesByName } from './meta/model-meta-phi4' +import type { QwenModelInputModalitiesByName } from './meta/model-meta-qwen' +import type { Qwen2ModelInputModalitiesByName } from './meta/model-meta-qwen2' +import type { Qwen2_5ModelInputModalitiesByName } from './meta/model-meta-qwen2.5' +import type { Qwen2_5CoderModelInputModalitiesByName } from './meta/model-meta-qwen2.5-coder' +import type { Qwen3ModelInputModalitiesByName } from './meta/model-meta-qwen3' +import type { QwqModelInputModalitiesByName } from './meta/model-meta-qwq' +import type { Sailor2ModelInputModalitiesByName } from './meta/model-meta-sailor2' 
+import type { ShieldgemmaModelInputModalitiesByName } from './meta/model-meta-shieldgemma' +import type { SmalltinkerModelInputModalitiesByName } from './meta/model-meta-smalltinker' +import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm' +import type { TinnyllamaModelInputModalitiesByName } from './meta/model-meta-tinyllama' +import type { Tulu3ModelInputModalitiesByName } from './meta/model-meta-tulu3' + +export interface LlamaModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} + +export const OLLAMA_MODELS = [ + ...ATHENE_MODELS, + ...AYA_MODELS, + ...CODEGEMMA_MODELS, + ...CODELLAMA_MODELS, + ...COMMAND_R_PLUS_MODELS, + ...COMMAND_R_MODELS, + ...COMMAND_R_7b_MODELS, + ...DEEPSEEK_CODER_V2_MODELS, + ...DEEPSEEK_R1_MODELS, + ...DEEPSEEK_V3_1_MODELS, + ...DEVSTRAL_MODELS, + ...DOLPHIN3_MODELS, + ...EXAONE3_5MODELS, + ...FALCON2_MODELS, + ...FALCON3_MODELS, + ...FIREFUNCTION_V2_MODELS, + ...GEMMA_MODELS, + ...GEMMA2_MODELS, + ...GEMMA3_MODELS, + ...GRANITE3_DENSE_MODELS, + ...GRANITE3_GUARDIAN_MODELS, + ...GRANITE3_MOE_MODELS, + ...GRANITE3_1_DENSE_MODELS, + ...GRANITE3_1_MOE_MODELS, + ...LLAMA_GUARD3_MODELS, + ...LLAMA2_MODELS, + ...LLAMA3_CHATQA_MODELS, + ...LLAMA3_GRADIENT_MODELS, + ...LLAMA3_1_MODELS, + ...LLAMA3_2_MODELS, + ...LLAMA3_2_VISION_MODELS, + ...LLAMA3_2_MODELS, + ...LLAMA3_3_MODELS, + ...LLAMA3_MODELS, + ...LLAMA4_MODELS, + ...LLAVA_LLAMA3_MODELS, + ...LLAVA_PHI3_MODELS, + ...LLAVA_MODELS, + ...MARCO_O1_MODELS, + ...MISTRAL_LARGE_MODELS, + ...MISTRAL_NEMO_MODELS, + ...MISTRAL_SMALL_MODELS, + ...MISTRAL_MODELS, + ...MIXTRAL_MODELS, + ...MOONDREAM_MODELS, + ...NEMOTRON_MINI_MODELS, + ...NEMOTRON_MODELS, + ...OLMO2_MODELS, + ...OPENCODER_MODELS, + ...OPENHERMES_MODELS, + ...PHI3_MODELS, + ...PHI4_MODELS, + ...QWEN_MODELS, + ...QWEN2_5_CODER_MODELS, + ...QWEN2_5_MODELS, + ...QWEN2_MODELS, + ...QWEN3_MODELS, + ...QWQ_MODELS, + ...SAILOR2_MODELS, + ...SHIELDGEMMA_MODELS, + ...SMALLTINKER_MODELS, + ...SMOLLM_MODELS, + ...TINNYLLAMA_MODELS, + ...TULU3_MODELS, +] as const + +export type OllamaModelInputModalitiesByName = + AtheneModelInputModalitiesByName & + AyaModelInputModalitiesByName & + CodegemmaModelInputModalitiesByName & + CodellamaModelInputModalitiesByName & + CommandRPlusModelInputModalitiesByName & + CommandRModelInputModalitiesByName & + CommandR7bModelInputModalitiesByName & + DeepseekCoderV2ModelInputModalitiesByName & + DeepseekR1ModelInputModalitiesByName & + Deepseekv3_1ModelInputModalitiesByName & + DevstralModelInputModalitiesByName & + Dolphin3ModelInputModalitiesByName & + Exaone3_5ModelInputModalitiesByName & + Falcon2ModelInputModalitiesByName & + Falcon3ModelInputModalitiesByName & + Firefunction_V2ModelInputModalitiesByName & + GemmaModelInputModalitiesByName & + Gemma2ModelInputModalitiesByName & + Gemma3ModelInputModalitiesByName & + Granite3DenseModelInputModalitiesByName & + Granite3GuardianModelInputModalitiesByName & + Granite3MoeModelInputModalitiesByName & + Granite3_1DenseModelInputModalitiesByName & + Granite3_1MoeModelInputModalitiesByName & + LlamaGuard3ModelInputModalitiesByName & + Llama2ModelInputModalitiesByName & + Llama3ChatQaModelInputModalitiesByName & + Llama3GradientModelInputModalitiesByName & + Llama3_1ModelInputModalitiesByName & + Llama3_2VisionModelInputModalitiesByName & + 
Llama3_2ModelInputModalitiesByName & + Llama3_3ModelInputModalitiesByName & + Llama3ModelInputModalitiesByName & + Llama3_4ModelInputModalitiesByName & + LlavaLlamaModelInputModalitiesByName & + LlavaPhi3ModelInputModalitiesByName & + llavaModelInputModalitiesByName & + MarcoO1ModelInputModalitiesByName & + MistralLargeModelInputModalitiesByName & + MistralNemoModelInputModalitiesByName & + MistralSmallModelInputModalitiesByName & + MistralModelInputModalitiesByName & + MixtralModelInputModalitiesByName & + MoondreamModelInputModalitiesByName & + NemotronMiniModelInputModalitiesByName & + NemotronModelInputModalitiesByName & + Olmo2ModelInputModalitiesByName & + OpencoderModelInputModalitiesByName & + OpenhermesModelInputModalitiesByName & + Phi3ModelInputModalitiesByName & + Phi4ModelInputModalitiesByName & + QwenModelInputModalitiesByName & + Qwen2_5CoderModelInputModalitiesByName & + Qwen2_5ModelInputModalitiesByName & + Qwen2ModelInputModalitiesByName & + Qwen3ModelInputModalitiesByName & + QwqModelInputModalitiesByName & + Sailor2ModelInputModalitiesByName & + ShieldgemmaModelInputModalitiesByName & + SmalltinkerModelInputModalitiesByName & + SmollmModelInputModalitiesByName & + TinnyllamaModelInputModalitiesByName & + Tulu3ModelInputModalitiesByName diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts index fc6080c0..0c457861 100644 --- a/packages/typescript/ai-ollama/src/ollama-adapter.ts +++ b/packages/typescript/ai-ollama/src/ollama-adapter.ts @@ -1,5 +1,9 @@ import { Ollama as OllamaSDK } from 'ollama' import { BaseAdapter, convertZodToJsonSchema } from '@tanstack/ai' + +import { OLLAMA_MODELS } from './model-meta' + +import type { OllamaModelInputModalitiesByName } from './model-meta' import type { AbortableAsyncIterator, ChatRequest, @@ -23,60 +27,8 @@ export interface OllamaConfig { host?: string } -const OLLAMA_MODELS = [ - 'llama2', - 'llama3', - 'codellama', - 'mistral', - 'mixtral', - 'phi', - 'neural-chat', - 'starling-lm', - 'orca-mini', - 'vicuna', - 'nous-hermes', - 'nomic-embed-text', - 'gpt-oss:20b', -] as const - const OLLAMA_EMBEDDING_MODELS = [] as const -/** - * Type-only map from Ollama model name to its supported input modalities. - * Ollama models have varying multimodal capabilities: - * - Vision models (llava, bakllava, etc.) support text + image - * - Most text models support text only - * - * Note: This is a placeholder - Ollama models are dynamically loaded, - * so we provide a base type that can be extended. - * - * @see https://github.com/ollama/ollama/blob/main/docs/api.md - */ -export type OllamaModelInputModalitiesByName = { - // Vision-capable models (text + image) - llava: readonly ['text', 'image'] - bakllava: readonly ['text', 'image'] - 'llava-llama3': readonly ['text', 'image'] - 'llava-phi3': readonly ['text', 'image'] - moondream: readonly ['text', 'image'] - minicpm: readonly ['text', 'image'] - - // Text-only models - llama2: readonly ['text'] - llama3: readonly ['text'] - codellama: readonly ['text'] - mistral: readonly ['text'] - mixtral: readonly ['text'] - phi: readonly ['text'] - 'neural-chat': readonly ['text'] - 'starling-lm': readonly ['text'] - 'orca-mini': readonly ['text'] - vicuna: readonly ['text'] - 'nous-hermes': readonly ['text'] - 'nomic-embed-text': readonly ['text'] - 'gpt-oss:20b': readonly ['text'] -} - /** * Type-only map from Ollama model name to its provider-specific options. * Ollama models share the same options interface. 
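With the hand-maintained lists removed, the adapter's model names and input modalities are now both derived from the generated metadata in model-meta.ts. A minimal usage sketch, assuming only the exports shown above (OLLAMA_MODELS and OllamaModelInputModalitiesByName); this is illustrative only and not part of the patch:

import { OLLAMA_MODELS } from './model-meta'
import type { OllamaModelInputModalitiesByName } from './model-meta'

// Union of every registered model name, e.g. 'qwen3:8b' | 'phi4:14b' | ...
type OllamaModelName = (typeof OLLAMA_MODELS)[number]

// Type-level lookup of a model's input modalities; 'qwen3:8b' resolves to
// readonly ['text'] because its meta constant declares input: ['text'].
type Qwen3Inputs = OllamaModelInputModalitiesByName['qwen3:8b']

// Runtime guard narrowing an arbitrary string to a known model name.
function isKnownOllamaModel(name: string): name is OllamaModelName {
  return (OLLAMA_MODELS as readonly string[]).includes(name)
}

Because each meta constant is declared with "as const satisfies ModelMeta", the literal model names and modality tuples carry through into these derived types while the object shape is still validated against ModelMeta.
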
diff --git a/packages/typescript/ai-ollama/tsconfig.json b/packages/typescript/ai-ollama/tsconfig.json index ea11c109..e9686b6c 100644 --- a/packages/typescript/ai-ollama/tsconfig.json +++ b/packages/typescript/ai-ollama/tsconfig.json @@ -4,6 +4,6 @@ "outDir": "dist", "rootDir": "src" }, - "include": ["src/**/*.ts", "src/**/*.tsx"], + "include": ["src/**/*.ts", "src/**/*.tsx", "src/meta/model-meta-devstralts"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } From 7ada64c5254bd436c82b6ae962d8eb748dee6f64 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Tue, 9 Dec 2025 10:33:54 +0100 Subject: [PATCH 2/6] fix: build stuff and type cleanup --- .../ai-ollama/src/meta/model-meta-athene.ts | 8 +-- .../ai-ollama/src/meta/model-meta-aya.ts | 8 +-- .../src/meta/model-meta-codegemma.ts | 8 +-- .../src/meta/model-meta-codellama.ts | 8 +-- .../src/meta/model-meta-command-r-plus.ts | 8 +-- .../src/meta/model-meta-command-r.ts | 8 +-- .../src/meta/model-meta-command-r7b.ts | 8 +-- .../src/meta/model-meta-deepseek-coder-v2.ts | 8 +-- .../src/meta/model-meta-deepseek-ocr.ts | 64 +++++++++++++++++++ .../src/meta/model-meta-deepseek-r1.ts | 8 +-- .../src/meta/model-meta-deepseek-v3.1.ts | 8 +-- .../ai-ollama/src/meta/model-meta-devstral.ts | 8 +-- .../ai-ollama/src/meta/model-meta-dolphin3.ts | 8 +-- .../src/meta/model-meta-exaone3.5.ts | 29 +++------ .../ai-ollama/src/meta/model-meta-falcon2.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-falcon3.ts | 31 +++------ .../src/meta/model-meta-firefunction-v2.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-gemma.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-gemma2.ts | 29 +++------ .../ai-ollama/src/meta/model-meta-gemma3.ts | 33 ++++------ .../src/meta/model-meta-granite3-dense.ts | 27 +++----- .../src/meta/model-meta-granite3-guardian.ts | 27 +++----- .../src/meta/model-meta-granite3-moe.ts | 27 +++----- .../src/meta/model-meta-granite3.1-dense.ts | 27 +++----- .../src/meta/model-meta-granite3.1-moe.ts | 27 +++----- .../src/meta/model-meta-llama-guard3.ts | 27 +++----- .../ai-ollama/src/meta/model-meta-llama2.ts | 29 +++------ .../src/meta/model-meta-llama3-chatqa.ts | 27 +++----- .../src/meta/model-meta-llama3-gradient.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-llama3.1.ts | 29 +++------ .../src/meta/model-meta-llama3.2-vision.ts | 29 +++------ .../ai-ollama/src/meta/model-meta-llama3.2.ts | 27 +++----- .../ai-ollama/src/meta/model-meta-llama3.3.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-llama3.ts | 27 +++----- .../ai-ollama/src/meta/model-meta-llama4.ts | 27 +++----- .../src/meta/model-meta-llava-llama3.ts | 25 ++------ .../src/meta/model-meta-llava-phi3.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-llava.ts | 29 +++------ .../ai-ollama/src/meta/model-meta-marco-o1.ts | 25 ++------ .../src/meta/model-meta-mistral-large.ts | 25 ++------ .../src/meta/model-meta-mistral-nemo.ts | 25 ++------ .../src/meta/model-meta-mistral-small.ts | 27 +++----- .../ai-ollama/src/meta/model-meta-mistral.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-mixtral.ts | 27 +++----- .../src/meta/model-meta-moondream.ts | 25 ++------ .../src/meta/model-meta-nemotron-mini.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-nemotron.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-olmo2.ts | 27 +++----- .../src/meta/model-meta-opencoder.ts | 27 +++----- .../src/meta/model-meta-openhermes.ts | 27 +++----- .../ai-ollama/src/meta/model-meta-phi3.ts | 27 +++----- .../ai-ollama/src/meta/model-meta-phi4.ts | 25 ++------ 
.../ai-ollama/src/meta/model-meta-qwen.ts | 39 ++++------- .../src/meta/model-meta-qwen2.5-coder.ts | 35 ++++------ .../ai-ollama/src/meta/model-meta-qwen2.5.ts | 35 ++++------ .../ai-ollama/src/meta/model-meta-qwen2.ts | 31 +++------ .../ai-ollama/src/meta/model-meta-qwen3.ts | 39 ++++------- .../ai-ollama/src/meta/model-meta-qwq.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-sailor2.ts | 29 +++------ .../src/meta/model-meta-shieldgemma.ts | 29 +++------ .../src/meta/model-meta-smalltinker.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-smollm.ts | 29 +++------ .../src/meta/model-meta-tinyllama.ts | 25 ++------ .../ai-ollama/src/meta/model-meta-tulu3.ts | 27 +++----- .../ai-ollama/src/meta/models-meta.ts | 11 ++++ .../typescript/ai-ollama/src/model-meta.ts | 12 ---- packages/typescript/ai-ollama/tsconfig.json | 2 +- 67 files changed, 550 insertions(+), 1048 deletions(-) create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts create mode 100644 packages/typescript/ai-ollama/src/meta/models-meta.ts diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts index 9442873c..fe46b033 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts @@ -39,13 +39,13 @@ export const ATHENE_MODELS = [ ATHENE_V2_72b.name, ] as const -const ATHENE_IMAGE_MODELS = [] as const +// const ATHENE_IMAGE_MODELS = [] as const -export const ATHENE_EMBEDDING_MODELS = [] as const +// export const ATHENE_EMBEDDING_MODELS = [] as const -const ATHENE_AUDIO_MODELS = [] as const +// const ATHENE_AUDIO_MODELS = [] as const -const ATHENE_VIDEO_MODELS = [] as const +// const ATHENE_VIDEO_MODELS = [] as const // export type AtheneChatModels = (typeof ATHENE_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts index 9b58bdd9..16bd16b0 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -47,13 +47,13 @@ const AYA_35b = { export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const -const AYA_IMAGE_MODELS = [] as const +// const AYA_IMAGE_MODELS = [] as const -export const AYA_EMBEDDING_MODELS = [] as const +// export const AYA_EMBEDDING_MODELS = [] as const -const AYA_AUDIO_MODELS = [] as const +// const AYA_AUDIO_MODELS = [] as const -const AYA_VIDEO_MODELS = [] as const +// const AYA_VIDEO_MODELS = [] as const // export type AyaChatModels = (typeof AYA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts index b75d5b88..93937eda 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -51,13 +51,13 @@ export const CODEGEMMA_MODELS = [ CODEGEMMA_35b.name, ] as const -const CODEGEMMA_IMAGE_MODELS = [] as const +// const CODEGEMMA_IMAGE_MODELS = [] as const -export const CODEGEMMA_EMBEDDING_MODELS = [] as const +// export const CODEGEMMA_EMBEDDING_MODELS = [] as const -const CODEGEMMA_AUDIO_MODELS = [] as const +// const CODEGEMMA_AUDIO_MODELS = [] as const -const CODEGEMMA_VIDEO_MODELS = [] as const +// const CODEGEMMA_VIDEO_MODELS = [] as const // export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number] diff --git 
a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts index 22badae9..2a0b3b54 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -75,13 +75,13 @@ export const CODELLAMA_MODELS = [ CODELLAMA_70b.name, ] as const -const CODELLAMA_IMAGE_MODELS = [] as const +// const CODELLAMA_IMAGE_MODELS = [] as const -export const CODELLAMA_EMBEDDING_MODELS = [] as const +// export const CODELLAMA_EMBEDDING_MODELS = [] as const -const CODELLAMA_AUDIO_MODELS = [] as const +// const CODELLAMA_AUDIO_MODELS = [] as const -const CODELLAMA_VIDEO_MODELS = [] as const +// const CODELLAMA_VIDEO_MODELS = [] as const // export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts index 364e7f99..941a7e9f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts @@ -39,13 +39,13 @@ export const COMMAND_R_PLUS_MODELS = [ COMMAND_R_PLUS_104b.name, ] as const -const COMMAND_R_PLUS_IMAGE_MODELS = [] as const +// const COMMAND_R_PLUS_IMAGE_MODELS = [] as const -export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const +// export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const -const COMMAND_R_PLUS_AUDIO_MODELS = [] as const +// const COMMAND_R_PLUS_AUDIO_MODELS = [] as const -const COMMAND_R_PLUS_VIDEO_MODELS = [] as const +// const COMMAND_R_PLUS_VIDEO_MODELS = [] as const // export type CommandRChatModels = (typeof COMMAND_R_PLUS_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts index dbac57f7..afce50e7 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts @@ -39,13 +39,13 @@ export const COMMAND_R_MODELS = [ COMMAND_R_35b.name, ] as const -const COMMAND_R_IMAGE_MODELS = [] as const +// const COMMAND_R_IMAGE_MODELS = [] as const -export const COMMAND_R_EMBEDDING_MODELS = [] as const +// export const COMMAND_R_EMBEDDING_MODELS = [] as const -const COMMAND_R_AUDIO_MODELS = [] as const +// const COMMAND_R_AUDIO_MODELS = [] as const -const COMMAND_R_VIDEO_MODELS = [] as const +// const COMMAND_R_VIDEO_MODELS = [] as const // export type CommandRChatModels = (typeof COMMAND_R_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts index 848e5891..eebee3b6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts @@ -39,13 +39,13 @@ export const COMMAND_R_7b_MODELS = [ COMMAND_R_7b_7b.name, ] as const -const COMMAND_R_7b_IMAGE_MODELS = [] as const +// const COMMAND_R_7b_IMAGE_MODELS = [] as const -export const COMMAND_R_7b_EMBEDDING_MODELS = [] as const +// export const COMMAND_R_7b_EMBEDDING_MODELS = [] as const -const COMMAND_R_7b_AUDIO_MODELS = [] as const +// const COMMAND_R_7b_AUDIO_MODELS = [] as const -const COMMAND_R_7b_VIDEO_MODELS = [] as const +// const COMMAND_R_7b_VIDEO_MODELS = [] as const // export type CommandRChatModels = (typeof COMMAND_R7b_MODELS)[number] diff --git 
a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts index 280391a4..2c274a6d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts @@ -51,13 +51,13 @@ export const DEEPSEEK_CODER_V2_MODELS = [ DEEPSEEK_CODER_V2_236b.name, ] as const -const DEEPSEEK_CODER_V2_IMAGE_MODELS = [] as const +// const DEEPSEEK_CODER_V2_IMAGE_MODELS = [] as const -export const DEEPSEEK_CODER_V2_EMBEDDING_MODELS = [] as const +// export const DEEPSEEK_CODER_V2_EMBEDDING_MODELS = [] as const -const DEEPSEEK_CODER_V2_AUDIO_MODELS = [] as const +// const DEEPSEEK_CODER_V2_AUDIO_MODELS = [] as const -const DEEPSEEK_CODER_V2_VIDEO_MODELS = [] as const +// const DEEPSEEK_CODER_V2_VIDEO_MODELS = [] as const // export type DeepseekCoderV2ChatModels = (typeof DEEPSEEK_CODER_V2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts new file mode 100644 index 00000000..4d3b4266 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_OCR_LATEST = { + name: 'deepseek-ocr:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '6.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_OCR_3b = { + name: 'deepseek-ocr:3b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '6.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_OCR_MODELS = [ + DEEPSEEK_OCR_LATEST.name, + DEEPSEEK_OCR_3b.name, +] as const + +// export const DEEPSEEK_OCR_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_OCR_EMBEDDING_MODELS = [] as const + +// export const DEEPSEEK_OCR_AUDIO_MODELS = [] as const + +// export const DEEPSEEK_OCR_VIDEO_MODELS = [] as const + +// export type DeepseekOcrChatModels = (typeof DEEPSEEK_OCR_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekOcrChatModelProviderOptionsByName = { + // Both OCR models take the base Ollama ChatRequest options + [DEEPSEEK_OCR_LATEST.name]: ChatRequest + [DEEPSEEK_OCR_3b.name]: ChatRequest +} + +export type DeepseekOcrModelInputModalitiesByName = { + // Models with text and image input only + [DEEPSEEK_OCR_LATEST.name]: typeof DEEPSEEK_OCR_LATEST.supports.input + [DEEPSEEK_OCR_3b.name]: typeof DEEPSEEK_OCR_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts index 00642c79..1159f05b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts @@ -99,13 +99,13 @@ export const DEEPSEEK_R1_MODELS = [ DEEPSEEK_R1_671b.name, ] as const -const DEEPSEEK_R1_IMAGE_MODELS = [] as const +// const DEEPSEEK_R1_IMAGE_MODELS = [] as const -export const DEEPSEEK_R1_EMBEDDING_MODELS = [] as const +// export const 
DEEPSEEK_R1_EMBEDDING_MODELS = [] as const -const DEEPSEEK_R1_AUDIO_MODELS = [] as const +// const DEEPSEEK_R1_AUDIO_MODELS = [] as const -const DEEPSEEK_R1_VIDEO_MODELS = [] as const +// const DEEPSEEK_R1_VIDEO_MODELS = [] as const // export type DeepseekChatModels = (typeof DEEPSEEK_R1_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts index a1424384..0cd5e3ce 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts @@ -52,13 +52,13 @@ export const DEEPSEEK_V3_1_MODELS = [ DEEPSEEK_V3_1_671b_cloud.name, ] as const -const DEEPSEEK_V3_1_IMAGE_MODELS = [] as const +// export const DEEPSEEK_V3_1_IMAGE_MODELS = [] as const -export const DEEPSEEK_V3_1_EMBEDDING_MODELS = [] as const +// export const DEEPSEEK_V3_1_EMBEDDING_MODELS = [] as const -const DEEPSEEK_V3_1_AUDIO_MODELS = [] as const +// export const DEEPSEEK_V3_1_AUDIO_MODELS = [] as const -const DEEPSEEK_V3_1_VIDEO_MODELS = [] as const +// export const DEEPSEEK_V3_1_VIDEO_MODELS = [] as const // export type DeepseekV3_1ChatModels = (typeof DEEPSEEK_V3_1__MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts index 063c7dad..6c1fff1d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts @@ -39,13 +39,13 @@ export const DEVSTRAL_MODELS = [ DEVSTRAL_24b.name, ] as const -const DEVSTRAL_IMAGE_MODELS = [] as const +// const DEVSTRAL_IMAGE_MODELS = [] as const -export const DEVSTRAL_EMBEDDING_MODELS = [] as const +// export const DEVSTRAL_EMBEDDING_MODELS = [] as const -const DEVSTRAL_AUDIO_MODELS = [] as const +// const DEVSTRAL_AUDIO_MODELS = [] as const -const DEVSTRAL_VIDEO_MODELS = [] as const +// const DEVSTRAL_VIDEO_MODELS = [] as const // export type DevstralChatModels = (typeof DEVSTRAL_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts index 18be8d21..d5b53f63 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts @@ -36,13 +36,13 @@ const DOLPHIN3_8b = { export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const -const DOLPHIN3_IMAGE_MODELS = [] as const +// const DOLPHIN3_IMAGE_MODELS = [] as const -export const DOLPHIN3_EMBEDDING_MODELS = [] as const +// export const DOLPHIN3_EMBEDDING_MODELS = [] as const -const DOLPHIN3_AUDIO_MODELS = [] as const +// const DOLPHIN3_AUDIO_MODELS = [] as const -const DOLPHIN3_VIDEO_MODELS = [] as const +// const DOLPHIN3_VIDEO_MODELS = [] as const // export type Dolphin3ChatModels = (typeof DOLPHIN3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts index 581f9a13..131f57c1 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 
'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const EXAONE3_5_LATEST = { name: 'exaone3.5:latest', @@ -21,7 +10,7 @@ const EXAONE3_5_LATEST = { }, size: '4.8gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const EXAONE3_5_2_4b = { name: 'exaone3.5:2.4b', @@ -32,7 +21,7 @@ const EXAONE3_5_2_4b = { }, size: '1.6gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const EXAONE3_5_7_1b = { name: 'exaone3.5:7.8b', @@ -43,7 +32,7 @@ const EXAONE3_5_7_1b = { }, size: '4.8gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const EXAONE3_5_32b = { name: 'exaone3.5:32b', @@ -54,7 +43,7 @@ const EXAONE3_5_32b = { }, size: '19gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const EXAONE3_5MODELS = [ EXAONE3_5_LATEST.name, @@ -63,13 +52,13 @@ export const EXAONE3_5MODELS = [ EXAONE3_5_32b.name, ] as const -const EXAONE3_5IMAGE_MODELS = [] as const +// const EXAONE3_5IMAGE_MODELS = [] as const -export const EXAONE3_5EMBEDDING_MODELS = [] as const +// export const EXAONE3_5EMBEDDING_MODELS = [] as const -const EXAONE3_5AUDIO_MODELS = [] as const +// const EXAONE3_5AUDIO_MODELS = [] as const -const EXAONE3_5VIDEO_MODELS = [] as const +// const EXAONE3_5VIDEO_MODELS = [] as const // export type AyaChatModels = (typeof EXAONE3_5MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts index 89ef695d..f353b2f4 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const FALCON2_LATEST = { name: 'falcon2:latest', @@ -21,7 +10,7 @@ const FALCON2_LATEST = { }, size: '6.4gb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const FALCON2_11b = { name: 'falcon2:11b', @@ -32,17 +21,17 @@ const FALCON2_11b = { }, size: '6.4gb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const -const FALCON2_IMAGE_MODELS = [] as const +// const FALCON2_IMAGE_MODELS = [] as const -export const FALCON2_EMBEDDING_MODELS = [] as const +// export const FALCON2_EMBEDDING_MODELS = [] as const -const FALCON2_AUDIO_MODELS = [] as const +// const FALCON2_AUDIO_MODELS = [] as const -const FALCON2_VIDEO_MODELS = [] as const +// const FALCON2_VIDEO_MODELS = [] as const // export type Falcon2ChatModels = (typeof FALCON2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts index 8aed89b1..50e15cee 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface 
ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const FALCON3_LATEST = { name: 'falcon3:latest', @@ -21,7 +10,7 @@ const FALCON3_LATEST = { }, size: '4.6gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const FALCON3_1b = { name: 'falcon3:1b', @@ -32,7 +21,7 @@ const FALCON3_1b = { }, size: '1.8gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const FALCON3_3b = { name: 'falcon3:3b', @@ -43,7 +32,7 @@ const FALCON3_3b = { }, size: '2gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const FALCON3_7b = { name: 'falcon3:7b', @@ -54,7 +43,7 @@ const FALCON3_7b = { }, size: '4.6gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const FALCON3_10b = { name: 'falcon3:10b', @@ -65,7 +54,7 @@ const FALCON3_10b = { }, size: '6.3gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const FALCON3_MODELS = [ FALCON3_LATEST.name, @@ -75,13 +64,13 @@ export const FALCON3_MODELS = [ FALCON3_10b.name, ] as const -const FALCON3_IMAGE_MODELS = [] as const +// const FALCON3_IMAGE_MODELS = [] as const -export const FALCON3_EMBEDDING_MODELS = [] as const +// export const FALCON3_EMBEDDING_MODELS = [] as const -const FALCON3_AUDIO_MODELS = [] as const +// const FALCON3_AUDIO_MODELS = [] as const -const FALCON3_VIDEO_MODELS = [] as const +// const FALCON3_VIDEO_MODELS = [] as const // export type Falcon3ChatModels = (typeof FALCON3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts index 537b5c29..517616a4 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const FIREFUNCTION_V2_LATEST = { name: 'firefunction-v2:latest', @@ -21,7 +10,7 @@ const FIREFUNCTION_V2_LATEST = { }, size: '40gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const FIREFUNCTION_V2_70b = { name: 'firefunction-v2:70b', @@ -32,20 +21,20 @@ const FIREFUNCTION_V2_70b = { }, size: '40gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const FIREFUNCTION_V2_MODELS = [ FIREFUNCTION_V2_LATEST.name, FIREFUNCTION_V2_70b.name, ] as const -const FIREFUNCTION_V2_IMAGE_MODELS = [] as const +// const FIREFUNCTION_V2_IMAGE_MODELS = [] as const -export const FIREFUNCTION_V2_EMBEDDING_MODELS = [] as const +// export const FIREFUNCTION_V2_EMBEDDING_MODELS = [] as const -const FIREFUNCTION_V2_AUDIO_MODELS = [] as const +// const FIREFUNCTION_V2_AUDIO_MODELS = [] as const -const FIREFUNCTION_V2_VIDEO_MODELS = [] 
as const +// const FIREFUNCTION_V2_VIDEO_MODELS = [] as const // export type Firefunction_V2ChatModels = (typeof FIREFUNCTION_V2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts index a0d633d4..1699950b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GEMMA_LATEST = { name: 'gemma:latest', @@ -21,7 +10,7 @@ const GEMMA_LATEST = { }, size: '5gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA_2b = { name: 'gemma:2b', @@ -32,7 +21,7 @@ const GEMMA_2b = { }, size: '1.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA_7b = { name: 'gemma:7b', @@ -43,7 +32,7 @@ const GEMMA_7b = { }, size: '5gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GEMMA_MODELS = [ GEMMA_LATEST.name, @@ -51,13 +40,13 @@ export const GEMMA_MODELS = [ GEMMA_7b.name, ] as const -const GEMMA_IMAGE_MODELS = [] as const +// const GEMMA_IMAGE_MODELS = [] as const export const GEMMA_EMBEDDING_MODELS = [] as const -const GEMMA_AUDIO_MODELS = [] as const +// const GEMMA_AUDIO_MODELS = [] as const -const GEMMA_VIDEO_MODELS = [] as const +// const GEMMA_VIDEO_MODELS = [] as const // export type GemmaChatModels = (typeof GEMMA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts index fe8a4ee1..b5b594a8 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GEMMA2_LATEST = { name: 'gemma2:latest', @@ -21,7 +10,7 @@ const GEMMA2_LATEST = { }, size: '5.4gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA2_2b = { name: 'gemma2:2b', @@ -32,7 +21,7 @@ const GEMMA2_2b = { }, size: '1.6gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA2_9b = { name: 'gemma2:9b', @@ -43,7 +32,7 @@ const GEMMA2_9b = { }, size: '5.4gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA2_27b = { name: 'gemma2:27b', @@ -54,7 +43,7 @@ const GEMMA2_27b = { }, size: '16gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GEMMA2_MODELS = [ GEMMA2_LATEST.name, @@ -63,13 +52,13 @@ export const GEMMA2_MODELS = [ GEMMA2_27b.name, ] as const -const GEMMA2_IMAGE_MODELS = [] as const 
+// const GEMMA2_IMAGE_MODELS = [] as const -export const GEMMA2_EMBEDDING_MODELS = [] as const +// export const GEMMA2_EMBEDDING_MODELS = [] as const -const GEMMA2_AUDIO_MODELS = [] as const +// const GEMMA2_AUDIO_MODELS = [] as const -const GEMMA2_VIDEO_MODELS = [] as const +// const GEMMA2_VIDEO_MODELS = [] as const // export type Gemma2ChatModels = (typeof GEMMA2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts index 3c5e29a6..e10daf25 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GEMMA3_LATEST = { name: 'gemma3:latest', @@ -21,7 +10,7 @@ const GEMMA3_LATEST = { }, size: '3.3gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA3_270m = { name: 'gemma3:270m', @@ -32,7 +21,7 @@ const GEMMA3_270m = { }, size: '298mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA3_1b = { name: 'gemma3:1b', @@ -43,7 +32,7 @@ const GEMMA3_1b = { }, size: '815mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA3_4b = { name: 'gemma3:4b', @@ -54,7 +43,7 @@ const GEMMA3_4b = { }, size: '3.3gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA3_12b = { name: 'gemma3:12b', @@ -65,7 +54,7 @@ const GEMMA3_12b = { }, size: '8.1gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GEMMA3_27b = { name: 'gemma3:27b', @@ -76,7 +65,7 @@ const GEMMA3_27b = { }, size: '17gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GEMMA3_MODELS = [ GEMMA3_LATEST.name, @@ -87,13 +76,13 @@ export const GEMMA3_MODELS = [ GEMMA3_27b.name, ] as const -const GEMMA3_IMAGE_MODELS = [] as const +// const GEMMA3_IMAGE_MODELS = [] as const -export const GEMMA3_EMBEDDING_MODELS = [] as const +// export const GEMMA3_EMBEDDING_MODELS = [] as const -const GEMMA3_AUDIO_MODELS = [] as const +// const GEMMA3_AUDIO_MODELS = [] as const -const GEMMA3_VIDEO_MODELS = [] as const +// const GEMMA3_VIDEO_MODELS = [] as const // export type Gemma3ChatModels = (typeof GEMMA3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts index fff202dc..6f28a433 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from 
'./models-meta' const GRANITE3_DENSE_LATEST = { name: 'granite3-dense:latest', @@ -21,7 +10,7 @@ const GRANITE3_DENSE_LATEST = { }, size: '1.6gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_DENSE_2b = { name: 'granite3-dense:2b', @@ -32,7 +21,7 @@ const GRANITE3_DENSE_2b = { }, size: '1.6gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_DENSE_8b = { name: 'granite3-dense:8b', @@ -43,7 +32,7 @@ const GRANITE3_DENSE_8b = { }, size: '4.9gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GRANITE3_DENSE_MODELS = [ GRANITE3_DENSE_LATEST.name, @@ -51,13 +40,13 @@ export const GRANITE3_DENSE_MODELS = [ GRANITE3_DENSE_8b.name, ] as const -const GRANITE3_DENSE_IMAGE_MODELS = [] as const +// const GRANITE3_DENSE_IMAGE_MODELS = [] as const -export const GRANITE3_DENSE_EMBEDDING_MODELS = [] as const +// export const GRANITE3_DENSE_EMBEDDING_MODELS = [] as const -const GRANITE3_DENSE_AUDIO_MODELS = [] as const +// const GRANITE3_DENSE_AUDIO_MODELS = [] as const -const GRANITE3_DENSE_VIDEO_MODELS = [] as const +// const GRANITE3_DENSE_VIDEO_MODELS = [] as const // export type Granite3Dense3ChatModels = (typeof GRANITE3_DENSE_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts index 56ccb2df..798118cb 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GRANITE3_GUARDIAN_LATEST = { name: 'granite3-guardian:latest', @@ -21,7 +10,7 @@ const GRANITE3_GUARDIAN_LATEST = { }, size: '2.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_GUARDIAN_2b = { name: 'granite3-guardian:2b', @@ -32,7 +21,7 @@ const GRANITE3_GUARDIAN_2b = { }, size: '2.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_GUARDIAN_8b = { name: 'granite3-guardian:8b', @@ -43,7 +32,7 @@ const GRANITE3_GUARDIAN_8b = { }, size: '5.8gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GRANITE3_GUARDIAN_MODELS = [ GRANITE3_GUARDIAN_LATEST.name, @@ -51,13 +40,13 @@ export const GRANITE3_GUARDIAN_MODELS = [ GRANITE3_GUARDIAN_8b.name, ] as const -const GRANITE3_GUARDIAN_IMAGE_MODELS = [] as const +// const GRANITE3_GUARDIAN_IMAGE_MODELS = [] as const -export const GRANITE3_GUARDIAN_EMBEDDING_MODELS = [] as const +// export const GRANITE3_GUARDIAN_EMBEDDING_MODELS = [] as const -const GRANITE3_GUARDIAN_AUDIO_MODELS = [] as const +// const GRANITE3_GUARDIAN_AUDIO_MODELS = [] as const -const GRANITE3_GUARDIAN_VIDEO_MODELS = [] as const +// const GRANITE3_GUARDIAN_VIDEO_MODELS = [] as const // export type GraniteGuardian3ChatModels = (typeof GRANITE3_GUARDIAN_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts 
b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts index fb681555..4d43bf2d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GRANITE3_MOE_LATEST = { name: 'granite3-moe:latest', @@ -21,7 +10,7 @@ const GRANITE3_MOE_LATEST = { }, size: '822mb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_MOE_1b = { name: 'granite3-moe:2b', @@ -32,7 +21,7 @@ const GRANITE3_MOE_1b = { }, size: '822mb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_MOE_3b = { name: 'granite3-moe:8b', @@ -43,7 +32,7 @@ const GRANITE3_MOE_3b = { }, size: '2.1gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GRANITE3_MOE_MODELS = [ GRANITE3_MOE_LATEST.name, @@ -51,13 +40,13 @@ export const GRANITE3_MOE_MODELS = [ GRANITE3_MOE_3b.name, ] as const -const GRANITE3_MOE_IMAGE_MODELS = [] as const +// const GRANITE3_MOE_IMAGE_MODELS = [] as const -export const GRANITE3_MOE_EMBEDDING_MODELS = [] as const +// export const GRANITE3_MOE_EMBEDDING_MODELS = [] as const -const GRANITE3_MOE_AUDIO_MODELS = [] as const +// const GRANITE3_MOE_AUDIO_MODELS = [] as const -const GRANITE3_MOE_VIDEO_MODELS = [] as const +// const GRANITE3_MOE_VIDEO_MODELS = [] as const // export type GraniteMoe3ChatModels = (typeof GRANITE3_MOE_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts index 6c4d598d..2dbf7374 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GRANITE3_1_DENSE_LATEST = { name: 'granite3.1-dense:latest', @@ -21,7 +10,7 @@ const GRANITE3_1_DENSE_LATEST = { }, size: '5gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_1_DENSE_2b = { name: 'granite3.1-dense:2b', @@ -32,7 +21,7 @@ const GRANITE3_1_DENSE_2b = { }, size: '1.6gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_1_DENSE_8b = { name: 'granite3.1-dense:8b', @@ -43,7 +32,7 @@ const GRANITE3_1_DENSE_8b = { }, size: '5gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GRANITE3_1_DENSE_MODELS = [ GRANITE3_1_DENSE_LATEST.name, @@ -51,13 +40,13 @@ export const GRANITE3_1_DENSE_MODELS = [ GRANITE3_1_DENSE_8b.name, ] as const -const GRANITE3_1_DENSE_IMAGE_MODELS = 
[] as const +// const GRANITE3_1_DENSE_IMAGE_MODELS = [] as const -export const GRANITE3_1_DENSE_EMBEDDING_MODELS = [] as const +// export const GRANITE3_1_DENSE_EMBEDDING_MODELS = [] as const -const GRANITE3_1_DENSE_AUDIO_MODELS = [] as const +// const GRANITE3_1_DENSE_AUDIO_MODELS = [] as const -const GRANITE3_1_DENSE_VIDEO_MODELS = [] as const +// const GRANITE3_1_DENSE_VIDEO_MODELS = [] as const // export type Granite3_1Dense3ChatModels = (typeof GRANITE3_1_DENSE_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts index b91d129d..7d513967 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const GRANITE3_1_MOE_LATEST = { name: 'granite3.1-moe:latest', @@ -21,7 +10,7 @@ const GRANITE3_1_MOE_LATEST = { }, size: '2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_1_MOE_1b = { name: 'granite3.1-moe:2b', @@ -32,7 +21,7 @@ const GRANITE3_1_MOE_1b = { }, size: '1.4gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const GRANITE3_1_MOE_3b = { name: 'granite3.1-moe:8b', @@ -43,7 +32,7 @@ const GRANITE3_1_MOE_3b = { }, size: '2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const GRANITE3_1_MOE_MODELS = [ GRANITE3_1_MOE_LATEST.name, @@ -51,13 +40,13 @@ export const GRANITE3_1_MOE_MODELS = [ GRANITE3_1_MOE_3b.name, ] as const -const GRANITE3_1_MOE_IMAGE_MODELS = [] as const +// const GRANITE3_1_MOE_IMAGE_MODELS = [] as const -export const GRANITE3_1_MOE_EMBEDDING_MODELS = [] as const +// export const GRANITE3_1_MOE_EMBEDDING_MODELS = [] as const -const GRANITE3_1_MOE_AUDIO_MODELS = [] as const +// const GRANITE3_1_MOE_AUDIO_MODELS = [] as const -const GRANITE3_1_MOE_VIDEO_MODELS = [] as const +// const GRANITE3_1_MOE_VIDEO_MODELS = [] as const // export type Granite3_1MoeChatModels = (typeof GRANITE3_1_MOE_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts index 81a2d7c8..db18d06d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA_GUARD3_LATEST = { name: 'llama3:latest', @@ -21,7 +10,7 @@ const LLAMA_GUARD3_LATEST = { }, size: '4.9b', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA_GUARD3_1b = { name: 'llama3:7b', @@ -32,7 
+21,7 @@ const LLAMA_GUARD3_1b = { }, size: '1.6gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA_GUARD3_8b = { name: 'llama3:70b', @@ -43,7 +32,7 @@ const LLAMA_GUARD3_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA_GUARD3_MODELS = [ LLAMA_GUARD3_LATEST.name, @@ -51,13 +40,13 @@ export const LLAMA_GUARD3_MODELS = [ LLAMA_GUARD3_8b.name, ] as const -const LLAMA_GUARD3_IMAGE_MODELS = [] as const +// const LLAMA_GUARD3_IMAGE_MODELS = [] as const -export const LLAMA_GUARD3_EMBEDDING_MODELS = [] as const +// export const LLAMA_GUARD3_EMBEDDING_MODELS = [] as const -const LLAMA_GUARD3_AUDIO_MODELS = [] as const +// const LLAMA_GUARD3_AUDIO_MODELS = [] as const -const LLAMA_GUARD3_VIDEO_MODELS = [] as const +// const LLAMA_GUARD3_VIDEO_MODELS = [] as const // export type LlamaGuard3ChatModels = (typeof LLAMA_GUARD3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts index bf7b3c23..44a9c66d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA2_LATEST = { name: 'llama2:latest', @@ -21,7 +10,7 @@ const LLAMA2_LATEST = { }, size: '3.8gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA2_7b = { name: 'llama2:7b', @@ -32,7 +21,7 @@ const LLAMA2_7b = { }, size: '3.8gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA2_13b = { name: 'llama2:13b', @@ -43,7 +32,7 @@ const LLAMA2_13b = { }, size: '7.4gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA2_70b = { name: 'llama2:70b', @@ -54,7 +43,7 @@ const LLAMA2_70b = { }, size: '39gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA2_MODELS = [ LLAMA2_LATEST.name, @@ -63,13 +52,13 @@ export const LLAMA2_MODELS = [ LLAMA2_70b.name, ] as const -const LLAMA2_IMAGE_MODELS = [] as const +// const LLAMA2_IMAGE_MODELS = [] as const -export const LLAMA2_EMBEDDING_MODELS = [] as const +// export const LLAMA2_EMBEDDING_MODELS = [] as const -const LLAMA2_AUDIO_MODELS = [] as const +// const LLAMA2_AUDIO_MODELS = [] as const -const LLAMA2_VIDEO_MODELS = [] as const +// const LLAMA2_VIDEO_MODELS = [] as const // export type Llama2ChatModels = (typeof LLAMA2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts index 94318789..58063a03 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - 
output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_CHATQA_LATEST = { name: 'llama3-chatqa:latest', @@ -21,7 +10,7 @@ const LLAMA3_CHATQA_LATEST = { }, size: '4.7b', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_CHATQA_8b = { name: 'llama3-chatqa:8b', @@ -32,7 +21,7 @@ const LLAMA3_CHATQA_8b = { }, size: '4.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_CHATQA_70b = { name: 'llama3-chatqa:70b', @@ -43,7 +32,7 @@ const LLAMA3_CHATQA_70b = { }, size: '40gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_CHATQA_MODELS = [ LLAMA3_CHATQA_LATEST.name, @@ -51,13 +40,13 @@ export const LLAMA3_CHATQA_MODELS = [ LLAMA3_CHATQA_70b.name, ] as const -const LLAMA3_CHATQA_IMAGE_MODELS = [] as const +// const LLAMA3_CHATQA_IMAGE_MODELS = [] as const -export const LLAMA3_CHATQA_EMBEDDING_MODELS = [] as const +// export const LLAMA3_CHATQA_EMBEDDING_MODELS = [] as const -const LLAMA3_CHATQA_AUDIO_MODELS = [] as const +// const LLAMA3_CHATQA_AUDIO_MODELS = [] as const -const LLAMA3_CHATQA_VIDEO_MODELS = [] as const +// const LLAMA3_CHATQA_VIDEO_MODELS = [] as const // export type Llama3ChatQaChatModels = (typeof LLAMA3_CHATQA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts index ebd33a0a..62753cc7 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_GRADIENT_LATEST = { name: 'llama3-gradient:latest', @@ -21,7 +10,7 @@ const LLAMA3_GRADIENT_LATEST = { }, size: '4.7b', context: 1_000_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_GRADIENT_8b = { name: 'llama3-gradient:8b', @@ -32,7 +21,7 @@ const LLAMA3_GRADIENT_8b = { }, size: '4.7gb', context: 1_000_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_GRADIENT_70b = { name: 'llama3-gradient:70b', @@ -43,7 +32,7 @@ const LLAMA3_GRADIENT_70b = { }, size: '40gb', context: 1_000_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_GRADIENT_MODELS = [ LLAMA3_GRADIENT_LATEST.name, @@ -51,13 +40,13 @@ export const LLAMA3_GRADIENT_MODELS = [ LLAMA3_GRADIENT_70b.name, ] as const -const LLAMA3_GRADIENT_IMAGE_MODELS = [] as const +// const LLAMA3_GRADIENT_IMAGE_MODELS = [] as const export const LLAMA3_GRADIENT_EMBEDDING_MODELS = [] as const -const LLAMA3_GRADIENT_AUDIO_MODELS = [] as const +// const LLAMA3_GRADIENT_AUDIO_MODELS = [] as const -const LLAMA3_GRADIENT_VIDEO_MODELS = [] as const +// const LLAMA3_GRADIENT_VIDEO_MODELS = [] as const // export type Llama3GradientChatModels = (typeof LLAMA3_GRADIENT_MODELS)[number] diff 
--git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts index 9c22fe1d..66186581 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_1_LATEST = { name: 'llama3.1:latest', @@ -21,7 +10,7 @@ const LLAMA3_1_LATEST = { }, size: '4.9b', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_1_8b = { name: 'llama3.1:8b', @@ -32,7 +21,7 @@ const LLAMA3_1_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_1_70b = { name: 'llama3.1:70b', @@ -43,7 +32,7 @@ const LLAMA3_1_70b = { }, size: '43gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_1_405b = { name: 'llama3.1:70b', @@ -54,7 +43,7 @@ const LLAMA3_1_405b = { }, size: '243gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_1_MODELS = [ LLAMA3_1_LATEST.name, @@ -63,13 +52,13 @@ export const LLAMA3_1_MODELS = [ LLAMA3_1_405b.name, ] as const -const LLAMA3_1_IMAGE_MODELS = [] as const +// const LLAMA3_1_IMAGE_MODELS = [] as const -export const LLAMA3_1_EMBEDDING_MODELS = [] as const +// export const LLAMA3_1_EMBEDDING_MODELS = [] as const -const LLAMA3_1_AUDIO_MODELS = [] as const +// const LLAMA3_1_AUDIO_MODELS = [] as const -const LLAMA3_1_VIDEO_MODELS = [] as const +// const LLAMA3_1_VIDEO_MODELS = [] as const // export type Llama3_1ChatModels = (typeof LLAMA3_1_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts index d68ef0b3..d840815f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_2_VISION_LATEST = { name: 'llama3.2:latest', @@ -21,7 +10,7 @@ const LLAMA3_2_VISION_LATEST = { }, size: '7.8b', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_2_VISION_11b = { name: 'llama3.2:11b', @@ -32,7 +21,7 @@ const LLAMA3_2_VISION_11b = { }, size: '1gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_2_VISION_90b = { name: 'llama3.2:90b', @@ -43,7 +32,7 @@ const LLAMA3_2_VISION_90b = { }, size: '55gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_2_VISION_MODELS = [ 
LLAMA3_2_VISION_LATEST.name, @@ -51,15 +40,15 @@ export const LLAMA3_2_VISION_MODELS = [ LLAMA3_2_VISION_90b.name, ] as const -export const LLAMA3_2_VISION_IMAGE_MODELS = [] as const +// export const LLAMA3_2_VISION_IMAGE_MODELS = [] as const -export const LLAMA3_2_VISION_EMBEDDING_MODELS = [] as const +// export const LLAMA3_2_VISION_EMBEDDING_MODELS = [] as const -const LLAMA3_2_VISION_AUDIO_MODELS = [] as const +// export const LLAMA3_2_VISION_AUDIO_MODELS = [] as const -const LLAMA3_2_VISION_VIDEO_MODELS = [] as const +// export const LLAMA3_2_VISION_VIDEO_MODELS = [] as const -// export type Llama3_2VisionChatModels = (typeof LLAMA3_2Vision_MODELS)[number] +// export type Llama3_2VisionChatModels = (typeof LLAMA3_2Vision_MODELS)[number] // Manual type map for per-model provider options export type Llama3_2VisionChatModelProviderOptionsByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts index cd8e2810..328adcce 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_2_LATEST = { name: 'llama3.2:latest', @@ -21,7 +10,7 @@ const LLAMA3_2_LATEST = { }, size: '2b', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_2_1b = { name: 'llama3.2:1b', @@ -32,7 +21,7 @@ const LLAMA3_2_1b = { }, size: '1.3gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_2_3b = { name: 'llama3.2:3b', @@ -43,7 +32,7 @@ const LLAMA3_2_3b = { }, size: '2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_2_MODELS = [ LLAMA3_2_LATEST.name, @@ -51,13 +40,13 @@ export const LLAMA3_2_MODELS = [ LLAMA3_2_3b.name, ] as const -const LLAMA3_2_IMAGE_MODELS = [] as const +// const LLAMA3_2_IMAGE_MODELS = [] as const -export const LLAMA3_2_EMBEDDING_MODELS = [] as const +// export const LLAMA3_2_EMBEDDING_MODELS = [] as const -const LLAMA3_2_AUDIO_MODELS = [] as const +// const LLAMA3_2_AUDIO_MODELS = [] as const -const LLAMA3_2_VIDEO_MODELS = [] as const +// const LLAMA3_2_VIDEO_MODELS = [] as const // export type Llama3_2ChatModels = (typeof LLAMA3_2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts index d2efe4c7..1cbc63a8 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_3_LATEST = { name: 'llama3.3:latest', @@ -21,7 +10,7 
@@ const LLAMA3_3_LATEST = { }, size: '43b', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_3_70b = { name: 'llama3.3:8b', @@ -32,20 +21,20 @@ const LLAMA3_3_70b = { }, size: '43gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_3_MODELS = [ LLAMA3_3_LATEST.name, LLAMA3_3_70b.name, ] as const -const LLAMA3_3_IMAGE_MODELS = [] as const +// const LLAMA3_3_IMAGE_MODELS = [] as const -export const LLAMA3_3_EMBEDDING_MODELS = [] as const +// export const LLAMA3_3_EMBEDDING_MODELS = [] as const -const LLAMA3_3_AUDIO_MODELS = [] as const +// const LLAMA3_3_AUDIO_MODELS = [] as const -const LLAMA3_3_VIDEO_MODELS = [] as const +// const LLAMA3_3_VIDEO_MODELS = [] as const // export type Llama3_3ChatModels = (typeof LLAMA3_3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts index 562e1c12..d61504b9 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA3_LATEST = { name: 'llama3:latest', @@ -21,7 +10,7 @@ const LLAMA3_LATEST = { }, size: '4.7b', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_8b = { name: 'llama3:7b', @@ -32,7 +21,7 @@ const LLAMA3_8b = { }, size: '4.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA3_70b = { name: 'llama3:70b', @@ -43,7 +32,7 @@ const LLAMA3_70b = { }, size: '40gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA3_MODELS = [ LLAMA3_LATEST.name, @@ -51,13 +40,13 @@ export const LLAMA3_MODELS = [ LLAMA3_70b.name, ] as const -const LLAMA3_IMAGE_MODELS = [] as const +// const LLAMA3_IMAGE_MODELS = [] as const -export const LLAMA3_EMBEDDING_MODELS = [] as const +// export const LLAMA3_EMBEDDING_MODELS = [] as const -const LLAMA3_AUDIO_MODELS = [] as const +// const LLAMA3_AUDIO_MODELS = [] as const -const LLAMA3_VIDEO_MODELS = [] as const +// const LLAMA3_VIDEO_MODELS = [] as const // export type Llama3ChatModels = (typeof LLAMA3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts index 4d9b0ae9..418cc25d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAMA4_LATEST = { name: 'llama4:latest', @@ -21,7 +10,7 @@ const LLAMA4_LATEST = { }, size: '67b', context: 10_000_000, 
-} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA4_16X17b = { name: 'llama4:16x17b', @@ -32,7 +21,7 @@ const LLAMA4_16X17b = { }, size: '67gb', context: 10_000_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAMA4_128X17b = { name: 'llama4:128x17b', @@ -43,7 +32,7 @@ const LLAMA4_128X17b = { }, size: '245gb', context: 1_000_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAMA4_MODELS = [ LLAMA4_LATEST.name, @@ -51,13 +40,13 @@ export const LLAMA4_MODELS = [ LLAMA4_128X17b.name, ] as const -const LLAMA4_IMAGE_MODELS = [] as const +// const LLAMA4_IMAGE_MODELS = [] as const -export const LLAMA4_EMBEDDING_MODELS = [] as const +// export const LLAMA4_EMBEDDING_MODELS = [] as const -const LLAMA4_AUDIO_MODELS = [] as const +// const LLAMA4_AUDIO_MODELS = [] as const -const LLAMA4_VIDEO_MODELS = [] as const +// const LLAMA4_VIDEO_MODELS = [] as const // export type Llama3_4ChatModels = (typeof LLAMA4_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts index 0425b1ad..da96e112 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAVA_LLAMA3_LATEST = { name: 'llava-llama3:latest', @@ -21,7 +10,7 @@ const LLAVA_LLAMA3_LATEST = { }, size: '5.5b', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAVA_LLAMA3_8b = { name: 'llava-llama3:8b', @@ -32,20 +21,20 @@ const LLAVA_LLAMA3_8b = { }, size: '5.5gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAVA_LLAMA3_MODELS = [ LLAVA_LLAMA3_LATEST.name, LLAVA_LLAMA3_8b.name, ] as const -const LLAVA_LLAMA3_IMAGE_MODELS = [] as const +// const LLAVA_LLAMA3_IMAGE_MODELS = [] as const -export const LLAVA_LLAMA3_EMBEDDING_MODELS = [] as const +// export const LLAVA_LLAMA3_EMBEDDING_MODELS = [] as const -const LLAVA_LLAMA3_AUDIO_MODELS = [] as const +// const LLAVA_LLAMA3_AUDIO_MODELS = [] as const -const LLAVA_LLAMA3_VIDEO_MODELS = [] as const +// const LLAVA_LLAMA3_VIDEO_MODELS = [] as const // export type LlavaLlamaChatModels = (typeof LLAVA_LLAMA3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts index 3104c9d4..4c725a64 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAVA_PHI3_LATEST = { name: 
'llava-phi3:latest', @@ -21,7 +10,7 @@ const LLAVA_PHI3_LATEST = { }, size: '2.9b', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAVA_PHI3_8b = { name: 'llava-phi3:8b', @@ -32,20 +21,20 @@ const LLAVA_PHI3_8b = { }, size: '2.9gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAVA_PHI3_MODELS = [ LLAVA_PHI3_LATEST.name, LLAVA_PHI3_8b.name, ] as const -const LLAVA_PHI3_IMAGE_MODELS = [] as const +// const LLAVA_PHI3_IMAGE_MODELS = [] as const -export const LLAVA_PHI3_EMBEDDING_MODELS = [] as const +// export const LLAVA_PHI3_EMBEDDING_MODELS = [] as const -const LLAVA_PHI3_AUDIO_MODELS = [] as const +// const LLAVA_PHI3_AUDIO_MODELS = [] as const -const LLAVA_PHI3_VIDEO_MODELS = [] as const +// const LLAVA_PHI3_VIDEO_MODELS = [] as const // export type LlavaPhi3ChatModels = (typeof LLAVA_PHI3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts index 1d5a2013..18e7f762 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const LLAVA_LATEST = { name: 'llava:latest', @@ -21,7 +10,7 @@ const LLAVA_LATEST = { }, size: '4.7b', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAVA_7b = { name: 'llava:7b', @@ -32,7 +21,7 @@ const LLAVA_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAVA_13b = { name: 'llava:13b', @@ -43,7 +32,7 @@ const LLAVA_13b = { }, size: '8gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const LLAVA_34b = { name: 'llava:34b', @@ -54,7 +43,7 @@ const LLAVA_34b = { }, size: '20gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const LLAVA_MODELS = [ LLAVA_LATEST.name, @@ -63,13 +52,13 @@ export const LLAVA_MODELS = [ LLAVA_34b.name, ] as const -const LLAVA_IMAGE_MODELS = [] as const +// const LLAVA_IMAGE_MODELS = [] as const -export const LLAVA_EMBEDDING_MODELS = [] as const +// export const LLAVA_EMBEDDING_MODELS = [] as const -const LLAVA_AUDIO_MODELS = [] as const +// const LLAVA_AUDIO_MODELS = [] as const -const LLAVA_VIDEO_MODELS = [] as const +// const LLAVA_VIDEO_MODELS = [] as const // export type llavaChatModels = (typeof LLAVA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts index 949c5d8f..fb44d209 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 
'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const MARCO_O1_LATEST = { name: 'marco-o1:latest', @@ -21,7 +10,7 @@ const MARCO_O1_LATEST = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MARCO_O1_7b = { name: 'marco-o1:7b', @@ -32,17 +21,17 @@ const MARCO_O1_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const -const MARCO_O1_IMAGE_MODELS = [] as const +// const MARCO_O1_IMAGE_MODELS = [] as const -export const MARCO_O1_EMBEDDING_MODELS = [] as const +// export const MARCO_O1_EMBEDDING_MODELS = [] as const -const MARCO_O1_AUDIO_MODELS = [] as const +// const MARCO_O1_AUDIO_MODELS = [] as const -const MARCO_O1_VIDEO_MODELS = [] as const +// const MARCO_O1_VIDEO_MODELS = [] as const // export type MarcoO1ChatModels = (typeof MARCO_O1_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts index 1f924872..7f2055f2 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const MISTRAL_LARGE_LATEST = { name: 'mistral-large:latest', @@ -21,7 +10,7 @@ const MISTRAL_LARGE_LATEST = { }, size: '73gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MISTRAL_LARGE_123b = { name: 'mistral-large:123b', @@ -32,20 +21,20 @@ const MISTRAL_LARGE_123b = { }, size: '73gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MISTRAL_LARGE_MODELS = [ MISTRAL_LARGE_LATEST.name, MISTRAL_LARGE_123b.name, ] as const -const MISTRAL_LARGE_IMAGE_MODELS = [] as const +// const MISTRAL_LARGE_IMAGE_MODELS = [] as const -export const MISTRAL_LARGE_EMBEDDING_MODELS = [] as const +// export const MISTRAL_LARGE_EMBEDDING_MODELS = [] as const -const MISTRAL_LARGE_AUDIO_MODELS = [] as const +// const MISTRAL_LARGE_AUDIO_MODELS = [] as const -const MISTRAL_LARGE_VIDEO_MODELS = [] as const +// const MISTRAL_LARGE_VIDEO_MODELS = [] as const // export type MistralLargeChatModels = (typeof MISTRAL_LARGE_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts index 62376d15..39fb3ab6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from 
'./models-meta' const MISTRAL_NEMO_LATEST = { name: 'mistral-nemo:latest', @@ -21,7 +10,7 @@ const MISTRAL_NEMO_LATEST = { }, size: '7.1gb', context: 1_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MISTRAL_NEMO_12b = { name: 'mistral-nemo:12b', @@ -32,20 +21,20 @@ const MISTRAL_NEMO_12b = { }, size: '7.1gb', context: 1_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MISTRAL_NEMO_MODELS = [ MISTRAL_NEMO_LATEST.name, MISTRAL_NEMO_12b.name, ] as const -const MISTRAL_NEMO_IMAGE_MODELS = [] as const +// const MISTRAL_NEMO_IMAGE_MODELS = [] as const -export const MISTRAL_NEMO_EMBEDDING_MODELS = [] as const +// export const MISTRAL_NEMO_EMBEDDING_MODELS = [] as const -const MISTRAL_NEMO_AUDIO_MODELS = [] as const +// const MISTRAL_NEMO_AUDIO_MODELS = [] as const -const MISTRAL_NEMO_VIDEO_MODELS = [] as const +// const MISTRAL_NEMO_VIDEO_MODELS = [] as const // export type MistralNemoChatModels = (typeof MISTRAL_NEMO_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts index 15d4f45f..3dabd7d2 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const MISTRAL_SMALL_LATEST = { name: 'mistral-small:latest', @@ -21,7 +10,7 @@ const MISTRAL_SMALL_LATEST = { }, size: '14gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MISTRAL_SMALL_22b = { name: 'mistral-small:22b', @@ -32,7 +21,7 @@ const MISTRAL_SMALL_22b = { }, size: '13gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MISTRAL_SMALL_24b = { name: 'mistral-small:24b', @@ -43,7 +32,7 @@ const MISTRAL_SMALL_24b = { }, size: '13gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MISTRAL_SMALL_MODELS = [ MISTRAL_SMALL_LATEST.name, @@ -51,13 +40,13 @@ export const MISTRAL_SMALL_MODELS = [ MISTRAL_SMALL_24b.name, ] as const -const MISTRAL_SMALL_IMAGE_MODELS = [] as const +// const MISTRAL_SMALL_IMAGE_MODELS = [] as const -export const MISTRAL_SMALL_EMBEDDING_MODELS = [] as const +// export const MISTRAL_SMALL_EMBEDDING_MODELS = [] as const -const MISTRAL_SMALL_AUDIO_MODELS = [] as const +// const MISTRAL_SMALL_AUDIO_MODELS = [] as const -const MISTRAL_SMALL_VIDEO_MODELS = [] as const +// const MISTRAL_SMALL_VIDEO_MODELS = [] as const // export type MistralSmallChatModels = (typeof MISTRAL_SMALL_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts index 276420f3..55efb14d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' 
| 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const MISTRAL_LATEST = { name: 'mistral:latest', @@ -21,7 +10,7 @@ const MISTRAL_LATEST = { }, size: '2.9gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MISTRAL_7b = { name: 'mistral:7b', @@ -32,17 +21,17 @@ const MISTRAL_7b = { }, size: '2.9gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const -const MISTRAL_IMAGE_MODELS = [] as const +// const MISTRAL_IMAGE_MODELS = [] as const -export const MISTRAL_EMBEDDING_MODELS = [] as const +// export const MISTRAL_EMBEDDING_MODELS = [] as const -const MISTRAL_AUDIO_MODELS = [] as const +// const MISTRAL_AUDIO_MODELS = [] as const -const MISTRAL_VIDEO_MODELS = [] as const +// const MISTRAL_VIDEO_MODELS = [] as const // export type MistralChatModels = (typeof MISTRAL_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts index d7d7bede..37656cd2 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const MIXTRAL_LATEST = { name: 'mixtral:latest', @@ -21,7 +10,7 @@ const MIXTRAL_LATEST = { }, size: '26gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MIXTRAL_8X7b = { name: 'mixtral:8x7b', @@ -32,7 +21,7 @@ const MIXTRAL_8X7b = { }, size: '26gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MIXTRAL_8X22b = { name: 'mixtral:8x22b', @@ -43,7 +32,7 @@ const MIXTRAL_8X22b = { }, size: '80gb', context: 64_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MIXTRAL_MODELS = [ MIXTRAL_LATEST.name, @@ -51,13 +40,13 @@ export const MIXTRAL_MODELS = [ MIXTRAL_8X22b.name, ] as const -const MIXTRAL_IMAGE_MODELS = [] as const +// const MIXTRAL_IMAGE_MODELS = [] as const -export const MIXTRAL_EMBEDDING_MODELS = [] as const +// export const MIXTRAL_EMBEDDING_MODELS = [] as const -const MIXTRAL_AUDIO_MODELS = [] as const +// const MIXTRAL_AUDIO_MODELS = [] as const -const MIXTRAL_VIDEO_MODELS = [] as const +// const MIXTRAL_VIDEO_MODELS = [] as const // export type MixtralChatModels = (typeof MIXTRAL_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts index a2dd1e15..50be72ad 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: 
Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const MOONDREAM_LATEST = { name: 'moondream:latest', @@ -21,7 +10,7 @@ const MOONDREAM_LATEST = { }, size: '1.7gb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const MOONDREAM_1_8b = { name: 'moondream:1.8b', @@ -32,20 +21,20 @@ const MOONDREAM_1_8b = { }, size: '1.7gb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const MOONDREAM_MODELS = [ MOONDREAM_LATEST.name, MOONDREAM_1_8b.name, ] as const -const MOONDREAM_IMAGE_MODELS = [] as const +// const MOONDREAM_IMAGE_MODELS = [] as const -export const MOONDREAM_EMBEDDING_MODELS = [] as const +// export const MOONDREAM_EMBEDDING_MODELS = [] as const -const MOONDREAM_AUDIO_MODELS = [] as const +// const MOONDREAM_AUDIO_MODELS = [] as const -const MOONDREAM_VIDEO_MODELS = [] as const +// const MOONDREAM_VIDEO_MODELS = [] as const // export type MoondreamChatModels = (typeof MOONDREAM_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts index 5abf9631..b5a9cc2d 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const NEMOTRON_MINI_LATEST = { name: 'nemotron-mini:latest', @@ -21,7 +10,7 @@ const NEMOTRON_MINI_LATEST = { }, size: '2.7gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const NEMOTRON_MINI_4b = { name: 'nemotron-mini:4b', @@ -32,20 +21,20 @@ const NEMOTRON_MINI_4b = { }, size: '2.7gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const NEMOTRON_MINI_MODELS = [ NEMOTRON_MINI_LATEST.name, NEMOTRON_MINI_4b.name, ] as const -const NEMOTRON_MINI_IMAGE_MODELS = [] as const +// const NEMOTRON_MINI_IMAGE_MODELS = [] as const -export const NEMOTRON_MINI_EMBEDDING_MODELS = [] as const +// export const NEMOTRON_MINI_EMBEDDING_MODELS = [] as const -const NEMOTRON_MINI_AUDIO_MODELS = [] as const +// const NEMOTRON_MINI_AUDIO_MODELS = [] as const -const NEMOTRON_MINI_VIDEO_MODELS = [] as const +// const NEMOTRON_MINI_VIDEO_MODELS = [] as const // export type NemotronMiniChatModels = (typeof NEMOTRON_MINI_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts index 7ae7364e..3f06d9ea 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } 
- size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const NEMOTRON_LATEST = { name: 'nemotron:latest', @@ -21,7 +10,7 @@ const NEMOTRON_LATEST = { }, size: '43gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const NEMOTRON_70b = { name: 'nemotron:70b', @@ -32,20 +21,20 @@ const NEMOTRON_70b = { }, size: '43gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const NEMOTRON_MODELS = [ NEMOTRON_LATEST.name, NEMOTRON_70b.name, ] as const -const NEMOTRON_IMAGE_MODELS = [] as const +// const NEMOTRON_IMAGE_MODELS = [] as const -export const NEMOTRON_EMBEDDING_MODELS = [] as const +// export const NEMOTRON_EMBEDDING_MODELS = [] as const -const NEMOTRON_AUDIO_MODELS = [] as const +// const NEMOTRON_AUDIO_MODELS = [] as const -const NEMOTRON_VIDEO_MODELS = [] as const +// const NEMOTRON_VIDEO_MODELS = [] as const // export type NemotronChatModels = (typeof NEMOTRON_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts index 10b2aa33..621bc7b8 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const OLMO2_LATEST = { name: 'olmo2:latest', @@ -21,7 +10,7 @@ const OLMO2_LATEST = { }, size: '4.5gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const OLMO2_7b = { name: 'olmo2:7b', @@ -32,7 +21,7 @@ const OLMO2_7b = { }, size: '4.5gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const OLMO2_13b = { name: 'olmo2:13b', @@ -43,7 +32,7 @@ const OLMO2_13b = { }, size: '8.4gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const OLMO2_MODELS = [ OLMO2_LATEST.name, @@ -51,13 +40,13 @@ export const OLMO2_MODELS = [ OLMO2_13b.name, ] as const -const OLMO2_IMAGE_MODELS = [] as const +// const OLMO2_IMAGE_MODELS = [] as const -export const OLMO2_EMBEDDING_MODELS = [] as const +// export const OLMO2_EMBEDDING_MODELS = [] as const -const OLMO2_AUDIO_MODELS = [] as const +// const OLMO2_AUDIO_MODELS = [] as const -const OLMO2_VIDEO_MODELS = [] as const +// const OLMO2_VIDEO_MODELS = [] as const // export type Olmo2ChatModels = (typeof OLMO2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts index 39639016..0b22d3b2 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { 
DefaultOllamaModelMeta } from './models-meta' const OPENCODER_LATEST = { name: 'opencoder:latest', @@ -21,7 +10,7 @@ const OPENCODER_LATEST = { }, size: '4.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const OPENCODER_1_5b = { name: 'opencoder:1.5b', @@ -32,7 +21,7 @@ const OPENCODER_1_5b = { }, size: '1.4gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const OPENCODER_8b = { name: 'opencoder:8b', @@ -43,7 +32,7 @@ const OPENCODER_8b = { }, size: '4.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const OPENCODER_MODELS = [ OPENCODER_LATEST.name, @@ -51,13 +40,13 @@ export const OPENCODER_MODELS = [ OPENCODER_8b.name, ] as const -const OPENCODER_IMAGE_MODELS = [] as const +// const OPENCODER_IMAGE_MODELS = [] as const -export const OPENCODER_EMBEDDING_MODELS = [] as const +// export const OPENCODER_EMBEDDING_MODELS = [] as const -const OPENCODER_AUDIO_MODELS = [] as const +// const OPENCODER_AUDIO_MODELS = [] as const -const OPENCODER_VIDEO_MODELS = [] as const +// const OPENCODER_VIDEO_MODELS = [] as const // export type OpencoderChatModels = (typeof OPENCODER_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts index 591593a4..459baeca 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const OPENHERMES_LATEST = { name: 'openhermes:latest', @@ -21,7 +10,7 @@ const OPENHERMES_LATEST = { }, size: '4.1gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const OPENHERMES_V2 = { name: 'openhermes:v2', @@ -32,7 +21,7 @@ const OPENHERMES_V2 = { }, size: '4.1gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const OPENHERMES_V2_5 = { name: 'openhermes:v2.5', @@ -43,7 +32,7 @@ const OPENHERMES_V2_5 = { }, size: '4.1gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const OPENHERMES_MODELS = [ OPENHERMES_LATEST.name, @@ -51,13 +40,13 @@ export const OPENHERMES_MODELS = [ OPENHERMES_V2_5.name, ] as const -const OPENHERMES_IMAGE_MODELS = [] as const +// const OPENHERMES_IMAGE_MODELS = [] as const -export const OPENHERMES_EMBEDDING_MODELS = [] as const +// export const OPENHERMES_EMBEDDING_MODELS = [] as const -const OPENHERMES_AUDIO_MODELS = [] as const +// const OPENHERMES_AUDIO_MODELS = [] as const -const OPENHERMES_VIDEO_MODELS = [] as const +// const OPENHERMES_VIDEO_MODELS = [] as const // export type OpenhermesChatModels = (typeof OPENHERMES_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts index 6affa650..7836b653 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } 
from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const PHI3_LATEST = { name: 'phi3:latest', @@ -21,7 +10,7 @@ const PHI3_LATEST = { }, size: '2.2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const PHI3_3_8b = { name: 'phi3:3.8b', @@ -32,7 +21,7 @@ const PHI3_3_8b = { }, size: '2.2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const PHI3_14b = { name: 'phi3:14b', @@ -43,7 +32,7 @@ const PHI3_14b = { }, size: '7.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const PHI3_MODELS = [ PHI3_LATEST.name, @@ -51,13 +40,13 @@ export const PHI3_MODELS = [ PHI3_14b.name, ] as const -const PHI3_IMAGE_MODELS = [] as const +// const PHI3_IMAGE_MODELS = [] as const -export const PHI3_EMBEDDING_MODELS = [] as const +// export const PHI3_EMBEDDING_MODELS = [] as const -const PHI3_AUDIO_MODELS = [] as const +// const PHI3_AUDIO_MODELS = [] as const -const PHI3_VIDEO_MODELS = [] as const +// const PHI3_VIDEO_MODELS = [] as const // export type Phi3ChatModels = (typeof PHI3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts index de6d0dad..38ffeb9b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const PHI4_LATEST = { name: 'phi4:latest', @@ -21,7 +10,7 @@ const PHI4_LATEST = { }, size: '9.1gb', context: 16_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const PHI4_14b = { name: 'phi4:14b', @@ -32,17 +21,17 @@ const PHI4_14b = { }, size: '9.1gb', context: 16_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const -const PHI4_IMAGE_MODELS = [] as const +// const PHI4_IMAGE_MODELS = [] as const -export const PHI4_EMBEDDING_MODELS = [] as const +// export const PHI4_EMBEDDING_MODELS = [] as const -const PHI4_AUDIO_MODELS = [] as const +// const PHI4_AUDIO_MODELS = [] as const -const PHI4_VIDEO_MODELS = [] as const +// const PHI4_VIDEO_MODELS = [] as const // export type Phi4ChatModels = (typeof PHI4_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts index eea05791..fee586de 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 
'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const QWEN_LATEST = { name: 'qwen:latest', @@ -21,7 +10,7 @@ const QWEN_LATEST = { }, size: '2.3gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_0_5b = { name: 'qwen:0.5b', @@ -32,7 +21,7 @@ const QWEN_0_5b = { }, size: '395mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_1_8b = { name: 'qwen:1.8b', @@ -43,7 +32,7 @@ const QWEN_1_8b = { }, size: '1.1gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_4b = { name: 'qwen:4b', @@ -54,7 +43,7 @@ const QWEN_4b = { }, size: '2.3gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_7b = { name: 'qwen:7b', @@ -65,7 +54,7 @@ const QWEN_7b = { }, size: '4.5gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_14b = { name: 'qwen:14b', @@ -76,7 +65,7 @@ const QWEN_14b = { }, size: '8.2gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_32b = { name: 'qwen:32b', @@ -87,7 +76,7 @@ const QWEN_32b = { }, size: '18gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_72b = { name: 'qwen:72b', @@ -98,7 +87,7 @@ const QWEN_72b = { }, size: '41gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN_110b = { name: 'qwen:110b', @@ -109,7 +98,7 @@ const QWEN_110b = { }, size: '63gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const QWEN_MODELS = [ QWEN_LATEST.name, @@ -123,13 +112,13 @@ export const QWEN_MODELS = [ QWEN_110b.name, ] as const -const QWEN_IMAGE_MODELS = [] as const +// const QWEN_IMAGE_MODELS = [] as const -export const QWEN_EMBEDDING_MODELS = [] as const +// export const QWEN_EMBEDDING_MODELS = [] as const -const QWEN_AUDIO_MODELS = [] as const +// const QWEN_AUDIO_MODELS = [] as const -const QWEN_VIDEO_MODELS = [] as const +// const QWEN_VIDEO_MODELS = [] as const // export type QwenChatModels = (typeof QWEN_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts index 7a7a71e5..a033db9c 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const QWEN2_5_CODER_LATEST = { name: 'qwen2.5-coder:latest', @@ -21,7 +10,7 @@ const QWEN2_5_CODER_LATEST = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_CODER_0_5b = { name: 'qwen2.5-coder:0.5b', @@ -32,7 +21,7 @@ const QWEN2_5_CODER_0_5b = { }, size: '398mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies 
DefaultOllamaModelMeta const QWEN2_5_CODER_1_5b = { name: 'qwen2.5-coder:1.5b', @@ -43,7 +32,7 @@ const QWEN2_5_CODER_1_5b = { }, size: '986mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_CODER_3b = { name: 'qwen2.5-coder:3b', @@ -54,7 +43,7 @@ const QWEN2_5_CODER_3b = { }, size: '1.9gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_CODER_7b = { name: 'qwen2.5-coder:7b', @@ -65,7 +54,7 @@ const QWEN2_5_CODER_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_CODER_14b = { name: 'qwen2.5-coder:14b', @@ -76,7 +65,7 @@ const QWEN2_5_CODER_14b = { }, size: '9gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_CODER_32b = { name: 'qwen2.5-coder:32b', @@ -87,7 +76,7 @@ const QWEN2_5_CODER_32b = { }, size: '20gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const QWEN2_5_CODER_MODELS = [ QWEN2_5_CODER_LATEST.name, @@ -98,13 +87,13 @@ export const QWEN2_5_CODER_MODELS = [ QWEN2_5_CODER_32b.name, ] as const -const QWEN2_5_CODER_IMAGE_MODELS = [] as const +// const QWEN2_5_CODER_IMAGE_MODELS = [] as const -export const QWEN2_5_CODER_EMBEDDING_MODELS = [] as const +// export const QWEN2_5_CODER_EMBEDDING_MODELS = [] as const -const QWEN2_5_CODER_AUDIO_MODELS = [] as const +// const QWEN2_5_CODER_AUDIO_MODELS = [] as const -const QWEN2_5_CODER_VIDEO_MODELS = [] as const +// const QWEN2_5_CODER_VIDEO_MODELS = [] as const // export type Qwen2_5CoderChatModels = (typeof QWEN2_5_CODER_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts index e0272058..4827e758 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const QWEN2_5_LATEST = { name: 'qwen2.5:latest', @@ -21,7 +10,7 @@ const QWEN2_5_LATEST = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_0_5b = { name: 'qwen2.5:0.5b', @@ -32,7 +21,7 @@ const QWEN2_5_0_5b = { }, size: '398mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_1_5b = { name: 'qwen2.5:1.5b', @@ -43,7 +32,7 @@ const QWEN2_5_1_5b = { }, size: '986mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_3b = { name: 'qwen2.5:3b', @@ -54,7 +43,7 @@ const QWEN2_5_3b = { }, size: '1.9gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_7b = { name: 'qwen2.5:7b', @@ -65,7 +54,7 @@ const QWEN2_5_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_32b = { name: 'qwen2.5:32b', @@ -76,7 +65,7 @@ const QWEN2_5_32b = { }, size: '20gb', context: 32_000, 
-} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_5_72b = { name: 'qwen2.5:72b', @@ -87,7 +76,7 @@ const QWEN2_5_72b = { }, size: '47gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const QWEN2_5_MODELS = [ QWEN2_5_LATEST.name, @@ -99,13 +88,13 @@ export const QWEN2_5_MODELS = [ QWEN2_5_72b.name, ] as const -const QWEN2_5_IMAGE_MODELS = [] as const +// const QWEN2_5_IMAGE_MODELS = [] as const -export const QWEN2_5_EMBEDDING_MODELS = [] as const +// export const QWEN2_5_EMBEDDING_MODELS = [] as const -const QWEN2_5_AUDIO_MODELS = [] as const +// const QWEN2_5_AUDIO_MODELS = [] as const -const QWEN2_5_VIDEO_MODELS = [] as const +// const QWEN2_5_VIDEO_MODELS = [] as const // export type Qwen2_5ChatModels = (typeof QWEN2_5_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts index 0fadb2be..87e42e14 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const QWEN2_LATEST = { name: 'qwen2:latest', @@ -21,7 +10,7 @@ const QWEN2_LATEST = { }, size: '4.4gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_0_5b = { name: 'qwen2:0.5b', @@ -32,7 +21,7 @@ const QWEN2_0_5b = { }, size: '352mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_1_5b = { name: 'qwen2:1.5b', @@ -43,7 +32,7 @@ const QWEN2_1_5b = { }, size: '935mb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_7b = { name: 'qwen2:7b', @@ -54,7 +43,7 @@ const QWEN2_7b = { }, size: '4.4gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN2_72b = { name: 'qwen2:72b', @@ -65,7 +54,7 @@ const QWEN2_72b = { }, size: '41gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const QWEN2_MODELS = [ QWEN2_LATEST.name, @@ -75,13 +64,13 @@ export const QWEN2_MODELS = [ QWEN2_72b.name, ] as const -const QWEN2_IMAGE_MODELS = [] as const +// const QWEN2_IMAGE_MODELS = [] as const -export const QWEN2_EMBEDDING_MODELS = [] as const +// export const QWEN2_EMBEDDING_MODELS = [] as const -const QWEN2_AUDIO_MODELS = [] as const +// const QWEN2_AUDIO_MODELS = [] as const -const QWEN2_VIDEO_MODELS = [] as const +// const QWEN2_VIDEO_MODELS = [] as const // export type Qwen2ChatModels = (typeof QWEN2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts index 1403179d..b3bcbe99 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: 
Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const QWEN3_LATEST = { name: 'qwen3:latest', @@ -21,7 +10,7 @@ const QWEN3_LATEST = { }, size: '5.2gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_0_6b = { name: 'qwen3:0.6b', @@ -32,7 +21,7 @@ const QWEN3_0_6b = { }, size: '523mb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_1_7b = { name: 'qwen3:1.7b', @@ -43,7 +32,7 @@ const QWEN3_1_7b = { }, size: '1.4gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_4b = { name: 'qwen3:4b', @@ -54,7 +43,7 @@ const QWEN3_4b = { }, size: '2.5gb', context: 256_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_8b = { name: 'qwen3:8b', @@ -65,7 +54,7 @@ const QWEN3_8b = { }, size: '5.2gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_14b = { name: 'qwen3:14b', @@ -76,7 +65,7 @@ const QWEN3_14b = { }, size: '9.3gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_30b = { name: 'qwen3:30b', @@ -87,7 +76,7 @@ const QWEN3_30b = { }, size: '19gb', context: 256_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_32b = { name: 'qwen3:32b', @@ -98,7 +87,7 @@ const QWEN3_32b = { }, size: '20gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWEN3_235b = { name: 'qwen3:235b', @@ -109,7 +98,7 @@ const QWEN3_235b = { }, size: '142gb', context: 256_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const QWEN3_MODELS = [ QWEN3_LATEST.name, @@ -123,13 +112,13 @@ export const QWEN3_MODELS = [ QWEN3_235b.name, ] as const -const QWEN3_IMAGE_MODELS = [] as const +// const QWEN3_IMAGE_MODELS = [] as const -export const QWEN3_EMBEDDING_MODELS = [] as const +// export const QWEN3_EMBEDDING_MODELS = [] as const -const QWEN3_AUDIO_MODELS = [] as const +// const QWEN3_AUDIO_MODELS = [] as const -const QWEN3_VIDEO_MODELS = [] as const +// const QWEN3_VIDEO_MODELS = [] as const // export type Qwen3ChatModels = (typeof QWEN3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts index 8a7f1094..41738f15 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const QWQ_LATEST = { name: 'qwq:latest', @@ -21,7 +10,7 @@ const QWQ_LATEST = { }, size: '20gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const QWQ_32b = { name: 'qwq:32b', @@ -32,17 +21,17 @@ const QWQ_32b = { }, size: '20gb', context: 40_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export 
const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const -const QWQ_IMAGE_MODELS = [] as const +// const QWQ_IMAGE_MODELS = [] as const -export const QWQ_EMBEDDING_MODELS = [] as const +// export const QWQ_EMBEDDING_MODELS = [] as const -const QWQ_AUDIO_MODELS = [] as const +// const QWQ_AUDIO_MODELS = [] as const -const QWQ_VIDEO_MODELS = [] as const +// const QWQ_VIDEO_MODELS = [] as const // export type QwqChatModels = (typeof QWQ_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts index 4d413fa1..9a6ae9f6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const SAILOR2_LATEST = { name: 'sailor2:latest', @@ -21,7 +10,7 @@ const SAILOR2_LATEST = { }, size: '5.2gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SAILOR2_1b = { name: 'sailor2:1b', @@ -32,7 +21,7 @@ const SAILOR2_1b = { }, size: '1.1gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SAILOR2_8b = { name: 'sailor2:8b', @@ -43,7 +32,7 @@ const SAILOR2_8b = { }, size: '5.2gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SAILOR2_20b = { name: 'sailor2:20b', @@ -54,7 +43,7 @@ const SAILOR2_20b = { }, size: '12gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const SAILOR2_MODELS = [ SAILOR2_LATEST.name, @@ -62,13 +51,13 @@ export const SAILOR2_MODELS = [ SAILOR2_20b.name, ] as const -const SAILOR2_IMAGE_MODELS = [] as const +// const SAILOR2_IMAGE_MODELS = [] as const -export const SAILOR2_EMBEDDING_MODELS = [] as const +// export const SAILOR2_EMBEDDING_MODELS = [] as const -const SAILOR2_AUDIO_MODELS = [] as const +// const SAILOR2_AUDIO_MODELS = [] as const -const SAILOR2_VIDEO_MODELS = [] as const +// const SAILOR2_VIDEO_MODELS = [] as const // export type Sailor2ChatModels = (typeof SAILOR2_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts index def7958d..62fa1e6f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const SHIELDGEMMA_LATEST = { name: 'shieldgemma:latest', @@ -21,7 +10,7 @@ const SHIELDGEMMA_LATEST = { }, size: '5.8gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SHIELDGEMMA_2b = { name: 'shieldgemma:2b', @@ -32,7 +21,7 @@ const 
SHIELDGEMMA_2b = { }, size: '1.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SHIELDGEMMA_9b = { name: 'shieldgemma:9b', @@ -43,7 +32,7 @@ const SHIELDGEMMA_9b = { }, size: '5.8gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SHIELDGEMMA_27b = { name: 'shieldgemma:27b', @@ -54,7 +43,7 @@ const SHIELDGEMMA_27b = { }, size: '17gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const SHIELDGEMMA_MODELS = [ SHIELDGEMMA_LATEST.name, @@ -63,13 +52,13 @@ export const SHIELDGEMMA_MODELS = [ SHIELDGEMMA_27b.name, ] as const -const SHIELDGEMMA_IMAGE_MODELS = [] as const +// const SHIELDGEMMA_IMAGE_MODELS = [] as const -export const SHIELDGEMMA_EMBEDDING_MODELS = [] as const +// export const SHIELDGEMMA_EMBEDDING_MODELS = [] as const -const SHIELDGEMMA_AUDIO_MODELS = [] as const +// const SHIELDGEMMA_AUDIO_MODELS = [] as const -const SHIELDGEMMA_VIDEO_MODELS = [] as const +// const SHIELDGEMMA_VIDEO_MODELS = [] as const // export type ShieldgemmaChatModels = (typeof SHIELDGEMMA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts index 5a25f511..eafdeb8e 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const SMALLTINKER_LATEST = { name: 'smalltinker:latest', @@ -21,7 +10,7 @@ const SMALLTINKER_LATEST = { }, size: '3.6gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SMALLTINKER_3b = { name: 'smalltinker:3b', @@ -32,20 +21,20 @@ const SMALLTINKER_3b = { }, size: '3.6gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const SMALLTINKER_MODELS = [ SMALLTINKER_LATEST.name, SMALLTINKER_3b.name, ] as const -const SMALLTINKER_IMAGE_MODELS = [] as const +// const SMALLTINKER_IMAGE_MODELS = [] as const -export const SMALLTINKER_EMBEDDING_MODELS = [] as const +// export const SMALLTINKER_EMBEDDING_MODELS = [] as const -const SMALLTINKER_AUDIO_MODELS = [] as const +// const SMALLTINKER_AUDIO_MODELS = [] as const -const SMALLTINKER_VIDEO_MODELS = [] as const +// const SMALLTINKER_VIDEO_MODELS = [] as const // export type SmalltinkerChatModels = (typeof SMALLTINKER_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts index f90782f3..79ebc939 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { 
DefaultOllamaModelMeta } from './models-meta' const SMOLLM_LATEST = { name: 'smollm:latest', @@ -21,7 +10,7 @@ const SMOLLM_LATEST = { }, size: '991mb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SMOLLM_135m = { name: 'smollm:135m', @@ -32,7 +21,7 @@ const SMOLLM_135m = { }, size: '92mb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SMOLLM_360m = { name: 'smollm:360m', @@ -43,7 +32,7 @@ const SMOLLM_360m = { }, size: '229mb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const SMOLLM_1_7b = { name: 'smollm:1.7b', @@ -54,7 +43,7 @@ const SMOLLM_1_7b = { }, size: '991mb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const SMOLLM_MODELS = [ SMOLLM_LATEST.name, @@ -63,13 +52,13 @@ export const SMOLLM_MODELS = [ SMOLLM_1_7b.name, ] as const -const SMOLLM_IMAGE_MODELS = [] as const +// const SMOLLM_IMAGE_MODELS = [] as const -export const SMOLLM_EMBEDDING_MODELS = [] as const +// export const SMOLLM_EMBEDDING_MODELS = [] as const -const SMOLLM_AUDIO_MODELS = [] as const +// const SMOLLM_AUDIO_MODELS = [] as const -const SMOLLM_VIDEO_MODELS = [] as const +// const SMOLLM_VIDEO_MODELS = [] as const // export type SmollmChatModels = (typeof SMOLLM_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts index 4ad0529f..a4b0e110 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const TINNYLLAMA_LATEST = { name: 'tinyllama:latest', @@ -21,7 +10,7 @@ const TINNYLLAMA_LATEST = { }, size: '638mb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const TINNYLLAMA_1_1b = { name: 'tinyllama:1.1b', @@ -32,20 +21,20 @@ const TINNYLLAMA_1_1b = { }, size: '638mb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const TINNYLLAMA_MODELS = [ TINNYLLAMA_LATEST.name, TINNYLLAMA_1_1b.name, ] as const -const TINNYLLAMA_IMAGE_MODELS = [] as const +// const TINNYLLAMA_IMAGE_MODELS = [] as const -export const TINNYLLAMA_EMBEDDING_MODELS = [] as const +// export const TINNYLLAMA_EMBEDDING_MODELS = [] as const -const TINNYLLAMA_AUDIO_MODELS = [] as const +// const TINNYLLAMA_AUDIO_MODELS = [] as const -const TINNYLLAMA_VIDEO_MODELS = [] as const +// const TINNYLLAMA_VIDEO_MODELS = [] as const // export type TinnyllamaChatModels = (typeof TINNYLLAMA_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts index b2a3c275..c76e6519 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: 
Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const TULU3_LATEST = { name: 'tulu3:latest', @@ -21,7 +10,7 @@ const TULU3_LATEST = { }, size: '4.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const TULU3_8b = { name: 'tulu3:8b', @@ -32,7 +21,7 @@ const TULU3_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const TULU3_70b = { name: 'tulu3:70b', @@ -43,7 +32,7 @@ const TULU3_70b = { }, size: '43gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const TULU3_MODELS = [ TULU3_LATEST.name, @@ -51,13 +40,13 @@ export const TULU3_MODELS = [ TULU3_70b.name, ] as const -const TULU3_IMAGE_MODELS = [] as const +// const TULU3_IMAGE_MODELS = [] as const -export const TULU3_EMBEDDING_MODELS = [] as const +// export const TULU3_EMBEDDING_MODELS = [] as const -const TULU3_AUDIO_MODELS = [] as const +// const TULU3_AUDIO_MODELS = [] as const -const TULU3_VIDEO_MODELS = [] as const +// const TULU3_VIDEO_MODELS = [] as const // export type Tulu3ChatModels = (typeof TULU3_MODELS)[number] diff --git a/packages/typescript/ai-ollama/src/meta/models-meta.ts b/packages/typescript/ai-ollama/src/meta/models-meta.ts new file mode 100644 index 00000000..099432b4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/models-meta.ts @@ -0,0 +1,11 @@ +export interface DefaultOllamaModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts index 3ff0d5fd..190a2bbf 100644 --- a/packages/typescript/ai-ollama/src/model-meta.ts +++ b/packages/typescript/ai-ollama/src/model-meta.ts @@ -128,18 +128,6 @@ import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm' import type { TinnyllamaModelInputModalitiesByName } from './meta/model-meta-tinyllama' import type { Tulu3ModelInputModalitiesByName } from './meta/model-meta-tulu3' -export interface LlamaModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} - export const OLLAMA_MODELS = [ ...ATHENE_MODELS, ...AYA_MODELS, diff --git a/packages/typescript/ai-ollama/tsconfig.json b/packages/typescript/ai-ollama/tsconfig.json index e9686b6c..ea11c109 100644 --- a/packages/typescript/ai-ollama/tsconfig.json +++ b/packages/typescript/ai-ollama/tsconfig.json @@ -4,6 +4,6 @@ "outDir": "dist", "rootDir": "src" }, - "include": ["src/**/*.ts", "src/**/*.tsx", "src/meta/model-meta-devstralts"], + "include": ["src/**/*.ts", "src/**/*.tsx"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } From 2e6f6a3e6e22b9e4252b1f8c12b1caed94a33c0f Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Tue, 9 Dec 2025 13:02:15 +0100 Subject: [PATCH 3/6] fix: missing save --- packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts | 2 +- 
.../typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts index 1699950b..b1b66f02 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts @@ -42,7 +42,7 @@ export const GEMMA_MODELS = [ // const GEMMA_IMAGE_MODELS = [] as const -export const GEMMA_EMBEDDING_MODELS = [] as const +// export const GEMMA_EMBEDDING_MODELS = [] as const // const GEMMA_AUDIO_MODELS = [] as const diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts index 62753cc7..8a658e6f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts @@ -42,7 +42,7 @@ export const LLAMA3_GRADIENT_MODELS = [ // const LLAMA3_GRADIENT_IMAGE_MODELS = [] as const -export const LLAMA3_GRADIENT_EMBEDDING_MODELS = [] as const +// export const LLAMA3_GRADIENT_EMBEDDING_MODELS = [] as const // const LLAMA3_GRADIENT_AUDIO_MODELS = [] as const From 46ed7de2bf3b3c644ac444ab444cc34491e61535 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Tue, 9 Dec 2025 13:07:11 +0100 Subject: [PATCH 4/6] feat: add deepseek oct --- packages/typescript/ai-ollama/src/model-meta.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts index 190a2bbf..6e219d78 100644 --- a/packages/typescript/ai-ollama/src/model-meta.ts +++ b/packages/typescript/ai-ollama/src/model-meta.ts @@ -7,6 +7,7 @@ import { COMMAND_R_MODELS } from './meta/model-meta-command-r' import { COMMAND_R_PLUS_MODELS } from './meta/model-meta-command-r-plus' import { COMMAND_R_7b_MODELS } from './meta/model-meta-command-r7b' import { DEEPSEEK_CODER_V2_MODELS } from './meta/model-meta-deepseek-coder-v2' +import { DEEPSEEK_OCR_MODELS } from './meta/model-meta-deepseek-ocr' import { DEEPSEEK_R1_MODELS } from './meta/model-meta-deepseek-r1' import { DEEPSEEK_V3_1_MODELS } from './meta/model-meta-deepseek-v3.1' import { DEVSTRAL_MODELS } from './meta/model-meta-devstral' @@ -72,6 +73,7 @@ import type { CommandRModelInputModalitiesByName } from './meta/model-meta-comma import type { CommandRPlusModelInputModalitiesByName } from './meta/model-meta-command-r-plus' import type { CommandR7bModelInputModalitiesByName } from './meta/model-meta-command-r7b' import type { DeepseekCoderV2ModelInputModalitiesByName } from './meta/model-meta-deepseek-coder-v2' +import type { DeepseekOcrChatModelProviderOptionsByName } from './meta/model-meta-deepseek-ocr' import type { DeepseekR1ModelInputModalitiesByName } from './meta/model-meta-deepseek-r1' import type { Deepseekv3_1ModelInputModalitiesByName } from './meta/model-meta-deepseek-v3.1' import type { DevstralModelInputModalitiesByName } from './meta/model-meta-devstral' @@ -137,6 +139,7 @@ export const OLLAMA_MODELS = [ ...COMMAND_R_MODELS, ...COMMAND_R_7b_MODELS, ...DEEPSEEK_CODER_V2_MODELS, + ...DEEPSEEK_OCR_MODELS, ...DEEPSEEK_R1_MODELS, ...DEEPSEEK_V3_1_MODELS, ...DEVSTRAL_MODELS, @@ -204,6 +207,7 @@ export type OllamaModelInputModalitiesByName = CommandRModelInputModalitiesByName & CommandR7bModelInputModalitiesByName & DeepseekCoderV2ModelInputModalitiesByName & + 
DeepseekOcrChatModelProviderOptionsByName & DeepseekR1ModelInputModalitiesByName & Deepseekv3_1ModelInputModalitiesByName & DevstralModelInputModalitiesByName & From 68a080e8f1e7f8191715e5ea7ff4392e518f2dd5 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Tue, 9 Dec 2025 13:12:03 +0100 Subject: [PATCH 5/6] fix: ollama type missmatch --- packages/typescript/ai-ollama/src/model-meta.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts index 6e219d78..1b9a3e8c 100644 --- a/packages/typescript/ai-ollama/src/model-meta.ts +++ b/packages/typescript/ai-ollama/src/model-meta.ts @@ -73,7 +73,7 @@ import type { CommandRModelInputModalitiesByName } from './meta/model-meta-comma import type { CommandRPlusModelInputModalitiesByName } from './meta/model-meta-command-r-plus' import type { CommandR7bModelInputModalitiesByName } from './meta/model-meta-command-r7b' import type { DeepseekCoderV2ModelInputModalitiesByName } from './meta/model-meta-deepseek-coder-v2' -import type { DeepseekOcrChatModelProviderOptionsByName } from './meta/model-meta-deepseek-ocr' +import type { DeepseekOcrModelInputModalitiesByName } from './meta/model-meta-deepseek-ocr' import type { DeepseekR1ModelInputModalitiesByName } from './meta/model-meta-deepseek-r1' import type { Deepseekv3_1ModelInputModalitiesByName } from './meta/model-meta-deepseek-v3.1' import type { DevstralModelInputModalitiesByName } from './meta/model-meta-devstral' @@ -207,7 +207,7 @@ export type OllamaModelInputModalitiesByName = CommandRModelInputModalitiesByName & CommandR7bModelInputModalitiesByName & DeepseekCoderV2ModelInputModalitiesByName & - DeepseekOcrChatModelProviderOptionsByName & + DeepseekOcrModelInputModalitiesByName & DeepseekR1ModelInputModalitiesByName & Deepseekv3_1ModelInputModalitiesByName & DevstralModelInputModalitiesByName & From fd750de122b6c8d0f0d4ffa61090c9e307ad73f6 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Tue, 9 Dec 2025 14:04:11 +0100 Subject: [PATCH 6/6] chore: more clean up --- .../ai-ollama/src/meta/model-meta-athene.ts | 17 +++--------- .../ai-ollama/src/meta/model-meta-aya.ts | 19 +++---------- .../src/meta/model-meta-codegemma.ts | 19 +++---------- .../src/meta/model-meta-codellama.ts | 23 +++++----------- .../src/meta/model-meta-command-r-plus.ts | 17 +++--------- .../src/meta/model-meta-command-r.ts | 17 +++--------- .../src/meta/model-meta-command-r7b.ts | 17 +++--------- .../src/meta/model-meta-deepseek-coder-v2.ts | 19 +++---------- .../src/meta/model-meta-deepseek-ocr.ts | 17 +++--------- .../src/meta/model-meta-deepseek-r1.ts | 27 ++++++------------- .../src/meta/model-meta-deepseek-v3.1.ts | 19 +++---------- .../ai-ollama/src/meta/model-meta-devstral.ts | 17 +++--------- .../ai-ollama/src/meta/model-meta-dolphin3.ts | 17 +++--------- 13 files changed, 51 insertions(+), 194 deletions(-) diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts index fe46b033..5e10c76a 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - 
} - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const ATHENE_V2_LATEST = { name: 'athene-v2:latest', @@ -21,7 +10,7 @@ const ATHENE_V2_LATEST = { }, size: '47gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const ATHENE_V2_72b = { name: 'athene-v2:72b', @@ -32,7 +21,7 @@ const ATHENE_V2_72b = { }, size: '47gb', context: 32_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const ATHENE_MODELS = [ ATHENE_V2_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts index 16bd16b0..38a93989 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const AYA_LATEST = { name: 'aya:latest', @@ -21,7 +10,7 @@ const AYA_LATEST = { }, size: '4.8gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const AYA_8b = { name: 'aya:8b', @@ -32,7 +21,7 @@ const AYA_8b = { }, size: '4.8gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const AYA_35b = { name: 'aya:35b', @@ -43,7 +32,7 @@ const AYA_35b = { }, size: '20gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts index 93937eda..f424c512 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const CODEGEMMA_LATEST = { name: 'codegemma:latest', @@ -21,7 +10,7 @@ const CODEGEMMA_LATEST = { }, size: '5gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const CODEGEMMA_8b = { name: 'codegemma:2b', @@ -32,7 +21,7 @@ const CODEGEMMA_8b = { }, size: '1.65gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const CODEGEMMA_35b = { name: 'codegemma:7b', @@ -43,7 +32,7 @@ const CODEGEMMA_35b = { }, size: '5gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const CODEGEMMA_MODELS = [ CODEGEMMA_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts index 2a0b3b54..df9a7786 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts +++ 
b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const CODELLAMA_LATEST = { name: 'codellama:latest', @@ -21,7 +10,7 @@ const CODELLAMA_LATEST = { }, size: '3.8gb', context: 16_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const CODELLAMA_7b = { name: 'codellama:7b', @@ -32,7 +21,7 @@ const CODELLAMA_7b = { }, size: '3.8gb', context: 16_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const CODELLAMA_13b = { name: 'codellama:13b', @@ -43,7 +32,7 @@ const CODELLAMA_13b = { }, size: '7.4gb', context: 16_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const CODELLAMA_34b = { name: 'codellama:34b', @@ -54,7 +43,7 @@ const CODELLAMA_34b = { }, size: '19gb', context: 16_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const CODELLAMA_70b = { name: 'codellama:70b', @@ -65,7 +54,7 @@ const CODELLAMA_70b = { }, size: '39gb', context: 2_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const CODELLAMA_MODELS = [ CODELLAMA_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts index 941a7e9f..84971561 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const COMMAND_R_PLUS_LATEST = { name: 'command-r-plus:latest', @@ -21,7 +10,7 @@ const COMMAND_R_PLUS_LATEST = { }, size: '59gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const COMMAND_R_PLUS_104b = { name: 'command-r-plus:104b', @@ -32,7 +21,7 @@ const COMMAND_R_PLUS_104b = { }, size: '59gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const COMMAND_R_PLUS_MODELS = [ COMMAND_R_PLUS_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts index afce50e7..a4f47e07 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const COMMAND_R_LATEST = { name: 
'command-r:latest', @@ -21,7 +10,7 @@ const COMMAND_R_LATEST = { }, size: '19gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const COMMAND_R_35b = { name: 'command-r:35b', @@ -32,7 +21,7 @@ const COMMAND_R_35b = { }, size: '19gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const COMMAND_R_MODELS = [ COMMAND_R_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts index eebee3b6..e510d404 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const COMMAND_R_7b_LATEST = { name: 'command-r7b:latest', @@ -21,7 +10,7 @@ const COMMAND_R_7b_LATEST = { }, size: '5.1gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const COMMAND_R_7b_7b = { name: 'command-r7b:7b', @@ -32,7 +21,7 @@ const COMMAND_R_7b_7b = { }, size: '5.1gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const COMMAND_R_7b_MODELS = [ COMMAND_R_7b_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts index 2c274a6d..eada87a9 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const DEEPSEEK_CODER_V2_LATEST = { name: 'deepseek-coder-v2:latest', @@ -21,7 +10,7 @@ const DEEPSEEK_CODER_V2_LATEST = { }, size: '4.8gb', context: 160_900, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_CODER_V2_16b = { name: 'deepseek-coder-v2:16b', @@ -32,7 +21,7 @@ const DEEPSEEK_CODER_V2_16b = { }, size: '8.9gb', context: 160_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_CODER_V2_236b = { name: 'deepseek-coder-v2:236b', @@ -43,7 +32,7 @@ const DEEPSEEK_CODER_V2_236b = { }, size: '133gb', context: 4_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const DEEPSEEK_CODER_V2_MODELS = [ DEEPSEEK_CODER_V2_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts index 4d3b4266..e3fa0d6c 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - 
-interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const DEEPSEEK_OCR_LATEST = { name: 'deepseek-ocr:latest', @@ -21,7 +10,7 @@ const DEEPSEEK_OCR_LATEST = { }, size: '6.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_OCR_3b = { name: 'deepseek-ocr:3b', @@ -33,7 +22,7 @@ const DEEPSEEK_OCR_3b = { size: '6.7gb', context: 8_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const DEEPSEEK_OCR_MODELS = [ DEEPSEEK_OCR_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts index 1159f05b..470efd36 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const DEEPSEEK_R1_LATEST = { name: 'deepseek-r1:latest', @@ -21,7 +10,7 @@ const DEEPSEEK_R1_LATEST = { }, size: '5.2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_R1_1_5b = { name: 'deepseek-r1:1.5b', @@ -32,7 +21,7 @@ const DEEPSEEK_R1_1_5b = { }, size: '1.1gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_R1_7b = { name: 'deepseek-r1:7b', @@ -43,7 +32,7 @@ const DEEPSEEK_R1_7b = { }, size: '4.7gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_R1_8b = { name: 'deepseek-r1:8b', @@ -54,7 +43,7 @@ const DEEPSEEK_R1_8b = { }, size: '5.2gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_R1_32b = { name: 'deepseek-r1:32b', @@ -65,7 +54,7 @@ const DEEPSEEK_R1_32b = { }, size: '20gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_R1_70b = { name: 'deepseek-r1:70b', @@ -76,7 +65,7 @@ const DEEPSEEK_R1_70b = { }, size: '43gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_R1_671b = { name: 'deepseek-r1:671b', @@ -87,7 +76,7 @@ const DEEPSEEK_R1_671b = { }, size: '404gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const DEEPSEEK_R1_MODELS = [ DEEPSEEK_R1_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts index 0cd5e3ce..413413ed 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions 
- supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const DEEPSEEK_V3_1_LATEST = { name: 'deepseek-v3.1:latest', @@ -21,7 +10,7 @@ const DEEPSEEK_V3_1_LATEST = { }, size: '404gb', context: 160_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_V3_1_671b = { name: 'deepseek-v3.1:671', @@ -33,7 +22,7 @@ const DEEPSEEK_V3_1_671b = { size: '404gb', context: 160_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEEPSEEK_V3_1_671b_cloud = { name: 'deepseek-v3.1:671-cloud', @@ -44,7 +33,7 @@ const DEEPSEEK_V3_1_671b_cloud = { }, size: '404gb', context: 160_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const DEEPSEEK_V3_1_MODELS = [ DEEPSEEK_V3_1_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts index 6c1fff1d..246729ca 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const DEVSTRAL_LATEST = { name: 'devstral:latest', @@ -21,7 +10,7 @@ const DEVSTRAL_LATEST = { }, size: '14gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DEVSTRAL_24b = { name: 'devstral:24b', @@ -32,7 +21,7 @@ const DEVSTRAL_24b = { }, size: '14gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const DEVSTRAL_MODELS = [ DEVSTRAL_LATEST.name, diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts index d5b53f63..f45d44b4 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts @@ -1,16 +1,5 @@ import type { ChatRequest } from 'ollama' - -interface ModelMeta { - name: string - providerOptions?: TProviderOptions - supports?: { - input?: Array<'text' | 'image' | 'video'> - output?: Array<'text' | 'image' | 'video'> - capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> - } - size?: string - context?: number -} +import type { DefaultOllamaModelMeta } from './models-meta' const DOLPHIN3_LATEST = { name: 'dolphin3:latest', @@ -21,7 +10,7 @@ const DOLPHIN3_LATEST = { }, size: '4.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta const DOLPHIN3_8b = { name: 'dolphin3:8b', @@ -32,7 +21,7 @@ const DOLPHIN3_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies ModelMeta +} as const satisfies DefaultOllamaModelMeta export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const
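Note on [PATCH 5/6]: OllamaModelInputModalitiesByName is composed as an intersection of the per-family *ModelInputModalitiesByName maps, so the provider-options map that was previously pulled in under the old import name did not belong in that intersection; the rename keeps the composed lookup table consistent. Below is a minimal TypeScript sketch of how such an intersection behaves. Only the type names come from the patch; the member shapes are hypothetical.

// Hypothetical members -- the real ones live in model-meta-deepseek-ocr.ts
type DeepseekOcrModelInputModalitiesByName = {
  'deepseek-ocr:latest': Array<'text' | 'image'>
  'deepseek-ocr:3b': Array<'text' | 'image'>
}

// Hypothetical members -- the real ones live in model-meta-deepseek-r1.ts
type DeepseekR1ModelInputModalitiesByName = {
  'deepseek-r1:latest': Array<'text'>
  'deepseek-r1:8b': Array<'text'>
}

// The intersection acts as a single lookup table keyed by every known model tag.
type OllamaModelInputModalitiesByName =
  DeepseekOcrModelInputModalitiesByName &
  DeepseekR1ModelInputModalitiesByName

// Resolves to Array<'text' | 'image'>
type DeepseekOcrInputs = OllamaModelInputModalitiesByName['deepseek-ocr:latest']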
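Note on [PATCH 6/6]: the duplicated inline ModelMeta interface is removed from every model-meta-*.ts file in favour of a shared DefaultOllamaModelMeta imported from './models-meta'. That module is not included in this series, so the following is only a sketch of what it presumably exports, reconstructed from the deleted interface. The OllamaModelMeta name, the Partial<ChatRequest> generic default, and the ReadonlyArray element types (assumed so the files' "as const satisfies" pattern type-checks) are assumptions, not confirmed by the patch.

import type { ChatRequest } from 'ollama'

// Shared meta shape, mirroring the inline interface removed from each file.
export interface OllamaModelMeta<TProviderOptions = Partial<ChatRequest>> {
  name: string
  providerOptions?: TProviderOptions
  supports?: {
    input?: ReadonlyArray<'text' | 'image' | 'video'>
    output?: ReadonlyArray<'text' | 'image' | 'video'>
    capabilities?: ReadonlyArray<'tools' | 'thinking' | 'vision' | 'embedding'>
  }
  size?: string // human-readable download size, e.g. '4.8gb'
  context?: number // context window in tokens
}

// The alias each model-meta-*.ts file now satisfies.
export type DefaultOllamaModelMeta = OllamaModelMeta

// Usage following the pattern in the patched files (model name is illustrative):
export const EXAMPLE_MODEL = {
  name: 'example:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '4.9gb',
  context: 128_000,
} as const satisfies DefaultOllamaModelMeta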