diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts new file mode 100644 index 00000000..5e10c76a --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const ATHENE_V2_LATEST = { + name: 'athene-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const ATHENE_V2_72b = { + name: 'athene-v2:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const ATHENE_MODELS = [ + ATHENE_V2_LATEST.name, + ATHENE_V2_72b.name, +] as const + +// const ATHENE_IMAGE_MODELS = [] as const + +// export const ATHENE_EMBEDDING_MODELS = [] as const + +// const ATHENE_AUDIO_MODELS = [] as const + +// const ATHENE_VIDEO_MODELS = [] as const + +// export type AtheneChatModels = (typeof ATHENE_MODELS)[number] + +// Manual type map for per-model provider options +export type AtheneChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [ATHENE_V2_LATEST.name]: ChatRequest + [ATHENE_V2_72b.name]: ChatRequest +} + +export type AtheneModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input + [ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts new file mode 100644 index 00000000..38a93989 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -0,0 +1,62 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const AYA_LATEST = { + name: 'aya:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const AYA_8b = { + name: 'aya:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const AYA_35b = { + name: 'aya:35b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '20gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const + +// const AYA_IMAGE_MODELS = [] as const + +// export const AYA_EMBEDDING_MODELS = [] as const + +// const AYA_AUDIO_MODELS = [] as const + +// const AYA_VIDEO_MODELS = [] as const + +// export type AyaChatModels = (typeof AYA_MODELS)[number] + +// Manual type map for per-model provider options +export type AyaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [AYA_LATEST.name]: ChatRequest + [AYA_8b.name]: ChatRequest + [AYA_35b.name]: ChatRequest +} + +export type AyaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [AYA_LATEST.name]: typeof AYA_LATEST.supports.input + [AYA_8b.name]: typeof AYA_8b.supports.input + [AYA_35b.name]: typeof AYA_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts 
b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts new file mode 100644 index 00000000..f424c512 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const CODEGEMMA_LATEST = { + name: 'codegemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const CODEGEMMA_8b = { + name: 'codegemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.65gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const CODEGEMMA_35b = { + name: 'codegemma:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const CODEGEMMA_MODELS = [ + CODEGEMMA_LATEST.name, + CODEGEMMA_8b.name, + CODEGEMMA_35b.name, +] as const + +// const CODEGEMMA_IMAGE_MODELS = [] as const + +// export const CODEGEMMA_EMBEDDING_MODELS = [] as const + +// const CODEGEMMA_AUDIO_MODELS = [] as const + +// const CODEGEMMA_VIDEO_MODELS = [] as const + +// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type CodegemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [CODEGEMMA_LATEST.name]: ChatRequest + [CODEGEMMA_8b.name]: ChatRequest + [CODEGEMMA_35b.name]: ChatRequest +} + +export type CodegemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input + [CODEGEMMA_8b.name]: typeof CODEGEMMA_8b.supports.input + [CODEGEMMA_35b.name]: typeof CODEGEMMA_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts new file mode 100644 index 00000000..df9a7786 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -0,0 +1,94 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const CODELLAMA_LATEST = { + name: 'codellama:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_7b = { + name: 'codellama:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_13b = { + name: 'codellama:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.4gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_34b = { + name: 'codellama:34b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '19gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_70b = { + name: 'codellama:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '39gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const CODELLAMA_MODELS = [ + CODELLAMA_LATEST.name, + CODELLAMA_7b.name, + CODELLAMA_13b.name, + CODELLAMA_34b.name, + CODELLAMA_70b.name, +] as const + +// const 
CODELLAMA_IMAGE_MODELS = [] as const + +// export const CODELLAMA_EMBEDDING_MODELS = [] as const + +// const CODELLAMA_AUDIO_MODELS = [] as const + +// const CODELLAMA_VIDEO_MODELS = [] as const + +// export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number] + +// Manual type map for per-model provider options +export type CodellamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [CODELLAMA_LATEST.name]: ChatRequest + [CODELLAMA_7b.name]: ChatRequest + [CODELLAMA_13b.name]: ChatRequest + [CODELLAMA_34b.name]: ChatRequest + [CODELLAMA_70b.name]: ChatRequest +} + +export type CodellamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [CODELLAMA_LATEST.name]: typeof CODELLAMA_LATEST.supports.input + [CODELLAMA_7b.name]: typeof CODELLAMA_7b.supports.input + [CODELLAMA_13b.name]: typeof CODELLAMA_13b.supports.input + [CODELLAMA_34b.name]: typeof CODELLAMA_34b.supports.input + [CODELLAMA_70b.name]: typeof CODELLAMA_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts new file mode 100644 index 00000000..84971561 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const COMMAND_R_PLUS_LATEST = { + name: 'command-r-plus:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '59gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const COMMAND_R_PLUS_104b = { + name: 'command-r-plus:104b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '59gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const COMMAND_R_PLUS_MODELS = [ + COMMAND_R_PLUS_LATEST.name, + COMMAND_R_PLUS_104b.name, +] as const + +// const COMMAND_R_PLUS_IMAGE_MODELS = [] as const + +// export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const + +// const COMMAND_R_PLUS_AUDIO_MODELS = [] as const + +// const COMMAND_R_PLUS_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R_PLUS_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandRPlusChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_PLUS_LATEST.name]: ChatRequest + [COMMAND_R_PLUS_104b.name]: ChatRequest +} + +export type CommandRPlusModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_PLUS_LATEST.name]: typeof COMMAND_R_PLUS_LATEST.supports.input + [COMMAND_R_PLUS_104b.name]: typeof COMMAND_R_PLUS_104b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts new file mode 100644 index 00000000..a4f47e07 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const COMMAND_R_LATEST = { + name: 'command-r:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '19gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const COMMAND_R_35b = { + name: 'command-r:35b', + supports: { + input: ['text'], + 
output: ['text'], + capabilities: ['tools'], + }, + size: '19gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const COMMAND_R_MODELS = [ + COMMAND_R_LATEST.name, + COMMAND_R_35b.name, +] as const + +// const COMMAND_R_IMAGE_MODELS = [] as const + +// export const COMMAND_R_EMBEDDING_MODELS = [] as const + +// const COMMAND_R_AUDIO_MODELS = [] as const + +// const COMMAND_R_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandRChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_LATEST.name]: ChatRequest + [COMMAND_R_35b.name]: ChatRequest +} + +export type CommandRModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_LATEST.name]: typeof COMMAND_R_LATEST.supports.input + [COMMAND_R_35b.name]: typeof COMMAND_R_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts new file mode 100644 index 00000000..e510d404 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const COMMAND_R_7b_LATEST = { + name: 'command-r7b:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5.1gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const COMMAND_R_7b_7b = { + name: 'command-r7b:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5.1gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const COMMAND_R_7b_MODELS = [ + COMMAND_R_7b_LATEST.name, + COMMAND_R_7b_7b.name, +] as const + +// const COMMAND_R_7b_IMAGE_MODELS = [] as const + +// export const COMMAND_R_7b_EMBEDDING_MODELS = [] as const + +// const COMMAND_R_7b_AUDIO_MODELS = [] as const + +// const COMMAND_R_7b_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R7b_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandR7bChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_7b_LATEST.name]: ChatRequest + [COMMAND_R_7b_7b.name]: ChatRequest +} + +export type CommandR7bModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_7b_LATEST.name]: typeof COMMAND_R_7b_LATEST.supports.input + [COMMAND_R_7b_7b.name]: typeof COMMAND_R_7b_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts new file mode 100644 index 00000000..eada87a9 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_CODER_V2_LATEST = { + name: 'deepseek-coder-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 160_900, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_CODER_V2_16b = { + name: 'deepseek-coder-v2:16b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.9gb', + 
context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_CODER_V2_236b = { + name: 'deepseek-coder-v2:236b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '133gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_CODER_V2_MODELS = [ + DEEPSEEK_CODER_V2_LATEST.name, + DEEPSEEK_CODER_V2_16b.name, + DEEPSEEK_CODER_V2_236b.name, +] as const + +// const DEEPSEEK_CODER_V2_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_CODER_V2_EMBEDDING_MODELS = [] as const + +// const DEEPSEEK_CODER_V2_AUDIO_MODELS = [] as const + +// const DEEPSEEK_CODER_V2_VIDEO_MODELS = [] as const + +// export type DeepseekCoderV2ChatModels = (typeof DEEPSEEK_CODER_V2_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekCoderV2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_CODER_V2_LATEST.name]: ChatRequest + [DEEPSEEK_CODER_V2_16b.name]: ChatRequest + [DEEPSEEK_CODER_V2_236b.name]: ChatRequest +} + +export type DeepseekCoderV2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_CODER_V2_LATEST.name]: typeof DEEPSEEK_CODER_V2_LATEST.supports.input + [DEEPSEEK_CODER_V2_16b.name]: typeof DEEPSEEK_CODER_V2_16b.supports.input + [DEEPSEEK_CODER_V2_236b.name]: typeof DEEPSEEK_CODER_V2_236b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts new file mode 100644 index 00000000..e3fa0d6c --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts @@ -0,0 +1,53 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_OCR_LATEST = { + name: 'deepseek-ocr:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '6.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_OCR_3b = { + name: 'deepseek-ocr:3b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + + size: '6.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_OCR_MODELS = [ + DEEPSEEK_OCR_LATEST.name, + DEEPSEEK_OCR_3b.name, +] as const + +// export const DEEPSEEK_OCR_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_OCR_EMBEDDING_MODELS = [] as const + +// export const DEEPSEEK_OCR_AUDIO_MODELS = [] as const + +// export const DEEPSEEK_OCR_VIDEO_MODELS = [] as const + +// export type DeepseekOcrChatModels = (typeof DEEPSEEK_OCR__MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekOcrChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_OCR_LATEST.name]: ChatRequest + [DEEPSEEK_OCR_3b.name]: ChatRequest +} + +export type DeepseekOcrModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_OCR_LATEST.name]: typeof DEEPSEEK_OCR_LATEST.supports.input + [DEEPSEEK_OCR_3b.name]: typeof DEEPSEEK_OCR_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts new file mode 100644 index 00000000..470efd36 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts @@ -0,0 +1,122 @@ +import type { 
ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_R1_LATEST = { + name: 'deepseek-r1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_1_5b = { + name: 'deepseek-r1:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '1.1gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_7b = { + name: 'deepseek-r1:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '4.7gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_8b = { + name: 'deepseek-r1:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_32b = { + name: 'deepseek-r1:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '20gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_70b = { + name: 'deepseek-r1:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_671b = { + name: 'deepseek-r1:671b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_R1_MODELS = [ + DEEPSEEK_R1_LATEST.name, + DEEPSEEK_R1_1_5b.name, + DEEPSEEK_R1_7b.name, + DEEPSEEK_R1_8b.name, + DEEPSEEK_R1_32b.name, + DEEPSEEK_R1_70b.name, + DEEPSEEK_R1_671b.name, +] as const + +// const DEEPSEEK_R1_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_R1_EMBEDDING_MODELS = [] as const + +// const DEEPSEEK_R1_AUDIO_MODELS = [] as const + +// const DEEPSEEK_R1_VIDEO_MODELS = [] as const + +// export type DeepseekChatModels = (typeof DEEPSEEK_R1_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekR1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_R1_LATEST.name]: ChatRequest + [DEEPSEEK_R1_1_5b.name]: ChatRequest + [DEEPSEEK_R1_7b.name]: ChatRequest + [DEEPSEEK_R1_8b.name]: ChatRequest + [DEEPSEEK_R1_32b.name]: ChatRequest + [DEEPSEEK_R1_70b.name]: ChatRequest + [DEEPSEEK_R1_671b.name]: ChatRequest +} + +export type DeepseekR1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_R1_LATEST.name]: typeof DEEPSEEK_R1_LATEST.supports.input + [DEEPSEEK_R1_1_5b.name]: typeof DEEPSEEK_R1_1_5b.supports.input + [DEEPSEEK_R1_7b.name]: typeof DEEPSEEK_R1_7b.supports.input + [DEEPSEEK_R1_8b.name]: typeof DEEPSEEK_R1_8b.supports.input + [DEEPSEEK_R1_32b.name]: typeof DEEPSEEK_R1_32b.supports.input + [DEEPSEEK_R1_70b.name]: typeof DEEPSEEK_R1_70b.supports.input + [DEEPSEEK_R1_671b.name]: typeof DEEPSEEK_R1_671b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts new file mode 100644 index 00000000..413413ed --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts @@ -0,0 +1,67 
@@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_V3_1_LATEST = { + name: 'deepseek-v3.1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_V3_1_671b = { + name: 'deepseek-v3.1:671b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + + size: '404gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_V3_1_671b_cloud = { + name: 'deepseek-v3.1:671b-cloud', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_V3_1_MODELS = [ + DEEPSEEK_V3_1_LATEST.name, + DEEPSEEK_V3_1_671b.name, + DEEPSEEK_V3_1_671b_cloud.name, +] as const + +// export const DEEPSEEK_V3_1_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_V3_1_EMBEDDING_MODELS = [] as const + +// export const DEEPSEEK_V3_1_AUDIO_MODELS = [] as const + +// export const DEEPSEEK_V3_1_VIDEO_MODELS = [] as const + +// export type DeepseekV3_1ChatModels = (typeof DEEPSEEK_V3_1_MODELS)[number] + +// Manual type map for per-model provider options +export type Deepseekv3_1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_V3_1_LATEST.name]: ChatRequest + [DEEPSEEK_V3_1_671b.name]: ChatRequest + [DEEPSEEK_V3_1_671b_cloud.name]: ChatRequest +} + +export type Deepseekv3_1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_V3_1_LATEST.name]: typeof DEEPSEEK_V3_1_LATEST.supports.input + [DEEPSEEK_V3_1_671b.name]: typeof DEEPSEEK_V3_1_671b.supports.input + [DEEPSEEK_V3_1_671b_cloud.name]: typeof DEEPSEEK_V3_1_671b_cloud.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts new file mode 100644 index 00000000..246729ca --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEVSTRAL_LATEST = { + name: 'devstral:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEVSTRAL_24b = { + name: 'devstral:24b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEVSTRAL_MODELS = [ + DEVSTRAL_LATEST.name, + DEVSTRAL_24b.name, +] as const + +// const DEVSTRAL_IMAGE_MODELS = [] as const + +// export const DEVSTRAL_EMBEDDING_MODELS = [] as const + +// const DEVSTRAL_AUDIO_MODELS = [] as const + +// const DEVSTRAL_VIDEO_MODELS = [] as const + +// export type DevstralChatModels = (typeof DEVSTRAL_MODELS)[number] + +// Manual type map for per-model provider options +export type DevstralChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEVSTRAL_LATEST.name]: ChatRequest + [DEVSTRAL_24b.name]: ChatRequest +} + +export type DevstralModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEVSTRAL_LATEST.name]: typeof
DEVSTRAL_LATEST.supports.input + [DEVSTRAL_24b.name]: typeof DEVSTRAL_24b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts new file mode 100644 index 00000000..f45d44b4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DOLPHIN3_LATEST = { + name: 'dolphin3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DOLPHIN3_8b = { + name: 'dolphin3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const + +// const DOLPHIN3_IMAGE_MODELS = [] as const + +// export const DOLPHIN3_EMBEDDING_MODELS = [] as const + +// const DOLPHIN3_AUDIO_MODELS = [] as const + +// const DOLPHIN3_VIDEO_MODELS = [] as const + +// export type Dolphin3ChatModels = (typeof DOLPHIN3_MODELS)[number] + +// Manual type map for per-model provider options +export type Dolphin3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DOLPHIN3_LATEST.name]: ChatRequest + [DOLPHIN3_8b.name]: ChatRequest +} + +export type Dolphin3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DOLPHIN3_LATEST.name]: typeof DOLPHIN3_LATEST.supports.input + [DOLPHIN3_8b.name]: typeof DOLPHIN3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts new file mode 100644 index 00000000..131f57c1 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const EXAONE3_5_LATEST = { + name: 'exaone3.5:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const EXAONE3_5_2_4b = { + name: 'exaone3.5:2.4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const EXAONE3_5_7_1b = { + name: 'exaone3.5:7.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const EXAONE3_5_32b = { + name: 'exaone3.5:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '19gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const EXAONE3_5MODELS = [ + EXAONE3_5_LATEST.name, + EXAONE3_5_2_4b.name, + EXAONE3_5_7_1b.name, + EXAONE3_5_32b.name, +] as const + +// const EXAONE3_5IMAGE_MODELS = [] as const + +// export const EXAONE3_5EMBEDDING_MODELS = [] as const + +// const EXAONE3_5AUDIO_MODELS = [] as const + +// const EXAONE3_5VIDEO_MODELS = [] as const + +// export type AyaChatModels = (typeof EXAONE3_5MODELS)[number] + +// Manual type map for per-model provider options +export type Exaone3_5ChatModelProviderOptionsByName = { + // Models with thinking and structured output 
support + [EXAONE3_5_LATEST.name]: ChatRequest + [EXAONE3_5_2_4b.name]: ChatRequest + [EXAONE3_5_7_1b.name]: ChatRequest + [EXAONE3_5_32b.name]: ChatRequest +} + +export type Exaone3_5ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [EXAONE3_5_LATEST.name]: typeof EXAONE3_5_LATEST.supports.input + [EXAONE3_5_2_4b.name]: typeof EXAONE3_5_2_4b.supports.input + [EXAONE3_5_7_1b.name]: typeof EXAONE3_5_7_1b.supports.input + [EXAONE3_5_32b.name]: typeof EXAONE3_5_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts new file mode 100644 index 00000000..f353b2f4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const FALCON2_LATEST = { + name: 'falcon2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.4gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON2_11b = { + name: 'falcon2:11b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.4gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const + +// const FALCON2_IMAGE_MODELS = [] as const + +// export const FALCON2_EMBEDDING_MODELS = [] as const + +// const FALCON2_AUDIO_MODELS = [] as const + +// const FALCON2_VIDEO_MODELS = [] as const + +// export type Falcon2ChatModels = (typeof FALCON2_MODELS)[number] + +// Manual type map for per-model provider options +export type Falcon2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FALCON2_LATEST.name]: ChatRequest + [FALCON2_11b.name]: ChatRequest +} + +export type Falcon2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FALCON2_LATEST.name]: typeof FALCON2_LATEST.supports.input + [FALCON2_11b.name]: typeof FALCON2_11b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts new file mode 100644 index 00000000..50e15cee --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts @@ -0,0 +1,94 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const FALCON3_LATEST = { + name: 'falcon3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_1b = { + name: 'falcon3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_3b = { + name: 'falcon3:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_7b = { + name: 'falcon3:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_10b = { + name: 'falcon3:10b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.3gb', + context: 32_000, +} as const satisfies 
DefaultOllamaModelMeta + +export const FALCON3_MODELS = [ + FALCON3_LATEST.name, + FALCON3_1b.name, + FALCON3_3b.name, + FALCON3_7b.name, + FALCON3_10b.name, +] as const + +// const FALCON3_IMAGE_MODELS = [] as const + +// export const FALCON3_EMBEDDING_MODELS = [] as const + +// const FALCON3_AUDIO_MODELS = [] as const + +// const FALCON3_VIDEO_MODELS = [] as const + +// export type Falcon3ChatModels = (typeof FALCON3_MODELS)[number] + +// Manual type map for per-model provider options +export type Falcon3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FALCON3_LATEST.name]: ChatRequest + [FALCON3_1b.name]: ChatRequest + [FALCON3_3b.name]: ChatRequest + [FALCON3_7b.name]: ChatRequest + [FALCON3_10b.name]: ChatRequest +} + +export type Falcon3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FALCON3_LATEST.name]: typeof FALCON3_LATEST.supports.input + [FALCON3_1b.name]: typeof FALCON3_1b.supports.input + [FALCON3_3b.name]: typeof FALCON3_3b.supports.input + [FALCON3_7b.name]: typeof FALCON3_7b.supports.input + [FALCON3_10b.name]: typeof FALCON3_10b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts new file mode 100644 index 00000000..517616a4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const FIREFUNCTION_V2_LATEST = { + name: 'firefunction-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const FIREFUNCTION_V2_70b = { + name: 'firefunction-v2:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const FIREFUNCTION_V2_MODELS = [ + FIREFUNCTION_V2_LATEST.name, + FIREFUNCTION_V2_70b.name, +] as const + +// const FIREFUNCTION_V2_IMAGE_MODELS = [] as const + +// export const FIREFUNCTION_V2_EMBEDDING_MODELS = [] as const + +// const FIREFUNCTION_V2_AUDIO_MODELS = [] as const + +// const FIREFUNCTION_V2_VIDEO_MODELS = [] as const + +// export type Firefunction_V2ChatModels = (typeof FIREFUNCTION_V2_MODELS)[number] + +// Manual type map for per-model provider options +export type Firefunction_V2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FIREFUNCTION_V2_LATEST.name]: ChatRequest + [FIREFUNCTION_V2_70b.name]: ChatRequest +} + +export type Firefunction_V2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FIREFUNCTION_V2_LATEST.name]: typeof FIREFUNCTION_V2_LATEST.supports.input + [FIREFUNCTION_V2_70b.name]: typeof FIREFUNCTION_V2_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts new file mode 100644 index 00000000..b1b66f02 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GEMMA_LATEST = { + name: 'gemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies 
DefaultOllamaModelMeta + +const GEMMA_2b = { + name: 'gemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA_7b = { + name: 'gemma:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const GEMMA_MODELS = [ + GEMMA_LATEST.name, + GEMMA_2b.name, + GEMMA_7b.name, +] as const + +// const GEMMA_IMAGE_MODELS = [] as const + +// export const GEMMA_EMBEDDING_MODELS = [] as const + +// const GEMMA_AUDIO_MODELS = [] as const + +// const GEMMA_VIDEO_MODELS = [] as const + +// export type GemmaChatModels = (typeof GEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type GemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA_LATEST.name]: ChatRequest + [GEMMA_2b.name]: ChatRequest + [GEMMA_7b.name]: ChatRequest +} + +export type GemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA_LATEST.name]: typeof GEMMA_LATEST.supports.input + [GEMMA_2b.name]: typeof GEMMA_2b.supports.input + [GEMMA_7b.name]: typeof GEMMA_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts new file mode 100644 index 00000000..b5b594a8 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GEMMA2_LATEST = { + name: 'gemma2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.4gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA2_2b = { + name: 'gemma2:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA2_9b = { + name: 'gemma2:9b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.4gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA2_27b = { + name: 'gemma2:27b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '16gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const GEMMA2_MODELS = [ + GEMMA2_LATEST.name, + GEMMA2_2b.name, + GEMMA2_9b.name, + GEMMA2_27b.name, +] as const + +// const GEMMA2_IMAGE_MODELS = [] as const + +// export const GEMMA2_EMBEDDING_MODELS = [] as const + +// const GEMMA2_AUDIO_MODELS = [] as const + +// const GEMMA2_VIDEO_MODELS = [] as const + +// export type Gemma2ChatModels = (typeof GEMMA2_MODELS)[number] + +// Manual type map for per-model provider options +export type Gemma2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA2_LATEST.name]: ChatRequest + [GEMMA2_2b.name]: ChatRequest + [GEMMA2_9b.name]: ChatRequest + [GEMMA2_27b.name]: ChatRequest +} + +export type Gemma2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA2_LATEST.name]: typeof GEMMA2_LATEST.supports.input + [GEMMA2_2b.name]: typeof GEMMA2_2b.supports.input + [GEMMA2_9b.name]: typeof GEMMA2_9b.supports.input + [GEMMA2_27b.name]: typeof GEMMA2_27b.supports.input +} diff --git 
a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts new file mode 100644 index 00000000..e10daf25 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts @@ -0,0 +1,108 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GEMMA3_LATEST = { + name: 'gemma3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '3.3gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_270m = { + name: 'gemma3:270m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '298mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_1b = { + name: 'gemma3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '815mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_4b = { + name: 'gemma3:4b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '3.3gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_12b = { + name: 'gemma3:12b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '8.1gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_27b = { + name: 'gemma3:27b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '17gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const GEMMA3_MODELS = [ + GEMMA3_LATEST.name, + GEMMA3_270m.name, + GEMMA3_1b.name, + GEMMA3_4b.name, + GEMMA3_12b.name, + GEMMA3_27b.name, +] as const + +// const GEMMA3_IMAGE_MODELS = [] as const + +// export const GEMMA3_EMBEDDING_MODELS = [] as const + +// const GEMMA3_AUDIO_MODELS = [] as const + +// const GEMMA3_VIDEO_MODELS = [] as const + +// export type Gemma3ChatModels = (typeof GEMMA3_MODELS)[number] + +// Manual type map for per-model provider options +export type Gemma3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA3_LATEST.name]: ChatRequest + [GEMMA3_270m.name]: ChatRequest + [GEMMA3_1b.name]: ChatRequest + [GEMMA3_4b.name]: ChatRequest + [GEMMA3_12b.name]: ChatRequest + [GEMMA3_27b.name]: ChatRequest +} + +export type Gemma3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA3_LATEST.name]: typeof GEMMA3_LATEST.supports.input + [GEMMA3_270m.name]: typeof GEMMA3_270m.supports.input + [GEMMA3_1b.name]: typeof GEMMA3_1b.supports.input + [GEMMA3_4b.name]: typeof GEMMA3_4b.supports.input + [GEMMA3_12b.name]: typeof GEMMA3_12b.supports.input + [GEMMA3_27b.name]: typeof GEMMA3_27b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts new file mode 100644 index 00000000..6f28a433 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_DENSE_LATEST = { + name: 'granite3-dense:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_DENSE_2b = { + name: 
'granite3-dense:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_DENSE_8b = { + name: 'granite3-dense:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_DENSE_MODELS = [ + GRANITE3_DENSE_LATEST.name, + GRANITE3_DENSE_2b.name, + GRANITE3_DENSE_8b.name, +] as const + +// const GRANITE3_DENSE_IMAGE_MODELS = [] as const + +// export const GRANITE3_DENSE_EMBEDDING_MODELS = [] as const + +// const GRANITE3_DENSE_AUDIO_MODELS = [] as const + +// const GRANITE3_DENSE_VIDEO_MODELS = [] as const + +// export type Granite3Dense3ChatModels = (typeof GRANITE3_DENSE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3DenseChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_DENSE_LATEST.name]: ChatRequest + [GRANITE3_DENSE_2b.name]: ChatRequest + [GRANITE3_DENSE_8b.name]: ChatRequest +} + +export type Granite3DenseModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_DENSE_LATEST.name]: typeof GRANITE3_DENSE_LATEST.supports.input + [GRANITE3_DENSE_2b.name]: typeof GRANITE3_DENSE_2b.supports.input + [GRANITE3_DENSE_8b.name]: typeof GRANITE3_DENSE_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts new file mode 100644 index 00000000..798118cb --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_GUARDIAN_LATEST = { + name: 'granite3-guardian:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_GUARDIAN_2b = { + name: 'granite3-guardian:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_GUARDIAN_8b = { + name: 'granite3-guardian:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_GUARDIAN_MODELS = [ + GRANITE3_GUARDIAN_LATEST.name, + GRANITE3_GUARDIAN_2b.name, + GRANITE3_GUARDIAN_8b.name, +] as const + +// const GRANITE3_GUARDIAN_IMAGE_MODELS = [] as const + +// export const GRANITE3_GUARDIAN_EMBEDDING_MODELS = [] as const + +// const GRANITE3_GUARDIAN_AUDIO_MODELS = [] as const + +// const GRANITE3_GUARDIAN_VIDEO_MODELS = [] as const + +// export type GraniteGuardian3ChatModels = (typeof GRANITE3_GUARDIAN_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3GuardianChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_GUARDIAN_LATEST.name]: ChatRequest + [GRANITE3_GUARDIAN_2b.name]: ChatRequest + [GRANITE3_GUARDIAN_8b.name]: ChatRequest +} + +export type Granite3GuardianModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_GUARDIAN_LATEST.name]: typeof GRANITE3_GUARDIAN_LATEST.supports.input + 
[GRANITE3_GUARDIAN_2b.name]: typeof GRANITE3_GUARDIAN_2b.supports.input + [GRANITE3_GUARDIAN_8b.name]: typeof GRANITE3_GUARDIAN_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts new file mode 100644 index 00000000..4d43bf2d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_MOE_LATEST = { + name: 'granite3-moe:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '822mb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_MOE_1b = { + name: 'granite3-moe:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '822mb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_MOE_3b = { + name: 'granite3-moe:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.1gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_MOE_MODELS = [ + GRANITE3_MOE_LATEST.name, + GRANITE3_MOE_1b.name, + GRANITE3_MOE_3b.name, +] as const + +// const GRANITE3_MOE_IMAGE_MODELS = [] as const + +// export const GRANITE3_MOE_EMBEDDING_MODELS = [] as const + +// const GRANITE3_MOE_AUDIO_MODELS = [] as const + +// const GRANITE3_MOE_VIDEO_MODELS = [] as const + +// export type GraniteMoe3ChatModels = (typeof GRANITE3_MOE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3MoeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_MOE_LATEST.name]: ChatRequest + [GRANITE3_MOE_1b.name]: ChatRequest + [GRANITE3_MOE_3b.name]: ChatRequest +} + +export type Granite3MoeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_MOE_LATEST.name]: typeof GRANITE3_MOE_LATEST.supports.input + [GRANITE3_MOE_1b.name]: typeof GRANITE3_MOE_1b.supports.input + [GRANITE3_MOE_3b.name]: typeof GRANITE3_MOE_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts new file mode 100644 index 00000000..2dbf7374 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_1_DENSE_LATEST = { + name: 'granite3.1-dense:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_1_DENSE_2b = { + name: 'granite3.1-dense:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.6gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_1_DENSE_8b = { + name: 'granite3.1-dense:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_1_DENSE_MODELS = [ + GRANITE3_1_DENSE_LATEST.name, + GRANITE3_1_DENSE_2b.name, + GRANITE3_1_DENSE_8b.name, +] as const + +// const GRANITE3_1_DENSE_IMAGE_MODELS = [] as
const + +// export const GRANITE3_1_DENSE_EMBEDDING_MODELS = [] as const + +// const GRANITE3_1_DENSE_AUDIO_MODELS = [] as const + +// const GRANITE3_1_DENSE_VIDEO_MODELS = [] as const + +// export type Granite3_1Dense3ChatModels = (typeof GRANITE3_1_DENSE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3_1DenseChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_1_DENSE_LATEST.name]: ChatRequest + [GRANITE3_1_DENSE_2b.name]: ChatRequest + [GRANITE3_1_DENSE_8b.name]: ChatRequest +} + +export type Granite3_1DenseModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_1_DENSE_LATEST.name]: typeof GRANITE3_1_DENSE_LATEST.supports.input + [GRANITE3_1_DENSE_2b.name]: typeof GRANITE3_1_DENSE_2b.supports.input + [GRANITE3_1_DENSE_8b.name]: typeof GRANITE3_1_DENSE_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts new file mode 100644 index 00000000..7d513967 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_1_MOE_LATEST = { + name: 'granite3.1-moe:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_1_MOE_1b = { + name: 'granite3.1-moe:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.4gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_1_MOE_3b = { + name: 'granite3.1-moe:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_1_MOE_MODELS = [ + GRANITE3_1_MOE_LATEST.name, + GRANITE3_1_MOE_1b.name, + GRANITE3_1_MOE_3b.name, +] as const + +// const GRANITE3_1_MOE_IMAGE_MODELS = [] as const + +// export const GRANITE3_1_MOE_EMBEDDING_MODELS = [] as const + +// const GRANITE3_1_MOE_AUDIO_MODELS = [] as const + +// const GRANITE3_1_MOE_VIDEO_MODELS = [] as const + +// export type Granite3_1MoeChatModels = (typeof GRANITE3_1_MOE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3_1MoeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_1_MOE_LATEST.name]: ChatRequest + [GRANITE3_1_MOE_1b.name]: ChatRequest + [GRANITE3_1_MOE_3b.name]: ChatRequest +} + +export type Granite3_1MoeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_1_MOE_LATEST.name]: typeof GRANITE3_1_MOE_LATEST.supports.input + [GRANITE3_1_MOE_1b.name]: typeof GRANITE3_1_MOE_1b.supports.input + [GRANITE3_1_MOE_3b.name]: typeof GRANITE3_1_MOE_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts new file mode 100644 index 00000000..db18d06d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA_GUARD3_LATEST = { + name: 'llama-guard3:latest', +
supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA_GUARD3_1b = { + name: 'llama-guard3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA_GUARD3_8b = { + name: 'llama-guard3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA_GUARD3_MODELS = [ + LLAMA_GUARD3_LATEST.name, + LLAMA_GUARD3_1b.name, + LLAMA_GUARD3_8b.name, +] as const + +// const LLAMA_GUARD3_IMAGE_MODELS = [] as const + +// export const LLAMA_GUARD3_EMBEDDING_MODELS = [] as const + +// const LLAMA_GUARD3_AUDIO_MODELS = [] as const + +// const LLAMA_GUARD3_VIDEO_MODELS = [] as const + +// export type LlamaGuard3ChatModels = (typeof LLAMA_GUARD3_MODELS)[number] + +// Manual type map for per-model provider options +export type LlamaGuard3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA_GUARD3_LATEST.name]: ChatRequest + [LLAMA_GUARD3_1b.name]: ChatRequest + [LLAMA_GUARD3_8b.name]: ChatRequest +} + +export type LlamaGuard3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA_GUARD3_LATEST.name]: typeof LLAMA_GUARD3_LATEST.supports.input + [LLAMA_GUARD3_1b.name]: typeof LLAMA_GUARD3_1b.supports.input + [LLAMA_GUARD3_8b.name]: typeof LLAMA_GUARD3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts new file mode 100644 index 00000000..44a9c66d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA2_LATEST = { + name: 'llama2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA2_7b = { + name: 'llama2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA2_13b = { + name: 'llama2:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.4gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA2_70b = { + name: 'llama2:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '39gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA2_MODELS = [ + LLAMA2_LATEST.name, + LLAMA2_7b.name, + LLAMA2_13b.name, + LLAMA2_70b.name, +] as const + +// const LLAMA2_IMAGE_MODELS = [] as const + +// export const LLAMA2_EMBEDDING_MODELS = [] as const + +// const LLAMA2_AUDIO_MODELS = [] as const + +// const LLAMA2_VIDEO_MODELS = [] as const + +// export type Llama2ChatModels = (typeof LLAMA2_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA2_LATEST.name]: ChatRequest + [LLAMA2_7b.name]: ChatRequest + [LLAMA2_13b.name]: ChatRequest + [LLAMA2_70b.name]: ChatRequest +} + +export type Llama2ModelInputModalitiesByName = { + //
Models with text, image, audio, video (no document) + [LLAMA2_LATEST.name]: typeof LLAMA2_LATEST.supports.input + [LLAMA2_7b.name]: typeof LLAMA2_7b.supports.input + [LLAMA2_13b.name]: typeof LLAMA2_13b.supports.input + [LLAMA2_70b.name]: typeof LLAMA2_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts new file mode 100644 index 00000000..58063a03 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_CHATQA_LATEST = { + name: 'llama3-chatqa:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_CHATQA_8b = { + name: 'llama3-chatqa:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_CHATQA_70b = { + name: 'llama3-chatqa:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_CHATQA_MODELS = [ + LLAMA3_CHATQA_LATEST.name, + LLAMA3_CHATQA_8b.name, + LLAMA3_CHATQA_70b.name, +] as const + +// const LLAMA3_CHATQA_IMAGE_MODELS = [] as const + +// export const LLAMA3_CHATQA_EMBEDDING_MODELS = [] as const + +// const LLAMA3_CHATQA_AUDIO_MODELS = [] as const + +// const LLAMA3_CHATQA_VIDEO_MODELS = [] as const + +// export type Llama3ChatQaChatModels = (typeof LLAMA3_CHATQA_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3ChatQaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_CHATQA_LATEST.name]: ChatRequest + [LLAMA3_CHATQA_8b.name]: ChatRequest + [LLAMA3_CHATQA_70b.name]: ChatRequest +} + +export type Llama3ChatQaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_CHATQA_LATEST.name]: typeof LLAMA3_CHATQA_LATEST.supports.input + [LLAMA3_CHATQA_8b.name]: typeof LLAMA3_CHATQA_8b.supports.input + [LLAMA3_CHATQA_70b.name]: typeof LLAMA3_CHATQA_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts new file mode 100644 index 00000000..8a658e6f --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_GRADIENT_LATEST = { + name: 'llama3-gradient:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 1_000_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_GRADIENT_8b = { + name: 'llama3-gradient:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 1_000_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_GRADIENT_70b = { + name: 'llama3-gradient:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 1_000_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_GRADIENT_MODELS = [ + LLAMA3_GRADIENT_LATEST.name,
+ LLAMA3_GRADIENT_8b.name, + LLAMA3_GRADIENT_70b.name, +] as const + +// const LLAMA3_GRADIENT_IMAGE_MODELS = [] as const + +// export const LLAMA3_GRADIENT_EMBEDDING_MODELS = [] as const + +// const LLAMA3_GRADIENT_AUDIO_MODELS = [] as const + +// const LLAMA3_GRADIENT_VIDEO_MODELS = [] as const + +// export type Llama3GradientChatModels = (typeof LLAMA3_GRADIENT_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3GradientChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_GRADIENT_LATEST.name]: ChatRequest + [LLAMA3_GRADIENT_8b.name]: ChatRequest + [LLAMA3_GRADIENT_70b.name]: ChatRequest +} + +export type Llama3GradientModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_GRADIENT_LATEST.name]: typeof LLAMA3_GRADIENT_LATEST.supports.input + [LLAMA3_GRADIENT_8b.name]: typeof LLAMA3_GRADIENT_8b.supports.input + [LLAMA3_GRADIENT_70b.name]: typeof LLAMA3_GRADIENT_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts new file mode 100644 index 00000000..66186581 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_1_LATEST = { + name: 'llama3.1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_1_8b = { + name: 'llama3.1:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_1_70b = { + name: 'llama3.1:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_1_405b = { + name: 'llama3.1:405b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '243gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_1_MODELS = [ + LLAMA3_1_LATEST.name, + LLAMA3_1_8b.name, + LLAMA3_1_70b.name, + LLAMA3_1_405b.name, +] as const + +// const LLAMA3_1_IMAGE_MODELS = [] as const + +// export const LLAMA3_1_EMBEDDING_MODELS = [] as const + +// const LLAMA3_1_AUDIO_MODELS = [] as const + +// const LLAMA3_1_VIDEO_MODELS = [] as const + +// export type Llama3_1ChatModels = (typeof LLAMA3_1_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3_1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_1_LATEST.name]: ChatRequest + [LLAMA3_1_8b.name]: ChatRequest + [LLAMA3_1_70b.name]: ChatRequest + [LLAMA3_1_405b.name]: ChatRequest +} + +export type Llama3_1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_1_LATEST.name]: typeof LLAMA3_1_LATEST.supports.input + [LLAMA3_1_8b.name]: typeof LLAMA3_1_8b.supports.input + [LLAMA3_1_70b.name]: typeof LLAMA3_1_70b.supports.input + [LLAMA3_1_405b.name]: typeof LLAMA3_1_405b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts new file mode 100644 index
00000000..d840815f --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_2_VISION_LATEST = { + name: 'llama3.2-vision:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '7.8gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_2_VISION_11b = { + name: 'llama3.2-vision:11b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '7.8gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_2_VISION_90b = { + name: 'llama3.2-vision:90b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '55gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_2_VISION_MODELS = [ + LLAMA3_2_VISION_LATEST.name, + LLAMA3_2_VISION_11b.name, + LLAMA3_2_VISION_90b.name, +] as const + +// export const LLAMA3_2_VISION_IMAGE_MODELS = [] as const + +// export const LLAMA3_2_VISION_EMBEDDING_MODELS = [] as const + +// export const LLAMA3_2_VISION_AUDIO_MODELS = [] as const + +// export const LLAMA3_2_VISION_VIDEO_MODELS = [] as const + +// export type Llama3_2VisionChatModels = (typeof LLAMA3_2_VISION_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3_2VisionChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_2_VISION_LATEST.name]: ChatRequest + [LLAMA3_2_VISION_11b.name]: ChatRequest + [LLAMA3_2_VISION_90b.name]: ChatRequest +} + +export type Llama3_2VisionModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_2_VISION_LATEST.name]: typeof LLAMA3_2_VISION_LATEST.supports.input + [LLAMA3_2_VISION_11b.name]: typeof LLAMA3_2_VISION_11b.supports.input + [LLAMA3_2_VISION_90b.name]: typeof LLAMA3_2_VISION_90b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts new file mode 100644 index 00000000..328adcce --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_2_LATEST = { + name: 'llama3.2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_2_1b = { + name: 'llama3.2:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.3gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_2_3b = { + name: 'llama3.2:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_2_MODELS = [ + LLAMA3_2_LATEST.name, + LLAMA3_2_1b.name, + LLAMA3_2_3b.name, +] as const + +// const LLAMA3_2_IMAGE_MODELS = [] as const + +// export const LLAMA3_2_EMBEDDING_MODELS = [] as const + +// const LLAMA3_2_AUDIO_MODELS = [] as const + +// const LLAMA3_2_VIDEO_MODELS = [] as const + +// export type Llama3_2ChatModels = (typeof LLAMA3_2_MODELS)[number] + +// Manual type map for per-model provider options +export type
Llama3_2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_2_LATEST.name]: ChatRequest + [LLAMA3_2_1b.name]: ChatRequest + [LLAMA3_2_3b.name]: ChatRequest +} + +export type Llama3_2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_2_LATEST.name]: typeof LLAMA3_2_LATEST.supports.input + [LLAMA3_2_1b.name]: typeof LLAMA3_2_1b.supports.input + [LLAMA3_2_3b.name]: typeof LLAMA3_2_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts new file mode 100644 index 00000000..1cbc63a8 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_3_LATEST = { + name: 'llama3.3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_3_70b = { + name: 'llama3.3:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_3_MODELS = [ + LLAMA3_3_LATEST.name, + LLAMA3_3_70b.name, +] as const + +// const LLAMA3_3_IMAGE_MODELS = [] as const + +// export const LLAMA3_3_EMBEDDING_MODELS = [] as const + +// const LLAMA3_3_AUDIO_MODELS = [] as const + +// const LLAMA3_3_VIDEO_MODELS = [] as const + +// export type Llama3_3ChatModels = (typeof LLAMA3_3_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama3_3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_3_LATEST.name]: ChatRequest + [LLAMA3_3_70b.name]: ChatRequest +} + +export type Llama3_3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_3_LATEST.name]: typeof LLAMA3_3_LATEST.supports.input + [LLAMA3_3_70b.name]: typeof LLAMA3_3_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts new file mode 100644 index 00000000..d61504b9 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA3_LATEST = { + name: 'llama3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_8b = { + name: 'llama3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA3_70b = { + name: 'llama3:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA3_MODELS = [ + LLAMA3_LATEST.name, + LLAMA3_8b.name, + LLAMA3_70b.name, +] as const + +// const LLAMA3_IMAGE_MODELS = [] as const + +// export const LLAMA3_EMBEDDING_MODELS = [] as const + +// const LLAMA3_AUDIO_MODELS = [] as const + +// const LLAMA3_VIDEO_MODELS = [] as const + +// export type Llama3ChatModels = (typeof LLAMA3_MODELS)[number] + +// Manual type map
for per-model provider options +export type Llama3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA3_LATEST.name]: ChatRequest + [LLAMA3_8b.name]: ChatRequest + [LLAMA3_70b.name]: ChatRequest +} + +export type Llama3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA3_LATEST.name]: typeof LLAMA3_LATEST.supports.input + [LLAMA3_8b.name]: typeof LLAMA3_8b.supports.input + [LLAMA3_70b.name]: typeof LLAMA3_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts new file mode 100644 index 00000000..418cc25d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAMA4_LATEST = { + name: 'llama4:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['tools', 'vision'], + }, + size: '67gb', + context: 10_000_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA4_16X17b = { + name: 'llama4:16x17b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['tools', 'vision'], + }, + size: '67gb', + context: 10_000_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAMA4_128X17b = { + name: 'llama4:128x17b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['tools', 'vision'], + }, + size: '245gb', + context: 1_000_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAMA4_MODELS = [ + LLAMA4_LATEST.name, + LLAMA4_16X17b.name, + LLAMA4_128X17b.name, +] as const + +// const LLAMA4_IMAGE_MODELS = [] as const + +// export const LLAMA4_EMBEDDING_MODELS = [] as const + +// const LLAMA4_AUDIO_MODELS = [] as const + +// const LLAMA4_VIDEO_MODELS = [] as const + +// export type Llama4ChatModels = (typeof LLAMA4_MODELS)[number] + +// Manual type map for per-model provider options +export type Llama4ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAMA4_LATEST.name]: ChatRequest + [LLAMA4_16X17b.name]: ChatRequest + [LLAMA4_128X17b.name]: ChatRequest +} + +export type Llama4ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAMA4_LATEST.name]: typeof LLAMA4_LATEST.supports.input + [LLAMA4_16X17b.name]: typeof LLAMA4_16X17b.supports.input + [LLAMA4_128X17b.name]: typeof LLAMA4_128X17b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts new file mode 100644 index 00000000..da96e112 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAVA_LLAMA3_LATEST = { + name: 'llava-llama3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '5.5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAVA_LLAMA3_8b = { + name: 'llava-llama3:8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '5.5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAVA_LLAMA3_MODELS = [ + LLAVA_LLAMA3_LATEST.name, + LLAVA_LLAMA3_8b.name, +] as const +
+// const LLAVA_LLAMA3_IMAGE_MODELS = [] as const + +// export const LLAVA_LLAMA3_EMBEDDING_MODELS = [] as const + +// const LLAVA_LLAMA3_AUDIO_MODELS = [] as const + +// const LLAVA_LLAMA3_VIDEO_MODELS = [] as const + +// export type LlavaLlamaChatModels = (typeof LLAVA_LLAMA3_MODELS)[number] + +// Manual type map for per-model provider options +export type LlavaLlamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAVA_LLAMA3_LATEST.name]: ChatRequest + [LLAVA_LLAMA3_8b.name]: ChatRequest +} + +export type LlavaLlamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAVA_LLAMA3_LATEST.name]: typeof LLAVA_LLAMA3_LATEST.supports.input + [LLAVA_LLAMA3_8b.name]: typeof LLAVA_LLAMA3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts new file mode 100644 index 00000000..4c725a64 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAVA_PHI3_LATEST = { + name: 'llava-phi3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '2.9gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAVA_PHI3_8b = { + name: 'llava-phi3:8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '2.9gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAVA_PHI3_MODELS = [ + LLAVA_PHI3_LATEST.name, + LLAVA_PHI3_8b.name, +] as const + +// const LLAVA_PHI3_IMAGE_MODELS = [] as const + +// export const LLAVA_PHI3_EMBEDDING_MODELS = [] as const + +// const LLAVA_PHI3_AUDIO_MODELS = [] as const + +// const LLAVA_PHI3_VIDEO_MODELS = [] as const + +// export type LlavaPhi3ChatModels = (typeof LLAVA_PHI3_MODELS)[number] + +// Manual type map for per-model provider options +export type LlavaPhi3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAVA_PHI3_LATEST.name]: ChatRequest + [LLAVA_PHI3_8b.name]: ChatRequest +} + +export type LlavaPhi3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAVA_PHI3_LATEST.name]: typeof LLAVA_PHI3_LATEST.supports.input + [LLAVA_PHI3_8b.name]: typeof LLAVA_PHI3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts new file mode 100644 index 00000000..18e7f762 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const LLAVA_LATEST = { + name: 'llava:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAVA_7b = { + name: 'llava:7b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const LLAVA_13b = { + name: 'llava:13b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '8gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const
LLAVA_34b = { + name: 'llava:34b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '20gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const LLAVA_MODELS = [ + LLAVA_LATEST.name, + LLAVA_7b.name, + LLAVA_13b.name, + LLAVA_34b.name, +] as const + +// const LLAVA_IMAGE_MODELS = [] as const + +// export const LLAVA_EMBEDDING_MODELS = [] as const + +// const LLAVA_AUDIO_MODELS = [] as const + +// const LLAVA_VIDEO_MODELS = [] as const + +// export type llavaChatModels = (typeof LLAVA_MODELS)[number] + +// Manual type map for per-model provider options +export type llavaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [LLAVA_LATEST.name]: ChatRequest + [LLAVA_7b.name]: ChatRequest + [LLAVA_13b.name]: ChatRequest + [LLAVA_34b.name]: ChatRequest +} + +export type llavaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [LLAVA_LATEST.name]: typeof LLAVA_LATEST.supports.input + [LLAVA_7b.name]: typeof LLAVA_7b.supports.input + [LLAVA_13b.name]: typeof LLAVA_13b.supports.input + [LLAVA_34b.name]: typeof LLAVA_34b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts new file mode 100644 index 00000000..fb44d209 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MARCO_O1_LATEST = { + name: 'marco-o1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const MARCO_O1_7b = { + name: 'marco-o1:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const + +// const MARCO_O1_IMAGE_MODELS = [] as const + +// export const MARCO_O1_EMBEDDING_MODELS = [] as const + +// const MARCO_O1_AUDIO_MODELS = [] as const + +// const MARCO_O1_VIDEO_MODELS = [] as const + +// export type MarcoO1ChatModels = (typeof MARCO_O1_MODELS)[number] + +// Manual type map for per-model provider options +export type MarcoO1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MARCO_O1_LATEST.name]: ChatRequest + [MARCO_O1_7b.name]: ChatRequest +} + +export type MarcoO1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MARCO_O1_LATEST.name]: typeof MARCO_O1_LATEST.supports.input + [MARCO_O1_7b.name]: typeof MARCO_O1_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts new file mode 100644 index 00000000..7f2055f2 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MISTRAL_LARGE_LATEST = { + name: 'mistral-large:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '73gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const MISTRAL_LARGE_123b = { + name: 'mistral-large:123b', + supports: { 
+ input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '73gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const MISTRAL_LARGE_MODELS = [ + MISTRAL_LARGE_LATEST.name, + MISTRAL_LARGE_123b.name, +] as const + +// const MISTRAL_LARGE_IMAGE_MODELS = [] as const + +// export const MISTRAL_LARGE_EMBEDDING_MODELS = [] as const + +// const MISTRAL_LARGE_AUDIO_MODELS = [] as const + +// const MISTRAL_LARGE_VIDEO_MODELS = [] as const + +// export type MistralLargeChatModels = (typeof MISTRAL_LARGE_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralLargeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_LARGE_LATEST.name]: ChatRequest + [MISTRAL_LARGE_123b.name]: ChatRequest +} + +export type MistralLargeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_LARGE_LATEST.name]: typeof MISTRAL_LARGE_LATEST.supports.input + [MISTRAL_LARGE_123b.name]: typeof MISTRAL_LARGE_123b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts new file mode 100644 index 00000000..39fb3ab6 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MISTRAL_NEMO_LATEST = { + name: 'mistral-nemo:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '7.1gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const MISTRAL_NEMO_12b = { + name: 'mistral-nemo:12b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '7.1gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const MISTRAL_NEMO_MODELS = [ + MISTRAL_NEMO_LATEST.name, + MISTRAL_NEMO_12b.name, +] as const + +// const MISTRAL_NEMO_IMAGE_MODELS = [] as const + +// export const MISTRAL_NEMO_EMBEDDING_MODELS = [] as const + +// const MISTRAL_NEMO_AUDIO_MODELS = [] as const + +// const MISTRAL_NEMO_VIDEO_MODELS = [] as const + +// export type MistralNemoChatModels = (typeof MISTRAL_NEMO_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralNemoChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_NEMO_LATEST.name]: ChatRequest + [MISTRAL_NEMO_12b.name]: ChatRequest +} + +export type MistralNemoModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_NEMO_LATEST.name]: typeof MISTRAL_NEMO_LATEST.supports.input + [MISTRAL_NEMO_12b.name]: typeof MISTRAL_NEMO_12b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts new file mode 100644 index 00000000..3dabd7d2 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MISTRAL_SMALL_LATEST = { + name: 'mistral-small:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const MISTRAL_SMALL_22b = { + name: 'mistral-small:22b', + supports: {
input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '13gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const MISTRAL_SMALL_24b = { + name: 'mistral-small:24b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const MISTRAL_SMALL_MODELS = [ + MISTRAL_SMALL_LATEST.name, + MISTRAL_SMALL_22b.name, + MISTRAL_SMALL_24b.name, +] as const + +// const MISTRAL_SMALL_IMAGE_MODELS = [] as const + +// export const MISTRAL_SMALL_EMBEDDING_MODELS = [] as const + +// const MISTRAL_SMALL_AUDIO_MODELS = [] as const + +// const MISTRAL_SMALL_VIDEO_MODELS = [] as const + +// export type MistralSmallChatModels = (typeof MISTRAL_SMALL_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralSmallChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_SMALL_LATEST.name]: ChatRequest + [MISTRAL_SMALL_22b.name]: ChatRequest + [MISTRAL_SMALL_24b.name]: ChatRequest +} + +export type MistralSmallModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_SMALL_LATEST.name]: typeof MISTRAL_SMALL_LATEST.supports.input + [MISTRAL_SMALL_22b.name]: typeof MISTRAL_SMALL_22b.supports.input + [MISTRAL_SMALL_24b.name]: typeof MISTRAL_SMALL_24b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts new file mode 100644 index 00000000..55efb14d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MISTRAL_LATEST = { + name: 'mistral:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.9gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const MISTRAL_7b = { + name: 'mistral:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.9gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const + +// const MISTRAL_IMAGE_MODELS = [] as const + +// export const MISTRAL_EMBEDDING_MODELS = [] as const + +// const MISTRAL_AUDIO_MODELS = [] as const + +// const MISTRAL_VIDEO_MODELS = [] as const + +// export type MistralChatModels = (typeof MISTRAL_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_LATEST.name]: ChatRequest + [MISTRAL_7b.name]: ChatRequest +} + +export type MistralModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_LATEST.name]: typeof MISTRAL_LATEST.supports.input + [MISTRAL_7b.name]: typeof MISTRAL_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts new file mode 100644 index 00000000..37656cd2 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MIXTRAL_LATEST = { + name: 'mixtral:latest', + supports: { + input:
['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '26gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const MIXTRAL_8X7b = { + name: 'mixtral:8x7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '26gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const MIXTRAL_8X22b = { + name: 'mixtral:8x22b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '80gb', + context: 64_000, +} as const satisfies DefaultOllamaModelMeta + +export const MIXTRAL_MODELS = [ + MIXTRAL_LATEST.name, + MIXTRAL_8X7b.name, + MIXTRAL_8X22b.name, +] as const + +// const MIXTRAL_IMAGE_MODELS = [] as const + +// export const MIXTRAL_EMBEDDING_MODELS = [] as const + +// const MIXTRAL_AUDIO_MODELS = [] as const + +// const MIXTRAL_VIDEO_MODELS = [] as const + +// export type MixtralChatModels = (typeof MIXTRAL_MODELS)[number] + +// Manual type map for per-model provider options +export type MixtralChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MIXTRAL_LATEST.name]: ChatRequest + [MIXTRAL_8X7b.name]: ChatRequest + [MIXTRAL_8X22b.name]: ChatRequest +} + +export type MixtralModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MIXTRAL_LATEST.name]: typeof MIXTRAL_LATEST.supports.input + [MIXTRAL_8X7b.name]: typeof MIXTRAL_8X7b.supports.input + [MIXTRAL_8X22b.name]: typeof MIXTRAL_8X22b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts new file mode 100644 index 00000000..50be72ad --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MOONDREAM_LATEST = { + name: 'moondream:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '1.7gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const MOONDREAM_1_8b = { + name: 'moondream:1.8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '1.7gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const MOONDREAM_MODELS = [ + MOONDREAM_LATEST.name, + MOONDREAM_1_8b.name, +] as const + +// const MOONDREAM_IMAGE_MODELS = [] as const + +// export const MOONDREAM_EMBEDDING_MODELS = [] as const + +// const MOONDREAM_AUDIO_MODELS = [] as const + +// const MOONDREAM_VIDEO_MODELS = [] as const + +// export type MoondreamChatModels = (typeof MOONDREAM_MODELS)[number] + +// Manual type map for per-model provider options +export type MoondreamChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MOONDREAM_LATEST.name]: ChatRequest + [MOONDREAM_1_8b.name]: ChatRequest +} + +export type MoondreamModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MOONDREAM_LATEST.name]: typeof MOONDREAM_LATEST.supports.input + [MOONDREAM_1_8b.name]: typeof MOONDREAM_1_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts new file mode 100644 index 00000000..b5a9cc2d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts @@ -0,0 +1,52 @@ +import type 
{ ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const NEMOTRON_MINI_LATEST = { + name: 'nemotron-mini:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.7gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const NEMOTRON_MINI_4b = { + name: 'nemotron-mini:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.7gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const NEMOTRON_MINI_MODELS = [ + NEMOTRON_MINI_LATEST.name, + NEMOTRON_MINI_4b.name, +] as const + +// const NEMOTRON_MINI_IMAGE_MODELS = [] as const + +// export const NEMOTRON_MINI_EMBEDDING_MODELS = [] as const + +// const NEMOTRON_MINI_AUDIO_MODELS = [] as const + +// const NEMOTRON_MINI_VIDEO_MODELS = [] as const + +// export type NemotronMiniChatModels = (typeof NEMOTRON_MINI_MODELS)[number] + +// Manual type map for per-model provider options +export type NemotronMiniChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [NEMOTRON_MINI_LATEST.name]: ChatRequest + [NEMOTRON_MINI_4b.name]: ChatRequest +} + +export type NemotronMiniModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [NEMOTRON_MINI_LATEST.name]: typeof NEMOTRON_MINI_LATEST.supports.input + [NEMOTRON_MINI_4b.name]: typeof NEMOTRON_MINI_4b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts new file mode 100644 index 00000000..3f06d9ea --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const NEMOTRON_LATEST = { + name: 'nemotron:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const NEMOTRON_70b = { + name: 'nemotron:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const NEMOTRON_MODELS = [ + NEMOTRON_LATEST.name, + NEMOTRON_70b.name, +] as const + +// const NEMOTRON_IMAGE_MODELS = [] as const + +// export const NEMOTRON_EMBEDDING_MODELS = [] as const + +// const NEMOTRON_AUDIO_MODELS = [] as const + +// const NEMOTRON_VIDEO_MODELS = [] as const + +// export type NemotronChatModels = (typeof NEMOTRON_MODELS)[number] + +// Manual type map for per-model provider options +export type NemotronChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [NEMOTRON_LATEST.name]: ChatRequest + [NEMOTRON_70b.name]: ChatRequest +} + +export type NemotronModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [NEMOTRON_LATEST.name]: typeof NEMOTRON_LATEST.supports.input + [NEMOTRON_70b.name]: typeof NEMOTRON_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts new file mode 100644 index 00000000..621bc7b8 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const OLMO2_LATEST = { + name: 
'olmo2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const OLMO2_7b = { + name: 'olmo2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const OLMO2_13b = { + name: 'olmo2:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.4gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const OLMO2_MODELS = [ + OLMO2_LATEST.name, + OLMO2_7b.name, + OLMO2_13b.name, +] as const + +// const OLMO2_IMAGE_MODELS = [] as const + +// export const OLMO2_EMBEDDING_MODELS = [] as const + +// const OLMO2_AUDIO_MODELS = [] as const + +// const OLMO2_VIDEO_MODELS = [] as const + +// export type Olmo2ChatModels = (typeof OLMO2_MODELS)[number] + +// Manual type map for per-model provider options +export type Olmo2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OLMO2_LATEST.name]: ChatRequest + [OLMO2_7b.name]: ChatRequest + [OLMO2_13b.name]: ChatRequest +} + +export type Olmo2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OLMO2_LATEST.name]: typeof OLMO2_LATEST.supports.input + [OLMO2_7b.name]: typeof OLMO2_7b.supports.input + [OLMO2_13b.name]: typeof OLMO2_13b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts new file mode 100644 index 00000000..0b22d3b2 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const OPENCODER_LATEST = { + name: 'opencoder:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENCODER_1_5b = { + name: 'opencoder:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.4gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENCODER_8b = { + name: 'opencoder:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const OPENCODER_MODELS = [ + OPENCODER_LATEST.name, + OPENCODER_1_5b.name, + OPENCODER_8b.name, +] as const + +// const OPENCODER_IMAGE_MODELS = [] as const + +// export const OPENCODER_EMBEDDING_MODELS = [] as const + +// const OPENCODER_AUDIO_MODELS = [] as const + +// const OPENCODER_VIDEO_MODELS = [] as const + +// export type OpencoderChatModels = (typeof OPENCODER_MODELS)[number] + +// Manual type map for per-model provider options +export type OpencoderChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OPENCODER_LATEST.name]: ChatRequest + [OPENCODER_1_5b.name]: ChatRequest + [OPENCODER_8b.name]: ChatRequest +} + +export type OpencoderModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OPENCODER_LATEST.name]: typeof OPENCODER_LATEST.supports.input + [OPENCODER_1_5b.name]: typeof OPENCODER_1_5b.supports.input + [OPENCODER_8b.name]: typeof OPENCODER_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts 
b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts new file mode 100644 index 00000000..459baeca --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const OPENHERMES_LATEST = { + name: 'openhermes:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENHERMES_V2 = { + name: 'openhermes:v2', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENHERMES_V2_5 = { + name: 'openhermes:v2.5', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const OPENHERMES_MODELS = [ + OPENHERMES_LATEST.name, + OPENHERMES_V2.name, + OPENHERMES_V2_5.name, +] as const + +// const OPENHERMES_IMAGE_MODELS = [] as const + +// export const OPENHERMES_EMBEDDING_MODELS = [] as const + +// const OPENHERMES_AUDIO_MODELS = [] as const + +// const OPENHERMES_VIDEO_MODELS = [] as const + +// export type OpenhermesChatModels = (typeof OPENHERMES_MODELS)[number] + +// Manual type map for per-model provider options +export type OpenhermesChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OPENHERMES_LATEST.name]: ChatRequest + [OPENHERMES_V2.name]: ChatRequest + [OPENHERMES_V2_5.name]: ChatRequest +} + +export type OpenhermesModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OPENHERMES_LATEST.name]: typeof OPENHERMES_LATEST.supports.input + [OPENHERMES_V2.name]: typeof OPENHERMES_V2.supports.input + [OPENHERMES_V2_5.name]: typeof OPENHERMES_V2_5.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts new file mode 100644 index 00000000..7836b653 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const PHI3_LATEST = { + name: 'phi3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const PHI3_3_8b = { + name: 'phi3:3.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const PHI3_14b = { + name: 'phi3:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const PHI3_MODELS = [ + PHI3_LATEST.name, + PHI3_3_8b.name, + PHI3_14b.name, +] as const + +// const PHI3_IMAGE_MODELS = [] as const + +// export const PHI3_EMBEDDING_MODELS = [] as const + +// const PHI3_AUDIO_MODELS = [] as const + +// const PHI3_VIDEO_MODELS = [] as const + +// export type Phi3ChatModels = (typeof PHI3_MODELS)[number] + +// Manual type map for per-model provider options +export type Phi3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [PHI3_LATEST.name]: ChatRequest + [PHI3_3_8b.name]: ChatRequest + [PHI3_14b.name]:
ChatRequest +} + +export type Phi3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [PHI3_LATEST.name]: typeof PHI3_LATEST.supports.input + [PHI3_3_8b.name]: typeof PHI3_3_8b.supports.input + [PHI3_14b.name]: typeof PHI3_14b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts new file mode 100644 index 00000000..38ffeb9b --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const PHI4_LATEST = { + name: 'phi4:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '9.1gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const PHI4_14b = { + name: 'phi4:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '9.1gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const + +// const PHI4_IMAGE_MODELS = [] as const + +// export const PHI4_EMBEDDING_MODELS = [] as const + +// const PHI4_AUDIO_MODELS = [] as const + +// const PHI4_VIDEO_MODELS = [] as const + +// export type Phi4ChatModels = (typeof PHI4_MODELS)[number] + +// Manual type map for per-model provider options +export type Phi4ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [PHI4_LATEST.name]: ChatRequest + [PHI4_14b.name]: ChatRequest +} + +export type Phi4ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [PHI4_LATEST.name]: typeof PHI4_LATEST.supports.input + [PHI4_14b.name]: typeof PHI4_14b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts new file mode 100644 index 00000000..fee586de --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts @@ -0,0 +1,150 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN_LATEST = { + name: 'qwen:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.3gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_0_5b = { + name: 'qwen:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '395mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_1_8b = { + name: 'qwen:1.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_4b = { + name: 'qwen:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.3gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_7b = { + name: 'qwen:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_14b = { + name: 'qwen:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.2gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_32b = { + name: 'qwen:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: 
'18gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_72b = { + name: 'qwen:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '41gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_110b = { + name: 'qwen:110b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '63gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN_MODELS = [ + QWEN_LATEST.name, + QWEN_0_5b.name, + QWEN_1_8b.name, + QWEN_4b.name, + QWEN_7b.name, + QWEN_14b.name, + QWEN_32b.name, + QWEN_72b.name, + QWEN_110b.name, +] as const + +// const QWEN_IMAGE_MODELS = [] as const + +// export const QWEN_EMBEDDING_MODELS = [] as const + +// const QWEN_AUDIO_MODELS = [] as const + +// const QWEN_VIDEO_MODELS = [] as const + +// export type QwenChatModels = (typeof QWEN_MODELS)[number] + +// Manual type map for per-model provider options +export type QwenChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN_LATEST.name]: ChatRequest + [QWEN_0_5b.name]: ChatRequest + [QWEN_1_8b.name]: ChatRequest + [QWEN_4b.name]: ChatRequest + [QWEN_7b.name]: ChatRequest + [QWEN_14b.name]: ChatRequest + [QWEN_32b.name]: ChatRequest + [QWEN_72b.name]: ChatRequest + [QWEN_110b.name]: ChatRequest +} + +export type QwenModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN_LATEST.name]: typeof QWEN_LATEST.supports.input + [QWEN_0_5b.name]: typeof QWEN_0_5b.supports.input + [QWEN_1_8b.name]: typeof QWEN_1_8b.supports.input + [QWEN_4b.name]: typeof QWEN_4b.supports.input + [QWEN_7b.name]: typeof QWEN_7b.supports.input + [QWEN_14b.name]: typeof QWEN_14b.supports.input + [QWEN_32b.name]: typeof QWEN_32b.supports.input + [QWEN_72b.name]: typeof QWEN_72b.supports.input + [QWEN_110b.name]: typeof QWEN_110b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts new file mode 100644 index 00000000..a033db9c --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts @@ -0,0 +1,121 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN2_5_CODER_LATEST = { + name: 'qwen2.5-coder:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_CODER_0_5b = { + name: 'qwen2.5-coder:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '398mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_CODER_1_5b = { + name: 'qwen2.5-coder:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '986mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_CODER_3b = { + name: 'qwen2.5-coder:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.9gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_CODER_7b = { + name: 'qwen2.5-coder:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_CODER_14b = { + name: 'qwen2.5-coder:14b', + supports: { 
+ input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '9gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_CODER_32b = { + name: 'qwen2.5-coder:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN2_5_CODER_MODELS = [ + QWEN2_5_CODER_LATEST.name, + QWEN2_5_CODER_0_5b.name, + QWEN2_5_CODER_1_5b.name, + QWEN2_5_CODER_3b.name, + QWEN2_5_CODER_7b.name, + QWEN2_5_CODER_14b.name, + QWEN2_5_CODER_32b.name, +] as const + +// const QWEN2_5_CODER_IMAGE_MODELS = [] as const + +// export const QWEN2_5_CODER_EMBEDDING_MODELS = [] as const + +// const QWEN2_5_CODER_AUDIO_MODELS = [] as const + +// const QWEN2_5_CODER_VIDEO_MODELS = [] as const + +// export type Qwen2_5CoderChatModels = (typeof QWEN2_5_CODER_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen2_5CoderChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN2_5_CODER_LATEST.name]: ChatRequest + [QWEN2_5_CODER_0_5b.name]: ChatRequest + [QWEN2_5_CODER_1_5b.name]: ChatRequest + [QWEN2_5_CODER_3b.name]: ChatRequest + [QWEN2_5_CODER_7b.name]: ChatRequest + [QWEN2_5_CODER_14b.name]: ChatRequest + [QWEN2_5_CODER_32b.name]: ChatRequest +} + +export type Qwen2_5CoderModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_5_CODER_LATEST.name]: typeof QWEN2_5_CODER_LATEST.supports.input + [QWEN2_5_CODER_0_5b.name]: typeof QWEN2_5_CODER_0_5b.supports.input + [QWEN2_5_CODER_1_5b.name]: typeof QWEN2_5_CODER_1_5b.supports.input + [QWEN2_5_CODER_3b.name]: typeof QWEN2_5_CODER_3b.supports.input + [QWEN2_5_CODER_7b.name]: typeof QWEN2_5_CODER_7b.supports.input + [QWEN2_5_CODER_14b.name]: typeof QWEN2_5_CODER_14b.supports.input + [QWEN2_5_CODER_32b.name]: typeof QWEN2_5_CODER_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts new file mode 100644 index 00000000..4827e758 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts @@ -0,0 +1,122 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN2_5_LATEST = { + name: 'qwen2.5:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_0_5b = { + name: 'qwen2.5:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '398mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_1_5b = { + name: 'qwen2.5:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '986mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_3b = { + name: 'qwen2.5:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.9gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_7b = { + name: 'qwen2.5:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_32b = { + name: 'qwen2.5:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size:
'20gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_5_72b = { + name: 'qwen2.5:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN2_5_MODELS = [ + QWEN2_5_LATEST.name, + QWEN2_5_0_5b.name, + QWEN2_5_1_5b.name, + QWEN2_5_3b.name, + QWEN2_5_7b.name, + QWEN2_5_32b.name, + QWEN2_5_72b.name, +] as const + +// const QWEN2_5_IMAGE_MODELS = [] as const + +// export const QWEN2_5_EMBEDDING_MODELS = [] as const + +// const QWEN2_5_AUDIO_MODELS = [] as const + +// const QWEN2_5_VIDEO_MODELS = [] as const + +// export type Qwen2_5ChatModels = (typeof QWEN2_5_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen2_5ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN2_5_LATEST.name]: ChatRequest + [QWEN2_5_0_5b.name]: ChatRequest + [QWEN2_5_1_5b.name]: ChatRequest + [QWEN2_5_3b.name]: ChatRequest + [QWEN2_5_7b.name]: ChatRequest + [QWEN2_5_32b.name]: ChatRequest + [QWEN2_5_72b.name]: ChatRequest +} + +export type Qwen2_5ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_5_LATEST.name]: typeof QWEN2_5_LATEST.supports.input + [QWEN2_5_0_5b.name]: typeof QWEN2_5_0_5b.supports.input + [QWEN2_5_1_5b.name]: typeof QWEN2_5_1_5b.supports.input + [QWEN2_5_3b.name]: typeof QWEN2_5_3b.supports.input + [QWEN2_5_7b.name]: typeof QWEN2_5_7b.supports.input + [QWEN2_5_32b.name]: typeof QWEN2_5_32b.supports.input + [QWEN2_5_72b.name]: typeof QWEN2_5_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts new file mode 100644 index 00000000..87e42e14 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts @@ -0,0 +1,94 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN2_LATEST = { + name: 'qwen2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.4gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_0_5b = { + name: 'qwen2:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '352mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_1_5b = { + name: 'qwen2:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '935mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_7b = { + name: 'qwen2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.4gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_72b = { + name: 'qwen2:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '41gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN2_MODELS = [ + QWEN2_LATEST.name, + QWEN2_0_5b.name, + QWEN2_1_5b.name, + QWEN2_7b.name, + QWEN2_72b.name, +] as const + +// const QWEN2_IMAGE_MODELS = [] as const + +// export const QWEN2_EMBEDDING_MODELS = [] as const + +// const QWEN2_AUDIO_MODELS = [] as const + +// const QWEN2_VIDEO_MODELS = [] as const + +// export type Qwen2ChatModels = (typeof QWEN2_MODELS)[number] + +// Manual type map for per-model provider options 
+export type Qwen2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN2_LATEST.name]: ChatRequest + [QWEN2_0_5b.name]: ChatRequest + [QWEN2_1_5b.name]: ChatRequest + [QWEN2_7b.name]: ChatRequest + [QWEN2_72b.name]: ChatRequest +} + +export type Qwen2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_LATEST.name]: typeof QWEN2_LATEST.supports.input + [QWEN2_0_5b.name]: typeof QWEN2_0_5b.supports.input + [QWEN2_1_5b.name]: typeof QWEN2_1_5b.supports.input + [QWEN2_7b.name]: typeof QWEN2_7b.supports.input + [QWEN2_72b.name]: typeof QWEN2_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts new file mode 100644 index 00000000..b3bcbe99 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts @@ -0,0 +1,150 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN3_LATEST = { + name: 'qwen3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_0_6b = { + name: 'qwen3:0.6b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '523mb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_1_7b = { + name: 'qwen3:1.7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '1.4gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_4b = { + name: 'qwen3:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '2.5gb', + context: 256_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_8b = { + name: 'qwen3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_14b = { + name: 'qwen3:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '9.3gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_30b = { + name: 'qwen3:30b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '19gb', + context: 256_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_32b = { + name: 'qwen3:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_235b = { + name: 'qwen3:235b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '142gb', + context: 256_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN3_MODELS = [ + QWEN3_LATEST.name, + QWEN3_0_6b.name, + QWEN3_1_7b.name, + QWEN3_4b.name, + QWEN3_8b.name, + QWEN3_14b.name, + QWEN3_30b.name, + QWEN3_32b.name, + QWEN3_235b.name, +] as const + +// const QWEN3_IMAGE_MODELS = [] as const + +// export const QWEN3_EMBEDDING_MODELS = [] as const + +// const QWEN3_AUDIO_MODELS = [] as const + +// const QWEN3_VIDEO_MODELS = [] as const + +// export type Qwen3ChatModels = (typeof QWEN3_MODELS)[number] + +// Manual type map for per-model provider 
options +export type Qwen3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN3_LATEST.name]: ChatRequest + [QWEN3_0_6b.name]: ChatRequest + [QWEN3_1_7b.name]: ChatRequest + [QWEN3_4b.name]: ChatRequest + [QWEN3_8b.name]: ChatRequest + [QWEN3_14b.name]: ChatRequest + [QWEN3_30b.name]: ChatRequest + [QWEN3_32b.name]: ChatRequest + [QWEN3_235b.name]: ChatRequest +} + +export type Qwen3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN3_LATEST.name]: typeof QWEN3_LATEST.supports.input + [QWEN3_0_6b.name]: typeof QWEN3_0_6b.supports.input + [QWEN3_1_7b.name]: typeof QWEN3_1_7b.supports.input + [QWEN3_4b.name]: typeof QWEN3_4b.supports.input + [QWEN3_8b.name]: typeof QWEN3_8b.supports.input + [QWEN3_14b.name]: typeof QWEN3_14b.supports.input + [QWEN3_30b.name]: typeof QWEN3_30b.supports.input + [QWEN3_32b.name]: typeof QWEN3_32b.supports.input + [QWEN3_235b.name]: typeof QWEN3_235b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts new file mode 100644 index 00000000..41738f15 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWQ_LATEST = { + name: 'qwq:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWQ_32b = { + name: 'qwq:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const + +// const QWQ_IMAGE_MODELS = [] as const + +// export const QWQ_EMBEDDING_MODELS = [] as const + +// const QWQ_AUDIO_MODELS = [] as const + +// const QWQ_VIDEO_MODELS = [] as const + +// export type QwqChatModels = (typeof QWQ_MODELS)[number] + +// Manual type map for per-model provider options +export type QwqChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWQ_LATEST.name]: ChatRequest + [QWQ_32b.name]: ChatRequest +} + +export type QwqModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWQ_LATEST.name]: typeof QWQ_LATEST.supports.input + [QWQ_32b.name]: typeof QWQ_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts new file mode 100644 index 00000000..9a6ae9f6 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts @@ -0,0 +1,79 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const SAILOR2_LATEST = { + name: 'sailor2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.2gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const SAILOR2_1b = { + name: 'sailor2:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const SAILOR2_8b = { + name: 'sailor2:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.2gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + 
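Every family file in this PR repeats the same construction: a per-tag constant written `as const satisfies DefaultOllamaModelMeta`, so the object is validated against the shared interface without widening its literal types, and those literals then feed the `*_MODELS` tuples and the `*ByName` type maps. A minimal sketch of why the pattern works; the `EXAMPLE_MODEL` entry below is hypothetical, not part of this PR:

```ts
import type { DefaultOllamaModelMeta } from './models-meta'

// Hypothetical entry written the same way as the real ones:
// `as const` keeps literal types, `satisfies` validates the shape without widening.
const EXAMPLE_MODEL = {
  name: 'example:7b',
  supports: { input: ['text'], output: ['text'], capabilities: ['tools'] },
  size: '4.0gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta

// Because the literals survive, the tag and modality tuple can be derived
// directly from the value, which is exactly what the *ByName maps rely on.
type ExampleName = typeof EXAMPLE_MODEL.name             // 'example:7b'
type ExampleInputs = typeof EXAMPLE_MODEL.supports.input // readonly ['text']

// A typo is caught at compile time: e.g. `capabilities: ['tool']` would be
// rejected because 'tool' is not in the interface's capability union.
```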
+const SAILOR2_20b = { + name: 'sailor2:20b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '12gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const SAILOR2_MODELS = [ + SAILOR2_LATEST.name, + SAILOR2_1b.name, + SAILOR2_8b.name, + SAILOR2_20b.name, +] as const + +// const SAILOR2_IMAGE_MODELS = [] as const + +// export const SAILOR2_EMBEDDING_MODELS = [] as const + +// const SAILOR2_AUDIO_MODELS = [] as const + +// const SAILOR2_VIDEO_MODELS = [] as const + +// export type Sailor2ChatModels = (typeof SAILOR2_MODELS)[number] + +// Manual type map for per-model provider options +export type Sailor2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SAILOR2_LATEST.name]: ChatRequest + [SAILOR2_1b.name]: ChatRequest + [SAILOR2_8b.name]: ChatRequest + [SAILOR2_20b.name]: ChatRequest +} + +export type Sailor2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SAILOR2_LATEST.name]: typeof SAILOR2_LATEST.supports.input + [SAILOR2_1b.name]: typeof SAILOR2_1b.supports.input + [SAILOR2_8b.name]: typeof SAILOR2_8b.supports.input + [SAILOR2_20b.name]: typeof SAILOR2_20b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts new file mode 100644 index 00000000..62fa1e6f --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const SHIELDGEMMA_LATEST = { + name: 'shieldgemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const SHIELDGEMMA_2b = { + name: 'shieldgemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const SHIELDGEMMA_9b = { + name: 'shieldgemma:9b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const SHIELDGEMMA_27b = { + name: 'shieldgemma:27b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '17gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const SHIELDGEMMA_MODELS = [ + SHIELDGEMMA_LATEST.name, + SHIELDGEMMA_2b.name, + SHIELDGEMMA_9b.name, + SHIELDGEMMA_27b.name, +] as const + +// const SHIELDGEMMA_IMAGE_MODELS = [] as const + +// export const SHIELDGEMMA_EMBEDDING_MODELS = [] as const + +// const SHIELDGEMMA_AUDIO_MODELS = [] as const + +// const SHIELDGEMMA_VIDEO_MODELS = [] as const + +// export type ShieldgemmaChatModels = (typeof SHIELDGEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type ShieldgemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SHIELDGEMMA_LATEST.name]: ChatRequest + [SHIELDGEMMA_2b.name]: ChatRequest + [SHIELDGEMMA_9b.name]: ChatRequest + [SHIELDGEMMA_27b.name]: ChatRequest +} + +export type ShieldgemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SHIELDGEMMA_LATEST.name]: typeof SHIELDGEMMA_LATEST.supports.input + [SHIELDGEMMA_2b.name]: typeof SHIELDGEMMA_2b.supports.input + [SHIELDGEMMA_9b.name]: typeof SHIELDGEMMA_9b.supports.input +
[SHIELDGEMMA_27b.name]: typeof SHIELDGEMMA_27b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts new file mode 100644 index 00000000..eafdeb8e --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const SMALLTINKER_LATEST = { + name: 'smallthinker:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const SMALLTINKER_3b = { + name: 'smallthinker:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const SMALLTINKER_MODELS = [ + SMALLTINKER_LATEST.name, + SMALLTINKER_3b.name, +] as const + +// const SMALLTINKER_IMAGE_MODELS = [] as const + +// export const SMALLTINKER_EMBEDDING_MODELS = [] as const + +// const SMALLTINKER_AUDIO_MODELS = [] as const + +// const SMALLTINKER_VIDEO_MODELS = [] as const + +// export type SmalltinkerChatModels = (typeof SMALLTINKER_MODELS)[number] + +// Manual type map for per-model provider options +export type SmalltinkerChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [SMALLTINKER_LATEST.name]: ChatRequest + [SMALLTINKER_3b.name]: ChatRequest +} + +export type SmalltinkerModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SMALLTINKER_LATEST.name]: typeof SMALLTINKER_LATEST.supports.input + [SMALLTINKER_3b.name]: typeof SMALLTINKER_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts new file mode 100644 index 00000000..79ebc939 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const SMOLLM_LATEST = { + name: 'smollm:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '991mb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const SMOLLM_135m = { + name: 'smollm:135m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '92mb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const SMOLLM_360m = { + name: 'smollm:360m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '229mb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const SMOLLM_1_7b = { + name: 'smollm:1.7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '991mb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const SMOLLM_MODELS = [ + SMOLLM_LATEST.name, + SMOLLM_135m.name, + SMOLLM_360m.name, + SMOLLM_1_7b.name, +] as const + +// const SMOLLM_IMAGE_MODELS = [] as const + +// export const SMOLLM_EMBEDDING_MODELS = [] as const + +// const SMOLLM_AUDIO_MODELS = [] as const + +// const SMOLLM_VIDEO_MODELS = [] as const + +// export type SmollmChatModels = (typeof SMOLLM_MODELS)[number] + +// Manual type map for per-model provider options +export type SmollmChatModelProviderOptionsByName = { + // Models with thinking and structured output support +
[SMOLLM_LATEST.name]: ChatRequest + [SMOLLM_135m.name]: ChatRequest + [SMOLLM_360m.name]: ChatRequest + [SMOLLM_1_7b.name]: ChatRequest +} + +export type SmollmModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [SMOLLM_LATEST.name]: typeof SMOLLM_LATEST.supports.input + [SMOLLM_135m.name]: typeof SMOLLM_135m.supports.input + [SMOLLM_360m.name]: typeof SMOLLM_360m.supports.input + [SMOLLM_1_7b.name]: typeof SMOLLM_1_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts new file mode 100644 index 00000000..a4b0e110 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const TINNYLLAMA_LATEST = { + name: 'tinyllama:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '638mb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const TINNYLLAMA_1_1b = { + name: 'tinyllama:1.1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '638mb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const TINNYLLAMA_MODELS = [ + TINNYLLAMA_LATEST.name, + TINNYLLAMA_1_1b.name, +] as const + +// const TINNYLLAMA_IMAGE_MODELS = [] as const + +// export const TINNYLLAMA_EMBEDDING_MODELS = [] as const + +// const TINNYLLAMA_AUDIO_MODELS = [] as const + +// const TINNYLLAMA_VIDEO_MODELS = [] as const + +// export type TinnyllamaChatModels = (typeof TINNYLLAMA_MODELS)[number] + +// Manual type map for per-model provider options +export type TinnyllamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [TINNYLLAMA_LATEST.name]: ChatRequest + [TINNYLLAMA_1_1b.name]: ChatRequest +} + +export type TinnyllamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [TINNYLLAMA_LATEST.name]: typeof TINNYLLAMA_LATEST.supports.input + [TINNYLLAMA_1_1b.name]: typeof TINNYLLAMA_1_1b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts new file mode 100644 index 00000000..c76e6519 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const TULU3_LATEST = { + name: 'tulu3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const TULU3_8b = { + name: 'tulu3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const TULU3_70b = { + name: 'tulu3:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const TULU3_MODELS = [ + TULU3_LATEST.name, + TULU3_8b.name, + TULU3_70b.name, +] as const + +// const TULU3_IMAGE_MODELS = [] as const + +// export const TULU3_EMBEDDING_MODELS = [] as const + +// const TULU3_AUDIO_MODELS = [] as const + +// const TULU3_VIDEO_MODELS = [] as const + +// export type Tulu3ChatModels = (typeof TULU3_MODELS)[number] + +// Manual type
map for per-model provider options +export type Tulu3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [TULU3_LATEST.name]: ChatRequest + [TULU3_8b.name]: ChatRequest + [TULU3_70b.name]: ChatRequest +} + +export type Tulu3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [TULU3_LATEST.name]: typeof TULU3_LATEST.supports.input + [TULU3_8b.name]: typeof TULU3_8b.supports.input + [TULU3_70b.name]: typeof TULU3_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/models-meta.ts b/packages/typescript/ai-ollama/src/meta/models-meta.ts new file mode 100644 index 00000000..099432b4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/models-meta.ts @@ -0,0 +1,11 @@ +export interface DefaultOllamaModelMeta<TProviderOptions = unknown> { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts new file mode 100644 index 00000000..1b9a3e8c --- /dev/null +++ b/packages/typescript/ai-ollama/src/model-meta.ts @@ -0,0 +1,265 @@ +// constants +import { ATHENE_MODELS } from './meta/model-meta-athene' +import { AYA_MODELS } from './meta/model-meta-aya' +import { CODEGEMMA_MODELS } from './meta/model-meta-codegemma' +import { CODELLAMA_MODELS } from './meta/model-meta-codellama' +import { COMMAND_R_MODELS } from './meta/model-meta-command-r' +import { COMMAND_R_PLUS_MODELS } from './meta/model-meta-command-r-plus' +import { COMMAND_R_7b_MODELS } from './meta/model-meta-command-r7b' +import { DEEPSEEK_CODER_V2_MODELS } from './meta/model-meta-deepseek-coder-v2' +import { DEEPSEEK_OCR_MODELS } from './meta/model-meta-deepseek-ocr' +import { DEEPSEEK_R1_MODELS } from './meta/model-meta-deepseek-r1' +import { DEEPSEEK_V3_1_MODELS } from './meta/model-meta-deepseek-v3.1' +import { DEVSTRAL_MODELS } from './meta/model-meta-devstral' +import { DOLPHIN3_MODELS } from './meta/model-meta-dolphin3' +import { EXAONE3_5MODELS } from './meta/model-meta-exaone3.5' +import { FALCON2_MODELS } from './meta/model-meta-falcon2' +import { FALCON3_MODELS } from './meta/model-meta-falcon3' +import { FIREFUNCTION_V2_MODELS } from './meta/model-meta-firefunction-v2' +import { GEMMA_MODELS } from './meta/model-meta-gemma' +import { GEMMA2_MODELS } from './meta/model-meta-gemma2' +import { GEMMA3_MODELS } from './meta/model-meta-gemma3' +import { GRANITE3_DENSE_MODELS } from './meta/model-meta-granite3-dense' +import { GRANITE3_GUARDIAN_MODELS } from './meta/model-meta-granite3-guardian' +import { GRANITE3_MOE_MODELS } from './meta/model-meta-granite3-moe' +import { GRANITE3_1_DENSE_MODELS } from './meta/model-meta-granite3.1-dense' +import { GRANITE3_1_MOE_MODELS } from './meta/model-meta-granite3.1-moe' +import { LLAMA_GUARD3_MODELS } from './meta/model-meta-llama-guard3' +import { LLAMA2_MODELS } from './meta/model-meta-llama2' +import { LLAMA3_MODELS } from './meta/model-meta-llama3' +import { LLAMA3_CHATQA_MODELS } from './meta/model-meta-llama3-chatqa' +import { LLAMA3_GRADIENT_MODELS } from './meta/model-meta-llama3-gradient' +import { LLAMA3_1_MODELS } from './meta/model-meta-llama3.1' +import { LLAMA3_2_MODELS } from './meta/model-meta-llama3.2' +import { LLAMA3_2_VISION_MODELS } from './meta/model-meta-llama3.2-vision' +import {
LLAMA3_3_MODELS } from './meta/model-meta-llama3.3' +import { LLAMA4_MODELS } from './meta/model-meta-llama4' +import { LLAVA_MODELS } from './meta/model-meta-llava' +import { LLAVA_LLAMA3_MODELS } from './meta/model-meta-llava-llama3' +import { LLAVA_PHI3_MODELS } from './meta/model-meta-llava-phi3' +import { MARCO_O1_MODELS } from './meta/model-meta-marco-o1' +import { MISTRAL_MODELS } from './meta/model-meta-mistral' +import { MISTRAL_LARGE_MODELS } from './meta/model-meta-mistral-large' +import { MISTRAL_NEMO_MODELS } from './meta/model-meta-mistral-nemo' +import { MISTRAL_SMALL_MODELS } from './meta/model-meta-mistral-small' +import { MIXTRAL_MODELS } from './meta/model-meta-mixtral' +import { MOONDREAM_MODELS } from './meta/model-meta-moondream' +import { NEMOTRON_MODELS } from './meta/model-meta-nemotron' +import { NEMOTRON_MINI_MODELS } from './meta/model-meta-nemotron-mini' +import { OLMO2_MODELS } from './meta/model-meta-olmo2' +import { OPENCODER_MODELS } from './meta/model-meta-opencoder' +import { OPENHERMES_MODELS } from './meta/model-meta-openhermes' +import { PHI3_MODELS } from './meta/model-meta-phi3' +import { PHI4_MODELS } from './meta/model-meta-phi4' +import { QWEN_MODELS } from './meta/model-meta-qwen' +import { QWEN2_MODELS } from './meta/model-meta-qwen2' +import { QWEN2_5_MODELS } from './meta/model-meta-qwen2.5' +import { QWEN2_5_CODER_MODELS } from './meta/model-meta-qwen2.5-coder' +import { QWEN3_MODELS } from './meta/model-meta-qwen3' +import { QWQ_MODELS } from './meta/model-meta-qwq' +import { SAILOR2_MODELS } from './meta/model-meta-sailor2' +import { SHIELDGEMMA_MODELS } from './meta/model-meta-shieldgemma' +import { SMALLTINKER_MODELS } from './meta/model-meta-smalltinker' +import { SMOLLM_MODELS } from './meta/model-meta-smollm' +import { TINNYLLAMA_MODELS } from './meta/model-meta-tinyllama' +import { TULU3_MODELS } from './meta/model-meta-tulu3' + +// types +import type { AtheneModelInputModalitiesByName } from './meta/model-meta-athene' +import type { AyaModelInputModalitiesByName } from './meta/model-meta-aya' +import type { CodegemmaModelInputModalitiesByName } from './meta/model-meta-codegemma' +import type { CodellamaModelInputModalitiesByName } from './meta/model-meta-codellama' +import type { CommandRModelInputModalitiesByName } from './meta/model-meta-command-r' +import type { CommandRPlusModelInputModalitiesByName } from './meta/model-meta-command-r-plus' +import type { CommandR7bModelInputModalitiesByName } from './meta/model-meta-command-r7b' +import type { DeepseekCoderV2ModelInputModalitiesByName } from './meta/model-meta-deepseek-coder-v2' +import type { DeepseekOcrModelInputModalitiesByName } from './meta/model-meta-deepseek-ocr' +import type { DeepseekR1ModelInputModalitiesByName } from './meta/model-meta-deepseek-r1' +import type { Deepseekv3_1ModelInputModalitiesByName } from './meta/model-meta-deepseek-v3.1' +import type { DevstralModelInputModalitiesByName } from './meta/model-meta-devstral' +import type { Dolphin3ModelInputModalitiesByName } from './meta/model-meta-dolphin3' +import type { Exaone3_5ModelInputModalitiesByName } from './meta/model-meta-exaone3.5' +import type { Falcon2ModelInputModalitiesByName } from './meta/model-meta-falcon2' +import type { Falcon3ModelInputModalitiesByName } from './meta/model-meta-falcon3' +import type { Firefunction_V2ModelInputModalitiesByName } from './meta/model-meta-firefunction-v2' +import type { GemmaModelInputModalitiesByName } from './meta/model-meta-gemma' +import type { 
Gemma2ModelInputModalitiesByName } from './meta/model-meta-gemma2' +import type { Gemma3ModelInputModalitiesByName } from './meta/model-meta-gemma3' +import type { Granite3DenseModelInputModalitiesByName } from './meta/model-meta-granite3-dense' +import type { Granite3GuardianModelInputModalitiesByName } from './meta/model-meta-granite3-guardian' +import type { Granite3MoeModelInputModalitiesByName } from './meta/model-meta-granite3-moe' +import type { Granite3_1DenseModelInputModalitiesByName } from './meta/model-meta-granite3.1-dense' +import type { Granite3_1MoeModelInputModalitiesByName } from './meta/model-meta-granite3.1-moe' +import type { LlamaGuard3ModelInputModalitiesByName } from './meta/model-meta-llama-guard3' +import type { Llama2ModelInputModalitiesByName } from './meta/model-meta-llama2' +import type { Llama3ModelInputModalitiesByName } from './meta/model-meta-llama3' +import type { Llama3ChatQaModelInputModalitiesByName } from './meta/model-meta-llama3-chatqa' +import type { Llama3GradientModelInputModalitiesByName } from './meta/model-meta-llama3-gradient' +import type { Llama3_1ModelInputModalitiesByName } from './meta/model-meta-llama3.1' +import type { Llama3_2ModelInputModalitiesByName } from './meta/model-meta-llama3.2' +import type { Llama3_2VisionModelInputModalitiesByName } from './meta/model-meta-llama3.2-vision' +import type { Llama3_3ModelInputModalitiesByName } from './meta/model-meta-llama3.3' +import type { Llama3_4ModelInputModalitiesByName } from './meta/model-meta-llama4' +import type { llavaModelInputModalitiesByName } from './meta/model-meta-llava' +import type { LlavaLlamaModelInputModalitiesByName } from './meta/model-meta-llava-llama3' +import type { LlavaPhi3ModelInputModalitiesByName } from './meta/model-meta-llava-phi3' +import type { MarcoO1ModelInputModalitiesByName } from './meta/model-meta-marco-o1' +import type { MistralModelInputModalitiesByName } from './meta/model-meta-mistral' +import type { MistralLargeModelInputModalitiesByName } from './meta/model-meta-mistral-large' +import type { MistralNemoModelInputModalitiesByName } from './meta/model-meta-mistral-nemo' +import type { MistralSmallModelInputModalitiesByName } from './meta/model-meta-mistral-small' +import type { MixtralModelInputModalitiesByName } from './meta/model-meta-mixtral' +import type { MoondreamModelInputModalitiesByName } from './meta/model-meta-moondream' +import type { NemotronModelInputModalitiesByName } from './meta/model-meta-nemotron' +import type { NemotronMiniModelInputModalitiesByName } from './meta/model-meta-nemotron-mini' +import type { Olmo2ModelInputModalitiesByName } from './meta/model-meta-olmo2' +import type { OpencoderModelInputModalitiesByName } from './meta/model-meta-opencoder' +import type { OpenhermesModelInputModalitiesByName } from './meta/model-meta-openhermes' +import type { Phi3ModelInputModalitiesByName } from './meta/model-meta-phi3' +import type { Phi4ModelInputModalitiesByName } from './meta/model-meta-phi4' +import type { QwenModelInputModalitiesByName } from './meta/model-meta-qwen' +import type { Qwen2ModelInputModalitiesByName } from './meta/model-meta-qwen2' +import type { Qwen2_5ModelInputModalitiesByName } from './meta/model-meta-qwen2.5' +import type { Qwen2_5CoderModelInputModalitiesByName } from './meta/model-meta-qwen2.5-coder' +import type { Qwen3ModelInputModalitiesByName } from './meta/model-meta-qwen3' +import type { QwqModelInputModalitiesByName } from './meta/model-meta-qwq' +import type { Sailor2ModelInputModalitiesByName 
} from './meta/model-meta-sailor2' +import type { ShieldgemmaModelInputModalitiesByName } from './meta/model-meta-shieldgemma' +import type { SmalltinkerModelInputModalitiesByName } from './meta/model-meta-smalltinker' +import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm' +import type { TinnyllamaModelInputModalitiesByName } from './meta/model-meta-tinyllama' +import type { Tulu3ModelInputModalitiesByName } from './meta/model-meta-tulu3' + +export const OLLAMA_MODELS = [ + ...ATHENE_MODELS, + ...AYA_MODELS, + ...CODEGEMMA_MODELS, + ...CODELLAMA_MODELS, + ...COMMAND_R_PLUS_MODELS, + ...COMMAND_R_MODELS, + ...COMMAND_R_7b_MODELS, + ...DEEPSEEK_CODER_V2_MODELS, + ...DEEPSEEK_OCR_MODELS, + ...DEEPSEEK_R1_MODELS, + ...DEEPSEEK_V3_1_MODELS, + ...DEVSTRAL_MODELS, + ...DOLPHIN3_MODELS, + ...EXAONE3_5MODELS, + ...FALCON2_MODELS, + ...FALCON3_MODELS, + ...FIREFUNCTION_V2_MODELS, + ...GEMMA_MODELS, + ...GEMMA2_MODELS, + ...GEMMA3_MODELS, + ...GRANITE3_DENSE_MODELS, + ...GRANITE3_GUARDIAN_MODELS, + ...GRANITE3_MOE_MODELS, + ...GRANITE3_1_DENSE_MODELS, + ...GRANITE3_1_MOE_MODELS, + ...LLAMA_GUARD3_MODELS, + ...LLAMA2_MODELS, + ...LLAMA3_CHATQA_MODELS, + ...LLAMA3_GRADIENT_MODELS, + ...LLAMA3_1_MODELS, + ...LLAMA3_2_MODELS, + ...LLAMA3_2_VISION_MODELS, + ...LLAMA3_3_MODELS, + ...LLAMA3_MODELS, + ...LLAMA4_MODELS, + ...LLAVA_LLAMA3_MODELS, + ...LLAVA_PHI3_MODELS, + ...LLAVA_MODELS, + ...MARCO_O1_MODELS, + ...MISTRAL_LARGE_MODELS, + ...MISTRAL_NEMO_MODELS, + ...MISTRAL_SMALL_MODELS, + ...MISTRAL_MODELS, + ...MIXTRAL_MODELS, + ...MOONDREAM_MODELS, + ...NEMOTRON_MINI_MODELS, + ...NEMOTRON_MODELS, + ...OLMO2_MODELS, + ...OPENCODER_MODELS, + ...OPENHERMES_MODELS, + ...PHI3_MODELS, + ...PHI4_MODELS, + ...QWEN_MODELS, + ...QWEN2_5_CODER_MODELS, + ...QWEN2_5_MODELS, + ...QWEN2_MODELS, + ...QWEN3_MODELS, + ...QWQ_MODELS, + ...SAILOR2_MODELS, + ...SHIELDGEMMA_MODELS, + ...SMALLTINKER_MODELS, + ...SMOLLM_MODELS, + ...TINNYLLAMA_MODELS, + ...TULU3_MODELS, +] as const + +export type OllamaModelInputModalitiesByName = + AtheneModelInputModalitiesByName & + AyaModelInputModalitiesByName & + CodegemmaModelInputModalitiesByName & + CodellamaModelInputModalitiesByName & + CommandRPlusModelInputModalitiesByName & + CommandRModelInputModalitiesByName & + CommandR7bModelInputModalitiesByName & + DeepseekCoderV2ModelInputModalitiesByName & + DeepseekOcrModelInputModalitiesByName & + DeepseekR1ModelInputModalitiesByName & + Deepseekv3_1ModelInputModalitiesByName & + DevstralModelInputModalitiesByName & + Dolphin3ModelInputModalitiesByName & + Exaone3_5ModelInputModalitiesByName & + Falcon2ModelInputModalitiesByName & + Falcon3ModelInputModalitiesByName & + Firefunction_V2ModelInputModalitiesByName & + GemmaModelInputModalitiesByName & + Gemma2ModelInputModalitiesByName & + Gemma3ModelInputModalitiesByName & + Granite3DenseModelInputModalitiesByName & + Granite3GuardianModelInputModalitiesByName & + Granite3MoeModelInputModalitiesByName & + Granite3_1DenseModelInputModalitiesByName & + Granite3_1MoeModelInputModalitiesByName & + LlamaGuard3ModelInputModalitiesByName & + Llama2ModelInputModalitiesByName & + Llama3ChatQaModelInputModalitiesByName & + Llama3GradientModelInputModalitiesByName & + Llama3_1ModelInputModalitiesByName & + Llama3_2VisionModelInputModalitiesByName & + Llama3_2ModelInputModalitiesByName & + Llama3_3ModelInputModalitiesByName & + Llama3ModelInputModalitiesByName & + Llama3_4ModelInputModalitiesByName & + LlavaLlamaModelInputModalitiesByName & +
LlavaPhi3ModelInputModalitiesByName & + llavaModelInputModalitiesByName & + MarcoO1ModelInputModalitiesByName & + MistralLargeModelInputModalitiesByName & + MistralNemoModelInputModalitiesByName & + MistralSmallModelInputModalitiesByName & + MistralModelInputModalitiesByName & + MixtralModelInputModalitiesByName & + MoondreamModelInputModalitiesByName & + NemotronMiniModelInputModalitiesByName & + NemotronModelInputModalitiesByName & + Olmo2ModelInputModalitiesByName & + OpencoderModelInputModalitiesByName & + OpenhermesModelInputModalitiesByName & + Phi3ModelInputModalitiesByName & + Phi4ModelInputModalitiesByName & + QwenModelInputModalitiesByName & + Qwen2_5CoderModelInputModalitiesByName & + Qwen2_5ModelInputModalitiesByName & + Qwen2ModelInputModalitiesByName & + Qwen3ModelInputModalitiesByName & + QwqModelInputModalitiesByName & + Sailor2ModelInputModalitiesByName & + ShieldgemmaModelInputModalitiesByName & + SmalltinkerModelInputModalitiesByName & + SmollmModelInputModalitiesByName & + TinnyllamaModelInputModalitiesByName & + Tulu3ModelInputModalitiesByName diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts index fc6080c0..0c457861 100644 --- a/packages/typescript/ai-ollama/src/ollama-adapter.ts +++ b/packages/typescript/ai-ollama/src/ollama-adapter.ts @@ -1,5 +1,9 @@ import { Ollama as OllamaSDK } from 'ollama' import { BaseAdapter, convertZodToJsonSchema } from '@tanstack/ai' + +import { OLLAMA_MODELS } from './model-meta' + +import type { OllamaModelInputModalitiesByName } from './model-meta' import type { AbortableAsyncIterator, ChatRequest, @@ -23,60 +27,8 @@ export interface OllamaConfig { host?: string } -const OLLAMA_MODELS = [ - 'llama2', - 'llama3', - 'codellama', - 'mistral', - 'mixtral', - 'phi', - 'neural-chat', - 'starling-lm', - 'orca-mini', - 'vicuna', - 'nous-hermes', - 'nomic-embed-text', - 'gpt-oss:20b', -] as const - const OLLAMA_EMBEDDING_MODELS = [] as const -/** - * Type-only map from Ollama model name to its supported input modalities. - * Ollama models have varying multimodal capabilities: - * - Vision models (llava, bakllava, etc.) support text + image - * - Most text models support text only - * - * Note: This is a placeholder - Ollama models are dynamically loaded, - * so we provide a base type that can be extended. - * - * @see https://github.com/ollama/ollama/blob/main/docs/api.md - */ -export type OllamaModelInputModalitiesByName = { - // Vision-capable models (text + image) - llava: readonly ['text', 'image'] - bakllava: readonly ['text', 'image'] - 'llava-llama3': readonly ['text', 'image'] - 'llava-phi3': readonly ['text', 'image'] - moondream: readonly ['text', 'image'] - minicpm: readonly ['text', 'image'] - - // Text-only models - llama2: readonly ['text'] - llama3: readonly ['text'] - codellama: readonly ['text'] - mistral: readonly ['text'] - mixtral: readonly ['text'] - phi: readonly ['text'] - 'neural-chat': readonly ['text'] - 'starling-lm': readonly ['text'] - 'orca-mini': readonly ['text'] - vicuna: readonly ['text'] - 'nous-hermes': readonly ['text'] - 'nomic-embed-text': readonly ['text'] - 'gpt-oss:20b': readonly ['text'] -} - /** * Type-only map from Ollama model name to its provider-specific options. * Ollama models share the same options interface.
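With the per-family tuples flattened into `OLLAMA_MODELS` in model-meta.ts, a single union of valid chat tags and a matching runtime guard fall out of the same constant. A sketch; the `OllamaChatModel` alias and `isKnownOllamaModel` guard are illustrative and not exported by this PR:

```ts
import { OLLAMA_MODELS } from './model-meta'

// Union of every tag contributed by the per-family *_MODELS tuples,
// e.g. 'qwen3:8b' | 'sailor2:20b' | 'tulu3:70b' | ...
type OllamaChatModel = (typeof OLLAMA_MODELS)[number]

// Runtime guard built from the same source of truth, so the type and the
// runtime list cannot drift apart.
function isKnownOllamaModel(tag: string): tag is OllamaChatModel {
  return (OLLAMA_MODELS as readonly string[]).includes(tag)
}

const requested = 'qwen3:8b'
if (isKnownOllamaModel(requested)) {
  // `requested` is narrowed to OllamaChatModel here
}
```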
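`OllamaModelInputModalitiesByName` intersects every family's map, so each tag resolves to the exact `supports.input` tuple of its metadata entry. One possible use is deriving the set of vision-capable tags at the type level; the `VisionCapableModel` alias and `attachImage` signature below are illustrative only:

```ts
import type { OllamaModelInputModalitiesByName } from './model-meta'

// Tags whose metadata input tuple includes 'image' (vision-capable models).
type VisionCapableModel = {
  [K in keyof OllamaModelInputModalitiesByName]:
    'image' extends OllamaModelInputModalitiesByName[K][number] ? K : never
}[keyof OllamaModelInputModalitiesByName]

// Hypothetical helper: accepts only models whose metadata lists image input,
// so passing a text-only tag such as 'qwen3:8b' is a compile-time error.
declare function attachImage(model: VisionCapableModel, imageBase64: string): void
```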
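Each `*ChatModelProviderOptionsByName` map currently points every tag at the SDK's `ChatRequest`, but keying the options by model name leaves room to narrow them per model later without changing call sites. A sketch of a consumer, using the qwen3 map as an example; `buildQwen3Request` is hypothetical and the real adapter may wire this differently:

```ts
import type { Qwen3ChatModelProviderOptionsByName } from './meta/model-meta-qwen3'

// Hypothetical helper: the accepted options are looked up from the per-model map,
// so each tag carries whatever request shape its family declares for it.
function buildQwen3Request<M extends keyof Qwen3ChatModelProviderOptionsByName>(
  model: M,
  options: Omit<Qwen3ChatModelProviderOptionsByName[M], 'model'>,
): Qwen3ChatModelProviderOptionsByName[M] {
  return { ...options, model } as Qwen3ChatModelProviderOptionsByName[M]
}

// The literal key constrains both the tag and the options in one place:
const request = buildQwen3Request('qwen3:8b', {
  messages: [{ role: 'user', content: 'hello' }],
})
```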