Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 52 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-athene.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import type { ChatRequest } from 'ollama'
import type { DefaultOllamaModelMeta } from './models-meta'

// Ollama model metadata for the Athene v2 family.
// Both entries carry identical metadata; `:latest` presumably aliases the
// 72b tag — TODO confirm against the Ollama registry.
const ATHENE_V2_LATEST = {
  name: 'athene-v2:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '47gb',
  context: 32_000,
} as const satisfies DefaultOllamaModelMeta<any>

const ATHENE_V2_72b = {
  name: 'athene-v2:72b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '47gb',
  context: 32_000,
} as const satisfies DefaultOllamaModelMeta<any>

// All chat-capable Athene tags known to this package.
export const ATHENE_MODELS = [
  ATHENE_V2_LATEST.name,
  ATHENE_V2_72b.name,
] as const

// Placeholders kept for structural parity with the sibling model-meta files.
// const ATHENE_IMAGE_MODELS = [] as const

// export const ATHENE_EMBEDDING_MODELS = [] as const

// const ATHENE_AUDIO_MODELS = [] as const

// const ATHENE_VIDEO_MODELS = [] as const

// export type AtheneChatModels = (typeof ATHENE_MODELS)[number]

// Manual type map for per-model provider options
export type AtheneChatModelProviderOptionsByName = {
  // Both models take the plain Ollama ChatRequest options; tool calling is
  // declared via `capabilities` above.
  [ATHENE_V2_LATEST.name]: ChatRequest
  [ATHENE_V2_72b.name]: ChatRequest
}

// Maps each model name to its supported input modalities (text-only here).
export type AtheneModelInputModalitiesByName = {
  [ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input
  [ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input
}
62 changes: 62 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-aya.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
import type { ChatRequest } from 'ollama'
import type { DefaultOllamaModelMeta } from './models-meta'

// Ollama model metadata for the Aya family.
// `:latest` and `:8b` carry identical metadata; presumably `:latest` aliases
// the 8b tag — TODO confirm against the Ollama registry.
const AYA_LATEST = {
  name: 'aya:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '4.8gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta<any>

const AYA_8b = {
  name: 'aya:8b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '4.8gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta<any>

const AYA_35b = {
  name: 'aya:35b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '20gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta<any>

// All chat-capable Aya tags known to this package.
export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const

// Placeholders kept for structural parity with the sibling model-meta files.
// const AYA_IMAGE_MODELS = [] as const

// export const AYA_EMBEDDING_MODELS = [] as const

// const AYA_AUDIO_MODELS = [] as const

// const AYA_VIDEO_MODELS = [] as const

// export type AyaChatModels = (typeof AYA_MODELS)[number]

// Manual type map for per-model provider options
export type AyaChatModelProviderOptionsByName = {
  // Every model takes the plain Ollama ChatRequest options; no extra
  // capabilities are declared for this family.
  [AYA_LATEST.name]: ChatRequest
  [AYA_8b.name]: ChatRequest
  [AYA_35b.name]: ChatRequest
}

// Maps each model name to its supported input modalities (text-only here).
export type AyaModelInputModalitiesByName = {
  [AYA_LATEST.name]: typeof AYA_LATEST.supports.input
  [AYA_8b.name]: typeof AYA_8b.supports.input
  [AYA_35b.name]: typeof AYA_35b.supports.input
}
66 changes: 66 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import type { ChatRequest } from 'ollama'
import type { DefaultOllamaModelMeta } from './models-meta'

// Ollama model metadata for the CodeGemma family.
// `:latest` and `:7b` carry identical metadata; presumably `:latest` aliases
// the 7b tag — TODO confirm against the Ollama registry.
const CODEGEMMA_LATEST = {
  name: 'codegemma:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '5gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta<any>

// Renamed from CODEGEMMA_8b: the constant holds the `codegemma:2b` tag, so the
// identifier now matches the model it describes.
const CODEGEMMA_2b = {
  name: 'codegemma:2b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '1.65gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta<any>

// Renamed from CODEGEMMA_35b: the constant holds the `codegemma:7b` tag.
const CODEGEMMA_7b = {
  name: 'codegemma:7b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '5gb',
  context: 8_000,
} as const satisfies DefaultOllamaModelMeta<any>

// All chat-capable CodeGemma tags known to this package.
// Order and values are unchanged from before the constant renames.
export const CODEGEMMA_MODELS = [
  CODEGEMMA_LATEST.name,
  CODEGEMMA_2b.name,
  CODEGEMMA_7b.name,
] as const

// Placeholders kept for structural parity with the sibling model-meta files.
// const CODEGEMMA_IMAGE_MODELS = [] as const

// export const CODEGEMMA_EMBEDDING_MODELS = [] as const

// const CODEGEMMA_AUDIO_MODELS = [] as const

// const CODEGEMMA_VIDEO_MODELS = [] as const

// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number]

// Manual type map for per-model provider options
export type CodegemmaChatModelProviderOptionsByName = {
  // Every model takes the plain Ollama ChatRequest options; no extra
  // capabilities are declared for this family.
  [CODEGEMMA_LATEST.name]: ChatRequest
  [CODEGEMMA_2b.name]: ChatRequest
  [CODEGEMMA_7b.name]: ChatRequest
}

// Maps each model name to its supported input modalities (text-only here).
export type CodegemmaModelInputModalitiesByName = {
  [CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input
  [CODEGEMMA_2b.name]: typeof CODEGEMMA_2b.supports.input
  [CODEGEMMA_7b.name]: typeof CODEGEMMA_7b.supports.input
}
94 changes: 94 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
import type { ChatRequest } from 'ollama'
import type { DefaultOllamaModelMeta } from './models-meta'

// Ollama model metadata for the Code Llama family.
// `:latest` and `:7b` carry identical metadata; presumably `:latest` aliases
// the 7b tag — TODO confirm against the Ollama registry.
const CODELLAMA_LATEST = {
  name: 'codellama:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '3.8gb',
  context: 16_000,
} as const satisfies DefaultOllamaModelMeta<any>

const CODELLAMA_7b = {
  name: 'codellama:7b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '3.8gb',
  context: 16_000,
} as const satisfies DefaultOllamaModelMeta<any>

const CODELLAMA_13b = {
  name: 'codellama:13b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '7.4gb',
  context: 16_000,
} as const satisfies DefaultOllamaModelMeta<any>

const CODELLAMA_34b = {
  name: 'codellama:34b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '19gb',
  context: 16_000,
} as const satisfies DefaultOllamaModelMeta<any>

const CODELLAMA_70b = {
  name: 'codellama:70b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: [],
  },
  size: '39gb',
  // NOTE(review): 2k is far below the 16k of the other tags — confirm against
  // the model card that this is intentional and not a typo.
  context: 2_000,
} as const satisfies DefaultOllamaModelMeta<any>

// All chat-capable Code Llama tags known to this package.
export const CODELLAMA_MODELS = [
  CODELLAMA_LATEST.name,
  CODELLAMA_7b.name,
  CODELLAMA_13b.name,
  CODELLAMA_34b.name,
  CODELLAMA_70b.name,
] as const

// Placeholders kept for structural parity with the sibling model-meta files.
// const CODELLAMA_IMAGE_MODELS = [] as const

// export const CODELLAMA_EMBEDDING_MODELS = [] as const

// const CODELLAMA_AUDIO_MODELS = [] as const

// const CODELLAMA_VIDEO_MODELS = [] as const

// export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number]

// Manual type map for per-model provider options
export type CodellamaChatModelProviderOptionsByName = {
  // Every model takes the plain Ollama ChatRequest options; no extra
  // capabilities are declared for this family.
  [CODELLAMA_LATEST.name]: ChatRequest
  [CODELLAMA_7b.name]: ChatRequest
  [CODELLAMA_13b.name]: ChatRequest
  [CODELLAMA_34b.name]: ChatRequest
  [CODELLAMA_70b.name]: ChatRequest
}

// Maps each model name to its supported input modalities (text-only here).
export type CodellamaModelInputModalitiesByName = {
  [CODELLAMA_LATEST.name]: typeof CODELLAMA_LATEST.supports.input
  [CODELLAMA_7b.name]: typeof CODELLAMA_7b.supports.input
  [CODELLAMA_13b.name]: typeof CODELLAMA_13b.supports.input
  [CODELLAMA_34b.name]: typeof CODELLAMA_34b.supports.input
  [CODELLAMA_70b.name]: typeof CODELLAMA_70b.supports.input
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import type { ChatRequest } from 'ollama'
import type { DefaultOllamaModelMeta } from './models-meta'

// Ollama model metadata for the Command R+ family.
// Both entries carry identical metadata; `:latest` presumably aliases the
// 104b tag — TODO confirm against the Ollama registry.
const COMMAND_R_PLUS_LATEST = {
  name: 'command-r-plus:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '59gb',
  context: 128_000,
} as const satisfies DefaultOllamaModelMeta<any>

const COMMAND_R_PLUS_104b = {
  name: 'command-r-plus:104b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '59gb',
  context: 128_000,
} as const satisfies DefaultOllamaModelMeta<any>

// All chat-capable Command R+ tags known to this package.
export const COMMAND_R_PLUS_MODELS = [
  COMMAND_R_PLUS_LATEST.name,
  COMMAND_R_PLUS_104b.name,
] as const

// Placeholders kept for structural parity with the sibling model-meta files.
// const COMMAND_R_PLUS_IMAGE_MODELS = [] as const

// export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const

// const COMMAND_R_PLUS_AUDIO_MODELS = [] as const

// const COMMAND_R_PLUS_VIDEO_MODELS = [] as const

// export type CommandRPlusChatModels = (typeof COMMAND_R_PLUS_MODELS)[number]

// Manual type map for per-model provider options
export type CommandRPlusChatModelProviderOptionsByName = {
  // Both models take the plain Ollama ChatRequest options; tool calling is
  // declared via `capabilities` above.
  [COMMAND_R_PLUS_LATEST.name]: ChatRequest
  [COMMAND_R_PLUS_104b.name]: ChatRequest
}

// Maps each model name to its supported input modalities (text-only here).
export type CommandRPlusModelInputModalitiesByName = {
  [COMMAND_R_PLUS_LATEST.name]: typeof COMMAND_R_PLUS_LATEST.supports.input
  [COMMAND_R_PLUS_104b.name]: typeof COMMAND_R_PLUS_104b.supports.input
}
52 changes: 52 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import type { ChatRequest } from 'ollama'
import type { DefaultOllamaModelMeta } from './models-meta'

// Ollama model metadata for the Command R family.
// Both entries carry identical metadata; `:latest` presumably aliases the
// 35b tag — TODO confirm against the Ollama registry.
const COMMAND_R_LATEST = {
  name: 'command-r:latest',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '19gb',
  context: 128_000,
} as const satisfies DefaultOllamaModelMeta<any>

const COMMAND_R_35b = {
  name: 'command-r:35b',
  supports: {
    input: ['text'],
    output: ['text'],
    capabilities: ['tools'],
  },
  size: '19gb',
  context: 128_000,
} as const satisfies DefaultOllamaModelMeta<any>

// All chat-capable Command R tags known to this package.
export const COMMAND_R_MODELS = [
  COMMAND_R_LATEST.name,
  COMMAND_R_35b.name,
] as const

// Placeholders kept for structural parity with the sibling model-meta files.
// const COMMAND_R_IMAGE_MODELS = [] as const

// export const COMMAND_R_EMBEDDING_MODELS = [] as const

// const COMMAND_R_AUDIO_MODELS = [] as const

// const COMMAND_R_VIDEO_MODELS = [] as const

// export type CommandRChatModels = (typeof COMMAND_R_MODELS)[number]

// Manual type map for per-model provider options
export type CommandRChatModelProviderOptionsByName = {
  // Both models take the plain Ollama ChatRequest options; tool calling is
  // declared via `capabilities` above.
  [COMMAND_R_LATEST.name]: ChatRequest
  [COMMAND_R_35b.name]: ChatRequest
}

// Maps each model name to its supported input modalities (text-only here).
export type CommandRModelInputModalitiesByName = {
  [COMMAND_R_LATEST.name]: typeof COMMAND_R_LATEST.supports.input
  [COMMAND_R_35b.name]: typeof COMMAND_R_35b.supports.input
}
Loading
Loading