diff --git a/.changeset/two-bikes-kneel.md b/.changeset/two-bikes-kneel.md new file mode 100644 index 00000000..cd7f234b --- /dev/null +++ b/.changeset/two-bikes-kneel.md @@ -0,0 +1,18 @@ +--- +'@tanstack/ai-anthropic': minor +'@tanstack/ai-gemini': minor +'@tanstack/ai-ollama': minor +'@tanstack/ai-openai': minor +'@tanstack/ai': minor +'@tanstack/ai-client': minor +'@tanstack/ai-devtools': minor +'@tanstack/ai-react': minor +'@tanstack/ai-react-ui': minor +'@tanstack/ai-solid': minor +'@tanstack/ai-svelte': minor +'@tanstack/ai-vue': minor +'@tanstack/ai-vue-ui': minor +'@tanstack/react-ai-devtools': minor +--- + +Split up adapters for better tree shaking into separate functionalities diff --git a/CHANGELOG.md b/CHANGELOG.md index 6aa79194..c6c0cdb0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -369,10 +369,10 @@ The `chat()` method now includes an automatic tool execution loop: ```typescript import { chat, tool, maxIterations } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' +import { openaiText } from '@tanstack/ai-openai' const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: 'gpt-4o', messages: [{ role: 'user', content: "What's the weather in Paris?" }], tools: [weatherTool], diff --git a/README.md b/README.md index 77acf865..e0de4f3a 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,9 @@ A powerful, type-safe AI SDK for building AI-powered applications. - Provider-agnostic adapters (OpenAI, Anthropic, Gemini, Ollama, etc.) 
+- **Tree-shakeable adapters** - Import only what you need for smaller bundles - **Multimodal content support** - Send images, audio, video, and documents +- **Image generation** - Generate images with OpenAI DALL-E/GPT-Image and Gemini Imagen - Chat completion, streaming, and agent loop strategies - Headless chat state management with adapters (SSE, HTTP stream, custom) - Isomorphic type-safe tools with server/client execution @@ -46,6 +48,30 @@ A powerful, type-safe AI SDK for building AI-powered applications. ### Read the docs → +## Tree-Shakeable Adapters + +Import only the functionality you need for smaller bundle sizes: + +```typescript +// Only chat functionality - no summarization code bundled +import { openaiText } from '@tanstack/ai-openai/adapters' +import { generate } from '@tanstack/ai' + +const textAdapter = openaiText() + +const result = generate({ + adapter: textAdapter, + model: 'gpt-4o', + messages: [{ role: 'user', content: [{ type: 'text', content: 'Hello!' }] }], +}) + +for await (const chunk of result) { + console.log(chunk) +} +``` + +Available adapters: `openaiText`, `openaiEmbed`, `openaiSummarize`, `anthropicText`, `geminiText`, `ollamaText`, and more. + ## Bonus: TanStack Start Integration TanStack AI works with **any** framework (Next.js, Express, Remix, etc.). diff --git a/assets/CleanShot_2025-12-03_at_09.07.34_2x-e559659f-ceb7-4b86-879d-a603788b0b56.png b/assets/CleanShot_2025-12-03_at_09.07.34_2x-e559659f-ceb7-4b86-879d-a603788b0b56.png deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/adapters/anthropic.md b/docs/adapters/anthropic.md index 422f68bf..ec844602 100644 --- a/docs/adapters/anthropic.md +++ b/docs/adapters/anthropic.md @@ -1,9 +1,10 @@ --- -title: Anthropic Adapter -slug: /adapters/anthropic +title: Anthropic +id: anthropic-adapter +order: 2 --- -The Anthropic adapter provides access to Claude models, including Claude 3.5 Sonnet, Claude 3 Opus, and more. 
+The Anthropic adapter provides access to Claude models, including Claude Sonnet 4.5, Claude Opus 4.5, and more. ## Installation @@ -15,14 +16,11 @@ npm install @tanstack/ai-anthropic ```typescript import { chat } from "@tanstack/ai"; -import { anthropic } from "@tanstack/ai-anthropic"; - -const adapter = anthropic(); +import { anthropicText } from "@tanstack/ai-anthropic"; const stream = chat({ - adapter, + adapter: anthropicText("claude-sonnet-4-5"), messages: [{ role: "user", content: "Hello!" }], - model: "claude-3-5-sonnet-20241022", }); ``` @@ -30,29 +28,28 @@ const stream = chat({ ```typescript import { chat } from "@tanstack/ai"; -import { createAnthropic } from "@tanstack/ai-anthropic"; +import { createAnthropicChat } from "@tanstack/ai-anthropic"; -const adapter = createAnthropic(process.env.ANTHROPIC_API_KEY, { +const adapter = createAnthropicChat(process.env.ANTHROPIC_API_KEY!, { // ... your config options - }); +}); const stream = chat({ - adapter, + adapter: adapter("claude-sonnet-4-5"), messages: [{ role: "user", content: "Hello!" }], - model: "claude-3-5-sonnet-20241022", }); ``` ## Configuration ```typescript -import { anthropic, type AnthropicConfig } from "@tanstack/ai-anthropic"; +import { createAnthropicChat, type AnthropicChatConfig } from "@tanstack/ai-anthropic"; -const config: AnthropicConfig = { - // ... 
your config options +const config: Omit<AnthropicChatConfig, "apiKey"> = { + baseURL: "https://api.anthropic.com", // Optional, for custom endpoints }; -const adapter = anthropic(config); +const adapter = createAnthropicChat(process.env.ANTHROPIC_API_KEY!, config); ``` @@ -60,17 +57,14 @@ const adapter = anthropic(config); ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { anthropic } from "@tanstack/ai-anthropic"; - -const adapter = anthropic(); +import { anthropicText } from "@tanstack/ai-anthropic"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter, + adapter: anthropicText("claude-sonnet-4-5"), messages, - model: "claude-3-5-sonnet-20241022", }); return toStreamResponse(stream); @@ -81,11 +75,9 @@ export async function POST(request: Request) { ```typescript import { chat, toolDefinition } from "@tanstack/ai"; -import { anthropic } from "@tanstack/ai-anthropic"; +import { anthropicText } from "@tanstack/ai-anthropic"; import { z } from "zod"; -const adapter = anthropic(); - const searchDatabaseDef = toolDefinition({ name: "search_database", description: "Search the database", @@ -96,46 +88,40 @@ const searchDatabaseDef = toolDefinition({ const searchDatabase = searchDatabaseDef.server(async ({ query }) => { // Search database - return { results: [...] 
}; + return { results: [] }; }); const stream = chat({ - adapter, + adapter: anthropicText("claude-sonnet-4-5"), messages, - model: "claude-3-5-sonnet-20241022", tools: [searchDatabase], }); ``` -## Provider Options +## Model Options -Anthropic supports provider-specific options: +Anthropic supports various provider-specific options: ```typescript const stream = chat({ - adapter: anthropic(), + adapter: anthropicText("claude-sonnet-4-5"), messages, - model: "claude-3-5-sonnet-20241022", - providerOptions: { - thinking: { - type: "enabled", - budgetTokens: 1000, - }, - cacheControl: { - type: "ephemeral", - ttl: "5m", - }, - sendReasoning: true, + modelOptions: { + max_tokens: 4096, + temperature: 0.7, + top_p: 0.9, + top_k: 40, + stop_sequences: ["END"], }, }); ``` ### Thinking (Extended Thinking) -Enable extended thinking with a token budget. This allows Claude to show its reasoning process, which is streamed as `thinking` chunks and displayed as `ThinkingPart` in messages: +Enable extended thinking with a token budget. This allows Claude to show its reasoning process, which is streamed as `thinking` chunks: ```typescript -providerOptions: { +modelOptions: { thinking: { type: "enabled", budget_tokens: 2048, // Maximum tokens for thinking @@ -145,32 +131,49 @@ providerOptions: { **Note:** `max_tokens` must be greater than `budget_tokens`. The adapter automatically adjusts `max_tokens` if needed. -**Supported Models:** +### Prompt Caching -- `claude-sonnet-4-5-20250929` and newer -- `claude-opus-4-5-20251101` and newer +Cache prompts for better performance and reduced costs: -When thinking is enabled, the model's reasoning process is streamed separately from the response text and appears as a collapsible thinking section in the UI. 
+```typescript +const stream = chat({ + adapter: anthropicText("claude-sonnet-4-5"), + messages: [ + { + role: "user", + content: [ + { + type: "text", + content: "What is the capital of France?", + metadata: { + cache_control: { + type: "ephemeral", + }, + }, + }, + ], + }, + ], +}); +``` -### Prompt Caching +## Summarization -Cache prompts for better performance: +Anthropic supports text summarization: ```typescript -messages: [ - { role: "user", content: [{ - type: "text", - content: "What is the capital of France?", - metadata: { - cache_control: { - type: "ephemeral", - ttl: "5m", - } - } - }]} -] -``` +import { summarize } from "@tanstack/ai"; +import { anthropicSummarize } from "@tanstack/ai-anthropic"; + +const result = await summarize({ + adapter: anthropicSummarize("claude-sonnet-4-5"), + text: "Your long text to summarize...", + maxLength: 100, + style: "concise", // "concise" | "bullet-points" | "paragraph" +}); +console.log(result.summary); +``` ## Environment Variables @@ -182,15 +185,43 @@ ANTHROPIC_API_KEY=sk-ant-... ## API Reference -### `anthropic(config)` +### `anthropicText(config?)` + +Creates an Anthropic chat adapter using environment variables. + +**Returns:** An Anthropic chat adapter instance. + +### `createAnthropicChat(apiKey, config?)` -Creates an Anthropic adapter instance. +Creates an Anthropic chat adapter with an explicit API key. **Parameters:** -- `config.apiKey` - Anthropic API key (required) +- `apiKey` - Your Anthropic API key +- `config.baseURL?` - Custom base URL (optional) + +**Returns:** An Anthropic chat adapter instance. + +### `anthropicSummarize(config?)` + +Creates an Anthropic summarization adapter using environment variables. + +**Returns:** An Anthropic summarize adapter instance. + +### `createAnthropicSummarize(apiKey, config?)` + +Creates an Anthropic summarization adapter with an explicit API key. 
+ +**Parameters:** + +- `apiKey` - Your Anthropic API key +- `config.baseURL?` - Custom base URL (optional) + +**Returns:** An Anthropic summarize adapter instance. + +## Limitations -**Returns:** An Anthropic adapter instance. +- **Image Generation**: Anthropic does not support image generation. Use OpenAI or Gemini for image generation. ## Next Steps diff --git a/docs/adapters/gemini.md b/docs/adapters/gemini.md index 6dbb14cc..24e390e3 100644 --- a/docs/adapters/gemini.md +++ b/docs/adapters/gemini.md @@ -1,9 +1,10 @@ --- -title: Gemini Adapter +title: Google Gemini id: gemini-adapter +order: 3 --- -The Google Gemini adapter provides access to Google's Gemini models, including Gemini Pro and Gemini Ultra. +The Google Gemini adapter provides access to Google's Gemini models, including text generation, image generation with Imagen, and experimental text-to-speech. ## Installation @@ -15,14 +16,11 @@ npm install @tanstack/ai-gemini ```typescript import { chat } from "@tanstack/ai"; -import { gemini } from "@tanstack/ai-gemini"; - -const adapter = gemini(); +import { geminiText } from "@tanstack/ai-gemini"; const stream = chat({ - adapter, + adapter: geminiText("gemini-2.5-pro"), messages: [{ role: "user", content: "Hello!" }], - model: "gemini-2.5-pro", }); ``` @@ -30,52 +28,43 @@ const stream = chat({ ```typescript import { chat } from "@tanstack/ai"; -import { createGemini } from "@tanstack/ai-gemini"; -const adapter = createGemini(process.env.GEMINI_API_KEY, { +import { createGeminiChat } from "@tanstack/ai-gemini"; + +const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, { // ... your config options - }); +}); + const stream = chat({ - adapter, + adapter: adapter("gemini-2.5-pro"), messages: [{ role: "user", content: "Hello!" 
}], - model: "gemini-2.5-pro", }); ``` ## Configuration ```typescript -import { gemini, type GeminiConfig } from "@tanstack/ai-gemini"; +import { createGeminiChat, type GeminiChatConfig } from "@tanstack/ai-gemini"; -const config: GeminiConfig = { - baseURL: "https://generativelanguage.googleapis.com/v1", // Optional +const config: Omit<GeminiChatConfig, "apiKey"> = { + baseURL: "https://generativelanguage.googleapis.com/v1beta", // Optional }; -const adapter = gemini(config); +const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, config); ``` - -## Available Models - -### Chat Models - -- `gemini-2.5-pro` - Gemini Pro model -- `gemini-2.5-pro-vision` - Gemini Pro with vision capabilities -- `gemini-ultra` - Gemini Ultra model (when available) + ## Example: Chat Completion ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { gemini } from "@tanstack/ai-gemini"; - -const adapter = gemini(); +import { geminiText } from "@tanstack/ai-gemini"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter, + adapter: geminiText("gemini-2.5-pro"), messages, - model: "gemini-2.5-pro", }); return toStreamResponse(stream); @@ -86,14 +75,12 @@ export async function POST(request: Request) { ```typescript import { chat, toolDefinition } from "@tanstack/ai"; -import { gemini } from "@tanstack/ai-gemini"; +import { geminiText } from "@tanstack/ai-gemini"; import { z } from "zod"; -const adapter = gemini(); - const getCalendarEventsDef = toolDefinition({ name: "get_calendar_events", - description: "Get calendar events", + description: "Get calendar events for a date", inputSchema: z.object({ date: z.string(), }), }); const getCalendarEvents = getCalendarEventsDef.server(async ({ date }) => { // Fetch calendar events - return { events: [...] 
}; + return { events: [] }; }); const stream = chat({ - adapter, + adapter: geminiText("gemini-2.5-pro"), messages, - model: "gemini-2.5-pro", tools: [getCalendarEvents], }); ``` -## Provider Options +## Model Options -Gemini supports various provider-specific options: +Gemini supports various model-specific options: ```typescript const stream = chat({ - adapter: gemini(), + adapter: geminiText("gemini-2.5-pro"), messages, - model: "gemini-2.5-pro", - providerOptions: { - maxOutputTokens: 1000, + modelOptions: { + maxOutputTokens: 2048, + temperature: 0.7, + topP: 0.9, topK: 40, + stopSequences: ["END"], }, }); ``` +### Thinking + +Enable thinking for models that support it: + +```typescript +modelOptions: { + thinking: { + includeThoughts: true, + }, +} +``` + +### Structured Output + +Configure structured output format: + +```typescript +modelOptions: { + responseMimeType: "application/json", +} +``` + +## Summarization + +Summarize long text content: + +```typescript +import { summarize } from "@tanstack/ai"; +import { geminiSummarize } from "@tanstack/ai-gemini"; + +const result = await summarize({ + adapter: geminiSummarize("gemini-2.5-pro"), + text: "Your long text to summarize...", + maxLength: 100, + style: "concise", // "concise" | "bullet-points" | "paragraph" +}); + +console.log(result.summary); +``` + +## Image Generation + +Generate images with Imagen: + +```typescript +import { generateImage } from "@tanstack/ai"; +import { geminiImage } from "@tanstack/ai-gemini"; + +const result = await generateImage({ + adapter: geminiImage("imagen-3.0-generate-002"), + prompt: "A futuristic cityscape at sunset", + numberOfImages: 1, +}); + +console.log(result.images); +``` + +### Image Model Options + +```typescript +const result = await generateImage({ + adapter: geminiImage("imagen-3.0-generate-002"), + prompt: "...", + modelOptions: { + aspectRatio: "16:9", // "1:1" | "3:4" | "4:3" | "9:16" | "16:9" + personGeneration: "DONT_ALLOW", // Control person generation 
+ safetyFilterLevel: "BLOCK_SOME", // Safety filtering + }, +}); +``` + +## Text-to-Speech (Experimental) + +> **Note:** Gemini TTS is experimental and may require the Live API for full functionality. + +Generate speech from text: + +```typescript +import { generateSpeech } from "@tanstack/ai"; +import { geminiSpeech } from "@tanstack/ai-gemini"; + +const result = await generateSpeech({ + adapter: geminiSpeech("gemini-2.5-flash-preview-tts"), + text: "Hello from Gemini TTS!", +}); + +console.log(result.audio); // Base64 encoded audio +``` + ## Environment Variables Set your API key in environment variables: ```bash GEMINI_API_KEY=your-api-key-here +# or +GOOGLE_API_KEY=your-api-key-here ``` ## Getting an API Key -1. Go to [Google AI Studio](https://makersuite.google.com/app/apikey) +1. Go to [Google AI Studio](https://aistudio.google.com/apikey) 2. Create a new API key 3. Add it to your environment variables ## API Reference -### `gemini(config)` +### `geminiText(config?)` + +Creates a Gemini text/chat adapter using environment variables. + +**Returns:** A Gemini text adapter instance. + +### `createGeminiText(apiKey, config?)` -Creates a Gemini adapter instance. +Creates a Gemini text/chat adapter with an explicit API key. **Parameters:** -- `config.apiKey` - Gemini API key (required) +- `apiKey` - Your Gemini API key - `config.baseURL?` - Custom base URL (optional) -**Returns:** A Gemini adapter instance. +**Returns:** A Gemini text adapter instance. + +### `geminiSummarize(config?)` + +Creates a Gemini summarization adapter using environment variables. + +**Returns:** A Gemini summarize adapter instance. + +### `createGeminiSummarize(apiKey, config?)` + +Creates a Gemini summarization adapter with an explicit API key. + +**Returns:** A Gemini summarize adapter instance. + +### `geminiImage(config?)` + +Creates a Gemini image generation adapter using environment variables. + +**Returns:** A Gemini image adapter instance. 
+ +### `createGeminiImage(apiKey, config?)` + +Creates a Gemini image generation adapter with an explicit API key. + +**Returns:** A Gemini image adapter instance. + +### `geminiTTS(config?)` + +Creates a Gemini TTS adapter using environment variables. + +**Returns:** A Gemini TTS adapter instance. + +### `createGeminiTTS(apiKey, config?)` + +Creates a Gemini TTS adapter with an explicit API key. + +**Returns:** A Gemini TTS adapter instance. ## Next Steps diff --git a/docs/adapters/ollama.md b/docs/adapters/ollama.md index 9fbf65bd..d17a9985 100644 --- a/docs/adapters/ollama.md +++ b/docs/adapters/ollama.md @@ -1,9 +1,10 @@ --- -title: Ollama Adapter +title: Ollama id: ollama-adapter +order: 4 --- -The Ollama adapter provides access to local models running via Ollama, allowing you to run AI models on your own infrastructure. +The Ollama adapter provides access to local models running via Ollama, allowing you to run AI models on your own infrastructure with full privacy and no API costs. ## Installation @@ -15,57 +16,71 @@ npm install @tanstack/ai-ollama ```typescript import { chat } from "@tanstack/ai"; -import { ollama } from "@tanstack/ai-ollama"; +import { ollamaText } from "@tanstack/ai-ollama"; -const adapter = ollama({ - baseURL: "http://localhost:11434", // Default Ollama URL +const stream = chat({ + adapter: ollamaText("llama3"), + messages: [{ role: "user", content: "Hello!" }], }); +``` + +## Basic Usage - Custom Host + +```typescript +import { chat } from "@tanstack/ai"; +import { createOllamaChat } from "@tanstack/ai-ollama"; + +const adapter = createOllamaChat("http://your-server:11434"); const stream = chat({ - adapter, + adapter: adapter("llama3"), messages: [{ role: "user", content: "Hello!" 
}], - model: "llama3", }); ``` ## Configuration ```typescript -import { ollama, type OllamaConfig } from "@tanstack/ai-ollama"; +import { createOllamaChat } from "@tanstack/ai-ollama"; -const config: OllamaConfig = { - baseURL: "http://localhost:11434", // Ollama server URL - // No API key needed for local Ollama -}; +// Default localhost +const adapter = createOllamaChat(); -const adapter = ollama(config); +// Custom host +const adapter = createOllamaChat("http://your-server:11434"); ``` -## Available Models +## Available Models -To see available models, run: +To see available models on your Ollama instance: ```bash ollama list ``` +### Popular Models + +- `llama3` / `llama3.1` / `llama3.2` - Meta's Llama models +- `mistral` / `mistral:7b` - Mistral AI models +- `mixtral` - Mixtral MoE model +- `codellama` - Code-focused Llama +- `phi3` - Microsoft's Phi models +- `gemma` / `gemma2` - Google's Gemma models +- `qwen2` / `qwen2.5` - Alibaba's Qwen models +- `deepseek-coder` - DeepSeek coding model + ## Example: Chat Completion ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { ollama } from "@tanstack/ai-ollama"; - -const adapter = ollama({ - baseURL: "http://localhost:11434", -}); +import { ollamaText } from "@tanstack/ai-ollama"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter, + adapter: ollamaText("llama3"), messages, - model: "llama3", // Use a model you have installed }); return toStreamResponse(stream); @@ -76,13 +91,9 @@ export async function POST(request: Request) { ```typescript import { chat, toolDefinition } from "@tanstack/ai"; -import { ollama } from "@tanstack/ai-ollama"; +import { ollamaText } from "@tanstack/ai-ollama"; import { z } from "zod"; -const adapter = ollama({ - baseURL: "http://localhost:11434", -}); - const getLocalDataDef = toolDefinition({ name: "get_local_data", description: "Get data from local storage", @@ -97,85 +108,182 @@ const 
getLocalData = getLocalDataDef.server(async ({ key }) => { }); const stream = chat({ - adapter, + adapter: ollamaText("llama3"), messages, - model: "llama3", tools: [getLocalData], }); ``` -## Setting Up Ollama - -1. **Install Ollama:** - - ```bash - # macOS - brew install ollama - - # Linux - curl -fsSL https://ollama.com/install.sh | sh +**Note:** Tool support varies by model. Models like `llama3`, `mistral`, and `qwen2` generally have good tool calling support. - # Windows - # Download from https://ollama.com - ``` - -2. **Pull a model:** - - ```bash - ollama pull llama3 - ``` - -3. **Start Ollama server:** - ```bash - ollama serve - ``` - -## Provider Options +## Model Options Ollama supports various provider-specific options: ```typescript const stream = chat({ - adapter: ollama({ baseURL: "http://localhost:11434" }), + adapter: ollamaText("llama3"), messages, - model: "llama3", - providerOptions: { + modelOptions: { temperature: 0.7, - numPredict: 1000, - topP: 0.9, - topK: 40, + top_p: 0.9, + top_k: 40, + num_predict: 1000, // Max tokens to generate + repeat_penalty: 1.1, + num_ctx: 4096, // Context window size + num_gpu: -1, // GPU layers (-1 = auto) }, }); ``` -## Custom Ollama Server +### Advanced Options + +```typescript +modelOptions: { + // Sampling + temperature: 0.7, + top_p: 0.9, + top_k: 40, + min_p: 0.05, + typical_p: 1.0, + + // Generation + num_predict: 1000, + repeat_penalty: 1.1, + repeat_last_n: 64, + penalize_newline: false, + + // Performance + num_ctx: 4096, + num_batch: 512, + num_gpu: -1, + num_thread: 0, // 0 = auto + + // Memory + use_mmap: true, + use_mlock: false, + + // Mirostat sampling + mirostat: 0, // 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0 + mirostat_tau: 5.0, + mirostat_eta: 0.1, +} +``` + +## Summarization -If you're running Ollama on a different host or port: +Summarize long text content locally: ```typescript -const adapter = ollama({ - baseURL: "http://your-server:11434", +import { summarize } from "@tanstack/ai"; 
+import { ollamaSummarize } from "@tanstack/ai-ollama"; + +const result = await summarize({ + adapter: ollamaSummarize("llama3"), + text: "Your long text to summarize...", + maxLength: 100, + style: "concise", // "concise" | "bullet-points" | "paragraph" }); + +console.log(result.summary); +``` + +## Setting Up Ollama + +### 1. Install Ollama + +```bash +# macOS +brew install ollama + +# Linux +curl -fsSL https://ollama.com/install.sh | sh + +# Windows +# Download from https://ollama.com +``` + +### 2. Pull a Model + +```bash +ollama pull llama3 +``` + +### 3. Start Ollama Server + +```bash +ollama serve +``` + +The server runs on `http://localhost:11434` by default. + +## Running on a Remote Server + +```typescript +const adapter = createOllamaChat("http://your-server:11434"); +``` + +To expose Ollama on a network interface: + +```bash +OLLAMA_HOST=0.0.0.0:11434 ollama serve +``` + +## Environment Variables + +Optionally set the host in environment variables: + +```bash +OLLAMA_HOST=http://localhost:11434 ``` ## API Reference -### `ollama(config)` +### `ollamaText(options?)` + +Creates an Ollama text/chat adapter. + +**Parameters:** + +- `options.model?` - Default model (optional) + +**Returns:** An Ollama text adapter instance. -Creates an Ollama adapter instance. +### `createOllamaText(host?, options?)` + +Creates an Ollama text/chat adapter with a custom host. **Parameters:** -- `config.baseURL` - Ollama server URL (default: `http://localhost:11434`) +- `host` - Ollama server URL (default: `http://localhost:11434`) +- `options.model?` - Default model (optional) + +**Returns:** An Ollama text adapter instance. + +### `ollamaSummarize(options?)` + +Creates an Ollama summarization adapter. + +**Returns:** An Ollama summarize adapter instance. -**Returns:** An Ollama adapter instance. +### `createOllamaSummarize(host?, options?)` + +Creates an Ollama summarization adapter with a custom host. + +**Returns:** An Ollama summarize adapter instance. 
## Benefits of Ollama - ✅ **Privacy** - Data stays on your infrastructure -- ✅ **Cost** - No API costs +- ✅ **Cost** - No API costs after hardware - ✅ **Customization** - Use any compatible model - ✅ **Offline** - Works without internet +- ✅ **Speed** - No network latency for local deployment + +## Limitations + +- **Image Generation**: Ollama does not support image generation. Use OpenAI or Gemini for image generation. +- **Performance**: Depends on your hardware (GPU recommended for larger models) ## Next Steps diff --git a/docs/adapters/openai.md b/docs/adapters/openai.md index 1d1392b0..f4c999b2 100644 --- a/docs/adapters/openai.md +++ b/docs/adapters/openai.md @@ -1,9 +1,10 @@ --- -title: OpenAI Adapter +title: OpenAI id: openai-adapter +order: 1 --- -The OpenAI adapter provides access to OpenAI's GPT models, including GPT-4, GPT-3.5, and more. +The OpenAI adapter provides access to OpenAI's models, including GPT-4o, GPT-5, image generation (DALL-E), text-to-speech (TTS), and audio transcription (Whisper). ## Installation @@ -15,14 +16,11 @@ npm install @tanstack/ai-openai ```typescript import { chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; - -const adapter = openai(); +import { openaiText } from "@tanstack/ai-openai"; const stream = chat({ - adapter, + adapter: openaiText("gpt-4o"), messages: [{ role: "user", content: "Hello!" }], - model: "gpt-4o", }); ``` @@ -30,45 +28,43 @@ const stream = chat({ ```typescript import { chat } from "@tanstack/ai"; -import { createOpenAI } from "@tanstack/ai-openai"; -const adapter = createOpenAI(process.env.OPENAI_API_KEY!, { +import { createOpenaiChat } from "@tanstack/ai-openai"; + +const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, { // ... your config options - }); +}); + const stream = chat({ - adapter, + adapter: adapter("gpt-4o"), messages: [{ role: "user", content: "Hello!" 
}], - model: "gpt-4o", }); ``` ## Configuration ```typescript -import { openai, type OpenAIConfig } from "@tanstack/ai-openai"; +import { createOpenaiChat, type OpenAIChatConfig } from "@tanstack/ai-openai"; -const config: OpenAIConfig = { +const config: Omit<OpenAIChatConfig, "apiKey"> = { organization: "org-...", // Optional baseURL: "https://api.openai.com/v1", // Optional, for custom endpoints }; -const adapter = openai(config); +const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, config); ``` ## Example: Chat Completion ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; - -const adapter = openai(); +import { openaiText } from "@tanstack/ai-openai"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter, + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", }); return toStreamResponse(stream); @@ -79,11 +75,9 @@ export async function POST(request: Request) { ```typescript import { chat, toolDefinition } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { z } from "zod"; -const adapter = openai(); - const getWeatherDef = toolDefinition({ name: "get_weather", description: "Get the current weather", @@ -98,48 +92,164 @@ const getWeather = getWeatherDef.server(async ({ location }) => { }); const stream = chat({ - adapter, + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [getWeather], }); ``` -## Provider Options +## Model Options OpenAI supports various provider-specific options: ```typescript const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", - providerOptions: { + modelOptions: { temperature: 0.7, - maxTokens: 1000, - topP: 0.9, - frequencyPenalty: 0.5, - presencePenalty: 0.5, + max_tokens: 1000, + top_p: 0.9, + frequency_penalty: 0.5, + presence_penalty: 0.5, + stop: ["END"], }, }); ``` ### 
Reasoning -Enable reasoning for models that support it (e.g., GPT-5). This allows the model to show its reasoning process, which is streamed as `thinking` chunks: +Enable reasoning for models that support it (e.g., GPT-5, O3). This allows the model to show its reasoning process, which is streamed as `thinking` chunks: ```typescript -providerOptions: { +modelOptions: { reasoning: { effort: "medium", // "none" | "minimal" | "low" | "medium" | "high" summary: "detailed", // "auto" | "detailed" (optional) }, } ``` - When reasoning is enabled, the model's reasoning process is streamed separately from the response text and appears as a collapsible thinking section in the UI. +## Summarization + +Summarize long text content: + +```typescript +import { summarize } from "@tanstack/ai"; +import { openaiSummarize } from "@tanstack/ai-openai"; + +const result = await summarize({ + adapter: openaiSummarize("gpt-4o-mini"), + text: "Your long text to summarize...", + maxLength: 100, + style: "concise", // "concise" | "bullet-points" | "paragraph" +}); + +console.log(result.summary); +``` + +## Image Generation + +Generate images with DALL-E: + +```typescript +import { generateImage } from "@tanstack/ai"; +import { openaiImage } from "@tanstack/ai-openai"; + +const result = await generateImage({ + adapter: openaiImage("gpt-image-1"), + prompt: "A futuristic cityscape at sunset", + numberOfImages: 1, + size: "1024x1024", +}); + +console.log(result.images); +``` + +### Image Model Options + +```typescript +const result = await generateImage({ + adapter: openaiImage("gpt-image-1"), + prompt: "...", + modelOptions: { + quality: "hd", // "standard" | "hd" + style: "natural", // "natural" | "vivid" + }, +}); +``` + +## Text-to-Speech + +Generate speech from text: + +```typescript +import { generateSpeech } from "@tanstack/ai"; +import { openaiTTS } from "@tanstack/ai-openai"; + +const result = await generateSpeech({ + adapter: openaiTTS("tts-1"), + text: "Hello, welcome to TanStack 
AI!", + voice: "alloy", + format: "mp3", +}); + +// result.audio contains base64-encoded audio +console.log(result.format); // "mp3" +``` + +### TTS Voices + +Available voices: `alloy`, `echo`, `fable`, `onyx`, `nova`, `shimmer`, `ash`, `ballad`, `coral`, `sage`, `verse` + +### TTS Model Options + +```typescript +const result = await generateSpeech({ + adapter: openaiTTS("tts-1-hd"), + text: "High quality speech", + modelOptions: { + speed: 1.0, // 0.25 to 4.0 + }, +}); +``` + +## Transcription + +Transcribe audio to text: + +```typescript +import { generateTranscription } from "@tanstack/ai"; +import { openaiTranscription } from "@tanstack/ai-openai"; + +const result = await generateTranscription({ + adapter: openaiTranscription("whisper-1"), + audio: audioFile, // File object or base64 string + language: "en", +}); + +console.log(result.text); // Transcribed text +``` + +### Transcription Model Options + +```typescript +const result = await generateTranscription({ + adapter: openaiTranscription("whisper-1"), + audio: audioFile, + modelOptions: { + response_format: "verbose_json", // Get timestamps + temperature: 0, + prompt: "Technical terms: API, SDK", + }, +}); + +// Access segments with timestamps +console.log(result.segments); +``` + ## Environment Variables Set your API key in environment variables: @@ -150,16 +260,71 @@ OPENAI_API_KEY=sk-... ## API Reference -### `openai(config)` +### `openaiText(config?)` + +Creates an OpenAI chat adapter using environment variables. + +**Returns:** An OpenAI chat adapter instance. + +### `createOpenaiChat(apiKey, config?)` -Creates an OpenAI adapter instance. +Creates an OpenAI chat adapter with an explicit API key. **Parameters:** - + +- `apiKey` - Your OpenAI API key - `config.organization?` - Organization ID (optional) - `config.baseURL?` - Custom base URL (optional) -**Returns:** An OpenAI adapter instance. +**Returns:** An OpenAI chat adapter instance. 
+ +### `openaiSummarize(config?)` + +Creates an OpenAI summarization adapter using environment variables. + +**Returns:** An OpenAI summarize adapter instance. + +### `createOpenaiSummarize(apiKey, config?)` + +Creates an OpenAI summarization adapter with an explicit API key. + +**Returns:** An OpenAI summarize adapter instance. + +### `openaiImage(config?)` + +Creates an OpenAI image generation adapter using environment variables. + +**Returns:** An OpenAI image adapter instance. + +### `createOpenaiImage(apiKey, config?)` + +Creates an OpenAI image generation adapter with an explicit API key. + +**Returns:** An OpenAI image adapter instance. + +### `openaiTTS(config?)` + +Creates an OpenAI TTS adapter using environment variables. + +**Returns:** An OpenAI TTS adapter instance. + +### `createOpenaiTTS(apiKey, config?)` + +Creates an OpenAI TTS adapter with an explicit API key. + +**Returns:** An OpenAI TTS adapter instance. + +### `openaiTranscription(config?)` + +Creates an OpenAI transcription adapter using environment variables. + +**Returns:** An OpenAI transcription adapter instance. + +### `createOpenaiTranscription(apiKey, config?)` + +Creates an OpenAI transcription adapter with an explicit API key. + +**Returns:** An OpenAI transcription adapter instance. ## Next Steps diff --git a/docs/api/ai-client.md b/docs/api/ai-client.md index a8f3b340..b2a7daf5 100644 --- a/docs/api/ai-client.md +++ b/docs/api/ai-client.md @@ -1,6 +1,7 @@ --- -title: TanStack AI Client API +title: "@tanstack/ai-client" slug: /api/ai-client +order: 2 --- Framework-agnostic headless client for managing chat state and streaming. diff --git a/docs/api/ai-react.md b/docs/api/ai-react.md index 7f44f921..8b3dd1dc 100644 --- a/docs/api/ai-react.md +++ b/docs/api/ai-react.md @@ -1,6 +1,7 @@ --- -title: TanStack AI React API +title: "@tanstack/ai-react" slug: /api/ai-react +order: 3 --- React hooks for TanStack AI, providing convenient React bindings for the headless client. 
diff --git a/docs/api/ai-solid.md b/docs/api/ai-solid.md index 6299cd1d..96d154d9 100644 --- a/docs/api/ai-solid.md +++ b/docs/api/ai-solid.md @@ -1,6 +1,7 @@ --- -title: Tanstack AI Solid API +title: "@tanstack/ai-solid" slug: /api/ai-solid +order: 4 --- SolidJS primitives for TanStack AI, providing convenient SolidJS bindings for the headless client. diff --git a/docs/api/ai.md b/docs/api/ai.md index 6f9240f4..626fa047 100644 --- a/docs/api/ai.md +++ b/docs/api/ai.md @@ -1,6 +1,7 @@ --- -title: TanStack AI Core API +title: "@tanstack/ai" id: tanstack-ai-api +order: 1 --- The core AI library for TanStack AI. @@ -17,12 +18,11 @@ Creates a streaming chat response. ```typescript import { chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages: [{ role: "user", content: "Hello!" }], - model: "gpt-4o", tools: [myTool], systemPrompts: ["You are a helpful assistant"], agentLoopStrategy: maxIterations(20), @@ -31,14 +31,13 @@ const stream = chat({ ### Parameters -- `adapter` - An AI adapter instance (e.g., `openai()`, `anthropic()`) +- `adapter` - An AI adapter instance with model (e.g., `openaiText('gpt-4o')`, `anthropicText('claude-sonnet-4-5')`) - `messages` - Array of chat messages -- `model` - Model identifier (type-safe based on adapter) - `tools?` - Array of tools for function calling - `systemPrompts?` - System prompts to prepend to messages - `agentLoopStrategy?` - Strategy for agent loops (default: `maxIterations(5)`) - `abortController?` - AbortController for cancellation -- `providerOptions?` - Provider-specific options +- `modelOptions?` - Model-specific options (renamed from `providerOptions`) ### Returns @@ -50,11 +49,10 @@ Creates a text summarization. 
```typescript import { summarize } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiSummarize } from "@tanstack/ai-openai"; const result = await summarize({ - adapter: openai(), - model: "gpt-4o", + adapter: openaiSummarize("gpt-4o"), text: "Long text to summarize...", maxLength: 100, style: "concise", @@ -63,41 +61,16 @@ const result = await summarize({ ### Parameters -- `adapter` - An AI adapter instance -- `model` - Model identifier (type-safe based on adapter) +- `adapter` - An AI adapter instance with model - `text` - Text to summarize - `maxLength?` - Maximum length of summary - `style?` - Summary style ("concise" | "detailed") +- `modelOptions?` - Model-specific options ### Returns A `SummarizationResult` with the summary text. -## `embedding(options)` - -Creates embeddings for text input. - -```typescript -import { embedding } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; - -const result = await embedding({ - adapter: openai(), - model: "text-embedding-3-small", - input: "Text to embed", -}); -``` - -### Parameters - -- `adapter` - An AI adapter instance -- `model` - Embedding model identifier (type-safe based on adapter) -- `input` - Text or array of texts to embed - -### Returns - -An `EmbeddingResult` with embeddings array. - ## `toolDefinition(config)` Creates an isomorphic tool definition that can be instantiated for server or client execution. @@ -126,8 +99,9 @@ const myClientTool = myToolDef.client(async ({ param }) => { // Use directly in chat() (server-side, no execute) chat({ + adapter: openaiText("gpt-4o"), tools: [myToolDef], - // ... + messages: [{ role: "user", content: "..." }], }); // Or create server implementation @@ -138,8 +112,9 @@ const myServerTool = myToolDef.server(async ({ param }) => { // Use directly in chat() (server-side, no execute) chat({ + adapter: openaiText("gpt-4o"), tools: [myServerTool], - // ... + messages: [{ role: "user", content: "..." 
}], }); ``` @@ -161,13 +136,12 @@ A `ToolDefinition` object with `.server()` and `.client()` methods for creating Converts a stream to a ReadableStream in Server-Sent Events format. ```typescript -import { toServerSentEventsStream, chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { chat, toServerSentEventsStream } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages: [...], - model: "gpt-4o", }); const readableStream = toServerSentEventsStream(stream); ``` @@ -189,13 +163,12 @@ A `ReadableStream` in Server-Sent Events format. Each chunk is: Converts a stream to an HTTP Response with proper SSE headers. ```typescript -import { toStreamResponse, chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { chat, toStreamResponse } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages: [...], - model: "gpt-4o", }); return toStreamResponse(stream); ``` @@ -214,13 +187,12 @@ A `Response` object suitable for HTTP endpoints with SSE headers (`Content-Type: Creates an agent loop strategy that limits iterations. 
```typescript -import { maxIterations, chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { chat, maxIterations } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages: [...], - model: "gpt-4o", agentLoopStrategy: maxIterations(20), }); ``` @@ -293,31 +265,78 @@ interface Tool { ## Usage Examples ```typescript -import { chat, summarize, embedding } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; - -const adapter = openai(); - -// Streaming chat +import { chat, summarize, generateImage } from "@tanstack/ai"; +import { + openaiText, + openaiSummarize, + openaiImage, +} from "@tanstack/ai-openai"; + +// --- Streaming chat const stream = chat({ - adapter, + adapter: openaiText("gpt-4o"), messages: [{ role: "user", content: "Hello!" }], - model: "gpt-4o", }); -// Summarization +// --- One-shot chat response (stream: false) +const response = await chat({ + adapter: openaiText("gpt-4o"), + messages: [{ role: "user", content: "What's the capital of France?" }], + stream: false, // Returns a Promise instead of AsyncIterable +}); + +// --- Structured response with outputSchema +import { z } from "zod"; +const parsed = await chat({ + adapter: openaiText("gpt-4o"), + messages: [{ role: "user", content: "Summarize this text in JSON with keys 'summary' and 'keywords': ... 
" }], + outputSchema: z.object({ + summary: z.string(), + keywords: z.array(z.string()), + }), +}); + +// --- Structured response with tools +import { toolDefinition } from "@tanstack/ai"; +const weatherTool = toolDefinition({ + name: "getWeather", + description: "Get the current weather for a city", + inputSchema: z.object({ + city: z.string().describe("City name"), + }), +}).server(async ({ city }) => { + // Implementation that fetches weather info + return JSON.stringify({ temperature: 72, condition: "Sunny" }); +}); + +const toolResult = await chat({ + adapter: openaiText("gpt-4o"), + messages: [ + { role: "user", content: "What's the weather in Paris?" } + ], + tools: [weatherTool], + outputSchema: z.object({ + answer: z.string(), + weather: z.object({ + temperature: z.number(), + condition: z.string(), + }), + }), +}); + +// --- Summarization const summary = await summarize({ - adapter, - model: "gpt-4o", + adapter: openaiSummarize("gpt-4o"), text: "Long text to summarize...", maxLength: 100, }); -// Embeddings -const embeddings = await embedding({ - adapter, - model: "text-embedding-3-small", - input: "Text to embed", +// --- Image generation +const image = await generateImage({ + adapter: openaiImage("dall-e-3"), + prompt: "A futuristic city skyline at sunset", + numberOfImages: 1, + size: "1024x1024", }); ``` diff --git a/docs/config.json b/docs/config.json index d75a3b7b..375489c0 100644 --- a/docs/config.json +++ b/docs/config.json @@ -42,14 +42,14 @@ "label": "Client Tools", "to": "guides/client-tools" }, - { - "label": "Agentic Cycle", - "to": "guides/agentic-cycle" - }, { "label": "Tool Approval Flow", "to": "guides/tool-approval" }, + { + "label": "Agentic Cycle", + "to": "guides/agentic-cycle" + }, { "label": "Streaming", "to": "guides/streaming" @@ -69,6 +69,34 @@ { "label": "Per-Model Type Safety", "to": "guides/per-model-type-safety" + }, + { + "label": "Runtime Adapter Switching", + "to": "guides/runtime-adapter-switching" + }, + { + "label": 
"Text-to-Speech", + "to": "guides/text-to-speech" + }, + { + "label": "Transcription", + "to": "guides/transcription" + }, + { + "label": "Image Generation", + "to": "guides/image-generation" + }, + { + "label": "Video Generation", + "to": "guides/video-generation" + }, + { + "label": "Tree-Shaking", + "to": "guides/tree-shaking" + }, + { + "label": "Migration Guide", + "to": "guides/migration" } ] }, @@ -163,12 +191,12 @@ "defaultCollapsed": true, "children": [ { - "label": "chat", - "to": "reference/functions/chat" + "label": "text", + "to": "reference/functions/text" }, { - "label": "chatOptions", - "to": "reference/functions/chatOptions" + "label": "textOptions", + "to": "reference/functions/textOptions" }, { "label": "combineStrategies", @@ -274,12 +302,12 @@ "to": "reference/interfaces/BaseStreamChunk" }, { - "label": "ChatCompletionChunk", - "to": "reference/interfaces/ChatCompletionChunk" + "label": "TextCompletionChunk", + "to": "reference/interfaces/TextCompletionChunk" }, { - "label": "ChatOptions", - "to": "reference/interfaces/ChatOptions" + "label": "TextOptions", + "to": "reference/interfaces/TextOptions" }, { "label": "ChunkRecording", @@ -457,12 +485,12 @@ "to": "reference/type-aliases/AnyClientTool" }, { - "label": "ChatStreamOptionsForModel", - "to": "reference/type-aliases/ChatStreamOptionsForModel" + "label": "TextStreamOptionsForModel", + "to": "reference/type-aliases/TextStreamOptionsForModel" }, { - "label": "ChatStreamOptionsUnion", - "to": "reference/type-aliases/ChatStreamOptionsUnion" + "label": "TextStreamOptionsUnion", + "to": "reference/type-aliases/TextStreamOptionsUnion" }, { "label": "ConstrainedContent", @@ -546,4 +574,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/docs/getting-started/devtools.md b/docs/getting-started/devtools.md index e490470d..e0ea553d 100644 --- a/docs/getting-started/devtools.md +++ b/docs/getting-started/devtools.md @@ -1,6 +1,7 @@ --- title: Devtools id: devtools +order: 3 --- TanStack Devtools 
is a unified devtools panel for inspecting and debugging TanStack libraries, including TanStack AI. It provides real-time insights into AI interactions, tool calls, and state changes, making it easier to develop and troubleshoot AI-powered applications. diff --git a/docs/getting-started/overview.md b/docs/getting-started/overview.md index 67395478..aed5ec58 100644 --- a/docs/getting-started/overview.md +++ b/docs/getting-started/overview.md @@ -1,6 +1,7 @@ --- title: Overview id: overview +order: 1 --- TanStack AI is a lightweight, type-safe SDK for building production-ready AI experiences. Its framework-agnostic core provides type-safe tool/function calling, streaming responses, and first-class React and Solid integrations, with adapters for multiple LLM providers — enabling predictable, composable, and testable AI features across any stack. @@ -24,10 +25,12 @@ The framework-agnostic core of TanStack AI provides the building blocks for crea - **Express** - Node.js server - **Remix Router v7** - Loaders and actions -TanStack AI lets you define a tool once and provide environment-specific implementations. Using `toolDefinition()` to declare the tool’s input/output types and the server behavior with `.server()` (or a client implementation with `.client()`). These isomorphic tools can be invoked from the AI runtime regardless of framework. +TanStack AI lets you define a tool once and provide environment-specific implementations. Using `toolDefinition()` to declare the tool's input/output types and the server behavior with `.server()` (or a client implementation with `.client()`). These isomorphic tools can be invoked from the AI runtime regardless of framework. 
```typescript +import { chat } from '@tanstack/ai' import { toolDefinition } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' // Define a tool const getProductsDef = toolDefinition({ @@ -42,7 +45,11 @@ const getProducts = getProductsDef.server(async ({ query }) => { }) // Use in AI chat -chat({ tools: [getProducts] }) +chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Find products' }], + tools: [getProducts] +}) ``` ## Core Packages @@ -56,7 +63,7 @@ The core AI library that provides: - Isomorphic tool/function calling system - Agent loop strategies - Type-safe tool definitions with `toolDefinition()` -- Type-safe provider options based on adapter & model selection +- Type-safe Model Options based on adapter & model selection - Type-safe content modalities (text, image, audio, video, document) based on model capabilities ### `@tanstack/ai-client` @@ -94,4 +101,4 @@ With the help of adapters, TanStack AI can connect to various LLM providers. Ava - [Quick Start Guide](./quick-start) - Get up and running in minutes - [Tools Guide](../guides/tools) - Learn about the isomorphic tool system -- [API Reference](../api/ai) - Explore the full API \ No newline at end of file +- [API Reference](../api/ai) - Explore the full API diff --git a/docs/getting-started/quick-start.md b/docs/getting-started/quick-start.md index ff0dcd8b..663db1c6 100644 --- a/docs/getting-started/quick-start.md +++ b/docs/getting-started/quick-start.md @@ -1,6 +1,7 @@ --- title: Quick Start id: quick-start +order: 2 --- Get started with TanStack AI in minutes. This guide will walk you through creating a simple chat application using the React integration and OpenAI adapter. @@ -23,7 +24,7 @@ First, create an API route that handles chat requests. 
Here's a simplified examp // app/api/chat/route.ts (Next.js) // or src/routes/api/chat.ts (TanStack Start) import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; export async function POST(request: Request) { // Check for API key @@ -44,9 +45,8 @@ export async function POST(request: Request) { try { // Create a streaming chat response const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", conversationId }); @@ -176,10 +176,12 @@ You now have a working chat application. The `useChat` hook handles: ## Using Tools -Since TanStack AI is framework-agnostic, you can define and use tools in any environment. Here’s a quick example of defining a tool and using it in a chat: +Since TanStack AI is framework-agnostic, you can define and use tools in any environment. Here's a quick example of defining a tool and using it in a chat: ```typescript +import { chat } from '@tanstack/ai' import { toolDefinition } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' const getProductsDef = toolDefinition({ name: 'getProducts', @@ -190,7 +192,11 @@ const getProducts = getProductsDef.server(async ({ query }) => { return await db.products.search(query) }) -chat({ tools: [getProducts] }) +chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Find products' }], + tools: [getProducts] +}) ``` ## Next Steps diff --git a/docs/guides/agentic-cycle.md b/docs/guides/agentic-cycle.md index 2e68c1e7..20aa2e08 100644 --- a/docs/guides/agentic-cycle.md +++ b/docs/guides/agentic-cycle.md @@ -1,6 +1,7 @@ --- title: Agentic Cycle id: agentic-cycle +order: 6 --- The agentic cycle is the pattern where the LLM repeatedly calls tools, receives results, and continues reasoning until it can provide a final answer. This enables complex multi-step operations. 
@@ -122,9 +123,8 @@ export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [getWeather, getClothingAdvice], }); @@ -137,4 +137,4 @@ export async function POST(request: Request) { **Agentic Cycle**: 1. LLM calls `get_weather({city: "San Francisco"})` → Returns `{temp: 62, conditions: "cloudy"}` 2. LLM calls `get_clothing_advice({temperature: 62, conditions: "cloudy"})` → Returns `{recommendation: "Light jacket recommended"}` -3. LLM generates: "The weather in San Francisco is 62°F and cloudy. I recommend wearing a light jacket." \ No newline at end of file +3. LLM generates: "The weather in San Francisco is 62°F and cloudy. I recommend wearing a light jacket." diff --git a/docs/guides/client-tools.md b/docs/guides/client-tools.md index a7b19cb5..f3167d13 100644 --- a/docs/guides/client-tools.md +++ b/docs/guides/client-tools.md @@ -1,6 +1,7 @@ --- title: Client Tools id: client-tools +order: 4 --- Client tools execute in the browser, enabling UI updates, local storage access, and browser API interactions. Unlike server tools, client tools don't have an `execute` function in their server definition. 
@@ -94,16 +95,15 @@ To give the LLM access to client tools, pass the tool definitions (not implement ```typescript // api/chat/route.ts import { chat, toServerSentEventsStream } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { updateUIDef, saveToLocalStorageDef } from "@/tools/definitions"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [updateUIDef, saveToLocalStorageDef], // Pass definitions }); @@ -232,7 +232,7 @@ messages.forEach((message) => { ## Tool States Client tools go through a small set of observable lifecycle states you can surface in the UI to indicate progress: -- `awaiting-input` — the model intends to call the tool but arguments haven’t arrived yet. +- `awaiting-input` — the model intends to call the tool but arguments haven't arrived yet. - `input-streaming` — the model is streaming the tool arguments (partial input may be available). - `input-complete` — all arguments have been received and the tool is executing. - `completed` — the tool finished; part.output contains the result (or error details). @@ -297,17 +297,17 @@ const addToCartClient = addToCartDef.client((input) => { }); // Server: Pass definition for client execution -chat({ tools: [addToCartDef] }); // Client will execute +chat({ adapter: openaiText('gpt-4o'), messages: [], tools: [addToCartDef] }); // Client will execute // Or pass server implementation for server execution -chat({ tools: [addToCartServer] }); // Server will execute +chat({ adapter: openaiText('gpt-4o'), messages: [], tools: [addToCartServer] }); // Server will execute ``` ## Best Practices - **Keep client tools simple** - Since client tools run in the browser, avoid heavy computations or large dependencies that could bloat your bundle size. 
- **Handle errors gracefully** - Define clear error handling in your tool implementations and return meaningful error messages in your output schema. -- **Update UI reactively** - Use your framework’s state management (eg. React/Vue/Solid) to update the UI in response to tool executions. +- **Update UI reactively** - Use your framework's state management (eg. React/Vue/Solid) to update the UI in response to tool executions. - **Secure sensitive data** - Never store sensitive data (like API keys or personal info) in local storage or expose it via client tools. - **Provide feedback** - Use tool states to inform users about ongoing operations and results of client tool executions (loading spinners, success messages, error alerts). - **Type everything** - Leverage TypeScript and Zod schemas for full type safety from tool definitions to implementations to usage. diff --git a/docs/guides/connection-adapters.md b/docs/guides/connection-adapters.md index 7d55c9ee..7f312463 100644 --- a/docs/guides/connection-adapters.md +++ b/docs/guides/connection-adapters.md @@ -1,6 +1,7 @@ --- title: Connection Adapters id: connection-adapters +order: 9 --- diff --git a/docs/guides/image-generation.md b/docs/guides/image-generation.md new file mode 100644 index 00000000..1239a123 --- /dev/null +++ b/docs/guides/image-generation.md @@ -0,0 +1,234 @@ +--- +title: Image Generation +id: image-generation +order: 15 +--- + +# Image Generation + +TanStack AI provides support for image generation through dedicated image adapters. This guide covers how to use the image generation functionality with OpenAI and Gemini providers. + +## Overview + +Image generation is handled by image adapters that follow the same tree-shakeable architecture as other adapters in TanStack AI. 
The image adapters support:
+
+- **OpenAI**: DALL-E 2, DALL-E 3, GPT-Image-1, and GPT-Image-1-Mini models
+- **Gemini**: Imagen 3 and Imagen 4 models
+
+## Basic Usage
+
+### OpenAI Image Generation
+
+```typescript
+import { generateImage } from '@tanstack/ai'
+import { openaiImage } from '@tanstack/ai-openai'
+
+// Create an image adapter with a model (uses OPENAI_API_KEY from environment)
+const adapter = openaiImage('dall-e-3')
+
+// Generate an image
+const result = await generateImage({
+  adapter,
+  prompt: 'A beautiful sunset over mountains',
+})
+
+console.log(result.images[0].url) // URL to the generated image
+```
+
+### Gemini Image Generation
+
+```typescript
+import { generateImage } from '@tanstack/ai'
+import { geminiImage } from '@tanstack/ai-gemini'
+
+// Create an image adapter with a model (uses GOOGLE_API_KEY from environment)
+const adapter = geminiImage('imagen-3.0-generate-002')
+
+// Generate an image
+const result = await generateImage({
+  adapter,
+  prompt: 'A futuristic cityscape at night',
+})
+
+console.log(result.images[0].b64Json) // Base64 encoded image
+```
+
+## Options
+
+### Common Options
+
+All image adapters support these common options:
+
+| Option | Type | Description |
+|--------|------|-------------|
+| `adapter` | `ImageAdapter` | Image adapter instance with model (required) |
+| `prompt` | `string` | Text description of the image to generate (required) |
+| `numberOfImages` | `number` | Number of images to generate |
+| `size` | `string` | Size of the generated image in WIDTHxHEIGHT format |
+| `modelOptions?` | `object` | Model-specific options (renamed from `providerOptions`) |
+
+### Size Options
+
+#### OpenAI Models
+
+| Model | Supported Sizes |
+|-------|----------------|
+| `gpt-image-1` | `1024x1024`, `1536x1024`, `1024x1536`, `auto` |
+| `gpt-image-1-mini` | `1024x1024`, `1536x1024`, `1024x1536`, `auto` |
+| `dall-e-3` | `1024x1024`, `1792x1024`, `1024x1792` |
+| `dall-e-2` | `256x256`, `512x512`, 
`1024x1024` | + +#### Gemini Models + +Gemini uses aspect ratios internally, but TanStack AI accepts WIDTHxHEIGHT format and converts them: + +| Size | Aspect Ratio | +|------|-------------| +| `1024x1024` | 1:1 | +| `1920x1080` | 16:9 | +| `1080x1920` | 9:16 | + +Alternatively, you can specify the aspect ratio directly in Model Options: + +```typescript +const result = await generateImage({ + adapter: geminiImage('imagen-4.0-generate-001'), + prompt: 'A landscape photo', + modelOptions: { + aspectRatio: '16:9' + } +}) +``` + +## Model Options + +### OpenAI Model Options + +OpenAI models support model-specific Model Options: + +#### GPT-Image-1 / GPT-Image-1-Mini + +```typescript +const result = await generateImage({ + adapter: openaiImage('gpt-image-1'), + prompt: 'A cat wearing a hat', + modelOptions: { + quality: 'high', // 'high' | 'medium' | 'low' | 'auto' + background: 'transparent', // 'transparent' | 'opaque' | 'auto' + outputFormat: 'png', // 'png' | 'jpeg' | 'webp' + moderation: 'low', // 'low' | 'auto' + } +}) +``` + +#### DALL-E 3 + +```typescript +const result = await generateImage({ + adapter: openaiImage('dall-e-3'), + prompt: 'A futuristic car', + modelOptions: { + quality: 'hd', // 'hd' | 'standard' + style: 'vivid', // 'vivid' | 'natural' + } +}) +``` + +### Gemini Model Options + +```typescript +const result = await generateImage({ + adapter: geminiImage('imagen-4.0-generate-001'), + prompt: 'A beautiful garden', + modelOptions: { + aspectRatio: '16:9', + personGeneration: 'ALLOW_ADULT', // 'DONT_ALLOW' | 'ALLOW_ADULT' | 'ALLOW_ALL' + negativePrompt: 'blurry, low quality', + addWatermark: true, + outputMimeType: 'image/png', // 'image/png' | 'image/jpeg' | 'image/webp' + } +}) +``` + +## Response Format + +The image generation result includes: + +```typescript +interface ImageGenerationResult { + id: string // Unique identifier for this generation + model: string // The model used + images: GeneratedImage[] // Array of generated images + usage?: 
{ + inputTokens: number + outputTokens: number + totalTokens: number + } +} + +interface GeneratedImage { + b64Json?: string // Base64 encoded image data + url?: string // URL to the image (OpenAI only) + revisedPrompt?: string // Revised prompt (OpenAI only) +} +``` + +## Model Availability + +### OpenAI Models + +| Model | Images per Request | +|-------|-------------------| +| `gpt-image-1` | 1-10 | +| `gpt-image-1-mini` | 1-10 | +| `dall-e-3` | 1 | +| `dall-e-2` | 1-10 | + +### Gemini Models + +| Model | Images per Request | +|-------|-------------------| +| `imagen-3.0-generate-002` | 1-4 | +| `imagen-4.0-generate-001` | 1-4 | +| `imagen-4.0-fast-generate-001` | 1-4 | +| `imagen-4.0-ultra-generate-001` | 1-4 | + +## Error Handling + +Image generation can fail for various reasons. The adapters validate inputs before making API calls: + +```typescript +try { + const result = await generateImage({ + adapter: openaiImage('dall-e-3'), + prompt: 'A cat', + size: '512x512', // Invalid size for DALL-E 3 + }) +} catch (error) { + console.error(error.message) + // "Size "512x512" is not supported by model "dall-e-3". 
+ // Supported sizes: 1024x1024, 1792x1024, 1024x1792" +} +``` + +## Environment Variables + +The image adapters use the same environment variables as the text adapters: + +- **OpenAI**: `OPENAI_API_KEY` +- **Gemini**: `GOOGLE_API_KEY` or `GEMINI_API_KEY` + +## Explicit API Keys + +For production use or when you need explicit control: + +```typescript +import { createOpenaiImage } from '@tanstack/ai-openai' +import { createGeminiImage } from '@tanstack/ai-gemini' + +// OpenAI +const openaiAdapter = createOpenaiImage('your-openai-api-key') + +// Gemini +const geminiAdapter = createGeminiImage('your-google-api-key') +``` diff --git a/docs/guides/migration.md b/docs/guides/migration.md new file mode 100644 index 00000000..6e1e4ccd --- /dev/null +++ b/docs/guides/migration.md @@ -0,0 +1,437 @@ +--- +title: Migration Guide +id: migration +order: 18 +--- + +# Migration Guide + +This guide helps you migrate from the previous version of TanStack AI to the latest version. The major changes focus on improved tree-shaking, clearer API naming, and simplified configuration. + +## Overview of Changes + +The main breaking changes in this release are: + +1. **Adapter functions split** - Adapters are now split into activity-specific functions for optimal tree-shaking +2. **Common options flattened** - Options are now flattened in the config instead of nested +3. **`providerOptions` renamed** - Now called `modelOptions` for clarity +4. **`toResponseStream` renamed** - Now called `toServerSentEventsStream` for clarity +5. **Embeddings removed** - Embeddings support has been removed (most vector DB services have built-in support) + +## 1. Adapter Functions Split + +Adapters have been split into activity-specific functions to enable optimal tree-shaking. Instead of importing a monolithic adapter, you now import specific functions for each activity type. 
+ +### Before + +```typescript +import { chat } from '@tanstack/ai' +import { openai } from '@tanstack/ai-openai' + +const stream = chat({ + adapter: openai(), + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Hello!' }], +}) +``` + +### After + +```typescript +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +const stream = chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Hello!' }], +}) +``` + +### Key Changes + +- **Model is passed to adapter factory** - The model name is now passed directly to the adapter function (e.g., `openaiText('gpt-4o')`) +- **No separate `model` parameter** - The model is stored on the adapter, so you don't need to pass it separately to `chat()` +- **Activity-specific imports** - Import only what you need (e.g., `openaiText`, `openaiSummarize`, `openaiImage`) + +### All Adapter Functions + +Each provider package now exports activity-specific functions: + +#### OpenAI + +```typescript +import { + openaiText, // Chat/text generation + openaiSummarize, // Summarization + openaiImage, // Image generation + openaiSpeech, // Text-to-speech + openaiTranscription, // Audio transcription + openaiVideo, // Video generation +} from '@tanstack/ai-openai' +``` + +#### Anthropic + +```typescript +import { + anthropicText, // Chat/text generation + anthropicSummarize, // Summarization +} from '@tanstack/ai-anthropic' +``` + +#### Gemini + +```typescript +import { + geminiText, // Chat/text generation + geminiSummarize, // Summarization + geminiImage, // Image generation + geminiSpeech, // Text-to-speech (experimental) +} from '@tanstack/ai-gemini' +``` + +#### Ollama + +```typescript +import { + ollamaText, // Chat/text generation + ollamaSummarize, // Summarization +} from '@tanstack/ai-ollama' +``` + +### Migration Example + +Here's a complete example of migrating adapter usage: + +#### Before + +```typescript +import { chat } from '@tanstack/ai' +import { openai } from 
'@tanstack/ai-openai' +import { anthropic } from '@tanstack/ai-anthropic' + +type Provider = 'openai' | 'anthropic' + +function getAdapter(provider: Provider) { + switch (provider) { + case 'openai': + return openai() + case 'anthropic': + return anthropic() + } +} + +const stream = chat({ + adapter: getAdapter(provider), + model: provider === 'openai' ? 'gpt-4o' : 'claude-sonnet-4-5', + messages, +}) +``` + +#### After + +```typescript +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' +import { anthropicText } from '@tanstack/ai-anthropic' + +type Provider = 'openai' | 'anthropic' + +const adapters = { + openai: () => openaiText('gpt-4o'), + anthropic: () => anthropicText('claude-sonnet-4-5'), +} + +const stream = chat({ + adapter: adapters[provider](), + messages, +}) +``` + +## 2. Common Options Flattened + +Common options that were previously nested in an `options` object are now flattened directly in the config. + +### Before + +```typescript +const stream = chat({ + adapter: openai(), + model: 'gpt-4o', + messages, + options: { + temperature: 0.7, + maxTokens: 1000, + topP: 0.9, + }, +}) +``` + +### After + +```typescript +const stream = chat({ + adapter: openaiText('gpt-4o'), + messages, + temperature: 0.7, + maxTokens: 1000, + topP: 0.9, +}) +``` + +### Available Options + +These options are now available at the top level: + +- `temperature` - Controls randomness (0.0 to 2.0) +- `topP` - Nucleus sampling parameter +- `maxTokens` - Maximum tokens to generate +- `metadata` - Additional metadata to attach + +## 3. `providerOptions` → `modelOptions` + +The `providerOptions` parameter has been renamed to `modelOptions` for clarity. This parameter contains model-specific options that vary by provider and model. 
+ +### Before + +```typescript +const stream = chat({ + adapter: openai(), + model: 'gpt-4o', + messages, + providerOptions: { + // OpenAI-specific options + responseFormat: { type: 'json_object' }, + logitBias: { '123': 1.0 }, + }, +}) +``` + +### After + +```typescript +const stream = chat({ + adapter: openaiText('gpt-4o'), + messages, + modelOptions: { + // OpenAI-specific options + responseFormat: { type: 'json_object' }, + logitBias: { '123': 1.0 }, + }, +}) +``` + +### Type Safety + +`modelOptions` is fully typed based on the adapter and model you're using: + +```typescript +import { openaiText } from '@tanstack/ai-openai' + +const adapter = openaiText('gpt-4o') + +// TypeScript knows the exact modelOptions type for gpt-4o +const stream = chat({ + adapter, + messages, + modelOptions: { + // Autocomplete and type checking for gpt-4o options + responseFormat: { type: 'json_object' }, + }, +}) +``` + +## 4. `toResponseStream` → `toServerSentEventsStream` + +The `toResponseStream` function has been renamed to `toServerSentEventsStream` to better reflect its purpose. Additionally, the API has changed slightly. 
+ +### Before + +```typescript +import { chat, toResponseStream } from '@tanstack/ai' +import { openai } from '@tanstack/ai-openai' + +export async function POST(request: Request) { + const { messages } = await request.json() + const abortController = new AbortController() + + const stream = chat({ + adapter: openai(), + model: 'gpt-4o', + messages, + abortController, + }) + + return toResponseStream(stream, { abortController }) +} +``` + +### After + +```typescript +import { chat, toServerSentEventsStream } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +export async function POST(request: Request) { + const { messages } = await request.json() + const abortController = new AbortController() + + const stream = chat({ + adapter: openaiText('gpt-4o'), + messages, + abortController, + }) + + const readableStream = toServerSentEventsStream(stream, abortController) + return new Response(readableStream, { + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }, + }) +} +``` + +### Key Changes + +- **Function renamed** - `toResponseStream` → `toServerSentEventsStream` +- **Returns ReadableStream** - Now returns a `ReadableStream` instead of a `Response` +- **Manual Response creation** - You create the `Response` object yourself with appropriate headers +- **AbortController parameter** - Passed as a separate parameter instead of in options + +### Alternative: HTTP Stream Format + +If you need HTTP stream format (newline-delimited JSON) instead of SSE, use `toHttpStream`: + +```typescript +import { toHttpStream } from '@tanstack/ai' + +const readableStream = toHttpStream(stream, abortController) +return new Response(readableStream, { + headers: { + 'Content-Type': 'application/x-ndjson', + }, +}) +``` + +## 5. Embeddings Removed + +Embeddings support has been removed from TanStack AI. Most vector database services (like Pinecone, Weaviate, Qdrant, etc.) 
have built-in support for embeddings, and most applications pick an embedding model and stick with it. + +### Before + +```typescript +import { embedding } from '@tanstack/ai' +import { openaiEmbed } from '@tanstack/ai-openai' + +const result = await embedding({ + adapter: openaiEmbed(), + model: 'text-embedding-3-small', + input: 'Hello, world!', +}) +``` + +### After + +Use your vector database service's built-in embedding support, or call the provider's API directly: + +```typescript +// Example with OpenAI SDK directly +import OpenAI from 'openai' + +const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY }) + +const result = await openai.embeddings.create({ + model: 'text-embedding-3-small', + input: 'Hello, world!', +}) +``` + +### Why This Change? + +- **Vector DB services handle it** - Most vector databases have native embedding support +- **Simpler API** - Reduces API surface area and complexity +- **Direct provider access** - You can use the provider SDK directly for embeddings +- **Focused scope** - TanStack AI focuses on chat, tools, and agentic workflows + +## Complete Migration Example + +Here's a complete example showing all the changes together: + +### Before + +```typescript +import { chat, toResponseStream } from '@tanstack/ai' +import { openai } from '@tanstack/ai-openai' + +export async function POST(request: Request) { + const { messages } = await request.json() + const abortController = new AbortController() + + const stream = chat({ + adapter: openai(), + model: 'gpt-4o', + messages, + options: { + temperature: 0.7, + maxTokens: 1000, + }, + providerOptions: { + responseFormat: { type: 'json_object' }, + }, + abortController, + }) + + return toResponseStream(stream, { abortController }) +} +``` + +### After + +```typescript +import { chat, toServerSentEventsStream } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +export async function POST(request: Request) { + const { messages } = await request.json() + const 
abortController = new AbortController() + + const stream = chat({ + adapter: openaiText('gpt-4o'), + messages, + temperature: 0.7, + maxTokens: 1000, + modelOptions: { + responseFormat: { type: 'json_object' }, + }, + abortController, + }) + + const readableStream = toServerSentEventsStream(stream, abortController) + return new Response(readableStream, { + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }, + }) +} +``` + +## Benefits of These Changes + +1. **Better Tree-Shaking** - Import only what you need, resulting in smaller bundle sizes +2. **Clearer API** - Function names clearly indicate their purpose +3. **Type Safety** - Model-specific options are fully typed +4. **Simpler Configuration** - Flattened options are easier to work with +5. **Focused Scope** - Removed features that are better handled elsewhere + +## Need Help? + +If you encounter issues during migration: + +1. Check the [Tree-Shaking Guide](./tree-shaking) for details on the new adapter structure +2. Review the [API Reference](../api/ai) for complete function signatures +3. Look at the [examples](../getting-started/quick-start) for working code samples + diff --git a/docs/guides/multimodal-content.md b/docs/guides/multimodal-content.md index 88c8d71a..516a5f63 100644 --- a/docs/guides/multimodal-content.md +++ b/docs/guides/multimodal-content.md @@ -1,6 +1,7 @@ --- title: Multimodal Content id: multimodal-content +order: 8 --- TanStack AI supports multimodal content in messages, allowing you to send images, audio, video, and documents alongside text to AI models that support these modalities. 
@@ -54,13 +55,10 @@ Messages can have `content` as either a string or an array of `ContentPart`: ```typescript import { chat } from '@tanstack/ai' -import { OpenAI } from '@tanstack/ai-openai' - -const openai = new OpenAI({ apiKey: 'your-key' }) +import { openaiText } from '@tanstack/ai-openai' const response = await chat({ - adapter: openai, - model: 'gpt-4o', + adapter: openaiText('gpt-4o'), messages: [ { role: 'user', @@ -86,9 +84,9 @@ const response = await chat({ OpenAI supports images and audio in their vision and audio models: ```typescript -import { OpenAI } from '@tanstack/ai-openai' +import { openaiText } from '@tanstack/ai-openai' -const openai = new OpenAI({ apiKey: 'your-key' }) +const adapter = openaiText() // Image with detail level metadata const message = { @@ -113,9 +111,9 @@ const message = { Anthropic's Claude models support images and PDF documents: ```typescript -import { Anthropic } from '@tanstack/ai-anthropic' +import { anthropicText } from '@tanstack/ai-anthropic' -const anthropic = new Anthropic({ apiKey: 'your-key' }) +const adapter = anthropicText() // Image with media type const imageMessage = { @@ -152,9 +150,9 @@ const docMessage = { Google's Gemini models support a wide range of modalities: ```typescript -import { GeminiAdapter } from '@tanstack/ai-gemini' +import { geminiText } from '@tanstack/ai-gemini' -const gemini = new GeminiAdapter({ apiKey: 'your-key' }) +const adapter = geminiText() // Image with mimeType const message = { @@ -179,9 +177,9 @@ const message = { Ollama supports images in compatible models: ```typescript -import { OllamaAdapter } from '@tanstack/ai-ollama' +import { ollamaText } from '@tanstack/ai-ollama' -const ollama = new OllamaAdapter({ host: 'http://localhost:11434' }) +const adapter = ollamaText('http://localhost:11434') // Image as base64 const message = { @@ -278,21 +276,20 @@ import type { GeminiMediaMetadata } from '@tanstack/ai-gemini' When receiving messages from external sources (like 
`request.json()`), the data is typed as `any`, which can bypass TypeScript's type checking. Use `assertMessages` to restore type safety: ```typescript -import { assertMessages, chat } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' +import { chat, assertMessages } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' // In an API route handler const { messages: incomingMessages } = await request.json() -const adapter = openai() +const adapter = openaiText('gpt-4o') // Assert incoming messages are compatible with gpt-4o (text + image only) -const typedMessages = assertMessages({ adapter, model: 'gpt-4o' }, incomingMessages) +const typedMessages = assertMessages({ adapter }, incomingMessages) // Now TypeScript will properly check any additional messages you add const stream = chat({ adapter, - model: 'gpt-4o', messages: [ ...typedMessages, // This will error if you try to add unsupported content types diff --git a/docs/guides/observability.md b/docs/guides/observability.md index 30c36d20..9b94f30d 100644 --- a/docs/guides/observability.md +++ b/docs/guides/observability.md @@ -1,3 +1,9 @@ +--- +title: Observability +id: observability +order: 10 +--- + # Event client The `@tanstack/ai` package offers you an event client for observability and debugging purposes. diff --git a/docs/guides/per-model-type-safety.md b/docs/guides/per-model-type-safety.md index ffa91256..d44a2cd8 100644 --- a/docs/guides/per-model-type-safety.md +++ b/docs/guides/per-model-type-safety.md @@ -1,9 +1,10 @@ --- title: Per-Model Type Safety id: per-model-type-safety +order: 11 --- -The AI SDK provides **model-specific type safety** for `providerOptions`. Each model's capabilities determine which provider options are allowed, and TypeScript will enforce this at compile time. +The AI SDK provides **model-specific type safety** for `modelOptions`. Each model's capabilities determine which model options are allowed, and TypeScript will enforce this at compile time. 
## How It Works @@ -13,16 +14,13 @@ The AI SDK provides **model-specific type safety** for `providerOptions`. Each m ```typescript import { chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; - -const adapter = openai(); +import { openaiText } from "@tanstack/ai-openai"; // ✅ gpt-5 supports structured outputs - `text` is allowed const validCall = chat({ - adapter, - model: "gpt-5", + adapter: openaiText("gpt-5"), messages: [], - providerOptions: { + modelOptions: { // OK - text is included for gpt-5 text: { type: "json_schema", @@ -39,10 +37,9 @@ const validCall = chat({ ```typescript // ❌ gpt-4-turbo does NOT support structured outputs - `text` is rejected const invalidCall = chat({ - adapter: openai(), - model: "gpt-4-turbo", + adapter: openaiText("gpt-4-turbo"), messages: [], - providerOptions: { + modelOptions: { text: {}, // ❌ TypeScript error: 'text' does not exist in type }, }); @@ -56,7 +53,7 @@ error TS2353: Object literal may only specify known properties, and 'text' does ## Benefits -- **Compile-time safety**: Catch incorrect provider options before deployment +- **Compile-time safety**: Catch incorrect model options before deployment - **Better IDE experience**: Autocomplete shows only valid options for each model - **Self-documenting**: Model capabilities are explicit in the type system - **Zero runtime overhead**: All type checking happens at compile time diff --git a/docs/guides/runtime-adapter-switching.md b/docs/guides/runtime-adapter-switching.md new file mode 100644 index 00000000..7efdbec6 --- /dev/null +++ b/docs/guides/runtime-adapter-switching.md @@ -0,0 +1,195 @@ +--- +title: Runtime Adapter Switching +id: runtime-adapter-switching +order: 12 +--- + +# Runtime Adapter Switching with Type Safety + +Learn how to build interfaces where users can switch between LLM providers at runtime while maintaining full TypeScript type safety. 
+ +## The Simple Approach + +With TanStack AI, the model is passed directly to the adapter factory function. This gives you full type safety and autocomplete at the point of definition: + +```typescript +import { chat, toStreamResponse } from '@tanstack/ai' +import { anthropicText } from '@tanstack/ai-anthropic' +import { openaiText } from '@tanstack/ai-openai' + +type Provider = 'openai' | 'anthropic' + +// Define adapters with their models - autocomplete works here! +const adapters = { + anthropic: () => anthropicText('claude-sonnet-4-5'), // ✅ Autocomplete! + openai: () => openaiText('gpt-4o'), // ✅ Autocomplete! +} + +// In your request handler: +const provider: Provider = request.body.provider || 'openai' + +const stream = chat({ + adapter: adapters[provider](), + messages, +}) +``` + +## Why This Works + +Each adapter factory function accepts a model name as its first argument and returns a fully typed adapter: + +```typescript +// These are equivalent: +const adapter1 = openaiText('gpt-4o') +const adapter2 = new OpenAITextAdapter({ apiKey: process.env.OPENAI_API_KEY }, 'gpt-4o') + +// The model is stored on the adapter +console.log(adapter1.selectedModel) // 'gpt-4o' +``` + +When you pass an adapter to `chat()`, it uses the model from `adapter.selectedModel`. 
This means: + +- **Full autocomplete** - When typing the model name, TypeScript knows valid options +- **Type validation** - Invalid model names cause compile errors +- **Clean code** - No separate `model` parameter needed + +## Full Example + +Here's a complete example showing a multi-provider chat API: + +```typescript +import { createFileRoute } from '@tanstack/react-router' +import { chat, maxIterations, toStreamResponse } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' +import { anthropicText } from '@tanstack/ai-anthropic' +import { geminiText } from '@tanstack/ai-gemini' +import { ollamaText } from '@tanstack/ai-ollama' + +type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' + +// Define adapters with their models +const adapters = { + anthropic: () => anthropicText('claude-sonnet-4-5'), + gemini: () => geminiText('gemini-2.0-flash-exp'), + ollama: () => ollamaText('mistral:7b'), + openai: () => openaiText('gpt-4o'), +} + +export const Route = createFileRoute('/api/chat')({ + server: { + handlers: { + POST: async ({ request }) => { + const abortController = new AbortController() + const body = await request.json() + const { messages, data } = body + + const provider: Provider = data?.provider || 'openai' + + const stream = chat({ + adapter: adapters[provider](), + tools: [...], + systemPrompts: [...], + messages, + abortController, + }) + + return toStreamResponse(stream, { abortController }) + }, + }, + }, +}) +``` + +## Using with Image Adapters + +The same pattern works for image generation: + +```typescript +import { generateImage } from '@tanstack/ai' +import { openaiImage } from '@tanstack/ai-openai' +import { geminiImage } from '@tanstack/ai-gemini' + +const imageAdapters = { + openai: () => openaiImage('gpt-image-1'), + gemini: () => geminiImage('gemini-2.0-flash-preview-image-generation'), +} + +// Usage +const result = await generateImage({ + adapter: imageAdapters[provider](), + prompt: 'A beautiful sunset over 
mountains', + size: '1024x1024', +}) +``` + +## Using with Summarize Adapters + +And for summarization: + +```typescript +import { summarize } from '@tanstack/ai' +import { openaiSummarize } from '@tanstack/ai-openai' +import { anthropicSummarize } from '@tanstack/ai-anthropic' + +const summarizeAdapters = { + openai: () => openaiSummarize('gpt-4o-mini'), + anthropic: () => anthropicSummarize('claude-sonnet-4-5'), +} + +// Usage +const result = await summarize({ + adapter: summarizeAdapters[provider](), + text: longDocument, + maxLength: 100, + style: 'concise', +}) +``` + +## Migration from Switch Statements + +If you have existing code using switch statements, here's how to migrate: + +### Before + +```typescript +let adapter +let model + +switch (provider) { + case 'anthropic': + adapter = anthropicText() + model = 'claude-sonnet-4-5' + break + case 'openai': + default: + adapter = openaiText() + model = 'gpt-4o' + break +} + +const stream = chat({ + adapter: adapter as any, + model: model as any, + messages, +}) +``` + +### After + +```typescript +const adapters = { + anthropic: () => anthropicText('claude-sonnet-4-5'), + openai: () => openaiText('gpt-4o'), +} + +const stream = chat({ + adapter: adapters[provider](), + messages, +}) +``` + +The key changes: + +1. Replace the switch statement with an object of factory functions +2. Each factory function creates an adapter with the model included +3. No more `as any` casts - full type safety! diff --git a/docs/guides/server-tools.md b/docs/guides/server-tools.md index 0a8e43ea..aabbd833 100644 --- a/docs/guides/server-tools.md +++ b/docs/guides/server-tools.md @@ -1,6 +1,7 @@ --- title: Server Tools id: server-tools +order: 3 --- Server tools execute automatically when called by the LLM. They have full access to server resources like databases, APIs, and environment variables. 
@@ -137,20 +138,19 @@ const searchProducts = searchProductsDef.server(async ({ query, limit = 10 }) => ## Using Server Tools -Pass tools to the `chat` method: +Pass tools to the `chat` function: ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { getUserData, searchProducts } from "./tools"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [getUserData, searchProducts], }); @@ -203,13 +203,12 @@ export const searchProducts = searchProductsDef.server(async ({ query }) => { // api/chat/route.ts import { chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { getUserData, searchProducts } from "@/tools/server"; const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [getUserData, searchProducts], }); ``` diff --git a/docs/guides/streaming.md b/docs/guides/streaming.md index da2a806b..47581e7a 100644 --- a/docs/guides/streaming.md +++ b/docs/guides/streaming.md @@ -1,6 +1,7 @@ --- -title: Streaming Responses +title: Streaming id: streaming-responses +order: 7 --- TanStack AI supports streaming responses for real-time chat experiences. Streaming allows you to display responses as they're generated, rather than waiting for the complete response. 
@@ -11,12 +12,11 @@ When you use `chat()`, it returns an async iterable stream of chunks: ```typescript import { chat } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", }); // Stream contains chunks as they arrive @@ -31,15 +31,14 @@ Convert the stream to an HTTP response using `toStreamResponse`: ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", }); // Convert to HTTP response with proper headers diff --git a/docs/guides/text-to-speech.md b/docs/guides/text-to-speech.md new file mode 100644 index 00000000..b4575c70 --- /dev/null +++ b/docs/guides/text-to-speech.md @@ -0,0 +1,249 @@ +--- +title: Text-to-Speech +id: text-to-speech +order: 13 +--- + +# Text-to-Speech (TTS) + +TanStack AI provides support for text-to-speech generation through dedicated TTS adapters. This guide covers how to convert text into spoken audio using OpenAI and Gemini providers. + +## Overview + +Text-to-speech (TTS) is handled by TTS adapters that follow the same tree-shakeable architecture as other adapters in TanStack AI. 
The TTS adapters support: + +- **OpenAI**: TTS-1, TTS-1-HD, and audio-capable GPT-4o models +- **Gemini**: Gemini 2.5 Flash TTS (experimental) + +## Basic Usage + +### OpenAI Text-to-Speech + +```typescript +import { generateSpeech } from '@tanstack/ai' +import { openaiTTS } from '@tanstack/ai-openai' + +// Create a TTS adapter (uses OPENAI_API_KEY from environment) +const adapter = openaiTTS('tts-1') + +// Generate speech from text +const result = await generateSpeech({ + adapter, + text: 'Hello, welcome to TanStack AI!', + voice: 'alloy', +}) + +// result.audio contains base64-encoded audio data +console.log(result.format) // 'mp3' +console.log(result.contentType) // 'audio/mpeg' +``` + +### Gemini Text-to-Speech (Experimental) + +```typescript +import { generateSpeech } from '@tanstack/ai' +import { geminiSpeech } from '@tanstack/ai-gemini' + +// Create a TTS adapter (uses GOOGLE_API_KEY from environment) +const adapter = geminiSpeech('gemini-2.5-flash-preview-tts') + +// Generate speech from text +const result = await generateSpeech({ + adapter, + text: 'Hello from Gemini TTS!', +}) + +console.log(result.audio) // Base64 encoded audio +``` + +## Options + +### Common Options + +All TTS adapters support these common options: + +| Option | Type | Description | +|--------|------|-------------| +| `text` | `string` | The text to convert to speech (required) | +| `voice` | `string` | The voice to use for generation | +| `format` | `string` | Output audio format (e.g., "mp3", "wav") | + +### OpenAI Voice Options + +OpenAI provides several distinct voices: + +| Voice | Description | +|-------|-------------| +| `alloy` | Neutral, balanced voice | +| `echo` | Warm, conversational voice | +| `fable` | Expressive, storytelling voice | +| `onyx` | Deep, authoritative voice | +| `nova` | Friendly, upbeat voice | +| `shimmer` | Clear, gentle voice | +| `ash` | Calm, measured voice | +| `ballad` | Melodic, flowing voice | +| `coral` | Bright, 
energetic voice | +| `sage` | Wise, thoughtful voice | +| `verse` | Poetic, rhythmic voice | + +### OpenAI Format Options + +| Format | Description | +|--------|-------------| +| `mp3` | MP3 audio (default) | +| `opus` | Opus audio (good for streaming) | +| `aac` | AAC audio | +| `flac` | FLAC audio (lossless) | +| `wav` | WAV audio (uncompressed) | +| `pcm` | Raw PCM audio | + +## Model Options + +### OpenAI Model Options + +```typescript +const result = await generateSpeech({ + adapter: openaiTTS('tts-1-hd'), + text: 'High quality speech synthesis', + voice: 'nova', + format: 'mp3', + modelOptions: { + speed: 1.0, // 0.25 to 4.0 + }, +}) +``` + +| Option | Type | Description | +|--------|------|-------------| +| `speed` | `number` | Playback speed (0.25 to 4.0, default 1.0) | +| `instructions` | `string` | Voice style instructions (GPT-4o audio models only) | + +> **Note:** The `instructions` and `stream_format` options are only available with `gpt-4o-audio-preview` and `gpt-4o-mini-audio-preview` models, not with `tts-1` or `tts-1-hd`. 
+ +## Response Format + +The TTS result includes: + +```typescript +interface TTSResult { + id: string // Unique identifier for this generation + model: string // The model used + audio: string // Base64-encoded audio data + format: string // Audio format (e.g., "mp3") + contentType: string // MIME type (e.g., "audio/mpeg") + duration?: number // Duration in seconds (if available) +} +``` + +## Playing Audio in the Browser + +```typescript +// Convert base64 to audio and play +function playAudio(result: TTSResult) { + const audioData = atob(result.audio) + const bytes = new Uint8Array(audioData.length) + for (let i = 0; i < audioData.length; i++) { + bytes[i] = audioData.charCodeAt(i) + } + + const blob = new Blob([bytes], { type: result.contentType }) + const url = URL.createObjectURL(blob) + + const audio = new Audio(url) + audio.play() + + // Clean up when done + audio.onended = () => URL.revokeObjectURL(url) +} +``` + +## Saving Audio to File (Node.js) + +```typescript +import { writeFile } from 'fs/promises' + +async function saveAudio(result: TTSResult, filename: string) { + const audioBuffer = Buffer.from(result.audio, 'base64') + await writeFile(filename, audioBuffer) + console.log(`Saved to ${filename}`) +} + +// Usage +const result = await generateSpeech({ + adapter: openaiTTS('tts-1'), + text: 'Hello world!', +}) + +await saveAudio(result, 'output.mp3') +``` + +## Model Availability + +### OpenAI Models + +| Model | Quality | Speed | Use Case | +|-------|---------|-------|----------| +| `tts-1` | Standard | Fast | Real-time applications | +| `tts-1-hd` | High | Slower | Production audio | +| `gpt-4o-audio-preview` | Highest | Variable | Advanced voice control | +| `gpt-4o-mini-audio-preview` | High | Fast | Balanced quality/speed | + +### Gemini Models + +| Model | Status | Notes | +|-------|--------|-------| +| `gemini-2.5-flash-preview-tts` | Experimental | May require Live API for full features | + +## Error Handling + +```typescript +try { + const 
result = await generateSpeech({ + adapter: openaiTTS('tts-1'), + text: 'Hello!', + }) +} catch (error) { + if (error.message.includes('exceeds maximum length')) { + console.error('Text is too long (max 4096 characters)') + } else if (error.message.includes('Speed must be between')) { + console.error('Invalid speed value') + } else { + console.error('TTS error:', error.message) + } +} +``` + +## Environment Variables + +The TTS adapters use the same environment variables as other adapters: + +- **OpenAI**: `OPENAI_API_KEY` +- **Gemini**: `GOOGLE_API_KEY` or `GEMINI_API_KEY` + +## Explicit API Keys + +For production use or when you need explicit control: + +```typescript +import { createOpenaiTTS } from '@tanstack/ai-openai' +import { createGeminiTTS } from '@tanstack/ai-gemini' + +// OpenAI +const openaiAdapter = createOpenaiTTS('your-openai-api-key') + +// Gemini +const geminiAdapter = createGeminiTTS('your-google-api-key') +``` + +## Best Practices + +1. **Text Length**: OpenAI TTS supports up to 4096 characters per request. For longer content, split into chunks. + +2. **Voice Selection**: Choose voices appropriate for your content—use `onyx` for authoritative content, `nova` for friendly interactions. + +3. **Format Selection**: Use `mp3` for general use, `opus` for streaming, `wav` for further processing. + +4. **Caching**: Cache generated audio to avoid regenerating the same content. + +5. **Error Handling**: Always handle errors gracefully, especially for user-facing applications. + diff --git a/docs/guides/tool-approval.md b/docs/guides/tool-approval.md index 0479a112..bcdff34c 100644 --- a/docs/guides/tool-approval.md +++ b/docs/guides/tool-approval.md @@ -1,6 +1,7 @@ --- title: Tool Approval Flow id: tool-approval-flow +order: 5 --- The tool approval flow allows you to require user approval before executing sensitive tools, giving users control over actions like sending emails, making purchases, or deleting data. 
Tools go through these states during approval: @@ -57,16 +58,15 @@ On the server, tools with `needsApproval: true` will pause execution and wait fo ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { sendEmail } from "./tools"; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [sendEmail], }); diff --git a/docs/guides/tool-architecture.md b/docs/guides/tool-architecture.md index f691f510..f018d776 100644 --- a/docs/guides/tool-architecture.md +++ b/docs/guides/tool-architecture.md @@ -1,6 +1,7 @@ --- title: Tool Architecture id: tool-architecture +order: 2 --- The TanStack AI tool system provides a powerful, flexible architecture for enabling AI agents to interact with external systems: @@ -69,7 +70,7 @@ sequenceDiagram ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { getWeather, sendEmail } from "./tools"; export async function POST(request: Request) { @@ -77,9 +78,8 @@ export async function POST(request: Request) { // Create streaming chat with tools const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [getWeather, sendEmail], // Tool definitions passed here }); diff --git a/docs/guides/tools.md b/docs/guides/tools.md index 9f361696..cd09d7d5 100644 --- a/docs/guides/tools.md +++ b/docs/guides/tools.md @@ -1,6 +1,7 @@ --- title: Tools id: tools +order: 1 --- Tools (also called "function calling") allow AI models to interact with external systems, APIs, or perform computations. 
TanStack AI provides an isomorphic tool system that enables type-safe, framework-agnostic tool definitions that work on both server and client. @@ -174,7 +175,7 @@ const getWeatherServer = getWeatherDef.server(async (args) => { ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; +import { openaiText } from "@tanstack/ai-openai"; import { getWeatherDef } from "./tools"; export async function POST(request: Request) { @@ -187,9 +188,8 @@ export async function POST(request: Request) { }); const stream = chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, - model: "gpt-4o", tools: [getWeather], // Pass server tools }); @@ -223,16 +223,16 @@ const saveToStorage = saveToStorageDef.client((input) => { // Create typed tools array (no 'as const' needed!) const tools = clientTools(updateUI, saveToStorage); -const chatOptions = createChatClientOptions({ +const textOptions = createChatClientOptions({ connection: fetchServerSentEvents("/api/chat"), tools, }); // Infer message types for full type safety -type ChatMessages = InferChatMessages; +type ChatMessages = InferChatMessages; function ChatComponent() { - const { messages, sendMessage } = useChat(chatOptions); + const { messages, sendMessage } = useChat(textOptions); // messages is now fully typed with tool names and outputs! 
return ; @@ -280,7 +280,7 @@ On the server, pass the definition (for client execution) or server implementati ```typescript chat({ - adapter: openai(), + adapter: openaiText("gpt-4o"), messages, tools: [addToCartDef], // Client will execute, or tools: [addToCartServer], // Server will execute diff --git a/docs/guides/transcription.md b/docs/guides/transcription.md new file mode 100644 index 00000000..9df61062 --- /dev/null +++ b/docs/guides/transcription.md @@ -0,0 +1,334 @@ +--- +title: Transcription +id: transcription +order: 14 +--- + +# Audio Transcription + +TanStack AI provides support for audio transcription (speech-to-text) through dedicated transcription adapters. This guide covers how to convert spoken audio into text using OpenAI's Whisper and GPT-4o transcription models. + +## Overview + +Audio transcription is handled by transcription adapters that follow the same tree-shakeable architecture as other adapters in TanStack AI. + +Currently supported: +- **OpenAI**: Whisper-1, GPT-4o-transcribe, GPT-4o-mini-transcribe + +## Basic Usage + +### OpenAI Transcription + +```typescript +import { generateTranscription } from '@tanstack/ai' +import { openaiTranscription } from '@tanstack/ai-openai' + +// Create a transcription adapter (uses OPENAI_API_KEY from environment) +const adapter = openaiTranscription() + +// Transcribe audio from a file +const audioFile = new File([audioBuffer], 'audio.mp3', { type: 'audio/mpeg' }) + +const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, + language: 'en', +}) + +console.log(result.text) // The transcribed text +``` + +### Using Base64 Audio + +```typescript +import { readFile } from 'fs/promises' + +// Read audio file as base64 +const audioBuffer = await readFile('recording.mp3') +const base64Audio = audioBuffer.toString('base64') + +const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: base64Audio, +}) + 
+console.log(result.text) +``` + +### Using Data URLs + +```typescript +const dataUrl = `data:audio/mpeg;base64,${base64AudioData}` + +const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: dataUrl, +}) +``` + +## Options + +### Common Options + +| Option | Type | Description | +|--------|------|-------------| +| `audio` | `File \| string` | Audio data (File object or base64 string) - required | +| `language` | `string` | Language code (e.g., "en", "es", "fr") | + +### Supported Languages + +Whisper supports many languages. Common codes include: + +| Code | Language | +|------|----------| +| `en` | English | +| `es` | Spanish | +| `fr` | French | +| `de` | German | +| `it` | Italian | +| `pt` | Portuguese | +| `ja` | Japanese | +| `ko` | Korean | +| `zh` | Chinese | +| `ru` | Russian | + +> **Tip:** Providing the correct language code improves accuracy and reduces latency. + +## Model Options + +### OpenAI Model Options + +```typescript +const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, + modelOptions: { + response_format: 'verbose_json', // Get detailed output with timestamps + temperature: 0, // Lower = more deterministic + prompt: 'Technical terms: API, SDK, CLI', // Guide transcription + }, +}) +``` + +| Option | Type | Description | +|--------|------|-------------| +| `response_format` | `string` | Output format: "json", "text", "srt", "verbose_json", "vtt" | +| `temperature` | `number` | Sampling temperature (0 to 1) | +| `prompt` | `string` | Optional text to guide transcription style | +| `include` | `string[]` | Timestamp granularity: ["word"], ["segment"], or both | + +### Response Formats + +| Format | Description | +|--------|-------------| +| `json` | Simple JSON with text | +| `text` | Plain text only | +| `srt` | SubRip subtitle format | +| `verbose_json` | Detailed JSON with timestamps and segments | +| `vtt` | WebVTT subtitle format | + +## 
Response Format + +The transcription result includes: + +```typescript +interface TranscriptionResult { + id: string // Unique identifier + model: string // Model used + text: string // Full transcribed text + language?: string // Detected/specified language + duration?: number // Audio duration in seconds + segments?: Array<{ // Timestamped segments + start: number // Start time in seconds + end: number // End time in seconds + text: string // Segment text + words?: Array<{ // Word-level timestamps + word: string + start: number + end: number + confidence?: number + }> + }> +} +``` + +## Complete Example + +```typescript +import { generateTranscription } from '@tanstack/ai' +import { openaiTranscription } from '@tanstack/ai-openai' +import { readFile } from 'fs/promises' + +async function transcribeAudio(filepath: string) { + const adapter = openaiTranscription() + + // Read the audio file + const audioBuffer = await readFile(filepath) + const audioFile = new File( + [audioBuffer], + filepath.split('/').pop()!, + { type: 'audio/mpeg' } + ) + + // Transcribe with detailed output + const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, + language: 'en', + modelOptions: { + response_format: 'verbose_json', + include: ['segment', 'word'], + }, + }) + + console.log('Full text:', result.text) + console.log('Duration:', result.duration, 'seconds') + + // Print segments with timestamps + if (result.segments) { + for (const segment of result.segments) { + console.log(`[${segment.start.toFixed(2)}s - ${segment.end.toFixed(2)}s]: ${segment.text}`) + } + } + + return result +} + +// Usage +await transcribeAudio('./meeting-recording.mp3') +``` + +## Model Availability + +### OpenAI Models + +| Model | Description | Use Case | +|-------|-------------|----------| +| `whisper-1` | Whisper large-v2 | General transcription | +| `gpt-4o-transcribe` | GPT-4o-based transcription | Higher accuracy | +| `gpt-4o-transcribe-diarize` | 
With speaker diarization | Multi-speaker audio | +| `gpt-4o-mini-transcribe` | Faster, lighter model | Cost-effective | + +### Supported Audio Formats + +OpenAI supports these audio formats: + +- `mp3` - MPEG Audio Layer 3 +- `mp4` - MPEG-4 Audio +- `mpeg` - MPEG Audio +- `mpga` - MPEG Audio +- `m4a` - MPEG-4 Audio +- `wav` - Waveform Audio +- `webm` - WebM Audio +- `flac` - Free Lossless Audio Codec +- `ogg` - Ogg Vorbis + +> **Note:** Maximum file size is 25 MB. + +## Browser Usage + +### Recording and Transcribing + +```typescript +async function recordAndTranscribe() { + // Request microphone access + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }) + const mediaRecorder = new MediaRecorder(stream) + const chunks: Blob[] = [] + + mediaRecorder.ondataavailable = (e) => chunks.push(e.data) + + mediaRecorder.onstop = async () => { + const audioBlob = new Blob(chunks, { type: 'audio/webm' }) + const audioFile = new File([audioBlob], 'recording.webm', { type: 'audio/webm' }) + + // Send to your API endpoint for transcription + const formData = new FormData() + formData.append('audio', audioFile) + + const response = await fetch('/api/transcribe', { + method: 'POST', + body: formData, + }) + + const result = await response.json() + console.log('Transcription:', result.text) + } + + // Start recording + mediaRecorder.start() + + // Stop after 10 seconds + setTimeout(() => mediaRecorder.stop(), 10000) +} +``` + +### Server API Endpoint + +```typescript +// api/transcribe.ts +import { generateTranscription } from '@tanstack/ai' +import { openaiTranscription } from '@tanstack/ai-openai' + +export async function POST(request: Request) { + const formData = await request.formData() + const audioFile = formData.get('audio') as File + + const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, + }) + + return Response.json(result) +} +``` + +## Error Handling + +```typescript +try { + const result = 
await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, + }) +} catch (error) { + if (error.message.includes('Invalid file format')) { + console.error('Unsupported audio format') + } else if (error.message.includes('File too large')) { + console.error('Audio file exceeds 25 MB limit') + } else if (error.message.includes('Audio file is too short')) { + console.error('Audio must be at least 0.1 seconds') + } else { + console.error('Transcription error:', error.message) + } +} +``` + +## Environment Variables + +The transcription adapter uses: + +- `OPENAI_API_KEY`: Your OpenAI API key + +## Explicit API Keys + +```typescript +import { createOpenaiTranscription } from '@tanstack/ai-openai' + +const adapter = createOpenaiTranscription('your-openai-api-key') +``` + +## Best Practices + +1. **Audio Quality**: Better audio quality leads to more accurate transcriptions. Reduce background noise when possible. + +2. **Language Specification**: Always specify the language if known—this improves accuracy and speed. + +3. **File Size**: Keep audio files under 25 MB. For longer recordings, split into chunks. + +4. **Format Selection**: MP3 offers a good balance of quality and size. Use WAV or FLAC for highest quality. + +5. **Prompting**: Use the `prompt` option to provide context or expected vocabulary (e.g., technical terms, names). + +6. **Timestamps**: Request `verbose_json` format and enable `include: ['word', 'segment']` when you need timing information for captions or synchronization. + diff --git a/docs/guides/tree-shaking.md b/docs/guides/tree-shaking.md new file mode 100644 index 00000000..5829c74e --- /dev/null +++ b/docs/guides/tree-shaking.md @@ -0,0 +1,290 @@ +--- +title: Tree-Shaking +id: tree-shaking +order: 17 +--- + +# Tree-Shaking & Bundle Optimization + +TanStack AI is designed from the ground up for maximum tree-shakeability. 
The entire system—from activity functions to adapters—uses a functional, modular architecture that ensures you only bundle the code you actually use. + +## Design Philosophy + +Instead of a monolithic API that includes everything, TanStack AI provides: + +- **Individual activity functions** - Import only the activities you need (`chat`, `summarize`, etc.) +- **Individual adapter functions** - Import only the adapters you need (`openaiText`, `openaiSummarize`, etc.) +- **Functional API design** - Pure functions that can be easily eliminated by bundlers +- **Separate modules** - Each activity and adapter lives in its own module + +This design means that if you only use `chat` with OpenAI, you won't bundle code for summarization, image generation, or other providers. + +## Activity Functions + +Each AI activity is exported as a separate function from `@tanstack/ai`: + +```ts +// Import only the activities you need +import { chat } from '@tanstack/ai' // Chat/text generation +import { summarize } from '@tanstack/ai' // Summarization +import { generateImage } from '@tanstack/ai' // Image generation +import { generateSpeech } from '@tanstack/ai' // Text-to-speech +import { generateTranscription } from '@tanstack/ai' // Audio transcription +import { generateVideo } from '@tanstack/ai' // Video generation +``` + +### Example: Chat Only + +If you only need chat functionality: + +```ts +// Only chat code is bundled +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +const stream = chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Hello!' 
}], +}) +``` + +Your bundle will **not** include: +- Summarization logic +- Image generation logic +- Other activity implementations + +## Adapter Functions + +Each provider package exports individual adapter functions for each activity type: + +### OpenAI + +```ts +import { + openaiText, // Chat/text generation + openaiSummarize, // Summarization + openaiImage, // Image generation + openaiSpeech, // Text-to-speech + openaiTranscription, // Audio transcription + openaiVideo, // Video generation +} from '@tanstack/ai-openai' +``` + +### Anthropic + +```ts +import { + anthropicText, // Chat/text generation + anthropicSummarize, // Summarization +} from '@tanstack/ai-anthropic' +``` + +### Gemini + +```ts +import { + geminiText, // Chat/text generation + geminiSummarize, // Summarization + geminiImage, // Image generation + geminiSpeech, // Text-to-speech (experimental) +} from '@tanstack/ai-gemini' +``` + +### Ollama + +```ts +import { + ollamaText, // Chat/text generation + ollamaSummarize, // Summarization +} from '@tanstack/ai-ollama' +``` + +## Complete Example + +Here's how the tree-shakeable design works in practice: + +```ts +// Only import what you need +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +// Chat generation - returns AsyncIterable +const chatResult = chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Hello!' }], +}) + +for await (const chunk of chatResult) { + console.log(chunk) +} +``` + +**What gets bundled:** +- ✅ `chat` function and its dependencies +- ✅ `openaiText` adapter and its dependencies +- ✅ Chat-specific streaming and tool handling logic + +**What doesn't get bundled:** +- ❌ `summarize` function +- ❌ `generateImage` function +- ❌ Other adapter implementations (Anthropic, Gemini, etc.) 
+- ❌ Other activity implementations + +## Using Multiple Activities + +If you need multiple activities, import only what you use: + +```ts +import { chat, summarize } from '@tanstack/ai' +import { + openaiText, + openaiSummarize +} from '@tanstack/ai-openai' + +// Each activity is independent +const chatResult = chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Hello!' }], +}) + +const summarizeResult = await summarize({ + adapter: openaiSummarize('gpt-4o-mini'), + text: 'Long text to summarize...', +}) +``` + +Each activity is in its own module, so bundlers can eliminate unused ones. + +## Type Safety + +The tree-shakeable design doesn't sacrifice type safety. Each adapter provides full type safety for its supported models: + +```ts +import { openaiText, type OpenAIChatModel } from '@tanstack/ai-openai' + +const adapter = openaiText() + +// TypeScript knows the exact models supported +const model: OpenAIChatModel = 'gpt-4o' // ✓ Valid +const model2: OpenAIChatModel = 'invalid' // ✗ Type error +``` + +## Create Options Functions + +The `create___Options` functions are also tree-shakeable: + +```ts +import { + createChatOptions, + createImageOptions +} from '@tanstack/ai' + +// Only import what you need +const chatOptions = createChatOptions({ + adapter: openaiText('gpt-4o'), +}) +``` + +## Bundle Size Benefits + +The functional, modular design provides significant bundle size benefits: + +### Importing Everything (Less Efficient) + +```ts +// ❌ Importing more than needed +import * as ai from '@tanstack/ai' +import * as openai from '@tanstack/ai-openai' + +// This bundles all exports from both packages +``` + +### Importing Only What You Need (Recommended) + +```ts +// ✅ Only what you use gets bundled +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +// You only get: +// - Chat activity implementation +// - OpenAI text adapter +// - Chat-specific dependencies +``` + +### Real-World Impact + +For a 
typical chat application: + +- **Monolithic approach**: ~200KB+ (all activities + all adapters) +- **Tree-shakeable approach**: ~50KB (only chat + one adapter) + +That's a **75% reduction** in bundle size for most applications! + +## How It Works + +The tree-shakeability is achieved through: + +1. **ES Module exports** - Each function is a named export, not a default export +2. **Separate modules** - Each activity and adapter lives in its own file +3. **No side effects** - Functions are pure and don't have module-level side effects +4. **Functional composition** - Functions compose together, allowing dead code elimination +5. **Type-only imports** - Type imports are stripped at build time + +Modern bundlers (Vite, Webpack, Rollup, esbuild) can easily eliminate unused code because: + +- Functions are statically analyzable +- No dynamic imports of unused code +- No module-level side effects +- Clear dependency graphs + +## Best Practices + +1. **Import only what you need** - Don't import entire namespaces +2. **Use specific adapter functions** - Import `openaiText` not `openai` +3. **Separate activities by route** - Different API routes can use different activities +4. 
**Lazy load when possible** - Use dynamic imports for code-split routes + +```ts +// ✅ Good - Only imports chat +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +// ❌ Bad - Imports everything +import * as ai from '@tanstack/ai' +import * as openai from '@tanstack/ai-openai' +``` + +## Adapter Types + +Each adapter type implements a specific interface: + +- `ChatAdapter` - Provides `chatStream()` method for streaming chat responses +- `SummarizeAdapter` - Provides `summarize()` method for text summarization +- `ImageAdapter` - Provides `generateImage()` method for image generation +- `TTSAdapter` - Provides `generateSpeech()` method for text-to-speech +- `TranscriptionAdapter` - Provides `generateTranscription()` method for audio transcription +- `VideoAdapter` - Provides `generateVideo()` method for video generation + +All adapters have a `kind` property that indicates their type: + +```ts +const chatAdapter = openaiText() +console.log(chatAdapter.kind) // 'text' + +const summarizeAdapter = openaiSummarize() +console.log(summarizeAdapter.kind) // 'summarize' +``` + +## Summary + +TanStack AI's tree-shakeable design means: + +- ✅ **Smaller bundles** - Only include code you actually use +- ✅ **Faster load times** - Less JavaScript to download and parse +- ✅ **Better performance** - Less code means faster execution +- ✅ **Type safety** - Full TypeScript support without runtime overhead +- ✅ **Flexibility** - Mix and match activities and adapters as needed + +The functional, modular architecture ensures that modern bundlers can eliminate unused code effectively, resulting in optimal bundle sizes for your application. 
+ diff --git a/docs/guides/video-generation.md b/docs/guides/video-generation.md new file mode 100644 index 00000000..426ec3f6 --- /dev/null +++ b/docs/guides/video-generation.md @@ -0,0 +1,334 @@ +--- +title: Video Generation +id: video-generation +order: 16 +--- + +# Video Generation (Experimental) + +> **⚠️ EXPERIMENTAL FEATURE WARNING** +> +> Video generation is an **experimental feature** that is subject to significant changes. Please read the caveats below carefully before using this feature. +> +> **Key Caveats:** +> - The API may change without notice in future versions +> - OpenAI's Sora API is in limited availability and may require organization verification +> - Video generation uses a jobs/polling architecture, which differs from other synchronous activities +> - Pricing, rate limits, and quotas may vary and are subject to change +> - Not all features described here may be available in your OpenAI account + +## Overview + +TanStack AI provides experimental support for video generation through dedicated video adapters. Unlike image generation, video generation is an **asynchronous operation** that uses a jobs/polling pattern: + +1. **Create a job** - Submit a prompt and receive a job ID +2. **Poll for status** - Check the job status until it's complete +3. 
**Retrieve the video** - Get the URL to download/view the generated video
+
+Currently supported:
+- **OpenAI**: Sora-2 and Sora-2-Pro models (when available)
+
+## Basic Usage
+
+### Creating a Video Job
+
+```typescript
+import { generateVideo } from '@tanstack/ai'
+import { openaiVideo } from '@tanstack/ai-openai'
+
+// Create a video adapter (uses OPENAI_API_KEY from environment)
+const adapter = openaiVideo()
+
+// Start a video generation job
+const { jobId, model } = await generateVideo({
+  adapter: openaiVideo('sora-2'),
+  prompt: 'A golden retriever puppy playing in a field of sunflowers',
+})
+
+console.log('Job started:', jobId)
+```
+
+### Polling for Status
+
+```typescript
+import { getVideoJobStatus } from '@tanstack/ai'
+
+// Check the status of the job
+const status = await getVideoJobStatus({
+  adapter: openaiVideo('sora-2'),
+  jobId,
+})
+
+console.log('Status:', status.status) // 'pending' | 'processing' | 'completed' | 'failed'
+console.log('Progress:', status.progress) // 0-100 (if available)
+
+if (status.status === 'failed') {
+  console.error('Error:', status.error)
+}
+```
+
+### Getting the Video URL
+
+```typescript
+import { getVideoJobStatus } from '@tanstack/ai'
+
+// Only call this after status is 'completed'
+const result = await getVideoJobStatus({
+  adapter: openaiVideo('sora-2'),
+  jobId,
+})
+
+if (result.status === 'completed' && result.url) {
+  console.log('Video URL:', result.url)
+  console.log('Expires at:', result.expiresAt)
+}
+```
+
+### Complete Example with Polling Loop
+
+```typescript
+import { generateVideo, getVideoJobStatus } from '@tanstack/ai'
+import { openaiVideo } from '@tanstack/ai-openai'
+
+// Named distinctly from the imported `generateVideo` to avoid shadowing
+// the import (which would be a duplicate declaration and make the inner
+// call recurse instead of hitting the API).
+async function generateVideoAndWait(prompt: string) {
+  const adapter = openaiVideo()
+
+  // 1. Create the job
+  const { jobId } = await generateVideo({
+    adapter: openaiVideo('sora-2'),
+    prompt,
+    size: '1280x720',
+    duration: 8, // 4, 8, or 12 seconds
+  })
+
+  console.log('Job created:', jobId)
+
+  // 2. Poll for completion
+  let status = 'pending'
+  while (status !== 'completed' && status !== 'failed') {
+    // Wait 5 seconds between polls
+    await new Promise((resolve) => setTimeout(resolve, 5000))
+
+    const result = await getVideoJobStatus({
+      adapter: openaiVideo('sora-2'),
+      jobId,
+    })
+
+    status = result.status
+    console.log(`Status: ${status}${result.progress ? ` (${result.progress}%)` : ''}`)
+
+    if (result.status === 'failed') {
+      throw new Error(result.error || 'Video generation failed')
+    }
+  }
+
+  // 3. Get the video URL
+  const result = await getVideoJobStatus({
+    adapter: openaiVideo('sora-2'),
+    jobId,
+  })
+
+  if (result.status === 'completed' && result.url) {
+    return result.url
+  }
+
+  throw new Error('Video generation failed or URL not available')
+}
+
+// Usage
+const videoUrl = await generateVideoAndWait('A cat playing piano in a jazz bar')
+console.log('Video ready:', videoUrl)
+```
+
+## Options
+
+### Job Creation Options
+
+| Option | Type | Description |
+|--------|------|-------------|
+| `adapter` | `VideoAdapter` | Video adapter instance with model (required) |
+| `prompt` | `string` | Text description of the video to generate (required) |
+| `size` | `string` | Video resolution in WIDTHxHEIGHT format |
+| `duration` | `number` | Video duration in seconds (maps to `seconds` parameter in API) |
+| `modelOptions?` | `object` | Model-specific options (renamed from `providerOptions`) |
+
+### Supported Sizes
+
+Based on [OpenAI API docs](https://platform.openai.com/docs/api-reference/videos/create):
+
+| Size | Description |
+|------|-------------|
+| `1280x720` | 720p landscape (16:9) - default |
+| `720x1280` | 720p portrait (9:16) |
+| `1792x1024` | Wide landscape |
+| `1024x1792` | Tall portrait |
+
+### Supported Durations
+
+The API uses the `seconds` parameter. 
Allowed values: + +- `4` seconds +- `8` seconds (default) +- `12` seconds + +## Model Options + +### OpenAI Model Options + +Based on the [OpenAI Sora API](https://platform.openai.com/docs/api-reference/videos/create): + +```typescript +const { jobId } = await generateVideo({ + adapter: openaiVideo('sora-2'), + prompt: 'A beautiful sunset over the ocean', + size: '1280x720', // '1280x720', '720x1280', '1792x1024', '1024x1792' + duration: 8, // 4, 8, or 12 seconds + modelOptions: { + size: '1280x720', // Alternative way to specify size + seconds: 8, // Alternative way to specify duration + } +}) +``` + +## Response Types + +### VideoJobResult (from create) + +```typescript +interface VideoJobResult { + jobId: string // Unique job identifier for polling + model: string // Model used for generation +} +``` + +### VideoStatusResult (from status) + +```typescript +interface VideoStatusResult { + jobId: string + status: 'pending' | 'processing' | 'completed' | 'failed' + progress?: number // 0-100, if available + error?: string // Error message if failed +} +``` + +### VideoUrlResult (from url) + +```typescript +interface VideoUrlResult { + jobId: string + url: string // URL to download/stream the video + expiresAt?: Date // When the URL expires +} +``` + +## Model Variants + +| Model | Description | Use Case | +|-------|-------------|----------| +| `sora-2` | Faster generation, good quality | Rapid iteration, prototyping | +| `sora-2-pro` | Higher quality, slower | Production-quality output | + +## Error Handling + +Video generation can fail for various reasons. Always implement proper error handling: + +```typescript +try { + const { jobId } = await generateVideo({ + adapter: openaiVideo('sora-2'), + prompt: 'A scene', + }) + + // Poll for status... 
+ const status = await getVideoJobStatus({ + adapter: openaiVideo('sora-2'), + jobId, + }) + + if (status.status === 'failed') { + console.error('Generation failed:', status.error) + // Handle failure (e.g., retry, notify user) + } +} catch (error) { + if (error.message.includes('Video generation API is not available')) { + console.error('Sora API access may be required. Check your OpenAI account.') + } else if (error.message.includes('rate limit')) { + console.error('Rate limited. Please wait before trying again.') + } else { + console.error('Unexpected error:', error) + } +} +``` + +## Rate Limits and Quotas + +> **⚠️ Note:** Rate limits and quotas for video generation are subject to change and may vary by account tier. + +Typical considerations: +- Video generation is computationally expensive +- Concurrent job limits may apply +- Monthly generation quotas may exist +- Longer/higher-quality videos consume more quota + +Check the [OpenAI documentation](https://platform.openai.com/docs) for current limits. + +## Environment Variables + +The video adapter uses the same environment variable as other OpenAI adapters: + +- `OPENAI_API_KEY`: Your OpenAI API key + +## Explicit API Keys + +For production use or when you need explicit control: + +```typescript +import { createOpenaiVideo } from '@tanstack/ai-openai' + +const adapter = createOpenaiVideo('your-openai-api-key') +``` + +## Differences from Image Generation + +| Aspect | Image Generation | Video Generation | +|--------|-----------------|------------------| +| API Type | Synchronous | Jobs/Polling | +| Return Type | `ImageGenerationResult` | `VideoJobResult` → `VideoStatusResult` → `VideoUrlResult` | +| Wait Time | Seconds | Minutes | +| Multiple Outputs | `numberOfImages` option | Not supported | +| Options Field | `prompt`, `size`, `numberOfImages` | `prompt`, `size`, `duration` | + +## Known Limitations + +> **⚠️ These limitations are subject to change as the feature evolves.** + +1. 
**API Availability**: The Sora API may not be available in all OpenAI accounts +2. **Generation Time**: Video generation can take several minutes +3. **URL Expiration**: Generated video URLs may expire after a certain period +4. **No Real-time Progress**: Progress updates may be limited or delayed +5. **Audio Limitations**: Audio generation support may be limited +6. **Prompt Length**: Long prompts may be truncated + +## Best Practices + +1. **Implement Timeouts**: Set reasonable timeouts for the polling loop +2. **Handle Failures Gracefully**: Have fallback behavior for failed generations +3. **Cache URLs**: Store video URLs and check expiration before re-fetching +4. **User Feedback**: Show clear progress indicators during generation +5. **Validate Prompts**: Check prompt length and content before submission +6. **Monitor Usage**: Track generation usage to avoid hitting quotas + +## Future Considerations + +This feature is experimental. Future versions may include: + +- Additional video models and providers +- Streaming progress updates +- Video editing and manipulation +- Audio track generation +- Batch video generation +- Custom style/aesthetic controls + +Stay tuned to the [TanStack AI changelog](https://github.com/TanStack/ai/blob/main/CHANGELOG.md) for updates. 
+ diff --git a/docs/protocol/http-stream-protocol.md b/docs/protocol/http-stream-protocol.md index 8330cd42..0422f2cb 100644 --- a/docs/protocol/http-stream-protocol.md +++ b/docs/protocol/http-stream-protocol.md @@ -174,16 +174,15 @@ TanStack AI doesn't provide a built-in NDJSON formatter, but you can create one ```typescript import { chat } from '@tanstack/ai'; -import { openai } from '@tanstack/ai-openai'; +import { openaiText } from '@tanstack/ai-openai'; export async function POST(request: Request) { const { messages } = await request.json(); const encoder = new TextEncoder(); const stream = chat({ - adapter: openai(), + adapter: openaiText('gpt-4o'), messages, - model: 'gpt-4o', }); const readableStream = new ReadableStream({ @@ -223,7 +222,7 @@ export async function POST(request: Request) { ```typescript import express from 'express'; import { chat } from '@tanstack/ai'; -import { openai } from '@tanstack/ai-openai'; +import { openaiText } from '@tanstack/ai-openai'; const app = express(); app.use(express.json()); @@ -237,9 +236,8 @@ app.post('/api/chat', async (req, res) => { try { const stream = chat({ - adapter: openai(), + adapter: openaiText('gpt-4o'), messages, - model: 'gpt-4o', }); for await (const chunk of stream) { diff --git a/docs/protocol/sse-protocol.md b/docs/protocol/sse-protocol.md index 1f9f3d9e..f8709d4c 100644 --- a/docs/protocol/sse-protocol.md +++ b/docs/protocol/sse-protocol.md @@ -168,15 +168,14 @@ TanStack AI provides `toServerSentEventsStream()` and `toStreamResponse()` utili ```typescript import { chat, toStreamResponse } from '@tanstack/ai'; -import { openai } from '@tanstack/ai-openai'; +import { openaiText } from '@tanstack/ai-openai'; export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: openai(), + adapter: openaiText('gpt-4o'), messages, - model: 'gpt-4o', }); // Automatically converts StreamChunks to SSE format @@ -224,7 +223,7 @@ export async function 
POST(request: Request) { const stream = new ReadableStream({ async start(controller) { try { - for await (const chunk of chat({ ... })) { + for await (const chunk of chat({ adapter: openaiText('gpt-4o'), messages })) { const sseData = `data: ${JSON.stringify(chunk)}\n\n`; controller.enqueue(encoder.encode(sseData)); } diff --git a/docs/reference/classes/BaseAdapter.md b/docs/reference/classes/BaseAdapter.md deleted file mode 100644 index 1127e644..00000000 --- a/docs/reference/classes/BaseAdapter.md +++ /dev/null @@ -1,310 +0,0 @@ ---- -id: BaseAdapter -title: BaseAdapter ---- - -# Abstract Class: BaseAdapter\ - -Defined in: [base-adapter.ts:26](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L26) - -Base adapter class with support for endpoint-specific models and provider options. - -Generic parameters: -- TChatModels: Models that support chat/text completion -- TEmbeddingModels: Models that support embeddings -- TChatProviderOptions: Provider-specific options for chat endpoint -- TEmbeddingProviderOptions: Provider-specific options for embedding endpoint -- TModelProviderOptionsByName: Provider-specific options for model by name -- TModelInputModalitiesByName: Map from model name to its supported input modalities -- TMessageMetadataByModality: Map from modality type to adapter-specific metadata types - -## Type Parameters - -### TChatModels - -`TChatModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\> - -### TEmbeddingModels - -`TEmbeddingModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\> - -### TChatProviderOptions - -`TChatProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### TEmbeddingProviderOptions - -`TEmbeddingProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### TModelProviderOptionsByName - -`TModelProviderOptionsByName` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### 
TModelInputModalitiesByName - -`TModelInputModalitiesByName` *extends* `Record`\<`string`, `ReadonlyArray`\<[`Modality`](../type-aliases/Modality.md)\>\> = `Record`\<`string`, `ReadonlyArray`\<[`Modality`](../type-aliases/Modality.md)\>\> - -### TMessageMetadataByModality - -`TMessageMetadataByModality` *extends* `object` = [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md) - -## Implements - -- [`AIAdapter`](../interfaces/AIAdapter.md)\<`TChatModels`, `TEmbeddingModels`, `TChatProviderOptions`, `TEmbeddingProviderOptions`, `TModelProviderOptionsByName`, `TModelInputModalitiesByName`, `TMessageMetadataByModality`\> - -## Constructors - -### Constructor - -```ts -new BaseAdapter(config): BaseAdapter; -``` - -Defined in: [base-adapter.ts:70](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L70) - -#### Parameters - -##### config - -[`AIAdapterConfig`](../interfaces/AIAdapterConfig.md) = `{}` - -#### Returns - -`BaseAdapter`\<`TChatModels`, `TEmbeddingModels`, `TChatProviderOptions`, `TEmbeddingProviderOptions`, `TModelProviderOptionsByName`, `TModelInputModalitiesByName`, `TMessageMetadataByModality`\> - -## Properties - -### \_chatProviderOptions? - -```ts -optional _chatProviderOptions: TChatProviderOptions; -``` - -Defined in: [base-adapter.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L61) - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`_chatProviderOptions`](../interfaces/AIAdapter.md#_chatprovideroptions) - -*** - -### \_embeddingProviderOptions? 
- -```ts -optional _embeddingProviderOptions: TEmbeddingProviderOptions; -``` - -Defined in: [base-adapter.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L62) - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`_embeddingProviderOptions`](../interfaces/AIAdapter.md#_embeddingprovideroptions) - -*** - -### \_messageMetadataByModality? - -```ts -optional _messageMetadataByModality: TMessageMetadataByModality; -``` - -Defined in: [base-adapter.ts:68](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L68) - -Type-only map from modality type to adapter-specific metadata types. -Used to provide type-safe autocomplete for metadata on content parts. - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`_messageMetadataByModality`](../interfaces/AIAdapter.md#_messagemetadatabymodality) - -*** - -### \_modelInputModalitiesByName? - -```ts -optional _modelInputModalitiesByName: TModelInputModalitiesByName; -``` - -Defined in: [base-adapter.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L66) - -Type-only map from model name to its supported input modalities. -Used by the core AI types to narrow ContentPart types based on the selected model. -Must be provided by all adapters. - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`_modelInputModalitiesByName`](../interfaces/AIAdapter.md#_modelinputmodalitiesbyname) - -*** - -### \_modelProviderOptionsByName - -```ts -_modelProviderOptionsByName: TModelProviderOptionsByName; -``` - -Defined in: [base-adapter.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L64) - -Type-only map from model name to its specific provider options. -Used by the core AI types to narrow providerOptions based on the selected model. -Must be provided by all adapters. 
- -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`_modelProviderOptionsByName`](../interfaces/AIAdapter.md#_modelprovideroptionsbyname) - -*** - -### \_providerOptions? - -```ts -optional _providerOptions: TChatProviderOptions; -``` - -Defined in: [base-adapter.ts:60](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L60) - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`_providerOptions`](../interfaces/AIAdapter.md#_provideroptions) - -*** - -### config - -```ts -protected config: AIAdapterConfig; -``` - -Defined in: [base-adapter.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L57) - -*** - -### embeddingModels? - -```ts -optional embeddingModels: TEmbeddingModels; -``` - -Defined in: [base-adapter.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L56) - -Models that support embeddings - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`embeddingModels`](../interfaces/AIAdapter.md#embeddingmodels) - -*** - -### models - -```ts -abstract models: TChatModels; -``` - -Defined in: [base-adapter.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L55) - -Models that support chat/text completion - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`models`](../interfaces/AIAdapter.md#models) - -*** - -### name - -```ts -abstract name: string; -``` - -Defined in: [base-adapter.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L54) - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`name`](../interfaces/AIAdapter.md#name) - -## Methods - -### chatStream() - -```ts -abstract chatStream(options): AsyncIterable; -``` - -Defined in: [base-adapter.ts:74](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L74) - -#### Parameters - -##### options 
- -[`ChatOptions`](../interfaces/ChatOptions.md) - -#### Returns - -`AsyncIterable`\<[`StreamChunk`](../type-aliases/StreamChunk.md)\> - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`chatStream`](../interfaces/AIAdapter.md#chatstream) - -*** - -### createEmbeddings() - -```ts -abstract createEmbeddings(options): Promise; -``` - -Defined in: [base-adapter.ts:79](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L79) - -#### Parameters - -##### options - -[`EmbeddingOptions`](../interfaces/EmbeddingOptions.md) - -#### Returns - -`Promise`\<[`EmbeddingResult`](../interfaces/EmbeddingResult.md)\> - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`createEmbeddings`](../interfaces/AIAdapter.md#createembeddings) - -*** - -### generateId() - -```ts -protected generateId(): string; -``` - -Defined in: [base-adapter.ts:81](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L81) - -#### Returns - -`string` - -*** - -### summarize() - -```ts -abstract summarize(options): Promise; -``` - -Defined in: [base-adapter.ts:76](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L76) - -#### Parameters - -##### options - -[`SummarizationOptions`](../interfaces/SummarizationOptions.md) - -#### Returns - -`Promise`\<[`SummarizationResult`](../interfaces/SummarizationResult.md)\> - -#### Implementation of - -[`AIAdapter`](../interfaces/AIAdapter.md).[`summarize`](../interfaces/AIAdapter.md#summarize) diff --git a/docs/reference/classes/BatchStrategy.md b/docs/reference/classes/BatchStrategy.md index a437b0aa..555db34f 100644 --- a/docs/reference/classes/BatchStrategy.md +++ b/docs/reference/classes/BatchStrategy.md @@ -5,7 +5,7 @@ title: BatchStrategy # Class: BatchStrategy -Defined in: [stream/strategies.ts:34](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L34) +Defined in: 
[activities/chat/stream/strategies.ts:34](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L34) Batch Strategy - emit every N chunks Useful for reducing UI update frequency @@ -22,7 +22,7 @@ Useful for reducing UI update frequency new BatchStrategy(batchSize): BatchStrategy; ``` -Defined in: [stream/strategies.ts:37](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L37) +Defined in: [activities/chat/stream/strategies.ts:37](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L37) #### Parameters @@ -42,7 +42,7 @@ Defined in: [stream/strategies.ts:37](https://github.com/TanStack/ai/blob/main/p reset(): void; ``` -Defined in: [stream/strategies.ts:48](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L48) +Defined in: [activities/chat/stream/strategies.ts:48](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L48) Optional: Reset strategy state (called when streaming starts) @@ -62,7 +62,7 @@ Optional: Reset strategy state (called when streaming starts) shouldEmit(_chunk, _accumulated): boolean; ``` -Defined in: [stream/strategies.ts:39](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L39) +Defined in: [activities/chat/stream/strategies.ts:39](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L39) Called for each text chunk received diff --git a/docs/reference/classes/CompositeStrategy.md b/docs/reference/classes/CompositeStrategy.md index 5c6f71e3..2b4e0347 100644 --- a/docs/reference/classes/CompositeStrategy.md +++ b/docs/reference/classes/CompositeStrategy.md @@ -5,7 +5,7 @@ title: CompositeStrategy # Class: CompositeStrategy -Defined in: 
[stream/strategies.ts:68](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L68) +Defined in: [activities/chat/stream/strategies.ts:68](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L68) Composite Strategy - combine multiple strategies (OR logic) Emits if ANY strategy says to emit @@ -22,7 +22,7 @@ Emits if ANY strategy says to emit new CompositeStrategy(strategies): CompositeStrategy; ``` -Defined in: [stream/strategies.ts:69](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L69) +Defined in: [activities/chat/stream/strategies.ts:69](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L69) #### Parameters @@ -42,7 +42,7 @@ Defined in: [stream/strategies.ts:69](https://github.com/TanStack/ai/blob/main/p reset(): void; ``` -Defined in: [stream/strategies.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L75) +Defined in: [activities/chat/stream/strategies.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L75) Optional: Reset strategy state (called when streaming starts) @@ -62,7 +62,7 @@ Optional: Reset strategy state (called when streaming starts) shouldEmit(chunk, accumulated): boolean; ``` -Defined in: [stream/strategies.ts:71](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L71) +Defined in: [activities/chat/stream/strategies.ts:71](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L71) Called for each text chunk received diff --git a/docs/reference/classes/ImmediateStrategy.md b/docs/reference/classes/ImmediateStrategy.md index fcf89ea0..7d3504e0 100644 --- a/docs/reference/classes/ImmediateStrategy.md +++ b/docs/reference/classes/ImmediateStrategy.md @@ -5,7 +5,7 @@ title: 
ImmediateStrategy # Class: ImmediateStrategy -Defined in: [stream/strategies.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L12) +Defined in: [activities/chat/stream/strategies.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L12) Immediate Strategy - emit on every chunk (default behavior) @@ -33,7 +33,7 @@ new ImmediateStrategy(): ImmediateStrategy; shouldEmit(_chunk, _accumulated): boolean; ``` -Defined in: [stream/strategies.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L13) +Defined in: [activities/chat/stream/strategies.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L13) Called for each text chunk received diff --git a/docs/reference/classes/PartialJSONParser.md b/docs/reference/classes/PartialJSONParser.md index 59fdcfb0..d60510d4 100644 --- a/docs/reference/classes/PartialJSONParser.md +++ b/docs/reference/classes/PartialJSONParser.md @@ -5,7 +5,7 @@ title: PartialJSONParser # Class: PartialJSONParser -Defined in: [stream/json-parser.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/json-parser.ts#L25) +Defined in: [activities/chat/stream/json-parser.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/json-parser.ts#L25) Partial JSON Parser implementation using the partial-json library This parser can handle incomplete JSON strings during streaming @@ -34,7 +34,7 @@ new PartialJSONParser(): PartialJSONParser; parse(jsonString): any; ``` -Defined in: [stream/json-parser.ts:31](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/json-parser.ts#L31) +Defined in: [activities/chat/stream/json-parser.ts:31](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/json-parser.ts#L31) Parse a potentially 
incomplete JSON string diff --git a/docs/reference/classes/PunctuationStrategy.md b/docs/reference/classes/PunctuationStrategy.md index f5e1ebe4..cdc403db 100644 --- a/docs/reference/classes/PunctuationStrategy.md +++ b/docs/reference/classes/PunctuationStrategy.md @@ -5,7 +5,7 @@ title: PunctuationStrategy # Class: PunctuationStrategy -Defined in: [stream/strategies.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L22) +Defined in: [activities/chat/stream/strategies.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L22) Punctuation Strategy - emit when chunk contains punctuation Useful for natural text flow in UI @@ -34,7 +34,7 @@ new PunctuationStrategy(): PunctuationStrategy; shouldEmit(chunk, _accumulated): boolean; ``` -Defined in: [stream/strategies.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L25) +Defined in: [activities/chat/stream/strategies.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L25) Called for each text chunk received diff --git a/docs/reference/classes/StreamProcessor.md b/docs/reference/classes/StreamProcessor.md index 8acdf904..8c13ab36 100644 --- a/docs/reference/classes/StreamProcessor.md +++ b/docs/reference/classes/StreamProcessor.md @@ -5,7 +5,7 @@ title: StreamProcessor # Class: StreamProcessor -Defined in: [stream/processor.ts:171](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L171) +Defined in: [activities/chat/stream/processor.ts:168](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L168) StreamProcessor - State machine for processing AI response streams @@ -31,7 +31,7 @@ Tool call completion is detected when: new StreamProcessor(options): StreamProcessor; ``` -Defined in: 
[stream/processor.ts:200](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L200) +Defined in: [activities/chat/stream/processor.ts:197](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L197) #### Parameters @@ -51,7 +51,7 @@ Defined in: [stream/processor.ts:200](https://github.com/TanStack/ai/blob/main/p addToolApprovalResponse(approvalId, approved): void; ``` -Defined in: [stream/processor.ts:314](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L314) +Defined in: [activities/chat/stream/processor.ts:311](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L311) Add an approval response (called by client after handling onApprovalRequest) @@ -80,7 +80,7 @@ addToolResult( error?): void; ``` -Defined in: [stream/processor.ts:270](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L270) +Defined in: [activities/chat/stream/processor.ts:267](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L267) Add a tool result (called by client after handling onToolCall) @@ -110,7 +110,7 @@ Add a tool result (called by client after handling onToolCall) addUserMessage(content): UIMessage; ``` -Defined in: [stream/processor.ts:228](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L228) +Defined in: [activities/chat/stream/processor.ts:225](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L225) Add a user message to the conversation @@ -132,7 +132,7 @@ Add a user message to the conversation areAllToolsComplete(): boolean; ``` -Defined in: [stream/processor.ts:345](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L345) +Defined in: 
[activities/chat/stream/processor.ts:342](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L342) Check if all tool calls in the last assistant message are complete Useful for auto-continue logic @@ -149,7 +149,7 @@ Useful for auto-continue logic clearMessages(): void; ``` -Defined in: [stream/processor.ts:377](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L377) +Defined in: [activities/chat/stream/processor.ts:374](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L374) Clear all messages @@ -165,7 +165,7 @@ Clear all messages finalizeStream(): void; ``` -Defined in: [stream/processor.ts:951](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L951) +Defined in: [activities/chat/stream/processor.ts:948](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L948) Finalize the stream - complete all pending operations @@ -181,7 +181,7 @@ Finalize the stream - complete all pending operations getMessages(): UIMessage[]; ``` -Defined in: [stream/processor.ts:337](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L337) +Defined in: [activities/chat/stream/processor.ts:334](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L334) Get current messages @@ -197,7 +197,7 @@ Get current messages getRecording(): ChunkRecording | null; ``` -Defined in: [stream/processor.ts:1037](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1037) +Defined in: [activities/chat/stream/processor.ts:1034](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1034) Get the current recording @@ -213,7 +213,7 @@ Get the current recording getState(): ProcessorState; ``` -Defined in: 
[stream/processor.ts:1010](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1010) +Defined in: [activities/chat/stream/processor.ts:1007](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1007) Get current processor state (legacy) @@ -229,7 +229,7 @@ Get current processor state (legacy) process(stream): Promise; ``` -Defined in: [stream/processor.ts:390](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L390) +Defined in: [activities/chat/stream/processor.ts:387](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L387) Process a stream and emit events through handlers @@ -251,7 +251,7 @@ Process a stream and emit events through handlers processChunk(chunk): void; ``` -Defined in: [stream/processor.ts:418](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L418) +Defined in: [activities/chat/stream/processor.ts:415](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L415) Process a single chunk from the stream @@ -273,7 +273,7 @@ Process a single chunk from the stream removeMessagesAfter(index): void; ``` -Defined in: [stream/processor.ts:369](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L369) +Defined in: [activities/chat/stream/processor.ts:366](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L366) Remove messages after a certain index (for reload/retry) @@ -295,7 +295,7 @@ Remove messages after a certain index (for reload/retry) reset(): void; ``` -Defined in: [stream/processor.ts:1060](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1060) +Defined in: 
[activities/chat/stream/processor.ts:1057](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1057) Full reset (including messages) @@ -311,7 +311,7 @@ Full reset (including messages) setMessages(messages): void; ``` -Defined in: [stream/processor.ts:220](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L220) +Defined in: [activities/chat/stream/processor.ts:217](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L217) Set the messages array (e.g., from persisted state) @@ -333,7 +333,7 @@ Set the messages array (e.g., from persisted state) startAssistantMessage(): string; ``` -Defined in: [stream/processor.ts:246](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L246) +Defined in: [activities/chat/stream/processor.ts:243](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L243) Start streaming a new assistant message Returns the message ID @@ -350,7 +350,7 @@ Returns the message ID startRecording(): void; ``` -Defined in: [stream/processor.ts:1024](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1024) +Defined in: [activities/chat/stream/processor.ts:1021](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1021) Start recording chunks @@ -369,7 +369,7 @@ toModelMessages(): ModelMessage< | null>[]; ``` -Defined in: [stream/processor.ts:326](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L326) +Defined in: [activities/chat/stream/processor.ts:323](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L323) Get the conversation as ModelMessages (for sending to LLM) @@ -388,7 +388,7 @@ Get the conversation as ModelMessages (for sending to LLM) static 
replay(recording, options?): Promise; ``` -Defined in: [stream/processor.ts:1069](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1069) +Defined in: [activities/chat/stream/processor.ts:1066](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1066) Replay a recording through the processor diff --git a/docs/reference/classes/ToolCallManager.md b/docs/reference/classes/ToolCallManager.md index 7b0c7894..dc52cb47 100644 --- a/docs/reference/classes/ToolCallManager.md +++ b/docs/reference/classes/ToolCallManager.md @@ -5,7 +5,7 @@ title: ToolCallManager # Class: ToolCallManager -Defined in: [tools/tool-calls.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L51) +Defined in: [activities/chat/tools/tool-calls.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L51) Manages tool call accumulation and execution for the chat() method's automatic tool execution loop. 
@@ -47,7 +47,7 @@ if (manager.hasToolCalls()) { new ToolCallManager(tools): ToolCallManager; ``` -Defined in: [tools/tool-calls.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L55) +Defined in: [activities/chat/tools/tool-calls.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L55) #### Parameters @@ -67,7 +67,7 @@ readonly [`Tool`](../interfaces/Tool.md)\<`ZodType`\<`unknown`, `unknown`, `$Zod addToolCallChunk(chunk): void; ``` -Defined in: [tools/tool-calls.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L63) +Defined in: [activities/chat/tools/tool-calls.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L63) Add a tool call chunk to the accumulator Handles streaming tool calls by accumulating arguments @@ -126,7 +126,7 @@ Handles streaming tool calls by accumulating arguments clear(): void; ``` -Defined in: [tools/tool-calls.ts:208](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L208) +Defined in: [activities/chat/tools/tool-calls.ts:208](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L208) Clear the tool calls map for the next iteration @@ -145,7 +145,7 @@ executeTools(doneChunk): AsyncGenerator[], void>; ``` -Defined in: [tools/tool-calls.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L121) +Defined in: [activities/chat/tools/tool-calls.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L121) Execute all tool calls and return tool result messages Also yields tool_result chunks for streaming @@ -171,7 +171,7 @@ Also yields tool_result chunks for streaming getToolCalls(): ToolCall[]; ``` -Defined in: 
[tools/tool-calls.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L111) +Defined in: [activities/chat/tools/tool-calls.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L111) Get all complete tool calls (filtered for valid ID and name) @@ -187,7 +187,7 @@ Get all complete tool calls (filtered for valid ID and name) hasToolCalls(): boolean; ``` -Defined in: [tools/tool-calls.ts:104](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L104) +Defined in: [activities/chat/tools/tool-calls.ts:104](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts#L104) Check if there are any complete tool calls to execute diff --git a/docs/reference/classes/WordBoundaryStrategy.md b/docs/reference/classes/WordBoundaryStrategy.md index 985ce4f6..3f54ff3d 100644 --- a/docs/reference/classes/WordBoundaryStrategy.md +++ b/docs/reference/classes/WordBoundaryStrategy.md @@ -5,7 +5,7 @@ title: WordBoundaryStrategy # Class: WordBoundaryStrategy -Defined in: [stream/strategies.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L57) +Defined in: [activities/chat/stream/strategies.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L57) Word Boundary Strategy - emit at word boundaries Prevents cutting words in half @@ -34,7 +34,7 @@ new WordBoundaryStrategy(): WordBoundaryStrategy; shouldEmit(chunk, _accumulated): boolean; ``` -Defined in: [stream/strategies.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/strategies.ts#L58) +Defined in: [activities/chat/stream/strategies.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/strategies.ts#L58) Called for each text chunk received diff --git 
a/docs/reference/functions/chat.md b/docs/reference/functions/chat.md index 16934d76..e41d36a7 100644 --- a/docs/reference/functions/chat.md +++ b/docs/reference/functions/chat.md @@ -6,50 +6,91 @@ title: chat # Function: chat() ```ts -function chat(options): AsyncIterable; +function chat(options): TextActivityResult; ``` -Defined in: [core/chat.ts:741](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/chat.ts#L741) +Defined in: [activities/chat/index.ts:945](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/index.ts#L945) -Standalone chat streaming function with type inference from adapter -Returns an async iterable of StreamChunks for streaming responses -Includes automatic tool execution loop +Text activity - handles agentic text generation, one-shot text generation, and agentic structured output. + +This activity supports four modes: +1. **Streaming agentic text**: Stream responses with automatic tool execution +2. **Streaming one-shot text**: Simple streaming request/response without tools +3. **Non-streaming text**: Returns collected text as a string (stream: false) +4. 
**Agentic structured output**: Run tools, then return structured data ## Type Parameters ### TAdapter -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `any`, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> +`TAdapter` *extends* `AnyTextAdapter` + +### TSchema + +`TSchema` *extends* + \| `ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\> + \| `undefined` = `undefined` -### TModel +### TStream -`TModel` *extends* `any` +`TStream` *extends* `boolean` = `true` ## Parameters ### options -[`ChatStreamOptionsForModel`](../type-aliases/ChatStreamOptionsForModel.md)\<`TAdapter`, `TModel`\> - -Chat options +`TextActivityOptions`\<`TAdapter`, `TSchema`, `TStream`\> ## Returns -`AsyncIterable`\<[`StreamChunk`](../type-aliases/StreamChunk.md)\> +`TextActivityResult`\<`TSchema`, `TStream`\> -## Example +## Examples -```typescript -const stream = chat({ - adapter: openai(), - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Hello!' }], - tools: [weatherTool], // Optional: auto-executed when called -}); - -for await (const chunk of stream) { +```ts +import { chat } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' + +for await (const chunk of chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'What is the weather?' }], + tools: [weatherTool] +})) { if (chunk.type === 'content') { - console.log(chunk.delta); + console.log(chunk.delta) } } ``` + +```ts +for await (const chunk of chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Hello!' }] +})) { + console.log(chunk) +} +``` + +```ts +const text = await chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Hello!' 
}], + stream: false +}) +// text is a string with the full response +``` + +```ts +import { z } from 'zod' + +const result = await chat({ + adapter: openaiText('gpt-4o'), + messages: [{ role: 'user', content: 'Research and summarize the topic' }], + tools: [researchTool, analyzeTool], + outputSchema: z.object({ + summary: z.string(), + keyPoints: z.array(z.string()) + }) +}) +// result is { summary: string, keyPoints: string[] } +``` diff --git a/docs/reference/functions/chatOptions.md b/docs/reference/functions/chatOptions.md deleted file mode 100644 index d776680b..00000000 --- a/docs/reference/functions/chatOptions.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: chatOptions -title: chatOptions ---- - -# Function: chatOptions() - -```ts -function chatOptions(options): Omit, "model" | "providerOptions" | "messages" | "abortController"> & object; -``` - -Defined in: [utilities/chat-options.ts:3](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/chat-options.ts#L3) - -## Type Parameters - -### TAdapter - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `Record`\<`string`, readonly [`Modality`](../type-aliases/Modality.md)[]\>, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> - -### TModel - -`TModel` *extends* `any` - -## Parameters - -### options - -`Omit`\<[`ChatStreamOptionsUnion`](../type-aliases/ChatStreamOptionsUnion.md)\<`TAdapter`\>, `"model"` \| `"providerOptions"` \| `"messages"` \| `"abortController"`\> & `object` - -## Returns - -`Omit`\<[`ChatStreamOptionsUnion`](../type-aliases/ChatStreamOptionsUnion.md)\<`TAdapter`\>, `"model"` \| `"providerOptions"` \| `"messages"` \| `"abortController"`\> & `object` diff --git a/docs/reference/functions/combineStrategies.md b/docs/reference/functions/combineStrategies.md index 454a0f33..618f93a9 100644 --- a/docs/reference/functions/combineStrategies.md +++ 
b/docs/reference/functions/combineStrategies.md @@ -9,7 +9,7 @@ title: combineStrategies function combineStrategies(strategies): AgentLoopStrategy; ``` -Defined in: [utilities/agent-loop-strategies.ts:79](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/agent-loop-strategies.ts#L79) +Defined in: [activities/chat/agent-loop-strategies.ts:79](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/agent-loop-strategies.ts#L79) Creates a strategy that combines multiple strategies with AND logic All strategies must return true to continue @@ -32,7 +32,7 @@ AgentLoopStrategy that continues only if all strategies return true ```typescript const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: "gpt-4o", messages: [...], tools: [weatherTool], diff --git a/docs/reference/functions/convertMessagesToModelMessages.md b/docs/reference/functions/convertMessagesToModelMessages.md index 00224ace..7ba8d96d 100644 --- a/docs/reference/functions/convertMessagesToModelMessages.md +++ b/docs/reference/functions/convertMessagesToModelMessages.md @@ -12,7 +12,7 @@ function convertMessagesToModelMessages(messages): ModelMessage< | null>[]; ``` -Defined in: [message-converters.ts:38](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/message-converters.ts#L38) +Defined in: [activities/chat/messages.ts:35](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L35) Convert UIMessages or ModelMessages to ModelMessages diff --git a/docs/reference/functions/convertZodToJsonSchema.md b/docs/reference/functions/convertZodToJsonSchema.md index 97097eb9..23b46ffb 100644 --- a/docs/reference/functions/convertZodToJsonSchema.md +++ b/docs/reference/functions/convertZodToJsonSchema.md @@ -6,10 +6,10 @@ title: convertZodToJsonSchema # Function: convertZodToJsonSchema() ```ts -function convertZodToJsonSchema(schema): Record | undefined; +function 
convertZodToJsonSchema(schema, options): Record | undefined; ``` -Defined in: [tools/zod-converter.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/zod-converter.ts#L57) +Defined in: [activities/chat/tools/zod-converter.ts:161](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/zod-converter.ts#L161) Converts a schema (Zod or JSONSchema) to JSON Schema format compatible with LLM providers. If the input is already a JSONSchema object, it is returned as-is. @@ -23,6 +23,12 @@ Zod schema or JSONSchema object to convert [`SchemaInput`](../type-aliases/SchemaInput.md) | `undefined` +### options + +`ConvertSchemaOptions` = `{}` + +Conversion options + ## Returns `Record`\<`string`, `any`\> \| `undefined` @@ -51,6 +57,19 @@ const jsonSchema = convertZodToJsonSchema(zodSchema); // required: ['location'] // } +// For OpenAI structured output (all fields required, optional fields nullable) +const structuredSchema = convertZodToJsonSchema(zodSchema, { forStructuredOutput: true }); +// Returns: +// { +// type: 'object', +// properties: { +// location: { type: 'string', description: 'City name' }, +// unit: { type: ['string', 'null'], enum: ['celsius', 'fahrenheit'] } +// }, +// required: ['location', 'unit'], +// additionalProperties: false +// } + // Using JSONSchema directly (passes through unchanged) const rawSchema = { type: 'object', diff --git a/docs/reference/functions/createChatOptions.md b/docs/reference/functions/createChatOptions.md new file mode 100644 index 00000000..0d2abf6d --- /dev/null +++ b/docs/reference/functions/createChatOptions.md @@ -0,0 +1,51 @@ +--- +id: createChatOptions +title: createChatOptions +--- + +# Function: createChatOptions() + +```ts +function createChatOptions(options): TextActivityOptions; +``` + +Defined in: [activities/chat/index.ts:135](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/index.ts#L135) + +Create typed options for the 
chat() function without executing. +This is useful for pre-defining configurations with full type inference. + +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `AnyTextAdapter` + +### TSchema + +`TSchema` *extends* + \| `ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\> + \| `undefined` = `undefined` + +### TStream + +`TStream` *extends* `boolean` = `true` + +## Parameters + +### options + +`TextActivityOptions`\<`TAdapter`, `TSchema`, `TStream`\> + +## Returns + +`TextActivityOptions`\<`TAdapter`, `TSchema`, `TStream`\> + +## Example + +```ts +const chatOptions = createChatOptions({ + adapter: anthropicText('claude-sonnet-4-5'), +}) + +const stream = chat({ ...chatOptions, messages }) +``` diff --git a/docs/reference/functions/createImageOptions.md b/docs/reference/functions/createImageOptions.md new file mode 100644 index 00000000..c535fddc --- /dev/null +++ b/docs/reference/functions/createImageOptions.md @@ -0,0 +1,30 @@ +--- +id: createImageOptions +title: createImageOptions +--- + +# Function: createImageOptions() + +```ts +function createImageOptions(options): ImageActivityOptions; +``` + +Defined in: [activities/generateImage/index.ts:150](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateImage/index.ts#L150) + +Create typed options for the generateImage() function without executing. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `ImageAdapter`\<`string`, `object`, `any`, `any`\> + +## Parameters + +### options + +`ImageActivityOptions`\<`TAdapter`\> + +## Returns + +`ImageActivityOptions`\<`TAdapter`\> diff --git a/docs/reference/functions/createReplayStream.md b/docs/reference/functions/createReplayStream.md index 39ec2a49..72a9a3d8 100644 --- a/docs/reference/functions/createReplayStream.md +++ b/docs/reference/functions/createReplayStream.md @@ -9,7 +9,7 @@ title: createReplayStream function createReplayStream(recording): AsyncIterable; ``` -Defined in: [stream/processor.ts:1081](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1081) +Defined in: [activities/chat/stream/processor.ts:1078](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1078) Create an async iterable from a recording diff --git a/docs/reference/functions/createSpeechOptions.md b/docs/reference/functions/createSpeechOptions.md new file mode 100644 index 00000000..e73aa765 --- /dev/null +++ b/docs/reference/functions/createSpeechOptions.md @@ -0,0 +1,30 @@ +--- +id: createSpeechOptions +title: createSpeechOptions +--- + +# Function: createSpeechOptions() + +```ts +function createSpeechOptions(options): TTSActivityOptions; +``` + +Defined in: [activities/generateSpeech/index.ts:114](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateSpeech/index.ts#L114) + +Create typed options for the generateSpeech() function without executing. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `TTSAdapter`\<`string`, `object`\> + +## Parameters + +### options + +`TTSActivityOptions`\<`TAdapter`\> + +## Returns + +`TTSActivityOptions`\<`TAdapter`\> diff --git a/docs/reference/functions/createSummarizeOptions.md b/docs/reference/functions/createSummarizeOptions.md new file mode 100644 index 00000000..18870794 --- /dev/null +++ b/docs/reference/functions/createSummarizeOptions.md @@ -0,0 +1,34 @@ +--- +id: createSummarizeOptions +title: createSummarizeOptions +--- + +# Function: createSummarizeOptions() + +```ts +function createSummarizeOptions(options): SummarizeActivityOptions; +``` + +Defined in: [activities/summarize/index.ts:272](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/summarize/index.ts#L272) + +Create typed options for the summarize() function without executing. + +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `SummarizeAdapter`\<`string`, `object`\> + +### TStream + +`TStream` *extends* `boolean` = `false` + +## Parameters + +### options + +`SummarizeActivityOptions`\<`TAdapter`, `TStream`\> + +## Returns + +`SummarizeActivityOptions`\<`TAdapter`, `TStream`\> diff --git a/docs/reference/functions/createTranscriptionOptions.md b/docs/reference/functions/createTranscriptionOptions.md new file mode 100644 index 00000000..3f2b112e --- /dev/null +++ b/docs/reference/functions/createTranscriptionOptions.md @@ -0,0 +1,30 @@ +--- +id: createTranscriptionOptions +title: createTranscriptionOptions +--- + +# Function: createTranscriptionOptions() + +```ts +function createTranscriptionOptions(options): TranscriptionActivityOptions; +``` + +Defined in: [activities/generateTranscription/index.ts:118](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateTranscription/index.ts#L118) + +Create typed options for the generateTranscription() function without executing. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `TranscriptionAdapter`\<`string`, `object`\> + +## Parameters + +### options + +`TranscriptionActivityOptions`\<`TAdapter`\> + +## Returns + +`TranscriptionActivityOptions`\<`TAdapter`\> diff --git a/docs/reference/functions/createVideoOptions.md b/docs/reference/functions/createVideoOptions.md new file mode 100644 index 00000000..d88c05e7 --- /dev/null +++ b/docs/reference/functions/createVideoOptions.md @@ -0,0 +1,30 @@ +--- +id: createVideoOptions +title: createVideoOptions +--- + +# Function: createVideoOptions() + +```ts +function createVideoOptions(options): VideoCreateOptions; +``` + +Defined in: [activities/generateVideo/index.ts:249](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateVideo/index.ts#L249) + +Create typed options for the generateVideo() function without executing. + +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `VideoAdapter`\<`string`, `object`\> + +## Parameters + +### options + +`VideoCreateOptions`\<`TAdapter`\> + +## Returns + +`VideoCreateOptions`\<`TAdapter`\> diff --git a/docs/reference/functions/embedding.md b/docs/reference/functions/embedding.md deleted file mode 100644 index 058e3ff4..00000000 --- a/docs/reference/functions/embedding.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: embedding -title: embedding ---- - -# Function: embedding() - -```ts -function embedding(options): Promise; -``` - -Defined in: [core/embedding.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/embedding.ts#L16) - -Standalone embedding function with type inference from adapter - -## Type Parameters - -### TAdapter - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `Record`\<`string`, readonly [`Modality`](../type-aliases/Modality.md)[]\>, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> - -## Parameters - -### options - 
-`Omit`\<[`EmbeddingOptions`](../interfaces/EmbeddingOptions.md), `"model"`\> & `object` - -## Returns - -`Promise`\<[`EmbeddingResult`](../interfaces/EmbeddingResult.md)\> diff --git a/docs/reference/functions/generateImage.md b/docs/reference/functions/generateImage.md new file mode 100644 index 00000000..39209296 --- /dev/null +++ b/docs/reference/functions/generateImage.md @@ -0,0 +1,71 @@ +--- +id: generateImage +title: generateImage +--- + +# Function: generateImage() + +```ts +function generateImage(options): ImageActivityResult; +``` + +Defined in: [activities/generateImage/index.ts:134](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateImage/index.ts#L134) + +Image activity - generates images from text prompts. + +Uses AI image generation models to create images based on natural language descriptions. + +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `ImageAdapter`\<`string`, `object`, `any`, `any`\> + +## Parameters + +### options + +`ImageActivityOptions`\<`TAdapter`\> + +## Returns + +`ImageActivityResult` + +## Examples + +```ts +import { generateImage } from '@tanstack/ai' +import { openaiImage } from '@tanstack/ai-openai' + +const result = await generateImage({ + adapter: openaiImage('dall-e-3'), + prompt: 'A serene mountain landscape at sunset' +}) + +console.log(result.images[0].url) +``` + +```ts +const result = await generateImage({ + adapter: openaiImage('dall-e-2'), + prompt: 'A cute robot mascot', + numberOfImages: 4, + size: '512x512' +}) + +result.images.forEach((image, i) => { + console.log(`Image ${i + 1}: ${image.url}`) +}) +``` + +```ts +const result = await generateImage({ + adapter: openaiImage('dall-e-3'), + prompt: 'A professional headshot photo', + size: '1024x1024', + modelOptions: { + quality: 'hd', + style: 'natural' + } +}) +``` diff --git a/docs/reference/functions/generateMessageId.md b/docs/reference/functions/generateMessageId.md index 44568ce3..1e545ba6 100644 --- 
a/docs/reference/functions/generateMessageId.md +++ b/docs/reference/functions/generateMessageId.md @@ -9,7 +9,7 @@ title: generateMessageId function generateMessageId(): string; ``` -Defined in: [message-converters.ts:283](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/message-converters.ts#L283) +Defined in: [activities/chat/messages.ts:280](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L280) Generate a unique message ID diff --git a/docs/reference/functions/generateSpeech.md b/docs/reference/functions/generateSpeech.md new file mode 100644 index 00000000..c48cbb19 --- /dev/null +++ b/docs/reference/functions/generateSpeech.md @@ -0,0 +1,57 @@ +--- +id: generateSpeech +title: generateSpeech +--- + +# Function: generateSpeech() + +```ts +function generateSpeech(options): TTSActivityResult; +``` + +Defined in: [activities/generateSpeech/index.ts:98](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateSpeech/index.ts#L98) + +TTS activity - generates speech from text. + +Uses AI text-to-speech models to create audio from natural language text. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `TTSAdapter`\<`string`, `object`\> + +## Parameters + +### options + +`TTSActivityOptions`\<`TAdapter`\> + +## Returns + +`TTSActivityResult` + +## Examples + +```ts +import { generateSpeech } from '@tanstack/ai' +import { openaiTTS } from '@tanstack/ai-openai' + +const result = await generateSpeech({ + adapter: openaiTTS('tts-1-hd'), + text: 'Hello, welcome to TanStack AI!', + voice: 'nova' +}) + +console.log(result.audio) // base64-encoded audio +``` + +```ts +const result = await generateSpeech({ + adapter: openaiTTS('tts-1'), + text: 'This is slower speech.', + voice: 'alloy', + format: 'wav', + speed: 0.8 +}) +``` diff --git a/docs/reference/functions/generateTranscription.md b/docs/reference/functions/generateTranscription.md new file mode 100644 index 00000000..38703bc8 --- /dev/null +++ b/docs/reference/functions/generateTranscription.md @@ -0,0 +1,59 @@ +--- +id: generateTranscription +title: generateTranscription +--- + +# Function: generateTranscription() + +```ts +function generateTranscription(options): TranscriptionActivityResult; +``` + +Defined in: [activities/generateTranscription/index.ts:100](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateTranscription/index.ts#L100) + +Transcription activity - converts audio to text. + +Uses AI speech-to-text models to transcribe audio content. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `TranscriptionAdapter`\<`string`, `object`\> + +## Parameters + +### options + +`TranscriptionActivityOptions`\<`TAdapter`\> + +## Returns + +`TranscriptionActivityResult` + +## Examples + +```ts +import { generateTranscription } from '@tanstack/ai' +import { openaiTranscription } from '@tanstack/ai-openai' + +const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, // File, Blob, or base64 string + language: 'en' +}) + +console.log(result.text) +``` + +```ts +const result = await generateTranscription({ + adapter: openaiTranscription('whisper-1'), + audio: audioFile, + responseFormat: 'verbose_json' +}) + +result.segments?.forEach(segment => { + console.log(`[${segment.start}s - ${segment.end}s]: ${segment.text}`) +}) +``` diff --git a/docs/reference/functions/generateVideo.md b/docs/reference/functions/generateVideo.md new file mode 100644 index 00000000..4a5a56b0 --- /dev/null +++ b/docs/reference/functions/generateVideo.md @@ -0,0 +1,52 @@ +--- +id: generateVideo +title: generateVideo +--- + +# Function: generateVideo() + +```ts +function generateVideo(options): Promise; +``` + +Defined in: [activities/generateVideo/index.ts:158](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateVideo/index.ts#L158) + +**`Experimental`** + +Generate video - creates a video generation job from a text prompt. + +Uses AI video generation models to create videos based on natural language descriptions. +Unlike image generation, video generation is asynchronous and requires polling for completion. + + Video generation is an experimental feature and may change. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `VideoAdapter`\<`string`, `object`\> + +## Parameters + +### options + +`VideoCreateOptions`\<`TAdapter`\> + +## Returns + +`Promise`\<[`VideoJobResult`](../interfaces/VideoJobResult.md)\> + +## Example + +```ts +import { generateVideo } from '@tanstack/ai' +import { openaiVideo } from '@tanstack/ai-openai' + +// Start a video generation job +const { jobId } = await generateVideo({ + adapter: openaiVideo('sora-2'), + prompt: 'A cat chasing a dog in a sunny park' +}) + +console.log('Job started:', jobId) +``` diff --git a/docs/reference/functions/getVideoJobStatus.md b/docs/reference/functions/getVideoJobStatus.md new file mode 100644 index 00000000..40a498b1 --- /dev/null +++ b/docs/reference/functions/getVideoJobStatus.md @@ -0,0 +1,71 @@ +--- +id: getVideoJobStatus +title: getVideoJobStatus +--- + +# Function: getVideoJobStatus() + +```ts +function getVideoJobStatus(options): Promise<{ + error?: string; + progress?: number; + status: "pending" | "processing" | "completed" | "failed"; + url?: string; +}>; +``` + +Defined in: [activities/generateVideo/index.ts:198](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/generateVideo/index.ts#L198) + +**`Experimental`** + +Get video job status - returns the current status, progress, and URL if available. + +This function combines status checking and URL retrieval. If the job is completed, +it will automatically fetch and include the video URL. + + Video generation is an experimental feature and may change. 
+ +## Type Parameters + +### TAdapter + +`TAdapter` *extends* `VideoAdapter`\<`string`, `object`\> + +## Parameters + +### options + +#### adapter + +`TAdapter` & `object` + +#### jobId + +`string` + +## Returns + +`Promise`\<\{ + `error?`: `string`; + `progress?`: `number`; + `status`: `"pending"` \| `"processing"` \| `"completed"` \| `"failed"`; + `url?`: `string`; +\}\> + +## Example + +```ts +import { getVideoJobStatus } from '@tanstack/ai' +import { openaiVideo } from '@tanstack/ai-openai' + +const result = await getVideoJobStatus({ + adapter: openaiVideo('sora-2'), + jobId: 'job-123' +}) + +console.log('Status:', result.status) +console.log('Progress:', result.progress) +if (result.url) { + console.log('Video URL:', result.url) +} +``` diff --git a/docs/reference/functions/maxIterations.md b/docs/reference/functions/maxIterations.md index 1ab98cda..bdcab002 100644 --- a/docs/reference/functions/maxIterations.md +++ b/docs/reference/functions/maxIterations.md @@ -9,7 +9,7 @@ title: maxIterations function maxIterations(max): AgentLoopStrategy; ``` -Defined in: [utilities/agent-loop-strategies.ts:20](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/agent-loop-strategies.ts#L20) +Defined in: [activities/chat/agent-loop-strategies.ts:20](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/agent-loop-strategies.ts#L20) Creates a strategy that continues for a maximum number of iterations @@ -31,7 +31,7 @@ AgentLoopStrategy that stops after max iterations ```typescript const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: "gpt-4o", messages: [...], tools: [weatherTool], diff --git a/docs/reference/functions/messages.md b/docs/reference/functions/messages.md deleted file mode 100644 index b426dc57..00000000 --- a/docs/reference/functions/messages.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -id: messages -title: messages ---- - -# Function: messages() - -```ts -function messages(_options, 
msgs): TAdapter extends AIAdapter ? TModel extends keyof ModelInputModalities ? ModelInputModalities[TModel] extends readonly Modality[] ? ConstrainedModelMessage[] : ModelMessage< - | string - | ContentPart[] - | null>[] : ModelMessage< - | string - | ContentPart[] - | null>[] : ModelMessage< - | string - | ContentPart[] - | null>[]; -``` - -Defined in: [utilities/messages.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/messages.ts#L33) - -Type-safe helper to create a messages array constrained by a model's supported modalities. - -This function provides compile-time checking that your messages only contain -content types supported by the specified model. It's particularly useful when -combining typed messages with untyped data (like from request.json()). - -## Type Parameters - -### TAdapter - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `any`, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> - -### TModel - -`TModel` *extends* `any` - -## Parameters - -### \_options - -#### adapter - -`TAdapter` - -#### model - -`TModel` - -### msgs - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `ModelInputModalities`, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> ? `TModel` *extends* keyof `ModelInputModalities` ? `ModelInputModalities`\[`TModel`\<`TModel`\>\] *extends* readonly [`Modality`](../type-aliases/Modality.md)[] ? 
[`ConstrainedModelMessage`](../type-aliases/ConstrainedModelMessage.md)\<`any`\[`any`\]\>[] : [`ModelMessage`](../interfaces/ModelMessage.md)\< - \| `string` - \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] - \| `null`\>[] : [`ModelMessage`](../interfaces/ModelMessage.md)\< - \| `string` - \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] - \| `null`\>[] : [`ModelMessage`](../interfaces/ModelMessage.md)\< - \| `string` - \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] - \| `null`\>[] - -## Returns - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `ModelInputModalities`, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> ? `TModel` *extends* keyof `ModelInputModalities` ? `ModelInputModalities`\[`TModel`\<`TModel`\>\] *extends* readonly [`Modality`](../type-aliases/Modality.md)[] ? 
[`ConstrainedModelMessage`](../type-aliases/ConstrainedModelMessage.md)\<`any`\[`any`\]\>[] : [`ModelMessage`](../interfaces/ModelMessage.md)\< - \| `string` - \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] - \| `null`\>[] : [`ModelMessage`](../interfaces/ModelMessage.md)\< - \| `string` - \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] - \| `null`\>[] : [`ModelMessage`](../interfaces/ModelMessage.md)\< - \| `string` - \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] - \| `null`\>[] - -## Example - -```typescript -import { messages, chat } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' - -const adapter = openai() - -// This will error at compile time because gpt-4o only supports text+image -const msgs = messages({ adapter, model: 'gpt-4o' }, [ - { - role: 'user', - content: [ - { type: 'video', source: { type: 'url', value: '...' } } // Error! 
- ] - } -]) -``` diff --git a/docs/reference/functions/modelMessageToUIMessage.md b/docs/reference/functions/modelMessageToUIMessage.md index 02e80f8d..f6bbeb2a 100644 --- a/docs/reference/functions/modelMessageToUIMessage.md +++ b/docs/reference/functions/modelMessageToUIMessage.md @@ -9,7 +9,7 @@ title: modelMessageToUIMessage function modelMessageToUIMessage(modelMessage, id?): UIMessage; ``` -Defined in: [message-converters.ts:158](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/message-converters.ts#L158) +Defined in: [activities/chat/messages.ts:155](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L155) Convert a ModelMessage to UIMessage diff --git a/docs/reference/functions/modelMessagesToUIMessages.md b/docs/reference/functions/modelMessagesToUIMessages.md index dd50df71..02b20aed 100644 --- a/docs/reference/functions/modelMessagesToUIMessages.md +++ b/docs/reference/functions/modelMessagesToUIMessages.md @@ -9,7 +9,7 @@ title: modelMessagesToUIMessages function modelMessagesToUIMessages(modelMessages): UIMessage[]; ``` -Defined in: [message-converters.ts:211](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/message-converters.ts#L211) +Defined in: [activities/chat/messages.ts:208](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L208) Convert an array of ModelMessages to UIMessages diff --git a/docs/reference/functions/normalizeToUIMessage.md b/docs/reference/functions/normalizeToUIMessage.md index a42310e3..f85ca4f0 100644 --- a/docs/reference/functions/normalizeToUIMessage.md +++ b/docs/reference/functions/normalizeToUIMessage.md @@ -9,7 +9,7 @@ title: normalizeToUIMessage function normalizeToUIMessage(message, generateId): UIMessage; ``` -Defined in: [message-converters.ts:260](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/message-converters.ts#L260) +Defined in: 
[activities/chat/messages.ts:257](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L257) Normalize a message (UIMessage or ModelMessage) to a UIMessage Ensures the message has an ID and createdAt timestamp diff --git a/docs/reference/functions/parsePartialJSON.md b/docs/reference/functions/parsePartialJSON.md index c3fb3806..0afc5075 100644 --- a/docs/reference/functions/parsePartialJSON.md +++ b/docs/reference/functions/parsePartialJSON.md @@ -9,7 +9,7 @@ title: parsePartialJSON function parsePartialJSON(jsonString): any; ``` -Defined in: [stream/json-parser.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/json-parser.ts#L56) +Defined in: [activities/chat/stream/json-parser.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/json-parser.ts#L56) Parse partial JSON string (convenience function) diff --git a/docs/reference/functions/streamToText.md b/docs/reference/functions/streamToText.md new file mode 100644 index 00000000..9d281666 --- /dev/null +++ b/docs/reference/functions/streamToText.md @@ -0,0 +1,43 @@ +--- +id: streamToText +title: streamToText +--- + +# Function: streamToText() + +```ts +function streamToText(stream): Promise; +``` + +Defined in: [stream-to-response.ts:23](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream-to-response.ts#L23) + +Collect all text content from a StreamChunk async iterable and return as a string. + +This function consumes the entire stream, accumulating content from 'content' type chunks, +and returns the final concatenated text. 
+ +## Parameters + +### stream + +`AsyncIterable`\<[`StreamChunk`](../type-aliases/StreamChunk.md)\> + +AsyncIterable of StreamChunks from chat() + +## Returns + +`Promise`\<`string`\> + +Promise - The accumulated text content + +## Example + +```typescript +const stream = chat({ + adapter: openaiText(), + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Hello!' }] +}); +const text = await streamToText(stream); +console.log(text); // "Hello! How can I help you today?" +``` diff --git a/docs/reference/functions/summarize.md b/docs/reference/functions/summarize.md index 2ae9aead..3efaab3b 100644 --- a/docs/reference/functions/summarize.md +++ b/docs/reference/functions/summarize.md @@ -6,25 +6,74 @@ title: summarize # Function: summarize() ```ts -function summarize(options): Promise; +function summarize(options): SummarizeActivityResult; ``` -Defined in: [core/summarize.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/summarize.ts#L16) +Defined in: [activities/summarize/index.ts:146](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/summarize/index.ts#L146) -Standalone summarize function with type inference from adapter +Summarize activity - generates summaries from text. + +Supports both streaming and non-streaming modes. 
## Type Parameters ### TAdapter -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `Record`\<`string`, readonly [`Modality`](../type-aliases/Modality.md)[]\>, [`DefaultMessageMetadataByModality`](../interfaces/DefaultMessageMetadataByModality.md)\> +`TAdapter` *extends* `SummarizeAdapter`\<`string`, `object`\> + +### TStream + +`TStream` *extends* `boolean` = `false` ## Parameters ### options -`Omit`\<[`SummarizationOptions`](../interfaces/SummarizationOptions.md), `"model"`\> & `object` +`SummarizeActivityOptions`\<`TAdapter`, `TStream`\> ## Returns -`Promise`\<[`SummarizationResult`](../interfaces/SummarizationResult.md)\> +`SummarizeActivityResult`\<`TStream`\> + +## Examples + +```ts +import { summarize } from '@tanstack/ai' +import { openaiSummarize } from '@tanstack/ai-openai' + +const result = await summarize({ + adapter: openaiSummarize('gpt-4o-mini'), + text: 'Long article text here...' +}) + +console.log(result.summary) +``` + +```ts +const result = await summarize({ + adapter: openaiSummarize('gpt-4o-mini'), + text: 'Long article text here...', + style: 'bullet-points', + maxLength: 100 +}) +``` + +```ts +const result = await summarize({ + adapter: openaiSummarize('gpt-4o-mini'), + text: 'Long technical document...', + focus: ['key findings', 'methodology'] +}) +``` + +```ts +for await (const chunk of summarize({ + adapter: openaiSummarize('gpt-4o-mini'), + text: 'Long article text here...', + stream: true +})) { + if (chunk.type === 'content') { + process.stdout.write(chunk.delta) + } +} +``` diff --git a/docs/reference/functions/toServerSentEventsStream.md b/docs/reference/functions/toServerSentEventsStream.md index 65582450..633041c7 100644 --- a/docs/reference/functions/toServerSentEventsStream.md +++ b/docs/reference/functions/toServerSentEventsStream.md @@ -9,7 +9,7 @@ title: toServerSentEventsStream function toServerSentEventsStream(stream, abortController?): ReadableStream>; ``` -Defined in: 
[utilities/stream-to-response.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/stream-to-response.ts#L22) +Defined in: [stream-to-response.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream-to-response.ts#L56) Convert a StreamChunk async iterable to a ReadableStream in Server-Sent Events format @@ -41,7 +41,7 @@ ReadableStream in Server-Sent Events format ## Example ```typescript -const stream = chat({ adapter: openai(), model: "gpt-4o", messages: [...] }); +const stream = chat({ adapter: openaiText(), model: "gpt-4o", messages: [...] }); const readableStream = toServerSentEventsStream(stream); // Use with Response, or any API that accepts ReadableStream ``` diff --git a/docs/reference/functions/toStreamResponse.md b/docs/reference/functions/toStreamResponse.md index 0753057c..fcb6ce73 100644 --- a/docs/reference/functions/toStreamResponse.md +++ b/docs/reference/functions/toStreamResponse.md @@ -9,7 +9,7 @@ title: toStreamResponse function toStreamResponse(stream, init?): Response; ``` -Defined in: [utilities/stream-to-response.ts:102](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/stream-to-response.ts#L102) +Defined in: [stream-to-response.ts:136](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream-to-response.ts#L136) Create a streaming HTTP response from a StreamChunk async iterable Includes proper headers for Server-Sent Events @@ -41,7 +41,7 @@ export async function POST(request: Request) { const { messages } = await request.json(); const abortController = new AbortController(); const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: "gpt-4o", messages, options: { abortSignal: abortController.signal } diff --git a/docs/reference/functions/toolDefinition.md b/docs/reference/functions/toolDefinition.md index 419cd4f4..53530f58 100644 --- a/docs/reference/functions/toolDefinition.md +++ 
b/docs/reference/functions/toolDefinition.md @@ -9,7 +9,7 @@ title: toolDefinition function toolDefinition(config): ToolDefinition; ``` -Defined in: [tools/tool-definition.ts:174](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L174) +Defined in: [activities/chat/tools/tool-definition.ts:179](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L179) Create an isomorphic tool definition that can be used directly or instantiated for server/client diff --git a/docs/reference/functions/uiMessageToModelMessages.md b/docs/reference/functions/uiMessageToModelMessages.md index 9b295ad6..7e35c05b 100644 --- a/docs/reference/functions/uiMessageToModelMessages.md +++ b/docs/reference/functions/uiMessageToModelMessages.md @@ -12,7 +12,7 @@ function uiMessageToModelMessages(uiMessage): ModelMessage< | null>[]; ``` -Defined in: [message-converters.ts:65](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/message-converters.ts#L65) +Defined in: [activities/chat/messages.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L62) Convert a UIMessage to ModelMessage(s) diff --git a/docs/reference/functions/untilFinishReason.md b/docs/reference/functions/untilFinishReason.md index 2522b3f0..0ad4c70d 100644 --- a/docs/reference/functions/untilFinishReason.md +++ b/docs/reference/functions/untilFinishReason.md @@ -9,7 +9,7 @@ title: untilFinishReason function untilFinishReason(stopReasons): AgentLoopStrategy; ``` -Defined in: [utilities/agent-loop-strategies.ts:41](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/agent-loop-strategies.ts#L41) +Defined in: [activities/chat/agent-loop-strategies.ts:41](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/agent-loop-strategies.ts#L41) Creates a strategy that continues until a specific finish reason is encountered 
@@ -31,7 +31,7 @@ AgentLoopStrategy that stops on specific finish reasons ```typescript const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: "gpt-4o", messages: [...], tools: [weatherTool], diff --git a/docs/reference/index.md b/docs/reference/index.md index a3d506c0..d8edb0ea 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -7,7 +7,6 @@ title: "@tanstack/ai" ## Classes -- [BaseAdapter](classes/BaseAdapter.md) - [BatchStrategy](classes/BatchStrategy.md) - [CompositeStrategy](classes/CompositeStrategy.md) - [ImmediateStrategy](classes/ImmediateStrategy.md) @@ -20,24 +19,22 @@ title: "@tanstack/ai" ## Interfaces - [AgentLoopState](interfaces/AgentLoopState.md) -- [AIAdapter](interfaces/AIAdapter.md) -- [AIAdapterConfig](interfaces/AIAdapterConfig.md) - [ApprovalRequestedStreamChunk](interfaces/ApprovalRequestedStreamChunk.md) - [AudioPart](interfaces/AudioPart.md) - [BaseStreamChunk](interfaces/BaseStreamChunk.md) -- [ChatCompletionChunk](interfaces/ChatCompletionChunk.md) -- [ChatOptions](interfaces/ChatOptions.md) - [ChunkRecording](interfaces/ChunkRecording.md) - [ChunkStrategy](interfaces/ChunkStrategy.md) - [ClientTool](interfaces/ClientTool.md) +- [CommonOptions](interfaces/CommonOptions.md) - [ContentPartSource](interfaces/ContentPartSource.md) - [ContentStreamChunk](interfaces/ContentStreamChunk.md) - [DefaultMessageMetadataByModality](interfaces/DefaultMessageMetadataByModality.md) - [DocumentPart](interfaces/DocumentPart.md) - [DoneStreamChunk](interfaces/DoneStreamChunk.md) -- [EmbeddingOptions](interfaces/EmbeddingOptions.md) -- [EmbeddingResult](interfaces/EmbeddingResult.md) - [ErrorStreamChunk](interfaces/ErrorStreamChunk.md) +- [GeneratedImage](interfaces/GeneratedImage.md) +- [ImageGenerationOptions](interfaces/ImageGenerationOptions.md) +- [ImageGenerationResult](interfaces/ImageGenerationResult.md) - [ImagePart](interfaces/ImagePart.md) - [InternalToolCallState](interfaces/InternalToolCallState.md) - 
[JSONParser](interfaces/JSONParser.md) @@ -52,6 +49,8 @@ title: "@tanstack/ai" - [StreamProcessorOptions](interfaces/StreamProcessorOptions.md) - [SummarizationOptions](interfaces/SummarizationOptions.md) - [SummarizationResult](interfaces/SummarizationResult.md) +- [TextCompletionChunk](interfaces/TextCompletionChunk.md) +- [TextOptions](interfaces/TextOptions.md) - [TextPart](interfaces/TextPart.md) - [ThinkingPart](interfaces/ThinkingPart.md) - [ThinkingStreamChunk](interfaces/ThinkingStreamChunk.md) @@ -66,25 +65,33 @@ title: "@tanstack/ai" - [ToolInputAvailableStreamChunk](interfaces/ToolInputAvailableStreamChunk.md) - [ToolResultPart](interfaces/ToolResultPart.md) - [ToolResultStreamChunk](interfaces/ToolResultStreamChunk.md) +- [TranscriptionOptions](interfaces/TranscriptionOptions.md) +- [TranscriptionResult](interfaces/TranscriptionResult.md) +- [TranscriptionSegment](interfaces/TranscriptionSegment.md) +- [TranscriptionWord](interfaces/TranscriptionWord.md) +- [TTSOptions](interfaces/TTSOptions.md) +- [TTSResult](interfaces/TTSResult.md) - [UIMessage](interfaces/UIMessage.md) +- [VideoGenerationOptions](interfaces/VideoGenerationOptions.md) +- [VideoJobResult](interfaces/VideoJobResult.md) - [VideoPart](interfaces/VideoPart.md) +- [VideoStatusResult](interfaces/VideoStatusResult.md) +- [VideoUrlResult](interfaces/VideoUrlResult.md) ## Type Aliases - [AgentLoopStrategy](type-aliases/AgentLoopStrategy.md) +- [AIAdapter](type-aliases/AIAdapter.md) - [AnyClientTool](type-aliases/AnyClientTool.md) -- [ChatStreamOptionsForModel](type-aliases/ChatStreamOptionsForModel.md) -- [ChatStreamOptionsUnion](type-aliases/ChatStreamOptionsUnion.md) - [ConstrainedContent](type-aliases/ConstrainedContent.md) - [ConstrainedModelMessage](type-aliases/ConstrainedModelMessage.md) - [ContentPart](type-aliases/ContentPart.md) -- [ContentPartForModalities](type-aliases/ContentPartForModalities.md) -- [ExtractModalitiesForModel](type-aliases/ExtractModalitiesForModel.md) -- 
[ExtractModelsFromAdapter](type-aliases/ExtractModelsFromAdapter.md) +- [ContentPartForInputModalitiesTypes](type-aliases/ContentPartForInputModalitiesTypes.md) - [InferSchemaType](type-aliases/InferSchemaType.md) - [InferToolInput](type-aliases/InferToolInput.md) - [InferToolName](type-aliases/InferToolName.md) - [InferToolOutput](type-aliases/InferToolOutput.md) +- [InputModalitiesTypes](type-aliases/InputModalitiesTypes.md) - [MessagePart](type-aliases/MessagePart.md) - [ModalitiesArrayToUnion](type-aliases/ModalitiesArrayToUnion.md) - [Modality](type-aliases/Modality.md) @@ -102,19 +109,28 @@ title: "@tanstack/ai" ## Functions - [chat](functions/chat.md) -- [chatOptions](functions/chatOptions.md) - [combineStrategies](functions/combineStrategies.md) - [convertMessagesToModelMessages](functions/convertMessagesToModelMessages.md) - [convertZodToJsonSchema](functions/convertZodToJsonSchema.md) +- [createChatOptions](functions/createChatOptions.md) +- [createImageOptions](functions/createImageOptions.md) - [createReplayStream](functions/createReplayStream.md) -- [embedding](functions/embedding.md) +- [createSpeechOptions](functions/createSpeechOptions.md) +- [createSummarizeOptions](functions/createSummarizeOptions.md) +- [createTranscriptionOptions](functions/createTranscriptionOptions.md) +- [createVideoOptions](functions/createVideoOptions.md) +- [generateImage](functions/generateImage.md) - [generateMessageId](functions/generateMessageId.md) +- [generateSpeech](functions/generateSpeech.md) +- [generateTranscription](functions/generateTranscription.md) +- [generateVideo](functions/generateVideo.md) +- [getVideoJobStatus](functions/getVideoJobStatus.md) - [maxIterations](functions/maxIterations.md) -- [messages](functions/messages.md) - [modelMessagesToUIMessages](functions/modelMessagesToUIMessages.md) - [modelMessageToUIMessage](functions/modelMessageToUIMessage.md) - [normalizeToUIMessage](functions/normalizeToUIMessage.md) - 
[parsePartialJSON](functions/parsePartialJSON.md) +- [streamToText](functions/streamToText.md) - [summarize](functions/summarize.md) - [toolDefinition](functions/toolDefinition.md) - [toServerSentEventsStream](functions/toServerSentEventsStream.md) diff --git a/docs/reference/interfaces/AIAdapter.md b/docs/reference/interfaces/AIAdapter.md deleted file mode 100644 index 2ad47311..00000000 --- a/docs/reference/interfaces/AIAdapter.md +++ /dev/null @@ -1,214 +0,0 @@ ---- -id: AIAdapter -title: AIAdapter ---- - -# Interface: AIAdapter\ - -Defined in: [types.ts:756](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L756) - -AI adapter interface with support for endpoint-specific models and provider options. - -Generic parameters: -- TChatModels: Models that support chat/text completion -- TEmbeddingModels: Models that support embeddings -- TChatProviderOptions: Provider-specific options for chat endpoint -- TEmbeddingProviderOptions: Provider-specific options for embedding endpoint -- TModelProviderOptionsByName: Map from model name to its specific provider options -- TModelInputModalitiesByName: Map from model name to its supported input modalities -- TMessageMetadataByModality: Map from modality type to adapter-specific metadata types - -## Type Parameters - -### TChatModels - -`TChatModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\> - -### TEmbeddingModels - -`TEmbeddingModels` *extends* `ReadonlyArray`\<`string`\> = `ReadonlyArray`\<`string`\> - -### TChatProviderOptions - -`TChatProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### TEmbeddingProviderOptions - -`TEmbeddingProviderOptions` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### TModelProviderOptionsByName - -`TModelProviderOptionsByName` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### TModelInputModalitiesByName - -`TModelInputModalitiesByName` *extends* 
`Record`\<`string`, `ReadonlyArray`\<[`Modality`](../type-aliases/Modality.md)\>\> = `Record`\<`string`, `ReadonlyArray`\<[`Modality`](../type-aliases/Modality.md)\>\> - -### TMessageMetadataByModality - -`TMessageMetadataByModality` *extends* `object` = [`DefaultMessageMetadataByModality`](DefaultMessageMetadataByModality.md) - -## Properties - -### \_chatProviderOptions? - -```ts -optional _chatProviderOptions: TChatProviderOptions; -``` - -Defined in: [types.ts:783](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L783) - -*** - -### \_embeddingProviderOptions? - -```ts -optional _embeddingProviderOptions: TEmbeddingProviderOptions; -``` - -Defined in: [types.ts:784](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L784) - -*** - -### \_messageMetadataByModality? - -```ts -optional _messageMetadataByModality: TMessageMetadataByModality; -``` - -Defined in: [types.ts:801](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L801) - -Type-only map from modality type to adapter-specific metadata types. -Used to provide type-safe autocomplete for metadata on content parts. - -*** - -### \_modelInputModalitiesByName? - -```ts -optional _modelInputModalitiesByName: TModelInputModalitiesByName; -``` - -Defined in: [types.ts:796](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L796) - -Type-only map from model name to its supported input modalities. -Used by the core AI types to narrow ContentPart types based on the selected model. -Must be provided by all adapters. - -*** - -### \_modelProviderOptionsByName - -```ts -_modelProviderOptionsByName: TModelProviderOptionsByName; -``` - -Defined in: [types.ts:790](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L790) - -Type-only map from model name to its specific provider options. -Used by the core AI types to narrow providerOptions based on the selected model. 
-Must be provided by all adapters. - -*** - -### \_providerOptions? - -```ts -optional _providerOptions: TChatProviderOptions; -``` - -Defined in: [types.ts:782](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L782) - -*** - -### chatStream() - -```ts -chatStream: (options) => AsyncIterable; -``` - -Defined in: [types.ts:804](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L804) - -#### Parameters - -##### options - -[`ChatOptions`](ChatOptions.md)\<`string`, `TChatProviderOptions`\> - -#### Returns - -`AsyncIterable`\<[`StreamChunk`](../type-aliases/StreamChunk.md)\> - -*** - -### createEmbeddings() - -```ts -createEmbeddings: (options) => Promise; -``` - -Defined in: [types.ts:812](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L812) - -#### Parameters - -##### options - -[`EmbeddingOptions`](EmbeddingOptions.md) - -#### Returns - -`Promise`\<[`EmbeddingResult`](EmbeddingResult.md)\> - -*** - -### embeddingModels? 
- -```ts -optional embeddingModels: TEmbeddingModels; -``` - -Defined in: [types.ts:779](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L779) - -Models that support embeddings - -*** - -### models - -```ts -models: TChatModels; -``` - -Defined in: [types.ts:776](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L776) - -Models that support chat/text completion - -*** - -### name - -```ts -name: string; -``` - -Defined in: [types.ts:774](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L774) - -*** - -### summarize() - -```ts -summarize: (options) => Promise; -``` - -Defined in: [types.ts:809](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L809) - -#### Parameters - -##### options - -[`SummarizationOptions`](SummarizationOptions.md) - -#### Returns - -`Promise`\<[`SummarizationResult`](SummarizationResult.md)\> diff --git a/docs/reference/interfaces/AIAdapterConfig.md b/docs/reference/interfaces/AIAdapterConfig.md deleted file mode 100644 index 76abb781..00000000 --- a/docs/reference/interfaces/AIAdapterConfig.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -id: AIAdapterConfig -title: AIAdapterConfig ---- - -# Interface: AIAdapterConfig - -Defined in: [types.ts:815](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L815) - -## Properties - -### apiKey? - -```ts -optional apiKey: string; -``` - -Defined in: [types.ts:816](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L816) - -*** - -### baseUrl? - -```ts -optional baseUrl: string; -``` - -Defined in: [types.ts:817](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L817) - -*** - -### headers? - -```ts -optional headers: Record; -``` - -Defined in: [types.ts:820](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L820) - -*** - -### maxRetries? 
- -```ts -optional maxRetries: number; -``` - -Defined in: [types.ts:819](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L819) - -*** - -### timeout? - -```ts -optional timeout: number; -``` - -Defined in: [types.ts:818](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L818) diff --git a/docs/reference/interfaces/AgentLoopState.md b/docs/reference/interfaces/AgentLoopState.md index dff4f90b..11245920 100644 --- a/docs/reference/interfaces/AgentLoopState.md +++ b/docs/reference/interfaces/AgentLoopState.md @@ -5,7 +5,7 @@ title: AgentLoopState # Interface: AgentLoopState -Defined in: [types.ts:522](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L522) +Defined in: [types.ts:579](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L579) State passed to agent loop strategy for determining whether to continue @@ -17,7 +17,7 @@ State passed to agent loop strategy for determining whether to continue finishReason: string | null; ``` -Defined in: [types.ts:528](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L528) +Defined in: [types.ts:585](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L585) Finish reason from the last response @@ -29,7 +29,7 @@ Finish reason from the last response iterationCount: number; ``` -Defined in: [types.ts:524](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L524) +Defined in: [types.ts:581](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L581) Current iteration count (0-indexed) @@ -44,6 +44,6 @@ messages: ModelMessage< | null>[]; ``` -Defined in: [types.ts:526](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L526) +Defined in: [types.ts:583](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L583) Current messages array diff --git 
a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md index ab57b5dd..1cd6ec0c 100644 --- a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md +++ b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md @@ -5,7 +5,7 @@ title: ApprovalRequestedStreamChunk # Interface: ApprovalRequestedStreamChunk -Defined in: [types.ts:645](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L645) +Defined in: [types.ts:708](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L708) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:645](https://github.com/TanStack/ai/blob/main/packages/typ approval: object; ``` -Defined in: [types.ts:650](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L650) +Defined in: [types.ts:713](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L713) #### id @@ -41,7 +41,7 @@ needsApproval: true; id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -55,7 +55,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ input: any; ``` -Defined in: [types.ts:649](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L649) +Defined in: [types.ts:712](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L712) *** @@ -65,7 +65,7 @@ Defined in: [types.ts:649](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -79,7 +79,7 @@ Defined in: 
[types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -93,7 +93,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:647](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L647) +Defined in: [types.ts:710](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L710) *** @@ -103,7 +103,7 @@ Defined in: [types.ts:647](https://github.com/TanStack/ai/blob/main/packages/typ toolName: string; ``` -Defined in: [types.ts:648](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L648) +Defined in: [types.ts:711](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L711) *** @@ -113,7 +113,7 @@ Defined in: [types.ts:648](https://github.com/TanStack/ai/blob/main/packages/typ type: "approval-requested"; ``` -Defined in: [types.ts:646](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L646) +Defined in: [types.ts:709](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L709) #### Overrides diff --git a/docs/reference/interfaces/AudioPart.md b/docs/reference/interfaces/AudioPart.md index 836e16d1..1038e5d8 100644 --- a/docs/reference/interfaces/AudioPart.md +++ b/docs/reference/interfaces/AudioPart.md @@ -5,7 +5,7 @@ title: AudioPart # Interface: AudioPart\ -Defined in: [types.ts:120](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L120) +Defined in: [types.ts:196](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L196) Audio content part for multimodal messages. 
@@ -25,7 +25,7 @@ Provider-specific metadata type optional metadata: TMetadata; ``` -Defined in: [types.ts:125](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L125) +Defined in: [types.ts:201](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L201) Provider-specific metadata (e.g., format, sample rate) @@ -37,7 +37,7 @@ Provider-specific metadata (e.g., format, sample rate) source: ContentPartSource; ``` -Defined in: [types.ts:123](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L123) +Defined in: [types.ts:199](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L199) Source of the audio content @@ -49,4 +49,4 @@ Source of the audio content type: "audio"; ``` -Defined in: [types.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L121) +Defined in: [types.ts:197](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L197) diff --git a/docs/reference/interfaces/BaseStreamChunk.md b/docs/reference/interfaces/BaseStreamChunk.md index 81481d16..3b00b198 100644 --- a/docs/reference/interfaces/BaseStreamChunk.md +++ b/docs/reference/interfaces/BaseStreamChunk.md @@ -5,7 +5,7 @@ title: BaseStreamChunk # Interface: BaseStreamChunk -Defined in: [types.ts:594](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L594) +Defined in: [types.ts:657](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L657) ## Extended by @@ -26,7 +26,7 @@ Defined in: [types.ts:594](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) *** @@ -36,7 +36,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ model: 
string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) *** @@ -46,7 +46,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) *** @@ -56,4 +56,4 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ type: StreamChunkType; ``` -Defined in: [types.ts:595](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L595) +Defined in: [types.ts:658](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L658) diff --git a/docs/reference/interfaces/ChatCompletionChunk.md b/docs/reference/interfaces/ChatCompletionChunk.md deleted file mode 100644 index 78235a12..00000000 --- a/docs/reference/interfaces/ChatCompletionChunk.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -id: ChatCompletionChunk -title: ChatCompletionChunk ---- - -# Interface: ChatCompletionChunk - -Defined in: [types.ts:684](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L684) - -## Properties - -### content - -```ts -content: string; -``` - -Defined in: [types.ts:687](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L687) - -*** - -### finishReason? 
- -```ts -optional finishReason: "length" | "stop" | "content_filter" | null; -``` - -Defined in: [types.ts:689](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L689) - -*** - -### id - -```ts -id: string; -``` - -Defined in: [types.ts:685](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L685) - -*** - -### model - -```ts -model: string; -``` - -Defined in: [types.ts:686](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L686) - -*** - -### role? - -```ts -optional role: "assistant"; -``` - -Defined in: [types.ts:688](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L688) - -*** - -### usage? - -```ts -optional usage: object; -``` - -Defined in: [types.ts:690](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L690) - -#### completionTokens - -```ts -completionTokens: number; -``` - -#### promptTokens - -```ts -promptTokens: number; -``` - -#### totalTokens - -```ts -totalTokens: number; -``` diff --git a/docs/reference/interfaces/ChatOptions.md b/docs/reference/interfaces/ChatOptions.md deleted file mode 100644 index 723a13d8..00000000 --- a/docs/reference/interfaces/ChatOptions.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -id: ChatOptions -title: ChatOptions ---- - -# Interface: ChatOptions\ - -Defined in: [types.ts:548](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L548) - -Options passed into the SDK and further piped to the AI provider. - -## Type Parameters - -### TModel - -`TModel` *extends* `string` = `string` - -### TProviderOptionsSuperset - -`TProviderOptionsSuperset` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> - -### TOutput - -`TOutput` *extends* [`ResponseFormat`](ResponseFormat.md)\<`any`\> \| `undefined` = `undefined` - -### TProviderOptionsForModel - -`TProviderOptionsForModel` = `TProviderOptionsSuperset` - -## Properties - -### abortController? 
- -```ts -optional abortController: AbortController; -``` - -Defined in: [types.ts:581](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L581) - -AbortController for request cancellation. - -Allows you to cancel an in-progress request using an AbortController. -Useful for implementing timeouts or user-initiated cancellations. - -#### Example - -```ts -const abortController = new AbortController(); -setTimeout(() => abortController.abort(), 5000); // Cancel after 5 seconds -await chat({ ..., abortController }); -``` - -#### See - -https://developer.mozilla.org/en-US/docs/Web/API/AbortController - -*** - -### agentLoopStrategy? - -```ts -optional agentLoopStrategy: AgentLoopStrategy; -``` - -Defined in: [types.ts:558](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L558) - -*** - -### conversationId? - -```ts -optional conversationId: string; -``` - -Defined in: [types.ts:567](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L567) - -Conversation ID for correlating client and server-side devtools events. -When provided, server-side events will be linked to the client conversation in devtools. - -*** - -### messages - -```ts -messages: ModelMessage< - | string - | ContentPart[] - | null>[]; -``` - -Defined in: [types.ts:555](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L555) - -*** - -### model - -```ts -model: TModel; -``` - -Defined in: [types.ts:554](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L554) - -*** - -### options? - -```ts -optional options: CommonOptions; -``` - -Defined in: [types.ts:559](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L559) - -*** - -### output? - -```ts -optional output: TOutput; -``` - -Defined in: [types.ts:562](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L562) - -*** - -### providerOptions? 
- -```ts -optional providerOptions: TProviderOptionsForModel; -``` - -Defined in: [types.ts:560](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L560) - -*** - -### request? - -```ts -optional request: Request | RequestInit; -``` - -Defined in: [types.ts:561](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L561) - -*** - -### systemPrompts? - -```ts -optional systemPrompts: string[]; -``` - -Defined in: [types.ts:557](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L557) - -*** - -### tools? - -```ts -optional tools: Tool[]; -``` - -Defined in: [types.ts:556](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L556) diff --git a/docs/reference/interfaces/ChunkRecording.md b/docs/reference/interfaces/ChunkRecording.md index 3833f041..984fa148 100644 --- a/docs/reference/interfaces/ChunkRecording.md +++ b/docs/reference/interfaces/ChunkRecording.md @@ -5,7 +5,7 @@ title: ChunkRecording # Interface: ChunkRecording -Defined in: [stream/types.ts:83](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L83) +Defined in: [activities/chat/stream/types.ts:73](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L73) Recording format for replay testing @@ -17,7 +17,7 @@ Recording format for replay testing chunks: object[]; ``` -Defined in: [stream/types.ts:88](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L88) +Defined in: [activities/chat/stream/types.ts:78](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L78) #### chunk @@ -45,7 +45,7 @@ timestamp: number; optional model: string; ``` -Defined in: [stream/types.ts:86](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L86) +Defined in: 
[activities/chat/stream/types.ts:76](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L76) *** @@ -55,7 +55,7 @@ Defined in: [stream/types.ts:86](https://github.com/TanStack/ai/blob/main/packag optional provider: string; ``` -Defined in: [stream/types.ts:87](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L87) +Defined in: [activities/chat/stream/types.ts:77](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L77) *** @@ -65,7 +65,7 @@ Defined in: [stream/types.ts:87](https://github.com/TanStack/ai/blob/main/packag optional result: ProcessorResult; ``` -Defined in: [stream/types.ts:93](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L93) +Defined in: [activities/chat/stream/types.ts:83](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L83) *** @@ -75,7 +75,7 @@ Defined in: [stream/types.ts:93](https://github.com/TanStack/ai/blob/main/packag timestamp: number; ``` -Defined in: [stream/types.ts:85](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L85) +Defined in: [activities/chat/stream/types.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L75) *** @@ -85,4 +85,4 @@ Defined in: [stream/types.ts:85](https://github.com/TanStack/ai/blob/main/packag version: "1.0"; ``` -Defined in: [stream/types.ts:84](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L84) +Defined in: [activities/chat/stream/types.ts:74](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L74) diff --git a/docs/reference/interfaces/ChunkStrategy.md b/docs/reference/interfaces/ChunkStrategy.md index c9b06168..a6652233 100644 --- a/docs/reference/interfaces/ChunkStrategy.md +++ 
b/docs/reference/interfaces/ChunkStrategy.md @@ -5,7 +5,7 @@ title: ChunkStrategy # Interface: ChunkStrategy -Defined in: [stream/types.ts:43](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L43) +Defined in: [activities/chat/stream/types.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L33) Strategy for determining when to emit text updates @@ -17,7 +17,7 @@ Strategy for determining when to emit text updates optional reset: () => void; ``` -Defined in: [stream/types.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L55) +Defined in: [activities/chat/stream/types.ts:45](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L45) Optional: Reset strategy state (called when streaming starts) @@ -33,7 +33,7 @@ Optional: Reset strategy state (called when streaming starts) shouldEmit: (chunk, accumulated) => boolean; ``` -Defined in: [stream/types.ts:50](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L50) +Defined in: [activities/chat/stream/types.ts:40](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L40) Called for each text chunk received diff --git a/docs/reference/interfaces/ClientTool.md b/docs/reference/interfaces/ClientTool.md index 9dc73d7c..71a7de65 100644 --- a/docs/reference/interfaces/ClientTool.md +++ b/docs/reference/interfaces/ClientTool.md @@ -5,7 +5,7 @@ title: ClientTool # Interface: ClientTool\ -Defined in: [tools/tool-definition.ts:18](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L18) +Defined in: [activities/chat/tools/tool-definition.ts:23](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L23) Marker type for client-side tools @@ -31,7 +31,7 @@ Marker type for client-side tools 
__toolSide: "client"; ``` -Defined in: [tools/tool-definition.ts:23](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L23) +Defined in: [activities/chat/tools/tool-definition.ts:28](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L28) *** @@ -41,7 +41,7 @@ Defined in: [tools/tool-definition.ts:23](https://github.com/TanStack/ai/blob/ma description: string; ``` -Defined in: [tools/tool-definition.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L25) +Defined in: [activities/chat/tools/tool-definition.ts:30](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L30) *** @@ -53,7 +53,7 @@ optional execute: (args) => | Promise>; ``` -Defined in: [tools/tool-definition.ts:30](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L30) +Defined in: [activities/chat/tools/tool-definition.ts:35](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L35) #### Parameters @@ -74,7 +74,7 @@ Defined in: [tools/tool-definition.ts:30](https://github.com/TanStack/ai/blob/ma optional inputSchema: TInput; ``` -Defined in: [tools/tool-definition.ts:26](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L26) +Defined in: [activities/chat/tools/tool-definition.ts:31](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L31) *** @@ -84,7 +84,7 @@ Defined in: [tools/tool-definition.ts:26](https://github.com/TanStack/ai/blob/ma optional metadata: Record; ``` -Defined in: [tools/tool-definition.ts:29](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L29) +Defined in: 
[activities/chat/tools/tool-definition.ts:34](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L34) *** @@ -94,7 +94,7 @@ Defined in: [tools/tool-definition.ts:29](https://github.com/TanStack/ai/blob/ma name: TName; ``` -Defined in: [tools/tool-definition.ts:24](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L24) +Defined in: [activities/chat/tools/tool-definition.ts:29](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L29) *** @@ -104,7 +104,7 @@ Defined in: [tools/tool-definition.ts:24](https://github.com/TanStack/ai/blob/ma optional needsApproval: boolean; ``` -Defined in: [tools/tool-definition.ts:28](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L28) +Defined in: [activities/chat/tools/tool-definition.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L33) *** @@ -114,4 +114,4 @@ Defined in: [tools/tool-definition.ts:28](https://github.com/TanStack/ai/blob/ma optional outputSchema: TOutput; ``` -Defined in: [tools/tool-definition.ts:27](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L27) +Defined in: [activities/chat/tools/tool-definition.ts:32](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L32) diff --git a/docs/reference/interfaces/CommonOptions.md b/docs/reference/interfaces/CommonOptions.md new file mode 100644 index 00000000..9241c1d2 --- /dev/null +++ b/docs/reference/interfaces/CommonOptions.md @@ -0,0 +1,89 @@ +--- +id: CommonOptions +title: CommonOptions +--- + +# Interface: CommonOptions + +Defined in: [types.ts:29](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L29) + +Common options shared across different AI provider implementations. 
+These options represent the standard parameters that work across OpenAI, Anthropic, and Gemini. + +## Properties + +### maxTokens? + +```ts +optional maxTokens: number; +``` + +Defined in: [types.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L66) + +The maximum number of tokens to generate in the response. + +Provider usage: +- OpenAI: `max_output_tokens` (number) - includes visible output and reasoning tokens +- Anthropic: `max_tokens` (number, required) - range x >= 1 +- Gemini: `generationConfig.maxOutputTokens` (number) + +*** + +### metadata? + +```ts +optional metadata: Record; +``` + +Defined in: [types.ts:78](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L78) + +Additional metadata to attach to the request. +Can be used for tracking, debugging, or passing custom information. +Structure and constraints vary by provider. + +Provider usage: +- OpenAI: `metadata` (Record) - max 16 key-value pairs, keys max 64 chars, values max 512 chars +- Anthropic: `metadata` (Record) - includes optional user_id (max 256 chars) +- Gemini: Not directly available in TextProviderOptions + +*** + +### temperature? + +```ts +optional temperature: number; +``` + +Defined in: [types.ts:42](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L42) + +Controls the randomness of the output. +Higher values (e.g., 0.8) make output more random, lower values (e.g., 0.2) make it more focused and deterministic. +Range: [0.0, 2.0] + +Note: Generally recommended to use either temperature or topP, but not both. + +Provider usage: +- OpenAI: `temperature` (number) +- Anthropic: `temperature` (number) - ranges from 0.0 to 1.0, default 1.0 +- Gemini: `generationConfig.temperature` (number) - ranges from 0.0 to 2.0 + +*** + +### topP? 
+ +```ts +optional topP: number; +``` + +Defined in: [types.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L56) + +Nucleus sampling parameter. An alternative to temperature sampling. +The model considers the results of tokens with topP probability mass. +For example, 0.1 means only tokens comprising the top 10% probability mass are considered. + +Note: Generally recommended to use either temperature or topP, but not both. + +Provider usage: +- OpenAI: `text.top_p` (number) +- Anthropic: `top_p` (number | null) +- Gemini: `generationConfig.topP` (number) diff --git a/docs/reference/interfaces/ContentPartSource.md b/docs/reference/interfaces/ContentPartSource.md index 45dea6a6..eb9e5957 100644 --- a/docs/reference/interfaces/ContentPartSource.md +++ b/docs/reference/interfaces/ContentPartSource.md @@ -5,7 +5,7 @@ title: ContentPartSource # Interface: ContentPartSource -Defined in: [types.ts:89](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L89) +Defined in: [types.ts:165](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L165) Source specification for multimodal content. Supports both inline data (base64) and URL-based content. @@ -18,7 +18,7 @@ Supports both inline data (base64) and URL-based content. 
type: "data" | "url"; ``` -Defined in: [types.ts:95](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L95) +Defined in: [types.ts:171](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L171) The type of source: - 'data': Inline data (typically base64 encoded) @@ -32,7 +32,7 @@ The type of source: value: string; ``` -Defined in: [types.ts:101](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L101) +Defined in: [types.ts:177](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L177) The actual content value: - For 'data': base64-encoded string diff --git a/docs/reference/interfaces/ContentStreamChunk.md b/docs/reference/interfaces/ContentStreamChunk.md index e03782a4..3071e638 100644 --- a/docs/reference/interfaces/ContentStreamChunk.md +++ b/docs/reference/interfaces/ContentStreamChunk.md @@ -5,7 +5,7 @@ title: ContentStreamChunk # Interface: ContentStreamChunk -Defined in: [types.ts:601](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L601) +Defined in: [types.ts:664](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L664) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:601](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:604](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L604) +Defined in: [types.ts:667](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L667) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:604](https://github.com/TanStack/ai/blob/main/packages/typ delta: string; ``` -Defined in: [types.ts:603](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L603) +Defined in: [types.ts:666](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L666) *** @@ -39,7 +39,7 @@ Defined in: 
[types.ts:603](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -53,7 +53,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -67,7 +67,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ optional role: "assistant"; ``` -Defined in: [types.ts:605](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L605) +Defined in: [types.ts:668](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L668) *** @@ -77,7 +77,7 @@ Defined in: [types.ts:605](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -91,7 +91,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ type: "content"; ``` -Defined in: [types.ts:602](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L602) +Defined in: [types.ts:665](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L665) #### Overrides diff --git a/docs/reference/interfaces/DefaultMessageMetadataByModality.md b/docs/reference/interfaces/DefaultMessageMetadataByModality.md index d13b5888..ec33a6ef 100644 --- 
a/docs/reference/interfaces/DefaultMessageMetadataByModality.md +++ b/docs/reference/interfaces/DefaultMessageMetadataByModality.md @@ -5,7 +5,7 @@ title: DefaultMessageMetadataByModality # Interface: DefaultMessageMetadataByModality -Defined in: [types.ts:736](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L736) +Defined in: [types.ts:1019](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1019) Default metadata type for adapters that don't define custom metadata. Uses unknown for all modalities. @@ -18,7 +18,7 @@ Uses unknown for all modalities. audio: unknown; ``` -Defined in: [types.ts:739](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L739) +Defined in: [types.ts:1022](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1022) *** @@ -28,7 +28,7 @@ Defined in: [types.ts:739](https://github.com/TanStack/ai/blob/main/packages/typ document: unknown; ``` -Defined in: [types.ts:741](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L741) +Defined in: [types.ts:1024](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1024) *** @@ -38,7 +38,7 @@ Defined in: [types.ts:741](https://github.com/TanStack/ai/blob/main/packages/typ image: unknown; ``` -Defined in: [types.ts:738](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L738) +Defined in: [types.ts:1021](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1021) *** @@ -48,7 +48,7 @@ Defined in: [types.ts:738](https://github.com/TanStack/ai/blob/main/packages/typ text: unknown; ``` -Defined in: [types.ts:737](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L737) +Defined in: [types.ts:1020](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1020) *** @@ -58,4 +58,4 @@ Defined in: 
[types.ts:737](https://github.com/TanStack/ai/blob/main/packages/typ video: unknown; ``` -Defined in: [types.ts:740](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L740) +Defined in: [types.ts:1023](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1023) diff --git a/docs/reference/interfaces/DocumentPart.md b/docs/reference/interfaces/DocumentPart.md index f29a80b7..ae1f10d0 100644 --- a/docs/reference/interfaces/DocumentPart.md +++ b/docs/reference/interfaces/DocumentPart.md @@ -5,7 +5,7 @@ title: DocumentPart # Interface: DocumentPart\ -Defined in: [types.ts:144](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L144) +Defined in: [types.ts:220](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L220) Document content part for multimodal messages (e.g., PDFs). @@ -25,7 +25,7 @@ Provider-specific metadata type (e.g., Anthropic's media_type) optional metadata: TMetadata; ``` -Defined in: [types.ts:149](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L149) +Defined in: [types.ts:225](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L225) Provider-specific metadata (e.g., media_type for PDFs) @@ -37,7 +37,7 @@ Provider-specific metadata (e.g., media_type for PDFs) source: ContentPartSource; ``` -Defined in: [types.ts:147](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L147) +Defined in: [types.ts:223](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L223) Source of the document content @@ -49,4 +49,4 @@ Source of the document content type: "document"; ``` -Defined in: [types.ts:145](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L145) +Defined in: [types.ts:221](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L221) diff --git a/docs/reference/interfaces/DoneStreamChunk.md 
b/docs/reference/interfaces/DoneStreamChunk.md index a62e11e8..99c53e29 100644 --- a/docs/reference/interfaces/DoneStreamChunk.md +++ b/docs/reference/interfaces/DoneStreamChunk.md @@ -5,7 +5,7 @@ title: DoneStreamChunk # Interface: DoneStreamChunk -Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627) +Defined in: [types.ts:690](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L690) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typ finishReason: "length" | "stop" | "content_filter" | "tool_calls" | null; ``` -Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629) +Defined in: [types.ts:692](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L692) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -43,7 +43,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -71,7 +71,7 @@ Defined in: 
[types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ type: "done"; ``` -Defined in: [types.ts:628](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L628) +Defined in: [types.ts:691](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L691) #### Overrides @@ -85,7 +85,7 @@ Defined in: [types.ts:628](https://github.com/TanStack/ai/blob/main/packages/typ optional usage: object; ``` -Defined in: [types.ts:630](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L630) +Defined in: [types.ts:693](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L693) #### completionTokens diff --git a/docs/reference/interfaces/EmbeddingOptions.md b/docs/reference/interfaces/EmbeddingOptions.md deleted file mode 100644 index 471035f6..00000000 --- a/docs/reference/interfaces/EmbeddingOptions.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: EmbeddingOptions -title: EmbeddingOptions ---- - -# Interface: EmbeddingOptions - -Defined in: [types.ts:716](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L716) - -## Properties - -### dimensions? 
- -```ts -optional dimensions: number; -``` - -Defined in: [types.ts:719](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L719) - -*** - -### input - -```ts -input: string | string[]; -``` - -Defined in: [types.ts:718](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L718) - -*** - -### model - -```ts -model: string; -``` - -Defined in: [types.ts:717](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L717) diff --git a/docs/reference/interfaces/EmbeddingResult.md b/docs/reference/interfaces/EmbeddingResult.md deleted file mode 100644 index 39e385e4..00000000 --- a/docs/reference/interfaces/EmbeddingResult.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: EmbeddingResult -title: EmbeddingResult ---- - -# Interface: EmbeddingResult - -Defined in: [types.ts:722](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L722) - -## Properties - -### embeddings - -```ts -embeddings: number[][]; -``` - -Defined in: [types.ts:725](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L725) - -*** - -### id - -```ts -id: string; -``` - -Defined in: [types.ts:723](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L723) - -*** - -### model - -```ts -model: string; -``` - -Defined in: [types.ts:724](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L724) - -*** - -### usage - -```ts -usage: object; -``` - -Defined in: [types.ts:726](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L726) - -#### promptTokens - -```ts -promptTokens: number; -``` - -#### totalTokens - -```ts -totalTokens: number; -``` diff --git a/docs/reference/interfaces/ErrorStreamChunk.md b/docs/reference/interfaces/ErrorStreamChunk.md index 57cb491d..a55bf1d6 100644 --- a/docs/reference/interfaces/ErrorStreamChunk.md +++ b/docs/reference/interfaces/ErrorStreamChunk.md @@ -5,7 +5,7 @@ title: ErrorStreamChunk # 
Interface: ErrorStreamChunk -Defined in: [types.ts:637](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L637) +Defined in: [types.ts:700](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L700) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:637](https://github.com/TanStack/ai/blob/main/packages/typ error: object; ``` -Defined in: [types.ts:639](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L639) +Defined in: [types.ts:702](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L702) #### code? @@ -41,7 +41,7 @@ message: string; id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -55,7 +55,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -69,7 +69,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -83,7 +83,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ type: "error"; ``` -Defined in: [types.ts:638](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L638) +Defined in: [types.ts:701](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L701) #### Overrides diff --git 
a/docs/reference/interfaces/GeneratedImage.md b/docs/reference/interfaces/GeneratedImage.md new file mode 100644 index 00000000..978b8e02 --- /dev/null +++ b/docs/reference/interfaces/GeneratedImage.md @@ -0,0 +1,46 @@ +--- +id: GeneratedImage +title: GeneratedImage +--- + +# Interface: GeneratedImage + +Defined in: [types.ts:805](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L805) + +A single generated image + +## Properties + +### b64Json? + +```ts +optional b64Json: string; +``` + +Defined in: [types.ts:807](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L807) + +Base64-encoded image data + +*** + +### revisedPrompt? + +```ts +optional revisedPrompt: string; +``` + +Defined in: [types.ts:811](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L811) + +Revised prompt used by the model (if applicable) + +*** + +### url? + +```ts +optional url: string; +``` + +Defined in: [types.ts:809](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L809) + +URL to the generated image (may be temporary) diff --git a/docs/reference/interfaces/ImageGenerationOptions.md b/docs/reference/interfaces/ImageGenerationOptions.md new file mode 100644 index 00000000..bcb8bec3 --- /dev/null +++ b/docs/reference/interfaces/ImageGenerationOptions.md @@ -0,0 +1,77 @@ +--- +id: ImageGenerationOptions +title: ImageGenerationOptions +--- + +# Interface: ImageGenerationOptions\ + +Defined in: [types.ts:787](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L787) + +Options for image generation. +These are the common options supported across providers. 
+ +## Type Parameters + +### TProviderOptions + +`TProviderOptions` *extends* `object` = `object` + +## Properties + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:791](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L791) + +The model to use for image generation + +*** + +### modelOptions? + +```ts +optional modelOptions: TProviderOptions; +``` + +Defined in: [types.ts:799](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L799) + +Model-specific options for image generation + +*** + +### numberOfImages? + +```ts +optional numberOfImages: number; +``` + +Defined in: [types.ts:795](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L795) + +Number of images to generate (default: 1) + +*** + +### prompt + +```ts +prompt: string; +``` + +Defined in: [types.ts:793](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L793) + +Text description of the desired image(s) + +*** + +### size? 
+ +```ts +optional size: string; +``` + +Defined in: [types.ts:797](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L797) + +Image size in WIDTHxHEIGHT format (e.g., "1024x1024") diff --git a/docs/reference/interfaces/ImageGenerationResult.md b/docs/reference/interfaces/ImageGenerationResult.md new file mode 100644 index 00000000..060d9b22 --- /dev/null +++ b/docs/reference/interfaces/ImageGenerationResult.md @@ -0,0 +1,76 @@ +--- +id: ImageGenerationResult +title: ImageGenerationResult +--- + +# Interface: ImageGenerationResult + +Defined in: [types.ts:817](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L817) + +Result of image generation + +## Properties + +### id + +```ts +id: string; +``` + +Defined in: [types.ts:819](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L819) + +Unique identifier for the generation + +*** + +### images + +```ts +images: GeneratedImage[]; +``` + +Defined in: [types.ts:823](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L823) + +Array of generated images + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:821](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L821) + +Model used for generation + +*** + +### usage? + +```ts +optional usage: object; +``` + +Defined in: [types.ts:825](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L825) + +Token usage information (if available) + +#### inputTokens? + +```ts +optional inputTokens: number; +``` + +#### outputTokens? + +```ts +optional outputTokens: number; +``` + +#### totalTokens? 
+ +```ts +optional totalTokens: number; +``` diff --git a/docs/reference/interfaces/ImagePart.md b/docs/reference/interfaces/ImagePart.md index 25e0f890..6ba2d826 100644 --- a/docs/reference/interfaces/ImagePart.md +++ b/docs/reference/interfaces/ImagePart.md @@ -5,7 +5,7 @@ title: ImagePart # Interface: ImagePart\ -Defined in: [types.ts:108](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L108) +Defined in: [types.ts:184](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L184) Image content part for multimodal messages. @@ -25,7 +25,7 @@ Provider-specific metadata type (e.g., OpenAI's detail level) optional metadata: TMetadata; ``` -Defined in: [types.ts:113](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L113) +Defined in: [types.ts:189](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L189) Provider-specific metadata (e.g., OpenAI's detail: 'auto' | 'low' | 'high') @@ -37,7 +37,7 @@ Provider-specific metadata (e.g., OpenAI's detail: 'auto' | 'low' | 'high') source: ContentPartSource; ``` -Defined in: [types.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L111) +Defined in: [types.ts:187](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L187) Source of the image content @@ -49,4 +49,4 @@ Source of the image content type: "image"; ``` -Defined in: [types.ts:109](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L109) +Defined in: [types.ts:185](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L185) diff --git a/docs/reference/interfaces/InternalToolCallState.md b/docs/reference/interfaces/InternalToolCallState.md index e8607bce..2f8e5014 100644 --- a/docs/reference/interfaces/InternalToolCallState.md +++ b/docs/reference/interfaces/InternalToolCallState.md @@ -5,7 +5,7 @@ title: InternalToolCallState # Interface: InternalToolCallState 
-Defined in: [stream/types.ts:31](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L31) +Defined in: [activities/chat/stream/types.ts:21](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L21) Internal state for a tool call being tracked @@ -17,7 +17,7 @@ Internal state for a tool call being tracked arguments: string; ``` -Defined in: [stream/types.ts:34](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L34) +Defined in: [activities/chat/stream/types.ts:24](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L24) *** @@ -27,7 +27,7 @@ Defined in: [stream/types.ts:34](https://github.com/TanStack/ai/blob/main/packag id: string; ``` -Defined in: [stream/types.ts:32](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L32) +Defined in: [activities/chat/stream/types.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L22) *** @@ -37,7 +37,7 @@ Defined in: [stream/types.ts:32](https://github.com/TanStack/ai/blob/main/packag index: number; ``` -Defined in: [stream/types.ts:37](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L37) +Defined in: [activities/chat/stream/types.ts:27](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L27) *** @@ -47,7 +47,7 @@ Defined in: [stream/types.ts:37](https://github.com/TanStack/ai/blob/main/packag name: string; ``` -Defined in: [stream/types.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L33) +Defined in: [activities/chat/stream/types.ts:23](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L23) *** @@ -57,7 +57,7 @@ Defined in: [stream/types.ts:33](https://github.com/TanStack/ai/blob/main/packag optional 
parsedArguments: any; ``` -Defined in: [stream/types.ts:36](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L36) +Defined in: [activities/chat/stream/types.ts:26](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L26) *** @@ -67,4 +67,4 @@ Defined in: [stream/types.ts:36](https://github.com/TanStack/ai/blob/main/packag state: ToolCallState; ``` -Defined in: [stream/types.ts:35](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L35) +Defined in: [activities/chat/stream/types.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L25) diff --git a/docs/reference/interfaces/JSONParser.md b/docs/reference/interfaces/JSONParser.md index 228cd1a2..b0b665af 100644 --- a/docs/reference/interfaces/JSONParser.md +++ b/docs/reference/interfaces/JSONParser.md @@ -5,7 +5,7 @@ title: JSONParser # Interface: JSONParser -Defined in: [stream/json-parser.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/json-parser.ts#L12) +Defined in: [activities/chat/stream/json-parser.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/json-parser.ts#L12) JSON Parser interface - allows for custom parser implementations @@ -17,7 +17,7 @@ JSON Parser interface - allows for custom parser implementations parse: (jsonString) => any; ``` -Defined in: [stream/json-parser.ts:18](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/json-parser.ts#L18) +Defined in: [activities/chat/stream/json-parser.ts:18](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/json-parser.ts#L18) Parse a JSON string (may be incomplete/partial) diff --git a/docs/reference/interfaces/JSONSchema.md b/docs/reference/interfaces/JSONSchema.md index 6c86f49b..de9fdfb7 100644 --- a/docs/reference/interfaces/JSONSchema.md +++ 
b/docs/reference/interfaces/JSONSchema.md @@ -5,7 +5,7 @@ title: JSONSchema # Interface: JSONSchema -Defined in: [types.ts:9](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L9) +Defined in: [types.ts:85](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L85) JSON Schema type for defining tool input/output schemas as raw JSON Schema objects. This allows tools to be defined without Zod when you have JSON Schema definitions available. @@ -24,7 +24,7 @@ This allows tools to be defined without Zod when you have JSON Schema definition optional $defs: Record; ``` -Defined in: [types.ts:19](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L19) +Defined in: [types.ts:95](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L95) *** @@ -34,7 +34,7 @@ Defined in: [types.ts:19](https://github.com/TanStack/ai/blob/main/packages/type optional $ref: string; ``` -Defined in: [types.ts:18](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L18) +Defined in: [types.ts:94](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L94) *** @@ -44,7 +44,7 @@ Defined in: [types.ts:18](https://github.com/TanStack/ai/blob/main/packages/type optional additionalItems: boolean | JSONSchema; ``` -Defined in: [types.ts:40](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L40) +Defined in: [types.ts:116](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L116) *** @@ -54,7 +54,7 @@ Defined in: [types.ts:40](https://github.com/TanStack/ai/blob/main/packages/type optional additionalProperties: boolean | JSONSchema; ``` -Defined in: [types.ts:39](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L39) +Defined in: [types.ts:115](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L115) *** @@ -64,7 +64,7 @@ Defined in: 
[types.ts:39](https://github.com/TanStack/ai/blob/main/packages/type optional allOf: JSONSchema[]; ``` -Defined in: [types.ts:21](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L21) +Defined in: [types.ts:97](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L97) *** @@ -74,7 +74,7 @@ Defined in: [types.ts:21](https://github.com/TanStack/ai/blob/main/packages/type optional anyOf: JSONSchema[]; ``` -Defined in: [types.ts:22](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L22) +Defined in: [types.ts:98](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L98) *** @@ -84,7 +84,7 @@ Defined in: [types.ts:22](https://github.com/TanStack/ai/blob/main/packages/type optional const: any; ``` -Defined in: [types.ts:15](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L15) +Defined in: [types.ts:91](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L91) *** @@ -94,7 +94,7 @@ Defined in: [types.ts:15](https://github.com/TanStack/ai/blob/main/packages/type optional default: any; ``` -Defined in: [types.ts:17](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L17) +Defined in: [types.ts:93](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L93) *** @@ -104,7 +104,7 @@ Defined in: [types.ts:17](https://github.com/TanStack/ai/blob/main/packages/type optional definitions: Record; ``` -Defined in: [types.ts:20](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L20) +Defined in: [types.ts:96](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L96) *** @@ -114,7 +114,7 @@ Defined in: [types.ts:20](https://github.com/TanStack/ai/blob/main/packages/type optional description: string; ``` -Defined in: [types.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L16) +Defined in: 
[types.ts:92](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L92) *** @@ -124,7 +124,7 @@ Defined in: [types.ts:16](https://github.com/TanStack/ai/blob/main/packages/type optional else: JSONSchema; ``` -Defined in: [types.ts:27](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L27) +Defined in: [types.ts:103](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L103) *** @@ -134,7 +134,7 @@ Defined in: [types.ts:27](https://github.com/TanStack/ai/blob/main/packages/type optional enum: any[]; ``` -Defined in: [types.ts:14](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L14) +Defined in: [types.ts:90](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L90) *** @@ -144,7 +144,7 @@ Defined in: [types.ts:14](https://github.com/TanStack/ai/blob/main/packages/type optional examples: any[]; ``` -Defined in: [types.ts:46](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L46) +Defined in: [types.ts:122](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L122) *** @@ -154,7 +154,7 @@ Defined in: [types.ts:46](https://github.com/TanStack/ai/blob/main/packages/type optional exclusiveMaximum: number; ``` -Defined in: [types.ts:31](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L31) +Defined in: [types.ts:107](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L107) *** @@ -164,7 +164,7 @@ Defined in: [types.ts:31](https://github.com/TanStack/ai/blob/main/packages/type optional exclusiveMinimum: number; ``` -Defined in: [types.ts:30](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L30) +Defined in: [types.ts:106](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L106) *** @@ -174,7 +174,7 @@ Defined in: [types.ts:30](https://github.com/TanStack/ai/blob/main/packages/type optional 
format: string; ``` -Defined in: [types.ts:35](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L35) +Defined in: [types.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L111) *** @@ -184,7 +184,7 @@ Defined in: [types.ts:35](https://github.com/TanStack/ai/blob/main/packages/type optional if: JSONSchema; ``` -Defined in: [types.ts:25](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L25) +Defined in: [types.ts:101](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L101) *** @@ -194,7 +194,7 @@ Defined in: [types.ts:25](https://github.com/TanStack/ai/blob/main/packages/type optional items: JSONSchema | JSONSchema[]; ``` -Defined in: [types.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L12) +Defined in: [types.ts:88](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L88) *** @@ -204,7 +204,7 @@ Defined in: [types.ts:12](https://github.com/TanStack/ai/blob/main/packages/type optional maximum: number; ``` -Defined in: [types.ts:29](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L29) +Defined in: [types.ts:105](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L105) *** @@ -214,7 +214,7 @@ Defined in: [types.ts:29](https://github.com/TanStack/ai/blob/main/packages/type optional maxItems: number; ``` -Defined in: [types.ts:37](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L37) +Defined in: [types.ts:113](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L113) *** @@ -224,7 +224,7 @@ Defined in: [types.ts:37](https://github.com/TanStack/ai/blob/main/packages/type optional maxLength: number; ``` -Defined in: [types.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L33) +Defined in: 
[types.ts:109](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L109) *** @@ -234,7 +234,7 @@ Defined in: [types.ts:33](https://github.com/TanStack/ai/blob/main/packages/type optional maxProperties: number; ``` -Defined in: [types.ts:44](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L44) +Defined in: [types.ts:120](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L120) *** @@ -244,7 +244,7 @@ Defined in: [types.ts:44](https://github.com/TanStack/ai/blob/main/packages/type optional minimum: number; ``` -Defined in: [types.ts:28](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L28) +Defined in: [types.ts:104](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L104) *** @@ -254,7 +254,7 @@ Defined in: [types.ts:28](https://github.com/TanStack/ai/blob/main/packages/type optional minItems: number; ``` -Defined in: [types.ts:36](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L36) +Defined in: [types.ts:112](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L112) *** @@ -264,7 +264,7 @@ Defined in: [types.ts:36](https://github.com/TanStack/ai/blob/main/packages/type optional minLength: number; ``` -Defined in: [types.ts:32](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L32) +Defined in: [types.ts:108](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L108) *** @@ -274,7 +274,7 @@ Defined in: [types.ts:32](https://github.com/TanStack/ai/blob/main/packages/type optional minProperties: number; ``` -Defined in: [types.ts:43](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L43) +Defined in: [types.ts:119](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L119) *** @@ -284,7 +284,7 @@ Defined in: [types.ts:43](https://github.com/TanStack/ai/blob/main/packages/type 
optional not: JSONSchema; ``` -Defined in: [types.ts:24](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L24) +Defined in: [types.ts:100](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L100) *** @@ -294,7 +294,7 @@ Defined in: [types.ts:24](https://github.com/TanStack/ai/blob/main/packages/type optional oneOf: JSONSchema[]; ``` -Defined in: [types.ts:23](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L23) +Defined in: [types.ts:99](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L99) *** @@ -304,7 +304,7 @@ Defined in: [types.ts:23](https://github.com/TanStack/ai/blob/main/packages/type optional pattern: string; ``` -Defined in: [types.ts:34](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L34) +Defined in: [types.ts:110](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L110) *** @@ -314,7 +314,7 @@ Defined in: [types.ts:34](https://github.com/TanStack/ai/blob/main/packages/type optional patternProperties: Record; ``` -Defined in: [types.ts:41](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L41) +Defined in: [types.ts:117](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L117) *** @@ -324,7 +324,7 @@ Defined in: [types.ts:41](https://github.com/TanStack/ai/blob/main/packages/type optional properties: Record; ``` -Defined in: [types.ts:11](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L11) +Defined in: [types.ts:87](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L87) *** @@ -334,7 +334,7 @@ Defined in: [types.ts:11](https://github.com/TanStack/ai/blob/main/packages/type optional propertyNames: JSONSchema; ``` -Defined in: [types.ts:42](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L42) +Defined in: 
[types.ts:118](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L118) *** @@ -344,7 +344,7 @@ Defined in: [types.ts:42](https://github.com/TanStack/ai/blob/main/packages/type optional required: string[]; ``` -Defined in: [types.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L13) +Defined in: [types.ts:89](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L89) *** @@ -354,7 +354,7 @@ Defined in: [types.ts:13](https://github.com/TanStack/ai/blob/main/packages/type optional then: JSONSchema; ``` -Defined in: [types.ts:26](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L26) +Defined in: [types.ts:102](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L102) *** @@ -364,7 +364,7 @@ Defined in: [types.ts:26](https://github.com/TanStack/ai/blob/main/packages/type optional title: string; ``` -Defined in: [types.ts:45](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L45) +Defined in: [types.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L121) *** @@ -374,7 +374,7 @@ Defined in: [types.ts:45](https://github.com/TanStack/ai/blob/main/packages/type optional type: string | string[]; ``` -Defined in: [types.ts:10](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L10) +Defined in: [types.ts:86](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L86) *** @@ -384,4 +384,4 @@ Defined in: [types.ts:10](https://github.com/TanStack/ai/blob/main/packages/type optional uniqueItems: boolean; ``` -Defined in: [types.ts:38](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L38) +Defined in: [types.ts:114](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L114) diff --git a/docs/reference/interfaces/ModelMessage.md b/docs/reference/interfaces/ModelMessage.md index 
075cb3c5..9b50467c 100644 --- a/docs/reference/interfaces/ModelMessage.md +++ b/docs/reference/interfaces/ModelMessage.md @@ -5,7 +5,7 @@ title: ModelMessage # Interface: ModelMessage\ -Defined in: [types.ts:220](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L220) +Defined in: [types.ts:283](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L283) ## Type Parameters @@ -21,7 +21,7 @@ Defined in: [types.ts:220](https://github.com/TanStack/ai/blob/main/packages/typ content: TContent; ``` -Defined in: [types.ts:227](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L227) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) *** @@ -31,7 +31,7 @@ Defined in: [types.ts:227](https://github.com/TanStack/ai/blob/main/packages/typ optional name: string; ``` -Defined in: [types.ts:228](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L228) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) *** @@ -41,7 +41,7 @@ Defined in: [types.ts:228](https://github.com/TanStack/ai/blob/main/packages/typ role: "user" | "assistant" | "tool"; ``` -Defined in: [types.ts:226](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L226) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) *** @@ -51,7 +51,7 @@ Defined in: [types.ts:226](https://github.com/TanStack/ai/blob/main/packages/typ optional toolCallId: string; ``` -Defined in: [types.ts:230](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L230) +Defined in: [types.ts:293](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L293) *** @@ -61,4 +61,4 @@ Defined in: [types.ts:230](https://github.com/TanStack/ai/blob/main/packages/typ optional toolCalls: ToolCall[]; ``` -Defined in: 
[types.ts:229](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L229) +Defined in: [types.ts:292](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L292) diff --git a/docs/reference/interfaces/ProcessorResult.md b/docs/reference/interfaces/ProcessorResult.md index 9fb65250..f6531c94 100644 --- a/docs/reference/interfaces/ProcessorResult.md +++ b/docs/reference/interfaces/ProcessorResult.md @@ -5,7 +5,7 @@ title: ProcessorResult # Interface: ProcessorResult -Defined in: [stream/types.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L61) +Defined in: [activities/chat/stream/types.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L51) Result from processing a stream @@ -17,7 +17,7 @@ Result from processing a stream content: string; ``` -Defined in: [stream/types.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L62) +Defined in: [activities/chat/stream/types.ts:52](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L52) *** @@ -27,7 +27,7 @@ Defined in: [stream/types.ts:62](https://github.com/TanStack/ai/blob/main/packag optional finishReason: string | null; ``` -Defined in: [stream/types.ts:65](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L65) +Defined in: [activities/chat/stream/types.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L55) *** @@ -37,7 +37,7 @@ Defined in: [stream/types.ts:65](https://github.com/TanStack/ai/blob/main/packag optional thinking: string; ``` -Defined in: [stream/types.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L63) +Defined in: 
[activities/chat/stream/types.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L53) *** @@ -47,4 +47,4 @@ Defined in: [stream/types.ts:63](https://github.com/TanStack/ai/blob/main/packag optional toolCalls: ToolCall[]; ``` -Defined in: [stream/types.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L64) +Defined in: [activities/chat/stream/types.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L54) diff --git a/docs/reference/interfaces/ProcessorState.md b/docs/reference/interfaces/ProcessorState.md index 7ef5bba6..c5efbbd2 100644 --- a/docs/reference/interfaces/ProcessorState.md +++ b/docs/reference/interfaces/ProcessorState.md @@ -5,7 +5,7 @@ title: ProcessorState # Interface: ProcessorState -Defined in: [stream/types.ts:71](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L71) +Defined in: [activities/chat/stream/types.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L61) Current state of the processor @@ -17,7 +17,7 @@ Current state of the processor content: string; ``` -Defined in: [stream/types.ts:72](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L72) +Defined in: [activities/chat/stream/types.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L62) *** @@ -27,7 +27,7 @@ Defined in: [stream/types.ts:72](https://github.com/TanStack/ai/blob/main/packag done: boolean; ``` -Defined in: [stream/types.ts:77](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L77) +Defined in: [activities/chat/stream/types.ts:67](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L67) *** @@ -37,7 +37,7 @@ Defined in: 
[stream/types.ts:77](https://github.com/TanStack/ai/blob/main/packag finishReason: string | null; ``` -Defined in: [stream/types.ts:76](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L76) +Defined in: [activities/chat/stream/types.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L66) *** @@ -47,7 +47,7 @@ Defined in: [stream/types.ts:76](https://github.com/TanStack/ai/blob/main/packag thinking: string; ``` -Defined in: [stream/types.ts:73](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L73) +Defined in: [activities/chat/stream/types.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L63) *** @@ -57,7 +57,7 @@ Defined in: [stream/types.ts:73](https://github.com/TanStack/ai/blob/main/packag toolCallOrder: string[]; ``` -Defined in: [stream/types.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L75) +Defined in: [activities/chat/stream/types.ts:65](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L65) *** @@ -67,4 +67,4 @@ Defined in: [stream/types.ts:75](https://github.com/TanStack/ai/blob/main/packag toolCalls: Map; ``` -Defined in: [stream/types.ts:74](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L74) +Defined in: [activities/chat/stream/types.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/types.ts#L64) diff --git a/docs/reference/interfaces/ResponseFormat.md b/docs/reference/interfaces/ResponseFormat.md index 5e141882..9bebaeeb 100644 --- a/docs/reference/interfaces/ResponseFormat.md +++ b/docs/reference/interfaces/ResponseFormat.md @@ -5,7 +5,7 @@ title: ResponseFormat # Interface: ResponseFormat\ -Defined in: 
[types.ts:438](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L438) +Defined in: [types.ts:495](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L495) Structured output format specification. @@ -33,7 +33,7 @@ TypeScript type of the expected data structure (for type safety) optional __data: TData; ``` -Defined in: [types.ts:516](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L516) +Defined in: [types.ts:573](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L573) **`Internal`** @@ -50,7 +50,7 @@ Allows the SDK to know what type to expect when parsing the response. optional json_schema: object; ``` -Defined in: [types.ts:455](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L455) +Defined in: [types.ts:512](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L512) JSON schema specification (required when type is "json_schema"). @@ -139,7 +139,7 @@ https://platform.openai.com/docs/guides/structured-outputs#strict-mode type: "json_object" | "json_schema"; ``` -Defined in: [types.ts:447](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L447) +Defined in: [types.ts:504](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L504) Type of structured output. 
diff --git a/docs/reference/interfaces/ServerTool.md b/docs/reference/interfaces/ServerTool.md index 00c7bbbb..a0a66e4a 100644 --- a/docs/reference/interfaces/ServerTool.md +++ b/docs/reference/interfaces/ServerTool.md @@ -5,7 +5,7 @@ title: ServerTool # Interface: ServerTool\ -Defined in: [tools/tool-definition.ts:7](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L7) +Defined in: [activities/chat/tools/tool-definition.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L12) Marker type for server-side tools @@ -35,7 +35,7 @@ Marker type for server-side tools __toolSide: "server"; ``` -Defined in: [tools/tool-definition.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L12) +Defined in: [activities/chat/tools/tool-definition.ts:17](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L17) *** @@ -45,7 +45,7 @@ Defined in: [tools/tool-definition.ts:12](https://github.com/TanStack/ai/blob/ma description: string; ``` -Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343) +Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400) Clear description of what the tool does. @@ -70,7 +70,7 @@ Be specific about what the tool does, what parameters it needs, and what it retu optional execute: (args) => any; ``` -Defined in: [types.ts:414](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L414) +Defined in: [types.ts:471](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L471) Optional function to execute when the model calls this tool. 
@@ -114,7 +114,7 @@ execute: async (args) => { optional inputSchema: TInput; ``` -Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L375) +Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L432) Schema describing the tool's input parameters. @@ -163,7 +163,7 @@ z.object({ optional metadata: Record; ``` -Defined in: [types.ts:420](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L420) +Defined in: [types.ts:477](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L477) Additional metadata for adapters or custom extensions @@ -179,7 +179,7 @@ Additional metadata for adapters or custom extensions name: TName; ``` -Defined in: [types.ts:333](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L333) +Defined in: [types.ts:390](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L390) Unique name of the tool (used by the model to call it). @@ -204,7 +204,7 @@ Must be unique within the tools array. optional needsApproval: boolean; ``` -Defined in: [types.ts:417](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L417) +Defined in: [types.ts:474](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L474) If true, tool execution requires user approval before running. Works with both server and client tools. @@ -220,7 +220,7 @@ If true, tool execution requires user approval before running. Works with both s optional outputSchema: TOutput; ``` -Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395) +Defined in: [types.ts:452](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L452) Optional schema for validating tool output. 
diff --git a/docs/reference/interfaces/StreamProcessorEvents.md b/docs/reference/interfaces/StreamProcessorEvents.md index 313e1987..a3546357 100644 --- a/docs/reference/interfaces/StreamProcessorEvents.md +++ b/docs/reference/interfaces/StreamProcessorEvents.md @@ -5,7 +5,7 @@ title: StreamProcessorEvents # Interface: StreamProcessorEvents -Defined in: [stream/processor.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L51) +Defined in: [activities/chat/stream/processor.ts:48](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L48) Events emitted by the StreamProcessor @@ -17,7 +17,7 @@ Events emitted by the StreamProcessor optional onApprovalRequest: (args) => void; ``` -Defined in: [stream/processor.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L66) +Defined in: [activities/chat/stream/processor.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L63) #### Parameters @@ -51,7 +51,7 @@ Defined in: [stream/processor.ts:66](https://github.com/TanStack/ai/blob/main/pa optional onError: (error) => void; ``` -Defined in: [stream/processor.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L58) +Defined in: [activities/chat/stream/processor.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L55) #### Parameters @@ -71,7 +71,7 @@ Defined in: [stream/processor.ts:58](https://github.com/TanStack/ai/blob/main/pa optional onMessagesChange: (messages) => void; ``` -Defined in: [stream/processor.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L53) +Defined in: [activities/chat/stream/processor.ts:50](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L50) #### Parameters @@ 
-91,7 +91,7 @@ Defined in: [stream/processor.ts:53](https://github.com/TanStack/ai/blob/main/pa optional onStreamEnd: (message) => void; ``` -Defined in: [stream/processor.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L57) +Defined in: [activities/chat/stream/processor.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L54) #### Parameters @@ -111,7 +111,7 @@ Defined in: [stream/processor.ts:57](https://github.com/TanStack/ai/blob/main/pa optional onStreamStart: () => void; ``` -Defined in: [stream/processor.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L56) +Defined in: [activities/chat/stream/processor.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L53) #### Returns @@ -125,7 +125,7 @@ Defined in: [stream/processor.ts:56](https://github.com/TanStack/ai/blob/main/pa optional onTextUpdate: (messageId, content) => void; ``` -Defined in: [stream/processor.ts:74](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L74) +Defined in: [activities/chat/stream/processor.ts:71](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L71) #### Parameters @@ -149,7 +149,7 @@ Defined in: [stream/processor.ts:74](https://github.com/TanStack/ai/blob/main/pa optional onThinkingUpdate: (messageId, content) => void; ``` -Defined in: [stream/processor.ts:81](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L81) +Defined in: [activities/chat/stream/processor.ts:78](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L78) #### Parameters @@ -173,7 +173,7 @@ Defined in: [stream/processor.ts:81](https://github.com/TanStack/ai/blob/main/pa optional onToolCall: (args) => void; ``` -Defined in: 
[stream/processor.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L61) +Defined in: [activities/chat/stream/processor.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L58) #### Parameters @@ -203,7 +203,7 @@ Defined in: [stream/processor.ts:61](https://github.com/TanStack/ai/blob/main/pa optional onToolCallStateChange: (messageId, toolCallId, state, args) => void; ``` -Defined in: [stream/processor.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L75) +Defined in: [activities/chat/stream/processor.ts:72](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L72) #### Parameters diff --git a/docs/reference/interfaces/StreamProcessorHandlers.md b/docs/reference/interfaces/StreamProcessorHandlers.md index c4e50f56..32d2cd4d 100644 --- a/docs/reference/interfaces/StreamProcessorHandlers.md +++ b/docs/reference/interfaces/StreamProcessorHandlers.md @@ -5,7 +5,7 @@ title: StreamProcessorHandlers # Interface: StreamProcessorHandlers -Defined in: [stream/processor.ts:88](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L88) +Defined in: [activities/chat/stream/processor.ts:85](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L85) Legacy handlers for backward compatibility These are the old callback-style handlers @@ -18,7 +18,7 @@ These are the old callback-style handlers optional onApprovalRequested: (toolCallId, toolName, input, approvalId) => void; ``` -Defined in: [stream/processor.ts:119](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L119) +Defined in: [activities/chat/stream/processor.ts:116](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L116) #### Parameters @@ -50,7 +50,7 
@@ Defined in: [stream/processor.ts:119](https://github.com/TanStack/ai/blob/main/p optional onError: (error) => void; ``` -Defined in: [stream/processor.ts:133](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L133) +Defined in: [activities/chat/stream/processor.ts:130](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L130) #### Parameters @@ -76,7 +76,7 @@ Defined in: [stream/processor.ts:133](https://github.com/TanStack/ai/blob/main/p optional onStreamEnd: (content, toolCalls?) => void; ``` -Defined in: [stream/processor.ts:132](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L132) +Defined in: [activities/chat/stream/processor.ts:129](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L129) #### Parameters @@ -100,7 +100,7 @@ Defined in: [stream/processor.ts:132](https://github.com/TanStack/ai/blob/main/p optional onTextUpdate: (content) => void; ``` -Defined in: [stream/processor.ts:89](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L89) +Defined in: [activities/chat/stream/processor.ts:86](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L86) #### Parameters @@ -120,7 +120,7 @@ Defined in: [stream/processor.ts:89](https://github.com/TanStack/ai/blob/main/pa optional onThinkingUpdate: (content) => void; ``` -Defined in: [stream/processor.ts:90](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L90) +Defined in: [activities/chat/stream/processor.ts:87](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L87) #### Parameters @@ -140,7 +140,7 @@ Defined in: [stream/processor.ts:90](https://github.com/TanStack/ai/blob/main/pa optional onToolCallComplete: (index, id, name, args) => void; ``` 
-Defined in: [stream/processor.ts:95](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L95) +Defined in: [activities/chat/stream/processor.ts:92](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L92) #### Parameters @@ -172,7 +172,7 @@ Defined in: [stream/processor.ts:95](https://github.com/TanStack/ai/blob/main/pa optional onToolCallDelta: (index, args) => void; ``` -Defined in: [stream/processor.ts:94](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L94) +Defined in: [activities/chat/stream/processor.ts:91](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L91) #### Parameters @@ -196,7 +196,7 @@ Defined in: [stream/processor.ts:94](https://github.com/TanStack/ai/blob/main/pa optional onToolCallStart: (index, id, name) => void; ``` -Defined in: [stream/processor.ts:93](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L93) +Defined in: [activities/chat/stream/processor.ts:90](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L90) #### Parameters @@ -224,7 +224,7 @@ Defined in: [stream/processor.ts:93](https://github.com/TanStack/ai/blob/main/pa optional onToolCallStateChange: (index, id, name, state, args, parsedArgs?) 
=> void; ``` -Defined in: [stream/processor.ts:101](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L101) +Defined in: [activities/chat/stream/processor.ts:98](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L98) #### Parameters @@ -264,7 +264,7 @@ Defined in: [stream/processor.ts:101](https://github.com/TanStack/ai/blob/main/p optional onToolInputAvailable: (toolCallId, toolName, input) => void; ``` -Defined in: [stream/processor.ts:125](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L125) +Defined in: [activities/chat/stream/processor.ts:122](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L122) #### Parameters @@ -292,7 +292,7 @@ Defined in: [stream/processor.ts:125](https://github.com/TanStack/ai/blob/main/p optional onToolResultStateChange: (toolCallId, content, state, error?) => void; ``` -Defined in: [stream/processor.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L111) +Defined in: [activities/chat/stream/processor.ts:108](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L108) #### Parameters diff --git a/docs/reference/interfaces/StreamProcessorOptions.md b/docs/reference/interfaces/StreamProcessorOptions.md index ebe37672..61668336 100644 --- a/docs/reference/interfaces/StreamProcessorOptions.md +++ b/docs/reference/interfaces/StreamProcessorOptions.md @@ -5,7 +5,7 @@ title: StreamProcessorOptions # Interface: StreamProcessorOptions -Defined in: [stream/processor.ts:139](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L139) +Defined in: [activities/chat/stream/processor.ts:136](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L136) Options for StreamProcessor @@ -17,7 
+17,7 @@ Options for StreamProcessor optional chunkStrategy: ChunkStrategy; ``` -Defined in: [stream/processor.ts:140](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L140) +Defined in: [activities/chat/stream/processor.ts:137](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L137) *** @@ -27,7 +27,7 @@ Defined in: [stream/processor.ts:140](https://github.com/TanStack/ai/blob/main/p optional events: StreamProcessorEvents; ``` -Defined in: [stream/processor.ts:142](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L142) +Defined in: [activities/chat/stream/processor.ts:139](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L139) New event-driven handlers @@ -39,7 +39,7 @@ New event-driven handlers optional handlers: StreamProcessorHandlers; ``` -Defined in: [stream/processor.ts:144](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L144) +Defined in: [activities/chat/stream/processor.ts:141](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L141) Legacy callback handlers (for backward compatibility) @@ -51,7 +51,7 @@ Legacy callback handlers (for backward compatibility) optional initialMessages: UIMessage[]; ``` -Defined in: [stream/processor.ts:151](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L151) +Defined in: [activities/chat/stream/processor.ts:148](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L148) Initial messages to populate the processor @@ -63,7 +63,7 @@ Initial messages to populate the processor optional jsonParser: object; ``` -Defined in: [stream/processor.ts:145](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L145) +Defined in: 
[activities/chat/stream/processor.ts:142](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L142) #### parse() @@ -89,6 +89,6 @@ parse: (jsonString) => any; optional recording: boolean; ``` -Defined in: [stream/processor.ts:149](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L149) +Defined in: [activities/chat/stream/processor.ts:146](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L146) Enable recording for replay testing diff --git a/docs/reference/interfaces/SummarizationOptions.md b/docs/reference/interfaces/SummarizationOptions.md index 575580e2..8f67c7b6 100644 --- a/docs/reference/interfaces/SummarizationOptions.md +++ b/docs/reference/interfaces/SummarizationOptions.md @@ -5,7 +5,7 @@ title: SummarizationOptions # Interface: SummarizationOptions -Defined in: [types.ts:697](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L697) +Defined in: [types.ts:760](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L760) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:697](https://github.com/TanStack/ai/blob/main/packages/typ optional focus: string[]; ``` -Defined in: [types.ts:702](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L702) +Defined in: [types.ts:765](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L765) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:702](https://github.com/TanStack/ai/blob/main/packages/typ optional maxLength: number; ``` -Defined in: [types.ts:700](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L700) +Defined in: [types.ts:763](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L763) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:700](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: 
[types.ts:698](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L698) +Defined in: [types.ts:761](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L761) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:698](https://github.com/TanStack/ai/blob/main/packages/typ optional style: "bullet-points" | "paragraph" | "concise"; ``` -Defined in: [types.ts:701](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L701) +Defined in: [types.ts:764](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L764) *** @@ -55,4 +55,4 @@ Defined in: [types.ts:701](https://github.com/TanStack/ai/blob/main/packages/typ text: string; ``` -Defined in: [types.ts:699](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L699) +Defined in: [types.ts:762](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L762) diff --git a/docs/reference/interfaces/SummarizationResult.md b/docs/reference/interfaces/SummarizationResult.md index 390814d8..a30ed860 100644 --- a/docs/reference/interfaces/SummarizationResult.md +++ b/docs/reference/interfaces/SummarizationResult.md @@ -5,7 +5,7 @@ title: SummarizationResult # Interface: SummarizationResult -Defined in: [types.ts:705](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L705) +Defined in: [types.ts:768](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L768) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:705](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:706](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L706) +Defined in: [types.ts:769](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L769) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:706](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: 
[types.ts:707](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L707) +Defined in: [types.ts:770](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L770) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:707](https://github.com/TanStack/ai/blob/main/packages/typ summary: string; ``` -Defined in: [types.ts:708](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L708) +Defined in: [types.ts:771](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L771) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:708](https://github.com/TanStack/ai/blob/main/packages/typ usage: object; ``` -Defined in: [types.ts:709](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L709) +Defined in: [types.ts:772](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L772) #### completionTokens diff --git a/docs/reference/interfaces/TTSOptions.md b/docs/reference/interfaces/TTSOptions.md new file mode 100644 index 00000000..5ed2f309 --- /dev/null +++ b/docs/reference/interfaces/TTSOptions.md @@ -0,0 +1,89 @@ +--- +id: TTSOptions +title: TTSOptions +--- + +# Interface: TTSOptions\ + +Defined in: [types.ts:907](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L907) + +Options for text-to-speech generation. +These are the common options supported across providers. + +## Type Parameters + +### TProviderOptions + +`TProviderOptions` *extends* `object` = `object` + +## Properties + +### format? 
+ +```ts +optional format: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm"; +``` + +Defined in: [types.ts:915](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L915) + +The output audio format + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:909](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L909) + +The model to use for TTS generation + +*** + +### modelOptions? + +```ts +optional modelOptions: TProviderOptions; +``` + +Defined in: [types.ts:919](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L919) + +Model-specific options for TTS generation + +*** + +### speed? + +```ts +optional speed: number; +``` + +Defined in: [types.ts:917](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L917) + +The speed of the generated audio (0.25 to 4.0) + +*** + +### text + +```ts +text: string; +``` + +Defined in: [types.ts:911](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L911) + +The text to convert to speech + +*** + +### voice? + +```ts +optional voice: string; +``` + +Defined in: [types.ts:913](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L913) + +The voice to use for generation diff --git a/docs/reference/interfaces/TTSResult.md b/docs/reference/interfaces/TTSResult.md new file mode 100644 index 00000000..5a3bb4e0 --- /dev/null +++ b/docs/reference/interfaces/TTSResult.md @@ -0,0 +1,82 @@ +--- +id: TTSResult +title: TTSResult +--- + +# Interface: TTSResult + +Defined in: [types.ts:925](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L925) + +Result of text-to-speech generation. + +## Properties + +### audio + +```ts +audio: string; +``` + +Defined in: [types.ts:931](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L931) + +Base64-encoded audio data + +*** + +### contentType? 
+ +```ts +optional contentType: string; +``` + +Defined in: [types.ts:937](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L937) + +Content type of the audio (e.g., 'audio/mp3') + +*** + +### duration? + +```ts +optional duration: number; +``` + +Defined in: [types.ts:935](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L935) + +Duration of the audio in seconds, if available + +*** + +### format + +```ts +format: string; +``` + +Defined in: [types.ts:933](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L933) + +Audio format of the generated audio + +*** + +### id + +```ts +id: string; +``` + +Defined in: [types.ts:927](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L927) + +Unique identifier for the generation + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:929](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L929) + +Model used for generation diff --git a/docs/reference/interfaces/TextCompletionChunk.md b/docs/reference/interfaces/TextCompletionChunk.md new file mode 100644 index 00000000..6afadd6b --- /dev/null +++ b/docs/reference/interfaces/TextCompletionChunk.md @@ -0,0 +1,86 @@ +--- +id: TextCompletionChunk +title: TextCompletionChunk +--- + +# Interface: TextCompletionChunk + +Defined in: [types.ts:747](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L747) + +## Properties + +### content + +```ts +content: string; +``` + +Defined in: [types.ts:750](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L750) + +*** + +### finishReason? 
+ +```ts +optional finishReason: "length" | "stop" | "content_filter" | null; +``` + +Defined in: [types.ts:752](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L752) + +*** + +### id + +```ts +id: string; +``` + +Defined in: [types.ts:748](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L748) + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:749](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L749) + +*** + +### role? + +```ts +optional role: "assistant"; +``` + +Defined in: [types.ts:751](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L751) + +*** + +### usage? + +```ts +optional usage: object; +``` + +Defined in: [types.ts:753](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L753) + +#### completionTokens + +```ts +completionTokens: number; +``` + +#### promptTokens + +```ts +promptTokens: number; +``` + +#### totalTokens + +```ts +totalTokens: number; +``` diff --git a/docs/reference/interfaces/TextOptions.md b/docs/reference/interfaces/TextOptions.md new file mode 100644 index 00000000..d5dc1731 --- /dev/null +++ b/docs/reference/interfaces/TextOptions.md @@ -0,0 +1,172 @@ +--- +id: TextOptions +title: TextOptions +--- + +# Interface: TextOptions\ + +Defined in: [types.ts:605](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L605) + +Options passed into the SDK and further piped to the AI provider. + +## Type Parameters + +### TProviderOptionsSuperset + +`TProviderOptionsSuperset` *extends* `Record`\<`string`, `any`\> = `Record`\<`string`, `any`\> + +### TOutput + +`TOutput` *extends* [`ResponseFormat`](ResponseFormat.md)\<`any`\> \| `undefined` = `undefined` + +### TProviderOptionsForModel + +`TProviderOptionsForModel` = `TProviderOptionsSuperset` + +## Properties + +### abortController? 
+ +```ts +optional abortController: AbortController; +``` + +Defined in: [types.ts:644](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L644) + +AbortController for request cancellation. + +Allows you to cancel an in-progress request using an AbortController. +Useful for implementing timeouts or user-initiated cancellations. + +#### Example + +```ts +const abortController = new AbortController(); +setTimeout(() => abortController.abort(), 5000); // Cancel after 5 seconds +await chat({ ..., abortController }); +``` + +#### See + +https://developer.mozilla.org/en-US/docs/Web/API/AbortController + +*** + +### agentLoopStrategy? + +```ts +optional agentLoopStrategy: AgentLoopStrategy; +``` + +Defined in: [types.ts:614](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L614) + +*** + +### conversationId? + +```ts +optional conversationId: string; +``` + +Defined in: [types.ts:630](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L630) + +Conversation ID for correlating client and server-side devtools events. +When provided, server-side events will be linked to the client conversation in devtools. + +*** + +### messages + +```ts +messages: ModelMessage< + | string + | ContentPart[] + | null>[]; +``` + +Defined in: [types.ts:611](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L611) + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:610](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L610) + +*** + +### modelOptions? + +```ts +optional modelOptions: TProviderOptionsForModel; +``` + +Defined in: [types.ts:616](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L616) + +*** + +### options? + +```ts +optional options: CommonOptions; +``` + +Defined in: [types.ts:615](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L615) + +*** + +### output? 
+ +```ts +optional output: TOutput; +``` + +Defined in: [types.ts:618](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L618) + +*** + +### outputSchema? + +```ts +optional outputSchema: ZodType>; +``` + +Defined in: [types.ts:625](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L625) + +Zod schema for structured output. +When provided, the adapter should use the provider's native structured output API +to ensure the response conforms to this schema. +The schema will be converted to JSON Schema format before being sent to the provider. + +*** + +### request? + +```ts +optional request: Request | RequestInit; +``` + +Defined in: [types.ts:617](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L617) + +*** + +### systemPrompts? + +```ts +optional systemPrompts: string[]; +``` + +Defined in: [types.ts:613](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L613) + +*** + +### tools? 
+ +```ts +optional tools: Tool[]; +``` + +Defined in: [types.ts:612](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L612) diff --git a/docs/reference/interfaces/TextPart.md b/docs/reference/interfaces/TextPart.md index 105418d1..f51896d6 100644 --- a/docs/reference/interfaces/TextPart.md +++ b/docs/reference/interfaces/TextPart.md @@ -5,7 +5,7 @@ title: TextPart # Interface: TextPart\ -Defined in: [types.ts:236](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L236) +Defined in: [types.ts:299](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L299) Message parts - building blocks of UIMessage @@ -23,7 +23,7 @@ Message parts - building blocks of UIMessage content: string; ``` -Defined in: [types.ts:238](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L238) +Defined in: [types.ts:301](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L301) *** @@ -33,7 +33,7 @@ Defined in: [types.ts:238](https://github.com/TanStack/ai/blob/main/packages/typ optional metadata: TMetadata; ``` -Defined in: [types.ts:239](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L239) +Defined in: [types.ts:302](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L302) *** @@ -43,4 +43,4 @@ Defined in: [types.ts:239](https://github.com/TanStack/ai/blob/main/packages/typ type: "text"; ``` -Defined in: [types.ts:237](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L237) +Defined in: [types.ts:300](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L300) diff --git a/docs/reference/interfaces/ThinkingPart.md b/docs/reference/interfaces/ThinkingPart.md index 72f8e9da..7545e966 100644 --- a/docs/reference/interfaces/ThinkingPart.md +++ b/docs/reference/interfaces/ThinkingPart.md @@ -5,7 +5,7 @@ title: ThinkingPart # Interface: ThinkingPart -Defined in: 
[types.ts:266](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L266) +Defined in: [types.ts:329](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L329) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:266](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:268](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L268) +Defined in: [types.ts:331](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L331) *** @@ -25,4 +25,4 @@ Defined in: [types.ts:268](https://github.com/TanStack/ai/blob/main/packages/typ type: "thinking"; ``` -Defined in: [types.ts:267](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L267) +Defined in: [types.ts:330](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L330) diff --git a/docs/reference/interfaces/ThinkingStreamChunk.md b/docs/reference/interfaces/ThinkingStreamChunk.md index d67abbe0..78659955 100644 --- a/docs/reference/interfaces/ThinkingStreamChunk.md +++ b/docs/reference/interfaces/ThinkingStreamChunk.md @@ -5,7 +5,7 @@ title: ThinkingStreamChunk # Interface: ThinkingStreamChunk -Defined in: [types.ts:663](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L663) +Defined in: [types.ts:726](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L726) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:663](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:666](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L666) +Defined in: [types.ts:729](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L729) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:666](https://github.com/TanStack/ai/blob/main/packages/typ optional delta: string; ``` -Defined in: 
[types.ts:665](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L665) +Defined in: [types.ts:728](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L728) *** @@ -39,7 +39,7 @@ Defined in: [types.ts:665](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -53,7 +53,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -67,7 +67,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -81,7 +81,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ type: "thinking"; ``` -Defined in: [types.ts:664](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L664) +Defined in: [types.ts:727](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L727) #### Overrides diff --git a/docs/reference/interfaces/Tool.md b/docs/reference/interfaces/Tool.md index 245373b1..75883d49 100644 --- a/docs/reference/interfaces/Tool.md +++ b/docs/reference/interfaces/Tool.md @@ -5,7 +5,7 @@ title: Tool # Interface: Tool\ -Defined in: 
[types.ts:320](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L320) +Defined in: [types.ts:377](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L377) Tool/Function definition for function calling. @@ -46,7 +46,7 @@ Tools can use either Zod schemas or JSON Schema objects for runtime validation a description: string; ``` -Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343) +Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400) Clear description of what the tool does. @@ -67,7 +67,7 @@ Be specific about what the tool does, what parameters it needs, and what it retu optional execute: (args) => any; ``` -Defined in: [types.ts:414](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L414) +Defined in: [types.ts:471](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L471) Optional function to execute when the model calls this tool. @@ -107,7 +107,7 @@ execute: async (args) => { optional inputSchema: TInput; ``` -Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L375) +Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L432) Schema describing the tool's input parameters. 
@@ -152,7 +152,7 @@ z.object({ optional metadata: Record; ``` -Defined in: [types.ts:420](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L420) +Defined in: [types.ts:477](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L477) Additional metadata for adapters or custom extensions @@ -164,7 +164,7 @@ Additional metadata for adapters or custom extensions name: TName; ``` -Defined in: [types.ts:333](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L333) +Defined in: [types.ts:390](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L390) Unique name of the tool (used by the model to call it). @@ -185,7 +185,7 @@ Must be unique within the tools array. optional needsApproval: boolean; ``` -Defined in: [types.ts:417](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L417) +Defined in: [types.ts:474](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L474) If true, tool execution requires user approval before running. Works with both server and client tools. @@ -197,7 +197,7 @@ If true, tool execution requires user approval before running. Works with both s optional outputSchema: TOutput; ``` -Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395) +Defined in: [types.ts:452](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L452) Optional schema for validating tool output. 
diff --git a/docs/reference/interfaces/ToolCall.md b/docs/reference/interfaces/ToolCall.md index 3c479192..e8e31be1 100644 --- a/docs/reference/interfaces/ToolCall.md +++ b/docs/reference/interfaces/ToolCall.md @@ -5,7 +5,7 @@ title: ToolCall # Interface: ToolCall -Defined in: [types.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L62) +Defined in: [types.ts:138](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L138) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:62](https://github.com/TanStack/ai/blob/main/packages/type function: object; ``` -Defined in: [types.ts:65](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L65) +Defined in: [types.ts:141](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L141) #### arguments @@ -37,7 +37,7 @@ name: string; id: string; ``` -Defined in: [types.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L63) +Defined in: [types.ts:139](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L139) *** @@ -47,4 +47,4 @@ Defined in: [types.ts:63](https://github.com/TanStack/ai/blob/main/packages/type type: "function"; ``` -Defined in: [types.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L64) +Defined in: [types.ts:140](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L140) diff --git a/docs/reference/interfaces/ToolCallPart.md b/docs/reference/interfaces/ToolCallPart.md index f46e12f4..4dc8b2de 100644 --- a/docs/reference/interfaces/ToolCallPart.md +++ b/docs/reference/interfaces/ToolCallPart.md @@ -5,7 +5,7 @@ title: ToolCallPart # Interface: ToolCallPart -Defined in: [types.ts:242](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L242) +Defined in: [types.ts:305](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L305) ## Properties @@ 
-15,7 +15,7 @@ Defined in: [types.ts:242](https://github.com/TanStack/ai/blob/main/packages/typ optional approval: object; ``` -Defined in: [types.ts:249](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L249) +Defined in: [types.ts:312](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L312) Approval metadata if tool requires user approval @@ -45,7 +45,7 @@ needsApproval: boolean; arguments: string; ``` -Defined in: [types.ts:246](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L246) +Defined in: [types.ts:309](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L309) *** @@ -55,7 +55,7 @@ Defined in: [types.ts:246](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:244](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L244) +Defined in: [types.ts:307](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L307) *** @@ -65,7 +65,7 @@ Defined in: [types.ts:244](https://github.com/TanStack/ai/blob/main/packages/typ name: string; ``` -Defined in: [types.ts:245](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L245) +Defined in: [types.ts:308](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L308) *** @@ -75,7 +75,7 @@ Defined in: [types.ts:245](https://github.com/TanStack/ai/blob/main/packages/typ optional output: any; ``` -Defined in: [types.ts:255](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L255) +Defined in: [types.ts:318](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L318) Tool execution output (for client tools or after approval) @@ -87,7 +87,7 @@ Tool execution output (for client tools or after approval) state: ToolCallState; ``` -Defined in: [types.ts:247](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L247) 
+Defined in: [types.ts:310](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L310) *** @@ -97,4 +97,4 @@ Defined in: [types.ts:247](https://github.com/TanStack/ai/blob/main/packages/typ type: "tool-call"; ``` -Defined in: [types.ts:243](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L243) +Defined in: [types.ts:306](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L306) diff --git a/docs/reference/interfaces/ToolCallStreamChunk.md b/docs/reference/interfaces/ToolCallStreamChunk.md index 1f5ab343..1df60390 100644 --- a/docs/reference/interfaces/ToolCallStreamChunk.md +++ b/docs/reference/interfaces/ToolCallStreamChunk.md @@ -5,7 +5,7 @@ title: ToolCallStreamChunk # Interface: ToolCallStreamChunk -Defined in: [types.ts:608](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L608) +Defined in: [types.ts:671](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L671) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:608](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -33,7 +33,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ index: number; ``` -Defined in: [types.ts:618](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L618) +Defined in: [types.ts:681](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L681) *** @@ -43,7 +43,7 @@ Defined in: [types.ts:618](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: 
[types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ toolCall: object; ``` -Defined in: [types.ts:610](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L610) +Defined in: [types.ts:673](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L673) #### function @@ -111,7 +111,7 @@ type: "function"; type: "tool_call"; ``` -Defined in: [types.ts:609](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L609) +Defined in: [types.ts:672](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L672) #### Overrides diff --git a/docs/reference/interfaces/ToolConfig.md b/docs/reference/interfaces/ToolConfig.md index 6caa598f..001188eb 100644 --- a/docs/reference/interfaces/ToolConfig.md +++ b/docs/reference/interfaces/ToolConfig.md @@ -5,7 +5,7 @@ title: ToolConfig # Interface: ToolConfig -Defined in: [types.ts:423](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L423) +Defined in: [types.ts:480](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L480) ## Indexable diff --git a/docs/reference/interfaces/ToolDefinition.md b/docs/reference/interfaces/ToolDefinition.md index 776e614f..30106f9f 100644 --- a/docs/reference/interfaces/ToolDefinition.md +++ b/docs/reference/interfaces/ToolDefinition.md @@ -5,7 +5,7 @@ title: ToolDefinition # Interface: ToolDefinition\ -Defined in: 
[tools/tool-definition.ts:99](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L99) +Defined in: [activities/chat/tools/tool-definition.ts:104](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L104) Tool definition builder that allows creating server or client tools from a shared definition @@ -35,7 +35,7 @@ Tool definition builder that allows creating server or client tools from a share __toolSide: "definition"; ``` -Defined in: [tools/tool-definition.ts:43](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L43) +Defined in: [activities/chat/tools/tool-definition.ts:48](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L48) #### Inherited from @@ -49,7 +49,7 @@ Defined in: [tools/tool-definition.ts:43](https://github.com/TanStack/ai/blob/ma client: (execute?) => ClientTool; ``` -Defined in: [tools/tool-definition.ts:116](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L116) +Defined in: [activities/chat/tools/tool-definition.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L121) Create a client-side tool with optional execute function @@ -73,7 +73,7 @@ Create a client-side tool with optional execute function description: string; ``` -Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343) +Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400) Clear description of what the tool does. 
@@ -98,7 +98,7 @@ Be specific about what the tool does, what parameters it needs, and what it retu optional execute: (args) => any; ``` -Defined in: [types.ts:414](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L414) +Defined in: [types.ts:471](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L471) Optional function to execute when the model calls this tool. @@ -142,7 +142,7 @@ execute: async (args) => { optional inputSchema: TInput; ``` -Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L375) +Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L432) Schema describing the tool's input parameters. @@ -191,7 +191,7 @@ z.object({ optional metadata: Record; ``` -Defined in: [types.ts:420](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L420) +Defined in: [types.ts:477](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L477) Additional metadata for adapters or custom extensions @@ -207,7 +207,7 @@ Additional metadata for adapters or custom extensions name: TName; ``` -Defined in: [types.ts:333](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L333) +Defined in: [types.ts:390](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L390) Unique name of the tool (used by the model to call it). @@ -232,7 +232,7 @@ Must be unique within the tools array. optional needsApproval: boolean; ``` -Defined in: [types.ts:417](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L417) +Defined in: [types.ts:474](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L474) If true, tool execution requires user approval before running. Works with both server and client tools. @@ -248,7 +248,7 @@ If true, tool execution requires user approval before running. 
Works with both s optional outputSchema: TOutput; ``` -Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395) +Defined in: [types.ts:452](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L452) Optional schema for validating tool output. @@ -282,7 +282,7 @@ z.object({ server: (execute) => ServerTool; ``` -Defined in: [tools/tool-definition.ts:107](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L107) +Defined in: [activities/chat/tools/tool-definition.ts:112](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L112) Create a server-side tool with execute function diff --git a/docs/reference/interfaces/ToolDefinitionConfig.md b/docs/reference/interfaces/ToolDefinitionConfig.md index 7d5c26f5..f35272ef 100644 --- a/docs/reference/interfaces/ToolDefinitionConfig.md +++ b/docs/reference/interfaces/ToolDefinitionConfig.md @@ -5,7 +5,7 @@ title: ToolDefinitionConfig # Interface: ToolDefinitionConfig\ -Defined in: [tools/tool-definition.ts:83](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L83) +Defined in: [activities/chat/tools/tool-definition.ts:88](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L88) Tool definition configuration @@ -31,7 +31,7 @@ Tool definition configuration description: string; ``` -Defined in: [tools/tool-definition.ts:89](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L89) +Defined in: [activities/chat/tools/tool-definition.ts:94](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L94) *** @@ -41,7 +41,7 @@ Defined in: [tools/tool-definition.ts:89](https://github.com/TanStack/ai/blob/ma optional inputSchema: TInput; ``` -Defined in: 
[tools/tool-definition.ts:90](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L90) +Defined in: [activities/chat/tools/tool-definition.ts:95](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L95) *** @@ -51,7 +51,7 @@ Defined in: [tools/tool-definition.ts:90](https://github.com/TanStack/ai/blob/ma optional metadata: Record; ``` -Defined in: [tools/tool-definition.ts:93](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L93) +Defined in: [activities/chat/tools/tool-definition.ts:98](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L98) *** @@ -61,7 +61,7 @@ Defined in: [tools/tool-definition.ts:93](https://github.com/TanStack/ai/blob/ma name: TName; ``` -Defined in: [tools/tool-definition.ts:88](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L88) +Defined in: [activities/chat/tools/tool-definition.ts:93](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L93) *** @@ -71,7 +71,7 @@ Defined in: [tools/tool-definition.ts:88](https://github.com/TanStack/ai/blob/ma optional needsApproval: boolean; ``` -Defined in: [tools/tool-definition.ts:92](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L92) +Defined in: [activities/chat/tools/tool-definition.ts:97](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L97) *** @@ -81,4 +81,4 @@ Defined in: [tools/tool-definition.ts:92](https://github.com/TanStack/ai/blob/ma optional outputSchema: TOutput; ``` -Defined in: [tools/tool-definition.ts:91](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L91) +Defined in: 
[activities/chat/tools/tool-definition.ts:96](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L96) diff --git a/docs/reference/interfaces/ToolDefinitionInstance.md b/docs/reference/interfaces/ToolDefinitionInstance.md index 7de343d2..0bc4cfc1 100644 --- a/docs/reference/interfaces/ToolDefinitionInstance.md +++ b/docs/reference/interfaces/ToolDefinitionInstance.md @@ -5,7 +5,7 @@ title: ToolDefinitionInstance # Interface: ToolDefinitionInstance\ -Defined in: [tools/tool-definition.ts:38](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L38) +Defined in: [activities/chat/tools/tool-definition.ts:43](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L43) Tool definition that can be used directly or instantiated for server/client @@ -39,7 +39,7 @@ Tool definition that can be used directly or instantiated for server/client __toolSide: "definition"; ``` -Defined in: [tools/tool-definition.ts:43](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L43) +Defined in: [activities/chat/tools/tool-definition.ts:48](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L48) *** @@ -49,7 +49,7 @@ Defined in: [tools/tool-definition.ts:43](https://github.com/TanStack/ai/blob/ma description: string; ``` -Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343) +Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400) Clear description of what the tool does. 
@@ -74,7 +74,7 @@ Be specific about what the tool does, what parameters it needs, and what it retu optional execute: (args) => any; ``` -Defined in: [types.ts:414](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L414) +Defined in: [types.ts:471](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L471) Optional function to execute when the model calls this tool. @@ -118,7 +118,7 @@ execute: async (args) => { optional inputSchema: TInput; ``` -Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L375) +Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L432) Schema describing the tool's input parameters. @@ -167,7 +167,7 @@ z.object({ optional metadata: Record; ``` -Defined in: [types.ts:420](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L420) +Defined in: [types.ts:477](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L477) Additional metadata for adapters or custom extensions @@ -183,7 +183,7 @@ Additional metadata for adapters or custom extensions name: TName; ``` -Defined in: [types.ts:333](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L333) +Defined in: [types.ts:390](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L390) Unique name of the tool (used by the model to call it). @@ -208,7 +208,7 @@ Must be unique within the tools array. optional needsApproval: boolean; ``` -Defined in: [types.ts:417](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L417) +Defined in: [types.ts:474](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L474) If true, tool execution requires user approval before running. Works with both server and client tools. @@ -224,7 +224,7 @@ If true, tool execution requires user approval before running. 
Works with both s optional outputSchema: TOutput; ``` -Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395) +Defined in: [types.ts:452](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L452) Optional schema for validating tool output. diff --git a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md index a7256939..7be2ce58 100644 --- a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md +++ b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md @@ -5,7 +5,7 @@ title: ToolInputAvailableStreamChunk # Interface: ToolInputAvailableStreamChunk -Defined in: [types.ts:656](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L656) +Defined in: [types.ts:719](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L719) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:656](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -33,7 +33,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ input: any; ``` -Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) +Defined in: [types.ts:723](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L723) *** @@ -43,7 +43,7 @@ Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited 
from @@ -57,7 +57,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:658](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L658) +Defined in: [types.ts:721](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L721) *** @@ -81,7 +81,7 @@ Defined in: [types.ts:658](https://github.com/TanStack/ai/blob/main/packages/typ toolName: string; ``` -Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) +Defined in: [types.ts:722](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L722) *** @@ -91,7 +91,7 @@ Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typ type: "tool-input-available"; ``` -Defined in: [types.ts:657](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L657) +Defined in: [types.ts:720](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L720) #### Overrides diff --git a/docs/reference/interfaces/ToolResultPart.md b/docs/reference/interfaces/ToolResultPart.md index 95676b00..9caa2264 100644 --- a/docs/reference/interfaces/ToolResultPart.md +++ b/docs/reference/interfaces/ToolResultPart.md @@ -5,7 +5,7 @@ title: ToolResultPart # Interface: ToolResultPart -Defined in: [types.ts:258](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L258) +Defined in: [types.ts:321](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L321) ## Properties @@ -15,7 +15,7 @@ Defined 
in: [types.ts:258](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:261](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L261) +Defined in: [types.ts:324](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L324) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:261](https://github.com/TanStack/ai/blob/main/packages/typ optional error: string; ``` -Defined in: [types.ts:263](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L263) +Defined in: [types.ts:326](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L326) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:263](https://github.com/TanStack/ai/blob/main/packages/typ state: ToolResultState; ``` -Defined in: [types.ts:262](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L262) +Defined in: [types.ts:325](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L325) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:262](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:260](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L260) +Defined in: [types.ts:323](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L323) *** @@ -55,4 +55,4 @@ Defined in: [types.ts:260](https://github.com/TanStack/ai/blob/main/packages/typ type: "tool-result"; ``` -Defined in: [types.ts:259](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L259) +Defined in: [types.ts:322](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L322) diff --git a/docs/reference/interfaces/ToolResultStreamChunk.md b/docs/reference/interfaces/ToolResultStreamChunk.md index e5f728a7..2562ffe0 100644 --- a/docs/reference/interfaces/ToolResultStreamChunk.md +++ b/docs/reference/interfaces/ToolResultStreamChunk.md @@ -5,7 +5,7 @@ 
title: ToolResultStreamChunk # Interface: ToolResultStreamChunk -Defined in: [types.ts:621](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L621) +Defined in: [types.ts:684](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L684) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:621](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:624](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L624) +Defined in: [types.ts:687](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L687) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:624](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596) +Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659) #### Inherited from @@ -43,7 +43,7 @@ Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597) +Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598) +Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661) #### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:623](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L623) +Defined in: 
[types.ts:686](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L686) *** @@ -81,7 +81,7 @@ Defined in: [types.ts:623](https://github.com/TanStack/ai/blob/main/packages/typ type: "tool_result"; ``` -Defined in: [types.ts:622](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L622) +Defined in: [types.ts:685](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L685) #### Overrides diff --git a/docs/reference/interfaces/TranscriptionOptions.md b/docs/reference/interfaces/TranscriptionOptions.md new file mode 100644 index 00000000..d477fb61 --- /dev/null +++ b/docs/reference/interfaces/TranscriptionOptions.md @@ -0,0 +1,89 @@ +--- +id: TranscriptionOptions +title: TranscriptionOptions +--- + +# Interface: TranscriptionOptions\ + +Defined in: [types.ts:948](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L948) + +Options for audio transcription. +These are the common options supported across providers. + +## Type Parameters + +### TProviderOptions + +`TProviderOptions` *extends* `object` = `object` + +## Properties + +### audio + +```ts +audio: string | File | Blob | ArrayBuffer; +``` + +Defined in: [types.ts:954](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L954) + +The audio data to transcribe - can be base64 string, File, Blob, or Buffer + +*** + +### language? + +```ts +optional language: string; +``` + +Defined in: [types.ts:956](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L956) + +The language of the audio in ISO-639-1 format (e.g., 'en') + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:952](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L952) + +The model to use for transcription + +*** + +### modelOptions? 
+ +```ts +optional modelOptions: TProviderOptions; +``` + +Defined in: [types.ts:962](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L962) + +Model-specific options for transcription + +*** + +### prompt? + +```ts +optional prompt: string; +``` + +Defined in: [types.ts:958](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L958) + +An optional prompt to guide the transcription + +*** + +### responseFormat? + +```ts +optional responseFormat: "text" | "json" | "srt" | "verbose_json" | "vtt"; +``` + +Defined in: [types.ts:960](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L960) + +The format of the transcription output diff --git a/docs/reference/interfaces/TranscriptionResult.md b/docs/reference/interfaces/TranscriptionResult.md new file mode 100644 index 00000000..6f90d295 --- /dev/null +++ b/docs/reference/interfaces/TranscriptionResult.md @@ -0,0 +1,94 @@ +--- +id: TranscriptionResult +title: TranscriptionResult +--- + +# Interface: TranscriptionResult + +Defined in: [types.ts:998](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L998) + +Result of audio transcription. + +## Properties + +### duration? + +```ts +optional duration: number; +``` + +Defined in: [types.ts:1008](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1008) + +Duration of the audio in seconds + +*** + +### id + +```ts +id: string; +``` + +Defined in: [types.ts:1000](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1000) + +Unique identifier for the transcription + +*** + +### language? 
+ +```ts +optional language: string; +``` + +Defined in: [types.ts:1006](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1006) + +Language detected or specified + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:1002](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1002) + +Model used for transcription + +*** + +### segments? + +```ts +optional segments: TranscriptionSegment[]; +``` + +Defined in: [types.ts:1010](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1010) + +Detailed segments with timing, if available + +*** + +### text + +```ts +text: string; +``` + +Defined in: [types.ts:1004](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1004) + +The full transcribed text + +*** + +### words? + +```ts +optional words: TranscriptionWord[]; +``` + +Defined in: [types.ts:1012](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1012) + +Word-level timestamps, if available diff --git a/docs/reference/interfaces/TranscriptionSegment.md b/docs/reference/interfaces/TranscriptionSegment.md new file mode 100644 index 00000000..51bb521a --- /dev/null +++ b/docs/reference/interfaces/TranscriptionSegment.md @@ -0,0 +1,82 @@ +--- +id: TranscriptionSegment +title: TranscriptionSegment +--- + +# Interface: TranscriptionSegment + +Defined in: [types.ts:968](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L968) + +A single segment of transcribed audio with timing information. + +## Properties + +### confidence? 
+ +```ts +optional confidence: number; +``` + +Defined in: [types.ts:978](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L978) + +Confidence score (0-1), if available + +*** + +### end + +```ts +end: number; +``` + +Defined in: [types.ts:974](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L974) + +End time of the segment in seconds + +*** + +### id + +```ts +id: number; +``` + +Defined in: [types.ts:970](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L970) + +Unique identifier for the segment + +*** + +### speaker? + +```ts +optional speaker: string; +``` + +Defined in: [types.ts:980](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L980) + +Speaker identifier, if diarization is enabled + +*** + +### start + +```ts +start: number; +``` + +Defined in: [types.ts:972](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L972) + +Start time of the segment in seconds + +*** + +### text + +```ts +text: string; +``` + +Defined in: [types.ts:976](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L976) + +Transcribed text for this segment diff --git a/docs/reference/interfaces/TranscriptionWord.md b/docs/reference/interfaces/TranscriptionWord.md new file mode 100644 index 00000000..1ffc2037 --- /dev/null +++ b/docs/reference/interfaces/TranscriptionWord.md @@ -0,0 +1,46 @@ +--- +id: TranscriptionWord +title: TranscriptionWord +--- + +# Interface: TranscriptionWord + +Defined in: [types.ts:986](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L986) + +A single word with timing information. 
+ +## Properties + +### end + +```ts +end: number; +``` + +Defined in: [types.ts:992](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L992) + +End time in seconds + +*** + +### start + +```ts +start: number; +``` + +Defined in: [types.ts:990](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L990) + +Start time in seconds + +*** + +### word + +```ts +word: string; +``` + +Defined in: [types.ts:988](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L988) + +The transcribed word diff --git a/docs/reference/interfaces/UIMessage.md b/docs/reference/interfaces/UIMessage.md index a4119372..ff70329a 100644 --- a/docs/reference/interfaces/UIMessage.md +++ b/docs/reference/interfaces/UIMessage.md @@ -5,7 +5,7 @@ title: UIMessage # Interface: UIMessage -Defined in: [types.ts:281](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L281) +Defined in: [types.ts:344](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L344) UIMessage - Domain-specific message format optimized for building chat UIs Contains parts that can be text, tool calls, or tool results @@ -18,7 +18,7 @@ Contains parts that can be text, tool calls, or tool results optional createdAt: Date; ``` -Defined in: [types.ts:285](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L285) +Defined in: [types.ts:348](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L348) *** @@ -28,7 +28,7 @@ Defined in: [types.ts:285](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:282](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L282) +Defined in: [types.ts:345](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L345) *** @@ -38,7 +38,7 @@ Defined in: [types.ts:282](https://github.com/TanStack/ai/blob/main/packages/typ parts: MessagePart[]; ``` 
-Defined in: [types.ts:284](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L284) +Defined in: [types.ts:347](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L347) *** @@ -48,4 +48,4 @@ Defined in: [types.ts:284](https://github.com/TanStack/ai/blob/main/packages/typ role: "user" | "assistant" | "system"; ``` -Defined in: [types.ts:283](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L283) +Defined in: [types.ts:346](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L346) diff --git a/docs/reference/interfaces/VideoGenerationOptions.md b/docs/reference/interfaces/VideoGenerationOptions.md new file mode 100644 index 00000000..b698ceca --- /dev/null +++ b/docs/reference/interfaces/VideoGenerationOptions.md @@ -0,0 +1,91 @@ +--- +id: VideoGenerationOptions +title: VideoGenerationOptions +--- + +# Interface: VideoGenerationOptions\ + +Defined in: [types.ts:842](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L842) + +**`Experimental`** + +Options for video generation. +These are the common options supported across providers. + + Video generation is an experimental feature and may change. + +## Type Parameters + +### TProviderOptions + +`TProviderOptions` *extends* `object` = `object` + +## Properties + +### duration? + +```ts +optional duration: number; +``` + +Defined in: [types.ts:852](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L852) + +**`Experimental`** + +Video duration in seconds + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:846](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L846) + +**`Experimental`** + +The model to use for video generation + +*** + +### modelOptions? 
+ +```ts +optional modelOptions: TProviderOptions; +``` + +Defined in: [types.ts:854](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L854) + +**`Experimental`** + +Model-specific options for video generation + +*** + +### prompt + +```ts +prompt: string; +``` + +Defined in: [types.ts:848](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L848) + +**`Experimental`** + +Text description of the desired video + +*** + +### size? + +```ts +optional size: string; +``` + +Defined in: [types.ts:850](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L850) + +**`Experimental`** + +Video size in WIDTHxHEIGHT format (e.g., "1280x720") diff --git a/docs/reference/interfaces/VideoJobResult.md b/docs/reference/interfaces/VideoJobResult.md new file mode 100644 index 00000000..d6f82646 --- /dev/null +++ b/docs/reference/interfaces/VideoJobResult.md @@ -0,0 +1,42 @@ +--- +id: VideoJobResult +title: VideoJobResult +--- + +# Interface: VideoJobResult + +Defined in: [types.ts:862](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L862) + +**`Experimental`** + +Result of creating a video generation job. + + Video generation is an experimental feature and may change. 
+ +## Properties + +### jobId + +```ts +jobId: string; +``` + +Defined in: [types.ts:864](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L864) + +**`Experimental`** + +Unique job identifier for polling status + +*** + +### model + +```ts +model: string; +``` + +Defined in: [types.ts:866](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L866) + +**`Experimental`** + +Model used for generation diff --git a/docs/reference/interfaces/VideoPart.md b/docs/reference/interfaces/VideoPart.md index 1b3e7063..e95859c0 100644 --- a/docs/reference/interfaces/VideoPart.md +++ b/docs/reference/interfaces/VideoPart.md @@ -5,7 +5,7 @@ title: VideoPart # Interface: VideoPart\ -Defined in: [types.ts:132](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L132) +Defined in: [types.ts:208](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L208) Video content part for multimodal messages. @@ -25,7 +25,7 @@ Provider-specific metadata type optional metadata: TMetadata; ``` -Defined in: [types.ts:137](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L137) +Defined in: [types.ts:213](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L213) Provider-specific metadata (e.g., duration, resolution) @@ -37,7 +37,7 @@ Provider-specific metadata (e.g., duration, resolution) source: ContentPartSource; ``` -Defined in: [types.ts:135](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L135) +Defined in: [types.ts:211](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L211) Source of the video content @@ -49,4 +49,4 @@ Source of the video content type: "video"; ``` -Defined in: [types.ts:133](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L133) +Defined in: [types.ts:209](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L209) diff 
--git a/docs/reference/interfaces/VideoStatusResult.md b/docs/reference/interfaces/VideoStatusResult.md new file mode 100644 index 00000000..a0526318 --- /dev/null +++ b/docs/reference/interfaces/VideoStatusResult.md @@ -0,0 +1,70 @@ +--- +id: VideoStatusResult +title: VideoStatusResult +--- + +# Interface: VideoStatusResult + +Defined in: [types.ts:874](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L874) + +**`Experimental`** + +Status of a video generation job. + + Video generation is an experimental feature and may change. + +## Properties + +### error? + +```ts +optional error: string; +``` + +Defined in: [types.ts:882](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L882) + +**`Experimental`** + +Error message if status is 'failed' + +*** + +### jobId + +```ts +jobId: string; +``` + +Defined in: [types.ts:876](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L876) + +**`Experimental`** + +Job identifier + +*** + +### progress? 
+ +```ts +optional progress: number; +``` + +Defined in: [types.ts:880](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L880) + +**`Experimental`** + +Progress percentage (0-100), if available + +*** + +### status + +```ts +status: "pending" | "processing" | "completed" | "failed"; +``` + +Defined in: [types.ts:878](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L878) + +**`Experimental`** + +Current status of the job diff --git a/docs/reference/interfaces/VideoUrlResult.md b/docs/reference/interfaces/VideoUrlResult.md new file mode 100644 index 00000000..4a79f0c4 --- /dev/null +++ b/docs/reference/interfaces/VideoUrlResult.md @@ -0,0 +1,56 @@ +--- +id: VideoUrlResult +title: VideoUrlResult +--- + +# Interface: VideoUrlResult + +Defined in: [types.ts:890](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L890) + +**`Experimental`** + +Result containing the URL to a generated video. + + Video generation is an experimental feature and may change. + +## Properties + +### expiresAt? 
+ +```ts +optional expiresAt: Date; +``` + +Defined in: [types.ts:896](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L896) + +**`Experimental`** + +When the URL expires, if applicable + +*** + +### jobId + +```ts +jobId: string; +``` + +Defined in: [types.ts:892](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L892) + +**`Experimental`** + +Job identifier + +*** + +### url + +```ts +url: string; +``` + +Defined in: [types.ts:894](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L894) + +**`Experimental`** + +URL to the generated video diff --git a/docs/reference/type-aliases/AIAdapter.md b/docs/reference/type-aliases/AIAdapter.md new file mode 100644 index 00000000..ef7f5f5c --- /dev/null +++ b/docs/reference/type-aliases/AIAdapter.md @@ -0,0 +1,20 @@ +--- +id: AIAdapter +title: AIAdapter +--- + +# Type Alias: AIAdapter + +```ts +type AIAdapter = + | AnyTextAdapter + | AnySummarizeAdapter + | AnyImageAdapter + | AnyVideoAdapter + | AnyTTSAdapter + | AnyTranscriptionAdapter; +``` + +Defined in: [activities/index.ts:149](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/index.ts#L149) + +Union of all adapter types that can be passed to chat() diff --git a/docs/reference/type-aliases/AgentLoopStrategy.md b/docs/reference/type-aliases/AgentLoopStrategy.md index 96bdb2cb..b128de9b 100644 --- a/docs/reference/type-aliases/AgentLoopStrategy.md +++ b/docs/reference/type-aliases/AgentLoopStrategy.md @@ -9,7 +9,7 @@ title: AgentLoopStrategy type AgentLoopStrategy = (state) => boolean; ``` -Defined in: [types.ts:543](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L543) +Defined in: [types.ts:600](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L600) Strategy function that determines whether the agent loop should continue diff --git a/docs/reference/type-aliases/AnyClientTool.md 
b/docs/reference/type-aliases/AnyClientTool.md index 4f395412..bbbf59ec 100644 --- a/docs/reference/type-aliases/AnyClientTool.md +++ b/docs/reference/type-aliases/AnyClientTool.md @@ -11,6 +11,6 @@ type AnyClientTool = | ToolDefinitionInstance; ``` -Defined in: [tools/tool-definition.ts:49](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L49) +Defined in: [activities/chat/tools/tool-definition.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L54) Union type for any kind of client-side tool (client tool or definition) diff --git a/docs/reference/type-aliases/ChatStreamOptionsForModel.md b/docs/reference/type-aliases/ChatStreamOptionsForModel.md deleted file mode 100644 index 651be480..00000000 --- a/docs/reference/type-aliases/ChatStreamOptionsForModel.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: ChatStreamOptionsForModel -title: ChatStreamOptionsForModel ---- - -# Type Alias: ChatStreamOptionsForModel\ - -```ts -type ChatStreamOptionsForModel = TAdapter extends AIAdapter ? Omit & object : never; -``` - -Defined in: [types.ts:883](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L883) - -Chat options constrained by a specific model's capabilities. -Unlike ChatStreamOptionsUnion which creates a union over all models, -this type takes a specific model and constrains messages accordingly. 
- -## Type Parameters - -### TAdapter - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `any`, `any`\> - -### TModel - -`TModel` *extends* `string` diff --git a/docs/reference/type-aliases/ChatStreamOptionsUnion.md b/docs/reference/type-aliases/ChatStreamOptionsUnion.md deleted file mode 100644 index 02e3cb26..00000000 --- a/docs/reference/type-aliases/ChatStreamOptionsUnion.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: ChatStreamOptionsUnion -title: ChatStreamOptionsUnion ---- - -# Type Alias: ChatStreamOptionsUnion\ - -```ts -type ChatStreamOptionsUnion = TAdapter extends AIAdapter ? Models[number] extends infer TModel ? TModel extends string ? Omit & object : never : never : never; -``` - -Defined in: [types.ts:823](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L823) - -## Type Parameters - -### TAdapter - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `any`, `any`\> diff --git a/docs/reference/type-aliases/ConstrainedContent.md b/docs/reference/type-aliases/ConstrainedContent.md index 9d430559..1ed97e63 100644 --- a/docs/reference/type-aliases/ConstrainedContent.md +++ b/docs/reference/type-aliases/ConstrainedContent.md @@ -3,42 +3,22 @@ id: ConstrainedContent title: ConstrainedContent --- -# Type Alias: ConstrainedContent\ +# Type Alias: ConstrainedContent\ ```ts -type ConstrainedContent = +type ConstrainedContent = | string | null - | ContentPartForModalities, TImageMeta, TAudioMeta, TVideoMeta, TDocumentMeta, TTextMeta>[]; + | ContentPartForInputModalitiesTypes[]; ``` -Defined in: [types.ts:199](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L199) +Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) Type for message content constrained by supported modalities. 
When modalities is ['text', 'image'], only TextPart and ImagePart are allowed in the array. ## Type Parameters -### TModalities +### TInputModalitiesTypes -`TModalities` *extends* `ReadonlyArray`\<[`Modality`](Modality.md)\> - -### TImageMeta - -`TImageMeta` = `unknown` - -### TAudioMeta - -`TAudioMeta` = `unknown` - -### TVideoMeta - -`TVideoMeta` = `unknown` - -### TDocumentMeta - -`TDocumentMeta` = `unknown` - -### TTextMeta - -`TTextMeta` = `unknown` +`TInputModalitiesTypes` *extends* [`InputModalitiesTypes`](InputModalitiesTypes.md) diff --git a/docs/reference/type-aliases/ConstrainedModelMessage.md b/docs/reference/type-aliases/ConstrainedModelMessage.md index c3f78bd2..83928cb1 100644 --- a/docs/reference/type-aliases/ConstrainedModelMessage.md +++ b/docs/reference/type-aliases/ConstrainedModelMessage.md @@ -3,13 +3,13 @@ id: ConstrainedModelMessage title: ConstrainedModelMessage --- -# Type Alias: ConstrainedModelMessage\ +# Type Alias: ConstrainedModelMessage\ ```ts -type ConstrainedModelMessage = Omit & object; +type ConstrainedModelMessage = Omit & object; ``` -Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) +Defined in: [types.ts:360](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L360) A ModelMessage with content constrained to only allow content parts matching the specified input modalities. @@ -19,31 +19,11 @@ matching the specified input modalities. 
### content ```ts -content: ConstrainedContent; +content: ConstrainedContent; ``` ## Type Parameters -### TModalities +### TInputModalitiesTypes -`TModalities` *extends* `ReadonlyArray`\<[`Modality`](Modality.md)\> - -### TImageMeta - -`TImageMeta` = `unknown` - -### TAudioMeta - -`TAudioMeta` = `unknown` - -### TVideoMeta - -`TVideoMeta` = `unknown` - -### TDocumentMeta - -`TDocumentMeta` = `unknown` - -### TTextMeta - -`TTextMeta` = `unknown` +`TInputModalitiesTypes` *extends* [`InputModalitiesTypes`](InputModalitiesTypes.md) diff --git a/docs/reference/type-aliases/ContentPart.md b/docs/reference/type-aliases/ContentPart.md index 783073c9..b7e148c4 100644 --- a/docs/reference/type-aliases/ContentPart.md +++ b/docs/reference/type-aliases/ContentPart.md @@ -3,10 +3,10 @@ id: ContentPart title: ContentPart --- -# Type Alias: ContentPart\ +# Type Alias: ContentPart\ ```ts -type ContentPart = +type ContentPart = | TextPart | ImagePart | AudioPart @@ -14,12 +14,16 @@ type ContentPart = | DocumentPart; ``` -Defined in: [types.ts:159](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L159) +Defined in: [types.ts:235](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L235) Union type for all multimodal content parts. 
## Type Parameters +### TTextMeta + +`TTextMeta` = `unknown` + ### TImageMeta `TImageMeta` = `unknown` @@ -43,7 +47,3 @@ Provider-specific video metadata type `TDocumentMeta` = `unknown` Provider-specific document metadata type - -### TTextMeta - -`TTextMeta` = `unknown` diff --git a/docs/reference/type-aliases/ContentPartForInputModalitiesTypes.md b/docs/reference/type-aliases/ContentPartForInputModalitiesTypes.md new file mode 100644 index 00000000..ce05b0ad --- /dev/null +++ b/docs/reference/type-aliases/ContentPartForInputModalitiesTypes.md @@ -0,0 +1,23 @@ +--- +id: ContentPartForInputModalitiesTypes +title: ContentPartForInputModalitiesTypes +--- + +# Type Alias: ContentPartForInputModalitiesTypes\ + +```ts +type ContentPartForInputModalitiesTypes = Extract, { + type: TInputModalitiesTypes["inputModalities"][number]; +}>; +``` + +Defined in: [types.ts:252](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L252) + +Helper type to filter ContentPart union to only include specific modalities. +Used to constrain message content based on model capabilities. + +## Type Parameters + +### TInputModalitiesTypes + +`TInputModalitiesTypes` *extends* [`InputModalitiesTypes`](InputModalitiesTypes.md) diff --git a/docs/reference/type-aliases/ContentPartForModalities.md b/docs/reference/type-aliases/ContentPartForModalities.md deleted file mode 100644 index d653bbcd..00000000 --- a/docs/reference/type-aliases/ContentPartForModalities.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: ContentPartForModalities -title: ContentPartForModalities ---- - -# Type Alias: ContentPartForModalities\ - -```ts -type ContentPartForModalities = Extract, { - type: TModalities; -}>; -``` - -Defined in: [types.ts:176](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L176) - -Helper type to filter ContentPart union to only include specific modalities. -Used to constrain message content based on model capabilities. 
- -## Type Parameters - -### TModalities - -`TModalities` *extends* [`Modality`](Modality.md) - -### TImageMeta - -`TImageMeta` = `unknown` - -### TAudioMeta - -`TAudioMeta` = `unknown` - -### TVideoMeta - -`TVideoMeta` = `unknown` - -### TDocumentMeta - -`TDocumentMeta` = `unknown` - -### TTextMeta - -`TTextMeta` = `unknown` diff --git a/docs/reference/type-aliases/ExtractModalitiesForModel.md b/docs/reference/type-aliases/ExtractModalitiesForModel.md deleted file mode 100644 index fe165380..00000000 --- a/docs/reference/type-aliases/ExtractModalitiesForModel.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: ExtractModalitiesForModel -title: ExtractModalitiesForModel ---- - -# Type Alias: ExtractModalitiesForModel\ - -```ts -type ExtractModalitiesForModel = TAdapter extends AIAdapter ? TModel extends keyof ModelInputModalities ? ModelInputModalities[TModel] : ReadonlyArray : ReadonlyArray; -``` - -Defined in: [types.ts:942](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L942) - -Extract the supported input modalities for a specific model from an adapter. - -## Type Parameters - -### TAdapter - -`TAdapter` *extends* [`AIAdapter`](../interfaces/AIAdapter.md)\<`any`, `any`, `any`, `any`, `any`, `any`\> - -### TModel - -`TModel` *extends* `string` diff --git a/docs/reference/type-aliases/ExtractModelsFromAdapter.md b/docs/reference/type-aliases/ExtractModelsFromAdapter.md deleted file mode 100644 index 7b1edfb0..00000000 --- a/docs/reference/type-aliases/ExtractModelsFromAdapter.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: ExtractModelsFromAdapter -title: ExtractModelsFromAdapter ---- - -# Type Alias: ExtractModelsFromAdapter\ - -```ts -type ExtractModelsFromAdapter = T extends AIAdapter ? 
M[number] : never; -``` - -Defined in: [types.ts:936](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L936) - -## Type Parameters - -### T - -`T` diff --git a/docs/reference/type-aliases/InferSchemaType.md b/docs/reference/type-aliases/InferSchemaType.md index 50d9cf8d..5fdb94bb 100644 --- a/docs/reference/type-aliases/InferSchemaType.md +++ b/docs/reference/type-aliases/InferSchemaType.md @@ -9,7 +9,7 @@ title: InferSchemaType type InferSchemaType = T extends z.ZodType ? z.infer : any; ``` -Defined in: [types.ts:60](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L60) +Defined in: [types.ts:136](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L136) Infer the TypeScript type from a schema. For Zod schemas, uses z.infer to get the proper type. diff --git a/docs/reference/type-aliases/InferToolInput.md b/docs/reference/type-aliases/InferToolInput.md index 9caf3099..0975434e 100644 --- a/docs/reference/type-aliases/InferToolInput.md +++ b/docs/reference/type-aliases/InferToolInput.md @@ -9,7 +9,7 @@ title: InferToolInput type InferToolInput = T extends object ? TInput extends z.ZodType ? z.infer : TInput extends JSONSchema ? any : any : any; ``` -Defined in: [tools/tool-definition.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L61) +Defined in: [activities/chat/tools/tool-definition.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L66) Extract the input type from a tool (inferred from Zod schema, or `any` for JSONSchema) diff --git a/docs/reference/type-aliases/InferToolName.md b/docs/reference/type-aliases/InferToolName.md index 25b0aa93..c656c060 100644 --- a/docs/reference/type-aliases/InferToolName.md +++ b/docs/reference/type-aliases/InferToolName.md @@ -9,7 +9,7 @@ title: InferToolName type InferToolName = T extends object ? 
N : never; ``` -Defined in: [tools/tool-definition.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L56) +Defined in: [activities/chat/tools/tool-definition.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L61) Extract the tool name as a literal type diff --git a/docs/reference/type-aliases/InferToolOutput.md b/docs/reference/type-aliases/InferToolOutput.md index 2c3d3607..e6b03cb8 100644 --- a/docs/reference/type-aliases/InferToolOutput.md +++ b/docs/reference/type-aliases/InferToolOutput.md @@ -9,7 +9,7 @@ title: InferToolOutput type InferToolOutput = T extends object ? TOutput extends z.ZodType ? z.infer : TOutput extends JSONSchema ? any : any : any; ``` -Defined in: [tools/tool-definition.ts:72](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-definition.ts#L72) +Defined in: [activities/chat/tools/tool-definition.ts:77](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/tools/tool-definition.ts#L77) Extract the output type from a tool (inferred from Zod schema, or `any` for JSONSchema) diff --git a/docs/reference/type-aliases/InputModalitiesTypes.md b/docs/reference/type-aliases/InputModalitiesTypes.md new file mode 100644 index 00000000..5c3b0905 --- /dev/null +++ b/docs/reference/type-aliases/InputModalitiesTypes.md @@ -0,0 +1,32 @@ +--- +id: InputModalitiesTypes +title: InputModalitiesTypes +--- + +# Type Alias: InputModalitiesTypes + +```ts +type InputModalitiesTypes = object; +``` + +Defined in: [types.ts:351](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L351) + +## Properties + +### inputModalities + +```ts +inputModalities: ReadonlyArray; +``` + +Defined in: [types.ts:352](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L352) + +*** + +### messageMetadataByModality + +```ts +messageMetadataByModality: 
DefaultMessageMetadataByModality; +``` + +Defined in: [types.ts:353](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L353) diff --git a/docs/reference/type-aliases/MessagePart.md b/docs/reference/type-aliases/MessagePart.md index 93e4695c..e0f62857 100644 --- a/docs/reference/type-aliases/MessagePart.md +++ b/docs/reference/type-aliases/MessagePart.md @@ -13,4 +13,4 @@ type MessagePart = | ThinkingPart; ``` -Defined in: [types.ts:271](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L271) +Defined in: [types.ts:334](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L334) diff --git a/docs/reference/type-aliases/ModalitiesArrayToUnion.md b/docs/reference/type-aliases/ModalitiesArrayToUnion.md index 6a736d2e..9956c9f2 100644 --- a/docs/reference/type-aliases/ModalitiesArrayToUnion.md +++ b/docs/reference/type-aliases/ModalitiesArrayToUnion.md @@ -9,7 +9,7 @@ title: ModalitiesArrayToUnion type ModalitiesArrayToUnion = T[number]; ``` -Defined in: [types.ts:192](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L192) +Defined in: [types.ts:269](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L269) Helper type to convert a readonly array of modalities to a union type. e.g., readonly ['text', 'image'] -> 'text' | 'image' diff --git a/docs/reference/type-aliases/Modality.md b/docs/reference/type-aliases/Modality.md index f44c2f26..883631bd 100644 --- a/docs/reference/type-aliases/Modality.md +++ b/docs/reference/type-aliases/Modality.md @@ -9,7 +9,7 @@ title: Modality type Modality = "text" | "image" | "audio" | "video" | "document"; ``` -Defined in: [types.ts:83](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L83) +Defined in: [types.ts:159](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L159) Supported input modality types for multimodal content. 
- 'text': Plain text content diff --git a/docs/reference/type-aliases/SchemaInput.md b/docs/reference/type-aliases/SchemaInput.md index 1a1b09cd..52150599 100644 --- a/docs/reference/type-aliases/SchemaInput.md +++ b/docs/reference/type-aliases/SchemaInput.md @@ -9,6 +9,6 @@ title: SchemaInput type SchemaInput = z.ZodType | JSONSchema; ``` -Defined in: [types.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L53) +Defined in: [types.ts:129](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L129) Union type for schema input - can be either a Zod schema or a JSONSchema object. diff --git a/docs/reference/type-aliases/StreamChunk.md b/docs/reference/type-aliases/StreamChunk.md index 7227135c..6c721940 100644 --- a/docs/reference/type-aliases/StreamChunk.md +++ b/docs/reference/type-aliases/StreamChunk.md @@ -17,6 +17,6 @@ type StreamChunk = | ThinkingStreamChunk; ``` -Defined in: [types.ts:672](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L672) +Defined in: [types.ts:735](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L735) Chunk returned by the sdk during streaming chat completions. 
diff --git a/docs/reference/type-aliases/StreamChunkType.md b/docs/reference/type-aliases/StreamChunkType.md index 3d83a468..0b3d9514 100644 --- a/docs/reference/type-aliases/StreamChunkType.md +++ b/docs/reference/type-aliases/StreamChunkType.md @@ -17,4 +17,4 @@ type StreamChunkType = | "thinking"; ``` -Defined in: [types.ts:584](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L584) +Defined in: [types.ts:647](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L647) diff --git a/docs/reference/type-aliases/ToolCallState.md b/docs/reference/type-aliases/ToolCallState.md index 7ff8e334..dc71a615 100644 --- a/docs/reference/type-aliases/ToolCallState.md +++ b/docs/reference/type-aliases/ToolCallState.md @@ -14,6 +14,6 @@ type ToolCallState = | "approval-responded"; ``` -Defined in: [stream/types.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L13) +Defined in: [types.ts:6](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L6) Tool call states - track the lifecycle of a tool call diff --git a/docs/reference/type-aliases/ToolResultState.md b/docs/reference/type-aliases/ToolResultState.md index d9d29e6e..c641d4c7 100644 --- a/docs/reference/type-aliases/ToolResultState.md +++ b/docs/reference/type-aliases/ToolResultState.md @@ -9,6 +9,6 @@ title: ToolResultState type ToolResultState = "streaming" | "complete" | "error"; ``` -Defined in: [stream/types.ts:23](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/types.ts#L23) +Defined in: [types.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L16) Tool result states - track the lifecycle of a tool result diff --git a/docs/reference/variables/aiEventClient.md b/docs/reference/variables/aiEventClient.md index 8c37775a..6c15d1f2 100644 --- a/docs/reference/variables/aiEventClient.md +++ b/docs/reference/variables/aiEventClient.md @@ -9,4 +9,4 @@ 
title: aiEventClient const aiEventClient: AiEventClient; ``` -Defined in: [event-client.ts:307](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/event-client.ts#L307) +Defined in: [event-client.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/event-client.ts#L291) diff --git a/docs/reference/variables/defaultJSONParser.md b/docs/reference/variables/defaultJSONParser.md index e5e32dad..0c72078d 100644 --- a/docs/reference/variables/defaultJSONParser.md +++ b/docs/reference/variables/defaultJSONParser.md @@ -9,6 +9,6 @@ title: defaultJSONParser const defaultJSONParser: PartialJSONParser; ``` -Defined in: [stream/json-parser.ts:49](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/json-parser.ts#L49) +Defined in: [activities/chat/stream/json-parser.ts:49](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/json-parser.ts#L49) Default parser instance diff --git a/examples/README.md b/examples/README.md index 460baa07..1cdc89aa 100644 --- a/examples/README.md +++ b/examples/README.md @@ -309,10 +309,10 @@ All examples use SSE for real-time streaming: ```typescript import { chat, toStreamResponse } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' +import { openaiText } from '@tanstack/ai-openai' const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: 'gpt-4o', messages, }) @@ -383,7 +383,7 @@ const weatherTool = weatherToolDef.server(async ({ location }) => { }) const stream = chat({ - adapter: openai(), + adapter: openaiText(), model: 'gpt-4o', messages, tools: [weatherTool], // SDK executes these automatically diff --git a/examples/ts-group-chat/chat-server/capnweb-rpc.ts b/examples/ts-group-chat/chat-server/capnweb-rpc.ts index ffa2bcd1..f9c067ba 100644 --- a/examples/ts-group-chat/chat-server/capnweb-rpc.ts +++ b/examples/ts-group-chat/chat-server/capnweb-rpc.ts @@ -1,14 +1,15 @@ // Cap'n Web RPC server implementation for 
chat import { RpcTarget } from 'capnweb' -import { WebSocket } from 'ws' import { ChatLogic } from './chat-logic.js' +import type { WebSocket } from 'ws' + // Local type definition to avoid importing from @tanstack/ai at module parse time interface ModelMessage { role: 'system' | 'user' | 'assistant' | 'tool' content?: string toolCallId?: string - toolCalls?: any[] + toolCalls?: Array } // Lazy-load claude service to avoid importing AI packages at module parse time @@ -57,7 +58,7 @@ export const activeServers = new Set() export const userMessageQueues = new Map>() // Global registry of client callbacks -export const clients = new Map() +export const clients = new Map) => void>() // Chat Server Implementation (one per connection) export class ChatServer extends RpcTarget { @@ -95,7 +96,7 @@ export class ChatServer extends RpcTarget { console.log(`📬 Exclude user: ${excludeUser || 'none'}`) let successCount = 0 - const successful: string[] = [] + const successful: Array = [] for (const username of clients.keys()) { if (excludeUser && username === excludeUser) { @@ -150,7 +151,10 @@ export class ChatServer extends RpcTarget { } // Client joins the chat - async joinChat(username: string, notificationCallback: Function) { + async joinChat( + username: string, + notificationCallback: (...args: Array) => void, + ) { console.log(`${username} is joining the chat`) this.currentUsername = username @@ -264,7 +268,7 @@ export class ChatServer extends RpcTarget { ) // Build conversation history for Claude - const conversationHistory: ModelMessage[] = globalChat + const conversationHistory: Array = globalChat .getMessages() .map((msg) => ({ role: 'user' as const, @@ -345,7 +349,7 @@ export class ChatServer extends RpcTarget { }) // Get conversation history from the current request - const conversationHistory: ModelMessage[] = globalChat + const conversationHistory: Array = globalChat .getMessages() .map((msg) => ({ role: 'user' as const, @@ -409,7 +413,7 @@ export class ChatServer 
extends RpcTarget { } // Stream Claude response (for future use if needed) - async *streamClaudeResponse(conversationHistory: ModelMessage[]) { + async *streamClaudeResponse(conversationHistory: Array) { const claudeService = await getClaudeService() yield* claudeService.streamResponse(conversationHistory) } diff --git a/examples/ts-group-chat/chat-server/chat-logic.ts b/examples/ts-group-chat/chat-server/chat-logic.ts index b6d7ac79..6133a55b 100644 --- a/examples/ts-group-chat/chat-server/chat-logic.ts +++ b/examples/ts-group-chat/chat-server/chat-logic.ts @@ -8,8 +8,8 @@ export interface ChatMessage { } export interface ChatState { - onlineUsers: string[] - messages: ChatMessage[] + onlineUsers: Array + messages: Array } // Core chat business logic class @@ -105,11 +105,11 @@ export class ChatLogic { } } - getMessages(): ChatMessage[] { + getMessages(): Array { return [...this.chatState.messages] } - getOnlineUsers(): string[] { + getOnlineUsers(): Array { return [...this.chatState.onlineUsers] } } diff --git a/examples/ts-group-chat/chat-server/claude-service.ts b/examples/ts-group-chat/chat-server/claude-service.ts index 0d2d6dbc..d377c9bf 100644 --- a/examples/ts-group-chat/chat-server/claude-service.ts +++ b/examples/ts-group-chat/chat-server/claude-service.ts @@ -1,5 +1,5 @@ // Claude AI service for handling queued AI responses -import { anthropic } from '@tanstack/ai-anthropic' +import { anthropicText } from '@tanstack/ai-anthropic' import { chat, toolDefinition } from '@tanstack/ai' import type { JSONSchema, ModelMessage, StreamChunk } from '@tanstack/ai' @@ -92,7 +92,7 @@ export interface ClaudeQueueStatus { } export class ClaudeService { - private adapter = anthropic() // Uses ANTHROPIC_API_KEY from env + private adapter = anthropicText('claude-sonnet-4-5') // Uses ANTHROPIC_API_KEY from env private queue: Array = [] private currentRequest: ClaudeRequest | null = null private isProcessing = false @@ -153,7 +153,6 @@ export class ClaudeService { adapter: 
this.adapter, systemPrompts: [systemMessage], messages: [...conversationHistory] as any, - model: 'claude-sonnet-4-5', tools: [getWeatherTool], })) { chunkCount++ diff --git a/examples/ts-group-chat/package.json b/examples/ts-group-chat/package.json index 72e64073..90137ede 100644 --- a/examples/ts-group-chat/package.json +++ b/examples/ts-group-chat/package.json @@ -8,21 +8,21 @@ "test": "exit 0" }, "dependencies": { - "@tailwindcss/vite": "^4.1.17", + "@tailwindcss/vite": "^4.1.18", "@tanstack/ai": "workspace:*", "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", "@tanstack/ai-react": "workspace:*", "@tanstack/react-devtools": "^0.8.2", - "@tanstack/react-router": "^1.139.7", + "@tanstack/react-router": "^1.141.1", "@tanstack/react-router-devtools": "^1.139.7", "@tanstack/react-router-ssr-query": "^1.139.7", - "@tanstack/react-start": "^1.139.8", + "@tanstack/react-start": "^1.141.1", "@tanstack/router-plugin": "^1.139.7", "capnweb": "^0.1.0", - "react": "^19.2.0", - "react-dom": "^19.2.0", - "tailwindcss": "^4.1.17", + "react": "^19.2.3", + "react-dom": "^19.2.3", + "tailwindcss": "^4.1.18", "vite-tsconfig-paths": "^5.1.4", "ws": "^8.18.3" }, @@ -34,10 +34,10 @@ "@types/react": "^19.2.7", "@types/react-dom": "^19.2.3", "@types/ws": "^8.18.1", - "@vitejs/plugin-react": "^5.1.1", + "@vitejs/plugin-react": "^5.1.2", "jsdom": "^27.2.0", "typescript": "5.9.3", - "vite": "^7.2.4", + "vite": "^7.2.7", "vitest": "^4.0.14", "web-vitals": "^5.1.0" } diff --git a/examples/ts-react-chat/package.json b/examples/ts-react-chat/package.json index 3c4ebfb4..f9a10c40 100644 --- a/examples/ts-react-chat/package.json +++ b/examples/ts-react-chat/package.json @@ -9,7 +9,7 @@ "test": "exit 0" }, "dependencies": { - "@tailwindcss/vite": "^4.1.17", + "@tailwindcss/vite": "^4.1.18", "@tanstack/ai": "workspace:*", "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", @@ -18,25 +18,25 @@ "@tanstack/ai-openai": "workspace:*", 
"@tanstack/ai-react": "workspace:*", "@tanstack/ai-react-ui": "workspace:*", - "@tanstack/nitro-v2-vite-plugin": "^1.139.0", + "@tanstack/nitro-v2-vite-plugin": "^1.141.0", "@tanstack/react-devtools": "^0.8.2", - "@tanstack/react-router": "^1.139.7", + "@tanstack/react-router": "^1.141.1", "@tanstack/react-router-devtools": "^1.139.7", "@tanstack/react-router-ssr-query": "^1.139.7", - "@tanstack/react-start": "^1.139.8", + "@tanstack/react-start": "^1.141.1", "@tanstack/react-store": "^0.8.0", "@tanstack/router-plugin": "^1.139.7", "@tanstack/store": "^0.8.0", "highlight.js": "^11.11.1", - "lucide-react": "^0.555.0", - "react": "^19.2.0", - "react-dom": "^19.2.0", + "lucide-react": "^0.561.0", + "react": "^19.2.3", + "react-dom": "^19.2.3", "react-markdown": "^10.1.0", "rehype-highlight": "^7.0.2", "rehype-raw": "^7.0.0", "rehype-sanitize": "^6.0.0", "remark-gfm": "^4.0.1", - "tailwindcss": "^4.1.17", + "tailwindcss": "^4.1.18", "vite-tsconfig-paths": "^5.1.4", "zod": "^4.1.13" }, @@ -48,10 +48,10 @@ "@types/node": "^24.10.1", "@types/react": "^19.2.7", "@types/react-dom": "^19.2.3", - "@vitejs/plugin-react": "^5.1.1", + "@vitejs/plugin-react": "^5.1.2", "jsdom": "^27.2.0", "typescript": "5.9.3", - "vite": "^7.2.4", + "vite": "^7.2.7", "vitest": "^4.0.14", "web-vitals": "^5.1.0" } diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index 39cf0d5f..550e373c 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -1,9 +1,15 @@ import { createFileRoute } from '@tanstack/react-router' -import { chat, maxIterations, toStreamResponse } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' -import { ollama } from '@tanstack/ai-ollama' -import { anthropic } from '@tanstack/ai-anthropic' -import { gemini } from '@tanstack/ai-gemini' +import { + chat, + createChatOptions, + maxIterations, + toServerSentEventsStream, +} from '@tanstack/ai' 
+import { openaiText } from '@tanstack/ai-openai' +import { ollamaText } from '@tanstack/ai-ollama' +import { anthropicText } from '@tanstack/ai-anthropic' +import { geminiText } from '@tanstack/ai-gemini' +import type { AnyTextAdapter } from '@tanstack/ai' import { addToCartToolDef, addToWishListToolDef, @@ -61,45 +67,48 @@ export const Route = createFileRoute('/api/tanchat')({ const body = await request.json() const { messages, data } = body - // Extract provider, model, and conversationId from data + // Extract provider and model from data const provider: Provider = data?.provider || 'openai' - const model: string | undefined = data?.model + const model: string = data?.model || 'gpt-4o' const conversationId: string | undefined = data?.conversationId - try { - // Select adapter based on provider - let adapter - let defaultModel - - switch (provider) { - case 'anthropic': - adapter = anthropic() - defaultModel = 'claude-sonnet-4-5-20250929' - break - case 'gemini': - adapter = gemini() - defaultModel = 'gemini-2.0-flash-exp' - break - case 'ollama': - adapter = ollama() - defaultModel = 'mistral:7b' - break - case 'openai': - default: - adapter = openai() - defaultModel = 'gpt-4o' - break - } + // Pre-define typed adapter configurations with full type inference + // Model is passed to the adapter factory function for type-safe autocomplete + const adapterConfig: Record< + Provider, + () => { adapter: AnyTextAdapter } + > = { + anthropic: () => + createChatOptions({ + adapter: anthropicText( + (model || 'claude-sonnet-4-5') as 'claude-sonnet-4-5', + ), + }), + gemini: () => + createChatOptions({ + adapter: geminiText( + (model || 'gemini-2.5-flash') as 'gemini-2.5-flash', + ), + }), + ollama: () => + createChatOptions({ + adapter: ollamaText((model || 'mistral:7b') as 'mistral:7b'), + }), + openai: () => + createChatOptions({ + adapter: openaiText((model || 'gpt-4o') as 'gpt-4o'), + }), + } - // Determine model - use provided model or default based on provider - 
const selectedModel = model || defaultModel - console.log( - `[API Route] Using provider: ${provider}, model: ${selectedModel}`, - ) + try { + // Get typed adapter options using createChatOptions pattern + const options = adapterConfig[provider]() + // Note: We cast to AsyncIterable because all chat adapters + // return streams, but TypeScript sees a union of all possible return types const stream = chat({ - adapter: adapter as any, - model: selectedModel as any, + ...options, + tools: [ getGuitars, // Server tool recommendGuitarToolDef, // No server execute - client will handle @@ -113,7 +122,17 @@ export const Route = createFileRoute('/api/tanchat')({ abortController, conversationId, }) - return toStreamResponse(stream, { abortController }) + const readableStream = toServerSentEventsStream( + stream, + abortController, + ) + return new Response(readableStream, { + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }, + }) } catch (error: any) { console.error('[API Route] Error in chat request:', { message: error?.message, diff --git a/examples/ts-solid-chat/package.json b/examples/ts-solid-chat/package.json index 87920223..5c16d816 100644 --- a/examples/ts-solid-chat/package.json +++ b/examples/ts-solid-chat/package.json @@ -9,7 +9,7 @@ "test": "exit 0" }, "dependencies": { - "@tailwindcss/vite": "^4.1.17", + "@tailwindcss/vite": "^4.1.18", "@tanstack/ai": "workspace:*", "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", @@ -19,7 +19,7 @@ "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-solid": "workspace:*", "@tanstack/ai-solid-ui": "workspace:*", - "@tanstack/nitro-v2-vite-plugin": "^1.139.0", + "@tanstack/nitro-v2-vite-plugin": "^1.141.0", "@tanstack/router-plugin": "^1.139.7", "@tanstack/solid-ai-devtools": "workspace:*", "@tanstack/solid-devtools": "^0.7.15", @@ -33,7 +33,7 @@ "lucide-solid": "^0.554.0", "solid-js": "^1.9.10", "solid-markdown": "^2.1.0", - 
"tailwindcss": "^4.1.17", + "tailwindcss": "^4.1.18", "vite-tsconfig-paths": "^5.1.4", "zod": "^4.1.13" }, @@ -45,7 +45,7 @@ "@types/node": "^24.10.1", "jsdom": "^27.2.0", "typescript": "5.9.3", - "vite": "^7.2.4", + "vite": "^7.2.7", "vite-plugin-solid": "^2.11.10", "vitest": "^4.0.14", "web-vitals": "^5.1.0" diff --git a/examples/ts-solid-chat/src/routes/api.chat.ts b/examples/ts-solid-chat/src/routes/api.chat.ts index 8c96e7ae..99500a21 100644 --- a/examples/ts-solid-chat/src/routes/api.chat.ts +++ b/examples/ts-solid-chat/src/routes/api.chat.ts @@ -1,6 +1,6 @@ import { createFileRoute } from '@tanstack/solid-router' -import { chat, maxIterations, toStreamResponse } from '@tanstack/ai' -import { anthropic } from '@tanstack/ai-anthropic' +import { chat, maxIterations, toServerSentEventsStream } from '@tanstack/ai' +import { anthropicText } from '@tanstack/ai-anthropic' import { serverTools } from '@/lib/guitar-tools' const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. 
@@ -57,13 +57,12 @@ export const Route = createFileRoute('/api/chat')({ try { // Use the stream abort signal for proper cancellation handling const stream = chat({ - adapter: anthropic(), - model: 'claude-sonnet-4-5-20250929', + adapter: anthropicText('claude-sonnet-4-5'), tools: serverTools, systemPrompts: [SYSTEM_PROMPT], agentLoopStrategy: maxIterations(20), messages, - providerOptions: { + modelOptions: { thinking: { type: 'enabled', budget_tokens: 10000, @@ -72,7 +71,17 @@ export const Route = createFileRoute('/api/chat')({ abortController, }) - return toStreamResponse(stream, { abortController }) + const readableStream = toServerSentEventsStream( + stream, + abortController, + ) + return new Response(readableStream, { + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }, + }) } catch (error: any) { // If request was aborted, return early (don't send error response) if (error.name === 'AbortError' || abortController.signal.aborted) { diff --git a/examples/ts-svelte-chat/package.json b/examples/ts-svelte-chat/package.json index 6c8e552d..7c3a40c7 100644 --- a/examples/ts-svelte-chat/package.json +++ b/examples/ts-svelte-chat/package.json @@ -29,13 +29,13 @@ "@sveltejs/adapter-auto": "^3.3.1", "@sveltejs/kit": "^2.15.10", "@sveltejs/vite-plugin-svelte": "^5.1.1", - "@tailwindcss/vite": "^4.1.17", + "@tailwindcss/vite": "^4.1.18", "@types/node": "^24.10.1", "svelte": "^5.20.0", "svelte-check": "^4.2.0", - "tailwindcss": "^4.1.17", + "tailwindcss": "^4.1.18", "tslib": "^2.8.1", "typescript": "5.9.3", - "vite": "^7.2.4" + "vite": "^7.2.7" } } diff --git a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts index ee6a3195..a6128809 100644 --- a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts +++ b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts @@ -1,11 +1,16 @@ -import { env } from '$env/dynamic/private' -import { chat, 
maxIterations, toStreamResponse } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' -import { ollama } from '@tanstack/ai-ollama' -import { anthropic } from '@tanstack/ai-anthropic' -import { gemini } from '@tanstack/ai-gemini' +import { + chat, + createChatOptions, + maxIterations, + toServerSentEventsStream, +} from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' +import { ollamaText } from '@tanstack/ai-ollama' +import { anthropicText } from '@tanstack/ai-anthropic' +import { geminiText } from '@tanstack/ai-gemini' import type { RequestHandler } from './$types' +import { env } from '$env/dynamic/private' import { addToCartToolDef, @@ -23,6 +28,27 @@ if (env.OPENAI_API_KEY) process.env.OPENAI_API_KEY = env.OPENAI_API_KEY if (env.ANTHROPIC_API_KEY) process.env.ANTHROPIC_API_KEY = env.ANTHROPIC_API_KEY if (env.GEMINI_API_KEY) process.env.GEMINI_API_KEY = env.GEMINI_API_KEY +// Pre-define typed adapter configurations with full type inference +// This pattern gives you model autocomplete at definition time +const adapterConfig = { + anthropic: () => + createChatOptions({ + adapter: anthropicText('claude-sonnet-4-5'), + }), + gemini: () => + createChatOptions({ + adapter: geminiText('gemini-2.0-flash-exp'), + }), + ollama: () => + createChatOptions({ + adapter: ollamaText('mistral:7b'), + }), + openai: () => + createChatOptions({ + adapter: openaiText('gpt-4o'), + }), +} + const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. 
CRITICAL INSTRUCTIONS - YOU MUST FOLLOW THIS EXACT WORKFLOW: @@ -69,42 +95,14 @@ export const POST: RequestHandler = async ({ request }) => { const body = await request.json() const { messages, data } = body - // Extract provider and model from data + // Extract provider from data const provider: Provider = data?.provider || 'openai' - const model: string | undefined = data?.model - - // Select adapter based on provider - // Note: Adapters automatically read API keys from environment variables - // Environment variables must be set in .env file and the dev server restarted - let adapter - let defaultModel - - switch (provider) { - case 'anthropic': - adapter = anthropic() - defaultModel = 'claude-sonnet-4-5-20250929' - break - case 'gemini': - adapter = gemini() - defaultModel = 'gemini-2.0-flash-exp' - break - case 'ollama': - adapter = ollama() - defaultModel = 'mistral:7b' - break - case 'openai': - default: - adapter = openai() - defaultModel = 'gpt-4o' - break - } - // Determine model - use provided model or default based on provider - const selectedModel = model || defaultModel + // Get typed adapter options using createOptions pattern + const options = adapterConfig[provider]() const stream = chat({ - adapter: adapter as any, - model: selectedModel as any, + ...options, tools: [ getGuitars, // Server tool recommendGuitarToolDef, // No server execute - client will handle @@ -118,7 +116,14 @@ export const POST: RequestHandler = async ({ request }) => { abortController, }) - return toStreamResponse(stream, { abortController }) + const readableStream = toServerSentEventsStream(stream, abortController) + return new Response(readableStream, { + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }, + }) } catch (error: any) { console.error('[API Route] Error in chat request:', { message: error?.message, diff --git a/examples/ts-vue-chat/package.json b/examples/ts-vue-chat/package.json index 
d35140bb..1f58ba7f 100644 --- a/examples/ts-vue-chat/package.json +++ b/examples/ts-vue-chat/package.json @@ -24,17 +24,17 @@ "zod": "^4.1.13" }, "devDependencies": { - "@tailwindcss/vite": "^4.1.17", + "@tailwindcss/vite": "^4.1.18", "@types/node": "^24.10.1", "@vitejs/plugin-vue": "^5.2.3", "autoprefixer": "^10.4.21", "concurrently": "^9.1.2", "dotenv": "^17.2.3", "express": "^5.1.0", - "tailwindcss": "^4.1.17", + "tailwindcss": "^4.1.18", "tsx": "^4.20.6", "typescript": "5.9.3", - "vite": "^7.2.4", + "vite": "^7.2.7", "vue-tsc": "^2.2.10" } } diff --git a/examples/ts-vue-chat/vite.config.ts b/examples/ts-vue-chat/vite.config.ts index 7d3f3093..21a58c4d 100644 --- a/examples/ts-vue-chat/vite.config.ts +++ b/examples/ts-vue-chat/vite.config.ts @@ -2,11 +2,11 @@ import { fileURLToPath, URL } from 'node:url' import { defineConfig } from 'vite' import vue from '@vitejs/plugin-vue' import tailwindcss from '@tailwindcss/vite' -import { chat, maxIterations, toStreamResponse } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' -import { anthropic } from '@tanstack/ai-anthropic' -import { gemini } from '@tanstack/ai-gemini' -import { ollama } from '@tanstack/ai-ollama' +import { chat, maxIterations, toServerSentEventsStream } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' +import { anthropicText } from '@tanstack/ai-anthropic' +import { geminiText } from '@tanstack/ai-gemini' +import { ollamaText } from '@tanstack/ai-ollama' import { toolDefinition } from '@tanstack/ai' import { z } from 'zod' import dotenv from 'dotenv' @@ -202,29 +202,29 @@ export default defineConfig({ const model: string | undefined = data?.model let adapter - let defaultModel + + let selectedModel: string switch (provider) { case 'anthropic': - adapter = anthropic() - defaultModel = 'claude-sonnet-4-5-20250929' + selectedModel = model || 'claude-sonnet-4-5-20250929' + adapter = anthropicText(selectedModel) break case 'gemini': - adapter = gemini() - defaultModel 
= 'gemini-2.0-flash-exp' + selectedModel = model || 'gemini-2.0-flash-exp' + adapter = geminiText(selectedModel) break case 'ollama': - adapter = ollama() - defaultModel = 'mistral:7b' + selectedModel = model || 'mistral:7b' + adapter = ollamaText(selectedModel) break case 'openai': default: - adapter = openai() - defaultModel = 'gpt-4o' + selectedModel = model || 'gpt-4o' + adapter = openaiText(selectedModel) break } - const selectedModel = model || defaultModel console.log( `[API] Using provider: ${provider}, model: ${selectedModel}`, ) @@ -232,8 +232,7 @@ export default defineConfig({ const abortController = new AbortController() const stream = chat({ - adapter: adapter as any, - model: selectedModel as any, + adapter, tools: [ getGuitars, recommendGuitarToolDef, @@ -247,31 +246,30 @@ export default defineConfig({ abortController, }) - const response = toStreamResponse(stream, { abortController }) + const readableStream = toServerSentEventsStream( + stream, + abortController, + ) - // Forward headers - response.headers.forEach((value, key) => { - res.setHeader(key, value) - }) + // Set headers + res.setHeader('Content-Type', 'text/event-stream') + res.setHeader('Cache-Control', 'no-cache') + res.setHeader('Connection', 'keep-alive') // Stream the body - if (response.body) { - const reader = response.body.getReader() - const pump = async () => { - while (true) { - const { done, value } = await reader.read() - if (done) break - res.write(value) - } - res.end() + const reader = readableStream.getReader() + const pump = async () => { + while (true) { + const { done, value } = await reader.read() + if (done) break + res.write(value) } - pump().catch((err) => { - console.error('Stream error:', err) - res.end() - }) - } else { res.end() } + pump().catch((err) => { + console.error('Stream error:', err) + res.end() + }) } catch (error: any) { console.error('[API] Error:', error) res.statusCode = 500 diff --git a/examples/vanilla-chat/package.json 
b/examples/vanilla-chat/package.json index 0f1fa49b..512b87b5 100644 --- a/examples/vanilla-chat/package.json +++ b/examples/vanilla-chat/package.json @@ -13,6 +13,6 @@ "@tanstack/ai-client": "workspace:*" }, "devDependencies": { - "vite": "^7.2.4" + "vite": "^7.2.7" } } diff --git a/package.json b/package.json index 2b7d740f..27baecb6 100644 --- a/package.json +++ b/package.json @@ -32,6 +32,7 @@ "dev": "pnpm run watch", "format": "prettier --experimental-cli --ignore-unknown '**/*' --write", "generate-docs": "node scripts/generate-docs.ts && pnpm run copy:readme", + "sync-docs-config": "node scripts/sync-docs-config.ts", "copy:readme": "cp README.md packages/typescript/ai/README.md && cp README.md packages/typescript/ai-devtools/README.md && cp README.md packages/typescript/ai-client/README.md && cp README.md packages/typescript/ai-gemini/README.md && cp README.md packages/typescript/ai-ollama/README.md && cp README.md packages/typescript/ai-openai/README.md && cp README.md packages/typescript/ai-react/README.md && cp README.md packages/typescript/ai-react-ui/README.md && cp README.md packages/typescript/react-ai-devtools/README.md && cp README.md packages/typescript/solid-ai-devtools/README.md", "changeset": "changeset", "changeset:publish": "changeset publish", @@ -65,7 +66,7 @@ "sherif": "^1.9.0", "tinyglobby": "^0.2.15", "typescript": "5.9.3", - "vite": "^7.2.4", + "vite": "^7.2.7", "vitest": "^4.0.14" } } diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index b3b500a7..6e02eb3a 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -44,10 +44,10 @@ "@tanstack/ai": "workspace:*" }, "devDependencies": { - "@vitest/coverage-v8": "4.0.14", - "zod": "^4.1.13" + "@vitest/coverage-v8": "4.0.14" }, "peerDependencies": { - "@tanstack/ai": "workspace:*" + "@tanstack/ai": "workspace:*", + "zod": "^4.0.0" } } diff --git 
a/packages/typescript/ai-anthropic/src/adapters/summarize.ts b/packages/typescript/ai-anthropic/src/adapters/summarize.ts new file mode 100644 index 00000000..02e08506 --- /dev/null +++ b/packages/typescript/ai-anthropic/src/adapters/summarize.ts @@ -0,0 +1,200 @@ +import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { + createAnthropicClient, + generateId, + getAnthropicApiKeyFromEnv, +} from '../utils' +import type { ANTHROPIC_MODELS } from '../model-meta' +import type { + StreamChunk, + SummarizationOptions, + SummarizationResult, +} from '@tanstack/ai' +import type { AnthropicClientConfig } from '../utils' + +/** + * Configuration for Anthropic summarize adapter + */ +export interface AnthropicSummarizeConfig extends AnthropicClientConfig {} + +/** + * Anthropic-specific provider options for summarization + */ +export interface AnthropicSummarizeProviderOptions { + /** Temperature for response generation (0-1) */ + temperature?: number + /** Maximum tokens in the response */ + maxTokens?: number +} + +/** Model type for Anthropic summarization */ +export type AnthropicSummarizeModel = (typeof ANTHROPIC_MODELS)[number] + +/** + * Anthropic Summarize Adapter + * + * Tree-shakeable adapter for Anthropic summarization functionality. + * Import only what you need for smaller bundle sizes. 
+ */ +export class AnthropicSummarizeAdapter< + TModel extends AnthropicSummarizeModel, +> extends BaseSummarizeAdapter { + readonly kind = 'summarize' as const + readonly name = 'anthropic' as const + + private client: ReturnType + + constructor(config: AnthropicSummarizeConfig, model: TModel) { + super({}, model) + this.client = createAnthropicClient(config) + } + + async summarize(options: SummarizationOptions): Promise { + const systemPrompt = this.buildSummarizationPrompt(options) + + const response = await this.client.messages.create({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + system: systemPrompt, + max_tokens: options.maxLength || 500, + temperature: 0.3, + stream: false, + }) + + const content = response.content + .map((c) => (c.type === 'text' ? c.text : '')) + .join('') + + return { + id: response.id, + model: response.model, + summary: content, + usage: { + promptTokens: response.usage.input_tokens, + completionTokens: response.usage.output_tokens, + totalTokens: response.usage.input_tokens + response.usage.output_tokens, + }, + } + } + + async *summarizeStream( + options: SummarizationOptions, + ): AsyncIterable { + const systemPrompt = this.buildSummarizationPrompt(options) + const id = generateId(this.name) + const model = options.model + let accumulatedContent = '' + let inputTokens = 0 + let outputTokens = 0 + + const stream = await this.client.messages.create({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + system: systemPrompt, + max_tokens: options.maxLength || 500, + temperature: 0.3, + stream: true, + }) + + for await (const event of stream) { + if (event.type === 'message_start') { + inputTokens = event.message.usage.input_tokens + } else if (event.type === 'content_block_delta') { + if (event.delta.type === 'text_delta') { + const delta = event.delta.text + accumulatedContent += delta + yield { + type: 'content', + id, + model, + timestamp: Date.now(), + delta, + 
content: accumulatedContent, + role: 'assistant', + } + } + } else if (event.type === 'message_delta') { + outputTokens = event.usage.output_tokens + yield { + type: 'done', + id, + model, + timestamp: Date.now(), + finishReason: event.delta.stop_reason as + | 'stop' + | 'length' + | 'content_filter' + | null, + usage: { + promptTokens: inputTokens, + completionTokens: outputTokens, + totalTokens: inputTokens + outputTokens, + }, + } + } + } + } + + private buildSummarizationPrompt(options: SummarizationOptions): string { + let prompt = 'You are a professional summarizer. ' + + switch (options.style) { + case 'bullet-points': + prompt += 'Provide a summary in bullet point format. ' + break + case 'paragraph': + prompt += 'Provide a summary in paragraph format. ' + break + case 'concise': + prompt += 'Provide a very concise summary in 1-2 sentences. ' + break + default: + prompt += 'Provide a clear and concise summary. ' + } + + if (options.focus && options.focus.length > 0) { + prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` + } + + if (options.maxLength) { + prompt += `Keep the summary under ${options.maxLength} tokens. ` + } + + return prompt + } +} + +/** + * Creates an Anthropic summarize adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'claude-sonnet-4-5', 'claude-3-5-haiku-latest') + * @param apiKey - Your Anthropic API key + * @param config - Optional additional configuration + * @returns Configured Anthropic summarize adapter instance with resolved types + */ +export function createAnthropicSummarize< + TModel extends AnthropicSummarizeModel, +>( + model: TModel, + apiKey: string, + config?: Omit, +): AnthropicSummarizeAdapter { + return new AnthropicSummarizeAdapter({ apiKey, ...config }, model) +} + +/** + * Creates an Anthropic summarize adapter with automatic API key detection. + * Type resolution happens here at the call site. 
+ * + * @param model - The model name (e.g., 'claude-sonnet-4-5', 'claude-3-5-haiku-latest') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Anthropic summarize adapter instance with resolved types + */ +export function anthropicSummarize( + model: TModel, + config?: Omit, +): AnthropicSummarizeAdapter { + const apiKey = getAnthropicApiKeyFromEnv() + return createAnthropicSummarize(model, apiKey, config) +} diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts similarity index 56% rename from packages/typescript/ai-anthropic/src/anthropic-adapter.ts rename to packages/typescript/ai-anthropic/src/adapters/text.ts index edfcea79..744911e9 100644 --- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -1,32 +1,20 @@ -import Anthropic_SDK from '@anthropic-ai/sdk' -import { BaseAdapter } from '@tanstack/ai' -import { ANTHROPIC_MODELS } from './model-meta' -import { convertToolsToProviderFormat } from './tools/tool-converter' -import { validateTextProviderOptions } from './text/text-provider-options' -import type { - AnthropicDocumentMetadata, - AnthropicImageMetadata, - AnthropicMessageMetadataByModality, - AnthropicTextMetadata, -} from './message-types' -import type { - ChatOptions, - ContentPart, - EmbeddingOptions, - EmbeddingResult, - ModelMessage, - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { convertToolsToProviderFormat } from '../tools/tool-converter' +import { validateTextProviderOptions } from '../text/text-provider-options' +import { + createAnthropicClient, + generateId, + getAnthropicApiKeyFromEnv, +} from '../utils' import type { + ANTHROPIC_MODELS, AnthropicChatModelProviderOptionsByName, AnthropicModelInputModalitiesByName, -} from './model-meta' +} 
from '../model-meta' import type { - ExternalTextProviderOptions, - InternalTextProviderOptions, -} from './text/text-provider-options' + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' import type { Base64ImageSource, Base64PDFSource, @@ -37,16 +25,35 @@ import type { URLImageSource, URLPDFSource, } from '@anthropic-ai/sdk/resources/messages' +import type Anthropic_SDK from '@anthropic-ai/sdk' +import type { + ContentPart, + Modality, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { + ExternalTextProviderOptions, + InternalTextProviderOptions, +} from '../text/text-provider-options' +import type { + AnthropicDocumentMetadata, + AnthropicImageMetadata, + AnthropicMessageMetadataByModality, + AnthropicTextMetadata, +} from '../message-types' +import type { AnthropicClientConfig } from '../utils' -export interface AnthropicConfig { - apiKey: string -} +/** + * Configuration for Anthropic text adapter + */ +export interface AnthropicTextConfig extends AnthropicClientConfig {} /** - * Anthropic-specific provider options - * @see https://ai-sdk.dev/providers/ai-sdk-providers/anthropic + * Anthropic-specific provider options for text/chat */ -export type AnthropicProviderOptions = ExternalTextProviderOptions +export type AnthropicTextProviderOptions = ExternalTextProviderOptions type AnthropicContentBlocks = Extract> extends Array @@ -55,36 +62,63 @@ type AnthropicContentBlocks = type AnthropicContentBlock = AnthropicContentBlocks extends Array ? Block : never -export class Anthropic extends BaseAdapter< - typeof ANTHROPIC_MODELS, - [], - AnthropicProviderOptions, - Record, - AnthropicChatModelProviderOptionsByName, - AnthropicModelInputModalitiesByName, +// =========================== +// Type Resolution Helpers +// =========================== + +/** + * Resolve provider options for a specific model. + * If the model has explicit options in the map, use those; otherwise use base options. 
+ */ +type ResolveProviderOptions = + TModel extends keyof AnthropicChatModelProviderOptionsByName + ? AnthropicChatModelProviderOptionsByName[TModel] + : AnthropicTextProviderOptions + +/** + * Resolve input modalities for a specific model. + * If the model has explicit modalities in the map, use those; otherwise use default. + */ +type ResolveInputModalities = + TModel extends keyof AnthropicModelInputModalitiesByName + ? AnthropicModelInputModalitiesByName[TModel] + : readonly ['text', 'image', 'document'] + +// =========================== +// Adapter Implementation +// =========================== + +/** + * Anthropic Text (Chat) Adapter + * + * Tree-shakeable adapter for Anthropic chat/text completion functionality. + * Import only what you need for smaller bundle sizes. + */ +export class AnthropicTextAdapter< + TModel extends (typeof ANTHROPIC_MODELS)[number], + TProviderOptions extends object = ResolveProviderOptions, + TInputModalities extends ReadonlyArray = + ResolveInputModalities, +> extends BaseTextAdapter< + TModel, + TProviderOptions, + TInputModalities, AnthropicMessageMetadataByModality > { - name = 'anthropic' as const - models = ANTHROPIC_MODELS - - declare _modelProviderOptionsByName: AnthropicChatModelProviderOptionsByName - declare _modelInputModalitiesByName: AnthropicModelInputModalitiesByName - declare _messageMetadataByModality: AnthropicMessageMetadataByModality + readonly kind = 'text' as const + readonly name = 'anthropic' as const private client: Anthropic_SDK - constructor(config: AnthropicConfig) { - super({}) - this.client = new Anthropic_SDK({ - apiKey: config.apiKey, - }) + constructor(config: AnthropicTextConfig, model: TModel) { + super({}, model) + this.client = createAnthropicClient(config) } async *chatStream( - options: ChatOptions, + options: TextOptions, ): AsyncIterable { try { - // Map common options to Anthropic format using the centralized mapping function const requestParams = this.mapCommonOptionsToAnthropic(options) 
const stream = await this.client.beta.messages.create( @@ -96,105 +130,111 @@ export class Anthropic extends BaseAdapter< ) yield* this.processAnthropicStream(stream, options.model, () => - this.generateId(), + generateId(this.name), ) - } catch (error: any) { - console.error('[Anthropic Adapter] Error in chatStream:', { - message: error?.message, - status: error?.status, - statusText: error?.statusText, - code: error?.code, - type: error?.type, - error: error, - stack: error?.stack, - }) - - // Emit an error chunk + } catch (error: unknown) { + const err = error as Error & { status?: number; code?: string } yield { type: 'error', - id: this.generateId(), + id: generateId(this.name), model: options.model, timestamp: Date.now(), error: { - message: error?.message || 'Unknown error occurred', - code: error?.code || error?.status, + message: err.message || 'Unknown error occurred', + code: err.code || String(err.status), }, } } } - async summarize(options: SummarizationOptions): Promise { - const systemPrompt = this.buildSummarizationPrompt(options) - - const response = await this.client.messages.create({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - system: systemPrompt, - max_tokens: options.maxLength || 500, - temperature: 0.3, - stream: false, - }) - - const content = response.content - .map((c) => (c.type === 'text' ? c.text : '')) - .join('') - - return { - id: response.id, - model: response.model, - summary: content, - usage: { - promptTokens: response.usage.input_tokens, - completionTokens: response.usage.output_tokens, - totalTokens: response.usage.input_tokens + response.usage.output_tokens, + /** + * Generate structured output using Anthropic's tool-based approach. + * Anthropic doesn't have native structured output, so we use a tool with the schema + * and force the model to call it. + * The outputSchema is already JSON Schema (converted in the ai layer). 
+ */ + async structuredOutput( + options: StructuredOutputOptions, + ): Promise> { + const { chatOptions, outputSchema } = options + + const requestParams = this.mapCommonOptionsToAnthropic(chatOptions) + + // Create a tool that will capture the structured output + // Anthropic's SDK requires input_schema with type: 'object' literal + const structuredOutputTool = { + name: 'structured_output', + description: + 'Use this tool to provide your response in the required structured format.', + input_schema: { + type: 'object' as const, + properties: outputSchema.properties ?? {}, + required: outputSchema.required ?? [], }, } - } - createEmbeddings(_options: EmbeddingOptions): Promise { - // Note: Anthropic doesn't have a native embeddings API - // You would need to use a different service or implement a workaround - throw new Error( - 'Embeddings are not natively supported by Anthropic. Consider using OpenAI or another provider for embeddings.', - ) - } + try { + // Make non-streaming request with tool_choice forced to our structured output tool + const response = await this.client.messages.create( + { + ...requestParams, + stream: false, + tools: [structuredOutputTool], + tool_choice: { type: 'tool', name: 'structured_output' }, + }, + { + signal: chatOptions.request?.signal, + headers: chatOptions.request?.headers, + }, + ) - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. ' - break - default: - prompt += 'Provide a clear and concise summary. 
' - } + // Extract the tool use content from the response + let parsed: unknown = null + let rawText = '' - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` - } + for (const block of response.content) { + if (block.type === 'tool_use' && block.name === 'structured_output') { + parsed = block.input + rawText = JSON.stringify(block.input) + break + } + } - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} tokens. ` - } + if (parsed === null) { + // Fallback: try to extract text content and parse as JSON + rawText = response.content + .map((b) => { + if (b.type === 'text') { + return b.text + } + return '' + }) + .join('') + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to extract structured output from response. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + } - return prompt + return { + data: parsed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + throw new Error( + `Structured output generation failed: ${err.message || 'Unknown error occurred'}`, + ) + } } - /** - * Maps common options to Anthropic-specific format - * Handles translation of normalized options to Anthropic's API format - */ private mapCommonOptionsToAnthropic( - options: ChatOptions, + options: TextOptions, ) { - const providerOptions = options.providerOptions as + const modelOptions = options.modelOptions as | InternalTextProviderOptions | undefined @@ -203,9 +243,8 @@ export class Anthropic extends BaseAdapter< ? 
convertToolsToProviderFormat(options.tools) : undefined - // Filter out invalid fields from providerOptions (like 'store' which is OpenAI-specific) const validProviderOptions: Partial = {} - if (providerOptions) { + if (modelOptions) { const validKeys: Array = [ 'container', 'context_management', @@ -218,34 +257,34 @@ export class Anthropic extends BaseAdapter< 'top_k', ] for (const key of validKeys) { - if (key in providerOptions) { - const value = providerOptions[key] - // Anthropic expects tool_choice to be an object, not a string + if (key in modelOptions) { + const value = modelOptions[key] if (key === 'tool_choice' && typeof value === 'string') { - ;(validProviderOptions as any)[key] = { type: value } + ;(validProviderOptions as Record)[key] = { + type: value, + } } else { - ;(validProviderOptions as any)[key] = value + ;(validProviderOptions as Record)[key] = value } } } } - // Ensure max_tokens is greater than thinking.budget_tokens if thinking is enabled const thinkingBudget = validProviderOptions.thinking?.type === 'enabled' ? validProviderOptions.thinking.budget_tokens : undefined - const defaultMaxTokens = options.options?.maxTokens || 1024 + const defaultMaxTokens = options.maxTokens || 1024 const maxTokens = thinkingBudget && thinkingBudget >= defaultMaxTokens - ? thinkingBudget + 1 // Ensure max_tokens is greater than budget_tokens + ? 
thinkingBudget + 1 : defaultMaxTokens const requestParams: InternalTextProviderOptions = { model: options.model, max_tokens: maxTokens, - temperature: options.options?.temperature, - top_p: options.options?.topP, + temperature: options.temperature, + top_p: options.topP, messages: formattedMessages, system: options.systemPrompts?.join('\n'), tools: tools, @@ -260,9 +299,7 @@ export class Anthropic extends BaseAdapter< ): TextBlockParam | ImageBlockParam | DocumentBlockParam { switch (part.type) { case 'text': { - const metadata = part.metadata as any as - | AnthropicTextMetadata - | undefined + const metadata = part.metadata as AnthropicTextMetadata | undefined return { type: 'text', text: part.content, @@ -271,9 +308,7 @@ export class Anthropic extends BaseAdapter< } case 'image': { - const metadata = part.metadata as any as - | AnthropicImageMetadata - | undefined + const metadata = part.metadata as AnthropicImageMetadata | undefined const imageSource: Base64ImageSource | URLImageSource = part.source.type === 'data' ? { @@ -285,8 +320,7 @@ export class Anthropic extends BaseAdapter< type: 'url', url: part.source.value, } - // exclude the media type - const { mediaType, ...meta } = metadata || {} + const { mediaType: _mediaType, ...meta } = metadata || {} return { type: 'image', source: imageSource, @@ -294,9 +328,7 @@ export class Anthropic extends BaseAdapter< } } case 'document': { - const metadata = part.metadata as any as - | AnthropicDocumentMetadata - | undefined + const metadata = part.metadata as AnthropicDocumentMetadata | undefined const docSource: Base64PDFSource | URLPDFSource = part.source.type === 'data' ? 
{ @@ -316,12 +348,10 @@ export class Anthropic extends BaseAdapter< } case 'audio': case 'video': - // Anthropic doesn't support audio/video directly, treat as text with a note throw new Error( `Anthropic does not support ${part.type} content directly`, ) default: { - // Exhaustive check - this should never happen with known types const _exhaustiveCheck: never = part throw new Error( `Unsupported content part type: ${(_exhaustiveCheck as ContentPart).type}`, @@ -393,7 +423,6 @@ export class Anthropic extends BaseAdapter< continue } - // Handle user messages with multimodal content if (role === 'user' && Array.isArray(message.content)) { const contentBlocks = message.content.map((part) => this.convertContentPartToAnthropic(part), @@ -424,7 +453,7 @@ export class Anthropic extends BaseAdapter< private async *processAnthropicStream( stream: AsyncIterable, model: string, - generateId: () => string, + genId: () => string, ): AsyncIterable { let accumulatedContent = '' let accumulatedThinking = '' @@ -446,7 +475,6 @@ export class Anthropic extends BaseAdapter< input: '', }) } else if (event.content_block.type === 'thinking') { - // Reset thinking content when a new thinking block starts accumulatedThinking = '' } } else if (event.type === 'content_block_delta') { @@ -455,7 +483,7 @@ export class Anthropic extends BaseAdapter< accumulatedContent += delta yield { type: 'content', - id: generateId(), + id: genId(), model: model, timestamp, delta, @@ -463,29 +491,24 @@ export class Anthropic extends BaseAdapter< role: 'assistant', } } else if (event.delta.type === 'thinking_delta') { - // Handle thinking content const delta = event.delta.thinking accumulatedThinking += delta yield { type: 'thinking', - id: generateId(), + id: genId(), model: model, timestamp, delta, content: accumulatedThinking, } } else if (event.delta.type === 'input_json_delta') { - // Tool input is being streamed const existing = toolCallsMap.get(currentToolIndex) if (existing) { - // Accumulate the 
input for final processing existing.input += event.delta.partial_json - // Yield the DELTA (partial_json), not the full accumulated input - // The stream processor will concatenate these deltas yield { type: 'tool_call', - id: generateId(), + id: genId(), model: model, timestamp, toolCall: { @@ -501,14 +524,11 @@ export class Anthropic extends BaseAdapter< } } } else if (event.type === 'content_block_stop') { - // If this is a tool call and we haven't received any input deltas, - // emit a tool_call chunk with empty arguments const existing = toolCallsMap.get(currentToolIndex) if (existing && existing.input === '') { - // No input_json_delta events received, emit empty arguments yield { type: 'tool_call', - id: generateId(), + id: genId(), model: model, timestamp, toolCall: { @@ -525,7 +545,7 @@ export class Anthropic extends BaseAdapter< } else if (event.type === 'message_stop') { yield { type: 'done', - id: generateId(), + id: genId(), model: model, timestamp, finishReason: 'stop', @@ -536,11 +556,10 @@ export class Anthropic extends BaseAdapter< case 'tool_use': { yield { type: 'done', - id: generateId(), + id: genId(), model: model, timestamp, finishReason: 'tool_calls', - usage: { promptTokens: event.usage.input_tokens || 0, completionTokens: event.usage.output_tokens || 0, @@ -554,7 +573,7 @@ export class Anthropic extends BaseAdapter< case 'max_tokens': { yield { type: 'error', - id: generateId(), + id: genId(), model: model, timestamp, error: { @@ -565,37 +584,10 @@ export class Anthropic extends BaseAdapter< } break } - case 'model_context_window_exceeded': { - yield { - type: 'error', - id: generateId(), - model: model, - timestamp, - error: { - message: - "The response was cut off because the model's context window was exceeded.", - code: 'context_window_exceeded', - }, - } - break - } - case 'refusal': { - yield { - type: 'error', - id: generateId(), - model: model, - timestamp, - error: { - message: 'The model refused to complete the request.', - code: 
'refusal', - }, - } - break - } default: { yield { type: 'done', - id: generateId(), + id: genId(), model: model, timestamp, finishReason: 'stop', @@ -612,84 +604,53 @@ export class Anthropic extends BaseAdapter< } } } - } catch (error: any) { - console.error('[Anthropic Adapter] Error in processAnthropicStream:', { - message: error?.message, - status: error?.status, - statusText: error?.statusText, - code: error?.code, - type: error?.type, - error: error, - stack: error?.stack, - }) + } catch (error: unknown) { + const err = error as Error & { status?: number; code?: string } yield { type: 'error', - id: generateId(), + id: genId(), model: model, timestamp, error: { - message: error?.message || 'Unknown error occurred', - code: error?.code || error?.status, + message: err.message || 'Unknown error occurred', + code: err.code || String(err.status), }, } } } } + /** - * Creates an Anthropic adapter with simplified configuration - * @param apiKey - Your Anthropic API key - * @returns A fully configured Anthropic adapter instance - * - * @example - * ```typescript - * const anthropic = createAnthropic("sk-ant-..."); - * - * const ai = new AI({ - * adapters: { - * anthropic, - * } - * }); - * ``` + * Creates an Anthropic chat adapter with explicit API key. + * Type resolution happens here at the call site. */ -export function createAnthropic( +export function createAnthropicChat< + TModel extends (typeof ANTHROPIC_MODELS)[number], +>( + model: TModel, apiKey: string, - config?: Omit, -): Anthropic { - return new Anthropic({ apiKey, ...config }) + config?: Omit, +): AnthropicTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities +> { + return new AnthropicTextAdapter({ apiKey, ...config }, model) } /** - * Create an Anthropic adapter with automatic API key detection from environment variables. 
- * - * Looks for `ANTHROPIC_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Anthropic adapter instance - * @throws Error if ANTHROPIC_API_KEY is not found in environment - * - * @example - * ```typescript - * // Automatically uses ANTHROPIC_API_KEY from environment - * const aiInstance = ai(anthropic()); - * ``` + * Creates an Anthropic text adapter with automatic API key detection. + * Type resolution happens here at the call site. */ -export function anthropic(config?: Omit): Anthropic { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? process.env - : undefined - const key = env?.ANTHROPIC_API_KEY - - if (!key) { - throw new Error( - 'ANTHROPIC_API_KEY is required. Please set it in your environment variables or use createAnthropic(apiKey, config) instead.', - ) - } - - return createAnthropic(key, config) +export function anthropicText( + model: TModel, + config?: Omit, +): AnthropicTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities +> { + const apiKey = getAnthropicApiKeyFromEnv() + return createAnthropicChat(model, apiKey, config) } diff --git a/packages/typescript/ai-anthropic/src/index.ts b/packages/typescript/ai-anthropic/src/index.ts index b4580b15..4bca2e4b 100644 --- a/packages/typescript/ai-anthropic/src/index.ts +++ b/packages/typescript/ai-anthropic/src/index.ts @@ -1,9 +1,29 @@ +// ============================================================================ +// New Tree-Shakeable Adapters (Recommended) +// ============================================================================ + +// Text (Chat) adapter - for chat/text completion +export { + AnthropicTextAdapter, + anthropicText, + createAnthropicChat, + type AnthropicTextConfig, + type 
AnthropicTextProviderOptions, +} from './adapters/text' + +// Summarize adapter - for text summarization export { - Anthropic, - createAnthropic, - anthropic, - type AnthropicConfig, -} from './anthropic-adapter' + AnthropicSummarizeAdapter, + anthropicSummarize, + createAnthropicSummarize, + type AnthropicSummarizeConfig, + type AnthropicSummarizeProviderOptions, +} from './adapters/summarize' + +// ============================================================================ +// Type Exports +// ============================================================================ + export type { AnthropicChatModelProviderOptionsByName, AnthropicModelInputModalitiesByName, diff --git a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts index 07ac10a3..582af7b3 100644 --- a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts @@ -1,5 +1,4 @@ -import { convertZodToJsonSchema } from '@tanstack/ai' -import type { Tool } from '@tanstack/ai' +import type { JSONSchema, Tool } from '@tanstack/ai' import type { z } from 'zod' import type { CacheControl } from '../text/text-provider-options' @@ -29,13 +28,17 @@ export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { const metadata = (tool.metadata as { cacheControl?: CacheControl | null } | undefined) || {} - // Convert Zod schema to JSON Schema - const jsonSchema = convertZodToJsonSchema(tool.inputSchema) + // Tool schemas are already converted to JSON Schema in the ai layer + const jsonSchema = (tool.inputSchema ?? 
{ + type: 'object', + properties: {}, + required: [], + }) as JSONSchema const inputSchema = { type: 'object' as const, - properties: jsonSchema?.properties || null, - required: jsonSchema?.required || null, + properties: jsonSchema.properties || null, + required: jsonSchema.required || null, } return { diff --git a/packages/typescript/ai-anthropic/src/utils/client.ts b/packages/typescript/ai-anthropic/src/utils/client.ts new file mode 100644 index 00000000..dddc5caf --- /dev/null +++ b/packages/typescript/ai-anthropic/src/utils/client.ts @@ -0,0 +1,45 @@ +import Anthropic_SDK from '@anthropic-ai/sdk' + +export interface AnthropicClientConfig { + apiKey: string +} + +/** + * Creates an Anthropic SDK client instance + */ +export function createAnthropicClient( + config: AnthropicClientConfig, +): Anthropic_SDK { + return new Anthropic_SDK({ + apiKey: config.apiKey, + }) +} + +/** + * Gets Anthropic API key from environment variables + * @throws Error if ANTHROPIC_API_KEY is not found + */ +export function getAnthropicApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.ANTHROPIC_API_KEY + + if (!key) { + throw new Error( + 'ANTHROPIC_API_KEY is required. 
Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Generates a unique ID with a prefix + */ +export function generateId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +} diff --git a/packages/typescript/ai-anthropic/src/utils/index.ts b/packages/typescript/ai-anthropic/src/utils/index.ts new file mode 100644 index 00000000..b11d8e36 --- /dev/null +++ b/packages/typescript/ai-anthropic/src/utils/index.ts @@ -0,0 +1,6 @@ +export { + createAnthropicClient, + generateId, + getAnthropicApiKeyFromEnv, + type AnthropicClientConfig, +} from './client' diff --git a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts index 934a9204..5e3db434 100644 --- a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts +++ b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts @@ -1,9 +1,7 @@ import { describe, it, expect, beforeEach, vi } from 'vitest' import { chat, type Tool, type StreamChunk } from '@tanstack/ai' -import { - Anthropic, - type AnthropicProviderOptions, -} from '../src/anthropic-adapter' +import { AnthropicTextAdapter } from '../src/adapters/text' +import type { AnthropicTextProviderOptions } from '../src/adapters/text' import { z } from 'zod' const mocks = vi.hoisted(() => { @@ -37,7 +35,9 @@ vi.mock('@anthropic-ai/sdk', () => { return { default: MockAnthropic } }) -const createAdapter = () => new Anthropic({ apiKey: 'test-key' }) +const createAdapter = ( + model: TModel, +) => new AnthropicTextAdapter({ apiKey: 'test-key' }, model) const toolArguments = JSON.stringify({ location: 'Berlin' }) @@ -101,15 +101,14 @@ describe('Anthropic adapter option mapping', () => { thinking: { type: 'enabled', budget_tokens: 1500 }, top_k: 5, system: 'Respond with JSON', - } satisfies AnthropicProviderOptions & { system: string } + } 
satisfies AnthropicTextProviderOptions & { system: string } - const adapter = createAdapter() + const adapter = createAdapter('claude-3-7-sonnet-20250219') // Consume the stream to trigger the API call const chunks: StreamChunk[] = [] for await (const chunk of chat({ adapter, - model: 'claude-3-7-sonnet-20250219', messages: [ { role: 'user', content: 'What is the forecast?' }, { @@ -126,11 +125,9 @@ describe('Anthropic adapter option mapping', () => { { role: 'tool', toolCallId: 'call_weather', content: '{"temp":72}' }, ], tools: [weatherTool], - options: { - maxTokens: 3000, - temperature: 0.4, - }, - providerOptions, + maxTokens: 3000, + temperature: 0.4, + modelOptions: providerOptions, })) { chunks.push(chunk) } diff --git a/packages/typescript/ai-client/README.md b/packages/typescript/ai-client/README.md index 77acf865..e0de4f3a 100644 --- a/packages/typescript/ai-client/README.md +++ b/packages/typescript/ai-client/README.md @@ -38,7 +38,9 @@ A powerful, type-safe AI SDK for building AI-powered applications. - Provider-agnostic adapters (OpenAI, Anthropic, Gemini, Ollama, etc.) +- **Tree-shakeable adapters** - Import only what you need for smaller bundles - **Multimodal content support** - Send images, audio, video, and documents +- **Image generation** - Generate images with OpenAI DALL-E/GPT-Image and Gemini Imagen - Chat completion, streaming, and agent loop strategies - Headless chat state management with adapters (SSE, HTTP stream, custom) - Isomorphic type-safe tools with server/client execution @@ -46,6 +48,30 @@ A powerful, type-safe AI SDK for building AI-powered applications. 
### Read the docs → +## Tree-Shakeable Adapters + +Import only the functionality you need for smaller bundle sizes: + +```typescript +// Only chat functionality - no summarization code bundled +import { openaiText } from '@tanstack/ai-openai/adapters' +import { generate } from '@tanstack/ai' + +const textAdapter = openaiText() + +const result = generate({ + adapter: textAdapter, + model: 'gpt-4o', + messages: [{ role: 'user', content: [{ type: 'text', content: 'Hello!' }] }], +}) + +for await (const chunk of result) { + console.log(chunk) +} +``` + +Available adapters: `openaiText`, `openaiEmbed`, `openaiSummarize`, `anthropicText`, `geminiText`, `ollamaText`, and more. + ## Bonus: TanStack Start Integration TanStack AI works with **any** framework (Next.js, Express, Remix, etc.). diff --git a/packages/typescript/ai-client/package.json b/packages/typescript/ai-client/package.json index 43f17818..00c52dbb 100644 --- a/packages/typescript/ai-client/package.json +++ b/packages/typescript/ai-client/package.json @@ -47,7 +47,7 @@ }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", - "vite": "^7.2.4", + "vite": "^7.2.7", "zod": "^4.1.13" } } diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index 3b9e1787..1d1ba091 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -26,6 +26,7 @@ export class ChatClient { private clientToolsRef: { current: Map } private currentStreamId: string | null = null private currentMessageId: string | null = null + private postStreamActions: Array<() => Promise> = [] private callbacksRef: { current: { @@ -323,6 +324,9 @@ export class ChatClient { } finally { this.abortController = null this.setIsLoading(false) + + // Drain any actions that were queued while the stream was in progress + await this.drainPostStreamActions() } } @@ -394,10 +398,13 @@ export class ChatClient { result.errorText, ) - // Check if we 
should auto-send - if (this.shouldAutoSend()) { - await this.continueFlow() + // If stream is in progress, queue continuation check for after it ends + if (this.isLoading) { + this.queuePostStreamAction(() => this.checkForContinuation()) + return } + + await this.checkForContinuation() } /** @@ -433,18 +440,39 @@ export class ChatClient { // Add response via processor this.processor.addToolApprovalResponse(response.id, response.approved) - // Check if we should auto-send - if (this.shouldAutoSend()) { - await this.continueFlow() + // If stream is in progress, queue continuation check for after it ends + if (this.isLoading) { + this.queuePostStreamAction(() => this.checkForContinuation()) + return } + + await this.checkForContinuation() } /** - * Continue the agent flow with current messages + * Queue an action to be executed after the current stream ends */ - private async continueFlow(): Promise { - if (this.isLoading) return - await this.streamResponse() + private queuePostStreamAction(action: () => Promise): void { + this.postStreamActions.push(action) + } + + /** + * Drain and execute all queued post-stream actions + */ + private async drainPostStreamActions(): Promise { + while (this.postStreamActions.length > 0) { + const action = this.postStreamActions.shift()! 
+ await action() + } + } + + /** + * Check if we should continue the flow and do so if needed + */ + private async checkForContinuation(): Promise { + if (this.shouldAutoSend()) { + await this.streamResponse() + } } /** diff --git a/packages/typescript/ai-client/src/types.ts b/packages/typescript/ai-client/src/types.ts index b5e58962..4f83debb 100644 --- a/packages/typescript/ai-client/src/types.ts +++ b/packages/typescript/ai-client/src/types.ts @@ -126,7 +126,7 @@ export type MessagePart = any> = */ export interface UIMessage = any> { id: string - role: 'user' | 'assistant' + role: 'system' | 'user' | 'assistant' parts: Array> createdAt?: Date } diff --git a/packages/typescript/ai-devtools/README.md b/packages/typescript/ai-devtools/README.md index 77acf865..e0de4f3a 100644 --- a/packages/typescript/ai-devtools/README.md +++ b/packages/typescript/ai-devtools/README.md @@ -38,7 +38,9 @@ A powerful, type-safe AI SDK for building AI-powered applications. - Provider-agnostic adapters (OpenAI, Anthropic, Gemini, Ollama, etc.) +- **Tree-shakeable adapters** - Import only what you need for smaller bundles - **Multimodal content support** - Send images, audio, video, and documents +- **Image generation** - Generate images with OpenAI DALL-E/GPT-Image and Gemini Imagen - Chat completion, streaming, and agent loop strategies - Headless chat state management with adapters (SSE, HTTP stream, custom) - Isomorphic type-safe tools with server/client execution @@ -46,6 +48,30 @@ A powerful, type-safe AI SDK for building AI-powered applications. 
### Read the docs → +## Tree-Shakeable Adapters + +Import only the functionality you need for smaller bundle sizes: + +```typescript +// Only chat functionality - no summarization code bundled +import { openaiText } from '@tanstack/ai-openai/adapters' +import { generate } from '@tanstack/ai' + +const textAdapter = openaiText() + +const result = generate({ + adapter: textAdapter, + model: 'gpt-4o', + messages: [{ role: 'user', content: [{ type: 'text', content: 'Hello!' }] }], +}) + +for await (const chunk of result) { + console.log(chunk) +} +``` + +Available adapters: `openaiText`, `openaiEmbed`, `openaiSummarize`, `anthropicText`, `geminiText`, `ollamaText`, and more. + ## Bonus: TanStack Start Integration TanStack AI works with **any** framework (Next.js, Express, Remix, etc.). diff --git a/packages/typescript/ai-devtools/package.json b/packages/typescript/ai-devtools/package.json index 94734697..c27e8353 100644 --- a/packages/typescript/ai-devtools/package.json +++ b/packages/typescript/ai-devtools/package.json @@ -55,7 +55,7 @@ "devDependencies": { "@vitest/coverage-v8": "4.0.14", "jsdom": "^27.2.0", - "vite": "^7.2.4", + "vite": "^7.2.7", "vite-plugin-solid": "^2.11.10" } } diff --git a/packages/typescript/ai-devtools/src/components/ConversationDetails.tsx b/packages/typescript/ai-devtools/src/components/ConversationDetails.tsx index aad415d1..333bc56f 100644 --- a/packages/typescript/ai-devtools/src/components/ConversationDetails.tsx +++ b/packages/typescript/ai-devtools/src/components/ConversationDetails.tsx @@ -5,7 +5,6 @@ import { ChunksTab, ConversationHeader, ConversationTabs, - EmbeddingsTab, MessagesTab, SummariesTab, } from './conversation' @@ -31,11 +30,6 @@ export const ConversationDetails: Component = () => { if (conv.type === 'server') { if (conv.chunks.length > 0) { setActiveTab('chunks') - } else if ( - conv.hasEmbedding || - (conv.embeddings && conv.embeddings.length > 0) - ) { - setActiveTab('embeddings') } else if ( conv.hasSummarize || 
(conv.summaries && conv.summaries.length > 0) @@ -75,9 +69,6 @@ export const ConversationDetails: Component = () => { - - - diff --git a/packages/typescript/ai-devtools/src/components/conversation/ConversationTabs.tsx b/packages/typescript/ai-devtools/src/components/conversation/ConversationTabs.tsx index a76105b4..fe6eeec7 100644 --- a/packages/typescript/ai-devtools/src/components/conversation/ConversationTabs.tsx +++ b/packages/typescript/ai-devtools/src/components/conversation/ConversationTabs.tsx @@ -3,7 +3,7 @@ import { useStyles } from '../../styles/use-styles' import type { Component } from 'solid-js' import type { Conversation } from '../../store/ai-context' -export type TabType = 'messages' | 'chunks' | 'embeddings' | 'summaries' +export type TabType = 'messages' | 'chunks' | 'summaries' interface ConversationTabsProps { conversation: Conversation @@ -19,7 +19,6 @@ export const ConversationTabs: Component = (props) => { const totalRawChunks = () => conv().chunks.reduce((sum, c) => sum + (c.chunkCount || 1), 0) - const embeddingsCount = () => conv().embeddings?.length ?? 0 const summariesCount = () => conv().summaries?.length ?? 
0 // Determine if we should show any chat-related tabs @@ -27,7 +26,6 @@ export const ConversationTabs: Component = (props) => { const hasMessages = () => conv().type === 'client' && conv().messages.length > 0 const hasChunks = () => conv().chunks.length > 0 || conv().type === 'server' - const hasEmbeddings = () => conv().hasEmbedding || embeddingsCount() > 0 const hasSummaries = () => conv().hasSummarize || summariesCount() > 0 // Count how many tabs would be visible @@ -35,7 +33,6 @@ export const ConversationTabs: Component = (props) => { let count = 0 if (hasMessages()) count++ if (hasChunks() && conv().type === 'server') count++ - if (hasEmbeddings()) count++ if (hasSummaries()) count++ return count } @@ -73,19 +70,6 @@ export const ConversationTabs: Component = (props) => { 📦 Chunks ({totalRawChunks()}) - {/* Show embeddings tab if there are embedding operations */} - - - {/* Show summaries tab if there are summarize operations */} + + + ) +} + +function AddOnPanel({ + addOnState, + onToggle, +}: { + addOnState: Record + onToggle: (id: string) => void +}) { + const selectedCount = Object.values(addOnState).filter( + (s) => s.selected, + ).length + + return ( +
+
+

+ + Project Add-ons +

+

+ {selectedCount} of {availableAddOns.length} add-ons selected +

+
+
+ {availableAddOns.map((addOn) => ( + onToggle(addOn.id)} + /> + ))} +
+
+ ) +} + +function Messages({ messages }: { messages: Array }) { + const messagesContainerRef = useRef(null) + + useEffect(() => { + if (messagesContainerRef.current) { + messagesContainerRef.current.scrollTop = + messagesContainerRef.current.scrollHeight + } + }, [messages]) + + if (!messages.length) { + return ( +
+
+ +

Ask the AI to configure your add-ons

+

+ Try: "Add authentication and payments" or "Show me what's available" +

+
+
+ ) + } + + return ( +
+ {messages.map(({ id, role, parts }) => ( +
+
+ {role === 'assistant' ? ( +
+ AI +
+ ) : ( +
+ U +
+ )} +
+ {parts.map((part, index) => { + if (part.type === 'text' && part.content) { + return ( +
+ + {part.content} + +
+ ) + } + + // Show tool call status + if (part.type === 'tool-call') { + return ( +
+
+ Tool: + + {part.name} + + {part.state === 'input-streaming' && ( + + )} + {part.output !== undefined && ( + + )} +
+ {part.output !== undefined && ( +
+
+                            {JSON.stringify(part.output, null, 2).slice(0, 200)}
+                            {JSON.stringify(part.output).length > 200
+                              ? '...'
+                              : ''}
+                          
+
+ )} +
+ ) + } + + return null + })} +
+
+
+ ))} +
+ ) +} + +function DebugPanel({ + messages, + chunks, + onClearChunks, +}: { + messages: Array + chunks: Array + onClearChunks: () => void +}) { + const [activeTab, setActiveTab] = useState<'messages' | 'chunks'>('chunks') + + return ( +
+
+

Debug Panel

+

+ Monitor multi-tool execution +

+ +
+ + +
+
+ +
+ {activeTab === 'messages' && ( +
+            {JSON.stringify(messages, null, 2)}
+          
+ )} + + {activeTab === 'chunks' && ( +
+ + +
+ + + + + + + + + + {chunks.map((chunk, idx) => { + const toolName = + chunk.toolCall?.function?.name || chunk.toolName || '-' + + let detail = '-' + if (chunk.type === 'content' && chunk.content) { + detail = chunk.content + } else if ( + chunk.type === 'tool_call' && + chunk.toolCall?.function?.arguments + ) { + detail = chunk.toolCall.function.arguments + } else if (chunk.type === 'tool_result' && chunk.content) { + detail = chunk.content + } else if (chunk.type === 'tool-input-available') { + detail = JSON.stringify(chunk.input) + } else if (chunk.type === 'done') { + detail = `Finish: ${chunk.finishReason || 'unknown'}` + } + + if (detail.length > 100) { + detail = detail.substring(0, 100) + '...' + } + + return ( + + + + + + ) + })} + +
TypeTool NameDetail
{chunk.type}{toolName} + {detail} +
+
+
+ )} +
+
+ ) +} + +function AddonManagerPage() { + const [chunks, setChunks] = useState>([]) + const [input, setInput] = useState('') + + // Initialize add-on state + const [addOnState, setAddOnState] = useState>( + () => { + const initial: Record = {} + for (const addOn of availableAddOns) { + initial[addOn.id] = { selected: false, enabled: true } + } + return initial + }, + ) + + // Toggle add-on selection (for manual UI interaction) + const toggleAddOn = useCallback((id: string) => { + setAddOnState((prev) => ({ + ...prev, + [id]: { + ...prev[id], + selected: !prev[id]?.selected, + }, + })) + }, []) + + // Client tool 1: Returns current add-on state + const getAvailableAddOnsClient = useMemo( + () => + getAvailableAddOnsToolDef.client(() => { + console.log('[Client Tool] getAvailableAddOns called') + return availableAddOns.map((addOn) => ({ + id: addOn.id, + name: addOn.name, + description: addOn.description, + type: addOn.type, + selected: addOnState[addOn.id]?.selected ?? false, + enabled: addOnState[addOn.id]?.enabled ?? true, + })) + }), + [addOnState], + ) + + // Client tool 2: Selects add-ons + const selectAddOnsClient = useMemo( + () => + selectAddOnsToolDef.client((args) => { + console.log('[Client Tool] selectAddOns called with:', args) + + // Calculate what will be selected BEFORE calling setState + // (setState callback is async, so we can't read results from it) + const toSelect: string[] = [] + for (const addOnId of args.addOnIds) { + const state = addOnState[addOnId] + if (state && !state.selected && state.enabled) { + toSelect.push(addOnId) + } + } + + // Update state if there's anything to select + if (toSelect.length > 0) { + setAddOnState((prev) => { + const next = { ...prev } + for (const addOnId of toSelect) { + next[addOnId] = { ...next[addOnId], selected: true } + } + return next + }) + } + + return { + success: toSelect.length > 0, + selectedAddOns: toSelect, + message: + toSelect.length > 0 + ? 
`Successfully selected: ${toSelect.join(', ')}` + : 'No add-ons were selected (may already be selected or not found).', + } + }), + [addOnState], + ) + + // Client tool 3: Unselects add-ons + const unselectAddOnsClient = useMemo( + () => + unselectAddOnsToolDef.client((args) => { + console.log('[Client Tool] unselectAddOns called with:', args) + + // Calculate what will be unselected BEFORE calling setState + // (setState callback is async, so we can't read results from it) + const toUnselect: string[] = [] + for (const addOnId of args.addOnIds) { + const state = addOnState[addOnId] + if (state && state.selected && state.enabled) { + toUnselect.push(addOnId) + } + } + + // Update state if there's anything to unselect + if (toUnselect.length > 0) { + setAddOnState((prev) => { + const next = { ...prev } + for (const addOnId of toUnselect) { + next[addOnId] = { ...next[addOnId], selected: false } + } + return next + }) + } + + return { + success: toUnselect.length > 0, + unselectedAddOns: toUnselect, + message: + toUnselect.length > 0 + ? `Successfully unselected: ${toUnselect.join(', ')}` + : 'No add-ons were unselected (may not be selected or not found).', + } + }), + [addOnState], + ) + + // Combine client tools + const tools = useMemo( + () => + clientTools( + getAvailableAddOnsClient, + selectAddOnsClient, + unselectAddOnsClient, + ), + [getAvailableAddOnsClient, selectAddOnsClient, unselectAddOnsClient], + ) + + const { messages, sendMessage, isLoading, stop } = useChat({ + connection: fetchServerSentEvents('/api/addon-chat'), + tools, + onChunk: (chunk: any) => { + setChunks((prev) => [...prev, chunk]) + }, + }) + + const clearChunks = () => setChunks([]) + + return ( +
+ {/* Left side - Add-on Selection Panel (1/4 width) */} +
+ +
+ + {/* Middle - Chat (1/2 width) */} +
+
+

AI Add-on Assistant

+

+ Ask me to configure your project add-ons +

+
+ + + +
+ {isLoading && ( +
+ +
+ )} +
+