diff --git a/docs/adapters/openrouter.md b/docs/adapters/openrouter.md
new file mode 100644
index 00000000..fc11af39
--- /dev/null
+++ b/docs/adapters/openrouter.md
@@ -0,0 +1,231 @@
+---
+title: OpenRouter Adapter
+id: openrouter-adapter
+---
+
+The OpenRouter adapter provides access to 300+ AI models through a single, unified API, including models from OpenAI, Anthropic, Google, Meta, Mistral, and many more.
+
+## Installation
+
+```bash
+npm install @tanstack/ai-openrouter
+```
+
+## Basic Usage
+
+```typescript
+import { chat } from "@tanstack/ai";
+import { openrouter } from "@tanstack/ai-openrouter";
+
+const adapter = openrouter();
+
+const stream = chat({
+ adapter,
+ messages: [{ role: "user", content: "Hello!" }],
+ model: "openai/gpt-4o",
+});
+```
+
+## Configuration
+
+```typescript
+import { createOpenRouter, type OpenRouterConfig } from "@tanstack/ai-openrouter";
+
+const config: OpenRouterConfig = {
+ apiKey: process.env.OPENROUTER_API_KEY!,
+ baseURL: "https://openrouter.ai/api/v1", // Optional
+ httpReferer: "https://your-app.com", // Optional, for rankings
+ xTitle: "Your App Name", // Optional, for rankings
+};
+
+const { apiKey, ...options } = config;
+const adapter = createOpenRouter(apiKey, options);
+```
+
+## Available Models
+
+OpenRouter provides access to 300+ models. Model IDs use the format `provider/model-name`:
+
+```typescript
+model: "openai/gpt-5.1"
+model: "anthropic/claude-sonnet-4.5"
+model: "google/gemini-3-pro-preview"
+model: "meta-llama/llama-4-maverick"
+model: "deepseek/deepseek-v3.2"
+```
+
+See the full list at [openrouter.ai/models](https://openrouter.ai/models).
+
+## Example: Chat Completion
+
+```typescript
+import { chat, toStreamResponse } from "@tanstack/ai";
+import { openrouter } from "@tanstack/ai-openrouter";
+
+const adapter = openrouter();
+
+export async function POST(request: Request) {
+ const { messages } = await request.json();
+
+ const stream = chat({
+ adapter,
+ messages,
+ model: "openai/gpt-4o",
+ });
+
+ return toStreamResponse(stream);
+}
+```
+
+## Example: With Tools
+
+```typescript
+import { chat, toolDefinition } from "@tanstack/ai";
+import { openrouter } from "@tanstack/ai-openrouter";
+import { z } from "zod";
+
+const adapter = openrouter();
+
+const getWeatherDef = toolDefinition({
+ name: "get_weather",
+ description: "Get the current weather",
+ inputSchema: z.object({
+ location: z.string(),
+ }),
+});
+
+const getWeather = getWeatherDef.server(async ({ location }) => {
+ return { temperature: 72, conditions: "sunny" };
+});
+
+const stream = chat({
+ adapter,
+ messages,
+ model: "openai/gpt-4o",
+ tools: [getWeather],
+});
+```
+
+## Web Search
+
+OpenRouter supports web search through the `plugins` configuration, which enables real-time web search for any model:
+
+```typescript
+const stream = chat({
+ adapter,
+ messages: [{ role: "user", content: "What's the latest AI news?" }],
+ model: "openai/gpt-4o-mini",
+ providerOptions: {
+ plugins: [
+ {
+ id: "web",
+ engine: "exa", // "native" or "exa"
+ max_results: 5, // default: 5
+ },
+ ],
+ },
+});
+```
+
+Alternatively, use the `:online` model suffix:
+
+```typescript
+const stream = chat({
+ adapter,
+ messages,
+ model: "openai/gpt-4o-mini:online",
+});
+```
+
+## Provider Options
+
+OpenRouter supports extensive provider-specific options:
+
+```typescript
+const stream = chat({
+ adapter,
+ messages,
+ model: "openai/gpt-4o",
+ providerOptions: {
+ temperature: 0.7,
+ max_tokens: 1000,
+ top_p: 0.9,
+ top_k: 40,
+ frequency_penalty: 0.5,
+ presence_penalty: 0.5,
+ repetition_penalty: 1.1,
+ seed: 42,
+ tool_choice: "auto",
+ response_format: { type: "json_object" },
+ // Routing options
+ models: ["openai/gpt-4o", "anthropic/claude-3.5-sonnet"], // Fallback models
+ route: "fallback",
+ // Provider preferences
+ provider: {
+ order: ["OpenAI", "Anthropic"],
+ allow_fallbacks: true,
+ },
+ },
+});
+```
+
+## Environment Variables
+
+Set your API key in environment variables:
+
+```bash
+OPENROUTER_API_KEY=sk-or-...
+```
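+
+With the key set, `openrouter()` picks it up automatically, so no key needs to appear in application code:
+
+```typescript
+import { openrouter } from "@tanstack/ai-openrouter";
+
+// Reads OPENROUTER_API_KEY from process.env (or window.env when injected)
+const adapter = openrouter();
+```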
+
+## Model Routing
+
+OpenRouter can automatically select a model and fall back to alternatives if the primary fails:
+
+```typescript
+const stream = chat({
+ adapter,
+ messages,
+ model: "openrouter/auto", // Automatic model selection
+ providerOptions: {
+ models: [
+ "openai/gpt-4o",
+ "anthropic/claude-3.5-sonnet",
+ "google/gemini-pro",
+ ],
+ route: "fallback", // Use fallback if primary fails
+ },
+});
+```
+
+## API Reference
+
+### `openrouter(config?)`
+
+Creates an OpenRouter adapter with automatic API key detection from `OPENROUTER_API_KEY`.
+
+**Parameters:**
+
+- `config.baseURL?` - Custom base URL (optional)
+- `config.httpReferer?` - HTTP Referer header for rankings (optional)
+- `config.xTitle?` - X-Title header for rankings (optional)
+
+**Returns:** An OpenRouter adapter instance.
+
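+For example, a minimal sketch that passes only the optional ranking headers (placeholder values):
+
+```typescript
+import { openrouter } from "@tanstack/ai-openrouter";
+
+// The API key is still read from OPENROUTER_API_KEY
+const adapter = openrouter({
+  httpReferer: "https://your-app.com",
+  xTitle: "Your App Name",
+});
+```
+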
+### `createOpenRouter(apiKey, config?)`
+
+Creates an OpenRouter adapter with an explicit API key.
+
+**Parameters:**
+
+- `apiKey` - OpenRouter API key (required)
+- `config.baseURL?` - Custom base URL (optional)
+- `config.httpReferer?` - HTTP Referer header (optional)
+- `config.xTitle?` - X-Title header (optional)
+
+**Returns:** An OpenRouter adapter instance.
+
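+A minimal sketch, assuming the key is supplied explicitly (for example from a secrets manager):
+
+```typescript
+import { createOpenRouter } from "@tanstack/ai-openrouter";
+
+const adapter = createOpenRouter(process.env.OPENROUTER_API_KEY!, {
+  xTitle: "Your App Name", // optional ranking header
+});
+```
+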
+## Next Steps
+
+- [Getting Started](../getting-started/quick-start) - Learn the basics
+- [Tools Guide](../guides/tools) - Learn about tools
+- [Other Adapters](./openai) - Explore other providers
+
diff --git a/docs/config.json b/docs/config.json
index d75a3b7b..d3c9be70 100644
--- a/docs/config.json
+++ b/docs/config.json
@@ -100,6 +100,10 @@
"label": "OpenAI",
"to": "adapters/openai"
},
+ {
+ "label": "OpenRouter",
+ "to": "adapters/openrouter"
+ },
{
"label": "Anthropic",
"to": "adapters/anthropic"
diff --git a/packages/typescript/smoke-tests/adapters/README.md b/packages/typescript/ai-openrouter/README.md
similarity index 78%
rename from packages/typescript/smoke-tests/adapters/README.md
rename to packages/typescript/ai-openrouter/README.md
index 7c414307..77acf865 100644
--- a/packages/typescript/smoke-tests/adapters/README.md
+++ b/packages/typescript/ai-openrouter/README.md
@@ -38,12 +38,39 @@
A powerful, type-safe AI SDK for building AI-powered applications.
- Provider-agnostic adapters (OpenAI, Anthropic, Gemini, Ollama, etc.)
+- **Multimodal content support** - Send images, audio, video, and documents
- Chat completion, streaming, and agent loop strategies
- Headless chat state management with adapters (SSE, HTTP stream, custom)
-- Type-safe tools with server/client execution
+- Isomorphic type-safe tools with server/client execution
+- **Enhanced integration with TanStack Start** - Share implementations between AI tools and server functions
### Read the docs →
+## Bonus: TanStack Start Integration
+
+TanStack AI works with **any** framework (Next.js, Express, Remix, etc.).
+
+**With TanStack Start**, you get a bonus: share implementations between AI tools and server functions with `createServerFnTool`:
+
+```typescript
+import { createServerFnTool } from '@tanstack/ai-react'
+
+// Define once, get AI tool AND server function (TanStack Start only)
+const getProducts = createServerFnTool({
+ name: 'getProducts',
+ inputSchema: z.object({ query: z.string() }),
+ execute: async ({ query }) => db.products.search(query),
+})
+
+// Use in AI chat
+chat({ tools: [getProducts.server] })
+
+// Call directly from components (no API endpoint needed!)
+const products = await getProducts.serverFn({ query: 'laptop' })
+```
+
+No duplicate logic, full type safety, automatic validation. The `serverFn` feature requires TanStack Start. See [docs](https://tanstack.com/ai) for details.
+
## Get Involved
- We welcome issues and pull requests!
@@ -88,7 +115,7 @@ We're looking for TanStack AI Partners to join our mission! Partner with us to p
- TanStack Config – Tooling for JS/TS packages
- TanStack DB – Reactive sync client store
-- TanStack Devtools – Unified devtools panel
+- TanStack Devtools – Unified devtools panel
- TanStack Form – Type‑safe form state
- TanStack Pacer – Debouncing, throttling, batching
- TanStack Query – Async state & caching
diff --git a/packages/typescript/ai-openrouter/live-tests/image-test.ts b/packages/typescript/ai-openrouter/live-tests/image-test.ts
new file mode 100644
index 00000000..b5cee61b
--- /dev/null
+++ b/packages/typescript/ai-openrouter/live-tests/image-test.ts
@@ -0,0 +1,265 @@
+import { createOpenRouter } from '../src/index'
+import { readFileSync } from 'fs'
+import { join, dirname } from 'path'
+import { fileURLToPath } from 'url'
+
+const __dirname = dirname(fileURLToPath(import.meta.url))
+try {
+ const envContent = readFileSync(join(__dirname, '.env.local'), 'utf-8')
+ envContent.split('\n').forEach((line) => {
+ const match = line.match(/^([^=]+)=(.*)$/)
+ if (match) {
+ process.env[match[1].trim()] = match[2].trim()
+ }
+ })
+} catch {}
+
+const apiKey = process.env.OPENROUTER_API_KEY
+
+if (!apiKey) {
+ console.error('❌ OPENROUTER_API_KEY not found in .env.local')
+ process.exit(1)
+}
+
+function extractImageUrls(content: string): Array<string> {
+  const imageRegex = /!\[Generated Image\]\(([^)]+)\)/g
+  const urls: Array<string> = []
+ let match
+ while ((match = imageRegex.exec(content)) !== null) {
+ urls.push(match[1])
+ }
+ return urls
+}
+
+async function testGeminiImageGeneration() {
+ console.log(
+ '🚀 Testing OpenRouter image generation with gemini-2.5-flash-image\n',
+ )
+
+ const adapter = createOpenRouter(apiKey!)
+
+ const model = 'google/gemini-2.5-flash-image'
+ const prompt =
+ 'Generate a beautiful image of a futuristic cityscape at night with neon lights and flying cars.'
+
+ const messages = [
+ {
+ role: 'user' as const,
+ content: prompt,
+ },
+ ]
+
+ console.log('📤 Sending image generation request:')
+ console.log(' Model:', model)
+ console.log(' Prompt:', prompt)
+ console.log()
+
+ try {
+ console.log('⏳ Generating image (this may take a moment)...\n')
+
+ let fullContent = ''
+
+ const stream = adapter.chatStream({
+ model,
+ messages,
+ providerOptions: {
+ modalities: ['image', 'text'],
+ },
+ })
+
+ for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ fullContent = chunk.content
+ }
+
+ if (chunk.type === 'done') {
+ console.log('📊 Usage:', chunk.usage)
+ }
+
+ if (chunk.type === 'error') {
+ console.error('❌ Stream error:', chunk.error)
+ return false
+ }
+ }
+
+ const imageUrls = extractImageUrls(fullContent)
+
+ console.log('\n' + '='.repeat(60))
+ console.log('📊 Test Summary (Gemini Image Generation):')
+
+ const textContent = fullContent
+ .replace(/!\[Generated Image\]\([^)]+\)/g, '')
+ .trim()
+ if (textContent) {
+ console.log(
+ ' Text response:',
+ textContent.substring(0, 100) + (textContent.length > 100 ? '...' : ''),
+ )
+ }
+
+ if (imageUrls.length > 0) {
+ console.log('\n🖼️ Generated Images:')
+ imageUrls.forEach((url, index) => {
+ if (url.startsWith('data:image')) {
+ console.log(
+ ` Image ${index + 1}: [Base64 Data URL] (${url.length} chars)`,
+ )
+ console.log(` Preview: ${url.substring(0, 80)}...`)
+ } else {
+ console.log(` Image ${index + 1}: ${url}`)
+ }
+ })
+ }
+
+ console.log('\n Images generated:', imageUrls.length)
+ console.log(' Has images:', imageUrls.length > 0 ? '✅' : '❌')
+ console.log('='.repeat(60))
+
+ if (imageUrls.length === 0) {
+ console.error('\n❌ FAIL: No images were generated')
+ return false
+ }
+
+ console.log('\n✅ SUCCESS: Gemini image generation works correctly!')
+ return true
+ } catch (error: unknown) {
+ const err = error as { message?: string; stack?: string }
+ console.error('\n❌ ERROR:', err.message)
+ console.error('Stack:', err.stack)
+ return false
+ }
+}
+
+async function testFluxImageGeneration() {
+ console.log('\n🚀 Testing OpenRouter image generation with flux.2-pro\n')
+
+ const adapter = createOpenRouter(apiKey!)
+
+ const model = 'black-forest-labs/flux.2-pro'
+ const prompt =
+ 'Generate a beautiful landscape image of a mountain range at sunset with vibrant colors.'
+
+ const messages = [
+ {
+ role: 'user' as const,
+ content: prompt,
+ },
+ ]
+
+ console.log('📤 Sending image generation request:')
+ console.log(' Model:', model)
+ console.log(' Prompt:', prompt)
+ console.log()
+
+ try {
+ console.log('⏳ Generating image (this may take a moment)...\n')
+
+ let fullContent = ''
+
+ const stream = adapter.chatStream({
+ model,
+ messages,
+ providerOptions: {
+ modalities: ['image', 'text'],
+ },
+ })
+
+ for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ fullContent = chunk.content
+ }
+
+ if (chunk.type === 'done') {
+ console.log('📊 Usage:', chunk.usage)
+ }
+
+ if (chunk.type === 'error') {
+ console.error('❌ Stream error:', chunk.error)
+ return false
+ }
+ }
+
+ const imageUrls = extractImageUrls(fullContent)
+
+ console.log('\n' + '='.repeat(60))
+ console.log('📊 Test Summary (Flux Image Generation):')
+
+ const textContent = fullContent
+ .replace(/!\[Generated Image\]\([^)]+\)/g, '')
+ .trim()
+ if (textContent) {
+ console.log(
+ ' Text response:',
+ textContent.substring(0, 100) + (textContent.length > 100 ? '...' : ''),
+ )
+ }
+
+ if (imageUrls.length > 0) {
+ console.log('\n🖼️ Generated Images:')
+ imageUrls.forEach((url, index) => {
+ if (url.startsWith('data:image')) {
+ console.log(
+ ` Image ${index + 1}: [Base64 Data URL] (${url.length} chars)`,
+ )
+ console.log(` Preview: ${url.substring(0, 80)}...`)
+ } else {
+ console.log(` Image ${index + 1}: ${url}`)
+ }
+ })
+ }
+
+ console.log('\n Images generated:', imageUrls.length)
+ console.log(' Has images:', imageUrls.length > 0 ? '✅' : '❌')
+ console.log('='.repeat(60))
+
+ if (imageUrls.length === 0) {
+ console.error('\n❌ FAIL: No images were generated')
+ return false
+ }
+
+ console.log('\n✅ SUCCESS: Flux image generation works correctly!')
+ return true
+ } catch (error: unknown) {
+ const err = error as { message?: string; stack?: string }
+ console.error('\n❌ ERROR:', err.message)
+ console.error('Stack:', err.stack)
+ return false
+ }
+}
+
+async function runAllTests() {
+ console.log('='.repeat(60))
+ console.log('🧪 OpenRouter Image Generation Tests')
+ console.log('='.repeat(60))
+ console.log()
+
+ const results = {
+ geminiImageGeneration: false,
+ fluxImageGeneration: false,
+ }
+
+ results.geminiImageGeneration = await testGeminiImageGeneration()
+ results.fluxImageGeneration = await testFluxImageGeneration()
+
+ console.log('\n' + '='.repeat(60))
+ console.log('📊 Final Test Results:')
+ console.log(
+ ' Image Generation (gemini-2.5-flash-image):',
+ results.geminiImageGeneration ? '✅' : '❌',
+ )
+ console.log(
+ ' Image Generation (flux.2-pro):',
+ results.fluxImageGeneration ? '✅' : '❌',
+ )
+ console.log('='.repeat(60))
+
+ if (!results.geminiImageGeneration || !results.fluxImageGeneration) {
+ console.error('\n❌ Some tests failed')
+ process.exit(1)
+ }
+
+ console.log('\n✅ All image generation tests passed!')
+ process.exit(0)
+}
+
+runAllTests()
diff --git a/packages/typescript/ai-openrouter/live-tests/package.json b/packages/typescript/ai-openrouter/live-tests/package.json
new file mode 100644
index 00000000..53c1aba6
--- /dev/null
+++ b/packages/typescript/ai-openrouter/live-tests/package.json
@@ -0,0 +1,20 @@
+{
+ "name": "ai-openrouter-live-tests",
+ "version": "0.0.0",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "test": "tsx tool-test.ts",
+ "test:web-search": "tsx web-search-test.ts",
+ "test:image": "tsx image-test.ts",
+ "test:all": "tsx tool-test.ts && tsx web-search-test.ts && tsx image-test.ts"
+ },
+ "dependencies": {
+ "@tanstack/ai": "workspace:*",
+ "@tanstack/ai-openrouter": "workspace:*",
+ "zod": "4.1.13"
+ },
+ "devDependencies": {
+ "tsx": "^4.19.2"
+ }
+}
diff --git a/packages/typescript/ai-openrouter/live-tests/tool-test.ts b/packages/typescript/ai-openrouter/live-tests/tool-test.ts
new file mode 100644
index 00000000..ab817ed0
--- /dev/null
+++ b/packages/typescript/ai-openrouter/live-tests/tool-test.ts
@@ -0,0 +1,183 @@
+import { createOpenRouter } from '../src/index'
+import { z } from 'zod'
+import { readFileSync } from 'fs'
+import { join, dirname } from 'path'
+import { fileURLToPath } from 'url'
+
+const __dirname = dirname(fileURLToPath(import.meta.url))
+try {
+ const envContent = readFileSync(join(__dirname, '.env.local'), 'utf-8')
+ envContent.split('\n').forEach((line) => {
+ const match = line.match(/^([^=]+)=(.*)$/)
+ if (match) {
+ process.env[match[1].trim()] = match[2].trim()
+ }
+ })
+} catch {}
+
+const apiKey = process.env.OPENROUTER_API_KEY
+
+if (!apiKey) {
+ console.error('❌ OPENROUTER_API_KEY not found in .env.local')
+ process.exit(1)
+}
+
+async function testToolCallingWithArguments() {
+ console.log('🚀 Testing OpenRouter tool calling with arguments\n')
+
+ const adapter = createOpenRouter(apiKey!)
+
+ const getTemperatureTool = {
+ name: 'get_temperature',
+ description: 'Get the current temperature for a specific location',
+ inputSchema: z.object({
+ location: z
+ .string()
+ .describe('The city or location to get the temperature for'),
+ unit: z.enum(['celsius', 'fahrenheit']).describe('The temperature unit'),
+ }),
+ execute: async (args: {
+ location: string
+ unit: 'celsius' | 'fahrenheit'
+ }) => {
+ console.log(
+ '✅ Tool executed with arguments:',
+ JSON.stringify(args, null, 2),
+ )
+
+ if (!args) {
+ console.error('❌ ERROR: Arguments are undefined!')
+ return 'Error: No arguments received'
+ }
+
+ if (typeof args !== 'object') {
+ console.error('❌ ERROR: Arguments are not an object:', typeof args)
+ return 'Error: Invalid arguments type'
+ }
+
+ if (!args.location) {
+ console.error('❌ ERROR: Location argument is missing!')
+ return 'Error: Location is required'
+ }
+
+ console.log(
+ ` - location: "${args.location}" (type: ${typeof args.location})`,
+ )
+ console.log(` - unit: "${args.unit}" (type: ${typeof args.unit})`)
+
+ return `The temperature in ${args.location} is 72°${args.unit === 'celsius' ? 'C' : 'F'}`
+ },
+ }
+
+ const messages = [
+ {
+ role: 'user' as const,
+ content: 'What is the temperature in San Francisco in fahrenheit?',
+ },
+ ]
+
+ console.log('📤 Sending request with tool:')
+ console.log(' Tool name:', getTemperatureTool.name)
+ console.log(' User message:', messages[0].content)
+ console.log()
+
+ try {
+ console.log('📥 Streaming response...\n')
+
+ let toolCallFound = false
+    let toolCallArguments: Record<string, unknown> | null = null
+ let toolExecuted = false
+ let finalResponse = ''
+
+ const stream = adapter.chatStream({
+ model: 'openai/gpt-4o-mini',
+ messages,
+ tools: [getTemperatureTool],
+ })
+
+ for await (const chunk of stream) {
+ console.log('Chunk:', JSON.stringify(chunk, null, 2))
+
+ if (chunk.type === 'tool_call') {
+ toolCallFound = true
+ const rawArgs = chunk.toolCall.function.arguments
+ console.log('\n🔧 Tool call detected!')
+ console.log(' Name:', chunk.toolCall.function.name)
+ console.log(' Arguments (raw):', rawArgs)
+ console.log(' Arguments (type):', typeof rawArgs)
+
+ if (typeof rawArgs === 'string') {
+ try {
+ const parsed = JSON.parse(rawArgs)
+ console.log(
+ ' Arguments (parsed):',
+ JSON.stringify(parsed, null, 2),
+ )
+ toolCallArguments = parsed
+ } catch (e) {
+ console.error(' ❌ Failed to parse arguments as JSON:', e)
+ }
+ }
+
+ if (getTemperatureTool.execute && toolCallArguments) {
+ console.log('\n🔨 Executing tool...')
+ try {
+ const result = await getTemperatureTool.execute(
+ toolCallArguments as {
+ location: string
+ unit: 'celsius' | 'fahrenheit'
+ },
+ )
+ toolExecuted = true
+ console.log(' Result:', result)
+ } catch (error) {
+ console.error(' ❌ Tool execution error:', error)
+ }
+ }
+ }
+
+ if (chunk.type === 'content') {
+ finalResponse += chunk.delta
+ }
+ }
+
+ console.log('\n' + '='.repeat(60))
+ console.log('📊 Test Summary:')
+ console.log(' Tool call found:', toolCallFound ? '✅' : '❌')
+ console.log(' Arguments received:', toolCallArguments ? '✅' : '❌')
+ console.log(' Arguments value:', JSON.stringify(toolCallArguments))
+ console.log(' Tool executed:', toolExecuted ? '✅' : '❌')
+ console.log(' Final response:', finalResponse)
+ console.log('='.repeat(60))
+
+ if (!toolCallFound) {
+ console.error('\n❌ FAIL: No tool call was detected in the stream')
+ process.exit(1)
+ }
+
+ if (!toolCallArguments) {
+ console.error('\n❌ FAIL: Tool call arguments are missing or null')
+ process.exit(1)
+ }
+
+ if (typeof toolCallArguments === 'object' && !toolCallArguments.location) {
+ console.error('\n❌ FAIL: Location parameter is missing from arguments')
+ process.exit(1)
+ }
+
+ if (!toolExecuted) {
+ console.error('\n❌ FAIL: Tool was not executed successfully')
+ process.exit(1)
+ }
+
+ console.log('\n✅ SUCCESS: Tool calling with arguments works correctly!')
+ process.exit(0)
+ } catch (error: unknown) {
+ const err = error as { message?: string; stack?: string }
+ console.error('\n❌ ERROR:', err.message)
+ console.error('Stack:', err.stack)
+ process.exit(1)
+ }
+}
+
+testToolCallingWithArguments()
diff --git a/packages/typescript/ai-openrouter/live-tests/web-search-test.ts b/packages/typescript/ai-openrouter/live-tests/web-search-test.ts
new file mode 100644
index 00000000..94662b0f
--- /dev/null
+++ b/packages/typescript/ai-openrouter/live-tests/web-search-test.ts
@@ -0,0 +1,100 @@
+import { createOpenRouter } from '../src/index'
+import { readFileSync } from 'fs'
+import { join, dirname } from 'path'
+import { fileURLToPath } from 'url'
+
+const __dirname = dirname(fileURLToPath(import.meta.url))
+try {
+ const envContent = readFileSync(join(__dirname, '.env.local'), 'utf-8')
+ envContent.split('\n').forEach((line) => {
+ const match = line.match(/^([^=]+)=(.*)$/)
+ if (match) {
+ process.env[match[1].trim()] = match[2].trim()
+ }
+ })
+} catch {}
+
+const apiKey = process.env.OPENROUTER_API_KEY
+
+if (!apiKey) {
+ console.error('❌ OPENROUTER_API_KEY not found in .env.local')
+ process.exit(1)
+}
+
+async function testWebSearch() {
+ console.log('🚀 Testing OpenRouter web search via plugins\n')
+
+ const adapter = createOpenRouter(apiKey!)
+
+ const messages = [
+ {
+ role: 'user' as const,
+ content:
+ 'What is the latest news about AI today? Please search the web and summarize.',
+ },
+ ]
+
+ console.log('📤 Sending request with web search plugin:')
+ console.log(' Model: openai/gpt-4o-mini')
+ console.log(' Plugin: web (engine: exa, max_results: 5)')
+ console.log(' User message:', messages[0].content)
+ console.log()
+
+ try {
+ console.log('📥 Streaming response...\n')
+
+ let fullContent = ''
+ let hasContent = false
+
+ const stream = adapter.chatStream({
+ model: 'openai/gpt-4o-mini',
+ messages,
+ providerOptions: {
+ plugins: [
+ {
+ id: 'web',
+ engine: 'exa',
+ max_results: 5,
+ },
+ ],
+ },
+ })
+
+ for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ hasContent = true
+ fullContent += chunk.delta
+ process.stdout.write(chunk.delta)
+ }
+
+ if (chunk.type === 'done') {
+ console.log('\n\n📊 Usage:', chunk.usage)
+ }
+
+ if (chunk.type === 'error') {
+ console.error('\n❌ Stream error:', chunk.error)
+ }
+ }
+
+ console.log('\n' + '='.repeat(60))
+ console.log('📊 Test Summary:')
+ console.log(' Content received:', hasContent ? '✅' : '❌')
+ console.log(' Content length:', fullContent.length, 'characters')
+ console.log('='.repeat(60))
+
+ if (!hasContent) {
+ console.error('\n❌ FAIL: No content was received from the stream')
+ process.exit(1)
+ }
+
+ console.log('\n✅ SUCCESS: Web search plugin works correctly!')
+ process.exit(0)
+ } catch (error: unknown) {
+ const err = error as { message?: string; stack?: string }
+ console.error('\n❌ ERROR:', err.message)
+ console.error('Stack:', err.stack)
+ process.exit(1)
+ }
+}
+
+testWebSearch()
diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json
new file mode 100644
index 00000000..9bbbce85
--- /dev/null
+++ b/packages/typescript/ai-openrouter/package.json
@@ -0,0 +1,51 @@
+{
+ "name": "@tanstack/ai-openrouter",
+ "version": "0.0.1",
+ "description": "OpenRouter adapter for TanStack AI",
+ "author": "",
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/TanStack/ai.git",
+ "directory": "packages/typescript/ai-openrouter"
+ },
+ "type": "module",
+ "module": "./dist/esm/index.js",
+ "types": "./dist/esm/index.d.ts",
+ "exports": {
+ ".": {
+ "types": "./dist/esm/index.d.ts",
+ "import": "./dist/esm/index.js"
+ }
+ },
+ "files": [
+ "dist",
+ "src"
+ ],
+ "scripts": {
+ "build": "vite build",
+ "clean": "premove ./build ./dist",
+ "lint:fix": "eslint ./src --fix",
+ "test:build": "publint --strict",
+ "test:eslint": "eslint ./src",
+ "test:lib": "vitest run",
+ "test:lib:dev": "pnpm test:lib --watch",
+ "test:types": "tsc"
+ },
+ "keywords": [
+ "ai",
+ "openrouter",
+ "tanstack",
+ "adapter"
+ ],
+ "dependencies": {
+ "@tanstack/ai": "workspace:*"
+ },
+ "devDependencies": {
+ "@vitest/coverage-v8": "4.0.14",
+ "vite": "^7.2.4"
+ },
+ "peerDependencies": {
+ "@tanstack/ai": "workspace:*"
+ }
+}
diff --git a/packages/typescript/ai-openrouter/src/index.ts b/packages/typescript/ai-openrouter/src/index.ts
new file mode 100644
index 00000000..5269f88d
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/index.ts
@@ -0,0 +1,26 @@
+export {
+ OpenRouter,
+ createOpenRouter,
+ openrouter,
+ type OpenRouterConfig,
+} from './openrouter-adapter'
+export type {
+ OpenRouterChatModelProviderOptionsByName,
+ OpenRouterModelInputModalitiesByName,
+} from './model-meta'
+export type {
+ OpenRouterTextMetadata,
+ OpenRouterImageMetadata,
+ OpenRouterAudioMetadata,
+ OpenRouterVideoMetadata,
+ OpenRouterDocumentMetadata,
+ OpenRouterMessageMetadataByModality,
+} from './message-types'
+export type {
+ WebPlugin,
+ ProviderPreferences,
+ ReasoningOptions,
+ StreamOptions,
+ ImageConfig,
+} from './text/text-provider-options'
+export type { OpenRouterTool, FunctionTool } from './tools'
diff --git a/packages/typescript/ai-openrouter/src/message-types.ts b/packages/typescript/ai-openrouter/src/message-types.ts
new file mode 100644
index 00000000..490ad645
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/message-types.ts
@@ -0,0 +1,19 @@
+export interface OpenRouterTextMetadata {}
+
+export interface OpenRouterImageMetadata {
+ detail?: 'auto' | 'low' | 'high'
+}
+
+export interface OpenRouterAudioMetadata {}
+
+export interface OpenRouterVideoMetadata {}
+
+export interface OpenRouterDocumentMetadata {}
+
+export interface OpenRouterMessageMetadataByModality {
+ text: OpenRouterTextMetadata
+ image: OpenRouterImageMetadata
+ audio: OpenRouterAudioMetadata
+ video: OpenRouterVideoMetadata
+ document: OpenRouterDocumentMetadata
+}
diff --git a/packages/typescript/ai-openrouter/src/model-meta.ts b/packages/typescript/ai-openrouter/src/model-meta.ts
new file mode 100644
index 00000000..4ed9192c
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/model-meta.ts
@@ -0,0 +1,11 @@
+import type { OpenRouterBaseOptions } from './text/text-provider-options'
+
+export const OPENROUTER_CHAT_MODELS: ReadonlyArray<string> = [] as const
+
+export type OpenRouterChatModelProviderOptionsByName = {
+ [key: string]: OpenRouterBaseOptions
+}
+
+export type OpenRouterModelInputModalitiesByName = {
+ [key: string]: ReadonlyArray<'text' | 'image' | 'audio' | 'video'>
+}
diff --git a/packages/typescript/ai-openrouter/src/openrouter-adapter.ts b/packages/typescript/ai-openrouter/src/openrouter-adapter.ts
new file mode 100644
index 00000000..5d6a49c5
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/openrouter-adapter.ts
@@ -0,0 +1,732 @@
+import { BaseAdapter } from '@tanstack/ai'
+import { convertToolsToProviderFormat } from './tools'
+import type {
+ ChatOptions,
+ ContentPart,
+ EmbeddingOptions,
+ EmbeddingResult,
+ ModelMessage,
+ StreamChunk,
+ SummarizationOptions,
+ SummarizationResult,
+} from '@tanstack/ai'
+import type {
+ OpenRouterChatModelProviderOptionsByName,
+ OpenRouterModelInputModalitiesByName,
+} from './model-meta'
+import type {
+ ExternalTextProviderOptions,
+ InternalTextProviderOptions,
+} from './text/text-provider-options'
+import type {
+ OpenRouterImageMetadata,
+ OpenRouterMessageMetadataByModality,
+} from './message-types'
+import type { OpenRouterTool } from './tools'
+
+export interface OpenRouterConfig {
+ apiKey: string
+ baseURL?: string
+ httpReferer?: string
+ xTitle?: string
+}
+
+export type OpenRouterProviderOptions = ExternalTextProviderOptions
+
+type ContentPartType =
+ | 'text'
+ | 'image_url'
+ | 'audio_url'
+ | 'video_url'
+ | 'document_url'
+
+interface OpenRouterContentPart {
+ type: ContentPartType
+ text?: string
+ image_url?: { url: string; detail?: 'auto' | 'low' | 'high' }
+ audio_url?: { url: string }
+ video_url?: { url: string }
+ document_url?: { url: string }
+}
+
+interface OpenRouterMessage {
+ role: 'user' | 'assistant' | 'system' | 'tool'
+  content: string | Array<OpenRouterContentPart>
+ tool_call_id?: string
+ name?: string
+}
+
+interface OpenRouterRequest {
+ model: string
+  messages: Array<OpenRouterMessage>
+ stream?: boolean
+ max_tokens?: number
+ temperature?: number
+ top_p?: number
+  stop?: string | Array<string>
+  tools?: Array<OpenRouterTool>
+ tool_choice?:
+ | 'none'
+ | 'auto'
+ | 'required'
+ | { type: 'function'; function: { name: string } }
+ [key: string]: unknown
+}
+
+interface ToolCallBuffer {
+ id: string
+ name: string
+ arguments: string
+}
+
+interface OpenRouterError {
+ message: string
+ code?: string
+}
+
+interface OpenRouterToolCallDelta {
+ index: number
+ id?: string
+ type?: 'function'
+ function?: {
+ name?: string
+ arguments?: string
+ }
+}
+
+interface OpenRouterToolCall {
+ id: string
+ type: 'function'
+ function: {
+ name: string
+ arguments: string
+ }
+}
+
+interface OpenRouterReasoningDetail {
+ thinking?: string
+ text?: string
+}
+
+interface OpenRouterImage {
+ image_url: {
+ url: string
+ }
+}
+
+interface OpenRouterChoiceDelta {
+ content?: string
+  reasoning_details?: Array<OpenRouterReasoningDetail>
+  images?: Array<OpenRouterImage>
+  tool_calls?: Array<OpenRouterToolCallDelta>
+}
+
+interface OpenRouterChoiceMessage {
+ refusal?: string
+  images?: Array<OpenRouterImage>
+  tool_calls?: Array<OpenRouterToolCall>
+}
+
+interface OpenRouterChoice {
+ delta?: OpenRouterChoiceDelta
+ message?: OpenRouterChoiceMessage
+ finish_reason?: 'stop' | 'length' | 'tool_calls' | null
+}
+
+interface OpenRouterUsage {
+ prompt_tokens?: number
+ completion_tokens?: number
+ total_tokens?: number
+}
+
+interface OpenRouterSSEChunk {
+ id?: string
+ model?: string
+ error?: OpenRouterError
+  choices?: Array<OpenRouterChoice>
+ usage?: OpenRouterUsage
+}
+
+export class OpenRouter extends BaseAdapter<
+ ReadonlyArray,
+ [],
+ OpenRouterProviderOptions,
+ Record,
+ OpenRouterChatModelProviderOptionsByName,
+ OpenRouterModelInputModalitiesByName,
+ OpenRouterMessageMetadataByModality
+> {
+ name = 'openrouter' as const
+  models: ReadonlyArray<string> = []
+
+ // @ts-ignore - We never assign this at runtime and it's only used for types
+ _modelProviderOptionsByName: OpenRouterChatModelProviderOptionsByName
+ // @ts-ignore - We never assign this at runtime and it's only used for types
+ _modelInputModalitiesByName?: OpenRouterModelInputModalitiesByName
+ // @ts-ignore - We never assign this at runtime and it's only used for types
+ _messageMetadataByModality?: OpenRouterMessageMetadataByModality
+
+ private openRouterConfig: OpenRouterConfig
+ private baseURL: string
+
+ constructor(config: OpenRouterConfig) {
+ super({})
+ this.openRouterConfig = config
+ this.baseURL = config.baseURL || 'https://openrouter.ai/api/v1'
+ }
+
+ async *chatStream(
+ options: ChatOptions,
+  ): AsyncIterable<StreamChunk> {
+ const timestamp = Date.now()
+    const toolCallBuffers = new Map<number, ToolCallBuffer>()
+ let accumulatedReasoning = ''
+ let accumulatedContent = ''
+ let responseId: string | null = null
+ let model = options.model
+
+ try {
+ const response = await this.createRequest(options, true)
+
+ if (!response.ok) {
+ yield this.createErrorChunk(
+ await this.parseErrorResponse(response),
+ options.model,
+ timestamp,
+ response.status.toString(),
+ )
+ return
+ }
+
+ if (!response.body) {
+ throw new Error('Response body is null')
+ }
+
+ for await (const event of this.parseSSE(response.body)) {
+ if (event.done) {
+ yield {
+ type: 'done',
+ id: responseId || this.generateId(),
+ model,
+ timestamp,
+ finishReason: 'stop',
+ }
+ continue
+ }
+
+ const chunk = event.data
+ if (chunk.id) responseId = chunk.id
+ if (chunk.model) model = chunk.model
+
+ if (chunk.error) {
+ yield this.createErrorChunk(
+ chunk.error.message || 'Unknown error',
+ model || options.model,
+ timestamp,
+ chunk.error.code,
+ )
+ continue
+ }
+
+ if (!chunk.choices) continue
+
+ for (const choice of chunk.choices) {
+ yield* this.processChoice(
+ choice,
+ toolCallBuffers,
+ { id: responseId || this.generateId(), model, timestamp },
+ { reasoning: accumulatedReasoning, content: accumulatedContent },
+ (r, c) => {
+ accumulatedReasoning = r
+ accumulatedContent = c
+ },
+ chunk.usage,
+ )
+ }
+ }
+ } catch (error) {
+ yield this.createErrorChunk(
+ (error as Error).message || 'Unknown error',
+ options.model,
+ timestamp,
+ )
+ }
+ }
+
+  async summarize(options: SummarizationOptions): Promise<SummarizationResult> {
+ const response = await fetch(`${this.baseURL}/chat/completions`, {
+ method: 'POST',
+ headers: this.buildHeaders(),
+ body: JSON.stringify({
+ model: options.model || 'openai/gpt-4o-mini',
+ messages: [
+ { role: 'system', content: this.buildSummarizationPrompt(options) },
+ { role: 'user', content: options.text },
+ ],
+ max_tokens: options.maxLength,
+ temperature: 0.3,
+ stream: false,
+ }),
+ })
+
+ if (!response.ok) {
+ throw new Error(await this.parseErrorResponse(response))
+ }
+
+ const data = await response.json()
+ return {
+ id: data.id,
+ model: data.model,
+ summary: data.choices[0]?.message?.content || '',
+ usage: {
+ promptTokens: data.usage?.prompt_tokens || 0,
+ completionTokens: data.usage?.completion_tokens || 0,
+ totalTokens: data.usage?.total_tokens || 0,
+ },
+ }
+ }
+
+ /**
+ * Creates embeddings from input text.
+ *
+ * @throws Error - OpenRouter does not support embeddings endpoint.
+ * Use a model-specific adapter (e.g., @tanstack/ai-openai) for embeddings.
+ */
+  createEmbeddings(_options: EmbeddingOptions): Promise<EmbeddingResult> {
+ throw new Error(
+ 'OpenRouter does not support embeddings endpoint. Use a model-specific adapter (e.g., @tanstack/ai-openai) instead.',
+ )
+ }
+
+  private buildHeaders(): Record<string, string> {
+    const headers: Record<string, string> = {
+ Authorization: `Bearer ${this.openRouterConfig.apiKey}`,
+ 'Content-Type': 'application/json',
+ }
+ if (this.openRouterConfig.httpReferer)
+ headers['HTTP-Referer'] = this.openRouterConfig.httpReferer
+ if (this.openRouterConfig.xTitle)
+ headers['X-Title'] = this.openRouterConfig.xTitle
+ return headers
+ }
+
+ private async createRequest(
+ options: ChatOptions,
+ stream: boolean,
+  ): Promise<Response> {
+ const requestParams = this.mapOptions(options)
+ return fetch(`${this.baseURL}/chat/completions`, {
+ method: 'POST',
+ headers: this.buildHeaders(),
+ body: JSON.stringify({ ...requestParams, stream }),
+ signal: options.request?.signal,
+ })
+ }
+
+  private async parseErrorResponse(response: Response): Promise<string> {
+ try {
+ const error = await response.json()
+ return (
+ error.error?.message ||
+ `HTTP ${response.status}: ${response.statusText}`
+ )
+ } catch {
+ return `HTTP ${response.status}: ${response.statusText}`
+ }
+ }
+
+ private createErrorChunk(
+ message: string,
+ model: string,
+ timestamp: number,
+ code?: string,
+ ): StreamChunk {
+ return {
+ type: 'error',
+ id: this.generateId(),
+ model,
+ timestamp,
+ error: { message, code },
+ }
+ }
+
+ private async *parseSSE(
+    body: ReadableStream<Uint8Array>,
+ ): AsyncIterable<{ done: true } | { done: false; data: OpenRouterSSEChunk }> {
+ const reader = body.getReader()
+ const decoder = new TextDecoder()
+ let buffer = ''
+
+ try {
+ // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+
+ buffer += decoder.decode(value, { stream: true })
+ const lines = buffer.split('\n')
+ buffer = lines.pop() || ''
+
+ for (const line of lines) {
+ if (!line.trim() || !line.startsWith('data: ')) continue
+ const data = line.slice(6)
+ if (data === '[DONE]') {
+ yield { done: true }
+ } else {
+ try {
+ yield { done: false, data: JSON.parse(data) }
+ } catch {
+ continue
+ }
+ }
+ }
+ }
+ } finally {
+ reader.releaseLock()
+ }
+ }
+
+ private *processChoice(
+ choice: OpenRouterChoice,
+    toolCallBuffers: Map<number, ToolCallBuffer>,
+ meta: { id: string; model: string; timestamp: number },
+ accumulated: { reasoning: string; content: string },
+ updateAccumulated: (reasoning: string, content: string) => void,
+ usage?: OpenRouterUsage,
+  ): Iterable<StreamChunk> {
+ const { delta, message, finish_reason } = choice
+
+ if (delta?.content) {
+ accumulated.content += delta.content
+ updateAccumulated(accumulated.reasoning, accumulated.content)
+ yield {
+ type: 'content',
+ ...meta,
+ delta: delta.content,
+ content: accumulated.content,
+ role: 'assistant',
+ }
+ }
+
+ if (delta?.reasoning_details) {
+ for (const detail of delta.reasoning_details) {
+ const text = detail.thinking || detail.text || ''
+ if (text) {
+ accumulated.reasoning += text
+ updateAccumulated(accumulated.reasoning, accumulated.content)
+ yield {
+ type: 'thinking',
+ ...meta,
+ delta: text,
+ content: accumulated.reasoning,
+ }
+ }
+ }
+ }
+
+ if (delta?.images) {
+ for (const img of delta.images) {
+        const imgContent = `![Generated Image](${img.image_url.url})`
+ accumulated.content += imgContent
+ updateAccumulated(accumulated.reasoning, accumulated.content)
+ yield {
+ type: 'content',
+ ...meta,
+ delta: imgContent,
+ content: accumulated.content,
+ role: 'assistant',
+ }
+ }
+ }
+
+ if (delta?.tool_calls) {
+ for (const tc of delta.tool_calls) {
+ const existing = toolCallBuffers.get(tc.index)
+ if (!existing) {
+ if (!tc.id) {
+ continue
+ }
+ toolCallBuffers.set(tc.index, {
+ id: tc.id,
+ name: tc.function?.name ?? '',
+ arguments: tc.function?.arguments ?? '',
+ })
+ } else {
+ if (tc.function?.name) existing.name = tc.function.name
+ if (tc.function?.arguments)
+ existing.arguments += tc.function.arguments
+ }
+ }
+ }
+
+ if (message?.refusal) {
+ yield {
+ type: 'error',
+ ...meta,
+ error: { message: message.refusal, code: 'refusal' },
+ }
+ }
+
+ if (message?.images) {
+ for (const img of message.images) {
+        const imgContent = `![Generated Image](${img.image_url.url})`
+ accumulated.content += imgContent
+ updateAccumulated(accumulated.reasoning, accumulated.content)
+ yield {
+ type: 'content',
+ ...meta,
+ delta: imgContent,
+ content: accumulated.content,
+ role: 'assistant',
+ }
+ }
+ }
+
+ if (message?.tool_calls) {
+ for (const [index, tc] of message.tool_calls.entries()) {
+ yield {
+ type: 'tool_call',
+ ...meta,
+ index,
+ toolCall: {
+ id: tc.id,
+ type: 'function',
+ function: {
+ name: tc.function.name,
+ arguments: tc.function.arguments,
+ },
+ },
+ }
+ }
+ }
+
+ if (finish_reason) {
+ if (finish_reason === 'tool_calls') {
+ for (const [index, tc] of toolCallBuffers.entries()) {
+ yield {
+ type: 'tool_call',
+ ...meta,
+ index,
+ toolCall: {
+ id: tc.id,
+ type: 'function',
+ function: { name: tc.name, arguments: tc.arguments },
+ },
+ }
+ }
+ toolCallBuffers.clear()
+ }
+
+ if (usage) {
+ yield {
+ type: 'done',
+ ...meta,
+ finishReason:
+ finish_reason === 'tool_calls'
+ ? 'tool_calls'
+ : finish_reason === 'length'
+ ? 'length'
+ : 'stop',
+ usage: {
+ promptTokens: usage.prompt_tokens || 0,
+ completionTokens: usage.completion_tokens || 0,
+ totalTokens: usage.total_tokens || 0,
+ },
+ }
+ }
+ }
+ }
+
+ private buildSummarizationPrompt(options: SummarizationOptions): string {
+ let prompt = 'You are a professional summarizer. '
+ switch (options.style) {
+ case 'bullet-points':
+ prompt += 'Provide a summary in bullet point format. '
+ break
+ case 'paragraph':
+ prompt += 'Provide a summary in paragraph format. '
+ break
+ case 'concise':
+ prompt += 'Provide a very concise summary in 1-2 sentences. '
+ break
+ default:
+ prompt += 'Provide a clear and concise summary. '
+ }
+ if (options.focus?.length) {
+ prompt += `Focus on the following aspects: ${options.focus.join(', ')}. `
+ }
+ if (options.maxLength) {
+ prompt += `Keep the summary under ${options.maxLength} tokens. `
+ }
+ return prompt
+ }
+
+ private mapOptions(options: ChatOptions): OpenRouterRequest {
+ const providerOptions = options.providerOptions as
+      | Omit<InternalTextProviderOptions, 'model' | 'messages'>
+ | undefined
+
+ const request: OpenRouterRequest = {
+ model: options.model,
+ messages: this.convertMessages(options.messages),
+ temperature: options.options?.temperature,
+ max_tokens: options.options?.maxTokens,
+ top_p: options.options?.topP,
+ ...providerOptions,
+ tools: options.tools
+ ? convertToolsToProviderFormat(options.tools)
+ : undefined,
+ }
+
+ if (providerOptions?.stop !== undefined) {
+ request.stop = providerOptions.stop
+ }
+
+ if (options.tools?.length && providerOptions?.tool_choice !== undefined) {
+ request.tool_choice = providerOptions.tool_choice
+ }
+
+ if (options.systemPrompts?.length) {
+ request.messages.unshift({
+ role: 'system',
+ content: options.systemPrompts.join('\n'),
+ })
+ }
+
+ return request
+ }
+
+ private convertMessages(
+    messages: Array<ModelMessage>,
+  ): Array<OpenRouterMessage> {
+ return messages.map((msg) => {
+ if (msg.role === 'tool') {
+ return {
+ role: 'tool' as const,
+ content:
+ typeof msg.content === 'string'
+ ? msg.content
+ : JSON.stringify(msg.content),
+ tool_call_id: msg.toolCallId,
+ name: msg.name,
+ }
+ }
+
+ const parts = this.convertContentParts(msg.content)
+ return {
+ role: msg.role as 'user' | 'assistant',
+ content:
+ parts.length === 1 && parts[0]?.type === 'text'
+ ? parts[0].text || ''
+ : parts,
+ name: msg.name,
+ }
+ })
+ }
+
+ private convertContentParts(
+    content: string | null | Array<ContentPart>,
+  ): Array<OpenRouterContentPart> {
+ if (!content) return [{ type: 'text', text: '' }]
+ if (typeof content === 'string') return [{ type: 'text', text: content }]
+
+    const parts: Array<OpenRouterContentPart> = []
+ for (const part of content) {
+ switch (part.type) {
+ case 'text':
+ parts.push({ type: 'text', text: part.content })
+ break
+ case 'image': {
+ const meta = part.metadata as OpenRouterImageMetadata | undefined
+ parts.push({
+ type: 'image_url',
+ image_url: {
+ url: part.source.value,
+ detail: meta?.detail || 'auto',
+ },
+ })
+ break
+ }
+ case 'audio':
+ parts.push({
+ type: 'audio_url',
+ audio_url: { url: part.source.value },
+ })
+ break
+ case 'video':
+ parts.push({
+ type: 'video_url',
+ video_url: { url: part.source.value },
+ })
+ break
+ case 'document':
+ parts.push({
+ type: 'document_url',
+ document_url: { url: part.source.value },
+ })
+ break
+ }
+ }
+ return parts.length ? parts : [{ type: 'text', text: '' }]
+ }
+}
+
+export function createOpenRouter(
+ apiKey: string,
+  config?: Omit<OpenRouterConfig, 'apiKey'>,
+): OpenRouter {
+ return new OpenRouter({ apiKey, ...config })
+}
+
+/**
+ * Create an OpenRouter adapter with automatic API key detection from environment variables.
+ *
+ * Looks for `OPENROUTER_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured OpenRouter adapter instance
+ * @throws Error if OPENROUTER_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses OPENROUTER_API_KEY from environment
+ * const adapter = openrouter();
+ * ```
+ */
+interface EnvObject {
+ OPENROUTER_API_KEY?: string
+}
+
+interface WindowWithEnv {
+ env?: EnvObject
+}
+
+function getEnvironment(): EnvObject | undefined {
+ if (typeof globalThis !== 'undefined') {
+ const win = (globalThis as { window?: WindowWithEnv }).window
+ if (win?.env) {
+ return win.env
+ }
+ }
+ if (typeof process !== 'undefined') {
+ return process.env as EnvObject
+ }
+ return undefined
+}
+
+export function openrouter(
+  config?: Omit<OpenRouterConfig, 'apiKey'>,
+): OpenRouter {
+ const env = getEnvironment()
+ const key = env?.OPENROUTER_API_KEY
+
+ if (!key) {
+ throw new Error(
+ 'OPENROUTER_API_KEY is required. Please set it in your environment variables or use createOpenRouter(apiKey, config) instead.',
+ )
+ }
+
+ return createOpenRouter(key, config)
+}
diff --git a/packages/typescript/ai-openrouter/src/text/text-provider-options.ts b/packages/typescript/ai-openrouter/src/text/text-provider-options.ts
new file mode 100644
index 00000000..c603837e
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/text/text-provider-options.ts
@@ -0,0 +1,322 @@
+export interface WebPlugin {
+ /**
+ * The plugin identifier. Currently only 'web' is supported.
+ */
+ id: 'web'
+ /**
+ * The search engine to use for web search.
+ * @default 'native'
+ */
+ engine?: 'native' | 'exa'
+ /**
+ * Maximum number of search results to return.
+ */
+ max_results?: number
+ /**
+ * Custom search prompt to guide the web search.
+ */
+ search_prompt?: string
+}
+
+export interface ProviderPreferences {
+ /**
+ * An ordered list of provider names. The router will attempt to use the first available provider from this list.
+ * https://openrouter.ai/docs/guides/routing/provider-selection
+ */
+  order?: Array<string>
+ /**
+ * Whether to allow fallback to other providers if the preferred ones are unavailable.
+ * @default true
+ */
+ allow_fallbacks?: boolean
+ /**
+ * Whether to require all parameters to be supported by the provider.
+ * @default false
+ */
+ require_parameters?: boolean
+ /**
+ * Controls whether to allow providers that may collect data.
+ * 'allow' - Allow all providers
+ * 'deny' - Only use providers that don't collect data
+ */
+ data_collection?: 'allow' | 'deny'
+ /**
+ * Whether to prefer Zero Data Retention (ZDR) providers.
+ */
+ zdr?: boolean
+ /**
+ * An exclusive list of provider names to use. Only these providers will be considered.
+ */
+  only?: Array<string>
+ /**
+ * A list of provider names to exclude from consideration.
+ */
+  ignore?: Array<string>
+ /**
+ * A list of quantization levels to allow (e.g., 'int4', 'int8', 'fp8', 'fp16', 'bf16').
+ */
+  quantizations?: Array<string>
+ /**
+ * How to sort/prioritize providers.
+ * 'price' - Sort by lowest price
+ * 'throughput' - Sort by highest throughput
+ */
+ sort?: 'price' | 'throughput'
+ /**
+ * Maximum price limits for tokens.
+ */
+ max_price?: {
+ /**
+ * Maximum price per completion token in credits.
+ */
+ completion_tokens?: number
+ /**
+ * Maximum price per prompt token in credits.
+ */
+ prompt_tokens?: number
+ }
+}
+
+export interface ReasoningOptions {
+ /**
+ * The level of reasoning effort the model should apply.
+ * Higher values produce more thorough reasoning but use more tokens.
+ */
+ effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high'
+ /**
+ * Maximum number of tokens to allocate for reasoning.
+ */
+ max_tokens?: number
+ /**
+ * Whether to exclude reasoning content from the response.
+ */
+ exclude?: boolean
+}
+
+export interface StreamOptions {
+ /**
+ * Whether to include token usage information in the stream.
+ */
+ include_usage?: boolean
+}
+
+export interface ImageConfig {
+ /**
+ * The aspect ratio for generated images.
+ */
+ aspect_ratio?: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | string
+}
+
+export interface OpenRouterBaseOptions {
+ /**
+ * Up to 4 sequences where the API will stop generating further tokens.
+ */
+  stop?: string | Array<string>
+ /**
+ * Whether to stream the response using server-sent events.
+ * @default false
+ */
+ stream?: boolean
+ /**
+ * The maximum number of tokens to generate in the completion.
+ * @deprecated Use max_completion_tokens instead.
+ */
+ max_tokens?: number
+ /**
+ * The maximum number of tokens to generate in the completion.
+ */
+ max_completion_tokens?: number
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values make output more random.
+ * @default 1
+ */
+ temperature?: number
+ /**
+ * Nucleus sampling: only consider tokens with top_p cumulative probability.
+ * @default 1
+ */
+ top_p?: number
+ /**
+ * Only sample from the top K options for each subsequent token.
+ */
+ top_k?: number
+ /**
+ * Penalizes new tokens based on their existing frequency in the text so far.
+ * Range: -2.0 to 2.0
+ */
+ frequency_penalty?: number
+ /**
+ * Penalizes new tokens based on whether they appear in the text so far.
+ * Range: -2.0 to 2.0
+ */
+ presence_penalty?: number
+ /**
+ * Penalizes tokens that have already appeared in the generated text.
+ * Range: 0.0 to 2.0 (1.0 = no penalty)
+ */
+ repetition_penalty?: number
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ * Maps token IDs to bias values from -100 to 100.
+ */
+ logit_bias?: { [key: number]: number }
+ /**
+ * Whether to return log probabilities of the output tokens.
+ */
+ logprobs?: boolean
+ /**
+ * Number of most likely tokens to return at each position (0-20). Requires logprobs: true.
+ */
+ top_logprobs?: number
+ /**
+ * Minimum probability threshold for token sampling.
+ */
+ min_p?: number
+ /**
+ * Consider only top tokens with "top_a" cumulative probability.
+ */
+ top_a?: number
+ /**
+ * Random seed for deterministic sampling. Same seed should produce same results.
+ */
+ seed?: number
+ /**
+ * Force the model to respond in a specific format.
+ */
+ response_format?: { type: 'json_object' }
+ /**
+ * Message transforms to apply (e.g., 'middle-out' for context compression).
+ */
+  transforms?: Array<string>
+ /**
+ * A list of model IDs to use as fallbacks if the primary model is unavailable.
+ */
+  models?: Array<string>
+ /**
+ * The routing strategy to use.
+ * 'fallback' - Try models in order until one succeeds
+ */
+ route?: 'fallback'
+ /**
+ * Provider routing preferences.
+ * https://openrouter.ai/docs/guides/routing/provider-selection
+ */
+ provider?: ProviderPreferences
+ /**
+ * A unique identifier representing your end-user for abuse monitoring.
+ */
+ user?: string
+ /**
+ * Metadata to attach to the request for tracking and analytics.
+ */
+  metadata?: Record<string, unknown>
+ /**
+ * Reasoning configuration for models that support chain-of-thought reasoning.
+ */
+ reasoning?: ReasoningOptions
+ /**
+ * Options for streaming responses.
+ */
+ stream_options?: StreamOptions
+ /**
+ * Whether to allow the model to call multiple tools in parallel.
+ * @default true
+ */
+ parallel_tool_calls?: boolean
+ /**
+ * Constrains the verbosity of the model's response.
+ */
+ verbosity?: 'low' | 'medium' | 'high'
+ /**
+ * The modalities to enable for the response.
+ */
+ modalities?: Array<'text' | 'image'>
+ /**
+ * Configuration for image generation in the response.
+ */
+ image_config?: ImageConfig
+ /**
+ * Controls which (if any) tool the model should use.
+ * 'none' - Don't call any tools
+ * 'auto' - Model decides whether to call tools
+ * 'required' - Model must call at least one tool
+ * Or specify a specific function to call
+ */
+ tool_choice?:
+ | 'none'
+ | 'auto'
+ | 'required'
+ | {
+ type: 'function'
+ function: {
+ name: string
+ }
+ }
+ /**
+ * Plugins to enable for the request (e.g., web search).
+ * https://openrouter.ai/docs/features/web-search
+ */
+  plugins?: Array<WebPlugin>
+ /**
+ * Debug options for troubleshooting.
+ */
+ debug?: {
+ /**
+ * Whether to echo the upstream request body in the response for debugging.
+ */
+ echo_upstream_body?: boolean
+ }
+}
+
+export type ExternalTextProviderOptions = OpenRouterBaseOptions
+
+export interface InternalTextProviderOptions
+ extends ExternalTextProviderOptions {
+ /**
+ * The model ID to use for the request.
+ * https://openrouter.ai/models
+ */
+ model: string
+ /**
+ * The messages to send to the model.
+ */
+ messages: Array<{
+ role: 'user' | 'assistant' | 'system' | 'tool'
+ content:
+ | string
+ | Array<{
+ type: 'text' | 'image_url'
+ text?: string
+ image_url?: {
+ url: string
+ detail?: 'auto' | 'low' | 'high'
+ }
+ }>
+ tool_call_id?: string
+ name?: string
+ }>
+ /**
+ * Tools the model may call (functions).
+ */
+ tools?: Array<{
+ type: 'function'
+ function: {
+ name: string
+ description?: string
+      parameters: Record<string, unknown>
+ }
+ }>
+ /**
+ * Controls which (if any) tool the model should use.
+ */
+ tool_choice?:
+ | 'none'
+ | 'auto'
+ | 'required'
+ | {
+ type: 'function'
+ function: {
+ name: string
+ }
+ }
+}
diff --git a/packages/typescript/ai-openrouter/src/tools/function-tool.ts b/packages/typescript/ai-openrouter/src/tools/function-tool.ts
new file mode 100644
index 00000000..ce5c251e
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/tools/function-tool.ts
@@ -0,0 +1,26 @@
+import { convertZodToJsonSchema } from '@tanstack/ai'
+import type { Tool } from '@tanstack/ai'
+
+export interface FunctionTool {
+ type: 'function'
+ function: {
+ name: string
+ description?: string
+    parameters: Record<string, unknown>
+ }
+}
+
+export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool {
+ const jsonSchema = tool.inputSchema
+ ? convertZodToJsonSchema(tool.inputSchema)
+ : {}
+
+ return {
+ type: 'function',
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: jsonSchema || {},
+ },
+ }
+}
diff --git a/packages/typescript/ai-openrouter/src/tools/index.ts b/packages/typescript/ai-openrouter/src/tools/index.ts
new file mode 100644
index 00000000..a1f7e394
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/tools/index.ts
@@ -0,0 +1,5 @@
+export type { OpenRouterTool } from './tool-converter'
+export { convertToolsToProviderFormat } from './tool-converter'
+
+export type { FunctionTool } from './function-tool'
+export { convertFunctionToolToAdapterFormat } from './function-tool'
diff --git a/packages/typescript/ai-openrouter/src/tools/tool-converter.ts b/packages/typescript/ai-openrouter/src/tools/tool-converter.ts
new file mode 100644
index 00000000..8bed413f
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/tools/tool-converter.ts
@@ -0,0 +1,11 @@
+import { convertFunctionToolToAdapterFormat } from './function-tool'
+import type { Tool } from '@tanstack/ai'
+import type { FunctionTool } from './function-tool'
+
+export type OpenRouterTool = FunctionTool
+
+export function convertToolsToProviderFormat(
+  tools: Array<Tool>,
+): Array<OpenRouterTool> {
+ return tools.map((tool) => convertFunctionToolToAdapterFormat(tool))
+}
diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts
new file mode 100644
index 00000000..e2c22745
--- /dev/null
+++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts
@@ -0,0 +1,398 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import { chat } from '@tanstack/ai'
+import { OpenRouter } from '../src/openrouter-adapter'
+import type { StreamChunk, Tool } from '@tanstack/ai'
+import type { OpenRouterProviderOptions } from '../src/openrouter-adapter'
+
+const createAdapter = () => new OpenRouter({ apiKey: 'test-key' })
+
+const toolArguments = JSON.stringify({ location: 'Berlin' })
+
+const weatherTool: Tool = {
+ name: 'lookup_weather',
+ description: 'Return the forecast for a location',
+}
+
+function createMockSSEResponse(
+  chunks: Array<Record<string, unknown>>,
+): Response {
+ const encoder = new TextEncoder()
+ const stream = new ReadableStream({
+ start(controller) {
+ for (const chunk of chunks) {
+ const data = `data: ${JSON.stringify(chunk)}\n\n`
+ controller.enqueue(encoder.encode(data))
+ }
+ controller.enqueue(encoder.encode('data: [DONE]\n\n'))
+ controller.close()
+ },
+ })
+
+ return new Response(stream, {
+ status: 200,
+ headers: { 'Content-Type': 'text/event-stream' },
+ })
+}
+
+describe('OpenRouter adapter option mapping', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ })
+
+ it('maps options into the Chat Completions API payload', async () => {
+ const mockResponse = createMockSSEResponse([
+ {
+ id: 'chatcmpl-123',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: { content: 'It is sunny' },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: 'chatcmpl-123',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: {},
+ finish_reason: 'stop',
+ },
+ ],
+ usage: {
+ prompt_tokens: 12,
+ completion_tokens: 4,
+ total_tokens: 16,
+ },
+ },
+ ])
+
+ const fetchSpy = vi
+ .spyOn(globalThis, 'fetch')
+ .mockResolvedValueOnce(mockResponse)
+
+ const adapter = createAdapter()
+
+ const providerOptions: OpenRouterProviderOptions = {
+ tool_choice: 'auto',
+ plugins: [{ id: 'web', max_results: 5 }],
+ }
+
+    const chunks: Array<StreamChunk> = []
+ for await (const chunk of chat({
+ adapter,
+ model: 'openai/gpt-4o-mini',
+ systemPrompts: ['Stay concise'],
+ messages: [
+ { role: 'user', content: 'How is the weather?' },
+ {
+ role: 'assistant',
+ content: 'Let me check',
+ toolCalls: [
+ {
+ id: 'call_weather',
+ type: 'function',
+ function: { name: 'lookup_weather', arguments: toolArguments },
+ },
+ ],
+ },
+ { role: 'tool', toolCallId: 'call_weather', content: '{"temp":72}' },
+ ],
+ tools: [weatherTool],
+ options: {
+ temperature: 0.25,
+ topP: 0.6,
+ maxTokens: 1024,
+ },
+ providerOptions,
+ })) {
+ chunks.push(chunk)
+ }
+
+ expect(fetchSpy).toHaveBeenCalledTimes(1)
+
+ const call = fetchSpy.mock.calls[0]
+ expect(call).toBeDefined()
+
+ const [url, options] = call!
+ expect(url).toBe('https://openrouter.ai/api/v1/chat/completions')
+
+ const payload = JSON.parse(options?.body as string)
+
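+    // camelCase chat options should be mapped to snake_case request fields.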
+ expect(payload).toMatchObject({
+ model: 'openai/gpt-4o-mini',
+ temperature: 0.25,
+ top_p: 0.6,
+ max_tokens: 1024,
+ stream: true,
+ tool_choice: 'auto',
+ plugins: [{ id: 'web', max_results: 5 }],
+ })
+
+ expect(payload.messages).toBeDefined()
+ expect(Array.isArray(payload.messages)).toBe(true)
+
+ expect(payload.tools).toBeDefined()
+ expect(Array.isArray(payload.tools)).toBe(true)
+ expect(payload.tools.length).toBeGreaterThan(0)
+
+ fetchSpy.mockRestore()
+ })
+
+ it('streams chat chunks with content and usage', async () => {
+ const mockResponse = createMockSSEResponse([
+ {
+ id: 'chatcmpl-stream',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: { content: 'Hello ' },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: 'chatcmpl-stream',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: { content: 'world' },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: 'chatcmpl-stream',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: {},
+ finish_reason: 'stop',
+ },
+ ],
+ usage: {
+ prompt_tokens: 5,
+ completion_tokens: 2,
+ total_tokens: 7,
+ },
+ },
+ ])
+
+ const fetchSpy = vi
+ .spyOn(globalThis, 'fetch')
+ .mockResolvedValueOnce(mockResponse)
+
+ const adapter = createAdapter()
+    const chunks: Array<StreamChunk> = []
+
+ for await (const chunk of chat({
+ adapter,
+ model: 'openai/gpt-4o-mini',
+ messages: [{ role: 'user', content: 'Say hello' }],
+ })) {
+ chunks.push(chunk)
+ }
+
+ expect(chunks[0]).toMatchObject({
+ type: 'content',
+ delta: 'Hello ',
+ content: 'Hello ',
+ })
+
+ expect(chunks[1]).toMatchObject({
+ type: 'content',
+ delta: 'world',
+ content: 'Hello world',
+ })
+
+ const doneChunk = chunks.find(
+ (c) => c.type === 'done' && 'usage' in c && c.usage,
+ )
+ expect(doneChunk).toMatchObject({
+ type: 'done',
+ finishReason: 'stop',
+ usage: {
+ promptTokens: 5,
+ completionTokens: 2,
+ totalTokens: 7,
+ },
+ })
+
+ fetchSpy.mockRestore()
+ })
+
+ it('handles tool calls in streaming response', async () => {
+ const mockResponse = createMockSSEResponse([
+ {
+ id: 'chatcmpl-456',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: {
+ tool_calls: [
+ {
+ index: 0,
+ id: 'call_abc123',
+ type: 'function',
+ function: {
+ name: 'lookup_weather',
+ arguments: '{"location":',
+ },
+ },
+ ],
+ },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: 'chatcmpl-456',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: {
+ tool_calls: [
+ {
+ index: 0,
+ function: {
+ arguments: '"Berlin"}',
+ },
+ },
+ ],
+ },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: 'chatcmpl-456',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: {},
+ finish_reason: 'tool_calls',
+ },
+ ],
+ usage: {
+ prompt_tokens: 10,
+ completion_tokens: 5,
+ total_tokens: 15,
+ },
+ },
+ ])
+
+ const fetchSpy = vi
+ .spyOn(globalThis, 'fetch')
+ .mockResolvedValueOnce(mockResponse)
+
+ const adapter = createAdapter()
+
+    const chunks: Array<StreamChunk> = []
+ for await (const chunk of chat({
+ adapter,
+ model: 'openai/gpt-4o-mini',
+ messages: [{ role: 'user', content: 'What is the weather in Berlin?' }],
+ tools: [weatherTool],
+ })) {
+ chunks.push(chunk)
+ }
+
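+    // The streamed argument fragments should be merged into one complete tool call.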
+    const toolCallChunks = chunks.filter((c) => c.type === 'tool_call')
+    expect(toolCallChunks.length).toBe(1)
+
+    const toolCallChunk = toolCallChunks[0]
+    if (toolCallChunk && toolCallChunk.type === 'tool_call') {
+      expect(toolCallChunk.toolCall.function.name).toBe('lookup_weather')
+      expect(toolCallChunk.toolCall.function.arguments).toBe(
+        '{"location":"Berlin"}',
+      )
+    }
+
+ fetchSpy.mockRestore()
+ })
+
+ it('handles multimodal input with text and image', async () => {
+ const mockResponse = createMockSSEResponse([
+ {
+ id: 'chatcmpl-multimodal',
+ model: 'openai/gpt-4o-mini',
+ choices: [
+ {
+ delta: { content: 'I can see the image' },
+ finish_reason: 'stop',
+ },
+ ],
+ usage: { prompt_tokens: 50, completion_tokens: 5, total_tokens: 55 },
+ },
+ ])
+
+ const fetchSpy = vi
+ .spyOn(globalThis, 'fetch')
+ .mockResolvedValueOnce(mockResponse)
+
+ const adapter = createAdapter()
+
+ for await (const _ of chat({
+ adapter,
+ model: 'openai/gpt-4o-mini',
+ messages: [
+ {
+ role: 'user',
+ content: [
+ { type: 'text', content: 'What do you see?' },
+ {
+ type: 'image',
+ source: { type: 'url', value: 'https://example.com/image.jpg' },
+ },
+ ],
+ },
+ ],
+ })) {
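+      // Drain the stream; this test only inspects the outgoing request payload.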
+ }
+
+ const [, options] = fetchSpy.mock.calls[0]!
+ const payload = JSON.parse(options?.body as string)
+
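+    // Image parts should be converted to OpenAI-style image_url content parts.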
+ const contentParts = payload.messages[0].content
+ expect(contentParts[0]).toMatchObject({
+ type: 'text',
+ text: 'What do you see?',
+ })
+ expect(contentParts[1]).toMatchObject({
+ type: 'image_url',
+ image_url: { url: 'https://example.com/image.jpg' },
+ })
+
+ fetchSpy.mockRestore()
+ })
+
+ it('yields error chunk on HTTP error response', async () => {
+ const errorResponse = new Response(
+ JSON.stringify({ error: { message: 'Invalid API key' } }),
+ { status: 401 },
+ )
+
+ const fetchSpy = vi
+ .spyOn(globalThis, 'fetch')
+ .mockResolvedValueOnce(errorResponse)
+
+ const adapter = createAdapter()
+
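+    // Call chatStream directly so the raw error chunk can be observed.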
+    const chunks: Array<StreamChunk> = []
+ for await (const chunk of adapter.chatStream({
+ model: 'openai/gpt-4o-mini',
+ messages: [{ role: 'user', content: 'Hello' }],
+ })) {
+ chunks.push(chunk)
+ }
+
+ expect(chunks.length).toBe(1)
+ expect(chunks[0]!.type).toBe('error')
+
+ if (chunks[0] && chunks[0].type === 'error') {
+ expect(chunks[0].error.message).toBe('Invalid API key')
+ expect(chunks[0].error.code).toBe('401')
+ }
+
+ fetchSpy.mockRestore()
+ })
+})
diff --git a/packages/typescript/ai-openrouter/tsconfig.json b/packages/typescript/ai-openrouter/tsconfig.json
new file mode 100644
index 00000000..ea11c109
--- /dev/null
+++ b/packages/typescript/ai-openrouter/tsconfig.json
@@ -0,0 +1,9 @@
+{
+ "extends": "../../../tsconfig.json",
+ "compilerOptions": {
+ "outDir": "dist",
+ "rootDir": "src"
+ },
+ "include": ["src/**/*.ts", "src/**/*.tsx"],
+ "exclude": ["node_modules", "dist", "**/*.config.ts"]
+}
diff --git a/packages/typescript/ai-openrouter/vite.config.ts b/packages/typescript/ai-openrouter/vite.config.ts
new file mode 100644
index 00000000..77bcc2e6
--- /dev/null
+++ b/packages/typescript/ai-openrouter/vite.config.ts
@@ -0,0 +1,37 @@
+import { defineConfig, mergeConfig } from 'vitest/config'
+import { tanstackViteConfig } from '@tanstack/vite-config'
+import packageJson from './package.json'
+
+const config = defineConfig({
+ test: {
+ name: packageJson.name,
+ dir: './',
+ watch: false,
+ globals: true,
+ environment: 'node',
+ include: ['tests/**/*.test.ts'],
+ coverage: {
+ provider: 'v8',
+ reporter: ['text', 'json', 'html', 'lcov'],
+ exclude: [
+ 'node_modules/',
+ 'dist/',
+ 'tests/',
+ '**/*.test.ts',
+ '**/*.config.ts',
+ '**/types.ts',
+ ],
+ include: ['src/**/*.ts'],
+ },
+ },
+})
+
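+// Merge the test settings above with TanStack's shared library build config.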
+export default mergeConfig(
+ config,
+ tanstackViteConfig({
+ entry: ['./src/index.ts'],
+ srcDir: './src',
+ cjs: false,
+ }),
+)
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 214dfb20..1142390c 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -727,6 +727,19 @@ importers:
specifier: ^7.2.4
version: 7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)
+ packages/typescript/ai-openrouter:
+ dependencies:
+ '@tanstack/ai':
+ specifier: workspace:*
+ version: link:../ai
+ devDependencies:
+ '@vitest/coverage-v8':
+ specifier: 4.0.14
+ version: 4.0.14(vitest@4.0.14(@types/node@24.10.1)(happy-dom@20.0.10)(jiti@2.6.1)(jsdom@27.2.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ vite:
+ specifier: ^7.2.4
+ version: 7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)
+
packages/typescript/ai-react:
dependencies:
'@tanstack/ai':