TypeScript Integration
OpenRouter provides full TypeScript support with type-safe interfaces, auto-completion, and comprehensive error handling for a superior developer experience.
Installation
Install the OpenRouter TypeScript client using your preferred package manager:
npm install openrouter-ai
# or
yarn add openrouter-ai
# or
pnpm add openrouter-ai
Basic Setup
Initialize the OpenRouter client with your API key:
import { OpenRouter } from 'openrouter-ai';
// Create one shared client instance; the key is read from the environment
// rather than hard-coded.
const openrouter = new OpenRouter({
  apiKey: process.env.OPENROUTER_API_KEY,
  // Optional: Set default headers
  defaultHeaders: {
    // Attribution headers — presumably used by OpenRouter to identify your app;
    // confirm their exact effect in the OpenRouter docs.
    'HTTP-Referer': process.env.YOUR_SITE_URL,
    'X-Title': process.env.YOUR_APP_NAME,
  }
});
Type Definitions
OpenRouter provides comprehensive type definitions for all API operations:
// Chat completion types
// Chat completion types
/** Request body for a chat completion call. */
interface ChatCompletionRequest {
  /** OpenRouter model ID, e.g. 'anthropic/claude-3-opus'. */
  model: string;
  /** Conversation history, oldest message first. */
  messages: Message[];
  /** Sampling temperature; higher values produce more varied output. */
  temperature?: number;
  /** Upper bound on the number of generated tokens. */
  max_tokens?: number;
  /** Nucleus-sampling cutoff. */
  top_p?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
  /** When true, the response is delivered as a stream of chunks. */
  stream?: boolean;
  /** One or more sequences at which generation stops. */
  stop?: string | string[];
  /** OpenRouter prompt transforms — see the OpenRouter docs for valid values. */
  transforms?: string[];
}
/** A single chat message in a conversation. */
interface Message {
  /** Author of the message. */
  role: 'system' | 'user' | 'assistant' | 'function';
  content: string;
  /** Optional author name — presumably used with role 'function'; confirm in API docs. */
  name?: string;
  /** Present when the assistant requests a function call. */
  function_call?: FunctionCall;
}
/** Successful (non-streaming) chat completion response. */
interface ChatCompletionResponse {
  /** Unique completion ID, e.g. 'chatcmpl-...'. */
  id: string;
  object: 'chat.completion';
  /** Creation timestamp — presumably Unix seconds; confirm units against API docs. */
  created: number;
  /** The model that actually served the request. */
  model: string;
  /** One or more generated alternatives. */
  choices: Choice[];
  /** Token accounting for billing/monitoring. */
  usage: Usage;
}
/** One generated completion alternative. */
interface Choice {
  /** Position of this choice within the response's choices array. */
  index: number;
  /** The message produced for this choice. */
  message: Message;
  /** Why generation stopped — presumably null only mid-stream; confirm in API docs. */
  finish_reason: 'stop' | 'length' | 'function_call' | null;
}
/** Token counts for a completed request. */
interface Usage {
  prompt_tokens: number;
  completion_tokens: number;
  /** Sum of prompt and completion tokens. */
  total_tokens: number;
}
Making Requests
Use the type-safe client to make API requests:
async function getChatCompletion() {
try {
const completion = await openrouter.chat.completions.create({
model: 'anthropic/claude-3-opus',
messages: [
{
role: 'system',
content: 'You are a helpful assistant.',
},
{
role: 'user',
content: 'Explain quantum computing in simple terms.',
},
],
temperature: 0.7,
max_tokens: 500,
});
console.log(completion.choices[0].message.content);
} catch (error) {
if (error instanceof OpenRouterError) {
console.error('API Error:', error.message);
console.error('Status:', error.status);
console.error('Code:', error.code);
} else {
console.error('Unexpected error:', error);
}
}
}
Streaming Responses
Handle streaming responses with proper TypeScript types:
/**
 * Streams a completion chunk-by-chunk, writing each content delta to stdout
 * as it arrives.
 */
async function streamChatCompletion() {
  const stream = await openrouter.chat.completions.create({
    model: 'openai/gpt-4-turbo',
    messages: [{ role: 'user', content: 'Write a short story about a robot.' }],
    stream: true,
  });
  for await (const chunk of stream) {
    // Deltas may be empty (e.g. role-only chunks); only write real text.
    const delta = chunk.choices[0]?.delta?.content;
    if (delta) {
      process.stdout.write(delta);
    }
  }
}
Error Handling
Properly handle different error types with TypeScript:
import { OpenRouterError, RateLimitError, AuthenticationError } from 'openrouter-ai';
/**
 * Makes a single chat completion request with typed error handling.
 * Returns the response on success. API-specific errors are logged and
 * swallowed (the function then resolves to undefined); only truly
 * unexpected errors are rethrown.
 */
async function robustApiCall() {
  try {
    const response = await openrouter.chat.completions.create({
      model: 'meta-llama/llama-3-70b-instruct',
      messages: [{ role: 'user', content: 'Hello!' }],
    });
    return response;
  } catch (error) {
    // Order matters: the specific RateLimitError / AuthenticationError checks
    // must precede the general OpenRouterError check — presumably they
    // subclass it; confirm in the library's type definitions.
    if (error instanceof RateLimitError) {
      console.error('Rate limit exceeded. Retry after:', error.retryAfter);
      // Implement exponential backoff
    } else if (error instanceof AuthenticationError) {
      console.error('Authentication failed:', error.message);
      // Check API key
    } else if (error instanceof OpenRouterError) {
      console.error('API error:', error.status, error.message);
      // Handle other API errors
    } else {
      console.error('Unexpected error:', error);
      throw error;
    }
  }
}
Advanced Usage
Custom Request Configuration
// Configure timeout and retry behavior
// These defaults apply to every request made through this client instance.
const openrouter = new OpenRouter({
  apiKey: process.env.OPENROUTER_API_KEY,
  timeout: 30000, // 30 seconds
  maxRetries: 3,
  retryDelay: 1000, // 1 second base delay — presumably scaled per attempt; confirm in client docs
});
// Override per request
// Fix: the original snippet referenced `abortController` without ever
// creating it and would not compile. An AbortController lets the caller
// cancel the in-flight request via abortController.abort().
const abortController = new AbortController();
const completion = await openrouter.chat.completions.create(
  {
    model: 'anthropic/claude-3-sonnet',
    messages: [{ role: 'user', content: 'Complex query...' }],
  },
  {
    timeout: 60000, // 60 seconds for this request
    signal: abortController.signal, // Support for AbortController
  }
);
Model Selection Helper
// Type-safe model selection
// String enum of the provider prefixes used in OpenRouter model IDs
// (the segment before the '/' in e.g. 'anthropic/claude-3-opus').
enum ModelProvider {
  Anthropic = 'anthropic',
  OpenAI = 'openai',
  Google = 'google',
  Meta = 'meta-llama',
}
/** Static metadata describing one selectable model. */
interface ModelConfig {
  provider: ModelProvider;
  /** Fully-qualified OpenRouter model ID ('provider/model'). */
  model: string;
  /** Maximum context window, in tokens. */
  contextLength: number;
  /** Prices — presumably USD per 1K tokens; confirm against openrouter.ai pricing. */
  pricing: {
    prompt: number;
    completion: number;
  };
}
// Registry of known models, keyed by a short local alias.
// NOTE(review): pricing/context figures are hard-coded examples — keep them
// in sync with the live values on openrouter.ai/models.
const models: Record<string, ModelConfig> = {
  'claude-3-opus': {
    provider: ModelProvider.Anthropic,
    model: 'anthropic/claude-3-opus',
    contextLength: 200000,
    pricing: { prompt: 0.015, completion: 0.075 },
  },
  'gpt-4-turbo': {
    provider: ModelProvider.OpenAI,
    model: 'openai/gpt-4-turbo',
    contextLength: 128000,
    pricing: { prompt: 0.01, completion: 0.03 },
  },
};
/**
 * Picks the cheapest registered model (by prompt price) that satisfies the
 * given requirements.
 *
 * @param requirements.minContext - Minimum context window, in tokens.
 * @param requirements.maxCost - Maximum acceptable prompt price.
 *   NOTE: only the prompt price is compared; completion price is ignored.
 * @returns The fully-qualified OpenRouter model ID.
 * @throws Error when no registered model satisfies the requirements.
 *   (The original indexed `[0]` unconditionally and crashed with an opaque
 *   TypeError — "Cannot read properties of undefined" — in that case.)
 */
function selectModel(requirements: {
  minContext?: number;
  maxCost?: number;
}): string {
  const candidates = Object.values(models)
    .filter((config) =>
      (!requirements.minContext || config.contextLength >= requirements.minContext) &&
      (!requirements.maxCost || config.pricing.prompt <= requirements.maxCost)
    )
    // filter() returns a fresh array, so sorting in place is safe here.
    .sort((a, b) => a.pricing.prompt - b.pricing.prompt);
  if (candidates.length === 0) {
    throw new Error('No model satisfies the given requirements');
  }
  return candidates[0].model;
}
Best Practices
Always validate environment variables and handle missing API keys gracefully in production.
Environment Configuration
// .env.local
OPENROUTER_API_KEY=sk-or-v1-...
YOUR_SITE_URL=https://yourapp.com
YOUR_APP_NAME=YourApp
// config/openrouter.ts
import { OpenRouter } from 'openrouter-ai';
// Fail fast at module load time: a missing key would otherwise only
// surface later, on the first API request.
if (!process.env.OPENROUTER_API_KEY) {
  throw new Error('OPENROUTER_API_KEY is required');
}
// Single shared client instance for the whole application.
export const openrouter = new OpenRouter({
  apiKey: process.env.OPENROUTER_API_KEY,
  defaultHeaders: {
    // Fall back to empty strings so the headers are always well-formed.
    'HTTP-Referer': process.env.YOUR_SITE_URL || '',
    'X-Title': process.env.YOUR_APP_NAME || '',
  },
});
Type Guards
// Type guards for runtime validation
/**
 * Runtime type guard for Message.
 *
 * Fix: `typeof null === 'object'` in JavaScript, so the original guard threw
 * a TypeError (reading `.role` of null) when passed `null` instead of
 * returning false. The explicit null check makes the guard total over all
 * inputs.
 */
function isValidMessage(obj: any): obj is Message {
  return (
    typeof obj === 'object' &&
    obj !== null &&
    ['system', 'user', 'assistant', 'function'].includes(obj.role) &&
    typeof obj.content === 'string'
  );
}
/**
 * Runtime type guard for a streaming response chunk.
 *
 * Fix: same null bug as isValidMessage — `typeof null === 'object'`, so the
 * original threw a TypeError (reading `.object` of null) when passed `null`
 * instead of returning false.
 */
function isStreamChunk(obj: any): obj is StreamChunk {
  return (
    typeof obj === 'object' &&
    obj !== null &&
    obj.object === 'chat.completion.chunk' &&
    Array.isArray(obj.choices)
  );
}
// Usage
// NOTE(review): `response` and `handleStreamChunk` are assumed to be defined
// by the surrounding application code — this fragment is not self-contained.
const data = await response.json();
if (isStreamChunk(data)) {
  // TypeScript knows data is StreamChunk here
  handleStreamChunk(data);
}
Testing
Write type-safe tests for your OpenRouter integration:
import { jest } from '@jest/globals';
import { OpenRouter } from 'openrouter-ai';
// Mock the OpenRouter client
jest.mock('openrouter-ai');
describe('OpenRouter Integration', () => {
  let openrouter: jest.Mocked<OpenRouter>;
  beforeEach(() => {
    // jest.mock('openrouter-ai') above replaces the constructor, so this
    // yields the auto-mocked instance, not a real network client.
    openrouter = new OpenRouter({
      apiKey: 'test-key',
    }) as jest.Mocked<OpenRouter>;
  });
  it('should handle chat completions', async () => {
    // A canned response matching the ChatCompletionResponse shape.
    // NOTE(review): `created: Date.now()` is milliseconds, while the API's
    // `created` field is presumably Unix seconds — harmless in a mock, but
    // confirm before reusing this fixture for integration tests.
    const mockResponse = {
      id: 'chatcmpl-123',
      object: 'chat.completion',
      created: Date.now(),
      model: 'anthropic/claude-3-opus',
      choices: [{
        index: 0,
        message: { role: 'assistant', content: 'Test response' },
        finish_reason: 'stop',
      }],
      usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
    };
    openrouter.chat.completions.create.mockResolvedValue(mockResponse);
    const result = await openrouter.chat.completions.create({
      model: 'anthropic/claude-3-opus',
      messages: [{ role: 'user', content: 'Test' }],
    });
    expect(result.choices[0].message.content).toBe('Test response');
  });
});
Next Steps
Now that you have TypeScript set up with OpenRouter, explore these resources:
- Check the Available Models for model-specific features
- Learn about Vercel AI SDK integration for React applications
- Explore LangChain integration for complex workflows