Vercel AI SDK Integration
The Vercel AI SDK provides React hooks and utilities for building AI-powered user interfaces. Combined with OpenRouter, you can create streaming chat interfaces, AI-assisted forms, and interactive experiences with 100+ language models.
React-First AI Development
Build modern AI applications with React hooks, automatic streaming UI updates, and edge runtime support.
Installation
Install the Vercel AI SDK along with the OpenAI SDK, which you will configure to point at OpenRouter:
npm install ai openai
# or
yarn add ai openai
# or
pnpm add ai openai
Setup
API Route (Next.js App Router)
Create an API route to handle chat requests:
// app/api/chat/route.ts
import { OpenAI } from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';

// OpenAI SDK client pointed at OpenRouter's OpenAI-compatible endpoint.
// The optional headers attribute your app on openrouter.ai leaderboards.
const openai = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
  defaultHeaders: {
    'HTTP-Referer': process.env.YOUR_SITE_URL,
    'X-Title': process.env.YOUR_APP_NAME,
  }
});

/**
 * POST /api/chat — forwards the chat history to OpenRouter and streams
 * the model's reply back to the client as a text stream.
 */
export async function POST(req: Request) {
  const body = await req.json();

  // Request a streamed chat completion from OpenRouter.
  const completion = await openai.chat.completions.create({
    model: 'anthropic/claude-3-sonnet',
    messages: body.messages,
    stream: true,
    temperature: 0.7,
    max_tokens: 1000,
  });

  // Adapt the SDK's async iterator into a web ReadableStream and
  // wrap it in a streaming HTTP response.
  return new StreamingTextResponse(OpenAIStream(completion));
}
API Route (Next.js Pages Router)
// pages/api/chat.ts
import { OpenAI } from 'openai';
import { OpenAIStream, streamToResponse } from 'ai';
import type { NextApiRequest, NextApiResponse } from 'next';

// OpenAI SDK client pointed at OpenRouter's OpenAI-compatible endpoint.
const openai = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
});

/**
 * Pages Router chat endpoint: streams a completion into the Node.js
 * response object.
 */
export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse
) {
  // Only POST is supported.
  if (req.method !== 'POST') {
    return res.status(405).end();
  }

  const { messages } = req.body;

  const response = await openai.chat.completions.create({
    model: 'openai/gpt-4-turbo',
    messages,
    stream: true,
  });

  const stream = OpenAIStream(response);

  // FIX: StreamingTextResponse is a web Response and has no
  // `pipeToResponse` method. In the Pages Router (Node.js runtime) the
  // AI SDK's `streamToResponse` helper pipes the web stream into the
  // NextApiResponse instead.
  streamToResponse(stream, res);
}
React Components
Basic Chat Interface
'use client';
import { useChat } from 'ai/react';

/**
 * Minimal streaming chat UI backed by the /api/chat route. Messages
 * render in a scrollable column; the composer is pinned to the bottom
 * and disabled while a completion is streaming in.
 */
export function Chat() {
  const chat = useChat({ api: '/api/chat' });

  return (
    <div className="flex flex-col h-screen max-w-2xl mx-auto">
      {/* Message history */}
      <div className="flex-1 overflow-y-auto p-4 space-y-4">
        {chat.messages.map((message) => {
          const fromUser = message.role === 'user';
          return (
            <div
              key={message.id}
              className={fromUser ? 'flex justify-end' : 'flex justify-start'}
            >
              <div
                className={
                  fromUser
                    ? 'max-w-xs lg:max-w-md px-4 py-2 rounded-lg bg-blue-500 text-white'
                    : 'max-w-xs lg:max-w-md px-4 py-2 rounded-lg bg-gray-200 text-gray-800'
                }
              >
                {message.content}
              </div>
            </div>
          );
        })}
      </div>
      {/* Composer */}
      <form
        onSubmit={chat.handleSubmit}
        className="border-t p-4 flex gap-2"
      >
        <input
          value={chat.input}
          onChange={chat.handleInputChange}
          placeholder="Type a message..."
          className="flex-1 px-4 py-2 border rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500"
          disabled={chat.isLoading}
        />
        <button
          type="submit"
          disabled={chat.isLoading}
          className="px-6 py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600 disabled:opacity-50"
        >
          Send
        </button>
      </form>
    </div>
  );
}
Advanced Chat with Features
'use client';
import { useChat } from 'ai/react';
import { useState } from 'react';

/**
 * Chat UI with a model picker, stop/regenerate controls, and a
 * "Continue" shortcut. The selected model slug is forwarded to the API
 * route in the request body.
 */
export function AdvancedChat() {
  const [model, setModel] = useState('anthropic/claude-3-sonnet');
  const {
    messages,
    input,
    handleInputChange,
    handleSubmit,
    isLoading,
    stop,
    reload,
    append,
  } = useChat({
    api: '/api/chat',
    body: {
      model, // Pass model selection to API
    },
    onFinish: (message) => {
      console.log('Completed message:', message);
    },
    onError: (error) => {
      console.error('Chat error:', error);
    },
  });

  // Models offered in the picker; values are OpenRouter model slugs.
  const models = [
    { value: 'anthropic/claude-3-opus', label: 'Claude 3 Opus' },
    { value: 'anthropic/claude-3-sonnet', label: 'Claude 3 Sonnet' },
    { value: 'openai/gpt-4-turbo', label: 'GPT-4 Turbo' },
    { value: 'google/gemini-pro', label: 'Gemini Pro' },
  ];

  return (
    <div className="flex flex-col h-screen max-w-4xl mx-auto">
      {/* Model selector */}
      <div className="border-b p-4">
        <select
          value={model}
          onChange={(e) => setModel(e.target.value)}
          className="px-4 py-2 border rounded-lg"
        >
          {models.map((m) => (
            <option key={m.value} value={m.value}>
              {m.label}
            </option>
          ))}
        </select>
      </div>
      {/* Messages */}
      <div className="flex-1 overflow-y-auto p-4 space-y-4">
        {messages.map((message, index) => (
          <div key={message.id} className="group relative">
            <div
              className={`flex ${
                message.role === 'user' ? 'justify-end' : 'justify-start'
              }`}
            >
              <div
                className={`max-w-2xl px-4 py-2 rounded-lg ${
                  message.role === 'user'
                    ? 'bg-blue-500 text-white'
                    : 'bg-gray-100'
                }`}
              >
                <div className="whitespace-pre-wrap">{message.content}</div>
              </div>
            </div>
            {/* Regenerate button — only on the latest assistant message */}
            {message.role === 'assistant' && index === messages.length - 1 && (
              <div className="absolute -bottom-6 left-0 opacity-0 group-hover:opacity-100 transition-opacity">
                <button
                  onClick={() => reload()}
                  className="text-sm text-gray-500 hover:text-gray-700"
                >
                  Regenerate
                </button>
              </div>
            )}
          </div>
        ))}
        {isLoading && (
          <div className="flex justify-start">
            <div className="bg-gray-100 rounded-lg px-4 py-2">
              <div className="flex items-center gap-2">
                <div className="animate-pulse">Thinking...</div>
                <button
                  onClick={stop}
                  className="text-sm text-red-500 hover:text-red-700"
                >
                  Stop
                </button>
              </div>
            </div>
          </div>
        )}
      </div>
      {/* Input form */}
      <form onSubmit={handleSubmit} className="border-t p-4">
        <div className="flex gap-2">
          <textarea
            value={input}
            onChange={handleInputChange}
            placeholder="Type a message..."
            className="flex-1 px-4 py-2 border rounded-lg resize-none focus:outline-none focus:ring-2 focus:ring-blue-500"
            rows={3}
            onKeyDown={(e) => {
              // Enter sends; Shift+Enter inserts a newline.
              if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                // FIX: submit the enclosing form instead of casting the
                // keyboard event to a form event with `as any`.
                e.currentTarget.form?.requestSubmit();
              }
            }}
          />
          <div className="flex flex-col gap-2">
            <button
              type="submit"
              disabled={isLoading || !input.trim()}
              className="px-6 py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600 disabled:opacity-50"
            >
              Send
            </button>
            <button
              type="button"
              onClick={() => append({ role: 'user', content: 'Continue' })}
              disabled={isLoading}
              className="px-6 py-2 bg-gray-200 text-gray-700 rounded-lg hover:bg-gray-300 disabled:opacity-50"
            >
              Continue
            </button>
          </div>
        </div>
      </form>
    </div>
  );
}
Streaming with Metadata
Stream additional data alongside the response:
// app/api/chat/route.ts
import { OpenAI } from 'openai';
import { OpenAIStream, StreamingTextResponse, StreamData } from 'ai';

const openai = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
});

// POST /api/chat — streams the completion text and, alongside it,
// out-of-band metadata (start/complete events) via a StreamData channel
// that the client reads through useChat's `data` field.
export async function POST(req: Request) {
  const { messages } = await req.json();
  // Create stream data helper
  const data = new StreamData();
  const response = await openai.chat.completions.create({
    model: 'anthropic/claude-3-opus',
    messages,
    stream: true,
  });
  const stream = OpenAIStream(response, {
    onStart: async () => {
      // Send metadata when stream starts
      data.append({ type: 'start', timestamp: Date.now() });
    },
    onToken: async (token) => {
      // Track tokens if needed
      console.log('Token:', token);
    },
    onCompletion: async (completion) => {
      // Send completion metadata
      data.append({
        type: 'complete',
        // NOTE(review): `completion.length` is the character count of the
        // final text, not a token count — rename or compute real usage.
        tokensUsed: completion.length,
        timestamp: Date.now(),
      });
    },
    onFinal: async () => {
      // Closing the StreamData is required, otherwise the response
      // stream never terminates on the client.
      data.close();
    },
  });
  return new StreamingTextResponse(stream, {}, data);
}
Access the metadata in your React component:
// Client side: `data` exposes the values appended via StreamData on the server.
const { messages, data } = useChat({
  api: '/api/chat',
});
// Access streamed data
// NOTE(review): snippet assumes React's `useEffect` is imported in the
// surrounding component — confirm in context.
useEffect(() => {
  if (data) {
    console.log('Stream data:', data);
  }
}, [data]);
AI-Assisted Forms
Use AI to help users fill out forms:
'use client';
import { useCompletion } from 'ai/react';
import { useState } from 'react';

/**
 * Product form where AI can draft the description and suggest tags.
 * `useCompletion` calls the /api/completion route; results are written
 * into local form state but remain editable by the user.
 */
export function AIAssistedForm() {
  const [formData, setFormData] = useState({
    title: '',
    description: '',
    tags: '',
  });
  const {
    complete,
    completion,
    isLoading,
  } = useCompletion({
    api: '/api/completion',
  });

  const generateDescription = async () => {
    const prompt = `Generate a compelling product description for: ${formData.title}`;
    const result = await complete(prompt);
    // FIX: `complete` resolves to null/undefined on error or abort —
    // fall back to an empty string instead of writing null into string
    // state. A functional update avoids clobbering fields the user
    // edited while the request was in flight.
    setFormData((prev) => ({ ...prev, description: result ?? '' }));
  };

  const suggestTags = async () => {
    const prompt = `Suggest 5 relevant tags for a product titled "${formData.title}" with description: "${formData.description}". Return as comma-separated list.`;
    const result = await complete(prompt);
    // Same null-safety + functional update as generateDescription.
    setFormData((prev) => ({ ...prev, tags: result ?? '' }));
  };

  return (
    <form className="max-w-2xl mx-auto p-6 space-y-4">
      <div>
        <label className="block text-sm font-medium mb-2">
          Product Title
        </label>
        <input
          type="text"
          value={formData.title}
          onChange={(e) => setFormData({ ...formData, title: e.target.value })}
          className="w-full px-4 py-2 border rounded-lg"
          placeholder="Enter product title"
        />
      </div>
      <div>
        <div className="flex justify-between items-center mb-2">
          <label className="block text-sm font-medium">
            Description
          </label>
          {/* AI draft button — needs a title to build the prompt */}
          <button
            type="button"
            onClick={generateDescription}
            disabled={!formData.title || isLoading}
            className="text-sm text-blue-500 hover:text-blue-700 disabled:opacity-50"
          >
            {isLoading ? 'Generating...' : 'Generate with AI'}
          </button>
        </div>
        <textarea
          value={formData.description}
          onChange={(e) => setFormData({ ...formData, description: e.target.value })}
          className="w-full px-4 py-2 border rounded-lg"
          rows={4}
          placeholder="Product description"
        />
      </div>
      <div>
        <div className="flex justify-between items-center mb-2">
          <label className="block text-sm font-medium">
            Tags
          </label>
          {/* Tag suggestions need both title and description as context */}
          <button
            type="button"
            onClick={suggestTags}
            disabled={!formData.title || !formData.description || isLoading}
            className="text-sm text-blue-500 hover:text-blue-700 disabled:opacity-50"
          >
            {isLoading ? 'Suggesting...' : 'Suggest tags'}
          </button>
        </div>
        <input
          type="text"
          value={formData.tags}
          onChange={(e) => setFormData({ ...formData, tags: e.target.value })}
          className="w-full px-4 py-2 border rounded-lg"
          placeholder="comma, separated, tags"
        />
      </div>
      <button
        type="submit"
        className="w-full py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600"
      >
        Submit
      </button>
    </form>
  );
}
Edge Runtime Support
Deploy your AI endpoints to the edge for lower latency:
// app/api/chat/route.ts
import { OpenAI } from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';

// Configure for edge runtime
export const runtime = 'edge';

// OpenRouter-backed client; the OpenAI SDK works on the edge runtime
// because it only relies on Web APIs (fetch, streams).
const openai = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
});

/** POST /api/chat — stream a short completion from a fast model. */
export async function POST(req: Request) {
  const { messages } = await req.json();

  const completion = await openai.chat.completions.create({
    model: 'anthropic/claude-3-haiku', // Fast model for edge
    messages,
    stream: true,
    max_tokens: 500,
  });

  return new StreamingTextResponse(OpenAIStream(completion));
}
Function Calling
Integrate function calling with streaming UI:
// app/api/chat/route.ts
import { OpenAI } from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';
import { functions, runFunction } from './functions';

const openai = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
});

// POST /api/chat — chat completion with an OpenAI-style function tool.
// When the model emits a function call, the stream handler executes it
// and returns the result so the conversation can continue.
export async function POST(req: Request) {
  const { messages } = await req.json();
  const response = await openai.chat.completions.create({
    model: 'openai/gpt-4-turbo',
    messages,
    stream: true,
    // NOTE(review): `functions` is imported from './functions' above but the
    // schema is inlined here instead — presumably these should be the same
    // list; confirm and deduplicate.
    functions: [
      {
        name: 'get_weather',
        description: 'Get weather information',
        parameters: {
          type: 'object',
          properties: {
            location: { type: 'string' },
            unit: { type: 'string', enum: ['celsius', 'fahrenheit'] },
          },
          required: ['location'],
        },
      },
    ],
  });
  const stream = OpenAIStream(response, {
    // Invoked when the model requests a function call.
    // NOTE(review): assumes `args` arrives already JSON-parsed per the AI
    // SDK's experimental function-call API — verify against the SDK version.
    experimental_onFunctionCall: async ({ name, arguments: args }) => {
      const result = await runFunction(name, args);
      // Return function result to continue conversation
      return {
        messages: [
          ...messages,
          {
            role: 'function',
            name,
            content: JSON.stringify(result),
          },
        ],
      };
    },
  });
  return new StreamingTextResponse(stream);
}
Token Usage Tracking
Track token usage to monitor costs and stay within limits:
// Read token-usage headers off the chat API response as it arrives.
const { messages, isLoading, append } = useChat({
  api: '/api/chat',
  onResponse: async (response) => {
    // Extract token usage from headers
    // NOTE(review): these x-* headers are not set by OpenRouter or the AI
    // SDK by default — your /api/chat route must attach them to the
    // response itself; confirm they exist before relying on them.
    const usage = {
      promptTokens: response.headers.get('x-prompt-tokens'),
      completionTokens: response.headers.get('x-completion-tokens'),
      totalTokens: response.headers.get('x-total-tokens'),
    };
    console.log('Token usage:', usage);
    // Update your usage tracking state
  },
});
Best Practices
- Use streaming for better perceived performance
- Implement proper error boundaries in your React components
- Add loading states and skeleton screens during generation
- Use the edge runtime for lower latency when possible
- Implement rate limiting to prevent abuse
- Cache common completions to reduce API calls
- Use appropriate models based on task complexity
- Handle network errors gracefully with retry logic
Complete Example: AI Chat App
Here's a production-ready chat application with Vercel AI SDK and OpenRouter:
// app/page.tsx
'use client';
import { useChat } from 'ai/react';
import { useRef, useEffect } from 'react';

// Full-page streaming chat: header, scrolling message list with a typing
// indicator and inline error banner, and a composer whose Send button is
// swapped for a Stop button while a completion is streaming.
export default function ChatApp() {
  // Sentinel element at the end of the message list; scrolled into view
  // whenever messages change to keep the latest message visible.
  const messagesEndRef = useRef<HTMLDivElement>(null);
  const {
    messages,
    input,
    handleInputChange,
    handleSubmit,
    isLoading,
    error,
    reload,
    stop,
  } = useChat({
    api: '/api/chat',
    // Seed the conversation with a greeting so the UI is never empty.
    initialMessages: [
      {
        id: 'welcome',
        role: 'assistant',
        content: 'Hello! I\'m powered by OpenRouter. How can I help you today?',
      },
    ],
  });

  // Auto-scroll to bottom
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [messages]);

  return (
    <div className="flex flex-col h-screen bg-gray-50">
      <header className="bg-white border-b px-6 py-4">
        <h1 className="text-xl font-semibold">AI Chat with OpenRouter</h1>
      </header>
      <main className="flex-1 overflow-hidden flex flex-col">
        <div className="flex-1 overflow-y-auto px-6 py-4">
          <div className="max-w-3xl mx-auto space-y-4">
            {/* Message bubbles: user on the right, assistant on the left */}
            {messages.map((m) => (
              <div
                key={m.id}
                className={`flex ${
                  m.role === 'user' ? 'justify-end' : 'justify-start'
                }`}
              >
                <div
                  className={`max-w-xl px-4 py-2 rounded-lg ${
                    m.role === 'user'
                      ? 'bg-blue-500 text-white'
                      : 'bg-white border shadow-sm'
                  }`}
                >
                  <p className="whitespace-pre-wrap">{m.content}</p>
                </div>
              </div>
            ))}
            {/* Animated three-dot typing indicator while streaming */}
            {isLoading && (
              <div className="flex justify-start">
                <div className="bg-white border shadow-sm rounded-lg px-4 py-2">
                  <div className="flex items-center gap-2">
                    <div className="w-2 h-2 bg-blue-500 rounded-full animate-bounce" />
                    <div className="w-2 h-2 bg-blue-500 rounded-full animate-bounce delay-100" />
                    <div className="w-2 h-2 bg-blue-500 rounded-full animate-bounce delay-200" />
                  </div>
                </div>
              </div>
            )}
            {/* Inline error banner with a retry via reload() */}
            {error && (
              <div className="bg-red-50 border border-red-200 rounded-lg p-4">
                <p className="text-red-800">Error: {error.message}</p>
                <button
                  onClick={() => reload()}
                  className="mt-2 text-sm text-red-600 hover:text-red-800"
                >
                  Try again
                </button>
              </div>
            )}
            <div ref={messagesEndRef} />
          </div>
        </div>
        <div className="border-t bg-white px-6 py-4">
          <form onSubmit={handleSubmit} className="max-w-3xl mx-auto">
            <div className="flex gap-2">
              <input
                type="text"
                value={input}
                onChange={handleInputChange}
                placeholder="Type your message..."
                className="flex-1 px-4 py-2 border rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500"
                disabled={isLoading}
              />
              {/* While streaming, replace Send with a Stop control */}
              {isLoading ? (
                <button
                  type="button"
                  onClick={stop}
                  className="px-6 py-2 bg-red-500 text-white rounded-lg hover:bg-red-600"
                >
                  Stop
                </button>
              ) : (
                <button
                  type="submit"
                  disabled={!input.trim()}
                  className="px-6 py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600 disabled:opacity-50"
                >
                  Send
                </button>
              )}
            </div>
          </form>
        </div>
      </main>
    </div>
  );
}
Next Steps
Ready to build more with the Vercel AI SDK and OpenRouter? Here are some next steps:
- Explore available models for different use cases
- Learn about LangChain integration for complex workflows
- Check the Vercel AI SDK docs for advanced patterns
- Monitor your API usage in the dashboard