LangChain with Next.js
Next.js is a strong fit for LangChain applications. The App Router provides server-side capabilities for running LangChain securely, while React Server Components and Route Handlers enable powerful streaming patterns.
🚀 Next.js + LangChain Benefits
- Server Components: Run LangChain on the server safely
- Route Handlers: Create streaming API endpoints
- Edge Runtime: Deploy to edge for low latency
- Built-in Streaming: Native support for streaming responses
Project Setup
Create a Next.js App Router project and install LangChain plus the Vercel AI SDK.
# Create Next.js app
npx create-next-app@latest my-ai-app --typescript --tailwind --app
cd my-ai-app
# Install LangChain and AI SDK
npm install langchain @langchain/openai @langchain/core
npm install ai # Vercel AI SDK
Environment Variables
Keep provider keys in .env.local so they are only available on the server.
# .env.local
OPENAI_API_KEY=sk-your-openai-key
# Optional: for other providers
ANTHROPIC_API_KEY=sk-ant-your-key
Basic API Route
A simple Route Handler that accepts a message, calls the model, and returns JSON.
// app/api/chat/route.ts
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
import { NextResponse } from "next/server";

// Instantiated once at module scope so the client is reused across requests.
const model = new ChatOpenAI({
modelName: "gpt-4",
temperature: 0.7,
});

/**
 * POST /api/chat — accepts { message: string } and returns { content }.
 * Responds 400 for a missing/empty message, 500 on model failure.
 */
export async function POST(req: Request) {
try {
const { message } = await req.json();
// Reject invalid input up front instead of spending a model call on it.
if (typeof message !== "string" || message.trim() === "") {
return NextResponse.json(
{ error: "Request body must include a non-empty 'message' string" },
{ status: 400 }
);
}
const response = await model.invoke([
new HumanMessage(message),
]);
return NextResponse.json({
content: response.content,
});
} catch (error) {
// Log server-side so failures are diagnosable; keep details out of the client response.
console.error("chat route failed:", error);
return NextResponse.json(
{ error: "Failed to generate response" },
{ status: 500 }
);
}
}
Streaming API Route
Enable real-time token streaming for better UX:
Stream chunks from the model and send them as a readable response to the client.
// app/api/chat/stream/route.ts
import { ChatOpenAI } from "@langchain/openai";
import { AIMessage, HumanMessage, SystemMessage } from "@langchain/core/messages";
export const runtime = 'edge'; // Optional: use edge runtime

/**
 * POST /api/chat/stream — streams model tokens back as plain text.
 * Expects { messages: { role, content }[] } in the request body.
 */
export async function POST(req: Request) {
const { messages } = await req.json();
const model = new ChatOpenAI({
modelName: "gpt-4",
streaming: true,
});
// Convert to LangChain messages. Assistant turns must become AIMessage —
// mapping them to SystemMessage would corrupt the conversation history.
const langchainMessages = messages.map((m: { role: string; content: string }) => {
switch (m.role) {
case 'user':
return new HumanMessage(m.content);
case 'assistant':
return new AIMessage(m.content);
default:
return new SystemMessage(m.content);
}
});
// Create streaming response
const encoder = new TextEncoder();
const stream = new ReadableStream({
async start(controller) {
try {
const streamResponse = await model.stream(langchainMessages);
for await (const chunk of streamResponse) {
// chunk.content may be a string or structured parts; only forward text.
const text = chunk.content;
if (typeof text === 'string') {
controller.enqueue(encoder.encode(text));
}
}
controller.close();
} catch (err) {
// Surface the failure so the client's reader rejects instead of hanging forever.
controller.error(err);
}
},
});
return new Response(stream, {
headers: {
'Content-Type': 'text/plain; charset=utf-8',
'Transfer-Encoding': 'chunked',
},
});
}
Using Vercel AI SDK (Recommended)
The Vercel AI SDK simplifies streaming significantly:
Use the LangChain adapter to convert model streams into a response the SDK understands.
// app/api/chat/route.ts
import { ChatOpenAI } from "@langchain/openai";
import { LangChainAdapter, Message } from "ai";

export const runtime = 'edge';

/**
 * POST handler: streams a chat completion through the Vercel AI SDK's
 * LangChain adapter, which converts the LangChain model stream into a
 * data-stream response the useChat hook consumes directly.
 */
export async function POST(req: Request) {
const { messages }: { messages: Message[] } = await req.json();

const chatModel = new ChatOpenAI({
modelName: "gpt-4",
streaming: true,
});

// LangChain coerces plain { role, content } objects into its message types.
const history = messages.map(({ role, content }) => ({ role, content }));
const stream = await chatModel.stream(history);

return LangChainAdapter.toDataStreamResponse(stream);
}
Client Component
The client uses useChat to manage input and render streamed responses.
// app/components/Chat.tsx
'use client';
import { useChat } from 'ai/react';

/**
 * Chat UI backed by the Vercel AI SDK's useChat hook, which manages the
 * message list, the input field state, and streaming updates from the
 * default /api/chat endpoint.
 */
export default function Chat() {
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat();

return (
<div className="flex flex-col h-screen max-w-2xl mx-auto p-4">
{/* Scrollable transcript */}
<div className="flex-1 overflow-y-auto space-y-4">
{messages.map((msg) => {
const isUser = msg.role === 'user';
const bubble = isUser ? 'bg-blue-100 ml-auto' : 'bg-gray-100';
return (
<div key={msg.id} className={`p-4 rounded-lg ${bubble} max-w-[80%]`}>
<p className="font-semibold text-sm mb-1">{isUser ? 'You' : 'AI'}</p>
<p className="whitespace-pre-wrap">{msg.content}</p>
</div>
);
})}
</div>
{/* Input row; the button is disabled while a response is streaming */}
<form onSubmit={handleSubmit} className="mt-4">
<div className="flex gap-2">
<input
value={input}
onChange={handleInputChange}
placeholder="Ask something..."
className="flex-1 p-3 border rounded-lg"
/>
<button
type="submit"
disabled={isLoading}
className="px-6 py-3 bg-blue-600 text-white rounded-lg disabled:opacity-50"
>
Send
</button>
</div>
</form>
</div>
);
}
Server Actions
Use Server Actions for simple, non-streaming operations:
Actions are ideal for tasks like summarization or translation where streaming isn't required.
// app/actions.ts
'use server';
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";

// Shared model instance, created once per server module load.
const model = new ChatOpenAI({ modelName: "gpt-4" });

/** Run a single-turn prompt and return the model's text content. */
async function runPrompt(prompt: string): Promise<string> {
const response = await model.invoke([new HumanMessage(prompt)]);
return response.content as string;
}

/** Summarize the given text with a single model call. */
export async function generateSummary(text: string) {
return runPrompt(`Summarize this text: ${text}`);
}

/** Translate the given text into the target language. */
export async function translateText(text: string, language: string) {
return runPrompt(`Translate to ${language}: ${text}`);
}
Using Server Actions
Call actions from client forms and manage UI state around the async result.
// app/page.tsx
'use client';
import { useState } from 'react';
import { generateSummary } from './actions';

/**
 * Home page: submits the textarea contents to the generateSummary
 * Server Action and renders the returned summary.
 */
export default function Page() {
const [summary, setSummary] = useState('');
const [loading, setLoading] = useState(false);

async function handleSummarize(formData: FormData) {
setLoading(true);
try {
const text = formData.get('text') as string;
const result = await generateSummary(text);
setSummary(result);
} finally {
// Reset even when the action throws, so the button isn't stuck disabled.
setLoading(false);
}
}

return (
<form action={handleSummarize}>
<textarea name="text" className="w-full p-2 border" />
<button type="submit" disabled={loading}>
{loading ? 'Summarizing...' : 'Summarize'}
</button>
{summary && <p>{summary}</p>}
</form>
);
}
RAG with Next.js
Combine a retriever with a prompt and model to answer questions from your document store.
// app/api/rag/route.ts
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { RunnableSequence, RunnablePassthrough } from "@langchain/core/runnables";
// Initialize once (consider caching in production)
// Module-level cache: the store survives across requests within one server
// process, so embeddings are only computed on the first request.
// NOTE(review): concurrent first requests could both build the store — harmless
// here, but worth a lock for an expensive loader.
let vectorStore: MemoryVectorStore | null = null;
// Lazily build (or return the cached) in-memory vector store.
async function getVectorStore() {
if (!vectorStore) {
// Load your documents here
vectorStore = await MemoryVectorStore.fromTexts(
["Your documents...", "More content..."],
[{}, {}],
new OpenAIEmbeddings()
);
}
return vectorStore;
}
// POST /api/rag — accepts { question: string }, retrieves the top-k matching
// documents, and answers from that context only.
export async function POST(req: Request) {
const { question } = await req.json();
const store = await getVectorStore();
// k: 4 — retrieve the four nearest documents for the question.
const retriever = store.asRetriever({ k: 4 });
const model = new ChatOpenAI({ modelName: "gpt-4" });
const prompt = ChatPromptTemplate.fromTemplate(`
Answer based on context:
{context}
Question: {question}
`);
// The first step is a map: both entries receive the chain input (the question
// string). The retriever fetches documents and pipe() joins their text into a
// single context string; RunnablePassthrough forwards the question unchanged.
// The resulting { context, question } object then fills the prompt template.
const chain = RunnableSequence.from([
{
context: retriever.pipe(docs => docs.map(d => d.pageContent).join("\n")),
question: new RunnablePassthrough(),
},
prompt,
model,
// StringOutputParser extracts plain text from the model's message output.
new StringOutputParser(),
]);
const answer = await chain.invoke(question);
return Response.json({ answer });
}
Project Structure
Organize API routes, shared LangChain utilities, and UI components in the App Router.
my-ai-app/
├── app/
│   ├── api/
│   │   ├── chat/
│   │   │   └── route.ts     # Chat API
│   │   └── rag/
│   │       └── route.ts     # RAG API
│   ├── components/
│   │   └── Chat.tsx         # Chat UI
│   ├── lib/
│   │   └── langchain.ts     # LangChain setup
│   ├── actions.ts           # Server Actions
│   ├── page.tsx             # Home page
│   └── layout.tsx
├── .env.local               # API keys
└── package.json
💡 Key Takeaways
- Use Route Handlers (app/api) for LangChain endpoints
- The Vercel AI SDK simplifies streaming significantly
- Server Actions work well for non-streaming operations
- The Edge runtime provides lower latency for global users
- Keep API keys in .env.local; never expose them to the client
📚 Learn More
-
LangChain Next.js Guide →
Official guide for Next.js integration.
-
Vercel AI SDK + Next.js →
Best practices for AI in Next.js apps.