AI & ML • February 2, 2026
AI-Powered Development: Building Smarter Apps with LLMs
SN
Shefayet Nayon · 14 min read
preview_render.png
The AI Revolution in Web Development
AI is no longer optional—it's becoming a core feature in modern applications. Let's build AI-powered features that provide real value.
Setting Up Your AI Stack
# Install essential packages
npm install openai langchain @pinecone-database/pinecone
npm install @langchain/openai ai
Building a Smart Chatbot
// app/api/chat/route.ts
import { OpenAI } from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';
// OpenAI client; the key is read from the environment, never hard-coded.
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
// Run this route on the Edge runtime for low-latency streaming.
export const runtime = 'edge';
// Streams a chat completion back to the client as it is generated.
export async function POST(req: Request) {
  const { messages } = await req.json();

  // Prepend the assistant persona before the conversation history.
  const chatHistory = [
    {
      role: 'system',
      content: 'You are a helpful assistant for a web development blog.',
    },
    ...messages,
  ];

  const completion = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    stream: true,
    messages: chatHistory,
  });

  // Adapt the OpenAI token stream into a streaming HTTP response.
  return new StreamingTextResponse(OpenAIStream(completion));
}
// components/ChatBot.tsx
'use client';
import { useChat } from 'ai/react';
// Minimal streaming chat UI backed by the /api/chat route.
export default function ChatBot() {
  const chat = useChat({ api: '/api/chat' });

  return (
    <div className="chat-container">
      <div className="messages">
        {chat.messages.map((m) => (
          <div key={m.id} className={`message ${m.role}`}>
            <p>{m.content}</p>
          </div>
        ))}
      </div>
      <form onSubmit={chat.handleSubmit}>
        <input
          value={chat.input}
          onChange={chat.handleInputChange}
          placeholder="Ask me anything..."
        />
        <button type="submit">Send</button>
      </form>
    </div>
  );
}
Semantic Search with Vector Databases
// lib/embeddings.ts
import { OpenAIEmbeddings } from '@langchain/openai';
import { Pinecone } from '@pinecone-database/pinecone';
// Embedding model client; reads OPENAI_API_KEY from the environment.
const embeddings = new OpenAIEmbeddings({
openAIApiKey: process.env.OPENAI_API_KEY,
});
// Pinecone vector-database client.
const pinecone = new Pinecone({
apiKey: process.env.PINECONE_API_KEY!,
});
// Index holding one vector per blog post.
const index = pinecone.index('blog-posts');
/**
 * Embeds `text` and upserts the resulting vector into the Pinecone index.
 *
 * @param text     Raw document text to embed.
 * @param metadata Stored alongside the vector; `metadata.id` is used as the
 *                 vector id, so it must be present and unique per document.
 *                 (The original typed this `any`, which hid that requirement.)
 */
export async function indexDocument(
  text: string,
  metadata: { id: string } & Record<string, unknown>
) {
  // Generate the embedding for the full document text.
  const embedding = await embeddings.embedQuery(text);

  // Store the vector in Pinecone, keyed by the document id.
  await index.upsert([
    {
      id: metadata.id,
      values: embedding,
      metadata,
    },
  ]);
}
// Returns the `topK` stored documents most similar to `query`.
export async function semanticSearch(query: string, topK = 5) {
  // Embed the query with the same model used at indexing time.
  const queryEmbedding = await embeddings.embedQuery(query);

  // Nearest-neighbour lookup; metadata is included for display/context use.
  const { matches } = await index.query({
    vector: queryEmbedding,
    topK,
    includeMetadata: true,
  });

  return matches;
}
AI-Powered Content Generation
// app/api/generate/route.ts
import { OpenAI } from 'openai';
// OpenAI client; the key comes from the environment, never hard-coded.
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
// Generates content (blog post, code, or summary) from a user prompt.
export async function POST(req: Request) {
  const { prompt, type } = await req.json();

  // One persona per supported content type.
  const systemPrompts = {
    blog: 'You are a professional tech blogger. Write engaging, SEO-optimized content.',
    code: 'You are an expert programmer. Write clean, well-documented code.',
    summary: 'You are a skilled editor. Create concise, accurate summaries.',
  };

  // Validate `type` up front: the original indexed with an unchecked cast,
  // so an unknown type sent an undefined system prompt to the API.
  const systemPrompt = systemPrompts[type as keyof typeof systemPrompts];
  if (!systemPrompt) {
    return Response.json({ error: 'Unsupported generation type' }, { status: 400 });
  }

  const completion = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: prompt },
    ],
    temperature: 0.7,
    max_tokens: 2000,
  });

  return Response.json({
    content: completion.choices[0].message.content,
  });
}
Smart Code Completion
// components/CodeEditor.tsx
'use client';
import { useState } from 'react';
import { useCompletion } from 'ai/react';
// Textarea editor with one-click AI code completion.
export default function CodeEditor() {
  const [code, setCode] = useState('');
  const { complete, completion, isLoading } = useCompletion({ api: '/api/complete' });

  return (
    <div className="editor">
      <textarea
        value={code}
        onChange={(event) => setCode(event.target.value)}
        placeholder="Write your code here..."
      />
      <button onClick={() => complete(code)} disabled={isLoading}>
        {isLoading ? 'Generating...' : 'Complete Code'}
      </button>
      {completion && (
        <div className="suggestion">
          <h3>AI Suggestion:</h3>
          <pre>{completion}</pre>
        </div>
      )}
    </div>
  );
}
RAG (Retrieval-Augmented Generation)
// lib/rag.ts
import { ChatOpenAI } from '@langchain/openai';
import { PromptTemplate } from '@langchain/core/prompts';
import { semanticSearch } from './embeddings';
// Chat model used to answer questions over the retrieved context.
const model = new ChatOpenAI({
modelName: 'gpt-4-turbo-preview',
temperature: 0.7,
});
/**
 * Answers `question` via retrieval-augmented generation:
 * retrieve similar posts, build a context string, then ask the model.
 */
export async function ragQuery(question: string) {
  // 1. Retrieve the three most relevant documents.
  const relevantDocs = await semanticSearch(question, 3);

  // 2. Concatenate their stored text into one context block.
  //    Match metadata can be absent, so guard against it — the
  //    original crashed on `doc.metadata.content` in that case.
  const context = relevantDocs
    .map((doc) => doc.metadata?.content ?? '')
    .join('\n\n');

  // 3. Ask the model, grounding the answer in the retrieved context.
  const prompt = PromptTemplate.fromTemplate(`
Answer the question based on the following context:
Context:
{context}
Question: {question}
Answer:
`);

  const chain = prompt.pipe(model);
  const response = await chain.invoke({ context, question });
  return response.content;
}
Image Generation Integration
// app/api/generate-image/route.ts
import { OpenAI } from 'openai';
// OpenAI client for image generation; key is supplied via the environment.
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
// Generates a single 1024x1024 DALL-E 3 image for the given prompt.
export async function POST(req: Request) {
  const { prompt } = await req.json();

  const image = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    n: 1,
    size: '1024x1024',
    quality: 'hd',
  });

  // Guard against a missing URL in the response instead of silently
  // returning `{ imageUrl: undefined }` to the client.
  const imageUrl = image.data?.[0]?.url;
  if (!imageUrl) {
    return Response.json({ error: 'Image generation failed' }, { status: 502 });
  }

  return Response.json({ imageUrl });
}
Cost Optimization Strategies
// lib/cache.ts
import { Redis } from '@upstash/redis';
// Upstash Redis client used as a completion cache.
const redis = new Redis({
url: process.env.UPSTASH_REDIS_URL!,
token: process.env.UPSTASH_REDIS_TOKEN!,
});
/**
 * Returns the cached completion for `prompt`, or runs `generateFn`
 * and caches its result for 24 hours.
 */
export async function cachedCompletion(
  prompt: string,
  generateFn: () => Promise<string>
) {
  const cacheKey = `completion:${prompt}`;

  // Serve from cache when possible.
  const cached = await redis.get(cacheKey);
  if (cached) {
    return cached as string;
  }

  // Cache miss: generate, then store with a 24-hour TTL.
  const result = await generateFn();
  await redis.setex(cacheKey, 86400, result);
  return result;
}
Rate Limiting
// middleware.ts
import { Ratelimit } from '@upstash/ratelimit';
import { Redis } from '@upstash/redis';
// Sliding-window limiter: at most 10 requests per client per minute.
const ratelimit = new Ratelimit({
redis: Redis.fromEnv(),
limiter: Ratelimit.slidingWindow(10, '1 m'), // 10 requests per minute
});
// Rejects requests that exceed the per-IP rate limit; lets others continue.
export async function middleware(req: Request) {
  // x-forwarded-for may hold a comma-separated proxy chain; the first
  // entry is the original client address.
  const forwarded = req.headers.get('x-forwarded-for');
  const ip = forwarded?.split(',')[0]?.trim() || 'anonymous';

  const { success } = await ratelimit.limit(ip);
  if (!success) {
    return new Response('Rate limit exceeded', { status: 429 });
  }

  // Returning nothing lets the request proceed to its route. The
  // original returned Response.json({ limit, remaining }) here, which
  // would have replaced every successful response with rate-limit stats.
  return;
}
Best Practices
- Always validate AI outputs - Never trust generated content blindly
- Implement proper error handling - APIs can fail
- Use streaming for better UX - Show progress to users
- Cache aggressively - Reduce API costs
- Monitor usage - Track costs and performance
- Implement rate limiting - Prevent abuse
Security Considerations
// Never expose API keys to the client
// ❌ Bad
const openai = new OpenAI({
apiKey: 'sk-...' // Hardcoded key
});
// ✅ Good - Use environment variables
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY
});
// Validate and sanitize user inputs
// Normalizes untrusted user input before it reaches an LLM prompt.
// NOTE(review): stripping '<' and '>' is not real XSS protection —
// rendered output must still be escaped/encoded at the point of display.
function sanitizeInput(input: string): string {
return input
.trim()
.slice(0, 1000) // Limit length
.replace(/[<>]/g, ''); // Remove potential XSS
}
Conclusion
AI-powered development is transforming how we build applications. By integrating LLMs, vector databases, and smart caching, you can create intelligent features that provide real value to users.
Start small, experiment often, and always prioritize user privacy and security. The future of development is AI-augmented, and it's here now.