Integrating OpenAI GPT with Laravel: Complete Implementation Guide
Laravel's elegant architecture makes it perfect for building AI-powered applications. Here's how to integrate OpenAI's GPT models into your Laravel projects.
Why Laravel + AI?
Laravel provides the perfect foundation for AI applications:
- Robust architecture for handling AI API calls
- Queue system for background AI processing
- Cache layer for optimizing API responses
- Eloquent ORM for managing AI-generated content
Project Setup
1. Installation
composer require openai-php/client
composer require guzzlehttp/guzzle
2. Configuration
Create a configuration file for OpenAI:
// config/openai.php
// Central configuration for the OpenAI integration. Secrets come from the
// environment (.env) so they never end up in version control.
return [
    'api_key' => env('OPENAI_API_KEY'),
    'organization' => env('OPENAI_ORGANIZATION'),
    // Seconds to wait for an API response before giving up.
    'request_timeout' => 30,
    // Model used when a caller does not specify one explicitly.
    'default_model' => 'gpt-4',
];
3. Service Provider
// app/Services/OpenAIService.php

namespace App\Services;

use OpenAI\Laravel\Facades\OpenAI;
use Illuminate\Support\Facades\Cache;
use Illuminate\Support\Facades\Log;

class OpenAIService
{
    /**
     * Generate text for the given prompt, caching identical requests for one hour.
     *
     * @param string $prompt  Prompt sent as a single user message to the chat endpoint.
     * @param array  $options Optional overrides: 'model', 'max_tokens', 'temperature'.
     *
     * @return string The first choice's message content.
     *
     * @throws \Exception Re-thrown after logging when the API call fails.
     */
    public function generateText(string $prompt, array $options = []): string
    {
        // Identical prompt + options always map to the same cache key, so
        // repeated requests never hit the (billed) API a second time.
        $cacheKey = 'openai_' . md5($prompt . serialize($options));

        return Cache::remember($cacheKey, 3600, function () use ($prompt, $options) {
            try {
                $response = OpenAI::chat()->create([
                    'model' => $options['model'] ?? config('openai.default_model'),
                    'messages' => [
                        ['role' => 'user', 'content' => $prompt],
                    ],
                    'max_tokens' => $options['max_tokens'] ?? 1000,
                    'temperature' => $options['temperature'] ?? 0.7,
                ]);

                return $response->choices[0]->message->content;
            } catch (\Exception $e) {
                // Must be \Exception: inside this namespace a bare `Exception`
                // would resolve to App\Services\Exception, which does not exist.
                Log::error('OpenAI API Error: ' . $e->getMessage());
                throw $e;
            }
        });
    }
}
Use Case Implementations
1. Content Generation
// app/Http/Controllers/ContentController.php

namespace App\Http\Controllers;

use App\Services\OpenAIService;
use App\Models\Article;
use Illuminate\Http\Request;

class ContentController extends Controller
{
    protected $openAI;

    public function __construct(OpenAIService $openAI)
    {
        $this->openAI = $openAI;
    }

    /**
     * Generate an AI-written draft article for the authenticated user.
     *
     * Validates topic/style/length, builds a prompt, asks the AI service for
     * content, and stores the result as a draft Article owned by the caller.
     */
    public function generateArticle(Request $request)
    {
        $request->validate([
            'topic' => 'required|string|max:255',
            'style' => 'required|in:professional,casual,technical',
            'length' => 'required|in:short,medium,long',
        ]);

        $prompt = $this->buildContentPrompt(
            $request->topic,
            $request->style,
            $request->length
        );

        $content = $this->openAI->generateText($prompt, [
            'max_tokens' => $this->getTokenLimit($request->length),
            'temperature' => 0.8,
        ]);

        $article = Article::create([
            'title' => $request->topic,
            'content' => $content,
            'status' => 'draft',
            'user_id' => auth()->id(),
        ]);

        return response()->json($article);
    }

    /**
     * Compose the generation prompt from the validated style and length.
     * Both lookup keys are guaranteed by the `in:` validation rules above.
     */
    private function buildContentPrompt(string $topic, string $style, string $length): string
    {
        $styleInstructions = [
            'professional' => 'Write in a professional, authoritative tone',
            'casual' => 'Write in a friendly, conversational tone',
            'technical' => 'Write with technical depth and precision',
        ];

        $lengthInstructions = [
            'short' => 'Write a concise article (300-500 words)',
            'medium' => 'Write a comprehensive article (800-1200 words)',
            'long' => 'Write an in-depth article (1500-2500 words)',
        ];

        return sprintf(
            "%s about '%s'. %s. Include relevant examples and actionable insights.",
            $styleInstructions[$style],
            $topic,
            $lengthInstructions[$length]
        );
    }

    /**
     * Token budget per requested length, roughly 1.5 tokens per word of the
     * upper word-count bound used in buildContentPrompt().
     *
     * NOTE(review): this method was referenced but never defined in the
     * original snippet — added so the controller is complete.
     */
    private function getTokenLimit(string $length): int
    {
        $limits = [
            'short' => 800,
            'medium' => 1800,
            'long' => 3800,
        ];

        return $limits[$length] ?? 1000;
    }
}
2. Customer Support Chatbot
// app/Http/Controllers/ChatController.php

namespace App\Http\Controllers;

use App\Services\OpenAIService;
use App\Models\Conversation;
use Illuminate\Http\Request;

class ChatController extends Controller
{
    protected $openAI;

    public function __construct(OpenAIService $openAI)
    {
        $this->openAI = $openAI;
    }

    /**
     * Handle one turn of a support conversation: store the user message,
     * build context from recent history, ask the AI for a reply, persist it,
     * and return the reply plus the conversation id for follow-up requests.
     */
    public function chat(Request $request)
    {
        $request->validate([
            'message' => 'required|string',
            'conversation_id' => 'nullable|exists:conversations,id',
        ]);

        $conversation = $this->getOrCreateConversation($request->conversation_id);

        // Add user message to conversation
        $conversation->messages()->create([
            'role' => 'user',
            'content' => $request->message,
        ]);

        // Build context from conversation history
        $context = $this->buildConversationContext($conversation);

        $aiResponse = $this->openAI->generateText($context, [
            'temperature' => 0.7,
            'max_tokens' => 500,
        ]);

        // Store AI response
        $conversation->messages()->create([
            'role' => 'assistant',
            'content' => $aiResponse,
        ]);

        return response()->json([
            'response' => $aiResponse,
            'conversation_id' => $conversation->id,
        ]);
    }

    /**
     * Fetch the caller's existing conversation or start a new one.
     *
     * NOTE(review): this method was referenced but never defined in the
     * original snippet — added so the controller is complete. Confirm the
     * Conversation model's fillable columns match.
     */
    private function getOrCreateConversation(?int $conversationId): Conversation
    {
        if ($conversationId !== null) {
            return Conversation::findOrFail($conversationId);
        }

        return Conversation::create(['user_id' => auth()->id()]);
    }

    /**
     * Flatten the last 10 messages (oldest first) into a plain-text prompt,
     * prefixed with the support-assistant system instructions.
     */
    private function buildConversationContext(Conversation $conversation): string
    {
        $systemPrompt = "You are a helpful customer support assistant. ";
        $systemPrompt .= "Be friendly, professional, and provide accurate information.";

        // latest() + reverse() yields the 10 most recent messages in
        // chronological order.
        $messages = $conversation->messages()
            ->latest()
            ->limit(10)
            ->get()
            ->reverse();

        $context = $systemPrompt . "\nConversation:\n";

        foreach ($messages as $message) {
            $context .= ucfirst($message->role) . ": " . $message->content . "\n";
        }

        return $context;
    }
}
3. Background Processing with Queues
// app/Jobs/GenerateContentJob.php

namespace App\Jobs;

use App\Services\OpenAIService;
use App\Models\ContentRequest;
use Illuminate\Bus\Queueable;
use Illuminate\Contracts\Queue\ShouldQueue;
use Illuminate\Foundation\Bus\Dispatchable;
use Illuminate\Queue\InteractsWithQueue;
use Illuminate\Queue\SerializesModels;

/**
 * Queued job that generates content for a ContentRequest in the background,
 * tracking its lifecycle via the model's status column
 * (processing -> completed | failed).
 */
class GenerateContentJob implements ShouldQueue
{
    use Dispatchable, InteractsWithQueue, Queueable, SerializesModels;

    protected $contentRequest;

    public function __construct(ContentRequest $contentRequest)
    {
        $this->contentRequest = $contentRequest;
    }

    /**
     * Execute the job. OpenAIService is resolved from the container.
     */
    public function handle(OpenAIService $openAI)
    {
        try {
            $this->contentRequest->update(['status' => 'processing']);

            $content = $openAI->generateText(
                $this->contentRequest->prompt,
                $this->contentRequest->options
            );

            $this->contentRequest->update([
                'status' => 'completed',
                'generated_content' => $content,
                'completed_at' => now(),
            ]);

            // Notify user via email or notification
            // $this->contentRequest->user->notify(new ContentGeneratedNotification($this->contentRequest));
        } catch (\Exception $e) {
            // \Exception (global namespace): a bare `Exception` here would
            // resolve to App\Jobs\Exception, which does not exist.
            $this->contentRequest->update([
                'status' => 'failed',
                'error_message' => $e->getMessage(),
            ]);

            // Mark the job failed so it is not retried with the same input.
            $this->fail($e);
        }
    }
}
Advanced Features
1. Token Management
// app/Services/TokenManagerService.php

namespace App\Services;

use App\Models\User;

/**
 * Tracks and enforces per-user monthly AI token quotas.
 *
 * Relies on a `ai_token_usage` hasMany relation on User and a
 * `subscription_plan->token_limit` attribute — confirm both exist on the
 * User model.
 */
class TokenManagerService
{
    /**
     * Total tokens the user has consumed in the current calendar month.
     */
    public function getUserTokenUsage(User $user): int
    {
        return $user->ai_token_usage()
            ->whereMonth('created_at', now()->month)
            ->sum('tokens_used');
    }

    /**
     * Whether the user's plan allows a request estimated at $estimatedTokens.
     */
    public function canUserMakeRequest(User $user, int $estimatedTokens): bool
    {
        $currentUsage = $this->getUserTokenUsage($user);
        $monthlyLimit = $user->subscription_plan->token_limit;

        return ($currentUsage + $estimatedTokens) <= $monthlyLimit;
    }

    /**
     * Record tokens consumed by a completed request.
     */
    public function recordTokenUsage(User $user, int $tokensUsed): void
    {
        $user->ai_token_usage()->create([
            'tokens_used' => $tokensUsed,
            'created_at' => now(),
        ]);
    }
}
2. Rate Limiting
// app/Http/Middleware/AIRateLimit.php

namespace App\Http\Middleware;

use Closure;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\RateLimiter;

/**
 * Per-user rate limit for AI endpoints: at most 10 requests per hour.
 * Assumes the request is authenticated (register after the auth middleware).
 */
class AIRateLimit
{
    public function handle(Request $request, Closure $next)
    {
        $key = 'ai-requests:' . $request->user()->id;

        if (RateLimiter::tooManyAttempts($key, 10)) {
            return response()->json([
                'error' => 'Too many AI requests. Please try again later.',
                // Seconds until the window resets, so clients can back off.
                'retry_after' => RateLimiter::availableIn($key),
            ], 429);
        }

        RateLimiter::hit($key, 3600); // 1 hour window

        return $next($request);
    }
}
Performance Optimization
1. Response Caching
// Cache frequently requested content per user for 24 hours.
// NOTE: tagged caching requires a taggable store (redis or memcached);
// the file and database cache drivers do not support tags.
$cachedResponse = Cache::tags(['ai-content', 'user:' . $userId])
    ->remember($cacheKey, 86400, function () use ($prompt) {
        return $this->openAI->generateText($prompt);
    });
2. Streaming Responses
// For real-time chat interfaces: stream tokens to the browser as
// Server-Sent Events instead of waiting for the full completion.
public function streamChat(Request $request)
{
    return response()->stream(function () use ($request) {
        $stream = OpenAI::chat()->createStreamed([
            'model' => 'gpt-4',
            'messages' => $request->messages,
        ]);

        foreach ($stream as $response) {
            // SSE framing: each event is a "data:" line terminated by a
            // BLANK line. A single newline (as in the original) makes
            // EventSource clients buffer events indefinitely.
            echo "data: " . json_encode($response) . "\n\n";

            if (ob_get_level() > 0) {
                ob_flush(); // only flush when an output buffer is active
            }
            flush();
        }
    }, 200, [
        'Content-Type' => 'text/event-stream',
        'Cache-Control' => 'no-cache',
        'X-Accel-Buffering' => 'no', // keep nginx from buffering the stream
    ]);
}
Best Practices
1. Error Handling
- Implement proper exception handling
- Provide fallback responses
- Log errors for debugging
2. Security
- Validate and sanitize all inputs
- Implement proper authentication
- Monitor for abuse and unusual patterns
3. Cost Management
- Set usage limits per user
- Implement token counting
- Use caching to reduce API calls
Testing
// tests/Feature/OpenAIIntegrationTest.php

namespace Tests\Feature;

use Tests\TestCase;
use App\Services\OpenAIService;
use Illuminate\Foundation\Testing\RefreshDatabase;

class OpenAIIntegrationTest extends TestCase
{
    use RefreshDatabase;

    /**
     * The content endpoint should return the stored article. The OpenAI
     * service is mocked so the test never makes a real (billed) API call.
     */
    public function test_can_generate_content()
    {
        $this->mock(OpenAIService::class, function ($mock) {
            $mock->shouldReceive('generateText')
                ->once()
                ->andReturn('Generated content');
        });

        $response = $this->postJson('/api/generate-content', [
            'topic' => 'Laravel Testing',
            'style' => 'technical',
            'length' => 'medium',
        ]);

        $response->assertStatus(200)
            ->assertJsonStructure(['content']);
    }
}
Conclusion
Integrating OpenAI with Laravel opens up endless possibilities for creating intelligent applications. The key is to implement proper error handling, caching, and rate limiting while maintaining good user experience.
Ready to add AI capabilities to your Laravel application? Let's discuss your specific requirements and build a custom AI integration that enhances your application's functionality.