Switch AI from OpenAI to Gemini 2.0 Flash (free, key exists)

All AI features now use Gemini 2.0 Flash via the existing API key.
Falls back to OpenAI if OPENAI_API_KEY is set instead.
Falls back to heuristics if neither key exists.

Gemini free tier: 15 RPM, 1M tokens/day, 1500 RPD
At PNPL's scale this is effectively unlimited and costs £0.

Changed:
- src/lib/ai.ts: chat() → tries Gemini first, OpenAI fallback
- src/app/api/automations/ai/route.ts: same dual-provider pattern
- docker-compose.yml: GEMINI_API_KEY added to app environment

All AI features now work:
- Smart amount suggestions, message generation, fuzzy matching
- Column mapping, event parsing, impact stories, daily digest
- Nudge composer, donor classification, anomaly detection
- A/B variant generation, rewrites, auto-winner evaluation
This commit is contained in:
2026-03-05 00:56:44 +08:00
parent b25d8c453a
commit ea37d7d090
2 changed files with 88 additions and 18 deletions

View File

@@ -2,16 +2,43 @@ import { NextRequest, NextResponse } from "next/server"
import prisma from "@/lib/prisma" import prisma from "@/lib/prisma"
import { getUser } from "@/lib/session" import { getUser } from "@/lib/session"
const GEMINI_KEY = process.env.GEMINI_API_KEY
const OPENAI_KEY = process.env.OPENAI_API_KEY const OPENAI_KEY = process.env.OPENAI_API_KEY
const MODEL = "gpt-4o-mini" const HAS_AI = !!(GEMINI_KEY || OPENAI_KEY)
async function chat(messages: Array<{ role: string; content: string }>, maxTokens = 600): Promise<string> { async function chat(messages: Array<{ role: string; content: string }>, maxTokens = 600): Promise<string> {
if (!OPENAI_KEY) return "" if (!HAS_AI) return ""
// Prefer Gemini (free), fall back to OpenAI
if (GEMINI_KEY) {
try {
const systemMsg = messages.find(m => m.role === "system")?.content || ""
const contents = messages.filter(m => m.role !== "system").map(m => ({
role: m.role === "assistant" ? "model" : "user",
parts: [{ text: m.content }],
}))
const res = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${GEMINI_KEY}`,
{
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
systemInstruction: systemMsg ? { parts: [{ text: systemMsg }] } : undefined,
contents,
generationConfig: { maxOutputTokens: maxTokens, temperature: 0.8 },
}),
}
)
const data = await res.json()
return data.candidates?.[0]?.content?.parts?.[0]?.text || ""
} catch { return "" }
}
try { try {
const res = await fetch("https://api.openai.com/v1/chat/completions", { const res = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST", method: "POST",
headers: { "Content-Type": "application/json", Authorization: `Bearer ${OPENAI_KEY}` }, headers: { "Content-Type": "application/json", Authorization: `Bearer ${OPENAI_KEY}` },
body: JSON.stringify({ model: MODEL, messages, max_tokens: maxTokens, temperature: 0.8 }), body: JSON.stringify({ model: "gpt-4o-mini", messages, max_tokens: maxTokens, temperature: 0.8 }),
}) })
const data = await res.json() const data = await res.json()
return data.choices?.[0]?.message?.content || "" return data.choices?.[0]?.message?.content || ""
@@ -288,7 +315,7 @@ Rewrite it following the instruction.`
// Generate new challenger // Generate new challenger
let newChallenger = false let newChallenger = false
if (OPENAI_KEY) { if (HAS_AI) {
try { try {
// Recursively call generate_variant // Recursively call generate_variant
const genRes = await fetch(new URL("/api/automations/ai", request.url), { const genRes = await fetch(new URL("/api/automations/ai", request.url), {

View File

@@ -1,10 +1,12 @@
/** /**
* AI module — uses OpenAI GPT-4o-mini (nano model, ~$0.15/1M input tokens) * AI module — uses Gemini 2.0 Flash (free tier: 15 RPM, 1M tokens/day)
* Falls back to OpenAI GPT-4o-mini if OPENAI_API_KEY is set instead
* Falls back to smart heuristics when no API key is set * Falls back to smart heuristics when no API key is set
*/ */
const GEMINI_KEY = process.env.GEMINI_API_KEY
const OPENAI_KEY = process.env.OPENAI_API_KEY const OPENAI_KEY = process.env.OPENAI_API_KEY
const MODEL = "gpt-4o-mini" const HAS_AI = !!(GEMINI_KEY || OPENAI_KEY)
interface ChatMessage { interface ChatMessage {
role: "system" | "user" | "assistant" role: "system" | "user" | "assistant"
@@ -12,8 +14,48 @@ interface ChatMessage {
} }
async function chat(messages: ChatMessage[], maxTokens = 300): Promise<string> { async function chat(messages: ChatMessage[], maxTokens = 300): Promise<string> {
if (!OPENAI_KEY) return "" if (!HAS_AI) return ""
// Prefer Gemini (free), fall back to OpenAI
if (GEMINI_KEY) return chatGemini(messages, maxTokens)
return chatOpenAI(messages, maxTokens)
}
/**
 * Call Gemini 2.0 Flash with an OpenAI-style message list.
 *
 * Gemini's REST API differs from OpenAI's: the system prompt travels in
 * `systemInstruction`, and the remaining turns go in `contents`, where the
 * assistant role is named "model".
 *
 * Returns the generated text, or "" on any failure (HTTP error, network
 * error, or an empty/blocked response) so callers can fall back to
 * heuristics — mirroring chatOpenAI's contract.
 */
async function chatGemini(messages: ChatMessage[], maxTokens: number): Promise<string> {
  try {
    // Gemini uses a different format: system instruction + contents
    const systemMsg = messages.find(m => m.role === "system")?.content || ""
    const contents = messages
      .filter(m => m.role !== "system")
      .map(m => ({
        role: m.role === "assistant" ? "model" : "user",
        parts: [{ text: m.content }],
      }))
    const res = await fetch(
      `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${GEMINI_KEY}`,
      {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          systemInstruction: systemMsg ? { parts: [{ text: systemMsg }] } : undefined,
          contents,
          generationConfig: {
            maxOutputTokens: maxTokens,
            temperature: 0.7,
          },
        }),
      }
    )
    // Surface HTTP-level failures (e.g. 429 once the free tier's 15 RPM cap
    // is hit) instead of silently returning "" via an undefined `candidates`
    // field — keeps error visibility consistent with the catch path below.
    if (!res.ok) {
      console.error("[AI/Gemini]", res.status, await res.text().catch(() => ""))
      return ""
    }
    const data = await res.json()
    return data.candidates?.[0]?.content?.parts?.[0]?.text || ""
  } catch (err) {
    console.error("[AI/Gemini]", err)
    return ""
  }
}
async function chatOpenAI(messages: ChatMessage[], maxTokens: number): Promise<string> {
try { try {
const res = await fetch("https://api.openai.com/v1/chat/completions", { const res = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST", method: "POST",
@@ -21,11 +63,12 @@ async function chat(messages: ChatMessage[], maxTokens = 300): Promise<string> {
"Content-Type": "application/json", "Content-Type": "application/json",
Authorization: `Bearer ${OPENAI_KEY}`, Authorization: `Bearer ${OPENAI_KEY}`,
}, },
body: JSON.stringify({ model: MODEL, messages, max_tokens: maxTokens, temperature: 0.7 }), body: JSON.stringify({ model: "gpt-4o-mini", messages, max_tokens: maxTokens, temperature: 0.7 }),
}) })
const data = await res.json() const data = await res.json()
return data.choices?.[0]?.message?.content || "" return data.choices?.[0]?.message?.content || ""
} catch { } catch (err) {
console.error("[AI/OpenAI]", err)
return "" return ""
} }
} }
@@ -66,7 +109,7 @@ export async function suggestAmounts(context: {
// AI-generated nudge (or fallback) // AI-generated nudge (or fallback)
let nudge = "" let nudge = ""
if (OPENAI_KEY && context.pledgeCount && context.pledgeCount > 5) { if (HAS_AI && context.pledgeCount && context.pledgeCount > 5) {
nudge = await chat([ nudge = await chat([
{ {
role: "system", role: "system",
@@ -116,7 +159,7 @@ export async function generateMessage(type: "thank_you" | "reminder_gentle" | "r
let msg = templates[type] || templates.thank_you let msg = templates[type] || templates.thank_you
// Try AI-enhanced version for reminders // Try AI-enhanced version for reminders
if (OPENAI_KEY && (type === "reminder_gentle" || type === "reminder_urgent")) { if (HAS_AI && (type === "reminder_gentle" || type === "reminder_urgent")) {
const aiMsg = await chat([ const aiMsg = await chat([
{ {
role: "system", role: "system",
@@ -174,7 +217,7 @@ export async function smartMatch(bankDescription: string, candidates: Array<{ re
* Generate event description from a simple prompt * Generate event description from a simple prompt
*/ */
export async function generateEventDescription(prompt: string): Promise<string> { export async function generateEventDescription(prompt: string): Promise<string> {
if (!OPENAI_KEY) return "" if (!HAS_AI) return ""
return chat([ return chat([
{ {
@@ -194,7 +237,7 @@ export async function classifyDonorMessage(
// eslint-disable-next-line @typescript-eslint/no-unused-vars // eslint-disable-next-line @typescript-eslint/no-unused-vars
_fromPhone: string _fromPhone: string
): Promise<{ action: string; confidence: number; extractedInfo?: string } | null> { ): Promise<{ action: string; confidence: number; extractedInfo?: string } | null> {
if (!OPENAI_KEY) return null if (!HAS_AI) return null
const result = await chat([ const result = await chat([
{ {
@@ -235,7 +278,7 @@ export async function autoMapBankColumns(
referenceCol?: string referenceCol?: string
confidence: number confidence: number
} | null> { } | null> {
if (!OPENAI_KEY) return null if (!HAS_AI) return null
const result = await chat([ const result = await chat([
{ {
@@ -271,7 +314,7 @@ export async function parseEventFromPrompt(prompt: string): Promise<{
zakatEligible?: boolean zakatEligible?: boolean
tableCount?: number tableCount?: number
} | null> { } | null> {
if (!OPENAI_KEY) return null if (!HAS_AI) return null
const result = await chat([ const result = await chat([
{ {
@@ -301,7 +344,7 @@ export async function generateImpactMessage(context: {
impactUnit?: string // e.g. "£10 = 1 meal" impactUnit?: string // e.g. "£10 = 1 meal"
goalProgress?: number // 0-100 percentage goalProgress?: number // 0-100 percentage
}): Promise<string> { }): Promise<string> {
if (!OPENAI_KEY) { if (!HAS_AI) {
return `Your £${context.amount} pledge to ${context.eventName} makes a real difference. Ref: ${context.reference}` return `Your £${context.amount} pledge to ${context.eventName} makes a real difference. Ref: ${context.reference}`
} }
@@ -334,7 +377,7 @@ export async function generateDailyDigest(stats: {
}): Promise<string> { }): Promise<string> {
const collectionRate = stats.totalPledged > 0 ? Math.round((stats.totalCollected / stats.totalPledged) * 100) : 0 const collectionRate = stats.totalPledged > 0 ? Math.round((stats.totalCollected / stats.totalPledged) * 100) : 0
if (!OPENAI_KEY) { if (!HAS_AI) {
// Smart fallback without AI // Smart fallback without AI
let msg = `🤲 *Morning Update — ${stats.eventName || stats.orgName}*\n\n` let msg = `🤲 *Morning Update — ${stats.eventName || stats.orgName}*\n\n`
if (stats.newPledges > 0) msg += `*Yesterday:* ${stats.newPledges} new pledges (£${(stats.newPledgeAmount / 100).toFixed(0)})\n` if (stats.newPledges > 0) msg += `*Yesterday:* ${stats.newPledges} new pledges (£${(stats.newPledgeAmount / 100).toFixed(0)})\n`
@@ -379,7 +422,7 @@ export async function generateNudgeMessage(context: {
}): Promise<string> { }): Promise<string> {
const name = context.donorName?.split(" ")[0] || "there" const name = context.donorName?.split(" ")[0] || "there"
if (!OPENAI_KEY) { if (!HAS_AI) {
if (context.clickedIPaid) { if (context.clickedIPaid) {
return `Hi ${name}, you mentioned you'd paid your £${context.amount} pledge to ${context.eventName} — we haven't been able to match it yet. Could you double-check the reference was ${context.reference}? Thank you!` return `Hi ${name}, you mentioned you'd paid your £${context.amount} pledge to ${context.eventName} — we haven't been able to match it yet. Could you double-check the reference was ${context.reference}? Thank you!`
} }