feat: add AI role prompt configuration and caching for guilds

2025-10-04 01:31:36 -05:00
parent 312ccc7b2a
commit 241c6a5d90
4 changed files with 207 additions and 14 deletions

View File

@@ -23,6 +23,7 @@ model Guild {
name String
prefix String @default("!")
staff Json?
aiRolePrompt String?
// Relations
alliances Alliance[]

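Note: because `aiRolePrompt` is a new column, the Prisma client has to be regenerated (e.g. `npx prisma migrate dev` followed by `prisma generate`) before the field is typed; the `//@ts-ignore` in the service below suggests this had not happened yet. A minimal sketch, assuming the shared `prisma` export used later in this diff, of how the column reads once the client is regenerated:

```ts
// Sketch only — assumes a regenerated Prisma client and the shared `prisma`
// export seen in AIService.ts (the import path may differ per file).
import { prisma } from "../database/prisma";

export async function readAiRolePrompt(guildId: string): Promise<string | null> {
  const guild = await prisma.guild.findUnique({
    where: { id: guildId },
    select: { aiRolePrompt: true },
  });
  // Nullable column: a missing guild row and an unset prompt both yield null.
  return guild?.aiRolePrompt ?? null;
}
```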
View File

@@ -1,6 +1,6 @@
import logger from "../../../core/lib/logger";
import { CommandMessage } from "../../../core/types/commands";
import { TextChannel, DMChannel, ThreadChannel, EmbedBuilder } from "discord.js";
import { TextChannel, DMChannel, ThreadChannel, EmbedBuilder, ChannelType } from "discord.js";
import { aiService } from "../../../core/services/AIService";
/**
@@ -54,6 +54,65 @@ function smartChunkText(text: string, maxLength: number): string[] {
return chunks;
}
function buildMessageMeta(message: any): string {
try {
const parts: string[] = [];
const inGuild = !!message.guild;
// Channel / thread
if (message.channel) {
if (message.channel.type === ChannelType.GuildText) {
parts.push(`Canal: #${message.channel.name}`);
} else if (message.channel.isThread?.()) {
const parent = message.channel.parent as TextChannel | null;
const threadName = message.channel.name;
const parentName = parent?.name ? ` en #${parent.name}` : '';
parts.push(`Hilo: ${threadName}${parentName}`);
} else if (message.channel.type === ChannelType.DM) {
parts.push('DM');
}
}
// Mentions
const userMentions = message.mentions?.users ? Array.from(message.mentions.users.values()) : [];
const roleMentions = message.mentions?.roles ? Array.from(message.mentions.roles.values()) : [];
const channelMentions = message.mentions?.channels ? Array.from(message.mentions.channels.values()) : [];
if (userMentions.length) {
parts.push(`Menciones usuario: ${userMentions.slice(0, 5).map((u: any) => u.username ?? u.tag ?? u.id).join(', ')}`);
}
if (roleMentions.length) {
parts.push(`Menciones rol: ${roleMentions.slice(0, 5).map((r: any) => r.name ?? r.id).join(', ')}`);
}
if (channelMentions.length) {
parts.push(`Menciones canal: ${channelMentions.slice(0, 3).map((c: any) => c.name ?? c.id).join(', ')}`);
}
// Does the message mention the bot?
const botId = message.client?.user?.id;
if (botId && message.mentions?.users?.has?.(botId)) {
parts.push('El mensaje menciona al bot');
}
// Reply / reference
if (message.reference?.messageId) {
parts.push('Es una respuesta a otro mensaje');
}
// Attachments
const attachments = message.attachments ? Array.from(message.attachments.values()) : [];
if (attachments.length) {
const info = attachments.slice(0, 2).map((a: any) => a.name || a.contentType || 'adjunto').join(', ');
parts.push(`Adjuntos: ${info}`);
}
const metaRaw = parts.join(' | ');
return metaRaw.length > 800 ? metaRaw.slice(0, 800) : metaRaw;
} catch {
return '';
}
}
export const command: CommandMessage = {
name: 'ai',
type: "message",
@@ -92,6 +151,9 @@ export const command: CommandMessage = {
return;
}
// Build message metadata to give the model better context
const meta = buildMessageMeta(message);
// Improved typing indicator
const typingInterval = setInterval(() => {
channel.sendTyping().catch(() => {});
@@ -105,7 +167,8 @@ export const command: CommandMessage = {
userId,
prompt,
guildId,
priority
priority,
{ meta }
);
// Build the improved response embed

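For illustration, `buildMessageMeta` above can be exercised with a stubbed message object; the stub below only approximates the discord.js fields the helper actually reads, and all names and IDs are made up:

```ts
// Hypothetical stub — just enough shape for buildMessageMeta to walk.
const fakeMessage = {
  guild: { id: "123456789012345678" },
  channel: { type: ChannelType.GuildText, name: "general" },
  mentions: { users: new Map(), roles: new Map(), channels: new Map() },
  attachments: new Map(),
  reference: null,
  client: { user: { id: "987654321098765432" } },
};

// With no mentions, attachments, or reply reference this yields: "Canal: #general"
console.log(buildMessageMeta(fakeMessage));
```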
View File

@@ -2,6 +2,7 @@ import logger from "../../../core/lib/logger";
import { CommandMessage } from "../../../core/types/commands";
import { ComponentType } from "discord-api-types/v10";
import { hasManageGuildOrStaff } from "../../../core/lib/permissions";
import { aiService } from "../../../core/services/AIService";
function toStringArray(input: unknown): string[] {
if (!Array.isArray(input)) return [];
@@ -32,6 +33,8 @@ export const command: CommandMessage = {
const staffDisplay = staffRoles.length
? staffRoles.map((id) => `<@&${id}>`).join(', ')
: 'Sin staff configurado';
const aiRolePrompt = server?.aiRolePrompt ?? null;
const aiPreview = aiRolePrompt ? (aiRolePrompt.length > 80 ? aiRolePrompt.slice(0, 77) + '…' : aiRolePrompt) : 'No configurado';
// Settings panel built from raw DisplayComponents (Components V2) payloads: 17 = container, 14 = separator, 9 = section, 10 = text display, 2 = button
const settingsPanel = {
@@ -64,6 +67,18 @@ export const command: CommandMessage = {
label: "Configurar"
}
},
{ type: 14, divider: false },
{
type: 9,
components: [ { type: 10, content: `**AI Role Prompt:** ${aiPreview}` } ],
accessory: {
type: 2,
style: 2,
emoji: { name: "🧠" },
custom_id: "open_ai_role_modal",
label: "Configurar"
}
},
{ type: 14, divider: false }
]
};
@@ -164,6 +179,7 @@ export const command: CommandMessage = {
try {
const modalInteraction = await interaction.awaitModalSubmit({ time: 300000 });
const selected = modalInteraction.components.getSelectedRoles('staff_roles');
//@ts-ignore
const roleIds: string[] = selected ? Array.from(selected.keys()).slice(0, 3) : [];
await client.prisma.guild.upsert({
@@ -191,12 +207,65 @@ export const command: CommandMessage = {
}
}
if (interaction.customId === "open_ai_role_modal") {
const currentServer = await client.prisma.guild.findFirst({ where: { id: message.guild!.id } });
const currentAiPrompt = currentServer?.aiRolePrompt ?? '';
const aiModal = {
title: "🧠 Configurar AI Role Prompt",
custom_id: "ai_role_prompt_modal",
components: [
{ type: 1, components: [ { type: 4, custom_id: "ai_role_prompt_input", label: "Prompt de rol (opcional)", style: 2, placeholder: "Ej: Eres un asistente amistoso del servidor, responde en español, evita spoilers...", required: false, max_length: 1500, value: currentAiPrompt.slice(0, 1500) } ] }
]
};
await interaction.showModal(aiModal);
try {
const modalInteraction = await interaction.awaitModalSubmit({
time: 300000,
filter: (m: any) => m.customId === 'ai_role_prompt_modal' && m.user.id === message.author.id
});
const newPromptRaw = modalInteraction.fields.getTextInputValue('ai_role_prompt_input') ?? '';
const newPrompt = newPromptRaw.trim();
const toSave: string | null = newPrompt.length > 0 ? newPrompt : null;
await client.prisma.guild.upsert({
where: { id: message.guild!.id },
create: { id: message.guild!.id, name: message.guild!.name, aiRolePrompt: toSave },
update: { aiRolePrompt: toSave, name: message.guild!.name }
});
// Invalidate the service cache so the change takes effect immediately
aiService.invalidateGuildConfig(message.guild!.id);
const preview = toSave ? (toSave.length > 200 ? toSave.slice(0, 197) + '…' : toSave) : 'Prompt eliminado (sin configuración)';
const successPanel = {
type: 17,
accent_color: 3066993,
components: [
{ type: 10, content: "### ✅ **AI Role Prompt Actualizado**" },
{ type: 14, spacing: 2, divider: true },
{ type: 10, content: `**Nuevo valor:**\n${preview}` }
]
};
const backRow = { type: 1, components: [ { type: 2, style: 2, label: '↩️ Volver a Configuración', custom_id: 'back_to_settings' } ] };
await modalInteraction.update({ components: [successPanel, backRow] });
} catch (e) {
// modal timed out or was cancelled
}
}
// Handle additional buttons
if (interaction.customId === "back_to_settings") {
const updatedServer = await client.prisma.guild.findFirst({ where: { id: message.guild!.id } });
const newCurrentPrefix = updatedServer?.prefix || "!";
const staffRoles2: string[] = toStringArray(updatedServer?.staff);
const staffDisplay2 = staffRoles2.length ? staffRoles2.map((id) => `<@&${id}>`).join(', ') : 'Sin staff configurado';
const aiRolePrompt2 = updatedServer?.aiRolePrompt ?? null;
const aiPreview2 = aiRolePrompt2 ? (aiRolePrompt2.length > 80 ? aiRolePrompt2.slice(0, 77) + '…' : aiRolePrompt2) : 'No configurado';
const updatedSettingsPanel = {
type: 17,
@@ -208,6 +277,8 @@ export const command: CommandMessage = {
{ type: 9, components: [ { type: 10, content: `**Prefix:** \`${newCurrentPrefix}\`` } ], accessory: { type: 2, style: 2, emoji: { name: "⚙️" }, custom_id: "open_prefix_modal", label: "Cambiar" } },
{ type: 14, divider: false },
{ type: 9, components: [ { type: 10, content: `**Staff (roles):** ${staffDisplay2}` } ], accessory: { type: 2, style: 2, emoji: { name: "🛡️" }, custom_id: "open_staff_modal", label: "Configurar" } },
{ type: 14, divider: false },
{ type: 9, components: [ { type: 10, content: `**AI Role Prompt:** ${aiPreview2}` } ], accessory: { type: 2, style: 2, emoji: { name: "🧠" }, custom_id: "open_ai_role_modal", label: "Configurar" } },
{ type: 14, divider: false }
]
};
@@ -220,6 +291,8 @@ export const command: CommandMessage = {
const updatedServer = await client.prisma.guild.findFirst({ where: { id: message.guild!.id } });
const staffRoles3: string[] = toStringArray(updatedServer?.staff);
const staffDisplay3 = staffRoles3.length ? staffRoles3.map((id) => `<@&${id}>`).join(', ') : 'Sin staff configurado';
const aiRolePrompt3 = updatedServer?.aiRolePrompt ?? null;
const aiPreview3 = aiRolePrompt3 ? (aiRolePrompt3.length > 80 ? aiRolePrompt3.slice(0, 77) + '…' : aiRolePrompt3) : 'No configurado';
const originalPanel = {
type: 17,
@@ -231,6 +304,8 @@ export const command: CommandMessage = {
{ type: 9, components: [ { type: 10, content: `**Prefix:** \`${currentPrefix}\`` } ], accessory: { type: 2, style: 2, emoji: { name: "⚙️" }, custom_id: "open_prefix_modal", label: "Cambiar" } },
{ type: 14, divider: false },
{ type: 9, components: [ { type: 10, content: `**Staff (roles):** ${staffDisplay3}` } ], accessory: { type: 2, style: 2, emoji: { name: "🛡️" }, custom_id: "open_staff_modal", label: "Configurar" } },
{ type: 14, divider: false },
{ type: 9, components: [ { type: 10, content: `**AI Role Prompt:** ${aiPreview3}` } ], accessory: { type: 2, style: 2, emoji: { name: "🧠" }, custom_id: "open_ai_role_modal", label: "Configurar" } },
{ type: 14, divider: false }
]
};

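The modal handler above pairs the database write with `aiService.invalidateGuildConfig`, so the next AI request re-reads the prompt instead of waiting out the cache TTL in `AIService` (next file). A condensed sketch of that interaction, with the command plumbing omitted and the `prisma` import path assumed:

```ts
import { prisma } from "../../../core/database/prisma"; // assumed path
import { aiService } from "../../../core/services/AIService";

// Sketch: persist the new role prompt (null clears it), then drop the cached
// entry so the service re-reads the database on the next request for this guild.
async function saveGuildAiPrompt(guildId: string, guildName: string, newPrompt: string | null) {
  await prisma.guild.upsert({
    where: { id: guildId },
    create: { id: guildId, name: guildName, aiRolePrompt: newPrompt },
    update: { aiRolePrompt: newPrompt, name: guildName },
  });
  aiService.invalidateGuildConfig(guildId);
  // Subsequent reads hit the DB once, then are cached again for ~5 minutes.
  return aiService.getGuildAiPrompt(guildId);
}
```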
View File

@@ -1,6 +1,7 @@
import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
import logger from "../lib/logger";
import { Collection } from "discord.js";
import { prisma } from "../database/prisma";
// Improved types for better type safety
interface ConversationContext {
@@ -25,6 +26,8 @@ interface AIRequest {
timestamp: number;
resolve: (value: string) => void;
reject: (error: Error) => void;
aiRolePrompt?: string;
meta?: string;
}
// Utility function for type-safe error handling
@@ -63,7 +66,9 @@ export class AIService {
private processing = false;
private userCooldowns = new Collection<string, number>();
private rateLimitTracker = new Collection<string, { count: number; resetTime: number }>();
// Per-guild configuration cache
private guildPromptCache = new Collection<string, { prompt: string | null; fetchedAt: number }>();
// Improved, scalable configuration
private readonly config = {
maxInputTokens: 1048576, // 1M tokens Gemini 2.5 Flash
@@ -78,7 +83,8 @@ export class AIService {
rateLimitWindow: 60000, // 1 minute
rateLimitMax: 20, // 20 requests per minute per user
cleanupInterval: 5 * 60 * 1000, // clean up every 5 minutes
};
guildConfigTTL: 5 * 60 * 1000, // 5-minute cache TTL for guild prompts
} as const;
constructor() {
const apiKey = process.env.GOOGLE_AI_API_KEY;
@@ -91,6 +97,34 @@ export class AIService {
this.startQueueProcessor();
}
/**
* Get the AI role prompt for a guild, with caching
*/
public async getGuildAiPrompt(guildId: string): Promise<string | null> {
try {
const cached = this.guildPromptCache.get(guildId);
const now = Date.now();
if (cached && (now - cached.fetchedAt) < this.config.guildConfigTTL) {
return cached.prompt;
}
const guild = await prisma.guild.findUnique({ where: { id: guildId }, select: { aiRolePrompt: true } });
//@ts-ignore
const prompt = guild?.aiRolePrompt ?? null;
this.guildPromptCache.set(guildId, { prompt, fetchedAt: now });
return prompt;
} catch (e) {
logger.warn(`No se pudo cargar aiRolePrompt para guild ${guildId}: ${getErrorMessage(e)}`);
return null;
}
}
/**
* Invalidate a guild's cached configuration (call after saving changes)
*/
public invalidateGuildConfig(guildId: string): void {
this.guildPromptCache.delete(guildId);
}
/**
* Process an AI request asynchronously and in a controlled way
*/
@@ -98,7 +132,8 @@ export class AIService {
userId: string,
prompt: string,
guildId?: string,
priority: 'low' | 'normal' | 'high' = 'normal'
priority: 'low' | 'normal' | 'high' = 'normal',
options?: { aiRolePrompt?: string; meta?: string }
): Promise<string> {
// Exhaustive validation
if (!prompt?.trim()) {
@@ -132,7 +167,9 @@ export class AIService {
priority,
timestamp: Date.now(),
resolve,
reject
reject,
aiRolePrompt: options?.aiRolePrompt,
meta: options?.meta,
};
// Insert into the queue according to priority
@@ -192,11 +229,7 @@ export class AIService {
private async processRequest(request: AIRequest): Promise<void> {
try {
const { userId, prompt, guildId } = request;
// Get or create the conversation context
const context = this.getOrCreateContext(userId, guildId);
// Check whether this is an image request
const isImageRequest = this.detectImageRequest(prompt);
if (isImageRequest && context.imageRequests >= this.config.maxImageRequests) {
const error = new Error(`Has alcanzado el límite de ${this.config.maxImageRequests} solicitudes de imagen. La conversación se ha reiniciado.`);
@@ -211,9 +244,21 @@ export class AIService {
logger.info(`Conversación reseteada para usuario ${userId} por límite de tokens`);
}
// Resolve the AI role prompt (from the request options or the DB)
let effectiveAiRolePrompt = request.aiRolePrompt;
if (effectiveAiRolePrompt === undefined && guildId) {
effectiveAiRolePrompt = (await this.getGuildAiPrompt(guildId)) ?? undefined;
}
// Build the optimized system prompt
const systemPrompt = this.buildSystemPrompt(prompt, context, isImageRequest);
const systemPrompt = this.buildSystemPrompt(
prompt,
context,
isImageRequest,
effectiveAiRolePrompt,
request.meta
);
// Use the correct Google Generative AI API
const model = this.genAI.getGenerativeModel({
model: "gemini-2.5-flash-preview-09-2025",
@@ -268,13 +313,22 @@ export class AIService {
/**
* Optimized construction of the system prompt
*/
private buildSystemPrompt(userPrompt: string, context: ConversationContext, isImageRequest: boolean): string {
private buildSystemPrompt(
userPrompt: string,
context: ConversationContext,
isImageRequest: boolean,
aiRolePrompt?: string,
meta?: string
): string {
const recentMessages = context.messages
.slice(-4) // only the last 4 messages
.map(msg => `${msg.role === 'user' ? 'Usuario' : 'Asistente'}: ${msg.content}`)
.join('\n');
return `Eres una hermana mayor kawaii y cariñosa que habla por Discord. Responde de manera natural, útil y concisa.
const roleBlock = aiRolePrompt && aiRolePrompt.trim() ? `\n## Rol del sistema (servidor):\n${aiRolePrompt.trim().slice(0, 1200)}\n` : '';
const metaBlock = meta && meta.trim() ? `\n## Contexto del mensaje:\n${meta.trim().slice(0, 800)}\n` : '';
return `Eres una hermana mayor kawaii y cariñosa que habla por Discord. Responde de manera natural, útil y concisa.${roleBlock}${metaBlock}
## Reglas Discord:
- USA **markdown de Discord**: **negrita**, *cursiva*, \`código\`, \`\`\`bloques\`\`\`