@@ -1,84 +1,110 @@
-import { Injectable } from '@nestjs/common';
+import { Injectable, Inject, forwardRef } from '@nestjs/common';
 import { ChatOpenAI } from '@langchain/openai';
+import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
+import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+import { StateGraph, START, END } from "@langchain/langgraph";
+import { BaseMessage, HumanMessage } from "@langchain/core/messages";
 import { FFBVectorService } from './ffb-vector.service';
-import { z } from "zod";
-import { StateGraph, START, END, Annotation } from "@langchain/langgraph";
-import { BaseMessage, HumanMessage, AIMessage } from "@langchain/core/messages";
-import { forwardRef, Inject } from '@nestjs/common';
 import { FFBGateway } from '../ffb.gateway';
-import { ThoughtPayload } from '../ffb-production.schema';
-
-// State Definition using Annotation
-const AgentState = Annotation.Root({
-  messages: Annotation<BaseMessage[]>({
-    reducer: (x, y) => x.concat(y),
-    default: () => [],
-  }),
-  activeIntent: Annotation<string>({
-    reducer: (x, y) => y ?? x ?? "General",
-    default: () => "General",
-  }),
-  entityStore: Annotation<Record<string, any>>({
-    reducer: (x, y) => ({ ...x, ...y }),
-    default: () => ({}),
-  }),
-  actionPayload: Annotation<any>({
-    reducer: (x, y) => y ?? x,
-    default: () => null,
-  }),
-  finalResponse: Annotation<string>({
-    reducer: (x, y) => y ?? x,
-  }),
-  socketId: Annotation<string>({
-    reducer: (x, y) => y ?? x,
-    default: () => "default",
-  })
-});
+
+// Config & Utils
+import { AgentState } from './config/agent-state';
+import { SessionManager } from './utils/session-manager';
+
+// Nodes
+import { entryNode } from './nodes/entry.node';
+import { routerNode } from './nodes/router.node';
+import { guidanceNode } from './nodes/guidance.node';
+import { metaNode } from './nodes/meta.node';
+import { refusalNode } from './nodes/refusal.node';
+import { vectorSearchNode } from './nodes/vector-search.node';
+import { aggregationNode } from './nodes/aggregation.node';
+import { synthesisNode } from './nodes/synthesis.node';
 @Injectable()
 export class FFBLangChainService {
-  private model: ChatOpenAI;
+  private openaiModel: BaseChatModel;
+  private geminiModel: BaseChatModel;
   private graph: any;
-  private sessions: Map<string, BaseMessage[]> = new Map();
+  private sessionManager: SessionManager;
   constructor(
     private readonly vectorService: FFBVectorService,
     @Inject(forwardRef(() => FFBGateway))
     private readonly gateway: FFBGateway
   ) {
-    this.model = new ChatOpenAI({
-      modelName: 'gpt-4o',
+    this.openaiModel = new ChatOpenAI({
+      modelName: 'gpt-4o-mini',
       apiKey: process.env.OPENAI_API_KEY,
       temperature: 0
     });
+
+    this.geminiModel = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.5-flash',
+      apiKey: process.env.GOOGLE_API_KEY,
+      temperature: 0
+    });
+
+    this.sessionManager = new SessionManager();
     this.initGraph();
   }
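+  // Model selection helpers: the provider chosen via switchModel() is stored per
+  // session in SessionManager; getModel() falls back to OpenAI when none is set.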
+  private getModel(socketId: string): BaseChatModel {
+    const provider = this.sessionManager.getModelProvider(socketId);
+    return provider === 'gemini' ? this.geminiModel : this.openaiModel;
+  }
+
+  switchModel(socketId: string, provider: 'openai' | 'gemini') {
+    this.sessionManager.setModelProvider(socketId, provider);
+  }
+
+  getCurrentModel(socketId: string) {
+    const provider = this.sessionManager.getModelProvider(socketId);
+    return {
+      provider: provider,
+      modelName: provider === 'gemini' ? 'gemini-2.5-flash' : 'gpt-4o-mini'
+    };
+  }
+
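+  // Graph wiring: entry classification routes to guidance, meta, or refusal handlers
+  // (or on to the intent router), and the data-bearing paths converge on synthesis.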
   private initGraph() {
     const graph = new StateGraph(AgentState)
-      .addNode("router_node", this.routerNode.bind(this))
-      .addNode("clarifier_node", this.clarifierNode.bind(this))
-      .addNode("general_node", this.generalNode.bind(this))
-      .addNode("vector_search_node", this.vectorSearchNode.bind(this))
-      .addNode("aggregation_node", this.aggregationNode.bind(this))
-      .addNode("synthesis_node", this.synthesisNode.bind(this));
+      .addNode("entry_node", (state) => entryNode(state, this.getModel(state.socketId), this.gateway))
+      .addNode("guidance_node", (state) => guidanceNode(state))
+      .addNode("meta_node", (state) => metaNode(state, this.getModel(state.socketId)))
+      .addNode("refusal_node", (state) => refusalNode(state))
+      .addNode("router_node", (state) => routerNode(state, this.getModel(state.socketId), this.gateway))
+      .addNode("vector_search_node", (state) => vectorSearchNode(state, this.vectorService, this.gateway))
+      .addNode("aggregation_node", (state) => aggregationNode(state, this.getModel(state.socketId), this.vectorService, this.gateway))
+      .addNode("synthesis_node", (state) => synthesisNode(state, this.getModel(state.socketId), this.gateway));
     // Add Edges
-    graph.addEdge(START, "router_node");
+    graph.addEdge(START, "entry_node");
+
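+    // Route on the entry classification; "OutOfScope" goes straight to the refusal node.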
+    graph.addConditionalEdges(
+      "entry_node",
+      (state) => state.entryCategory,
+      {
+        "InScope-Actionable": "router_node",
+        "InScope-NeedsGuidance": "guidance_node",
+        "InScope-Meta": "meta_node",
+        "OutOfScope": "refusal_node"
+      }
+    );
     graph.addConditionalEdges(
       "router_node",
       (state) => state.activeIntent,
       {
-        Clarify: "clarifier_node",
-        General: "general_node",
         Semantic: "vector_search_node",
         Aggregate: "aggregation_node"
       }
     );
-    graph.addEdge("clarifier_node", END);
-    graph.addEdge("general_node", END);
+    graph.addEdge("guidance_node", END);
+    graph.addEdge("meta_node", END);
+    graph.addEdge("refusal_node", END);
     graph.addEdge("vector_search_node", "synthesis_node");
     graph.addEdge("aggregation_node", "synthesis_node");
     graph.addEdge("synthesis_node", END);
@@ -86,244 +112,49 @@ export class FFBLangChainService {
     this.graph = graph.compile();
   }
-  // --- NODE IMPLEMENTATIONS ---
-
-  private async routerNode(state: typeof AgentState.State): Promise<Partial<typeof AgentState.State>> {
-    const lastMessage = state.messages[state.messages.length - 1].content as string;
-
-    // Change this in your routerNode:
-    const routerSchema = z.object({
-      intent: z.enum(['General', 'Clarify', 'Semantic', 'Aggregate']),
-      entities: z.object({
-        // Use .nullable() instead of .optional() for OpenAI Strict mode
-        // Or ensure they are always provided by the LLM
-        site: z.string().nullable().describe("The site name mentioned, or null"),
-        date: z.string().nullable().describe("The date mentioned, or null"),
-      }), // Remove .optional() here; the object itself must be returned
-      reasoning: z.string()
-    });
-
-    let payload: ThoughtPayload = {
-      node: 'router_node',
-      status: 'processing',
-      message: 'Analyzing user intent...',
-      input: lastMessage
-    }
-
-    this.gateway.emitThought(state.socketId, payload);
-
-    const routerPrompt = `
-You are an Application Router for a production database.
-Analyze the user input and route to: [General, Clarify, Semantic, Aggregate].
-
-INTENT DEFINITIONS:
-- Aggregate: Use if the user asks for numbers, totals, averages, or counts (e.g., "How much...", "Total weight").
-- Semantic: Use if the user asks for specific records, qualitative descriptions, issues, "what happened", or "find info about" (e.g., "Show me records for Site A", "What were the notes on block X?").
-- Clarify: Use ONLY if the user names an entity (like a Site) but provides NO verb or question.
-- General: Use for greetings or off-topic chat.
-
-STRICT RULES:
-1. If "Site" is mentioned alone (e.g., "Site A"), route to 'Clarify'.
-2. If the user asks for data or "what happened" regarding a site, route to 'Semantic'.
-3. Do NOT route to 'Clarify' if there is a clear question.
-
-User Input: "${lastMessage}"
-`;
-
-    const structuredLlm = this.model.withStructuredOutput(routerSchema);
-    const result = await structuredLlm.invoke(routerPrompt);
-
-    // Merge extracted entities with existing store
-    this.gateway.emitThought(state.socketId, {
-      node: 'router_node',
-      status: 'completed',
-      result: result
-    });
-
-    return {
-      activeIntent: result.intent as any,
-      entityStore: result.entities || {},
-      socketId: state.socketId
-    };
-  }
-
-  private async clarifierNode(state: typeof AgentState.State): Promise<Partial<typeof AgentState.State>> {
-    const prompt = `User mentioned ${JSON.stringify(state.entityStore)}. Ask them to clarify what they want to know (e.g., total production, specific issues, etc.).`;
-
-    let payload: ThoughtPayload = {
-      node: 'clarifier_node',
-      status: 'processing',
-      message: 'Asking for clarification',
-      context: state.entityStore
-    }
-
-    this.gateway.emitThought(state.socketId, payload);
-
-    const response = await this.model.invoke(prompt);
-    return {
-      messages: [response]
-    };
-  }
-
-  private async generalNode(state: typeof AgentState.State): Promise<Partial<typeof AgentState.State>> {
-    const lastMessage = state.messages[state.messages.length - 1];
-    const response = await this.model.invoke([
-      new HumanMessage("You are a helpful assistant. Reply to: " + lastMessage.content)
-    ]);
-    return {
-      messages: [response]
-    };
-  }
-
-  private async vectorSearchNode(state: typeof AgentState.State): Promise<Partial<typeof AgentState.State>> {
-    const lastMessage = state.messages[state.messages.length - 1].content as string;
-    const filter: Record<string, any> = {};
-
-    if (state.entityStore && state.entityStore.site) {
-      filter.site = state.entityStore.site;
-    }
-
-    const results = await this.vectorService.vectorSearch(lastMessage, 5, filter);
-
-    let payload: ThoughtPayload = {
-      node: 'vector_search_node',
-      status: 'completed',
-      query: lastMessage,
-      filter: filter,
-      resultsCount: results.length
-    }
-    this.gateway.emitThought(state.socketId, payload);
-
-    return {
-      actionPayload: { type: 'search', query: lastMessage, results }
-    };
-  }
-
-  private async aggregationNode(state: typeof AgentState.State): Promise<Partial<typeof AgentState.State>> {
-    const lastMessage = state.messages[state.messages.length - 1].content as string;
-
-    const pipelineSchema = z.object({
-      matchStage: z.object({
-        site: z.string().nullable(),
-        startDate: z.string().nullable(),
-        endDate: z.string().nullable(),
-      }),
-      aggregationType: z.enum(["sum", "avg", "count"]),
-      fieldToAggregate: z.enum(["quantity", "weight"])
-    });
-
-    const structuredLlm = this.model.withStructuredOutput(pipelineSchema);
-    const params = await structuredLlm.invoke(`Extract aggregation parameters for: "${lastMessage}". Context: ${JSON.stringify(state.entityStore)}`);
-
-    const pipeline: any[] = [];
-    const match: any = {};
-
-    // Check for null instead of undefined
-    if (params.matchStage.site !== null) {
-      match.site = params.matchStage.site;
-    }
-
-    if (params.matchStage.startDate !== null || params.matchStage.endDate !== null) {
-      match.productionDate = {};
-      if (params.matchStage.startDate !== null) {
-        match.productionDate.$gte = new Date(params.matchStage.startDate);
-      }
-      if (params.matchStage.endDate !== null) {
-        match.productionDate.$lte = new Date(params.matchStage.endDate);
-      }
-    }
-
-    if (Object.keys(match).length > 0) {
-      pipeline.push({ $match: match });
-    }
-
-    const group: any = { _id: null };
-    const operator = `$${params.aggregationType}`;
-    group.totalValue = { [operator]: `$${params.fieldToAggregate}` };
-
-    pipeline.push({ $group: group });
-
-    const results = await this.vectorService.aggregate(pipeline);
-
-    let payload: ThoughtPayload = {
-      node: `aggregation_node`,
-      status: 'completed',
-      pipeline: pipeline,
-      results: results
-    }
-    this.gateway.emitThought(state.socketId, {
-      node: 'aggregation_node',
-      status: 'completed',
-      pipeline: pipeline,
-      results: results
-    });
-
-    return {
-      actionPayload: { type: 'aggregate', pipeline, results }
-    };
-  }
-
-  private async synthesisNode(state: typeof AgentState.State): Promise<Partial<typeof AgentState.State>> {
-    const lastMessage = state.messages[state.messages.length - 1].content as string;
-    const payload = state.actionPayload;
-
-    const prompt = `
-      User Question: "${lastMessage}"
-      Data Context: ${JSON.stringify(payload)}
-
-      Synthesize a natural language answer based STRICTLY on the Data Context.
-      Cite the source (e.g., "Based on aggregation results...").
-    `;
-
-    let thoughtPayload: ThoughtPayload = {
-      node: 'synthesis_node',
-      status: 'processing',
-      message: 'Synthesizing final response',
-      dataContextLength: JSON.stringify(payload).length
-    }
-    this.gateway.emitThought(state.socketId, thoughtPayload);
-
-    const response = await this.model.invoke(prompt);
-    return {
-      messages: [response]
-    };
-  }
-
   // --- MAIN ENTRY POINT ---
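+  // Session lifecycle and history are delegated to SessionManager.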
   createSession(socketId: string) {
-    this.sessions.set(socketId, []);
-    console.log(`Session created for ${socketId}`);
+    this.sessionManager.createSession(socketId);
   }
 
   deleteSession(socketId: string) {
-    this.sessions.delete(socketId);
-    console.log(`Session deleted for ${socketId}`);
+    this.sessionManager.deleteSession(socketId);
   }
 
   async chat(socketId: string, message: string): Promise<string> {
     try {
-      // Get history or init empty
-      const history = this.sessions.get(socketId) || [];
+      // Get session & filter valid history
+      const session = this.sessionManager.getSession(socketId);
+      const validHistory = this.sessionManager.getValidHistory(socketId);
+      const userMsg = new HumanMessage(message);
       const inputs = {
-        messages: [...history, new HumanMessage(message)],
-        entityStore: {},
+        messages: [...validHistory, userMsg],
+        entityStore: session.entityStore,
         socketId: socketId
       };
       const result = await this.graph.invoke(inputs);
       const allMessages = result.messages as BaseMessage[];
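+      // Carry the graph's updated entity store and entry classification back for session storage.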
+      const updatedEntityStore = result.entityStore as Record<string, any>;
+      const classification = result.entryCategory as string;
-      // Update history (keep all messages for context window? Or truncate?)
-      // For now, keep all. Memory optimization might be needed later.
-      this.sessions.set(socketId, allMessages);
-
+      // Get the AI response (last message)
       const agentMessages = allMessages.filter((m: BaseMessage) => m._getType() === 'ai');
       const lastResponse = agentMessages[agentMessages.length - 1];
-      return lastResponse.content as string;
+      // Update Session Storage
+      this.sessionManager.updateSession(
+        socketId,
+        userMsg,
+        lastResponse,
+        classification,
+        updatedEntityStore
+      );
+
+      return lastResponse?.content as string || "I'm sorry, I encountered an error.";
     } catch (error) {
       console.error('Error calling LangGraph:', error);