diff --git a/.cursorrules b/.cursorrules
index dee792c..6cf0188 100644
--- a/.cursorrules
+++ b/.cursorrules
@@ -244,3 +244,5 @@ Ensure all required dependencies are properly installed and typed:
 - Implement proper error handling
 - Add response validation
 - Support function execution tracking
+- Add message history support
+- Handle message reconstruction
diff --git a/README.md b/README.md
index 144d1e5..7c4a78a 100644
--- a/README.md
+++ b/README.md
@@ -28,6 +28,8 @@ A Next.js application that uses a large language model to control a computer thr
 > - ✅ Base architecture
 > - ✅ Model selection
 > - ✅ Model tracking
+> - ✅ Message history
+> - 🔳 Context management
 > - 🔳 Function calling
 > - ⬜ Streaming support
 > - ⬜ Computer use tooling
diff --git a/src/app/api/llm/route.ts b/src/app/api/llm/route.ts
index e62110e..4e126c9 100644
--- a/src/app/api/llm/route.ts
+++ b/src/app/api/llm/route.ts
@@ -1,4 +1,5 @@
 import { LLMService } from '@/services/llm.service';
+import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
 import { NextRequest, NextResponse } from 'next/server';
 
 export async function POST(req: NextRequest) {
@@ -9,8 +10,30 @@
       return NextResponse.json({ error: 'Model ID is required' }, { status: 400 });
     }
 
+    // Reconstruct Langchain message instances
+    const history = options?.history
+      ?.map((msg: any) => {
+        if (msg.type === 'constructor') {
+          switch (msg.id[2]) {
+            case 'HumanMessage':
+              return new HumanMessage(msg.kwargs);
+            case 'AIMessage':
+              return new AIMessage(msg.kwargs);
+            case 'SystemMessage':
+              return new SystemMessage(msg.kwargs);
+            default:
+              return null;
+          }
+        }
+        return null;
+      })
+      .filter(Boolean);
+
     const llmService = LLMService.getInstance();
-    const response = await llmService.sendMessage(message, modelId, options);
+    const response = await llmService.sendMessage(message, modelId, {
+      ...options,
+      history,
+    });
 
     return NextResponse.json(response);
   } catch (error) {
diff --git a/src/components/chat/ChatComponent.tsx b/src/components/chat/ChatComponent.tsx
index 8086bba..fba2df4 100644
--- a/src/components/chat/ChatComponent.tsx
+++ b/src/components/chat/ChatComponent.tsx
@@ -6,9 +6,10 @@ import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card';
 import { ScrollArea } from '@/components/ui/scroll-area';
 import { useChatMessages } from '@/hooks/useChatMessages';
 import { useDockerHandlers } from '@/hooks/useDockerHandlers';
-import { AVAILABLE_MODELS } from '@/lib/llm/types';
+import { AVAILABLE_MODELS, convertToLangchainMessage } from '@/lib/llm/types';
 import { cn } from '@/lib/utils';
 import { LLMApiService } from '@/services/llm-api.service';
+import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
 import { Settings as SettingsIcon } from 'lucide-react';
 import { useCallback, useEffect, useRef, useState } from 'react';
 import ChatCopyButton from './ChatCopyButton';
@@ -80,15 +81,19 @@ const ChatComponent: React.FC = ({
   const handleSendMessage = async () => {
     if (!inputMessage.trim()) return;
 
-    // Find the selected model info
     const selectedModelInfo = AVAILABLE_MODELS.find((m) => m.id === selectedModel);
-
     const userMessageId = addChatMessage('user', inputMessage);
     setInputMessage('');
 
     try {
+      // Convert and filter out log messages and nulls
+      const history = chatMessages
+        .map(convertToLangchainMessage)
+        .filter((msg): msg is HumanMessage | AIMessage | SystemMessage => msg !== null);
+
       const response = await llmApiService.sendMessage(inputMessage, selectedModel, {
         stream: false,
+        history,
       });
       addChatMessage('assistant', response.content, undefined, undefined, selectedModelInfo);
     } catch (error) {
diff --git a/src/lib/llm/provider.ts b/src/lib/llm/provider.ts
index 6d6cc2d..db2271e 100644
--- a/src/lib/llm/provider.ts
+++ b/src/lib/llm/provider.ts
@@ -1,13 +1,14 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { HumanMessage } from '@langchain/core/messages';
+import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
 import { ChatOpenAI } from '@langchain/openai';
-import { FunctionDefinition, LLMConfig, LLMResponse } from './types';
+import { FunctionDefinition, LLMConfig, LLMRequestOptions, LLMResponse } from './types';
 
-interface GenerateOptions {
+export interface GenerateOptions {
   functions?: string[];
   stream?: boolean;
   maxTokens?: number;
+  history?: Array<HumanMessage | AIMessage | SystemMessage>;
 }
 
 export class LLMProvider {
@@ -46,13 +47,14 @@
     this.functions.set(definition.name, definition);
   }
 
-  public async generateResponse(prompt: string, options?: GenerateOptions): Promise<LLMResponse> {
+  public async generateResponse(prompt: string, options?: LLMRequestOptions): Promise<LLMResponse> {
     try {
-      const response = await this.model.invoke([
-        new HumanMessage({
-          content: prompt,
-        }),
-      ]);
+      const messages = [
+        ...(Array.isArray(options?.history) ? options.history : []),
+        new HumanMessage({ content: prompt }),
+      ];
+
+      const response = await this.model.invoke(messages);
 
       const content =
         typeof response.content === 'string' ? response.content : JSON.stringify(response.content);
@@ -66,6 +68,7 @@
         },
       };
     } catch (error) {
+      console.error('Provider error:', error);
       throw new Error(
         `Failed to generate response: ${error instanceof Error ? error.message : 'Unknown error'}`
       );
diff --git a/src/lib/llm/types.ts b/src/lib/llm/types.ts
index f975b46..1f5bdad 100644
--- a/src/lib/llm/types.ts
+++ b/src/lib/llm/types.ts
@@ -1,3 +1,6 @@
+import { ChatMessageData } from '@/components/chat/ChatMessage';
+import { AIMessage, BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
+
 // Core LLM types
 export type LLMProvider = 'openai' | 'anthropic' | 'local';
 
@@ -146,3 +149,60 @@ export const AVAILABLE_MODELS_SORTED = AVAILABLE_MODELS.sort((a, b) => {
   // Then by context window size (larger first)
   return b.contextWindow - a.contextWindow;
 });
+
+// Chat Memory Types
+export interface ChatMemory {
+  messages: BaseMessage[]; // Changed from ChatMessage[] to BaseMessage[]
+  returnMessages: boolean;
+  maxTokens?: number;
+}
+
+export interface ChatMessageHistory {
+  addMessage(message: BaseMessage): Promise<void>;
+  getMessages(): Promise<BaseMessage[]>;
+  clear(): Promise<void>;
+}
+
+export interface LLMRequestOptions {
+  stream?: boolean;
+  functions?: string[];
+  history?: Array<HumanMessage | AIMessage | SystemMessage>;
+  maxTokens?: number;
+}
+
+// Convert our message types to Langchain message types
+export function convertToLangchainMessage(
+  message: ChatMessageData
+): HumanMessage | AIMessage | SystemMessage | null {
+  // Skip log messages
+  if (message.type === 'log') {
+    return null;
+  }
+
+  switch (message.type) {
+    case 'assistant':
+      return new AIMessage({ content: message.content });
+    case 'system':
+      return new SystemMessage({ content: message.content });
+    case 'user':
+      return new HumanMessage({ content: message.content });
+    default:
+      return null;
+  }
+}
+
+// Helper type for message roles
+export type MessageRole = 'human' | 'assistant' | 'system';
+
+// Helper function to create messages with proper typing
+export function createMessage(content: string, role: MessageRole): BaseMessage {
+  switch (role) {
+    case 'assistant':
+      return new AIMessage(content);
+    case 'system':
+      return new SystemMessage(content);
+    case 'human':
+    default:
+      return new HumanMessage({ content });
+  }
+}
diff --git a/src/services/llm-api.service.ts b/src/services/llm-api.service.ts
index a456706..d12a2cf 100644
--- a/src/services/llm-api.service.ts
+++ b/src/services/llm-api.service.ts
@@ -1,5 +1,6 @@
 'use client';
 
+import { GenerateOptions } from '@/lib/llm/provider';
 import { LLMResponse } from '@/lib/llm/types';
 
 export class LLMApiService {
@@ -15,10 +16,7 @@
   public async sendMessage(
     message: string,
     modelId: string,
-    options?: {
-      stream?: boolean;
-      functions?: string[];
-    }
+    options?: GenerateOptions
   ): Promise<LLMResponse> {
     const response = await fetch('/api/llm', {
       method: 'POST',
@@ -26,7 +24,14 @@
         'Content-Type': 'application/json',
         'x-api-key': process.env.NEXT_PUBLIC_API_KEY || '', // Ensure this is set
       },
-      body: JSON.stringify({ message, modelId, options }),
+      body: JSON.stringify({
+        message,
+        modelId,
+        options: {
+          ...options,
+          history: options?.history || [],
+        },
+      }),
     });
 
     if (!response.ok) {
diff --git a/src/services/llm.service.ts b/src/services/llm.service.ts
index ae4a28b..b4ed30a 100644
--- a/src/services/llm.service.ts
+++ b/src/services/llm.service.ts
@@ -1,6 +1,12 @@
 import { LLMProvider } from '@/lib/llm/provider';
 import { FunctionRegistry } from '@/lib/llm/registry';
-import { AVAILABLE_MODELS, FunctionDefinition, LLMConfig } from '@/lib/llm/types';
+import {
+  AVAILABLE_MODELS,
+  FunctionDefinition,
+  LLMConfig,
+  LLMRequestOptions,
+} from '@/lib/llm/types';
+import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
 
 export class LLMService {
   private static instance: LLMService;
@@ -31,7 +37,7 @@
         model: model.id,
         apiKey: this.getApiKey(model.provider),
         temperature: 0.7,
-        maxTokens: model.maxOutputTokens, // Use the model-specific output token limit
+        maxTokens: model.maxOutputTokens,
       };
 
       this.providers.set(modelId, new LLMProvider(config));
@@ -51,24 +57,23 @@
     return key;
   }
 
-  public async sendMessage(
-    message: string,
-    modelId: string,
-    options?: {
-      stream?: boolean;
-      functions?: string[];
-      maxTokens?: number;
-    }
-  ) {
+  public async sendMessage(message: string, modelId: string, options?: LLMRequestOptions) {
     try {
       const model = AVAILABLE_MODELS.find((m) => m.id === modelId);
       if (!model) {
        throw new Error(`Model ${modelId} not found`);
       }
 
+      // Ensure history contains valid Langchain message types
+      const history = options?.history?.filter(
+        (msg) =>
+          msg instanceof HumanMessage || msg instanceof AIMessage || msg instanceof SystemMessage
+      );
+
       const provider = this.getProvider(modelId);
       return await provider.generateResponse(message, {
         ...options,
+        history,
         maxTokens: model.maxOutputTokens,
       });
     } catch (error) {
diff --git a/src/types/langchain.d.ts b/src/types/langchain.d.ts
deleted file mode 100644
index 4af3e44..0000000
--- a/src/types/langchain.d.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-declare module '@langchain/anthropic' {
-  export class ChatAnthropic {
-    constructor(config: {
-      modelName: string;
-      anthropicApiKey: string;
-      temperature?: number;
-      maxTokens?: number;
-    });
-    invoke(messages: any[]): Promise<any>;
-  }
-}
-
-declare module '@langchain/openai' {
-  export class ChatOpenAI {
-    constructor(config: {
-      modelName: string;
-      openAIApiKey: string;
-      temperature?: number;
-      maxTokens?: number;
-    });
-    invoke(messages: any[]): Promise<any>;
-  }
-}
-
-declare module '@langchain/core/language_models/chat_models' {
-  export class BaseChatModel {
-    invoke(messages: any[]): Promise<any>;
-  }
-}
-
-declare module '@langchain/core/messages' {
-  export class HumanMessage {
-    constructor(config: { content: string });
-    content: string;
-  }
-}
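
Note on the wire format the route.ts hunk relies on: LangChain messages are class instances, so when ChatComponent passes them through JSON.stringify in llm-api.service.ts they arrive at the API route as their serialized form ({ lc, type: 'constructor', id: [..., className], kwargs }); that is why the route dispatches on msg.id[2] and rebuilds instances from msg.kwargs. The standalone TypeScript sketch below is not part of the patch; it assumes @langchain/core's default toJSON() output and simply illustrates the round trip.

import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';

// Serialize the way the browser does when history is sent through fetch(),
// then parse it back as the API route receives it.
const wire = JSON.parse(JSON.stringify(new HumanMessage({ content: 'hello' })));
// wire is roughly:
// { lc: 1, type: 'constructor', id: ['langchain_core', 'messages', 'HumanMessage'], kwargs: { content: 'hello', ... } }

// Same revival logic as the switch in src/app/api/llm/route.ts.
function revive(msg: any): HumanMessage | AIMessage | SystemMessage | null {
  if (msg?.type !== 'constructor') return null;
  switch (msg.id[2]) {
    case 'HumanMessage':
      return new HumanMessage(msg.kwargs);
    case 'AIMessage':
      return new AIMessage(msg.kwargs);
    case 'SystemMessage':
      return new SystemMessage(msg.kwargs);
    default:
      return null;
  }
}

const revived = revive(wire);
console.log(revived instanceof HumanMessage, revived?.content); // true 'hello'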