diff --git a/apps/cf-ai-backend/src/routes/chat.ts b/apps/cf-ai-backend/src/routes/chat.ts
index 4e49ea41..0a832933 100644
--- a/apps/cf-ai-backend/src/routes/chat.ts
+++ b/apps/cf-ai-backend/src/routes/chat.ts
@@ -2,8 +2,6 @@ import { Content, GenerativeModel } from '@google/generative-ai';
 import { OpenAIEmbeddings } from '../OpenAIEmbedder';
 import { CloudflareVectorizeStore } from '@langchain/cloudflare';
 import { Request } from '@cloudflare/workers-types';
-import { AiTextGenerationOutput } from '@cloudflare/ai/dist/ai/tasks/text-generation';
-import { Ai } from '@cloudflare/ai';
 
 export async function POST(request: Request, _: CloudflareVectorizeStore, embeddings: OpenAIEmbeddings, model: GenerativeModel, env?: Env) {
 	const queryparams = new URL(request.url).searchParams;
@@ -119,7 +117,7 @@ export async function POST(request: Request, _: CloudflareVectorizeStore, embedd
 	});
 
 	const prompt =
-		`You are supermemory - an agent that answers a question based on the context provided. don't say 'based on the context'. Be very concise and to the point. Give short responses. I expect you to be like a 'Second Brain'. you will be provided with the context (old saved posts) and questions. Answer accordingly. Answer in markdown format. Use bold, italics, bullet points` +
+		`You are supermemory - an agent that answers a question based on the context provided. don't say 'based on the context'. Be concise and to the point, make sure that you are addressing the question properly but don't yap too much. I expect you to be like a 'Second Brain'. you will be provided with the context (old saved posts) and questions. Answer accordingly. Answer in markdown format. Use bold, italics, bullet points` +
 		`Context:\n${preparedContext == '' ? "No context, just introduce yourself and say something like 'I don't know, but you can save things from the sidebar on the right and then query me'" : preparedContext + `Question: ${query}\nAnswer:`}\n\n`;
 
 	const output = await chat.sendMessageStream(prompt);
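
Note (not part of the diff): the ternary inside the Context segment of the prompt is easy to misread, so here is a minimal TypeScript sketch of how it resolves. The helper name buildContextSegment and the sample arguments are illustrative assumptions; only the preparedContext and query values come from the POST handler above.

// Illustrative sketch only -- buildContextSegment is a hypothetical helper,
// not code from the diff. It isolates the Context portion of the prompt.
const buildContextSegment = (preparedContext: string, query: string): string =>
	`Context:\n${
		preparedContext == ''
			? "No context, just introduce yourself and say something like 'I don't know, but you can save things from the sidebar on the right and then query me'"
			: preparedContext + `Question: ${query}\nAnswer:`
	}\n\n`;

// With no saved context the model is told to introduce itself; the
// `Question: ... Answer:` cue is appended only when saved context exists.
buildContextSegment('', 'what did I save about Cloudflare Workers?');
buildContextSegment('Saved post: ...', 'what did I save about Cloudflare Workers?');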