diff --git a/.dockerignore b/.dockerignore index 6d68aeaf..809ba837 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,24 +1,24 @@ -**/.classpath -**/.dockerignore -**/.env -**/.git -**/.gitignore -**/.project -**/.settings -**/.toolstarget -**/.vs -**/.vscode -**/*.*proj.user -**/*.dbmdl -**/*.jfm -**/charts -**/docker-compose* -**/compose* -**/Dockerfile* -**/node_modules -**/npm-debug.log -**/obj -**/secrets.dev.yaml -**/values.dev.yaml -LICENSE -README.md +**/.classpath +**/.dockerignore +**/.env +**/.git +**/.gitignore +**/.project +**/.settings +**/.toolstarget +**/.vs +**/.vscode +**/*.*proj.user +**/*.dbmdl +**/*.jfm +**/charts +**/docker-compose* +**/compose* +**/Dockerfile* +**/node_modules +**/npm-debug.log +**/obj +**/secrets.dev.yaml +**/values.dev.yaml +LICENSE +README.md diff --git a/.env.example b/.env.example index 5a9a62a8..2ba4fede 100644 --- a/.env.example +++ b/.env.example @@ -1,24 +1,66 @@ -# DATABASE -DATABASE_URL= +# --------------- Providers ----------------- -# NEXTAUTH -NEXTAUTH_URL= -NEXTAUTH_SECRET= +## Amazon (not supported so far) +NEXT_PUBLIC_ACCESS_AWS= +AWS_ACCESS_KEY= +AWS_SECRET_KEY= +AWS_REGION= -EMAIL_HOST= -EMAIL_PORT= -EMAIL_USERNAME= -EMAIL_PASSWORD= -EMAIL_FROM= +## Anthropic +NEXT_PUBLIC_ACCESS_ANTHROPIC= +ANTHROPIC_API_KEY= -GITHUB_CLIENT_ID= -GITHUB_CLIENT_SECRET= +## Azure (not supported so far) +NEXT_PUBLIC_ACCESS_AZURE= +AZURE_OPENAI_API_KEY= +AZURE_OPENAI_ENDPOINT= +AZURE_OPENAI_DEPLOY_INSTANCE_NAME= -GOOGLE_CLIENT_ID= -GOOGLE_CLIENT_SECRET= +## Cohere +NEXT_PUBLIC_ACCESS_COHERE= +COHERE_API_KEY= -# PROVIDERS +## Fireworks +NEXT_PUBLIC_ACCESS_FIREWORKS= +FIREWORKS_API_KEY= + +## Google +NEXT_PUBLIC_ACCESS_GOOGLE= +GOOGLE_API_KEY= + +## Groq +NEXT_PUBLIC_ACCESS_GROQ= +GROQ_API_KEY= + +## Hugging Face +NEXT_PUBLIC_ACCESS_HUGGINGFACE= +HUGGINGFACE_API_KEY= + +## Mistral +NEXT_PUBLIC_ACCESS_MISTRAL= +MISTRAL_API_KEY= ## OpenAI +NEXT_PUBLIC_ACCESS_OPENAI= OPENAI_API_KEY= OPENAI_API_ENDPOINT= + +## Perplexity +NEXT_PUBLIC_ACCESS_PERPLEXITY= +PERPLEXITY_API_KEY= +PERPLEXITY_ENDPOINT= + +# -------------- Search Engines -------------- + +## Google +NEXT_PUBLIC_ACCESS_GOOGLE_SEARCH= +GOOGLE_SEARCH_API_KEY= +GOOGLE_SEARCH_ENGINE_ID= + +## Tavily +NEXT_PUBLIC_ACCESS_TAVILY_SEARCH= +TAVILY_SEARCH_API_KEY= + +## You +NEXT_PUBLIC_ACCESS_YOU_SEARCH= +YOU_SEARCH_API_KEY= diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 00000000..48055c95 --- /dev/null +++ b/.eslintignore @@ -0,0 +1 @@ +components/ui/**.tsx diff --git a/.eslintrc.json b/.eslintrc.json index ddb81d15..be48ee06 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -1,17 +1,82 @@ { - "root": true, - "extends": [ - "next/core-web-vitals", - "plugin:tailwindcss/recommended" + "root": true, + "extends": [ + "next", + "next/core-web-vitals", + "plugin:tailwindcss/recommended" + ], + "plugins": [ + "react", + "simple-import-sort", + "unused-imports" + ], + "parserOptions": { + "sourceType": "module", + "ecmaVersion": "latest" + }, + "rules": { + "simple-import-sort/imports": "error", + "simple-import-sort/exports": "error", + "unused-imports/no-unused-imports": "error", + "unused-imports/no-unused-vars": [ + "warn", + { + "vars": "all", + "varsIgnorePattern": "^_", + "args": "after-used", + "argsIgnorePattern": "^_" + } ], - "overrides": [ - { - "files": [ - "*.ts", - "*.tsx", - "*.js" - ], - "parser": "@typescript-eslint/parser" - } - ] + "no-console": "warn", + "react/no-unescaped-entities": "off" + }, + "overrides": [ + { + "files": [ + "*.ts", + "*.tsx", + "*.js" + ], + 
"parser": "@typescript-eslint/parser" + }, + { + "files": [ + "*.js", + "*.jsx", + "*.ts", + "*.tsx" + ], + "rules": { + "simple-import-sort/imports": [ + "error", + { + "groups": [ + [ + "^react", + "^@?\\w" + ], + [ + "^(@|components)(/.*|$)" + ], + [ + "^\\u0000" + ], + [ + "^\\.\\.(?!/?$)", + "^\\.\\./?$" + ], + [ + "^\\./(?=.*/)(?!/?$)", + "^\\.(?!/?$)", + "^\\./?$" + ], + [ + "^.+\\.?(css)$" + ] + ] + } + ] + } + } + ] } \ No newline at end of file diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 2ac163e2..979822c0 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -53,4 +53,4 @@ jobs: tags: | okisdev/chatchat:${{ steps.extract_tag.outputs.tag }} okisdev/chatchat:latest - platforms: linux/amd64,linux/arm64 + platforms: linux/amd64,linux/arm64 \ No newline at end of file diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml deleted file mode 100644 index d45ccdb0..00000000 --- a/.github/workflows/sync.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: ChatChat Upstream Sync - -permissions: - contents: write - -on: - schedule: - - cron: '0 0 * * *' # every day at 00:00 UTC - workflow_dispatch: - -jobs: - sync_latest_from_chatchat_upstream: - name: Sync latest commits from ChatChat upstream repo - runs-on: ubuntu-latest - if: ${{ github.event.repository.fork }} - - steps: - # Step 1: run a standard checkout action - - name: Checkout target repo - uses: actions/checkout@v4 - - # Step 2: run the sync action - - name: Sync upstream changes - id: sync - uses: aormsby/Fork-Sync-With-Upstream-action@v3.4 - with: - upstream_sync_repo: okisdev/ChatChat - upstream_sync_branch: main - target_sync_branch: main - target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set - - # Set test_mode true to run tests instead of the true action!! - test_mode: false - - - name: Sync check - if: failure() - run: | - echo "::error::由于权限不足,导致同步失败(这是预期的行为),请前往仓库首页手动执行[Sync fork]。" - echo "::error::Due to insufficient permissions, synchronization failed (as expected). Please go to the repository homepage and manually perform [Sync fork]." - exit 1 diff --git a/.gitignore b/.gitignore index 549388c9..a84107af 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ /node_modules /.pnp .pnp.js +.yarn/install-state.gz # testing /coverage @@ -34,10 +35,3 @@ yarn-error.log* # typescript *.tsbuildinfo next-env.d.ts - -# vscode -.vscode - -# next-pwa -public/sw.js -public/workbox-*.js diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..e9d872ba --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,27 @@ +{ + "cSpell.words": [ + "buildx", + "DOCKERHUB", + "fastapi", + "Groq", + "huggingface", + "langchain", + "langsmith", + "Lightbox", + "lucide", + "markdownit", + "mistralai", + "mixtral", + "onest", + "rehype", + "sonner", + "Tavily", + "tippyjs" + ], + "python.analysis.typeCheckingMode": "basic", + "python.analysis.autoImportCompletions": true, + "i18n-ally.localesPaths": [ + "locales", + ], + "i18n-ally.keystyle": "flat" +} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index baeeee6a..a91233cc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,36 +1,46 @@ -FROM node:lts-alpine as base - -WORKDIR /app -COPY package*.json pnpm-lock.yaml ./ -RUN npm i -g pnpm -RUN pnpm i - -COPY . . 
- -RUN pnpm prisma generate -RUN pnpm build - -FROM node:lts-alpine as production -WORKDIR /app -COPY --from=base /app/package*.json ./ -COPY --from=base /app/.next ./.next -COPY --from=base /app/public ./public -COPY --from=base /app/prisma ./prisma -COPY --from=base /app/node_modules ./node_modules -COPY --from=base /app/next.config.js ./next.config.js - -RUN npm i -g pnpm - -EXPOSE 3000 - -ENV NODE_ENV=production \ - DATABASE_URL="" \ - NEXTAUTH_URL="" \ - NEXTAUTH_SECRET="" \ - EMAIL_HOST="" \ - EMAIL_PORT="" \ - EMAIL_USERNAME="" \ - EMAIL_PASSWORD="" \ - EMAIL_FROM="" - -CMD ["pnpm", "start"] \ No newline at end of file +FROM node:lts-alpine AS base + +WORKDIR /app + +COPY package.json pnpm-lock.yaml ./ + +RUN npm i -g pnpm +RUN pnpm install + +COPY . . + +RUN pnpm build + +FROM node:lts-alpine AS production + +WORKDIR /app + +COPY --from=base /app/package*.json ./ +COPY --from=base /app/.next ./.next +COPY --from=base /app/public ./public +COPY --from=base /app/node_modules ./node_modules +COPY --from=base /app/next.config.mjs ./next.config.mjs + +RUN npm i -g pnpm + +EXPOSE 3000 + +ENV AWS_ACCESS_KEY="" \ + AWS_SECRET_KEY="" \ + AWS_REGION="" \ + ANTHROPIC_API_KEY="" \ + AZURE_OPENAI_API_KEY="" \ + AZURE_OPENAI_ENDPOINT="" \ + AZURE_OPENAI_DEPLOY_INSTANCE_NAME="" \ + COHERE_API_KEY="" \ + FIREWORKS_API_KEY="" \ + GOOGLE_API_KEY="" \ + GROQ_API_KEY="" \ + HUGGINGFACE_API_KEY="" \ + MISTRAL_API_KEY="" \ + OPENAI_API_KEY="" \ + OPENAI_API_ENDPOINT="" \ + PERPLEXITY_API_KEY="" \ + PERPLEXITY_ENDPOINT="" + +CMD ["pnpm", "start"] diff --git a/LICENSE b/LICENSE.txt similarity index 100% rename from LICENSE rename to LICENSE.txt diff --git a/README.ja.md b/README.ja.md new file mode 100644 index 00000000..255442a3 --- /dev/null +++ b/README.ja.md @@ -0,0 +1,48 @@ +# Chat Chat + +> シンプルで使いやすいインターフェイスを備えた、統合されたチャットとAIプラットフォーム。 + +
+ 🇺🇸 | 🇭🇰 | 🇨🇳 | 🇯🇵 +
+ +
+ + ドキュメント + +
+
+## インターフェイス
+
+![検索](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/chat.png)
+
+![チャット](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/search.png)
+
+## 機能
+
+- 主要なAIプロバイダーに対応(Anthropic、OpenAI、Cohere、Google Geminiなど)
+- 自己ホストが容易
+
+## 使用方法
+
+[ドキュメント](https://docs.okis.dev/chat)
+
+## デプロイメント
+
+[![Vercelでデプロイ](https://vercel.com/button)](https://vercel.com/import/project?template=https://github.com/okisdev/ChatChat)
+
+[![Railwayでデプロイ](https://railway.app/button.svg)](https://railway.app/template/-WWW5r)
+
+詳細なデプロイ方法は[ドキュメント](https://docs.okis.dev/chat)にて
+
+## ライセンス
+
+[AGPL-3.0](./LICENSE)
+
+## 技術スタック
+
+nextjs / tailwindcss / shadcn UI
+
+## 注意
+
+- AIは不適切なコンテンツを生成する可能性がありますので、注意してご使用ください。
diff --git a/README.md b/README.md
index 96ab943d..80172dcd 100644
--- a/README.md
+++ b/README.md
@@ -1,128 +1,48 @@
-# [Chat Chat](https://chat.okisdev.com)
+# Chat Chat

-> Chat Chat to unlock your next level AI conversational experience. You can use multiple APIs from OpenAI, Microsoft Azure, Claude, Cohere, Hugging Face, and more to make your AI conversation experience even richer.
-
-[![LICENSE](https://img.shields.io/github/license/okisdev/ChatChat?style=flat-square)](https://github.com/okisdev/ChatChat/blob/master/LICENSE) [![Twitter](https://img.shields.io/twitter/follow/okisdev)](https://twitter.com/okisdev) [![Telegram](https://img.shields.io/badge/Telegram-Chat%20Chat-blue?style=flat-square&logo=telegram)](https://t.me/+uWx9qtafv-BiNGVk)
+> Your own unified chat and search AI platform, with a simple and easy-to-use interface.
- English | 繁体中文 | 简体中文 | 日本語
+ 🇺🇸 | 🇭🇰 | 🇨🇳 | 🇯🇵
Documentation - | Common Issue
-## Important Notes
-
-- Some APIs are paid APIs, please make sure you have read and agreed to the relevant terms of service before use.
-- Some features are still under development, please submit PR or Issue.
-- The demo is for demonstration purposes only, it may retain some user data.
-- AI may generate offensive content, please use it with caution.
-
-## Preview
-
-### Interface
-
-![UI](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/UI-1.png)
-
-![Dashboard](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/Dashboard-1.png)
+## Interface

-### Functions
+![Search](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/chat.png)

-https://user-images.githubusercontent.com/66008528/235539101-562afbc8-cb62-41cc-84d9-1ea8ed83d435.mp4
-
-https://user-images.githubusercontent.com/66008528/235539163-35f7ee91-e357-453a-ae8b-998018e003a7.mp4
+![Chat](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/search.png)

 ## Features

-- [x] TTS
-- [x] Dark Mode
-- [x] Chat with files
-- [x] Markdown formatting
-- [x] Multi-language support
-- [x] Support for System Prompt
-- [x] Shortcut menu (command + k)
-- [x] Wrapped API (no more proxies)
-- [x] Support for sharing conversations
-- [x] Chat history (local and cloud sync)
-- [x] Support for streaming messages (SSE)
-- [x] Plugin support (`/search`, `/fetch`)
-- [x] Support for message code syntax highlighting
-- [x] Support for OpenAI, Microsoft Azure, Claude, Cohere, Hugging Face
-
-## Roadmap
-
-Please refer to https://github.com/users/okisdev/projects/7
+- Support major AI Providers (Anthropic, OpenAI, Cohere, Google Gemini, etc.)
+- Easy to self-host

 ## Usage

-### Prerequisites
-
-- Any API key from OpenAI, Microsoft Azure, Claude, Cohere, Hugging Face
-
-### Environment variables
-
-| variable name | description | default | mandatory | tips |
-| ----------------- | --------------------------- | ------- | --------- | ----------------------------------------------------------------------------------------------------------------- |
-| `DATABASE_URL` | Postgresql database address | | **Yes** | Start with `postgresql://` (if not required, please fill in `postgresql://user:password@example.com:port/dbname`) |
-| `NEXTAUTH_URL` | Your website URL | | **Yes** | (with prefix) |
-| `NEXTAUTH_SECRET` | NextAuth Secret | | **Yes** | Random hash (16 bits is best) |
-| `EMAIL_HOST` | SMTP Host | | No | |
-| `EMAIL_PORT` | SMTP Port | 587 | No | |
-| `EMAIL_USERNAME` | SMTP username | | No | |
-| `EMAIL_PASSWORD` | SMTP password | | No | |
-| `EMAIL_FROM` | SMTP sending address | | No | |
-
-### Deployment
+[docs](https://docs.okis.dev/chat)

-> Please modify the environment variables before deployment, more details can be found in the [documentation](https://docs.okis.dev/chat/deployment/).
-
-#### Local Deployment
+## Deployment

-```bash
-git clone https://github.com/okisdev/ChatChat.git
-cd ChatChat
-cp .env.example .env
-pnpm i
-pnpm dev
-```
-
-#### Vercel

 [![Deployed in Vercel](https://vercel.com/button)](https://vercel.com/import/project?template=https://github.com/okisdev/ChatChat)

-#### Zeabur
-
-Visit [Zeabur](https://zeabur.com) to deploy
-
-#### Railway
-
 [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/-WWW5r)

-#### Docker
-
-```bash
-docker build -t chatchat .
-docker run -p 3000:3000 chatchat -e DATABASE_URL="" -e NEXTAUTH_URL="" -e NEXTAUTH_SECRET="" -e EMAIL_HOST="" -e EMAIL_PORT="" -e EMAIL_USERNAME="" -e EMAIL_PASSWORD="" -e EMAIL_FROM="" -``` - -OR - -```bash -docker run -p 3000:3000 -e DATABASE_URL="" -e NEXTAUTH_URL="" -e NEXTAUTH_SECRET="" -e EMAIL_HOST="" -e EMAIL_PORT="" -e EMAIL_USERNAME="" -e EMAIL_PASSWORD="" -e EMAIL_FROM="" ghcr.io/okisdev/chatchat:latest -``` +more deployment methods in [docs](https://docs.okis.dev/chat) ## LICENSE [AGPL-3.0](./LICENSE) -## Support me +## Stack -[![Buy Me A Coffee](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/okisdev) +nextjs / tailwindcss / shadcn UI -## Technology Stack +## Note -nextjs / tailwindcss / shadcn UI +- AI may generate inappropriate content, please use it with caution. diff --git a/README.zh_CN.md b/README.zh_CN.md new file mode 100644 index 00000000..51567e11 --- /dev/null +++ b/README.zh_CN.md @@ -0,0 +1,48 @@ +# Chat Chat + +> 您自己的统一聊天和搜索至AI平台,界面简单易用。 + +
+ 🇺🇸 | 🇭🇰 | 🇨🇳 | 🇯🇵 +
+ +
+ + 文档 + +
+ +## 界面 + +![搜索](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/chat.png) + +![聊天](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/search.png) + +## 特点 + +- 支持主要的AI提供商(Anthropic、OpenAI、Cohere、Google Gemini等) +- 方便自托管 + +## 使用方式 + +[文档](https://docs.okis.dev/chat) + +## 部署 + +[![在Vercel中部署](https://vercel.com/button)](https://vercel.com/import/project?template=https://github.com/okisdev/ChatChat) + +[![在Railway上部署](https://railway.app/button.svg)](https://railway.app/template/-WWW5r) + +更多部署方法见[文档](https://docs.okis.dev/chat) + +## 许可证 + +[AGPL-3.0](./LICENSE) + +## 技术栈 + +nextjs / tailwindcss / shadcn UI + +## 注意事项 + +- AI可能会生成不适当的内容,请谨慎使用。 diff --git a/README.zh_HK.md b/README.zh_HK.md new file mode 100644 index 00000000..92bae77a --- /dev/null +++ b/README.zh_HK.md @@ -0,0 +1,48 @@ +# Chat Chat + +> 你的一體化聊天及搜索人工智能平台,界面簡單易用。 + +
+ 🇺🇸 | 🇭🇰 | 🇨🇳 | 🇯🇵 +
+ +
+ + 文件 + +
+ +## 介面 + +![搜索](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/chat.png) + +![聊天](https://cdn.harrly.com/project/GitHub/Chat-Chat/img/search.png) + +## 功能 + +- 支援主要人工智能供應商(Anthropic、OpenAI、Cohere、Google Gemini 等) +- 方便自行託管 + +## 使用方式 + +[文件](https://docs.okis.dev/chat) + +## 部署 + +[![在 Vercel 中部署](https://vercel.com/button)](https://vercel.com/import/project?template=https://github.com/okisdev/ChatChat) + +[![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/-WWW5r) + +更多部署方法在[文件](https://docs.okis.dev/chat) + +## 許可證 + +[AGPL-3.0](./LICENSE) + +## 技術棧 + +nextjs / tailwindcss / shadcn UI + +## 注意事項 + +- 人工智能可能會生成不當內容,請小心使用。 diff --git a/app/[locale]/(auth)/layout.tsx b/app/[locale]/(auth)/layout.tsx deleted file mode 100644 index 7b92e5e6..00000000 --- a/app/[locale]/(auth)/layout.tsx +++ /dev/null @@ -1,29 +0,0 @@ -import Image from 'next/image'; - -import { redirect } from 'next/navigation'; - -import { getCurrentUser } from '@/lib/auth/session'; - -import { siteConfig } from '@/config/site.config'; - -export default async function AuthLayout({ children }: { children: React.ReactNode }) { - const user = await getCurrentUser(); - - if (user) { - redirect('/dashboard/profile'); - } - - return ( -
-
{children}
-
-
-
- {siteConfig.title}
{siteConfig.title}
-
-
-
-
- ); -} diff --git a/app/[locale]/(auth)/login/page.tsx b/app/[locale]/(auth)/login/page.tsx deleted file mode 100644 index f34918ff..00000000 --- a/app/[locale]/(auth)/login/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import AuthForm from '@/components/auth/form'; -import AuthHeader from '@/components/auth/header'; -import AuthFooter from '@/components/auth/footer'; - -export default function LoginPage() { - return ( -
- - - - -
- ); -} diff --git a/app/[locale]/(auth)/register/page.tsx b/app/[locale]/(auth)/register/page.tsx deleted file mode 100644 index 51569300..00000000 --- a/app/[locale]/(auth)/register/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import AuthForm from '@/components/auth/form'; -import AuthHeader from '@/components/auth/header'; -import AuthFooter from '@/components/auth/footer'; - -export default function RegisterPage() { - return ( -
- - - - -
- ); -} diff --git a/app/[locale]/(dashboard)/dashboard/layout.tsx b/app/[locale]/(dashboard)/dashboard/layout.tsx deleted file mode 100644 index c211aef1..00000000 --- a/app/[locale]/(dashboard)/dashboard/layout.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import { redirect } from 'next/navigation'; - -import { getCurrentUser } from '@/lib/auth/session'; - -import DashboardNav from '@/components/dashboard/nav'; -import DashboardSide from '@/components/dashboard/side'; - -export default async function DashboardLayout({ children }: { children: React.ReactNode }) { - const user = await getCurrentUser(); - - if (!user) { - redirect('/login'); - } - - return ( -
- -
- -
{children}
-
-
- ); -} diff --git a/app/[locale]/(dashboard)/dashboard/profile/info/page.tsx b/app/[locale]/(dashboard)/dashboard/profile/info/page.tsx deleted file mode 100644 index 5f34e9c7..00000000 --- a/app/[locale]/(dashboard)/dashboard/profile/info/page.tsx +++ /dev/null @@ -1,19 +0,0 @@ -import { getCurrentUser } from '@/lib/auth/session'; - -import ProfileInfoForm from '@/components/dashboard/profile-info-form'; - -const ProfilePage = async () => { - const user = await getCurrentUser(); - - if (!user) { - return null; - } - - return ( -
- -
- ); -}; - -export default ProfilePage; diff --git a/app/[locale]/(dashboard)/dashboard/profile/record/page.tsx b/app/[locale]/(dashboard)/dashboard/profile/record/page.tsx deleted file mode 100644 index a3f1923c..00000000 --- a/app/[locale]/(dashboard)/dashboard/profile/record/page.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { redirect } from 'next/navigation'; - -import { User } from '@prisma/client'; - -import { database } from '@/lib/database'; -import { getCurrentUser } from '@/lib/auth/session'; - -import RecordCard from '@/components/dashboard/record/card'; -import RecordButton from '@/components/dashboard/record/button'; - -const getRecordsByUser = async (id: User['id']) => { - return await database.record.findMany({ - where: { - authorId: id, - }, - orderBy: { - createdAt: 'desc', - }, - }); -}; - -const ProfileRecordPage = async () => { - const user = await getCurrentUser(); - - if (!user) { - redirect('/login'); - } - - const records = await getRecordsByUser(user.id); - - return ( -
- -
- {records.length > 0 ? ( - records.map((record, index) => { - return <RecordCard key={index} record={record} />; - }) - ) : ( -
No conversation records found
- )} -
-
- ); -}; - -export default ProfileRecordPage; diff --git a/app/[locale]/(dashboard)/dashboard/profile/settings/page.tsx b/app/[locale]/(dashboard)/dashboard/profile/settings/page.tsx deleted file mode 100644 index a5f3cdde..00000000 --- a/app/[locale]/(dashboard)/dashboard/profile/settings/page.tsx +++ /dev/null @@ -1,19 +0,0 @@ -import { getCurrentUser, getCurrentUserProfile } from '@/lib/auth/session'; - -import ProfileSettingsForm from '@/components/dashboard/profile-settings-form'; - -const ProfileSettingsPage = async () => { - const userProfile = await getCurrentUserProfile(); - - if (!userProfile) { - return null; - } - - return ( -
- -
- ); -}; - -export default ProfileSettingsPage; diff --git a/app/[locale]/(dashboard)/dashboard/team/info/page.tsx b/app/[locale]/(dashboard)/dashboard/team/info/page.tsx deleted file mode 100644 index d86cc0a0..00000000 --- a/app/[locale]/(dashboard)/dashboard/team/info/page.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import { database } from '@/lib/database'; -import { getCurrentUser } from '@/lib/auth/session'; - -import { Team } from '@prisma/client'; - -import TeamCard from '@/components/dashboard/team/card'; -import JoinButton from '@/components/dashboard/team/join-button'; -import CreateButton from '@/components/dashboard/team/create-button'; - -const getTeamsByAuthorId = async (authorId: string) => { - return await database.team.findMany({ - where: { - authorId: authorId, - }, - }); -}; - -const getTeamsByMemberId = async (memberId: string) => { - return await database.team.findMany({ - where: { - members: { - some: { - userId: memberId, - }, - }, - }, - }); -}; - -const ProfileTeamInfoPage = async () => { - const currentUser = await getCurrentUser(); - - if (!currentUser) { - return null; - } - - const teamsByAuthor = await getTeamsByAuthorId(currentUser.id); - - const teamsByMember = await getTeamsByMemberId(currentUser.id); - - const allTeams = Array.from(new Set([...teamsByAuthor, ...teamsByMember].map((team) => team.id))).map((id) => { - const team = teamsByAuthor.find((team) => team.id === id) || teamsByMember.find((team) => team.id === id); - const isAuthor = (team && team.authorId === currentUser.id) || false; - return { ...team, isAuthor }; - }) as (Team & { isAuthor: boolean })[]; - - return ( -
-
- - -
-
- {allTeams.length > 0 ? allTeams.map((team, index) => <TeamCard key={index} team={team} />) :
No teams found
} -
-
- ); -}; - -export default ProfileTeamInfoPage; diff --git a/app/[locale]/(home)/(chat)/page.tsx b/app/[locale]/(home)/(chat)/page.tsx new file mode 100644 index 00000000..5ea990d0 --- /dev/null +++ b/app/[locale]/(home)/(chat)/page.tsx @@ -0,0 +1,161 @@ +'use client'; + +import { useEffect, useRef, useState } from 'react'; +import { useChat } from 'ai/react'; +import { useAtom } from 'jotai'; +import { useRouter, useSearchParams } from 'next/navigation'; +import { useTranslations } from 'next-intl'; + +import { AddButton } from '@/components/layout/add-button'; +import { ConversationWindow } from '@/components/layout/chat/conversation-window'; +import { InputBox } from '@/components/layout/chat/input-box'; +import { ModelSelect } from '@/components/layout/model-select'; +import { ShareButton } from '@/components/layout/share-button'; +import { getLocalStorage } from '@/hooks/storage'; +import store from '@/hooks/store'; +import { ApiConfig } from '@/types/app'; +import { Conversation } from '@/types/conversation'; +import { ProviderSetting, SpecifiedProviderSetting } from '@/types/settings'; +import { whatTimeOfDay } from '@/utils/app/time'; + +export default function Chat() { + const searchParams = useSearchParams(); + + const router = useRouter(); + + const inputRef = useRef(null); + + const chat = searchParams.get('chat'); + + const prevChatRef = useRef(); + + useEffect(() => { + prevChatRef.current = chat!; + }); + + const prevChat = prevChatRef.current; + + const t = useTranslations(''); + + const [currentConversationUUID, setCurrentConversationUUID] = useState(); + + const [currentUseModel] = useAtom(store.currentUseModelAtom); + const [advancedSettings] = useAtom(store.advancedSettingsAtom); + const [conversationSettings] = useAtom(store.conversationSettingsAtom); + + const [conversations, setConversations] = useAtom(store.conversationsAtom); + + const [currentProviderSettings] = useAtom(store.currentProviderSettingsAtom); + + const { messages, setMessages, isLoading, input, handleInputChange, handleSubmit, stop } = useChat({ + initialInput: conversationSettings.systemPrompt ?? undefined, + api: advancedSettings.unifiedEndpoint ? 
'/api/chat/messages' : `/api/chat/messages/${currentUseModel.provider.toString().toLowerCase()}`, + body: { + uuid: currentConversationUUID, + config: { + provider: currentProviderSettings?.[currentUseModel.provider.toString() as keyof ProviderSetting] as SpecifiedProviderSetting, + model: currentUseModel, + stream: advancedSettings.streamMessages, + numberOfContext: conversationSettings.numOfContext, + } satisfies ApiConfig, + }, + }); + + const handleSubmitChat = (e: React.FormEvent) => { + e.preventDefault(); + + handleSubmit(e); + }; + + const handleStop = (e: React.FormEvent) => { + e.preventDefault(); + + stop(); + }; + + const checkIfConversationExists = (uuid: string) => conversations?.find((conversation: Conversation) => conversation.id === uuid); + + const updateConversation = (uuid: string) => { + const index = conversations?.findIndex((conversation: Conversation) => conversation.id === uuid); + if (index !== -1 && conversations) { + const updatedConversations = [...conversations]; + updatedConversations[index!].conversation = messages; + updatedConversations[index!].updatedAt = new Date().toISOString(); + + setConversations(updatedConversations); + } + }; + + const appendConversation = (conversation: Conversation) => { + if (messages.length == 0) { + return; + } + + const previousConversations = getLocalStorage('conversations'); + + const updatedConversations = previousConversations ? [...previousConversations, conversation] : [conversation]; + + setConversations(updatedConversations); + }; + + useEffect(() => { + if (chat && chat !== prevChat) { + setCurrentConversationUUID(chat); + + const conversation = checkIfConversationExists(chat); + + setMessages(conversation?.conversation || []); + + if (conversation) { + updateConversation(chat); + } + } else { + if (!currentConversationUUID) { + setCurrentConversationUUID(crypto.randomUUID()); + } + + if (currentConversationUUID && messages.length > 0) { + const conversationExists = checkIfConversationExists(currentConversationUUID); + + if (conversationExists) { + updateConversation(currentConversationUUID); + } else { + appendConversation({ + id: currentConversationUUID, + createdAt: new Date().toISOString(), + updatedAt: new Date().toISOString(), + conversation: messages, + }); + } + } + } + }, [chat, currentConversationUUID, messages, prevChat]); + + useEffect(() => { + inputRef.current?.focus(); + }, []); + + return ( +
+
+
+ + +
+
+ +
+
+ {messages.length > 0 ? ( +
+ +
+ ) : ( +
+
{t(whatTimeOfDay())}
+
+ )} +
+ ); +} diff --git a/app/[locale]/(home)/action.tsx b/app/[locale]/(home)/action.tsx new file mode 100644 index 00000000..fe47d4a6 --- /dev/null +++ b/app/[locale]/(home)/action.tsx @@ -0,0 +1,156 @@ +import { ExperimentalMessage } from 'ai'; +import { createAI, createStreamableUI, createStreamableValue, getMutableAIState, StreamableValue } from 'ai/rsc'; + +import { AskFollowUpQuestion } from '@/components/layout/search/block/ask-follow-up-question'; +import { BlockError } from '@/components/layout/search/block/error'; +import { Searching } from '@/components/layout/search/block/searching'; +import { Provider } from '@/config/provider'; +import { challenger } from '@/lib/search/challenger'; +import { clarifier } from '@/lib/search/clarifier'; +import { illustrator } from '@/lib/search/illustrator'; +import { searcher } from '@/lib/search/searcher'; +import { SimpleModel } from '@/types/model'; +import { SearchEngineSetting, TChallengerAction } from '@/types/search'; +import { ProviderSetting } from '@/types/settings'; + +const allowProvider = ['OpenAI'] as Provider[]; + +const chat = async (model: SimpleModel, messages: ExperimentalMessage[]) => { + 'use server'; +}; + +const search = async ( + model: SimpleModel, + currentProviderSettings: ProviderSetting | null, + currentSearchEngineSettings: SearchEngineSetting | null, + formData?: FormData, + isProSearch: boolean = false, + skip?: boolean +) => { + 'use server'; + + if (!allowProvider.includes(model?.provider)) { + return { + id: Date.now(), + isGenerating: false, + component: , + }; + } + + const hasOpenAI = process.env['NEXT_PUBLIC_ACCESS_OPENAI'] == 'true'; + const hasTavily = process.env['NEXT_PUBLIC_ACCESS_TAVILY_SEARCH'] == 'true'; + + if (!hasOpenAI && !currentProviderSettings?.OpenAI) { + return { + id: Date.now(), + isGenerating: false, + component: , + }; + } + + if (!hasTavily && !currentSearchEngineSettings?.Tavily) { + return { + id: Date.now(), + isGenerating: false, + component: , + }; + } + + const aiState = getMutableAIState(); + const uiStream = createStreamableUI(); + const isGenerating = createStreamableValue(true); + + const messages: ExperimentalMessage[] = aiState.get() as any; + + const question = formData?.get('input') as string; + + const userInput = skip ? `{"action": "skip"}` : question; + const content = skip ? userInput : formData ? 
JSON.stringify(Object.fromEntries(formData)) : null; + + if (content) { + const message = { role: 'user', content }; + messages.push(message as ExperimentalMessage); + aiState.update([...(aiState.get() as any), message]); + } + + (async () => { + uiStream.update(); + + let action = { + object: { + next: 'proceed', + }, + } as TChallengerAction; + + if (isProSearch) { + if (!skip) { + const challenge = await challenger(messages, model, currentProviderSettings); + + action = challenge; + } + + if (action.object.next === 'challenge') { + const clarify = await clarifier(uiStream, messages, model, currentProviderSettings); + + uiStream.done(); + isGenerating.done(); + + aiState.done([ + ...aiState.get(), + { + role: 'assistant', + content: `clarify: ${clarify?.question}`, + }, + ]); + return; + } + } + + let answer = ''; + + const streamText = createStreamableValue(); + + while (answer.length === 0) { + const { fullResponse } = await searcher(uiStream, streamText, messages, isProSearch, model, currentSearchEngineSettings, currentProviderSettings); + answer = fullResponse; + } + + streamText.done(); + + await illustrator(uiStream, messages, model, currentProviderSettings); + + uiStream.append(); + + isGenerating.done(false); + + uiStream.done(); + + aiState.done([...aiState.get(), { role: 'assistant', content: answer }]); + })(); + + return { + id: Date.now(), + isGenerating: isGenerating.value, + component: uiStream.value, + }; +}; + +const initialAIState: { + role: 'user' | 'assistant' | 'system' | 'function' | 'tool'; + content: string; +}[] = []; + +const initialUIState: { + id: number; + isGenerating: StreamableValue; + component: React.ReactNode; +}[] = []; + +export const AI = createAI({ + actions: { + chat, + search, + }, + initialUIState, + initialAIState, +}); diff --git a/app/[locale]/(home)/layout.tsx b/app/[locale]/(home)/layout.tsx new file mode 100644 index 00000000..e57e4f0c --- /dev/null +++ b/app/[locale]/(home)/layout.tsx @@ -0,0 +1,16 @@ +import HomeProvider from '@/app/[locale]/(home)/provider'; +import AppSidebar from '@/app/[locale]/(home)/sidebar'; + +export default async function AppLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + + {children} + + ); +} diff --git a/app/[locale]/(home)/provider.tsx b/app/[locale]/(home)/provider.tsx new file mode 100644 index 00000000..276b4f13 --- /dev/null +++ b/app/[locale]/(home)/provider.tsx @@ -0,0 +1,5 @@ +import { AI as AiProvider } from '@/app/[locale]/(home)/action'; + +export default function HomeProvider({ children }: Readonly<{ children: React.ReactNode }>) { + return {children}; +} diff --git a/app/[locale]/(home)/search/page.tsx b/app/[locale]/(home)/search/page.tsx new file mode 100644 index 00000000..6b44ee56 --- /dev/null +++ b/app/[locale]/(home)/search/page.tsx @@ -0,0 +1,31 @@ +'use client'; + +import { AddButton } from '@/components/layout/add-button'; +import { ModelSelect } from '@/components/layout/model-select'; +import { SearchWindow } from '@/components/layout/search/search-window'; +import {SearchSelect} from '@/components/layout/search-select'; +import { ShareButton } from '@/components/layout/share-button'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export default function Search() { + return ( +
+
+
+ + + +
+
+ +
+
+
+ +
+
+ ); +} diff --git a/app/[locale]/(home)/sidebar.tsx b/app/[locale]/(home)/sidebar.tsx new file mode 100644 index 00000000..ae0a578a --- /dev/null +++ b/app/[locale]/(home)/sidebar.tsx @@ -0,0 +1,38 @@ +'use client'; + +import { Suspense } from 'react'; +import { useAtom } from 'jotai'; + +import { Brand } from '@/components/layout/brand'; +import { HistoryList } from '@/components/layout/history-list'; +import { LanguageDropdown } from '@/components/layout/language-dropdown'; +import { SettingsDialog } from '@/components/layout/settings-dialog'; +import { SettingsDrawer } from '@/components/layout/settings-drawer'; +import { ThemeDropdown } from '@/components/layout/theme-dropdown'; +import store from '@/hooks/store'; +import { useMediaQuery } from '@/hooks/window'; + +export default function AppSidebar() { + const [conversations, setConversations] = useAtom(store.conversationsAtom); + + const isDesktop = useMediaQuery('(min-width: 768px)'); + + return ( +
+ +
+
+ +
+
+ {isDesktop ? <SettingsDialog /> : <SettingsDrawer />} +
+ + + + +
+
+
+ ); +} diff --git a/app/[locale]/(landing)/layout.tsx b/app/[locale]/(landing)/layout.tsx deleted file mode 100644 index 89b3aed8..00000000 --- a/app/[locale]/(landing)/layout.tsx +++ /dev/null @@ -1,14 +0,0 @@ -import LandingSide from '@/components/landing/side'; - -import { getCurrentUserProfile } from '@/lib/auth/session'; - -export default async function LandingLayout({ children }: { children: React.ReactNode }) { - const userProfile = await getCurrentUserProfile(); - - return ( -
- - {children} -
- ); -} diff --git a/app/[locale]/(landing)/mode/chat/page.tsx b/app/[locale]/(landing)/mode/chat/page.tsx deleted file mode 100644 index b4fbf99c..00000000 --- a/app/[locale]/(landing)/mode/chat/page.tsx +++ /dev/null @@ -1,18 +0,0 @@ -'use client'; - -import store from '@/hooks/store'; -import { useAtomValue } from 'jotai'; - -import LandingHeader from '@/components/landing/main/header'; -import ChatMain from '@/components/landing/main/chat-main'; - -export default function ChatModePage() { - const isHiddenSide = useAtomValue(store.isHiddenSideAtom); - - return ( -
- - -
- ); -} diff --git a/app/[locale]/(landing)/mode/code/page.tsx b/app/[locale]/(landing)/mode/code/page.tsx deleted file mode 100644 index 3bdfdbe1..00000000 --- a/app/[locale]/(landing)/mode/code/page.tsx +++ /dev/null @@ -1,18 +0,0 @@ -'use client'; - -import store from '@/hooks/store'; -import { useAtomValue } from 'jotai'; - -import LandingHeader from '@/components/landing/main/header'; -import CodeMain from '@/components/landing/main/code-main'; - -export default function CodeModePage() { - const isHiddenSide = useAtomValue(store.isHiddenSideAtom); - - return ( -
- - -
- ); -} diff --git a/app/[locale]/(landing)/mode/file/page.tsx b/app/[locale]/(landing)/mode/file/page.tsx deleted file mode 100644 index 92a37835..00000000 --- a/app/[locale]/(landing)/mode/file/page.tsx +++ /dev/null @@ -1,18 +0,0 @@ -'use client'; - -import store from '@/hooks/store'; -import { useAtomValue } from 'jotai'; - -import LandingHeader from '@/components/landing/main/header'; -import FileMain from '@/components/landing/main/file-main'; - -export default function FileModePage() { - const isHiddenSide = useAtomValue(store.isHiddenSideAtom); - - return ( -
- - -
- ); -} diff --git a/app/[locale]/(landing)/page.tsx b/app/[locale]/(landing)/page.tsx deleted file mode 100644 index d4f9bde9..00000000 --- a/app/[locale]/(landing)/page.tsx +++ /dev/null @@ -1,7 +0,0 @@ -import dynamic from 'next/dynamic'; - -const ChatMode = dynamic(() => import('@/app/[locale]/(landing)/mode/chat/page'), {}); - -export default function LandingPage() { - return ; -} diff --git a/app/[locale]/(share)/s/[shareID]/page.tsx b/app/[locale]/(share)/s/[shareID]/page.tsx deleted file mode 100644 index 1aa3bf12..00000000 --- a/app/[locale]/(share)/s/[shareID]/page.tsx +++ /dev/null @@ -1,38 +0,0 @@ -import { notFound } from 'next/navigation'; - -import { database } from '@/lib/database'; - -import { Share } from '@prisma/client'; - -import SharePost from '@/components/share/post'; - -const getShareByShareID = async (shareID: Share['id']) => { - return await database.share.findFirst({ - where: { - id: shareID, - }, - }); -}; - -export default async function SharePage({ params }: { params: { shareID: string } }) { - const share = await getShareByShareID(params.shareID); - - if (!share) { - notFound(); - } - - return ( -
- -
- ); -} diff --git a/app/[locale]/(share)/s/layout.tsx b/app/[locale]/(share)/s/layout.tsx deleted file mode 100644 index 88f42f25..00000000 --- a/app/[locale]/(share)/s/layout.tsx +++ /dev/null @@ -1,3 +0,0 @@ -export default function ShareLayout({ children }: { children: React.ReactNode }) { - return
{children}
; -} diff --git a/app/[locale]/layout.tsx b/app/[locale]/layout.tsx index 22d634d4..9e9553f5 100644 --- a/app/[locale]/layout.tsx +++ b/app/[locale]/layout.tsx @@ -1,29 +1,15 @@ -import '@/styles/globals.css'; -import '@/styles/markdown.css'; -import 'tippy.js/dist/tippy.css'; - -import { NextIntlClientProvider } from 'next-intl'; - -import { HotToaster } from '@/components/client/toaster'; -import { ClientCommand } from '@/components/client/command'; - -import { notFound } from 'next/navigation'; +import LocaleProvider from '@/app/[locale]/provider'; -export default async function LocaleLayout({ children, params: { locale } }: { children: React.ReactNode; params: { locale: string } }) { - let locales; - - try { - locales = (await import(`../../locales/${locale}.json`)).default; - } catch (error) { - notFound(); - } +import '@/styles/globals.css'; +export default function LocaleLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { return ( - - - - - {children} - + + {children} + ); } diff --git a/app/[locale]/provider.tsx b/app/[locale]/provider.tsx new file mode 100644 index 00000000..d792cd27 --- /dev/null +++ b/app/[locale]/provider.tsx @@ -0,0 +1,7 @@ +import { NextIntlClientProvider, useMessages } from 'next-intl'; + +export default function LocaleProvider({ children }: Readonly<{ children: React.ReactNode }>) { + const messages = useMessages(); + + return {children}; +} diff --git a/app/api/app/latest/route.ts b/app/api/app/latest/route.ts new file mode 100644 index 00000000..f6e9f02d --- /dev/null +++ b/app/api/app/latest/route.ts @@ -0,0 +1,13 @@ +import { getLatestVersion } from '@/utils/app/version'; + +export async function GET(request: Request) { + const latestVersion = await getLatestVersion({ owner: 'okisdev', repo: 'ChatChat' }); + + return Response.json( + { + short: { version: latestVersion.tag_name, version_name: latestVersion.name }, + details: latestVersion, + }, + { status: 200 } + ); +} diff --git a/app/api/author/route.ts b/app/api/author/route.ts deleted file mode 100644 index db093c2e..00000000 --- a/app/api/author/route.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { NextResponse } from 'next/server'; - -export const runtime = 'edge'; - -export async function GET(request: Request) { - return NextResponse.json({ author: 'Harry Yep' }, { status: 200 }); -} diff --git a/app/api/chat/messages/amazon/route.ts b/app/api/chat/messages/amazon/route.ts new file mode 100644 index 00000000..9af0dd6b --- /dev/null +++ b/app/api/chat/messages/amazon/route.ts @@ -0,0 +1,51 @@ +// import { BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-runtime'; +// import { AWSBedrockAnthropicStream, StreamingTextResponse } from 'ai'; +// import { experimental_buildAnthropicPrompt } from 'ai/prompts'; + +// import { ApiConfig } from '@/types/app'; + +// export const runtime = 'edge'; + +// export const dynamic = 'force-dynamic'; + +// const amazon = new BedrockRuntimeClient({ +// region: process.env.AWS_REGION ?? 'us-east-1', +// credentials: { +// accessKeyId: process.env.AWS_ACCESS_KEY ?? '', +// secretAccessKey: process.env.AWS_SECRET_KEY ?? 
'', +// }, +// }); + +// export async function POST(req: Request) { +// const { +// messages, +// config, +// stream, +// }: { +// messages: any[]; +// config: ApiConfig; +// stream: boolean; +// } = await req.json(); + +// const response = await amazon.send( +// new InvokeModelWithResponseStreamCommand({ +// modelId: config.model.model_id, +// contentType: 'application/json', +// accept: 'application/json', +// body: JSON.stringify({ +// prompt: experimental_buildAnthropicPrompt(messages), +// max_tokens_to_sample: 300, +// stream: stream, +// }), +// }) +// ); + +// const output = AWSBedrockAnthropicStream(response); + +// return new StreamingTextResponse(output); +// } + + +export async function GET(req: Request) { + return Response.json({ error: 'Method Not Allowed' }, { status: 405 }); +} diff --git a/app/api/chat/messages/anthropic/route.ts b/app/api/chat/messages/anthropic/route.ts new file mode 100644 index 00000000..2146cde5 --- /dev/null +++ b/app/api/chat/messages/anthropic/route.ts @@ -0,0 +1,35 @@ +import Anthropic from '@anthropic-ai/sdk'; +import { AnthropicStream, StreamingTextResponse } from 'ai'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const anthropic = new Anthropic({ + apiKey: config.provider?.apiKey ?? process.env.ANTHROPIC_API_KEY ?? '', + }); + + const response = await anthropic.messages.create({ + messages, + model: config.model.model_id, + stream: true, + max_tokens: 4096, + }); + + const output = AnthropicStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/azure/route.ts b/app/api/chat/messages/azure/route.ts new file mode 100644 index 00000000..252c9b85 --- /dev/null +++ b/app/api/chat/messages/azure/route.ts @@ -0,0 +1,31 @@ +// import { SimpleModel } from '@/types/model'; +// import { OpenAIClient, AzureKeyCredential } from '@azure/openai'; +// import { OpenAIStream, StreamingTextResponse } from 'ai'; + +// const client = new OpenAIClient(process.env.AZURE_OPENAI_ENDPOINT ?? '', new AzureKeyCredential(process.env.AZURE_OPENAI_API_KEY ?? 
'')); + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +// export async function POST(req: Request) { +// const { +// messages, +// model, +// }: { stream +// }: { +// messages: any[]; +// model: SimpleModel; +// stream: boolean; +// } = await req.json(); + +// const response = await client.streamChatCompletions(process.env.AZURE_OPENAI_DEPLOY_INSTANCE_NAME || '', messages); + +// const stream = OpenAIStream(response); + +// return new StreamingTextResponse(stream); +// } + +export async function GET(req: Request) { + return Response.json({ error: 'Method Not Allowed' }, { status: 405 }); +} diff --git a/app/api/chat/messages/cohere/route.ts b/app/api/chat/messages/cohere/route.ts new file mode 100644 index 00000000..793a15e4 --- /dev/null +++ b/app/api/chat/messages/cohere/route.ts @@ -0,0 +1,51 @@ +import { Message } from 'ai'; +import { CohereClient } from 'cohere-ai'; + +import { ApiConfig } from '@/types/app'; +import { toCohereRole } from '@/utils/provider/cohere'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const chatHistory = messages.map((message: Message) => ({ + message: message.content, + role: toCohereRole(message.role), + })); + + const lastMessage = chatHistory.pop()!; + + const cohere = new CohereClient({ + token: config.provider?.apiKey ?? process.env.COHERE_API_KEY ?? '', + }); + + const response = await cohere.chatStream({ + message: lastMessage.message, + chatHistory, + model: config.model.model_id, + }); + + const output = new ReadableStream({ + async start(controller) { + for await (const event of response) { + if (event.eventType === 'text-generation') { + controller.enqueue(event.text); + } + } + controller.close(); + }, + }); + + return new Response(output); +} diff --git a/app/api/chat/messages/fireworks/route.ts b/app/api/chat/messages/fireworks/route.ts new file mode 100644 index 00000000..5ce9032b --- /dev/null +++ b/app/api/chat/messages/fireworks/route.ts @@ -0,0 +1,36 @@ +import { OpenAIStream, StreamingTextResponse } from 'ai'; +import OpenAI from 'openai'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const fireworks = new OpenAI({ + apiKey: config.provider.apiKey ?? process.env.FIREWORKS_API_KEY ?? 
'', + baseURL: 'https://api.fireworks.ai/inference/v1', + }); + + const response = await fireworks.chat.completions.create({ + model: config.model.model_id, + stream: true, + max_tokens: 1000, + messages, + }); + + const output = OpenAIStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/google/route.ts b/app/api/chat/messages/google/route.ts new file mode 100644 index 00000000..833dd313 --- /dev/null +++ b/app/api/chat/messages/google/route.ts @@ -0,0 +1,29 @@ +import { GoogleGenerativeAI } from '@google/generative-ai'; +import { GoogleGenerativeAIStream, StreamingTextResponse } from 'ai'; + +import { ApiConfig } from '@/types/app'; +import { toGoogleMessage } from '@/utils/provider/google'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const genAI = new GoogleGenerativeAI(config.provider?.apiKey ?? process.env.GOOGLE_API_KEY ?? ''); + + const response = await genAI.getGenerativeModel({ model: config.model.model_id }).generateContentStream(toGoogleMessage(messages)); + + const output = GoogleGenerativeAIStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/groq/route.ts b/app/api/chat/messages/groq/route.ts new file mode 100644 index 00000000..462a8427 --- /dev/null +++ b/app/api/chat/messages/groq/route.ts @@ -0,0 +1,34 @@ +import { OpenAIStream, StreamingTextResponse } from 'ai'; +import Groq from 'groq-sdk'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const groq = new Groq({ + apiKey: config.provider?.apiKey ?? process.env.GROQ_API_KEY ?? '', + }); + + const response = await groq.chat.completions.create({ + model: config.model.model_id, + stream: true, + messages, + }); + + const output = OpenAIStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/huggingface/route.ts b/app/api/chat/messages/huggingface/route.ts new file mode 100644 index 00000000..3528a1a3 --- /dev/null +++ b/app/api/chat/messages/huggingface/route.ts @@ -0,0 +1,39 @@ +import { HfInference } from '@huggingface/inference'; +import { HuggingFaceStream, StreamingTextResponse } from 'ai'; +import { experimental_buildOpenAssistantPrompt } from 'ai/prompts'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const huggingface = new HfInference(config.provider?.apiKey ?? process.env.HUGGINGFACE_API_KEY ?? 
''); + + const response = huggingface.textGenerationStream({ + model: config.model.model_id, + inputs: experimental_buildOpenAssistantPrompt(messages), + parameters: { + max_new_tokens: 200, + typical_p: 0.2, + repetition_penalty: 1, + truncate: 1000, + return_full_text: false, + }, + }); + + const output = HuggingFaceStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/mistral/route.ts b/app/api/chat/messages/mistral/route.ts new file mode 100644 index 00000000..a684040a --- /dev/null +++ b/app/api/chat/messages/mistral/route.ts @@ -0,0 +1,32 @@ +import MistralClient from '@mistralai/mistralai'; +import { MistralStream, StreamingTextResponse } from 'ai'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const mistral = new MistralClient(config.provider?.apiKey ?? process.env.MISTRAL_API_KEY ?? ''); + + const response = mistral.chatStream({ + model: config.model.model_id, + maxTokens: 1000, + messages, + }); + + const output = MistralStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/openai/route.ts b/app/api/chat/messages/openai/route.ts new file mode 100644 index 00000000..4a4d0569 --- /dev/null +++ b/app/api/chat/messages/openai/route.ts @@ -0,0 +1,35 @@ +import { OpenAIStream, StreamingTextResponse } from 'ai'; +import OpenAI from 'openai'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const openai = new OpenAI({ + apiKey: config.provider?.apiKey ?? process.env.OPENAI_API_KEY ?? '', + baseURL: config.provider?.endpoint ?? process.env.OPENAI_API_ENDPOINT ?? 'https://api.openai.com/v1', + }); + + const response = await openai.chat.completions.create({ + model: config.model.model_id, + stream: true, + messages, + }); + + const output = OpenAIStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/perplexity/route.ts b/app/api/chat/messages/perplexity/route.ts new file mode 100644 index 00000000..567ee82f --- /dev/null +++ b/app/api/chat/messages/perplexity/route.ts @@ -0,0 +1,36 @@ +import { OpenAIStream, StreamingTextResponse } from 'ai'; +import OpenAI from 'openai'; + +import { ApiConfig } from '@/types/app'; + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + const perplexity = new OpenAI({ + apiKey: config.provider?.apiKey ?? process.env.PERPLEXITY_API_KEY ?? '', + baseURL: config.provider?.endpoint ?? process.env.PERPLEXITY_ENDPOINT ?? 
'https://api.perplexity.ai/', + }); + + const response = await perplexity.chat.completions.create({ + model: config.model.model_id, + stream: true, + max_tokens: 4096, + messages, + }); + + const output = OpenAIStream(response); + + return new StreamingTextResponse(output); +} diff --git a/app/api/chat/messages/route.ts b/app/api/chat/messages/route.ts new file mode 100644 index 00000000..0090f6b4 --- /dev/null +++ b/app/api/chat/messages/route.ts @@ -0,0 +1,191 @@ +import Anthropic from '@anthropic-ai/sdk'; +// import { BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-runtime'; +import { GoogleGenerativeAI } from '@google/generative-ai'; +import { HfInference } from '@huggingface/inference'; +import MistralClient from '@mistralai/mistralai'; +import { AnthropicStream, GoogleGenerativeAIStream, HuggingFaceStream, Message, MistralStream, OpenAIStream, StreamingTextResponse } from 'ai'; +import { experimental_buildOpenAssistantPrompt } from 'ai/prompts'; +import { CohereClient } from 'cohere-ai'; +import Groq from 'groq-sdk'; +import OpenAI from 'openai'; + +import { Provider } from '@/config/provider'; +import { ApiConfig } from '@/types/app'; +import { toCohereRole } from '@/utils/provider/cohere'; +import { toGoogleMessage } from '@/utils/provider/google'; + +// const amazon = new BedrockRuntimeClient({ +// region: process.env.AWS_REGION ?? 'us-east-1', +// credentials: { +// accessKeyId: process.env.AWS_ACCESS_KEY ?? '', +// secretAccessKey: process.env.AWS_SECRET_KEY ?? '', +// }, +// }); + +const anthropic = new Anthropic({ + apiKey: process.env.ANTHROPIC_API_KEY ?? '', +}); + +const cohere = new CohereClient({ + token: process.env.COHERE_API_KEY ?? '', +}); + +const fireworks = new OpenAI({ + apiKey: process.env.FIREWORKS_API_KEY ?? '', + baseURL: 'https://api.fireworks.ai/inference/v1', +}); + +const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY ?? ''); + +const groq = new Groq({ + apiKey: process.env.GROQ_API_KEY ?? '', +}); + +const huggingface = new HfInference(process.env.HUGGINGFACE_API_KEY); + +const mistral = new MistralClient(process.env.MISTRAL_API_KEY ?? ''); + +const openai = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY ?? '', +}); + +const perplexity = new OpenAI({ + apiKey: process.env.PERPLEXITY_API_KEY ?? '', + baseURL: process.env.PERPLEXITY_ENDPOINT ?? 
'https://api.perplexity.ai/', +}); + +export const runtime = 'edge'; + +export const dynamic = 'force-dynamic'; + +export async function POST(req: Request) { + const { + messages, + config, + stream, + }: { + messages: any[]; + config: ApiConfig; + stream: boolean; + } = await req.json(); + + switch (config.model.provider) { + // case Provider.Amazon: { + // const response = await amazon.send( + // new InvokeModelWithResponseStreamCommand({ + // modelId: model.model_id, + // contentType: 'application/json', + // accept: 'application/json', + // body: JSON.stringify({ + // prompt: experimental_buildAnthropicPrompt(messages), + // max_tokens_to_sample: 300, + // }), + // }) + // ); + // const output = AWSBedrockAnthropicStream(response); + // return new StreamingTextResponse(output); + // } + case Provider.Anthropic: { + const response = await anthropic.messages.create({ + messages, + model: config.model.model_id, + stream: true, + max_tokens: 4096, + }); + const output = AnthropicStream(response); + return new StreamingTextResponse(output); + } + case Provider.Cohere: { + const cohereChatHistory = messages.map((message: Message) => ({ + message: message.content, + role: toCohereRole(message.role), + })); + const lastMessage = cohereChatHistory.pop()!; + const response = await cohere.chatStream({ + message: lastMessage.message, + chatHistory: cohereChatHistory, + model: config.model.model_id, + }); + const output = new ReadableStream({ + async start(controller) { + for await (const event of response) { + if (event.eventType === 'text-generation') { + controller.enqueue(event.text); + } + } + controller.close(); + }, + }); + return new StreamingTextResponse(output); + } + case Provider.Fireworks: { + const response = await fireworks.chat.completions.create({ + model: config.model.model_id, + stream: true, + max_tokens: 1000, + messages, + }); + const output = OpenAIStream(response); + return new StreamingTextResponse(output); + } + case Provider.Google: { + const geminiStream = await genAI.getGenerativeModel({ model: config.model.model_id }).generateContentStream(toGoogleMessage(messages)); + const output = GoogleGenerativeAIStream(geminiStream); + return new StreamingTextResponse(output); + } + case Provider.Groq: { + const response = await groq.chat.completions.create({ + model: config.model.model_id, + stream: true, + messages, + }); + const output = OpenAIStream(response); + return new StreamingTextResponse(output); + } + case Provider.HuggingFace: { + const response = huggingface.textGenerationStream({ + model: config.model.model_id, + inputs: experimental_buildOpenAssistantPrompt(messages), + parameters: { + max_new_tokens: 200, + typical_p: 0.2, + repetition_penalty: 1, + truncate: 1000, + return_full_text: false, + }, + }); + const output = HuggingFaceStream(response); + return new StreamingTextResponse(output); + } + case Provider.Mistral: { + const response = mistral.chatStream({ + model: config.model.model_id, + maxTokens: 1000, + messages, + }); + const output = MistralStream(response); + return new StreamingTextResponse(output); + } + case Provider.OpenAI: { + const response = await openai.chat.completions.create({ + model: config.model.model_id, + stream: true, + messages, + }); + const output = OpenAIStream(response); + return new StreamingTextResponse(output); + } + case Provider.Perplexity: { + const response = await perplexity.chat.completions.create({ + model: config.model.model_id, + stream: true, + max_tokens: 4096, + messages, + }); + const output = 
+                OpenAIStream(response);
+            return new StreamingTextResponse(output);
+        }
+        default:
+            return new Response('Invalid Provider', { status: 400 });
+    }
+}
diff --git a/app/api/messages/route.ts b/app/api/messages/route.ts
deleted file mode 100644
index 1e1e68e3..00000000
--- a/app/api/messages/route.ts
+++ /dev/null
@@ -1,104 +0,0 @@
-import { sendClaudeMessages } from '@/utils/provider/claude/messages';
-import { sendCohereMessages } from '@/utils/provider/cohere/messages';
-import { sendTeamStreamMessages } from '@/utils/provider/team/messages';
-import { sendHuggingFaceMessages } from '@/utils/provider/huggingface/messages';
-import { sendAzureMessages, sendAzureStreamMessages } from '@/utils/provider/azure/messages';
-import { sendOpenAIMessages, sendOpenAIStreamMessages } from '@/utils/provider/openai/messages';
-
-export const runtime = 'edge';
-
-export async function POST(req: Request): Promise<Response> {
-    const { stream, serviceProvider, config, messages } = await req.json();
-
-    switch (serviceProvider) {
-        default:
-        case 'OpenAI':
-            const openAIAPIKey = (config?.apiKey || process.env.OPENAI_API_KEY) as string;
-            const openAIAPIEndpoint = (config?.apiEndpoint || process.env.OPENAI_API_ENDPOINT) as string;
-            const openAIAPIModel = (config?.apiModel as OpenAIModel) || 'gpt-3.5-turbo';
-            const openAIAPITemperature = (config?.apiTemperature as number) || 0.3;
-
-            if (!messages) {
-                return new Response('No messages in the request', { status: 400 });
-            }
-
-            const openAIPayload: OpenAIChatPayload = {
-                model: openAIAPIModel as OpenAIModel,
-                messages: messages as OpenAIMessage[],
-                temperature: openAIAPITemperature as number,
-                stream: stream as boolean,
-            };
-
-            if (stream) {
-                const openAIStreamMessagesResponse = await sendOpenAIStreamMessages(openAIPayload, openAIAPIKey, openAIAPIEndpoint);
-
-                return new Response(openAIStreamMessagesResponse);
-            } else {
-                const openAIMessagesResponse = await sendOpenAIMessages(openAIPayload, openAIAPIKey, openAIAPIEndpoint);
-
-                return new Response(openAIMessagesResponse);
-            }
-        case 'Azure':
-            const azureAPIKey = config?.apiKey as string;
-            const azureAPIEndpoint = config?.apiEndpoint as string;
-            const azureAPIModel = (config?.apiModel as OpenAIModel) || 'gpt-3.5-turbo';
-            const azureAPITemperature = (config?.apiTemperature as number) || 0.3;
-            const azureAPIDeploymentName = config?.apiDeploymentName as string;
-
-            if (!messages) {
-                return new Response('No messages in the request', { status: 400 });
-            }
-
-            const azurePayload: OpenAIChatPayload = {
-                model: azureAPIModel as OpenAIModel,
-                messages: messages as OpenAIMessage[],
-                temperature: azureAPITemperature as number,
-                stream: stream as boolean,
-            };
-
-            if (stream) {
-                const azureStreamMessagesResponse = await sendAzureStreamMessages(azurePayload, azureAPIKey, azureAPIEndpoint, azureAPIDeploymentName);
-
-                return new Response(azureStreamMessagesResponse);
-            } else {
-                const azureMessagesResponse = await sendAzureMessages(azurePayload, azureAPIKey, azureAPIEndpoint, azureAPIDeploymentName);
-
-                return new Response(JSON.stringify(azureMessagesResponse));
-            }
-        case 'Team':
-            const teamAccessCode = config?.accessCode as string;
-
-            const teamPayload = {
-                messages: messages as OpenAIMessage[],
-            };
-
-            const teamStreamMessagesResponse = await sendTeamStreamMessages(teamPayload, teamAccessCode);
-
-            return new Response(teamStreamMessagesResponse);
-        case 'Hugging Face':
-            const huggingfaceModel = config?.model as string;
-            const huggingfaceAccessToken = config?.accessToken as string;
-            const huggingfaceMessage = messages[messages.length - 1]?.content;
-
-            const huggingFaceMessagesResponse = await sendHuggingFaceMessages(huggingfaceModel, huggingfaceMessage, huggingfaceAccessToken);
-
-            return new Response(JSON.stringify(huggingFaceMessagesResponse));
-        case 'Cohere':
-            const cohereModel = config?.model as string;
-            const cohereMessage = messages[messages.length - 1]?.content;
-            const cohereAPIKey = config?.apiKey as string;
-
-            const cohereMessagesResponse = await sendCohereMessages(cohereModel, cohereMessage, cohereAPIKey);
-
-            return new Response(JSON.stringify(cohereMessagesResponse));
-        case 'Claude':
-            const claudeModel = config?.model as string;
-            const claudeMessage = messages[messages.length - 1]?.content;
-            const claudeAPIKey = config?.apiKey as string;
-            const claudeAPITemperature = config?.apiTemperature as number;
-
-            const claudeMessagesResponse = await sendClaudeMessages(claudeModel, claudeMessage, claudeAPIKey, claudeAPITemperature);
-
-            return new Response(JSON.stringify(claudeMessagesResponse));
-    }
-}
diff --git a/app/api/search/google/route.ts b/app/api/search/google/route.ts
new file mode 100644
index 00000000..f33509f7
--- /dev/null
+++ b/app/api/search/google/route.ts
@@ -0,0 +1,87 @@
+import { experimental_streamText, ExperimentalMessage, StreamingTextResponse, ToolCallPart, ToolResultPart } from 'ai';
+import { OpenAI } from 'ai/openai';
+import { createStreamableUI, createStreamableValue } from 'ai/rsc';
+
+import { searcherPrompt } from '@/lib/prompt';
+import { searcherSchema } from '@/lib/search/searcher';
+import { ApiConfig } from '@/types/app';
+import { withGoogleSearch } from '@/utils/search/engines/google';
+
+export const runtime = 'edge';
+
+export const dynamic = 'force-dynamic';
+
+export async function POST(req: Request) {
+    const {
+        messages,
+        config,
+        stream,
+    }: {
+        messages: ExperimentalMessage[];
+        config: ApiConfig;
+        stream: boolean;
+    } = await req.json();
+
+    let fullResponse = '';
+
+    const streamText = createStreamableValue();
+
+    const uiStream = createStreamableUI();
+
+    const openai = new OpenAI({
+        apiKey: config.provider?.apiKey ?? process.env.OPENAI_API_KEY ?? '',
+        baseUrl: config.provider?.endpoint ?? process.env.OPENAI_API_ENDPOINT ??
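+            // per-request provider config takes precedence over env vars, then the public OpenAI base URL below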
+            'https://api.openai.com/v1',
+    });
+
+    const result = await experimental_streamText({
+        model: openai.chat('gpt-4'),
+        system: searcherPrompt,
+        messages,
+        tools: {
+            search: {
+                description: 'Search the web for information.',
+                parameters: searcherSchema,
+                execute: async ({ query }: { query: string }) => {
+                    const searchResult = await withGoogleSearch(query);
+
+                    return searchResult;
+                },
+            },
+        },
+    });
+
+    const toolCalls: ToolCallPart[] = [];
+    const toolResponses: ToolResultPart[] = [];
+    for await (const delta of result.fullStream) {
+        switch (delta.type) {
+            case 'text-delta':
+                if (delta.textDelta) {
+                    fullResponse += delta.textDelta;
+                    streamText.update(fullResponse);
+                }
+                break;
+            case 'tool-call':
+                toolCalls.push(delta);
+                break;
+            case 'tool-result':
+                toolResponses.push(delta);
+                break;
+            case 'error':
+                fullResponse += `\nError occurred while executing the tool`;
+                break;
+        }
+    }
+    messages.push({
+        role: 'assistant',
+        content: [{ type: 'text', text: fullResponse }, ...toolCalls],
+    });
+
+    if (toolResponses.length > 0) {
+        messages.push({ role: 'tool', content: toolResponses });
+    }
+
+    return new StreamingTextResponse(result.toAIStream());
+}
diff --git a/app/api/search/route.ts b/app/api/search/route.ts
new file mode 100644
index 00000000..08734c98
--- /dev/null
+++ b/app/api/search/route.ts
@@ -0,0 +1,242 @@
+// import { GeneralModel } from '@/types/model';
+// import { getGoogleSearch } from '@/utils/search/engines/google';
+// import { AnthropicStream, StreamingTextResponse } from 'ai';
+// import { defaultSearchPrompt, template1 } from '@/prompt/search';
+
+// import Anthropic from '@anthropic-ai/sdk';
+
+// import { CallbackManagerForRetrieverRun } from 'langchain/callbacks';
+// import { ChatOpenAI } from '@langchain/openai';
+// import { OpenAIEmbeddings } from '@langchain/openai';
+// import { ChatPromptTemplate, MessagesPlaceholder, PromptTemplate } from '@langchain/core/prompts';
+// import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression';
+// import { TavilySearchAPIRetriever } from '@langchain/community/retrievers/tavily_search_api';
+// import { DocumentCompressorPipeline } from 'langchain/retrievers/document_compressors';
+// import { EmbeddingsFilter } from 'langchain/retrievers/document_compressors/embeddings_filter';
+// import { Document } from 'langchain/document';
+// import { BaseLanguageModel } from 'langchain/base_language';
+// import { HumanMessage, AIMessage } from '@langchain/core/messages';
+// import { StringOutputParser } from '@langchain/core/output_parsers';
+// import { RunnableMap, RunnableBranch, RunnableLambda, Runnable } from '@langchain/core/runnables';
+// import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
+// import { GoogleCustomSearch } from 'langchain/tools';
+
+// const anthropic = new Anthropic({
+//     apiKey: process.env.ANTHROPIC_API_KEY || '',
+// });
+
+// export const runtime = 'edge';
+
+// interface ChatRequest {
+//     questions: string[];
+//     chatHistory: string[];
+// }
+
+// const RESPONSE_TEMPLATE = `
+// You are an expert researcher and writer, tasked with answering any question.
+
+// Generate a comprehensive and informative, yet concise answer of 250 words or less for the \
+// given question based solely on the provided search results (URL and content). You must \
+// only use information from the provided search results. Use an unbiased and \
+// journalistic tone. Combine search results together into a coherent answer. Do not \
+// repeat text. Cite search results using [\${{number}}] notation. Only cite the most \
+// relevant results that answer the question accurately. Place these citations at the end \
+// of the sentence or paragraph that reference them - do not put them all at the end. If \
+// different results refer to different entities within the same name, write separate \
+// answers for each entity. If you want to cite multiple results for the same sentence, \
+// format it as \`[\${{number1}}] [\${{number2}}]\`. However, you should NEVER do this with the \
+// same number - if you want to cite \`number1\` multiple times for a sentence, only do \
+// \`[\${{number1}}]\` not \`[\${{number1}}] [\${{number1}}]\`
+
+// You should use bullet points in your answer for readability. Put citations where they apply \
+// rather than putting them all at the end.
+
+// If there is nothing in the context relevant to the question at hand, just say "Hmm, \
+// I'm not sure." Don't try to make up an answer.
+
+// Anything between the following \`context\` html blocks is retrieved from a knowledge \
+// bank, not part of the conversation with the user.
+
+// <context>
+//     {context}
+// </context>
+
+// REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm \
+// not sure." Don't try to make up an answer. Anything between the preceding 'context' \
+// html blocks is retrieved from a knowledge bank, not part of the conversation with the \
+// user. The current date is {current_date}.
+// `;
+
+// const REPHRASE_TEMPLATE = `
+// Given the following conversation and a follow up question, rephrase the follow up \
+// question to be a standalone question.
+
+// Chat History:
+// {chat_history}
+// Follow Up Input: {question}
+// Standalone Question:
+// `;
+
+// const getRetriever = () => {
+//     const embeddings = new OpenAIEmbeddings({
+//         openAIApiKey: process.env.OPENAI_API_KEY,
+//     });
+
+//     const splitter = new RecursiveCharacterTextSplitter({
+//         chunkSize: 800,
+//         chunkOverlap: 20,
+//     });
+
+//     const relevantFilter = new EmbeddingsFilter({
+//         embeddings: embeddings,
+//         similarityThreshold: 0.8,
+//     });
+
+//     const pipelineCompressor = new DocumentCompressorPipeline({
+//         transformers: [splitter, relevantFilter],
+//     });
+
+//     const baseTavilyRetriever = new TavilySearchAPIRetriever({
+//         k: 6,
+//         includeRawContent: true,
+//         includeImages: true,
+//     });
+
+//     const tavilyRetriever = new ContextualCompressionRetriever({
+//         baseCompressor: pipelineCompressor,
+//         baseRetriever: baseTavilyRetriever,
+//     });
+
+//     // const baseGoogleRetriever = new GoogleCustomSearchRetriever();
+
+//     // const googleRetriever = new ContextualCompressionRetriever({
+//     //     baseCompressor: pipelineCompressor,
+//     //     baseRetriever: baseGoogleRetriever,
+//     // });
+
+//     return tavilyRetriever.withConfig({
+//         runName: 'final_source_retriever',
+//     });
+// };
+
+// const createRetrieverChain = (llm: BaseLanguageModel, retriever: ContextualCompressionRetriever): Runnable => {
+//     const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(REPHRASE_TEMPLATE);
+
+//     const condenseQuestionChain = CONDENSE_QUESTION_PROMPT.pipe(llm).pipe(new StringOutputParser()).withConfig({
+//         runName: 'condense_question',
+//     });
+
+//     const conversationChain = condenseQuestionChain.pipe(retriever);
+
+//     return new RunnableBranch(
+//         new RunnableLambda({
+//             func: (context) => Boolean(context.chatHistory),
+//         }).withConfig({
+//             runName: 'retrieve_chat_history',
+//         })
+//     ).withConfig({
+//         runName: 'route_depending_on_chat_history',
+//     });
+// };
+
+// class GoogleCustomSearchRetriever {
+//     search = null;
+//     number_search_results = 10;
+
+//     cleanSearchQuery(query: string) {
+//         // Check if the first character is a digit
+//         if (!isNaN(parseInt(query.charAt(0)))) {
+//             // Find the position of the first quote
+//             let firstQuotePos = query.indexOf('"');
+//             if (firstQuotePos !== -1) {
+//                 // Extract the part of the string after the quote
+//                 query = query.substring(firstQuotePos + 1);
+//                 // Remove the trailing quote if present
+//                 if (query.endsWith('"')) {
+//                     query = query.slice(0, -1);
+//                 }
+//             }
+//         }
+//         return query.trim();
+//     }
+
+//     searchTool(query: string, num_search_results = 1) {
+//         let queryClean = this.cleanSearchQuery(query);
+//         let result = this.search.results(queryClean, num_search_results);
+//         return result;
+//     }
+
+//     async getRelevantDocuments(query, runManager) {
+//         if (typeof process.env.GOOGLE_API_KEY === 'undefined') {
+//             throw new Error('No Google API key provided');
+//         }
+
+//         if (this.search === null) {
+//             this.search = new GoogleSearchAPIWrapper();
+//         }
+
+//         // Get search questions
+//         console.log('Generating questions for Google Search ...');
+
+//         // Get urls
+//         console.log('Searching for relevant urls...');
+//         let urlsToLook = [];
+//         let searchResults = this.searchTool(query, this.number_search_results);
+//         console.log(`Search results: ${searchResults}`);
+//         searchResults.forEach((res) => {
+//             if (res.link) {
+//                 urlsToLook.push(res.link);
+//             }
+//         });
+
+//         console.log(searchResults);
+//         let loader = new AsyncHtmlLoader(urlsToLook);
+//         let html2text = new Html2TextTransformer();
+//         console.log('Indexing new urls...');
+//         let docs = await loader.load();
+//         docs = html2text.transformDocuments(docs);
+//         docs.forEach((doc, i) => {
+//             if (searchResults[i].title) {
+//                 doc.metadata['title'] = searchResults[i].title;
+//             }
+//         });
+//         return docs;
+//     }
+// }
+
+// export async function POST(req: Request) {
+//     const {
+//         messages,
+//         model,
+//     }: {
+//         messages: any[];
+//         model: GeneralModel;
+//     } = await req.json();
+
+//     const searchResults = await getGoogleSearch(process.env.GOOGLE_SEARCH_API_KEY || '', process.env.GOOGLE_SEARCH_ENGINE_ID || '', messages[0].content);
+
+//     console.log(searchResults);
+
+//     const withPromptMessage = template1(searchResults);
+
+//     const response = await anthropic.messages.create({
+//         messages: [
+//             {
+//                 content: withPromptMessage,
+//                 role: 'user',
+//             },
+//         ],
+//         model: model.model_id,
+//         stream: true,
+//         max_tokens: 4096,
+//     });
+
+//     const stream = AnthropicStream(response);
+
+//     return new StreamingTextResponse(stream);
+// }
+
+export async function GET(req: Request) {
+    return Response.json({ error: 'Method Not Allowed' }, { status: 405 });
+}
diff --git a/app/fonts.ts b/app/fonts.ts
deleted file mode 100644
index a1cda72b..00000000
--- a/app/fonts.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import { Rubik, JetBrains_Mono } from 'next/font/google';
-
-export const rubik = Rubik({
-    subsets: ['latin'],
-    variable: '--font-rubik',
-    display: 'swap',
-});
-
-export const jetbrains_mono = JetBrains_Mono({
-    subsets: ['latin'],
-    variable: '--font-jetbrains-mono',
-    display: 'swap',
-});
diff --git a/app/layout.tsx b/app/layout.tsx
index 1054bc24..44bbbeec 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -1,79 +1,32 @@
-import '@/styles/globals.css';
-import '@/styles/markdown.css';
-import 'tippy.js/dist/tippy.css';
-
-import Script from 'next/script';
-import { Viewport } from 'next';
-import type { Metadata } from 'next';
+import type { Metadata, Viewport } from 'next';
+import { Onest } from 'next/font/google';
 
-import { rubik } from '@/app/fonts';
+import RootProvider from '@/app/provider';
 
-import { Providers } from '@/app/providers';
-
-import { Analytics } from '@vercel/analytics/react';
-import { SpeedInsights } from '@vercel/speed-insights/next';
+import '@/styles/globals.css';
+import 'tippy.js/dist/tippy.css';
 
-import { siteConfig } from '@/config/site.config';
+const onest = Onest({ subsets: ['latin'] });
 
 export const metadata: Metadata = {
-    title: siteConfig.title,
-    description: siteConfig.title + ' - ' + siteConfig.description,
+    title: 'Chat Chat',
+    description: 'Chat Chat - Unlock next-level conversations with AI',
 };
 
 export const viewport: Viewport = {
-    width: 'device-width',
-    height: 'device-height',
-    initialScale: 1,
     minimumScale: 1,
-    maximumScale: 1,
-    userScalable: false,
-    themeColor: '#eee',
 };
 
-export default async function RootLayout({ children, params: { locale } }: { children: React.ReactNode; params: { locale: string } }) {
+export default function RootLayout({
+    children,
+    params: { locale },
+}: Readonly<{
+    children: React.ReactNode;
+    params: { locale: string };
+}>) {
     return (
-        {children}