diff --git a/docs/src/pages/docs/remote-models/vertexai.mdx b/docs/src/pages/docs/remote-models/vertexai.mdx
new file mode 100644
index 0000000000..159837b0a8
--- /dev/null
+++ b/docs/src/pages/docs/remote-models/vertexai.mdx
@@ -0,0 +1,72 @@
+---
+title: VertexAI API
+description: A step-by-step guide on integrating Jan with VertexAI.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ integration,
+ VertexAI,
+ Google,
+ GCP
+ ]
+---
+
+import { Callout, Steps } from 'nextra/components'
+
+
+
+# VertexAI API
+
+
+The VertexAI Extension can be used to select models that [support the OpenAI endpoints](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/call-vertex-using-openai-library) on Vertex AI.
+
+
+## How to Integrate VertexAI API with Jan
+This guide provides step-by-step instructions for integrating the VertexAI API with Jan, allowing users to utilize VertexAI's capabilities within Jan's conversational interface.
+
+## Integration Steps
+
+### Step 1: Configure GCP Service Account
+1. Open the [Credentials page](https://console.cloud.google.com/apis/credentials) in GCP
+2. Select your project
+3. Create Credentials -> Service Account
+4. Give your service account a name
+5. Give at least the "Vertex AI Online Prediction Service Agent" role
+6. Click on the account, then the "Keys" tab
+7. Create new key -> select json.
+8. Copy the exact project ID, private key ID, private key, and client email into your Jan configuration.
+9. Set the region to what is appropriate for your instance
+
+### Step 2: Start Chatting with the Model
+
+1. Select the VertexAI model you want to use.
+2. Specify the model's parameters.
+3. Start the conversation with the VertexAI model.
+
+
+### VertexAI Models
+
+You can also use specific VertexAI models you cannot find in the **Hub** section by customizing the `model.yaml` file, which you can find in `~/jan/data/models/`. Follow the steps in [Manage Models](/docs/models/manage-models) to manually add a model.
+
+
+- You can find the list of available models in the [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/call-vertex-using-openai-library).
+- The `id` property must match the model name in the list.
+ - For example, if you want to use the Gemini 1.0 Pro model you must set the `id` property to `google/gemini-1.0-pro-002`.
+
+
+## Troubleshooting
+
+If you encounter any issues during the integration process or while using VertexAI with Jan, consider the following troubleshooting steps:
+
+- Double-check your service account is active.
+- Confirm the model you selected is available in your selected region.
+- Ensure the model is enabled in VertexAI model garden.
+- Check for error messages or logs that may provide insight into the issue.
diff --git a/extensions/inference-vertexai-extension/README.md b/extensions/inference-vertexai-extension/README.md
new file mode 100644
index 0000000000..b60a1a4aac
--- /dev/null
+++ b/extensions/inference-vertexai-extension/README.md
@@ -0,0 +1,79 @@
+# VertexAI Engine Extension
+
+Created using Jan extension example
+
+# Create a Jan Extension using Typescript
+
+Use this template to bootstrap the creation of a TypeScript Jan extension. 🚀
+
+## Create Your Own Extension
+
+To create your own extension, you can use this repository as a template! Just follow the below instructions:
+
+1. Click the Use this template button at the top of the repository
+2. Select Create a new repository
+3. Select an owner and name for your new repository
+4. Click Create repository
+5. Clone your new repository
+
+## Initial Setup
+
+After you've cloned the repository to your local machine or codespace, you'll need to perform some initial setup steps before you can develop your extension.
+
+> [!NOTE]
+>
+> You'll need to have a reasonably modern version of
+> [Node.js](https://nodejs.org) handy. If you are using a version manager like
+> [`nodenv`](https://github.com/nodenv/nodenv) or
+> [`nvm`](https://github.com/nvm-sh/nvm), you can run `nodenv install` in the
+> root of your repository to install the version specified in
+> [`package.json`](./package.json). Otherwise, 20.x or later should work!
+
+1. :hammer_and_wrench: Install the dependencies
+
+ ```bash
+ npm install
+ ```
+
+1. :building_construction: Package the TypeScript for distribution
+
+ ```bash
+ npm run bundle
+ ```
+
+1. :white_check_mark: Check your artifact
+
+ There will be a tgz file in your extension directory now
+
+## Update the Extension Metadata
+
+The [`package.json`](package.json) file defines metadata about your extension, such as
+extension name, main entry, description and version.
+
+When you copy this repository, update `package.json` with the name, description for your extension.
+
+## Update the Extension Code
+
+The [`src/`](./src/) directory is the heart of your extension! This contains the
+source code that will be run when your extension functions are invoked. You can replace the
+contents of this directory with your own code.
+
+There are a few things to keep in mind when writing your extension code:
+
+- Most Jan Extension functions are processed asynchronously.
+ In `index.ts`, you will see that the extension function will return a `Promise`.
+
+ ```typescript
+ import { events, MessageEvent, MessageRequest } from '@janhq/core'
+
+ function onStart(): Promise<void> {
+ return events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
+ this.inference(data)
+ )
+ }
+ ```
+
+ For more information about the Jan Extension Core module, see the
+ [documentation](https://github.com/janhq/jan/blob/main/core/README.md).
+
+So, what are you waiting for? Go ahead and start customizing your extension!
diff --git a/extensions/inference-vertexai-extension/jest.config.js b/extensions/inference-vertexai-extension/jest.config.js
new file mode 100644
index 0000000000..3e32adceb2
--- /dev/null
+++ b/extensions/inference-vertexai-extension/jest.config.js
@@ -0,0 +1,9 @@
+/** @type {import('ts-jest').JestConfigWithTsJest} */
+module.exports = {
+ preset: 'ts-jest',
+ testEnvironment: 'node',
+ transform: {
+ 'node_modules/@janhq/core/.+\\.(j|t)s?$': 'ts-jest',
+ },
+ transformIgnorePatterns: ['node_modules/(?!@janhq/core/.*)'],
+}
diff --git a/extensions/inference-vertexai-extension/package.json b/extensions/inference-vertexai-extension/package.json
new file mode 100644
index 0000000000..746f766f10
--- /dev/null
+++ b/extensions/inference-vertexai-extension/package.json
@@ -0,0 +1,43 @@
+{
+ "name": "@janhq/inference-vertexai-extension",
+ "productName": "VertexAI Inference Engine",
+ "version": "1.0.0",
+ "description": "This extension enables Vertex chat completion API calls",
+ "main": "dist/index.js",
+ "module": "dist/module.js",
+ "engine": "vertexai",
+ "author": "Jan ",
+ "license": "AGPL-3.0",
+ "scripts": {
+ "build": "rolldown -c rolldown.config.mjs",
+ "build:publish": "rimraf *.tgz --glob || true && yarn build && npm pack && cpx *.tgz ../../pre-install"
+ },
+ "devDependencies": {
+ "cpx": "^1.5.0",
+ "rimraf": "^3.0.2",
+ "rolldown": "1.0.0-beta.1",
+ "ts-loader": "^9.5.0",
+ "typescript": "^5.7.2"
+ },
+ "dependencies": {
+ "@janhq/core": "../../core/package.tgz",
+ "fetch-retry": "^5.0.6",
+ "jose": "^5.9.6",
+ "ulidx": "^2.3.0"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "files": [
+ "dist/*",
+ "package.json",
+ "README.md"
+ ],
+ "bundleDependencies": [
+ "fetch-retry"
+ ],
+ "installConfig": {
+ "hoistingLimits": "workspaces"
+ },
+ "packageManager": "yarn@4.5.3"
+}
\ No newline at end of file
diff --git a/extensions/inference-vertexai-extension/resources/models.json b/extensions/inference-vertexai-extension/resources/models.json
new file mode 100644
index 0000000000..ba30a9b30f
--- /dev/null
+++ b/extensions/inference-vertexai-extension/resources/models.json
@@ -0,0 +1,98 @@
+[
+ {
+ "sources": [
+ {
+ "url": "https://cloud.google.com/vertex-ai"
+ }
+ ],
+ "id": "google/gemini-1.5-flash-002",
+ "object": "model",
+ "name": "Gemini 1.5 Flash",
+ "version": "1.5",
+ "description": "Gemini 1.5 Flash is a high-performance general-purpose model designed for fast and efficient use cases.",
+ "format": "api",
+ "settings": {
+ "vision_model": true
+ },
+ "parameters": {
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Google",
+ "tags": [
+ "General"
+ ]
+ },
+ "engine": "vertexai"
+ },
+ {
+ "sources": [
+ {
+ "url": "https://cloud.google.com/vertex-ai"
+ }
+ ],
+ "id": "google/gemini-2.0-flash-exp",
+ "object": "model",
+ "name": "Gemini 2.0 Flash",
+ "version": "2.0",
+ "description": "Gemini 2.0 Flash is an enhanced version of the Flash series, offering robust performance for general tasks",
+ "format": "api",
+ "settings": {
+ "vision_model": true
+ },
+ "parameters": {
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Google",
+ "tags": [
+ "General"
+ ]
+ },
+ "engine": "vertexai"
+ },
+ {
+ "sources": [
+ {
+ "url": "https://cloud.google.com/vertex-ai"
+ }
+ ],
+ "id": "google/gemini-1.5-pro-002",
+ "object": "model",
+ "name": "Gemini 1.5 Pro",
+ "version": "1.5",
+ "description": "Gemini 1.5 Pro is a professional-grade model optimized for versatile and high-quality applications",
+ "format": "api",
+ "settings": {
+ "vision_model": true
+ },
+ "parameters": {
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Google",
+ "tags": [
+ "General"
+ ]
+ },
+ "engine": "vertexai"
+ }
+]
\ No newline at end of file
diff --git a/extensions/inference-vertexai-extension/resources/settings.json b/extensions/inference-vertexai-extension/resources/settings.json
new file mode 100644
index 0000000000..3f1b5e4055
--- /dev/null
+++ b/extensions/inference-vertexai-extension/resources/settings.json
@@ -0,0 +1,52 @@
+[
+ {
+ "key": "projectId",
+ "title": "Project ID",
+ "description": "The specific GCP project ID you want to use.",
+ "controllerType": "input",
+ "controllerProps": {
+ "placeholder": "Project Id",
+ "value": ""
+ }
+ },
+ {
+ "key": "location",
+ "title": "Location",
+ "description": "The location of your GCP instance",
+ "controllerType": "input",
+ "controllerProps": {
+ "placeholder": "us-central1",
+ "value": "us-central1"
+ }
+ },
+ {
+ "key": "privateKey",
+ "title": "Private Key",
+ "description": "The private key of your service account. Learn more: https://cloud.google.com/iam/docs/keys-create-delete",
+ "controllerType": "input",
+ "controllerProps": {
+ "placeholder": "Insert Private Key",
+ "value": ""
+ }
+ },
+ {
+ "key": "serviceEmail",
+ "title": "Service Email",
+ "description": "Service Account Email",
+ "controllerType": "input",
+ "controllerProps": {
+ "placeholder": "Enter service email here",
+ "value": ""
+ }
+ },
+ {
+ "key": "vertexai-api-key",
+ "title": "Private Key ID",
+ "description": "The Private Key ID for your service account",
+ "controllerType": "input",
+ "controllerProps": {
+ "placeholder": "Enter private key id here",
+ "value": ""
+ }
+ }
+]
\ No newline at end of file
diff --git a/extensions/inference-vertexai-extension/rolldown.config.mjs b/extensions/inference-vertexai-extension/rolldown.config.mjs
new file mode 100644
index 0000000000..9ebaace2e1
--- /dev/null
+++ b/extensions/inference-vertexai-extension/rolldown.config.mjs
@@ -0,0 +1,18 @@
+import { defineConfig } from 'rolldown'
+import pkgJson from './package.json' with { type: 'json' }
+import settingJson from './resources/settings.json' with { type: 'json' }
+import modelsJson from './resources/models.json' with { type: 'json' }
+
+export default defineConfig({
+ input: 'src/index.ts',
+ output: {
+ format: 'esm',
+ file: 'dist/index.js',
+ },
+ platform: 'browser',
+ define: {
+ MODELS: JSON.stringify(modelsJson),
+ SETTINGS: JSON.stringify(settingJson),
+ ENGINE: JSON.stringify(pkgJson.engine),
+ },
+})
diff --git a/extensions/inference-vertexai-extension/src/env.d.ts b/extensions/inference-vertexai-extension/src/env.d.ts
new file mode 100644
index 0000000000..40ca58094a
--- /dev/null
+++ b/extensions/inference-vertexai-extension/src/env.d.ts
@@ -0,0 +1,2 @@
+declare const SETTINGS: SettingComponentProps[]
+declare const MODELS: Model[]
diff --git a/extensions/inference-vertexai-extension/src/index.ts b/extensions/inference-vertexai-extension/src/index.ts
new file mode 100644
index 0000000000..fbcfce4735
--- /dev/null
+++ b/extensions/inference-vertexai-extension/src/index.ts
@@ -0,0 +1,192 @@
+/**
+ * @file This file exports a class that implements the InferenceExtension interface from the @janhq/core package.
+ * The class provides methods for initializing and stopping a model, and for making inference requests.
+ * It also subscribes to events emitted by the @janhq/core package and handles new message requests.
+ * @version 1.0.0
+ * @module inference-vertexai-extension/src/index
+ */
+
+import { PayloadType, RemoteOAIEngine, SettingComponentProps } from '@janhq/core'
+import { SignJWT, importPKCS8 } from 'jose';
+
+
+export enum Settings {
+ location = 'location',
+ projectId = 'projectId',
+ privateKey = 'privateKey',
+ serviceEmail = 'serviceEmail',
+ privateKeyId = 'vertexai-api-key'
+}
+type VertexAIPayloadType = PayloadType &
+{
+ temperature?: number;
+ top_p?: number;
+ stream?: boolean;
+ max_tokens?: number;
+ stop?: string[];
+ frequency_penalty?: number;
+ presence_penalty?: number;
+}
+/**
+ * A class that implements the InferenceExtension interface from the @janhq/core package.
+ * The class provides methods for initializing and stopping a model, and for making inference requests.
+ * It also subscribes to events emitted by the @janhq/core package and handles new message requests.
+ */
+export default class JanInferenceVertexAIExtension extends RemoteOAIEngine {
+ inferenceUrl: string = ''
+ location: string = 'us-central1'
+ projectId: string = ''
+ provider: string = 'vertexai'
+ privateKey: string = ''
+ serviceEmail: string = ''
+ privateKeyId: string = ''
+ expires: number = 0
+
+
+ override async onLoad(): Promise<void> {
+ super.onLoad()
+
+ // Register Settings
+ this.registerSettings(SETTINGS)
+ this.registerModels(MODELS)
+
+ this.location = await this.getSetting(Settings.location, 'us-central1')
+ this.projectId = await this.getSetting(
+ Settings.projectId,
+ ''
+ )
+ this.privateKey = await this.getSetting(Settings.privateKey, '')
+ this.serviceEmail = await this.getSetting(
+ Settings.serviceEmail,
+ ''
+ )
+ this.privateKeyId = await this.getSetting(
+ Settings.privateKeyId,
+ ''
+ )
+ await this.updateApiKey()
+
+ this.inferenceUrl = `https://${this.location}-aiplatform.googleapis.com/v1beta1/projects/${this.projectId}/locations/${this.location}/endpoints/openapi/chat/completions`
+
+ }
+
+ async getAccessToken(): Promise<string | null> {
+ const authUrl = "https://www.googleapis.com/oauth2/v4/token";
+
+ const issued = Math.floor(Date.now() / 1000);
+ const expires = issued + 3600;
+ this.expires = expires - 1200 // Remove some time for buffer
+ // JWT Headers
+ const additionalHeaders = {
+ kid: this.privateKeyId,
+ alg: "RS256",
+ typ: "JWT",
+ };
+
+ // JWT Payload
+ const payload = {
+ iss: this.serviceEmail, // Issuer claim
+ sub: this.serviceEmail, // Subject claim
+ aud: authUrl, // Audience claim
+ iat: issued, // Issued At claim (in seconds since epoch)
+ exp: expires, // Expiration time (in seconds since epoch)
+ scope: "https://www.googleapis.com/auth/cloud-platform",
+ };
+ this.privateKey = this.privateKey.replace(/\\n/g, ' ')
+ const key = await importPKCS8(this.privateKey, "RS256")
+ // Create the signed JWT
+ const signedJwt = await new SignJWT(payload)
+ .setProtectedHeader(additionalHeaders)
+ .setIssuedAt(issued)
+ .setExpirationTime(expires)
+ .sign(key);
+
+ const params = new URLSearchParams();
+ params.append('grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer');
+ params.append('assertion', signedJwt);
+
+ try {
+ const response = await fetch(authUrl, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ },
+ body: params.toString(),
+ });
+
+ if (!response.ok) {
+ console.error('Failed to get access token:', await response.text());
+ return null;
+ }
+
+ const data = await response.json();
+ return data.access_token;
+ } catch (error) {
+ console.error('Error fetching access token:', error);
+ return null;
+ }
+
+ }
+
+
+ async updateApiKey(force: boolean = false): Promise<void> {
+ const now = Math.floor(Date.now() / 1000);
+ if (
+ this.privateKey !== '' &&
+ this.serviceEmail !== '' &&
+ this.privateKeyId !== '' &&
+ (now > this.expires || force)
+ ) {
+ const apiKey = await this.getAccessToken();
+ if (apiKey) {
+ this.apiKey = apiKey;
+ } else {
+ console.error("Failed to update API key");
+ }
+ }
+ }
+
+ override async updateSettings(componentProps: Partial<SettingComponentProps>[]): Promise<void> {
+ await super.updateSettings(componentProps)
+ this.updateApiKey(true).catch((error) =>
+ console.error("Error updating API key:", error)
+ );
+
+ }
+
+ onSettingUpdate<T>(key: string, value: T): void {
+ if (key === Settings.location) {
+ this.location = value as string
+ } else if (key === Settings.projectId) {
+ this.projectId = value as string
+ } else if (key == Settings.privateKey) {
+ this.privateKey = value as string
+ this.privateKey = this.privateKey.replace(/\\n/g, ' ') // normalize escaped newlines from pasted keys — TODO confirm '\n' vs ' ' against jose PEM parsing
+ } else if (key == Settings.privateKeyId) {
+ this.privateKeyId = value as string
+ } else if (key == Settings.serviceEmail) {
+ this.serviceEmail = value as string
+ }
+ this.inferenceUrl = `https://${this.location}-aiplatform.googleapis.com/v1beta1/projects/${this.projectId}/locations/${this.location}/endpoints/openapi/chat/completions`
+
+ }
+
+
+ /**
+ * Transform the payload before sending it to the inference endpoint.
+ * @param payload
+ * @returns
+ */
+ transformPayload = (payload: VertexAIPayloadType): VertexAIPayloadType => {
+ // Check if the api key needs to be updated and update if so.
+ this.updateApiKey().catch((error) =>
+ console.error("Error updating API key:", error)
+ );
+ // Remove empty stop words
+ if (payload.stop?.length === 0) {
+ const { stop, ...params } = payload
+ payload = params
+ }
+ return payload
+ }
+}
diff --git a/extensions/inference-vertexai-extension/test.js b/extensions/inference-vertexai-extension/test.js
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/extensions/inference-vertexai-extension/tsconfig.json b/extensions/inference-vertexai-extension/tsconfig.json
new file mode 100644
index 0000000000..6db951c9e4
--- /dev/null
+++ b/extensions/inference-vertexai-extension/tsconfig.json
@@ -0,0 +1,15 @@
+{
+ "compilerOptions": {
+ "target": "es2016",
+ "module": "ES6",
+ "moduleResolution": "node",
+ "outDir": "./dist",
+ "esModuleInterop": true,
+ "forceConsistentCasingInFileNames": true,
+ "strict": false,
+ "skipLibCheck": true,
+ "rootDir": "./src"
+ },
+ "include": ["./src"],
+ "exclude": ["**/*.test.ts"]
+}
diff --git a/extensions/yarn.lock b/extensions/yarn.lock
index d139b917c7..9fd8a7f9d8 100644
--- a/extensions/yarn.lock
+++ b/extensions/yarn.lock
@@ -509,161 +509,171 @@ __metadata:
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-anthropic-extension%40workspace%3Ainference-anthropic-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-anthropic-extension%40workspace%3Ainference-anthropic-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-anthropic-extension%40workspace%3Ainference-anthropic-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-cohere-extension%40workspace%3Ainference-cohere-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-cohere-extension%40workspace%3Ainference-cohere-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-cohere-extension%40workspace%3Ainference-cohere-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-groq-extension%40workspace%3Ainference-groq-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-groq-extension%40workspace%3Ainference-groq-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-groq-extension%40workspace%3Ainference-groq-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-martian-extension%40workspace%3Ainference-martian-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-martian-extension%40workspace%3Ainference-martian-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-martian-extension%40workspace%3Ainference-martian-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-mistral-extension%40workspace%3Ainference-mistral-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-mistral-extension%40workspace%3Ainference-mistral-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-mistral-extension%40workspace%3Ainference-mistral-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-nvidia-extension%40workspace%3Ainference-nvidia-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-nvidia-extension%40workspace%3Ainference-nvidia-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-nvidia-extension%40workspace%3Ainference-nvidia-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-openai-extension%40workspace%3Ainference-openai-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-openai-extension%40workspace%3Ainference-openai-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-openai-extension%40workspace%3Ainference-openai-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-openrouter-extension%40workspace%3Ainference-openrouter-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-openrouter-extension%40workspace%3Ainference-openrouter-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-openrouter-extension%40workspace%3Ainference-openrouter-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-triton-trt-llm-extension%40workspace%3Ainference-triton-trtllm-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Finference-triton-trt-llm-extension%40workspace%3Ainference-triton-trtllm-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-triton-trt-llm-extension%40workspace%3Ainference-triton-trtllm-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
+ languageName: node
+ linkType: hard
+
+"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-vertexai-extension%40workspace%3Ainference-vertexai-extension":
+ version: 0.1.10
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Finference-vertexai-extension%40workspace%3Ainference-vertexai-extension"
+ dependencies:
+ rxjs: "npm:^7.8.1"
+ ulidx: "npm:^2.3.0"
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Ftensorrt-llm-extension%40workspace%3Atensorrt-llm-extension":
version: 0.1.10
- resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=91cd98&locator=%40janhq%2Ftensorrt-llm-extension%40workspace%3Atensorrt-llm-extension"
+ resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=deff18&locator=%40janhq%2Ftensorrt-llm-extension%40workspace%3Atensorrt-llm-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
- checksum: 10c0/af79c509b1ff8a2893f5fd6545cfa8b3bb6a2e2bc13acdd5963766a1caac635b8b69ab627bfb356e052f16542f2b7187b607bdaed6acec24cd7c9a6087e4abc2
+ checksum: 10c0/0d5b0ba62ffdbc3eba7947c335f40aedc87de9afe7ed870a66903cd9d8e3be8e9e28b6cf781e5627d913382eb337ea786cb5db607104f6d483a16d652a113424
languageName: node
linkType: hard
@@ -849,6 +859,22 @@ __metadata:
languageName: unknown
linkType: soft
+"@janhq/inference-vertexai-extension@workspace:inference-vertexai-extension":
+ version: 0.0.0-use.local
+ resolution: "@janhq/inference-vertexai-extension@workspace:inference-vertexai-extension"
+ dependencies:
+ "@janhq/core": ../../core/package.tgz
+ cpx: "npm:^1.5.0"
+ fetch-retry: "npm:^5.0.6"
+ jose: "npm:^5.9.6"
+ rimraf: "npm:^3.0.2"
+ rolldown: "npm:1.0.0-beta.1"
+ ts-loader: "npm:^9.5.0"
+ typescript: "npm:^5.7.2"
+ ulidx: "npm:^2.3.0"
+ languageName: unknown
+ linkType: soft
+
"@janhq/model-extension@workspace:model-extension":
version: 0.0.0-use.local
resolution: "@janhq/model-extension@workspace:model-extension"
@@ -5280,6 +5306,13 @@ __metadata:
languageName: node
linkType: hard
+"jose@npm:^5.9.6":
+ version: 5.9.6
+ resolution: "jose@npm:5.9.6"
+ checksum: 10c0/d6bcd8c7d655b5cda8e182952a76f0c093347f5476d74795405bb91563f7ab676f61540310dd4b1531c60d685335ceb600571a409551d2cbd2ab3e9f9fbf1e4d
+ languageName: node
+ linkType: hard
+
"js-tiktoken@npm:^1.0.12, js-tiktoken@npm:^1.0.7":
version: 1.0.16
resolution: "js-tiktoken@npm:1.0.16"
diff --git a/web/public/images/ModelProvider/vertexai.svg b/web/public/images/ModelProvider/vertexai.svg
new file mode 100644
index 0000000000..1932c35bb4
--- /dev/null
+++ b/web/public/images/ModelProvider/vertexai.svg
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file