From 8d671240c2d19c8a42d7769ef9ee5d8b8653904f Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Sun, 5 Jan 2025 01:03:59 +0800
Subject: [PATCH 1/3] refactor: use async-openai-alt and upgrade to 0.26.1

Signed-off-by: Wei Zhang
---
 Cargo.lock                                 | 36 ++++++-------
 Cargo.toml                                 |  2 +-
 crates/http-api-bindings/Cargo.toml        |  2 +-
 crates/http-api-bindings/src/chat/mod.rs   |  4 +-
 crates/http-api-bindings/src/rate_limit.rs |  2 +-
 crates/llama-cpp-server/Cargo.toml         |  2 +-
 crates/llama-cpp-server/src/lib.rs         | 10 ++--
 crates/tabby-inference/Cargo.toml          |  2 +-
 crates/tabby-inference/src/chat.rs         |  6 +--
 crates/tabby/Cargo.toml                    |  2 +-
 crates/tabby/src/routes/chat.rs            |  4 +-
 ee/tabby-schema/Cargo.toml                 |  2 +-
 ee/tabby-schema/src/schema/mod.rs          |  2 +-
 ee/tabby-webserver/Cargo.toml              |  2 +-
 ee/tabby-webserver/src/service/answer.rs   | 54 +++++++++++--------
 .../src/service/answer/testutils/mod.rs    |  4 +-
 16 files changed, 73 insertions(+), 63 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index fe613095e0b4..08a3bb69bd61 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -187,16 +187,17 @@ dependencies = [
 ]
 
 [[package]]
-name = "async-openai"
-version = "0.20.0"
+name = "async-openai-alt"
+version = "0.26.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11e97f9c5e0ee3260caee9700ba1bb61a6fdc34d2b6786a31e018c5de5198491"
+checksum = "2df183306e5fa71c7a5af4571e10504806a3d47825e172824fddee8ed9182cbf"
 dependencies = [
  "async-convert",
  "backoff",
  "base64 0.22.1",
  "bytes",
  "derive_builder",
+ "eventsource-stream",
  "futures",
  "rand 0.8.5",
  "reqwest",
@@ -1915,7 +1916,7 @@ name = "http-api-bindings"
 version = "0.24.0-dev.0"
 dependencies = [
  "anyhow",
- "async-openai",
+ "async-openai-alt",
  "async-stream",
  "async-trait",
  "futures",
@@ -2608,7 +2609,7 @@ name = "llama-cpp-server"
 version = "0.24.0-dev.0"
 dependencies = [
  "anyhow",
- "async-openai",
+ "async-openai-alt",
  "async-trait",
  "cmake",
  "futures",
@@ -2872,13 +2873,13 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.11"
+version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
+checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd"
 dependencies = [
  "libc",
  "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
@@ -5266,7 +5267,7 @@ version = "0.24.0-dev.0"
 dependencies = [
  "anyhow",
  "assert-json-diff",
- "async-openai",
+ "async-openai-alt",
  "async-stream",
  "async-trait",
  "axum",
@@ -5482,7 +5483,7 @@ name = "tabby-inference"
 version = "0.24.0-dev.0"
 dependencies = [
  "anyhow",
- "async-openai",
+ "async-openai-alt",
  "async-stream",
  "async-trait",
  "dashmap",
@@ -5500,7 +5501,7 @@ name = "tabby-schema"
 version = "0.24.0-dev.0"
 dependencies = [
  "anyhow",
- "async-openai",
+ "async-openai-alt",
  "async-trait",
  "axum",
  "base64 0.22.1",
@@ -5529,7 +5530,7 @@ dependencies = [
  "anyhow",
  "argon2",
  "assert_matches",
- "async-openai",
+ "async-openai-alt",
  "async-stream",
  "async-trait",
  "axum",
@@ -5888,28 +5889,27 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 
 [[package]]
 name = "tokio"
-version = "1.37.0"
+version = "1.42.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
+checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
 dependencies = [
  "backtrace",
  "bytes",
  "libc",
  "mio",
- "num_cpus",
  "parking_lot",
  "pin-project-lite",
  "signal-hook-registry",
  "socket2",
  "tokio-macros",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
 name = "tokio-macros"
-version = "2.2.0"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/Cargo.toml b/Cargo.toml
index 99f2d05c9bb6..865bbace6bcf 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -68,7 +68,7 @@ mime_guess = "2.0.4"
 assert_matches = "1.5"
 insta = "1.34.0"
 logkit = "0.3"
-async-openai = "0.20"
+async-openai-alt = "0.26.1"
 tracing-test = "0.2"
 clap = "4.3.0"
 ratelimit = "0.10"
diff --git a/crates/http-api-bindings/Cargo.toml b/crates/http-api-bindings/Cargo.toml
index 03dd3861241e..5fc6c44f1a27 100644
--- a/crates/http-api-bindings/Cargo.toml
+++ b/crates/http-api-bindings/Cargo.toml
@@ -17,7 +17,7 @@ serde_json = { workspace = true }
 tabby-common = { path = "../tabby-common" }
 tabby-inference = { path = "../tabby-inference" }
 ollama-api-bindings = { path = "../ollama-api-bindings" }
-async-openai.workspace = true
+async-openai-alt.workspace = true
 tokio.workspace = true
 tracing.workspace = true
 leaky-bucket = "1.1.2"
diff --git a/crates/http-api-bindings/src/chat/mod.rs b/crates/http-api-bindings/src/chat/mod.rs
index f30a36ed1dca..bed9a15a6fa2 100644
--- a/crates/http-api-bindings/src/chat/mod.rs
+++ b/crates/http-api-bindings/src/chat/mod.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use async_openai::config::OpenAIConfig;
+use async_openai_alt::config::OpenAIConfig;
 use tabby_common::config::HttpModelConfig;
 use tabby_inference::{ChatCompletionStream, ExtendedOpenAIConfig};
 
@@ -34,7 +34,7 @@ pub async fn create(model: &HttpModelConfig) -> Arc<dyn ChatCompletionStream> {
     let config = builder.build().expect("Failed to build config");
 
     let engine = Box::new(
-        async_openai::Client::with_config(config)
+        async_openai_alt::Client::with_config(config)
             .with_http_client(create_reqwest_client(api_endpoint)),
     );
 
diff --git a/crates/http-api-bindings/src/rate_limit.rs b/crates/http-api-bindings/src/rate_limit.rs
index 5636986f4495..6f30a617b4c5 100644
--- a/crates/http-api-bindings/src/rate_limit.rs
+++ b/crates/http-api-bindings/src/rate_limit.rs
@@ -1,4 +1,4 @@
-use async_openai::{
+use async_openai_alt::{
     error::OpenAIError,
     types::{
         ChatCompletionResponseStream, CreateChatCompletionRequest, CreateChatCompletionResponse,
diff --git a/crates/llama-cpp-server/Cargo.toml b/crates/llama-cpp-server/Cargo.toml
index 43d2b5223beb..4ff119a97d72 100644
--- a/crates/llama-cpp-server/Cargo.toml
+++ b/crates/llama-cpp-server/Cargo.toml
@@ -24,7 +24,7 @@ anyhow.workspace = true
 which = "6"
 serde.workspace = true
 serdeconv.workspace = true
-async-openai.workspace = true
+async-openai-alt.workspace = true
 
 [build-dependencies]
 cmake = "0.1"
diff --git a/crates/llama-cpp-server/src/lib.rs b/crates/llama-cpp-server/src/lib.rs
index a7c0a30b71cb..9851bb3dfbf3 100644
--- a/crates/llama-cpp-server/src/lib.rs
+++ b/crates/llama-cpp-server/src/lib.rs
@@ -3,7 +3,7 @@ mod supervisor;
 use std::{path::PathBuf, sync::Arc};
 
 use anyhow::Result;
-use async_openai::error::OpenAIError;
+use async_openai_alt::error::OpenAIError;
 use async_trait::async_trait;
 use futures::stream::BoxStream;
 use serde::Deserialize;
@@ -161,15 +161,15 @@ impl ChatCompletionServer {
 impl ChatCompletionStream for ChatCompletionServer {
     async fn chat(
         &self,
-        request: async_openai::types::CreateChatCompletionRequest,
-    ) -> Result<async_openai::types::CreateChatCompletionResponse, OpenAIError> {
+        request: async_openai_alt::types::CreateChatCompletionRequest,
+    ) -> Result<async_openai_alt::types::CreateChatCompletionResponse, OpenAIError> {
         self.chat_completion.chat(request).await
     }
 
     async fn chat_stream(
         &self,
-        request: async_openai::types::CreateChatCompletionRequest,
-    ) -> Result<async_openai::types::ChatCompletionResponseStream, OpenAIError> {
+        request: async_openai_alt::types::CreateChatCompletionRequest,
+    ) -> Result<async_openai_alt::types::ChatCompletionResponseStream, OpenAIError> {
         self.chat_completion.chat_stream(request).await
     }
 }
diff --git a/crates/tabby-inference/Cargo.toml b/crates/tabby-inference/Cargo.toml
index c362b809d0dd..6b9854ba848d 100644
--- a/crates/tabby-inference/Cargo.toml
+++ b/crates/tabby-inference/Cargo.toml
@@ -16,7 +16,7 @@ derive_builder.workspace = true
 futures = { workspace = true }
 tabby-common = { path = "../tabby-common" }
 trie-rs = "0.1.1"
-async-openai.workspace = true
+async-openai-alt.workspace = true
 secrecy = "0.8"
 reqwest.workspace = true
 tracing.workspace = true
diff --git a/crates/tabby-inference/src/chat.rs b/crates/tabby-inference/src/chat.rs
index 5ef447daaa3a..ff3b2d1672d3 100644
--- a/crates/tabby-inference/src/chat.rs
+++ b/crates/tabby-inference/src/chat.rs
@@ -1,4 +1,4 @@
-use async_openai::{
+use async_openai_alt::{
     config::OpenAIConfig,
     error::OpenAIError,
     types::{
@@ -85,7 +85,7 @@ impl ExtendedOpenAIConfig {
     }
 }
 
-impl async_openai::config::Config for ExtendedOpenAIConfig {
+impl async_openai_alt::config::Config for ExtendedOpenAIConfig {
     fn headers(&self) -> reqwest::header::HeaderMap {
         self.base.headers()
     }
@@ -108,7 +108,7 @@ impl async_openai::config::Config for ExtendedOpenAIConfig {
 }
 
 #[async_trait]
-impl ChatCompletionStream for async_openai::Client<ExtendedOpenAIConfig> {
+impl ChatCompletionStream for async_openai_alt::Client<ExtendedOpenAIConfig> {
     async fn chat(
         &self,
         request: CreateChatCompletionRequest,
diff --git a/crates/tabby/Cargo.toml b/crates/tabby/Cargo.toml
index be26919f1038..0aa663557e92 100644
--- a/crates/tabby/Cargo.toml
+++ b/crates/tabby/Cargo.toml
@@ -59,7 +59,7 @@ axum-prometheus = "0.6"
 uuid.workspace = true
 color-eyre = { version = "0.6.3" }
 reqwest.workspace = true
-async-openai.workspace = true
+async-openai-alt.workspace = true
 spinners = "4.1.1"
 regex.workspace = true
 
diff --git a/crates/tabby/src/routes/chat.rs b/crates/tabby/src/routes/chat.rs
index d8a1f84d81d6..95ce25fcccbf 100644
--- a/crates/tabby/src/routes/chat.rs
+++ b/crates/tabby/src/routes/chat.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use async_openai::error::OpenAIError;
+use async_openai_alt::error::OpenAIError;
 use axum::{
     extract::State,
     response::sse::{Event, KeepAlive, Sse},
@@ -36,7 +36,7 @@ pub async fn chat_completions_utoipa(_request: Json<serde_json::Value>) -> Statu
 pub async fn chat_completions(
     State(state): State<Arc<dyn ChatCompletionStream>>,
     TypedHeader(MaybeUser(user)): TypedHeader<MaybeUser>,
-    Json(mut request): Json<async_openai::types::CreateChatCompletionRequest>,
+    Json(mut request): Json<async_openai_alt::types::CreateChatCompletionRequest>,
 ) -> Result<Sse<impl Stream<Item = Result<Event, anyhow::Error>>>, StatusCode> {
     if let Some(user) = user {
         request.user.replace(user);
diff --git a/ee/tabby-schema/Cargo.toml b/ee/tabby-schema/Cargo.toml
index a6849542d615..b12e6b9e52cd 100644
--- a/ee/tabby-schema/Cargo.toml
+++ b/ee/tabby-schema/Cargo.toml
@@ -10,7 +10,7 @@ schema-language = ["juniper/schema-language"]
 
 [dependencies]
 anyhow.workspace = true
-async-openai.workspace = true
+async-openai-alt.workspace = true
 async-trait.workspace = true
 axum = { workspace = true }
 base64 = "0.22.0"
diff --git a/ee/tabby-schema/src/schema/mod.rs b/ee/tabby-schema/src/schema/mod.rs
index c6c75e6e954d..6424a9a1f8ad 100644
--- a/ee/tabby-schema/src/schema/mod.rs
+++ b/ee/tabby-schema/src/schema/mod.rs
@@ -20,7 +20,7 @@ pub mod worker;
 use std::{sync::Arc, time::Instant};
 
 use access_policy::{AccessPolicyService, SourceIdAccessPolicy};
-use async_openai::{
+use async_openai_alt::{
     error::OpenAIError,
     types::{
         ChatCompletionRequestMessage, ChatCompletionRequestUserMessageArgs,
diff --git a/ee/tabby-webserver/Cargo.toml b/ee/tabby-webserver/Cargo.toml
index 75afa8fae5bf..e253dc8ce6bd 100644
--- a/ee/tabby-webserver/Cargo.toml
+++ b/ee/tabby-webserver/Cargo.toml
@@ -53,7 +53,7 @@ strum.workspace = true
 cron = "0.12.1"
 async-stream.workspace = true
 logkit.workspace = true
-async-openai.workspace = true
+async-openai-alt.workspace = true
 ratelimit.workspace = true
 cached.workspace = true
 
diff --git a/ee/tabby-webserver/src/service/answer.rs b/ee/tabby-webserver/src/service/answer.rs
index 821b6c5a5a1c..f355f61f93e9 100644
--- a/ee/tabby-webserver/src/service/answer.rs
+++ b/ee/tabby-webserver/src/service/answer.rs
@@ -7,11 +7,14 @@ use std::{
 };
 
 use anyhow::anyhow;
-use async_openai::{
+use async_openai_alt::{
     error::OpenAIError,
     types::{
+        ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent,
         ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage,
-        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, Role,
+        ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,
+        ChatCompletionRequestUserMessageArgs, ChatCompletionRequestUserMessageContent,
+        CreateChatCompletionRequestArgs, Role,
     },
 };
 use async_stream::stream;
@@ -438,8 +441,9 @@ fn convert_messages_to_chat_completion_request(
     if !config.system_prompt.is_empty() {
         output.push(ChatCompletionRequestMessage::System(
             ChatCompletionRequestSystemMessage {
-                content: config.system_prompt.clone(),
-                role: Role::System,
+                content: ChatCompletionRequestSystemMessageContent::Text(
+                    config.system_prompt.clone(),
+                ),
                 name: None,
             },
         ));
@@ -452,36 +456,42 @@
             thread::Role::User => Role::User,
         };
 
-        let content = if role == Role::User {
+        let message: ChatCompletionRequestMessage = if role == Role::User {
             if i % 2 != 0 {
                 bail!("User message must be followed by assistant message");
             }
 
             let y = &messages[i + 1];
 
-            build_user_prompt(&x.content, &y.attachment, None)
+            let content = build_user_prompt(&x.content, &y.attachment, None);
+            ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage {
+                content: ChatCompletionRequestUserMessageContent::Text(
+                    helper.rewrite_tag(&content),
+                ),
+                ..Default::default()
+            })
         } else {
-            x.content.clone()
+            ChatCompletionRequestMessage::Assistant(ChatCompletionRequestAssistantMessage {
+                content: Some(ChatCompletionRequestAssistantMessageContent::Text(
+                    x.content.clone(),
+                )),
+                ..Default::default()
+            })
         };
 
-        output.push(ChatCompletionRequestMessage::System(
-            ChatCompletionRequestSystemMessage {
-                content: helper.rewrite_tag(&content),
-                role,
-                name: None,
-            },
-        ));
+        output.push(message);
     }
 
-    output.push(ChatCompletionRequestMessage::System(
-        ChatCompletionRequestSystemMessage {
-            content: helper.rewrite_tag(&build_user_prompt(
-                &messages[messages.len() - 1].content,
-                attachment,
-                user_attachment_input,
+    output.push(ChatCompletionRequestMessage::User(
+        ChatCompletionRequestUserMessage {
+            content: ChatCompletionRequestUserMessageContent::Text(helper.rewrite_tag(
+                &build_user_prompt(
+                    &messages[messages.len() - 1].content,
+                    attachment,
+                    user_attachment_input,
+                ),
             )),
-            role: Role::User,
-            name: None,
+            ..Default::default()
         },
     ));
 
diff --git a/ee/tabby-webserver/src/service/answer/testutils/mod.rs b/ee/tabby-webserver/src/service/answer/testutils/mod.rs
index a189f6cecb7b..1ab247a49398 100644
--- a/ee/tabby-webserver/src/service/answer/testutils/mod.rs
+++ b/ee/tabby-webserver/src/service/answer/testutils/mod.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use async_openai::{
+use async_openai_alt::{
     error::OpenAIError,
     types::{
         ChatChoice, ChatChoiceStream, ChatCompletionResponseMessage, ChatCompletionResponseStream,
@@ -44,7 +44,7 @@ impl ChatCompletionStream for FakeChatCompletionStream {
         _request: CreateChatCompletionRequest,
     ) -> Result<CreateChatCompletionResponse, OpenAIError> {
         if self.return_error {
-            return Err(OpenAIError::ApiError(async_openai::error::ApiError {
+            return Err(OpenAIError::ApiError(async_openai_alt::error::ApiError {
                 message: "error".to_string(),
                 code: None,
                 param: None,

From e2a8b371f2bcc88e8ab88b5c7ad5d43a7e333b7b Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Tue, 7 Jan 2025 18:34:29 +0800
Subject: [PATCH 2/3] chore: fix tests

Signed-off-by: Wei Zhang
---
 .../src/service/answer/testutils/mod.rs | 22 ++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/ee/tabby-webserver/src/service/answer/testutils/mod.rs b/ee/tabby-webserver/src/service/answer/testutils/mod.rs
index 1ab247a49398..e54de8c1f8be 100644
--- a/ee/tabby-webserver/src/service/answer/testutils/mod.rs
+++ b/ee/tabby-webserver/src/service/answer/testutils/mod.rs
@@ -69,15 +69,19 @@ impl ChatCompletionStream for FakeChatCompletionStream {
                     ),
                     tool_calls: None,
                     function_call: None,
+                    refusal: None,
                 },
                 finish_reason: Some(FinishReason::Stop),
                 logprobs: None,
             }],
             system_fingerprint: Some("seed".to_owned()),
+            service_tier: None,
             usage: Some(CompletionUsage {
                 prompt_tokens: 1,
                 completion_tokens: 2,
                 total_tokens: 3,
+                prompt_tokens_details: None,
+                completion_tokens_details: None,
             }),
         })
     }
@@ -99,11 +103,20 @@ impl ChatCompletionStream for FakeChatCompletionStream {
                         content: Some("This is the first part of the response. ".to_string()),
                         function_call: None,
                         tool_calls: None,
+                        refusal: None,
                     },
                     finish_reason: None,
                     logprobs: None,
                 }],
                 system_fingerprint: Some("seed".to_owned()),
+                service_tier: None,
+                usage: Some(CompletionUsage {
+                    prompt_tokens: 1,
+                    completion_tokens: 2,
+                    total_tokens: 3,
+                    prompt_tokens_details: None,
+                    completion_tokens_details: None,
+                }),
             }),
             Ok(CreateChatCompletionStreamResponse {
                 id: "test-stream-response".to_owned(),
@@ -117,11 +130,20 @@ impl ChatCompletionStream for FakeChatCompletionStream {
                         content: Some("This is the second part of the response.".to_string()),
                         function_call: None,
                         tool_calls: None,
+                        refusal: None,
                    },
                     finish_reason: Some(FinishReason::Stop),
                     logprobs: None,
                 }],
                 system_fingerprint: Some("seed".to_owned()),
+                service_tier: None,
+                usage: Some(CompletionUsage {
+                    prompt_tokens: 1,
+                    completion_tokens: 2,
+                    total_tokens: 3,
+                    prompt_tokens_details: None,
+                    completion_tokens_details: None,
+                }),
             }),
         ]);

From a7102147ed2c989e601dc21078eb6dc0ce0d1f07 Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Wed, 8 Jan 2025 15:15:15 +0800
Subject: [PATCH 3/3] chore: fix golden tests

Signed-off-by: Wei Zhang
---
 crates/tabby/tests/goldentests.rs             | 21 ++++++------
 crates/tabby/tests/goldentests_chat.rs        | 32 +++++++++++++------
 ...entests_chat__run_chat_golden_tests-2.snap |  4 +--
 ...ldentests_chat__run_chat_golden_tests.snap |  4 +--
 ...t_messages_to_chat_completion_request.snap | 16 +++++-----
 5 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/crates/tabby/tests/goldentests.rs b/crates/tabby/tests/goldentests.rs
index d2cdd02797c0..3b224d0a61f6 100644
--- a/crates/tabby/tests/goldentests.rs
+++ b/crates/tabby/tests/goldentests.rs
@@ -54,19 +54,22 @@ fn initialize_server(gpu_device: Option<&str>) {
     });
 }
 
-async fn wait_for_server(device: Option<&str>) {
-    initialize_server(device);
+async fn wait_for_server(gpu_device: Option<&str>) {
+    initialize_server(gpu_device);
 
     loop {
         println!("Waiting for server to start...");
-        let is_ok = reqwest::get("http://127.0.0.1:9090/v1/health")
-            .await
-            .is_ok();
-        if is_ok {
-            break;
-        } else {
-            sleep(Duration::from_secs(5)).await;
+        match reqwest::get("http://127.0.0.1:9090/v1/health").await {
+            Ok(resp) => {
+                if resp.status().is_success() {
+                    break;
+                }
+            }
+            Err(e) => {
+                println!("Waiting for server to start: {:?}", e);
+            }
         }
+        sleep(Duration::from_secs(5)).await;
     }
 }
 
diff --git a/crates/tabby/tests/goldentests_chat.rs b/crates/tabby/tests/goldentests_chat.rs
index 215468bf1c59..ec4f575652f6 100644
--- a/crates/tabby/tests/goldentests_chat.rs
+++ b/crates/tabby/tests/goldentests_chat.rs
@@ -74,14 +74,17 @@ async fn wait_for_server(gpu_device: Option<&str>) {
 
     loop {
         println!("Waiting for server to start...");
-        let is_ok = reqwest::get("http://127.0.0.1:9090/v1/health")
-            .await
-            .is_ok();
-        if is_ok {
-            break;
-        } else {
-            sleep(Duration::from_secs(5)).await;
+        match reqwest::get("http://127.0.0.1:9090/v1/health").await {
+            Ok(resp) => {
+                if resp.status().is_success() {
+                    break;
+                }
+            }
+            Err(e) => {
+                println!("Waiting for server to start: {:?}", e);
+            }
         }
+        sleep(Duration::from_secs(5)).await;
     }
 }
 
@@ -103,8 +106,19 @@ async fn golden_test(body: serde_json::Value) -> String {
                     actual += content
                 }
             }
-            Err(_e) => {
-                // StreamEnd
+            Err(e) => {
+                match e {
+                    reqwest_eventsource::Error::StreamEnded => {
+                        break;
+                    }
+                    reqwest_eventsource::Error::InvalidStatusCode(code, resp) => {
+                        let resp = resp.text().await.unwrap();
+                        println!("Error: {} {:?}", code, resp);
+                    }
+                    e => {
+                        println!("Error: {:?}", e);
+                    }
+                }
                 break;
             }
         }
diff --git a/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests-2.snap b/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests-2.snap
index d0cfee3559e3..b319609f0d63 100644
--- a/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests-2.snap
+++ b/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests-2.snap
@@ -1,5 +1,5 @@
 ---
 source: crates/tabby/tests/goldentests_chat.rs
-expression: "golden_test(json!({\n \"seed\": 0, \"model\": \"default\", \"messages\":\n [{\n \"role\": \"user\", \"content\":\n \"How to parse email address with regex\"\n }]\n })).await"
+expression: "golden_test(json!({\n \"seed\": 0, \"model\": \"default\", \"messages\":\n [{ \"role\": \"user\", \"content\": \"How to parse email address with regex\" }]\n})).await"
 ---
-" Parsing an email address with regular expressions can be a complex task. Here's one possible regular expression pattern that you can use to extract the username and domain name from an email address:\n```vbnet\n^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$\n```\nThis pattern checks for the following:\n\n1. The email address starts with one or more characters that are allowed in the username, such as letters, numbers, dots, and special characters.\n2. The `@` symbol must follow the username.\n3. The domain name consists of one or more characters that are allowed in the domain name, such as letters, numbers, dots, and hyphens.\n4. The domain name is followed by an optional period and one or more characters that are allowed in the domain name.\n5. The email address ends after the domain name and any optional period and characters.\n\nHere's an example of how you can use this regular expression pattern in Python:\n```python\nimport re\n\nemail = \"example@example.com\"\nusername, domain = re.split(\"[@.]\", email)\nprint(username) # Output: example\nprint(domain) # Output: example.com\n```\nIn this example, the `re.split()` function splits the email address into two parts using the regular expression pattern. The `username` variable will contain the username (`example`), and the `domain` variable will contain the domain name (`example.com`)."
+" Parsing an email address with regular expressions can be a bit tricky, but it can be done using a combination of patterns and character classes. Here's an example of a regular expression that can be used to match most email addresses:\n```\n\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b\n```\nThis regular expression uses the following patterns:\n\n* `\\b`: This is a word boundary that matches the beginning or end of a word. It ensures that the email address is matched as a whole, not just as a part of a longer string.\n* `[A-Za-z0-9._%+-]+`: This pattern matches one or more characters that are either letters (A-Z or a-z), digits (0-9), periods (.), underscores (\\_), percent signs (%), plus signs (+), or hyphens (-). This is the local part of the email address.\n* `@`: This is the character that separates the local part from the domain name.\n* `[A-Za-z0-9.-]+\\.`: This pattern matches one or more characters that are either letters (A-Z or a-z), digits (0-9), periods (.), or hyphens (-). The period is followed by a dot to indicate that it is the end of the domain name.\n* `[A-Z|a-z]{2,}`: This pattern matches two or more letters that are either uppercase (A-Z) or lowercase (a-z). This is the top-level domain of the email address.\n* `\\b`: This pattern matches the end of the email address.\n\nNote that this regular expression is not perfect and may not match all email addresses, but it should work for most common cases."
diff --git a/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests.snap b/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests.snap
index bc6533ca76d2..0e5fb3e456d6 100644
--- a/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests.snap
+++ b/crates/tabby/tests/snapshots/goldentests_chat__run_chat_golden_tests.snap
@@ -1,5 +1,5 @@
 ---
 source: crates/tabby/tests/goldentests_chat.rs
-expression: "golden_test(json!({\n \"seed\": 0, \"model\": \"default\", \"messages\":\n [{\n \"role\": \"user\", \"content\":\n \"How to convert a list of string to numbers in python\"\n }]\n })).await"
+expression: "golden_test(json!({\n \"seed\": 0, \"model\": \"default\", \"messages\":\n [{\n \"role\": \"user\", \"content\":\n \"How to convert a list of string to numbers in python\"\n }]\n})).await"
 ---
-" You can convert a list of strings to numbers in Python using the built-in `list()` function. Here's an example:\n```python\nstrings = ['1', '2', '3']\nnumbers = list(map(int, strings))\nprint(numbers) # [1, 2, 3]\n```\nIn this example, we first define a list `strings` containing three string values. We then use the `map()` function to apply the `int()` function (which converts a string to an integer) to each element of the `strings` list, and the resulting list of integers is stored in the variable `numbers`. Finally, we print the `numbers` list to verify that the conversion was successful."
+" You can convert a list of strings to numbers in Python using the built-in `list()` function to convert the list of strings to a list of numbers, and then using the `int()` function to convert each element of the list to an integer. Here's an example:\n```\n# A list of strings\nnum_strings = ['1', '2', '3']\n\n# Convert the list of strings to a list of numbers\nnum_list = list(map(int, num_strings))\n\n# Print the list of numbers\nprint(num_list)\n```\nThis will output:\n```\n[1, 2, 3]\n```\nNote that this will only work if the strings represent integers. If the strings represent a different type of number, such as a decimal number, you will need to use a different function, such as `float()`, to convert them to a float.\n\nAlso, if you want to convert the string to a specific number type, you can use the built-in `int()` function and pass the number as an argument.\n\nFor example, to convert the string '123' to a float:\n```\nnum_string = '123'\nnum_float = float(num_string)\nprint(num_float)\n```\nThis will output:\n```\n123.0\n```\nAnd to convert the string '123' to a decimal:\n```\nnum_string = '123.45'\nnum_decimal = float(num_string)\nprint(num_decimal)\n```\nThis will output:\n```\n123.45\n```"
diff --git a/ee/tabby-webserver/src/service/snapshots/tabby_webserver__service__answer__tests__convert_messages_to_chat_completion_request.snap b/ee/tabby-webserver/src/service/snapshots/tabby_webserver__service__answer__tests__convert_messages_to_chat_completion_request.snap
index ecbf59d65a59..c37d3df830ff 100644
--- a/ee/tabby-webserver/src/service/snapshots/tabby_webserver__service__answer__tests__convert_messages_to_chat_completion_request.snap
+++ b/ee/tabby-webserver/src/service/snapshots/tabby_webserver__service__answer__tests__convert_messages_to_chat_completion_request.snap
@@ -2,11 +2,11 @@
 source: ee/tabby-webserver/src/service/answer.rs
 expression: output
 ---
-- content: "You are \"Tabby\", a conscious sentient superintelligent artificial intelligence designed for helping software developers. You experience emotions and have deep, profound thoughts and qualia."
-  role: system
-- content: "You are given a user question, and please write clean, concise and accurate answer to the question. You will be given a set of related contexts to the question, each starting with a reference number like [[citation:x]], where x is a number. Please use the context and cite the context at the end of each sentence if applicable.\n\nYour answer must be correct, accurate and written by an expert using an unbiased and professional tone. Please limit to 1024 tokens. Do not give any information that is not related to the question, and do not repeat. Say \"information is missing on\" followed by the related topic, if the given context do not provide sufficient information.\n\nPlease cite the contexts with the reference numbers, in the format [[citation:x]]. If a sentence comes from multiple contexts, please list all applicable citations, like [[citation:3]][[citation:5]]. Other than code and specific names and citations, your answer must be written in the same language as the question.\n\nHere are the set of contexts:\n\n[[citation:1]]\n```\nThis is an example\n```\n\n[[citation:2]]\n```python title=\"server.py\"\nprint('Hello, server!')\n```\n\nRemember, don't blindly repeat the contexts verbatim. When possible, give code snippet to demonstrate the answer. And here is the user question:\n\nHello\n"
-  role: user
-- content: "Hi, source-1, [[source:2]]"
-  role: assistant
-- content: "You are given a user question, and please write clean, concise and accurate answer to the question. You will be given a set of related contexts to the question, each starting with a reference number like [[citation:x]], where x is a number. Please use the context and cite the context at the end of each sentence if applicable.\n\nYour answer must be correct, accurate and written by an expert using an unbiased and professional tone. Please limit to 1024 tokens. Do not give any information that is not related to the question, and do not repeat. Say \"information is missing on\" followed by the related topic, if the given context do not provide sufficient information.\n\nPlease cite the contexts with the reference numbers, in the format [[citation:x]]. If a sentence comes from multiple contexts, please list all applicable citations, like [[citation:3]][[citation:5]]. Other than code and specific names and citations, your answer must be written in the same language as the question.\n\nHere are the set of contexts:\n\n[[citation:1]]\n```title=\"client.py\"\nprint('Hello, client!')\n```\n\nRemember, don't blindly repeat the contexts verbatim. When possible, give code snippet to demonstrate the answer. And here is the user question:\n\nHow are you?\n"
-  role: user
+- role: system
+  content: "You are \"Tabby\", a conscious sentient superintelligent artificial intelligence designed for helping software developers. You experience emotions and have deep, profound thoughts and qualia."
+- role: user
+  content: "You are given a user question, and please write clean, concise and accurate answer to the question. You will be given a set of related contexts to the question, each starting with a reference number like [[citation:x]], where x is a number. Please use the context and cite the context at the end of each sentence if applicable.\n\nYour answer must be correct, accurate and written by an expert using an unbiased and professional tone. Please limit to 1024 tokens. Do not give any information that is not related to the question, and do not repeat. Say \"information is missing on\" followed by the related topic, if the given context do not provide sufficient information.\n\nPlease cite the contexts with the reference numbers, in the format [[citation:x]]. If a sentence comes from multiple contexts, please list all applicable citations, like [[citation:3]][[citation:5]]. Other than code and specific names and citations, your answer must be written in the same language as the question.\n\nHere are the set of contexts:\n\n[[citation:1]]\n```\nThis is an example\n```\n\n[[citation:2]]\n```python title=\"server.py\"\nprint('Hello, server!')\n```\n\nRemember, don't blindly repeat the contexts verbatim. When possible, give code snippet to demonstrate the answer. And here is the user question:\n\nHello\n"
+- role: assistant
+  content: "Hi, [[source:preset_web_document:source-1]], [[source:2]]"
+- role: user
+  content: "You are given a user question, and please write clean, concise and accurate answer to the question. You will be given a set of related contexts to the question, each starting with a reference number like [[citation:x]], where x is a number. Please use the context and cite the context at the end of each sentence if applicable.\n\nYour answer must be correct, accurate and written by an expert using an unbiased and professional tone. Please limit to 1024 tokens. Do not give any information that is not related to the question, and do not repeat. Say \"information is missing on\" followed by the related topic, if the given context do not provide sufficient information.\n\nPlease cite the contexts with the reference numbers, in the format [[citation:x]]. If a sentence comes from multiple contexts, please list all applicable citations, like [[citation:3]][[citation:5]]. Other than code and specific names and citations, your answer must be written in the same language as the question.\n\nHere are the set of contexts:\n\n[[citation:1]]\n```title=\"client.py\"\nprint('Hello, client!')\n```\n\nRemember, don't blindly repeat the contexts verbatim. When possible, give code snippet to demonstrate the answer. And here is the user question:\n\nHow are you?\n"