From 632818f38c8eb5ff006c6c8bab5f38e03d7d4a2f Mon Sep 17 00:00:00 2001
From: Jonathan Irwin
Date: Tue, 22 Oct 2024 11:02:14 -0400
Subject: [PATCH] refactor: update include/exclude to toml array

---
 cerebrium/environments/config-files.mdx | 4 ++--
 migrations/hugging-face.mdx             | 4 ++--
 migrations/replicate.mdx                | 4 ++--
 v4/examples/langchain.mdx               | 4 ++--
 v4/examples/mistral-vllm.mdx            | 4 ++--
 v4/examples/sdxl.mdx                    | 4 ++--
 v4/examples/streaming-falcon-7B.mdx     | 4 ++--
 v4/examples/tensorRT.mdx                | 4 ++--
 v4/examples/transcribe-whisper.mdx      | 4 ++--
 9 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/cerebrium/environments/config-files.mdx b/cerebrium/environments/config-files.mdx
index b99a3516..f6163fd1 100644
--- a/cerebrium/environments/config-files.mdx
+++ b/cerebrium/environments/config-files.mdx
@@ -138,8 +138,8 @@ Below is an example of a config file that takes advantage of all the features we
 [cerebrium.deployment]
 name = "my-app"
 python_version = "3.10"
-include = "[./*, main.py]"
-exclude = "[./.*, ./__*]"
+include = ["./*", "main.py"]
+exclude = ["./.*", "./__*"]
 docker_base_image_url = "debian:bookworm-slim"
 shell_commands = []

diff --git a/migrations/hugging-face.mdx b/migrations/hugging-face.mdx
index 8c279257..552b2d45 100644
--- a/migrations/hugging-face.mdx
+++ b/migrations/hugging-face.mdx
@@ -57,8 +57,8 @@ Scaffold your application by running `cerebrium init [PROJECT_NAME]`. During the
 name = "llama-8b-vllm"
 python_version = "3.11"
 docker_base_image_url = "debian:bookworm-slim"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[.*]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = [".*"]

 [cerebrium.hardware]
 cpu = 2

diff --git a/migrations/replicate.mdx b/migrations/replicate.mdx
index 0e8803e6..9d3b56cf 100644
--- a/migrations/replicate.mdx
+++ b/migrations/replicate.mdx
@@ -25,8 +25,8 @@ Looking at the cog.yaml, we need to add/change the following in our cerebrium.to
 [cerebrium.deployment]
 name = "cog-migration-sdxl"
 python_version = "3.11"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[./example_exclude]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = ["./example_exclude"]
 docker_base_image_url = "nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04"
 shell_commands = [
   "curl -o /usr/local/bin/pget -L 'https://github.com/replicate/pget/releases/download/v0.6.2/pget_linux_x86_64' && chmod +x /usr/local/bin/pget"
 ]

diff --git a/v4/examples/langchain.mdx b/v4/examples/langchain.mdx
index 815aaf48..6d2fd0fe 100644
--- a/v4/examples/langchain.mdx
+++ b/v4/examples/langchain.mdx
@@ -160,8 +160,8 @@ disable_deployment_confirmation = false
 [cerebrium.deployment]
 name = "langchain-qa"
 python_version = "3.10"
-include = "[./*, main.py]"
-exclude = "[./.*, ./__*]"
+include = ["./*", "main.py"]
+exclude = ["./.*", "./__*"]

 [cerebrium.hardware]
 gpu = "AMPERE_A5000"

diff --git a/v4/examples/mistral-vllm.mdx b/v4/examples/mistral-vllm.mdx
index 27a451d2..056f7935 100644
--- a/v4/examples/mistral-vllm.mdx
+++ b/v4/examples/mistral-vllm.mdx
@@ -121,8 +121,8 @@ disable_confirmation = false
 [cerebrium.deployment]
 name = "mistral-vllm"
 python_version = "3.11"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[./example_exclude]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = ["./example_exclude"]
 docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

 [cerebrium.hardware]

diff --git a/v4/examples/sdxl.mdx b/v4/examples/sdxl.mdx
index e462b880..6fec9afe 100644
--- a/v4/examples/sdxl.mdx
+++ b/v4/examples/sdxl.mdx
@@ -33,8 +33,8 @@ To start, your cerebrium.toml file is where you can set your compute/environment
 [cerebrium.deployment]
 name = "sdxl"
 python_version = "3.10"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[./.*, ./__*]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = ["./.*", "./__*"]
 docker_base_image_url = "debian:bookworm-slim"

 [cerebrium.hardware]

diff --git a/v4/examples/streaming-falcon-7B.mdx b/v4/examples/streaming-falcon-7B.mdx
index 62f7ff72..d742dbf7 100644
--- a/v4/examples/streaming-falcon-7B.mdx
+++ b/v4/examples/streaming-falcon-7B.mdx
@@ -147,8 +147,8 @@ disable_confirmation = false
 [cerebrium.deployment]
 name = "streaming-falcon"
 python_version = "3.11"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[./example_exclude]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = ["./example_exclude"]
 docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

 [cerebrium.hardware]

diff --git a/v4/examples/tensorRT.mdx b/v4/examples/tensorRT.mdx
index 19c63604..a5c162b3 100644
--- a/v4/examples/tensorRT.mdx
+++ b/v4/examples/tensorRT.mdx
@@ -34,8 +34,8 @@ TensorRT-LLM has a demo implementation of Llama on its GitHub repo which you can
 [cerebrium.deployment]
 name = "llama-3b-tensorrt"
 python_version = "3.10"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[./example_exclude]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = ["./example_exclude"]
 docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

 [cerebrium.hardware]

diff --git a/v4/examples/transcribe-whisper.mdx b/v4/examples/transcribe-whisper.mdx
index 331e7177..457ac954 100644
--- a/v4/examples/transcribe-whisper.mdx
+++ b/v4/examples/transcribe-whisper.mdx
@@ -128,8 +128,8 @@ Your cerebrium.toml file is where you can set your compute/environment. Your cer
 [cerebrium.deployment]
 name = "distil-whisper"
 python_version = "3.11"
-include = "[./*, main.py, cerebrium.toml]"
-exclude = "[./example_exclude]"
+include = ["./*", "main.py", "cerebrium.toml"]
+exclude = ["./example_exclude"]
 docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

 [cerebrium.hardware]
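For reference, a minimal [cerebrium.deployment] block in the corrected syntax, with values taken from the config-files.mdx hunk above. The old form quoted the whole list as one string, which TOML parses as a single value; the new form is a proper TOML array of glob-pattern strings:

[cerebrium.deployment]
name = "my-app"
python_version = "3.10"
include = ["./*", "main.py"]  # TOML array of patterns, one quoted string each
exclude = ["./.*", "./__*"]   # previously a single string: "[./.*, ./__*]"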