Skip to content

Commit

Permalink
Merge pull request #182 from CerebriumAI/jono/update-include-exclude-array
Browse the repository at this point in the history

refactor: update include/exclude to toml array
  • Loading branch information
jonoirwinrsa authored Oct 22, 2024
2 parents 4d9d1c0 + 632818f commit 858d361
Show file tree
Hide file tree
Showing 9 changed files with 18 additions and 18 deletions.
4 changes: 2 additions & 2 deletions cerebrium/environments/config-files.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -138,8 +138,8 @@ Below is an example of a config file that takes advantage of all the features we
[cerebrium.deployment]
name = "my-app"
python_version = "3.10"
include = "[./*, main.py]"
exclude = "[./.*, ./__*]"
include = ["./*", "main.py"]
exclude = ["./.*", "./__*"]
docker_base_image_url = "debian:bookworm-slim"
shell_commands = []

Expand Down
4 changes: 2 additions & 2 deletions migrations/hugging-face.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,8 @@ Scaffold your application by running `cerebrium init [PROJECT_NAME]`. During the
name = "llama-8b-vllm"
python_version = "3.11"
docker_base_image_url = "debian:bookworm-slim"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[.*]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = [".*"]

[cerebrium.hardware]
cpu = 2
Expand Down
4 changes: 2 additions & 2 deletions migrations/replicate.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ Looking at the cog.yaml, we need to add/change the following in our cerebrium.to
[cerebrium.deployment]
name = "cog-migration-sdxl"
python_version = "3.11"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[./example_exclude]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = ["./example_exclude"]
docker_base_image_url = "nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04"
shell_commands = [
"curl -o /usr/local/bin/pget -L 'https://github.com/replicate/pget/releases/download/v0.6.2/pget_linux_x86_64' && chmod +x /usr/local/bin/pget"
Expand Down
4 changes: 2 additions & 2 deletions v4/examples/langchain.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,8 @@ disable_deployment_confirmation = false
[cerebrium.deployment]
name = "langchain-qa"
python_version = "3.10"
include = "[./*, main.py]"
exclude = "[./.*, ./__*]"
include = ["./*", "main.py"]
exclude = ["./.*", "./__*"]

[cerebrium.hardware]
gpu = "AMPERE_A5000"
Expand Down
4 changes: 2 additions & 2 deletions v4/examples/mistral-vllm.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -121,8 +121,8 @@ disable_confirmation = false
[cerebrium.deployment]
name = "mistral-vllm"
python_version = "3.11"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[./example_exclude]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = ["./example_exclude"]
docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

[cerebrium.hardware]
Expand Down
4 changes: 2 additions & 2 deletions v4/examples/sdxl.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@ To start, your cerebrium.toml file is where you can set your compute/environment
[cerebrium.deployment]
name = "sdxl"
python_version = "3.10"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[./.*, ./__*]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = ["./.*", "./__*"]
docker_base_image_url = "debian:bookworm-slim"

[cerebrium.hardware]
Expand Down
4 changes: 2 additions & 2 deletions v4/examples/streaming-falcon-7B.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -147,8 +147,8 @@ disable_confirmation = false
[cerebrium.deployment]
name = "streaming-falcon"
python_version = "3.11"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[./example_exclude]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = ["./example_exclude"]
docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

[cerebrium.hardware]
Expand Down
4 changes: 2 additions & 2 deletions v4/examples/tensorRT.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@ TensorRT-LLM has a demo implementation of Llama on its GitHub repo which you can
[cerebrium.deployment]
name = "llama-3b-tensorrt"
python_version = "3.10"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[./example_exclude]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = ["./example_exclude"]
docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"
[cerebrium.hardware]
Expand Down
4 changes: 2 additions & 2 deletions v4/examples/transcribe-whisper.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -128,8 +128,8 @@ Your cerebrium.toml file is where you can set your compute/environment. Your cer
[cerebrium.deployment]
name = "distil-whisper"
python_version = "3.11"
include = "[./*, main.py, cerebrium.toml]"
exclude = "[./example_exclude]"
include = ["./*", "main.py", "cerebrium.toml"]
exclude = ["./example_exclude"]
docker_base_image_url = "nvidia/cuda:12.1.1-runtime-ubuntu22.04"

[cerebrium.hardware]
Expand Down

0 comments on commit 858d361

Please sign in to comment.