Merge pull request #762 from roboflow/jetpack-6

Add Jetpack 6 Support

PawelPeczek-Roboflow authored Nov 7, 2024
2 parents c0f4bc7 + 1b5c058 commit e13d72c

Showing 10 changed files with 217 additions and 6 deletions.
45 changes: 45 additions & 0 deletions .github/workflows/docker.jetson.6.0.0.yml
@@ -0,0 +1,45 @@
name: Build and Push Jetson 6.X Container

on:
  release:
    types: [created]
  push:
    branches: [main]
  workflow_dispatch:
    inputs:
      force_push:
        type: boolean
        description: "Do you want to push image after build?"
        default: false

env:
  VERSION: "0.0.0" # Default version, will be overwritten

jobs:
  docker:
    runs-on:
      labels: ubuntu-latest
    timeout-minutes: 120
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: 🛎️ Checkout
        uses: actions/checkout@v4
      - name: Read version from file
        run: echo "VERSION=$(DISABLE_VERSION_CHECK=true python ./inference/core/version.py)" >> $GITHUB_ENV
      - name: Set up Depot CLI
        uses: depot/setup-action@v1
      - name: Build and Push
        uses: depot/build-push-action@v1
        with:
          push: ${{ github.event_name == 'release' || (github.event.inputs.force_push == 'true') }}
          project: grl7ffzxd7
          tags: roboflow/roboflow-inference-server-jetson-6.0.0:latest,roboflow/roboflow-inference-server-jetson-6.0.0:${{ env.VERSION }}
          platforms: linux/arm64
          file: ./docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0
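
The `workflow_dispatch` trigger exposes a `force_push` input so an image can be pushed outside of a release. A hedged sketch of a manual trigger via the GitHub CLI (assuming `gh` is installed and authenticated for this repository):

```bash
# Manually dispatch the Jetson 6.X build and force an image push
gh workflow run docker.jetson.6.0.0.yml -f force_push=true
```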
49 changes: 49 additions & 0 deletions .github/workflows/test.jetson_6.0.0.yml
@@ -0,0 +1,49 @@
name: Code Quality & Regression Tests - Jetson 6.0.0

on:
  workflow_dispatch:

jobs:
  build:
    if: ${{ !github.event.act }}
    runs-on: [self-hosted, jetson, jetpack-6.0.0]

    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: 🛎️ Checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}
      - name: 📦 Cache Python packages
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('requirements/**') }}
          restore-keys: |
            ${{ runner.os }}-pip-${{ matrix.python-version }}-
      - name: 🦾 Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install -r requirements/requirements.test.integration.txt
      - name: 🔨 Build and Push Test Docker - Jetson 6.0.0
        run: |
          docker pull roboflow/roboflow-inference-server-jetson-6.0.0:test
          docker build -t roboflow/roboflow-inference-server-jetson-6.0.0:test -f docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 .
          docker push roboflow/roboflow-inference-server-jetson-6.0.0:test
      - name: 🔋 Start Test Docker - Jetson 6.0.0
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-6.0.0 make start_test_docker_jetson
      - name: 🧪 Regression Tests - Jetson 6.0.0
        run: |
          SKIP_VISUALISATION_TESTS=true SKIP_LMM_TEST=True MAX_WAIT=300 SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/
      - name: 🧹 Cleanup Test Docker - Jetson 6.0.0
        run: make stop_test_docker
        if: success() || failure()
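
To reproduce the regression step outside CI, a rough local sketch (assuming the test container from the previous step is already listening on port 9101 and a valid `API_KEY` is exported; the dataset-specific keys are omitted):

```bash
# Local subset of the workflow's regression invocation
SKIP_VISUALISATION_TESTS=true MAX_WAIT=300 FUNCTIONAL=true PORT=9101 \
    python -m pytest tests/inference/integration_tests/
```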
80 changes: 80 additions & 0 deletions docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0
@@ -0,0 +1,80 @@
FROM roboflow/l4t-ml:r36.3.0

ARG DEBIAN_FRONTEND=noninteractive
ENV LANG=en_US.UTF-8

# Install dependencies
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \
        lshw \
        git \
        gfortran \
        build-essential \
        libatlas-base-dev \
        libsm6 \
        libxext6 \
        wget \
        gdal-bin \
        libgdal-dev && \
    rm -rf /var/lib/apt/lists/*

# Copy all requirements files
COPY requirements/ ./requirements/

# Remove specific packages from requirements files
RUN sed -i '/^opencv-python/d;/^onnxruntime/d;/^opencv-python-contrib/d' requirements/*.txt

# Upgrade pip and install Python packages
RUN python3 -m pip install --upgrade pip && \
    python3 -m pip install --upgrade \
        -r requirements/_requirements.txt \
        -r requirements/requirements.clip.txt \
        -r requirements/requirements.http.txt \
        -r requirements/requirements.doctr.txt \
        -r requirements/requirements.groundingdino.txt \
        -r requirements/requirements.sdk.http.txt \
        -r requirements/requirements.yolo_world.txt \
        -r requirements/requirements.jetson.txt

# Build the application
WORKDIR /build
COPY . .

RUN rm -f dist/* && \
    python3 .release/pypi/inference.core.setup.py bdist_wheel && \
    python3 .release/pypi/inference.gpu.setup.py bdist_wheel && \
    python3 .release/pypi/inference.sdk.setup.py bdist_wheel && \
    python3 .release/pypi/inference.cli.setup.py bdist_wheel && \
    python3 -m pip install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl

# Set up the application runtime
WORKDIR /app
COPY inference/ ./inference/
COPY inference_sdk/ ./inference_sdk/
COPY docker/config/gpu_http.py ./gpu_http.py

# Set environment variables
ENV VERSION_CHECK_MODE=continuous \
    PROJECT=roboflow-platform \
    ORT_TENSORRT_FP16_ENABLE=1 \
    ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \
    CORE_MODEL_SAM_ENABLED=False \
    NUM_WORKERS=1 \
    HOST=0.0.0.0 \
    PORT=9001 \
    OPENBLAS_CORETYPE=ARMV8 \
    LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1 \
    WORKFLOWS_STEP_EXECUTION_MODE=local \
    WORKFLOWS_MAX_CONCURRENT_STEPS=2 \
    API_LOGGING_ENABLED=True \
    CORE_MODEL_TROCR_ENABLED=false \
    RUNS_ON_JETSON=True \
    ENABLE_STREAM_API=True \
    ENABLE_WORKFLOWS_PROFILING=True \
    ENABLE_PROMETHEUS=True

# Expose the application port
EXPOSE 9001

# Set the entrypoint
ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT
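
To build the same image locally on a JetPack 6 device instead of through Depot, a hedged sketch mirroring the build step of the test workflow above (the `:dev` tag is illustrative):

```bash
# Local arm64 build of the Jetson 6.0.0 image, run from the repository root
docker build \
    -t roboflow/roboflow-inference-server-jetson-6.0.0:dev \
    -f docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 .
```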
12 changes: 12 additions & 0 deletions docker/dockerfiles/README.md
@@ -15,3 +15,15 @@ Dockerfile.onnx.trt | Deprecated | No |
Dockerfile.onnx.trt.base | Deprecated | No |
Dockerfile.onnx.udp.gpu | Deprecated | No |


## Base image

The `roboflow/l4t-ml` image is based on the `l4t-ml` image from the [jetson-containers](https://github.com/dusty-nv/jetson-containers/tree/master/packages/l4t/l4t-ml) repository. The image is built on a Jetson device and provides GPU-accelerated versions of common ML tools.

To build the image, run the following command:

```bash
jetson-containers build l4t-ml
```

This requires that you have the `jetson-containers` tool installed on your system. Follow the instructions in the [jetson-containers](https://github.com/dusty-nv/jetson-containers/blob/master/docs/setup.md) repository to install the tool.
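
Once built, a quick smoke test of the base image (a hedged sketch: the `r36.3.0` tag matches the `FROM` line of `Dockerfile.onnx.jetson.6.0.0`, and PyTorch is assumed to be bundled in `l4t-ml`):

```bash
# Confirm the base image can see the Jetson GPU from inside a container
docker run --rm --runtime=nvidia roboflow/l4t-ml:r36.3.0 \
    python3 -c "import torch; print('CUDA available:', torch.cuda.is_available())"
```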
8 changes: 7 additions & 1 deletion docs/models.md
@@ -133,7 +133,7 @@ docker run -it --network=host --gpus=all roboflow/roboflow-inference-server-gpu:
docker run -p 9001:9001 roboflow/roboflow-inference-server-arm-cpu:latest
```

- Run on NVIDIA Jetson with JetPack `4.x` (Deprecated):

```bash
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson:latest
@@ -145,6 +145,12 @@ docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-5.1.1:latest
```

- Run on NVIDIA Jetson with JetPack `6.x`:

```bash
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-6.0.0:latest
```

</details>

<br/>
2 changes: 1 addition & 1 deletion docs/quickstart/devices.md
@@ -9,7 +9,7 @@ You can set up a server to use computer vision models with Inference on the foll
- ARM CPU (macOS, Raspberry Pi)
- x86 CPU (macOS, Linux, Windows)
- NVIDIA GPU
- NVIDIA Jetson (JetPack 4.5.x, JetPack 4.6.x, JetPack 5.x)
- NVIDIA Jetson (JetPack 4.5.x, JetPack 4.6.x, JetPack 5.x, JetPack 6.x)

## Model Compatibility

11 changes: 9 additions & 2 deletions docs/quickstart/docker.md
@@ -63,14 +63,14 @@ hardware configurations.
docker pull roboflow/roboflow-inference-server-gpu
```

=== "Jetson 4.5.x"
=== "Jetson 4.5.x" (Deprecated)
Official Roboflow Inference Server Docker Image for Nvidia Jetson JetPack 4.5.x Targets.

```
docker pull roboflow/roboflow-inference-server-jetson-4.5.0
```

=== "Jetson 4.6.x"
=== "Jetson 4.6.x" (Deprecated)
Official Roboflow Inference Server Docker Image for Nvidia Jetson JetPack 4.6.x Targets.

```
@@ -84,6 +84,13 @@ hardware configurations.
docker pull roboflow/roboflow-inference-server-jetson-5.1.1
```

=== "Jetson 6.x"
Official Roboflow Inference Server Docker Image for Nvidia Jetson JetPack 6.x Targets.

```
docker pull roboflow/roboflow-inference-server-jetson-6.0.0
```

### Step #2: Run the Docker Container

Once you have a Docker image (either built locally or pulled from Docker Hub), you can run the Roboflow Inference Server container.
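
For example, on a JetPack 6.x device (the same command documented in `docs/models.md` within this diff):

```
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-6.0.0:latest
```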
8 changes: 7 additions & 1 deletion examples/inference-client/README.md
@@ -65,7 +65,7 @@ docker run --gpus=all --net=host -e STREAM_ID=0 -e MODEL_ID=<> -e ROBOFLOW_API_K
docker run -p 9001:9001 roboflow/roboflow-inference-server-arm-cpu:latest
```

- Run on Nvidia Jetson with JetPack `4.x` (Deprecated):

```bash
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson:latest
@@ -77,6 +77,12 @@ docker run --gpus=all --net=host -e STREAM_ID=0 -e MODEL_ID=<> -e ROBOFLOW_API_K
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-5.1.1:latest
```

- Run on Nvidia Jetson with JetPack `6.x`:

```bash
docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-6.0.0:latest
```

### UDP

We only support one UDP container at the moment. Refer to the UDP command from earlier to set up UDP.
4 changes: 3 additions & 1 deletion inference_cli/lib/container_adapter.py
@@ -103,8 +103,10 @@ def _get_jetpack_image(jetpack_version: str) -> str:
return "roboflow/roboflow-inference-server-jetson-4.5.0:latest"
if jetpack_version.startswith("4.6"):
return "roboflow/roboflow-inference-server-jetson-4.6.1:latest"
if jetpack_version.startswith("5.1"):
if jetpack_version.startswith("5."):
return "roboflow/roboflow-inference-server-jetson-5.1.1:latest"
if jetpack_version.startswith("6."):
return "roboflow/roboflow-inference-server-jetson-6.0.0:latest"
raise RuntimeError(f"Jetpack version: {jetpack_version} not supported")


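This diff broadens the `5.1` prefix check to `5.` and adds a `6.` branch. For illustration, a hedged sketch of how the dispatch now resolves (calling the helper directly, with the surrounding CLI plumbing omitted):

```python
# Hypothetical calls against _get_jetpack_image as changed above
print(_get_jetpack_image("6.0"))    # roboflow/roboflow-inference-server-jetson-6.0.0:latest
print(_get_jetpack_image("5.1.2"))  # matches the broadened "5." prefix -> 5.1.1 image
_get_jetpack_image("3.0")           # raises RuntimeError: version not supported
```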
4 changes: 4 additions & 0 deletions requirements/requirements.jetson.txt
@@ -0,0 +1,4 @@
pypdfium2
jupyterlab
PyYAML
onnxruntime-gpu
