From 1570c5123a5b2617f0e57e6163abd276a35282e7 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 30 Oct 2024 20:42:19 -0700 Subject: [PATCH 01/10] Jetpack 6.0.0 Dockerfile --- .../dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 new file mode 100644 index 000000000..75eb662ca --- /dev/null +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -0,0 +1,81 @@ +FROM dustynv/l4t-ml:r36.2.0 + +ARG DEBIAN_FRONTEND=noninteractive +ENV LANG=en_US.UTF-8 + +RUN ln -s /usr/bin/python3 /usr/bin/python && \ + apt-get update -y && apt-get install -y \ + lshw \ + git \ + gfortran \ + build-essential \ + libatlas-base-dev \ + libsm6 \ + libxext6 \ + wget \ + python3-shapely \ + gdal-bin \ + libgdal-dev \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements/requirements.clip.txt \ + requirements/requirements.http.txt \ + requirements/requirements.doctr.txt \ + requirements/requirements.groundingdino.txt \ + requirements/requirements.sdk.http.txt \ + requirements/requirements.yolo_world.txt \ + requirements/_requirements.txt \ + ./ + +RUN find . -type f -name "*requirements*.txt" -exec sed -i '/^opencv-python/d' {} \; && \ + find . -type f -name "*requirements*.txt" -exec sed -i '/^onnxruntime/d' {} \; + +RUN python3 -m pip install --ignore-installed PyYAML && rm -rf ~/.cache/pip + +RUN python3 -m pip install --upgrade pip && python3 -m pip install \ + git+https://github.com/pypdfium2-team/pypdfium2 \ + -r _requirements.txt \ + -r requirements.clip.txt \ + -r requirements.http.txt \ + -r requirements.doctr.txt \ + -r requirements.groundingdino.txt \ + -r requirements.sdk.http.txt \ + -r requirements.yolo_world.txt \ + jupyterlab \ + --upgrade \ + && rm -rf ~/.cache/pip + +WORKDIR /build +COPY . . 
+RUN /bin/make create_wheels_for_gpu_notebook +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl + +RUN /bin/make create_wheels_for_gpu_notebook +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl + +WORKDIR /app/ +COPY inference inference +COPY inference_sdk inference_sdk +COPY docker/config/gpu_http.py gpu_http.py + +ENV VERSION_CHECK_MODE=continuous +ENV PROJECT=roboflow-platform +ENV ORT_TENSORRT_FP16_ENABLE=1 +ENV ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 +ENV CORE_MODEL_SAM_ENABLED=False +ENV PROJECT=roboflow-platform +ENV NUM_WORKERS=1 +ENV HOST=0.0.0.0 +ENV PORT=9001 +ENV OPENBLAS_CORETYPE=ARMV8 +ENV LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1 +ENV WORKFLOWS_STEP_EXECUTION_MODE=local +ENV WORKFLOWS_MAX_CONCURRENT_STEPS=2 +ENV API_LOGGING_ENABLED=True +ENV CORE_MODEL_TROCR_ENABLED=false +ENV RUNS_ON_JETSON=True +ENV ENABLE_STREAM_API=True +ENV ENABLE_WORKFLOWS_PROFILING=True +ENV ENABLE_PROMETHEUS=True + +ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT \ No newline at end of file From 17d697f697119e71ff875c0a37b63b162cf90dd6 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 30 Oct 2024 20:54:07 -0700 Subject: [PATCH 02/10] Remove duplicate line --- docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 75eb662ca..8cd180e69 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -50,9 +50,6 @@ COPY . . 
RUN /bin/make create_wheels_for_gpu_notebook RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl -RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl - WORKDIR /app/ COPY inference inference COPY inference_sdk inference_sdk From b72ee7ae23cb8497e08d659a6bfa5a6b75612295 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 30 Oct 2024 20:42:19 -0700 Subject: [PATCH 03/10] Jetpack 6.0.0 Dockerfile --- .../dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 new file mode 100644 index 000000000..75eb662ca --- /dev/null +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -0,0 +1,81 @@ +FROM dustynv/l4t-ml:r36.2.0 + +ARG DEBIAN_FRONTEND=noninteractive +ENV LANG=en_US.UTF-8 + +RUN ln -s /usr/bin/python3 /usr/bin/python && \ + apt-get update -y && apt-get install -y \ + lshw \ + git \ + gfortran \ + build-essential \ + libatlas-base-dev \ + libsm6 \ + libxext6 \ + wget \ + python3-shapely \ + gdal-bin \ + libgdal-dev \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements/requirements.clip.txt \ + requirements/requirements.http.txt \ + requirements/requirements.doctr.txt \ + requirements/requirements.groundingdino.txt \ + requirements/requirements.sdk.http.txt \ + requirements/requirements.yolo_world.txt \ + requirements/_requirements.txt \ + ./ + +RUN find . -type f -name "*requirements*.txt" -exec sed -i '/^opencv-python/d' {} \; && \ + find . 
-type f -name "*requirements*.txt" -exec sed -i '/^onnxruntime/d' {} \; + +RUN python3 -m pip install --ignore-installed PyYAML && rm -rf ~/.cache/pip + +RUN python3 -m pip install --upgrade pip && python3 -m pip install \ + git+https://github.com/pypdfium2-team/pypdfium2 \ + -r _requirements.txt \ + -r requirements.clip.txt \ + -r requirements.http.txt \ + -r requirements.doctr.txt \ + -r requirements.groundingdino.txt \ + -r requirements.sdk.http.txt \ + -r requirements.yolo_world.txt \ + jupyterlab \ + --upgrade \ + && rm -rf ~/.cache/pip + +WORKDIR /build +COPY . . +RUN /bin/make create_wheels_for_gpu_notebook +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl + +RUN /bin/make create_wheels_for_gpu_notebook +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl + +WORKDIR /app/ +COPY inference inference +COPY inference_sdk inference_sdk +COPY docker/config/gpu_http.py gpu_http.py + +ENV VERSION_CHECK_MODE=continuous +ENV PROJECT=roboflow-platform +ENV ORT_TENSORRT_FP16_ENABLE=1 +ENV ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 +ENV CORE_MODEL_SAM_ENABLED=False +ENV PROJECT=roboflow-platform +ENV NUM_WORKERS=1 +ENV HOST=0.0.0.0 +ENV PORT=9001 +ENV OPENBLAS_CORETYPE=ARMV8 +ENV LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1 +ENV WORKFLOWS_STEP_EXECUTION_MODE=local +ENV WORKFLOWS_MAX_CONCURRENT_STEPS=2 +ENV API_LOGGING_ENABLED=True +ENV CORE_MODEL_TROCR_ENABLED=false +ENV RUNS_ON_JETSON=True +ENV ENABLE_STREAM_API=True +ENV ENABLE_WORKFLOWS_PROFILING=True +ENV ENABLE_PROMETHEUS=True + +ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT \ No newline at end of file From 1fa0e3c845cc1f18b893ea830c7176b585b4f0a0 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 30 Oct 2024 20:54:07 -0700 Subject: [PATCH 04/10] Remove duplicate line --- docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 3 --- 1 file changed, 3 deletions(-) 
diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 75eb662ca..8cd180e69 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -50,9 +50,6 @@ COPY . . RUN /bin/make create_wheels_for_gpu_notebook RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl -RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl - WORKDIR /app/ COPY inference inference COPY inference_sdk inference_sdk From a060ae5142610b7e7fac43d197f8a64c6b4899c9 Mon Sep 17 00:00:00 2001 From: Alex Norell Date: Thu, 31 Oct 2024 15:10:06 -0700 Subject: [PATCH 05/10] Jetpack 6.0.0 refactor --- .../dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 117 +++++++++--------- requirements/requirements.jetson.txt | 3 + 2 files changed, 63 insertions(+), 57 deletions(-) create mode 100644 requirements/requirements.jetson.txt diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 8cd180e69..df3037978 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -1,10 +1,11 @@ -FROM dustynv/l4t-ml:r36.2.0 +FROM roboflow/l4t-ml:r36.3.0 ARG DEBIAN_FRONTEND=noninteractive -ENV LANG=en_US.UTF-8 +ENV LANG=en_US.UTF-8 -RUN ln -s /usr/bin/python3 /usr/bin/python && \ - apt-get update -y && apt-get install -y \ +# Install dependencies +RUN apt-get update -y && \ + apt-get install -y --no-install-recommends \ lshw \ git \ gfortran \ @@ -13,66 +14,68 @@ RUN ln -s /usr/bin/python3 /usr/bin/python && \ libsm6 \ libxext6 \ wget \ - python3-shapely \ gdal-bin \ - libgdal-dev \ - && rm -rf /var/lib/apt/lists/* + libgdal-dev && \ + rm -rf /var/lib/apt/lists/* -COPY requirements/requirements.clip.txt \ - requirements/requirements.http.txt 
\ - requirements/requirements.doctr.txt \ - requirements/requirements.groundingdino.txt \ - requirements/requirements.sdk.http.txt \ - requirements/requirements.yolo_world.txt \ - requirements/_requirements.txt \ - ./ +# Copy all requirements files +COPY requirements/ ./requirements/ -RUN find . -type f -name "*requirements*.txt" -exec sed -i '/^opencv-python/d' {} \; && \ - find . -type f -name "*requirements*.txt" -exec sed -i '/^onnxruntime/d' {} \; +# Remove specific packages from requirements files +RUN sed -i '/^opencv-python/d;/^onnxruntime/d' requirements/*.txt -RUN python3 -m pip install --ignore-installed PyYAML && rm -rf ~/.cache/pip - -RUN python3 -m pip install --upgrade pip && python3 -m pip install \ - git+https://github.com/pypdfium2-team/pypdfium2 \ - -r _requirements.txt \ - -r requirements.clip.txt \ - -r requirements.http.txt \ - -r requirements.doctr.txt \ - -r requirements.groundingdino.txt \ - -r requirements.sdk.http.txt \ - -r requirements.yolo_world.txt \ - jupyterlab \ - --upgrade \ - && rm -rf ~/.cache/pip +# Upgrade pip and install Python packages +RUN python3 -m pip install --upgrade pip && \ + python3 -m pip install --upgrade \ + -r requirements/_requirements.txt \ + -r requirements/requirements.clip.txt \ + -r requirements/requirements.http.txt \ + -r requirements/requirements.doctr.txt \ + -r requirements/requirements.groundingdino.txt \ + -r requirements/requirements.sdk.http.txt \ + -r requirements/requirements.yolo_world.txt \ + -r requirements/requirements.jetson.txt \ + rm -rf ~/.cache/pip +# Build the application WORKDIR /build COPY . . 
-RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl -WORKDIR /app/ -COPY inference inference -COPY inference_sdk inference_sdk -COPY docker/config/gpu_http.py gpu_http.py +RUN rm -f dist/* && \ + python3 .release/pypi/inference.core.setup.py bdist_wheel && \ + python3 .release/pypi/inference.gpu.setup.py bdist_wheel && \ + python3 .release/pypi/inference.sdk.setup.py bdist_wheel && \ + python3 .release/pypi/inference.cli.setup.py bdist_wheel && \ + python3 -m pip install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl + +# Set up the application runtime +WORKDIR /app +COPY inference/ ./inference/ +COPY inference_sdk/ ./inference_sdk/ +COPY docker/config/gpu_http.py ./gpu_http.py + +# Set environment variables +ENV VERSION_CHECK_MODE=continuous \ + PROJECT=roboflow-platform \ + ORT_TENSORRT_FP16_ENABLE=1 \ + ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \ + CORE_MODEL_SAM_ENABLED=False \ + NUM_WORKERS=1 \ + HOST=0.0.0.0 \ + PORT=9001 \ + OPENBLAS_CORETYPE=ARMV8 \ + LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1 \ + WORKFLOWS_STEP_EXECUTION_MODE=local \ + WORKFLOWS_MAX_CONCURRENT_STEPS=2 \ + API_LOGGING_ENABLED=True \ + CORE_MODEL_TROCR_ENABLED=false \ + RUNS_ON_JETSON=True \ + ENABLE_STREAM_API=True \ + ENABLE_WORKFLOWS_PROFILING=True \ + ENABLE_PROMETHEUS=True -ENV VERSION_CHECK_MODE=continuous -ENV PROJECT=roboflow-platform -ENV ORT_TENSORRT_FP16_ENABLE=1 -ENV ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 -ENV CORE_MODEL_SAM_ENABLED=False -ENV PROJECT=roboflow-platform -ENV NUM_WORKERS=1 -ENV HOST=0.0.0.0 -ENV PORT=9001 -ENV OPENBLAS_CORETYPE=ARMV8 -ENV LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1 -ENV WORKFLOWS_STEP_EXECUTION_MODE=local -ENV WORKFLOWS_MAX_CONCURRENT_STEPS=2 -ENV API_LOGGING_ENABLED=True -ENV CORE_MODEL_TROCR_ENABLED=false -ENV RUNS_ON_JETSON=True -ENV ENABLE_STREAM_API=True -ENV 
ENABLE_WORKFLOWS_PROFILING=True -ENV ENABLE_PROMETHEUS=True +# Expose the application port +EXPOSE 9001 -ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT \ No newline at end of file +# Set the entrypoint +ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT diff --git a/requirements/requirements.jetson.txt b/requirements/requirements.jetson.txt new file mode 100644 index 000000000..fb51df126 --- /dev/null +++ b/requirements/requirements.jetson.txt @@ -0,0 +1,3 @@ +pypdfium2 +jupyterlab +PyYAML From ae044716bf338587bfdef916de1a750e8ba629af Mon Sep 17 00:00:00 2001 From: Alex Norell Date: Thu, 31 Oct 2024 15:14:25 -0700 Subject: [PATCH 06/10] Remove cache --- docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index df3037978..812094aa8 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -34,8 +34,7 @@ RUN python3 -m pip install --upgrade pip && \ -r requirements/requirements.groundingdino.txt \ -r requirements/requirements.sdk.http.txt \ -r requirements/requirements.yolo_world.txt \ - -r requirements/requirements.jetson.txt \ - rm -rf ~/.cache/pip + -r requirements/requirements.jetson.txt # Build the application WORKDIR /build From 45a158ccef9bc9dc319fff2e1194175b12515011 Mon Sep 17 00:00:00 2001 From: Alex Norell Date: Mon, 4 Nov 2024 14:23:52 -0800 Subject: [PATCH 07/10] finalize container --- docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 2 +- requirements/requirements.jetson.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 812094aa8..0cdbae7b7 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ 
b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -22,7 +22,7 @@ RUN apt-get update -y && \ COPY requirements/ ./requirements/ # Remove specific packages from requirements files -RUN sed -i '/^opencv-python/d;/^onnxruntime/d' requirements/*.txt +RUN sed -i '/^opencv-python/d;/^onnxruntime/d;/^opencv-python-contrib/d' requirements/*.txt # Upgrade pip and install Python packages RUN python3 -m pip install --upgrade pip && \ diff --git a/requirements/requirements.jetson.txt b/requirements/requirements.jetson.txt index fb51df126..dbcbeed61 100644 --- a/requirements/requirements.jetson.txt +++ b/requirements/requirements.jetson.txt @@ -1,3 +1,4 @@ pypdfium2 jupyterlab PyYAML +onnxruntime-gpu From 30319d101d6b907532135dca6b695ef6dc1420cf Mon Sep 17 00:00:00 2001 From: Alex Norell Date: Mon, 4 Nov 2024 14:58:50 -0800 Subject: [PATCH 08/10] Add 6.0.0 workflow --- .github/workflows/docker.jetson.6.0.0.yml | 45 +++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 .github/workflows/docker.jetson.6.0.0.yml diff --git a/.github/workflows/docker.jetson.6.0.0.yml b/.github/workflows/docker.jetson.6.0.0.yml new file mode 100644 index 000000000..a61130fc0 --- /dev/null +++ b/.github/workflows/docker.jetson.6.0.0.yml @@ -0,0 +1,45 @@ +name: Build and Push Jetson 6.X Container + +on: + release: + types: [created] + push: + branches: [main] + workflow_dispatch: + inputs: + force_push: + type: boolean + description: "Do you want to push image after build?" 
+ default: false + +env: + VERSION: "0.0.0" # Default version, will be overwritten + +jobs: + docker: + runs-on: + labels: ubuntu-latest + timeout-minutes: 120 + permissions: + id-token: write + contents: read + steps: + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: ๐Ÿ›Ž๏ธ Checkout + uses: actions/checkout@v4 + - name: Read version from file + run: echo "VERSION=$(DISABLE_VERSION_CHECK=true python ./inference/core/version.py)" >> $GITHUB_ENV + - name: Set up Depot CLI + uses: depot/setup-action@v1 + - name: Build and Push + uses: depot/build-push-action@v1 + with: + push: ${{ github.event_name == 'release' || (github.event.inputs.force_push == 'true')}} + project: grl7ffzxd7 + tags: roboflow/roboflow-inference-server-jetson-6.0.0:latest,roboflow/roboflow-inference-server-jetson-6.0.0:${{ env.VERSION}} + platforms: linux/arm64 + file: ./docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 From 23082438e95024d4ca9c08bcbfd0feef89236fcb Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 5 Nov 2024 13:33:54 -0600 Subject: [PATCH 09/10] Update Jetpack Docs --- .github/workflows/test.jetson_6.0.0.yml | 49 +++++++++++++++++++++++++ docs/models.md | 8 +++- docs/quickstart/devices.md | 2 +- docs/quickstart/docker.md | 11 +++++- examples/inference-client/README.md | 8 +++- inference_cli/lib/container_adapter.py | 4 +- 6 files changed, 76 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/test.jetson_6.0.0.yml diff --git a/.github/workflows/test.jetson_6.0.0.yml b/.github/workflows/test.jetson_6.0.0.yml new file mode 100644 index 000000000..71b914688 --- /dev/null +++ b/.github/workflows/test.jetson_6.0.0.yml @@ -0,0 +1,49 @@ +name: Code Quality & Regression Tests - Jetson 6.0.0 + +on: + workflow_dispatch: + +jobs: + build: + if: ${{ !github.event.act }} + runs-on: [self-hosted, jetson, jetpack-6.0.0] + + steps: + - name: Set up QEMU + uses: 
docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: ๐Ÿ›Ž๏ธ Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + - name: ๐Ÿ“ฆ Cache Python packages + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('requirements/**') }} + restore-keys: | + ${{ runner.os }}-pip-${{ matrix.python-version }}- + - name: ๐Ÿฆพ Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -r requirements/requirements.test.integration.txt + - name: ๐Ÿ”จ Build and Push Test Docker - Jetson 6.0.0 + run: | + docker pull roboflow/roboflow-inference-server-jetson-6.0.0:test + docker build -t roboflow/roboflow-inference-server-jetson-6.0.0:test -f docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 . 
+ docker push roboflow/roboflow-inference-server-jetson-6.0.0:test + - name: ๐Ÿ”‹ Start Test Docker - Jetson 6.0.0 + run: | + PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-jetson-6.0.0 make start_test_docker_jetson + - name: ๐Ÿงช Regression Tests - Jetson 6.0.0 + run: | + SKIP_VISUALISATION_TESTS=true SKIP_LMM_TEST=True MAX_WAIT=300 SKIP_GROUNDING_DINO_TEST=true SKIP_SAM_TEST=true SKIP_GAZE_TEST=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/ + - name: ๐Ÿงน Cleanup Test Docker - Jetson 6.0.0 + run: make stop_test_docker + if: success() || failure() diff --git a/docs/models.md b/docs/models.md index 2fbba55c2..2ab92ed35 100644 --- a/docs/models.md +++ b/docs/models.md @@ -133,7 +133,7 @@ docker run -it --network=host --gpus=all roboflow/roboflow-inference-server-gpu: docker run -p 9001:9001 roboflow/roboflow-inference-server-arm-cpu:latest ``` -- Run on NVIDIA Jetson with JetPack `4.x`: +- Run on NVIDIA Jetson with JetPack `4.x` (Deprecated): ```bash docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson:latest @@ -145,6 +145,12 @@ docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference- docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-5.1.1:latest ``` +- Run on NVIDIA Jetson with JetPack `6.x`: + +```bash +docker run --privileged --net=host --runtime=nvidia 
roboflow/roboflow-inference-server-jetson-6.0.0:latest +``` +
diff --git a/docs/quickstart/devices.md b/docs/quickstart/devices.md index 6410eba49..d3c497ed3 100644 --- a/docs/quickstart/devices.md +++ b/docs/quickstart/devices.md @@ -9,7 +9,7 @@ You can set up a server to use computer vision models with Inference on the foll - ARM CPU (macOS, Raspberry Pi) - x86 CPU (macOS, Linux, Windows) - NVIDIA GPU -- NVIDIA Jetson (JetPack 4.5.x, JetPack 4.6.x, JetPack 5.x) +- NVIDIA Jetson (JetPack 4.5.x, JetPack 4.6.x, JetPack 5.x, JetPack 6.x) ## Model Compatability diff --git a/docs/quickstart/docker.md b/docs/quickstart/docker.md index 5973871bb..d7ced7e92 100644 --- a/docs/quickstart/docker.md +++ b/docs/quickstart/docker.md @@ -63,14 +63,14 @@ hardware configurations. docker pull roboflow/roboflow-inference-server-gpu ``` - === "Jetson 4.5.x" + === "Jetson 4.5.x (Deprecated)" Official Roboflow Inference Server Docker Image for Nvidia Jetson JetPack 4.5.x Targets. ``` docker pull roboflow/roboflow-inference-server-jetson-4.5.0 ``` - === "Jetson 4.6.x" + === "Jetson 4.6.x (Deprecated)" Official Roboflow Inference Server Docker Image for Nvidia Jetson JetPack 4.6.x Targets. ``` @@ -84,6 +84,13 @@ hardware configurations. docker pull roboflow/roboflow-inference-server-jetson-5.1.1 ``` + === "Jetson 6.x" + Official Roboflow Inference Server Docker Image for Nvidia Jetson JetPack 6.x Targets. 
+ + ``` + docker pull roboflow/roboflow-inference-server-jetson-6.0.0 + ``` + ### Step #2: Run the Docker Container Once you have a Docker image (either built locally or pulled from Docker Hub), you can run the Roboflow Inference diff --git a/examples/inference-client/README.md b/examples/inference-client/README.md index 67519a963..4cb80c19a 100644 --- a/examples/inference-client/README.md +++ b/examples/inference-client/README.md @@ -65,7 +65,7 @@ docker run --gpus=all --net=host -e STREAM_ID=0 -e MODEL_ID=<> -e ROBOFLOW_API_K docker run -p 9001:9001 roboflow/roboflow-inference-server-arm-cpu:latest ``` -- Run on Nvidia Jetson with JetPack `4.x`: +- Run on Nvidia Jetson with JetPack `4.x` (Deprecated): ```bash docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson:latest @@ -77,6 +77,12 @@ docker run --gpus=all --net=host -e STREAM_ID=0 -e MODEL_ID=<> -e ROBOFLOW_API_K docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-5.1.1:latest ``` +- Run on Nvidia Jetson with JetPack `6.x`: + + ```bash + docker run --privileged --net=host --runtime=nvidia roboflow/roboflow-inference-server-jetson-6.0.0:latest + ``` + ### UDP We only support one UDP container at the moment. Refer to the UDP command from earlier to set up UDP. 
diff --git a/inference_cli/lib/container_adapter.py b/inference_cli/lib/container_adapter.py index 82748e28c..450849a69 100644 --- a/inference_cli/lib/container_adapter.py +++ b/inference_cli/lib/container_adapter.py @@ -103,8 +103,10 @@ def _get_jetpack_image(jetpack_version: str) -> str: return "roboflow/roboflow-inference-server-jetson-4.5.0:latest" if jetpack_version.startswith("4.6"): return "roboflow/roboflow-inference-server-jetson-4.6.1:latest" - if jetpack_version.startswith("5.1"): + if jetpack_version.startswith("5."): return "roboflow/roboflow-inference-server-jetson-5.1.1:latest" + if jetpack_version.startswith("6."): + return "roboflow/roboflow-inference-server-jetson-6.0.0:latest" raise RuntimeError(f"Jetpack version: {jetpack_version} not supported") From a593d42d0d993fee3399ba0d1df136b1ac45ee9c Mon Sep 17 00:00:00 2001 From: Alex Norell Date: Wed, 6 Nov 2024 11:16:13 -0800 Subject: [PATCH 10/10] Add roboflow/l4t-ml documentation --- docker/dockerfiles/README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docker/dockerfiles/README.md b/docker/dockerfiles/README.md index 3eb204bd9..c319f5744 100644 --- a/docker/dockerfiles/README.md +++ b/docker/dockerfiles/README.md @@ -15,3 +15,15 @@ Dockerfile.onnx.trt | Deprecated | No | Dockerfile.onnx.trt.base | Deprecated | No | Dockerfile.onnx.udp.gpu | Deprecated | No | + +## Base image + +The `roboflow/l4t-ml` image is based on the `l4t-ml` image from the [jetson-containers](https://github.com/dusty-nv/jetson-containers/tree/master/packages/l4t/l4t-ml) repository. The image is built on a Jetson with support for GPU acceleration using common ML tools. + +To build the image, run the following command: + +```bash +jetson-containers build l4t-ml +``` + +This requires that you have the `jetson-containers` tool installed on your system. Follow the instructions in the [jetson-containers](https://github.com/dusty-nv/jetson-containers/blob/master/docs/setup.md) repository to install the tool.