diff --git a/burn-tensor/README.md b/burn-tensor/README.md
index 37501a301a..f7f6855419 100644
--- a/burn-tensor/README.md
+++ b/burn-tensor/README.md
@@ -50,7 +50,7 @@ Therefore, creating the tape only requires a simple and efficient graph traversa
 
 ## Cuda
 
-To run with CUDA set `TORCH_CUDA_VERSION=cu113`.
+To run with CUDA set `TORCH_CUDA_VERSION=cu121`.
 
 ## Notes
 
diff --git a/examples/mnist/README.md b/examples/mnist/README.md
index 4040dc56a6..4aa61016d4 100644
--- a/examples/mnist/README.md
+++ b/examples/mnist/README.md
@@ -17,7 +17,7 @@ cargo run --example mnist --release --features ndarray # CPU NdAr
 cargo run --example mnist --release --features ndarray-blas-openblas # CPU NdArray Backend - f32 - blas with openblas
 cargo run --example mnist --release --features ndarray-blas-netlib # CPU NdArray Backend - f32 - blas with netlib
 echo "Using tch backend"
-export TORCH_CUDA_VERSION=cu113 # Set the cuda version
+export TORCH_CUDA_VERSION=cu121 # Set the cuda version
 cargo run --example mnist --release --features tch-gpu # GPU Tch Backend - f32
 cargo run --example mnist --release --features tch-cpu # CPU Tch Backend - f32
 echo "Using wgpu backend"
diff --git a/examples/simple-regression/README.md b/examples/simple-regression/README.md
index 4c385fea70..4f4f445c01 100644
--- a/examples/simple-regression/README.md
+++ b/examples/simple-regression/README.md
@@ -23,7 +23,7 @@ cargo run --example regression --release --features ndarray # CPU
 cargo run --example regression --release --features ndarray-blas-openblas # CPU NdArray Backend - f32 - blas with openblas
 cargo run --example regression --release --features ndarray-blas-netlib # CPU NdArray Backend - f32 - blas with netlib
 echo "Using tch backend"
-export TORCH_CUDA_VERSION=cu113 # Set the cuda version
+export TORCH_CUDA_VERSION=cu121 # Set the cuda version
 cargo run --example regression --release --features tch-gpu # GPU Tch Backend - f32
 cargo run --example regression --release --features tch-cpu # CPU Tch Backend - f32
 echo "Using wgpu backend"
diff --git a/examples/text-classification/README.md b/examples/text-classification/README.md
index a38e78f0b9..cefd5e3784 100644
--- a/examples/text-classification/README.md
+++ b/examples/text-classification/README.md
@@ -29,7 +29,7 @@ cd burn
 
 # Use the --release flag to really speed up training.
 # Use the f16 feature if your CUDA device supports FP16 (half precision) operations. May not work well on every device.
-export TORCH_CUDA_VERSION=cu117 # Set the cuda version (CUDA users)
+export TORCH_CUDA_VERSION=cu121 # Set the cuda version (CUDA users)
 
 # AG News
 cargo run --example ag-news-train --release --features tch-gpu # Train on the ag news dataset
diff --git a/examples/text-generation/README.md b/examples/text-generation/README.md
index a8953244ac..0131c0c3e2 100644
--- a/examples/text-generation/README.md
+++ b/examples/text-generation/README.md
@@ -14,7 +14,7 @@ git clone https://github.com/tracel-ai/burn.git
 cd burn
 
 # Use the --release flag to really speed up training.
-export TORCH_CUDA_VERSION=cu113
+export TORCH_CUDA_VERSION=cu121
 cargo run --example text-generation --release
 ```
 