From b1c67119852f459028efb317e3b4c9640b3cbc20 Mon Sep 17 00:00:00 2001
From: jiqing-feng
Date: Sat, 12 Oct 2024 08:38:50 -0400
Subject: [PATCH 1/2] enable cpu bnb distributed lora finetune

---
 src/accelerate/accelerator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py
index d85781646d6..8c7d03a97b4 100755
--- a/src/accelerate/accelerator.py
+++ b/src/accelerate/accelerator.py
@@ -1421,7 +1421,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, e
                     current_device.index if isinstance(current_device, torch.device) else current_device
                 )
 
-                if torch.device(current_device_index) != self.device:
+                if torch.device(current_device_index) != self.device and self.device.type != "cpu":
                     # if on the first device (GPU 0) we don't care
                     if (self.device.index is not None) or (current_device_index != 0):
                         raise ValueError(

From 1cd46d57141e69cf0460e54d26bc84c520deec8d Mon Sep 17 00:00:00 2001
From: jiqing-feng
Date: Tue, 15 Oct 2024 06:50:38 -0400
Subject: [PATCH 2/2] check bnb multi-backend

---
 src/accelerate/accelerator.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py
index 8c7d03a97b4..419aab4f0cf 100755
--- a/src/accelerate/accelerator.py
+++ b/src/accelerate/accelerator.py
@@ -1421,7 +1421,10 @@ def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, e
                     current_device.index if isinstance(current_device, torch.device) else current_device
                 )
 
-                if torch.device(current_device_index) != self.device and self.device.type != "cpu":
+                if self.device.type == "cpu" and is_bitsandbytes_multi_backend_available():
+                    # bnb with multi-backend supports CPU; no need to check the device index.
+                    pass
+                elif torch.device(current_device_index) != self.device:
                     # if on the first device (GPU 0) we don't care
                     if (self.device.index is not None) or (current_device_index != 0):
                         raise ValueError(