RuntimeError: CUDA out of memory. Tried to allocate 42.00 MiB (GPU 0; 3.81 GiB total capacity; 2.79 GiB already allocated; 25.44 MiB free; 2.92 GiB reserved in total by PyTorch) #21

Open
imenselmi opened this issue May 17, 2023 · 1 comment

Comments

@imenselmi

RuntimeError Traceback (most recent call last)
Cell In[27], line 44
42 # Specify the optimizer with a lower learning rate
43 optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
---> 44 _ = train_model(model,
45 criterion,
46 dataloaders,
47 optimizer,
48 bpath=exp_directory,
49 metrics=metrics,
50 num_epochs=epochs)
52 # Save the trained model
53 torch.save(model, exp_directory / 'weights.pt')

File ~/Documents/5G/DeepLabv3FineTuning/trainer.py:49, in train_model(model, criterion, dataloaders, optimizer, metrics, bpath, num_epochs)
47 # track history if only in train
48 with torch.set_grad_enabled(phase == 'Train'):
---> 49 outputs = model(inputs)
50 loss = criterion(outputs['out'], masks)
51 y_pred = outputs['out'].data.cpu().numpy().ravel()

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/module.py:722, in Module._call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),
725 self._forward_hooks.values()):
726 hook_result = hook(self, input, result)

File ~/.local/lib/python3.8/site-packages/torchvision/models/segmentation/_utils.py:20, in _SimpleSegmentationModel.forward(self, x)
18 input_shape = x.shape[-2:]
19 # contract: features is a dict of tensors
---> 20 features = self.backbone(x)
22 result = OrderedDict()
23 x = features["out"]

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/module.py:722, in Module._call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),
725 self._forward_hooks.values()):
726 hook_result = hook(self, input, result)

File ~/.local/lib/python3.8/site-packages/torchvision/models/_utils.py:63, in IntermediateLayerGetter.forward(self, x)
61 out = OrderedDict()
62 for name, module in self.items():
---> 63 x = module(x)
64 if name in self.return_layers:
65 out_name = self.return_layers[name]

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/module.py:722, in Module._call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),
725 self._forward_hooks.values()):
726 hook_result = hook(self, input, result)

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/container.py:117, in Sequential.forward(self, input)
115 def forward(self, input):
116 for module in self:
--> 117 input = module(input)
118 return input

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/module.py:722, in Module._call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),
725 self._forward_hooks.values()):
726 hook_result = hook(self, input, result)

File ~/.local/lib/python3.8/site-packages/torchvision/models/resnet.py:112, in Bottleneck.forward(self, x)
109 out = self.bn2(out)
110 out = self.relu(out)
--> 112 out = self.conv3(out)
113 out = self.bn3(out)
115 if self.downsample is not None:

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/module.py:722, in Module._call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),
725 self._forward_hooks.values()):
726 hook_result = hook(self, input, result)

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/conv.py:419, in Conv2d.forward(self, input)
418 def forward(self, input: Tensor) -> Tensor:
--> 419 return self._conv_forward(input, self.weight)

File ~/.local/lib/python3.8/site-packages/torch/nn/modules/conv.py:415, in Conv2d._conv_forward(self, input, weight)
411 if self.padding_mode != 'zeros':
412 return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
413 weight, self.bias, self.stride,
414 _pair(0), self.dilation, self.groups)
--> 415 return F.conv2d(input, weight, self.bias, self.stride,
416 self.padding, self.dilation, self.groups)

RuntimeError: CUDA out of memory. Tried to allocate 42.00 MiB (GPU 0; 3.81 GiB total capacity; 2.79 GiB already allocated; 25.44 MiB free; 2.92 GiB reserved in total by PyTorch)
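
For context, the usual first lever for this error is a smaller batch size, so each forward/backward pass through the ResNet backbone holds fewer activations at once. A minimal runnable sketch; the dataset construction, the 'Test' key, and all values are illustrative assumptions, not taken from this repository:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Placeholder datasets so the sketch runs on its own; in the notebook these
# would be the real segmentation datasets used to build `dataloaders`.
train_dataset = TensorDataset(torch.randn(8, 3, 256, 256), torch.zeros(8, 1, 256, 256))
test_dataset = TensorDataset(torch.randn(4, 3, 256, 256), torch.zeros(4, 1, 256, 256))

# A smaller batch size reduces peak GPU memory per step; batch_size=2 here is
# purely illustrative and can be tuned until the OOM above disappears.
dataloaders = {
    'Train': DataLoader(train_dataset, batch_size=2, shuffle=True, num_workers=2),
    'Test': DataLoader(test_dataset, batch_size=2, shuffle=False, num_workers=2),
}
```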

@silviazpi

I fixed it by reducing the input image size, but on a 4 GB GPU it might still not be runnable.
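
A minimal sketch of that fix, assuming the dataset resizes images with torchvision transforms; the 256x256 target is an illustrative value, not from the repository:

```python
from PIL import Image
from torchvision import transforms

# Downscaling the inputs shrinks every intermediate activation in the ResNet
# backbone, which is where most of the ~2.8 GiB reported in the traceback goes.
preprocess = transforms.Compose([
    transforms.Resize((256, 256)),  # illustrative target size
    transforms.ToTensor(),
])

# Example usage on a single in-memory RGB image.
img = Image.new('RGB', (1024, 1024))
x = preprocess(img)  # tensor of shape [3, 256, 256]
print(x.shape)
```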
