Hi all,
I defined my module MLPNet and then used NeuralNet to run a grid search, but I noticed that I can't use my CUDA GPU tensors. I can't figure out where the issue is.
If I instead call the fit method as grid.fit(X_PM10_pt.cpu(), Y_PM10_pt.cpu()), it works, but it doesn't use the GPU. So how can I set everything up to compute on my GPU with skorch, rather than on the CPU?
Below is my code:
import numpy as np
import torch as pt
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
from numpy import array
from skorch import NeuralNet
from sklearn.model_selection import GridSearchCV, PredefinedSplit

device = "cuda"
X_PM10_pt = pt.tensor(X_pm10, dtype=pt.float32, device=device)
Y_PM10_pt = pt.tensor(Y_pm10, dtype=pt.float32, device=device)
class MLPNet(nn.Module):
    def __init__(self, dropout_rate=0.2, hidden_neurons=10, input_size=4, activation_fn=nn.ReLU(), output_size=1, weight_init=init.normal_):
        super(MLPNet, self).__init__()
        self.input_size = input_size
        self.hidden_neurons = hidden_neurons
        self.activation_fn = activation_fn
        self.dropout_rate = dropout_rate
        self.output_size = output_size
        #self.weight_init = weight_init
        self.hidden_layer = nn.Linear(self.input_size, self.hidden_neurons)
        self.activation = self.activation_fn
        self.dropout = nn.Dropout(self.dropout_rate)
        self.output_layer = nn.Linear(self.hidden_neurons, self.output_size)
        """
        Initialize the weights with the specified strategy.
        If weight_init is None, init.normal_ is used as the default strategy.
        """
        for module in self.modules():
            if isinstance(module, nn.Linear):
                weight_init(module.weight)
                nn.init.constant_(module.bias, 0.1)

    def forward(self, x):
        x = self.activation(self.hidden_layer(x))
        x = self.output_layer(self.dropout(x))
        return x

    def check_initialization(self):
        for module in self.modules():
            if isinstance(module, nn.Linear):
                print(f"Weight initialization for layer {module}:")
                print(module.weight)
                print(f"Bias initialization for layer {module}:")
                print(module.bias)
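For completeness, this is roughly how I exercise the module on its own before wrapping it in skorch (a minimal sketch with a random dummy batch, not my real data):

# Quick standalone check of MLPNet on the GPU (dummy input, illustration only)
net = MLPNet(hidden_neurons=10, weight_init=init.kaiming_normal_).to(device)
net.check_initialization()
dummy = pt.randn(8, 4, device=device)  # 8 samples, input_size=4 features
print(net(dummy).shape)                # torch.Size([8, 1])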
mlp_model = NeuralNet(
    module=MLPNet,
    criterion=nn.MSELoss,
    optimizer=optim.Adam,
    device=device,
    verbose=True
)
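As a side note, my understanding is that with device=device set on NeuralNet, skorch moves each batch to the GPU by itself, so a standalone fit can take CPU inputs and still train on the GPU (a minimal sketch, just to check where the weights end up):

# Standalone skorch fit (no GridSearchCV): inputs stay on the CPU,
# skorch transfers each batch to the device given above.
mlp_model.fit(X_PM10_pt.cpu(), Y_PM10_pt.cpu())
print(next(mlp_model.module_.parameters()).device)  # I would expect cuda:0 here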
param_grid = {
    'module__hidden_neurons': [10, 50, 100],  #[100, 50, 10],
    'module__dropout_rate': [0.2, 0.5],
    'module__weight_init': [init.normal_, init.kaiming_normal_],
    'optimizer__lr': [0.01, 0.001],
    'batch_size': [32, 64, 128],
    'max_epochs': [10, 50, 100]
}
# ps is a PredefinedSplit for my train/validation split (defined elsewhere, not shown)
grid = GridSearchCV(estimator=mlp_model, param_grid=param_grid, cv=ps, scoring="neg_mean_squared_error", verbose=10, error_score='raise')
grid_result = grid.fit(X_PM10_pt, Y_PM10_pt)
Running grid.fit(X_PM10_pt, Y_PM10_pt) raises:
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
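For context, this is the workaround I mentioned above: feeding CPU tensors to grid.fit runs without errors, and afterwards I check where the best estimator's parameters ended up (a minimal sketch; best_estimator_ and module_ are the standard sklearn/skorch attributes):

grid_result = grid.fit(X_PM10_pt.cpu(), Y_PM10_pt.cpu())
best_net = grid_result.best_estimator_              # refit NeuralNet from the grid search
print(next(best_net.module_.parameters()).device)   # where did the weights end up?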