Multivariable linear regression with PyTorch

I am working on a linear regression problem with PyTorch.
I had success with the single-variable case, but when I try multivariable linear regression I get the error below. How do I perform linear regression on multiple variables?

TypeError                                 Traceback (most recent call last)
<ipython-input> in <module>()
      9     optimizer.zero_grad() #gradient
     10     outputs = model(inputs) #output
---> 11     loss = criterion(outputs,targets) #loss function
     12     loss.backward() #backward propagation
     13     optimizer.step() #1-step optimization (gradient descent)

/anaconda/envs/tensorflow/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    204
    205     def __call__(self, *input, **kwargs):
--> 206         result = self.forward(*input, **kwargs)
    207         for hook in self._forward_hooks.values():
    208             hook_result = hook(self, input, result)

/anaconda/envs/tensorflow/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
     22         _assert_no_grad(target)
     23         backend_fn = getattr(self._backend, type(self).__name__)
---> 24         return backend_fn(self.size_average)(input, target)
     25
     26

/anaconda/envs/tensorflow/lib/python3.6/site-packages/torch/nn/_functions/thnn/auto.py in forward(self, input, target)
     39         output = input.new(1)
     40         getattr(self._backend, update_output.name)(self._backend.library_state, input, target,
---> 41                                                    output, *self.additional_args)
     42         return output
     43

TypeError: FloatMSECriterion_updateOutput received an invalid combination of arguments - got (int, torch.FloatTensor, torch.DoubleTensor, torch.FloatTensor, bool), but expected (int state, torch.FloatTensor input, torch.FloatTensor target, torch.FloatTensor output, bool sizeAverage)

Here is the code:

#import
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable

#input_size = 1
input_size = 3
output_size = 1
num_epochs = 300
learning_rate = 0.002

#Data set
#x_train = np.array([[1.564],[2.11],[3.3],[5.4]], dtype=np.float32)
x_train = np.array([[73.,80.,75.],[93.,88.,93.],[89.,91.,90.],[96.,98.,100.],[73.,63.,70.]],dtype=np.float32)
#y_train = np.array([[8.0],[19.0],[25.0],[34.45]], dtype= np.float32)
y_train = np.array([[152.],[185.],[180.],[196.],[142.]])
print('x_train:\n',x_train)
print('y_train:\n',y_train)

class LinearRegression(nn.Module):
    def __init__(self,input_size,output_size):
        super(LinearRegression,self).__init__()
        self.linear = nn.Linear(input_size,output_size)

    def forward(self,x):
        out = self.linear(x) #forward propagation
        return out

model = LinearRegression(input_size,output_size)

#Loss and Optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate)

#train the Model
for epoch in range(num_epochs):
    #convert numpy array to torch Variable
    inputs = Variable(torch.from_numpy(x_train)) #convert numpy array to torch tensor
    #inputs = Variable(torch.Tensor(x_train))    
    targets = Variable(torch.from_numpy(y_train)) #convert numpy array to torch tensor

    #forward+ backward + optimize
    optimizer.zero_grad() #gradient
    outputs = model(inputs) #output
    loss = criterion(outputs,targets) #loss function
    loss.backward() #backward propagation
    optimizer.step() #1-step optimization (gradient descent)

    if (epoch+1) % 5 == 0:
        print('epoch [%d/%d], Loss: %.4f' % (epoch +1, num_epochs, loss.data[0]))
        predicted = model(Variable(torch.from_numpy(x_train))).data.numpy()
        plt.plot(x_train,y_train,'ro',label='Original Data')
        plt.plot(x_train,predicted,label='Fitted Line')
        plt.legend()
        plt.show()

      

1 answer


You need to make sure the inputs and targets have the same data type. In this case, x_train is a 32-bit float, while y_train is created without an explicit dtype and so defaults to a 64-bit double; the MSE criterion therefore receives a FloatTensor and a DoubleTensor, which produces the TypeError above. You should use:



y_train = np.array([[152.],[185.],[180.],[196.],[142.]],dtype=np.float32)
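
Alternatively, you can leave the numpy array as-is and cast the tensor when building the Variable. A minimal sketch, assuming the rest of the posted training loop stays unchanged:

#cast the target tensor to float32 instead of changing the numpy array
targets = Variable(torch.from_numpy(y_train).float()) #DoubleTensor -> FloatTensor

#quick check that both sides of the loss now have the same type
print(torch.from_numpy(x_train).type())         #torch.FloatTensor
print(torch.from_numpy(y_train).float().type()) #torch.FloatTensor

Either way, the fix is the same: both the model output and the target passed to nn.MSELoss() must be FloatTensors.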

      
