import torch
import torch.optim as optim
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[2], [4], [6]])
W = torch.zeros(1, requires_grad=True) # requires_grad=True marks this tensor as one to be learned
b = torch.zeros(1, requires_grad=True)
hypothesis = x_train * W + b                   # H(x) = Wx + b
cost = torch.mean((hypothesis - y_train) ** 2) # mean squared error
optimizer = optim.SGD([W, b], lr=0.01)
optimizer.zero_grad() # reset the gradients to zero
cost.backward()       # compute the gradients
optimizer.step()      # update W and b using the gradients
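# For reference: a minimal sketch of what zero_grad()/backward()/step() amount to
# for plain SGD, written with raw autograd (same data, lr=0.01 assumed; this is
# an illustration, not how optim.SGD is implemented internally).
W = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
cost = torch.mean((x_train * W + b - y_train) ** 2)
cost.backward()            # fills W.grad and b.grad
with torch.no_grad():      # update the parameters outside the autograd graph
    W -= 0.01 * W.grad     # W <- W - lr * dcost/dW
    b -= 0.01 * b.grad
W.grad.zero_()             # reset the accumulated gradients, like zero_grad()
b.grad.zero_()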
# Define the data, initialize the parameters, and create the optimizer (run once)
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[2], [4], [6]])
W = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
optimizer = optim.SGD([W, b], lr=0.01)
# Compute the hypothesis, compute the cost, and update with the optimizer (repeated)
nb_epochs = 1000
for epoch in range(1, nb_epochs + 1):
    hypothesis = x_train * W + b
    cost = torch.mean((hypothesis - y_train) ** 2)
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
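# The same loop written with torch.nn: nn.Linear holds W and b internally, and
# F.mse_loss replaces the hand-written cost. A minimal sketch; the 100-epoch
# print interval is an arbitrary choice for watching cost fall toward W ≈ 2, b ≈ 0.
import torch.nn as nn
import torch.nn.functional as F

model = nn.Linear(1, 1)                            # one input feature, one output
optimizer = optim.SGD(model.parameters(), lr=0.01)
for epoch in range(1, nb_epochs + 1):
    prediction = model(x_train)                    # hypothesis H(x)
    cost = F.mse_loss(prediction, y_train)         # same MSE as above
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
    if epoch % 100 == 0:
        print(f'Epoch {epoch:4d}: cost = {cost.item():.6f}')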