Premise
The network has weights and biases that can be adjusted, and "learning" means tuning them so the network fits the training data. Training with stochastic gradient descent (SGD) repeats the following four steps; a minimal sketch of the loop follows this list.
1️⃣ Step 1 - Mini-batch: randomly sample a small subset of the training data; the goal is to reduce the loss measured on this mini-batch.
2️⃣ Step 2 - Compute the gradient: obtain the gradient of the loss with respect to every weight parameter; it indicates the direction in which the loss decreases the most.
3️⃣ Step 3 - Update the parameters: move each weight parameter a small step in that direction (parameter -= learning_rate * gradient).
4️⃣ Step 4 - Repeat: repeat steps 1 through 3.
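As a pseudocode sketch only, the four steps map onto a loop like the one below; network, the loss, and the gradient computation are all defined by the code that follows.

# sketch only: every name here is defined by the code below
for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)    # step 1: sample a mini-batch
    x_batch, t_batch = x_train[batch_mask], t_train[batch_mask]
    grads = network.numerical_gradient(x_batch, t_batch)     # step 2: gradient of the loss
    for key in ('W1', 'b1', 'W2', 'b2'):                      # step 3: SGD update
        network.params[key] -= learning_rate * grads[key]
    # step 4: the loop itself repeats steps 1-3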
# two_layer_net.py: the two-layer network class
import sys, os
sys.path.append(os.pardir)  # make the book's common/ and dataset/ packages importable from the parent directory
import numpy as np
from common.functions import *
from common.gradient import numerical_gradient
class TwoLayerNet:
    # initialize the weight parameters
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        self.params = {}  # the network's parameters
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    # run a prediction (forward pass)
    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        return y
    # loss function
    def loss(self, x, t):  # x: input data / t: true label
        y = self.predict(x)
        return cross_entropy_error(y, t)

    # accuracy
    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)  # t is one-hot encoded, so take its argmax as well
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
    # gradients of the weight parameters (numerical differentiation)
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)  # the loss as a function of the parameter array being perturbed
        grads = {}  # gradients
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
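predict and loss lean on sigmoid, softmax, and cross_entropy_error imported from common.functions, which is not listed in this post. The following is only a sketch of what those helpers typically look like in the book's code; the exact implementations in common/functions.py may differ in detail.

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def softmax(x):
    x = x - np.max(x, axis=-1, keepdims=True)  # shift by the row max for numerical stability
    return np.exp(x) / np.sum(np.exp(x), axis=-1, keepdims=True)

def cross_entropy_error(y, t):  # assumes t is one-hot encoded, as it is in this post
    if y.ndim == 1:  # promote a single sample to a batch of one
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size  # 1e-7 avoids log(0)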
# mini-batch SGD training script (records only the loss)
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

train_loss_list = []

# hyperparameters
iters_num = 10000  # number of training iterations
train_size = x_train.shape[0]
batch_size = 100   # mini-batch size
learning_rate = 0.1

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
for i in range(iters_num):
    # sample a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # compute the gradient
    grad = network.numerical_gradient(x_batch, t_batch)
    # grad = network.gradient(x_batch, t_batch)  # faster backpropagation version

    # update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    # record the training progress
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)
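Every iteration above goes through network.numerical_gradient, which delegates to the numerical_gradient helper from common.gradient. That helper evaluates the loss twice per parameter element (a central difference), which is why this numerical version is painfully slow. common/gradient.py itself is not listed here, so the following is only a sketch of how it is commonly written:

def numerical_gradient(f, x):
    h = 1e-4  # small step for the central difference
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]

        x[idx] = tmp + h
        fxh1 = f(x)            # f(x + h)
        x[idx] = tmp - h
        fxh2 = f(x)            # f(x - h)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp           # restore the original value
        it.iternext()
    return grad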
# same training script, now also evaluating accuracy once per epoch
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

train_loss_list = []
train_acc_list = []
test_acc_list = []

# hyperparameters
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

# ****** number of iterations per epoch ******
iter_per_epoch = max(train_size / batch_size, 1)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
for i in range(iters_num):
    # sample a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # compute the gradient
    grad = network.numerical_gradient(x_batch, t_batch)
    # grad = network.gradient(x_batch, t_batch)  # faster backpropagation version

    # update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    # record the training progress
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # ****** compute accuracy once per epoch ******
    if i % iter_per_epoch == 0:  # runs once per epoch
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        # print("train acc, test acc | ", str(train_acc), ",", str(test_acc))
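Both scripts keep a commented-out call to network.gradient, described as the faster version. That method is not part of the TwoLayerNet listing above; the book adds it later using backpropagation. As a rough sketch only, a backpropagation-based gradient method added to TwoLayerNet for this same two-layer architecture could look like the following:

    # sketch of the faster, backpropagation-based gradient method (added to TwoLayerNet)
    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        batch_num = x.shape[0]

        # forward pass (same as predict)
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward pass: softmax + cross-entropy gives dL/da2 = (y - t) / batch_num
        grads = {}
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        dz1 = np.dot(dy, W2.T)
        da1 = dz1 * z1 * (1 - z1)  # sigmoid'(a1) = z1 * (1 - z1)
        grads['W1'] = np.dot(x.T, da1)
        grads['b1'] = np.sum(da1, axis=0)
        return grads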
# graph: training vs. test accuracy per epoch
x1 = np.arange(len(train_acc_list))
y1 = train_acc_list
x2 = np.arange(len(test_acc_list))
y2 = test_acc_list
plt.plot(x1, y1, label='train acc')
plt.plot(x2, y2, label='test acc')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
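The first script also filled train_loss_list without ever plotting it; a usage sketch for the loss curve, along the same lines as the accuracy plot above:

# loss per iteration (usage sketch)
plt.plot(np.arange(len(train_loss_list)), train_loss_list)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.show()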