import tensorflow as tf
# Simple linear regression y = W*x + b fit by manual gradient descent
# (TensorFlow 2.x eager mode). The data is y = x, so the fit should
# converge to W ~= 1.0, b ~= 0.0.
x_data = [1, 2, 3, 4, 5]  # input
y_data = [1, 2, 3, 4, 5]  # output

# Initialize weight and bias with deliberately wrong values so the
# descent visibly converges.
W = tf.Variable(2.9)
b = tf.Variable(0.5)

# Step size for gradient descent.
learning_rate = 0.01

# --- Single gradient-descent step, shown once for illustration ---
# GradientTape records the forward pass so tape.gradient() can compute
# d(cost)/dW and d(cost)/db afterwards.
with tf.GradientTape() as tape:
    hypothesis = W * x_data + b
    cost = tf.reduce_mean(tf.square(hypothesis - y_data))
W_grad, b_grad = tape.gradient(cost, [W, b])
W.assign_sub(learning_rate * W_grad)  # W <- W - lr * dcost/dW
b.assign_sub(learning_rate * b_grad)  # b <- b - lr * dcost/db

# --- Full training loop ---
# Re-initialize so the loop starts from the same point as the demo step.
learning_rate = 0.01
W = tf.Variable(2.9)
b = tf.Variable(0.5)

for i in range(101):
    # Record the forward pass for automatic differentiation.
    with tf.GradientTape() as tape:
        hypothesis = W * x_data + b
        cost = tf.reduce_mean(tf.square(hypothesis - y_data))
    W_grad, b_grad = tape.gradient(cost, [W, b])
    W.assign_sub(learning_rate * W_grad)
    b.assign_sub(learning_rate * b_grad)
    if i % 10 == 0:
        # cost is an EagerTensor; convert to a Python float before applying
        # the ":.6f" format spec (tensors don't support float format specs).
        print("{:5}|{:10.4}|{:10.4}|{:10.6f}".format(
            i, W.numpy(), b.numpy(), float(cost)))
# Expected output:
#     0|     2.452|     0.376| 45.660004
#    10|     1.104|  0.003398|  0.206336
#    20|     1.013|  -0.02091|  0.001026
#    30|     1.007|  -0.02184|  0.000093
#    40|     1.006|  -0.02123|  0.000083
#    50|     1.006|  -0.02053|  0.000077
#    60|     1.005|  -0.01984|  0.000072
#    70|     1.005|  -0.01918|  0.000067
#    80|     1.005|  -0.01854|  0.000063
#    90|     1.005|  -0.01793|  0.000059
#   100|     1.005|  -0.01733|  0.000055

# Predictions with the trained parameters.
print('predict : {0} / result : {1}'.format(5, W * 5 + b))
print('predict : {0} / result : {1}'.format(2.5, W * 2.5 + b))
# Expected output:
#   predict : 5 / result : 5.006669998168945
#   predict : 2.5 / result : 2.4946701526641846