import tensorflow as tf

def loss_fn(hypothesis, labels):
    # Binary cross-entropy: -mean(y * log(h) + (1 - y) * log(1 - h)).
    # tf.log was removed in TF 2.x; use tf.math.log instead.
    cost = -tf.reduce_mean(labels * tf.math.log(hypothesis)
                           + (1 - labels) * tf.math.log(1 - hypothesis))
    return cost
def grad(features, labels):
    # The forward pass must run inside the tape, otherwise the tape never
    # records the ops involving W and b and tape.gradient returns None.
    # (The sigmoid model here is assumed from the binary cross-entropy loss.)
    with tf.GradientTape() as tape:
        hypothesis = tf.sigmoid(tf.matmul(features, W) + b)
        loss_value = loss_fn(hypothesis, labels)
    return tape.gradient(loss_value, [W, b])
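
# --- Illustrative setup (assumed, not from the original): toy data and the
# --- variables W, b that grad() differentiates with respect to. Any real
# --- dataset and matching shapes would work the same way.
features = tf.constant([[1., 2.], [2., 3.], [3., 1.], [4., 3.]])
labels = tf.constant([[0.], [0.], [1.], [1.]])

W = tf.Variable(tf.zeros([2, 1]), name='weight')
b = tf.Variable(tf.zeros([1]), name='bias')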
# In TF 2.x, plain gradient descent is tf.keras.optimizers.SGD
# (there is no top-level tf.GradientDescentOptimizer).
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

# One update step: compute the gradients, then apply them to the variables.
grads = grad(features, labels)
optimizer.apply_gradients(grads_and_vars=zip(grads, [W, b]))
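
# --- Usage sketch (illustrative): repeating the update step above in a
# --- training loop; the step count and logging interval are arbitrary.
for step in range(1000):
    grads = grad(features, labels)
    optimizer.apply_gradients(grads_and_vars=zip(grads, [W, b]))
    if step % 200 == 0:
        loss = loss_fn(tf.sigmoid(tf.matmul(features, W) + b), labels)
        print("step {}: loss = {:.4f}".format(step, loss.numpy()))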