[Python][TensorFlow] 06. Implementing Multi-variable Linear Regression in TensorFlow

homebody 2019. 6. 22. 21:45

This post implements multi-variable linear regression in TensorFlow: first with one weight variable per feature, then rewritten with a single weight matrix.

Hypothesis without a matrix

With three input features, the hypothesis is H(x1, x2, x3) = w1*x1 + w2*x2 + w3*x3 + b: one weight variable per feature, plus a bias.

# data and label
x1 = [73., 93., 89., 96., 73.]
x2 = [80., 88., 91., 98., 66.]
x3 = [75., 93., 90., 100., 70.]
Y = [152., 185., 180., 196., 142.]

# weights
w1 = tf.Variable(10.)
w2 = tf.Variable(10.)
w3 = tf.Variable(10.)
b = tf.Variable(10.)

hypothesis = w1 * x1 + w2 * x2 + w3 * x3 + b
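
As a preview of the matrix rewrite below, here is a minimal sketch (my addition, not part of the original post) showing that this hypothesis is a single matrix product once the features are stacked column-wise; it reuses x1, x2, x3, w1, w2, w3 and b from above:

import tensorflow as tf

# stack the three feature lists into a (5, 3) matrix: one row per sample
X = tf.stack([x1, x2, x3], axis=1)
# stack the three scalar weights into a (3, 1) column vector
w = tf.reshape(tf.stack([w1, w2, w3]), [3, 1])

# (5, 3) x (3, 1) -> (5, 1): the same predictions as w1 * x1 + w2 * x2 + w3 * x3 + b,
# just arranged as a column
hypothesis_matrix = tf.matmul(X, w) + b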

Full code

import tensorflow as tf

tf.enable_eager_execution()

# data and label
x1 = [73., 93., 89., 96., 73.]
x2 = [80., 88., 91., 98., 66.]
x3 = [75., 93., 90., 100., 70.]
Y = [152., 185., 180., 196., 142.]

# weights
w1 = tf.Variable(10.)
w2 = tf.Variable(10.)
w3 = tf.Variable(10.)
b = tf.Variable(10.)

learning_rate = 0.000001

for i in range(1000+1):
    # tf.GradientTape() records the forward pass so the gradient of the cost can be computed
    with tf.GradientTape() as tape:
        hypothesis = w1 * x1 + w2 * x2 + w3 * x3 + b
        cost = tf.reduce_mean(tf.square(hypothesis - Y))
    # calculates the gradients of the cost
    w1_grad, w2_grad, w3_grad, b_grad = tape.gradient(cost, [w1, w2, w3, b])

    # update w1, w2, w3 and b
    w1.assign_sub(learning_rate * w1_grad)
    w2.assign_sub(learning_rate * w2_grad)
    w3.assign_sub(learning_rate * w3_grad)
    b.assign_sub(learning_rate * b_grad)

    if i % 50 == 0:
        print("{:5} | {:10.4f}".format(i, cost.numpy()))

Rewriting with a matrix

Stack the five samples into a (5, 3) matrix X and the three weights into a (3, 1) column vector W; the whole hypothesis then collapses to H(X) = XW + b, and adding a feature no longer means adding another weight variable by hand.

data = np.array([
    # x1, x2, x3, y
    [73., 80., 75., 152.],
    [93., 88., 93., 185.],
    [89., 91., 90., 180.],
    [96., 98., 100., 196.],
    [73., 66., 70., 142.]
], dtype=np.float32)

# slice data
X = data[:, :-1]
y = data[:, [-1]]

W = tf.Variable(tf.random_normal([3, 1]))
b = tf.Variable(tf.random_normal([1]))

# hypothesis, prediction function
def predict(X):
    return tf.matmul(X, W) + b
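
One detail worth noting in the slicing: data[:, [-1]] keeps the label column 2-D, while data[:, -1] would flatten it to 1-D and broadcast awkwardly against the (5, 1) output of predict(X). A tiny self-contained sketch (my addition) of the difference:

import numpy as np

data = np.arange(20, dtype=np.float32).reshape(5, 4)  # stand-in array with the same (5, 4) shape
print(data[:, -1].shape)    # (5,)   -> 1-D vector
print(data[:, [-1]].shape)  # (5, 1) -> 2-D column, matching tf.matmul(X, W) + b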
Full code

import tensorflow as tf
import numpy as np

tf.enable_eager_execution()

# hypothesis, prediction function (W and b are defined below; Python looks them up when predict is called)
def predict(X):
    return tf.matmul(X, W) + b

data = np.array([
    # x1, x2, x3, y
    [73., 80., 75., 152.],
    [93., 88., 93., 185.],
    [89., 91., 90., 180.],
    [96., 98., 100., 196.],
    [73., 66., 70., 142.]
], dtype=np.float32)

# slice data
X = data[:, :-1]
y = data[:, [-1]]

W = tf.Variable(tf.random_normal([3, 1]))
b = tf.Variable(tf.random_normal([1]))

learning_rate = 0.000001

n_epochs = 2000
for i in range(n_epochs+1):
    # record the gradient of the cost function
    with tf.GradientTape() as tape:
        cost = tf.reduce_mean(tf.square(predict(X) - y))
        
    # calculates the gradients of the cost
    W_grad, b_grad = tape.gradient(cost, [W, b])

    # update W and b
    W.assign_sub(learning_rate * W_grad)
    b.assign_sub(learning_rate * b_grad)

    if i % 50 == 0:
        print("{:5} | {:10.4f}".format(i, cost.numpy()))