### Model Definition
Define a graph: h = ReLU(Wx + b)
import tensorflow as tf

# In TF 1.x, tf.get_variable takes the shape as its second positional argument,
# so pass the initial value through `initializer=` instead.
b = tf.get_variable('bias', initializer=tf.zeros((100,)))
W = tf.get_variable('weights', initializer=tf.random_uniform((784, 100), -1, 1))
x = tf.placeholder(tf.float32, (None, 784))
h = tf.nn.relu(tf.matmul(x, W) + b)
The code is written, but we don't yet know how to run it.
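At this point the graph is only defined; nothing has been computed yet. As a quick illustration (a sketch, not part of the original notes), printing h only shows a symbolic tensor:

print(h)
# prints something like: Tensor("Relu:0", shape=(?, 100), dtype=float32)
# i.e. a node in the graph, not an actual array of values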
### Running: Symbolic Mode
import tensorflow as tf
import numpy as np

b = tf.get_variable('bias', initializer=tf.zeros((100,)))
W = tf.get_variable('weights', initializer=tf.random_uniform((784, 100), -1, 1))
x = tf.placeholder(tf.float32, (None, 784))
h = tf.nn.relu(tf.matmul(x, W) + b)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
# np.random.random takes a single shape tuple; feed a batch of 64 random inputs
sess.run(h, {x: np.random.random((64, 784))})
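sess.run returns plain NumPy arrays, so the result can be inspected directly. A quick sanity check (a sketch, not part of the original notes):

out = sess.run(h, {x: np.random.random((64, 784))})
print(type(out))   # <class 'numpy.ndarray'>
print(out.shape)   # (64, 100): one 100-dimensional hidden vector per input row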
### Linear Regression
import tensorflow as tf
import utils

DATA_FILE = "data/system_cpuutil_applatency.txt"

# Step 1: read in data from the .txt file
# data is a numpy array of shape (100000, 2); each row is a data point
data, n_samples = utils.read_system(DATA_FILE)

# Step 2: create placeholders for X (CPU util) and Y (app latency)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# Step 3: create weight and bias, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: construct a model to predict Y (app latency) from X (CPU util)
Y_predicted = w * X + b

# Step 5: use the squared error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')

# Step 6: use gradient descent with a learning rate of 0.001 to minimize the loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
## Where the execution happens
with tf.Session() as sess:
    # Step 7: initialize the necessary variables, in this case w and b
    sess.run(tf.global_variables_initializer())

    # Step 8: train the model
    for i in range(100):  # run 100 epochs
        for x, y in data:
            # the session runs the optimizer op to minimize the loss on this data point
            sess.run(optimizer, feed_dict={X: x, Y: y})

    # Step 9: output the values of w and b
    w_out, b_out = sess.run([w, b])
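Once training finishes, w_out and b_out are ordinary NumPy scalars and can be used for prediction without the session. A minimal sketch (the 70.0 CPU-utilization value is just an illustrative input, not from the lecture):

cpu_util = 70.0   # hypothetical CPU utilization
predicted_latency = w_out * cpu_util + b_out
print('w = %f, b = %f, predicted latency = %f' % (w_out, b_out, predicted_latency))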
### Running: Eager Mode
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
x = [[3.]]
m = tf.matmul(x, x)
print(m)
# tf.Tensor([[9.]], shape=(1, 1), dtype=float32)
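Eager mode also makes automatic differentiation interactive. A small sketch (not from the lecture notes) using tfe.gradients_function from the same contrib.eager module:

def square(t):
    return tf.multiply(t, t)

grad_fn = tfe.gradients_function(square)   # returns a function computing d(square)/dt

print(square(3.0))    # tf.Tensor(9.0, shape=(), dtype=float32)
print(grad_fn(3.0))   # a list containing one tensor with value 6.0 (derivative of t^2 at t = 3)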
### When record sizes are fixed, input can be read faster
dataset = tf.data.FixedLengthRecordDataset([file1, file2, file3, ...])
iterator = dataset.make_one_shot_iterator()
input, label = iterator.get_next()
...
for i in range(100):
    ...
    try:
        while True:
            sess.run([optimizer])
    except tf.errors.OutOfRangeError:
        # the iterator is exhausted; move on to the next epoch
        pass

# common dataset transformations
dataset = dataset.shuffle(1000)                      # shuffle with a buffer of 1000 records
dataset = dataset.repeat(100)                        # repeat the data for 100 epochs
dataset = dataset.batch(128)                         # group records into batches of 128
dataset = dataset.map(lambda x: tf.one_hot(x, 10))   # apply a transformation to each record
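Putting the pieces together, the linear-regression example above could feed its data through tf.data instead of placeholders. This is a rough sketch under the assumption that utils.read_system returns the same (n_samples, 2) NumPy array as before; it is not code from the lecture:

import tensorflow as tf
import utils

data, n_samples = utils.read_system("data/system_cpuutil_applatency.txt")

# build a dataset directly from the in-memory NumPy array (cast to float32 to match the variables)
dataset = tf.data.Dataset.from_tensor_slices(
    (data[:, 0].astype('float32'), data[:, 1].astype('float32')))
dataset = dataset.shuffle(1000).batch(128)
iterator = dataset.make_initializable_iterator()
X_batch, Y_batch = iterator.get_next()

w = tf.Variable(0.0, name='w')
b = tf.Variable(0.0, name='b')
Y_pred = w * X_batch + b
loss = tf.reduce_mean(tf.square(Y_batch - Y_pred))
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(100):
        sess.run(iterator.initializer)   # restart the iterator at the start of each epoch
        try:
            while True:
                sess.run(optimizer)      # each step consumes one batch of 128 points
        except tf.errors.OutOfRangeError:
            pass                         # this epoch's data is exhausted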