In [ ]:
### 1) TF 1.15, single-layer implementation
%reset
# TensorFlow 1.15
# MNIST implemented as multinomial classification
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler # Normalization
from sklearn.model_selection import train_test_split # train/test split
from sklearn.metrics import classification_report
# Raw Data Loading
df = pd.read_csv('./data/mnist/train.csv')
##### No missing values or outliers #####
##### Preview the images #####
img_data = df.drop('label', axis=1, inplace=False).values
fig = plt.figure()
fig_arr = []
for n in range(10):
    fig_arr.append(fig.add_subplot(2,5,n+1))
    fig_arr[n].imshow(img_data[n].reshape(28,28), cmap='Greys')
plt.tight_layout()
plt.show()
# Data Split
x_data_train, x_data_test, t_data_train, t_data_test = \
    train_test_split(df.drop('label', axis=1, inplace=False), df['label'],
                     test_size=0.3, random_state=0)
# Min-Max Normalization
scaler = MinMaxScaler() # scaler = StandardScaler()
scaler.fit(x_data_train)
x_data_train_norm = scaler.transform(x_data_train)
x_data_test_norm = scaler.transform(x_data_test)
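# Note: the scaler is fit on the training split only, so the test split is
# transformed with training statistics (avoids data leakage).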
del x_data_train
del x_data_test
##### Tensorflow implementation #####
sess = tf.Session()
t_data_train_onehot = sess.run(tf.one_hot(t_data_train,depth=10))
t_data_test_onehot = sess.run(tf.one_hot(t_data_test,depth=10))
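# tf.one_hot maps each integer label to a 10-dim indicator vector,
# e.g. 3 -> [0,0,0,1,0,0,0,0,0,0]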
# Placeholder
X = tf.placeholder(shape=[None,784], dtype=tf.float32)
T = tf.placeholder(shape=[None,10], dtype=tf.float32)
# Weight & bias
W = tf.Variable(tf.random.normal([784,10]), name='weight')
b = tf.Variable(tf.random.normal([10]), name='bias')
# Hypothesis
logit = tf.matmul(X,W) + b
H = tf.nn.softmax(logit) # softmax activation function
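# softmax turns the 10 logits into a probability distribution:
# H_i = exp(logit_i) / sum_j exp(logit_j)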
# loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit,
                                                                 labels=T))
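# Note: the loss takes the raw logits rather than H; the softmax is applied
# internally for numerical stability.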
# train
train = tf.train.GradientDescentOptimizer(learning_rate=1e-1).minimize(loss)
# parameter
num_of_epoch = 200
batch_size = 100
# Training loop
def run_train(sess, train_x, train_t):
    print('### Starting Training ###')
    # initialize all variables
    sess.run(tf.global_variables_initializer())
    for step in range(num_of_epoch):
        total_batch = int(train_x.shape[0] / batch_size)
        for i in range(total_batch):
            batch_x = train_x[i*batch_size:(i+1)*batch_size]
            batch_t = train_t[i*batch_size:(i+1)*batch_size]
            _, loss_val = sess.run([train,loss], feed_dict={X:batch_x, T:batch_t})
        if step % 20 == 0:
            print('Loss : {}'.format(loss_val))
    print('### End Training ###')
# Accuracy
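# argmax along axis 1 picks the class with the highest softmax probability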
predict = tf.argmax(H,1)
# Testing
run_train(sess,x_data_train_norm,t_data_train_onehot) # train
print('### Measuring accuracy on the test set ###')
result = sess.run(predict, feed_dict={X:x_data_test_norm})
print(classification_report(t_data_test,result.ravel()))
# Test set accuracy : 0.910793662071228
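# Optional sanity check (not in the original post): the same accuracy can be
# computed inside the TF graph instead of via sklearn's classification_report.
correct = tf.equal(tf.argmax(H, 1), tf.argmax(T, 1))
accuracy = tf.reduce_mean(tf.cast(correct, dtype=tf.float32))
print(sess.run(accuracy, feed_dict={X:x_data_test_norm, T:t_data_test_onehot}))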
In [ ]:
### 2) TF 1.15, multi-layer implementation => DNN (without weight initialization, activation, or dropout settings; see the sketch below)
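# The original cell for this variant is left empty; below is a minimal sketch of
# what it might contain, assuming the same data pipeline and placeholders as in 1),
# plain random-normal initialization, sigmoid activations, and no dropout.
# The hidden sizes (256, 128) mirror section 3 and are an assumption.
W2 = tf.Variable(tf.random.normal([784,256]), name='weight2')
b2 = tf.Variable(tf.random.normal([256]), name='bias2')
layer2 = tf.sigmoid(tf.matmul(X, W2) + b2)
W3 = tf.Variable(tf.random.normal([256,128]), name='weight3')
b3 = tf.Variable(tf.random.normal([128]), name='bias3')
layer3 = tf.sigmoid(tf.matmul(layer2, W3) + b3)
W4 = tf.Variable(tf.random.normal([128,10]), name='weight4')
b4 = tf.Variable(tf.random.normal([10]), name='bias4')
logit = tf.matmul(layer3, W4) + b4
H = tf.nn.softmax(logit) # training and evaluation then proceed exactly as in 1)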
3) TF 1.15, multi-layer implementation (with weight initialization, activation, and dropout settings)
In [26]:
# 1016_TF1.15
%reset
# TensorFlow 1.15
# MNIST implemented as a DNN (with He initialization, activation function, and dropout)
# 3) TF 1.15, multi-layer implementation (with weight initialization, activation, and dropout settings)
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler # Normalization
from sklearn.model_selection import train_test_split # train, test 분리
from sklearn.metrics import classification_report
tf.reset_default_graph()
# Raw Data Loading
df = pd.read_csv('./data/mnist/train.csv')
##### No missing values or outliers #####
##### Preview the images #####
img_data = df.drop('label', axis=1, inplace=False).values
fig = plt.figure()
fig_arr = []
for n in range(10):
    # draw 10 subplots on a 2x5 grid; this is the (n+1)-th
    fig_arr.append(fig.add_subplot(2,5,n+1))
    fig_arr[n].imshow(img_data[n].reshape(28,28), cmap='Greys')
plt.tight_layout()
plt.show()
################################
# Data Split
x_data_train, x_data_test, t_data_train, t_data_test = \
    train_test_split(df.drop('label', axis=1, inplace=False), df['label'],
                     test_size=0.3, random_state=0)
# Min-Max Normalization
scaler = MinMaxScaler() # scaler = StandardScaler()
scaler.fit(x_data_train)
x_data_train_norm = scaler.transform(x_data_train)
x_data_test_norm = scaler.transform(x_data_test)
del x_data_train
del x_data_test
##############################################################
##### Tensorflow implementation #####
print(tf.__version__) # 1.15.0
sess = tf.Session()
t_data_train_onehot = sess.run(tf.one_hot(t_data_train, depth=10))
t_data_test_onehot = sess.run(tf.one_hot(t_data_test, depth=10))
# Placeholder
X = tf.placeholder(shape=[None,784], dtype=tf.float32)
T = tf.placeholder(shape=[None,10], dtype=tf.float32)
drop_rate = tf.placeholder(dtype=tf.float32) # fraction of nodes to drop
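# fed as 0.3 during training and 0 at evaluation (see run_train and the test feed below)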
# Weight & bias
# 256 logistic units in the hidden layer: a common rule of thumb is a size
# around the mean of the input (784) and output (10) widths, not exceeding 500.
# W2 = tf.Variable(tf.random.normal([784,256]), name='weight2')
# He initialization (He et al., 2015)
W2 = tf.get_variable('weight2', shape=[784,256],
                     initializer=tf.contrib.layers.variance_scaling_initializer())
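# variance_scaling_initializer defaults to He initialization (variance ~ 2/fan_in),
# which is well suited to ReLU layers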
b2 = tf.Variable(tf.random.normal([256]), name='bias2')
_layer2 = tf.nn.relu(tf.matmul(X, W2) + b2)
layer2 = tf.nn.dropout(_layer2, rate=drop_rate)
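# dropout zeroes a drop_rate fraction of activations and scales the rest by
# 1/(1-rate); it is effectively disabled when drop_rate is fed as 0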
# W3 = tf.Variable(tf.random.normal([256,128]), name='weight3')
W3 = tf.get_variable('weight3', shape=[256,128],
                     initializer=tf.contrib.layers.variance_scaling_initializer())
b3 = tf.Variable(tf.random.normal([128]), name='bias3')
_layer3 = tf.nn.relu(tf.matmul(layer2,W3) + b3)
layer3 = tf.nn.dropout(_layer3, rate=drop_rate)
# W4 = tf.Variable(tf.random.normal([128, 10]), name='weight4')
W4 = tf.get_variable('weight4', shape=[128, 10],
                     initializer=tf.contrib.layers.variance_scaling_initializer())
b4 = tf.Variable(tf.random.normal([10]), name='bias4')
# Hypothesis
logit = tf.matmul(layer3, W4) + b4
H = tf.nn.softmax(logit) # softmax activation function
# loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit,
                                                                 labels=T))
# train
train = tf.train.GradientDescentOptimizer(learning_rate=1e-1).minimize(loss)
# parameter
num_of_epoch = 500
batch_size = 100
# Training loop
def run_train(sess, train_x, train_t):
    print('### Starting Training ###')
    # initialize all variables
    sess.run(tf.global_variables_initializer())
    for step in range(num_of_epoch):
        total_batch = int(train_x.shape[0] / batch_size)
        for i in range(total_batch):
            batch_x = train_x[i*batch_size:(i+1)*batch_size]
            batch_t = train_t[i*batch_size:(i+1)*batch_size]
            _, loss_val = sess.run([train, loss], feed_dict={X:batch_x,
                                                             T:batch_t,
                                                             drop_rate:0.3})
        if step % 50 == 0:
            print('Loss : {}'.format(loss_val))
    print('### End Training ###')
# Accuracy
predict = tf.argmax(H, 1)
# Testing
run_train(sess, x_data_train_norm, t_data_train_onehot) # train
print('### Measuring accuracy on the test set ###')
result = sess.run(predict, feed_dict={X:x_data_test_norm,
                                      drop_rate:0})
print(classification_report(t_data_test, result.ravel()))