2020/11/13 - [Machine Learning/Deep Learning] - Convolutional Neural Network
Dropout
Dropout is a technique for suppressing overfitting: during training, a randomly chosen subset of neurons (units) is deactivated at each step. Because a different subset is dropped every time, the network cannot depend too heavily on any particular unit. Dropout is applied only during training; at test time all units are kept active.
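As a rough sketch of the idea (separate from the TensorFlow model below; the keep probability and array shape are arbitrary choices for illustration), inverted dropout can be written in a few lines of NumPy:

import numpy as np

def dropout(a, keep_prob=0.5, training=True):
    # At test time every unit stays active and no scaling is applied.
    if not training:
        return a
    # Keep each unit with probability keep_prob ...
    mask = np.random.rand(*a.shape) < keep_prob
    # ... and rescale the survivors so the expected activation is unchanged.
    return a * mask / keep_prob

print(dropout(np.ones((2, 4))))  # roughly half the entries are zeroed, the rest become 2.0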
Implementation
We apply dropout to a convolutional neural network model.
Load the Fashion MNIST dataset.
import numpy as np
from tensorflow.keras import datasets
(x_train, y_train), (x_test, y_test) = datasets.fashion_mnist.load_data()
print('data shape:', x_train.shape)
print('target shape:', y_train.shape)
print('target label:', np.unique(y_train, return_counts=True))
data shape: (60000, 28, 28)
target shape: (60000,)
target label: (array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint8), array([6000, 6000, 6000, 6000, 6000, 6000, 6000, 6000, 6000, 6000],
dtype=int64))
Reshape the data to add a channel dimension.
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
Split off 20% of the training data as a validation set.
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2)
Define the neural network.
import numpy as np
import tensorflow as tf

class Model:
    def __init__(self, lr=1e-3):
        tf.reset_default_graph()

        with tf.name_scope('input'):
            self.x = tf.placeholder(tf.float32, [None, 28, 28, 1])
            self.y = tf.placeholder(tf.int64)
            # Probability of keeping a unit (the keep rate passed to tf.nn.dropout).
            self.dropout_rate = tf.placeholder(tf.float32)

        with tf.name_scope('preprocessing'):
            x_norm = self.x / 255.0
            y_onehot = tf.one_hot(self.y, 10)

        with tf.name_scope('layer'):
            conv1 = tf.layers.conv2d(x_norm, 32, [3, 3], padding='VALID', activation=tf.nn.relu)
            pool1 = tf.layers.max_pooling2d(conv1, [2, 2], [2, 2], padding='VALID')
            conv2 = tf.layers.conv2d(pool1, 64, [3, 3], padding='VALID', activation=tf.nn.relu)
            pool2 = tf.layers.max_pooling2d(conv2, [2, 2], [2, 2], padding='VALID')
            flat = tf.layers.flatten(pool2)
            # Dropout between the flattened features and the fully connected layer.
            dropout = tf.nn.dropout(flat, self.dropout_rate)
            fc = tf.layers.dense(dropout, 64, tf.nn.relu)
            logits = tf.layers.dense(fc, 10)

        with tf.name_scope('output'):
            self.predict = tf.argmax(tf.nn.softmax(logits), 1)

        with tf.name_scope('accuracy'):
            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.to_int64(self.predict), self.y), dtype=tf.float32))

        with tf.name_scope('loss'):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_onehot, logits=logits)
            self.loss = tf.reduce_mean(cross_entropy)

        with tf.name_scope('optimizer'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)

        with tf.name_scope('summary'):
            self.summary_loss = tf.placeholder(tf.float32)
            self.summary_accuracy = tf.placeholder(tf.float32)
            tf.summary.scalar('loss', self.summary_loss)
            tf.summary.scalar('accuracy', self.summary_accuracy)
            self.merge = tf.summary.merge_all()
            self.train_writer = tf.summary.FileWriter('./tmp/cnn_fashion_mnist/dropout_train', tf.get_default_graph())
            self.val_writer = tf.summary.FileWriter('./tmp/cnn_fashion_mnist/dropout_val', tf.get_default_graph())

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def write_summary(self, tl, ta, vl, va, epoch):
        train_summary = self.sess.run(self.merge, {self.summary_loss: tl, self.summary_accuracy: ta})
        val_summary = self.sess.run(self.merge, {self.summary_loss: vl, self.summary_accuracy: va})
        self.train_writer.add_summary(train_summary, epoch)
        self.val_writer.add_summary(val_summary, epoch)

    def train(self, x_train, y_train, x_val, y_val, epochs, batch_size=32):
        data_size = len(x_train)
        for e in range(epochs):
            t_l, t_a = [], []
            # Shuffle the training data every epoch.
            idx = np.random.permutation(np.arange(data_size))
            _x_train, _y_train = x_train[idx], y_train[idx]
            for i in range(0, data_size, batch_size):
                si, ei = i, i + batch_size
                if ei > data_size:
                    ei = data_size
                x_batch, y_batch = _x_train[si:ei, :, :], _y_train[si:ei]
                # Keep 50% of the units while training.
                tl, ta, _ = self.sess.run([self.loss, self.accuracy, self.train_op], {self.x: x_batch, self.y: y_batch, self.dropout_rate: 0.5})
                t_l.append(tl)
                t_a.append(ta)
            # Keep every unit when evaluating on the validation set.
            vl, va = self.sess.run([self.loss, self.accuracy], {self.x: x_val, self.y: y_val, self.dropout_rate: 1.0})
            self.write_summary(np.mean(t_l), np.mean(t_a), vl, va, e)
            print('epoch:', e + 1, ' / train_loss:', np.mean(t_l), '/ train_acc:', np.mean(t_a), ' / val_loss:', vl, '/ val_acc:', va)

    def score(self, x, y):
        return self.sess.run(self.accuracy, {self.x: x, self.y: y, self.dropout_rate: 1.0})
The dropout layer (tf.nn.dropout) takes the fraction of units to keep.
with tf.name_scope('input'):
    self.x = tf.placeholder(tf.float32, [None, 28, 28, 1])
    self.y = tf.placeholder(tf.int64)
    self.dropout_rate = tf.placeholder(tf.float32)
...
with tf.name_scope('layer'):
    conv1 = tf.layers.conv2d(x_norm, 32, [3, 3], padding='VALID', activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv1, [2, 2], [2, 2], padding='VALID')
    conv2 = tf.layers.conv2d(pool1, 64, [3, 3], padding='VALID', activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv2, [2, 2], [2, 2], padding='VALID')
    flat = tf.layers.flatten(pool2)
    dropout = tf.nn.dropout(flat, self.dropout_rate)
    fc = tf.layers.dense(dropout, 64, tf.nn.relu)
    logits = tf.layers.dense(fc, 10)
If the keep rate is set to 0.3, roughly 70% of the units are randomly deactivated. For validation and testing it is set to 1.0 so that all units stay active.
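As a quick check of this keep semantics (a toy tensor, not part of the model above), tf.nn.dropout in TF 1.x also scales the surviving units by the reciprocal of the keep rate so the expected sum is preserved:

import tensorflow as tf

x = tf.ones([1, 10])
dropped = tf.nn.dropout(x, 0.3)  # keep rate 0.3: ~70% of entries become 0, survivors become 1/0.3
with tf.Session() as sess:
    print(sess.run(dropped))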
Train and test the model.
model = Model()
model.train(x_train, y_train, x_val, y_val, epochs=20)
model.score(x_test, y_test)
epoch: 1 / train_loss: 0.54586285 / train_acc: 0.8024167 / val_loss: 0.3937745 / val_acc: 0.8516667
epoch: 2 / train_loss: 0.37997714 / train_acc: 0.861 / val_loss: 0.32430208 / val_acc: 0.88058335
epoch: 3 / train_loss: 0.33082 / train_acc: 0.8768542 / val_loss: 0.3169227 / val_acc: 0.88075
epoch: 4 / train_loss: 0.3031651 / train_acc: 0.8883333 / val_loss: 0.28263652 / val_acc: 0.8961667
epoch: 5 / train_loss: 0.27801484 / train_acc: 0.89666665 / val_loss: 0.2777464 / val_acc: 0.896
epoch: 6 / train_loss: 0.26787764 / train_acc: 0.89902085 / val_loss: 0.28193024 / val_acc: 0.8936667
epoch: 7 / train_loss: 0.25405225 / train_acc: 0.904875 / val_loss: 0.26641348 / val_acc: 0.90258336
epoch: 8 / train_loss: 0.24449031 / train_acc: 0.9074375 / val_loss: 0.26794034 / val_acc: 0.89975
epoch: 9 / train_loss: 0.23362926 / train_acc: 0.91216666 / val_loss: 0.25061563 / val_acc: 0.90791667
epoch: 10 / train_loss: 0.2289409 / train_acc: 0.9132917 / val_loss: 0.25986716 / val_acc: 0.90566665
epoch: 11 / train_loss: 0.21823755 / train_acc: 0.9184375 / val_loss: 0.2457858 / val_acc: 0.9076667
epoch: 12 / train_loss: 0.21376282 / train_acc: 0.918375 / val_loss: 0.25176075 / val_acc: 0.9073333
epoch: 13 / train_loss: 0.21100517 / train_acc: 0.92108333 / val_loss: 0.252951 / val_acc: 0.90783334
epoch: 14 / train_loss: 0.20125681 / train_acc: 0.9237083 / val_loss: 0.25745368 / val_acc: 0.90758336
epoch: 15 / train_loss: 0.19526783 / train_acc: 0.9266667 / val_loss: 0.24359514 / val_acc: 0.9130833
epoch: 16 / train_loss: 0.19302467 / train_acc: 0.9270833 / val_loss: 0.23960693 / val_acc: 0.91116667
epoch: 17 / train_loss: 0.18864958 / train_acc: 0.92808336 / val_loss: 0.24460652 / val_acc: 0.91275
epoch: 18 / train_loss: 0.18488875 / train_acc: 0.9294583 / val_loss: 0.23526335 / val_acc: 0.9159167
epoch: 19 / train_loss: 0.17939131 / train_acc: 0.932125 / val_loss: 0.2448088 / val_acc: 0.9115
epoch: 20 / train_loss: 0.17674284 / train_acc: 0.93214583 / val_loss: 0.23607413 / val_acc: 0.9155833
0.9164
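The class only exposes score, but class predictions can be read from the predict tensor directly; a minimal sketch, reusing the same session and feeding a keep rate of 1.0 as at evaluation time:

# Predicted class indices for the first five test images.
preds = model.sess.run(model.predict, {model.x: x_test[:5], model.dropout_rate: 1.0})
print(preds, y_test[:5])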
The accuracy and loss curves per epoch are shown below. (Orange: training, blue: validation; red: training with dropout, sky blue: validation with dropout.)
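These curves come from the summaries written by the FileWriter objects above; assuming TensorBoard is installed, they can be viewed by running tensorboard --logdir ./tmp/cnn_fashion_mnist in a terminal.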
With dropout applied, overfitting no longer occurs and performance improves slightly.