Deep Learning (0913_day7) - Improving CIFAR10 Performance
_JAEJAE_
2021. 9. 13. 17:25
Training a Neural Network on the CIFAR10 Dataset
Imports
In [ ]:
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Dense
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, BatchNormalization, Activation, GlobalAveragePooling2D
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
Loading the Data
In [ ]:
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
In [ ]:
print(train_images.shape, train_labels.shape)
print(test_images.shape, test_labels.shape)
print(train_images[0].shape)
(50000, 32, 32, 3) (50000, 1)
(10000, 32, 32, 3) (10000, 1)
(32, 32, 3)
In [ ]:
class_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
In [ ]:
train_labels[:10]
Out[ ]:
array([[6], [9], [9], [4], [1], [1], [2], [7], [8], [3]], dtype=uint8)
Visualizing the Data
In [ ]:
# Flatten labels from shape (50000, 1) to (50000,) so they can index class_names
train_labels = np.int32(train_labels.flatten())
In [ ]:
figure, axes = plt.subplots(figsize=(22, 10), nrows=3, ncols=8)
for i in range(3):
    for j in range(8):
        axes[i][j].imshow(train_images[(i * 8) + j])
        axes[i][j].set_title(class_names[train_labels[(i * 8) + j]])
Preprocessing the Data
In [ ]:
# Scale pixel values to [0, 1] and cast to float32
train_images = np.array(train_images / 255.0, dtype=np.float32)
train_labels = np.array(train_labels, dtype=np.float32)
test_images = np.array(test_images / 255.0, dtype=np.float32)
test_labels = np.array(test_labels, dtype=np.float32)

# One-hot encode the labels for categorical cross-entropy
train_oh_labels = to_categorical(train_labels)
test_oh_labels = to_categorical(test_labels)
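Note: categorical_crossentropy (used when compiling below) expects one-hot targets, which is why to_categorical is applied here. As an aside not used in this notebook, the one-hot step can be skipped by keeping the integer labels and compiling with sparse categorical cross-entropy; a minimal sketch, assuming the same model and data:

model.compile(optimizer=Adam(0.001),
              loss="sparse_categorical_crossentropy",  # integer labels, no to_categorical needed
              metrics=["accuracy"])
model.fit(train_images, train_labels, batch_size=32, epochs=30, validation_split=0.2)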
In [ ]:
test_oh_labels
Out[ ]:
array([[0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 1., 0.],
       [0., 0., 0., ..., 0., 1., 0.],
       ...,
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 1., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 1., 0., 0.]], dtype=float32)
Building the Model
In [ ]:
def build_model():
    tf.keras.backend.clear_session()
    input_tensor = Input(shape=(32, 32, 3))

    # Block 1: two Conv-BN-ReLU units with 32 filters, then 2x2 max pooling
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same")(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Block 2: 64 filters
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Block 3: 128 filters
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Classifier head: dropout-regularized dense layers, softmax over the 10 classes
    x = Flatten()(x)
    x = Dropout(rate=0.5)(x)
    x = Dense(300, activation="relu")(x)
    x = Dropout(rate=0.3)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(rate=0.3)(x)
    output = Dense(10, activation="softmax")(x)

    model = Model(inputs=input_tensor, outputs=output)
    return model
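The Conv2D → BatchNormalization → Activation("relu") triplet above repeats six times. A small helper, purely a hypothetical refactor and not part of the original notebook, would make the pattern explicit:

def conv_bn_relu(x, filters):
    # One Conv -> BN -> ReLU unit, identical to the repeated blocks in build_model()
    x = Conv2D(filters=filters, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    return Activation("relu")(x)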
In [ ]:
model = build_model()
Compiling the Model
In [ ]:
model.compile(optimizer=Adam(0.001), loss="categorical_crossentropy", metrics=["accuracy"])
Training the Model
In [ ]:
batch_size = 32
history = model.fit(x=train_images, y=train_oh_labels, batch_size=batch_size, epochs=30, validation_split=0.2)
Epoch 1/30 - 57s 24ms/step - loss: 1.7036 - accuracy: 0.3796 - val_loss: 1.4955 - val_accuracy: 0.4691
Epoch 2/30 - 26s 21ms/step - loss: 1.2347 - accuracy: 0.5646 - val_loss: 1.0512 - val_accuracy: 0.6251
Epoch 3/30 - 26s 21ms/step - loss: 1.0222 - accuracy: 0.6457 - val_loss: 0.9102 - val_accuracy: 0.6883
Epoch 4/30 - 26s 21ms/step - loss: 0.9004 - accuracy: 0.6948 - val_loss: 0.8284 - val_accuracy: 0.7161
Epoch 5/30 - 26s 20ms/step - loss: 0.8060 - accuracy: 0.7269 - val_loss: 0.9674 - val_accuracy: 0.6810
Epoch 6/30 - 26s 21ms/step - loss: 0.7394 - accuracy: 0.7510 - val_loss: 0.7857 - val_accuracy: 0.7357
Epoch 7/30 - 26s 21ms/step - loss: 0.6772 - accuracy: 0.7714 - val_loss: 0.7322 - val_accuracy: 0.7436
Epoch 8/30 - 26s 21ms/step - loss: 0.6254 - accuracy: 0.7914 - val_loss: 1.0344 - val_accuracy: 0.6743
Epoch 9/30 - 26s 21ms/step - loss: 0.5869 - accuracy: 0.8044 - val_loss: 0.6846 - val_accuracy: 0.7713
Epoch 10/30 - 26s 21ms/step - loss: 0.5391 - accuracy: 0.8213 - val_loss: 0.7362 - val_accuracy: 0.7537
Epoch 11/30 - 26s 21ms/step - loss: 0.5051 - accuracy: 0.8321 - val_loss: 0.6491 - val_accuracy: 0.7922
Epoch 12/30 - 26s 21ms/step - loss: 0.4700 - accuracy: 0.8437 - val_loss: 0.6870 - val_accuracy: 0.7793
Epoch 13/30 - 26s 21ms/step - loss: 0.4364 - accuracy: 0.8536 - val_loss: 0.5211 - val_accuracy: 0.8246
Epoch 14/30 - 26s 21ms/step - loss: 0.4097 - accuracy: 0.8644 - val_loss: 0.6611 - val_accuracy: 0.7942
Epoch 15/30 - 26s 21ms/step - loss: 0.3902 - accuracy: 0.8710 - val_loss: 0.6795 - val_accuracy: 0.7842
Epoch 16/30 - 26s 21ms/step - loss: 0.3661 - accuracy: 0.8784 - val_loss: 0.6497 - val_accuracy: 0.7996
Epoch 17/30 - 26s 21ms/step - loss: 0.3452 - accuracy: 0.8855 - val_loss: 0.5536 - val_accuracy: 0.8268
Epoch 18/30 - 26s 21ms/step - loss: 0.3194 - accuracy: 0.8937 - val_loss: 0.6322 - val_accuracy: 0.8030
Epoch 19/30 - 26s 21ms/step - loss: 0.3083 - accuracy: 0.8970 - val_loss: 0.5713 - val_accuracy: 0.8224
Epoch 20/30 - 26s 21ms/step - loss: 0.2920 - accuracy: 0.9038 - val_loss: 0.5717 - val_accuracy: 0.8419
Epoch 21/30 - 26s 21ms/step - loss: 0.2845 - accuracy: 0.9060 - val_loss: 0.5698 - val_accuracy: 0.8272
Epoch 22/30 - 26s 21ms/step - loss: 0.2658 - accuracy: 0.9104 - val_loss: 0.5669 - val_accuracy: 0.8341
Epoch 23/30 - 26s 21ms/step - loss: 0.2527 - accuracy: 0.9172 - val_loss: 0.6116 - val_accuracy: 0.8264
Epoch 24/30 - 26s 21ms/step - loss: 0.2458 - accuracy: 0.9202 - val_loss: 0.7002 - val_accuracy: 0.8146
Epoch 25/30 - 26s 21ms/step - loss: 0.2270 - accuracy: 0.9248 - val_loss: 0.6205 - val_accuracy: 0.8215
Epoch 26/30 - 26s 21ms/step - loss: 0.2257 - accuracy: 0.9244 - val_loss: 0.6134 - val_accuracy: 0.8262
Epoch 27/30 - 26s 21ms/step - loss: 0.2112 - accuracy: 0.9307 - val_loss: 0.6482 - val_accuracy: 0.8279
Epoch 28/30 - 26s 21ms/step - loss: 0.2083 - accuracy: 0.9312 - val_loss: 0.6227 - val_accuracy: 0.8309
Epoch 29/30 - 26s 21ms/step - loss: 0.2052 - accuracy: 0.9334 - val_loss: 0.6521 - val_accuracy: 0.8268
Epoch 30/30 - 26s 21ms/step - loss: 0.1893 - accuracy: 0.9380 - val_loss: 0.6303 - val_accuracy: 0.8271
In [ ]:
plt.plot(history.history["accuracy"], label="train")
plt.plot(history.history["val_accuracy"], label="valid")
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x7f3110b386d0>
Evaluating on the Test Data
In [ ]:
evaluate_accuracy = model.evaluate(test_images, test_oh_labels, batch_size=batch_size, verbose=1)
313/313 [==============================] - 3s 9ms/step - loss: 0.6434 - accuracy: 0.8238
Applying Callbacks (ModelCheckpoint, EarlyStopping, ReduceLROnPlateau)
Building and Compiling the Model
In [ ]:
model = build_model()
model.summary()
Model: "model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
input_1 (InputLayer)         [(None, 32, 32, 3)]       0
conv2d (Conv2D)              (None, 32, 32, 32)        896
batch_normalization (BatchNo (None, 32, 32, 32)        128
activation (Activation)      (None, 32, 32, 32)        0
conv2d_1 (Conv2D)            (None, 32, 32, 32)        9248
batch_normalization_1 (Batch (None, 32, 32, 32)        128
activation_1 (Activation)    (None, 32, 32, 32)        0
max_pooling2d (MaxPooling2D) (None, 16, 16, 32)        0
conv2d_2 (Conv2D)            (None, 16, 16, 64)        18496
batch_normalization_2 (Batch (None, 16, 16, 64)        256
activation_2 (Activation)    (None, 16, 16, 64)        0
conv2d_3 (Conv2D)            (None, 16, 16, 64)        36928
batch_normalization_3 (Batch (None, 16, 16, 64)        256
activation_3 (Activation)    (None, 16, 16, 64)        0
max_pooling2d_1 (MaxPooling2 (None, 8, 8, 64)          0
conv2d_4 (Conv2D)            (None, 8, 8, 128)         73856
batch_normalization_4 (Batch (None, 8, 8, 128)         512
activation_4 (Activation)    (None, 8, 8, 128)         0
conv2d_5 (Conv2D)            (None, 8, 8, 128)         147584
batch_normalization_5 (Batch (None, 8, 8, 128)         512
activation_5 (Activation)    (None, 8, 8, 128)         0
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 128)         0
flatten (Flatten)            (None, 2048)              0
dropout (Dropout)            (None, 2048)              0
dense (Dense)                (None, 300)               614700
dropout_1 (Dropout)          (None, 300)               0
dense_1 (Dense)              (None, 100)               30100
dropout_2 (Dropout)          (None, 100)               0
dense_2 (Dense)              (None, 10)                1010
=================================================================
Total params: 934,610
Trainable params: 933,714
Non-trainable params: 896
_________________________________________________________________
In [ ]:
model.compile(optimizer=Adam(0.001), loss="categorical_crossentropy", metrics=["accuracy"])
Defining the Callbacks
In [ ]:
# Save the model whenever val_loss (the default monitor) improves
checkpoint_cb = ModelCheckpoint("my_keras_model.h5", save_best_only=True, verbose=1)
# Stop after 10 epochs without val_loss improvement, restoring the best weights
early_stopping_cb = EarlyStopping(patience=10, restore_best_weights=True)
# Cut the learning rate to 20% after 5 epochs without val_loss improvement
reducelr_cb = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5, mode="min", verbose=1)
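With save_best_only=True, my_keras_model.h5 always holds the weights of the best epoch so far, and restore_best_weights=True rolls the in-memory model back to that epoch when training stops. The checkpoint can also be reloaded later; a minimal sketch:

from tensorflow.keras.models import load_model

best_model = load_model("my_keras_model.h5")  # weights from the lowest-val_loss epoch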
In [ ]:
batch_size = 32
history = model.fit(x=train_images, y=train_oh_labels, batch_size=batch_size, epochs=40, validation_split=0.2,
callbacks=[checkpoint_cb, early_stopping_cb, reducelr_cb])
Epoch 1/40 - 31s 24ms/step - loss: 1.7270 - accuracy: 0.3663 - val_loss: 1.6567 - val_accuracy: 0.4408
Epoch 00001: val_loss improved from inf to 1.65672, saving model to my_keras_model.h5
Epoch 2/40 - 28s 23ms/step - loss: 1.2361 - accuracy: 0.5613 - val_loss: 1.1919 - val_accuracy: 0.5717
Epoch 00002: val_loss improved from 1.65672 to 1.19195, saving model to my_keras_model.h5
Epoch 3/40 - 28s 22ms/step - loss: 1.0279 - accuracy: 0.6424 - val_loss: 1.0462 - val_accuracy: 0.6360
Epoch 00003: val_loss improved from 1.19195 to 1.04619, saving model to my_keras_model.h5
Epoch 4/40 - 27s 22ms/step - loss: 0.9087 - accuracy: 0.6920 - val_loss: 0.8407 - val_accuracy: 0.7099
Epoch 00004: val_loss improved from 1.04619 to 0.84072, saving model to my_keras_model.h5
Epoch 5/40 - 27s 22ms/step - loss: 0.8151 - accuracy: 0.7250 - val_loss: 0.7887 - val_accuracy: 0.7324
Epoch 00005: val_loss improved from 0.84072 to 0.78870, saving model to my_keras_model.h5
Epoch 6/40 - 27s 22ms/step - loss: 0.7436 - accuracy: 0.7491 - val_loss: 0.9310 - val_accuracy: 0.6763
Epoch 00006: val_loss did not improve from 0.78870
Epoch 7/40 - 27s 22ms/step - loss: 0.6743 - accuracy: 0.7742 - val_loss: 0.9520 - val_accuracy: 0.6706
Epoch 00007: val_loss did not improve from 0.78870
Epoch 8/40 - 27s 22ms/step - loss: 0.6211 - accuracy: 0.7937 - val_loss: 0.7129 - val_accuracy: 0.7521
Epoch 00008: val_loss improved from 0.78870 to 0.71291, saving model to my_keras_model.h5
Epoch 9/40 - 27s 21ms/step - loss: 0.5771 - accuracy: 0.8086 - val_loss: 0.6690 - val_accuracy: 0.7727
Epoch 00009: val_loss improved from 0.71291 to 0.66896, saving model to my_keras_model.h5
Epoch 10/40 - 27s 22ms/step - loss: 0.5344 - accuracy: 0.8210 - val_loss: 0.6299 - val_accuracy: 0.7870
Epoch 00010: val_loss improved from 0.66896 to 0.62987, saving model to my_keras_model.h5
Epoch 11/40 - 27s 22ms/step - loss: 0.4921 - accuracy: 0.8350 - val_loss: 0.6935 - val_accuracy: 0.7637
Epoch 00011: val_loss did not improve from 0.62987
Epoch 12/40 - 27s 21ms/step - loss: 0.4654 - accuracy: 0.8456 - val_loss: 0.7862 - val_accuracy: 0.7489
Epoch 00012: val_loss did not improve from 0.62987
Epoch 13/40 - 27s 21ms/step - loss: 0.4308 - accuracy: 0.8553 - val_loss: 0.5447 - val_accuracy: 0.8225
Epoch 00013: val_loss improved from 0.62987 to 0.54470, saving model to my_keras_model.h5
Epoch 14/40 - 26s 21ms/step - loss: 0.4126 - accuracy: 0.8637 - val_loss: 0.5707 - val_accuracy: 0.8090
Epoch 00014: val_loss did not improve from 0.54470
Epoch 15/40 - 26s 21ms/step - loss: 0.3871 - accuracy: 0.8711 - val_loss: 0.5641 - val_accuracy: 0.8200
Epoch 00015: val_loss did not improve from 0.54470
Epoch 16/40 - 26s 21ms/step - loss: 0.3655 - accuracy: 0.8776 - val_loss: 0.5912 - val_accuracy: 0.8183
Epoch 00016: val_loss did not improve from 0.54470
Epoch 17/40 - 26s 21ms/step - loss: 0.3410 - accuracy: 0.8865 - val_loss: 0.6671 - val_accuracy: 0.7977
Epoch 00017: val_loss did not improve from 0.54470
Epoch 18/40 - 26s 21ms/step - loss: 0.3278 - accuracy: 0.8899 - val_loss: 0.7426 - val_accuracy: 0.7871
Epoch 00018: val_loss did not improve from 0.54470
Epoch 00018: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026.
Epoch 19/40 - 26s 21ms/step - loss: 0.2311 - accuracy: 0.9234 - val_loss: 0.5059 - val_accuracy: 0.8529
Epoch 00019: val_loss improved from 0.54470 to 0.50591, saving model to my_keras_model.h5
Epoch 20/40 - 26s 21ms/step - loss: 0.1915 - accuracy: 0.9360 - val_loss: 0.4994 - val_accuracy: 0.8566
Epoch 00020: val_loss improved from 0.50591 to 0.49938, saving model to my_keras_model.h5
Epoch 21/40 - 26s 21ms/step - loss: 0.1746 - accuracy: 0.9405 - val_loss: 0.5337 - val_accuracy: 0.8538
Epoch 00021: val_loss did not improve from 0.49938
Epoch 22/40 - 26s 21ms/step - loss: 0.1589 - accuracy: 0.9470 - val_loss: 0.5283 - val_accuracy: 0.8558
Epoch 00022: val_loss did not improve from 0.49938
Epoch 23/40 - 26s 21ms/step - loss: 0.1458 - accuracy: 0.9506 - val_loss: 0.5554 - val_accuracy: 0.8500
Epoch 00023: val_loss did not improve from 0.49938
Epoch 24/40 - 26s 21ms/step - loss: 0.1421 - accuracy: 0.9526 - val_loss: 0.5563 - val_accuracy: 0.8537
Epoch 00024: val_loss did not improve from 0.49938
Epoch 25/40 - 26s 21ms/step - loss: 0.1328 - accuracy: 0.9551 - val_loss: 0.5500 - val_accuracy: 0.8589
Epoch 00025: val_loss did not improve from 0.49938
Epoch 00025: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05.
Epoch 26/40 - 26s 21ms/step - loss: 0.1117 - accuracy: 0.9626 - val_loss: 0.5447 - val_accuracy: 0.8621
Epoch 00026: val_loss did not improve from 0.49938
Epoch 27/40 - 26s 21ms/step - loss: 0.1095 - accuracy: 0.9627 - val_loss: 0.5633 - val_accuracy: 0.8607
Epoch 00027: val_loss did not improve from 0.49938
Epoch 28/40 - 26s 21ms/step - loss: 0.1032 - accuracy: 0.9650 - val_loss: 0.5715 - val_accuracy: 0.8618
Epoch 00028: val_loss did not improve from 0.49938
Epoch 29/40 - 26s 21ms/step - loss: 0.1016 - accuracy: 0.9650 - val_loss: 0.5802 - val_accuracy: 0.8604
Epoch 00029: val_loss did not improve from 0.49938
Epoch 30/40 - 26s 21ms/step - loss: 0.0984 - accuracy: 0.9663 - val_loss: 0.5865 - val_accuracy: 0.8599
Epoch 00030: val_loss did not improve from 0.49938
Epoch 00030: ReduceLROnPlateau reducing learning rate to 8.000000525498762e-06.
In [ ]:
plt.plot(history.history["accuracy"], label="train")
plt.plot(history.history["val_accuracy"], label="valid")
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x7f30c5e2d910>
In [ ]:
model.evaluate(test_images, test_oh_labels, batch_size=batch_size, verbose=1)
313/313 [==============================] - 3s 9ms/step - loss: 0.5338 - accuracy: 0.8483
Out[ ]:
[0.5337657928466797, 0.8482999801635742]
Adding More Filters and Layers
In [ ]:
def build_extended_model():
    tf.keras.backend.clear_session()
    input_tensor = Input(shape=(32, 32, 3))

    # Filter counts grow 32 -> 64 -> 128 -> 128 -> 256 -> 256 -> 512
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same")(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # Pooling intentionally skipped here, so the 512-filter block still sees an 8x8 map
    # x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Flatten()(x)
    x = Dropout(rate=0.5)(x)
    x = Dense(300, activation="relu")(x)
    x = Dropout(rate=0.3)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(rate=0.3)(x)
    output = Dense(10, activation="softmax")(x)

    model = Model(inputs=input_tensor, outputs=output)
    return model
Building and Compiling the Model
In [ ]:
model = build_extended_model()
model.summary()
Model: "model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
input_1 (InputLayer)         [(None, 32, 32, 3)]       0
conv2d (Conv2D)              (None, 32, 32, 32)        896
batch_normalization (BatchNo (None, 32, 32, 32)        128
activation (Activation)      (None, 32, 32, 32)        0
conv2d_1 (Conv2D)            (None, 32, 32, 64)        18496
batch_normalization_1 (Batch (None, 32, 32, 64)        256
activation_1 (Activation)    (None, 32, 32, 64)        0
max_pooling2d (MaxPooling2D) (None, 16, 16, 64)        0
conv2d_2 (Conv2D)            (None, 16, 16, 128)       73856
batch_normalization_2 (Batch (None, 16, 16, 128)       512
activation_2 (Activation)    (None, 16, 16, 128)       0
conv2d_3 (Conv2D)            (None, 16, 16, 128)       147584
batch_normalization_3 (Batch (None, 16, 16, 128)       512
activation_3 (Activation)    (None, 16, 16, 128)       0
max_pooling2d_1 (MaxPooling2 (None, 8, 8, 128)         0
conv2d_4 (Conv2D)            (None, 8, 8, 256)         295168
batch_normalization_4 (Batch (None, 8, 8, 256)         1024
activation_4 (Activation)    (None, 8, 8, 256)         0
conv2d_5 (Conv2D)            (None, 8, 8, 256)         590080
batch_normalization_5 (Batch (None, 8, 8, 256)         1024
activation_5 (Activation)    (None, 8, 8, 256)         0
conv2d_6 (Conv2D)            (None, 8, 8, 512)         1180160
batch_normalization_6 (Batch (None, 8, 8, 512)         2048
activation_6 (Activation)    (None, 8, 8, 512)         0
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 512)         0
flatten (Flatten)            (None, 8192)              0
dropout (Dropout)            (None, 8192)              0
dense (Dense)                (None, 300)               2457900
dropout_1 (Dropout)          (None, 300)               0
dense_1 (Dense)              (None, 100)               30100
dropout_2 (Dropout)          (None, 100)               0
dense_2 (Dense)              (None, 10)                1010
=================================================================
Total params: 4,800,754
Trainable params: 4,798,002
Non-trainable params: 2,752
_________________________________________________________________
In [ ]:
model.compile(optimizer=Adam(0.001), loss="categorical_crossentropy", metrics=["accuracy"])
Defining the Callbacks
In [ ]:
checkpoint_cb = ModelCheckpoint("my_keras_model.h5", save_best_only=True, verbose=1)
early_stopping_cb = EarlyStopping(patience=10, restore_best_weights=True)
reducelr_cb = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5, mode="min", verbose=1)
Training the Model
In [ ]:
batch_size = 32
history = model.fit(x=train_images, y=train_oh_labels, batch_size=batch_size, epochs=40, validation_split=0.2,
callbacks=[checkpoint_cb, early_stopping_cb, reducelr_cb])
Epoch 1/40 - 44s 34ms/step - loss: 1.9238 - accuracy: 0.2819 - val_loss: 2.1319 - val_accuracy: 0.2787
Epoch 00001: val_loss improved from inf to 2.13189, saving model to my_keras_model.h5
Epoch 2/40 - 42s 33ms/step - loss: 1.4577 - accuracy: 0.4533 - val_loss: 1.5571 - val_accuracy: 0.4819
Epoch 00002: val_loss improved from 2.13189 to 1.55712, saving model to my_keras_model.h5
Epoch 3/40 - 42s 33ms/step - loss: 1.2312 - accuracy: 0.5560 - val_loss: 1.3039 - val_accuracy: 0.5688
Epoch 00003: val_loss improved from 1.55712 to 1.30385, saving model to my_keras_model.h5
Epoch 4/40 - 42s 33ms/step - loss: 1.0752 - accuracy: 0.6200 - val_loss: 1.0629 - val_accuracy: 0.6163
Epoch 00004: val_loss improved from 1.30385 to 1.06294, saving model to my_keras_model.h5
Epoch 5/40 - 42s 33ms/step - loss: 0.9580 - accuracy: 0.6638 - val_loss: 1.1420 - val_accuracy: 0.6305
Epoch 00005: val_loss did not improve from 1.06294
Epoch 6/40 - 42s 33ms/step - loss: 0.8573 - accuracy: 0.7063 - val_loss: 1.1030 - val_accuracy: 0.6318
Epoch 00006: val_loss did not improve from 1.06294
Epoch 7/40 - 42s 34ms/step - loss: 0.7809 - accuracy: 0.7356 - val_loss: 0.8965 - val_accuracy: 0.7063
Epoch 00007: val_loss improved from 1.06294 to 0.89652, saving model to my_keras_model.h5
Epoch 8/40 - 42s 34ms/step - loss: 0.7020 - accuracy: 0.7653 - val_loss: 0.7562 - val_accuracy: 0.7421
Epoch 00008: val_loss improved from 0.89652 to 0.75623, saving model to my_keras_model.h5
Epoch 9/40 - 42s 34ms/step - loss: 0.6382 - accuracy: 0.7864 - val_loss: 0.6688 - val_accuracy: 0.7723
Epoch 00009: val_loss improved from 0.75623 to 0.66879, saving model to my_keras_model.h5
Epoch 10/40 - 42s 34ms/step - loss: 0.5479 - accuracy: 0.8186 - val_loss: 0.6772 - val_accuracy: 0.7803
Epoch 00010: val_loss did not improve from 0.66879
Epoch 11/40 - 42s 33ms/step - loss: 0.4877 - accuracy: 0.8409 - val_loss: 0.5531 - val_accuracy: 0.8164
Epoch 00011: val_loss improved from 0.66879 to 0.55311, saving model to my_keras_model.h5
Epoch 12/40 - 42s 34ms/step - loss: 0.4299 - accuracy: 0.8594 - val_loss: 0.5521 - val_accuracy: 0.8094
Epoch 00012: val_loss improved from 0.55311 to 0.55215, saving model to my_keras_model.h5
Epoch 13/40 - 44s 35ms/step - loss: 0.3827 - accuracy: 0.8762 - val_loss: 0.6624 - val_accuracy: 0.7792
Epoch 00013: val_loss did not improve from 0.55215
Epoch 14/40 - 42s 34ms/step - loss: 0.3354 - accuracy: 0.8922 - val_loss: 0.5949 - val_accuracy: 0.8216
Epoch 00014: val_loss did not improve from 0.55215
Epoch 15/40 - 42s 34ms/step - loss: 0.2998 - accuracy: 0.9046 - val_loss: 0.6030 - val_accuracy: 0.8186
Epoch 00015: val_loss did not improve from 0.55215
Epoch 16/40 - 42s 34ms/step - loss: 0.2621 - accuracy: 0.9154 - val_loss: 0.7687 - val_accuracy: 0.7766
Epoch 00016: val_loss did not improve from 0.55215
Epoch 17/40 - 42s 34ms/step - loss: 0.2395 - accuracy: 0.9219 - val_loss: 0.5546 - val_accuracy: 0.8413
Epoch 00017: val_loss did not improve from 0.55215
Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026.
Epoch 18/40 - 42s 34ms/step - loss: 0.1311 - accuracy: 0.9587 - val_loss: 0.5208 - val_accuracy: 0.8661
Epoch 00018: val_loss improved from 0.55215 to 0.52075, saving model to my_keras_model.h5
Epoch 19/40 - 42s 34ms/step - loss: 0.0919 - accuracy: 0.9697 - val_loss: 0.5351 - val_accuracy: 0.8759
Epoch 00019: val_loss did not improve from 0.52075
Epoch 20/40 - 42s 34ms/step - loss: 0.0761 - accuracy: 0.9754 - val_loss: 0.5578 - val_accuracy: 0.8733
Epoch 00020: val_loss did not improve from 0.52075
Epoch 21/40 - 42s 34ms/step - loss: 0.0622 - accuracy: 0.9803 - val_loss: 0.5912 - val_accuracy: 0.8744
Epoch 00021: val_loss did not improve from 0.52075
Epoch 22/40 - 42s 34ms/step - loss: 0.0552 - accuracy: 0.9827 - val_loss: 0.6486 - val_accuracy: 0.8708
Epoch 00022: val_loss did not improve from 0.52075
Epoch 23/40 - 42s 34ms/step - loss: 0.0476 - accuracy: 0.9852 - val_loss: 0.6604 - val_accuracy: 0.8714
Epoch 00023: val_loss did not improve from 0.52075
Epoch 00023: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05.
Epoch 24/40 - 42s 34ms/step - loss: 0.0346 - accuracy: 0.9891 - val_loss: 0.6614 - val_accuracy: 0.8799
Epoch 00024: val_loss did not improve from 0.52075
Epoch 25/40 - 42s 34ms/step - loss: 0.0298 - accuracy: 0.9908 - val_loss: 0.6705 - val_accuracy: 0.8812
Epoch 00025: val_loss did not improve from 0.52075
Epoch 26/40 - 42s 34ms/step - loss: 0.0266 - accuracy: 0.9915 - val_loss: 0.6950 - val_accuracy: 0.8792
Epoch 00026: val_loss did not improve from 0.52075
Epoch 27/40 - 42s 34ms/step - loss: 0.0232 - accuracy: 0.9926 - val_loss: 0.7273 - val_accuracy: 0.8790
Epoch 00027: val_loss did not improve from 0.52075
Epoch 28/40 - 42s 34ms/step - loss: 0.0214 - accuracy: 0.9931 - val_loss: 0.7615 - val_accuracy: 0.8766
Epoch 00028: val_loss did not improve from 0.52075
Epoch 00028: ReduceLROnPlateau reducing learning rate to 8.000000525498762e-06.
In [ ]:
plt.plot(history.history["accuracy"], label="train")
plt.plot(history.history["val_accuracy"], label="valid")
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x7f30c5e2e310>
Evaluating on the Test Data
In [ ]:
model.evaluate(test_images, test_oh_labels, batch_size=batch_size, verbose=1)
313/313 [==============================] - 4s 12ms/step - loss: 0.5660 - accuracy: 0.8602
Out[ ]:
[0.5659989714622498, 0.8601999878883362]
Applying Global Average Pooling
In [ ]:
def build_extended_model():
    tf.keras.backend.clear_session()
    input_tensor = Input(shape=(32, 32, 3))

    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same")(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Flatten is replaced with global average pooling: (None, 4, 4, 512) -> (None, 512)
    # x = Flatten()(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(rate=0.5)(x)
    x = Dense(300, activation="relu")(x)
    x = Dropout(rate=0.3)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(rate=0.3)(x)
    output = Dense(10, activation="softmax")(x)

    model = Model(inputs=input_tensor, outputs=output)
    return model
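GlobalAveragePooling2D collapses each feature map to its spatial mean, turning a (None, 4, 4, 512) tensor into (None, 512) with no parameters at all. A quick illustrative check of the equivalence with reduce_mean (not from the original notebook):

import tensorflow as tf

x = tf.random.normal((1, 4, 4, 512))
gap = tf.keras.layers.GlobalAveragePooling2D()(x)   # shape (1, 512)
manual = tf.reduce_mean(x, axis=[1, 2])             # same values, computed by hand
print(gap.shape, bool(tf.reduce_all(tf.abs(gap - manual) < 1e-5)))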
Building and Compiling the Model
In [ ]:
model = build_extended_model()
model.summary()
Model: "model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
input_1 (InputLayer)         [(None, 32, 32, 3)]       0
conv2d (Conv2D)              (None, 32, 32, 32)        896
batch_normalization (BatchNo (None, 32, 32, 32)        128
activation (Activation)      (None, 32, 32, 32)        0
conv2d_1 (Conv2D)            (None, 32, 32, 64)        18496
batch_normalization_1 (Batch (None, 32, 32, 64)        256
activation_1 (Activation)    (None, 32, 32, 64)        0
max_pooling2d (MaxPooling2D) (None, 16, 16, 64)        0
conv2d_2 (Conv2D)            (None, 16, 16, 128)       73856
batch_normalization_2 (Batch (None, 16, 16, 128)       512
activation_2 (Activation)    (None, 16, 16, 128)       0
conv2d_3 (Conv2D)            (None, 16, 16, 128)       147584
batch_normalization_3 (Batch (None, 16, 16, 128)       512
activation_3 (Activation)    (None, 16, 16, 128)       0
max_pooling2d_1 (MaxPooling2 (None, 8, 8, 128)         0
conv2d_4 (Conv2D)            (None, 8, 8, 256)         295168
batch_normalization_4 (Batch (None, 8, 8, 256)         1024
activation_4 (Activation)    (None, 8, 8, 256)         0
conv2d_5 (Conv2D)            (None, 8, 8, 256)         590080
batch_normalization_5 (Batch (None, 8, 8, 256)         1024
activation_5 (Activation)    (None, 8, 8, 256)         0
conv2d_6 (Conv2D)            (None, 8, 8, 512)         1180160
batch_normalization_6 (Batch (None, 8, 8, 512)         2048
activation_6 (Activation)    (None, 8, 8, 512)         0
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 512)         0
global_average_pooling2d (Gl (None, 512)               0
dropout (Dropout)            (None, 512)               0
dense (Dense)                (None, 300)               153900
dropout_1 (Dropout)          (None, 300)               0
dense_1 (Dense)              (None, 100)               30100
dropout_2 (Dropout)          (None, 100)               0
dense_2 (Dense)              (None, 10)                1010
=================================================================
Total params: 2,496,754
Trainable params: 2,494,002
Non-trainable params: 2,752
_________________________________________________________________
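The parameter savings are visible in the summary: Flatten fed 4 × 4 × 512 = 8,192 features into Dense(300) at a cost of 8,192 × 300 + 300 = 2,457,900 parameters, whereas global average pooling feeds only 512 features, costing 512 × 300 + 300 = 153,900. Total parameters drop from 4,800,754 to 2,496,754.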
In [ ]:
model.compile(optimizer=Adam(0.001), loss="categorical_crossentropy", metrics=["accuracy"])
Defining the Callbacks
In [ ]:
checkpoint_cb = ModelCheckpoint("my_keras_model.h5", save_best_only=True, verbose=1)
early_stopping_cb = EarlyStopping(patience=10, restore_best_weights=True)
reducelr_cb = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5, mode="min", verbose=1)
Training the Model
In [ ]:
batch_size = 32
history = model.fit(x=train_images, y=train_oh_labels, batch_size=batch_size, epochs=40, validation_split=0.2,
callbacks=[checkpoint_cb, early_stopping_cb, reducelr_cb])
Epoch 1/40 - 43s 33ms/step - loss: 1.6409 - accuracy: 0.3811 - val_loss: 1.8103 - val_accuracy: 0.3973
Epoch 00001: val_loss improved from inf to 1.81033, saving model to my_keras_model.h5
Epoch 2/40 - 41s 32ms/step - loss: 1.2078 - accuracy: 0.5730 - val_loss: 1.2468 - val_accuracy: 0.5722
Epoch 00002: val_loss improved from 1.81033 to 1.24684, saving model to my_keras_model.h5
Epoch 3/40 - 41s 33ms/step - loss: 0.9970 - accuracy: 0.6583 - val_loss: 1.3840 - val_accuracy: 0.5496
Epoch 00003: val_loss did not improve from 1.24684
Epoch 4/40 - 41s 33ms/step - loss: 0.8592 - accuracy: 0.7062 - val_loss: 1.0350 - val_accuracy: 0.6700
Epoch 00004: val_loss improved from 1.24684 to 1.03497, saving model to my_keras_model.h5
Epoch 5/40 - 41s 33ms/step - loss: 0.7464 - accuracy: 0.7508 - val_loss: 0.8347 - val_accuracy: 0.7101
Epoch 00005: val_loss improved from 1.03497 to 0.83466, saving model to my_keras_model.h5
Epoch 6/40 - 41s 33ms/step - loss: 0.6532 - accuracy: 0.7872 - val_loss: 1.2667 - val_accuracy: 0.6429
Epoch 00006: val_loss did not improve from 0.83466
Epoch 7/40 - 41s 33ms/step - loss: 0.5674 - accuracy: 0.8133 - val_loss: 0.7381 - val_accuracy: 0.7651
Epoch 00007: val_loss improved from 0.83466 to 0.73809, saving model to my_keras_model.h5
Epoch 8/40 - 41s 33ms/step - loss: 0.5080 - accuracy: 0.8344 - val_loss: 0.8084 - val_accuracy: 0.7535
Epoch 00008: val_loss did not improve from 0.73809
Epoch 9/40 - 43s 34ms/step - loss: 0.4401 - accuracy: 0.8574 - val_loss: 0.7177 - val_accuracy: 0.7732
Epoch 00009: val_loss improved from 0.73809 to 0.71768, saving model to my_keras_model.h5
Epoch 10/40 - 41s 33ms/step - loss: 0.3880 - accuracy: 0.8753 - val_loss: 0.6913 - val_accuracy: 0.7834
Epoch 00010: val_loss improved from 0.71768 to 0.69131, saving model to my_keras_model.h5
Epoch 11/40 - 41s 33ms/step - loss: 0.3414 - accuracy: 0.8883 - val_loss: 0.6659 - val_accuracy: 0.7917
Epoch 00011: val_loss improved from 0.69131 to 0.66594, saving model to my_keras_model.h5
Epoch 12/40 - 41s 33ms/step - loss: 0.3019 - accuracy: 0.9043 - val_loss: 0.6972 - val_accuracy: 0.8058
Epoch 00012: val_loss did not improve from 0.66594
Epoch 13/40 - 41s 33ms/step - loss: 0.2722 - accuracy: 0.9141 - val_loss: 0.7045 - val_accuracy: 0.7858
Epoch 00013: val_loss did not improve from 0.66594
Epoch 14/40 - 41s 33ms/step - loss: 0.2307 - accuracy: 0.9262 - val_loss: 0.7205 - val_accuracy: 0.8036
Epoch 00014: val_loss did not improve from 0.66594
Epoch 15/40 - 41s 33ms/step - loss: 0.2091 - accuracy: 0.9334 - val_loss: 0.6978 - val_accuracy: 0.8194
Epoch 00015: val_loss did not improve from 0.66594
Epoch 16/40 - 41s 33ms/step - loss: 0.1835 - accuracy: 0.9427 - val_loss: 0.5686 - val_accuracy: 0.8451
Epoch 00016: val_loss improved from 0.66594 to 0.56863, saving model to my_keras_model.h5
Epoch 17/40 - 41s 33ms/step - loss: 0.1639 - accuracy: 0.9481 - val_loss: 0.7006 - val_accuracy: 0.8084
Epoch 00017: val_loss did not improve from 0.56863
Epoch 18/40 - 41s 33ms/step - loss: 0.1541 - accuracy: 0.9513 - val_loss: 0.9184 - val_accuracy: 0.7837
Epoch 00018: val_loss did not improve from 0.56863
Epoch 19/40 - 41s 33ms/step - loss: 0.1323 - accuracy: 0.9582 - val_loss: 1.0587 - val_accuracy: 0.7794
Epoch 00019: val_loss did not improve from 0.56863
Epoch 20/40 - 41s 33ms/step - loss: 0.1216 - accuracy: 0.9617 - val_loss: 0.6943 - val_accuracy: 0.8257
Epoch 00020: val_loss did not improve from 0.56863
Epoch 21/40 - 43s 34ms/step - loss: 0.1113 - accuracy: 0.9654 - val_loss: 0.7083 - val_accuracy: 0.8313
Epoch 00021: val_loss did not improve from 0.56863
Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026.
Epoch 22/40 - 40s 32ms/step - loss: 0.0462 - accuracy: 0.9858 - val_loss: 0.6361 - val_accuracy: 0.8707
Epoch 00022: val_loss did not improve from 0.56863
Epoch 23/40 - 40s 32ms/step - loss: 0.0215 - accuracy: 0.9934 - val_loss: 0.7566 - val_accuracy: 0.8693
Epoch 00023: val_loss did not improve from 0.56863
Epoch 24/40 - 40s 32ms/step - loss: 0.0201 - accuracy: 0.9937 - val_loss: 0.8070 - val_accuracy: 0.8692
Epoch 00024: val_loss did not improve from 0.56863
Epoch 25/40 - 42s 34ms/step - loss: 0.0174 - accuracy: 0.9951 - val_loss: 0.8913 - val_accuracy: 0.8655
Epoch 00025: val_loss did not improve from 0.56863
Epoch 26/40 - 40s 32ms/step - loss: 0.0182 - accuracy: 0.9950 - val_loss: 0.8484 - val_accuracy: 0.8674
Epoch 00026: val_loss did not improve from 0.56863
Epoch 00026: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05.
In [ ]:
plt.plot(history.history["accuracy"], label="train")
plt.plot(history.history["val_accuracy"], label="valid")
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x7f31005c1750>
Evaluating on the Test Data
In [ ]:
model.evaluate(test_images, test_oh_labels, batch_size=batch_size, verbose=1)
313/313 [==============================] - 4s 12ms/step - loss: 0.5968 - accuracy: 0.8340
Out[ ]:
[0.5967729091644287, 0.8339999914169312]