In [2]:
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
In [1]:
import tensorflow as tf
from tensorflow import keras
In [39]:
fashion_mnist=keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
In [40]:
X_train_full.shape
Out[40]:
(60000, 28, 28)
In [41]:
X_train_full.dtype
Out[41]:
dtype('uint8')
In [42]:
#Create a validation set (pixel values are also scaled to the 0-1 range)
X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test/255.
In [6]:
class_names=['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker','Bag', 'Ankle boot']
In [7]:
class_names[y_train[0]]
Out[7]:
'Coat'
There are two ways to build a model with a Sequential object, shown below.
In [8]:
#Add layers to a Sequential object one by one
model=keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28,28])) #flatten each 2D image into a 1D array
model.add(keras.layers.Dense(300, activation='relu'))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
In [9]:
#Pass a list of layers when creating the Sequential object
model=keras.models.Sequential([
keras.layers.Flatten(input_shape=[28,28]),
keras.layers.Dense(300, activation='relu'),
keras.layers.Dense(100, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
In [10]:
#The parameter count reported for each layer includes the biases
model.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= flatten_1 (Flatten) (None, 784) 0 _________________________________________________________________ dense_3 (Dense) (None, 300) 235500 _________________________________________________________________ dense_4 (Dense) (None, 100) 30100 _________________________________________________________________ dense_5 (Dense) (None, 10) 1010 ================================================================= Total params: 266,610 Trainable params: 266,610 Non-trainable params: 0 _________________________________________________________________
In [11]:
#The model's layers can be inspected through the layers attribute
model.layers
Out[11]:
[<keras.layers.core.Flatten at 0x7f0fd13df910>,
 <keras.layers.core.Dense at 0x7f0fd13dfbd0>,
 <keras.layers.core.Dense at 0x7f0fcd559810>,
 <keras.layers.core.Dense at 0x7f0fcd552f50>]
In [12]:
hidden1=model.layers[1]
hidden1.name
Out[12]:
'dense_3'
In [13]:
#Layers can also be fetched by name: model.get_layer('dense_3')
A Dense layer initializes its weights randomly and its biases to zero.
In [14]:
weight, biases = hidden1.get_weights()
weight
Out[14]:
array([[-0.02803186, -0.06845329, -0.03220771, ...,  0.06311499, -0.02759992, -0.06233646],
       [-0.06671423,  0.02240002,  0.00717515, ..., -0.02287043,  0.0473233 , -0.02048782],
       [-0.03013454,  0.01878259,  0.03860784, ..., -0.01668715,  0.04071316,  0.03547712],
       ...,
       [ 0.04909506, -0.05112569, -0.01515996, ...,  0.00030376,  0.01682565,  0.01592625],
       [-0.05822124, -0.0358655 ,  0.02230024, ...,  0.05006564, -0.03168761,  0.01353095],
       [ 0.06232323,  0.03143685,  0.0651155 , ..., -0.03456565,  0.0018488 ,  0.06395857]], dtype=float32)
In [15]:
biases
Out[15]:
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
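The default initializers can be overridden per layer; a minimal sketch (the he_normal choice is an illustration, not used in this notebook):
#Hypothetical example: custom weight and bias initializers for a Dense layer.
layer = keras.layers.Dense(300, activation='relu',
                           kernel_initializer='he_normal', #weights
                           bias_initializer='zeros')       #biases (the default)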
Compiling the Model
In [16]:
#Compile everything in one call with the compile method
model.compile(loss='sparse_categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
In [17]:
#The string shortcuts above are equivalent to these objects
loss=keras.losses.sparse_categorical_crossentropy
optimizer=keras.optimizers.SGD()
metrics=[keras.metrics.sparse_categorical_accuracy]
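These objects can be passed to compile() directly; a sketch equivalent to the string version above:
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.SGD(),
              metrics=[keras.metrics.sparse_categorical_accuracy])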
Training and Evaluating the Model
In [18]:
history=model.fit(X_train, y_train, epochs=30,
validation_data = (X_valid, y_valid))
Epoch 1/30 1719/1719 [==============================] - 6s 3ms/step - loss: 0.7170 - accuracy: 0.7668 - val_loss: 0.5154 - val_accuracy: 0.8260 Epoch 2/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.4876 - accuracy: 0.8292 - val_loss: 0.4344 - val_accuracy: 0.8550 Epoch 3/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.4453 - accuracy: 0.8423 - val_loss: 0.4179 - val_accuracy: 0.8574 Epoch 4/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.4191 - accuracy: 0.8527 - val_loss: 0.4045 - val_accuracy: 0.8614 Epoch 5/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3985 - accuracy: 0.8596 - val_loss: 0.3915 - val_accuracy: 0.8628 Epoch 6/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3816 - accuracy: 0.8649 - val_loss: 0.3762 - val_accuracy: 0.8710 Epoch 7/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3676 - accuracy: 0.8709 - val_loss: 0.3811 - val_accuracy: 0.8696 Epoch 8/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3566 - accuracy: 0.8747 - val_loss: 0.3690 - val_accuracy: 0.8632 Epoch 9/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3449 - accuracy: 0.8773 - val_loss: 0.3543 - val_accuracy: 0.8776 Epoch 10/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3356 - accuracy: 0.8805 - val_loss: 0.3855 - val_accuracy: 0.8626 Epoch 11/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3267 - accuracy: 0.8831 - val_loss: 0.3572 - val_accuracy: 0.8766 Epoch 12/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3189 - accuracy: 0.8862 - val_loss: 0.3387 - val_accuracy: 0.8830 Epoch 13/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3111 - accuracy: 0.8884 - val_loss: 0.3466 - val_accuracy: 0.8774 Epoch 14/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3032 - accuracy: 0.8921 - val_loss: 0.3544 - val_accuracy: 0.8764 Epoch 15/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2956 - accuracy: 0.8940 - val_loss: 0.3298 - val_accuracy: 0.8792 Epoch 16/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2894 - accuracy: 0.8953 - val_loss: 0.3230 - val_accuracy: 0.8852 Epoch 17/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2840 - accuracy: 0.8977 - val_loss: 0.3152 - val_accuracy: 0.8878 Epoch 18/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2780 - accuracy: 0.8996 - val_loss: 0.3206 - val_accuracy: 0.8848 Epoch 19/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2741 - accuracy: 0.9017 - val_loss: 0.3125 - val_accuracy: 0.8856 Epoch 20/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2676 - accuracy: 0.9033 - val_loss: 0.3196 - val_accuracy: 0.8844 Epoch 21/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2631 - accuracy: 0.9045 - val_loss: 0.3191 - val_accuracy: 0.8852 Epoch 22/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2579 - accuracy: 0.9067 - val_loss: 0.3105 - val_accuracy: 0.8890 Epoch 23/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2525 - accuracy: 0.9089 - val_loss: 0.3065 - val_accuracy: 0.8892 Epoch 24/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2484 - accuracy: 0.9108 - val_loss: 0.3150 - val_accuracy: 0.8884 Epoch 25/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2427 
- accuracy: 0.9130 - val_loss: 0.3094 - val_accuracy: 0.8900 Epoch 26/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2388 - accuracy: 0.9143 - val_loss: 0.3084 - val_accuracy: 0.8894 Epoch 27/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2355 - accuracy: 0.9150 - val_loss: 0.3084 - val_accuracy: 0.8880 Epoch 28/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2314 - accuracy: 0.9165 - val_loss: 0.2952 - val_accuracy: 0.8892 Epoch 29/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2271 - accuracy: 0.9184 - val_loss: 0.3174 - val_accuracy: 0.8874 Epoch 30/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2231 - accuracy: 0.9203 - val_loss: 0.3029 - val_accuracy: 0.8884
The following `fit()` parameters can be tuned when the training set is imbalanced, or when some labels deserve more trust than others (for example, data labeled directly by experts rather than through a crowdsourcing platform):
- `class_weight`: assigns a higher weight to underrepresented classes and a lower weight to overrepresented ones.
- `sample_weight`: assigns a weight to each individual sample.
- Both parameters take floating-point weights; a sketch follows below.
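A minimal sketch of class_weight (the 4x weight on class 0 is an arbitrary illustration):
#Hypothetical example: weight class 0 four times as heavily as the others.
class_weight = {i: 1.0 for i in range(10)}
class_weight[0] = 4.0
history = model.fit(X_train, y_train, epochs=5,
                    validation_data=(X_valid, y_valid),
                    class_weight=class_weight)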
Visualization
In [19]:
#The training history stored in the history object can be converted to a DataFrame as shown below
import pandas as pd
pd.DataFrame(history.history)
Out[19]:
epoch | loss | accuracy | val_loss | val_accuracy |
---|---|---|---|---|
0 | 0.716999 | 0.766818 | 0.515371 | 0.8260 |
1 | 0.487569 | 0.829182 | 0.434398 | 0.8550 |
2 | 0.445338 | 0.842309 | 0.417937 | 0.8574 |
3 | 0.419123 | 0.852673 | 0.404473 | 0.8614 |
4 | 0.398548 | 0.859600 | 0.391544 | 0.8628 |
5 | 0.381620 | 0.864873 | 0.376193 | 0.8710 |
6 | 0.367615 | 0.870873 | 0.381107 | 0.8696 |
7 | 0.356572 | 0.874709 | 0.369023 | 0.8632 |
8 | 0.344909 | 0.877291 | 0.354325 | 0.8776 |
9 | 0.335562 | 0.880545 | 0.385482 | 0.8626 |
10 | 0.326686 | 0.883145 | 0.357167 | 0.8766 |
11 | 0.318884 | 0.886200 | 0.338718 | 0.8830 |
12 | 0.311131 | 0.888400 | 0.346564 | 0.8774 |
13 | 0.303175 | 0.892073 | 0.354419 | 0.8764 |
14 | 0.295559 | 0.893964 | 0.329752 | 0.8792 |
15 | 0.289381 | 0.895327 | 0.322977 | 0.8852 |
16 | 0.284024 | 0.897709 | 0.315151 | 0.8878 |
17 | 0.278042 | 0.899582 | 0.320636 | 0.8848 |
18 | 0.274060 | 0.901727 | 0.312549 | 0.8856 |
19 | 0.267634 | 0.903291 | 0.319623 | 0.8844 |
20 | 0.263137 | 0.904527 | 0.319118 | 0.8852 |
21 | 0.257899 | 0.906727 | 0.310493 | 0.8890 |
22 | 0.252484 | 0.908891 | 0.306494 | 0.8892 |
23 | 0.248396 | 0.910836 | 0.315040 | 0.8884 |
24 | 0.242713 | 0.913036 | 0.309438 | 0.8900 |
25 | 0.238843 | 0.914309 | 0.308445 | 0.8894 |
26 | 0.235501 | 0.915000 | 0.308440 | 0.8880 |
27 | 0.231399 | 0.916545 | 0.295220 | 0.8892 |
28 | 0.227098 | 0.918436 | 0.317428 | 0.8874 |
29 | 0.223134 | 0.920327 | 0.302926 | 0.8884 |
In [20]:
#Visualize the training and validation losses
import matplotlib.pyplot as plt
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show()
- In the early epochs the validation loss looks lower than the training loss, as if the model performed better on the validation set. This is because the validation loss is computed at the end of each epoch, whereas the training loss is averaged over the batches as the epoch progresses.
- The training loss and accuracy curves should therefore be shifted half an epoch to the left; after that shift, the training and validation losses nearly coincide early in training (see the sketch below).
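A minimal sketch of that half-epoch shift, assuming the history object from above:
#Shift the training curve half an epoch to the left before comparing.
import numpy as np
hist = pd.DataFrame(history.history)
epochs = np.arange(len(hist))
plt.plot(epochs - 0.5, hist['loss'], label='loss (shifted)')
plt.plot(epochs, hist['val_loss'], label='val_loss')
plt.legend(); plt.grid(True); plt.show()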
In [21]:
model.evaluate(X_test, y_test)
313/313 [==============================] - 1s 2ms/step - loss: 0.3353 - accuracy: 0.8808
Out[21]:
[0.33531445264816284, 0.8808000087738037]
Using the Model to Make Predictions
In [22]:
#Output predictions for the first three test instances
X_new=X_test[:3]
y_proba=model.predict(X_new)
y_proba.round(2)
Out[22]:
array([[0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.03, 0.  , 0.97],
       [0.  , 0.  , 1.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  ],
       [0.  , 1.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  ]], dtype=float32)
In [23]:
import numpy as np
classes_x=np.argmax(y_proba, axis=1)
np.array(class_names)[classes_x]
Out[23]:
array(['Ankle boot', 'Pullover', 'Trouser'], dtype='<U11')
In [24]:
y_new=y_test[:3]
y_new
Out[24]:
array([9, 2, 1], dtype=uint8)
10.2.3 Building a Regression MLP Using the Sequential API
Creating the dataset
In [25]:
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing=fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full)
scaler=StandardScaler()
X_train=scaler.fit_transform(X_train)
X_valid=scaler.transform(X_valid)
X_test=scaler.transform(X_test)
Downloading Cal. housing from https://ndownloader.figshare.com/files/5976036 to /root/scikit_learn_data
Creating the model
In [26]:
model=keras.models.Sequential([
keras.layers.Dense(30, activation='relu', input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
model.compile(loss='mean_squared_error', optimizer='sgd')
history=model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
mse_test=model.evaluate(X_test, y_test)
X_new=X_test[:3]
y_pred=model.predict(X_new)
Epoch 1/20 363/363 [==============================] - 1s 2ms/step - loss: 0.8285 - val_loss: 1.6182 Epoch 2/20 363/363 [==============================] - 1s 1ms/step - loss: 1.1497 - val_loss: 0.4437 Epoch 3/20 363/363 [==============================] - 1s 1ms/step - loss: 1.0110 - val_loss: 6.2648 Epoch 4/20 363/363 [==============================] - 1s 2ms/step - loss: 1.1937 - val_loss: 182.0186 Epoch 5/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 6/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 7/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 8/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 9/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 10/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 11/20 363/363 [==============================] - 0s 1ms/step - loss: nan - val_loss: nan Epoch 12/20 363/363 [==============================] - 1s 1ms/step - loss: nan - val_loss: nan Epoch 13/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 14/20 363/363 [==============================] - 1s 1ms/step - loss: nan - val_loss: nan Epoch 15/20 363/363 [==============================] - 1s 1ms/step - loss: nan - val_loss: nan Epoch 16/20 363/363 [==============================] - 1s 1ms/step - loss: nan - val_loss: nan Epoch 17/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan Epoch 18/20 363/363 [==============================] - 1s 1ms/step - loss: nan - val_loss: nan Epoch 19/20 363/363 [==============================] - 1s 1ms/step - loss: nan - val_loss: nan Epoch 20/20 363/363 [==============================] - 1s 2ms/step - loss: nan - val_loss: nan 162/162 [==============================] - 0s 1ms/step - loss: nan
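Note that the loss above diverged to NaN after a few epochs. A plausible remedy (my assumption, not part of the original run) is an explicit, smaller SGD learning rate:
#Recompile with a smaller learning rate to tame the divergence.
model.compile(loss='mean_squared_error',
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))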
10.2.4 Building Complex Models Using the Functional API
Wide & Deep NN¶
- 깊게 쌓은 층을 통해 복잡한 패턴을 학습한다.
- 짧게 쌓은 층을 통해 간단한 규칙을 학습한다.
When Multiple Outputs Are Needed
- Classifying the main object in a picture while also performing a regression task (finding the object's center coordinates, width, and height).
- Performing several tasks on the same data. For example, a face recognition model can classify the person's emotion and detect whether they wear glasses through separate outputs of a single model.
- Attaching an auxiliary output to a subnetwork lets you check whether that subnetwork learns something useful on its own, without relying on the rest of the network.
In [27]:
#Build the model with the functional API rather than the Sequential API, for more complex architectures
input_=keras.layers.Input(shape=X_train.shape[1:])
hidden1=keras.layers.Dense(30, activation='relu')(input_)
hidden2=keras.layers.Dense(30, activation='relu')(hidden1)
concat=keras.layers.Concatenate()([input_, hidden2]) #merge the input and hidden2 with a Concatenate layer
output=keras.layers.Dense(1)(concat)
model=keras.Model(inputs=[input_], outputs=[output])
In [28]:
#Modeling when the wide and deep paths take different inputs
input_A=keras.layers.Input(shape=[5], name='wide_input')
input_B=keras.layers.Input(shape=[6], name='deep_input')
hidden1=keras.layers.Dense(30, activation='relu')(input_B)
hidden2=keras.layers.Dense(30, activation='relu')(hidden1)
concat=keras.layers.concatenate([input_A, hidden2])
output=keras.layers.Dense(1, name='output')(concat)
model=keras.Model(inputs=[input_A, input_B], outputs=[output])
In [29]:
model.compile(loss='mse', optimizer=keras.optimizers.SGD(learning_rate=1e-3))
X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:]
X_new_A, X_new_B = X_test_A[:3], X_test_B[:3]
history=model.fit((X_train_A, X_train_B), y_train, epochs=20,
validation_data=((X_valid_A, X_valid_B), y_valid))
mse_test=model.evaluate((X_test_A, X_test_B), y_test)
y_pred=model.predict((X_new_A, X_new_B))
Epoch 1/20 363/363 [==============================] - 1s 2ms/step - loss: 2.2538 - val_loss: 1.0137 Epoch 2/20 363/363 [==============================] - 1s 2ms/step - loss: 0.8822 - val_loss: 0.7756 Epoch 3/20 363/363 [==============================] - 1s 2ms/step - loss: 0.7339 - val_loss: 0.7132 Epoch 4/20 363/363 [==============================] - 1s 2ms/step - loss: 0.6906 - val_loss: 0.6859 Epoch 5/20 363/363 [==============================] - 1s 2ms/step - loss: 0.6636 - val_loss: 0.6631 Epoch 6/20 363/363 [==============================] - 1s 2ms/step - loss: 0.6408 - val_loss: 0.6413 Epoch 7/20 363/363 [==============================] - 1s 2ms/step - loss: 0.6214 - val_loss: 0.6247 Epoch 8/20 363/363 [==============================] - 1s 2ms/step - loss: 0.6030 - val_loss: 0.6109 Epoch 9/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5873 - val_loss: 0.5927 Epoch 10/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5723 - val_loss: 0.5766 Epoch 11/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5579 - val_loss: 0.5671 Epoch 12/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5448 - val_loss: 0.5550 Epoch 13/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5320 - val_loss: 0.5433 Epoch 14/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5218 - val_loss: 0.5330 Epoch 15/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5126 - val_loss: 0.5239 Epoch 16/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5022 - val_loss: 0.5160 Epoch 17/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4945 - val_loss: 0.5042 Epoch 18/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4848 - val_loss: 0.5031 Epoch 19/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4772 - val_loss: 0.4902 Epoch 20/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4716 - val_loss: 0.4834 162/162 [==============================] - 0s 1ms/step - loss: 0.4828
In [30]:
#Build a model with an auxiliary output
input_A=keras.layers.Input(shape=[5], name='wide_input')
input_B=keras.layers.Input(shape=[6], name='deep_input')
hidden1=keras.layers.Dense(30, activation='relu')(input_B)
hidden2=keras.layers.Dense(30, activation='relu')(hidden1)
concat=keras.layers.concatenate([input_A, hidden2])
output=keras.layers.Dense(1, name='main_output')(concat)
aux_output=keras.layers.Dense(1, name='aux_output')(hidden2)
model=keras.Model(inputs=[input_A, input_B], outputs=[output, aux_output])
Compile
- If only one loss function is passed at compile time, the same loss is applied to every output, so set a loss for each output instead.
- Keras adds up all the listed losses to compute the final training loss; the mix can be controlled with appropriate `loss_weights` values.
In [31]:
model.compile(loss=['mse', 'mse'], loss_weights=[0.9, 0.1], optimizer='sgd')
In [32]:
history=model.fit(
[X_train_A, X_train_B], [y_train, y_train], epochs=20,
validation_data=([X_valid_A, X_valid_B], [y_valid, y_valid]))
Epoch 1/20 363/363 [==============================] - 1s 2ms/step - loss: 1.0231 - main_output_loss: 0.9375 - aux_output_loss: 1.7939 - val_loss: 0.6102 - val_main_output_loss: 0.5360 - val_aux_output_loss: 1.2777 Epoch 2/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5768 - main_output_loss: 0.5109 - aux_output_loss: 1.1697 - val_loss: 0.6254 - val_main_output_loss: 0.5701 - val_aux_output_loss: 1.1233 Epoch 3/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5313 - main_output_loss: 0.4795 - aux_output_loss: 0.9975 - val_loss: 0.6520 - val_main_output_loss: 0.6213 - val_aux_output_loss: 0.9275 Epoch 4/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5236 - main_output_loss: 0.4869 - aux_output_loss: 0.8536 - val_loss: 0.4879 - val_main_output_loss: 0.4503 - val_aux_output_loss: 0.8257 Epoch 5/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4616 - main_output_loss: 0.4289 - aux_output_loss: 0.7564 - val_loss: 0.4653 - val_main_output_loss: 0.4370 - val_aux_output_loss: 0.7207 Epoch 6/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4370 - main_output_loss: 0.4090 - aux_output_loss: 0.6892 - val_loss: 0.4476 - val_main_output_loss: 0.4241 - val_aux_output_loss: 0.6597 Epoch 7/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4270 - main_output_loss: 0.4033 - aux_output_loss: 0.6405 - val_loss: 0.4351 - val_main_output_loss: 0.4140 - val_aux_output_loss: 0.6257 Epoch 8/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4198 - main_output_loss: 0.3990 - aux_output_loss: 0.6077 - val_loss: 0.4226 - val_main_output_loss: 0.4041 - val_aux_output_loss: 0.5888 Epoch 9/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4099 - main_output_loss: 0.3898 - aux_output_loss: 0.5911 - val_loss: 0.4159 - val_main_output_loss: 0.3988 - val_aux_output_loss: 0.5692 Epoch 10/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3981 - main_output_loss: 0.3792 - aux_output_loss: 0.5677 - val_loss: 0.4063 - val_main_output_loss: 0.3901 - val_aux_output_loss: 0.5518 Epoch 11/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3915 - main_output_loss: 0.3741 - aux_output_loss: 0.5484 - val_loss: 0.3998 - val_main_output_loss: 0.3851 - val_aux_output_loss: 0.5323 Epoch 12/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3840 - main_output_loss: 0.3675 - aux_output_loss: 0.5328 - val_loss: 0.4180 - val_main_output_loss: 0.4071 - val_aux_output_loss: 0.5160 Epoch 13/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3787 - main_output_loss: 0.3633 - aux_output_loss: 0.5166 - val_loss: 0.3864 - val_main_output_loss: 0.3737 - val_aux_output_loss: 0.5011 Epoch 14/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3750 - main_output_loss: 0.3599 - aux_output_loss: 0.5109 - val_loss: 0.3840 - val_main_output_loss: 0.3716 - val_aux_output_loss: 0.4958 Epoch 15/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3699 - main_output_loss: 0.3557 - aux_output_loss: 0.4976 - val_loss: 0.3788 - val_main_output_loss: 0.3670 - val_aux_output_loss: 0.4848 Epoch 16/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3640 - main_output_loss: 0.3501 - aux_output_loss: 0.4889 - val_loss: 0.4005 - val_main_output_loss: 0.3922 - val_aux_output_loss: 0.4756 Epoch 17/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3643 - main_output_loss: 0.3507 - 
aux_output_loss: 0.4863 - val_loss: 0.3788 - val_main_output_loss: 0.3671 - val_aux_output_loss: 0.4840 Epoch 18/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3577 - main_output_loss: 0.3449 - aux_output_loss: 0.4731 - val_loss: 0.3802 - val_main_output_loss: 0.3686 - val_aux_output_loss: 0.4846 Epoch 19/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3545 - main_output_loss: 0.3422 - aux_output_loss: 0.4653 - val_loss: 0.3775 - val_main_output_loss: 0.3669 - val_aux_output_loss: 0.4731 Epoch 20/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3509 - main_output_loss: 0.3387 - aux_output_loss: 0.4610 - val_loss: 0.3656 - val_main_output_loss: 0.3553 - val_aux_output_loss: 0.4589
In [33]:
#evaluate returns the total loss along with the individual loss of each output
total_loss, main_loss, aux_loss = model.evaluate([X_test_A, X_test_B], [y_test, y_test])
162/162 [==============================] - 0s 1ms/step - loss: 0.3688 - main_output_loss: 0.3578 - aux_output_loss: 0.4678
In [34]:
#predict returns predictions for each output
y_pred_main, y_pred_aux=model.predict([X_new_A, X_new_B])
10.2.5 Building Dynamic Models Using the Subclassing API
In [35]:
class WideAndDeepModel(keras.Model):
    def __init__(self, units=30, activation='relu', **kwargs):
        #subclass keras.Model; super().__init__ handles standard args such as name
        super().__init__(**kwargs)
        self.hidden1=keras.layers.Dense(units, activation=activation)
        self.hidden2=keras.layers.Dense(units, activation=activation)
        self.main_output=keras.layers.Dense(1)
        self.aux_output=keras.layers.Dense(1)
    def call(self, inputs):
        input_A, input_B=inputs
        hidden1=self.hidden1(input_B)
        hidden2=self.hidden2(hidden1)
        concat=keras.layers.concatenate([input_A, hidden2])
        main_output=self.main_output(concat)
        aux_output=self.aux_output(hidden2)
        return main_output, aux_output
model=WideAndDeepModel()
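A usage sketch (my assumption, mirroring the compile/fit pattern of the functional model above):
#The subclassed model compiles and trains like any other Keras model.
model.compile(loss=['mse', 'mse'], loss_weights=[0.9, 0.1], optimizer='sgd')
history=model.fit((X_train_A, X_train_B), (y_train, y_train), epochs=10,
                  validation_data=((X_valid_A, X_valid_B), (y_valid, y_valid)))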
10.2.6 Saving and Restoring a Model
Sequential API and functional API
- With the Sequential or functional API, a trained Keras model can be saved in the HDF5 format.
- Saving stores the model's architecture, every layer's parameters (weights and biases), and the optimizer.
- Save with `model.save()`; restore a saved model by assigning the return value of `keras.models.load_model()`.
Subclassing API
- With the subclassing API, save the parameters with `save_weights()` and load them with `load_weights()`; everything else must be handled manually.
- Alternatively, the whole object can be serialized to a binary file with the pickle module. A sketch of the weights approach follows below.
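A minimal sketch for the subclassed case (the file name is arbitrary; the model must have been built, e.g. by calling it on data, before its weights exist):
#Save and restore only the parameters of a subclassed model.
model.save_weights('my_weights.ckpt')
model.load_weights('my_weights.ckpt') #requires a model with the same architecture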
In [36]:
#Saving a model built with the Sequential API
model=keras.models.Sequential([
keras.layers.Dense(30, activation='relu', input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
model.compile(loss='mean_squared_error', optimizer='sgd')
model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
model.save('my_keras_model.h5')
Epoch 1/20 363/363 [==============================] - 1s 2ms/step - loss: 0.7228 - val_loss: 0.7342 Epoch 2/20 363/363 [==============================] - 1s 2ms/step - loss: 0.5897 - val_loss: 0.6212 Epoch 3/20 363/363 [==============================] - 1s 1ms/step - loss: 0.5428 - val_loss: 0.4586 Epoch 4/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4365 - val_loss: 0.4345 Epoch 5/20 363/363 [==============================] - 1s 2ms/step - loss: 0.4167 - val_loss: 0.4403 Epoch 6/20 363/363 [==============================] - 1s 1ms/step - loss: 0.4030 - val_loss: 0.4175 Epoch 7/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3955 - val_loss: 0.4105 Epoch 8/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3897 - val_loss: 0.4026 Epoch 9/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3863 - val_loss: 0.4005 Epoch 10/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3829 - val_loss: 0.3968 Epoch 11/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3794 - val_loss: 0.3932 Epoch 12/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3850 - val_loss: 0.3948 Epoch 13/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3720 - val_loss: 0.3896 Epoch 14/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3813 - val_loss: 0.3882 Epoch 15/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3749 - val_loss: 0.3839 Epoch 16/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3651 - val_loss: 0.3900 Epoch 17/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3629 - val_loss: 0.3876 Epoch 18/20 363/363 [==============================] - 1s 2ms/step - loss: 0.3640 - val_loss: 0.3776 Epoch 19/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3582 - val_loss: 0.3788 Epoch 20/20 363/363 [==============================] - 1s 1ms/step - loss: 0.3609 - val_loss: 0.3787
In [37]:
#Load the saved model
model=keras.models.load_model('my_keras_model.h5')
10.2.7 Using Callbacks
Callbacks
- Passed through a parameter of the fit() method; callbacks can be invoked at the start or end of an epoch, or before and after each batch is processed.
- By default they are invoked at the end of every epoch.
1) ModelCheckpoint
- `keras.callbacks.ModelCheckpoint()` saves checkpoints of the model at regular intervals during training (the `period` parameter defaults to 1, i.e., every epoch).
- With `save_best_only=True`, only the model that performs best on the validation set is saved.
2) EarlyStopping
- Stops training when the score on the validation set has not improved for a number of epochs.
- That number of epochs is set with the `patience` parameter.
etc.) Custom callbacks
- You can write your own callbacks if needed.
- A custom callback can implement on_train_begin(), on_train_end(), on_epoch_begin(), on_epoch_end(), on_batch_begin(), on_batch_end(), and so on.
In [43]:
#Using the checkpoint callback (the layers and compile line garbled in the original are reconstructed from the identical cells earlier in this notebook)
model=keras.models.Sequential([
keras.layers.Flatten(input_shape=[28,28]),
keras.layers.Dense(300, activation='relu'),
keras.layers.Dense(100, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
checkpoint_cb=keras.callbacks.ModelCheckpoint('my_keras_model.h5')
history=model.fit(X_train, y_train, epochs=10, callbacks=[checkpoint_cb])
Epoch 1/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.7176 - accuracy: 0.7666 Epoch 2/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.4889 - accuracy: 0.8291 Epoch 3/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.4429 - accuracy: 0.8451 Epoch 4/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.4148 - accuracy: 0.8541 Epoch 5/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3952 - accuracy: 0.8609 Epoch 6/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3780 - accuracy: 0.8674 Epoch 7/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3647 - accuracy: 0.8717 Epoch 8/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3534 - accuracy: 0.8747 Epoch 9/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3426 - accuracy: 0.8778 Epoch 10/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3337 - accuracy: 0.8815
In [45]:
#checkpoint save_best_only=True
checkpoint_cb=keras.callbacks.ModelCheckpoint('my_keras_model.h5',
save_best_only=True)
history=model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb])
model=keras.models.load_model('my_keras_model.h5')
Epoch 1/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3248 - accuracy: 0.8840 - val_loss: 0.3565 - val_accuracy: 0.8712 Epoch 2/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3176 - accuracy: 0.8871 - val_loss: 0.3257 - val_accuracy: 0.8846 Epoch 3/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3102 - accuracy: 0.8892 - val_loss: 0.3376 - val_accuracy: 0.8832 Epoch 4/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.3043 - accuracy: 0.8908 - val_loss: 0.3338 - val_accuracy: 0.8810 Epoch 5/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2970 - accuracy: 0.8931 - val_loss: 0.3562 - val_accuracy: 0.8688 Epoch 6/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2910 - accuracy: 0.8963 - val_loss: 0.3249 - val_accuracy: 0.8844 Epoch 7/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2859 - accuracy: 0.8975 - val_loss: 0.3406 - val_accuracy: 0.8802 Epoch 8/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2801 - accuracy: 0.8997 - val_loss: 0.3197 - val_accuracy: 0.8854 Epoch 9/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2758 - accuracy: 0.9011 - val_loss: 0.3141 - val_accuracy: 0.8864 Epoch 10/10 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2705 - accuracy: 0.9025 - val_loss: 0.3103 - val_accuracy: 0.8894
In [47]:
#EarlyStopping
early_stopping_cb=keras.callbacks.EarlyStopping(patience=10,
restore_best_weights=True)
history=model.fit(X_train,y_train,epochs=100,
validation_data=(X_valid,y_valid),
callbacks=[checkpoint_cb, early_stopping_cb])
Epoch 1/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2655 - accuracy: 0.9044 - val_loss: 0.3152 - val_accuracy: 0.8872 Epoch 2/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2610 - accuracy: 0.9059 - val_loss: 0.3209 - val_accuracy: 0.8838 Epoch 3/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2563 - accuracy: 0.9072 - val_loss: 0.3160 - val_accuracy: 0.8888 Epoch 4/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2520 - accuracy: 0.9089 - val_loss: 0.3009 - val_accuracy: 0.8948 Epoch 5/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2477 - accuracy: 0.9109 - val_loss: 0.3082 - val_accuracy: 0.8910 Epoch 6/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2436 - accuracy: 0.9126 - val_loss: 0.3077 - val_accuracy: 0.8912 Epoch 7/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2402 - accuracy: 0.9134 - val_loss: 0.2952 - val_accuracy: 0.8952 Epoch 8/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2349 - accuracy: 0.9151 - val_loss: 0.3054 - val_accuracy: 0.8906 Epoch 9/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2308 - accuracy: 0.9164 - val_loss: 0.3039 - val_accuracy: 0.8908 Epoch 10/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2286 - accuracy: 0.9184 - val_loss: 0.3165 - val_accuracy: 0.8842 Epoch 11/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2252 - accuracy: 0.9199 - val_loss: 0.2963 - val_accuracy: 0.8930 Epoch 12/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2216 - accuracy: 0.9206 - val_loss: 0.2996 - val_accuracy: 0.8900 Epoch 13/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2182 - accuracy: 0.9213 - val_loss: 0.2954 - val_accuracy: 0.8946 Epoch 14/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2143 - accuracy: 0.9226 - val_loss: 0.2941 - val_accuracy: 0.8948 Epoch 15/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2108 - accuracy: 0.9245 - val_loss: 0.3026 - val_accuracy: 0.8952 Epoch 16/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2088 - accuracy: 0.9246 - val_loss: 0.2982 - val_accuracy: 0.8962 Epoch 17/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.2048 - accuracy: 0.9265 - val_loss: 0.3069 - val_accuracy: 0.8900 Epoch 18/100 1719/1719 [==============================] - 6s 3ms/step - loss: 0.2018 - accuracy: 0.9274 - val_loss: 0.2906 - val_accuracy: 0.8978 Epoch 19/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1989 - accuracy: 0.9287 - val_loss: 0.2970 - val_accuracy: 0.8890 Epoch 20/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1954 - accuracy: 0.9298 - val_loss: 0.2915 - val_accuracy: 0.8966 Epoch 21/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1933 - accuracy: 0.9307 - val_loss: 0.2991 - val_accuracy: 0.8936 Epoch 22/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1894 - accuracy: 0.9320 - val_loss: 0.3128 - val_accuracy: 0.8874 Epoch 23/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1868 - accuracy: 0.9326 - val_loss: 0.3093 - val_accuracy: 0.8916 Epoch 24/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1830 - accuracy: 0.9348 - val_loss: 0.3042 - val_accuracy: 0.8978 Epoch 25/100 1719/1719 [==============================] - 
5s 3ms/step - loss: 0.1804 - accuracy: 0.9361 - val_loss: 0.3136 - val_accuracy: 0.8896 Epoch 26/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1782 - accuracy: 0.9373 - val_loss: 0.2982 - val_accuracy: 0.8968 Epoch 27/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1744 - accuracy: 0.9381 - val_loss: 0.2952 - val_accuracy: 0.8984 Epoch 28/100 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1738 - accuracy: 0.9386 - val_loss: 0.3014 - val_accuracy: 0.8936
In [48]:
#Custom callback
class PrintValTrainRatioCallback(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        print('\nval/train: {:.2f}'.format(logs['val_loss']/logs['loss']))
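A usage sketch (my assumption; the callback only requires that validation data be passed to fit()):
val_train_cb=PrintValTrainRatioCallback()
history=model.fit(X_train, y_train, epochs=1,
                  validation_data=(X_valid, y_valid),
                  callbacks=[val_train_cb])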
10.2.8 Visualization Using TensorBoard
- To use TensorBoard, the program must write the data to be visualized to binary log files called event files.
- Each binary data record is called a summary.
- The TensorBoard server monitors the log directory, automatically picks up changes, and updates its graphs.
- Typically the TensorBoard server points at a root log directory, and the program writes its events to a different subdirectory on each run.
In [50]:
#Define the root log directory
import os
root_logdir=os.path.join(os.curdir, 'my_logs')
def get_run_logdir():
    import time
    run_id=time.strftime('run_%Y_%m_%d-%H_%M_%S')
    return os.path.join(root_logdir, run_id)
run_logdir=get_run_logdir() #e.g. './my_logs/run_2021_09_22-15_17_32'
In [54]:
tensorboard_cb=keras.callbacks.TensorBoard(run_logdir) #the TensorBoard callback creates the log directory and writes the event files
history=model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid),
callbacks=[tensorboard_cb])
Epoch 1/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1987 - accuracy: 0.9289 - val_loss: 0.2890 - val_accuracy: 0.8942 Epoch 2/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1960 - accuracy: 0.9293 - val_loss: 0.2973 - val_accuracy: 0.8920 Epoch 3/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1931 - accuracy: 0.9318 - val_loss: 0.3028 - val_accuracy: 0.8882 Epoch 4/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1906 - accuracy: 0.9324 - val_loss: 0.2894 - val_accuracy: 0.8970 Epoch 5/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1874 - accuracy: 0.9333 - val_loss: 0.2843 - val_accuracy: 0.9018 Epoch 6/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1832 - accuracy: 0.9346 - val_loss: 0.2880 - val_accuracy: 0.8984 Epoch 7/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1802 - accuracy: 0.9355 - val_loss: 0.2918 - val_accuracy: 0.8962 Epoch 8/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1785 - accuracy: 0.9365 - val_loss: 0.2870 - val_accuracy: 0.9006 Epoch 9/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1752 - accuracy: 0.9383 - val_loss: 0.2892 - val_accuracy: 0.8976 Epoch 10/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1724 - accuracy: 0.9386 - val_loss: 0.3081 - val_accuracy: 0.8942 Epoch 11/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1707 - accuracy: 0.9393 - val_loss: 0.2939 - val_accuracy: 0.8992 Epoch 12/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1674 - accuracy: 0.9405 - val_loss: 0.2936 - val_accuracy: 0.8980 Epoch 13/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1648 - accuracy: 0.9417 - val_loss: 0.3111 - val_accuracy: 0.8960 Epoch 14/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1618 - accuracy: 0.9417 - val_loss: 0.2985 - val_accuracy: 0.8952 Epoch 15/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1604 - accuracy: 0.9430 - val_loss: 0.2962 - val_accuracy: 0.9038 Epoch 16/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1589 - accuracy: 0.9425 - val_loss: 0.2952 - val_accuracy: 0.9002 Epoch 17/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1551 - accuracy: 0.9455 - val_loss: 0.3786 - val_accuracy: 0.8696 Epoch 18/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1522 - accuracy: 0.9468 - val_loss: 0.2972 - val_accuracy: 0.8972 Epoch 19/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1501 - accuracy: 0.9468 - val_loss: 0.3085 - val_accuracy: 0.8978 Epoch 20/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1480 - accuracy: 0.9482 - val_loss: 0.3053 - val_accuracy: 0.8980 Epoch 21/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1461 - accuracy: 0.9487 - val_loss: 0.2987 - val_accuracy: 0.9018 Epoch 22/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1442 - accuracy: 0.9488 - val_loss: 0.3055 - val_accuracy: 0.8942 Epoch 23/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1410 - accuracy: 0.9499 - val_loss: 0.3086 - val_accuracy: 0.8972 Epoch 24/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1386 - accuracy: 0.9509 - val_loss: 0.3215 - val_accuracy: 0.8934 Epoch 25/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1358 
- accuracy: 0.9513 - val_loss: 0.3375 - val_accuracy: 0.8842 Epoch 26/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1344 - accuracy: 0.9524 - val_loss: 0.3012 - val_accuracy: 0.9020 Epoch 27/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1320 - accuracy: 0.9533 - val_loss: 0.3132 - val_accuracy: 0.8986 Epoch 28/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1307 - accuracy: 0.9548 - val_loss: 0.3124 - val_accuracy: 0.8984 Epoch 29/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1293 - accuracy: 0.9538 - val_loss: 0.3207 - val_accuracy: 0.8950 Epoch 30/30 1719/1719 [==============================] - 5s 3ms/step - loss: 0.1243 - accuracy: 0.9564 - val_loss: 0.3171 - val_accuracy: 0.8958
In [56]:
%load_ext tensorboard
%tensorboard --logdir=./my_logs --port=6006
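Custom data can also be written to event files directly; a minimal sketch using the tf.summary API (the scalar name and values are illustrative):
test_logdir=get_run_logdir()
writer=tf.summary.create_file_writer(test_logdir)
with writer.as_default():
    for step in range(1, 101):
        tf.summary.scalar('my_scalar', np.sin(step/10), step=step)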