널리 알려진 fashion mnist data에 대한 DNN 구현 예제입니다.
fashion mnist dataset : https://github.com/zalandoresearch/fashion-mnist
- 총 70,000개
- 28x28 grayscale image
- 10 classes
tensorflow version : 2.6
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
# Notebook-style echo of the installed TensorFlow version (expected 2.6 per the
# header); as a plain script this bare expression has no effect.
tf.__version__
# Load training and eval data from tf.keras.
# train: 60,000 images, test: 10,000 images; each 28x28 grayscale, labels 0-9.
(train_data, train_labels), (test_data, test_labels) = \
    tf.keras.datasets.fashion_mnist.load_data()

# Hold out 10% of the training set as a validation split.
train_data, valid_data, train_labels, valid_labels = \
    train_test_split(train_data, train_labels, test_size=0.1, shuffle=True)

# Visual sanity check on one held-out image.
plt.imshow(valid_data[0].reshape(28, 28))
plt.colorbar()
plt.show()


def _preprocess(images, labels):
    """Scale pixels from [0, 255] to [0, 1], flatten each image to a
    784-dim float32 vector, and cast labels to int32.

    Returns the (images, labels) pair; inputs are not modified in place.
    """
    images = (images / 255.).reshape(-1, 28 * 28).astype(np.float32)
    return images, labels.astype(np.int32)


# The same normalization applies to all three splits (was copy-pasted 3x).
train_data, train_labels = _preprocess(train_data, train_labels)
test_data, test_labels = _preprocess(test_data, test_labels)
valid_data, valid_labels = _preprocess(valid_data, valid_labels)

print(train_data.shape, train_labels.shape)
print(test_data.shape, test_labels.shape)
print(valid_data.shape, valid_labels.shape)
def one_hot_label(image, label):
    """Map an (image, sparse-label) pair to (image, one-hot label).

    The label is expanded to a depth-10 one-hot vector, matching the
    10 Fashion-MNIST classes; the image passes through unchanged.
    """
    return image, tf.one_hot(label, depth=10)
batch_size = 32
max_epochs = 10

# Training pipeline: shuffle, one-hot encode, repeat indefinitely, then batch.
train_dataset = (
    tf.data.Dataset.from_tensor_slices((train_data, train_labels))
    .shuffle(buffer_size=10000)
    .map(one_hot_label)
    .repeat()
    .batch(batch_size=batch_size)
)
print(train_dataset)

# Test pipeline: single pass, no shuffling — just encode and batch.
test_dataset = (
    tf.data.Dataset.from_tensor_slices((test_data, test_labels))
    .map(one_hot_label)
    .batch(batch_size=batch_size)
)
print(test_dataset)

# Validation pipeline: encode, repeat (bounded later via validation_steps), batch.
valid_dataset = (
    tf.data.Dataset.from_tensor_slices((valid_data, valid_labels))
    .map(one_hot_label)
    .repeat()
    .batch(batch_size=batch_size)
)
print(valid_dataset)
# MLP classifier: three Dense -> BatchNorm -> ReLU stages (64/32/16 units,
# with dropout after the second stage), then a 10-way softmax head.
model = tf.keras.Sequential([
    layers.Dense(64),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dense(32),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dropout(0.2),
    layers.Dense(16),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dense(10, activation='softmax'),
])

# Softmax output => probabilities, so from_logits=False.
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

# Push one sample through the untrained model: builds the layers (no explicit
# input shape was given) and sanity-checks the output distribution.
predictions = model(train_data[0:1], training=False)
print("Predictions: ", predictions.numpy())
# Train using `tf.data.Dataset`. Both train and valid datasets repeat()
# forever, so steps_per_epoch / validation_steps must bound each pass.
history = model.fit(train_dataset,
                    epochs=max_epochs,
                    steps_per_epoch=int(len(train_data) / batch_size),
                    validation_data=valid_dataset,
                    # BUG FIX: was int(len(train_data) / batch_size) — the
                    # validation pass must be sized by the VALIDATION set,
                    # otherwise each epoch loops ~10x over the repeated
                    # validation data.
                    validation_steps=int(len(valid_data) / batch_size))

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(max_epochs)

# Side-by-side curves: accuracy (left) and loss (right) per epoch.
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Valid Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Valid Loss')
plt.show()

# Final evaluation on the (finite, non-repeating) test dataset.
results = model.evaluate(test_dataset)
# loss
print("loss value: {:.3f}".format(results[0]))
# accuracy — BUG FIX: the original line was missing its closing parenthesis,
# which made the whole file a SyntaxError.
print("accuracy value: {:.4f}%".format(results[1]*100))
# Fashion-MNIST class names, indexed by label id 0-9.
# BUG FIX: 'Ankel boot' -> 'Ankle boot' (official class name for label 9).
itemlist = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
            'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Draw a reproducible sample of 16 distinct test images and predict them.
np.random.seed(219)
test_batch_size = 16
batch_index = np.random.choice(len(test_data), size=test_batch_size, replace=False)
batch_xs = test_data[batch_index]
batch_ys = test_labels[batch_index]
y_pred_ = model(batch_xs, training=False)

# Plot each image with its predicted class: blue title when the prediction
# matches the true label, red when it does not.
fig = plt.figure(figsize=(16, 10))
for i, (px, py) in enumerate(zip(batch_xs, y_pred_)):
    p = fig.add_subplot(4, 8, i + 1)
    color = 'blue' if np.argmax(py) == batch_ys[i] else 'red'
    p.set_title("y_pred: {}".format(itemlist[np.argmax(py)]), color=color)
    p.imshow(px.reshape(28, 28))
    p.axis('off')
<< 결과 >>
[tensorflow] optuna tensorflow cnn 예제 #1 (0) | 2022.01.23 |
---|---|
[tensorflow] 모델 정보와 History 정보 저장하기 (0) | 2021.12.15 |
댓글 영역