# DAY 19 — AI & Data

## 【19】使用 Pooling 和 Conv 來把圖片變小 (subsampling) 的比較實驗

Colab連結

``````NUM_OF_CLASS = 3

ds_data, ds_info = tfds.load(
'beans',
shuffle_files=True,
as_supervised=True,
with_info=True,
)

train_split, test_split = ds_data['train'], ds_data['test']

fig = tfds.show_examples(train_split, ds_info)

print(f'number of train: {len(train_split)}')
print(f'number of test: {len(test_split)}')
``````

``````from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense

def alexnet_modify_max_pooling():
model = Sequential()
model.add(Conv2D(32, (11, 11), padding='valid', input_shape=(227,227,3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))

model.add(Conv2D(64, (7, 7), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))

model.add(Conv2D(96, (3, 3), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))

model.add(Flatten())
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(NUM_OF_CLASS))

return model
``````

```text
EPOCH 172/200
loss: 0.0015 - sparse_categorical_accuracy: 1.0000 - val_loss: 0.7871 - val_sparse_categorical_accuracy: 0.8438
```

``````def alexnet_modify_avg_pooling():
model = Sequential()
model.add(Conv2D(32, (11, 11), padding='valid', input_shape=(227,227,3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(3, 3)))

model.add(Conv2D(64, (7, 7), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(3, 3)))

model.add(Conv2D(96, (3, 3), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(3, 3)))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(3, 3)))

model.add(Flatten())
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(NUM_OF_CLASS))

return model
``````

```text
EPOCH 155/200
loss: 0.0060 - sparse_categorical_accuracy: 0.9990 - val_loss: 0.7398 - val_sparse_categorical_accuracy: 0.8438
```

``````def alexnet_modify_conv_replace_pooling():
model = Sequential()
model.add(Conv2D(32, (11, 11), padding='valid', input_shape=(227,227,3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), strides=(3, 3), padding='valid'))

model.add(Conv2D(64, (7, 7), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), strides=(3, 3), padding='valid'))

model.add(Conv2D(96, (3, 3), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3), strides=(3, 3), padding='valid'))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), strides=(3, 3), padding='valid'))

model.add(Flatten())
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(NUM_OF_CLASS))

return model
``````

```text
EPOCH 194/200
loss: 7.5052e-04 - sparse_categorical_accuracy: 1.0000 - val_loss: 1.0053 - val_sparse_categorical_accuracy: 0.7734
```