Training settings
Training parameters such as the number of epochs (10) and the batch size (64) are set directly in the training script under "Neural network architecture" below.
Neural network architecture
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, Flatten, Reshape, MaxPooling2D
from tensorflow.keras.optimizers import Adam

# model architecture: a small 2D CNN over 24 x 32 single-channel frames
model = Sequential([
    # each sample arrives as a flat vector of 768 features (24 x 32) and is reshaped into an image
    Reshape((24, 32, 1), input_shape=(768,)),
    Conv2D(8, (3, 3), padding="same", activation="relu"),
    Conv2D(8, (3, 3), padding="same", activation="relu"),
    MaxPooling2D((2, 2)),
    Dropout(0.1),
    Conv2D(8, (3, 3), padding="same", activation="relu"),
    MaxPooling2D((2, 2), padding="same"),
    Dropout(0.1),
    Conv2D(16, (3, 3), padding="same", activation="relu"),
    MaxPooling2D((2, 2), padding="same"),
    Dropout(0.1),
    Conv2D(16, (3, 3), padding="same", activation="relu"),
    Flatten(),
    Dense(64, activation="relu"),
    Dropout(0.2),
    Dense(32, activation="relu"),
    Dropout(0.2),
    # classes (the number of output classes) is provided by the surrounding training script
    Dense(classes, activation="softmax", name="y_pred"),
])
# training settings
epochs = 10
batch_size = 64
steps_per_epoch = 100

model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

# alternative to the Reshape layer above: reshape each sample inside the tf.data pipeline
def reshape_function(data, label):
    reshaped_data = tf.reshape(data, [24, 32, 1])
    return reshaped_data, label
# train_dataset = train_dataset.map(reshape_function)
# validation_dataset = validation_dataset.map(reshape_function)

train_dataset = train_dataset.batch(batch_size).repeat()
validation_dataset = validation_dataset.batch(batch_size)

model.fit(
    train_dataset,
    epochs=epochs,
    validation_data=validation_dataset,
    steps_per_epoch=steps_per_epoch,
    # ceil(10 / batch_size); the 10 here is presumably the number of validation samples
    validation_steps=int((10 - 1) / batch_size + 1),
)
# # Alternative training configuration; set_batch_size, BatchLoggerCallback, callbacks, and
# # train_sample_count are helpers/variables provided by the surrounding training environment.
# # this controls the learning rate
# opt = Adam(learning_rate=0.0005, beta_1=0.9, beta_2=0.999)
# # this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
# BATCH_SIZE = 64
# train_dataset, validation_dataset = set_batch_size(BATCH_SIZE, train_dataset, validation_dataset)
# callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count))
# # train the neural network
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# model.fit(train_dataset, epochs=10, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
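The script above assumes that train_dataset, validation_dataset, and classes (and, in the commented-out variant, callbacks and train_sample_count) are already defined by the surrounding training environment. The sketch below shows one way such inputs could be prepared before running the script; the sample counts and random stand-in data are purely illustrative assumptions.

import numpy as np
import tensorflow as tf

# Illustrative only: random stand-in data with the expected shapes.
classes = 3                      # number of output classes (assumed)
num_train, num_val = 200, 10     # sample counts (assumed)
X_train = np.random.rand(num_train, 768).astype("float32")   # 768 = 24 x 32 flattened features
y_train = tf.keras.utils.to_categorical(np.random.randint(classes, size=num_train), classes)
X_val = np.random.rand(num_val, 768).astype("float32")
y_val = tf.keras.utils.to_categorical(np.random.randint(classes, size=num_val), classes)

# tf.data pipelines in the form the training script expects.
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val))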
Input layer (768 features)
Dense layer (20 neurons)
Dense layer (10 neurons)
Output layer (3 classes)
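The layer list above describes a simpler, fully connected architecture than the convolutional model defined in the code. A minimal Keras sketch of that listed network, with assumed ReLU hidden activations and a softmax output, could look like this:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer

# Fully connected network matching the layer list above (activation choices are assumptions).
dense_model = Sequential([
    InputLayer(input_shape=(768,)),     # input layer (768 features)
    Dense(20, activation="relu"),       # dense layer (20 neurons)
    Dense(10, activation="relu"),       # dense layer (10 neurons)
    Dense(3, activation="softmax"),     # output layer (3 classes)
])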