Training settings
Neural network architecture
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.optimizers.legacy import Adam
EPOCHS = args.epochs or 50
LEARNING_RATE = args.learning_rate or 0.0005
# If True, non-deterministic functions (e.g. shuffling batches) are not used.
# This is False by default.
ENSURE_DETERMINISM = args.ensure_determinism
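# A minimal sketch (an assumption, not part of the generated settings) of what
# enforcing full determinism could additionally involve: fixing the global
# seeds so weight initialization and any remaining randomness are reproducible.
if ENSURE_DETERMINISM:
    import random
    import numpy as np
    random.seed(1)
    np.random.seed(1)
    tf.random.set_seed(1)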
# This controls the batch size; you can also manipulate the tf.data.Dataset
# objects yourself (see the prefetch sketch below).
BATCH_SIZE = args.batch_size or 32
if not ENSURE_DETERMINISM:
    train_dataset = train_dataset.shuffle(buffer_size=BATCH_SIZE*4)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
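# As noted above, the tf.data.Dataset objects can be manipulated further here.
# A typical addition (a sketch, not part of the original script) is
# prefetching, which overlaps data preparation with training:
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
validation_dataset = validation_dataset.prefetch(tf.data.AUTOTUNE)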
# model architecture
model = Sequential()
# Explicit input layer matching the 3,750-feature input described in the
# architecture summary below
model.add(InputLayer(input_shape=(3750,)))
model.add(Dense(3375, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(3000, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(2625, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(2250, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(1875, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(1500, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(1125, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(750, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(375, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(250, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(100, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(50, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(20, activation='relu',
                activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(classes, name='y_pred', activation='softmax'))
# this controls the learning rate
opt = Adam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999)
# callbacks and BatchLoggerCallback are provided by the surrounding training
# harness; the logger reports per-batch progress during training.
callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count, epochs=EPOCHS, ensure_determinism=ENSURE_DETERMINISM))
# train the neural network
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(train_dataset, epochs=EPOCHS, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
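# Sketch (an assumption, not emitted by the original script): report the final
# validation metrics once training has finished.
val_loss, val_acc = model.evaluate(validation_dataset, verbose=0)
print(f'Validation loss: {val_loss:.4f}, validation accuracy: {val_acc:.4f}')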
# Use this flag to disable per-channel quantization for a model.
# This can reduce RAM usage for convolutional models, but may have
# an impact on accuracy.
disable_per_channel_quantization = False
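The quantization flag above is consumed by the downstream conversion step rather than by this script. As a rough standalone equivalent, the sketch below (an assumption, not part of the generated code) performs full-integer post-training quantization with the TFLite converter, calibrating on samples drawn from train_dataset:

def representative_dataset():
    # Yield a modest number of single-sample batches so the converter can
    # calibrate its quantization ranges on real input data.
    for sample, _ in train_dataset.unbatch().batch(1).take(100):
        yield [tf.cast(sample, tf.float32)]

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()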
The resulting architecture, layer by layer:
Input layer (3,750 features)
Dense layer (3375 neurons)
Dense layer (3000 neurons)
Dense layer (2625 neurons)
Dense layer (2250 neurons)
Dense layer (1875 neurons)
Dense layer (1500 neurons)
Dense layer (1125 neurons)
Dense layer (750 neurons)
Dense layer (375 neurons)
Dense layer (250 neurons)
Dense layer (100 neurons)
Dense layer (50 neurons)
Dense layer (20 neurons)
Output layer (3 classes)