Training settings
Neural network architecture
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout
from tensorflow.keras.optimizers.legacy import Adam

# Standard Edge Impulse arguments
EPOCHS = args.epochs or 75
# 1. FIXED: raised the learning rate (0.000005 was too low to make progress)
LEARNING_RATE = args.learning_rate or 0.00001
ENSURE_DETERMINISM = args.ensure_determinism
BATCH_SIZE = args.batch_size or 32
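# Note: args, train_dataset, validation_dataset, callbacks, train_sample_count,
# and BatchLoggerCallback are all provided by the Edge Impulse training pipeline.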
if not ENSURE_DETERMINISM:
    train_dataset = train_dataset.shuffle(buffer_size=BATCH_SIZE*4)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
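# Optional (a suggestion, not part of the original script): prefetching lets the
# input pipeline prepare the next batch while the current one trains.
# train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)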
# model architecture
model = Sequential()
# 2. FIXED: Added the required Input Layer
model.add(InputLayer(input_shape=(input_length, ), name='x_input'))
# 3. FIXED: added the missing 'model.add(...)' call, widened the layer to 32 neurons, kept your L2 regularization
model.add(Dense(32, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)))
# Dropout to prevent overfitting
model.add(Dropout(0.4))
# Second hidden layer (optional: with only ~30 samples, two layers may be more capacity than you need, but you can try)
model.add(Dense(8, activation='relu'))
# Output layer
model.add(Dense(classes, name='y_pred', activation='softmax'))
# Optimizer setup
opt = Adam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999)
callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count, epochs=EPOCHS, ensure_determinism=ENSURE_DETERMINISM))
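# Optional (a suggestion, not part of the original script): with a dataset this
# small, validation-based early stopping helps curb overfitting.
# callbacks.append(tf.keras.callbacks.EarlyStopping(
#     monitor='val_loss', patience=10, restore_best_weights=True))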
# train the neural network
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(train_dataset, epochs=EPOCHS, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
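# Use this flag to disable per-channel quantization for a model.
# This can reduce RAM usage for some models, but may have an impact on accuracy.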
disable_per_channel_quantization = False
Architecture as shown in the Studio's visual editor: Input layer (1,056 features) → Dense layer (8 neurons) → Dropout (rate 0.25) → Output layer (3 classes).