Dmitry / robot-control-english

Training settings

Number of training cycles
Learning rate
Training processor

Augmentation settings

Advanced training settings

Neural network architecture

import math, requests
import sys
from pathlib import Path
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import (
    Dense, InputLayer, Dropout, Conv1D, Flatten, Reshape, MaxPooling1D,
    BatchNormalization, Conv2D, GlobalMaxPooling2D, Lambda, GlobalAveragePooling2D)
from tensorflow.keras.optimizers.legacy import Nadam
from tensorflow.keras.losses import categorical_crossentropy

sys.path.append('./resources/libraries')
import ei_tensorflow.training

# Pretrained MobileNetV1 (alpha=0.1) keyword-spotting weights, trained on MFE spectrograms
WEIGHTS_PATH = './transfer-learning-weights/edgeimpulse/kws/kws_mfe-dsp-ver-4-mobilenetv1a0.1_train_f1cf6_00000_0_batch_size=256,lr=0.001_2023-01-13_02-42-20_dnu_2048_009.h5'

# Download the model weights if they are not already cached locally
root_url = 'https://cdn.edgeimpulse.com/'
p = Path(WEIGHTS_PATH)
if not p.exists():
    print(f"Pretrained weights {WEIGHTS_PATH} unavailable; downloading...")
    if not p.parent.exists():
        p.parent.mkdir(parents=True)
    weights_data = requests.get(root_url + WEIGHTS_PATH[2:]).content
    with open(WEIGHTS_PATH, 'wb') as f:
        f.write(weights_data)
    print(f"Pretrained weights {WEIGHTS_PATH} downloaded OK")
    print("")

# Each input window is a 99-frame x 40-band MFE spectrogram with a single channel
INPUT_SHAPE = (99, 40, 1)

base_model = tf.keras.applications.MobileNet(
    input_shape=INPUT_SHAPE,
    alpha=0.1,
    weights=WEIGHTS_PATH,
    include_top=False)
# Freeze the pretrained backbone; only the new classification head is trained
base_model.trainable = False

model = Sequential()
model.add(InputLayer(input_shape=X_train[0].shape, name='x_input'))
model.add(Reshape(INPUT_SHAPE))   # flattened feature vector -> 99 x 40 x 1 spectrogram
model.add(base_model)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.1))
model.add(Dense(128, activation='relu'))
model.add(Dense(classes, activation='softmax'))

EPOCHS = args.epochs or 30
LEARNING_RATE = args.learning_rate or 0.01
# If True, non-deterministic functions (e.g. shuffling batches) are not used.
# This is False by default.
ENSURE_DETERMINISM = args.ensure_determinism
BATCH_SIZE = args.batch_size or 32

# Build the tf.data pipelines: shuffle (unless determinism is requested), batch, prefetch
if not ENSURE_DETERMINISM:
    train_dataset = train_dataset.shuffle(buffer_size=BATCH_SIZE*4)
prefetch_policy = 1 if ENSURE_DETERMINISM else tf.data.AUTOTUNE
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False).prefetch(prefetch_policy)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False).prefetch(prefetch_policy)

callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count, epochs=EPOCHS,
                                     ensure_determinism=ENSURE_DETERMINISM))

model.compile(optimizer=Nadam(learning_rate=LEARNING_RATE),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_dataset,
          validation_data=validation_dataset,
          epochs=EPOCHS,
          verbose=2,
          callbacks=callbacks,
          class_weight=ei_tensorflow.training.get_class_weights(Y_train))
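Note that X_train, Y_train, train_dataset, validation_dataset, callbacks, args, classes, train_sample_count, and BatchLoggerCallback are injected by the Edge Impulse training harness and are not defined in the snippet itself. As a rough, hypothetical illustration only (not part of the project code), a trained model of this shape could be queried on a single window of features as sketched below; the 3,960-value feature vector and the five output classes are taken from the architecture summary that follows, and the random window is a stand-in for real MFE features.

import numpy as np

# Hypothetical single inference window: 3,960 MFE features (99 frames x 40 bands),
# flattened exactly as the InputLayer above expects.
window = np.random.rand(1, 99 * 40).astype(np.float32)

# 'model' is the Sequential network built above; predict() returns one softmax
# vector with one probability per class (assumed here to be five keyword classes).
probabilities = model.predict(window)                     # shape (1, 5)
predicted_class = int(np.argmax(probabilities, axis=1)[0])
print(predicted_class, probabilities[0][predicted_class])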
Input layer (3,960 features)
MobileNetV1 0.1 (final layer: 128 neurons, 0.1 dropout)
Output layer (5 classes)
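The 3,960 input features correspond to one 99-frame by 40-band MFE spectrogram (99 × 40 = 3,960), which the Reshape layer turns back into the (99, 40, 1) image-like input that the MobileNetV1 backbone expects. A minimal sketch of that reshaping, using a random stand-in for one feature window:

import numpy as np
import tensorflow as tf

features = np.random.rand(3960).astype(np.float32)   # one flattened MFE window
spectrogram = tf.reshape(features, (99, 40, 1))      # 99 frames x 40 MFE bands x 1 channel
print(spectrogram.shape)                             # (99, 40, 1)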

Model

Model version: