Edge Impulse Experts / Adiuvo_BrainChip_Anomaly

Training settings

Number of training cycles (numeric only)
Learning rate (between 0 and 1)
Training processor
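These settings reach the expert-mode training script as an `args` object (note `args.epochs` and `args.learning_rate` in the script under "Neural network architecture"). Below is a minimal sketch of equivalent parsing and validation, assuming argparse-style flags; the flag names are illustrative, not the Edge Impulse harness's actual interface:

import argparse

# Hypothetical flags mirroring the settings above; the real Edge Impulse
# training harness constructs `args` itself.
parser = argparse.ArgumentParser(description='training settings')
parser.add_argument('--epochs', type=int, required=True)           # training cycles (numeric only)
parser.add_argument('--learning-rate', type=float, required=True)  # learning rate
args = parser.parse_args()

if not (0 < args.learning_rate < 1):
    parser.error('learning rate must be between 0 and 1')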

Augmentation settings

Advanced training settings

Audio training options

Neural network architecture

The expert-mode training script below builds the Keras CNN, trains it, then quantizes the model and runs quantization-aware training (QAT) for the BrainChip Akida target. Note that `args`, `classes`, `train_sample_count`, `callbacks`, `BatchLoggerCallback`, and the tf.data datasets are supplied by the Edge Impulse training harness.

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout, Conv1D, Conv2D, Flatten, Reshape, MaxPooling1D, MaxPooling2D, AveragePooling2D, BatchNormalization, TimeDistributed, Permute, ReLU, Softmax
from tensorflow.keras.optimizers import Adam

EPOCHS = args.epochs or 1
LEARNING_RATE = args.learning_rate or 0.0001
# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = 32
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)

# model architecture
model = Sequential()
model.add(Conv2D(16, kernel_size=3, strides=2, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same'))
model.add(ReLU())
model.add(Conv2D(16, kernel_size=3, strides=2, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same'))
model.add(ReLU())
model.add(Conv2D(16, kernel_size=3, strides=2, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same'))
model.add(ReLU())
model.add(Dropout(0.8))
model.add(Conv2D(8, kernel_size=3, strides=2, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same'))
model.add(ReLU())
model.add(Dropout(0.65))
model.add(Conv2D(16, kernel_size=3, strides=2, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same'))
model.add(ReLU())
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(classes, name='y_pred'))
model.add(Softmax())

# this controls the learning rate
opt = Adam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999)
# BatchLoggerCallback is provided by the Edge Impulse training harness
callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count, epochs=EPOCHS))

# train the neural network
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(train_dataset, epochs=EPOCHS, validation_data=validation_dataset, verbose=2, callbacks=callbacks)

def akida_quantize_model(
    keras_model,
    weight_quantization: int = 4,
    activ_quantization: int = 4,
    input_weight_quantization: int = 8,
):
    import cnn2snn

    print("Performing post-training quantization...")
    akida_model = cnn2snn.quantize(
        keras_model,
        weight_quantization=weight_quantization,
        activ_quantization=activ_quantization,
        input_weight_quantization=input_weight_quantization,
    )
    print("Performing post-training quantization OK")
    print("")

    return akida_model

def akida_perform_qat(
    akida_model,
    train_dataset: tf.data.Dataset,
    validation_dataset: tf.data.Dataset,
    optimizer: str,
    fine_tune_loss: str,
    fine_tune_metrics: "list[str]",
    callbacks,
    stopping_metric: str = "val_accuracy",
    fit_verbose: int = 2,
    qat_epochs: int = 30,
):
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor=stopping_metric,
        mode="max",
        verbose=1,
        min_delta=0,
        patience=10,
        restore_best_weights=True,
    )
    callbacks.append(early_stopping)

    print("Running quantization-aware training...")
    akida_model.compile(
        optimizer=optimizer, loss=fine_tune_loss, metrics=fine_tune_metrics
    )
    akida_model.fit(
        train_dataset,
        epochs=qat_epochs,
        verbose=fit_verbose,
        validation_data=validation_dataset,
        callbacks=callbacks,
    )
    print("Running quantization-aware training OK")
    print("")

    return akida_model

akida_model = akida_quantize_model(model)
akida_model = akida_perform_qat(
    akida_model,
    train_dataset=train_dataset,
    validation_dataset=validation_dataset,
    optimizer=opt,
    fine_tune_loss='categorical_crossentropy',
    fine_tune_metrics=['accuracy'],
    callbacks=callbacks,
)
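Beyond this script, the quantized model must still be converted to a native Akida model before it can run on BrainChip hardware. A minimal sketch, assuming the cnn2snn and akida Python packages are installed; `x_test` is a hypothetical batch of preprocessed feature windows, and API details can vary between cnn2snn releases:

import cnn2snn
import numpy as np

# Convert the QAT-trained, quantized Keras model into an akida.Model
akida_net = cnn2snn.convert(akida_model)
akida_net.summary()  # per-layer view of the SNN mapping

# Akida expects unsigned 8-bit inputs:
# predictions = akida_net.predict(x_test.astype(np.uint8))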
Input layer (12,935 features)
2D conv / pool layer (16 filters, 3 kernel size, 3 layers)
Dropout (rate 0.8)
2D conv / pool layer (8 filters, 3 kernel size, 1 layer)
Dropout (rate 0.65)
2D conv / pool layer (16 filters, 3 kernel size, 1 layer)
Dropout (rate 0.5)
Flatten layer
Output layer (2 classes)
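The first Conv2D layer needs a 2D input, so the flat 12,935-feature window is reshaped into a spectrogram-like tensor before the convolutional stack. A minimal sketch of that step; 12,935 = 199 x 65 is one plausible (time frames x frequency bins) factorization, and the actual layout is fixed by the project's DSP block:

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Reshape

model = Sequential()
model.add(InputLayer(input_shape=(12935,), name='x_input'))
model.add(Reshape((199, 65, 1)))  # (rows, columns, channels); assumed layout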