Arijit Das / AgriAI v2.0 Public

Training settings

Please provide a valid number of training cycles (numeric only)
Please provide a valid number for the learning rate (between 0 and 1)
Please provide a valid training processor option

Augmentation settings

Advanced training settings

Neural network architecture

# Transfer-learning training script: MobileNetV2 (96x96, alpha=0.35) backbone
# with a small dense head, trained in two passes — a frozen-backbone initial
# pass, then fine-tuning of the top ~65% of layers at a lower learning rate.
# NOTE(review): `classes`, `train_dataset`, `validation_dataset`, `callbacks`,
# `BatchLoggerCallback`, `train_sample_count`, and `BEST_MODEL_PATH` are
# supplied by the enclosing training harness — confirm against the caller.
import sys      # fix: used by sys.path.append below but was never imported
import math     # fix: used by math.ceil below but was never imported

import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
    Dense, InputLayer, Dropout, Conv1D, Flatten, Reshape, MaxPooling1D,
    BatchNormalization, Conv2D, GlobalMaxPooling2D, Lambda,
)
from tensorflow.keras.optimizers import Adam, Adadelta
from tensorflow.keras.losses import categorical_crossentropy

# Edge Impulse training helpers shipped alongside this script.
sys.path.append('./resources/libraries')
import ei_tensorflow.training

# Model input: 96x96 RGB images (96 * 96 * 3 = 27,648 features).
INPUT_SHAPE = (96, 96, 3)

# MobileNetV2 backbone with locally cached pretrained weights.
base_model = tf.keras.applications.MobileNetV2(
    input_shape=INPUT_SHAPE,
    alpha=0.35,
    weights='./transfer-learning-weights/keras/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.35_96.h5',
    include_top=True,
)
# Freeze the backbone for the initial training pass.
base_model.trainable = False

model = Sequential()
model.add(InputLayer(input_shape=INPUT_SHAPE, name='x_input'))
# Don't include the base model's top layers
last_layer_index = -3
model.add(Model(inputs=base_model.inputs,
                outputs=base_model.layers[last_layer_index].output))
model.add(Dense(16))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(classes, activation='softmax'))

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

BATCH_SIZE = 32
train_dataset, validation_dataset = ei_tensorflow.training.set_batch_size(
    BATCH_SIZE, train_dataset, validation_dataset)
callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count))

# Initial pass: train only the new head on top of the frozen backbone.
model.fit(train_dataset, validation_data=validation_dataset, epochs=10,
          verbose=2, callbacks=callbacks)

print('')
print('Initial training done.', flush=True)

# How many epochs we will fine tune the model
FINE_TUNE_EPOCHS = 10
# What percentage of the base model's layers we will fine tune
FINE_TUNE_PERCENTAGE = 65

print('Fine-tuning best model for {} epochs...'.format(FINE_TUNE_EPOCHS), flush=True)

# Load best model from initial training
model = ei_tensorflow.training.load_best_model(BEST_MODEL_PATH)

# Determine which layer to begin fine tuning at
model_layer_count = len(model.layers)
fine_tune_from = math.ceil(model_layer_count * ((100 - FINE_TUNE_PERCENTAGE) / 100))

# Allow the entire base model to be trained...
model.trainable = True
# ...then re-freeze all the layers before the 'fine_tune_from' layer.
for layer in model.layers[:fine_tune_from]:
    layer.trainable = False

# Much lower learning rate so fine-tuning does not wreck pretrained features.
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.000045),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

model.fit(train_dataset, epochs=FINE_TUNE_EPOCHS, verbose=2,
          validation_data=validation_dataset, callbacks=callbacks)
Input layer (27,648 features)
MobileNetV2 96x96 0.35 (final layer: 16 neurons, 0.1 dropout)
Output layer (2 classes)

Model

Model version: