Training settings
Please provide a valid training processor option
Audio training options
Neural network architecture
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout, Conv1D, Flatten, Reshape, MaxPooling1D
from tensorflow.keras.optimizers.legacy import Adam
from sklearn.model_selection import KFold
import numpy as np
# Class weighting: make misclassifying under-represented classes cost more
# in the loss (class 1 counts double, class 2 counts 1.5x).
# NOTE(review): this is a standalone snippet — it assumes a 3-class problem
# and that `model`, `X_train`, and `y_train` already exist; confirm against
# the surrounding script before use.
class_weights = {0: 1.0, 1: 2.0, 2: 1.5} # Assign weights to each class
model.fit(X_train, y_train, class_weight=class_weights)
# Label smoothing: soften the one-hot targets (smoothing=0.1 moves 10% of
# the probability mass off the true class) to reduce overconfident
# predictions and improve calibration.
# NOTE(review): snippet assumes `model` and `opt` are already defined.
from tensorflow.keras.losses import CategoricalCrossentropy
loss = CategoricalCrossentropy(label_smoothing=0.1)
model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
# (A second, byte-identical copy of the import/loss/compile lines followed
# here — a copy-paste duplicate that recompiled the model to no effect;
# removed.)
# --- Training hyperparameters: CLI values with fallbacks -------------------
# NOTE: a falsy argument (unset/None, but also an explicit 0) falls back to
# the default, matching the original `args.x or default` behavior.
EPOCHS = 200 if not args.epochs else args.epochs
LEARNING_RATE = 0.005 if not args.learning_rate else args.learning_rate
BATCH_SIZE = 32 if not args.batch_size else args.batch_size

# --- k-fold cross-validation setup ----------------------------------------
k = 5  # number of folds

# Materialize the dataset as NumPy arrays so KFold's index arrays can slice it.
X = np.array(X_train)  # feature matrix
y = np.array(Y_train)  # labels (one-hot encoded)

# Fixed random_state keeps the shuffled fold assignment reproducible.
kfold = KFold(n_splits=k, shuffle=True, random_state=42)
fold_accuracies = []  # validation accuracy collected per fold
# Train and evaluate one fresh model per fold; accuracy of each held-out
# fold is accumulated in `fold_accuracies`.
for fold, (tr_idx, va_idx) in enumerate(kfold.split(X)):
    print(f"\n--- Fold {fold + 1} ---")

    # KFold yields index arrays; use them to slice out this fold's split.
    X_tr, X_va = X[tr_idx], X[va_idx]
    y_tr, y_va = y[tr_idx], y[va_idx]

    # A brand-new, untrained network each fold so no weights leak between
    # folds: reshape the flat feature vector into (time, 40) frames, two
    # conv -> pool -> dropout stages, then a softmax classification head.
    # NOTE(review): assumes input_length is divisible by 40 — confirm.
    model = Sequential([
        Reshape((int(input_length / 40), 40), input_shape=(input_length,)),
        Conv1D(8, kernel_size=3, padding='same', activation='relu'),
        MaxPooling1D(pool_size=2, strides=2, padding='same'),
        Dropout(0.25),
        Conv1D(16, kernel_size=3, padding='same', activation='relu'),
        MaxPooling1D(pool_size=2, strides=2, padding='same'),
        Dropout(0.25),
        Flatten(),
        Dense(classes, name='y_pred', activation='softmax'),
    ])

    # Adam with the standard momentum coefficients.
    opt = Adam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999)
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])

    # Fit on the training split; validation metrics per epoch come from the
    # held-out fold.
    history = model.fit(
        X_tr,
        y_tr,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        validation_data=(X_va, y_va),
        verbose=1,
    )

    # Final score on the held-out fold.
    val_loss, val_accuracy = model.evaluate(X_va, y_va, verbose=0)
    fold_accuracies.append(val_accuracy)
    print(f"Validation accuracy for fold {fold + 1}: {val_accuracy:.4f}")
# Average accuracy across all folds — the k-fold cross-validation estimate
# of generalization accuracy (mean of the per-fold held-out accuracies).
mean_accuracy = np.mean(fold_accuracies)
print(f"\nMean accuracy across {k} folds: {mean_accuracy:.4f}")
Input layer (3,960 features)
Reshape layer (40 columns)
1D conv / pool layer (8 filters, kernel size 3, 1 layer)
Dropout (rate 0.25)
1D conv / pool layer (16 filters, kernel size 3, 1 layer)
Dropout (rate 0.25)
Flatten layer
Output layer (5 classes)
Model
Model version: