generated from streamlit/streamlit-hello
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathHello.py
57 lines (39 loc) · 1.74 KB
/
Hello.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import streamlit as st
import librosa
import numpy as np
import pickle
from tensorflow.keras.models import load_model
import joblib
# Load the fitted label encoder saved at training time; it maps the model's
# numeric class indices back to human-readable class names.
# NOTE(review): joblib.load unpickles arbitrary objects — only load trusted
# artifact files shipped with this app.
labelencoder = joblib.load('labelencoder.pkl')
# Load the trained Keras audio-classification model (HDF5 format).
model = load_model('audio_classification.hdf5')
# Function to preprocess audio file
def preprocess_audio(uploaded_file):
    """Turn an uploaded audio file into a single MFCC feature vector.

    The audio is decoded at its native sampling rate (``sr=None``), 40 MFCC
    coefficients are extracted, and each coefficient is averaged across all
    frames. The result is a ``(1, 40)`` array shaped for ``model.predict``.
    """
    signal, sampling_rate = librosa.load(uploaded_file, sr=None)
    # MFCC matrix has shape (40, n_frames); average over the time axis.
    mfcc_matrix = librosa.feature.mfcc(y=signal, sr=sampling_rate, n_mfcc=40)
    feature_vector = mfcc_matrix.mean(axis=1)
    # Add a leading batch dimension: (40,) -> (1, 40).
    return feature_vector[np.newaxis, :]
# Define Streamlit app
def main():
    """Streamlit entry point: upload a .wav file and classify its contents.

    Renders an uploader, plays back the chosen file, and on request runs the
    globally loaded Keras ``model`` on mean-MFCC features, displaying the
    class name recovered via the global ``labelencoder``.
    """
    st.title('Audio Classification')
    uploaded_file = st.file_uploader("Upload an audio file", type=['wav'])
    if uploaded_file is not None:
        # Let the user hear what they uploaded before classifying it.
        st.audio(uploaded_file, format='audio/wav')
        if st.button('Classify'):
            # (1, 40) mean-MFCC feature vector for the model.
            preprocessed_features = preprocess_audio(uploaded_file)
            # predict() returns class probabilities of shape (1, n_classes).
            predicted_label = model.predict(preprocessed_features)
            predicted_class_index = np.argmax(predicted_label)
            # Bug fix: inverse_transform expects a 1-D array of label indices;
            # the old reshape(1, -1) passed a 2-D (1, 1) array, which modern
            # scikit-learn rejects. Index [0] to display the scalar class name.
            prediction_class = labelencoder.inverse_transform([predicted_class_index])[0]
            st.write("Predicted class:", prediction_class)
# Script entry point — runs the app only when this file is executed directly
# (e.g. via `streamlit run Hello.py`), not when it is imported as a module.
if __name__ == '__main__':
    main()