fevot committed
Commit 83a14b6 · verified · 1 Parent(s): df5c7f2

Update app.py

Files changed (1):
  1. app.py +85 -52
app.py CHANGED
@@ -1,64 +1,97 @@
 
  import torch
- import torchaudio
- import torchaudio.transforms as transforms
- import torch.nn.functional as F
  import json
- from flask import Flask, request, jsonify
  from torchvision import models
  import librosa
- import numpy as np

- app = Flask(__name__)

- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # Load ResNet50 feature extractor
- resnet = models.resnet50(pretrained=False)
- resnet.fc = torch.nn.Identity()  # Remove classification layer
- resnet = resnet.to(device)
- resnet.eval()

- # Load RNN model for classification
- rnn_model = torch.nn.LSTM(input_size=2048, hidden_size=512, num_layers=2, batch_first=True, bidirectional=True)
- rnn_fc = torch.nn.Linear(1024, 114)  # 114 classes
- rnn_model = rnn_model.to(device)
- rnn_fc = rnn_fc.to(device)
- rnn_model.eval()
- rnn_fc.eval()

- # Load class mapping
- with open("class_mapping.json", "r") as f:
-     class_mapping = json.load(f)
-
- def preprocess_audio(file_path):
-     y, sr = librosa.load(file_path, sr=22050)  # Load at 22.05 kHz
-     mel_spectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=8000)
-     mel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)
-     mel_spectrogram = torch.tensor(mel_spectrogram).unsqueeze(0).unsqueeze(0)  # Add batch & channel dims
-     return mel_spectrogram.to(device)

- def predict_birdcall(audio_tensor):
-     with torch.no_grad():
-         features = resnet(audio_tensor.repeat(1, 3, 1, 1))  # Convert grayscale to 3-channel
-         features = features.unsqueeze(1)  # Add sequence dimension
-         rnn_out, _ = rnn_model(features)
-         logits = rnn_fc(rnn_out[:, -1, :])  # Take last time step
-         pred = torch.argmax(logits, dim=1).item()
-         return class_mapping.get(str(pred), "Unknown")

- @app.route("/predict", methods=["POST"])
- def predict():
-     if "file" not in request.files:
-         return jsonify({"error": "No file uploaded"}), 400
-     file = request.files["file"]
-
-     file_path = "temp_audio.wav"
-     file.save(file_path)
-
-     audio_tensor = preprocess_audio(file_path)
-     prediction = predict_birdcall(audio_tensor)
-
-     return jsonify({"prediction": prediction})

- if __name__ == "__main__":
-     app.run(host="0.0.0.0", port=5000, debug=True)
 
+ import gradio as gr
  import torch
+ from torch import nn
+ import cv2
+ import numpy as np
  import json
  from torchvision import models
  import librosa

+ # Define the BirdCallRNN model
+ class BirdCallRNN(nn.Module):
+     def __init__(self, resnet, num_features, num_classes):
+         super(BirdCallRNN, self).__init__()
+         self.resnet = resnet
+         self.rnn = nn.LSTM(input_size=num_features, hidden_size=256, num_layers=2, batch_first=True, bidirectional=True)
+         self.fc = nn.Linear(512, num_classes)  # 512 = 256 hidden units x 2 directions
+
+     def forward(self, x):
+         batch, seq_len, C, H, W = x.size()
+         x = x.view(batch * seq_len, C, H, W)
+         features = self.resnet(x)
+         features = features.view(batch, seq_len, -1)
+         rnn_out, _ = self.rnn(features)
+         output = self.fc(rnn_out[:, -1, :])  # Take last time step; used here with single-segment sequences
+         return output
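As a quick sanity check on the shapes flowing through BirdCallRNN, the following hypothetical snippet (not part of the commit; random weights, random input) exercises the forward pass:

# Hypothetical shape check: ResNet-50 yields 2048-dim features per frame,
# the bidirectional LSTM (256 hidden units x 2 directions) yields 512-dim
# states, and fc maps those to num_classes logits.
backbone = models.resnet50(weights=None)
feat_dim = backbone.fc.in_features  # 2048 for ResNet-50
backbone.fc = nn.Identity()
demo = BirdCallRNN(backbone, feat_dim, num_classes=114)
demo.eval()
x = torch.randn(1, 1, 3, 224, 224)  # (batch, seq_len, C, H, W)
print(demo(x).shape)  # torch.Size([1, 114])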
 
+ # Function to convert MP3 to mel spectrogram (unchanged)
+ def mp3_to_mel_spectrogram(mp3_file, target_shape=(128, 500), resize_shape=(224, 224)):
+     y, sr = librosa.load(mp3_file, sr=None)
+     S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=8000)
+     log_S = librosa.power_to_db(S, ref=np.max)
+     current_time_steps = log_S.shape[1]
+     target_time_steps = target_shape[1]
+     if current_time_steps < target_time_steps:
+         pad_width = target_time_steps - current_time_steps
+         log_S_resized = np.pad(log_S, ((0, 0), (0, pad_width)), mode='constant')
+     elif current_time_steps > target_time_steps:
+         log_S_resized = log_S[:, :target_time_steps]
+     else:
+         log_S_resized = log_S
+     log_S_resized = cv2.resize(log_S_resized, resize_shape, interpolation=cv2.INTER_CUBIC)
+     return log_S_resized
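For intuition, the pad/truncate branch above normalizes every clip to 500 mel frames before the 224x224 resize; a minimal sketch of the padding case (illustrative shapes, assuming the imports above):

# Illustrative: a 320-frame spectrogram is right-padded with zeros to 500
# frames; a clip longer than 500 frames would be truncated instead.
short_clip = np.random.rand(128, 320).astype(np.float32)
padded = np.pad(short_clip, ((0, 0), (0, 500 - short_clip.shape[1])), mode='constant')
print(padded.shape)  # (128, 500)
print(cv2.resize(padded, (224, 224), interpolation=cv2.INTER_CUBIC).shape)  # (224, 224)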
 
+ # Load class mapping globally
+ with open('class_mapping.json', 'r') as f:
+     class_names = json.load(f)

+ # Revised inference function to predict per segment
+ def infer_birdcall(model, mp3_file, segment_length=500, device="cuda"):
+     model.eval()
+     # Load audio and compute mel spectrogram
+     y, sr = librosa.load(mp3_file, sr=None)
+     S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=8000)
+     log_S = librosa.power_to_db(S, ref=np.max)
+     # Segment the spectrogram
+     num_segments = log_S.shape[1] // segment_length
+     if num_segments == 0:
+         segments = [log_S]
+     else:
+         segments = [log_S[:, i * segment_length:(i + 1) * segment_length] for i in range(num_segments)]
+
+     predictions = []
+     # Process each segment individually
+     for seg in segments:
+         seg_resized = cv2.resize(seg, (224, 224), interpolation=cv2.INTER_CUBIC)
+         seg_rgb = np.repeat(seg_resized[:, :, np.newaxis], 3, axis=-1)
+         # Create a tensor with batch size 1 and sequence length 1
+         seg_tensor = torch.from_numpy(seg_rgb).permute(2, 0, 1).float().unsqueeze(0).unsqueeze(0).to(device)  # Shape: (1, 1, 3, 224, 224)
+         output = model(seg_tensor)
+         pred = torch.max(output, dim=1)[1].cpu().numpy()[0]
+         predicted_bird = class_names[str(pred)]  # Convert pred to string to match JSON keys
+         predictions.append(predicted_bird)
+     return predictions
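One behavior worth noting in the revised inference path: trailing frames that do not fill a complete segment are dropped by the floor division (except when the clip is shorter than one segment, in which case the whole spectrogram is used). A hypothetical check:

# Illustrative: 1234 frames at segment_length=500 -> 2 full segments,
# and the 234-frame remainder is discarded.
log_S_demo = np.zeros((128, 1234))
num = log_S_demo.shape[1] // 500
segs = [log_S_demo[:, i * 500:(i + 1) * 500] for i in range(num)]
print(num, [s.shape for s in segs])  # 2 [(128, 500), (128, 500)]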
 
+ # Initialize the model
+ resnet = models.resnet50(weights='IMAGENET1K_V2')
+ num_features = resnet.fc.in_features
+ resnet.fc = nn.Identity()
+ num_classes = len(class_names)  # Should be 114
+ model = BirdCallRNN(resnet, num_features, num_classes)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+ model.load_state_dict(torch.load('model_weights.pth', map_location=device))
+ model.eval()
 
+ # Prediction function for Gradio
+ def predict_bird(file_path):
+     predictions = infer_birdcall(model, file_path, segment_length=500, device=str(device))
+     return ", ".join(predictions)  # Join predictions into a single string

+ # Launch Gradio interface
+ interface = gr.Interface(
+     fn=predict_bird,
+     inputs=gr.File(label="Upload MP3 file", file_types=['.mp3']),
+     outputs=gr.Textbox(label="Predicted Bird Species")
+ )
+ interface.launch()
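With model_weights.pth and class_mapping.json in place, the new entry point can also be smoke-tested without the UI (hypothetical snippet; sample.mp3 is an illustrative file name). By default, interface.launch() serves the app locally on port 7860.

# Hypothetical local check, bypassing Gradio.
print(predict_bird("sample.mp3"))  # e.g. "SpeciesA, SpeciesA"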