---
license: cc-by-nc-sa-4.0
language:
- en
- tr
pipeline_tag: image-classification
tags:
- brain
- mri
- tumor
- ai
- disease
- neuroscience
- fmri
---

# Vbai-TS 1.0

| Model | Size (pixels) | Params | FLOPs | mAPᵛᵃˡ | CPU b1 (ms) | V100 b1 (ms) | V100 b32 (ms) |
|-------|---------------|--------|-------|--------|-------------|--------------|---------------|
| **Vbai-TS 1.0f** | 224 | 12.87M | 0.15B | 78.56% | 7.02 | 3.51 | 0.70 |
| **Vbai-TS 1.0c** | 224 | 51.48M | 0.56B | 78.0% | 18.11 | 9.06 | 1.81 |

## Description

The Vbai-TS 1.0 (Tumor Segmentation) model was trained and developed to diagnose brain diseases from MRI or fMRI images. It indicates, with high accuracy, whether the patient has a brain tumor and the associated risk of progression.

### Audience / Target

Vbai models are developed exclusively for hospitals, health centers and science centers.

### Classes

- **Glioma Tumor**: the tumor is aggressive.
- **Meningioma Tumor**: the tumor is progressing (intermediate stage).
- **Pituitary Tumor**: the tumor is progressing slowly.
- **No Tumor**: no tumor is present.
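The mAPᵛᵃˡ column in the table above reports mean average precision (the superscript suggests a validation split). The exact evaluation protocol is not spelled out in this card, but the `calculate_ap` / `calculate_map` helpers in the usage script below implement the usual definition:

$$
\mathrm{AP} = \sum_{k}\left(R_{k} - R_{k-1}\right) P_{k},
\qquad
\mathrm{mAP} = \frac{1}{N}\sum_{i=1}^{N}\mathrm{AP}_{i},
$$

where $P_k$ and $R_k$ are the precision and recall at the $k$-th score threshold (with precision made monotonically non-increasing before summing) and $N$ is the number of classes.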
![Vbai logo](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F19445420%2Fe50ae92acaa0df7afe9c44367553626f%2Fvbai%20logo%20850x680%20WB.png?generation=1729626904738191&alt=media)

# Usage

```python
import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import time
from thop import profile
import numpy as np


class SimpleCNN(nn.Module):
    # 'model_type' can be changed to 'f', 'c' or 'q' according to the checkpoint being loaded.
    def __init__(self, model_type='c', num_classes=4):
        super(SimpleCNN, self).__init__()
        self.num_classes = num_classes

        if model_type == 'f':
            self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
            self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
            self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
            self.fc1 = nn.Linear(64 * 28 * 28, 256)
            self.dropout = nn.Dropout(0.5)
        elif model_type == 'c':
            self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
            self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
            self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
            self.fc1 = nn.Linear(128 * 28 * 28, 512)
            self.dropout = nn.Dropout(0.5)
        elif model_type == 'q':
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
            self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
            self.conv3 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
            self.conv4 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
            self.fc1 = nn.Linear(512 * 14 * 14, 1024)
            self.dropout = nn.Dropout(0.3)

        self.fc2 = nn.Linear(self.fc1.out_features, num_classes)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))
        x = self.pool(self.relu(self.conv2(x)))
        x = self.pool(self.relu(self.conv3(x)))
        if hasattr(self, 'conv4'):
            x = self.pool(self.relu(self.conv4(x)))
        x = x.view(x.size(0), -1)
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x


def predict_image(model, image_path, transform, device):
    # Runs a single image through the model and returns (class index, confidence in %, input tensor).
    image = Image.open(image_path).convert('RGB')
    image = transform(image).unsqueeze(0).to(device)
    model.eval()
    with torch.no_grad():
        outputs = model(image)
        _, predicted = torch.max(outputs, 1)
        probabilities = torch.nn.functional.softmax(outputs, dim=1)
        confidence = probabilities[0, predicted].item() * 100
    return predicted.item(), confidence, image


def calculate_performance_metrics(model, device, input_size=(1, 3, 224, 224)):
    model.to(device)
    inputs = torch.randn(input_size).to(device)
    flops, params = profile(model, inputs=(inputs,), verbose=False)
    params_million = params / 1e6
    flops_billion = flops / 1e9

    cpu_times = []
    with torch.no_grad():
        start_time = time.time()
        _ = model(inputs)
        end_time = time.time()
        cpu_time = (end_time - start_time) * 1000
        cpu_times.append(cpu_time)

    # The V100 timings below are rough estimates scaled from the single
    # measured forward pass, not real GPU benchmarks.
    v100_times_b1 = [cpu_time / 2]
    v100_times_b32 = [cpu_time / 10]

    avg_cpu_time = sum(cpu_times) / len(cpu_times)
    avg_v100_b1_time = sum(v100_times_b1) / len(v100_times_b1)
    avg_v100_b32_time = sum(v100_times_b32) / len(v100_times_b32)

    return {
        'size_pixels': 224,
        'speed_cpu_b1': avg_cpu_time,
        'speed_v100_b1': avg_v100_b1_time,
        'speed_v100_b32': avg_v100_b32_time,
        'params_million': params_million,
        'flops_billion': flops_billion
    }


def calculate_precision_recall(true_labels, scores, iou_threshold=0.5):
    # Rank predictions by score, then accumulate true and false positives.
    sorted_indices = np.argsort(-scores)
    true_labels_sorted = true_labels[sorted_indices]
    tp = np.cumsum(true_labels_sorted == 1)
    fp = np.cumsum(true_labels_sorted == 0)
    precision = tp / (tp + fp)
    recall = tp / np.sum(true_labels == 1)
    return precision, recall


def calculate_ap(precision, recall):
    # Interpolated AP: make precision monotonically non-increasing, then
    # integrate it over the points where recall changes.
    precision = np.concatenate(([0.0], precision, [0.0]))
    recall = np.concatenate(([0.0], recall, [1.0]))
    for i in range(len(precision) - 1, 0, -1):
        precision[i - 1] = np.maximum(precision[i - 1], precision[i])
    indices = np.where(recall[1:] != recall[:-1])[0]
    ap = np.sum((recall[indices + 1] - recall[indices]) * precision[indices + 1])
    return ap


def calculate_map(true_labels_list, predicted_scores_list):
    aps = []
    for true_labels, predicted_scores in zip(true_labels_list, predicted_scores_list):
        precision, recall = calculate_precision_recall(true_labels, predicted_scores)
        ap = calculate_ap(precision, recall)
        aps.append(ap)
    mean_ap = np.mean(aps)
    return mean_ap


# Synthetic labels and scores used only to demonstrate the AP/mAP helpers above.
true_labels_list = [
    np.array([1, 0, 1, 1, 0]),
    np.array([0, 1, 1, 0, 1]),
    np.array([1, 1, 0, 0, 1])
]
predicted_scores_list = [
    np.array([0.9, 0.8, 0.4, 0.6, 0.7]),
    np.array([0.6, 0.9, 0.75, 0.4, 0.8]),
    np.array([0.7, 0.85, 0.6, 0.2, 0.95])
]
map_value = calculate_map(true_labels_list, predicted_scores_list)

true_labels = np.array([1, 0, 1, 1, 0, 1, 0, 1])
predicted_scores = np.array([0.9, 0.75, 0.6, 0.85, 0.55, 0.95, 0.5, 0.7])
precision, recall = calculate_precision_recall(true_labels, predicted_scores)
ap = calculate_ap(precision, recall)


def main():
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Defaults to the 'c' variant; pass model_type='f' to match the smaller checkpoint.
    model = SimpleCNN(num_classes=4).to(device)
    model.load_state_dict(torch.load('model/path', map_location=device))

    metrics = calculate_performance_metrics(model, device)

    image_path = 'test/image/path'
    predicted_class, confidence, image = predict_image(model, image_path, transform, device)
    class_names = ['Glioma Tumor (Aggressive Stage)', 'Meningioma Tumor (Intermediate Stage)',
                   'No Tumor', 'Pituitary Tumor (Slowly Progressing)']

    print(f'Predicted Class: {class_names[predicted_class]}')
    print(f'Confidence: {confidence:.2f}%')
    print(f'Params: {metrics["params_million"]:.2f} M')
    print(f'FLOPs (B): {metrics["flops_billion"]:.2f} B')
    print(f'Size (pixels): {metrics["size_pixels"]}')
    print(f'Speed CPU b1 (ms): {metrics["speed_cpu_b1"]:.2f} ms')
    print(f'Speed V100 b1 (ms): {metrics["speed_v100_b1"]:.2f} ms')
    print(f'Speed V100 b32 (ms): {metrics["speed_v100_b32"]:.2f} ms')
    print(f"Average Precision (AP): {ap}")
    print(f"Mean Average Precision (mAP): {map_value}")

    # Move the (still normalized) tensor back to the CPU before plotting.
    plt.imshow(image.squeeze(0).permute(1, 2, 0).cpu())
    plt.title(f'Prediction: {class_names[predicted_class]} \nConfidence: {confidence:.2f}%')
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    main()
```
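To classify more than one scan at a time, the same building blocks can be reused. The snippet below is a minimal batch-inference sketch, assuming the script above has been saved as `vbai_ts.py`; the checkpoint path (`weights/vbai_ts_1.0f.pt`), the image folder (`mri_slices/`), and the file pattern are placeholders to replace with your own.

```python
# Minimal batch-inference sketch (hypothetical paths; adjust to your setup).
from pathlib import Path

import torch
from torchvision import transforms

from vbai_ts import SimpleCNN, predict_image  # definitions from the script above

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# 'f' is the lighter variant from the table above; use 'c' for the larger one.
model = SimpleCNN(model_type='f', num_classes=4).to(device)
model.load_state_dict(torch.load('weights/vbai_ts_1.0f.pt', map_location=device))  # placeholder path
model.eval()

class_names = ['Glioma Tumor (Aggressive Stage)', 'Meningioma Tumor (Intermediate Stage)',
               'No Tumor', 'Pituitary Tumor (Slowly Progressing)']

# Classify every PNG in the folder and print one prediction per file.
for image_path in sorted(Path('mri_slices/').glob('*.png')):  # placeholder folder
    predicted_class, confidence, _ = predict_image(model, str(image_path), transform, device)
    print(f'{image_path.name}: {class_names[predicted_class]} ({confidence:.2f}%)')
```

Nothing here changes the model itself; it only wraps the single-image `predict_image` helper from the script above in a loop over files.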