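"""Dataset loaders for the cow identification benchmark.

Builds train/validation/test splits from precomputed embeddings (e.g. ResNet-18
or DINOv2 features) paired with cow ID labels, exposed either as PyTorch
TensorDatasets or as batched tf.data pipelines that a JAX training loop can
consume as NumPy arrays.
"""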
import torch
from torch.utils.data import TensorDataset
import tensorflow as tf
import tensorflow_datasets as tfds
import jax.numpy as jnp
def get_datasets(
features_path='../big_model_inference/resnet18_embeddings.pt',
labels_path='../big_model_inference/all_cow_ids.pt'
):
    embeddings_np = torch.load(features_path)
    # Cow IDs on disk appear to be 1-indexed; shift to 0-indexed class labels
    all_cow_ids = torch.load(labels_path) - 1
# Set the seed for reproducibility
seed = 42
torch.manual_seed(seed)
    # Shuffle all sample indices with a fixed permutation
    num_samples = len(embeddings_np)
    indices = torch.randperm(num_samples)
    # Split indices: 0.1% train / ~20% validation / ~80% test
    train_end = int(0.001 * num_samples)
    val_end = int(0.2 * num_samples)
train_indices = indices[:train_end]
val_indices = indices[train_end:val_end]
test_indices = indices[val_end:]
    # print(train_indices[:10]); print(val_indices[:10]); print(test_indices[:10])
    # Regression checks for the fixed seed (uncomment to verify the permutation):
    # assert torch.equal(train_indices[:10], torch.tensor(
    #     [292622, 37548, 42432, 353497, 379054, 301165, 47066, 353666, 409458, 454581]))
    # assert torch.equal(val_indices[:10], torch.tensor(
    #     [219340, 495317, 522025, 36026, 490924, 179563, 533196, 263518, 139048, 72363]))
    # assert torch.equal(test_indices[:10], torch.tensor(
    #     [192226, 477583, 210506, 265639, 82907, 246325, 335726, 395405, 497690, 388675]))
# Create datasets for each split
train_dataset = TensorDataset(embeddings_np[train_indices], all_cow_ids[train_indices])
val_dataset = TensorDataset(embeddings_np[val_indices], all_cow_ids[val_indices])
test_dataset = TensorDataset(embeddings_np[test_indices], all_cow_ids[test_indices])
print(f"Train set: {len(train_dataset)} samples")
print(f"Validation set: {len(val_dataset)} samples")
print(f"Test set: {len(test_dataset)} samples")
return train_dataset, val_dataset, test_dataset
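
# A minimal usage sketch (assuming the default embedding files exist on disk);
# the DataLoader and batch_size=32 below are illustrative, not part of this module:
#
#     train_dataset, val_dataset, test_dataset = get_datasets()
#     train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
#     for features, labels in train_loader:
#         ...  # features: embedding vectors, labels: 0-indexed cow IDs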
def get_time_series(
features_path='../big_model_inference/resnet18_embeddings.pt',
labels_path='../big_model_inference/all_cow_ids.pt'
):
embeddings_np = torch.load(features_path)
all_cow_ids = torch.load(labels_path) - 1
num_samples = len(embeddings_np)
    # Chronological split: first 33% train, next 33% validation, final 34% test
    train_end = int(0.33 * num_samples)
    val_end = int(0.66 * num_samples)
# Create datasets for each split
train_dataset = TensorDataset(embeddings_np[:train_end], all_cow_ids[:train_end])
val_dataset = TensorDataset(embeddings_np[train_end:val_end], all_cow_ids[train_end:val_end])
test_dataset = TensorDataset(embeddings_np[val_end:], all_cow_ids[val_end:])
print(f"Train set: {len(train_dataset)} samples")
print(f"Validation set: {len(val_dataset)} samples")
print(f"Test set: {len(test_dataset)} samples")
return train_dataset, val_dataset, test_dataset
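
# Unlike get_datasets, this split keeps the original sample order, so the
# validation and test samples come strictly after the training window; this is
# intended for checking how identification accuracy holds up over time rather
# than on a random shuffle.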
def get_time_series_tf(
features_path='../big_model_inference/resnet18_embeddings.pt',
labels_path='../big_model_inference/all_cow_ids.pt'
):
embeddings_np = torch.load(features_path)
all_cow_ids = torch.load(labels_path) - 1
embeddings_np = embeddings_np.numpy()
all_cow_ids = all_cow_ids.numpy()
num_samples = len(embeddings_np)
    # Same chronological 33/33/34 split as get_time_series
    train_end = int(0.33 * num_samples)
    val_end = int(0.66 * num_samples)
# Create datasets for each split
train_dataset = tf.data.Dataset.from_tensor_slices((embeddings_np[:train_end], all_cow_ids[:train_end]))
val_dataset = tf.data.Dataset.from_tensor_slices((embeddings_np[train_end:val_end], all_cow_ids[train_end:val_end]))
test_dataset = tf.data.Dataset.from_tensor_slices((embeddings_np[val_end:], all_cow_ids[val_end:]))
print(f"Train set: {len(train_dataset)} samples")
print(f"Validation set: {len(val_dataset)} samples")
print(f"Test set: {len(test_dataset)} samples")
    batch_size = 32
    # Shuffle with a buffer spanning the whole training split, then batch in
    # parallel and prefetch so input processing overlaps with training
    train_dataset = train_dataset.shuffle(len(train_dataset)).batch(
        batch_size,
        num_parallel_calls=tf.data.AUTOTUNE
    ).prefetch(tf.data.AUTOTUNE)
val_dataset = val_dataset.batch(
batch_size,
num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(
batch_size,
num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
    # Convert the pipelines to iterables of NumPy batches so non-TF code
    # (e.g. the JAX loop below) can consume them directly
    train_dataset = tfds.as_numpy(train_dataset)
    val_dataset = tfds.as_numpy(val_dataset)
    test_dataset = tfds.as_numpy(test_dataset)
return train_dataset, val_dataset, test_dataset, len(embeddings_np[0])

if __name__ == "__main__":
    train_dataset, val_dataset, test_dataset, in_features = get_time_series_tf(
        features_path='../big_model_inference/facebook_dinov2_base_embeddings.pt'
    )
    print(f"in_features: {in_features}")
    # Peek at one batch per split; batches arrive as NumPy arrays and convert
    # directly to JAX arrays
    for batch in train_dataset:
        batch = {
            'feature': jnp.array(batch[0]),
            'label': jnp.array(batch[1])
        }
        print(batch)
        break
for batch in val_dataset:
print(batch)
break
for batch in test_dataset:
print(batch)
break