|
import torch |
|
from torch.utils.data import TensorDataset |
|
import tensorflow as tf |
|
import tensorflow_datasets as tfds |
|
import jax.numpy as jnp |
|
|
|
|
|
def get_datasets(
    features_path='../big_model_inference/resnet18_embeddings.pt',
    labels_path='../big_model_inference/all_cow_ids.pt',
    train_frac=0.001,
    val_frac=0.2,
    seed=42,
):
    """Load precomputed embeddings/labels and split them into random subsets.

    Args:
        features_path: Path to a ``torch.save``-d tensor of embeddings,
            one row per sample.
        labels_path: Path to a ``torch.save``-d tensor of 1-based cow IDs;
            they are shifted to 0-based class indices here.
        train_frac: Fraction of samples (after shuffling) that ends the
            train split.  NOTE(review): the historical default 0.001 is an
            unusually small train set — confirm it is intentional.
        val_frac: Fraction marking the end of the validation split; the
            validation set is the slice between ``train_frac`` and
            ``val_frac``, and everything after is the test set.
        seed: Seed for the shuffle permutation (default preserves the
            original hard-coded value).

    Returns:
        Tuple ``(train_dataset, val_dataset, test_dataset)`` of
        ``TensorDataset`` objects.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted files (consider weights_only=True on modern torch).
    embeddings = torch.load(features_path)
    # Labels on disk are 1-based cow IDs; shift to 0-based class indices.
    labels = torch.load(labels_path) - 1

    num_samples = len(embeddings)

    # Use a dedicated generator instead of torch.manual_seed so the split
    # is reproducible without clobbering the caller's global RNG state.
    generator = torch.Generator().manual_seed(seed)
    indices = torch.randperm(num_samples, generator=generator)

    train_end = int(train_frac * num_samples)
    val_end = int(val_frac * num_samples)
    train_indices = indices[:train_end]
    val_indices = indices[train_end:val_end]
    test_indices = indices[val_end:]

    train_dataset = TensorDataset(embeddings[train_indices], labels[train_indices])
    val_dataset = TensorDataset(embeddings[val_indices], labels[val_indices])
    test_dataset = TensorDataset(embeddings[test_indices], labels[test_indices])

    print(f"Train set: {len(train_dataset)} samples")
    print(f"Validation set: {len(val_dataset)} samples")
    print(f"Test set: {len(test_dataset)} samples")
    return train_dataset, val_dataset, test_dataset
|
|
|
|
|
|
|
def get_time_series(
    features_path='../big_model_inference/resnet18_embeddings.pt',
    labels_path='../big_model_inference/all_cow_ids.pt',
    train_frac=0.33,
    val_frac=0.66,
):
    """Load precomputed embeddings/labels and split them chronologically.

    Unlike :func:`get_datasets`, the data is NOT shuffled: the first
    ``train_frac`` of samples become the train set, the slice up to
    ``val_frac`` the validation set, and the remainder the test set —
    preserving the on-disk (presumably temporal) ordering.

    Args:
        features_path: Path to a ``torch.save``-d tensor of embeddings,
            one row per sample.
        labels_path: Path to a ``torch.save``-d tensor of 1-based cow IDs;
            they are shifted to 0-based class indices here.
        train_frac: Fraction of samples that ends the train split
            (default preserves the original hard-coded 0.33).
        val_frac: Fraction marking the end of the validation split
            (default preserves the original hard-coded 0.66).

    Returns:
        Tuple ``(train_dataset, val_dataset, test_dataset)`` of
        ``TensorDataset`` objects.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted files (consider weights_only=True on modern torch).
    embeddings = torch.load(features_path)
    # Labels on disk are 1-based cow IDs; shift to 0-based class indices.
    labels = torch.load(labels_path) - 1

    num_samples = len(embeddings)

    train_end = int(train_frac * num_samples)
    val_end = int(val_frac * num_samples)

    train_dataset = TensorDataset(embeddings[:train_end], labels[:train_end])
    val_dataset = TensorDataset(embeddings[train_end:val_end], labels[train_end:val_end])
    test_dataset = TensorDataset(embeddings[val_end:], labels[val_end:])

    print(f"Train set: {len(train_dataset)} samples")
    print(f"Validation set: {len(val_dataset)} samples")
    print(f"Test set: {len(test_dataset)} samples")
    return train_dataset, val_dataset, test_dataset
|
|
|
|
|
|
|
def get_time_series_tf(
    features_path='../big_model_inference/resnet18_embeddings.pt',
    labels_path='../big_model_inference/all_cow_ids.pt'
):
    """Chronological train/val/test split served as batched NumPy iterators.

    Loads ``torch.save``-d embeddings and 1-based cow IDs (shifted here to
    0-based), splits them in on-disk order at 33% / 66%, builds batched
    ``tf.data`` pipelines (batch size 32, shuffled for train only), and
    converts each to a NumPy iterator via ``tfds.as_numpy``.

    Returns:
        ``(train_iter, val_iter, test_iter, embedding_dim)`` where each
        iterator yields ``(features, labels)`` batches as NumPy arrays and
        ``embedding_dim`` is the per-sample feature length.
    """
    features = torch.load(features_path).numpy()
    labels = (torch.load(labels_path) - 1).numpy()

    total = len(features)
    first_cut = int(0.33 * total)
    second_cut = int(0.66 * total)

    train_dataset = tf.data.Dataset.from_tensor_slices(
        (features[:first_cut], labels[:first_cut]))
    val_dataset = tf.data.Dataset.from_tensor_slices(
        (features[first_cut:second_cut], labels[first_cut:second_cut]))
    test_dataset = tf.data.Dataset.from_tensor_slices(
        (features[second_cut:], labels[second_cut:]))

    print(f"Train set: {len(train_dataset)} samples")
    print(f"Validation set: {len(val_dataset)} samples")
    print(f"Test set: {len(test_dataset)} samples")

    batch_size = 32

    def _pipeline(dataset, shuffle=False):
        # Shared batching/prefetching tail; only the train split shuffles,
        # with a buffer covering the whole (unbatched) split.
        if shuffle:
            dataset = dataset.shuffle(len(dataset))
        return dataset.batch(
            batch_size,
            num_parallel_calls=tf.data.AUTOTUNE
        ).prefetch(tf.data.AUTOTUNE)

    train_dataset = _pipeline(train_dataset, shuffle=True)
    val_dataset = _pipeline(val_dataset)
    test_dataset = _pipeline(test_dataset)

    return (
        tfds.as_numpy(train_dataset),
        tfds.as_numpy(val_dataset),
        tfds.as_numpy(test_dataset),
        len(features[0]),
    )
|
|
|
if __name__ == "__main__":
    # Smoke-test the TF pipeline on the DINOv2 embeddings and peek at one
    # batch from each split.
    train_dataset, val_dataset, test_dataset, in_features = get_time_series_tf(
        features_path='../big_model_inference/facebook_dinov2_base_embeddings.pt'
    )
    print(f"in features : {in_features}")

    for first_batch in train_dataset:
        # Wrap the NumPy batch as JAX arrays, mirroring training-loop usage.
        sample = {
            'feature': jnp.array(first_batch[0]),
            'label': jnp.array(first_batch[1]),
        }
        print(sample)
        break

    for first_batch in val_dataset:
        print(first_batch)
        break

    for first_batch in test_dataset:
        print(first_batch)
        break