Update README.md
README.md CHANGED
@@ -30,16 +30,18 @@ cd unicom/mlcd
 
 ```python
 from vit_rope2d_hf import MLCDVisionModel
-from transformers import
+from transformers import CLIPImageProcessor
 from PIL import Image
+import requests
 import torch
 
 # Load model and processor
 model = MLCDVisionModel.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-336")
-processor =
+processor = CLIPImageProcessor.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-336")
 
 # Process single image
-
+url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+image = Image.open(requests.get(url, stream=True).raw)
 inputs = processor(images=image, return_tensors="pt")
 
 # Get visual features
@@ -48,4 +50,5 @@ with torch.no_grad():
 features = outputs.last_hidden_state
 
 print(f"Extracted features shape: {features.shape}")
+# Extracted features shape: torch.Size([1, 577, 1664])
 ```
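For reference, below is the updated README snippet assembled from the changed and context lines of the diff above. The `outputs = model(**inputs)` line is not shown in the diff and is assumed from the hunk context (`with torch.no_grad():`) together with the use of `outputs.last_hidden_state`; every other line is taken verbatim from the diff.

```python
from vit_rope2d_hf import MLCDVisionModel
from transformers import CLIPImageProcessor
from PIL import Image
import requests
import torch

# Load model and processor
model = MLCDVisionModel.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-336")
processor = CLIPImageProcessor.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-336")

# Process single image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")

# Get visual features
with torch.no_grad():
    outputs = model(**inputs)  # assumed: forward pass implied by the hunk context, not shown in the diff
features = outputs.last_hidden_state

print(f"Extracted features shape: {features.shape}")
# Extracted features shape: torch.Size([1, 577, 1664])
```

The reported shape `[1, 577, 1664]` is consistent with a 336×336 input and patch size 14 (24×24 = 576 patch tokens plus one class token) and a hidden size of 1664 for the ViT-bigG backbone.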