Update README.md
README.md CHANGED
@@ -42,16 +42,16 @@ I also evaluated the model on 20K dataset of video from youtube. We extract the
 
 How to use the model
 
-tokenizer = AutoTokenizer.from_pretrained("banhabang/
-model = AutoModelForSeq2SeqLM.from_pretrained("banhabang/
+tokenizer = AutoTokenizer.from_pretrained("banhabang/vit5-base-tag-generation")
+model = AutoModelForSeq2SeqLM.from_pretrained("banhabang/vit5-base-tag-generation")
 model.to('cuda')
 
-
-
-
-
-
-
-
-
-
+encoding = tokenizer(ytb['Title'][i], return_tensors="pt")
+input_ids, attention_masks = encoding["input_ids"].to("cuda"), encoding["attention_mask"].to("cuda")
+outputs = model.generate(
+    input_ids=input_ids, attention_mask=attention_masks,
+    max_length=30,
+    early_stopping=True
+)
+for output in outputs:
+    outputs = tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
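For reference, below is a self-contained sketch of the usage snippet added above, assuming the Hugging Face transformers library and PyTorch. The example title string is a stand-in for `ytb['Title'][i]`, which refers to the author's YouTube dataframe (not shown in this diff), the device fallback is an added convenience, and the decode loop collects results into a list instead of overwriting `outputs` on each iteration.

```python
# Minimal sketch of the README usage snippet (assumptions noted in comments).
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("banhabang/vit5-base-tag-generation")
model = AutoModelForSeq2SeqLM.from_pretrained("banhabang/vit5-base-tag-generation")

# Assumption: fall back to CPU when no GPU is available (README uses 'cuda' directly).
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

title = "Video title goes here"  # placeholder for ytb['Title'][i] in the README
encoding = tokenizer(title, return_tensors="pt")
input_ids = encoding["input_ids"].to(device)
attention_mask = encoding["attention_mask"].to(device)

outputs = model.generate(
    input_ids=input_ids,
    attention_mask=attention_mask,
    max_length=30,
    early_stopping=True,
)

# Decode every generated sequence, keeping all results instead of overwriting
# the `outputs` variable inside the loop as the README snippet does.
tags = [
    tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    for output in outputs
]
print(tags)
```

Note that `early_stopping` only takes effect when beam search is enabled (e.g. `num_beams` > 1); it is kept here to mirror the README snippet.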