Update README.md
Browse files
README.md
CHANGED
@@ -43,8 +43,8 @@ To use this model for text summarization, you can follow the code example below:
|
|
43 |
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
44 |
|
45 |
# Load the fine-tuned model and tokenizer
|
46 |
-
model = T5ForConditionalGeneration.from_pretrained("
|
47 |
-
tokenizer = T5Tokenizer.from_pretrained("
|
48 |
|
49 |
# Input text for summarization
|
50 |
input_text = "Your long input text here."
|
@@ -56,3 +56,15 @@ summary_ids = model.generate(inputs["input_ids"], max_length=150, num_beams=4, e
|
|
56 |
# Decode the summary
|
57 |
summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
|
58 |
print(summary)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
44 |
|
45 |
# Load the fine-tuned model and tokenizer
|
46 |
+
model = T5ForConditionalGeneration.from_pretrained("kawinduwijewardhane/BriefT5")
|
47 |
+
tokenizer = T5Tokenizer.from_pretrained("kawinduwijewardhane/BriefT5")
|
48 |
|
49 |
# Input text for summarization
|
50 |
input_text = "Your long input text here."
|
|
|
56 |
# Decode the summary
|
57 |
summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
|
58 |
print(summary)
|
59 |
+
|
60 |
+
|
61 |
+
|
62 |
+
|
63 |
+
|
64 |
+
### Explanation of the YAML metadata:
|
65 |
+
- **`language`**: Specifies the language the model supports, in this case, English (`en`).
|
66 |
+
- **`license`**: Describes the licensing information for the model; here it is set to MIT (change it to match the actual license of your model).
|
67 |
+
- **`tags`**: Tags categorize the model on Hugging Face and make it easier for others to discover. This model uses the tags `summarization`, `t5`, `text-to-text`, and `fine-tuned`.
|
68 |
+
|
69 |
+
This YAML metadata resolves the model-card warning and provides the information Hugging Face requires for the model card.
|
70 |
+
|