mehnaazasad committed on
Commit 72309c6 · 1 Parent(s): 9f2da0b

Removed extra/unused code and modified description.

Files changed (1)
  1. app.py +1 -14
app.py CHANGED
@@ -12,15 +12,6 @@ Original file is located at
 # !pip install gradio transformers==4.28.0 datasets
 
 import gradio as gr
-
-# def greet(name):
-# return "Hello " + name
-
-
-# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-
-# demo.launch()
-
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from datasets import load_dataset
 import numpy as np
@@ -44,12 +35,8 @@ def summarize(text, temperature):
   title = tokenizer.decode(output[0], skip_special_tokens=True)
   return title
 
-sample = dataset['test']['abstract'][0]
-
-summarize(sample, 3.0)
-
 title = "Title Generator"
-description = """This model was trained to summarize text and generate a title.
+description = """This model was trained to generate a title given scientific paper abstracts.
 You can find more details about the fine-tuning of this BART model
 [here](https://huggingface.co/mehnaazasad/bart-large-finetuned-arxiv-co-ga-latest).
 While default parameter values are shown, feel free to experiment!
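
For context, the hunks above show only fragments of summarize() and the Gradio wiring. A minimal sketch of how an app like this could be assembled is below; the generation arguments (do_sample, max_length), the truncation length, and the textbox/slider configuration are illustrative assumptions, not code taken from the actual app.py in this commit.

# Illustrative sketch only: generation settings and UI ranges are assumptions,
# not values taken from the app.py in this commit.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

checkpoint = "mehnaazasad/bart-large-finetuned-arxiv-co-ga-latest"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

def summarize(text, temperature):
    # Tokenize the abstract and sample a candidate title from the fine-tuned BART model.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024)
    output = model.generate(
        **inputs,
        do_sample=True,       # sampling so that temperature actually has an effect (assumption)
        temperature=temperature,
        max_length=32,        # titles are short (assumption)
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

demo = gr.Interface(
    fn=summarize,
    inputs=[
        gr.Textbox(lines=10, label="Abstract"),
        gr.Slider(minimum=0.5, maximum=3.0, value=1.0, label="Temperature"),
    ],
    outputs="text",
    title="Title Generator",
)

demo.launch()

Run locally, demo.launch() serves the interface; on a Hugging Face Space, the same app.py is picked up as the entry point automatically.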