Blaise-g committed
Commit ef238ef · 1 Parent(s): de9b441

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -185,7 +185,7 @@ if __name__ == "__main__":
         value=2,
     )
     gr.Markdown(
-        "_The input text is divided into batches of the selected token lengths to fit within the memory constraints, pre-processed and fed into the model of choice. For optimal results use a GPU as the hosted CPU inference is lacking at times and hinders the models' output summary quality._"
+        "_For optimal results, use a GPU, as the hosted CPU inference is lacking at times and hinders the models' output summary quality._"
     )
     with gr.Row():
         length_penalty = gr.inputs.Slider(
@@ -211,7 +211,7 @@ if __name__ == "__main__":
     input_text = gr.Textbox(
         lines=6,
         label="Input Text (for summarization)",
-        placeholder="Enter any scientific text to be condensed into a long and comprehensive digested format or an extreme TLDR summary version, the text will be preprocessed and truncated if necessary to fit within the computational constraints. The models were trained to handle long scientific papers but generalize reasonably well also to shorter text documents like abstracts with an appropriate. Might take a while to produce long summaries :)",
+        placeholder="Enter any scientific text to be condensed into a long and comprehensive digested format or an extreme TLDR summary version. The input text is divided into batches of the selected token lengths to fit within the memory constraints, pre-processed, and fed into the model of choice. The models were trained on long scientific papers but also generalize reasonably well to shorter documents such as scientific abstracts. Producing long summaries might take a while :)",
     )
     gr.Markdown("Upload your own file:")
     with gr.Row():
@@ -227,7 +227,7 @@ if __name__ == "__main__":
     with gr.Column():
         gr.Markdown("## Generate Summary")
         gr.Markdown(
-            "Summary generation should take approximately 1-2 minutes for most generation settings."
+            "Summary generation should take approximately 1-2 minutes for most settings, but can take significantly longer for very long documents with the maximum available number of beams."
         )
         summarize_button = gr.Button(
             "Summarize!",
@@ -253,7 +253,7 @@ if __name__ == "__main__":
     "- [Blaise-g/longt5_tglobal_large_sumpubmed](https://huggingface.co/Blaise-g/longt5_tglobal_large_sumpubmed) is a fine-tuned checkpoint of [Stancld/longt5-tglobal-large-16384-pubmed-3k_steps](https://huggingface.co/Stancld/longt5-tglobal-large-16384-pubmed-3k_steps) on the [SumPubMed dataset](https://aclanthology.org/2021.acl-srw.30/). [Blaise-g/longt5_tglobal_large_scitldr](https://huggingface.co/Blaise-g/longt5_tglobal_large_scitldr) is a fine-tuned checkpoint of [Blaise-g/longt5_tglobal_large_sumpubmed](https://huggingface.co/Blaise-g/longt5_tglobal_large_sumpubmed) on the [Scitldr dataset](https://arxiv.org/abs/2004.15011). The goal was to create two models capable of handling the complex information in long biomedical documents and producing scientific summaries at one of two levels of conciseness: 1) a long explanatory synopsis that retains most of the domain-specific language of the original source text, or 2) a one-sentence, TLDR-style summary."
     )
     gr.Markdown(
-        "- The two most important text generation parameters are the number of beams and length penalty : 1) Choosing a higher number of beams for the beam search algorithm results in generating a summary with higher probability (hence theoretically higher quality) at the cost of increasing computation times and memory usage. 2) The length penalty encourages the model to generate longer or shorter summary sequences by placing an exponential penalty on the beam score according to the current sequence length."
+        "- The two most important text generation parameters are the number of beams and the length penalty: 1) a higher number of beams for the beam search algorithm yields a summary with higher probability (hence, theoretically, higher quality) at the cost of increased computation time and memory usage; 2) the length penalty encourages the model to generate longer (values closer to 1.0) or shorter (values closer to 0.0) summaries by placing an exponential penalty on the beam score according to the current sequence length."
     )
     gr.Markdown("---")
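The updated placeholder text above describes splitting the input into batches of the selected token length before inference. A minimal sketch of that idea, assuming a hypothetical chunk_by_tokens helper and a token_batch_length parameter taken from the UI description, not from the actual app.py:

# Minimal sketch of the token-length batching described in the placeholder
# text; chunk_by_tokens is a hypothetical helper, not the app's actual code.
from transformers import AutoTokenizer

def chunk_by_tokens(text, tokenizer, token_batch_length=2048):
    # Tokenize once, then slice the ids into fixed-size batches so each
    # batch fits within the model's memory constraints.
    input_ids = tokenizer(text, truncation=False).input_ids
    return [
        tokenizer.decode(input_ids[i : i + token_batch_length], skip_special_tokens=True)
        for i in range(0, len(input_ids), token_batch_length)
    ]

tokenizer = AutoTokenizer.from_pretrained("Blaise-g/longt5_tglobal_large_sumpubmed")
batches = chunk_by_tokens("Some long scientific paper ...", tokenizer)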
 
 
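The last two notes in the diff describe the two checkpoints and the two key generation parameters. As a rough illustration of how they fit together, here is a minimal sketch using the transformers library; the model IDs are the real ones linked above, but the selection logic and parameter values are illustrative assumptions, not the app's actual code.

# Sketch (not the app's actual code): choosing one of the two checkpoints
# described above and generating a summary with beam search.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

CHECKPOINTS = {
    "detailed": "Blaise-g/longt5_tglobal_large_sumpubmed",  # long explanatory synopsis
    "tldr": "Blaise-g/longt5_tglobal_large_scitldr",        # one-sentence TLDR
}

model_name = CHECKPOINTS["detailed"]
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

inputs = tokenizer("Some long scientific text ...", return_tensors="pt")
summary_ids = model.generate(
    inputs.input_ids,
    num_beams=2,         # more beams: higher-probability summaries, more time/memory
    length_penalty=0.8,  # closer to 1.0 favors longer output, closer to 0.0 shorter
    max_length=512,      # illustrative cap on summary length
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))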