File size: 2,069 Bytes
f7c77c5
 
 
 
 
 
 
 
 
 
 
 
3298fbb
f7c77c5
 
 
fcf9a3b
f7c77c5
 
252c8c0
 
 
 
 
 
 
 
85dbf7d
252c8c0
 
 
 
 
fcf9a3b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f7c77c5
fcf9a3b
f7c77c5
 
 
 
 
 
 
 
 
 
 
 
3298fbb
f7c77c5
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import gradio as gr
import os
import lexrank as lr
import nltk
import metrics


def summarize(in_text):
    """Create an extractive summary of *in_text* limited to ~1024 tokens.

    Returns a 5-tuple ``(summary, n_words, n_sents, n_chars, n_tokens)``
    matching the five Gradio output components; on bad input the summary
    slot carries an error message and the counters are zero.
    """
    if not in_text:
        # Must return 5 values to match the 5 declared Interface outputs
        # (the old 2-value error return broke the Gradio callback).
        return 'Error: No text provided', 0, 0, 0, 0

    # Make sure the NLTK punkt tokenizer data is available locally.
    nltk_file = '/home/user/nltk_data/tokenizers/punkt.zip'
    if os.path.exists(nltk_file):
        print('nltk punkt file exists in ', nltk_file)
    else:
        print("downloading punkt file")
        nltk.download('punkt')

    # Discard all sentences that have 10 words or fewer.
    long_sentences = [sen for sen in in_text.split('.')
                      if len(sen.split()) > 10]
    if not long_sentences:
        # Without this guard the text collapses to a lone '.' and the
        # summarizer is fed effectively empty input.
        return 'Error: no sentence with more than 10 words found', 0, 0, 0, 0
    in_text = '.'.join(long_sentences) + '.'

    # The size of the summary is limited to 1024 tokens.  LexRank only
    # accepts a sentence count as its limit, so grow the summary one
    # sentence at a time and keep the last result under the budget.
    target_tokens = 1024

    in_sents = metrics.num_sentences(in_text)

    out_text = lr.get_Summary(in_text, 1)
    n_tokens = metrics.num_tokens(out_text)
    # range end is in_sents + 1 so the full sentence count is also tried
    # (the original range(2, in_sents) skipped the last step).
    for sen in range(2, in_sents + 1):
        candidate = lr.get_Summary(in_text, sen)
        cand_tokens = metrics.num_tokens(candidate)
        if cand_tokens >= target_tokens:
            # Stop *before* adopting the oversized candidate.  The old
            # code reverted only n_tokens and still returned the too-long
            # text, so the reported token count didn't match the summary.
            break
        out_text, n_tokens = candidate, cand_tokens

    n_sents = metrics.num_sentences(out_text)
    n_words = metrics.num_words(out_text)
    n_chars = metrics.num_chars(out_text)

    return out_text, n_words, n_sents, n_chars, n_tokens


# The five output widgets, in the exact order summarize() returns them.
_outputs = [
    gr.Textbox(label="Extractive Summary"),
    gr.Number(label="Number of Words"),
    gr.Number(label="Number of Sentences"),
    gr.Number(label="Number of Characters"),
    gr.Number(label="Number of Tokens"),
]

demo = gr.Interface(
    fn=summarize,
    inputs=["text"],
    outputs=_outputs,
    allow_flagging="never",
    queue=True,
)


# Launch the Gradio app only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()