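"""Racoon Search: a small Streamlit app.

Takes a search query, fetches results from the Google Custom Search JSON API,
downloads each result page, and shows a keyword-filtered, transformers-based
summary of the page content. Search results and summaries are cached as JSON
on disk so repeated queries avoid redundant API calls and model runs.
"""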
import json
import uuid
from functools import cache
from os import makedirs, remove
from os.path import dirname, exists

import streamlit as st
from googleapiclient.discovery import build
from slugify import slugify
from transformers import pipeline

from beautiful_soup.beautiful_soup import get_url_content
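# Note: get_url_content comes from this repo's local beautiful_soup helper
# module (not the PyPI beautifulsoup4 package). It is assumed to fetch the
# URL, cache the raw page text under page-content/, and return an iterable of
# text strings extracted from the page.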
@cache
def google_search_api_request(query):
    """Query the Google Custom Search JSON API and return the raw response.

    Responses are memoized per query for the lifetime of the process.
    """
    api_key = st.secrets["google_search_api_key"]
    cx = st.secrets["google_search_engine_id"]
    service = build(
        "customsearch",
        "v1",
        developerKey=api_key,
        cache_discovery=False,
    )
    # Exclude PDFs from search results.
    query = query + ' -filetype:pdf'
    return service.cse().list(
        q=query,
        cx=cx,
        num=5,
    ).execute()
def search_results(query):
    """Return search result items for the query, using a per-query JSON cache on disk."""
    file_path = 'search-results/' + slugify(query) + '.json'
    results = []
    makedirs(dirname(file_path), exist_ok=True)
    if exists(file_path):
        with open(file_path, 'r') as results_file:
            results = json.load(results_file)
    else:
        search_result = google_search_api_request(query)
        if int(search_result['searchInformation']['totalResults']) > 0:
            results = search_result['items']
        # Cache the items (possibly an empty list) so the API is not queried again.
        with open(file_path, 'w') as results_file:
            json.dump(results, results_file)
    if len(results) == 0:
        raise Exception('No results found.')
    return results
def content_summary(url_id, content):
    """Summarize the page content, using a per-URL JSON cache on disk."""
    file_path = 'summaries/' + url_id + '.json'
    makedirs(dirname(file_path), exist_ok=True)
    if exists(file_path):
        with open(file_path, 'r') as file:
            summary = json.load(file)
    else:
        # https://huggingface.co/docs/transformers/v4.18.0/en/main_classes/pipelines#transformers.SummarizationPipeline
        summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
        summary = summarizer(content, max_length=130, min_length=30, do_sample=False, truncation=True)
        with open(file_path, 'w') as file:
            json.dump(summary, file)
    return summary
def exception_notice(exception):
    """Show the full exception in debug mode (?debug=true), otherwise a short warning."""
    query_params = st.experimental_get_query_params()
    if 'debug' in query_params and query_params['debug'][0] == 'true':
        st.exception(exception)
    else:
        st.warning(str(exception))
def is_keyword_in_string(keywords, string):
    """Return True if any of the keywords occurs in the string."""
    return any(keyword in string for keyword in keywords)
def main():
    st.title('Racoon Search')
    query = st.text_input('Search query')
    query_params = st.experimental_get_query_params()

    if query:
        with st.spinner('Loading search results...'):
            try:
                results = search_results(query)
            except Exception as exception:
                exception_notice(exception)
                return

        number_of_results = len(results)
        st.success('Found {} results for "{}".'.format(number_of_results, query))

        if 'debug' in query_params and query_params['debug'][0] == 'true':
            with st.expander("Search results JSON"):
                if st.button('Delete search result cache', key=query + 'cache'):
                    remove('search-results/' + slugify(query) + '.json')
                st.json(results)

        progress_bar = st.progress(0)
        st.header('Search results')
        st.markdown('---')

        for index, result in enumerate(results):
            with st.container():
                st.markdown('### ' + result['title'])
                # Derive a stable per-URL id for the content and summary cache files.
                url_id = uuid.uuid5(uuid.NAMESPACE_URL, result['link']).hex
                try:
                    strings = get_url_content(result['link'])
                    keywords = query.split(' ')
                    # Keep only the page strings that mention at least one query keyword.
                    content = ''
                    for string in strings:
                        if is_keyword_in_string(keywords, string):
                            content += string + '\n'
                    summary = content_summary(url_id, content)
                    for sentence in summary:
                        st.write(sentence['summary_text'])
                except Exception as exception:
                    exception_notice(exception)

                progress_bar.progress((index + 1) / number_of_results)

                col1, col2, col3 = st.columns(3)
                with col1:
                    st.markdown('[Website Link]({})'.format(result['link']))
                with col2:
                    if st.button('Delete content from cache', key=url_id + 'content'):
                        remove('page-content/' + url_id + '.txt')
                with col3:
                    if st.button('Delete summary from cache', key=url_id + 'summary'):
                        remove('summaries/' + url_id + '.json')
                st.markdown('---')


if __name__ == '__main__':
    main()
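# To run the app locally (assuming this file is app.py and the local
# beautiful_soup helper module is present), install the dependencies and start
# Streamlit:
#
#     pip install streamlit google-api-python-client python-slugify transformers torch
#     streamlit run app.py
#
# st.secrets expects the API credentials in .streamlit/secrets.toml, e.g.:
#
#     google_search_api_key = "YOUR_API_KEY"
#     google_search_engine_id = "YOUR_SEARCH_ENGINE_ID"
#
# Append ?debug=true to the app URL to expose raw exceptions and the search
# results JSON.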