# AI Knowledge Tree Builder — Streamlit app (Hugging Face Space)
import base64
import glob
import html
import os
import re
import time
from datetime import datetime
from urllib.parse import quote
from zoneinfo import ZoneInfo

import pytz
import streamlit as st
import streamlit.components.v1 as components
from gradio_client import Client
# --- Page configuration constants ---
# Long-form site name (not referenced by the UI below; kept for reuse).
Site_Name = 'AI Knowledge Tree Builder 📈🌿 Grow Smarter with Every Click'
# Browser-tab title, also shown in the hamburger "About" menu entry.
title = "🌳✨AI Knowledge Tree Builder🛠️🤓"
# Help / bug-report targets for the Streamlit hamburger menu.
helpURL = 'https://huggingface.co/spaces/awacke1/AIKnowledgeTreeBuilder/'
bugURL = 'https://huggingface.co/spaces/awacke1/AIKnowledgeTreeBuilder/'
# Emoji string used as the page icon.
icons = '🌳✨🛠️🤓'
# Sidebar blurb describing the app's design tenets.
# Fix: item 1 was "1 📱" (missing "."), inconsistent with items 2-7.
SidebarOutline = """🌳🤖 Designed with the following tenets:
1. 📱 **Portability** - Universal access via any device & link sharing
2. ⚡ **Speed of Build** - Rapid deployments < 2min to production
3. 🔗 **Linkiness** - Programmatic access to AI knowledge sources
4. 🎯 **Abstractive** - Core stays lean isolating high-maintenance components
5. 🧠 **Memory** - Shareable flows deep-linked research paths
6. 👤 **Personalized** - Rapidly adapts knowledge base to user needs
7. 🐦 **Living Brevity** - Easily cloneable, self modify data public share results.
"""
# Configure the Streamlit page (tab title, icon, wide layout, menu links).
# st.set_page_config must be the first Streamlit call in the script run.
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)
# Render the design-tenets outline at the top of the sidebar.
st.sidebar.markdown(SidebarOutline)
# Seed per-session defaults once so later code can read them unconditionally:
# selected_file — path of the note currently open (None = none selected);
# view_mode     — 'view' or 'edit'; files — cached list of saved .md files.
_SESSION_DEFAULTS = {
    'selected_file': None,
    'view_mode': 'view',
    'files': [],
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
# --- Utility Functions ---
def get_display_name(filename):
    """Return the text inside the first parentheses, else the filename unchanged."""
    parenthesized = re.search(r'\((.*?)\)', filename)
    return parenthesized.group(1) if parenthesized else filename
def get_time_display(filename):
    """Return the leading "HHMM(AM|PM)" stamp of a filename, else the filename."""
    stamp = re.match(r'(\d{2}\d{2}[AP]M)', filename)
    return stamp.group(1) if stamp else filename
def sanitize_filename(text):
    """Reduce text to a filesystem-safe string, capped at 50 characters.

    Anything that is not a word character, whitespace, or hyphen is blanked
    out, whitespace runs collapse to single spaces, and the result is trimmed.
    """
    collapsed = re.sub(r'\s+', ' ', re.sub(r'[^\w\s-]', ' ', text))
    return collapsed.strip()[:50]
def generate_timestamp_filename(query):
    """Generate a filename like "1103AM 11032024 (Query).md" in US Central time.

    Args:
        query: Free text describing the note; sanitized before use.

    Returns:
        The generated markdown filename (the file itself is not created here).
    """
    # zoneinfo (stdlib, 3.9+) replaces the third-party pytz dependency.
    current_time = datetime.now(ZoneInfo('US/Central'))
    time_str = current_time.strftime("%I%M%p")   # e.g. "1103AM"
    date_str = current_time.strftime("%m%d%Y")   # e.g. "11032024"
    safe_query = sanitize_filename(query)
    return f"{time_str} {date_str} ({safe_query}).md"
def delete_file(file_path):
    """Remove file_path from disk; True on success, False (with UI error) otherwise."""
    try:
        os.remove(file_path)
    except Exception as e:
        st.error(f"Error deleting file: {e}")
        return False
    return True
def save_ai_interaction(query, ai_result, is_rerun=False):
    """Persist a query/response pair as a timestamped markdown file.

    is_rerun selects the "fun version" template used by the sidebar Run button.
    Returns the filename on success, or None (with a UI error) on failure.
    """
    filename = generate_timestamp_filename(query)
    if is_rerun:
        body = (
            "# Rerun Query\n"
            "Original file content used for rerun:\n"
            f"{query}\n"
            "# AI Response (Fun Version)\n"
            f"{ai_result}\n"
        )
    else:
        body = (
            f"# Query: {query}\n"
            "## AI Response\n"
            f"{ai_result}\n"
        )
    try:
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(body)
    except Exception as e:
        st.error(f"Error saving file: {e}")
        return None
    return filename
def get_file_download_link(file_path):
    """Build an HTML anchor that downloads the file via a base64 data URI.

    The link downloads under the file's own basename and is labeled with its
    display name. Returns None (and shows a UI error) if reading fails.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
        b64 = base64.b64encode(content.encode()).decode()
        filename = os.path.basename(file_path)
        # Fix: the download attribute previously held a literal placeholder
        # string instead of the actual filename.
        return f'<a href="data:text/markdown;base64,{b64}" download="{filename}">{get_display_name(filename)}</a>'
    except Exception as e:
        st.error(f"Error creating download link: {e}")
        return None
# --- New Functions for Markdown File Parsing and Link Tree ---
def clean_item_text(line):
    """Strip a list-item line down to its plain text.

    Drops any leading emoji plus "N." prefix, removes remaining emoji
    (basic unicode range) and bold ** markers, then trims whitespace.
    E.g. "🔧 1. Low-level system integrations compilers Cplusplus" becomes
    "Low-level system integrations compilers Cplusplus".
    """
    without_prefix = re.sub(r'^[^\w]*(\d+\.\s*)', '', line)
    without_emoji = re.sub(r'[\U0001F300-\U0001FAFF]', '', without_prefix)
    return without_emoji.replace("**", "").strip()
def clean_header_text(header_line):
    """Return the text between the first pair of ** markers, else the stripped line.

    E.g. "🔧 **Systems, Infrastructure & Low-Level Engineering**" becomes
    "Systems, Infrastructure & Low-Level Engineering".
    """
    bold = re.search(r'\*\*(.*?)\*\*', header_line)
    return bold.group(1).strip() if bold else header_line.strip()
def parse_markdown_sections(md_text):
    """Split markdown text into sections keyed by bold headers.

    A non-blank line containing "**" starts a new section; numbered lines
    beneath it become items; any other line is folded into the previous item
    as a continuation. Content before the first header is skipped. Returns a
    list of dicts with keys 'header', 'raw', and 'items'.
    """
    sections = []
    section = None
    item_pattern = re.compile(r'^[^\w]*\d+\.\s+')
    for raw_line in md_text.splitlines():
        if not raw_line.strip():
            continue
        if '**' in raw_line:
            # Header line: open a new section.
            section = {'header': clean_header_text(raw_line), 'raw': raw_line, 'items': []}
            sections.append(section)
            continue
        if section is None:
            continue  # ignore everything before the first header
        if item_pattern.match(raw_line):
            section['items'].append(raw_line)
        elif section['items']:
            # Continuation of the previous item (wrapped line).
            section['items'][-1] += " " + raw_line.strip()
        else:
            section['items'].append(raw_line)
    return sections
def display_section_items(items):
    """Render each item as a bold bullet followed by a row of search links."""
    # Label -> URL builder for each search destination (original link set).
    search_urls = {
        "📚📖ArXiv": lambda k: f"/?q={quote(k)}",
        "🔮<sup>Google</sup>": lambda k: f"https://www.google.com/search?q={quote(k)}",
        "📺<sup>Youtube</sup>": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
        "🔭<sup>Bing</sup>": lambda k: f"https://www.bing.com/search?q={quote(k)}",
        "💡<sup>Claude</sup>": lambda k: f"https://claude.ai/new?q={quote(k)}",
        "📱X": lambda k: f"https://twitter.com/search?q={quote(k)}",
        "🤖<sup>GPT</sup>": lambda k: f"https://chatgpt.com/?model=o3-mini-high&q={quote(k)}",
    }
    for item in items:
        text = clean_item_text(item)
        link_parts = []
        for label, build_url in search_urls.items():
            link_parts.append(f"[{label}]({build_url(text)})")
        st.markdown(f"- **{text}** {' '.join(link_parts)}", unsafe_allow_html=True)
def display_markdown_tree():
    """Render an uploaded markdown file (or README.md) as a link tree.

    The file is parsed into bold-header sections; each section is shown in a
    collapsed expander with its original markdown plus generated search links.
    """
    st.markdown("## Markdown Tree Parser")
    uploaded_file = st.file_uploader("Upload a Markdown file", type=["md"])
    if uploaded_file is not None:
        md_content = uploaded_file.read().decode("utf-8")
    elif os.path.exists("README.md"):
        # Fall back to the Space's own README when nothing is uploaded.
        with open("README.md", "r", encoding="utf-8") as f:
            md_content = f.read()
    else:
        st.info("No Markdown file uploaded and README.md not found.")
        return
    sections = parse_markdown_sections(md_content)
    if not sections:
        st.info("No sections found in the markdown file.")
        return
    for section in sections:
        with st.expander(section['header'], expanded=False):
            st.markdown(f"**Original Markdown:**\n\n{section['raw']}\n")
            if section['items']:
                st.markdown("**Link Tree:**")
                display_section_items(section['items'])
            else:
                st.write("No items found in this section.")
# --- Existing AI and File Management Functions ---
def search_arxiv(query):
    """Ask the hosted ArXiv RAG space the same question with two LLMs.

    Shows each model's answer in the UI under a model-named header and
    returns both answers joined by a blank line.
    """
    st.write("Performing AI Lookup...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    # DRY: the two original calls differed only in the model name.
    models = [
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
    ]
    answers = []
    for model in models:
        answer = client.predict(
            prompt=query,
            llm_model_picked=model,
            stream_outputs=True,
            api_name="/ask_llm"
        )
        # Header shows the short model name, e.g. "Mixtral-8x7B-Instruct-v0.1".
        st.markdown(f"### {model.split('/')[-1]} Result")
        st.markdown(answer)
        answers.append(answer)
    return "\n\n".join(answers)
def SpeechSynthesis(result):
    """Embed an HTML widget that reads `result` aloud via the Web Speech API.

    The text appears in a textarea with a "Read Aloud" button that feeds its
    value to window.speechSynthesis.
    """
    # Fix: escape the payload so characters such as '<', '&', or a literal
    # "</textarea>" cannot break out of the textarea markup. The browser
    # decodes the entities back when populating the textarea's value.
    escaped_result = html.escape(result)
    documentHTML5 = '''
<!DOCTYPE html>
<html>
<head>
<title>Read It Aloud</title>
<script type="text/javascript">
function readAloud() {
const text = document.getElementById("textArea").value;
const speech = new SpeechSynthesisUtterance(text);
window.speechSynthesis.speak(speech);
}
</script>
</head>
<body>
<h1>🔊 Read It Aloud</h1>
<textarea id="textArea" rows="10" cols="80">
'''
    documentHTML5 += escaped_result
    documentHTML5 += '''
</textarea>
<br>
<button onclick="readAloud()">🔊 Read Aloud</button>
</body>
</html>
'''
    components.html(documentHTML5, width=1280, height=300)
def display_file_content(file_path):
    """Render a markdown file, or an editor with save support in edit mode.

    Mode comes from st.session_state.view_mode ('view' renders the markdown;
    anything else shows a text area plus a Save Changes button).
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as fh:
            text = fh.read()
        if st.session_state.view_mode == 'view':
            st.markdown(text)
            return
        base = os.path.basename(file_path)
        edited_text = st.text_area(
            "Edit content",
            text,
            height=400,
            key=f"edit_{base}"
        )
        if st.button("Save Changes", key=f"save_{base}"):
            try:
                with open(file_path, 'w', encoding='utf-8') as fh:
                    fh.write(edited_text)
                st.success(f"Successfully saved changes to {file_path}")
            except Exception as e:
                st.error(f"Error saving changes: {e}")
    except Exception as e:
        st.error(f"Error reading file: {e}")
def _create_sidebar_note():
    """Create a fresh timestamped note on disk and open it in edit mode."""
    filename = generate_timestamp_filename("New Note")
    with open(filename, 'w', encoding='utf-8') as f:
        f.write("# New Markdown File\n")
    # Fix: the success message previously showed a literal placeholder
    # string instead of the new file's name.
    st.sidebar.success(f"Created: {filename}")
    st.session_state.selected_file = filename
    st.session_state.view_mode = 'edit'

def file_management_sidebar():
    """Sidebar listing saved .md files with view/edit/rerun/delete actions.

    Refreshes st.session_state.files from the working directory (excluding
    README.md) and offers a create-note button; the duplicated create-note
    code from both branches now lives in _create_sidebar_note().
    """
    st.sidebar.title("📁 File Management")
    md_files = [file for file in glob.glob("*.md") if file.lower() != 'readme.md']
    md_files.sort()
    st.session_state.files = md_files
    if not md_files:
        st.sidebar.write("No markdown files found.")
        if st.sidebar.button("📝 Create First Note"):
            _create_sidebar_note()
        return
    st.sidebar.markdown("### Saved Files")
    for idx, file in enumerate(md_files):
        st.sidebar.markdown("---")
        st.sidebar.text(get_time_display(file))
        download_link = get_file_download_link(file)
        if download_link:
            st.sidebar.markdown(download_link, unsafe_allow_html=True)
        col1, col2, col3, col4 = st.sidebar.columns(4)
        with col1:
            if st.button("📄View", key=f"view_{idx}"):
                st.session_state.selected_file = file
                st.session_state.view_mode = 'view'
        with col2:
            if st.button("✏️Edit", key=f"edit_{idx}"):
                st.session_state.selected_file = file
                st.session_state.view_mode = 'edit'
        with col3:
            if st.button("🔄Run", key=f"rerun_{idx}"):
                # "Fun version" rerun: feed the file's content back through
                # the AI with a humor-outline prompt and save the result.
                try:
                    with open(file, 'r', encoding='utf-8') as f:
                        content = f.read()
                    rerun_prefix = """For the markdown below reduce the text to a humorous fun outline with emojis and markdown outline levels in outline that convey all the facts and adds wise quotes and funny statements to engage the reader:
"""
                    full_prompt = rerun_prefix + content
                    ai_result = perform_ai_lookup(full_prompt)
                    saved_file = save_ai_interaction(content, ai_result, is_rerun=True)
                    if saved_file:
                        st.success(f"Created fun version in {saved_file}")
                        st.session_state.selected_file = saved_file
                        st.session_state.view_mode = 'view'
                except Exception as e:
                    st.error(f"Error during rerun: {e}")
        with col4:
            if st.button("🗑️Delete", key=f"delete_{idx}"):
                if delete_file(file):
                    st.success(f"Deleted {file}")
                    st.rerun()
                else:
                    st.error(f"Failed to delete {file}")
    st.sidebar.markdown("---")
    if st.sidebar.button("📝 Create New Note"):
        _create_sidebar_note()
def perform_ai_lookup(query: str) -> str:
    """Run a RAG reference search plus an LLM answer against the hosted space.

    Shows the combined markdown in the UI, reads the answer aloud, reports
    wall-clock timing, and saves query + results to a timestamped file.
    Returns the combined results string ("" when the LLM reply was too short).
    """
    # Start timestamp captured as a string (second resolution only).
    start_time = time.strftime("%Y-%m-%d %H:%M:%S")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    # First call: semantic RAG search; response1[0] holds the references markdown.
    response1 = client.predict(
        query,
        20,
        "Semantic Search",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md"
    )
    Question = '### 🔎 ' + query + '\r\n'
    References = response1[0]
    ReferenceLinks = ""  # placeholder; never populated in this version
    results = ""
    RunSecondQuery = True  # always-on flag kept so the second call can be disabled easily
    if RunSecondQuery:
        # Second call: direct LLM answer to the same query.
        response2 = client.predict(
            query,
            "mistralai/Mixtral-8x7B-Instruct-v0.1",
            True,
            api_name="/ask_llm"
        )
        # Only treat sufficiently long replies as a real answer.
        # NOTE(review): nesting below reconstructed — Answer is only bound in
        # this branch, so results/markdown must stay inside it; confirm against
        # the original source.
        if len(response2) > 10:
            Answer = response2
            SpeechSynthesis(Answer)
            results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
            st.markdown(results)
    st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
    end_time = time.strftime("%Y-%m-%d %H:%M:%S")
    # Elapsed time recomputed by round-tripping the string timestamps
    # (so it inherits their one-second resolution).
    start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
    end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
    elapsed_seconds = end_timestamp - start_timestamp
    st.write(f"Start time: {start_time}")
    st.write(f"Finish time: {end_time}")
    st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
    # Persist the raw query and combined results for the sidebar file list.
    filename = generate_filename(query, "md")
    create_file(filename, query, results)
    return results
def generate_filename(prompt, file_type):
    """Build "MMDD_HHMM_<sanitized prompt>.<file_type>" using US Central time.

    Runs of non-word characters in the prompt become single underscores and
    the prompt part is capped at 90 characters.
    """
    # zoneinfo (stdlib, 3.9+) replaces the third-party pytz dependency.
    safe_date_time = datetime.now(ZoneInfo('US/Central')).strftime("%m%d_%H%M")
    safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
def create_file(filename, prompt, response):
    """Write prompt and response to filename, separated by a blank line."""
    body = prompt + "\n\n" + response
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(body)
# --- Main Application ---
def main():
    """App entry point: handle ?q= deep links, the file sidebar, and the tree view."""
    st.markdown("### AI Knowledge Tree Builder 🧠🌱 Cultivate Your AI Mindscape!")
    # A ?q=... query param triggers an immediate AI lookup (deep-link flow).
    incoming_query = st.query_params.get('q', '')
    show_initial_content = True
    if incoming_query:
        show_initial_content = False
        st.write(f"### Search query received: {incoming_query}")
        try:
            ai_result = perform_ai_lookup(incoming_query)
            saved_file = save_ai_interaction(incoming_query, ai_result)
            if saved_file:
                st.success(f"Saved interaction to {saved_file}")
                st.session_state.selected_file = saved_file
                st.session_state.view_mode = 'view'
        except Exception as e:
            st.error(f"Error during AI lookup: {e}")
    file_management_sidebar()
    selected = st.session_state.selected_file
    if selected:
        show_initial_content = False
        if os.path.exists(selected):
            st.markdown(f"### Current File: {selected}")
            display_file_content(selected)
        else:
            # Stale selection (e.g. file deleted): clear it and rerun the script.
            st.error("Selected file no longer exists.")
            st.session_state.selected_file = None
            st.rerun()
    if show_initial_content:
        display_markdown_tree()


if __name__ == "__main__":
    main()