# GraphicNovelAI — Streamlit app setup: imports, page config, prompt prefixes,
# the plot-theme glossary, and the markdown-file sidebar ("Jump Gates").
# NOTE(review): several imports are duplicated (json, os, streamlit) — harmless but redundant.
import streamlit as st
import os
import json
import random
# Imports
import base64
import glob
import json
import math
import openai
import os
import pytz
import re
import requests
import streamlit as st
import textract
import time
import zipfile
import huggingface_hub
import dotenv
import streamlit.components.v1 as components  # Import Streamlit Components for HTML5
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from datetime import datetime
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from io import BytesIO
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from openai import ChatCompletion
from PyPDF2 import PdfReader
from templates import bot_template, css, user_template
from xml.etree import ElementTree as ET
from PIL import Image
from urllib.parse import quote  # Ensure this import is included

# Set page configuration with a title and favicon
st.set_page_config(
    page_title="๐๐GraphicNovelAI",
    page_icon="๐๐",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': "https://huggingface.co/spaces/awacke1/GraphicAINovel",
        'About': "# Midjourney: https://discord.com/channels/@me/997514686608191558"
    }
)

# Experimental snippet that can be appended to PromptPrefix2 (see commented line below).
experimentalSubProgram="""
import streamlit as st
import random
import numpy as np
"""

# Prompt prefaces prepended to user queries before calling the LLM:
# PromptPrefix drives the "method steps" outline task, PromptPrefix2 the code-generation task.
PromptPrefix = 'Create a markdown outline and table with appropriate emojis for graphic novel rules defining the method steps of play for topic of '
PromptPrefix2 = 'Create a streamlit python user app. Show full code listing. Create a UI implementing storytelling features using plot twists and recurring interesting named characters with genius traits and file IO, randomness, ten point choice lists, math distribution tradeoffs, witty humorous dilemnas with emoji , rewards, variables, reusable functions with parameters, and data driven app with python libraries and streamlit components for Javascript and HTML5. Use appropriate emojis for labels to summarize and list parts, function, conditions for topic: '
#PromptPrefix2 = PromptPrefix2 + experimentalSubProgram # Super meta program!

st.markdown('''### ๐โจ๐ GraphicNovelAI ''')

with st.expander("Help / About ๐", expanded=False):
    st.markdown('''
    - ๐ **Unlock Plots:** Elevate your vocabulary with AI. Turns plots into thrilling experiences.
    - ๐ **Features:** Creates extensive glossaries & exciting challenges.
    - ๐งโโ๏ธ **Experience:** Become a graphic novel plot wizard, boost your language skills.
    - ๐ **Query Use:** Input `?q=Palindrome` or `?query=Anagram` in URL for new challenges.
    ''')

# Nested glossary: top-level category -> {theme name -> list of plot-element strings}.
# Consumed by search_glossary, display_glossary_grid and display_buttons_with_scores.
roleplaying_glossary = {
    "๐จโ๐ฉโ๐งโ๐ฆ Top Graphic Novel Plot Themes": {
        "Epic Fantasy": [
            "Ancient prophecies and mystical artifacts",
            "Epic battles between good and evil",
            "Complex world-building with diverse cultures",
            "Journey of a reluctant hero",
            "Alliance of unlikely companions",
            "Betrayal and redemption arcs",
            "Magic systems and mythical creatures",
            "Climactic confrontation with a dark lord"
        ],
        "Superhero Sagas": [
            "Origin stories of heroes and villains",
            "Struggle with personal identity and responsibility",
            "Formation of superhero teams",
            "Epic battles to save the city/world",
            "Moral dilemmas and ethical questions",
            "Interdimensional threats and cosmic wars",
            "Evolution of powers and discovery of new abilities",
            "Legacy heroes and passing of the mantle"
        ],
        "Post-Apocalyptic Survival": [
            "Survival in a world after a global catastrophe",
            "Rebuilding society from the ashes",
            "Conflict between surviving factions",
            "Quests for scarce resources",
            "Encounters with mutated creatures",
            "Moral ambiguity and survival ethics",
            "Exploration of human resilience",
            "Discovery of a safe haven or cure"
        ],
        "Science Fiction and Space Opera": [
            "Exploration of distant galaxies",
            "Conflict between alien species",
            "Advanced technology and space travel",
            "Utopian and dystopian societies",
            "Time travel and alternate realities",
            "Artificial intelligence and robotics",
            "Quests for knowledge and discovery",
            "Rebellion against oppressive regimes"
        ],
        "Horror and Supernatural": [
            "Haunted locations and ghost stories",
            "Battles against demonic forces",
            "Survival horror and psychological terror",
            "Folklore and urban legends",
            "Vampires, werewolves, and other monsters",
            "Occult practices and dark magic",
            "Apocalyptic and Lovecraftian themes",
            "Investigations into the unknown"
        ],
        "Romance and Relationship Dramas": [
            "Complex romantic entanglements",
            "Struggles with identity and societal expectations",
            "Heartbreak, healing, and growth",
            "Forbidden love and star-crossed lovers",
            "Contemporary relationship dynamics",
            "Cultural and social differences",
            "Self-discovery and personal fulfillment",
            "Romantic comedies and tragedies"
        ]
    }
}

# 9. Sidebar with UI controls to review and re-run prompts and continue responses
@st.cache_resource
def get_table_download_link(file_path):
    """Read a file, base64-encode it, and build a download href string.

    NOTE(review): the returned value is just the file name — the `<a ...>`
    anchor markup appears to have been stripped from this copy of the source;
    confirm against the original file.
    """
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # get the file extension
    if ext == '.txt':
        mime_type = 'text/plain'
    elif ext == '.py':
        mime_type = 'text/plain'
    elif ext == '.xlsx':
        mime_type = 'text/plain'
    elif ext == '.csv':
        mime_type = 'text/plain'
    elif ext == '.htm':
        mime_type = 'text/html'
    elif ext == '.md':
        mime_type = 'text/markdown'
    elif ext == '.wav':
        mime_type = 'audio/wav'
    else:
        mime_type = 'application/octet-stream'  # general binary data type
    href = f'{file_name}'
    return href

def FileSidebar():
    """Render the sidebar file browser over local *.md files.

    Offers per-file view/open/search/delete buttons plus bulk delete/download,
    and can feed a selected file's contents back through the LLM.
    """
    # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
    # Compose a file sidebar of markdown md files:
    all_files = glob.glob("*.md")
    all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order

    if st.sidebar.button("๐ Delete All Text"):
        for file in all_files:
            os.remove(file)
        st.experimental_rerun()

    if st.sidebar.button("โฌ๏ธ Download All"):
        # NOTE(review): create_zip_of_files / get_zip_download_link are not defined
        # in this portion of the file — confirm they exist elsewhere.
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)

    file_contents=''
    next_action=''
    for file in all_files:
        col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])  # adjust the ratio as needed
        with col1:
            if st.button("๐", key="md_"+file):  # md emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='md'
        with col2:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("๐", key="open_"+file):  # open emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='open'
        with col4:
            if st.button("๐", key="read_"+file):  # search emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='search'
        with col5:
            if st.button("๐", key="delete_"+file):
                os.remove(file)
                st.experimental_rerun()

    if len(file_contents) > 0:
        if next_action=='open':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
        if next_action=='md':
            st.markdown(file_contents)
            buttonlabel = '๐Run'
            if st.button(key='RunWithLlamaandGPT', label = buttonlabel):
                user_prompt = file_contents
                # gpt
                # NOTE(review): `response`, `model_choice`, `choice` and `should_save`
                # are not defined at this point (should_save is assigned later in the
                # file, *after* FileSidebar() is called) — these paths will raise
                # NameError when exercised; confirm intended definitions.
                try:
                    st.write('๐Running..')
                    response2 = chat_with_model(user_prompt, file_contents, model_choice)
                    filename2 = generate_filename(file_contents, choice)
                    create_file(filename2, user_prompt, response, should_save)
                    all=response2
                except:
                    st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
                # NOTE(review): if the except path ran, `all` is still the builtin
                # function here, so SpeechSynthesis receives a callable, not text.
                SpeechSynthesis(all)
        if next_action=='search':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
            st.write('๐Running..')
            user_prompt = file_contents
            # gpt
            try:
                st.write('๐Running with GPT.')
                response2 = chat_with_model(user_prompt, file_contents, model_choice)
                filename2 = generate_filename(file_contents, choice)
                create_file(filename2, user_prompt, response, should_save)
                all=response2
            except:
                st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
            SpeechSynthesis(all)
    # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------

FileSidebar()


# ---- Art Card Sidebar with Random Selection of image:
def get_image_as_base64(url):
    """Fetch an image over HTTP and return it base64-encoded, or None on failure."""
    response = requests.get(url)
    if response.status_code == 200:
        # Convert the image to base64
        return base64.b64encode(response.content).decode("utf-8")
    else:
        return None

def create_download_link(filename, base64_str):
    """Build the download-link string for an image.

    NOTE(review): as with get_table_download_link, the anchor/href markup
    appears to have been stripped from this copy — confirm against original.
    """
    href = f'Download Image'
    return href

# List of image URLs
image_urls = [
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/f1RmBh0D92Lm7eJXg0q32.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/gv1xmIiXh1NGTeeV-cYF2.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2YsnDyc_nDNW71PPKozdN.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/G_GkRD_IT3f14K7gWlbwi.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2-KfxcuXRcTFiHf4XlNsX.png"
]

# Select a random URL from the list
selected_image_url = random.choice(image_urls)

# Get the base64 encoded string of the selected image
selected_image_base64 = get_image_as_base64(selected_image_url)

if selected_image_base64 is not None:
    with st.sidebar:
        st.markdown("""### Word Game AI""")
        # Display the image
        # NOTE(review): the img markup appears stripped from this copy of the source.
        st.markdown(f"")
        # Create and display the download link
        download_link = create_download_link("downloaded_image.png", selected_image_base64)
        st.markdown(download_link, unsafe_allow_html=True)
else:
    st.sidebar.write("Failed to load the image.")
# ---- Art Card Sidebar with random selection of image.
# Ensure the directory for storing scores exists
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)

# Function to generate a unique key for each button, including an emoji
def generate_key(label, header, idx):
    """Return a deterministic widget key of the form '<header>_<label>_<idx>_key'."""
    return f"{header}_{label}_{idx}_key"

# Function to increment and save score
def update_score(key, increment=1):
    """Increment the click count and score stored in scores/<key>.json.

    Creates the file on first use; returns the new total score.
    """
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
    else:
        score_data = {"clicks": 0, "score": 0}
    score_data["clicks"] += 1
    score_data["score"] += increment
    with open(score_file, "w") as file:
        json.dump(score_data, file)
    return score_data["score"]

# Function to load score
def load_score(key):
    """Return the stored score for `key`, or 0 if no score file exists yet."""
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
        return score_data["score"]
    return 0

def search_glossary(query):
    """Echo glossary matches for `query`, then run the two-stage GPT pipeline.

    Stage 1 asks for a method-steps outline (PromptPrefix), stage 2 asks for a
    Streamlit implementation of that outline (PromptPrefix2). Both responses are
    persisted via create_file and spoken via SpeechSynthesis; returns the
    combined transcript.

    BUGFIX: the original membership test iterated the *inner dict's keys*
    (theme names) only, so the actual plot-element terms were never matched.
    Now both theme names and their term lists are searched (same exact-match
    semantics as before).
    """
    q = query.lower()
    for category, games in roleplaying_glossary.items():
        theme_hit = any(q == game.lower() for game in games)
        term_hit = any(q == term.lower() for terms in games.values() for term in terms)
        if theme_hit or term_hit:
            st.markdown(f"#### {category}")
            st.write(f"- {query}")
    # `transcript` replaces the original local `all`, which shadowed the builtin.
    transcript = ""
    query2 = PromptPrefix + query  # Add prompt preface for method step task behavior
    # st.write('## ' + query2)
    st.write('## ๐ Running with GPT.')
    response = chat_with_model(query2)
    filename = generate_filename(query2 + ' --- ' + response, "md")
    create_file(filename, query, response, should_save)
    query3 = PromptPrefix2 + query + ' creating streamlit functions that implement outline of method steps below: ' + response  # Add prompt preface for coding task behavior
    # st.write('## ' + query3)
    st.write('## ๐ Coding with GPT.')
    response2 = chat_with_model(query3)
    filename_txt = generate_filename(query + ' --- ' + response2, "py")
    create_file(filename_txt, query, response2, should_save)
    transcript = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
    filename_txt2 = generate_filename(query + ' --- ' + transcript, "md")
    create_file(filename_txt2, query, transcript, should_save)
    SpeechSynthesis(transcript)
    return transcript

# Function to display the glossary in a structured format
def display_glossary(glossary, area):
    """Render one glossary area as numbered lists, one section per theme."""
    if area in glossary:
        st.subheader(f"๐ Glossary for {area}")
        for game, terms in glossary[area].items():
            st.markdown(f"### {game}")
            for idx, term in enumerate(terms, start=1):
                st.write(f"{idx}. {term}")

# Function to display the entire glossary in a grid format with links
def display_glossary_grid(roleplaying_glossary):
    """Render every glossary term with a row of emoji search links."""
    # NOTE(review): several of these emoji keys arrived garbled and identical in
    # this copy of the source; identical keys collapse in a dict literal —
    # confirm the original emoji against the source repository.
    search_urls = {
        "๐": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
        "๐": lambda k: f"https://www.google.com/search?q={quote(k)}",
        "โถ๏ธ": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
        "๐": lambda k: f"https://www.bing.com/search?q={quote(k)}",
        "๐ฒ": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(k)}",  # this url plus query!
        "๐": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix)}{quote(k)}",  # this url plus query!
        "๐": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix2)}",  # this url plus query!
    }
    for category, details in roleplaying_glossary.items():
        st.write(f"### {category}")
        cols = st.columns(len(details))  # Create dynamic columns based on the number of games
        for idx, (game, terms) in enumerate(details.items()):
            with cols[idx]:
                st.markdown(f"#### {game}")
                for term in terms:
                    links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
                    st.markdown(f"{term} {links_md}", unsafe_allow_html=True)

game_emojis = {
    "Dungeons and Dragons": "๐",
    "Call of Cthulhu": "๐",
    "GURPS": "๐ฒ",
    "Pathfinder": "๐บ๏ธ",
    "Kindred of the East": "๐ ",
    "Changeling": "๐",
}

topic_emojis = {
    "Core Rulebooks": "๐",
    "Maps & Settings": "๐บ๏ธ",
    "Game Mechanics & Tools": "โ๏ธ",
    "Monsters & Adversaries": "๐น",
    "Campaigns & Adventures": "๐",
    "Creatives & Assets": "๐จ",
    "Game Master Resources": "๐ ๏ธ",
    "Lore & Background": "๐",
    "Character Development": "๐ง",
    "Homebrew Content": "๐ง",
    "General Topics": "๐",
}

# Adjusted display_buttons_with_scores function
def display_buttons_with_scores():
    """Render one scored button per glossary term; clicking bumps the score and
    kicks off search_glossary with an emoji-decorated prompt."""
    for category, games in roleplaying_glossary.items():
        category_emoji = topic_emojis.get(category, "๐")  # Default to search icon if no match
        st.markdown(f"## {category_emoji} {category}")
        for game, terms in games.items():
            game_emoji = game_emojis.get(game, "๐ฎ")  # Default to generic game controller if no match
            for term in terms:
                key = f"{category}_{game}_{term}".replace(' ', '_').lower()
                score = load_score(key)
                if st.button(f"{game_emoji} {term} {score}", key=key):
                    update_score(key)
                    # Create a dynamic query incorporating emojis and formatting for clarity
                    query_prefix = f"{category_emoji} {game_emoji} **{game} - {category}:**"
                    # ----------------------------------------------------------------------------------------------
                    #query_body = f"Create a detailed outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
                    query_body = f"Create a streamlit python app.py that produces a detailed markdown outline and emoji laden user interface with labels with the entity name and emojis in all labels with a set of streamlit UI components with drop down lists and dataframes and buttons with expander and sidebar for the app to run the data as default values mostly in text boxes. Feature a 3 point outline sith 3 subpoints each where each line has about six words describing this and also contain appropriate emoji for creating sumamry of all aspeccts of this topic. an outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
                    response = search_glossary(query_prefix + query_body)

def fetch_wikipedia_summary(keyword):
    """Placeholder: return a canned summary string for `keyword`.

    A real implementation would query the Wikipedia API via requests.
    """
    return f"Summary for {keyword}. For more information, visit Wikipedia."
def create_search_url_youtube(keyword): base_url = "https://www.youtube.com/results?search_query=" return base_url + keyword.replace(' ', '+') def create_search_url_bing(keyword): base_url = "https://www.bing.com/search?q=" return base_url + keyword.replace(' ', '+') def create_search_url_wikipedia(keyword): base_url = "https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search=" return base_url + keyword.replace(' ', '+') def create_search_url_google(keyword): base_url = "https://www.google.com/search?q=" return base_url + keyword.replace(' ', '+') def create_search_url_ai(keyword): base_url = "https://huggingface.co/spaces/awacke1/GraphicAINovel?q=" return base_url + keyword.replace(' ', '+') def display_images_and_wikipedia_summaries(): image_files = [f for f in os.listdir('.') if f.endswith('.png')] if not image_files: st.write("No PNG images found in the current directory.") return # Sort image_files based on the length of the keyword to create a visually consistent grid image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0])) # Calculate the grid size based on the sorted keywords grid_sizes = [len(f.split('.')[0]) for f in image_files_sorted] # Dynamically adjust column size based on keyword length col_sizes = ['small' if size <= 4 else 'medium' if size <= 8 else 'large' for size in grid_sizes] # Create a map for number of columns to use for each size num_columns_map = {"small": 4, "medium": 3, "large": 2} current_grid_size = 0 for image_file, col_size in zip(image_files_sorted, col_sizes): if current_grid_size != num_columns_map[col_size]: cols = st.columns(num_columns_map[col_size]) current_grid_size = num_columns_map[col_size] col_index = 0 with cols[col_index % current_grid_size]: image = Image.open(image_file) st.image(image, caption=image_file, use_column_width=True) # Display Wikipedia and Google search links keyword = image_file.split('.')[0] # Assumes keyword is the file name without extension wikipedia_url 
= create_search_url_wikipedia(keyword) google_url = create_search_url_google(keyword) youtube_url = create_search_url_youtube(keyword) bing_url = create_search_url_bing(keyword) ai_url = create_search_url_ai(keyword) links_md = f""" [Wikipedia]({wikipedia_url}) | [Google]({google_url}) | [YouTube]({youtube_url}) | [Bing]({bing_url}) | [AI]({ai_url}) """ st.markdown(links_md) col_index += 1 def display_images_and_wikipedia_summaries_deprecated(): image_files = [f for f in os.listdir('.') if f.endswith('.png')] if not image_files: st.write("No PNG images found in the current directory.") return for image_file in image_files: image = Image.open(image_file) st.image(image, caption=image_file, use_column_width=True) keyword = image_file.split('.')[0] # Assumes keyword is the file name without extension # Display Wikipedia and Google search links wikipedia_url = create_search_url_wikipedia(keyword) google_url = create_search_url_google(keyword) youtube_url = create_search_url_youtube(keyword) bing_url = create_search_url_bing(keyword) ai_url = create_search_url_ai(keyword) links_md = f""" [Wikipedia]({wikipedia_url}) | [Google]({google_url}) | [YouTube]({youtube_url}) | [Bing]({bing_url}) | [AI]({ai_url}) """ st.markdown(links_md) def get_all_query_params(key): return st.query_params().get(key, []) def clear_query_params(): st.query_params() # Function to display content or image based on a query def display_content_or_image(query): # Check if the query matches any glossary term for category, terms in transhuman_glossary.items(): for term in terms: if query.lower() in term.lower(): st.subheader(f"Found in {category}:") st.write(term) return True # Return after finding and displaying the first match # Check for an image match in a predefined directory (adjust path as needed) image_dir = "images" # Example directory where images are stored image_path = f"{image_dir}/{query}.png" # Construct image path with query if os.path.exists(image_path): st.image(image_path, 
caption=f"Image for {query}") return True # If no content or image is found st.warning("No matching content or image found.") return False # ------------------------------------ def add_Med_Licensing_Exam_Dataset(): import streamlit as st from datasets import load_dataset dataset = load_dataset("augtoma/usmle_step_1")['test'] # Using 'test' split st.title("USMLE Step 1 Dataset Viewer") if len(dataset) == 0: st.write("๐ข The dataset is empty.") else: st.write(""" ๐ Use the search box to filter questions or use the grid to scroll through the dataset. """) # ๐ฉโ๐ฌ Search Box search_term = st.text_input("Search for a specific question:", "") # ๐ Pagination records_per_page = 100 num_records = len(dataset) num_pages = max(int(num_records / records_per_page), 1) # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page) if num_pages > 1: page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1))) else: page_number = 1 # Only one page # ๐ Display Data start_idx = (page_number - 1) * records_per_page end_idx = start_idx + records_per_page # ๐งช Apply the Search Filter filtered_data = [] for record in dataset[start_idx:end_idx]: if isinstance(record, dict) and 'text' in record and 'id' in record: if search_term: if search_term.lower() in record['text'].lower(): st.markdown(record) filtered_data.append(record) else: filtered_data.append(record) # ๐ Render the Grid for record in filtered_data: st.write(f"## Question ID: {record['id']}") st.write(f"### Question:") st.write(f"{record['text']}") st.write(f"### Answer:") st.write(f"{record['answer']}") st.write("---") st.write(f"๐ Total Records: {num_records} | ๐ Displaying {start_idx+1} to {min(end_idx, num_records)}") # 1. 
Constants and Top Level UI Variables # My Inference API Copy API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama # Meta's Original - Chat HF Free Version: #API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf" API_KEY = os.getenv('API_KEY') MODEL1="meta-llama/Llama-2-7b-chat-hf" MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf" HF_KEY = os.getenv('HF_KEY') headers = { "Authorization": f"Bearer {HF_KEY}", "Content-Type": "application/json" } key = os.getenv('OPENAI_API_KEY') prompt = f"Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface." should_save = st.sidebar.checkbox("๐พ Save", value=True, help="Save your session data.") # 2. Prompt label button demo for LLM def add_witty_humor_buttons(): with st.expander("Wit and Humor ๐คฃ", expanded=True): # Tip about the Dromedary family st.markdown("๐ฌ **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. 
Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.") # Define button descriptions descriptions = { "Generate Limericks ๐": "Write ten random adult limericks based on quotes that are tweet length and make you laugh ๐ญ", "Wise Quotes ๐ง": "Generate ten wise quotes that are tweet length ๐ฆ", "Funny Rhymes ๐ค": "Create ten funny rhymes that are tweet length ๐ถ", "Medical Jokes ๐": "Create ten medical jokes that are tweet length ๐ฅ", "Minnesota Humor โ๏ธ": "Create ten jokes about Minnesota that are tweet length ๐จ๏ธ", "Top Funny Stories ๐": "Create ten funny stories that are tweet length ๐", "More Funny Rhymes ๐๏ธ": "Create ten more funny rhymes that are tweet length ๐ต" } # Create columns col1, col2, col3 = st.columns([1, 1, 1], gap="small") # Add buttons to columns if col1.button("Wise Limericks ๐"): StreamLLMChatResponse(descriptions["Generate Limericks ๐"]) if col2.button("Wise Quotes ๐ง"): StreamLLMChatResponse(descriptions["Wise Quotes ๐ง"]) #if col3.button("Funny Rhymes ๐ค"): # StreamLLMChatResponse(descriptions["Funny Rhymes ๐ค"]) col4, col5, col6 = st.columns([1, 1, 1], gap="small") if col4.button("Top Ten Funniest Clean Jokes ๐"): StreamLLMChatResponse(descriptions["Top Ten Funniest Clean Jokes ๐"]) if col5.button("Minnesota Humor โ๏ธ"): StreamLLMChatResponse(descriptions["Minnesota Humor โ๏ธ"]) if col6.button("Origins of Medical Science True Stories"): StreamLLMChatResponse(descriptions["Origins of Medical Science True Stories"]) col7 = st.columns(1, gap="small") if col7[0].button("Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"): StreamLLMChatResponse(descriptions["Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"]) def SpeechSynthesis(result): documentHTML5='''