import requests
import os, sys, json
import gradio as gr
import openai
from openai import OpenAI
import time
import re
import io
from PIL import Image, ImageDraw, ImageOps, ImageFont
import base64

from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader, WebBaseLoader, UnstructuredWordDocumentLoader, DirectoryLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.schema import AIMessage, HumanMessage
from langchain.llms import HuggingFaceHub
from langchain.llms import HuggingFaceTextGenInference
from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from chromadb.errors import InvalidDimensionException

from utils import *
from beschreibungen import *

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

# Global flag: set to True once the documents have been loaded, split and stored
# in the vector store, so that loading/splitting is not repeated on every request.
splittet = False

#################################################
# Prompt templates (German system instruction, plain LLM template and RAG template)
template = """Antworte in deutsch, wenn es nicht explizit anders gefordert wird. Wenn du die Antwort nicht kennst, antworte einfach, dass du es nicht weißt. Versuche nicht, die Antwort zu erfinden oder aufzumocken. Halte die Antwort kurz aber ausführlich genug und exakt."""

llm_template = "Beantworte die Frage am Ende. " + template + "Frage: {question} Hilfreiche Antwort: "
rag_template = "Nutze die folgenden Kontext Teile, um die Frage am Ende zu beantworten. " + template + "{context} Frage: {question} Hilfreiche Antwort: "

LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"],
                                  template = llm_template)
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"],
                                  template = rag_template)

#################################################
# API keys and auth headers
HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
OAI_API_KEY = os.getenv("OPENAI_API_KEY")
HEADERS = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"}

# Working directories and paths
PATH_WORK = "."
CHROMA_DIR  = "/chroma"
YOUTUBE_DIR = "/youtube"
HISTORY_PFAD = "/data/history"

# Example sources that get loaded into the vector store
PDF_URL       = "https://arxiv.org/pdf/2303.08774.pdf"
WEB_URL       = "https://openai.com/research/gpt-4"
YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"

#################################################
# Model configuration
MODEL_NAME       = "gpt-3.5-turbo-16k"        # OpenAI chat model
MODEL_NAME_IMAGE = "gpt-4-vision-preview"     # OpenAI vision model for questions about uploaded images

# HuggingFace models (used when "HuggingFace" is selected in the UI)
repo_id = "HuggingFaceH4/zephyr-7b-alpha"
MODEL_NAME_HF  = "mistralai/Mixtral-8x7B-Instruct-v0.1"
MODEL_NAME_OAI_ZEICHNEN = "dall-e-3"

# HuggingFace inference endpoint for image generation (Stable Diffusion)
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"

os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
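

#################################################
# Helper functions for the Gradio UI (chat reset, text/file handling, image conversion)
#################################################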
					
						

# Clear the attached file and hide the image preview
def clear_all():
    return None, gr.Image(visible=False)


# Append the user prompt (and an optionally attached file) to the chatbot display
def add_text(chatbot, history, prompt, file):
    if file is None:
        chatbot = chatbot + [(prompt, None)]
    else:
        if prompt == "":
            chatbot = chatbot + [((file.name,), "Prompt fehlt!")]
        else:
            chatbot = chatbot + [((file.name,), None), (prompt, None)]
    print("chatbot nach add_text............")
    print(chatbot)
    return chatbot, history, prompt, ""


# Same as add_text, but for the image-generation tab (no file attachment)
def add_text2(chatbot, prompt):
    if prompt == "":
        chatbot = chatbot + [("", "Prompt fehlt!")]
    else:
        chatbot = chatbot + [(prompt, None)]
    print("chatbot nach add_text2............")
    print(chatbot)
    return chatbot, prompt, ""


# Show a small preview of the uploaded file and remember it in the state
def file_anzeigen(file):
    return gr.Image(width=47, visible=True, interactive=False, height=47, min_width=47, show_download_button=False, show_share_button=False, show_label=False, scale=0.5), file, file


# Remove the attached file and hide its preview
def file_loeschen():
    return None, gr.Image(visible=False)


# Cancel a running generation
def cancel_outputing():
    reset_textbox()
    return "Stop Done"


def reset_textbox():
    return gr.update(value=""), ""


# Convert a PIL image to PNG bytes so it can be embedded into the chat as base64
def umwandeln_fuer_anzeige(image):
    buffer = io.BytesIO()
    image.save(buffer, format='PNG')
    return buffer.getvalue()


# Generate an image with DALL-E 3 and return it as a PIL image
# (currently not referenced elsewhere in this script; the drawing tab uses generate_bild)
def create_picture(history, prompt):
    client = OpenAI()
    response = client.images.generate(model="dall-e-3", prompt=prompt, size="1024x1024", quality="standard", n=1)
    image_url = response.data[0].url

    # Download the generated image and read it from the response bytes
    # (response.raw is only usable with stream=True)
    response2 = requests.get(image_url)
    image = Image.open(io.BytesIO(response2.content))
    return image


# Build headers and payload for an OpenAI vision request:
# the local image file is base64-encoded and sent together with the prompt
def process_image(image_path, prompt):
    # Encode the uploaded image as base64
    with open(image_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OAI_API_KEY}"
    }
    payload = {
        "model": MODEL_NAME_IMAGE,
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encoded_string}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }
    return headers, payload


# Hand the textbox content over to the request and clear the textbox
def transfer_input(inputs):
    textbox = reset_textbox()
    return (
        inputs,
        gr.update(value=""),
        gr.Button.update(visible=True),
    )
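

#################################################
# Document loading, splitting and vector-store handling (RAG)
#################################################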
					
						

# Create a DirectoryLoader for one file type; only PDF and Word documents are mapped here
def create_directory_loader(file_type, directory_path):
    loaders = {
        '.pdf': PyPDFLoader,
        '.word': UnstructuredWordDocumentLoader,
    }
    return DirectoryLoader(
        path=directory_path,
        glob=f"**/*{file_type}",
        loader_cls=loaders[file_type],
    )


# Load all configured sources (local PDF/Word files, a PDF URL, a web page and
# two YouTube videos transcribed with Whisper) and split them into chunks
def document_loading_splitting():
    global splittet

    docs = []

    # Local documents
    pdf_loader = create_directory_loader('.pdf', './chroma/pdf')
    word_loader = create_directory_loader('.word', './chroma/word')
    pdf_documents = pdf_loader.load()
    word_documents = word_loader.load()
    docs.extend(pdf_documents)
    docs.extend(word_documents)

    # Remote sources
    loader = PyPDFLoader(PDF_URL)
    docs.extend(loader.load())

    loader = WebBaseLoader(WEB_URL)
    docs.extend(loader.load())

    loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1, YOUTUBE_URL_2], PATH_WORK + YOUTUBE_DIR), OpenAIWhisperParser())
    docs.extend(loader.load())

    # Split everything into overlapping chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150, chunk_size = 1500)
    splits = text_splitter.split_documents(docs)

    splittet = True
    return splits


# Persist the document chunks in a local Chroma vector store (OpenAI embeddings)
def document_storage_chroma(splits):
    Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(disallowed_special = ()), persist_directory = PATH_WORK + CHROMA_DIR)


# Alternative: store the chunks in MongoDB Atlas vector search
# (MongoDBAtlasVectorSearch and the MONGODB_* constants are not defined in this file;
# they are expected to come from the wildcard imports above)
def document_storage_mongodb(splits):
    MongoDBAtlasVectorSearch.from_documents(documents = splits,
                                            embedding = OpenAIEmbeddings(disallowed_special = ()),
                                            collection = MONGODB_COLLECTION,
                                            index_name = MONGODB_INDEX_NAME)


# Open the persisted Chroma store for retrieval
def document_retrieval_chroma(llm, prompt):
    embeddings = OpenAIEmbeddings()
    db = Chroma(embedding_function = embeddings, persist_directory = PATH_WORK + CHROMA_DIR)
    return db


# Same as document_retrieval_chroma, but without llm/prompt arguments
def document_retrieval_chroma2():
    embeddings = OpenAIEmbeddings()
    db = Chroma(embedding_function = embeddings, persist_directory = PATH_WORK + CHROMA_DIR)
    print ("Chroma DB bereit ...................")
    return db


# Open a MongoDB Atlas vector store for retrieval
# (connection constants expected from the wildcard imports above)
def document_retrieval_mongodb(llm, prompt):
    db = MongoDBAtlasVectorSearch.from_connection_string(MONGODB_URI,
                                                         MONGODB_DB_NAME + "." + MONGODB_COLLECTION_NAME,
                                                         OpenAIEmbeddings(disallowed_special = ()),
                                                         index_name = MONGODB_INDEX_NAME)
    return db


# Plain LLM call without retrieval
def llm_chain(llm, prompt):
    llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
    result = llm_chain.run({"question": prompt})
    return result


# Retrieval-augmented call: the top-k chunks from the vector store are passed
# to the LLM together with the question
def rag_chain(llm, prompt, db):
    rag_chain = RetrievalQA.from_chain_type(llm,
                                            chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
                                            retriever = db.as_retriever(search_kwargs = {"k": 3}),
                                            return_source_documents = True)
    result = rag_chain({"query": prompt})
    return result["result"]


# Retrieval variant that only builds an enriched prompt (used for the vision model):
# the top-k similar chunks are appended to the question as numbered context pieces
def rag_chain2(prompt, db, k=3):
    rag_template = "Nutze die folgenden Kontext Teile am Ende, um die Frage zu beantworten. " + template + "Frage: " + prompt + "Kontext Teile: "
    retrieved_chunks = db.similarity_search(prompt, k)

    neu_prompt = rag_template
    for i, chunk in enumerate(retrieved_chunks):
        # append only the chunk text, not the full Document representation
        neu_prompt += f"{i+1}. {chunk.page_content}\n"

    return neu_prompt
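

#################################################
# Prompt construction with chat history
#################################################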
					
						
# Concatenate the chat history and the new user text into one plain-text prompt
# (max_length is currently not enforced)
def generate_prompt_with_history(text, history, max_length=4048):
    prompt = ""
    history = ["\n{}\n{}".format(x[0], x[1]) for x in history]
    history.append("\n{}\n".format(text))
    history_text = ""
    flag = False
    for x in history[::-1]:
        history_text = x + history_text
        flag = True
    print ("Prompt: ..........................")
    print(prompt + history_text)
    if flag:
        return prompt + history_text
    else:
        return None


# Convert the chat history into the OpenAI messages format and append the new prompt
def generate_prompt_with_history_openai(prompt, history):
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})

    history_openai_format.append({"role": "user", "content": prompt})
    print("openai history und prompt................")
    print(history_openai_format)
    return history_openai_format


# Convert the chat history into the <human>/<bot> text format used by HuggingFace chat models
def generate_prompt_with_history_hf(prompt, history):
    history_transformer_format = history + [[prompt, ""]]

    messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])
                        for item in history_transformer_format])
    return messages


# Convert the chat history into LangChain message objects
def generate_prompt_with_history_langchain(prompt, history):
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=prompt))

    return history_langchain_format
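

#################################################
# Generation entry points called from the Gradio UI
#################################################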
					
						
# Dispatch a chat request: plain text goes to generate_text, an attached image goes
# to generate_text_zu_bild; the answer is then streamed into the chat character by character
def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3):
    if file is None:
        # Text-only request
        result = generate_text(prompt, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3)
        history = history + [(prompt, result)]
    else:
        # Request with an attached image -> vision model
        result = generate_text_zu_bild(file, prompt, k, rag_option, chatbot)
        history = history + [((file,), None), (prompt, result)]

    print("result..................")
    print(result)
    print("history.......................")
    print(chatbot)

    # Stream the answer into the last chatbot entry
    chatbot[-1][1] = ""
    for character in result:
        chatbot[-1][1] += character
        time.sleep(0.03)
        yield chatbot, history, "Generating"
        if shared_state.interrupted:
            shared_state.recover()
            try:
                yield chatbot, history, "Stop: Success"
            except:
                pass


# Generate an image via the HuggingFace inference API (Stable Diffusion) and
# embed it into the chatbot as a base64 <img> tag
def generate_bild(prompt, chatbot, temperature=0.5, max_new_tokens=4048, top_p=0.6, repetition_penalty=1.3):
    data = {"inputs": prompt}
    response = requests.post(API_URL, headers=HEADERS, json=data)
    print("fertig Bild")
    result = response.content

    # The API returns the raw image bytes
    image = Image.open(io.BytesIO(result))
    image_64 = umwandeln_fuer_anzeige(image)
    chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
    return chatbot, "Success"


# Answer a question about an uploaded image with the OpenAI vision model;
# if RAG is enabled, context chunks from the vector store are added to the prompt first
def generate_text_zu_bild(file, prompt, k, rag_option, chatbot):
    global splittet

    prompt_neu = prompt
    if rag_option == "An":
        # Build the vector store on first use
        if not splittet:
            splits = document_loading_splitting()
            document_storage_chroma(splits)
        db = document_retrieval_chroma2()

        # Enrich the prompt with the retrieved chunks and the chat history
        neu_text_mit_chunks = rag_chain2(prompt, db, k)
        prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, chatbot)

    headers, payload = process_image(file, prompt_neu)
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    print("response....................")
    print(response)

    data = response.json()
    result = data['choices'][0]['message']['content']
    return result


# Answer a text prompt, either with OpenAI or a HuggingFace model,
# optionally with retrieval augmentation from the Chroma vector store
def generate_text(prompt, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3):
    global splittet

    # Fall back to the key from the environment if the user did not enter one
    if openai_api_key == "" or openai_api_key == "sk-":
        openai_api_key = OAI_API_KEY
    if rag_option is None:
        raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
    if prompt == "":
        raise gr.Error("Prompt ist erforderlich.")

    try:
        # Select the backend model and build the prompt including the chat history
        if model_option == "OpenAI":
            print("OpenAI Anfrage.......................")
            llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)
            if rag_option == "An":
                history_text_und_prompt = generate_prompt_with_history(prompt, history)
            else:
                history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
        else:
            print("HF Anfrage.......................")
            llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
            print("HF")
            history_text_und_prompt = generate_prompt_with_history(prompt, history)

        # With RAG: build the vector store on first use, then answer with retrieved context
        if rag_option == "An":
            print("RAG aktiviert.......................")
            if not splittet:
                splits = document_loading_splitting()
                document_storage_chroma(splits)
            db = document_retrieval_chroma(llm, history_text_und_prompt)
            print("LLM aufrufen mit RAG: ...........")
            result = rag_chain(llm, history_text_und_prompt, db)
        else:
            print("LLM aufrufen ohne RAG: ...........")
            result = llm_chain(llm, history_text_und_prompt)

    except Exception as e:
        raise gr.Error(str(e))

    return result
# Description text for the drawing tab
description2 = "<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> zum Zeichnen verwendet. Zur Zeit wird hier Stable Diffusion verwendet.\n\n"


# Log like/dislike feedback on chatbot answers
def vote(data: gr.LikeData):
    if data.liked:
        print("You upvoted this response: " + data.value)
    else:
        print("You downvoted this response: " + data.value)


print ("Start GUIneu")
with open("custom.css", "r", encoding="utf-8") as f:
    customCSS = f.read()

# Additional generation parameters (defined here but not wired to an event below)
additional_inputs = [
    gr.Slider(label="Temperature", value=0.65, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten", visible=True),
    gr.Slider(label="Max new tokens", value=1024, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens", visible=True),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit.", visible=True),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
]
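

#################################################
# Gradio UI: chatbot tab, drawing tab and event wiring
#################################################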
					
						
with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
    # Session state
    history = gr.State([])
    user_question = gr.State("")
    user_question2 = gr.State("")
    attached_file = gr.State(None)

    gr.Markdown(description_top)
    with gr.Tab("Chatbot"):
        with gr.Row():
            gr.HTML("LI Chatbot")
            status_display = gr.Markdown("Success", elem_id="status_display")
        with gr.Row():
            with gr.Column(scale=5):
                with gr.Row():
                    chatbot = gr.Chatbot(elem_id="li-chat", show_copy_button=True)
                with gr.Row():
                    with gr.Column(scale=12):
                        user_input = gr.Textbox(
                            show_label=False, placeholder="Gib hier deinen Prompt ein...",
                            container=False
                        )
                    with gr.Column(min_width=70, scale=1):
                        submitBtn = gr.Button("Senden")
                    with gr.Column(min_width=70, scale=1):
                        cancelBtn = gr.Button("Stop")
                with gr.Row():
                    image_display = gr.Image(visible=False)
                    upload = gr.UploadButton("📁", file_types=["image", "video", "audio"], scale = 10)
                    emptyBtn = gr.ClearButton([user_input, chatbot, history, attached_file, image_display], value="🧹 Neue Session", scale=10)

            with gr.Column():
                with gr.Column(min_width=50, scale=1):
                    with gr.Tab(label="Parameter Einstellung"):
                        # RAG and model selection
                        rag_option = gr.Radio(["Aus", "An"], label="LI Erweiterungen (RAG)", value = "Aus")
                        model_option = gr.Radio(["OpenAI", "HuggingFace"], label="Modellauswahl", value = "OpenAI")

                        top_p = gr.Slider(
                            minimum=0.0,
                            maximum=1.0,
                            value=0.95,
                            step=0.05,
                            interactive=True,
                            label="Top-p",
                            visible=False,
                        )
                        temperature = gr.Slider(
                            minimum=0.1,
                            maximum=2.0,
                            value=0.5,
                            step=0.1,
                            interactive=True,
                            label="Temperature",
                        )
                        max_length_tokens = gr.Slider(
                            minimum=0,
                            maximum=512,
                            value=512,
                            step=8,
                            interactive=True,
                            label="Max Generation Tokens",
                            visible=False,
                        )
                        max_context_length_tokens = gr.Slider(
                            minimum=0,
                            maximum=4096,
                            value=2048,
                            step=128,
                            interactive=True,
                            label="Max History Tokens",
                            visible=False,
                        )
                        repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=False)
                        anzahl_docs = gr.Slider(label="Anzahl Dokumente", value=3, minimum=1, maximum=10, step=1, interactive=True, info="wie viele Dokumententeile aus dem Vektorstore an den prompt gehängt werden", visible=False)
                        openai_key = gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1, visible = False)
    with gr.Tab("KI zum Zeichnen"):
        with gr.Row():
            gr.HTML("LI Zeichnen mit KI")
            status_display2 = gr.Markdown("Success", elem_id="status_display")
        gr.Markdown(description2)
        with gr.Row():
            with gr.Column(scale=5):
                with gr.Row():
                    chatbot_bild = gr.Chatbot(elem_id="li-zeichnen")
                with gr.Row():
                    with gr.Column(scale=12):
                        user_input2 = gr.Textbox(
                            show_label=False, placeholder="Gib hier deinen Prompt ein...",
                            container=False
                        )
                    with gr.Column(min_width=70, scale=1):
                        submitBtn2 = gr.Button("Senden")

                with gr.Row():
                    emptyBtn2 = gr.ClearButton([user_input2, chatbot_bild], value="🧹 Neue Session", scale=10)

    gr.Markdown(description)
    #################################################
    # Event wiring for the chatbot tab
    predict_args = dict(
        fn=generate_auswahl,
        inputs=[
            user_question,
            attached_file,
            chatbot,
            history,
            rag_option,
            model_option,
            openai_key,
            anzahl_docs,
            top_p,
            temperature,
            max_length_tokens,
            max_context_length_tokens,
            repetition_penalty
        ],
        outputs=[chatbot, history, status_display],
        show_progress=True,
    )

    reset_args = dict(
        fn=reset_textbox, inputs=[], outputs=[user_input, status_display]
    )

    # First copy the user input into the chat display, then run the prediction
    transfer_input_args = dict(
        fn=add_text, inputs=[chatbot, history, user_input, attached_file], outputs=[chatbot, history, user_question, user_input], show_progress=True
    )

    predict_event1 = user_input.submit(**transfer_input_args, queue=False).then(**predict_args)
    predict_event2 = submitBtn.click(**transfer_input_args, queue=False).then(**predict_args)
    predict_event3 = upload.upload(file_anzeigen, [upload], [image_display, image_display, attached_file])
    emptyBtn.click(clear_all, [], [attached_file, image_display])
    image_display.select(file_loeschen, [], [attached_file, image_display])

    # The stop button cancels the running prediction events
    cancelBtn.click(cancel_outputing, [], [status_display], cancels=[predict_event1, predict_event2, predict_event3])

    #################################################
    # Event wiring for the drawing tab
    predict_args2 = dict(
        fn=generate_bild,
        inputs=[
            user_question2,
            chatbot_bild,
        ],
        outputs=[chatbot_bild, status_display2],
        show_progress=True,
    )
    transfer_input_args2 = dict(
        fn=add_text2, inputs=[chatbot_bild, user_input2], outputs=[chatbot_bild, user_question2, user_input2], show_progress=True
    )
    predict_event2_1 = user_input2.submit(**transfer_input_args2, queue=False).then(**predict_args2)
    predict_event2_2 = submitBtn2.click(**transfer_input_args2, queue=False).then(**predict_args2)


demo.title = "LI-ChatBot"
demo.queue().launch(debug=True)