Update app.py
app.py CHANGED

@@ -90,10 +90,10 @@ YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
 ################################################
 #LLM model to work with
 #openai-------------------------------------
-
+MODEL_NAME  = "gpt-3.5-turbo-16k"
 #MODEL_NAME = "gpt-3.5-turbo-1106"
 #MODEL_NAME= "gpt-4-1106-preview"
-
+MODEL_NAME_IMAGE = "gpt-4-vision-preview"
 
 
 #show the available models
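
The hunk pins MODEL_NAME to gpt-3.5-turbo-16k for text and adds MODEL_NAME_IMAGE (gpt-4-vision-preview) for image analysis. For the "show the available models" comment that follows, a minimal sketch of listing the account's models over the same REST API the file already uses; the environment variable name is an assumption, the diff only shows that an OAI_API_KEY value exists:

import os
import requests

# Assumption: OAI_API_KEY is loaded from the environment; the hunk only shows the name.
OAI_API_KEY = os.getenv("OPENAI_API_KEY")

response = requests.get(
    "https://api.openai.com/v1/models",
    headers={"Authorization": f"Bearer {OAI_API_KEY}"},
)
response.raise_for_status()
print(sorted(model["id"] for model in response.json()["data"]))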

@@ -173,22 +173,33 @@ def process_image(image_path, prompt):
 
 
     # Prepare the data for the API request (specific to the API you're using)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    headers = {
+              "Content-Type": "application/json",
+              "Authorization": f"Bearer {OAI_API_KEY}"
+            }
+
+    payload = {
+      "model": MODEL_NAME_IMAGE,
+      "messages": [
+        {
+          "role": "user",
+          "content": [
+            {
+              "type": "text",
+              "text": prompt
+            },
+            {
+              "type": "image_url",
+              "image_url": {
+                "url": f"data:image/jpeg;base64,{encoded_string}"
+              }
+            }
+          ]
+        }
+      ],
+      "max_tokens": 300
+    }
+    return headers, payload
 
 
 def transfer_input(inputs):
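
process_image now returns the request headers and the gpt-4-vision-preview chat-completion payload; encoded_string and OAI_API_KEY are referenced but not defined in this hunk. A hedged sketch of how encoded_string is presumably built from image_path earlier in the function (the helper name encode_image and the file name are illustrative, not from app.py):

import base64

def encode_image(image_path):
    # Read the image bytes and return them as a base64 string for the data URL.
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

encoded_string = encode_image("example.jpg")  # hypothetical path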

@@ -412,7 +423,8 @@ def create_picture(history,  prompt):
 
 
 def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
-
+    #draw an image as instructed and show it in the history...
+    if (prompt.find('zeichnen') != -1):
         response = generate_bild(prompt)
         result = response.content 
         #output the image
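
Prompts are routed to image generation whenever they contain the German keyword 'zeichnen' ("draw"). The check in the commit is case-sensitive; a self-contained, optional variant (the helper name is hypothetical, not part of app.py):

def ist_zeichnen_prompt(prompt: str) -> bool:
    # Suggested helper only: case-insensitive check for the routing keyword.
    return 'zeichnen' in prompt.lower()

print(ist_zeichnen_prompt("Bitte ein Haus zeichnen"))  # True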

@@ -420,17 +432,24 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
         image_64 = umwandeln_fuer_anzeige(image)
         chatbot[-1][1] =  "<img src='data:image/png;base64,{0}'/>".format(b64encode(image_64).decode('utf-8')) 
         history = history + [(prompt, result)]  
-
         return chatbot, history, "Success"
     else:
-
-        #output the answer as a stream... if a text answer is requested
-        chatbot[-1][1] = result
+        #no image uploaded -> answer with text...
         if (file == None):
+            result = generate_text(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
             history = history + [(prompt, result)]
+            #output the answer as a stream... if a text answer is requested
         else:
+            #an image was attached -> if there is a prompt for it, analyze the image
+            #this only works via the dedicated OpenAI endpoint...
             history = history + [((file,), None),(prompt, result)]
-
+            headers, payload = process_image
+            response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
+            result = response.json()
+            history = history + [((file,), None),(prompt, result)]
+
+        chatbot[-1][1] = result
+
         return chatbot, history, "Success" 
         """ 
         for character in result:
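
In the new else branch, process_image is referenced but never called, the raw response.json() dict is stored as result, and the ((file,), None), (prompt, result) pair is appended to history twice. A hedged sketch of what the branch presumably intends, assuming process_image takes (image_path, prompt) as in the earlier hunk; analyze_image is an illustrative wrapper, not a function in app.py:

import requests

def analyze_image(file, prompt, history):
    # Illustrative rewrite of the image path in generate_auswahl's else branch.
    headers, payload = process_image(file, prompt)   # call the function, not just reference it
    response = requests.post("https://api.openai.com/v1/chat/completions",
                             headers=headers, json=payload)
    # Pull the assistant text out of the chat completion instead of keeping the raw dict.
    result = response.json()["choices"][0]["message"]["content"]
    history = history + [((file,), None), (prompt, result)]  # append the pair once
    return result, history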