import streamlit as st
import os
import google.generativeai as genai
# Configure the Streamlit page
st.set_page_config(
    page_title="Gemini Chatbot mit Google Search",
    page_icon="🤖"
)
genai.configure(api_key=os.environ["geminiapi"])
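# The Gemini API key is read from the "geminiapi" environment variable
# (e.g. set as a secret in the hosting environment).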
# Model configuration
generation_config = {
    "temperature": 0.4,
    "top_p": 0.95,
    "top_k": 40,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}
# Enable Google Search grounding via the SDK's search-retrieval tool
model = genai.GenerativeModel(
    model_name="gemini-2.0-flash-exp",
    generation_config=generation_config,
    tools="google_search_retrieval"
)
# Initialize the chat session in Streamlit's session state
if "chat_session" not in st.session_state:
    st.session_state.chat_session = model.start_chat(history=[])
# UI components
st.title("🤖 Gemini Chatbot mit Google Search")
user_input = st.text_input("Stelle deine Frage:", key="user_input")
if user_input:
    # Combine the question with an instruction to always answer in German
    full_prompt = f"{user_input}\nAntworte immer auf Deutsch"
    # Generate a response within the ongoing chat session
    response = st.session_state.chat_session.send_message(full_prompt)
    # Extract the response text
    if response.candidates:
        response_text = response.candidates[0].content.parts[0].text
    else:
        response_text = "Keine Antwort erhalten"
    # Display the answer
    st.subheader("Antwort:")
    st.write(response_text)
    # Show grounding sources if the response provides them
    if response.candidates:
        grounding = response.candidates[0].grounding_metadata
        if grounding and grounding.grounding_chunks:
            st.subheader("Quellen:")
            for chunk in grounding.grounding_chunks:
                if chunk.web and chunk.web.uri:
                    st.markdown(f"- [{chunk.web.title or chunk.web.uri}]({chunk.web.uri})")
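
# To run this app locally (assuming the file is saved as app.py):
#   export geminiapi="YOUR_API_KEY"
#   streamlit run app.py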
"""
# Flask API
from flask import Flask, jsonify
import threading
from flask_cors import CORS, cross_origin
# Streamlit Frontend
import streamlit as st
import requests
st.title("huhu")
#st.text(data)
data="ok"
app = Flask(__name__)
cors = CORS(app)
@app.route('/endpoint', methods=['GET'])
def my_endpoint():
#get data from GET Request
data = request.args # Die Daten, die vom Client gesendet wurden
#post request
#data = request.form.get('variable')
#print(prompt)
#st.text(data)
return "Daten erfolgreich empfangen!"
#result = selenium(prompt)
# Das Ergebnis an PHP zurückgeben
#import streamlit as st
#st.write(prompt)
#prompting(prompt)
#return prompt
def run_flask():
app.run(port=5000)
# Starte den Flask-Server in einem separaten Thread
flask_thread = threading.Thread(target=run_flask)
flask_thread.start()
#response = requests.get('https://huggingface.co/spaces/mgokg/PandasAI:5000/api?data=huhu')
#data = response.json()
#st.write(response) # Zeigt die Daten in der Streamlit-Oberfläche an
""" |