louiecerv committed
Commit 0734928 · 1 Parent(s): 00db82e

sync with main

Files changed (5)
  1. .env +1 -0
  2. Dockerfile +20 -0
  3. app.py +40 -0
  4. requirements.txt +4 -0
  5. templates/index.html +27 -0
.env ADDED
@@ -0,0 +1 @@
+ GEMINI_API_KEY=AIzaSyAkMiAcpA3fbid9yvCGdenACYkAvWx3P6w
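(For reference: this value is read at startup via python-dotenv. A minimal sketch of how the key is typically consumed, assuming the google-generativeai client used in app.py below:

import os
import google.generativeai as genai
from dotenv import load_dotenv

load_dotenv()  # reads .env into the process environment
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))  # hands the key to the Gemini client
)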
Dockerfile ADDED
@@ -0,0 +1,20 @@
+ FROM python:3.9
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ # Debugging: Check gunicorn installation and PATH
+ RUN echo "PATH: $PATH"
+ RUN which gunicorn
+ RUN pip list | grep gunicorn
+
+ EXPOSE 7860
+
+ COPY --chown=user . /app
+ CMD ["gunicorn", "-b", "0.0.0.0:7860", "app:app"]
app.py ADDED
@@ -0,0 +1,40 @@
+ from flask import Flask, render_template, request
+ import os
+ import google.generativeai as genai
+ from dotenv import load_dotenv
+
+ load_dotenv()
+ app = Flask(__name__)
+
+ # Configure the Gemini client with the key loaded from .env
+ genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
+
+ model = genai.GenerativeModel("gemini-pro")  # Or "gemini-1.5-flash" if available
+ chat = model.start_chat(history=[])  # the chat session keeps the model-side conversation context
+ chat_history = []  # plain dicts used only for rendering in the template
+
+ @app.route("/", methods=["GET", "POST"])
+ def index():
+     response_text = None
+     if request.method == "POST":
+         prompt = request.form.get("prompt")
+         if prompt:
+             try:
+                 response = chat.send_message(
+                     prompt,
+                     generation_config=genai.types.GenerationConfig(
+                         temperature=0.7,  # Adjust as needed
+                         max_output_tokens=800,  # Adjust as needed
+                     ),
+                 )
+                 response_text = response.text
+                 chat_history.append({"role": "user", "parts": prompt})
+                 chat_history.append({"role": "model", "parts": response_text})
+             except Exception as e:
+                 response_text = f"Error: {e}"
+
+     return render_template("index.html", response_text=response_text, chat_history=chat_history)
+
+ if __name__ == "__main__":
+     # Local development only; in the container, gunicorn serves app:app on port 7860
+     app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))
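(As a quick local check, the form endpoint can be exercised with a plain POST request. A minimal sketch, assuming the app is already running on localhost:7860 and that the requests package is installed; the URL and prompt are illustrative:

import requests

# Post a prompt to the running Flask app and print the rendered HTML,
# which should contain the model's reply in the chat history.
resp = requests.post(
    "http://localhost:7860/",  # assumed local address; adjust if the port differs
    data={"prompt": "Hello, Gemini!"},
    timeout=60,
)
print(resp.status_code)
print(resp.text[:500])
)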
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ Flask
+ google-generativeai
+ python-dotenv
+ gunicorn # For Hugging Face deployment
templates/index.html ADDED
@@ -0,0 +1,27 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+     <title>Gemini Chat App</title>
+ </head>
+ <body>
+     <h1>Gemini Chat</h1>
+
+     <div id="chat-area">
+         {% for message in chat_history %}
+             <div class="message {{ message.role }}">
+                 <strong>{{ message.role.capitalize() }}:</strong> {{ message.parts }}
+             </div>
+         {% endfor %}
+     </div>
+
+     <form method="POST">
+         <label for="prompt">Your Message:</label><br>
+         <textarea id="prompt" name="prompt" rows="4" cols="50"></textarea><br><br>
+         <input type="submit" value="Send">
+     </form>
+
+     {% if response_text %}
+     {% endif %}
+
+ </body>
+ </html>