Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import tempfile
 import shutil
 import sys
 import google.generativeai as genai
+import re
 
 app = Flask(__name__)
 
@@ -16,7 +17,7 @@ current_dir = temp_dir
 genai.configure(api_key=os.environ["GEMINI_API_KEY"])
 
 generation_config = {
-    "temperature": 0.
+    "temperature": 0.7,
     "top_p": 1,
     "top_k": 40,
     "max_output_tokens": 1024,
@@ -27,36 +28,39 @@ model = genai.GenerativeModel(
     generation_config=generation_config,
 )
 
+# Magic Prompt for clean code generation
 system_instruction = """
-You are
+You are a code generation assistant. Follow these strict rules:
 
-1.
- - File creation and editing: For creating new files and adding content
+1. When asked to create or modify files:
+   - Respond ONLY with the complete code in a single code block
+   - Use this format: ```python\n[code]\n```
+   - Never include explanations or comments
+   - Ensure the code is executable as-is
 
-2.
+2. For terminal commands:
+   - Respond ONLY with the command in a code block
+   - Use this format: ```bash\n[command]\n```
 
-3. For
+3. For file operations:
+   - Create complete, self-contained scripts
+   - Include all necessary imports
+   - Handle errors appropriately
 
-4.
+4. Never include:
+   - Explanations
+   - Comments
+   - Markdown formatting outside code blocks
+   - Any text outside code blocks
 
-5.
+5. Example responses:
+   User: Create a Python file that prints "Hello World"
+   Response: ```python\nprint("Hello World")\n```
 
+   User: Make a script that calculates factorial
+   Response: ```python\ndef factorial(n):\n    if n == 0:\n        return 1\n    return n * factorial(n-1)\n\nprint(factorial(5))\n```
 
- - If asked to create a new file, respond with: "CREATE_FILE:filename.py"
- - If asked to edit a file with specific content, respond with: "EDIT_FILE:filename.py:file_content"
-Replace 'filename.py' with the actual filename and 'file_content' with the requested content.
-
-8. If a request requires multiple commands, provide them as a list of commands, each on a new line, prefixed with 'CMD:'.
-
-9. If a request is unclear or doesn't match any known command type, respond with "Unclear request. Please provide more details."
-
-Always respond with ONLY the command(s) to be executed or action(s) to be taken, nothing else.
+6. If the request is unclear, respond with: ```error\nUnclear request\n```
 """
 
 chat = model.start_chat(history=[])
@@ -74,10 +78,12 @@ def execute_command(command, cwd=None):
     stdout, stderr = process.communicate()
     return stdout + stderr
 
-def
+def extract_code(response):
+    """Extracts clean code from Gemini response"""
+    code_blocks = re.findall(r'```(?:python|bash)\n(.*?)\n```', response, re.DOTALL)
+    if code_blocks:
+        return code_blocks[0].strip()
+    return None
 
 @app.route("/")
 def index():
@@ -92,111 +98,42 @@ def execute_code():
 
     try:
         if command.lower().startswith("ai:"):
-            # Process command with Gemini AI
             ai_command = command[3:].strip()
-                    else:
-                        results.append(f"Error: Directory not found: {new_dir}")
-                else:
-                    results.append(execute_command(cmd))
-
-            return jsonify({"result": f"AI Executed:\n{ai_result}\n\nOutput:\n" + "\n".join(results)})
-        elif command == "show files":
-            files = os.listdir(current_dir)
-            return jsonify({"result": "Files in current directory:\n" + "\n".join(files)})
-        elif command == "hide files":
-            return jsonify({"result": "Files hidden."})
-        elif command.startswith("new file "):
-            filename = command[9:].strip()
-            filepath = os.path.join(current_dir, filename)
-            with open(filepath, 'w') as f:
-                pass # Create an empty file
-            return jsonify({"result": f"Created new file: {filename}"})
-        elif command.startswith("edit "):
-            filename = command[5:].strip()
-            filepath = os.path.join(current_dir, filename)
-            if os.path.exists(filepath):
-                return jsonify({"result": "Enter code:", "action": "edit", "filename": filename})
-            else:
-                return jsonify({"result": f"Error: File {filename} not found."})
-        elif command.startswith("cd "):
-            new_dir = os.path.join(current_dir, command[3:])
-            if os.path.isdir(new_dir):
-                current_dir = os.path.abspath(new_dir)
-                return jsonify({"result": f"Changed directory to: {current_dir}"})
-            else:
-                return jsonify({"result": f"Error: Directory not found: {new_dir}"})
-        elif command.startswith("!"):
-            result = execute_command(command[1:])
-        elif command.startswith("pip install"):
-            result = execute_command(f"{sys.executable} -m {command}")
-        elif command.startswith("git "):
-            result = execute_command(command)
-        else:
-            if command.endswith(".py"):
-                result = execute_command(f"{sys.executable} {command}")
-            else:
-                result = execute_command(f"{sys.executable} -c \"{command}\"")
-        return jsonify({"result": result})
+            response = chat.send_message(f"{system_instruction}\n\nUser request: {ai_command}")
+            code = extract_code(response.text)
+
+            if not code:
+                return jsonify({"result": "Error: No valid code generated"})
+
+            # Handle Python code
+            if "print(" in code or "def " in code:
+                filename = "generated_script.py"
+                filepath = os.path.join(current_dir, filename)
+                with open(filepath, 'w') as f:
+                    f.write(code)
+                return jsonify({
+                    "result": f"File created: {filename}",
+                    "type": "code",
+                    "file": filename,
+                    "content": code
+                })
+
+            # Handle bash commands
+            elif code.startswith(("pip", "git", "cd", "mkdir")):
+                result = execute_command(code)
+                return jsonify({
+                    "result": f"Command executed:\n{result}",
+                    "type": "command"
+                })
+
+            return jsonify({"result": "Unsupported code type", "type": "error"})
+
+        # ... rest of your existing command handling ...
+
     except Exception as e:
-        return jsonify({"result": f"Error: {str(e)}"})
-
-
-def save_file():
-    filename = request.json.get("filename")
-    content = request.json.get("content")
-    filepath = os.path.join(current_dir, filename)
-    with open(filepath, 'w') as f:
-        f.write(content)
-    return jsonify({"result": f"File {filename} saved successfully."})
-
-@app.route("/cleanup", methods=["POST"])
-def cleanup():
-    global temp_dir, current_dir
-    if os.path.exists(temp_dir):
-        shutil.rmtree(temp_dir)
-    temp_dir = tempfile.mkdtemp()
-    current_dir = temp_dir
-    return jsonify({"result": "Temporary files cleaned up."})
-
-@app.route("/list_files", methods=["GET"])
-def list_files():
-    files = os.listdir(current_dir)
-    return jsonify({"files": files})
-
-@app.route("/download/<path:filename>", methods=["GET"])
-def download_file(filename):
-    return send_from_directory(current_dir, filename, as_attachment=True)
+        return jsonify({"result": f"Error: {str(e)}", "type": "error"})
+
+# ... rest of your existing routes ...
 
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
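
For context, a minimal sketch of how the new extract_code helper behaves on replies that follow the system prompt's format. The helper is restated verbatim from the diff so the snippet runs on its own; the sample strings are illustrative only. Note that the regex only matches blocks opening with exactly ```python or ```bash followed by a newline, and only the first block is returned.

import re

def extract_code(response):
    """Helper added in this commit: pull the first ```python/```bash block out of a reply."""
    code_blocks = re.findall(r'```(?:python|bash)\n(.*?)\n```', response, re.DOTALL)
    if code_blocks:
        return code_blocks[0].strip()
    return None

# A reply that follows rule 5 of the prompt
sample = '```python\nprint("Hello World")\n```'
print(extract_code(sample))  # -> print("Hello World")

# The prompt's "unclear request" format uses an ```error fence, which the regex
# does not match, so it falls through to "Error: No valid code generated"
print(extract_code("```error\nUnclear request\n```"))  # -> None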
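
The ai: branch then routes the extracted code with two string heuristics: anything containing "print(" or "def " is written to generated_script.py, anything starting with pip/git/cd/mkdir is run as a shell command, and everything else is reported as unsupported. A small sketch of that decision in isolation; classify_code is a hypothetical name used here for illustration, not a function in the commit.

def classify_code(code):
    # Hypothetical helper mirroring the dispatch heuristic of the new ai: branch;
    # the commit performs these checks inline in execute_code().
    if "print(" in code or "def " in code:
        return "code"      # written to generated_script.py in current_dir
    elif code.startswith(("pip", "git", "cd", "mkdir")):
        return "command"   # passed to execute_command()
    return "error"         # reported as "Unsupported code type"

print(classify_code('print("Hello World")'))            # code
print(classify_code("pip install requests"))            # command
print(classify_code("x = [i * i for i in range(5)]"))   # error: no print( or def

Pure-expression scripts like the last example fall through to the unsupported branch, which is worth keeping in mind when wording ai: requests.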
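
Finally, a hypothetical client call against the updated handler. The route path /execute and the "command" payload key are assumptions (they are defined outside the hunks shown in this diff); the response fields result, type, file, and content are the ones the new ai: branch returns, and port 7860 matches app.run.

import requests

# "/execute" and the "command" key are assumed; adjust to the actual route and payload.
resp = requests.post(
    "http://localhost:7860/execute",
    json={"command": 'ai: create a script that prints "Hello World"'},
)
data = resp.json()
print(data["result"])              # e.g. "File created: generated_script.py"
if data.get("type") == "code":
    print(data["file"])            # name of the file written into current_dir
    print(data["content"])         # the extracted code itself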