Spaces: nananie143 · Runtime error
nananie143 committed
Commit cb13344 · 1 Parent(s): e099112
Fixed async handling and imports, improved progress phases
app.py CHANGED
@@ -1,11 +1,5 @@
-import gradio as gr
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-from langchain_community.llms import HuggingFacePipeline
-from langchain.agents import initialize_agent, Tool
-from langchain.prompts import PromptTemplate
+import os
 import json
-import subprocess
 import logging
 import asyncio
 import time
@@ -15,13 +9,17 @@ from typing import Dict, List, Optional, Set, Union, Any
 from dataclasses import dataclass, field
 from pathlib import Path
 import hashlib
-import os
-import json
-import networkx as nx
-from dataclasses import asdict, field
-import shutil
 import tempfile
-
+import shutil
+import gradio as gr
+import networkx as nx
+from langchain.prompts import PromptTemplate
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from langchain_community.llms import HuggingFacePipeline
+from langchain.agents import initialize_agent, Tool
+import subprocess
+import asyncio
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -1062,7 +1060,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as ui:
         show_copy_button=True
     )
 
-    def stream_output(requirements, progress=gr.Progress()):
+    async def stream_output(requirements, progress=gr.Progress()):
        """Stream the output during app generation."""
        try:
            # Initialize
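The key change in this hunk is turning `stream_output` into an `async def` generator. Gradio accepts async generator functions as event handlers: each `yield` streams a partial update to the bound output components, and each `await` point hands control back to the server's event loop instead of blocking it. A minimal, self-contained sketch of the pattern (component names here are illustrative, not from this Space):

```python
import asyncio
import gradio as gr

async def slow_task(prompt):
    # An async generator handler: each `yield` pushes a partial
    # result to the bound output component while the task runs.
    for step in range(1, 4):
        await asyncio.sleep(1)  # non-blocking; other requests stay responsive
        yield f"step {step}/3: processing {prompt!r}"
    yield "done"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Status")
    gr.Button("Run").click(slow_task, inputs=inp, outputs=out)

demo.launch()
```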
@@ -1070,32 +1068,22 @@
             yield "Starting...", None, " Starting app generation...", "Initializing"
 
             # Update progress
-            [… 12 lines of the old phase loop lost in page extraction …]
-                    yield None, None, stream_handler.output[-1], "Creating"
-                elif i == 3:
-                    msg = " Implementing features..."
-                    stream_handler.update(msg, "Implementing")
-                    yield None, None, stream_handler.output[-1], "Implementing"
-                elif i == 4:
-                    msg = " Finalizing..."
-                    stream_handler.update(msg, "Finalizing")
-                    yield None, None, stream_handler.output[-1], "Finalizing"
-                time.sleep(1)  # Simulate work
+            phases = [
+                (" Analyzing requirements...", "Analyzing"),
+                (" Generating architecture...", "Designing"),
+                (" Creating project structure...", "Creating"),
+                (" Implementing features...", "Implementing"),
+                (" Finalizing...", "Finalizing")
+            ]
+
+            for msg, status in progress.tqdm(phases):
+                stream_handler.update(msg, status)
+                yield None, None, stream_handler.output[-1], status
+                await asyncio.sleep(1)  # Non-blocking sleep
 
             # Generate the app
             stream_handler.update(" Running AI Flow system...", "Processing")
-            result = [… rest of line lost in extraction …]
+            result = await app_generator(requirements)
 
             # Extract download path and logs
             download_path = None
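Here the hard-coded `if i == …` branches collapse into a `phases` list driven by `progress.tqdm`, which advances Gradio's progress bar once per item while the generator keeps yielding log updates. Note that `result = await app_generator(requirements)` assumes `app_generator` is a coroutine function (or returns an awaitable); its definition is outside this diff. A stripped-down sketch of the phase-loop pattern, assuming the function is bound to a Blocks event so `gr.Progress` is active:

```python
import gradio as gr

def run_phases(progress=gr.Progress()):
    # The same pattern as the diff: one list drives both the streamed
    # log (via yield) and the progress bar (via progress.tqdm).
    phases = ["Analyzing", "Designing", "Creating", "Implementing", "Finalizing"]
    log = []
    for name in progress.tqdm(phases):
        log.append(f"{name}...")
        yield "\n".join(log)
```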
@@ -1109,6 +1097,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as ui:
                 logs.append(line)
                 stream_handler.update(line.split(" - ")[-1], "Processing")
                 yield None, None, "\n".join(stream_handler.output), "Processing"
+                await asyncio.sleep(0.1)  # Small delay to prevent UI freezing
 
             if download_path and Path(download_path).exists():
                 stream_handler.update(" App generated successfully! Download is ready.", "Complete")
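The added `await asyncio.sleep(0.1)` does double duty: it paces the log stream and, more importantly, yields control to the event loop so queued UI updates can flush. Even a zero-second sleep is enough to hand control back; the 0.1 s is purely pacing. A tiny demonstration of the idea:

```python
import asyncio

async def drain(lines):
    # Yielding to the event loop between iterations keeps other
    # coroutines (e.g., the UI's websocket writer) running.
    for line in lines:
        print(line)
        await asyncio.sleep(0)  # 0 still yields control; 0.1 adds pacing

asyncio.run(drain(["a", "b", "c"]))
```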
@@ -1123,7 +1112,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as ui:
             logger.error(error_msg)
             stream_handler.update(error_msg, "Error")
             yield error_msg, None, "\n".join(stream_handler.output), "Error"
-
+
     def cancel_generation():
        """Cancel the current generation process."""
        stream_handler.update(" Generation cancelled by user", "Cancelled")
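`cancel_generation` only records a status message; it does not by itself stop the running coroutine. In Gradio, the usual way to abort an in-flight streaming event is the `cancels` argument on another listener. The wiring below is a hedged sketch, since the button hookup is not part of this diff and all component names are hypothetical stand-ins:

```python
import gradio as gr

with gr.Blocks() as demo:
    status = gr.Textbox(label="Status")
    go = gr.Button("Generate")
    stop = gr.Button("Cancel")
    # The event returned by .click() can be aborted by another listener
    # via `cancels` (lambdas stand in for stream_output / cancel_generation).
    evt = go.click(lambda: "running...", outputs=status)
    stop.click(lambda: "cancelled", outputs=status, cancels=[evt])

demo.launch()
```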
|
| 1 |
+
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import json
|
|
|
|
| 3 |
import logging
|
| 4 |
import asyncio
|
| 5 |
import time
|
|
|
|
| 9 |
from dataclasses import dataclass, field
|
| 10 |
from pathlib import Path
|
| 11 |
import hashlib
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
import tempfile
|
| 13 |
+
import shutil
|
| 14 |
+
import gradio as gr
|
| 15 |
+
import networkx as nx
|
| 16 |
+
from langchain.prompts import PromptTemplate
|
| 17 |
+
import torch
|
| 18 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
| 19 |
+
from langchain_community.llms import HuggingFacePipeline
|
| 20 |
+
from langchain.agents import initialize_agent, Tool
|
| 21 |
+
import subprocess
|
| 22 |
+
import asyncio
|
| 23 |
|
| 24 |
# Configure logging
|
| 25 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
| 1060 |
show_copy_button=True
|
| 1061 |
)
|
| 1062 |
|
| 1063 |
+
async def stream_output(requirements, progress=gr.Progress()):
|
| 1064 |
"""Stream the output during app generation."""
|
| 1065 |
try:
|
| 1066 |
# Initialize
|
|
|
|
| 1068 |
yield "Starting...", None, " Starting app generation...", "Initializing"
|
| 1069 |
|
| 1070 |
# Update progress
|
| 1071 |
+
phases = [
|
| 1072 |
+
(" Analyzing requirements...", "Analyzing"),
|
| 1073 |
+
(" Generating architecture...", "Designing"),
|
| 1074 |
+
(" Creating project structure...", "Creating"),
|
| 1075 |
+
(" Implementing features...", "Implementing"),
|
| 1076 |
+
(" Finalizing...", "Finalizing")
|
| 1077 |
+
]
|
| 1078 |
+
|
| 1079 |
+
for msg, status in progress.tqdm(phases):
|
| 1080 |
+
stream_handler.update(msg, status)
|
| 1081 |
+
yield None, None, stream_handler.output[-1], status
|
| 1082 |
+
await asyncio.sleep(1) # Non-blocking sleep
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1083 |
|
| 1084 |
# Generate the app
|
| 1085 |
stream_handler.update(" Running AI Flow system...", "Processing")
|
| 1086 |
+
result = await app_generator(requirements)
|
| 1087 |
|
| 1088 |
# Extract download path and logs
|
| 1089 |
download_path = None
|
|
|
|
| 1097 |
logs.append(line)
|
| 1098 |
stream_handler.update(line.split(" - ")[-1], "Processing")
|
| 1099 |
yield None, None, "\n".join(stream_handler.output), "Processing"
|
| 1100 |
+
await asyncio.sleep(0.1) # Small delay to prevent UI freezing
|
| 1101 |
|
| 1102 |
if download_path and Path(download_path).exists():
|
| 1103 |
stream_handler.update(" App generated successfully! Download is ready.", "Complete")
|
|
|
|
| 1112 |
logger.error(error_msg)
|
| 1113 |
stream_handler.update(error_msg, "Error")
|
| 1114 |
yield error_msg, None, "\n".join(stream_handler.output), "Error"
|
| 1115 |
+
|
| 1116 |
def cancel_generation():
|
| 1117 |
"""Cancel the current generation process."""
|
| 1118 |
stream_handler.update(" Generation cancelled by user", "Cancelled")
|