# --- HuggingFace Spaces page header (scraped metadata, not source code) ---
# Spaces: Running | File size: 4,911 Bytes
# Commits referenced: feccca5 dbfa293 f392e49
import spaces
import gradio as gr
import torch
from transformers import pipeline
from PIL import Image
# Global model storage
# Cache of loaded pipelines keyed by HF model id so repeated GPU calls
# reuse an already-initialized model instead of reloading it each time.
models = {}
def _extract_code(text):
    """Return the Python snippet inside a ```python fence in *text* (if any),
    guaranteeing the cadquery import is present."""
    code = text.strip()
    fence = "```python"
    if fence in code:
        start = code.find(fence) + len(fence)
        end = code.find("```", start)
        if end > start:
            code = code[start:end].strip()
    if "import cadquery" not in code:
        code = "import cadquery as cq\n\n" + code
    return code


def _get_pipeline(model_name):
    """Load an image-text-to-text pipeline for *model_name*, memoizing it
    in the module-level ``models`` cache."""
    if model_name not in models:
        models[model_name] = pipeline(
            "image-text-to-text",
            model=model_name,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True,
        )
    return models[model_name]


@spaces.GPU(duration=120)
def generate_code(image, model_choice, prompt_style):
    """Generate CADQuery code from image.

    Args:
        image: PIL image of the CAD model, or ``None`` if nothing was uploaded.
        model_choice: Display name of the model (key of the model map below).
        prompt_style: "Simple", "Detailed", or "Chain-of-Thought".

    Returns:
        A Markdown string with the generated code, or an error message.
    """
    if image is None:
        return "β Please upload an image first."
    # Prompts
    prompts = {
        "Simple": "Generate CADQuery Python code for this 3D model:",
        "Detailed": "Analyze this 3D CAD model and generate Python CADQuery code.\n\nRequirements:\n- Import cadquery as cq\n- Store result in 'result' variable\n- Use proper CADQuery syntax\n\nCode:",
        "Chain-of-Thought": "Analyze this 3D CAD model step by step:\n\nStep 1: Identify the basic geometry\nStep 2: Note any features\nStep 3: Generate clean CADQuery Python code\n\n```python\nimport cadquery as cq\n\n# Generated code:"
    }
    try:
        # Display name -> HF model id.
        model_map = {
            "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
            "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
            "GLM-4.5V": "zai-org/GLM-4.5V"
        }
        model_name = model_map[model_choice]
        pipe = _get_pipeline(model_name)

        # Chat-style multimodal message: one image plus the text prompt.
        messages = [{
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompts[prompt_style]}
            ]
        }]
        # Fix: temperature only takes effect when sampling is enabled;
        # without do_sample=True transformers ignores it and decodes greedily.
        result = pipe(messages, max_new_tokens=512, temperature=0.7, do_sample=True)

        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get("generated_text", str(result))
        else:
            generated_text = str(result)
        # Chat pipelines may return a message list rather than a plain string;
        # coerce so the extraction below never raises on .strip().
        if not isinstance(generated_text, str):
            generated_text = str(generated_text)

        code = _extract_code(generated_text)
        return f"""## π― Generated CADQuery Code
```python
{code}
```
## π Info
- **Model**: {model_choice}
- **Prompt**: {prompt_style}
- **Device**: GPU
## π§ Usage
```bash
pip install cadquery
python your_script.py
```
"""
    except Exception as e:
        # Surface a truncated error in the UI rather than crashing the app.
        return f"β **Generation Failed**: {str(e)[:500]}"
def system_info():
    """Return a Markdown summary of the runtime environment (CUDA, torch)."""
    cuda_ok = torch.cuda.is_available()
    gpu_count = torch.cuda.device_count() if cuda_ok else 0
    device_label = "GPU" if cuda_ok else "CPU"
    # Assemble line by line; trailing "" preserves the final newline.
    lines = [
        "## π₯οΈ System Information",
        f"- **CUDA Available**: {cuda_ok}",
        f"- **CUDA Devices**: {gpu_count}",
        f"- **PyTorch Version**: {torch.__version__}",
        f"- **Device**: {device_label}",
        "",
    ]
    return "\n".join(lines)
# Create interface
# Two-tab Gradio app: "Generate" runs generate_code on an uploaded image;
# "System" shows torch/CUDA info via system_info.
with gr.Blocks(title="GLM-4.5V CAD Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# π§ GLM-4.5V CAD Generator
Generate CADQuery Python code from 3D CAD model images using GLM-4.5V models!
**Models**: GLM-4.5V-AWQ (fastest) | GLM-4.5V-FP8 (balanced) | GLM-4.5V (best quality)
""")
    with gr.Tab("π Generate"):
        with gr.Row():
            with gr.Column():
                # Inputs: image plus the two dropdowns that map directly onto
                # generate_code's model_map and prompts keys.
                image_input = gr.Image(type="pil", label="Upload CAD Model Image")
                model_choice = gr.Dropdown(
                    choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
                    value="GLM-4.5V-AWQ",
                    label="Select Model"
                )
                prompt_style = gr.Dropdown(
                    choices=["Simple", "Detailed", "Chain-of-Thought"],
                    value="Chain-of-Thought",
                    label="Prompt Style"
                )
                generate_btn = gr.Button("π Generate CADQuery Code", variant="primary")
            with gr.Column():
                # Output is rendered as Markdown (generate_code returns Markdown).
                output = gr.Markdown("Upload an image and click Generate!")
        generate_btn.click(
            fn=generate_code,
            inputs=[image_input, model_choice, prompt_style],
            outputs=output
        )
    with gr.Tab("βοΈ System"):
        info_display = gr.Markdown()
        refresh_btn = gr.Button("π Refresh")
        # Populate on page load and on demand.
        demo.load(fn=system_info, outputs=info_display)
        refresh_btn.click(fn=system_info, outputs=info_display)
if __name__ == "__main__":
    # Startup banner; useful in Spaces logs to confirm GPU visibility.
    print("π Starting GLM-4.5V CAD Generator...")
    print(f"CUDA available: {torch.cuda.is_available()}")
    demo.launch()  # NO share=True! Spaces handles public hosting itself.