taarhissian committed on
Commit 1b9e4e6 · verified · 1 Parent(s): 3390717

Create code.txt

Files changed (1)
  1. code.txt +75 -0
code.txt ADDED
@@ -0,0 +1,75 @@
+ !pip install -U adapter-transformers
+ !pip install -U transformers
+ !pip install torch torchvision torchaudio
+ !pip install opencv-python
+ import gradio as gr
+ from transformers import CLIPProcessor, CLIPModel
+ from PIL import Image
+ import torch
+ import cv2
+
+ # Load the CLIP model and processor
+ model = CLIPModel.from_pretrained("Taarhoinc/TaarhoGen1")
+ processor = CLIPProcessor.from_pretrained("Taarhoinc/TaarhoGen1")
+
+ # Load the object detection model (YOLOv5 example)
+ model_path = 'yolov5s.pt'  # Replace with the path to your YOLOv5 model
+ object_detection_model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path)
+
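+ # NOTE: describe_floorplan() is called further down but never defined in this commit.
+ # The helper below is a minimal sketch of what it might look like, assuming the
+ # standard Hugging Face CLIP zero-shot pattern (logits_per_image over text prompts);
+ # the candidate prompt list is illustrative and not part of the original file.
+ def describe_floorplan(floorplan_image: Image.Image, top_k: int = 3) -> str:
+     """Ranks candidate floor plan descriptions with CLIP and returns the top-k."""
+     candidate_labels = [
+         "a floor plan of a studio apartment",
+         "a floor plan of a one-bedroom apartment",
+         "a floor plan of a two-bedroom apartment",
+         "a floor plan of a three-bedroom house",
+         "a floor plan of an office layout",
+     ]
+     inputs = processor(text=candidate_labels, images=floorplan_image,
+                        return_tensors="pt", padding=True)
+     with torch.no_grad():
+         outputs = model(**inputs)
+     # logits_per_image has shape (1, num_labels); softmax gives per-label probabilities
+     probs = outputs.logits_per_image.softmax(dim=1)[0]
+     top = probs.topk(min(top_k, len(candidate_labels)))
+     return ", ".join(
+         f"{candidate_labels[i]} ({p:.2f})"
+         for p, i in zip(top.values.tolist(), top.indices.tolist())
+     )
+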
+ # Define the function to describe a floor plan with sizes
+ def describe_floorplan_with_sizes(floorplan_image: Image.Image, top_k: int = 3):
+     """Describes a floor plan drawing by listing components and their sizes."""
+
+     # Define a list of common floor plan components
+     components = [
+         "bedroom",
+         "kitchen",
+         "bathroom",
+         "living room",
+         "dining room",
+         "hallway",
+         "garage",
+         "balcony",
+         "stairs",
+         "door",
+         "window",
+     ]
+
+     # Perform object detection
+     results = object_detection_model(floorplan_image)
+
+     # Get detected objects and bounding boxes
+     detections = results.pandas().xyxy[0]
+
+     # Filter detections based on confidence and class names
+     threshold = 0.5  # Adjust as needed
+     filtered_detections = detections[
+         (detections['confidence'] > threshold)
+         & (detections['name'].isin(components))
+     ]
+
+     # Estimate sizes (assuming a scale of 1 pixel = 0.1 feet)
+     scale = 0.1  # Adjust according to the actual scale of the floor plan
+     component_sizes = []
+     for index, row in filtered_detections.iterrows():
+         width = (row['xmax'] - row['xmin']) * scale
+         height = (row['ymax'] - row['ymin']) * scale
+         component_sizes.append(f"{row['name']}: {width:.2f}ft x {height:.2f}ft")
+
+     # Combine with CLIP-based description
+     clip_description = describe_floorplan(floorplan_image, top_k)
+     final_description = clip_description + ", " + ", ".join(component_sizes)
+
+     return final_description
+
+ # Create the Gradio interface
+ gr.Interface(
+     fn=describe_floorplan_with_sizes,
+     inputs=[
+         gr.Image(label="Upload a floor plan drawing", type="pil"),
+         gr.Slider(1, 10, step=1, value=3, label="Number of components to detect"),
+     ],
+     outputs=gr.Label(label="Detected Components with Sizes"),
+     title="Floor Plan Description with TaarhoGen1 and Sizes",
+     description="Upload a floor plan drawing to get a list of detected components and their sizes.",
+ ).launch()