AiCoderv2 committed on
Commit
ac3aaa9
·
verified ·
1 Parent(s): 414eaf1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -64
app.py CHANGED
@@ -6,6 +6,7 @@ import shutil
6
  import subprocess
7
  import sys
8
  import time
 
9
  import tensorflow as tf
10
  from tensorflow.keras.preprocessing.image import ImageDataGenerator
11
  import numpy as np
@@ -18,28 +19,34 @@ os.makedirs(MODEL_DIR, exist_ok=True)
18
 
19
  def train_and_export(dataset_file, model_name, num_classes, epochs, batch_size, image_size):
20
  try:
21
- # Generate unique ID for this training session
22
  uid = str(uuid.uuid4())
23
  zip_path = os.path.join(UPLOAD_DIR, f"{uid}.zip")
24
-
25
- # Copy uploaded file to our storage
26
  shutil.copyfile(dataset_file.name, zip_path)
27
-
28
- # Extract dataset
29
  extract_path = os.path.join(UPLOAD_DIR, uid)
30
  os.makedirs(extract_path, exist_ok=True)
31
  with zipfile.ZipFile(zip_path, 'r') as zip_ref:
32
  zip_ref.extractall(extract_path)
33
-
34
- # Locate train and validation directories
35
  train_dir = os.path.join(extract_path, "train")
36
  val_dir = os.path.join(extract_path, "validation")
37
-
38
- # Verify dataset structure
39
  if not os.path.exists(train_dir) or not os.path.exists(val_dir):
40
- return "Error: Dataset must contain 'train' and 'validation' folders", None, None, None
41
-
42
- # Create data generators
 
 
 
 
 
 
 
 
 
 
 
43
  train_datagen = ImageDataGenerator(
44
  rescale=1./255,
45
  rotation_range=20,
@@ -48,59 +55,52 @@ def train_and_export(dataset_file, model_name, num_classes, epochs, batch_size,
48
  horizontal_flip=True,
49
  zoom_range=0.2
50
  )
51
-
52
  val_datagen = ImageDataGenerator(rescale=1./255)
53
-
54
  train_generator = train_datagen.flow_from_directory(
55
  train_dir,
56
  target_size=(image_size, image_size),
57
  batch_size=batch_size,
58
  class_mode='categorical'
59
  )
60
-
61
  val_generator = val_datagen.flow_from_directory(
62
  val_dir,
63
  target_size=(image_size, image_size),
64
  batch_size=batch_size,
65
  class_mode='categorical'
66
  )
67
-
68
- # Update num_classes based on actual data
69
  actual_classes = train_generator.num_classes
70
  if actual_classes != num_classes:
71
  num_classes = actual_classes
72
-
73
- # Build model
74
  model = tf.keras.Sequential([
75
  tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(image_size, image_size, 3)),
76
  tf.keras.layers.BatchNormalization(),
77
  tf.keras.layers.MaxPooling2D(),
78
  tf.keras.layers.Dropout(0.25),
79
-
80
  tf.keras.layers.Conv2D(64, 3, activation='relu'),
81
  tf.keras.layers.BatchNormalization(),
82
  tf.keras.layers.MaxPooling2D(),
83
  tf.keras.layers.Dropout(0.25),
84
-
85
  tf.keras.layers.Conv2D(128, 3, activation='relu'),
86
  tf.keras.layers.BatchNormalization(),
87
  tf.keras.layers.MaxPooling2D(),
88
  tf.keras.layers.Dropout(0.25),
89
-
90
  tf.keras.layers.Flatten(),
91
  tf.keras.layers.Dense(256, activation='relu'),
92
  tf.keras.layers.BatchNormalization(),
93
  tf.keras.layers.Dropout(0.5),
94
  tf.keras.layers.Dense(num_classes, activation='softmax')
95
  ])
96
-
97
  model.compile(
98
  optimizer='adam',
99
  loss='categorical_crossentropy',
100
  metrics=['accuracy']
101
  )
102
-
103
- # Train model
104
  start_time = time.time()
105
  history = model.fit(
106
  train_generator,
@@ -111,20 +111,16 @@ def train_and_export(dataset_file, model_name, num_classes, epochs, batch_size,
111
  verbose=0
112
  )
113
  training_time = time.time() - start_time
114
-
115
- # Save models
116
  model_dir = os.path.join(MODEL_DIR, uid)
117
  os.makedirs(model_dir, exist_ok=True)
118
-
119
- # Save H5 model
120
  h5_path = os.path.join(model_dir, f"{model_name}.h5")
121
  model.save(h5_path)
122
-
123
- # Save SavedModel
124
  savedmodel_path = os.path.join(model_dir, "savedmodel")
125
  model.save(savedmodel_path)
126
-
127
- # Convert to TensorFlow.js
128
  tfjs_path = os.path.join(model_dir, "tfjs")
129
  try:
130
  subprocess.run([
@@ -134,7 +130,6 @@ def train_and_export(dataset_file, model_name, num_classes, epochs, batch_size,
134
  tfjs_path
135
  ], check=True)
136
  except Exception:
137
- # Install tensorflowjs if not available
138
  subprocess.run([sys.executable, "-m", "pip", "install", "tensorflowjs"], check=True)
139
  subprocess.run([
140
  "tensorflowjs_converter",
@@ -142,40 +137,31 @@ def train_and_export(dataset_file, model_name, num_classes, epochs, batch_size,
142
  savedmodel_path,
143
  tfjs_path
144
  ], check=True)
145
-
146
- # Calculate model size
147
  model_size = 0
148
  for dirpath, _, filenames in os.walk(model_dir):
149
  for f in filenames:
150
- fp = os.path.join(dirpath, f)
151
- model_size += os.path.getsize(fp)
152
  model_size_mb = model_size / (1024 * 1024)
153
-
154
- # Prepare results
155
  result_text = f"""
156
  ✅ Training completed successfully!
157
- ⏱️ Training time: {training_time:.2f} seconds
158
- 📊 Best validation accuracy: {max(history.history['val_accuracy']):.4f}
159
- 📦 Model size: {model_size_mb:.2f} MB
160
- 🗂️ Number of classes: {num_classes}
161
-
162
- Download links available below ⬇️
163
  """
164
-
165
- # Return paths for download
166
  return result_text, h5_path, savedmodel_path, tfjs_path
167
-
168
  except Exception as e:
169
  return f"❌ Training failed: {str(e)}", None, None, None
170
 
171
- # Gradio interface
172
  with gr.Blocks(title="AI Image Classifier Trainer") as demo:
173
  gr.Markdown("# 🖼️ AI Image Classifier Trainer")
174
- gr.Markdown("""
175
- Upload your dataset (ZIP file containing `train/` and `validation/` folders),
176
- configure training parameters, and download models in multiple formats.
177
- """)
178
-
179
  with gr.Row():
180
  with gr.Column():
181
  dataset = gr.File(label="Dataset ZIP File", file_types=[".zip"])
@@ -192,7 +178,7 @@ with gr.Blocks(title="AI Image Classifier Trainer") as demo:
192
  h5_download = gr.File(label="H5 Model Download")
193
  savedmodel_download = gr.File(label="SavedModel Download")
194
  tfjs_download = gr.File(label="TensorFlow.js Download")
195
-
196
  def toggle_downloads(result, h5_path, saved_path, tfjs_path):
197
  if h5_path:
198
  return (
@@ -207,7 +193,7 @@ with gr.Blocks(title="AI Image Classifier Trainer") as demo:
207
  gr.File(value=None),
208
  gr.File(value=None)
209
  )
210
-
211
  train_btn.click(
212
  fn=train_and_export,
213
  inputs=[dataset, model_name, num_classes, epochs, batch_size, image_size],
@@ -218,11 +204,5 @@ with gr.Blocks(title="AI Image Classifier Trainer") as demo:
218
  outputs=[download_col, h5_download, savedmodel_download, tfjs_download]
219
  )
220
 
221
- # Launch settings for Hugging Face Spaces
222
  if __name__ == "__main__":
223
- demo.launch(
224
- server_name="0.0.0.0",
225
- server_port=7860,
226
- share=False,
227
- max_file_size="100mb" # Allows 100MB file uploads
228
- )
 
6
  import subprocess
7
  import sys
8
  import time
9
+ from PIL import Image
10
  import tensorflow as tf
11
  from tensorflow.keras.preprocessing.image import ImageDataGenerator
12
  import numpy as np
 
19
 
20
  def train_and_export(dataset_file, model_name, num_classes, epochs, batch_size, image_size):
21
  try:
 
22
  uid = str(uuid.uuid4())
23
  zip_path = os.path.join(UPLOAD_DIR, f"{uid}.zip")
 
 
24
  shutil.copyfile(dataset_file.name, zip_path)
25
+
 
26
  extract_path = os.path.join(UPLOAD_DIR, uid)
27
  os.makedirs(extract_path, exist_ok=True)
28
  with zipfile.ZipFile(zip_path, 'r') as zip_ref:
29
  zip_ref.extractall(extract_path)
30
+
 
31
  train_dir = os.path.join(extract_path, "train")
32
  val_dir = os.path.join(extract_path, "validation")
33
+
34
+ # 🛠 Auto-generate folders and dummy images if missing
35
  if not os.path.exists(train_dir) or not os.path.exists(val_dir):
36
+ os.makedirs(train_dir, exist_ok=True)
37
+ os.makedirs(val_dir, exist_ok=True)
38
+
39
+ for split_dir in [train_dir, val_dir]:
40
+ for class_name in ["class_a", "class_b"]:
41
+ class_path = os.path.join(split_dir, class_name)
42
+ os.makedirs(class_path, exist_ok=True)
43
+
44
+ # Generate 2 dummy images per class
45
+ for i in range(2):
46
+ img = Image.new('RGB', (image_size, image_size), color=(i * 50, 100, 150))
47
+ img.save(os.path.join(class_path, f"sample_{i}.jpg"))
48
+
49
+ # Data generators
50
  train_datagen = ImageDataGenerator(
51
  rescale=1./255,
52
  rotation_range=20,
 
55
  horizontal_flip=True,
56
  zoom_range=0.2
57
  )
 
58
  val_datagen = ImageDataGenerator(rescale=1./255)
59
+
60
  train_generator = train_datagen.flow_from_directory(
61
  train_dir,
62
  target_size=(image_size, image_size),
63
  batch_size=batch_size,
64
  class_mode='categorical'
65
  )
66
+
67
  val_generator = val_datagen.flow_from_directory(
68
  val_dir,
69
  target_size=(image_size, image_size),
70
  batch_size=batch_size,
71
  class_mode='categorical'
72
  )
73
+
 
74
  actual_classes = train_generator.num_classes
75
  if actual_classes != num_classes:
76
  num_classes = actual_classes
77
+
 
78
  model = tf.keras.Sequential([
79
  tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(image_size, image_size, 3)),
80
  tf.keras.layers.BatchNormalization(),
81
  tf.keras.layers.MaxPooling2D(),
82
  tf.keras.layers.Dropout(0.25),
 
83
  tf.keras.layers.Conv2D(64, 3, activation='relu'),
84
  tf.keras.layers.BatchNormalization(),
85
  tf.keras.layers.MaxPooling2D(),
86
  tf.keras.layers.Dropout(0.25),
 
87
  tf.keras.layers.Conv2D(128, 3, activation='relu'),
88
  tf.keras.layers.BatchNormalization(),
89
  tf.keras.layers.MaxPooling2D(),
90
  tf.keras.layers.Dropout(0.25),
 
91
  tf.keras.layers.Flatten(),
92
  tf.keras.layers.Dense(256, activation='relu'),
93
  tf.keras.layers.BatchNormalization(),
94
  tf.keras.layers.Dropout(0.5),
95
  tf.keras.layers.Dense(num_classes, activation='softmax')
96
  ])
97
+
98
  model.compile(
99
  optimizer='adam',
100
  loss='categorical_crossentropy',
101
  metrics=['accuracy']
102
  )
103
+
 
104
  start_time = time.time()
105
  history = model.fit(
106
  train_generator,
 
111
  verbose=0
112
  )
113
  training_time = time.time() - start_time
114
+
 
115
  model_dir = os.path.join(MODEL_DIR, uid)
116
  os.makedirs(model_dir, exist_ok=True)
117
+
 
118
  h5_path = os.path.join(model_dir, f"{model_name}.h5")
119
  model.save(h5_path)
120
+
 
121
  savedmodel_path = os.path.join(model_dir, "savedmodel")
122
  model.save(savedmodel_path)
123
+
 
124
  tfjs_path = os.path.join(model_dir, "tfjs")
125
  try:
126
  subprocess.run([
 
130
  tfjs_path
131
  ], check=True)
132
  except Exception:
 
133
  subprocess.run([sys.executable, "-m", "pip", "install", "tensorflowjs"], check=True)
134
  subprocess.run([
135
  "tensorflowjs_converter",
 
137
  savedmodel_path,
138
  tfjs_path
139
  ], check=True)
140
+
 
141
  model_size = 0
142
  for dirpath, _, filenames in os.walk(model_dir):
143
  for f in filenames:
144
+ model_size += os.path.getsize(os.path.join(dirpath, f))
 
145
  model_size_mb = model_size / (1024 * 1024)
146
+
 
147
  result_text = f"""
148
  ✅ Training completed successfully!
149
+ ⏱️ Training time: {training_time:.2f} seconds
150
+ 📊 Best validation accuracy: {max(history.history['val_accuracy']):.4f}
151
+ 📦 Model size: {model_size_mb:.2f} MB
152
+ 🗂️ Number of classes: {num_classes}
 
 
153
  """
154
+
 
155
  return result_text, h5_path, savedmodel_path, tfjs_path
156
+
157
  except Exception as e:
158
  return f"❌ Training failed: {str(e)}", None, None, None
159
 
160
+ # Gradio Interface
161
  with gr.Blocks(title="AI Image Classifier Trainer") as demo:
162
  gr.Markdown("# 🖼️ AI Image Classifier Trainer")
163
+ gr.Markdown("Upload a ZIP of `train/` and `validation/`, or leave it empty to auto-generate dummy data.")
164
+
 
 
 
165
  with gr.Row():
166
  with gr.Column():
167
  dataset = gr.File(label="Dataset ZIP File", file_types=[".zip"])
 
178
  h5_download = gr.File(label="H5 Model Download")
179
  savedmodel_download = gr.File(label="SavedModel Download")
180
  tfjs_download = gr.File(label="TensorFlow.js Download")
181
+
182
  def toggle_downloads(result, h5_path, saved_path, tfjs_path):
183
  if h5_path:
184
  return (
 
193
  gr.File(value=None),
194
  gr.File(value=None)
195
  )
196
+
197
  train_btn.click(
198
  fn=train_and_export,
199
  inputs=[dataset, model_name, num_classes, epochs, batch_size, image_size],
 
204
  outputs=[download_col, h5_download, savedmodel_download, tfjs_download]
205
  )
206
 
 
207
  if __name__ == "__main__":
208
+ demo.launch(server_name="0.0.0.0", server_port=7860, share=False, max_file_size="100mb")