Commit: sync with remote

Files changed:
- app.py +160 -2
- logs/20250131-092418/train/events.out.tfevents.1738286658.MIS-DIRECTOR-CE.7000.0.v2 +3 -0
- logs/20250131-092418/validation/events.out.tfevents.1738286684.MIS-DIRECTOR-CE.7000.1.v2 +3 -0
- logs/20250131-094419/train/events.out.tfevents.1738287859.MIS-DIRECTOR-CE.21128.0.v2 +3 -0
- logs/20250131-094419/validation/events.out.tfevents.1738287886.MIS-DIRECTOR-CE.21128.1.v2 +3 -0
- logs/20250131-121144/train/events.out.tfevents.1738296704.MIS-DIRECTOR-CE.21828.0.v2 +3 -0
- logs/20250131-121144/validation/events.out.tfevents.1738296714.MIS-DIRECTOR-CE.21828.1.v2 +3 -0
- model.png +0 -0
app.py
CHANGED
@@ -10,6 +10,37 @@ from tensorflow.keras.losses import SparseCategoricalCrossentropy
 from io import StringIO
 import datetime
 
+import tensorboard
+from tensorboard import program
+
+try:
+    # Check if a GPU is available
+    gpu = len(tf.config.list_physical_devices('GPU')) > 0
+
+    if gpu:
+        st.write("GPU is available!")  # Inform the user
+        # Set TensorFlow to use the GPU if available (optional, usually automatic)
+        # You can specify which GPU if you have multiple:
+        # tf.config.set_visible_devices(tf.config.list_physical_devices('GPU')[0], 'GPU')  # Use the first GPU
+        # or
+        # tf.config.experimental.set_memory_growth(tf.config.list_physical_devices('GPU')[0], True)  # Memory growth for the first GPU
+        # or
+        # strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # Use multiple GPUs
+
+    else:
+        st.write("GPU is not available. Using CPU.")
+        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # Force CPU usage (optional)
+
+except RuntimeError as e:
+    st.write(f"Error checking GPU: {e}")
+    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # Force CPU usage if there is a runtime error
+
+def run_tensorboard(log_dir):
+    # Start TensorBoard
+    tb = program.TensorBoard()
+    tb.configure(argv=[None, '--logdir', log_dir])
+    url = tb.launch()
+    return url
 
 # Constants for dataset information
 TRAIN_FILE = "train_images.tfrecords"

@@ -184,7 +215,112 @@ show_predictions(test_dataset)
 logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
 tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
 
-if st.button("Run Model"):
+if st.button("Train Model"):
+    # setup and run the model
+    EPOCHS = 20
+    STEPS_PER_EPOCH = len(list(parsed_training_dataset))
+    VALIDATION_STEPS = 26
+
+    model_history = model.fit(train_dataset, epochs=EPOCHS,
+                              steps_per_epoch=STEPS_PER_EPOCH,
+                              validation_steps=VALIDATION_STEPS,
+                              validation_data=test_dataset,
+                              callbacks=[DisplayCallback(), tensorboard_callback])
+
+    # output model statistics
+    loss = model_history.history['loss']
+    val_loss = model_history.history['val_loss']
+    accuracy = model_history.history['accuracy']
+    val_accuracy = model_history.history['val_accuracy']
+
+    epochs = range(EPOCHS)
+
+    st.title('Training and Validation Loss')  # Optional title for the Streamlit app
+
+    fig, ax = plt.subplots()  # Create a figure and an axes object
+
+    ax.plot(epochs, loss, 'r', label='Training loss')
+    ax.plot(epochs, val_loss, 'bo', label='Validation loss')
+    ax.set_title('Training and Validation Loss')  # Set title for the axes
+    ax.set_xlabel('Epoch')
+    ax.set_ylabel('Loss Value')
+    ax.set_ylim([0, 1])
+    ax.legend()
+
+    st.pyplot(fig)  # Display the plot in Streamlit
+
+if st.button("Evaluate Model"):
+    # Evaluate the model
+    evaluation_results = model.evaluate(test_dataset, verbose=0)  # Set verbose=0 to suppress console output
+
+    # Assuming model.metrics_names provides labels for evaluation_results
+    results_dict = dict(zip(model.metrics_names, evaluation_results))
+
+    st.subheader("Model Evaluation Results")
+
+    # Display each metric and its corresponding value
+    for metric, value in results_dict.items():
+        st.write(f"**{metric.capitalize()}:** {value:.4f}")
+
+if st.button("Show TensorBoard"):
+    # Create a log directory for TensorBoard
+    log_dir = "logs"
+    if not os.path.exists(log_dir):
+        os.makedirs(log_dir)
+
+    # Run TensorBoard
+    url = run_tensorboard(log_dir)
+
+    # Display TensorBoard in an iframe
+    st.markdown(f"<iframe src='{url}' width='100%' height='800'></iframe>", unsafe_allow_html=True)
+
+if st.button("CNN"):
+    tf.keras.backend.clear_session()
+
+    inputs = tf.keras.Input(shape=(256, 256, 1), name="InputLayer")
+
+    x = tf.keras.layers.Conv2D(filters=100, kernel_size=5, strides=2, padding="same",
+                               activation="relu", name="Conv1")(inputs)
+    x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding="same")(x)
+
+    x = tf.keras.layers.Conv2D(filters=200, kernel_size=5, strides=2, padding="same",
+                               activation="relu", name="Conv2")(x)
+    x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding="same")(x)
+
+    x = tf.keras.layers.Conv2D(filters=300, kernel_size=3, strides=1, padding="same",
+                               activation="relu", name="Conv3")(x)
+    x = tf.keras.layers.Conv2D(filters=300, kernel_size=3, strides=1, padding="same",
+                               activation="relu", name="Conv4")(x)
+
+    x = tf.keras.layers.Conv2D(filters=2, kernel_size=1, strides=1, padding="same",
+                               activation="relu", name="Conv5")(x)
+
+    outputs = tf.keras.layers.Conv2DTranspose(filters=2, kernel_size=31, strides=16,
+                                              padding="same", activation="softmax",
+                                              name="UpSampling")(x)
+
+    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="CNN_Segmentation")
+
+    model.compile(
+        optimizer=tf.keras.optimizers.Adam(),
+        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
+        metrics=['accuracy']
+    )
+
+    # Capture the model summary
+    model_summary = StringIO()
+    model.summary(print_fn=lambda x: model_summary.write(x + '\n'))
+
+    # plot the model including the sizes of the model
+    tf.keras.utils.plot_model(model, show_shapes=True)
+
+    # show a prediction, as an example
+    show_predictions(test_dataset)
+
+    # Initialize new directories for the new task
+    logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
+    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
+
     # setup and run the model
     EPOCHS = 20
     STEPS_PER_EPOCH = len(list(parsed_training_dataset))

@@ -194,4 +330,26 @@ if st.button("Run Model"):
                               steps_per_epoch=STEPS_PER_EPOCH,
                               validation_steps=VALIDATION_STEPS,
                               validation_data=test_dataset,
-                              callbacks=[DisplayCallback(), tensorboard_callback])
+                              callbacks=[DisplayCallback(), tensorboard_callback])
+
+    # output model statistics
+    loss = model_history.history['loss']
+    val_loss = model_history.history['val_loss']
+    accuracy = model_history.history['accuracy']
+    val_accuracy = model_history.history['val_accuracy']
+
+    epochs = range(EPOCHS)
+
+    st.title('Training and Validation Loss')  # Optional title for the Streamlit app
+
+    fig, ax = plt.subplots()  # Create a figure and an axes object
+
+    ax.plot(epochs, loss, 'r', label='Training loss')
+    ax.plot(epochs, val_loss, 'bo', label='Validation loss')
+    ax.set_title('Training and Validation Loss')  # Set title for the axes
+    ax.set_xlabel('Epoch')
+    ax.set_ylabel('Loss Value')
+    ax.set_ylim([0, 1])
+    ax.legend()
+
+    st.pyplot(fig)  # Display the plot in Streamlit
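Note on the TensorBoard integration added above: the "Show TensorBoard" button starts TensorBoard in-process via tensorboard.program and embeds the returned URL in an iframe. A minimal standalone sketch of that pattern, assuming TensorFlow, TensorBoard, and Streamlit are installed and a local "logs" directory with event files already exists:

    import streamlit as st
    from tensorboard import program

    def run_tensorboard(log_dir: str) -> str:
        # Launch an in-process TensorBoard server for log_dir and return its URL.
        tb = program.TensorBoard()
        tb.configure(argv=[None, "--logdir", log_dir])
        return tb.launch()

    url = run_tensorboard("logs")
    st.write(f"TensorBoard is running at {url}")
    st.markdown(f"<iframe src='{url}' width='100%' height='800'></iframe>",
                unsafe_allow_html=True)

Since tb.launch() returns a localhost URL, the embedded iframe only resolves when the browser runs on the same host as the app; the committed logs/ event files below can also be downloaded and inspected in a local TensorBoard.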
logs/20250131-092418/train/events.out.tfevents.1738286658.MIS-DIRECTOR-CE.7000.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4ee6fdccada8310f63d3e4e4650dfbcc43861b9681f7aa8ef92aa91703616a7
+size 72247

logs/20250131-092418/validation/events.out.tfevents.1738286684.MIS-DIRECTOR-CE.7000.1.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea6904baf3931bec88aafd431437f0d451ae235c85b413f6685c5a5d66238c70
+size 6474

logs/20250131-094419/train/events.out.tfevents.1738287859.MIS-DIRECTOR-CE.21128.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20ea3a216d82b8fda2bb367d5ce0b9f7a528cac609a225ced8391561de895ff3
+size 72247

logs/20250131-094419/validation/events.out.tfevents.1738287886.MIS-DIRECTOR-CE.21128.1.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7d59970d19f8e6b8cf33137a9170223ccc0e373b4a0ca96bda438900ef4c57c
+size 6474

logs/20250131-121144/train/events.out.tfevents.1738296704.MIS-DIRECTOR-CE.21828.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae345409300b4881d3213deda821fefa0a2492c2db21adeabdcb251eadc0a728
+size 206319

logs/20250131-121144/validation/events.out.tfevents.1738296714.MIS-DIRECTOR-CE.21828.1.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb7e2a251dc434155e8b41b76470168b66be660f51bba4f5babad3ff861f7e45
+size 6474
model.png
CHANGED
(binary image: model architecture diagram updated; before/after previews omitted)
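The change to model.png is presumably the architecture diagram regenerated by the tf.keras.utils.plot_model(model, show_shapes=True) call added in app.py, since plot_model writes to "model.png" by default. A minimal sketch of that behavior (the Dense stand-in model here is hypothetical, and pydot plus Graphviz must be installed for plot_model to work):

    import tensorflow as tf

    # Stand-in model purely to demonstrate the call; the Space's real model is
    # the segmentation CNN defined in app.py.
    model = tf.keras.Sequential([tf.keras.Input(shape=(4,)),
                                 tf.keras.layers.Dense(1)])

    # plot_model writes to "model.png" by default; show_shapes annotates each
    # layer with its input/output tensor shapes.
    tf.keras.utils.plot_model(model, to_file="model.png", show_shapes=True)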