Prajwal-r-k committed
Commit 5e8fe36 · verified · 1 Parent(s): 3aaa3b9

Update app.py

Files changed (1): app.py (+18, -32)
app.py CHANGED
@@ -1,9 +1,12 @@
 import gradio as gr
 import os
-import subprocess
 import torch
 from PIL import Image
-import time
+import logging
+from NAFNet.demo import run_model  # Import the model directly instead of using subprocess
+
+# Setup logging
+logging.basicConfig(level=logging.INFO)
 
 UPLOAD_FOLDER = 'uploads'
 OUTPUT_FOLDER = 'outputs'
@@ -19,44 +22,28 @@ def gradio_interface(image):
     output_path = os.path.join(OUTPUT_FOLDER, "output.png")
 
     image.save(input_path)
-    print(f"Input image saved at: {input_path}")
+    logging.info(f"Input image saved at: {input_path}")
 
     try:
         # Ensure CUDA memory is freed before running inference
         torch.cuda.empty_cache()
 
-        # Run the NAFNet model with controlled memory usage
-        command = [
-            "python", "NAFNet/demo.py",
-            "-opt", "NAFNet/options/test/REDS/NAFNet-width64.yml",
-            "--input_path", input_path,
-            "--output_path", output_path
-        ]
+        logging.info("Running model...")
 
-        print("Running model...")
-        result = subprocess.run(command, capture_output=True, text=True)
-        print("Model execution completed.")
-
-        # Log the output
-        print("STDOUT:", result.stdout)
-        print("STDERR:", result.stderr)
-
-        if result.returncode != 0:
-            return f"Error: {result.stderr}"
-
-        # Wait for output file to be generated
-        time.sleep(2)  # Give time for file system updates
-        if not os.path.exists(output_path):
-            return "Error: Output image not generated."
+        # Run NAFNet directly instead of subprocess
+        run_model(
+            opt_path="NAFNet/options/test/REDS/NAFNet-width64.yml",
+            input_path=input_path,
+            output_path=output_path
+        )
 
-        print(f"Output image generated: {output_path}")
+        logging.info("Model execution completed.")
 
-        # Explicitly reload the image to avoid caching issues
-        return Image.open(output_path).copy()
+        return Image.open(output_path)
 
     except Exception as e:
-        print(f"Exception occurred: {str(e)}")
-        return f"Exception: {str(e)}"
+        logging.error(f"Exception: {str(e)}")
+        return f"Error: {str(e)}"
 
 # Launch Gradio
 iface = gr.Interface(
@@ -66,5 +53,4 @@ iface = gr.Interface(
     title="Image Restoration with NAFNet"
 )
 
-print("Starting Gradio interface...")
-iface.launch()
+iface.launch()  # No `share=True` for Hugging Face Spaces
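The new import assumes that NAFNet/demo.py exposes a callable run_model(opt_path, input_path, output_path). Upstream NAFNet ships demo.py as a command-line script, so this commit presumably pairs with a small wrapper of that shape. A minimal sketch of such a wrapper, with a hypothetical load_model() helper standing in for NAFNet's actual model-construction code:

# Sketch of the run_model() entry point that app.py now imports from NAFNet/demo.py.
# The load_model() helper below is a hypothetical stand-in, not NAFNet's real API.
import numpy as np
import torch
from PIL import Image


def load_model(opt_path: str) -> torch.nn.Module:
    # Hypothetical: the real wrapper would parse the YAML at opt_path and build the
    # NAFNet generator with its pretrained weights. An identity module is used here
    # only so the sketch runs end to end.
    return torch.nn.Identity()


def run_model(opt_path: str, input_path: str, output_path: str) -> None:
    """Restore the image at input_path and write the result to output_path."""
    model = load_model(opt_path)
    model.eval()

    # Load the image and convert it to a 1x3xHxW float tensor in [0, 1].
    img = Image.open(input_path).convert("RGB")
    x = torch.from_numpy(np.array(img)).float().div(255).permute(2, 0, 1).unsqueeze(0)

    with torch.no_grad():  # inference only, no gradients needed
        y = model(x).clamp(0, 1)

    # Convert back to an 8-bit HxWx3 image and save it where app.py expects it.
    out = (y.squeeze(0).permute(1, 2, 0).numpy() * 255).astype(np.uint8)
    Image.fromarray(out).save(output_path)

Running the model in-process avoids launching a second Python interpreter and reloading the weights on every request, which is why the subprocess call, the stdout/stderr plumbing, and the time.sleep(2) polling for the output file could all be dropped.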
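For context, the gr.Interface arguments sit outside the diff hunks. Since gradio_interface receives a PIL image (it calls image.save) and returns one (Image.open), the wiring presumably looks roughly like the sketch below; the inputs/outputs components and the identity handler are assumptions for illustration, not part of the commit.

# Assumed wiring (not shown in the diff): the handler takes and returns a PIL image.
import gradio as gr
from PIL import Image


def gradio_interface(image: Image.Image) -> Image.Image:
    # Stand-in for the real handler in the diff, which saves the upload,
    # calls run_model(), and returns the restored image.
    return image


iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="Image Restoration with NAFNet",
)

if __name__ == "__main__":
    iface.launch()  # no share=True needed on Hugging Face Spaces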