jpc committed on
Commit 7463538 · 1 Parent(s): f843aec

Fixed the interface

Files changed (2)
  1. app.py +4 -5
  2. tools.py +3 -1
app.py CHANGED
@@ -1,8 +1,9 @@
import gradio as gr
from fastai.vision.all import *
+ from tools import *
import skimage

- learn = load_learner('models/panda-model-1.pth')
+ learn = load_learner('panda-model-1.pkl')

labels = learn.dls.vocab

@@ -14,8 +15,6 @@ def predict(img):
title = "Prostate cANcer graDe Assessment model"
description = "A model to predict the ISUP grade for prostate cancer based on whole-slide images of digitized H&E-stained biopsies."
# article="<p style='text-align: center'><a href='https://tmabraham.github.io/blog/gradio_hf_spaces_tutorial' target='_blank'>Blog post</a></p>"
- examples = ['test.jpg']
- interpretation='default'
- enable_queue=True
+ examples = ['example.jpg', 'example2.jpg', 'example3.jpg']

- gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(224, 224)),outputs=gr.outputs.Label(num_top_classes=3),title=title,description=description,article=article,examples=examples,interpretation=interpretation,enable_queue=enable_queue).launch()
+ gr.Interface(fn=predict,inputs=gr.inputs.Image(),outputs=gr.outputs.Label(num_top_classes=5),title=title,description=description,examples=examples).launch()
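Note: the second hunk header references a predict function whose body is unchanged by this commit and therefore not shown. As a rough orientation, a minimal sketch of what such a fastai + Gradio prediction wrapper usually looks like; the body below is an assumption, not the committed code, and it relies on the learn and labels objects defined at the top of app.py.

# Hypothetical sketch of predict(); not part of this diff.
def predict(img):
    img = PILImage.create(img)                  # wrap the image array Gradio passes in
    pred, pred_idx, probs = learn.predict(img)  # run the exported fastai learner
    # gr.outputs.Label expects a dict mapping class names to confidences
    return {labels[i]: float(probs[i]) for i in range(len(labels))}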
tools.py CHANGED
@@ -10,13 +10,15 @@ def get_crops(x):
else:
tiff_file = imgdir/f'{x["image_id"]}.tiff'
img = tifffile.imread(tiff_file, key=1)
+ print('input image shape:', img.shape)
crop = np.array(img.shape) // tile_size * tile_size; crop
imgc = img[:crop[0],:crop[1]]
imgc = imgc.reshape(imgc.shape[0] // tile_size, tile_size, imgc.shape[1] // tile_size, tile_size, 3)
xs, ys = (imgc.mean(axis=1).mean(axis=2).mean(axis=-1) < 252).nonzero()
if len(xs) == 0:
xs, ys = (imgc.mean(axis=1).mean(axis=2).mean(axis=-1)).nonzero()
- # if len(xs) < 2: print("no data in image:", x)
+ if len(xs) < 2: print("no data in image:", x)
+ print('len xs:', len(xs))
pidxs = random.choices(list(range(len(xs))), k=36)
return PILImage.create(imgc[xs[pidxs],:,ys[pidxs],:].reshape(6,6,tile_size,tile_size,3).transpose(0,2,1,3,4).reshape(6*tile_size,6*tile_size,3))
# return imgc.mean(axis=1).mean(axis=2).mean(axis=-1)
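Note: the return line above packs 36 randomly selected tiles into a 6x6 mosaic through a reshape/transpose/reshape chain. A minimal standalone sketch of just that mosaic step, with a made-up tile_size purely for illustration:

import numpy as np

tile_size = 4                                         # small value just for illustration
# stands in for imgc[xs[pidxs],:,ys[pidxs],:], which has shape (36, tile_size, tile_size, 3)
tiles = np.random.randint(0, 255, size=(36, tile_size, tile_size, 3), dtype=np.uint8)

mosaic = (tiles
          .reshape(6, 6, tile_size, tile_size, 3)     # arrange the 36 tiles on a 6x6 grid
          .transpose(0, 2, 1, 3, 4)                   # move each tile's pixel rows next to its grid row
          .reshape(6 * tile_size, 6 * tile_size, 3))  # flatten the grid into one image

assert mosaic.shape == (24, 24, 3)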