# pklot_50 / app.py
# Source: Hugging Face Space by ajankelo — commit dd65808 ("Update app.py")
from gradio import Label
from icevision.all import *
import gradio as gr
from icevision.models.checkpoint import *
import PIL
import gradio as gr
import os
from icevision.models.inference_sahi import IceSahiModel
# ---- Model loading -------------------------------------------------------
# Restore the trained VFNet detector and the metadata bundled in the
# checkpoint (model type, class map, training image size).
checkpoint_path = "vfnet_resnet50ms2x_640_bs8_maxbbox500_10272022.pth"
ckpt = model_from_checkpoint(checkpoint_path)

model = ckpt["model"]
model_type = ckpt["model_type"]
class_map = ckpt["class_map"]

# ---- Inference transforms ------------------------------------------------
# Resize/pad to the size the model was trained at, then normalize.
img_size = ckpt["img_size"]
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(img_size), tfms.A.Normalize()])

# Example image(s) shown below the Gradio interface.
examples = [
    ['./sqlot.jpg'],
]

# Wrap the detector with SAHI so large images are predicted tile-by-tile.
sahimodel = IceSahiModel(
    model_type=model_type,
    model=model,
    class_map=class_map,
    tfms=valid_tfms,
    confidence_threshold=0.4,
)
def show_preds(input_image):
    """Run sliced (SAHI) inference on an uploaded image.

    Parameters
    ----------
    input_image : numpy.ndarray
        RGB image array as delivered by the Gradio ``"image"`` input.

    Returns
    -------
    PIL.Image.Image
        The input image annotated with predicted bounding boxes and labels
        (``return_img=True`` makes the prediction dict carry it under "img").
    """
    img = PIL.Image.fromarray(input_image, "RGB")
    # Slice the image into overlapping 512x512 tiles so small / distant
    # parking spaces remain detectable, then merge the per-tile predictions.
    pred_dict = sahimodel.get_sliced_prediction(
        img,
        keep_sahi_format=False,
        return_img=True,
        slice_height=512,
        slice_width=512,
        overlap_height_ratio=0.2,
        overlap_width_ratio=0.2,
        display_label=True,
        display_bbox=True,
    )
    return pred_dict["img"]
# ---- Gradio UI -----------------------------------------------------------
# Wire the prediction function into a simple image-in / image-out interface.
gr_interface = gr.Interface(
    fn=show_preds,
    inputs=["image"],
    outputs=[gr.outputs.Image(type="pil", label="VFNET Inference with Sahi")],
    title="Spaces Empty or Not?",
    description=(
        "A VFNET model that detects whether parking spaces are empty or "
        "not. Upload an image or click an example image below to use."
    ),
    examples=examples,
)

# Serve the app; debug=True surfaces tracebacks in the Space logs.
gr_interface.launch(inline=False, share=False, debug=True)