ajankelo committed on
Commit
b521671
1 Parent(s): 7900509

first commit with sahi vfnet

app.py ADDED
@@ -0,0 +1,62 @@
+ from icevision.all import *
+ from icevision.models.checkpoint import *
+ from icevision.models.inference_sahi import IceSahiModel
+ import gradio as gr
+ import PIL
+
+ # Load model
+ checkpoint_path = "labeled_data_vfnet_resnet50ms2x_640_bs8_maxbbox500.pth"
+ checkpoint_and_model = model_from_checkpoint(checkpoint_path)
+ model = checkpoint_and_model["model"]
+ model_type = checkpoint_and_model["model_type"]
+ class_map = checkpoint_and_model["class_map"]
+
+ # Transforms
+ img_size = checkpoint_and_model["img_size"]
+ valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(img_size), tfms.A.Normalize()])
+
+ # Populate examples in Gradio interface
+ examples = [
+     ['./sqlot.jpg'],
+ ]
+
+ # Wrap the checkpoint in a SAHI model for sliced inference
+ sahimodel = IceSahiModel(model_type=model_type, model=model, class_map=class_map, tfms=valid_tfms, confidence_threshold=0.4)
+
+
+ def show_preds(input_image):
+     img = PIL.Image.fromarray(input_image, "RGB")
+
+     pred_dict = sahimodel.get_sliced_prediction(
+         img,
+         keep_sahi_format=False,
+         return_img=True,
+         slice_height=512,
+         slice_width=512,
+         overlap_height_ratio=0.2,
+         overlap_width_ratio=0.2,
+         display_label=True,
+         display_bbox=True,
+     )
+
+     # Whole-image (non-sliced) alternative, kept for reference:
+     # pred_dict = model_type.end2end_detect(img, valid_tfms, model,
+     #                                       class_map=class_map,
+     #                                       detection_threshold=0.5,
+     #                                       display_label=True,
+     #                                       display_bbox=True,
+     #                                       return_img=True,
+     #                                       font_size=15,
+     #                                       label_color="#FF59D6")
+     return pred_dict["img"]
+
+
+ gr_interface = gr.Interface(
+     fn=show_preds,
+     inputs=["image"],
+     outputs=[gr.outputs.Image(type="pil", label="VFNET Inference with Sahi")],
+     title="Spaces Empty or Not?",
+     description="A VFNET model that detects whether parking spaces are empty or not. Upload an image or click an example image below to use.",
+     examples=examples,
+ )
+ gr_interface.launch(inline=False, share=False, debug=True)
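
Side note (not part of the committed files): the slice_height/slice_width of 512 with 0.2 overlap ratios control how many crops SAHI feeds through the VFNet model per uploaded image. A rough sketch of that tiling arithmetic, assuming a plain sliding window whose stride is the slice size minus the pixel overlap, with the last window clamped to the image border (an approximation, not SAHI's exact internals):

# Approximation of SAHI's sliced-window count; count_slices is a hypothetical
# helper written for illustration, not a function from sahi or icevision.
def count_slices(image_dim: int, slice_size: int = 512, overlap_ratio: float = 0.2) -> int:
    stride = slice_size - int(overlap_ratio * slice_size)  # 512 - 102 = 410 px
    if image_dim <= slice_size:
        return 1
    n, pos = 0, 0
    while pos < image_dim:
        n += 1
        if pos + slice_size >= image_dim:  # last window reaches the border
            break
        pos += stride
    return n

# e.g. a 1920x1080 parking-lot photo would be cut into roughly 5 x 3 = 15 slices
print(count_slices(1920), count_slices(1080))  # -> 5 3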
labeled_data_vfnet_resnet50ms2x_640_bs8_maxbbox500.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5a320b3b5e3b6d91a91d3aecbeaec877dfd8647f9b34396992c31436a1cde40
+ size 131192663
packages.txt ADDED
@@ -0,0 +1 @@
+ python3-opencv
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ sahi
+ gradio
+ -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.10.0/index.html
+ mmcv-full==1.3.17
+ git+https://github.com/airctic/icevision.git#egg=icevision[all]
+ git+https://github.com/airctic/icedata.git
+ mmdet==2.17.0
+
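
Side note (not part of the committed files): a quick sanity check, assuming the pins above resolve as written, that the detection stack imports with the expected versions after `pip install -r requirements.txt`:

import gradio
import mmcv
import mmdet
import sahi

print("mmcv:  ", mmcv.__version__)    # expected 1.3.17 per requirements.txt
print("mmdet: ", mmdet.__version__)   # expected 2.17.0 per requirements.txt
print("sahi:  ", sahi.__version__)    # unpinned, whatever resolved at install time
print("gradio:", gradio.__version__)  # unpinned, whatever resolved at install time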
sqlot.jpg ADDED