# -*- coding: utf-8 -*-

import json
from pathlib import Path

import gradio as gr
import numpy as np
from PIL import Image, ImageDraw

from config import parse_configurations
from tools import UFCNModel

# Load the config
config = parse_configurations(Path("config.yaml"))
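# Illustrative sketch of the structure assumed for config.yaml (field names are inferred
# from the way the config is used below; the values are placeholders):
#
#   title: "Demo title"
#   description: "Demo description"
#   examples:
#     - resource/example_1.jpg
#   models:
#     - model_name: my-ufcn-model
#       title: "Model title"
#       description: "Model description"
#       classes_colors: ["green", "blue"]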

# Check that the paths of the examples are valid
for example in config["examples"]:
    assert Path.exists(
        Path(example)
    ), f"The path of the image '{example}' does not exist."

# Cached models, maps model_name to UFCNModel object
MODELS = {
    model["model_name"]: UFCNModel(
        name=model["model_name"],
        colors=model["classes_colors"],
        title=model["title"],
        description=model["description"],
    )
    for model in config["models"]
}

# Create a list of model names
models_name = list(MODELS)


def load_model(model_name: str) -> UFCNModel:
    """
    Retrieve the model, and load its parameters/files if it wasn't done before.

    :param model_name: The name of the selected model
    :return: The UFCNModel instance selected
    """
    assert model_name in MODELS
    model = MODELS[model_name]
    # Load the model's files if it wasn't done before
    if not model.loaded:
        model.load()
    return model


def query_image(model_name: str, image: np.ndarray) -> tuple[Image.Image, str]:
    """
    Load a model and draw the predicted polygons, in the colors defined by the model, on the input image.

    :param model_name: The name of the model selected in the dropdown
    :param image: The image to run the prediction on, as a NumPy array
    :return: A blended image with the predictions drawn on it, and a JSON string listing
        the detected objects, where each object is described by a dictionary with:
        - `polygon` key: list, the coordinates of the points of the polygon,
        - `confidence` key: float, the confidence of the model,
        - `channel` key: str, the name of the predicted class.
    """

    # Load the selected model and retrieve its classes and colors
    ufcn_model = load_model(model_name)

    # Make a prediction with the model
    detected_polygons, probabilities, mask, overlap = ufcn_model.model.predict(
        input_image=image, raw_output=True, mask_output=False, overlap_output=False
    )
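    # Assumption based on how the results are consumed below: detected_polygons is indexed by
    # channel, and each entry is a list of dictionaries with "polygon" and "confidence" keys;
    # the mask and overlap outputs are not used since only the raw output was requested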

    # Load image
    image = Image.fromarray(image)

    # Make a copy of the image to keep the source and also to be able to use Pillow's blend method
    img2 = image.copy()

    # Initialize the list of predictions that will be displayed as JSON in the application
    predict = []

    # Create the polygons on the copy of the image for each class with the corresponding color
    # We do not draw polygons of the background channel (channel 0)
    for channel in range(1, ufcn_model.num_channels):
        for i, polygon in enumerate(detected_polygons[channel]):
            # Draw the polygon on the image copy, using the color of its class
            # (the colors list is offset by one since the background channel has no color)
            ImageDraw.Draw(img2).polygon(
                polygon["polygon"], fill=ufcn_model.colors[channel - 1]
            )

            # Build the dictionary describing this detected object
            predict.append(
                {
                    # The list of coordinates of the points of the polygon.
                    # Cast to np.int32 and convert to a plain Python list to make it JSON-serializable
                    "polygon": np.asarray(polygon["polygon"], dtype=np.int32).tolist(),
                    # Confidence that the model predicts the polygon in the right place
                    "confidence": polygon["confidence"],
                    # The channel on which the polygon is predicted
                    "channel": ufcn_model.classes[channel],
                }
            )

    # Return the blend of the images and the dictionary formatted in json
    return Image.blend(image, img2, 0.5), json.dumps(predict, indent=2)


def update_model(model_name: str) -> tuple[str, str]:
    """
    Update the model title and description to those of the selected model

    :param model_name: The name of the selected model
    :return: The new title and the new description
    """
    return f"## {MODELS[model_name].title}", MODELS[model_name].description


with gr.Blocks() as process_image:
    # Create app title
    gr.Markdown(f"# {config['title']}")

    # Create app description
    gr.Markdown(config["description"])

    # Create dropdown button
    model_name = gr.Dropdown(models_name, value=models_name[0], label="Models")

    # Get the model selected by default in the dropdown
    selected_model: UFCNModel = MODELS[model_name.value]

    # Create model title
    model_title = gr.Markdown(f"## {selected_model.title}")

    # Create model description
    model_description = gr.Markdown(selected_model.description)

    # Update the model title and description when the selected model changes
    model_name.change(update_model, model_name, [model_title, model_description])

    # Create a first row of blocks
    with gr.Row():
        # Create a column on the left
        with gr.Column():
            # Generates an image that can be uploaded by a user
            image = gr.Image()

            # Create a row under the image
            with gr.Row():
                # Generate a button to clear the inputs and outputs
                clear_button = gr.Button("Clear", variant="secondary")

                # Generates a button to submit the prediction
                submit_button = gr.Button("Submit", variant="primary")

            # Create a row under the buttons
            with gr.Row():
                # Generate example images that can be used as input image for every model
                gr.Examples(config["examples"], inputs=image)

        # Create a column on the right
        with gr.Column():
            with gr.Row():
                # Generates an output image that does not support upload
                image_output = gr.Image(interactive=False)

            # Create a row under the predicted image
            with gr.Row():
                # Create a column so that the JSON output doesn't take the full size of the page
                with gr.Column():
                    # Create a collapsible region
                    with gr.Accordion("JSON"):
                        # Generates a json with the model predictions
                        json_output = gr.JSON()

    # Clear button: set default values to inputs and output objects
    clear_button.click(
        lambda: (None, None, None),
        inputs=[],
        outputs=[image, image_output, json_output],
    )

    # Run the prediction when the Submit button is clicked
    submit_button.click(
        query_image, inputs=[model_name, image], outputs=[image_output, json_output]
    )

# Launch the application
process_image.launch()
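
# Note: launch() as called above serves the interface locally; Gradio's standard share flag
# can be passed to create a temporary public link instead, e.g.:
# process_image.launch(share=True)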