Paolo-Fraccaro committed
Commit: dab7700
Parent(s): 959befd

solved merge

Files changed:
- Dockerfile +1 -2
- README.md +1 -1
- app.py +56 -0
Dockerfile CHANGED
@@ -59,7 +59,6 @@ RUN pip3 install openmim
 
 RUN conda install -c conda-forge gradio -y
 
-
 WORKDIR /home/user
 
 RUN --mount=type=secret,id=git_token,mode=0444,required=true \
@@ -72,7 +71,7 @@ RUN pip3 install -e .
 
 RUN mim install mmcv-full==1.6.2 -f https://download.openmmlab.com/mmcv/dist/11.5/1.11.0/index.html
 
-RUN pip3 install rasterio
+RUN pip3 install rasterio scikit-image
 # Set the working directory to the user's home directory
 WORKDIR $HOME/app
 
README.md CHANGED
@@ -8,4 +8,4 @@ pinned: false
 license: apache-2.0
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -5,7 +5,11 @@ config_path=hf_hub_download(repo_id="ibm-nasa-geospatial/Prithvi-100M-multi-temp
 filename="multi_temporal_crop_classification_Prithvi_100M.py",
 token=os.environ.get("token"))
 ckpt=hf_hub_download(repo_id="ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification",
+<<<<<<< HEAD
 filename='multi_temporal_crop_classification_Prithvi_100M.pth',
+=======
+filename='multi_temporal_crop_classification_best_mIoU_epoch_66.pth',
+>>>>>>> 889a651 (add files)
 token=os.environ.get("token"))
 ##########
 import argparse
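
The lines added above place literal Git conflict markers into app.py, which would leave the file unparseable as Python. A minimal sketch of a resolved download call, assuming the checkpoint filename from 889a651 is the one to keep (that choice is an assumption, not something the commit states):

    import os
    from huggingface_hub import hf_hub_download

    # Hypothetical resolution of the conflict shown above: keep the 889a651 checkpoint.
    ckpt = hf_hub_download(
        repo_id="ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification",
        filename="multi_temporal_crop_classification_best_mIoU_epoch_66.pth",
        token=os.environ.get("token"),
    )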
@@ -133,6 +137,7 @@ def inference_on_file(target_image, model, custom_test_pipeline):
 
     # output_image = target_image.replace('.tif', '_pred.tif')
     time_taken=-1
+<<<<<<< HEAD
     st = time.time()
     print('Running inference...')
     result = inference_segmentor(model, target_image, custom_test_pipeline)
@@ -162,6 +167,41 @@ def inference_on_file(target_image, model, custom_test_pipeline):
     print(f'Inference completed in {str(time_taken)} seconds')
 
     return rgb1,rgb2,rgb3, result[0][0]
+=======
+    try:
+        st = time.time()
+        print('Running inference...')
+        result = inference_segmentor(model, target_image, custom_test_pipeline)
+        print("Output has shape: " + str(result[0].shape))
+
+        ##### get metadata mask
+        mask = open_tiff(target_image)
+        # rgb = mask[[2, 1, 0], :, :].transpose((1,2,0))
+        rgb1 = mask[[2, 1, 0], :, :].transpose((1,2,0))
+        rgb2 = mask[[8, 7, 6], :, :].transpose((1,2,0))
+        rgb3 = mask[[14, 13, 12], :, :].transpose((1,2,0))
+        meta = get_meta(target_image)
+        mask = np.where(mask == meta['nodata'], 1, 0)
+        mask = np.max(mask, axis=0)[None]
+
+        result[0] = np.where(mask == 1, -1, result[0])
+
+        ##### Save file to disk
+        meta["count"] = 1
+        meta["dtype"] = "int16"
+        meta["compress"] = "lzw"
+        meta["nodata"] = -1
+        print('Saving output...')
+        # write_tiff(result[0], output_image, meta)
+        et = time.time()
+        time_taken = np.round(et - st, 1)
+        print(f'Inference completed in {str(time_taken)} seconds')
+
+    except:
+        print(f'Error on image {target_image} \nContinue to next input')
+
+    return rgb, result[0][0]*255
+>>>>>>> 889a651 (add files)
 
 def process_test_pipeline(custom_test_pipeline, bands=None):
 
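
The block added above reads the input GeoTIFF, builds three RGB composites from fixed band indices, flags pixels equal to the raster's nodata value, sets masked predictions to -1, and prepares single-band int16 output metadata with LZW compression. open_tiff and get_meta are the app's own helpers; the sketch below shows the same pattern with plain rasterio calls and is an illustration, not the Space's code:

    import numpy as np
    import rasterio

    def composites_mask_and_meta(path):
        # Read all bands as a (bands, H, W) array and copy the dataset metadata.
        with rasterio.open(path) as src:
            img = src.read()
            meta = src.meta.copy()

        # RGB composite from selected band indices (the diff builds one per time step).
        rgb = img[[2, 1, 0], :, :].transpose((1, 2, 0))

        # Mark pixels that equal the nodata value in any band
        # (assumes the dataset defines a nodata value).
        nodata_mask = np.max(np.where(img == meta["nodata"], 1, 0), axis=0)[None]

        # Metadata for writing a single int16 band, LZW-compressed, with -1 as nodata.
        meta.update(count=1, dtype="int16", compress="lzw", nodata=-1)
        return rgb, nodata_mask, meta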
@@ -184,7 +224,10 @@ def process_test_pipeline(custom_test_pipeline, bands=None):
 
     return custom_test_pipeline
 
+<<<<<<< HEAD
 
+=======
+>>>>>>> 889a651 (add files)
 config = Config.fromfile(config_path)
 config.model.backbone.pretrained=None
 model = init_segmentor(config, ckpt, device='cpu')
@@ -217,6 +260,7 @@ with gr.Blocks() as demo:
 
     btn.click(fn=func, inputs=inp, outputs=[inp1, inp2, inp3, out])
 
+<<<<<<< HEAD
     # with gr.Row():
     # gr.Examples(examples=["chip_102_345_merged.tif", "chip_104_104_merged.tif", "chip_109_421_merged.tif"],
     # inputs=inp,
@@ -225,5 +269,17 @@
     # fn=func,
     # cache_examples=True,
     # )
+=======
+    with gr.Row():
+        gr.Examples(examples=["chip_102_345_merged.tif",
+                              "chip_104_104_merged.tif",
+                              "chip_109_421_merged.tif"],
+                    inputs=inp,
+                    outputs=[inp1, inp2, inp3, out],
+                    preprocess=preprocess_example,
+                    fn=func,
+                    cache_examples=True,
+                    )
+>>>>>>> 889a651 (add files)
 
 demo.launch()
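
For comparison, a minimal, self-contained gr.Examples setup with cached examples; the components and the predict function below are placeholders rather than the Space's actual code:

    import gradio as gr

    def predict(filepath):
        # Placeholder for the Space's inference function, which returns RGB previews
        # and a prediction for the uploaded GeoTIFF.
        return filepath

    with gr.Blocks() as demo:
        inp = gr.Textbox(label="input file")   # the Space uses richer input/output components
        out = gr.Textbox(label="prediction")
        gr.Examples(examples=["chip_102_345_merged.tif"],
                    inputs=inp,
                    outputs=out,
                    fn=predict,
                    cache_examples=True)   # runs fn on each example at startup and caches results

    demo.launch()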