merve committed
Commit 060bb0d
1 Parent(s): 85686e5

Update README.md

Files changed (1):
README.md +4 -4
README.md CHANGED
@@ -34,7 +34,7 @@ Here is how to use this model:
 from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
 from PIL import Image
 import requests
-url = "https://huggingface.co/datasets/shi-labs/oneformer_demo/blob/main/cityscapes.png"
+url = "https://huggingface.co/datasets/shi-labs/oneformer_demo/resolve/main/cityscapes.png"
 image = Image.open(requests.get(url, stream=True).raw)
 
 # Loading a single model for all three tasks
@@ -45,19 +45,19 @@ model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ci
 semantic_inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt")
 semantic_outputs = model(**semantic_inputs)
 # pass through image_processor for postprocessing
-predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+predicted_semantic_map = processor.post_process_semantic_segmentation(semantic_outputs, target_sizes=[image.size[::-1]])[0]
 
 # Instance Segmentation
 instance_inputs = processor(images=image, task_inputs=["instance"], return_tensors="pt")
 instance_outputs = model(**instance_inputs)
 # pass through image_processor for postprocessing
-predicted_instance_map = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
+predicted_instance_map = processor.post_process_instance_segmentation(instance_outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
 
 # Panoptic Segmentation
 panoptic_inputs = processor(images=image, task_inputs=["panoptic"], return_tensors="pt")
 panoptic_outputs = model(**panoptic_inputs)
 # pass through image_processor for postprocessing
-predicted_semantic_map = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
+predicted_semantic_map = processor.post_process_panoptic_segmentation(panoptic_outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
 ```
 
 For more examples, please refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/oneformer).
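
For reference, below is a sketch of the usage example as it reads after this commit, assembled into one runnable script. The processor/model loading lines sit outside the diff hunks, so the checkpoint name `shi-labs/oneformer_cityscapes_swin_large` is an assumption inferred from the truncated context line (`shi-labs/oneformer_ci…`); the panoptic result is also renamed to `predicted_panoptic_map` here for clarity, whereas the README keeps assigning it to `predicted_semantic_map`.

```python
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
from PIL import Image
import requests

# Use resolve/ (raw file) rather than blob/ (HTML page), as fixed in this commit
url = "https://huggingface.co/datasets/shi-labs/oneformer_demo/resolve/main/cityscapes.png"
image = Image.open(requests.get(url, stream=True).raw)

# Loading a single model for all three tasks.
# NOTE: checkpoint name is assumed; the diff truncates it to "shi-labs/oneformer_ci...".
checkpoint = "shi-labs/oneformer_cityscapes_swin_large"
processor = OneFormerProcessor.from_pretrained(checkpoint)
model = OneFormerForUniversalSegmentation.from_pretrained(checkpoint)

# Semantic segmentation: task_inputs selects the task-specific text query.
# target_sizes expects (height, width), hence the reversed PIL (width, height) tuple.
semantic_inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt")
semantic_outputs = model(**semantic_inputs)
predicted_semantic_map = processor.post_process_semantic_segmentation(
    semantic_outputs, target_sizes=[image.size[::-1]]
)[0]

# Instance segmentation
instance_inputs = processor(images=image, task_inputs=["instance"], return_tensors="pt")
instance_outputs = model(**instance_inputs)
predicted_instance_map = processor.post_process_instance_segmentation(
    instance_outputs, target_sizes=[image.size[::-1]]
)[0]["segmentation"]

# Panoptic segmentation (variable renamed here; the README uses predicted_semantic_map)
panoptic_inputs = processor(images=image, task_inputs=["panoptic"], return_tensors="pt")
panoptic_outputs = model(**panoptic_inputs)
predicted_panoptic_map = processor.post_process_panoptic_segmentation(
    panoptic_outputs, target_sizes=[image.size[::-1]]
)[0]["segmentation"]
```

The commit's substance is twofold: the demo image URL must use `resolve/main` so `requests` fetches the raw PNG instead of an HTML page, and each `post_process_*` call must receive the matching task's outputs (`semantic_outputs`, `instance_outputs`, `panoptic_outputs`) rather than an undefined `outputs` variable.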