praeclarumjj3 commited on
Commit
e24e799
1 Parent(s): c71d44b

Update Readme

Browse files
Files changed (1) hide show
  1. README.md +11 -11
README.md CHANGED
@@ -31,33 +31,33 @@ You can use this particular checkpoint for semantic, instance and panoptic segme
31
  Here is how to use this model:
32
 
33
  ```python
34
- from transformers import OneFormerFeatureExtractor, OneFormerForUniversalSegmentation
35
  from PIL import Image
36
  import requests
37
  url = "https://huggingface.co/datasets/shi-labs/oneformer_demo/blob/main/cityscapes.png"
38
  image = Image.open(requests.get(url, stream=True).raw)
39
 
40
  # Loading a single model for all three tasks
41
- feature_extractor = OneFormerFeatureExtractor.from_pretrained("shi-labs/oneformer_cityscapes_dinat_large")
42
  model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_dinat_large")
43
 
44
  # Semantic Segmentation
45
- semantic_inputs = feature_extractor(images=image, ["semantic"] return_tensors="pt")
46
  semantic_outputs = model(**semantic_inputs)
47
- # pass through feature_extractor for postprocessing
48
- predicted_semantic_map = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
49
 
50
  # Instance Segmentation
51
- instance_inputs = feature_extractor(images=image, ["instance"] return_tensors="pt")
52
  instance_outputs = model(**instance_inputs)
53
- # pass through feature_extractor for postprocessing
54
- predicted_instance_map = feature_extractor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
55
 
56
  # Panoptic Segmentation
57
- panoptic_inputs = feature_extractor(images=image, ["panoptic"] return_tensors="pt")
58
  panoptic_outputs = model(**panoptic_inputs)
59
- # pass through feature_extractor for postprocessing
60
- predicted_semantic_map = feature_extractor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
61
  ```
62
 
63
  For more examples, please refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/oneformer).
 
31
  Here is how to use this model:
32
 
33
  ```python
34
+ from transformers import OneFormerImageProcessor, OneFormerForUniversalSegmentation
35
  from PIL import Image
36
  import requests
37
  url = "https://huggingface.co/datasets/shi-labs/oneformer_demo/resolve/main/cityscapes.png"
38
  image = Image.open(requests.get(url, stream=True).raw)
39
 
40
  # Loading a single model for all three tasks
41
+ image_processor = OneFormerImageProcessor.from_pretrained("shi-labs/oneformer_cityscapes_dinat_large")
42
  model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_dinat_large")
43
 
44
  # Semantic Segmentation
45
+ semantic_inputs = image_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
46
  semantic_outputs = model(**semantic_inputs)
47
+ # pass through image_processor for postprocessing
48
+ predicted_semantic_map = image_processor.post_process_semantic_segmentation(semantic_outputs, target_sizes=[image.size[::-1]])[0]
49
 
50
  # Instance Segmentation
51
+ instance_inputs = image_processor(images=image, task_inputs=["instance"], return_tensors="pt")
52
  instance_outputs = model(**instance_inputs)
53
+ # pass through image_processor for postprocessing
54
+ predicted_instance_map = image_processor.post_process_instance_segmentation(instance_outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
55
 
56
  # Panoptic Segmentation
57
+ panoptic_inputs = image_processor(images=image, task_inputs=["panoptic"], return_tensors="pt")
58
  panoptic_outputs = model(**panoptic_inputs)
59
+ # pass through image_processor for postprocessing
60
+ predicted_panoptic_map = image_processor.post_process_panoptic_segmentation(panoptic_outputs, target_sizes=[image.size[::-1]])[0]["segmentation"]
61
  ```
62
 
63
  For more examples, please refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/oneformer).