anton-l HF staff multimodalart HF staff committed on
Commit
a79ddfe
1 Parent(s): 76196c8

Update README.md with the new API (#2)

Browse files

- Update README.md with the new API (59496942a4509399e19931b26892e1d50ed016ea)


Co-authored-by: Multimodal AI art <[email protected]>

Files changed (1) hide show
  1. README.md +9 -15
README.md CHANGED
@@ -24,29 +24,23 @@ Please raise any concerns you may have.
24
 
25
  ```python
26
  # !pip install diffusers
27
- from diffusers import DiffusionPipeline
28
  import PIL.Image
29
  import numpy as np
30
 
31
- model_id = "CompVis/ldm-text2im-large-256q"
32
 
33
  # load model and scheduler
34
- ldm = DiffusionPipeline.from_pretrained(model_id)
35
 
36
  # run pipeline in inference (sample random noise and denoise)
37
  prompt = "A painting of a squirrel eating a burger"
38
- image = ldm([prompt])
39
 
40
- # process image to PIL
41
- image_processed = image.cpu().permute(0, 2, 3, 1)
42
- image_processed = (image_processed + 1.0) * 127.5
43
- image_processed = image_processed.numpy().astype(np.uint8)
44
- image_pil = PIL.Image.fromarray(image_processed[0])
45
-
46
- # save image
47
- image_pil.save("test.png")
48
  ```
49
 
50
- ## Samples
51
-
52
- TODO ...
 
24
 
25
  ```python
26
  # !pip install diffusers
27
+ from diffusers import LDMTextToImagePipeline
28
  import PIL.Image
29
  import numpy as np
30
 
31
+ model_id = "CompVis/ldm-text2im-large-256"
32
 
33
  # load model and scheduler
34
+ ldm = LDMTextToImagePipeline.from_pretrained(model_id)
35
 
36
  # run pipeline in inference (sample random noise and denoise)
37
  prompt = "A painting of a squirrel eating a burger"
38
+ images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=0.6, batch_size=1)["sample"]
39
 
40
+ # save images
41
+ for idx, image in enumerate(images):
42
+     image.save(f"squirrel-{idx}.png")
 
 
 
 
 
43
  ```
44
 
45
+ ## Demo
46
+ [Hugging Face Spaces](https://huggingface.co/spaces/CompVis/ldm-text2im-large-256-diffusers)