bguisard committed
Commit 79001f7
1 Parent(s): 12070fa

First version (100k steps)

.DS_Store ADDED
Binary file (6.15 kB).
 
README.md ADDED
@@ -0,0 +1,33 @@
+
+ ---
+ license: creativeml-openrail-m
+ base_model: runwayml/stable-diffusion-v1-5
+ tags:
+ - stable-diffusion
+ - stable-diffusion-diffusers
+ - text-to-image
+ - diffusers
+ - jax-diffusers-event
+ inference: true
+ ---
+
+ # Stable Diffusion Nano
+
+ prompt: A watercolor painting of an otter
+ ![images_0](./images_0.png)
+ prompt: Marvel MCU deadpool, red mask, red shirt, red gloves, black shoulders, black elbow pads, black legs, gold buckle, black belt, black mask, white eyes, black boots, fuji low light color 35mm film, downtown Osaka alley at night out of focus in background, neon lights
+ ![images_1](./images_1.png)
+
+
+ ## Training details
+ All parameters were initialized from the runwayml/stable-diffusion-v1-5 model. The UNet was fine-tuned as follows:
+
+ - 100,000 steps training the full UNet, learning rate = 1e-5, batch size = 512 (128 per TPU).
+ - Trained on [LAION Improved Aesthetics 6plus](https://huggingface.co/datasets/ChristophSchuhmann/improved_aesthetics_6plus).
+
+ ## License
+ This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies:
+
+ - You can't use the model to deliberately produce or share illegal or harmful outputs or content.
+ - The authors claim no rights over the outputs you generate; you are free to use them, but you are accountable for their use, which must not go against the provisions set in the license.
+ - You may redistribute the weights and use the model commercially and/or as a service. If you do, please be aware that you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M license with all your users (please read the license entirely and carefully). Please read the full license here.
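The card above shows sample prompts but no inference code. Below is a minimal Flax sketch; it assumes the repository id `bguisard/stable-diffusion-nano` (inferred from the committer and model name, not stated in this commit) and uses the standard `diffusers` Flax idiom of replicating parameters across devices and sharding inputs:

```python
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

# Repo id below is an assumption inferred from the committer and model name.
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "bguisard/stable-diffusion-nano"
)

prompt = "A watercolor painting of an otter"
num_devices = jax.device_count()

# One copy of the params per device, one prompt per device.
prompt_ids = shard(pipeline.prepare_inputs([prompt] * num_devices))
params_repl = replicate(params)
rng = jax.random.split(jax.random.PRNGKey(0), num_devices)

images = pipeline(prompt_ids, params_repl, rng,
                  num_inference_steps=25, jit=True).images
images = pipeline.numpy_to_pil(
    np.asarray(images.reshape((num_devices,) + images.shape[-3:]))
)
images[0].save("otter.png")
```

`from_pretrained` returns the pipeline and its parameters separately because Flax modules are stateless; `jit=True` pmaps the generation loop across all available devices.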
images_0.png ADDED
images_1.png ADDED
model_index.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "FlaxStableDiffusionPipeline",
+   "_diffusers_version": "0.16.0.dev0",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "safety_checker": [
+     null,
+     null
+   ],
+   "scheduler": [
+     "diffusers",
+     "FlaxDPMSolverMultistepScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "FlaxCLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "FlaxUNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "FlaxAutoencoderKL"
+   ]
+ }
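Each entry in `model_index.json` pairs a subfolder with the (library, class) that loads it; the `null` entries mean this checkpoint ships without a feature extractor or safety checker. Continuing the sketch above, the loaded pipeline exposes each resolved component as an attribute:

```python
# Components resolved from model_index.json (same pipeline object as above).
print(type(pipeline.unet).__name__)       # FlaxUNet2DConditionModel
print(type(pipeline.scheduler).__name__)  # FlaxDPMSolverMultistepScheduler
print(pipeline.safety_checker)            # None: the index lists [null, null]
```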
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "_class_name": "FlaxDPMSolverMultistepScheduler",
+   "_diffusers_version": "0.16.0.dev0",
+   "algorithm_type": "dpmsolver++",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "dynamic_thresholding_ratio": 0.995,
+   "lower_order_final": true,
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "solver_order": 2,
+   "solver_type": "midpoint",
+   "steps_offset": 1,
+   "thresholding": false,
+   "trained_betas": null
+ }
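The scheduler is a second-order DPM-Solver++ (`solver_order: 2`, midpoint correction) over 1000 training timesteps with SD's usual scaled-linear beta schedule, which is why the inference sketch above can get away with 25 steps. A hedged sketch of loading it standalone, again under the assumed repo id:

```python
from diffusers import FlaxDPMSolverMultistepScheduler

# Flax schedulers are stateless: from_pretrained returns the scheduler
# plus an immutable state that every call takes and returns.
scheduler, state = FlaxDPMSolverMultistepScheduler.from_pretrained(
    "bguisard/stable-diffusion-nano",  # assumed repo id
    subfolder="scheduler",
)
state = scheduler.set_timesteps(state, num_inference_steps=25,
                                shape=(1, 4, 64, 64))
print(state.timesteps)  # 25 timesteps selected from the 1000 used in training
```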
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "openai/clip-vit-large-patch14",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.1",
+   "vocab_size": 49408
+ }
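The text encoder is the CLIP ViT-L/14 text model that SD 1.5 uses: 12 layers, 768-dimensional hidden states, and a 77-token context, matching the UNet's `cross_attention_dim` of 768 (per the card, only the UNet was fine-tuned). A sketch of producing the conditioning embeddings by hand, repo id again assumed:

```python
from transformers import CLIPTokenizer, FlaxCLIPTextModel

repo = "bguisard/stable-diffusion-nano"  # assumed repo id
tokenizer = CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer")
text_encoder = FlaxCLIPTextModel.from_pretrained(repo, subfolder="text_encoder")

# Pad/truncate to the encoder's 77-token window (max_position_embeddings).
inputs = tokenizer(
    "A watercolor painting of an otter",
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="np",
)
embeddings = text_encoder(input_ids=inputs.input_ids)[0]
print(embeddings.shape)  # (1, 77, 768): fed to the UNet cross-attention
```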
text_encoder/flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41eba2abe3a73b328cd047c899066febb92a8e12b1e6cfe0cb4d5d5ac6b5c978
+ size 492248682
tokenizer/merges.txt ADDED
The diff for this file is too large to render.
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
File without changes
tokenizer/vocab.json ADDED
The diff for this file is too large to render.
unet/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_class_name": "FlaxUNet2DConditionModel",
+   "_diffusers_version": "0.16.0.dev0",
+   "act_fn": "silu",
+   "attention_head_dim": 8,
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "cross_attention_dim": 768,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_scale_factor": 1,
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "sample_size": 64,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "use_linear_projection": false,
+   "use_memory_efficient_attention": false
+ }
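This is the stock SD 1.5 UNet shape: 4 latent channels in and out, a 64x64 latent `sample_size`, and 768-dimensional cross-attention; per the card above, it is the only component that was fine-tuned. A single denoising step looks roughly like this (repo id and the zero text embeddings are placeholders):

```python
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
    "bguisard/stable-diffusion-nano",  # assumed repo id
    subfolder="unet",
)

latents = jax.random.normal(jax.random.PRNGKey(0), (1, 4, 64, 64))
timestep = jnp.array([999])
text_embeds = jnp.zeros((1, 77, 768))  # placeholder for real CLIP embeddings

# The scheduler's prediction_type is "epsilon": the UNet predicts noise.
noise_pred = unet.apply(
    {"params": unet_params}, latents, timestep, text_embeds
).sample
print(noise_pred.shape)  # (1, 4, 64, 64)
```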
unet/diffusion_flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caa0d87db3b72ac7180d8aa7ac72d6f3153266fc4527117b35955fa13e65a67b
+ size 3438108367
vae/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_class_name": "FlaxAutoencoderKL",
+   "_diffusers_version": "0.16.0.dev0",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 512,
+   "scaling_factor": 0.18215,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
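The autoencoder is likewise the standard SD 1.5 VAE: three 2x downsamples take a 512x512 RGB image to 4-channel 64x64 latents, and `scaling_factor` (0.18215) is the constant latents are multiplied by after encoding and divided by before decoding. A decoding sketch under the same repo-id assumption:

```python
import jax
import jax.numpy as jnp
from diffusers import FlaxAutoencoderKL

vae, vae_params = FlaxAutoencoderKL.from_pretrained(
    "bguisard/stable-diffusion-nano",  # assumed repo id
    subfolder="vae",
)

# Undo the latent scaling before decoding, as the SD pipelines do.
latents = jax.random.normal(jax.random.PRNGKey(0), (1, 4, 64, 64))
images = vae.apply(
    {"params": vae_params}, latents / vae.config.scaling_factor,
    method=vae.decode,
).sample
print(images.shape)  # (1, 3, 512, 512), values roughly in [-1, 1]
```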
vae/diffusion_flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed39fc57b0224dec2c0cc6f9a532633ccef89815e40b544a6b8f38f422023d8a
+ size 334623853