ACCC1380 ACCA225 committed on
Commit
6fbea38
1 Parent(s): e2c021e

Upload HunYuan1.2.json (#6)

Browse files

- Upload HunYuan1.2.json (33848a9959f692bbe14c04393d1862600a6eed1b)


Co-authored-by: ABCD9099 <[email protected]>

Files changed (1) hide show
  1. HunYuanDiT-v1.2/HunYuan1.2.json +496 -0
HunYuanDiT-v1.2/HunYuan1.2.json ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "last_node_id": 34,
3
+ "last_link_id": 14,
4
+ "nodes": [
5
+ {
6
+ "id": 28,
7
+ "type": "VAEDecode",
8
+ "pos": [
9
+ 287,
10
+ 387
11
+ ],
12
+ "size": {
13
+ "0": 210,
14
+ "1": 46
15
+ },
16
+ "flags": {},
17
+ "order": 8,
18
+ "mode": 0,
19
+ "inputs": [
20
+ {
21
+ "name": "samples",
22
+ "type": "LATENT",
23
+ "link": 4
24
+ },
25
+ {
26
+ "name": "vae",
27
+ "type": "VAE",
28
+ "link": 7
29
+ }
30
+ ],
31
+ "outputs": [
32
+ {
33
+ "name": "IMAGE",
34
+ "type": "IMAGE",
35
+ "links": [
36
+ 5
37
+ ],
38
+ "shape": 3
39
+ }
40
+ ],
41
+ "properties": {
42
+ "Node name for S&R": "VAEDecode"
43
+ }
44
+ },
45
+ {
46
+ "id": 27,
47
+ "type": "EmptyLatentImage",
48
+ "pos": [
49
+ -147,
50
+ 310
51
+ ],
52
+ "size": {
53
+ "0": 315,
54
+ "1": 106
55
+ },
56
+ "flags": {},
57
+ "order": 0,
58
+ "mode": 0,
59
+ "outputs": [
60
+ {
61
+ "name": "LATENT",
62
+ "type": "LATENT",
63
+ "links": [
64
+ 3
65
+ ],
66
+ "shape": 3
67
+ }
68
+ ],
69
+ "properties": {
70
+ "Node name for S&R": "EmptyLatentImage"
71
+ },
72
+ "widgets_values": [
73
+ 1024,
74
+ 1024,
75
+ 1
76
+ ]
77
+ },
78
+ {
79
+ "id": 26,
80
+ "type": "CLIPTextEncode",
81
+ "pos": [
82
+ -479,
83
+ 574
84
+ ],
85
+ "size": {
86
+ "0": 400,
87
+ "1": 200
88
+ },
89
+ "flags": {},
90
+ "order": 5,
91
+ "mode": 0,
92
+ "inputs": [
93
+ {
94
+ "name": "clip",
95
+ "type": "CLIP",
96
+ "link": 13
97
+ }
98
+ ],
99
+ "outputs": [
100
+ {
101
+ "name": "CONDITIONING",
102
+ "type": "CONDITIONING",
103
+ "links": [
104
+ 2
105
+ ],
106
+ "shape": 3
107
+ }
108
+ ],
109
+ "properties": {
110
+ "Node name for S&R": "CLIPTextEncode"
111
+ },
112
+ "widgets_values": [
113
+ "错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,"
114
+ ]
115
+ },
116
+ {
117
+ "id": 32,
118
+ "type": "DiffusersVAELoader",
119
+ "pos": [
120
+ -123,
121
+ 463
122
+ ],
123
+ "size": {
124
+ "0": 303.9019775390625,
125
+ "1": 58
126
+ },
127
+ "flags": {},
128
+ "order": 1,
129
+ "mode": 0,
130
+ "outputs": [
131
+ {
132
+ "name": "VAE",
133
+ "type": "VAE",
134
+ "links": [
135
+ 7
136
+ ],
137
+ "shape": 3,
138
+ "slot_index": 0
139
+ }
140
+ ],
141
+ "properties": {
142
+ "Node name for S&R": "DiffusersVAELoader"
143
+ },
144
+ "widgets_values": [
145
+ "diffusion_pytorch_model.bin"
146
+ ]
147
+ },
148
+ {
149
+ "id": 24,
150
+ "type": "KSampler",
151
+ "pos": [
152
+ 160,
153
+ 515
154
+ ],
155
+ "size": {
156
+ "0": 315,
157
+ "1": 262
158
+ },
159
+ "flags": {},
160
+ "order": 7,
161
+ "mode": 0,
162
+ "inputs": [
163
+ {
164
+ "name": "model",
165
+ "type": "MODEL",
166
+ "link": 12,
167
+ "slot_index": 0
168
+ },
169
+ {
170
+ "name": "positive",
171
+ "type": "CONDITIONING",
172
+ "link": 1
173
+ },
174
+ {
175
+ "name": "negative",
176
+ "type": "CONDITIONING",
177
+ "link": 2
178
+ },
179
+ {
180
+ "name": "latent_image",
181
+ "type": "LATENT",
182
+ "link": 3
183
+ }
184
+ ],
185
+ "outputs": [
186
+ {
187
+ "name": "LATENT",
188
+ "type": "LATENT",
189
+ "links": [
190
+ 4
191
+ ],
192
+ "shape": 3
193
+ }
194
+ ],
195
+ "properties": {
196
+ "Node name for S&R": "KSampler"
197
+ },
198
+ "widgets_values": [
199
+ 42,
200
+ "fixed",
201
+ 50,
202
+ 6,
203
+ "ddpm",
204
+ "normal",
205
+ 1
206
+ ]
207
+ },
208
+ {
209
+ "id": 34,
210
+ "type": "LoraLoader",
211
+ "pos": [
212
+ 94,
213
+ 125
214
+ ],
215
+ "size": {
216
+ "0": 315,
217
+ "1": 126
218
+ },
219
+ "flags": {},
220
+ "order": 4,
221
+ "mode": 0,
222
+ "inputs": [
223
+ {
224
+ "name": "model",
225
+ "type": "MODEL",
226
+ "link": 10
227
+ },
228
+ {
229
+ "name": "clip",
230
+ "type": "CLIP",
231
+ "link": 11
232
+ }
233
+ ],
234
+ "outputs": [
235
+ {
236
+ "name": "MODEL",
237
+ "type": "MODEL",
238
+ "links": [
239
+ 12
240
+ ],
241
+ "shape": 3,
242
+ "slot_index": 0
243
+ },
244
+ {
245
+ "name": "CLIP",
246
+ "type": "CLIP",
247
+ "links": [
248
+ 13,
249
+ 14
250
+ ],
251
+ "shape": 3,
252
+ "slot_index": 1
253
+ }
254
+ ],
255
+ "properties": {
256
+ "Node name for S&R": "LoraLoader"
257
+ },
258
+ "widgets_values": [
259
+ "downloaded_file.safetensors",
260
+ 0,
261
+ 0
262
+ ]
263
+ },
264
+ {
265
+ "id": 29,
266
+ "type": "PreviewImage",
267
+ "pos": [
268
+ 529,
269
+ 200
270
+ ],
271
+ "size": {
272
+ "0": 539.4452514648438,
273
+ "1": 385.4747619628906
274
+ },
275
+ "flags": {},
276
+ "order": 9,
277
+ "mode": 0,
278
+ "inputs": [
279
+ {
280
+ "name": "images",
281
+ "type": "IMAGE",
282
+ "link": 5
283
+ }
284
+ ],
285
+ "properties": {
286
+ "Node name for S&R": "PreviewImage"
287
+ }
288
+ },
289
+ {
290
+ "id": 25,
291
+ "type": "CLIPTextEncode",
292
+ "pos": [
293
+ -416,
294
+ 43
295
+ ],
296
+ "size": {
297
+ "0": 400,
298
+ "1": 200
299
+ },
300
+ "flags": {},
301
+ "order": 6,
302
+ "mode": 0,
303
+ "inputs": [
304
+ {
305
+ "name": "clip",
306
+ "type": "CLIP",
307
+ "link": 14
308
+ }
309
+ ],
310
+ "outputs": [
311
+ {
312
+ "name": "CONDITIONING",
313
+ "type": "CONDITIONING",
314
+ "links": [
315
+ 1
316
+ ],
317
+ "shape": 3
318
+ }
319
+ ],
320
+ "properties": {
321
+ "Node name for S&R": "CLIPTextEncode"
322
+ },
323
+ "widgets_values": [
324
+ "1 girl"
325
+ ]
326
+ },
327
+ {
328
+ "id": 33,
329
+ "type": "DiffusersCLIPLoader",
330
+ "pos": [
331
+ -501,
332
+ 311
333
+ ],
334
+ "size": {
335
+ "0": 315,
336
+ "1": 82
337
+ },
338
+ "flags": {},
339
+ "order": 2,
340
+ "mode": 0,
341
+ "outputs": [
342
+ {
343
+ "name": "CLIP",
344
+ "type": "CLIP",
345
+ "links": [
346
+ 11
347
+ ],
348
+ "shape": 3,
349
+ "slot_index": 0
350
+ }
351
+ ],
352
+ "properties": {
353
+ "Node name for S&R": "DiffusersCLIPLoader"
354
+ },
355
+ "widgets_values": [
356
+ "pytorch_model-Copy1.bin",
357
+ "pytorch_model.bin"
358
+ ]
359
+ },
360
+ {
361
+ "id": 31,
362
+ "type": "DiffusersCheckpointLoader",
363
+ "pos": [
364
+ -474,
365
+ 433
366
+ ],
367
+ "size": {
368
+ "0": 315,
369
+ "1": 82
370
+ },
371
+ "flags": {},
372
+ "order": 3,
373
+ "mode": 0,
374
+ "outputs": [
375
+ {
376
+ "name": "MODEL",
377
+ "type": "MODEL",
378
+ "links": [
379
+ 10
380
+ ],
381
+ "shape": 3,
382
+ "slot_index": 0
383
+ }
384
+ ],
385
+ "properties": {
386
+ "Node name for S&R": "DiffusersCheckpointLoader"
387
+ },
388
+ "widgets_values": [
389
+ "official.pt",
390
+ "v1.2"
391
+ ]
392
+ }
393
+ ],
394
+ "links": [
395
+ [
396
+ 1,
397
+ 25,
398
+ 0,
399
+ 24,
400
+ 1,
401
+ "CONDITIONING"
402
+ ],
403
+ [
404
+ 2,
405
+ 26,
406
+ 0,
407
+ 24,
408
+ 2,
409
+ "CONDITIONING"
410
+ ],
411
+ [
412
+ 3,
413
+ 27,
414
+ 0,
415
+ 24,
416
+ 3,
417
+ "LATENT"
418
+ ],
419
+ [
420
+ 4,
421
+ 24,
422
+ 0,
423
+ 28,
424
+ 0,
425
+ "LATENT"
426
+ ],
427
+ [
428
+ 5,
429
+ 28,
430
+ 0,
431
+ 29,
432
+ 0,
433
+ "IMAGE"
434
+ ],
435
+ [
436
+ 7,
437
+ 32,
438
+ 0,
439
+ 28,
440
+ 1,
441
+ "VAE"
442
+ ],
443
+ [
444
+ 10,
445
+ 31,
446
+ 0,
447
+ 34,
448
+ 0,
449
+ "MODEL"
450
+ ],
451
+ [
452
+ 11,
453
+ 33,
454
+ 0,
455
+ 34,
456
+ 1,
457
+ "CLIP"
458
+ ],
459
+ [
460
+ 12,
461
+ 34,
462
+ 0,
463
+ 24,
464
+ 0,
465
+ "MODEL"
466
+ ],
467
+ [
468
+ 13,
469
+ 34,
470
+ 1,
471
+ 26,
472
+ 0,
473
+ "CLIP"
474
+ ],
475
+ [
476
+ 14,
477
+ 34,
478
+ 1,
479
+ 25,
480
+ 0,
481
+ "CLIP"
482
+ ]
483
+ ],
484
+ "groups": [],
485
+ "config": {},
486
+ "extra": {
487
+ "ds": {
488
+ "scale": 0.672749994932571,
489
+ "offset": [
490
+ 664.5132440927649,
491
+ 73.75977428794693
492
+ ]
493
+ }
494
+ },
495
+ "version": 0.4
496
+ }