patrickvonplaten committed
Commit 03e8165
1 Parent(s): 5f2b573

Update README.md

Files changed (1):
  1. README.md +12 -12
README.md CHANGED

@@ -34,7 +34,7 @@ import torch
 from diffusers import AmusedPipeline
 
 pipe = AmusedPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 pipe = pipe.to("cuda")
 
@@ -52,7 +52,7 @@ import torch
 from diffusers import AmusedPipeline
 
 pipe = AmusedPipeline.from_pretrained(
-    "huggingface/amused-512", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
 )
 pipe = pipe.to("cuda")
 
@@ -73,7 +73,7 @@ from diffusers import AmusedImg2ImgPipeline
 from diffusers.utils import load_image
 
 pipe = AmusedImg2ImgPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 pipe = pipe.to("cuda")
 
@@ -100,7 +100,7 @@ from diffusers import AmusedImg2ImgPipeline
 from diffusers.utils import load_image
 
 pipe = AmusedImg2ImgPipeline.from_pretrained(
-    "huggingface/amused-512", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
 )
 pipe = pipe.to("cuda")
 
@@ -130,7 +130,7 @@ from diffusers.utils import load_image
 from PIL import Image
 
 pipe = AmusedInpaintPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 pipe = pipe.to("cuda")
 
@@ -166,7 +166,7 @@ from diffusers import AmusedInpaintPipeline
 from diffusers.utils import load_image
 
 pipe = AmusedInpaintPipeline.from_pretrained(
-    "huggingface/amused-512", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
 )
 pipe = pipe.to("cuda")
 
@@ -233,7 +233,7 @@ import torch
 from diffusers import AmusedPipeline
 
 pipe = AmusedPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 
 # HERE use torch.compile
@@ -317,7 +317,7 @@ accelerate launch training/training.py \
     --gradient_accumulation_steps <gradient accumulation steps> \
     --learning_rate 2e-5 \
     --use_8bit_adam \
-    --pretrained_model_name_or_path huggingface/amused-256 \
+    --pretrained_model_name_or_path amused/amused-256 \
     --instance_data_dataset 'm1guelpf/nouns' \
     --image_key image \
     --prompt_key text \
@@ -402,7 +402,7 @@ accelerate launch training/training.py \
     --train_batch_size <batch size> \
     --gradient_accumulation_steps <gradient accumulation steps> \
     --learning_rate 8e-5 \
-    --pretrained_model_name_or_path huggingface/amused-512 \
+    --pretrained_model_name_or_path amused/amused-512 \
     --instance_data_dataset 'monadical-labs/minecraft-preview' \
     --prompt_prefix 'minecraft ' \
     --image_key image \
@@ -474,7 +474,7 @@ accelerate launch training/training.py \
     --train_batch_size <batch size> \
     --gradient_accumulation_steps <gradient accumulation steps> \
     --learning_rate 1e-4 \
-    --pretrained_model_name_or_path huggingface/amused-512 \
+    --pretrained_model_name_or_path amused/amused-512 \
     --instance_data_dataset 'monadical-labs/minecraft-preview' \
     --prompt_prefix 'minecraft ' \
     --image_key image \
@@ -515,7 +515,7 @@ accelerate launch ./training/training.py \
     --mixed_precision fp16 \
     --report_to wandb \
     --use_lora \
-    --pretrained_model_name_or_path huggingface/amused-256 \
+    --pretrained_model_name_or_path amused/amused-256 \
     --train_batch_size 1 \
     --lr_scheduler constant \
     --learning_rate 4e-4 \
@@ -545,7 +545,7 @@ accelerate launch ./training/training.py \
     --mixed_precision fp16 \
     --report_to wandb \
     --use_lora \
-    --pretrained_model_name_or_path huggingface/amused-512 \
+    --pretrained_model_name_or_path amused/amused-512 \
     --train_batch_size 1 \
     --lr_scheduler constant \
     --learning_rate 1e-3 \
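The change above only renames the checkpoint path from `huggingface/amused-256`/`huggingface/amused-512` to `amused/amused-256`/`amused/amused-512`. For reference, a minimal self-contained sketch of the updated text-to-image call, assuming the `AmusedPipeline` API exactly as shown in the diff; the prompt and output filename are illustrative:

```python
import torch
from diffusers import AmusedPipeline

# load the fp16 variant from the renamed repo location
pipe = AmusedPipeline.from_pretrained(
    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# illustrative prompt; amused-256 generates 256x256 images
image = pipe("a portrait of a fox in a renaissance painting").images[0]
image.save("fox.png")
```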
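Similarly, a sketch of the image-to-image path with the renamed 512 checkpoint; the input URL and the `strength` value are placeholders, not taken from the diff:

```python
import torch
from diffusers import AmusedImg2ImgPipeline
from diffusers.utils import load_image

pipe = AmusedImg2ImgPipeline.from_pretrained(
    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# placeholder input image, resized to the model's native 512x512 resolution
init_image = load_image("https://example.com/input.png").resize((512, 512))

# lower strength keeps more of the input image
image = pipe("winter mountain landscape", init_image, strength=0.7).images[0]
image.save("img2img_out.png")
```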
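For inpainting, a sketch under the same assumptions; the image and mask URLs are placeholders, and the mask is assumed to follow the usual diffusers convention (white marks the region to repaint):

```python
import torch
from diffusers import AmusedInpaintPipeline
from diffusers.utils import load_image

pipe = AmusedInpaintPipeline.from_pretrained(
    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# placeholder image and mask, resized to the 256 model's resolution
init_image = load_image("https://example.com/input.png").resize((256, 256))
mask_image = load_image("https://example.com/mask.png").resize((256, 256))

image = pipe("a red fox sitting in the grass", init_image, mask_image).images[0]
image.save("inpaint_out.png")
```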
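The `# HERE use torch.compile` context line points at the README's speed-up section. One plausible pattern is compiling the pipeline's denoising backbone; exposing it as `pipe.transformer` is an assumption here, not something shown in the diff:

```python
import torch
from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained(
    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# assumption: the U-ViT backbone is exposed as `pipe.transformer`;
# the first call pays the compilation cost, later calls reuse the graph
pipe.transformer = torch.compile(pipe.transformer, fullgraph=True)

image = pipe("a photo of a dog").images[0]
```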