# SDXL LoRA training configuration for kohya_ss / sd-scripts.
# NOTE(review): this file previously had all pairs on one physical line,
# which is invalid TOML (the spec requires one key/value pair per line).
# Reformatted only; every key and value is unchanged, original order kept.

# -- Bucketing (aspect-ratio buckets for mixed-resolution datasets) --
bucket_no_upscale = true
bucket_reso_steps = 64

# -- Caching --
cache_latents = true
cache_text_encoder_outputs = true

caption_extension = ".txt"
clip_skip = 1
dynamo_backend = "no"
enable_bucket = true
epoch = 10

# -- Gradient handling --
gradient_accumulation_steps = 1
gradient_checkpointing = true

# Huber loss parameters (only used when loss_type selects huber;
# loss_type is "l2" here, so presumably inert — confirm against trainer).
huber_c = 0.1
huber_schedule = "snr"

# -- Learning rates / scheduler --
learning_rate = 0.0004
logging_dir = "/workspace/lora/log"
loss_type = "l2"
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1

# -- Limits and dataset bounds --
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_grad_norm = 1
max_timestep = 1000
max_token_length = 75
max_train_steps = 1600
min_bucket_reso = 256
mixed_precision = "bf16"
multires_noise_discount = 0.3

# -- LoRA network --
network_alpha = 1
network_args = []
network_dim = 128
network_module = "networks.lora"

no_half_vae = true
noise_offset_type = "Original"

# -- Optimizer --
optimizer_args = []
optimizer_type = "Adafactor"

# -- Output --
output_dir = "/workspace/lora/model"
output_name = "luna"
pretrained_model_name_or_path = "/workspace/kohya_ss/models/sd_xl_base_1.0.safetensors"
prior_loss_weight = 1

# Width,height as a string — this is the format the trainer expects.
resolution = "1024,1024"

# -- Sampling during training --
sample_prompts = "/workspace/lora/model/prompt.txt"
sample_sampler = "euler_a"

# -- Saving --
save_every_n_epochs = 2
save_model_as = "safetensors"
save_precision = "bf16"

# Per-component learning rates (match the global learning_rate above).
text_encoder_lr = 0.0004
train_batch_size = 1
train_data_dir = "/workspace/lora/img"
unet_lr = 0.0004
xformers = true