train.py
import argparse
import os

import torch

from easyphoto.easyphoto_config import easyphoto_models_path
from easyphoto.easyphoto_down import download_dataset_from_s3, down_sd_model
from easyphoto.easyphoto_train import easyphoto_train_forward, models_path


def download_model_from_s3():
    # Fetch the base Stable Diffusion checkpoint and the SDXL fp16-fix VAE from S3.
    down_sd_model("", os.path.join(models_path, "Stable-diffusion"))
    down_sd_model(
        "",
        os.path.join(easyphoto_models_path, "stable-diffusion-xl", "madebyollin_sdxl_vae_fp16_fix"),
    )


def training():
    # Optional: fetch the base models and the training dataset from S3.
    # download_model_from_s3()
    # user_path = f'./datasets/{opt.user_id}/{opt.unique_id}'
    # if opt.s3Url != '':
    #     download_dataset_from_s3(opt.s3Url, user_path)
    #     print(f'download dataset from s3: {opt.s3Url} success.')
    # else:
    #     print(f'The dataset is empty.')
    #     return f'The dataset is empty.'

    # Collect the training images mounted at the SageMaker input channel.
    image_dir = "/opt/ml/input/data/images/"
    instance_images = []
    for img_name in os.listdir(image_dir):
        instance_images.append(os.path.join(image_dir, img_name))
    print("instance_images: ", instance_images)

    try:
        message = easyphoto_train_forward(
            user_id=opt.user_id,
            unique_id=opt.unique_id,
            train_mode_choose=opt.train_mode_choose,
            resolution=opt.resolution,
            val_and_checkpointing_steps=opt.val_and_checkpointing_steps,
            max_train_steps=opt.max_train_steps,
            steps_per_photos=opt.steps_per_photos,
            train_batch_size=opt.train_batch_size,
            gradient_accumulation_steps=opt.gradient_accumulation_steps,
            dataloader_num_workers=opt.dataloader_num_workers,
            learning_rate=opt.learning_rate,
            rank=opt.rank,
            network_alpha=opt.network_alpha,
            instance_images=instance_images,
            skin_retouching_bool=opt.skin_retouching_bool,
            training_prefix_prompt=opt.training_prefix_prompt,
            crop_ratio=opt.crop_ratio,
        )
    except Exception as e:
        # Free GPU memory before reporting the failure.
        torch.cuda.empty_cache()
        message = f"Train error, error info:{str(e)}"
    print(message)
    return {"message": message}


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Train a model')
    parser.add_argument('--sd_model_checkpoint', type=str, default="sd_xl_base_1.0.safetensors")
    parser.add_argument('--user_id', type=str, default="test")
    parser.add_argument('--unique_id', type=str, default="test_00")
    parser.add_argument('--train_mode_choose', type=str, default="Train Human Lora")
    parser.add_argument('--resolution', type=int, default=1024)
    parser.add_argument('--val_and_checkpointing_steps', type=int, default=100)
    parser.add_argument('--max_train_steps', type=int, default=400)
    parser.add_argument('--steps_per_photos', type=int, default=200)
    parser.add_argument('--train_batch_size', type=int, default=2)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=4)
    parser.add_argument('--dataloader_num_workers', type=int, default=16)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--rank', type=int, default=32)
    parser.add_argument('--network_alpha', type=int, default=16)
    # Note: argparse's type=bool treats any non-empty string (including "False") as True;
    # pass an empty string to disable skin retouching.
    parser.add_argument('--skin_retouching_bool', type=bool, default=True)
    parser.add_argument('--training_prefix_prompt', type=str, default="")
    parser.add_argument('--crop_ratio', type=float, default=3)
    opt = parser.parse_args()
    training()
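
Example invocation (a minimal sketch, assuming the training images have already been placed under /opt/ml/input/data/images/, e.g. by a SageMaker input channel; the flag names come from the parser above, and the values shown are only the illustrative defaults):

    python train.py \
        --user_id test \
        --unique_id test_00 \
        --train_mode_choose "Train Human Lora" \
        --resolution 1024 \
        --max_train_steps 400 \
        --learning_rate 1e-4 \
        --rank 32 \
        --network_alpha 16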