main.py
import base64
import io
from typing import Optional
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from pydantic import BaseModel

class Item(BaseModel):
    """Request schema for a single generation call."""
    prompt: str
    height: Optional[int] = 512
    width: Optional[int] = 512
    num_inference_steps: Optional[int] = 25
    num_images_per_prompt: Optional[int] = 1


model_id = "stabilityai/stable-diffusion-2-1"

print("Loading model...")
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
print("Loaded.")

# Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead of the default.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
print("Loaded scheduler.")

pipe.enable_xformers_memory_efficient_attention()
print("Enabled memory efficient attention.")

pipe = pipe.to("cuda")
print("Moved to GPU.")
print(pipe.device)


def predict(item):
    # Validate and coerce the incoming dict against the Item schema.
    item = Item(**item)

    # Run the diffusion pipeline; .images is a list of PIL images.
    images = pipe(
        prompt=item.prompt,
        height=item.height,
        width=item.width,
        num_images_per_prompt=item.num_images_per_prompt,
        num_inference_steps=item.num_inference_steps,
    ).images

    # Encode each image as a base64 PNG string so the response is JSON-serializable.
    finished_images = []
    for image in images:
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        finished_images.append(base64.b64encode(buffered.getvalue()).decode("utf-8"))

    return finished_images
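
# Usage sketch (not part of the original handler): a hypothetical local smoke test
# that calls predict() directly, decodes the base64-encoded PNGs it returns, and
# writes them to disk. The payload keys mirror the Item model above; the prompt text
# and the "out_{i}.png" filenames are made up for illustration.
if __name__ == "__main__":
    result = predict({
        "prompt": "a watercolor painting of a lighthouse at dawn",
        "num_inference_steps": 25,
        "num_images_per_prompt": 1,
    })
    for i, encoded in enumerate(result):
        with open(f"out_{i}.png", "wb") as f:
            f.write(base64.b64decode(encoded))
    print(f"Wrote {len(result)} image(s).")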