Stable Diffusion ControlNet support #813

Open · wants to merge 15 commits into base: main
84 changes: 84 additions & 0 deletions optimum/exporters/openvino/model_configs.py
@@ -604,6 +604,90 @@ def outputs(self) -> Dict[str, Dict[int, str]]:
}


@register_in_tasks_manager("unet-controlnet", *["semantic-segmentation"], library_name="diffusers")
class UNetControlNetOpenVINOConfig(UNetOnnxConfig):
    @property
    def inputs(self) -> Dict[str, Dict[int, str]]:
        common_inputs = {
            "sample": {0: "batch_size", 2: "height", 3: "width"},
            "timestep": {0: "steps"},
            "encoder_hidden_states": {0: "batch_size", 1: "sequence_length"},
            "mid_block_additional_residual": {0: "batch_size", 2: "height", 3: "width"},
        }
        # ControlNet produces 12 down-block residuals: the first eleven are exposed as
        # down_block_additional_residual.{1,3,...,21}, the last one without an index suffix.
        for a in range(1, 25, 2):
            if a == 23:
                common_inputs["down_block_additional_residual"] = {0: "batch_size", 2: "height", 3: "width"}
                break
            else:
                common_inputs[f"down_block_additional_residual.{a}"] = {0: "batch_size", 2: "height", 3: "width"}
        # TODO: add text_image, image and image_embeds
        if getattr(self._normalized_config, "addition_embed_type", None) == "text_time":
            common_inputs["text_embeds"] = {0: "batch_size"}
            common_inputs["time_ids"] = {0: "batch_size"}

        if getattr(self._normalized_config, "time_cond_proj_dim", None) is not None:
            common_inputs["timestep_cond"] = {0: "batch_size"}
        return common_inputs

    @property
    def outputs(self) -> Dict[str, Dict[int, str]]:
        return {
            "out_sample": {0: "batch_size", 2: "height", 3: "width"},
        }

    def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
        dummy_inputs_generators = self._create_dummy_input_generator_classes(**kwargs)

        dummy_inputs = {}
        for input_name in self.inputs:
            for dummy_input_gen in dummy_inputs_generators:
                if dummy_input_gen.supports_input(input_name):
                    dummy_inputs[input_name] = dummy_input_gen.generate(
                        input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype
                    )
                    break

        import torch

        # Fixed-shape ControlNet residuals for a standard SD 1.5-style UNet
        # (320/640/1280 channels, 64x64 latent resolution). The underscore names
        # match the renaming performed by rename_ambiguous_inputs below.
        dummy_inputs["down_block_additional_residual_1"] = torch.randn(2, 320, 64, 64)
        dummy_inputs["down_block_additional_residual_3"] = torch.randn(2, 320, 64, 64)
        dummy_inputs["down_block_additional_residual_5"] = torch.randn(2, 320, 64, 64)
        dummy_inputs["down_block_additional_residual_7"] = torch.randn(2, 320, 32, 32)
        dummy_inputs["down_block_additional_residual_9"] = torch.randn(2, 640, 32, 32)
        dummy_inputs["down_block_additional_residual_11"] = torch.randn(2, 640, 32, 32)
        dummy_inputs["down_block_additional_residual_13"] = torch.randn(2, 640, 16, 16)
        dummy_inputs["down_block_additional_residual_15"] = torch.randn(2, 1280, 16, 16)
        dummy_inputs["down_block_additional_residual_17"] = torch.randn(2, 1280, 16, 16)
        dummy_inputs["down_block_additional_residual_19"] = torch.randn(2, 1280, 8, 8)
        dummy_inputs["down_block_additional_residual_21"] = torch.randn(2, 1280, 8, 8)
        dummy_inputs["down_block_additional_residual"] = torch.randn(2, 1280, 8, 8)
        dummy_inputs["mid_block_additional_residual"] = torch.randn(2, 1280, 8, 8)
        # Keep only the hidden-states tensor from the generated encoder_hidden_states dummy (a tuple).
        dummy_inputs["encoder_hidden_states"] = dummy_inputs["encoder_hidden_states"][0]

        if getattr(self._normalized_config, "addition_embed_type", None) == "text_time":
            dummy_inputs["added_cond_kwargs"] = {
                "text_embeds": dummy_inputs.pop("text_embeds"),
                "time_ids": dummy_inputs.pop("time_ids"),
            }

        return dummy_inputs

    def rename_ambiguous_inputs(self, inputs) -> Dict[str, Dict[int, str]]:
        """
        Updates the input names of the model to export.
        Override the function when the model input names are ambiguous or too generic.

        Returns:
            `Dict[str, Dict[int, str]]`: Updated inputs.
        """
        new_inputs = {}
        for name, v in inputs.items():
            if name.startswith("down_block_additional_residual"):
                new_inputs[name.replace(".", "_")] = v
            else:
                new_inputs[name] = v
        return new_inputs

@register_in_tasks_manager("vae-encoder", *["semantic-segmentation"], library_name="diffusers")
class VaeEncoderOpenVINOConfig(VaeEncoderOnnxConfig):
@property
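For reference, here is a minimal standalone sketch of what rename_ambiguous_inputs above does: the dotted ControlNet residual names from the inputs property are rewritten with underscores so they line up with the underscore-named dummy inputs. The dict literal is illustrative, not taken from a real export.

# Illustrative only: mirrors UNetControlNetOpenVINOConfig.rename_ambiguous_inputs.
inputs = {
    "sample": {0: "batch_size", 2: "height", 3: "width"},
    "down_block_additional_residual.1": {0: "batch_size", 2: "height", 3: "width"},
    "down_block_additional_residual": {0: "batch_size", 2: "height", 3: "width"},
}

renamed = {
    (name.replace(".", "_") if name.startswith("down_block_additional_residual") else name): axes
    for name, axes in inputs.items()
}

print(list(renamed))
# ['sample', 'down_block_additional_residual_1', 'down_block_additional_residual']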
12 changes: 6 additions & 6 deletions optimum/exporters/openvino/model_patcher.py
@@ -382,9 +382,9 @@ def _llama_gemma_update_causal_mask_legacy(self, attention_mask, input_tensor, c
offset = 0
mask_shape = attention_mask.shape
mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-causal_mask[
-    : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
-] = mask_slice
+causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
+    mask_slice
+)

if (
self.config._attn_implementation == "sdpa"
@@ -1655,9 +1655,9 @@ def _dbrx_update_causal_mask_legacy(
offset = 0
mask_shape = attention_mask.shape
mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-causal_mask[
-    : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
-] = mask_slice
+causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
+    mask_slice
+)

if (
self.config._attn_implementation == "sdpa"
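The two hunks above only reformat an in-place masked assignment (black-style line wrapping); its behavior is unchanged. A small self-contained sketch of that assignment follows, with illustrative shapes that are assumptions rather than values from the patched models.

import torch

# Illustrative shapes only.
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

causal_mask = torch.zeros(2, 1, 8, 8, dtype=dtype)  # full-size mask buffer
attention_mask = torch.ones(2, 1, 4, 8)             # smaller 4D attention mask
attention_mask[..., 6:] = 0.0                       # positions to be masked out

offset = 0
mask_shape = attention_mask.shape
# Zero entries of the attention mask become the most negative representable value
# and are written into the matching region of the causal mask buffer.
mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = mask_slice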
4 changes: 4 additions & 0 deletions optimum/intel/__init__.py
@@ -100,6 +100,7 @@
"OVStableDiffusionXLPipeline",
"OVStableDiffusionXLImg2ImgPipeline",
"OVLatentConsistencyModelPipeline",
"OVStableDiffusionControlNetPipeline",
]
else:
_import_structure["openvino"].extend(
@@ -110,6 +111,7 @@
"OVStableDiffusionXLPipeline",
"OVStableDiffusionXLImg2ImgPipeline",
"OVLatentConsistencyModelPipeline",
"OVStableDiffusionControlNetPipeline",
]
)

@@ -233,6 +235,7 @@
except OptionalDependencyNotAvailable:
from .utils.dummy_openvino_and_diffusers_objects import (
OVLatentConsistencyModelPipeline,
OVStableDiffusionControlNetPipeline,
OVStableDiffusionImg2ImgPipeline,
OVStableDiffusionInpaintPipeline,
OVStableDiffusionPipeline,
@@ -242,6 +245,7 @@
else:
from .openvino import (
OVLatentConsistencyModelPipeline,
OVStableDiffusionControlNetPipeline,
OVStableDiffusionImg2ImgPipeline,
OVStableDiffusionInpaintPipeline,
OVStableDiffusionPipeline,
1 change: 1 addition & 0 deletions optimum/intel/openvino/__init__.py
@@ -71,6 +71,7 @@
if is_diffusers_available():
from .modeling_diffusion import (
OVLatentConsistencyModelPipeline,
OVStableDiffusionControlNetPipeline,
OVStableDiffusionImg2ImgPipeline,
OVStableDiffusionInpaintPipeline,
OVStableDiffusionPipeline,
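For context, a hypothetical usage sketch of the OVStableDiffusionControlNetPipeline exposed by this PR. The model ids, the controlnet keyword, and export=True are assumptions based on the existing OV* diffusion pipelines and diffusers ControlNet conventions; the actual constructor signature is not shown in this diff.

# Hypothetical usage sketch -- argument names and model ids are assumptions, not taken from this PR.
from PIL import Image

from optimum.intel import OVStableDiffusionControlNetPipeline

control_image = Image.open("canny_edges.png")  # conditioning image prepared beforehand

pipe = OVStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",             # assumed base model id
    controlnet="lllyasviel/sd-controlnet-canny",  # assumed keyword for the ControlNet weights
    export=True,                                  # assumed to mirror the other OV* pipelines
)

image = pipe(prompt="a futuristic city at sunset", image=control_image).images[0]
image.save("controlnet_result.png")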