diff --git a/modelconverter/cli/utils.py b/modelconverter/cli/utils.py
index 1fad5d3..5278b36 100644
--- a/modelconverter/cli/utils.py
+++ b/modelconverter/cli/utils.py
@@ -381,6 +381,7 @@ def get_target_specific_options(
     json_cfg = cfg.model_dump(mode="json")
     options = {
         "disable_onnx_simplification": cfg.disable_onnx_simplification,
+        "disable_onnx_optimisation": cfg.disable_onnx_optimisation,
         "inputs": json_cfg["inputs"],
     }
     if target == "rvc4":
diff --git a/modelconverter/packages/base_exporter.py b/modelconverter/packages/base_exporter.py
index 6ef47a7..d98b4b7 100644
--- a/modelconverter/packages/base_exporter.py
+++ b/modelconverter/packages/base_exporter.py
@@ -39,6 +39,7 @@ def __init__(
         self.outputs = {out.name: out for out in config.outputs}
         self.keep_intermediate_outputs = config.keep_intermediate_outputs
         self.disable_onnx_simplification = config.disable_onnx_simplification
+        self.disable_onnx_optimisation = config.disable_onnx_optimisation
 
         self.model_name = self.input_model.stem
 
diff --git a/modelconverter/packages/rvc4/exporter.py b/modelconverter/packages/rvc4/exporter.py
index 3f61f98..6ac62f4 100644
--- a/modelconverter/packages/rvc4/exporter.py
+++ b/modelconverter/packages/rvc4/exporter.py
@@ -60,18 +60,22 @@ def __init__(self, config: SingleStageConfig, output_dir: Path):
                 self.inputs,
             )
 
-            onnx_modifier = ONNXModifier(
-                model_path=self.input_model,
-                output_path=self._attach_suffix(
-                    self.input_model, "modified_optimised.onnx"
-                ),
-            )
-            onnx_modifier.modify_onnx()
-            if onnx_modifier.compare_outputs():
-                logger.info("ONNX model has been optimised for RVC4.")
-                shutil.move(onnx_modifier.output_path, self.input_model)
-            else:
-                os.remove(onnx_modifier.output_path)
+            if not config.disable_onnx_optimisation:
+                onnx_modifier = ONNXModifier(
+                    model_path=self.input_model,
+                    output_path=self._attach_suffix(
+                        self.input_model, "modified_optimised.onnx"
+                    ),
+                )
+
+                if (
+                    onnx_modifier.modify_onnx()
+                    and onnx_modifier.compare_outputs()
+                ):
+                    logger.info("ONNX model has been optimised for RVC4.")
+                    shutil.move(onnx_modifier.output_path, self.input_model)
+                elif os.path.exists(onnx_modifier.output_path):
+                    os.remove(onnx_modifier.output_path)
         else:
             logger.warning(
                 "Input file type is not ONNX. Skipping pre-processing."
diff --git a/modelconverter/utils/config.py b/modelconverter/utils/config.py
index f970cf6..eb117e9 100644
--- a/modelconverter/utils/config.py
+++ b/modelconverter/utils/config.py
@@ -280,6 +280,7 @@ class SingleStageConfig(CustomBaseModel):
     keep_intermediate_outputs: bool = True
     disable_onnx_simplification: bool = False
+    disable_onnx_optimisation: bool = False
 
     output_remote_url: Optional[str] = None
     put_file_plugin: Optional[str] = None
 
diff --git a/modelconverter/utils/onnx_tools.py b/modelconverter/utils/onnx_tools.py
index 361684d..76f058f 100644
--- a/modelconverter/utils/onnx_tools.py
+++ b/modelconverter/utils/onnx_tools.py
@@ -1,5 +1,4 @@
 import logging
-from dataclasses import dataclass
 from pathlib import Path
 from typing import Dict, List, Optional, Tuple
 
@@ -8,7 +7,6 @@
 import onnx_graphsurgeon as gs
 import onnxoptimizer
 from onnx import checker, helper
-from onnx.onnx_pb import TensorProto
 from onnxsim import simplify
 
 from modelconverter.utils.config import InputConfig
@@ -42,6 +40,7 @@ def onnx_attach_normalization_to_inputs(
 
     for input_tensor in graph.input:
         input_name = input_tensor.name
+        input_dtype = input_tensor.type.tensor_type.elem_type
        if input_name not in input_configs:
             continue
         cfg = input_configs[input_name]
@@ -114,7 +113,7 @@
 
             mean_tensor = helper.make_tensor(
                 f"mean_{input_name}",
-                TensorProto.FLOAT,
+                input_dtype,
                 [1, len(cfg.mean_values), 1, 1]
                 if layout == "NCHW"
                 else [1, 1, 1, len(cfg.mean_values)],
@@ -142,7 +141,7 @@
 
             scale_tensor = helper.make_tensor(
                 f"scale_{input_name}",
-                TensorProto.FLOAT,
+                input_dtype,
                 [1, len(cfg.scale_values), 1, 1]
                 if layout == "NCHW"
                 else [1, 1, 1, len(cfg.scale_values)],
@@ -183,7 +182,6 @@
     return save_path
 
 
-@dataclass
 class ONNXModifier:
     """ONNX model modifier class to optimize and modify the ONNX model.
 
@@ -195,11 +193,12 @@ class ONNXModifier:
         Path to save the modified ONNX model
     """
 
-    model_path: Path
-    output_path: Path
-
-    def __post_init__(self):
+    def __init__(self, model_path: Path, output_path: Path) -> None:
+        self.model_path = model_path
+        self.output_path = output_path
         self.load_onnx()
+        self.prev_onnx_model = self.onnx_model
+        self.prev_onnx_gs = self.onnx_gs
 
     def load_onnx(self) -> None:
         """Load the ONNX model and store it as onnx.ModelProto and
@@ -207,12 +206,23 @@ def load_onnx(self) -> None:
 
         logger.info(f"Loading model: {self.model_path.stem}")
 
-        # Load the ONNX model
         self.onnx_model, _ = simplify(
             self.model_path.as_posix(), perform_optimization=True
         )
 
-        # Load the ONNX model as a GraphSurgeon graph
+        self.dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[
+            self.onnx_model.graph.input[0].type.tensor_type.elem_type
+        ]
+        self.input_shape = [
+            dim.dim_value
+            for dim in self.onnx_model.graph.input[
+                0
+            ].type.tensor_type.shape.dim
+        ]
+        self.has_dynamic_shape = any(
+            dim == 0 or dim is None for dim in self.input_shape
+        )
+
         self.onnx_gs = gs.import_onnx(self.onnx_model)
 
     def optimize_onnx(self, passes: Optional[List[str]] = None) -> None:
@@ -222,17 +232,14 @@
 
         @type passes: Optional[List[str]]
         """
-        # Optimize the onnx model based on the optimization passes
         optimised_onnx_model = onnxoptimizer.optimize(
             self.onnx_model, passes=passes
         )
 
-        # Simplify the onnx model
         optimised_onnx_model, _ = simplify(
             optimised_onnx_model, perform_optimization=False
         )
 
-        # Check the model
         onnx.checker.check_model(optimised_onnx_model)
 
         self.onnx_model, self.onnx_gs = (
@@ -249,7 +256,6 @@ def export_onnx(self, passes: Optional[List[str]] = None) -> None:
 
         self.optimize_onnx(passes)
 
-        # Save the modified model
         onnx.save(self.onnx_model, self.output_path)
 
     def add_outputs(self, output_names: List[str]) -> None:
@@ -259,7 +265,6 @@
 
         @type output_names: List[str]
         """
-        # Add output nodes to the graph
         graph_outputs = [output.name for output in self.onnx_gs.outputs]
         for name, tensor in self.onnx_gs.tensors().items():
             if name in output_names and name not in graph_outputs:
@@ -335,22 +340,18 @@ def graph_cleanup(
 
         @type connections_to_fix: List[Tuple[gs.Variable, gs.Variable]]
         """
-        # Add new nodes
         for node in nodes_to_add:
             self.onnx_gs.nodes.append(node)
 
-        # Fix input connections
         for old_input, new_input in connections_to_fix:
             for node in self.onnx_gs.nodes:
                 for idx, input in enumerate(node.inputs):
                     if input == old_input:
                         node.inputs[idx] = new_input
 
-        # Remove old nodes
         for node in nodes_to_remove:
             self.onnx_gs.nodes.remove(node)
 
-        # Cleanup the graph
         self.onnx_gs.cleanup(
             remove_unused_node_outputs=True, remove_unused_graph_inputs=True
         ).toposort()
@@ -368,7 +369,6 @@ def substitute_node_by_type(
 
         @type target_node: str
         """
-        # Allow only substitution from Sub to Add and Div to Mul
         if source_node not in ["Sub", "Div"] or target_node not in [
             "Add",
             "Mul",
@@ -377,7 +377,6 @@
                 "Invalid source or target node type. Valid source types: Sub, Div. Valid target types: Add, Mul."
             )
 
-        # Ensure the combination is valid
         if (
             source_node == "Sub"
             and target_node == "Mul"
@@ -388,69 +387,55 @@
                "Invalid substitution. Available substitutions: Sub -> Add, Div -> Mul"
             )
 
-        # Extract constant tensors from the graph
         constant_map = self.get_constant_map(self.onnx_gs)
 
         def create_new_node(
             node: gs.Node, target_node: str, const_idx: int
-        ) -> Optional[List[gs.Node]]:
+        ) -> Optional[gs.Node]:
             if const_idx == 0:
                 return None
 
-            new_nodes = []
             first_input = node.inputs[0]
             second_input = node.inputs[const_idx]
 
             if target_node == "Add":
                 new_cost_val = -second_input.values
-                new_nodes.append(
-                    gs.Node(
-                        op="Add",
-                        inputs=[
-                            first_input,
-                            gs.Constant(
-                                name=f"{second_input.name}/Subtitute",
-                                values=np.array(
-                                    new_cost_val, dtype=second_input.dtype
-                                ),
+                return gs.Node(
+                    op="Add",
+                    inputs=[
+                        first_input,
+                        gs.Constant(
+                            name=f"{second_input.name}/Substitute",
+                            values=np.array(
+                                new_cost_val, dtype=second_input.dtype
                             ),
-                        ],
-                        outputs=[gs.Variable(name=f"{node.name}/Add_output")],
-                        name=f"{node.name}/To_Add",
-                    )
+                        ),
+                    ],
+                    outputs=[gs.Variable(name=f"{node.name}/Add_output")],
+                    name=f"{node.name}/To_Add",
                 )
             elif target_node == "Mul":
                 new_cost_val = 1.0 / second_input.values
-                if first_input.dtype in [np.float32, np.float64]:
-                    new_dtype = first_input.dtype
-                else:
-                    # If the first input is not a float, cast it to float32 due to the constant value being a float
-                    new_dtype = np.float32
-                    cast_node = gs.Node(
-                        op="Cast",
-                        attrs={"to": new_dtype},
-                        inputs=[first_input],
-                        outputs=[gs.Variable(name=f"{node.name}/Cast_output")],
-                        name=f"{node.name}/Cast",
-                    )
-                    new_nodes.append(cast_node)
-                    first_input = cast_node.outputs[0]
-
-                new_nodes.append(
-                    gs.Node(
-                        op="Mul",
-                        inputs=[
-                            first_input,
-                            gs.Constant(
-                                name=f"{second_input.name}/Subtitute",
-                                values=np.array(new_cost_val, dtype=new_dtype),
+                if second_input.dtype not in [
+                    np.float16,
+                    np.float32,
+                    np.float64,
+                ]:
+                    return None
+                return gs.Node(
+                    op="Mul",
+                    inputs=[
+                        first_input,
+                        gs.Constant(
+                            name=f"{second_input.name}/Substitute",
+                            values=np.array(
+                                new_cost_val, dtype=second_input.dtype
                             ),
-                        ],
-                        outputs=[gs.Variable(name=f"{node.name}/Mul_output")],
-                        name=f"{node.name}/To_Mul",
-                    )
+                        ),
+                    ],
+                    outputs=[gs.Variable(name=f"{node.name}/Mul_output")],
+                    name=f"{node.name}/To_Mul",
                 )
-            return new_nodes
-
         nodes_to_add = []
         nodes_to_remove = []
         connections_to_fix = []
@@ -460,24 +445,15 @@ def create_new_node(
             constant = self.get_constant_value(node, constant_map)
             if constant is not None:
                 _, const_idx = constant
-                new_nodes = create_new_node(node, target_node, const_idx)
-                if new_nodes is not None:
-                    if len(new_nodes) > 1:
-                        nodes_to_add.extend(new_nodes)
-                        connections_to_fix.append(
-                            (
-                                node.outputs[0],
-                                new_nodes[1].outputs[0],
-                            )
-                        )
-                    else:
-                        nodes_to_add.append(new_nodes[0])
-                        connections_to_fix.append(
-                            (
-                                node.outputs[0],
-                                new_nodes[0].outputs[0],
-                            )
+                new_node = create_new_node(node, target_node, const_idx)
+                if new_node is not None:
+                    nodes_to_add.append(new_node)
+                    connections_to_fix.append(
+                        (
+                            node.outputs[0],
+                            new_node.outputs[0],
                         )
+                    )
                 nodes_to_remove.append(node)
 
         self.graph_cleanup(nodes_to_add, nodes_to_remove, connections_to_fix)
@@ -491,25 +467,18 @@ def fuse_add_mul_to_bn(self) -> None:
 
         The fusion patterns considered are:
         1. Conv -> Add -> Mul
-        2. Conv -> Sub -> Mul
-        3. Conv -> Mul -> Add
-        4. Conv -> Mul -> Sub
-        5. Conv -> Mul
-        6. Conv -> Add
-        7. Conv -> Sub
+        2. Conv -> Mul -> Add
+        3. Conv -> Mul
+        4. Conv -> Add
         """
 
         FUSION_PATTERNS = [
             ("Conv", "Add", "Mul"),
-            ("Conv", "Sub", "Mul"),
             ("Conv", "Mul", "Add"),
-            ("Conv", "Mul", "Sub"),
             ("Conv", "Mul"),
             ("Conv", "Add"),
-            ("Conv", "Sub"),
         ]
 
-        # Extract constant tensors from the graph
         constant_map = self.get_constant_map(self.onnx_gs)
 
         def create_batch_norm_node(
@@ -517,10 +486,10 @@
         ) -> gs.Node:
             conv_channels = input_tensor.shape[1]
             scale_values = np.array(
-                [scale] * conv_channels, dtype=np.float32
+                [scale] * conv_channels, dtype=self.dtype
             ).squeeze()
             bias_values = np.array(
-                [bias] * conv_channels, dtype=np.float32
+                [bias] * conv_channels, dtype=self.dtype
             ).squeeze()
             mean_values = np.zeros_like(scale_values)
             var_values = np.ones_like(scale_values)
@@ -560,7 +529,7 @@ def create_batch_norm_node(
         for node in self.onnx_gs.nodes:
             if node.op != pattern[0]:
                 continue
-            # Attempt to match the rest of the pattern
+
             sequence = [node]
             current_node = node
             for op_type in pattern[1:]:
@@ -579,7 +548,6 @@
             if len(sequence) == len(pattern):
                 all_sequences.append(sequence)
 
-        # Filter out sequences that are subsets of others
         longest_sequences = []
         for seq in all_sequences:
             is_subset = any(
@@ -595,7 +563,6 @@
         connections_to_fix = []
 
         for sequence in longest_sequences:
-            # Validate if the pattern is feasible for fusion
             valid_fusion = True
 
             scale, bias = 1.0, 0.0
@@ -657,18 +624,14 @@ def fuse_single_add_mul_to_conv(self) -> None:
         nodes_to_remove = []
         connections_to_fix = []
 
-        # Extract constant tensors from the graph
         constant_map = self.get_constant_map(self.onnx_gs)
 
-        # Iterate over the nodes in the graph
         for node in self.onnx_gs.nodes:
-            # Look for Mul -> Conv pattern
             if node.op == "Mul":
                 mul_node = node
                 if len(mul_node.outputs[0].outputs) > 1:
                     continue
 
-                # Check for Conv node
                 conv_node = next(
                     (n for n in mul_node.outputs[0].outputs if n.op == "Conv"),
                     None,
@@ -676,17 +639,14 @@
                 if conv_node is None:
                     continue
 
-                # Get the values of the Add and Mul nodes
                 constant = self.get_constant_value(mul_node, constant_map)
                 if constant is None:
                     continue
 
                 mul_value, _ = constant
 
-                # Get the weights and bias of the Conv node
                 conv_weights = conv_node.inputs[1]
 
-                # Adjust the Conv weights by the Mul value on a per-channel basis on axis 1
                 new_weights = conv_weights.values * mul_value
 
                 conv_node.inputs[1] = gs.Constant(
@@ -694,20 +654,20 @@
                     values=new_weights,
                 )
 
-                # Remove Mul nodes
                 nodes_to_remove.append(mul_node)
                 connections_to_fix.append(
-                    (mul_node.outputs[0], mul_node.inputs[0])
+                    (
+                        mul_node.outputs[0],
+                        mul_node.inputs[0],
+                    )
                 )
 
-            # Look for Add -> Conv pattern
             if node.op == "Add":
                 add_node = node
                 if len(add_node.outputs[0].outputs) > 1:
                     continue
 
-                # Check for Conv node
                 conv_node = next(
                     (n for n in add_node.outputs[0].outputs if n.op == "Conv"),
                     None,
@@ -726,14 +686,12 @@
                 ):
                     continue
 
-                # Get the values of the Add and Mul nodes
                 constant = self.get_constant_value(add_node, constant_map)
                 if constant is None:
                     continue
 
                 add_value, _ = constant
 
-                # Get the weights and bias of the Conv node
                 conv_weights = conv_node.inputs[1]
                 conv_bias = (
                     conv_node.inputs[2] if len(conv_node.inputs) > 2 else None
@@ -769,11 +727,13 @@
                     )
                 )
 
-                # Remove Add nodes
                 nodes_to_remove.append(add_node)
                 connections_to_fix.append(
-                    (add_node.outputs[0], add_node.inputs[0])
+                    (
+                        add_node.outputs[0],
+                        add_node.inputs[0],
+                    )
                 )
 
         self.graph_cleanup([], nodes_to_remove, connections_to_fix)
@@ -793,16 +753,12 @@ def fuse_comb_add_mul_to_conv(self) -> None:
         nodes_to_remove = []
         connections_to_fix = []
 
-        # Extract constant tensors from the graph
         constant_map = self.get_constant_map(self.onnx_gs)
 
-        # Iterate over the nodes in the graph
         for node in self.onnx_gs.nodes:
-            # Look for Mul -> Add -> Conv pattern
             if node.op == "Mul":
                 mul_node = node
 
-                # Check for Add node
                 add_node = next(
                     (n for n in mul_node.outputs[0].outputs if n.op == "Add"),
                     None,
@@ -828,7 +784,6 @@
                 ):
                     continue
 
-                # Get the values of the Add and Mul nodes
                 constant = self.get_constant_value(mul_node, constant_map)
                 if constant is None:
                     continue
@@ -839,13 +794,11 @@
                     continue
 
                 add_value, _ = constant
 
-                # Get the weights and bias of the Conv node
                 conv_weights = conv_node.inputs[1]
                 conv_bias = (
                     conv_node.inputs[2] if len(conv_node.inputs) > 2 else None
                 )
 
-                # Adjust the Conv weights by the Mul value on a per-channel basis on axis 1
                 new_weights = conv_weights.values * mul_value
 
                 conv_node.inputs[1] = gs.Constant(
@@ -895,11 +848,9 @@
                     )
                 )
 
-                # Look for Add -> Mul -> Conv pattern
             if node.op == "Add":
                 add_node = node
 
-                # Check for Mul node
                 mul_node = next(
                     (n for n in add_node.outputs[0].outputs if n.op == "Mul"),
                     None,
@@ -925,7 +876,6 @@
                 ):
                     continue
 
-                # Get the values of the Add and Mul nodes
                 constant = self.get_constant_value(add_node, constant_map)
                 if constant is None:
                     continue
@@ -938,7 +888,6 @@
 
                 add_value *= mul_value
 
-                # Get the weights and bias of the Conv node
                 conv_weights = conv_node.inputs[1]
                 conv_bias = (
                     conv_node.inputs[2] if len(conv_node.inputs) > 2 else None
@@ -1008,16 +957,13 @@ def fuse_split_concat_to_conv(self) -> None:
         nodes_to_remove = []
         connections_to_fix = []
 
-        # Iterate over the nodes in the graph
         for node in self.onnx_gs.nodes:
             if node.op == "Conv":
                 break
 
-            # Look for Split -> Concat -> ... -> Conv pattern
            if node.op == "Split":
                 split_node = node
 
-                # Check for Add node
                 concat_node = next(
                     (
                         n
@@ -1051,6 +997,8 @@
                 )
 
                 channels_axis = split_node.attrs["axis"]
+                if conv_weights.shape[channels_axis] not in [1, 3]:
+                    break
 
                 for inter_node in intermediate_nodes[:-1]:
                     constant = self.get_constant_value(
@@ -1066,16 +1014,14 @@
                         constant_value.shape[channels_axis]
                         != conv_weights.values.shape[1]
                     ):
-                        raise ValueError(
-                            f"Spatial dimensions mismatch between Conv and intermediate node {inter_node.name}: {constant_value.shape[channels_axis]} != {conv_weights.values.shape[1]}"
+                        logger.warning(
+                            f"Spatial dimensions mismatch between Conv and intermediate node {inter_node.name}: {constant_value.shape[channels_axis]} != {conv_weights.values.shape[1]}, discarding this step."
                         )
 
-                    # Reverse the order of the constant tensor channels
                     inter_node.inputs[constant_idx].values = np.flip(
                         constant_value, axis=channels_axis
                     )
 
-                # Reverse the order of the channels of the conv node at the axis
                 conv_weights.values = np.flip(
                     conv_weights.values, axis=channels_axis
                 )
@@ -1097,30 +1043,83 @@
 
         self.optimize_onnx()
 
-    def modify_onnx(self) -> None:
+    def modify_onnx(self) -> bool:
         """Modify the ONNX model by applying a series of optimizations.
 
         @param passes: List of optimization passes to apply to the ONNX model
         @type passes: Optional[List[str]]
         """
+        if self.has_dynamic_shape:
+            logger.warning(
+                "Identified dynamic input shape, skipping model modifications..."
+            )
+            return False
 
-        logger.debug("Substituting Div -> Mul nodes...")
-        self.substitute_node_by_type(source_node="Div", target_node="Mul")
-        logger.debug("Substituting Sub -> Add nodes...")
-        self.substitute_node_by_type(source_node="Sub", target_node="Add")
-        logger.debug(
-            "Fusing Add and Mul nodes to BatchNormalization nodes and then into Conv nodes..."
-        )
-        self.fuse_add_mul_to_bn()
-        logger.debug("Fusing Add and Mul nodes to Conv nodes...")
-        self.fuse_comb_add_mul_to_conv()
-        self.fuse_single_add_mul_to_conv()
-        logger.debug("Fusing Split and Concat nodes to Conv nodes...")
-        self.fuse_split_concat_to_conv()
+        half = self.dtype == np.float16
+        try:
+            logger.debug("Substituting Div -> Mul nodes...")
+            self.substitute_node_by_type(source_node="Div", target_node="Mul")
+            if not self.compare_outputs(from_modelproto=True, half=half):
+                logger.warning(
+                    "Failed to substitute Div -> Mul nodes, reverting changes..."
+                )
+                self.onnx_model = self.prev_onnx_model
+                self.onnx_gs = self.prev_onnx_gs
 
-        self.export_onnx()
+            logger.debug("Substituting Sub -> Add nodes...")
+            self.substitute_node_by_type(source_node="Sub", target_node="Add")
+            if not self.compare_outputs(from_modelproto=True, half=half):
+                logger.warning(
+                    "Failed to substitute Sub -> Add nodes, reverting changes..."
+                )
+                self.onnx_model = self.prev_onnx_model
+                self.onnx_gs = self.prev_onnx_gs
 
-    def compare_outputs(self, half: bool = False) -> bool:
+            logger.debug(
+                "Fusing Add and Mul nodes to BatchNormalization nodes and then into Conv nodes..."
+            )
+            self.fuse_add_mul_to_bn()
+            if not self.compare_outputs(from_modelproto=True, half=half):
+                logger.warning(
+                    "Failed to fuse Add and Mul nodes to BatchNormalization nodes, reverting changes..."
+                )
+                self.onnx_model = self.prev_onnx_model
+                self.onnx_gs = self.prev_onnx_gs
+
+            logger.debug("Fusing Add and Mul nodes to Conv nodes...")
+            self.fuse_comb_add_mul_to_conv()
+            if not self.compare_outputs(from_modelproto=True, half=half):
+                logger.warning(
+                    "Failed to fuse Add and Mul nodes (combined) to Conv nodes, reverting changes..."
+                )
+                self.onnx_model = self.prev_onnx_model
+                self.onnx_gs = self.prev_onnx_gs
+
+            self.fuse_single_add_mul_to_conv()
+            if not self.compare_outputs(from_modelproto=True, half=half):
+                logger.warning(
+                    "Failed to fuse Add and Mul nodes (single) to Conv nodes, reverting changes..."
+                )
+                self.onnx_model = self.prev_onnx_model
+                self.onnx_gs = self.prev_onnx_gs
+
+            logger.debug("Fusing Split and Concat nodes to Conv nodes...")
+            self.fuse_split_concat_to_conv()
+            if not self.compare_outputs(from_modelproto=True, half=half):
+                logger.warning(
+                    "Failed to fuse Split and Concat nodes to Conv nodes, reverting changes..."
+                )
+                self.onnx_model = self.prev_onnx_model
+                self.onnx_gs = self.prev_onnx_gs
+
+            self.export_onnx()
+        except Exception as e:
+            logger.error(f"Failed to modify the ONNX model: {e}")
+            return False
+
+        return True
+
+    def compare_outputs(
+        self, from_modelproto: bool = False, half: bool = False
+    ) -> bool:
         """Compare the outputs of two ONNX models.
 
         @param half: Flag to use half precision for the input tensors
@@ -1129,8 +1128,17 @@ def compare_outputs(self, half: bool = False) -> bool:
 
         import onnxruntime as ort
 
-        ort_session_1 = ort.InferenceSession(self.model_path.as_posix())
-        ort_session_2 = ort.InferenceSession(self.output_path.as_posix())
+        ort.set_default_logger_severity(3)
+
+        if from_modelproto:
+            onnx_model_1 = self.prev_onnx_model.SerializeToString()
+            onnx_model_2 = self.onnx_model.SerializeToString()
+        else:
+            onnx_model_1 = self.model_path.as_posix()
+            onnx_model_2 = self.output_path.as_posix()
+
+        ort_session_1 = ort.InferenceSession(onnx_model_1)
+        ort_session_2 = ort.InferenceSession(onnx_model_2)
 
         inputs = dict()
         for input in ort_session_1.get_inputs():
@@ -1140,10 +1148,8 @@
                 else np.random.rand(*input.shape).astype(np.float16)
             )
 
-        # Run inference on the first model
         outputs_1 = ort_session_1.run(None, inputs)
 
-        # Run inference on the second model
        outputs_2 = ort_session_2.run(None, inputs)
 
         equal_outputs = True
diff --git a/requirements-dev.txt b/requirements-dev.txt
index d018341..c66bc48 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,5 +1,3 @@
 pre-commit==3.2.1
 pytest-cov>=4.1.0
-docker-squash>=1.1.0
-onnx_graphsurgeon
-onnxoptimizer
+docker-squash>=1.1.0
\ No newline at end of file
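
Usage sketch (hypothetical, not part of the patch): a minimal illustration of how the new disable_onnx_optimisation flag and the boolean return of ONNXModifier.modify_onnx() are meant to interact, mirroring the guarded flow in rvc4/exporter.py. The model path and the standalone flag below are assumptions for the example; in the converter the flag comes from SingleStageConfig.

import shutil
from pathlib import Path

from modelconverter.utils.onnx_tools import ONNXModifier

input_model = Path("model.onnx")   # assumed example path
disable_onnx_optimisation = False  # would come from SingleStageConfig

if not disable_onnx_optimisation:
    modifier = ONNXModifier(
        model_path=input_model,
        output_path=input_model.with_suffix(".modified_optimised.onnx"),
    )
    # modify_onnx() now returns False for dynamic input shapes or when an
    # optimisation step raises, so the optimised file is kept only if the
    # modified graph still matches the original model's outputs.
    if modifier.modify_onnx() and modifier.compare_outputs():
        shutil.move(modifier.output_path, input_model)
    elif modifier.output_path.exists():
        # modify_onnx() may bail out before exporting anything.
        modifier.output_path.unlink()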