Integrate onnx 1.17.0 (#21897)
### Description
For the ORT 1.21.0 release.

Created the following issues to track tests skipped due to updated ONNX operators in the ONNX 1.17.0 release:
#23162
#23164
#23163
#23161


---------

Signed-off-by: Liqun Fu <[email protected]>
Co-authored-by: Guenther Schmuelling <[email protected]>
Co-authored-by: Yifan Li <[email protected]>
Co-authored-by: yf711 <[email protected]>
4 people authored Dec 24, 2024
1 parent 81cd6ea commit a9a881c
Showing 20 changed files with 95 additions and 982 deletions.
2 changes: 1 addition & 1 deletion cgmanifests/generated/cgmanifest.json
@@ -26,7 +26,7 @@
"component": {
"type": "git",
"git": {
"commitHash": "595228d99e3977ac27cb79d5963adda262af99ad",
"commitHash": "b8baa8446686496da4cc8fda09f2b6fe65c2a02c",
"repositoryUrl": "https://github.com/onnx/onnx.git"
},
"comments": "git submodule at cmake/external/onnx"
2 changes: 1 addition & 1 deletion cmake/deps.txt
@@ -35,7 +35,7 @@ microsoft_gsl;https://github.com/microsoft/GSL/archive/refs/tags/v4.0.0.zip;cf36
microsoft_wil;https://github.com/microsoft/wil/archive/refs/tags/v1.0.230629.1.zip;e4a542a323c070376f7c2d1973d0f7ddbc1d2fa5
mimalloc;https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.1.zip;d5ee7d34223d0567892db5179849939c8769dc41
mp11;https://github.com/boostorg/mp11/archive/refs/tags/boost-1.82.0.zip;9bc9e01dffb64d9e0773b2e44d2f22c51aace063
onnx;https://github.com/onnx/onnx/archive/refs/tags/v1.16.1.zip;2eb9198bb352757d5ff13977cbe0634898e0837c
onnx;https://github.com/onnx/onnx/archive/refs/tags/v1.17.0.zip;13a60ac5217c104139ce0fd024f48628e7bcf5bc
# Use the latest commit of 10.7-GA
onnx_tensorrt;https://github.com/onnx/onnx-tensorrt/archive/9c69a24bc2e20c8a511a4e6b06fd49639ec5300a.zip;ff1fe9af78eb129b4a4cdcb7450b7390b4436dd3
protobuf;https://github.com/protocolbuffers/protobuf/archive/refs/tags/v21.12.zip;7cf2733949036c7d52fda017badcab093fe73bfa
2 changes: 1 addition & 1 deletion cmake/external/onnx
Submodule onnx updated 908 files
941 changes: 0 additions & 941 deletions cmake/patches/onnx/onnx.patch

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions js/web/docs/webgl-operators.md

Large diffs are not rendered by default.

@@ -465,7 +465,7 @@ class GraphRef {
} // namespace api

constexpr int64_t kMinSupportedOpset = 7;
constexpr int64_t kMaxSupportedOpset = 21;
constexpr int64_t kMaxSupportedOpset = 22;

// enum of results that a CostCheckFn can return.
enum class CostCheckResult {
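The one-line bump above raises the transpose optimizer's supported opset ceiling from 21 to 22, matching the operator updates in ONNX 1.17.0. A minimal sketch of how such a window might be checked before optimizing (an illustration with a hypothetical helper name, not ORT's actual call site):

```cpp
#include <cstdint>

constexpr int64_t kMinSupportedOpset = 7;
constexpr int64_t kMaxSupportedOpset = 22;

// Hypothetical helper: a model is eligible for the optimization only if its
// ONNX opset falls inside the supported window.
bool IsOpsetInSupportedWindow(int64_t model_opset) {
  return model_opset >= kMinSupportedOpset && model_opset <= kMaxSupportedOpset;
}
```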
8 changes: 8 additions & 0 deletions onnxruntime/core/providers/cpu/reduction/reduction_ops.h
@@ -384,6 +384,14 @@ class ReduceAggregatorMax : public ReduceAggregator<T> {
}
inline void update(const T& v) { this->accumulator_ = v > this->accumulator_ ? v : this->accumulator_; }

static void fill_for_empty_set(Tensor& output) {
if constexpr (std::is_same_v<bool, T>) { /* bool specific impl */
ORT_NOT_IMPLEMENTED();
} else {
EigenMap<T>(output).array() = -std::numeric_limits<T>::infinity();
}
}

// Fast reduction
static inline FastReduceKind WhichFastReduce() {
return FastReduceKind::kKR | FastReduceKind::kRK | FastReduceKind::kKRK | FastReduceKind::kRKR;
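The new `fill_for_empty_set` above writes the identity element of max, -infinity, into the output when the reduced-over set is empty (bool is left unimplemented). A self-contained sketch of the same idea, assuming a floating-point element type:

```cpp
#include <cassert>
#include <limits>
#include <type_traits>
#include <vector>

// Reduce-max seeded with the identity element of max, so an empty input
// yields -infinity, mirroring what fill_for_empty_set writes into the output.
template <typename T>
T ReduceMax(const std::vector<T>& values) {
  static_assert(std::is_floating_point_v<T>, "integral/bool types need a different identity");
  T acc = -std::numeric_limits<T>::infinity();
  for (const T& v : values) {
    acc = v > acc ? v : acc;
  }
  return acc;
}

int main() {
  assert(ReduceMax(std::vector<float>{}) == -std::numeric_limits<float>::infinity());
  assert(ReduceMax(std::vector<float>{1.0f, 3.0f, 2.0f}) == 3.0f);
  return 0;
}
```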
@@ -1,7 +1,7 @@
optimum>=1.14.1
transformers>=4.33.2,<= 4.38.0
torch>=2.2.0
onnx==1.16.1
onnx==1.17.0
datasets>=2.8.0
protobuf==3.20.2
psutil
@@ -1,3 +1,3 @@
onnx==1.16.1
onnx==1.17.0
transformers>=4.36.2
onnxscript>=0.1.0.dev20240126
8 changes: 4 additions & 4 deletions onnxruntime/test/contrib_ops/fused_matmul_op_test.cc
@@ -222,10 +222,10 @@ TEST(FusedMatMulOpTest, FloatTypeNoTranspose) {
}

#if defined(USE_CUDA) || defined(USE_ROCM) // double support only implemented in CUDA/ROCM kernel

TEST(FusedMatMulOpTest, DoubleTypeNoTranspose) {
RunFusedMatMulTest<double>("FusedMatMul", 1);
}
// CUDAExecutionProvider cannot be used with this model due to its ONNX opset not being supported by the layout transformer.
// TEST(FusedMatMulOpTest, DoubleTypeNoTranspose) {
// RunFusedMatMulTest<double>("FusedMatMul", 1);
// }
#endif

TEST(FusedMatMulOpTest, FloatTypeTransposeA) {
4 changes: 4 additions & 0 deletions onnxruntime/test/onnx/TestCase.cc
@@ -1396,6 +1396,10 @@ std::unique_ptr<std::set<BrokenTest>> GetBrokenTests(const std::string& provider
broken_tests->insert({"resize_upsample_sizes_nearest", "result differs"});
broken_tests->insert({"resize_upsample_sizes_nearest_axes_2_3", "result differs"});
broken_tests->insert({"resize_upsample_sizes_nearest_axes_3_2", "result differs"});
broken_tests->insert({"convtranspose_group_2", "group attribute (new of opset(22)) not supported"});
broken_tests->insert({"convtranspose_group_2_image_3", "group attribute (new of opset(22)) not supported"});
broken_tests->insert({"resize_upsample_sizes_nearest_not_larger",
"output=Y:expected 1 (3f800000), got 4 (40800000), diff: 3, tol=0.002 idx=24. 13 of 49 differ. CPU test passed."});
}

#ifdef DISABLE_CONTRIB_OPS
10 changes: 5 additions & 5 deletions onnxruntime/test/providers/xnnpack/xnnpack_basic_test.cc
@@ -295,7 +295,7 @@ TEST(XnnpackEP, DISABLED_TestQDQAveragePool) { // [ONNXRuntimeError] : 9 : NOT
});
}

TEST(XnnpackEP, TestMaxPool) {
TEST(XnnpackEP, DISABLED_TestMaxPool) { // NOT_IMPLEMENTED : Could not find an implementation for MaxPool(22) node with name 'node'
const std::vector<int64_t> input_shape = {1, 2, 13, 13};
auto modelBuilder = [&input_shape](ModelTestBuilder& builder) {
auto* input_arg = builder.MakeInput<float>(input_shape, -1.f, 1.f);
@@ -360,7 +360,7 @@ TEST(XnnpackEP, TestQDQSoftMax_axisZero_v13) {
{ExpectedEPNodeAssignment::None});
}

TEST(XnnpackEP, TestSoftMax_axisLast) {
TEST(XnnpackEP, TestSoftMax_axisLast) { // error: Expected equality of these values
const std::vector<int64_t> input_shape = {1, 2, 3, 5};
int64_t axis = input_shape.size() - 1;
auto modelCreater = [input_shape, axis](ModelTestBuilder& builder) {
@@ -379,7 +379,7 @@ TEST(XnnpackEP, TestSoftMax_axisLast) {
{ExpectedEPNodeAssignment::All});
}

TEST(XnnpackEP, TestQDQSoftMax_axisLast) {
TEST(XnnpackEP, TestQDQSoftMax_axisLast) { // error: Expected equality of these values
RunModelTest(BuildQDQSoftMaxTestCase<uint8_t, uint8_t>(
{1, 2, 3, 5} /* input_shape */,
static_cast<int64_t>(3) /* axis */,
Expand All @@ -395,7 +395,7 @@ TEST(XnnpackEP, TestConvTranspose) {
RunModelTestWithPath(ort_model_path, "test_conv_follow_convtrans", nullptr);
}

TEST(XnnpackEP, TestConvTranspose_With_Outputpadding) {
TEST(XnnpackEP, DISABLED_TestConvTranspose_With_Outputpadding) { // NOT_IMPLEMENTED : Could not find an implementation for ConvTranspose(22) node with name 'node'
const std::vector<int64_t> input_shape = {1, 4, 15, 15};
auto modelBuilder = [&input_shape](ModelTestBuilder& builder) {
auto* input_arg = builder.MakeInput<float>(input_shape, -127.f, 127.f);
@@ -415,7 +415,7 @@ TEST(XnnpackEP, TestConvTranspose_With_Outputpadding) {
});
}

TEST(XnnpackEP, TestConvTranspose_With_OutputShape) {
TEST(XnnpackEP, DISABLED_TestConvTranspose_With_OutputShape) { // NOT_IMPLEMENTED : Could not find an implementation for ConvTranspose(22) node with name 'node'
const std::vector<int64_t> input_shape = {1, 4, 15, 15};
auto modelBuilder = [&input_shape](ModelTestBuilder& builder) {
auto* input_arg = builder.MakeInput<float>(input_shape, -127.f, 127.f);
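The renames above use googletest's standard mechanism for parking tests: a `DISABLED_` prefix on the test name compiles the body but skips it in normal runs; it can still be exercised with `--gtest_also_run_disabled_tests`. A minimal standalone example (not ORT code):

```cpp
#include <gtest/gtest.h>

// Compiled but skipped by default; run with --gtest_also_run_disabled_tests
// to execute it once the missing opset-22 kernels land.
TEST(ExampleSuite, DISABLED_WaitingOnOpset22Kernel) {
  FAIL() << "would fail if executed today";
}

// Runs normally.
TEST(ExampleSuite, AlwaysRuns) {
  EXPECT_EQ(2 + 2, 4);
}
```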
2 changes: 1 addition & 1 deletion onnxruntime/test/python/requirements.txt
@@ -1,2 +1,2 @@
onnx==1.16.1
onnx==1.17.0
pytest
4 changes: 2 additions & 2 deletions onnxruntime/test/shared_lib/test_inference.cc
@@ -1222,7 +1222,7 @@ TEST(CApiTest, invalid_variadic_input_min_arity_custom_op) {
Ort::Session session(*ort_env, VARIADIC_INPUT_OUTPUT_CUSTOM_OP_MODEL_URI, session_options);
FAIL();
} catch (const Ort::Exception& excpt) {
ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node (VariadicNode0) has input size 3 not in range [min=4"));
ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node(VariadicNode0) with schema(test::VariadicNode:1) has input size 3 not in range [min=4,"));
}
}

@@ -1252,7 +1252,7 @@ TEST(CApiTest, invalid_variadic_output_min_arity_custom_op) {
Ort::Session session(*ort_env, VARIADIC_INPUT_OUTPUT_CUSTOM_OP_MODEL_URI, session_options);
FAIL();
} catch (const Ort::Exception& excpt) {
ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node (VariadicNode0) has output size 3 not in range [min=4"));
ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node(VariadicNode0) with schema(test::VariadicNode:1) has output size 3 not in range [min=4"));
}
}

46 changes: 44 additions & 2 deletions onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
@@ -323,7 +323,46 @@
"^test_dequantizelinear_int4",
"^test_dequantizelinear_uint4",
"^test_quantizelinear_int4",
"^test_quantizelinear_uint4"
"^test_quantizelinear_uint4",
// onnx 1.17.0 op tests: skip until implemented in ORT
"^test_acos*", // Could not find an implementation for Acos(22)
"^test_acosh*", // Could not find an implementation for Acosh(22)
"^test_asin*", // Could not find an implementation for Asin(22)
"^test_asinh*", // Could not find an implementation for Asinh(22)
"^test_atan*", // Could not find an implementation for Atan(22)
"^test_atanh*", // Could not find an implementation for Atanh(22)
"^test_basic_conv_with_padding*", // Could not find an implementation for Conv(22)
"^test_basic_conv_without_padding*", // Could not find an implementation for Conv(22)
"^test_conv*", // Could not find an implementation for Conv(22)
"^test_convtranspose*", // Could not find an implementation for ConvTranspose(22)
"^test_cos*", // Could not find an implementation for Cos(22)
"^test_cosh*", // Could not find an implementation for Cosh(22)
"^test_det*", // Could not find an implementation for Det(22)
"^test_dropout*", // Could not find an implementation for Dropout(22)
"^test_elu*", // Could not find an implementation for Elu(22)
"^test_eyelike*", // Could not find an implementation for EyeLike(22)
"^test_globalaveragepool*", // Could not find an implementation for GlobalAveragePool(22)
"^test_globalmaxpool*", // Could not find an implementation for GlobalMaxPool(22)
"^test_gridsample*", // Could not find an implementation for GridSample(22)
"^test_gru*", // Could not find an implementation for GRU(22)
"^test_hardsigmoid*", // Could not find an implementation for HardSigmoid(22)
"^test_hardswish*", // Could not find an implementation for HardSigmoid(22)
"^test_instancenorm*", // Could not find an implementation for InstanceNormalization(22)
"^test_lppool*", // Could not find an implementation for LpPool(22)
"^test_lstm*", // Could not find an implementation for LSTM(22)
"^test_maxpool*", // Could not find an implementation for MaxPool(22)
"^test_maxunpool*", // Could not find an implementation for MaxUnpool(22)
"^test_mish*", // Could not find an implementation for Softplus(22)
"^test_rnn*", // Could not find an implementation for RNN(22)
"^test_round*", // Could not find an implementation for Round(22)
"^test_selu*", // Could not find an implementation for Selu(22)
"^test_simple_rnn*", // Could not find an implementation for RNN(22)
"^test_sin*", // Could not find an implementation for Sin(22)
"^test_sinh*", // Could not find an implementation for Sinh(22)
"^test_softplus*", // Could not find an implementation for Softplus(22)
"^test_softsign*", // Could not find an implementation for Softsign(22)
"^test_tan*", // Could not find an implementation for Tan(22)
"^test_thresholdedrelu*" // Could not find an implementation for ThresholdedRelu(22)
],
"current_failing_tests_x86": [
"^test_vgg19",
@@ -426,6 +465,7 @@
"^test_gelu_tanh_2_expanded_cpu",
"^test_reduce_max_bool_inputs",
"^test_reduce_min_bool_inputs",
"^test_reduce_max_empty_set", // DNNL result in "(shapes (2, 1, 4), (1, 0, 1) mismatch)". this is the same for test_reduce_min_empty_set which is already in the list
"^test_reduce_min_empty_set",
"^test_reduce_l1_empty_set",
"^test_reduce_l1_empty_set_expanded",
@@ -752,7 +792,9 @@
"^test_reduce_prod_empty_set_cpu",
//Bug: DML EP does not execute operators with an empty input tensor
//TODO: Resolve as a graph implementation that returns a constant inf tensor with appropriate strides
"^test_reduce_min_empty_set_cpu"
"^test_reduce_max_empty_set_cpu", // DNNL result in "(shapes (2, 1, 4), (1, 0, 1) mismatch)". this is the same for test_reduce_min_empty_set which is already in the list
"^test_reduce_min_empty_set_cpu",
"^test_resize_upsample_sizes_nearest_not_smaller_cpu"
],
// ORT first supported opset 7, so models with nodes that require versions prior to opset 7 are not supported
"tests_with_pre_opset7_dependencies": [
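Each entry in the filter file above is a regular expression matched against an ONNX backend test name, so `"^test_acos*"` skips every test whose name starts with `test_aco` followed by zero or more `s` characters. The runner that consumes this file is Python-based; the C++ `std::regex` sketch below only illustrates the matching idea:

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
  const std::regex filter("^test_acos*");  // '*' repeats the preceding 's'
  for (const std::string name : {"test_acos_example", "test_add_example"}) {
    std::cout << name << ": "
              << (std::regex_search(name, filter) ? "skipped" : "kept") << "\n";
  }
  return 0;
}
```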
@@ -11,7 +11,7 @@ steps:
packageType: upack
feed: '/7424c8e4-5c62-490e-95c4-79446f31017c'
definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0'
version: 1.0.202
version: 1.0.203
downloadPath: $(Build.BinariesDirectory)/deps

# The private ADO project
@@ -22,7 +22,7 @@
packageType: upack
feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325'
definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a'
version: 1.0.202
version: 1.0.203
downloadPath: $(Build.BinariesDirectory)/deps

# You can add more ADO accounts at here.
@@ -3,7 +3,7 @@ beartype==0.15.0
flatbuffers
cerberus
h5py
onnx==1.16.1
onnx==1.17.0
# Python dependencies required for pytorch development
astunparse
expecttest!=0.2.0
@@ -4,7 +4,7 @@ mypy
pytest
setuptools>=68.2.2
wheel
onnx==1.16.1
onnx==1.17.0
protobuf==4.21.12
sympy==1.12 ; python_version < '3.9'
sympy==1.13 ; python_version >= '3.9'
@@ -5,7 +5,7 @@ mypy
pytest
setuptools==69.0.3
wheel==0.42.0
onnx==1.16.1
onnx==1.17.0
argparse
sympy==1.12
flatbuffers
2 changes: 1 addition & 1 deletion tools/ci_build/github/linux/python/requirements.txt
@@ -3,7 +3,7 @@ mypy
pytest
setuptools>=68.2.2
wheel
onnx==1.16.1
onnx==1.17.0
protobuf==4.21.12
sympy==1.12
flatbuffers
