Fix set-batch-size test
lgeiger committed Apr 5, 2022
1 parent 528c819 commit 391eae5
Showing 1 changed file with 7 additions and 4 deletions.
larq_compute_engine/mlir/tests/set_batch_size.mlir (7 additions, 4 deletions)
@@ -17,8 +17,11 @@ func @simple(%arg0: tensor<?x6xf32>, %arg1: tensor<2x6xf32>) -> (tensor<?x6xf32>
// return tf.keras.Model([img1, img2], x)
// Both inputs have a dynamic batch size

"tf_saved_model.global_tensor"() { sym_name = "dense/kernel", type = tensor<4x6xf32>, value = dense<42.0> : tensor<4x6xf32> } : () -> ()
"tf_saved_model.global_tensor"() { sym_name = "dense/bias", type = tensor<6xf32>, value = dense<43.0> : tensor<6xf32> } : () -> ()

// CHECK-LABEL: @dual_input_model
-func @dual_input_model(%arg0: tensor<?x6xf32> {tf_saved_model.index_path = ["input_2"]}, %arg1: tensor<?x4xf32> {tf_saved_model.index_path = ["input_1"]}, %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+func @dual_input_model(%arg0: tensor<?x6xf32> {tf_saved_model.index_path = ["input_2"]}, %arg1: tensor<?x4xf32> {tf_saved_model.index_path = ["input_1"]}, %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}} {
%0 = "tf.ReadVariableOp"(%arg2) {device = ""} : (tensor<!tf_type.resource<tensor<6xf32>>>) -> tensor<6xf32>
%1 = "tf.ReadVariableOp"(%arg3) {device = ""} : (tensor<!tf_type.resource<tensor<4x6xf32>>>) -> tensor<4x6xf32>
%2 = "tf.MatMul"(%arg1, %1) {device = "", transpose_a = false, transpose_b = false} : (tensor<?x4xf32>, tensor<4x6xf32>) -> tensor<?x6xf32>
@@ -30,13 +33,13 @@ func @dual_input_model(%arg0: tensor<?x6xf32> {tf_saved_model.index_path = ["inp
// CHECK: %arg0: tensor<1x6xf32> {tf_saved_model.index_path = ["input_2"]}
// CHECK: %arg1: tensor<1x4xf32> {tf_saved_model.index_path = ["input_1"]}
// The resource objects and attributes should be unchanged
-// CHECK: %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+// CHECK: %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}} {
}

// This is the same model, but one of the two inputs has been given a fixed batch size in Python

// CHECK-LABEL: @dual_input_one_fixed_size
-func @dual_input_one_fixed_size(%arg0: tensor<?x6xf32> {tf_saved_model.index_path = ["input_2"]}, %arg1: tensor<1x4xf32> {tf_saved_model.index_path = ["input_1"]}, %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+func @dual_input_one_fixed_size(%arg0: tensor<?x6xf32> {tf_saved_model.index_path = ["input_2"]}, %arg1: tensor<1x4xf32> {tf_saved_model.index_path = ["input_1"]}, %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}} {
%0 = "tf.ReadVariableOp"(%arg2) {device = ""} : (tensor<!tf_type.resource<tensor<6xf32>>>) -> tensor<6xf32>
%1 = "tf.ReadVariableOp"(%arg3) {device = ""} : (tensor<!tf_type.resource<tensor<4x6xf32>>>) -> tensor<4x6xf32>
%2 = "tf.MatMul"(%arg1, %1) {device = "", transpose_a = false, transpose_b = false} : (tensor<1x4xf32>, tensor<4x6xf32>) -> tensor<1x6xf32>
@@ -47,5 +50,5 @@ func @dual_input_one_fixed_size(%arg0: tensor<?x6xf32> {tf_saved_model.index_pat
return %6 : tensor<?x6xf32>
// CHECK: %arg0: tensor<1x6xf32> {tf_saved_model.index_path = ["input_2"]}
// CHECK: %arg1: tensor<1x4xf32> {tf_saved_model.index_path = ["input_1"]}
-// CHECK: %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+// CHECK: %arg2: tensor<!tf_type.resource<tensor<6xf32>>> {tf_saved_model.bound_input = @"dense/bias"}, %arg3: tensor<!tf_type.resource<tensor<4x6xf32>>> {tf_saved_model.bound_input = @"dense/kernel"}) -> (tensor<?x6xf32> {tf_saved_model.index_path = ["tf.__operators__.add"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_2:0,serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}} {
}
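
Note on the test: the pass being tested rewrites the dynamic batch dimension (?) of the model's input arguments to a fixed size (1 in these CHECK lines), while resource arguments and function attributes stay unchanged. The sketch below is a hypothetical reconstruction, inferred from the MLIR above, of the kind of two-input Keras model these test functions correspond to; the layer names, shapes, and the batch_size=1 variant are assumptions for illustration, not code from this repository.

import tensorflow as tf

# Hypothetical reconstruction of a model like @dual_input_model:
# a 4-wide and a 6-wide input, a Dense(6) layer ("dense/kernel" and
# "dense/bias"), and an add producing the "tf.__operators__.add" output.
img1 = tf.keras.Input(shape=(4,), name="input_1")  # dynamic batch size
img2 = tf.keras.Input(shape=(6,), name="input_2")  # dynamic batch size
x = tf.keras.layers.Dense(6, name="dense")(img1) + img2
model = tf.keras.Model([img1, img2], x)

# Variant like @dual_input_one_fixed_size: one input is given a fixed
# batch size in Python, the other keeps a dynamic batch size.
img1_fixed = tf.keras.Input(shape=(4,), batch_size=1, name="input_1")
img2_dyn = tf.keras.Input(shape=(6,), name="input_2")
y = tf.keras.layers.Dense(6, name="dense")(img1_fixed) + img2_dyn
model_fixed = tf.keras.Model([img1_fixed, img2_dyn], y)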
