Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Revert "Run tosa_reference_model using python binding" #6729

Merged
merged 1 commit into from
Nov 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions backends/arm/arm_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

import logging
import os
from typing import cast, final, List, Optional
from typing import final, List, Optional

import serializer.tosa_serializer as ts
from executorch.backends.arm.arm_vela import vela_compile
Expand All @@ -31,7 +31,6 @@
from executorch.exir.backend.backend_details import BackendDetails, PreprocessResult
from executorch.exir.backend.compile_spec_schema import CompileSpec
from torch.export.exported_program import ExportedProgram
from torch.fx import Node

# TOSA backend debug functionality
logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -226,7 +225,6 @@ def preprocess( # noqa: C901
node_visitors = get_node_visitors(edge_program)

for node in graph_module.graph.nodes:
node = cast(Node, node)
if node.op == "call_function":
process_call_function(node, tosa_graph, node_visitors)
elif node.op == "placeholder":
Expand All @@ -238,6 +236,9 @@ def preprocess( # noqa: C901
# any checking of compatibility.
dbg_fail(node, tosa_graph, artifact_path)

# TODO: It would be awesome if this dump could somehow be done on top level and not here.
# Problem is that the desc.json has to be created on the tosa_graph object, which we can't
# access from top level.
if artifact_path:
tag = _get_first_delegation_tag(graph_module)
dbg_tosa_dump(
Expand All @@ -258,4 +259,6 @@ def preprocess( # noqa: C901
else:
raise RuntimeError(f"Unknown format {output_format}")

# Continuing from above. Can I put tosa_graph into this function?
# debug_handle_map = ...
return PreprocessResult(processed_bytes=binary)
12 changes: 8 additions & 4 deletions backends/arm/test/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,15 +192,19 @@ def get_tosa_compile_spec_unbuilt(
the compile spec before calling .build() to finalize it.
"""
if not custom_path:
custom_path = maybe_get_tosa_collate_path()
intermediate_path = maybe_get_tosa_collate_path() or tempfile.mkdtemp(
prefix="arm_tosa_"
)
else:
intermediate_path = custom_path

if custom_path is not None and not os.path.exists(custom_path):
os.makedirs(custom_path, exist_ok=True)
if not os.path.exists(intermediate_path):
os.makedirs(intermediate_path, exist_ok=True)
compile_spec_builder = (
ArmCompileSpecBuilder()
.tosa_compile_spec()
.set_permute_memory_format(permute_memory_to_nhwc)
.dump_intermediate_artifacts_to(custom_path)
.dump_intermediate_artifacts_to(intermediate_path)
)

return compile_spec_builder
Expand Down
5 changes: 1 addition & 4 deletions backends/arm/test/misc/test_debug_feats.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,7 @@ def test_numerical_diff_prints(self):
ArmTester(
model,
example_inputs=model.get_inputs(),
compile_spec=common.get_tosa_compile_spec(
permute_memory_to_nhwc=True,
custom_path=tempfile.mkdtemp("diff_print_test"),
),
compile_spec=common.get_tosa_compile_spec(permute_memory_to_nhwc=False),
)
.export()
.to_edge()
Expand Down
2 changes: 1 addition & 1 deletion backends/arm/test/ops/test_cat.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ def test_cat_tosa_MI(self, operands: tuple[torch.Tensor, ...], dim: int):
def test_cat_4d_tosa_MI(self):
square = torch.ones((2, 2, 2, 2))
for dim in range(-3, 3):
test_data = ((square, square.clone()), dim)
test_data = ((square, square), dim)
self._test_cat_tosa_MI_pipeline(self.Cat(), test_data)

@parameterized.expand(Cat.test_parameters)
Expand Down
4 changes: 2 additions & 2 deletions backends/arm/test/ops/test_select.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,8 @@ def _test_select_tosa_BI_pipeline(
.check(["torch.ops.quantized_decomposed"])
.to_edge()
.partition()
.dump_artifact()
.dump_operator_distribution()
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.run_method_and_compare_outputs(inputs=test_data)
Expand Down Expand Up @@ -160,14 +162,12 @@ def test_select_int_tosa_MI(self, test_data: test_data_t):
)

@parameterized.expand(test_data_suite)
@unittest.skip
def test_select_copy_tosa_BI(self, test_data: test_data_t):
self._test_select_tosa_BI_pipeline(
self.SelectCopy(), test_data, export_target="torch.ops.aten.select_copy.int"
)

@parameterized.expand(test_data_suite)
@unittest.skip
def test_select_int_tosa_BI(self, test_data: test_data_t):
self._test_select_tosa_BI_pipeline(
self.SelectInt(), test_data, export_target="torch.ops.aten.select.int"
Expand Down
81 changes: 14 additions & 67 deletions backends/arm/test/runner_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,11 @@
import numpy as np
import torch

import tosa_reference_model

from torch.export import ExportedProgram
from torch.fx.node import Node
from tosa import TosaGraph

logger = logging.getLogger(__name__)
logger.setLevel(logging.CRITICAL)
logger.setLevel(logging.WARNING)


class QuantizationParams:
Expand Down Expand Up @@ -170,7 +167,7 @@ def __init__(
):
self.intermediate_path = intermediate_path
self.tosa_ref_model_path = tosa_ref_model_path or "tosa_reference_model"
assert self.intermediate_path is None or os.path.exists(
assert os.path.exists(
self.intermediate_path
), f"TOSA artifact path don't exist! Path: {self.intermediate_path}"

Expand Down Expand Up @@ -326,46 +323,7 @@ def run_corstone(
tosa_ref_output = np.fromfile(out_path_with_suffix, dtype=np.float32)
output_shape = self.output_node.args[0][0].meta["val"].shape
tosa_ref_output = torch.from_numpy(tosa_ref_output).reshape(output_shape)
return tosa_ref_output

def run_tosa_graph(
    self, graph: TosaGraph, inputs: list[np.ndarray] | list[torch.Tensor]
) -> tuple[torch.Tensor, ...]:
    """Runs the TOSA reference model on `graph` with `inputs` and returns
    the outputs as a tuple of float32 torch tensors (dequantized when the
    test is quantized).
    """
    # Convert every input to the serialized numpy form the reference model
    # expects; prep_data_for_save also applies the per-input quantization
    # parameters when running a quantized (BI) test.
    data_np = [
        prep_data_for_save(
            input, self.is_quantized, self.input_names[i], self.qp_input[i]
        )
        for i, input in enumerate(inputs)
    ]
    # tosa_profile: 0 = Base Inference, 1 = Main Inference, 2 = Main Training.
    tosa_profile = 0 if self.is_quantized else 1
    # Full reference-model debug output only when our logger is at DEBUG or lower.
    debug_mode = "ALL" if logger.level <= logging.DEBUG else None
    outputs, status = tosa_reference_model.run(
        graph,
        data_np,
        verbosity=_tosa_refmodel_loglevel(logger.level),
        tosa_profile=tosa_profile,
        initialize_variable_tensor_from_numpy=1,  # True
        debug_mode=debug_mode,
    )

    assert (
        status == tosa_reference_model.GraphStatus.TOSA_VALID
    ), "Non-valid TOSA given to reference model."

    outputs_torch = []
    for output in outputs:
        output = output.astype(np.float32)
        if self.is_quantized:
            # Need to dequant back to FP32 for comparison with torch output
            quant_param = self.qp_output
            assert (
                quant_param is not None
            ), "There are no quantization parameters, check output parameters"
            output = (output - quant_param.zp) * quant_param.scale
        outputs_torch.append(torch.from_numpy(output))
    return tuple(outputs_torch)
return [tosa_ref_output]

def run_tosa_ref_model(
self,
Expand Down Expand Up @@ -450,13 +408,21 @@ def run_tosa_ref_model(
assert (
shutil.which(self.tosa_ref_model_path) is not None
), f"tosa_reference_model tool not found, did you run examples/arm/setup.sh? Path: {self.tosa_ref_model_path}"

loglevel_map = {
logging.INFO: "INFO",
logging.CRITICAL: "LOW",
logging.ERROR: "LOW",
logging.WARNING: "MED",
logging.DEBUG: "HIGH",
logging.NOTSET: "MED",
}
clamped_logging_level = max(min(logger.level // 10 * 10, 50), 0)
cmd_ref_model = [
self.tosa_ref_model_path,
"--test_desc",
desc_file_path,
"-l",
_tosa_refmodel_loglevel(logger.level),
loglevel_map[clamped_logging_level],
]
_run_cmd(cmd_ref_model)

Expand Down Expand Up @@ -492,10 +458,7 @@ def run_tosa_ref_model(


def prep_data_for_save(
data: torch.Tensor,
is_quantized: bool,
input_name: str,
quant_param: QuantizationParams,
data, is_quantized: bool, input_name: str, quant_param: QuantizationParams
):
data_np = np.array(data.detach(), order="C").astype(
f"{data.dtype}".replace("torch.", "")
Expand Down Expand Up @@ -639,19 +602,3 @@ def dbg_tosa_fb_to_json(tosa_fb: bytes) -> Dict:
pass

return json_out


def _tosa_refmodel_loglevel(loglevel: int) -> str:
"""Converts a logging loglevel to tosa_reference_model logginglevel,
returned as string.
"""
loglevel_map = {
logging.INFO: "INFO",
logging.CRITICAL: "LOW",
logging.ERROR: "LOW",
logging.WARNING: "MED",
logging.DEBUG: "HIGH",
logging.NOTSET: "MED",
}
clamped_logging_level = max(min(loglevel // 10 * 10, 50), 0)
return loglevel_map[clamped_logging_level]
15 changes: 5 additions & 10 deletions backends/arm/test/tester/arm_tester.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@

from executorch.backends.xnnpack.test.tester import Tester
from executorch.devtools.backend_debug import get_delegation_info
from executorch.exir import EdgeCompileConfig, EdgeProgramManager
from executorch.exir import EdgeCompileConfig
from executorch.exir.backend.compile_spec_schema import CompileSpec

from executorch.exir.lowered_backend_module import LoweredBackendModule
Expand Down Expand Up @@ -120,15 +120,10 @@ def __init__(
super().__init__(dynamic_shapes)
self.tosa_test_util = tosa_test_util

def run(self, artifact: EdgeProgramManager, inputs=None):
self.executorch_program = artifact.to_executorch(self.config)
if module := getattr(
artifact.exported_program().graph_module, "lowered_module_0", None
):
self.buffer = module.processed_bytes

def run_artifact(self, inputs):
tosa_output = self.tosa_test_util.run_tosa_graph(self.buffer, inputs)
tosa_output = self.tosa_test_util.run_tosa_ref_model(
inputs=inputs,
)
return tosa_output


Expand Down Expand Up @@ -321,7 +316,7 @@ def run_method_and_compare_outputs(
logger.info(f"Run #{run_iteration}, input shapes: {input_shape_str}")

reference_output = reference_stage.run_artifact(reference_input)
test_output = test_stage.run_artifact(test_input)
test_output = tuple(test_stage.run_artifact(test_input))
if (
is_nhwc
and test_stage == self.stages[self.stage_name(tester.ToExecutorch)]
Expand Down
27 changes: 22 additions & 5 deletions examples/arm/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ ethos_u_base_rev="24.08"

# tosa reference model
tosa_reference_model_url="https://review.mlplatform.org/tosa/reference_model"
tosa_reference_model_rev="ef31e7222e99cb1c24b2aff9fc52b2d609612283"
tosa_reference_model_rev="f9ea4ab7da19318fe36b1c34d68a3e40fd6e56c5"

########
### Mandatory user args
Expand Down Expand Up @@ -227,13 +227,30 @@ function setup_tosa_reference_model() {
cd reference_model
git checkout ${tosa_reference_model_rev}
git submodule update --init --recursive
cd ..
fi
cd reference_model
mkdir -p build
cd build
cmake ..

# make use of half the cores for building
if [[ "${OS}" == "Linux" ]]; then
n=$(( $(nproc) / 2 ))
elif [[ "${OS}" == "Darwin" ]]; then
n=$(( $(sysctl -n hw.logicalcpu) / 2 ))
else
n=1
fi

echo "pip installing reference_model..."
repo_dir="${root_dir}/reference_model"
cd $repo_dir
pip install .
if [[ "$n" -lt 1 ]]; then
n=1
fi

make -j"${n}"
cd reference_model
tosa_bin_path=`pwd`
echo "export PATH=\${PATH}:${tosa_bin_path}" >> "${setup_path_script}"
}

function setup_vela() {
Expand Down
Loading