Disable TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS #3654

Draft · wants to merge 11 commits into main
8 changes: 7 additions & 1 deletion CMakeLists.txt
@@ -48,8 +48,14 @@ option(TORCH_MLIR_OUT_OF_TREE_BUILD "Specifies an out of tree build" OFF)

# PyTorch native extension gate. If OFF, then no features which depend on
# native extensions will be built.
# TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS will be disabled by default shortly,
# once some reorganizations of the testing codebase make that possible.
option(TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS "Enables PyTorch native extension features" ON)
cmake_dependent_option(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER "Enables JIT IR Importer" ON TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
# NOTE: The JIT_IR_IMPORTER paths have become unsupportable due to age and lack of maintainers.
# Turning this off disables the old TorchScript path, leaving FX-based import as the current supported option.
# The option will be retained for a time, and if a maintainer is interested in setting up testing for it,
# please reach out on the list and speak up for it.
cmake_dependent_option(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER "Enables JIT IR Importer" OFF TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
cmake_dependent_option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" OFF TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)

option(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER "Enables the ONNX C importer" OFF)
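With TORCH_MLIR_ENABLE_JIT_IR_IMPORTER now defaulting to OFF, a build that still needs the legacy TorchScript path has to opt back in at configure time. A minimal configure sketch, assuming an existing checkout; the source directory, generator, and any other cache variables are placeholders and not part of this change:

# Hypothetical configure invocation: only the TORCH_MLIR_* options come from this diff;
# replace <source-dir> and the generator with whatever your build already uses.
cmake -GNinja -S <source-dir> -B build \
  -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON \
  -DTORCH_MLIR_ENABLE_JIT_IR_IMPORTER=ON \
  -DTORCH_MLIR_ENABLE_LTC=OFF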
16 changes: 0 additions & 16 deletions build_tools/ci/test_posix.sh
@@ -8,22 +8,6 @@ torch_version="${1:-unknown}"

export PYTHONPATH="$repo_root/build/tools/torch-mlir/python_packages/torch_mlir:$repo_root/projects/pt1"

echo "::group::Run Linalg e2e integration tests"
python -m e2e_testing.main --config=linalg -v
echo "::endgroup::"

echo "::group::Run make_fx + TOSA e2e integration tests"
python -m e2e_testing.main --config=make_fx_tosa -v
echo "::endgroup::"

echo "::group::Run TOSA e2e integration tests"
python -m e2e_testing.main --config=tosa -v
echo "::endgroup::"

echo "::group::Run Stablehlo e2e integration tests"
python -m e2e_testing.main --config=stablehlo -v
echo "::endgroup::"

echo "::group::Run ONNX e2e integration tests"
python -m e2e_testing.main --config=onnx -v
echo "::endgroup::"
67 changes: 29 additions & 38 deletions projects/pt1/e2e_testing/main.py
@@ -19,13 +19,9 @@
# Available test configs.
from torch_mlir_e2e_test.configs import (
LazyTensorCoreTestConfig,
LinalgOnTensorsBackendTestConfig,
StablehloBackendTestConfig,
NativeTorchTestConfig,
OnnxBackendTestConfig,
TorchScriptTestConfig,
TosaBackendTestConfig,
TorchDynamoTestConfig,
FxImporterTestConfig,
)

@@ -74,12 +70,7 @@ def _get_argparse():
config_choices = [
"native_torch",
"torchscript",
"linalg",
"stablehlo",
"make_fx_tosa",
"tosa",
"lazy_tensor_core",
"torchdynamo",
"onnx",
"onnx_tosa",
"fx_importer",
@@ -91,16 +82,16 @@
"-c",
"--config",
choices=config_choices,
default="linalg",
default="fx_importer",
help=f"""
Meaning of options:
"linalg": run through torch-mlir"s default Linalg-on-Tensors backend.
"tosa": run through torch-mlir"s default TOSA backend.
"stablehlo": run through torch-mlir"s default Stablehlo backend.
# "linalg": run through torch-mlir"s default Linalg-on-Tensors backend.
# "tosa": run through torch-mlir"s default TOSA backend.
# "stablehlo": run through torch-mlir"s default Stablehlo backend.
"native_torch": run the torch.nn.Module as-is without compiling (useful for verifying model is deterministic; ALL tests should pass in this configuration).
"torchscript": compile the model to a torch.jit.ScriptModule, and then run that as-is (useful for verifying TorchScript is modeling the program correctly).
"lazy_tensor_core": run the model through the Lazy Tensor Core frontend and execute the traced graph.
"torchdynamo": run the model through the TorchDynamo frontend and execute the graph using Linalg-on-Tensors.
# "torchdynamo": run the model through the TorchDynamo frontend and execute the graph using Linalg-on-Tensors.
"onnx": export to the model via onnx and reimport using the torch-onnx-to-torch path.
"fx_importer": run the model through the fx importer frontend and execute the graph using Linalg-on-Tensors.
"fx_importer_stablehlo": run the model through the fx importer frontend and execute the graph using Stablehlo backend.
@@ -154,23 +145,23 @@ def main():
all_test_unique_names = set(test.unique_name for test in GLOBAL_TEST_REGISTRY)

# Find the selected config.
if args.config == "linalg":
config = LinalgOnTensorsBackendTestConfig(RefBackendLinalgOnTensorsBackend())
xfail_set = LINALG_XFAIL_SET
crashing_set = LINALG_CRASHING_SET
elif args.config == "stablehlo":
config = StablehloBackendTestConfig(LinalgOnTensorsStablehloBackend())
xfail_set = all_test_unique_names - STABLEHLO_PASS_SET
crashing_set = STABLEHLO_CRASHING_SET
elif args.config == "tosa":
config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
xfail_set = all_test_unique_names - TOSA_PASS_SET
crashing_set = TOSA_CRASHING_SET
elif args.config == "make_fx_tosa":
config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend(), use_make_fx=True)
xfail_set = all_test_unique_names - MAKE_FX_TOSA_PASS_SET
crashing_set = MAKE_FX_TOSA_CRASHING_SET
elif args.config == "native_torch":
# if args.config == "linalg":
# config = LinalgOnTensorsBackendTestConfig(RefBackendLinalgOnTensorsBackend())
# xfail_set = LINALG_XFAIL_SET
# crashing_set = LINALG_CRASHING_SET
# elif args.config == "stablehlo":
# config = StablehloBackendTestConfig(LinalgOnTensorsStablehloBackend())
# xfail_set = all_test_unique_names - STABLEHLO_PASS_SET
# crashing_set = STABLEHLO_CRASHING_SET
# elif args.config == "tosa":
# config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
# xfail_set = all_test_unique_names - TOSA_PASS_SET
# crashing_set = TOSA_CRASHING_SET
# elif args.config == "make_fx_tosa":
# config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend(), use_make_fx=True)
# xfail_set = all_test_unique_names - MAKE_FX_TOSA_PASS_SET
# crashing_set = MAKE_FX_TOSA_CRASHING_SET
if args.config == "native_torch":
config = NativeTorchTestConfig()
xfail_set = set()
crashing_set = set()
@@ -194,13 +185,13 @@ def main():
config = FxImporterTestConfig(LinalgOnTensorsTosaBackend(), "tosa")
xfail_set = FX_IMPORTER_TOSA_XFAIL_SET
crashing_set = FX_IMPORTER_TOSA_CRASHING_SET
elif args.config == "torchdynamo":
# TODO: Enable runtime verification and extend crashing set.
config = TorchDynamoTestConfig(
RefBackendLinalgOnTensorsBackend(generate_runtime_verification=False)
)
xfail_set = TORCHDYNAMO_XFAIL_SET
crashing_set = TORCHDYNAMO_CRASHING_SET
# elif args.config == "torchdynamo":
# # TODO: Enable runtime verification and extend crashing set.
# config = TorchDynamoTestConfig(
# RefBackendLinalgOnTensorsBackend(generate_runtime_verification=False)
# )
# xfail_set = TORCHDYNAMO_XFAIL_SET
# crashing_set = TORCHDYNAMO_CRASHING_SET
elif args.config == "onnx":
config = OnnxBackendTestConfig(RefBackendLinalgOnTensorsBackend())
xfail_set = ONNX_XFAIL_SET
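With fx_importer now the default --config, the common local run no longer needs an explicit backend flag. A minimal sketch of invoking the suite, reusing the PYTHONPATH layout from build_tools/ci/test_posix.sh; repo_root and the build directory are assumptions about your checkout, not something this PR adds:

# repo_root points at your torch-mlir checkout, as in build_tools/ci/test_posix.sh.
export PYTHONPATH="$repo_root/build/tools/torch-mlir/python_packages/torch_mlir:$repo_root/projects/pt1"

# Defaults to --config=fx_importer after this change.
python -m e2e_testing.main -v

# The ONNX path kept in CI is still available explicitly.
python -m e2e_testing.main --config=onnx -v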
6 changes: 6 additions & 0 deletions projects/pt1/python/CMakeLists.txt
@@ -41,9 +41,11 @@ endif()
if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER)
add_subdirectory(torch_mlir/jit_ir_importer)
add_subdirectory(torch_mlir/csrc/jit_ir_importer)
else()
add_subdirectory(torch_mlir_e2e_test)
endif()


################################################################################
# Custom op example
# Required for running the update_torch_ods.sh and update_abstract_interp_lib.sh
@@ -61,6 +63,10 @@ if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER)
TorchMLIRJITIRImporterPybind
TorchMLIRE2ETestPythonModules
)
else()
add_dependencies(TorchMLIRPythonTorchExtensionsSources
TorchMLIRE2ETestPythonModules
)
endif()

if(TORCH_MLIR_ENABLE_LTC)
57 changes: 0 additions & 57 deletions projects/pt1/python/test/annotations-sugar.py

This file was deleted.

33 changes: 0 additions & 33 deletions projects/pt1/python/test/compile_api/already_scripted.py

This file was deleted.

32 changes: 0 additions & 32 deletions projects/pt1/python/test/compile_api/already_traced.py

This file was deleted.

32 changes: 0 additions & 32 deletions projects/pt1/python/test/compile_api/backend_legal_ops.py

This file was deleted.
