From df71001c76ac1cc0c1cbce7a1785c08ec2f49a72 Mon Sep 17 00:00:00 2001 From: Michiel Olieslagers Date: Wed, 8 Oct 2025 15:54:32 +0100 Subject: [PATCH 1/3] Make upstream testing more target specific This patch splits up our upstream testing such that only relevant items are installed and only required things are tested upstream. It removes any duplicate testing and makes the upstream testing structure more transparent and readable. (Rebased & fixed certain syntactical issues in CI tests) Change-Id: I14cf329eed03d9859aef97204a9f12185b9533a2 --- .github/workflows/pull.yml | 13 +- .github/workflows/trunk.yml | 16 +- backends/arm/README.md | 25 ++- backends/arm/test/test_arm_baremetal.sh | 279 +++++++++--------------- backends/arm/test/test_arm_ootb.sh | 9 + 5 files changed, 147 insertions(+), 195 deletions(-) diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 6bdf08c3e30..74373a570f5 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -490,8 +490,8 @@ jobs: build-tool: buck2 docker-image: ci-image:executorch-ubuntu-22.04-clang12 - unittest-arm-backend-with-no-fvp: - name: unittest-arm-backend-with-no-fvp + unittest-arm-backend-with-no-deps: + name: unittest-arm-backend-with-no-deps uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main permissions: id-token: write @@ -499,8 +499,11 @@ jobs: strategy: matrix: include: - - test_arm_baremetal: test_pytest_ops - - test_arm_baremetal: test_pytest_models + - test_arm_baremetal: test_pytest_ops_no_target + - test_arm_baremetal: test_pytest_models_no_target + - test_arm_baremetal: test_pytest_ops_tosa + - test_arm_baremetal: test_pytest_models_tosa + - test_arm_baremetal: test_run_tosa fail-fast: false with: runner: linux.2xlarge @@ -516,7 +519,7 @@ jobs: source .ci/scripts/utils.sh install_executorch "--use-pt-pinned-commit" - .ci/scripts/setup-arm-baremetal-tools.sh + .ci/scripts/setup-arm-baremetal-tools.sh --disable-ethos-u-deps ARM_TEST=${{ matrix.test_arm_baremetal }} diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml index 2cd284b059b..cd735d75adf 100644 --- a/.github/workflows/trunk.yml +++ b/.github/workflows/trunk.yml @@ -273,8 +273,8 @@ jobs: # Test selective build PYTHON_EXECUTABLE=python bash examples/portable/scripts/test_demo_backend_delegation.sh "${BUILD_TOOL}" - test-arm-backend: - name: test-arm-backend + test-arm-backend-ethos-u: + name: test-arm-backend-ethos-u uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main permissions: id-token: write @@ -282,12 +282,12 @@ jobs: strategy: matrix: include: - - test_arm_baremetal: test_pytest_ops_ethosu_fvp - - test_arm_baremetal: test_pytest_models_ethosu_fvp - - test_arm_baremetal: test_run_ethosu_fvp - - test_arm_baremetal: test_models_tosa - - test_arm_baremetal: test_models_ethos-u55 - - test_arm_baremetal: test_models_ethos-u85 + - test_arm_baremetal: test_pytest_ops_ethos_u55 + - test_arm_baremetal: test_pytest_models_ethos_u55 + - test_arm_baremetal: test_run_ethos_u55 + - test_arm_baremetal: test_pytest_ops_ethos_u85 + - test_arm_baremetal: test_pytest_models_ethos_u85 + - test_arm_baremetal: test_run_ethos_u85 - test_arm_baremetal: test_smaller_stories_llama - test_arm_baremetal: test_memory_allocation - test_arm_baremetal: test_model_smollm2-135M diff --git a/backends/arm/README.md b/backends/arm/README.md index 0abf5e9bf55..2e314ca6221 100644 --- a/backends/arm/README.md +++ b/backends/arm/README.md @@ -110,13 +110,24 @@ These scripts also install the necessary dependencies to run the tests. 
 Below is an overview of some of the testing options this script provides:
 
 | Command                                        | Description                                  |
-| ---------------------------------------------- | -------------------------------------------- |
-| `test_arm_baremetal.sh test_pytest`            | Runs all unit tests.                         |
-| `test_arm_baremetal.sh test_pytest_ethosu_fvp` | Same as `test_pytest` but uses Corstone FVP. |
-| `test_arm_baremetal.sh test_run_ethosu_fvp`    | Runs some models with Corstone FVP.          |
-| `test_arm_baremetal.sh test_full_ethosu_fvp`   | Runs E2E model tests on Corstone FVP.        |
-| `test_arm_baremetal.sh test_pytest_vkml`       | Runs all unit tests with Vulkan ML.          |
-| `test_arm_baremetal.sh test_full_vkml`         | Run E2E models test with Vulkan ML.          |
+| ---------------------------------------------------- | -------------------------------------------------------------- |
+| `test_arm_baremetal.sh test_pytest_ops_no_target`    | Runs operator unit tests for non-target-specific use cases.     |
+| `test_arm_baremetal.sh test_pytest_models_no_target` | Runs model unit tests for non-target-specific use cases.        |
+| `test_arm_baremetal.sh test_pytest_ops_tosa`         | Runs operator unit tests for TOSA-specific use cases.           |
+| `test_arm_baremetal.sh test_pytest_models_tosa`      | Runs model unit tests for TOSA-specific use cases.              |
+| `test_arm_baremetal.sh test_run_tosa`                | Runs end-to-end unit tests for TOSA-specific use cases.         |
+| `test_arm_baremetal.sh test_pytest_ops_ethos_u55`    | Runs operator unit tests for Ethos-U55-specific use cases.      |
+| `test_arm_baremetal.sh test_pytest_models_ethos_u55` | Runs model unit tests for Ethos-U55-specific use cases.         |
+| `test_arm_baremetal.sh test_run_ethos_u55`           | Runs end-to-end unit tests for Ethos-U55-specific use cases.    |
+| `test_arm_baremetal.sh test_pytest_ops_ethos_u85`    | Runs operator unit tests for Ethos-U85-specific use cases.      |
+| `test_arm_baremetal.sh test_pytest_models_ethos_u85` | Runs model unit tests for Ethos-U85-specific use cases.         |
+| `test_arm_baremetal.sh test_run_ethos_u85`           | Runs end-to-end unit tests for Ethos-U85-specific use cases.    |
+| `test_arm_baremetal.sh test_pytest_ops_vkml`         | Runs operator unit tests for VGF-specific use cases.            |
+| `test_arm_baremetal.sh test_pytest_models_vkml`      | Runs model unit tests for VGF-specific use cases.               |
+| `test_arm_baremetal.sh test_run_vkml`                | Runs end-to-end unit tests for VGF-specific use cases.          |
+| `test_arm_baremetal.sh test_model_smollm2-135M`      | Runs the SmolLM2-135M model on Ethos-U85.                       |
+| `test_arm_baremetal.sh test_smaller_stories_llama`   | Runs a smaller stories Llama model end-to-end on Corstone FVP.  |
+| `test_arm_baremetal.sh test_memory_allocation`       | Runs memory allocation tests for Ethos-U targets.               |
 
 For more information, please refer to the `backends/arm/test/test_arm_baremetal.sh` script.
 
diff --git a/backends/arm/test/test_arm_baremetal.sh b/backends/arm/test/test_arm_baremetal.sh
index 5a168637214..1180c115a3c 100755
--- a/backends/arm/test/test_arm_baremetal.sh
+++ b/backends/arm/test/test_arm_baremetal.sh
@@ -14,7 +14,8 @@ script_dir=$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)
 et_root_dir=$(cd ${script_dir}/../../.. && pwd)
 cd "${et_root_dir}"
 pwd
-setup_path_script=${et_root_dir}/examples/arm/arm-scratch/setup_path.sh
+scratch_dir=${et_root_dir}/examples/arm/ethos-u-scratch
+setup_path_script=${scratch_dir}/setup_path.sh
 _setup_msg="please refer to ${et_root_dir}/examples/arm/setup.sh to properly install necessary tools."
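For example, the new suites can be invoked individually from the ExecuTorch repository root. The snippet below is an illustrative sketch only; it assumes `examples/arm/setup.sh` has already been run so that `setup_path.sh` exists (the test script sources it itself):

```bash
# Illustrative local usage; suite names are taken from the table above.

# Operator unit tests that only exercise the TOSA flow
backends/arm/test/test_arm_baremetal.sh test_pytest_ops_tosa

# End-to-end run.sh examples for the Ethos-U85 target
backends/arm/test/test_arm_baremetal.sh test_run_ethos_u85
```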
@@ -24,7 +25,6 @@ TEST_SUITE=$1 # This should be prepared by the setup.sh [[ -f ${setup_path_script} ]] \ || { echo "Missing ${setup_path_script}. ${_setup_msg}"; exit 1; } - source ${setup_path_script} help() { @@ -71,142 +71,87 @@ all() { # Run all tests echo "${TEST_SUITE_NAME}: PASS" } -test_pytest_ops() { # Test ops and other things - echo "${TEST_SUITE_NAME}: Run pytest" - - # Make sure to not run this tests on FVP by removing the elf builds, - # as they are detected by the unit tests and used if they exists - rm -Rf arm_test/arm_semihosting_executor_runner_corstone-300 - rm -Rf arm_test/arm_semihosting_executor_runner_corstone-320 - - # Prepare for pytest - backends/arm/scripts/build_executorch.sh +# ------------------------------------------- +# -------- Non target-specific tests -------- +# ------------------------------------------- +test_pytest_ops_no_target() { + echo "${TEST_SUITE_NAME}: Run pytest ops for target-less tests" - # Run arm baremetal pytest tests without FVP - pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models + # Run arm baremetal pytest tests without target + pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models -k "not (tosa or vgf or u55 or u85)" echo "${TEST_SUITE_NAME}: PASS" } -test_pytest_models() { # Test ops and other things - echo "${TEST_SUITE_NAME}: Run pytest" - - # Make sure to not run this tests on FVP by removing the elf builds, - # as they are detected by the unit tests and used if they exists - rm -Rf arm_test/arm_semihosting_executor_runner_corstone-300 - rm -Rf arm_test/arm_semihosting_executor_runner_corstone-320 - - # Prepare for pytest - backends/arm/scripts/build_executorch.sh +test_pytest_models_no_target() { + echo "${TEST_SUITE_NAME}: Run pytest models for target-less tests" # Install model dependencies for pytest source backends/arm/scripts/install_models_for_test.sh # Run arm baremetal pytest tests without FVP - pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models - echo "${TEST_SUITE_NAME}: PASS" -} - -test_pytest() { # Test ops and other things - echo "${TEST_SUITE_NAME}: Run pytest" - test_pytest_ops - test_pytest_models + pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k "not (tosa or vgf or u55 or u85)" echo "${TEST_SUITE_NAME}: PASS" } -test_pytest_ops_ethosu_fvp() { # Same as test_pytest but also sometime verify using Corstone FVP - echo "${TEST_SUITE_NAME}: Run pytest with fvp" - - # Prepare Corstone-3x0 FVP for pytest - backends/arm/scripts/build_executorch.sh - # Build semihosting version of the runner used by pytest testing. 
This builds: - # arm_test/arm_semihosting_executor_runner_corstone-300 - # arm_test/arm_semihosting_executor_runner_corstone-320 - backends/arm/test/setup_testing.sh +# ------------------------------------- +# -------- TOSA specific tests -------- +# ------------------------------------- +test_pytest_ops_tosa() { + echo "${TEST_SUITE_NAME}: Run pytest ops for TOSA" - # Run arm baremetal pytest tests with FVP - pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models + pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models -k tosa echo "${TEST_SUITE_NAME}: PASS" } -test_pytest_models_ethosu_fvp() { # Same as test_pytest but also sometime verify using Corstone FVP - echo "${TEST_SUITE_NAME}: Run pytest with fvp" - - # Prepare Corstone-3x0 FVP for pytest - backends/arm/scripts/build_executorch.sh - # Build semihosting version of the runner used by pytest testing. This builds: - # arm_test/arm_semihosting_executor_runner_corstone-300 - # arm_test/arm_semihosting_executor_runner_corstone-320 - backends/arm/test/setup_testing.sh +test_pytest_models_tosa() { + echo "${TEST_SUITE_NAME}: Run pytest models for TOSA" # Install model dependencies for pytest source backends/arm/scripts/install_models_for_test.sh - # Run arm baremetal pytest tests with FVP - pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models + pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k tosa echo "${TEST_SUITE_NAME}: PASS" } -test_pytest_ethosu_fvp() { # Same as test_pytest but also sometime verify using Corstone FVP - echo "${TEST_SUITE_NAME}: Run pytest with fvp" - test_pytest_ops_ethosu_fvp - test_pytest_models_ethosu_fvp - echo "${TEST_SUITE_NAME}: PASS" -} +test_run_tosa() { + echo "${TEST_SUITE_NAME}: Test TOSA delegate examples with run.sh" + echo "${TEST_SUITE_NAME}: Test target TOSA" + examples/arm/run.sh --et_build_root=arm_test/test_run --target=TOSA-1.0+INT --model_name=add + examples/arm/run.sh --et_build_root=arm_test/test_run --target=TOSA-1.0+INT --model_name=mul -test_pytest_ops_vkml() { # Same as test_pytest but also sometime verify using VKML runtime - echo "${TEST_SUITE_NAME}: Run pytest operator tests with VKML runtime" - - backends/arm/test/setup_testing_vkml.sh - - pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ \ - --ignore=backends/arm/test/models -k _vgf_ echo "${TEST_SUITE_NAME}: PASS" } -test_pytest_models_vkml() { # Same as test_pytest but also sometime verify VKML runtime - echo "${TEST_SUITE_NAME}: Run pytest model tests with VKML runtime" +# ---------------------------------------------- +# -------- Arm Ethos-U55 specific tests -------- +# ---------------------------------------------- +test_pytest_ops_ethos_u55() { + echo "${TEST_SUITE_NAME}: Run pytest ops for Arm Ethos-U55" backends/arm/scripts/build_executorch.sh - backends/arm/test/setup_testing_vkml.sh - - # Install model dependencies for pytest - source backends/arm/scripts/install_models_for_test.sh - - pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k _vgf_ - echo "${TEST_SUITE_NAME}: PASS" -} + backends/arm/test/setup_testing.sh -test_pytest_vkml() { # Same as test_pytest but also sometime verify VKML runtime - echo "${TEST_SUITE_NAME}: Run pytest with VKML" - test_pytest_ops_vkml - test_pytest_models_vkml + pytest --verbose --color=yes --numprocesses=auto 
--durations=10 backends/arm/test/ --ignore=backends/arm/test/models -k u55 echo "${TEST_SUITE_NAME}: PASS" } -test_run_vkml() { # End to End model tests using run.sh - echo "${TEST_SUITE_NAME}: Test VKML delegate examples with run.sh" +test_pytest_models_ethos_u55() { + echo "${TEST_SUITE_NAME}: Run pytest models for Arm Ethos-U55" - echo "${TEST_SUITE_NAME}: Test VKML" - out_folder="arm_test/test_run" - examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=add --output=${out_folder}/runner --bundleio - examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=mul --output=${out_folder}/runner --bundleio + backends/arm/scripts/build_executorch.sh + backends/arm/test/setup_testing.sh - examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=qadd --output=${out_folder}/runner --bundleio - examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=qops --output=${out_folder}/runner --bundleio + # Install model dependencies for pytest + source backends/arm/scripts/install_models_for_test.sh + pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k u55 echo "${TEST_SUITE_NAME}: PASS" } -test_run_ethosu_fvp() { # End to End model tests using run.sh - echo "${TEST_SUITE_NAME}: Test ethos-u delegate examples with run.sh" +test_run_ethos_u55() { + echo "${TEST_SUITE_NAME}: Test ethos-u55 delegate examples with run.sh" - # TOSA quantized - echo "${TEST_SUITE_NAME}: Test target TOSA" - examples/arm/run.sh --et_build_root=arm_test/test_run --target=TOSA-1.0+INT --model_name=add - examples/arm/run.sh --et_build_root=arm_test/test_run --target=TOSA-1.0+INT --model_name=mul - - # Ethos-U55 echo "${TEST_SUITE_NAME}: Test target Ethos-U55" examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-64 --model_name=add examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=add --bundleio @@ -221,124 +166,108 @@ test_run_ethosu_fvp() { # End to End model tests using run.sh examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=examples/arm/example_modules/add.py examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=examples/arm/example_modules/add.py --bundleio - # Ethos-U85 - echo "${TEST_SUITE_NAME}: Test target Ethos-U85" - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=add - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-256 --model_name=add --bundleio - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-512 --model_name=add --bundleio --etdump - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-1024 --model_name=add --etdump - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-2048 --model_name=mul --pte_placement=elf - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=mul --pte_placement=0x38000000 - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=mul --bundleio --pte_placement=elf - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-256 --model_name=mul --bundleio --pte_placement=0x38000000 - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=examples/arm/example_modules/add.py - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-1024 
--model_name=examples/arm/example_modules/add.py --bundleio - # Cortex-M op tests - echo "${TEST_SUITE_NAME}: Test target Cortex-M55 (on a Ethos-U)" + echo "${TEST_SUITE_NAME}: Test target Cortex-M55 (on Ethos-U55)" examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=add --bundleio --no_delegate --select_ops_list="aten::add.out" examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=qadd --bundleio examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=qops --bundleio examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u55-128 --model_name=qops --bundleio --no_delegate --select_ops_list="aten::sub.out,aten::add.out,aten::mul.out" - examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=qops --bundleio echo "${TEST_SUITE_NAME}: PASS" } -test_models_vkml() { # End to End model tests using model_test.py - echo "${TEST_SUITE_NAME}: Test VKML delegated models with test_model.py" +# ---------------------------------------------- +# -------- Arm Ethos-U85 specific tests -------- +# ---------------------------------------------- +test_pytest_ops_ethos_u85() { + echo "${TEST_SUITE_NAME}: Run pytest ops for Arm Ethos-U85" - # Build common libs once - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --build_libs - - # VKML - echo "${TEST_SUITE_NAME}: Test target VKML" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=vgf --model=resnet18 --extra_runtime_flags="--bundleio_atol=0.2 --bundleio_rtol=0.2" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=vgf --model=resnet50 --extra_runtime_flags="--bundleio_atol=0.2 --bundleio_rtol=0.2" + backends/arm/scripts/build_executorch.sh + backends/arm/test/setup_testing.sh + # Run arm baremetal pytest tests with FVP + pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models -k u85 echo "${TEST_SUITE_NAME}: PASS" } -test_models_tosa() { # End to End model tests using model_test.py - echo "${TEST_SUITE_NAME}: Test TOSA delegated models with test_model.py" +test_pytest_models_ethos_u85() { + echo "${TEST_SUITE_NAME}: Run pytest models for Arm Ethos-U85" - # Build common libs once - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --build_libs + backends/arm/scripts/build_executorch.sh + backends/arm/test/setup_testing.sh - # TOSA quantized - echo "${TEST_SUITE_NAME}: Test ethos-u target TOSA" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=mv2 - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=mv3 - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=lstm - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=edsr - # python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=emformer_transcribe # Takes long time to run - # python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=emformer_join # Takes long time to run - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=w2l - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT 
--model=ic3 - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=ic4 - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=resnet18 - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=TOSA-1.0+INT --model=resnet50 + # Install model dependencies for pytest + source backends/arm/scripts/install_models_for_test.sh + pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k u85 echo "${TEST_SUITE_NAME}: PASS" } -test_models_ethos-u55() { # End to End model tests using model_test.py - echo "${TEST_SUITE_NAME}: Test Ethos-U55 delegated models with test_model.py" +test_run_ethos_u85() { + echo "${TEST_SUITE_NAME}: Test ethos-u85 delegate examples with run.sh" - # Build common libs once - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --build_libs + echo "${TEST_SUITE_NAME}: Test target Ethos-U85" + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=add + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-256 --model_name=add --bundleio + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-512 --model_name=add --bundleio --etdump + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-1024 --model_name=add --etdump + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-2048 --model_name=mul --pte_placement=elf + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=mul --pte_placement=0x38000000 + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=mul --bundleio --pte_placement=elf + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-256 --model_name=mul --bundleio --pte_placement=0x38000000 + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=examples/arm/example_modules/add.py + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-1024 --model_name=examples/arm/example_modules/add.py --bundleio - # Ethos-U55 - echo "${TEST_SUITE_NAME}: Test ethos-u target Ethos-U55" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u55-128 --model=mv2 --extra_flags="-DET_ATOL=2.00 -DET_RTOL=2.00" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u55-64 --model=mv3 --extra_flags="-DET_ATOL=5.00 -DET_RTOL=5.00" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u55-256 --model=lstm --extra_flags="-DET_ATOL=0.03 -DET_RTOL=0.03" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u55-128 --model=resnet18 --extra_flags="-DET_ATOL=0.2 -DET_RTOL=0.2" - # TODO: Output performance for resnet50 is bad with per-channel quantization (MLETORCH-1149). - # Also we get OOM when running this model. Disable it for now. 
- #python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u55-128 --model=resnet50 --extra_flags="-DET_ATOL=6.2 -DET_RTOL=6.2" + # Cortex-M op tests + echo "${TEST_SUITE_NAME}: Test target Cortex-M55 (on Ethos-U85)" + examples/arm/run.sh --et_build_root=arm_test/test_run --target=ethos-u85-128 --model_name=qops --bundleio echo "${TEST_SUITE_NAME}: PASS" } -test_models_ethos-u85() { # End to End model tests using model_test.py - echo "${TEST_SUITE_NAME}: Test Ethos-U85 delegated models with test_model.py" - - # Build common libs once - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --build_libs +# ---------------------------------------------------------- +# -------- Vulkan Graph Format (VGF) specific tests -------- +# ---------------------------------------------------------- +test_pytest_ops_vkml() { + echo "${TEST_SUITE_NAME}: Run pytest operator tests with VKML runtime" - # Ethos-U85 - echo "${TEST_SUITE_NAME}: Test ethos-u target Ethos-U85" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-256 --model=mv2 --extra_flags="-DET_ATOL=2.00 -DET_RTOL=2.00" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-512 --model=mv3 --extra_flags="-DET_ATOL=5.00 -DET_RTOL=5.00" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-128 --model=lstm --extra_flags="-DET_ATOL=0.03 -DET_RTOL=0.03" - #python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-128 --model=w2l --extra_flags="-DET_ATOL=0.01 -DET_RTOL=0.01" # Takes long time to run - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-256 --model=ic4 --extra_flags="-DET_ATOL=0.8 -DET_RTOL=0.8" --timeout=2400 - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-128 --model=resnet18 --extra_flags="-DET_ATOL=0.2 -DET_RTOL=0.2" - python3 backends/arm/test/test_model.py --test_output=arm_test/test_model --target=ethos-u85-128 --model=resnet50 --extra_flags="-DET_ATOL=0.2 -DET_RTOL=0.2" + source backends/arm/test/setup_testing_vkml.sh + pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ \ + --ignore=backends/arm/test/models -k _vgf_ echo "${TEST_SUITE_NAME}: PASS" } +test_pytest_models_vkml() { + echo "${TEST_SUITE_NAME}: Run pytest model tests with VKML runtime" -test_full_ethosu_fvp() { # All End to End model tests - echo "${TEST_SUITE_NAME}: Test ethos-u delegate models and examples on fvp" + source backends/arm/test/setup_testing_vkml.sh - test_run_ethosu_fvp - test_models_tosa - test_models_ethos-u55 - test_models_ethos-u85 + # Install model dependencies for pytest + source backends/arm/scripts/install_models_for_test.sh + + pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k _vgf_ echo "${TEST_SUITE_NAME}: PASS" } -test_full_vkml() { # All End to End model tests - echo "${TEST_SUITE_NAME}: Test VGF delegate models and examples with VKML" +test_run_vkml() { + echo "${TEST_SUITE_NAME}: Test VKML delegate examples with run.sh" + + echo "${TEST_SUITE_NAME}: Test VKML" + out_folder="arm_test/test_run" + + examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=add --output=${out_folder}/runner + examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=mul --output=${out_folder}/runner + + examples/arm/run.sh --et_build_root=${out_folder} 
--target=vgf --model_name=qadd --output=${out_folder}/runner
+    examples/arm/run.sh --et_build_root=${out_folder} --target=vgf --model_name=qops --output=${out_folder}/runner
 
-    test_run_vkml
-    test_models_vkml
     echo "${TEST_SUITE_NAME}: PASS"
 }
 
+# -------------------------------------
+# -------- Miscellaneous tests --------
+# -------------------------------------
 test_model_smollm2-135M() {
     echo "${TEST_SUITE_NAME}: Test SmolLM2-135M on Ethos-U85"
 
diff --git a/backends/arm/test/test_arm_ootb.sh b/backends/arm/test/test_arm_ootb.sh
index 186092e21f9..6687e9b836c 100755
--- a/backends/arm/test/test_arm_ootb.sh
+++ b/backends/arm/test/test_arm_ootb.sh
@@ -15,4 +15,13 @@ run_ootb_tests_ethos_u() {
   echo "${FUNCNAME}: PASS"
 }
 
+run_ootb_tests_tosa() {
+  echo "${FUNCNAME}: Running out-of-the-box tests for TOSA"
+  jupyter nbconvert \
+    --to notebook \
+    --execute backends/arm/scripts/TOSA_minimal_example.ipynb
+  echo "${FUNCNAME}: PASS"
+}
+
 run_ootb_tests_ethos_u
+run_ootb_tests_tosa

From 4bb87431925832dba8da3953f9d92d3b4b8f2fa9 Mon Sep 17 00:00:00 2001
From: Michiel Olieslagers
Date: Thu, 13 Nov 2025 14:49:38 +0000
Subject: [PATCH 2/3] Fix and update run.sh script for VGF target.

Change-Id: I695c2563b3f1ebcbd6b6eb660dff36c305c0325a
---
 examples/arm/run.sh | 100 ++++++++++++++++++++++++--------------------
 1 file changed, 54 insertions(+), 46 deletions(-)

diff --git a/examples/arm/run.sh b/examples/arm/run.sh
index bd1c696d8ff..73a845e9e7b 100755
--- a/examples/arm/run.sh
+++ b/examples/arm/run.sh
@@ -45,6 +45,7 @@ model_explorer=false
 perf_overlay=false
 visualize_tosa=false
 visualize_pte=false
+model_converter=false
 
 function help() {
     echo "Usage: $(basename $0) [options]"
@@ -131,21 +132,10 @@ if ! [[ ${pte_placement} == "elf" ]]; then
 fi
 
 # Default Ethos-u tool folder override with --scratch-dir=
-arm_scratch_dir=$(realpath ${arm_scratch_dir})
-setup_path_script=${arm_scratch_dir}/setup_path.sh
-if [[ ${toolchain} == "arm-none-eabi-gcc" ]]; then
-    toolchain_cmake=${et_root_dir}/examples/arm/ethos-u-setup/${toolchain}.cmake
-elif [[ ${toolchain} == "arm-zephyr-eabi-gcc" ]]; then
-    toolchain_cmake=${et_root_dir}/examples/zephyr/x86_64-linux-arm-zephyr-eabi-gcc.cmake
-else
-    echo "Error: Invalid toolchain selection, provided: ${toolchain}"
-    echo "    Valid options are {arm-none-eabi-gcc, arm-zephyr-eabi-gcc}"
-    exit 1;
-fi
-toolchain_cmake=$(realpath ${toolchain_cmake})
+ethos_u_scratch_dir=$(realpath ${ethos_u_scratch_dir})
+setup_path_script=${ethos_u_scratch_dir}/setup_path.sh
 _setup_msg="please refer to ${script_dir}/setup.sh to properly install necessary tools."
- # Set target based variables if [[ ${system_config} == "" ]] then @@ -170,26 +160,68 @@ then config="Arm/vela.ini" fi +# Build executorch libraries +cd $et_root_dir +devtools_flag="" +bundleio_flag="" +etrecord_flag="" +et_dump_flag="" +qdq_fusion_op_flag="" +fvp_pmu_flag="" +if [ "$build_with_etdump" = true ] ; then + et_dump_flag="--etdump" + etrecord_flag="--etrecord" +fi + +if [ "$bundleio" = true ] ; then + devtools_flag="--devtools" + bundleio_flag="--bundleio" +fi + +if [ "$qdq_fusion_op" = true ] ; then + qdq_fusion_op_flag="--enable_qdq_fusion_pass" +fi + function check_setup () { # basic checks that setup.sh did everything needed before we get started # check if setup_path_script was created, if so source it if [[ -f ${setup_path_script} ]]; then source $setup_path_script + echo $setup_path_script else echo "Could not find ${setup_path_script} file, ${_setup_msg}" return 1 fi - # If setup_path_script was correct all these checks should now pass - hash ${toolchain} \ - || { echo "Could not find ${toolchain} toolchain on PATH, ${_setup_msg}"; return 1; } - - [[ -f ${toolchain_cmake} ]] \ - || { echo "Could not find ${toolchain_cmake} file, ${_setup_msg}"; return 1; } - - [[ -f ${et_root_dir}/CMakeLists.txt ]] \ - || { echo "Executorch repo doesn't contain CMakeLists.txt file at root level"; return 1; } + if [[ ${target} =~ "ethos-u" ]]; then + if [[ ${toolchain} == "arm-none-eabi-gcc" ]]; then + toolchain_cmake=${et_root_dir}/examples/arm/ethos-u-setup/${toolchain}.cmake + elif [[ ${toolchain} == "arm-zephyr-eabi-gcc" ]]; then + toolchain_cmake=${et_root_dir}/examples/zephyr/x86_64-linux-arm-zephyr-eabi-gcc.cmake + else + echo "Error: Invalid toolchain selection, provided: ${toolchain}" + echo " Valid options are {arm-none-eabi-gcc, arm-zephyr-eabi-gcc}" + exit 1; + fi + toolchain_cmake=$(realpath ${toolchain_cmake}) + hash ${toolchain} \ + || { echo "Could not find ${toolchain} toolchain on PATH, ${_setup_msg}"; return 1; } + + [[ -f ${toolchain_cmake} ]] \ + || { echo "Could not find ${toolchain_cmake} file, ${_setup_msg}"; return 1; } + + [[ -f ${et_root_dir}/CMakeLists.txt ]] \ + || { echo "Executorch repo doesn't contain CMakeLists.txt file at root level"; return 1; } + + + backends/arm/scripts/build_executorch.sh --et_build_root="${et_build_root}" --build_type=$build_type $devtools_flag $et_dump_flag --toolchain="${toolchain}" + elif [[ ${target} =~ "vgf" ]]; then + model_converter=$(which model-converter) + echo "${model_converter}" + [[ "${model_converter}" == "model-converter not found" ]] \ + && { echo "Could not find model-converter, ${_setup_msg}"; return 1; } + fi return 0 } @@ -212,30 +244,6 @@ if ! 
check_setup; then fi fi -# Build executorch libraries -cd $et_root_dir -devtools_flag="" -bundleio_flag="" -etrecord_flag="" -et_dump_flag="" -qdq_fusion_op_flag="" -fvp_pmu_flag="" -if [ "$build_with_etdump" = true ] ; then - et_dump_flag="--etdump" - etrecord_flag="--etrecord" -fi - -if [ "$bundleio" = true ] ; then - devtools_flag="--devtools" - bundleio_flag="--bundleio" -fi - -if [ "$qdq_fusion_op" = true ] ; then - qdq_fusion_op_flag="--enable_qdq_fusion_pass" -fi - -backends/arm/scripts/build_executorch.sh --et_build_root="${et_build_root}" --build_type=$build_type $devtools_flag $et_dump_flag --toolchain="${toolchain}" - if [[ -z "$model_name" ]]; then # the test models run, and whether to delegate test_model=( From 17f97d262d491aa8671e071479b70afe82fb6e70 Mon Sep 17 00:00:00 2001 From: Michiel Olieslagers Date: Mon, 22 Dec 2025 17:21:31 +0100 Subject: [PATCH 3/3] Arm backend: Fixed scratch-dir naming issues Fixed call to wrong location for arm-scratch-dir and changed no_target function behaviour. Change-Id: Idf292ee747ffcf8ffed13d725849cc686d4c60af Signed-off-by: Michiel Olieslagers --- .github/workflows/pull.yml | 1 - backends/arm/test/test_arm_baremetal.sh | 6 +++--- examples/arm/run.sh | 5 ++--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 74373a570f5..3d83e2de692 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -500,7 +500,6 @@ jobs: matrix: include: - test_arm_baremetal: test_pytest_ops_no_target - - test_arm_baremetal: test_pytest_models_no_target - test_arm_baremetal: test_pytest_ops_tosa - test_arm_baremetal: test_pytest_models_tosa - test_arm_baremetal: test_run_tosa diff --git a/backends/arm/test/test_arm_baremetal.sh b/backends/arm/test/test_arm_baremetal.sh index 1180c115a3c..4f21ae18a01 100755 --- a/backends/arm/test/test_arm_baremetal.sh +++ b/backends/arm/test/test_arm_baremetal.sh @@ -14,7 +14,7 @@ script_dir=$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) et_root_dir=$(cd ${script_dir}/../../.. && pwd) cd "${et_root_dir}" pwd -scratch_dir=${et_root_dir}/examples/arm/ethos-u-scratch +scratch_dir=${et_root_dir}/examples/arm/arm-scratch setup_path_script=${scratch_dir}/setup_path.sh _setup_msg="please refer to ${et_root_dir}/examples/arm/setup.sh to properly install necessary tools." @@ -78,7 +78,7 @@ test_pytest_ops_no_target() { echo "${TEST_SUITE_NAME}: Run pytest ops for target-less tests" # Run arm baremetal pytest tests without target - pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models -k "not (tosa or vgf or u55 or u85)" + pytest --verbose --color=yes --numprocesses=auto --durations=10 backends/arm/test/ --ignore=backends/arm/test/models -k no_target echo "${TEST_SUITE_NAME}: PASS" } @@ -89,7 +89,7 @@ test_pytest_models_no_target() { source backends/arm/scripts/install_models_for_test.sh # Run arm baremetal pytest tests without FVP - pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k "not (tosa or vgf or u55 or u85)" + pytest --verbose --color=yes --numprocesses=auto --durations=0 backends/arm/test/models -k no_target echo "${TEST_SUITE_NAME}: PASS" } diff --git a/examples/arm/run.sh b/examples/arm/run.sh index 73a845e9e7b..b8ecfde024f 100755 --- a/examples/arm/run.sh +++ b/examples/arm/run.sh @@ -132,8 +132,8 @@ if ! 
[[ ${pte_placement} == "elf" ]]; then fi # Default Ethos-u tool folder override with --scratch-dir= -ethos_u_scratch_dir=$(realpath ${ethos_u_scratch_dir}) -setup_path_script=${ethos_u_scratch_dir}/setup_path.sh +arm_scratch_dir=$(realpath ${arm_scratch_dir}) +setup_path_script=${arm_scratch_dir}/setup_path.sh _setup_msg="please refer to ${script_dir}/setup.sh to properly install necessary tools." # Set target based variables @@ -188,7 +188,6 @@ function check_setup () { # check if setup_path_script was created, if so source it if [[ -f ${setup_path_script} ]]; then source $setup_path_script - echo $setup_path_script else echo "Could not find ${setup_path_script} file, ${_setup_msg}" return 1