From 324ef0cbf4f9af969904627e5b91b893293b5d4c Mon Sep 17 00:00:00 2001
From: Jeroen Bakker
Date: Mon, 29 Jan 2024 15:39:14 +0100
Subject: [PATCH] RenderTest: GPU Render Tests Pass Silently

Add a fail-silently option to GPU-based render tests. This is a
prerequisite for enabling the render tests on the buildbot. By default
these render tests will pass silently.

* Tests pass when the `--fail-silently` argument is used.
* Only crashes are reported as failed tests.
* To find failing tests, review the generated test reports.

The `WITH_GPU_RENDER_TESTS_SILENT` compile option controls whether
failing tests pass silently (the default) or fail (the default for the
developer build configuration). Even when failing tests pass silently,
the generated render report still lists them as failures.

Pull Request: https://projects.blender.org/blender/blender/pulls/117629
---
 CMakeLists.txt                           |  1 +
 .../cmake/config/blender_developer.cmake |  1 +
 build_files/windows/parse_arguments.cmd  |  2 +-
 tests/python/CMakeLists.txt              | 12 ++++++++++
 tests/python/eevee_next_render_tests.py  |  3 ++-
 tests/python/eevee_render_tests.py       |  3 ++-
 tests/python/modules/render_report.py    | 22 ++++++++++++-------
 tests/python/storm_render_tests.py       |  3 ++-
 tests/python/workbench_render_tests.py   |  3 ++-
 9 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8369241155d..c12dc749ce3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -761,6 +761,7 @@ endif()
 # Unit testing
 option(WITH_GTESTS "Enable GTest unit testing" OFF)
 option(WITH_GPU_RENDER_TESTS "Enable GPU render related unit testing (EEVEE, Workbench and Grease Pencil)" OFF)
+option(WITH_GPU_RENDER_TESTS_SILENT "Run GPU render tests silently (finished tests will pass). Generated report will show failing tests" ON)
 option(WITH_GPU_DRAW_TESTS "Enable GPU drawing related unit testing (GPU backends and draw manager)" OFF)
 option(WITH_COMPOSITOR_REALTIME_TESTS "Enable regression testing for realtime compositor" OFF)
 if(UNIX AND NOT (APPLE OR HAIKU))
diff --git a/build_files/cmake/config/blender_developer.cmake b/build_files/cmake/config/blender_developer.cmake
index 0cfcf3286f8..ddf729f3131 100644
--- a/build_files/cmake/config/blender_developer.cmake
+++ b/build_files/cmake/config/blender_developer.cmake
@@ -20,6 +20,7 @@ endif()
 set(WITH_CYCLES_NATIVE_ONLY ON CACHE BOOL "" FORCE)
 set(WITH_DOC_MANPAGE OFF CACHE BOOL "" FORCE)
 set(WITH_GTESTS ON CACHE BOOL "" FORCE)
+set(WITH_GPU_RENDER_TESTS_SILENT OFF CACHE BOOL "" FORCE)
 set(WITH_LIBMV_SCHUR_SPECIALIZATIONS OFF CACHE BOOL "" FORCE)
 set(WITH_PYTHON_SAFETY ON CACHE BOOL "" FORCE)
 if(WIN32)
diff --git a/build_files/windows/parse_arguments.cmd b/build_files/windows/parse_arguments.cmd
index a947cdf1843..5a8d17e440d 100644
--- a/build_files/windows/parse_arguments.cmd
+++ b/build_files/windows/parse_arguments.cmd
@@ -18,7 +18,7 @@ if NOT "%1" == "" (
     ) else if "%1" == "with_tests" (
         set TESTS_CMAKE_ARGS=%TESTS_CMAKE_ARGS% -DWITH_GTESTS=On
     ) else if "%1" == "with_gpu_tests" (
-        set TESTS_CMAKE_ARGS=%TESTS_CMAKE_ARGS% -DWITH_GPU_DRAW_TESTS=On -DWITH_GPU_RENDER_TESTS=On
+        set TESTS_CMAKE_ARGS=%TESTS_CMAKE_ARGS% -DWITH_GPU_DRAW_TESTS=On -DWITH_GPU_RENDER_TESTS=On -DWITH_GPU_RENDER_TESTS_SILENT=Off
     ) else if "%1" == "full" (
         set TARGET=Full
         set BUILD_CMAKE_ARGS=%BUILD_CMAKE_ARGS% ^
diff --git a/tests/python/CMakeLists.txt b/tests/python/CMakeLists.txt
index 4be909a9420..9c2f4ace034 100644
--- a/tests/python/CMakeLists.txt
+++ b/tests/python/CMakeLists.txt
@@ -696,6 +696,12 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
   if(WITH_GPU_RENDER_TESTS)
     list(APPEND gpu_render_tests ${render_tests})
     list(FILTER gpu_render_tests EXCLUDE REGEX light_group|light_linking|shadow_catcher|denoise|guiding|reports)
+
+    set(_gpu_render_tests_arguments)
+    if(WITH_GPU_RENDER_TESTS_SILENT)
+      list(APPEND _gpu_render_tests_arguments --fail-silently)
+    endif()
+
     # Eevee
     foreach(render_test ${gpu_render_tests})
       add_render_test(
@@ -703,6 +709,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         ${CMAKE_CURRENT_LIST_DIR}/eevee_render_tests.py
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/eevee"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()

@@ -713,6 +720,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         ${CMAKE_CURRENT_LIST_DIR}/eevee_next_render_tests.py
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/eevee_next"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()

@@ -723,6 +731,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         ${CMAKE_CURRENT_LIST_DIR}/workbench_render_tests.py
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/workbench"
+        ${_gpu_render_tests_arguments}
      )
     endforeach()

@@ -735,6 +744,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/storm_hydra"
         -export_method "HYDRA"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()

@@ -745,9 +755,11 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/storm_usd"
         -export_method "USD"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()
     endif()
+    unset(_gpu_render_tests_arguments)
   endif()
 endif()
 endif()
diff --git a/tests/python/eevee_next_render_tests.py b/tests/python/eevee_next_render_tests.py
index 983a9b3d16f..6b66ac0d19d 100644
--- a/tests/python/eevee_next_render_tests.py
+++ b/tests/python/eevee_next_render_tests.py
@@ -116,6 +116,7 @@ def create_argparse():
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser


@@ -145,7 +146,7 @@ def main():
     if test_dir_name.startswith('image'):
         report.set_fail_threshold(0.051)

-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)


diff --git a/tests/python/eevee_render_tests.py b/tests/python/eevee_render_tests.py
index 623a8a6e19b..19b7b929a41 100644
--- a/tests/python/eevee_render_tests.py
+++ b/tests/python/eevee_render_tests.py
@@ -144,6 +144,7 @@ def create_argparse():
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser


@@ -172,7 +173,7 @@ def main():
     if test_dir_name.startswith('image'):
         report.set_fail_threshold(0.051)

-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)


diff --git a/tests/python/modules/render_report.py b/tests/python/modules/render_report.py
index da21993076a..537f5d2edca 100755
--- a/tests/python/modules/render_report.py
+++ b/tests/python/modules/render_report.py
@@ -147,10 +147,10 @@ class Report:
     def set_engine_name(self, engine_name):
         self.engine_name = engine_name

-    def run(self, dirpath, blender, arguments_cb, batch=False):
+    def run(self, dirpath, blender, arguments_cb, batch=False, fail_silently=False):
         # Run tests and output report.
         dirname = os.path.basename(dirpath)
-        ok = self._run_all_tests(dirname, dirpath, blender, arguments_cb, batch)
+        ok = self._run_all_tests(dirname, dirpath, blender, arguments_cb, batch, fail_silently)
         self._write_data(dirname)
         self._write_html()
         if self.compare_engine:
@@ -534,9 +534,10 @@ class Report:

         return errors

-    def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch):
+    def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch, fail_silently):
         passed_tests = []
         failed_tests = []
+        silently_failed_tests = []
         all_files = list(blend_list(dirpath, self.device, self.blacklist))
         all_files.sort()
         print_message("Running {} tests from 1 test case." .
@@ -551,7 +552,11 @@ class Report:
                     return False
                 elif error == "NO_START":
                     return False
-                failed_tests.append(testname)
+
+                if fail_silently and error != 'CRASH':
+                    silently_failed_tests.append(testname)
+                else:
+                    failed_tests.append(testname)
             else:
                 passed_tests.append(testname)
             self._write_test_html(dirname, filepath, error)
@@ -564,12 +569,13 @@ class Report:
         print_message("{} tests." .
                       format(len(passed_tests)),
                       'SUCCESS', 'PASSED')
-        if failed_tests:
+        all_failed_tests = silently_failed_tests + failed_tests
+        if all_failed_tests:
             print_message("{} tests, listed below:" .
-                          format(len(failed_tests)),
+                          format(len(all_failed_tests)),
                           'FAILURE', 'FAILED')
-            failed_tests.sort()
-            for test in failed_tests:
+            all_failed_tests.sort()
+            for test in all_failed_tests:
                 print_message("{}" . format(test), 'FAILURE', "FAILED")

         return not bool(failed_tests)
diff --git a/tests/python/storm_render_tests.py b/tests/python/storm_render_tests.py
index c2d66eeb971..642626a48a5 100644
--- a/tests/python/storm_render_tests.py
+++ b/tests/python/storm_render_tests.py
@@ -59,6 +59,7 @@ def create_argparse():
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument("-export_method", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser


@@ -89,7 +90,7 @@ def main():

     os.environ['BLENDER_HYDRA_EXPORT_METHOD'] = export_method

-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)


diff --git a/tests/python/workbench_render_tests.py b/tests/python/workbench_render_tests.py
index f57cd312a85..a9a90492649 100644
--- a/tests/python/workbench_render_tests.py
+++ b/tests/python/workbench_render_tests.py
@@ -58,6 +58,7 @@ def create_argparse():
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser


@@ -80,7 +81,7 @@ def main():
     if test_dir_name.startswith('hair') and platform.system() == "Darwin":
         report.set_fail_threshold(0.050)

-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)
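
Reviewer note (not part of the patch): below is a minimal, self-contained Python sketch of the bookkeeping that `--fail-silently` adds to `Report._run_all_tests`. The helper names `classify` and `run_suite`, the sample test names, and the error string 'VERIFY' are illustrative stand-ins; only the 'CRASH' exception and the split between silent and hard failures are taken from the patch.

```python
# Minimal sketch (not the real render_report module) of the fail-silently
# bookkeeping: non-crash failures are recorded for the report but kept out
# of the process exit status; crashes always fail the run.
def classify(error, fail_silently):
    """Return 'passed', 'silently_failed' or 'failed' for a single test."""
    if not error:
        return 'passed'
    if fail_silently and error != 'CRASH':
        return 'silently_failed'
    return 'failed'


def run_suite(results, fail_silently=True):
    """`results` maps test name -> error string, or None when the test passed."""
    buckets = {'passed': [], 'silently_failed': [], 'failed': []}
    for name, error in results.items():
        buckets[classify(error, fail_silently)].append(name)
    # The report lists both failure buckets; only hard failures make the
    # overall run (and therefore the ctest invocation) fail.
    return buckets, not buckets['failed']


if __name__ == '__main__':
    # Hypothetical outcomes: one pass, one image mismatch, one crash.
    buckets, ok = run_suite({'shadow': None, 'hair': 'VERIFY', 'volume': 'CRASH'})
    print(buckets, ok)  # ok is False because of the crash
```

The design choice visible in the diff is that crashes are never silenced, so the buildbot still fails hard on broken builds, while ordinary render mismatches only show up in the generated report.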