RenderTest: GPU Render Tests Pass Silently

Add a fail-silently option to GPU-based render tests. This is a prerequisite for
enabling render tests on the buildbot. By default these render tests will pass silently.

* Tests will pass when the `--fail-silently` argument is used (see the example invocation after this list).
* Only crashes will still be reported as failed tests.
* To find the failing tests, review the generated test reports.
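For illustration, invoking one of the render test scripts directly with the new flag could look like this (paths are placeholders, and the `-blender` argument is assumed from the scripts' existing interface rather than shown in this diff):

    python tests/python/eevee_render_tests.py \
        -blender <path/to/blender> \
        -testdir <tests>/render/<test> \
        -outdir <build>/tests/eevee \
        --fail-silently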

The `WITH_GPU_RENDER_TESTS_SILENT` compile option controls this behavior: when it is
ON (the default) failing tests still pass, and when it is OFF (the default for developer
profiles) they fail as usual.
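For example, a local build can opt back into hard failures at configure time (the option names are taken from the CMake changes below):

    cmake <build-dir> -DWITH_GPU_RENDER_TESTS=ON -DWITH_GPU_RENDER_TESTS_SILENT=OFF

On Windows, `make.bat` does the same for the `with_gpu_tests` argument, as the diff below shows.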

Even when some tests fail, the test run itself still passes. In the generated render
report, however, the silently passed failures are correctly listed as failures.
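Condensed into a standalone sketch, the new bookkeeping amounts to the following (`'CRASH'` is the only error value taken from the diff; `'DIFF'` is a purely illustrative stand-in for any other error, and the test names are placeholders):

    # Sketch of the classification logic added to render_report.py below,
    # not the full Report class.
    def classify(error, testname, fail_silently, passed, failed, silently_failed):
        if error:
            if fail_silently and error != 'CRASH':
                # Recorded for the report, but hidden from the exit status.
                silently_failed.append(testname)
            else:
                failed.append(testname)
        else:
            passed.append(testname)

    passed, failed, silently_failed = [], [], []
    classify('DIFF', 'mask', True, passed, failed, silently_failed)    # silent failure
    classify('CRASH', 'volume', True, passed, failed, silently_failed) # still fails
    classify(None, 'hair', True, passed, failed, silently_failed)      # passes
    ok = not bool(failed)  # the crash makes this False; 'mask' alone would not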

Pull Request: https://projects.blender.org/blender/blender/pulls/117629
Jeroen Bakker 2024-01-29 15:39:14 +01:00
parent 7163873420
commit 324ef0cbf4
9 changed files with 37 additions and 13 deletions

@@ -761,6 +761,7 @@ endif()
 # Unit testing
 option(WITH_GTESTS "Enable GTest unit testing" OFF)
 option(WITH_GPU_RENDER_TESTS "Enable GPU render related unit testing (EEVEE, Workbench and Grease Pencil)" OFF)
+option(WITH_GPU_RENDER_TESTS_SILENT "Run GPU render tests silently (finished tests will pass). Generated report will show failing tests" ON)
 option(WITH_GPU_DRAW_TESTS "Enable GPU drawing related unit testing (GPU backends and draw manager)" OFF)
 option(WITH_COMPOSITOR_REALTIME_TESTS "Enable regression testing for realtime compositor" OFF)
 if(UNIX AND NOT (APPLE OR HAIKU))

@@ -20,6 +20,7 @@ endif()
 set(WITH_CYCLES_NATIVE_ONLY ON CACHE BOOL "" FORCE)
 set(WITH_DOC_MANPAGE OFF CACHE BOOL "" FORCE)
 set(WITH_GTESTS ON CACHE BOOL "" FORCE)
+set(WITH_GPU_RENDER_TESTS_SILENT OFF CACHE BOOL "" FORCE)
 set(WITH_LIBMV_SCHUR_SPECIALIZATIONS OFF CACHE BOOL "" FORCE)
 set(WITH_PYTHON_SAFETY ON CACHE BOOL "" FORCE)
 if(WIN32)

@@ -18,7 +18,7 @@ if NOT "%1" == "" (
 ) else if "%1" == "with_tests" (
     set TESTS_CMAKE_ARGS=%TESTS_CMAKE_ARGS% -DWITH_GTESTS=On
 ) else if "%1" == "with_gpu_tests" (
-    set TESTS_CMAKE_ARGS=%TESTS_CMAKE_ARGS% -DWITH_GPU_DRAW_TESTS=On -DWITH_GPU_RENDER_TESTS=On
+    set TESTS_CMAKE_ARGS=%TESTS_CMAKE_ARGS% -DWITH_GPU_DRAW_TESTS=On -DWITH_GPU_RENDER_TESTS=On -DWITH_GPU_RENDER_TESTS_SILENT=Off
 ) else if "%1" == "full" (
     set TARGET=Full
     set BUILD_CMAKE_ARGS=%BUILD_CMAKE_ARGS% ^

@@ -696,6 +696,12 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
   if(WITH_GPU_RENDER_TESTS)
     list(APPEND gpu_render_tests ${render_tests})
     list(FILTER gpu_render_tests EXCLUDE REGEX light_group|light_linking|shadow_catcher|denoise|guiding|reports)
+    set(_gpu_render_tests_arguments)
+    if(WITH_GPU_RENDER_TESTS_SILENT)
+      list(APPEND _gpu_render_tests_arguments --fail-silently)
+    endif()
     # Eevee
     foreach(render_test ${gpu_render_tests})
       add_render_test(
@@ -703,6 +709,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         ${CMAKE_CURRENT_LIST_DIR}/eevee_render_tests.py
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/eevee"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()
@@ -713,6 +720,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         ${CMAKE_CURRENT_LIST_DIR}/eevee_next_render_tests.py
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/eevee_next"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()
@@ -723,6 +731,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
         ${CMAKE_CURRENT_LIST_DIR}/workbench_render_tests.py
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
         -outdir "${TEST_OUT_DIR}/workbench"
+        ${_gpu_render_tests_arguments}
       )
     endforeach()
@@ -735,6 +744,7 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
           -testdir "${TEST_SRC_DIR}/render/${render_test}"
          -outdir "${TEST_OUT_DIR}/storm_hydra"
          -export_method "HYDRA"
+         ${_gpu_render_tests_arguments}
        )
      endforeach()
@@ -745,9 +755,11 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS)
          -testdir "${TEST_SRC_DIR}/render/${render_test}"
          -outdir "${TEST_OUT_DIR}/storm_usd"
          -export_method "USD"
+         ${_gpu_render_tests_arguments}
        )
      endforeach()
    endif()
+    unset(_gpu_render_tests_arguments)
   endif()
 endif()
 endif()

@@ -116,6 +116,7 @@ def create_argparse():
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser
@@ -145,7 +146,7 @@ def main():
     if test_dir_name.startswith('image'):
         report.set_fail_threshold(0.051)
-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)

@@ -144,6 +144,7 @@ def create_argparse():
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser
@@ -172,7 +173,7 @@ def main():
     if test_dir_name.startswith('image'):
         report.set_fail_threshold(0.051)
-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)

@@ -147,10 +147,10 @@ class Report:
     def set_engine_name(self, engine_name):
         self.engine_name = engine_name

-    def run(self, dirpath, blender, arguments_cb, batch=False):
+    def run(self, dirpath, blender, arguments_cb, batch=False, fail_silently=False):
         # Run tests and output report.
         dirname = os.path.basename(dirpath)
-        ok = self._run_all_tests(dirname, dirpath, blender, arguments_cb, batch)
+        ok = self._run_all_tests(dirname, dirpath, blender, arguments_cb, batch, fail_silently)
         self._write_data(dirname)
         self._write_html()
         if self.compare_engine:
@@ -534,9 +534,10 @@ class Report:
         return errors

-    def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch):
+    def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch, fail_silently):
         passed_tests = []
         failed_tests = []
+        silently_failed_tests = []
         all_files = list(blend_list(dirpath, self.device, self.blacklist))
         all_files.sort()
         print_message("Running {} tests from 1 test case." .
@@ -551,7 +552,11 @@ class Report:
                     return False
                 elif error == "NO_START":
                     return False
-                failed_tests.append(testname)
+                if fail_silently and error != 'CRASH':
+                    silently_failed_tests.append(testname)
+                else:
+                    failed_tests.append(testname)
             else:
                 passed_tests.append(testname)
             self._write_test_html(dirname, filepath, error)
@@ -564,12 +569,13 @@ class Report:
         print_message("{} tests." .
                       format(len(passed_tests)),
                       'SUCCESS', 'PASSED')
-        if failed_tests:
+        all_failed_tests = silently_failed_tests + failed_tests
+        if all_failed_tests:
             print_message("{} tests, listed below:" .
-                          format(len(failed_tests)),
+                          format(len(all_failed_tests)),
                           'FAILURE', 'FAILED')
-            failed_tests.sort()
-            for test in failed_tests:
+            all_failed_tests.sort()
+            for test in all_failed_tests:
                 print_message("{}" . format(test), 'FAILURE', "FAILED")

         return not bool(failed_tests)

@@ -59,6 +59,7 @@ def create_argparse():
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument("-export_method", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser
@@ -89,7 +90,7 @@ def main():
     os.environ['BLENDER_HYDRA_EXPORT_METHOD'] = export_method

-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)

@@ -58,6 +58,7 @@ def create_argparse():
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-oiiotool", nargs=1)
     parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
     return parser
@@ -80,7 +81,7 @@ def main():
     if test_dir_name.startswith('hair') and platform.system() == "Darwin":
         report.set_fail_threshold(0.050)
-    ok = report.run(test_dir, blender, get_arguments, batch=args.batch)
+    ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
     sys.exit(not ok)