Tests: Cache revision builds in benchmark script

In the mode where the script automatically builds Blender at given git
hashes, cache the builds instead of rebuilding them before every run.
Brecht Van Lommel 2024-01-16 15:14:09 +01:00
parent 821b077149
commit 9c8a9662c0
4 changed files with 37 additions and 17 deletions
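
The caching scheme visible in the diff below: each revision the script builds gets its own install directory, and a complete.txt marker inside it records the git hash the build was made from. When the marker already matches the requested hash, the rebuild is skipped; otherwise the stale marker is removed, the revision is rebuilt and installed with CMAKE_INSTALL_PREFIX pointing at that directory, and the marker is rewritten on success. A minimal standalone sketch of that check, assuming a hypothetical build_revision helper and a do_build callback (the real logic lives in TestEnvironment.build below):

import pathlib
from typing import Callable


def build_revision(git_hash: str, install_dir: pathlib.Path,
                   do_build: Callable[[str, pathlib.Path], bool]) -> bool:
    # Marker file recording which git hash this install directory was built from.
    complete_txt = install_dir / "complete.txt"

    # Reuse the cached build if it completed for this exact hash.
    if complete_txt.is_file() and complete_txt.read_text().strip() == git_hash:
        return True

    # Missing or stale marker: (re)build, and write the marker only on success.
    install_dir.mkdir(parents=True, exist_ok=True)
    if not do_build(git_hash, install_dir):
        return False
    complete_txt.write_text(git_hash)
    return True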

@@ -53,7 +53,6 @@ class TestQueue:
     def __init__(self, filepath: pathlib.Path):
         self.filepath = filepath
-        self.has_multiple_revisions_to_build = False
         self.has_multiple_categories = False
         self.entries = []
@@ -117,6 +116,7 @@ class TestConfig:
         self.name = name
         self.base_dir = env.base_dir / name
         self.logs_dir = self.base_dir / 'logs'
+        self.builds_dir = self.base_dir / 'builds'
         config = TestConfig._read_config_module(self.base_dir)
         self.tests = TestCollection(env,
@@ -206,13 +206,6 @@ class TestConfig:
             date = env.git_hash_date(git_hash)
             entries += self._get_entries(revision_name, git_hash, '', environment, date)
-        # Optimization to avoid rebuilds.
-        revisions_to_build = set()
-        for entry in entries:
-            if entry.status in {'queued', 'outdated'}:
-                revisions_to_build.add(entry.git_hash)
-        self.queue.has_multiple_revisions_to_build = len(revisions_to_build) > 1
         # Get entries for revisions based on existing builds.
         for revision_name, executable in self.builds.items():
             executable, environment = self._split_environment_variables(executable)

@@ -24,6 +24,7 @@ class TestEnvironment:
         self.base_dir = base_dir
         self.blender_dir = base_dir / 'blender'
         self.build_dir = base_dir / 'build'
+        self.install_dir = self.build_dir / "bin"
         self.lib_dir = base_dir / 'lib'
         self.benchmarks_dir = self.blender_git_dir.parent / 'lib' / 'benchmarks'
         self.git_executable = 'git'
@@ -81,7 +82,7 @@ class TestEnvironment:
         print('Done')
-    def checkout(self, git_hash) -> None:
+    def checkout(self, git_hash: str) -> None:
         # Checkout Blender revision
         if not self.blender_dir.exists():
             sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
@@ -91,16 +92,34 @@ class TestEnvironment:
         self.call([self.git_executable, 'reset', '--hard', 'HEAD'], self.blender_dir)
         self.call([self.git_executable, 'checkout', '--detach', git_hash], self.blender_dir)
-    def build(self) -> bool:
+    def build(self, git_hash: str, install_dir: pathlib.Path) -> bool:
         # Build Blender revision
         if not self.build_dir.exists():
             sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
             sys.exit(1)
+        # Skip if build with same hash is already done.
+        if install_dir.resolve() != self.install_dir.resolve():
+            complete_txt = pathlib.Path(install_dir) / "complete.txt"
+            if complete_txt.is_file():
+                if complete_txt.read_text().strip() == git_hash:
+                    self._init_default_blender_executable()
+                    return True
+                # Different hash, build again.
+                complete_txt.unlink()
+        else:
+            complete_txt = None
+        self.checkout(git_hash)
         jobs = str(multiprocessing.cpu_count())
+        cmake_options = list(self.cmake_options)
+        cmake_options += [f"-DCMAKE_INSTALL_PREFIX={install_dir}"]
         try:
-            self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
+            self.call([self.cmake_executable, '.'] + cmake_options, self.build_dir)
             self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
+            if complete_txt:
+                complete_txt.write_text(git_hash)
         except KeyboardInterrupt as e:
             raise e
         except:
@@ -110,6 +129,9 @@ class TestEnvironment:
         return True
     def set_blender_executable(self, executable_path: pathlib.Path, environment: Dict = {}) -> None:
+        if executable_path.is_dir():
+            executable_path = self._blender_executable_from_path(executable_path)
         # Run all Blender commands with this executable.
         self.blender_executable = executable_path
         self.blender_executable_environment = environment
@@ -138,7 +160,7 @@ class TestEnvironment:
     def _init_default_blender_executable(self) -> None:
         # Find a default executable to run commands independent of testing a specific build.
         # Try own built executable.
-        built_executable = self._blender_executable_from_path(self.build_dir / 'bin')
+        built_executable = self._blender_executable_from_path(self.install_dir)
         if built_executable:
             self.default_blender_executable = built_executable
             return

@@ -52,6 +52,8 @@ class TestGraph:
                     outputs.add(output)
            chart_type = 'line' if entries[0].benchmark_type == 'time_series' else 'comparison'
+            if chart_type == 'comparison':
+                entries = sorted(entries, key=lambda entry: (entry.revision, entry.test))
            for output in outputs:
                chart_name = f"{category} ({output})"

@@ -32,8 +32,7 @@ def get_tests_base_dir(blender_git_dir: pathlib.Path) -> pathlib.Path:
 def use_revision_columns(config: api.TestConfig) -> bool:
     return (
         config.benchmark_type == "comparison" and
-        len(config.queue.entries) > 0 and
-        not config.queue.has_multiple_revisions_to_build
+        len(config.queue.entries) > 0
     )
@@ -135,13 +134,17 @@ def run_entry(env: api.TestEnvironment,
     if len(entry.executable):
         env.set_blender_executable(pathlib.Path(entry.executable), environment)
     else:
-        env.checkout(git_hash)
-        executable_ok = env.build()
+        if config.benchmark_type == "comparison":
+            install_dir = config.builds_dir / revision
+        else:
+            install_dir = env.install_dir
+        executable_ok = env.build(git_hash, install_dir)
         if not executable_ok:
             entry.status = 'failed'
             entry.error_msg = 'Failed to build'
         else:
-            env.set_blender_executable(env.blender_executable, environment)
+            env.set_blender_executable(install_dir, environment)
     # Run test and update output and status.
     if executable_ok: