CMake: Add support for Ninja's job pools to ease building with a limited amount of RAM.

Many modern computers support a lot of threads (parallel build jobs),
but are somewhat restricted in memory, while some build jobs can each
require several GB of RAM.

The Ninja build system has pools, which extend the usual `-j X` make
parallelization option by allowing different numbers of parallel jobs
to be specified for different targets.
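
For illustration, this is roughly how pools are exposed on the CMake
side (a minimal sketch; the pool names and limits below are made up,
not the ones this commit defines):

    # Declare two pools: at most 2 concurrent 'heavy' compile jobs,
    # and a single link job at a time.
    set_property(GLOBAL APPEND PROPERTY JOB_POOLS my_heavy_pool=2 my_link_pool=1)
    # Route all link jobs through the one-slot pool.
    set(CMAKE_JOB_POOL_LINK my_link_pool)
    # Throttle compilation of one hypothetical RAM-hungry target.
    set_property(TARGET some_target PROPERTY JOB_POOL_COMPILE my_heavy_pool)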

This commit defines three pools: one for linking, one for regular
compilation, and one for compiling some 'heavy' C++ libraries, where a
single file can require several GB of RAM in full debug builds.

Simply enabling WITH_NINJA_POOL_JOBS will try to set sensible default
values for those three pools based on your machine's specifications;
you can then further tweak the NINJA_MAX_NUM_PARALLEL_ settings if you
like.
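
For example, a configure run with pools enabled, overriding one of the
auto-detected limits (the source directory path here is hypothetical):

    cmake -G Ninja -DWITH_NINJA_POOL_JOBS=ON \
          -DNINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS=2 ../blender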

On my system (8 cores, 16GB of RAM), it allows building a full debug
build with all ASAN options enabled while using roughly 7GB of RAM at
most, pretty much as quickly as without the option (which would require
up to 11GB of available RAM at some points).

Review task: D4780.
Bastien Montagne 2019-05-07 20:32:14 +02:00
parent 27b9a0cd67
commit 0d43d0bcab
2 changed files with 103 additions and 0 deletions

@@ -565,6 +565,14 @@ if(WIN32)
  mark_as_advanced(WINDOWS_PYTHON_DEBUG)
endif()
# The following only works with the Ninja generator in CMake >= 3.0.
if("${CMAKE_GENERATOR}" MATCHES "Ninja")
option(WITH_NINJA_POOL_JOBS
"Enable Ninja pools of jobs, to try to ease building on machines with 16GB of RAM or less (if not yet defined, will try to set best values based on detected machine specifications)."
OFF)
mark_as_advanced(WITH_NINJA_POOL_JOBS)
endif()
# avoid using again
option_defaults_clear()
@@ -1286,6 +1294,76 @@ if(WITH_LIBMV)
  set(CERES_DEFINES -DCERES_STD_UNORDERED_MAP)
endif()
#-----------------------------------------------------------------------------
# Extra limits to the number of jobs running in parallel for some kinds of tasks.
# Currently only supported by the Ninja build system.
if("${CMAKE_GENERATOR}" MATCHES "Ninja" AND WITH_NINJA_POOL_JOBS)
  if(NOT NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS AND
     NOT NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS AND
     NOT NINJA_MAX_NUM_PARALLEL_LINK_JOBS)
    # Try to define good default values.
    # Max memory usage when compiling heavy cpp files: about 2.5GB.
    # Max memory usage during linking: about 3.3GB.
    cmake_host_system_information(RESULT _NUM_CORES QUERY NUMBER_OF_LOGICAL_CORES)
    # Note: this gives memory in MB.
    cmake_host_system_information(RESULT _TOT_MEM QUERY TOTAL_PHYSICAL_MEMORY)

    # Heuristics: the more cores we have, the more free memory we have to
    # keep for the non-heavy tasks too.
    if(${_TOT_MEM} LESS 8000 AND ${_NUM_CORES} GREATER 2)
      set(_compile_heavy_jobs "1")
    elseif(${_TOT_MEM} LESS 16000 AND ${_NUM_CORES} GREATER 4)
      set(_compile_heavy_jobs "2")
    elseif(${_TOT_MEM} LESS 24000 AND ${_NUM_CORES} GREATER 8)
      set(_compile_heavy_jobs "3")
    elseif(${_TOT_MEM} LESS 32000 AND ${_NUM_CORES} GREATER 16)
      set(_compile_heavy_jobs "4")
    elseif(${_TOT_MEM} LESS 64000 AND ${_NUM_CORES} GREATER 32)
      set(_compile_heavy_jobs "8")
    else()
      set(_compile_heavy_jobs "")
    endif()

    set(NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS "${_compile_heavy_jobs}" CACHE STRING
        "Define the maximum number of concurrent heavy compilation jobs, for the Ninja build system (used for some targets whose cpp files can each take several GB of RAM during compilation)." FORCE)
    mark_as_advanced(NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
    set(_compile_heavy_jobs)

    # Only set regular compile jobs if we set heavy jobs, otherwise the
    # default (using all cores) is fine.
    if(NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
      math(EXPR _compile_jobs "${_NUM_CORES} - 1")
    else()
      set(_compile_jobs "")
    endif()
    set(NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS "${_compile_jobs}" CACHE STRING
        "Define the maximum number of concurrent compilation jobs, for the Ninja build system." FORCE)
    mark_as_advanced(NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS)
    set(_compile_jobs)

    # In practice, even when there is RAM available, this proves to be quicker
    # than running in parallel (due to slow disk accesses).
    set(NINJA_MAX_NUM_PARALLEL_LINK_JOBS "1" CACHE STRING
        "Define the maximum number of concurrent link jobs, for the Ninja build system." FORCE)
    mark_as_advanced(NINJA_MAX_NUM_PARALLEL_LINK_JOBS)

    set(_NUM_CORES)
    set(_TOT_MEM)
  endif()

  if(NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS)
    set_property(GLOBAL APPEND PROPERTY JOB_POOLS compile_job_pool=${NINJA_MAX_NUM_PARALLEL_COMPILE_JOBS})
    set(CMAKE_JOB_POOL_COMPILE compile_job_pool)
  endif()
  if(NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
    set_property(GLOBAL APPEND PROPERTY JOB_POOLS compile_heavy_job_pool=${NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS})
  endif()
  if(NINJA_MAX_NUM_PARALLEL_LINK_JOBS)
    set_property(GLOBAL APPEND PROPERTY JOB_POOLS link_job_pool=${NINJA_MAX_NUM_PARALLEL_LINK_JOBS})
    set(CMAKE_JOB_POOL_LINK link_job_pool)
  endif()
endif()
#-----------------------------------------------------------------------------
# Extra compile flags
@@ -1621,6 +1699,11 @@ endif()
add_subdirectory(tests)
#-----------------------------------------------------------------------------
# Define 'heavy' submodules (for Ninja builder when using pools).
setup_heavy_lib_pool()
#-----------------------------------------------------------------------------
# CPack for generating packages
include(build_files/cmake/packaging.cmake)

@@ -285,6 +285,26 @@ function(blender_add_lib
  set_property(GLOBAL APPEND PROPERTY BLENDER_LINK_LIBS ${name})
endfunction()
# Ninja only: assign 'heavy pool' to some targets that are especially RAM-consuming to build.
function(setup_heavy_lib_pool)
  if(WITH_NINJA_POOL_JOBS AND NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
    if(WITH_CYCLES)
      list(APPEND _HEAVY_LIBS "cycles_device" "cycles_kernel")
    endif()
    if(WITH_LIBMV)
      list(APPEND _HEAVY_LIBS "bf_intern_libmv")
    endif()
    if(WITH_OPENVDB)
      list(APPEND _HEAVY_LIBS "bf_intern_openvdb")
    endif()
    foreach(TARGET ${_HEAVY_LIBS})
      if(TARGET ${TARGET})
        set_property(TARGET ${TARGET} PROPERTY JOB_POOL_COMPILE compile_heavy_job_pool)
      endif()
    endforeach()
  endif()
endfunction()
function(SETUP_LIBDIRS)