Merge branch 'master' of https://gitlab.kitware.com/vtk/vtk-m into sync_streamlines

Dave Pugmire 2023-02-10 14:50:12 -05:00
commit c8bdb50a95
338 changed files with 4532 additions and 2748 deletions

@@ -38,7 +38,7 @@
- .docker_image
.ubuntu1804_cuda_kokkos: &ubuntu1804_cuda_kokkos
image: "kitware/vtkm:ci-ubuntu1804_cuda11_kokkos-20220407"
image: "kitware/vtkm:ci-ubuntu1804_cuda11_kokkos-20230125"
extends:
- .docker_image
@@ -53,12 +53,12 @@
- .docker_image
.ubuntu2004_kokkos: &ubuntu2004_kokkos
image: "kitware/vtkm:ci-ubuntu2004_kokkos-20210916"
image: "kitware/vtkm:ci-ubuntu2004_kokkos-20230125"
extends:
- .docker_image
.ubuntu2004_hip_kokkos: &ubuntu2004_hip_kokkos
image: "kitware/vtkm:ci-ubuntu2004_hip_kokkos-20220620"
image: "kitware/vtkm:ci-ubuntu2004_hip_kokkos-20230125"
extends:
- .docker_image

@@ -6,8 +6,7 @@ build:centos7_gcc73:
- build
- vtkm
- docker
- linux
- cuda-rt
- linux-x86_64
- large-memory
extends:
- .centos7
@@ -24,7 +23,7 @@ test:centos7_gcc73:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- turing
extends:
@@ -42,7 +41,7 @@ test:rhel8_test_centos7:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- turing
extends:

@@ -6,7 +6,7 @@ build:centos8_sanitizer:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .centos8
- .cmake_build_linux
@@ -21,7 +21,7 @@ test:centos8_sanitizer:
- test
- vtkm
- docker
- linux
- linux-x86_64
- privileged
extends:
- .centos8

@@ -25,13 +25,14 @@ RUN mkdir /opt/cmake/ && \
ENV PATH "/opt/cmake/bin:${PATH}"
# Build and install Kokkos
ARG KOKKOS_VERSION=3.7.01
RUN mkdir -p /opt/kokkos/build && \
cd /opt/kokkos/build && \
curl -L https://github.com/kokkos/kokkos/archive/refs/tags/3.4.01.tar.gz > kokkos-3.4.01.tar.gz && \
tar -xf kokkos-3.4.01.tar.gz && \
curl -L https://github.com/kokkos/kokkos/archive/refs/tags/$KOKKOS_VERSION.tar.gz > kokkos-$KOKKOS_VERSION.tar.gz && \
tar -xf kokkos-$KOKKOS_VERSION.tar.gz && \
mkdir bld && cd bld && \
CXX=/opt/kokkos/build/kokkos-3.4.01/bin/nvcc_wrapper \
cmake -B . -S ../kokkos-3.4.01 \
CXX=/opt/kokkos/build/kokkos-$KOKKOS_VERSION/bin/nvcc_wrapper \
cmake -B . -S ../kokkos-$KOKKOS_VERSION \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/opt/kokkos \
-DCMAKE_CXX_FLAGS=-fPIC \

@@ -44,7 +44,7 @@ ENV CMAKE_PREFIX_PATH "/opt/rocm/lib/cmake:/opt/rocm/lib:${CMAKE_PREFIX_PATH}"
ENV CMAKE_GENERATOR "Ninja"
# Build and install Kokkos
ARG KOKKOS_VERSION=3.6.00
ARG KOKKOS_VERSION=3.7.01
COPY kokkos_cmake_config.cmake kokkos_cmake_config.cmake
RUN curl -L https://github.com/kokkos/kokkos/archive/refs/tags/$KOKKOS_VERSION.tar.gz | tar -xzf - && \
cmake -S kokkos-$KOKKOS_VERSION -B build -C kokkos_cmake_config.cmake && \

@@ -31,11 +31,12 @@ RUN mkdir /opt/cmake/ && \
ENV PATH "${PATH}:/opt/cmake/bin"
# Build and install Kokkos
ARG KOKKOS_VERSION=3.7.01
RUN mkdir -p /opt/kokkos/build && \
cd /opt/kokkos/build && \
curl -L https://github.com/kokkos/kokkos/archive/refs/tags/3.4.01.tar.gz > kokkos-3.4.01.tar.gz && \
tar -xf kokkos-3.4.01.tar.gz && \
curl -L https://github.com/kokkos/kokkos/archive/refs/tags/$KOKKOS_VERSION.tar.gz > kokkos-$KOKKOS_VERSION.tar.gz && \
tar -xf kokkos-$KOKKOS_VERSION.tar.gz && \
mkdir bld && cd bld && \
cmake -GNinja -DCMAKE_INSTALL_PREFIX=/opt/kokkos -DCMAKE_CXX_FLAGS=-fPIC -DKokkos_ENABLE_SERIAL=ON ../kokkos-3.4.01 &&\
cmake -GNinja -DCMAKE_INSTALL_PREFIX=/opt/kokkos -DCMAKE_CXX_FLAGS=-fPIC -DKokkos_ENABLE_SERIAL=ON ../kokkos-$KOKKOS_VERSION &&\
ninja all && \
ninja install

@@ -1,9 +1,10 @@
.doxygen_submit: &doxygen_submit
stage: build
tags:
- build
- vtkm
- docker
- linux
- linux-x86_64
before_script:
- "cmake -V -P .gitlab/ci/config/gitlab_ci_setup.cmake"
- "ctest -VV -S .gitlab/ci/ctest_configure.cmake"

@@ -79,6 +79,6 @@ test:macos_xcode13:
.macos_build_tags:
tags:
- vtk-m
- macos
- macos-x86_64
- xcode-13.3
- nonconcurrent

@@ -6,7 +6,7 @@ build:rhel8:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .rhel8
- .cmake_build_linux
@@ -20,7 +20,7 @@ test:rhel8:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .rhel8
- .cmake_test_linux
@@ -37,7 +37,7 @@ build:rhel8_vtk_types:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .rhel8
- .cmake_build_linux
@@ -51,7 +51,7 @@ test:rhel8_vtk_types:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .rhel8
- .cmake_test_linux

@@ -6,8 +6,7 @@ build:ubuntu1604_gcc5:
- build
- vtkm
- docker
- linux
- cuda-rt
- linux-x86_64
- large-memory
extends:
- .ubuntu1604_cuda
@@ -25,7 +24,7 @@ test:ubuntu1604_gcc5:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- pascal
extends:
@@ -45,8 +44,7 @@ build:ubuntu1604_gcc5_2:
- build
- vtkm
- docker
- linux
- cuda-rt
- linux-x86_64
- large-memory
extends:
- .ubuntu1604_cuda
@@ -64,7 +62,7 @@ test:ubuntu1804_test_ubuntu1604_gcc5_2:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- pascal
extends:
@@ -86,7 +84,7 @@ build:ubuntu1604_clang5:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1604
- .cmake_build_linux
@@ -103,7 +101,7 @@ test:ubuntu1604_clang5:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1604
- .cmake_test_linux

@@ -7,7 +7,7 @@ build:ubuntu1804_gcc9:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804
- .cmake_build_linux
@@ -23,7 +23,7 @@ test:ubuntu1804_gcc9:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804
- .cmake_test_linux
@@ -45,8 +45,7 @@ build:ubuntu1804_gcc7:
- build
- vtkm
- docker
- linux
- cuda-rt
- linux-x86_64
- large-memory
extends:
- .ubuntu1804_cuda
@@ -63,7 +62,7 @@ test:ubuntu1804_gcc7:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- turing
extends:
@@ -84,8 +83,7 @@ build:ubuntu1804_clang_cuda:
- build
- vtkm
- docker
- linux
- cuda-rt
- linux-x86_64
- large-memory
extends:
- .ubuntu1804_cuda
@@ -103,7 +101,7 @@ test:ubuntu1804_clang_cuda:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- pascal
extends:
@@ -123,7 +121,7 @@ build:ubuntu1804_gcc6:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804
- .cmake_build_linux
@@ -138,7 +136,7 @@ test:ubuntu1804_gcc6:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804
- .cmake_test_linux
@@ -159,7 +157,7 @@ build:ubuntu1804_clang8:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804
- .cmake_build_linux
@@ -175,7 +173,7 @@ test:ubuntu1804_clang8:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804
- .cmake_test_linux
@@ -192,8 +190,7 @@ build:ubuntu1804_kokkos:
- build
- vtkm
- docker
- linux
- cuda-rt
- linux-x86_64
- large-memory
extends:
- .ubuntu1804_cuda_kokkos
@@ -209,7 +206,7 @@ test:ubuntu1804_kokkos:
- test
- vtkm
- docker
- linux
- linux-x86_64
- cuda-rt
- turing
extends:
@@ -226,7 +223,7 @@ build:ubuntu1804_cuda_perftest:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu1804_cuda
- .cmake_build_linux
@@ -241,7 +238,7 @@ test:ubuntu1804_cuda_perftest:
- vtkm
- docker
- cuda-rt
- linux
- linux-x86_64
extends:
- .ubuntu1804_cuda
- .cmake_test_linux

@@ -3,7 +3,7 @@ build:ubuntu2004_gcc9:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu2004
- .cmake_build_linux
@@ -17,7 +17,7 @@ test:ubuntu2004_gcc9:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu2004
- .cmake_test_linux
@@ -36,7 +36,7 @@ build:ubuntu2004_kokkos:
- build
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu2004_kokkos
- .cmake_build_linux
@@ -50,7 +50,7 @@ test:ubuntu2004_kokkos:
- test
- vtkm
- docker
- linux
- linux-x86_64
extends:
- .ubuntu2004_kokkos
- .cmake_test_linux
@@ -65,6 +65,7 @@ build:ubuntu2004_hip_kokkos:
- build
- vtkm
- docker
- linux-x86_64
- radeon
extends:
- .ubuntu2004_hip_kokkos
@@ -95,6 +96,7 @@ test:ubuntu2004_hip_kokkos:
- build
- vtkm
- docker
- linux-x86_64
- radeon
extends:
- .ubuntu2004_hip_kokkos
@@ -118,6 +120,8 @@ build:ascent:
tags:
- vtkm
- docker
- build
- linux-x86_64
extends:
- .ubuntu2004
- .run_only_merge_requests

@@ -90,7 +90,7 @@ build:windows_vs2019:
- vtkm # Since this is a bare runner, pin to a project.
- nonconcurrent
- build
- windows
- windows-x86_64
- shell
- vs2019
- msvc-19.25
@@ -110,7 +110,7 @@ test:windows_vs2019:
- vtkm # Since this is a bare runner, pin to a project.
- nonconcurrent
- test
- windows
- windows-x86_64
- shell
- vs2019
- msvc-19.25

@@ -62,6 +62,12 @@ if(TARGET vtkm_vectorization_flags)
endif()
add_library(vtkm_vectorization_flags INTERFACE)
set_target_properties(
vtkm_vectorization_flags
PROPERTIES
EXPORT_NAME vectorization_flags
)
if(NOT VTKm_INSTALL_ONLY_LIBRARIES)
install(TARGETS vtkm_vectorization_flags EXPORT ${VTKm_EXPORT_NAME})
endif()
@@ -193,7 +199,7 @@ target_compile_options(vtkm_vectorization_flags
INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${flags}>
)
if(TARGET vtkm::cuda AND flags AND NOT CMAKE_CUDA_HOST_COMPILER)
if(TARGET vtkm_cuda AND flags AND NOT CMAKE_CUDA_HOST_COMPILER)
# Also propagate down these optimizations when building host side code
# with cuda. To be safe we only do this when we know the C++ and CUDA
# host compilers are from the same vendor

@@ -30,6 +30,11 @@ endif()
# vtkm_compiler_flags is used by all the vtkm targets and consumers of VTK-m
# The flags on vtkm_compiler_flags are needed when using/building vtk-m
add_library(vtkm_compiler_flags INTERFACE)
set_target_properties(
vtkm_compiler_flags
PROPERTIES
EXPORT_NAME compiler_flags
)
# When building libraries/tests that are part of the VTK-m repository
# inherit the properties from vtkm_vectorization_flags.
@@ -50,12 +55,12 @@ target_compile_features(vtkm_compiler_flags INTERFACE cxx_std_14)
# they don't use.
if(VTKM_COMPILER_IS_MSVC)
target_compile_options(vtkm_compiler_flags INTERFACE $<$<COMPILE_LANGUAGE:CXX>:/Gy>)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
target_compile_options(vtkm_compiler_flags INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler="/Gy">)
endif()
elseif(NOT (VTKM_COMPILER_IS_PGI OR VTKM_COMPILER_IS_XL)) #can't find an equivalent PGI/XL flag
target_compile_options(vtkm_compiler_flags INTERFACE $<$<COMPILE_LANGUAGE:CXX>:-ffunction-sections>)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
target_compile_options(vtkm_compiler_flags INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-ffunction-sections>)
endif()
endif()
@@ -63,7 +68,7 @@ endif()
# Enable large object support so we can have 2^32 addressable sections
if(VTKM_COMPILER_IS_MSVC)
target_compile_options(vtkm_compiler_flags INTERFACE $<$<COMPILE_LANGUAGE:CXX>:/bigobj>)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
target_compile_options(vtkm_compiler_flags INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler="/bigobj">)
endif()
endif()
@@ -79,6 +84,11 @@ target_include_directories(vtkm_compiler_flags INTERFACE
# vtkm_developer_flags is used ONLY BY libraries that are built as part of this
# repository
add_library(vtkm_developer_flags INTERFACE)
set_target_properties(
vtkm_developer_flags
PROPERTIES
EXPORT_NAME developer_flags
)
# Additional warnings just for Clang 3.5+, and AppleClang 7+
# about failures to vectorize.
@@ -101,7 +111,7 @@ if(VTKM_COMPILER_IS_MSVC)
#Setup MSVC warnings with CUDA and CXX
target_compile_options(vtkm_developer_flags INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
target_compile_options(vtkm_developer_flags INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:${cuda_flags} -Xcudafe=--diag_suppress=1394,--diag_suppress=766>)
endif()
@@ -109,7 +119,7 @@ if(VTKM_COMPILER_IS_MSVC)
# In VS2013 the C4127 warning has a bug in the implementation and
# generates false positive warnings for lots of template code
target_compile_options(vtkm_developer_flags INTERFACE $<$<COMPILE_LANGUAGE:CXX>:-wd4127>)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
target_compile_options(vtkm_developer_flags INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-wd4127>)
endif()
endif()
@@ -165,7 +175,7 @@ elseif(VTKM_COMPILER_IS_GNU OR VTKM_COMPILER_IS_CLANG)
endif()
target_compile_options(vtkm_developer_flags INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
target_compile_options(vtkm_developer_flags INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:${cuda_flags}>)
endif()
endif()
@@ -176,10 +186,10 @@ function(setup_cuda_flags)
endfunction()
#common warnings for all platforms when building cuda
if ((TARGET vtkm::cuda) OR (TARGET vtkm::kokkos_cuda))
if ((TARGET vtkm_cuda) OR (TARGET vtkm_kokkos_cuda))
setup_cuda_flags()
endif()
if(NOT VTKm_INSTALL_ONLY_LIBRARIES)
install(TARGETS vtkm_compiler_flags vtkm_developer_flags EXPORT ${VTKm_EXPORT_NAME})
vtkm_install_targets(TARGETS vtkm_compiler_flags vtkm_developer_flags)
endif()

@@ -10,22 +10,22 @@
# When this file is run by CMake through the find_package command, the
# following targets will exist:
# vtkm_cont Target that contains most of VTK-m
# vtkm::cont Target that contains most of VTK-m
#
# vtkm_rendering Target that contains all the rendering code
# vtkm::rendering Target that contains all the rendering code
#
# vtkm_filter Target that contains all of VTK-m pre-built filters
# vtkm::filter Target that contains all of VTK-m pre-built filters
#
# vtkm_source Target that contains all of VTK-m pre-built sources
# vtkm::source Target that contains all of VTK-m pre-built sources
#
# vtkm::tbb Target that contains tbb related link information
# implicitly linked to by `vtkm_cont` if tbb is enabled
#
# vtkm::openmp Target that contains openmp related link information
# implicitly linked to by `vtkm_cont` if openmp is enabled
#
# vtkm::cuda Target that contains cuda related link information
# implicitly linked to by `vtkm_cont` if cuda is enabled
#
# The following local variables will also be defined:
#
@@ -47,7 +47,6 @@
# VTKm_ENABLE_OSMESA_CONTEXT Will be enabled if VTK-m rendering was built with an OSMesa context
# VTKm_ENABLE_EGL_CONTEXT Will be enabled if VTK-m rendering was built with an EGL context
#
#
if (CMAKE_VERSION VERSION_LESS "3.12")
message(FATAL_ERROR "VTK-m requires CMake 3.12+")
@@ -78,6 +77,7 @@ set(VTKm_ENABLE_OSMESA_CONTEXT "@VTKm_ENABLE_OSMESA_CONTEXT@")
set(VTKm_ENABLE_EGL_CONTEXT "@VTKm_ENABLE_EGL_CONTEXT@")
set(VTKm_ENABLE_MPI "@VTKm_ENABLE_MPI@")
set(VTKm_ENABLE_TESTING_LIBRARY "@VTKm_ENABLE_TESTING_LIBRARY@")
set(VTKm_USE_DEFAULT_TYPES_FOR_ASCENT "@VTKm_USE_DEFAULT_TYPES_FOR_ASCENT@")
# This is true when the package is still in the build directory (not installed)
if(CMAKE_CURRENT_LIST_DIR STREQUAL "@VTKm_BUILD_CMAKE_BASE_DIR@/@VTKm_INSTALL_CONFIG_DIR@")
@@ -109,11 +109,20 @@ set(VTKM_FROM_INSTALL_DIR FALSE)
if(NOT "${CMAKE_BINARY_DIR}" STREQUAL "@VTKm_BINARY_DIR@")
set(VTKM_FROM_INSTALL_DIR TRUE)
include(${VTKm_CONFIG_DIR}/VTKmTargets.cmake)
if(DEFINED PACKAGE_FIND_VERSION AND PACKAGE_FIND_VERSION VERSION_LESS 2.0)
add_library(vtkm_cont ALIAS vtkm::cont)
add_library(vtkm_filter ALIAS vtkm::filter)
add_library(vtkm_io ALIAS vtkm::io)
add_library(vtkm_rendering ALIAS vtkm::rendering)
add_library(vtkm_source ALIAS vtkm::source)
add_library(vtkm_worklet ALIAS vtkm::worklet)
endif()
endif()
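# A consumer-side sketch (the `example` target is hypothetical) of what the
# aliases above enable: projects written against pre-2.0 VTK-m can request an
# older version and keep linking the old non-namespaced names.
#   find_package(VTKm 1.9 REQUIRED)
#   target_link_libraries(example PRIVATE vtkm_cont vtkm_rendering)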
# Once we can require CMake 3.15 for all cuda builds we can
# replace this with setting `cuda_architecture_flags` as part of the
# EXPORT_PROPERTIES of the vtkm_cuda target
# EXPORT_PROPERTIES of the vtkm::cuda target
if(VTKm_ENABLE_CUDA AND VTKM_FROM_INSTALL_DIR)
# Canonical way of setting CUDA arch
if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18)

@@ -8,6 +8,14 @@
## PURPOSE. See the above copyright notice for more information.
##============================================================================
macro(vtkm_diy_get_general_target target)
if(PROJECT_NAME STREQUAL "VTKm" OR CMAKE_PROJECT_NAME STREQUAL "VTKm")
set(${target} "vtkm_diy")
else()
set(${target} "vtkm::diy")
endif()
endmacro()
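# For example (following the branches above), inside the VTK-m build itself
#   vtkm_diy_get_general_target(diy_target)   # diy_target == "vtkm_diy"
# while in a project consuming an installed VTK-m
#   vtkm_diy_get_general_target(diy_target)   # diy_target == "vtkm::diy"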
macro(_vtkm_diy_target flag target)
set(${target} "vtkmdiympi")
if (NOT ${flag})
@@ -19,7 +27,8 @@ function(vtkm_diy_init_target)
set(vtkm_diy_default_flag "${VTKm_ENABLE_MPI}")
_vtkm_diy_target(vtkm_diy_default_flag vtkm_diy_default_target)
set_target_properties(vtkm_diy PROPERTIES
vtkm_diy_get_general_target(diy_target)
set_target_properties(${diy_target} PROPERTIES
vtkm_diy_use_mpi_stack ${vtkm_diy_default_flag}
vtkm_diy_target ${vtkm_diy_default_target})
endfunction()
@@ -30,30 +39,33 @@ function(vtkm_diy_use_mpi_push)
if (NOT ARGC EQUAL 0)
set(topval ${ARGV0})
endif()
get_target_property(stack vtkm_diy vtkm_diy_use_mpi_stack)
vtkm_diy_get_general_target(diy_target)
get_target_property(stack ${diy_target} vtkm_diy_use_mpi_stack)
list (APPEND stack ${topval})
_vtkm_diy_target(topval target)
set_target_properties(vtkm_diy PROPERTIES
set_target_properties(${diy_target} PROPERTIES
vtkm_diy_use_mpi_stack "${stack}"
vtkm_diy_target "${target}")
endfunction()
function(vtkm_diy_use_mpi value)
get_target_property(stack vtkm_diy vtkm_diy_use_mpi_stack)
vtkm_diy_get_general_target(diy_target)
get_target_property(stack ${diy_target} vtkm_diy_use_mpi_stack)
list (REMOVE_AT stack -1)
list (APPEND stack ${value})
_vtkm_diy_target(value target)
set_target_properties(vtkm_diy PROPERTIES
set_target_properties(${diy_target} PROPERTIES
vtkm_diy_use_mpi_stack "${stack}"
vtkm_diy_target "${target}")
endfunction()
function(vtkm_diy_use_mpi_pop)
get_target_property(stack vtkm_diy vtkm_diy_use_mpi_stack)
vtkm_diy_get_general_target(diy_target)
get_target_property(stack ${diy_target} vtkm_diy_use_mpi_stack)
list (GET stack -1 value)
list (REMOVE_AT stack -1)
_vtkm_diy_target(value target)
set_target_properties(vtkm_diy PROPERTIES
set_target_properties(${diy_target} PROPERTIES
vtkm_diy_use_mpi_stack "${stack}"
vtkm_diy_target "${target}")
endfunction()

@@ -41,38 +41,39 @@ function(vtkm_extract_real_library library real_library)
endif()
endfunction()
if(VTKm_ENABLE_TBB AND NOT TARGET vtkm::tbb)
if(VTKm_ENABLE_TBB AND NOT (TARGET vtkm_tbb OR TARGET vtkm::tbb))
# Skip find_package(TBB) if we already have it
if (NOT TARGET TBB::tbb)
find_package(TBB REQUIRED)
endif()
add_library(vtkmTBB INTERFACE)
add_library(vtkm::tbb ALIAS vtkmTBB)
target_link_libraries(vtkmTBB INTERFACE TBB::tbb)
target_compile_definitions(vtkmTBB INTERFACE "TBB_VERSION_MAJOR=${TBB_VERSION_MAJOR}")
set_target_properties(vtkmTBB PROPERTIES EXPORT_NAME vtkm::tbb)
install(TARGETS vtkmTBB EXPORT ${VTKm_EXPORT_NAME})
add_library(vtkm_tbb INTERFACE)
target_link_libraries(vtkm_tbb INTERFACE TBB::tbb)
target_compile_definitions(vtkm_tbb INTERFACE "TBB_VERSION_MAJOR=${TBB_VERSION_MAJOR}")
set_target_properties(vtkm_tbb PROPERTIES EXPORT_NAME tbb)
install(TARGETS vtkm_tbb EXPORT ${VTKm_EXPORT_NAME})
endif()
if(VTKm_ENABLE_OPENMP AND NOT TARGET vtkm::openmp)
if(VTKm_ENABLE_OPENMP AND NOT (TARGET vtkm_openmp OR TARGET vtkm::openmp))
find_package(OpenMP 4.0 REQUIRED COMPONENTS CXX QUIET)
add_library(vtkm::openmp INTERFACE IMPORTED GLOBAL)
add_library(vtkm_openmp INTERFACE)
set_target_properties(vtkm_openmp PROPERTIES EXPORT_NAME openmp)
if(OpenMP_CXX_FLAGS)
set_property(TARGET vtkm::openmp
set_property(TARGET vtkm_openmp
APPEND PROPERTY INTERFACE_COMPILE_OPTIONS $<$<COMPILE_LANGUAGE:CXX>:${OpenMP_CXX_FLAGS}>)
if(VTKm_ENABLE_CUDA)
string(REPLACE ";" "," openmp_cuda_flags "-Xcompiler=${OpenMP_CXX_FLAGS}")
set_property(TARGET vtkm::openmp
set_property(TARGET vtkm_openmp
APPEND PROPERTY INTERFACE_COMPILE_OPTIONS $<$<COMPILE_LANGUAGE:CUDA>:${openmp_cuda_flags}>)
endif()
endif()
if(OpenMP_CXX_LIBRARIES)
set_target_properties(vtkm::openmp PROPERTIES
set_target_properties(vtkm_openmp PROPERTIES
INTERFACE_LINK_LIBRARIES "${OpenMP_CXX_LIBRARIES}")
endif()
install(TARGETS vtkm_openmp EXPORT ${VTKm_EXPORT_NAME})
endif()
if(VTKm_ENABLE_CUDA)
@@ -84,10 +85,9 @@ if(VTKm_ENABLE_CUDA)
message(FATAL_ERROR "VTK-m CUDA support requires version 9.2+")
endif()
if(NOT TARGET vtkm::cuda)
if(NOT (TARGET vtkm_cuda OR TARGET vtkm::cuda))
add_library(vtkm_cuda INTERFACE)
add_library(vtkm::cuda ALIAS vtkm_cuda)
set_target_properties(vtkm_cuda PROPERTIES EXPORT_NAME vtkm::cuda)
set_target_properties(vtkm_cuda PROPERTIES EXPORT_NAME cuda)
install(TARGETS vtkm_cuda EXPORT ${VTKm_EXPORT_NAME})
# Reserve `requires_static_builds` to potentially work around issues
@@ -313,16 +313,16 @@ function(kokkos_fix_compile_options)
endforeach()
endwhile()
set_property(TARGET vtkm::kokkos PROPERTY INTERFACE_LINK_OPTIONS "$<DEVICE_LINK:${cuda_arch}>")
set_property(TARGET vtkm_kokkos PROPERTY INTERFACE_LINK_OPTIONS "$<DEVICE_LINK:${cuda_arch}>")
if (OPENMP IN_LIST Kokkos_DEVICES)
set_property(TARGET vtkm::kokkos PROPERTY INTERFACE_LINK_OPTIONS "$<HOST_LINK:-fopenmp>")
set_property(TARGET vtkm_kokkos PROPERTY INTERFACE_LINK_OPTIONS "$<HOST_LINK:-fopenmp>")
endif()
endfunction()
if(VTKm_ENABLE_KOKKOS AND NOT TARGET vtkm::kokkos)
if(VTKm_ENABLE_KOKKOS AND NOT TARGET vtkm_kokkos)
cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
find_package(Kokkos REQUIRED)
find_package(Kokkos 3.7 REQUIRED)
# We must empty this property for every kokkos backend device since it
# contains a generator expression which breaks some of our users' builds.
@@ -344,19 +344,25 @@ if(VTKm_ENABLE_KOKKOS AND NOT TARGET vtkm::kokkos)
set(CMAKE_CUDA_ARCHITECTURES ${cuda_arch})
message(STATUS "Detected Cuda arch from Kokkos: ${cuda_arch}")
add_library(vtkm::kokkos_cuda INTERFACE IMPORTED GLOBAL)
add_library(vtkm_kokkos_cuda INTERFACE)
set_property(TARGET vtkm_kokkos_cuda PROPERTY EXPORT_NAME kokkos_cuda)
install(TARGETS vtkm_kokkos_cuda EXPORT ${VTKm_EXPORT_NAME})
elseif(HIP IN_LIST Kokkos_DEVICES)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
enable_language(HIP)
add_library(vtkm::kokkos_hip INTERFACE IMPORTED GLOBAL)
set_property(TARGET Kokkos::kokkoscore PROPERTY INTERFACE_COMPILE_OPTIONS "")
set_property(TARGET Kokkos::kokkoscore PROPERTY INTERFACE_LINK_OPTIONS "")
set_target_properties(Kokkos::kokkoscore PROPERTIES
INTERFACE_COMPILE_OPTIONS ""
INTERFACE_LINK_OPTIONS ""
)
add_library(vtkm_kokkos_hip INTERFACE)
set_property(TARGET vtkm_kokkos_hip PROPERTY EXPORT_NAME kokkos_hip)
install(TARGETS vtkm_kokkos_hip EXPORT ${VTKm_EXPORT_NAME})
endif()
add_library(vtkm::kokkos INTERFACE IMPORTED GLOBAL)
set_target_properties(vtkm::kokkos PROPERTIES INTERFACE_LINK_LIBRARIES "Kokkos::kokkos")
add_library(vtkm_kokkos INTERFACE IMPORTED GLOBAL)
set_target_properties(vtkm_kokkos PROPERTIES INTERFACE_LINK_LIBRARIES "Kokkos::kokkos")
if (TARGET vtkm::kokkos_cuda)
if (TARGET vtkm_kokkos_cuda)
kokkos_fix_compile_options()
endif()
endif()

@@ -92,10 +92,9 @@ endif()
#-----------------------------------------------------------------------------
if(VTKm_ENABLE_RENDERING AND NOT TARGET vtkm_rendering_gl_context)
add_library(vtkm_rendering_gl_context INTERFACE)
if(NOT VTKm_INSTALL_ONLY_LIBRARIES)
install(TARGETS vtkm_rendering_gl_context
EXPORT ${VTKm_EXPORT_NAME}
)
vtkm_install_targets(TARGETS vtkm_rendering_gl_context)
endif()
endif()

@@ -197,7 +197,7 @@ endfunction()
#
# If VTK-m was built with CMake 3.18+ and you are using CMake 3.18+ and have
# a cmake_minimum_required of 3.18 or have set policy CMP0105 to new, this will
# return an empty string as the `vtkm::cuda` target will correctly propagate
# return an empty string as the `vtkm_cuda` target will correctly propagate
# all the necessary flags.
#
# This is required for CMake < 3.18 as they don't support the `$<DEVICE_LINK>`
@@ -211,11 +211,11 @@ endfunction()
#
function(vtkm_get_cuda_flags settings_var)
if(TARGET vtkm::cuda)
if(TARGET vtkm_cuda)
if(POLICY CMP0105)
cmake_policy(GET CMP0105 does_device_link)
get_property(arch_flags
TARGET vtkm::cuda
TARGET vtkm_cuda
PROPERTY INTERFACE_LINK_OPTIONS)
if(arch_flags AND CMP0105 STREQUAL "NEW")
return()
@@ -223,7 +223,7 @@ function(vtkm_get_cuda_flags settings_var)
endif()
get_property(arch_flags
TARGET vtkm::cuda
TARGET vtkm_cuda
PROPERTY cuda_architecture_flags)
set(${settings_var} "${${settings_var}} ${arch_flags}" PARENT_SCOPE)
endif()
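# A usage sketch (`my_cuda_settings` is a hypothetical variable name):
#   set(my_cuda_settings "")
#   vtkm_get_cuda_flags(my_cuda_settings)
#   # my_cuda_settings now carries the CUDA architecture flags recorded on the
#   # vtkm_cuda target, or stays unchanged when the CMP0105/device-link path applies.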
@@ -259,6 +259,66 @@ function(vtkm_add_drop_unused_function_flags uses_vtkm_target)
endif()
endfunction()
#-----------------------------------------------------------------------------
# This macro takes a target name and stores in the given output variable the
# mangled version of its name, in a form that complies with the VTK-m export
# target naming scheme.
macro(vtkm_target_mangle output target)
string(REPLACE "vtkm_" "" ${output} ${target})
endmacro()
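# For example, `vtkm_target_mangle(mangled vtkm_rendering)` sets `mangled` to
# "rendering", which the export step then exposes under the `vtkm::` namespace
# as `vtkm::rendering`.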
#-----------------------------------------------------------------------------
# Enable VTK-m targets to be installed.
#
# This function wraps the CMake `install` function, mangling the exported
# target names to comply with the VTK-m exported target naming scheme. Use this
# function instead of the canonical CMake `install` function for VTK-m targets.
#
# Signature:
# vtkm_install_targets(
# TARGETS <target[s]>
# [EXPORT <export_name>]
# [ARGS <cmake_install_args>]
# )
#
# Usage:
# add_library(vtkm_library)
# vtkm_install_targets(TARGETS vtkm_library ARGS COMPONENT core)
#
# TARGETS: List of targets to be installed.
#
# EXPORT: [OPTIONAL] The name of the export set to which these targets will be
# added. If omitted, vtkm_install_targets uses the value of
# VTKm_EXPORT_NAME by default.
#
# ARGS: [OPTIONAL] Any argument other than `TARGETS` and `EXPORT` accepted
# by the `install` CMake function:
# <https://cmake.org/cmake/help/latest/command/install.html>
#
function(vtkm_install_targets)
set(oneValueArgs EXPORT)
set(multiValueArgs TARGETS ARGS)
cmake_parse_arguments(VTKm_INSTALL "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(export_name ${VTKm_EXPORT_NAME})
if(VTKm_INSTALL_EXPORT)
set(export_name ${VTKm_INSTALL_EXPORT})
endif()
if(NOT DEFINED VTKm_INSTALL_TARGETS)
message(FATAL_ERROR "vtkm_install_targets invoked without TARGETS arguments.")
endif()
if(DEFINED VTKm_INSTALL_UNPARSED_ARGUMENTS)
message(FATAL_ERROR "vtkm_install_targets missing ARGS keyword prepending install arguments")
endif()
foreach(tgt IN LISTS VTKm_INSTALL_TARGETS)
vtkm_target_mangle(tgt_mangled ${tgt})
set_target_properties(${tgt} PROPERTIES EXPORT_NAME ${tgt_mangled})
endforeach()
install(TARGETS ${VTKm_INSTALL_TARGETS} EXPORT ${export_name} ${VTKm_INSTALL_ARGS})
endfunction(vtkm_install_targets)
#-----------------------------------------------------------------------------
# Add a relevant information to target that wants to use VTK-m.
@@ -301,7 +361,7 @@ endfunction()
#
# If VTK-m was built with CMake 3.18+ and you are using CMake 3.18+ and have
# a cmake_minimum_required of 3.18 or have set policy CMP0105 to new, this will
# return an empty string as the `vtkm::cuda` target will correctly propagate
# return an empty string as the `vtkm_cuda` target will correctly propagate
# all the necessary flags.
#
# Note: calling `vtkm_add_target_information` multiple times with
@@ -374,9 +434,9 @@ function(vtkm_add_target_information uses_vtkm_target)
endforeach()
endif()
if((TARGET vtkm::cuda) OR (TARGET vtkm::kokkos_cuda))
if(TARGET vtkm_cuda OR TARGET vtkm::cuda OR TARGET vtkm_kokkos_cuda OR TARGET vtkm::kokkos_cuda)
set_source_files_properties(${VTKm_TI_DEVICE_SOURCES} PROPERTIES LANGUAGE "CUDA")
elseif(TARGET vtkm::kokkos_hip)
elseif(TARGET vtkm_kokkos_hip OR TARGET vtkm::kokkos_hip)
set_source_files_properties(${VTKm_TI_DEVICE_SOURCES} PROPERTIES LANGUAGE "HIP")
kokkos_compilation(SOURCE ${VTKm_TI_DEVICE_SOURCES})
endif()
@@ -513,8 +573,7 @@ function(vtkm_library)
endif()
#install the library itself
install(TARGETS ${lib_name}
EXPORT ${VTKm_EXPORT_NAME}
vtkm_install_targets(TARGETS ${lib_name} ARGS
ARCHIVE DESTINATION ${VTKm_INSTALL_LIB_DIR}
LIBRARY DESTINATION ${VTKm_INSTALL_LIB_DIR}
RUNTIME DESTINATION ${VTKm_INSTALL_BIN_DIR}

@@ -11,18 +11,25 @@
#-----------------------------------------------------------------------------
# Adds a performance benchmark test
#
# add_benchmark_test(benchmark [ <filter_regex...> ])
#
# Usage:
# add_benchmark_test(FiltersBenchmark BenchThreshold BenchTetrahedralize)
# add_benchmark_test(benchmark
# [ NAME <name> ]
# [ ARGS <args...> ]
# [ REGEX <benchmark_regex...> ]
# )
#
# benchmark: Target of an executable that uses Google Benchmark.
#
# filter_regex: CMake regexes that select the specific benchmarks within the binary
# NAME: The name given to the CMake tests. The benchmark target name is used
# if NAME is not specified.
#
# ARGS: Extra arguments passed to the benchmark executable when run.
#
# REGEX: Regular expressions that select the specific benchmarks within the binary
# to be used. They populate the Google Benchmark
# --benchmark_filter parameter. When multiple regexes are passed,
# they are joined using the "|"
# regex operator before populating the `--benchmark_filter` parameter
# regex operator before populating the `--benchmark_filter` parameter.
#
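# Usage sketch (the target and regex names below are illustrative):
#
#   add_benchmark_test(FiltersBenchmark
#     NAME Filters
#     ARGS --wavelet-dim 128
#     REGEX BenchThreshold BenchTetrahedralize
#     )
#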
function(add_benchmark_test benchmark)
@@ -34,10 +41,22 @@ function(add_benchmark_test benchmark)
###TEST VARIABLES############################################################
# Optional positional parameters for filter_regex
set(VTKm_PERF_FILTER_NAME ".*")
if (${ARGC} GREATER_EQUAL 2)
string(REPLACE ";" "|" VTKm_PERF_FILTER_NAME "${ARGN}")
set(options)
set(one_value_keywords NAME)
set(multi_value_keywords ARGS REGEX)
cmake_parse_arguments(PARSE_ARGV 1 VTKm_PERF "${options}" "${one_value_keywords}" "${multi_value_keywords}")
if (VTKm_PERF_UNPARSED_ARGUMENTS)
message(FATAL_ERROR "Bad arguments to add_benchmark_test (${VTKm_PERF_UNPARSED_ARGUMENTS}).")
endif()
if (NOT VTKm_PERF_NAME)
set(VTKm_PERF_NAME ${benchmark})
endif()
if (VTKm_PERF_REGEX)
string(REPLACE ";" "|" VTKm_PERF_REGEX "${VTKm_PERF_REGEX}")
else()
set(VTKm_PERF_REGEX ".*")
endif()
set(VTKm_PERF_REMOTE_URL "https://gitlab.kitware.com/vbolea/vtk-m-benchmark-records.git")
@@ -49,22 +68,41 @@ function(add_benchmark_test benchmark)
set(VTKm_PERF_DIST "normal")
set(VTKm_PERF_REPO "${CMAKE_BINARY_DIR}/vtk-m-benchmark-records")
set(VTKm_PERF_COMPARE_JSON "${CMAKE_BINARY_DIR}/nocommit_${benchmark}.json")
set(VTKm_PERF_STDOUT "${CMAKE_BINARY_DIR}/benchmark_${benchmark}.stdout")
set(VTKm_PERF_COMPARE_STDOUT "${CMAKE_BINARY_DIR}/compare_${benchmark}.stdout")
set(VTKm_PERF_COMPARE_JSON "${CMAKE_BINARY_DIR}/nocommit_${VTKm_PERF_NAME}.json")
set(VTKm_PERF_STDOUT "${CMAKE_BINARY_DIR}/benchmark_${VTKm_PERF_NAME}.stdout")
set(VTKm_PERF_COMPARE_STDOUT "${CMAKE_BINARY_DIR}/compare_${VTKm_PERF_NAME}.stdout")
if (DEFINED ENV{CI_COMMIT_SHA})
set(VTKm_PERF_COMPARE_JSON "${CMAKE_BINARY_DIR}/$ENV{CI_COMMIT_SHA}_${benchmark}.json")
set(VTKm_PERF_COMPARE_JSON "${CMAKE_BINARY_DIR}/$ENV{CI_COMMIT_SHA}_${VTKm_PERF_NAME}.json")
endif()
set(test_name "PerformanceTest${benchmark}")
set(test_name "PerformanceTest${VTKm_PERF_NAME}")
###TEST INVOCATIONS##########################################################
if (NOT TEST PerformanceTestFetch)
add_test(NAME "PerformanceTestFetch"
COMMAND ${CMAKE_COMMAND}
"-DVTKm_PERF_REPO=${VTKm_PERF_REPO}"
"-DVTKm_SOURCE_DIR=${VTKm_SOURCE_DIR}"
"-DVTKm_PERF_REMOTE_URL=${VTKm_PERF_REMOTE_URL}"
-P "${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestFetch.cmake"
)
set_property(TEST PerformanceTestFetch PROPERTY FIXTURES_SETUP "FixturePerformanceTestSetup")
endif()
if (NOT TEST PerformanceTestCleanUp)
add_test(NAME "PerformanceTestCleanUp"
COMMAND ${CMAKE_COMMAND} -E rm -rf "${VTKm_PERF_REPO}"
)
set_property(TEST PerformanceTestCleanUp PROPERTY FIXTURES_CLEANUP "FixturePerformanceTestCleanUp")
endif()
add_test(NAME "${test_name}Run"
COMMAND ${CMAKE_COMMAND}
"-DVTKm_PERF_BENCH_DEVICE=Any"
"-DVTKm_PERF_BENCH_PATH=${CMAKE_BINARY_DIR}/bin/${benchmark}"
"-DVTKm_PERF_FILTER_NAME=${VTKm_PERF_FILTER_NAME}"
"-DVTKm_PERF_ARGS=${VTKm_PERF_ARGS}"
"-DVTKm_PERF_REGEX=${VTKm_PERF_REGEX}"
"-DVTKm_PERF_REPETITIONS=${VTKm_PERF_REPETITIONS}"
"-DVTKm_PERF_MIN_TIME=${VTKm_PERF_MIN_TIME}"
"-DVTKm_PERF_COMPARE_JSON=${VTKm_PERF_COMPARE_JSON}"
@@ -73,14 +111,6 @@ function(add_benchmark_test benchmark)
-P "${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestRun.cmake"
)
add_test(NAME "${test_name}Fetch"
COMMAND ${CMAKE_COMMAND}
"-DVTKm_PERF_REPO=${VTKm_PERF_REPO}"
"-DVTKm_SOURCE_DIR=${VTKm_SOURCE_DIR}"
"-DVTKm_PERF_REMOTE_URL=${VTKm_PERF_REMOTE_URL}"
-P "${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestFetch.cmake"
)
add_test(NAME "${test_name}Upload"
COMMAND ${CMAKE_COMMAND}
"-DVTKm_PERF_REPO=${VTKm_PERF_REPO}"
@@ -91,33 +121,32 @@ function(add_benchmark_test benchmark)
add_test(NAME "${test_name}Report"
COMMAND ${CMAKE_COMMAND}
"-DBENCHMARK_NAME=${benchmark}"
"-DVTKm_PERF_ALPHA=${VTKm_PERF_ALPHA}"
"-DVTKm_PERF_DIST=${VTKm_PERF_DIST}"
"-DVTKm_PERF_REPO=${VTKm_PERF_REPO}"
"-DVTKm_PERF_COMPARE_JSON=${VTKm_PERF_COMPARE_JSON}"
"-DVTKm_SOURCE_DIR=${VTKm_SOURCE_DIR}"
"-DVTKm_BINARY_DIR=${VTKm_BINARY_DIR}"
"-DVTKm_PERF_ALPHA=${VTKm_PERF_ALPHA}"
"-DVTKm_PERF_COMPARE_JSON=${VTKm_PERF_COMPARE_JSON}"
"-DVTKm_PERF_COMPARE_STDOUT=${VTKm_PERF_COMPARE_STDOUT}"
"-DVTKm_PERF_DIST=${VTKm_PERF_DIST}"
"-DVTKm_PERF_NAME=${VTKm_PERF_NAME}"
"-DVTKm_PERF_REPO=${VTKm_PERF_REPO}"
"-DVTKm_SOURCE_DIR=${VTKm_SOURCE_DIR}"
-P "${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestReport.cmake"
)
add_test(NAME "${test_name}CleanUp"
COMMAND ${CMAKE_COMMAND} -E rm -rf "${VTKm_PERF_REPO}"
)
###TEST PROPERTIES###########################################################
set_property(TEST ${test_name}Upload PROPERTY DEPENDS ${test_name}Report)
set_property(TEST ${test_name}Report PROPERTY DEPENDS ${test_name}Run)
set_property(TEST ${test_name}Report PROPERTY FIXTURES_REQUIRED "FixturePerformanceTestSetup")
set_property(TEST ${test_name}Upload PROPERTY FIXTURES_REQUIRED "FixturePerformanceTestCleanUp")
set_tests_properties("${test_name}Report" "${test_name}Upload"
PROPERTIES
FIXTURE_REQUIRED "${test_name}Run;${test_name}Fetch"
FIXTURE_CLEANUP "${test_name}CleanUp"
REQUIRED_FILES "${VTKm_PERF_COMPARE_JSON}")
set_tests_properties("${test_name}Run"
"${test_name}Report"
"${test_name}Upload"
"${test_name}Fetch"
"${test_name}CleanUp"
"PerformanceTestFetch"
"PerformanceTestCleanUp"
PROPERTIES RUN_SERIAL ON)
set_tests_properties(${test_name}Run PROPERTIES TIMEOUT 1800)

@@ -10,8 +10,17 @@
include(${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestLib.cmake)
REQUIRE_FLAG("VTKm_SOURCE_DIR")
REQUIRE_FLAG_MUTABLE("VTKm_PERF_REPO")
REQUIRE_FLAG_MUTABLE("VTKm_PERF_REMOTE_URL")
set(upstream_url "https://gitlab.kitware.com/vtk/vtk-m.git")
file(REMOVE_RECURSE vtk-m-benchmark-records)
execute(COMMAND /usr/bin/git clone -b records ${VTKm_PERF_REMOTE_URL} ${VTKm_PERF_REPO})
# Fetch the VTK-m main git repo objects. This is needed so that CI runs from a
# fork of VTK-m still have access to the latest git commits in the upstream
# vtk-m git repo.
execute(COMMAND /usr/bin/git -C ${VTKm_SOURCE_DIR} remote add upstream ${upstream_url})
execute(COMMAND /usr/bin/git -C ${VTKm_SOURCE_DIR} fetch upstream)

@@ -10,7 +10,7 @@
include("${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestLib.cmake")
REQUIRE_FLAG("BENCHMARK_NAME")
REQUIRE_FLAG("VTKm_PERF_NAME")
REQUIRE_FLAG("VTKm_PERF_COMPARE_JSON")
REQUIRE_FLAG("VTKm_PERF_COMPARE_STDOUT")
@@ -19,7 +19,7 @@ REQUIRE_FLAG_MUTABLE("VTKm_PERF_ALPHA")
REQUIRE_FLAG_MUTABLE("VTKm_PERF_DIST")
###FIND MOST RECENT BASELINE####################################################
execute(COMMAND /usr/bin/git -C "${VTKm_SOURCE_DIR}" merge-base origin/master @
execute(COMMAND /usr/bin/git -C "${VTKm_SOURCE_DIR}" merge-base upstream/master @
OUTPUT_VARIABLE GIT_BASE_COMMIT)
string(STRIP "${GIT_BASE_COMMIT}" GIT_BASE_COMMIT)
@@ -34,8 +34,8 @@ execute_process(COMMAND /usr/bin/git -C "${VTKm_SOURCE_DIR}" log --format=%H --f
string(REPLACE "\n" ";" GIT_ANCESTOR_COMMITS ${GIT_ANCESTOR_COMMITS})
foreach(commit IN LISTS GIT_ANCESTOR_COMMITS)
if (EXISTS "${VTKm_PERF_REPO}/${commit}_${BENCHMARK_NAME}.json")
set(BASELINE_REPORT "${VTKm_PERF_REPO}/${commit}_${BENCHMARK_NAME}.json")
if (EXISTS "${VTKm_PERF_REPO}/${commit}_${VTKm_PERF_NAME}.json")
set(BASELINE_REPORT "${VTKm_PERF_REPO}/${commit}_${VTKm_PERF_NAME}.json")
break()
endif()
endforeach()

@@ -11,7 +11,7 @@
include("${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTestLib.cmake")
REQUIRE_FLAG("VTKm_PERF_BENCH_PATH")
REQUIRE_FLAG("VTKm_PERF_FILTER_NAME")
REQUIRE_FLAG("VTKm_PERF_REGEX")
REQUIRE_FLAG("VTKm_PERF_COMPARE_JSON")
REQUIRE_FLAG("VTKm_PERF_STDOUT")
@@ -22,7 +22,8 @@ REQUIRE_FLAG_MUTABLE("VTKm_PERF_MIN_TIME")
execute(
COMMAND "${VTKm_PERF_BENCH_PATH}"
--vtkm-device "${VTKm_PERF_BENCH_DEVICE}"
"--benchmark_filter=${VTKm_PERF_FILTER_NAME}"
${VTKm_PERF_ARGS}
"--benchmark_filter=${VTKm_PERF_REGEX}"
"--benchmark_out=${VTKm_PERF_COMPARE_JSON}"
"--benchmark_repetitions=${VTKm_PERF_REPETITIONS}"
"--benchmark_min_time=${VTKm_PERF_MIN_TIME}"

@@ -116,7 +116,7 @@ function(vtkm_test_against_install dir)
set(args -C ${build_config})
endif()
if(WIN32 AND TARGET vtkm::tbb)
if(WIN32 AND TARGET vtkm_tbb)
#on windows we need to specify these as FindTBB won't
#find the installed version just with the prefix path
list(APPEND args
@@ -126,7 +126,7 @@ function(vtkm_test_against_install dir)
)
endif()
if(TARGET vtkm::kokkos)
if(TARGET vtkm_kokkos)
list(APPEND args "-DKokkos_DIR=${Kokkos_DIR}")
endif()

@@ -289,6 +289,12 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \
list(GET per_device_timeout ${index} timeout)
list(GET per_device_serial ${index} run_serial)
# If VTKm_OVERRIDE_CTEST_TIMEOUT is set, drop the VTK-m-specified CTest timeouts
set(extra_args)
if (NOT VTKm_OVERRIDE_CTEST_TIMEOUT)
list(APPEND extra_args TIMEOUT ${timeout})
endif()
foreach (test ${VTKm_UT_SOURCES} ${VTKm_UT_DEVICE_SOURCES})
get_filename_component(tname ${test} NAME_WE)
if(VTKm_UT_MPI)
@@ -300,7 +306,7 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \
)
set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES
LABELS "${upper_backend};${VTKm_UT_LABEL}"
TIMEOUT ${timeout}
${extra_args}
RUN_SERIAL ${run_serial}
FAIL_REGULAR_EXPRESSION "runtime error")
endif() # VTKm_ENABLE_MPI
@@ -311,7 +317,7 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \
)
set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES
LABELS "${upper_backend};${VTKm_UT_LABEL}"
TIMEOUT ${timeout}
${extra_args}
RUN_SERIAL ${run_serial}
FAIL_REGULAR_EXPRESSION "runtime error")
@@ -323,11 +329,12 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \
)
set_tests_properties("${tname}${upper_backend}" PROPERTIES
LABELS "${upper_backend};${VTKm_UT_LABEL}"
TIMEOUT ${timeout}
${extra_args}
RUN_SERIAL ${run_serial}
FAIL_REGULAR_EXPRESSION "runtime error")
endif() # VTKm_UT_MPI
endforeach()
unset(extra_args)
endforeach()
endfunction(vtkm_unit_tests)

@@ -181,6 +181,11 @@ vtkm_option(VTKm_NO_INSTALL_README_LICENSE "disable the installation of README a
# Allow VTK to turn off these symlinks for its wheel distribution.
vtkm_option(VTKm_SKIP_LIBRARY_VERSIONS "Skip versioning VTK-m libraries" OFF)
# During development, running unit tests with the default timeouts can take too
# long. Allow the developer to skip the VTK-m default timeouts and control them
# through ctest's command line instead. Doesn't affect CI unless enabled.
vtkm_option(VTKm_OVERRIDE_CTEST_TIMEOUT "Disable default ctest timeout" OFF)
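# For example (a sketch, with the option enabled):
#   cmake -DVTKm_OVERRIDE_CTEST_TIMEOUT=ON <source-dir>
#   ctest --timeout 600    # the timeout now comes solely from ctest's command line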
mark_as_advanced(
VTKm_ENABLE_LOGGING
VTKm_NO_ASSERT
@@ -191,6 +196,7 @@ mark_as_advanced(
VTKm_ENABLE_DEVELOPER_FLAGS
VTKm_NO_INSTALL_README_LICENSE
VTKm_SKIP_LIBRARY_VERSIONS
VTKm_OVERRIDE_CTEST_TIMEOUT
)
#-----------------------------------------------------------------------------
@@ -412,9 +418,11 @@ if(NOT VTKm_INSTALL_ONLY_LIBRARIES)
# Create and install exports for external projects
export(EXPORT ${VTKm_EXPORT_NAME}
NAMESPACE vtkm::
FILE ${VTKm_BUILD_CMAKE_BASE_DIR}/${VTKm_INSTALL_CONFIG_DIR}/VTKmTargets.cmake
)
install(EXPORT ${VTKm_EXPORT_NAME}
NAMESPACE vtkm::
DESTINATION ${VTKm_INSTALL_CONFIG_DIR}
FILE VTKmTargets.cmake
)

@@ -1,4 +1,4 @@
VTKm License Version 1.9
VTKm License Version 2.0
========================================================================
Copyright (c) 2014-2022

README.md

@@ -76,6 +76,8 @@ VTK-m Requires:
Optional dependencies are:
+ Kokkos Device Adapter
+ [Kokkos](https://kokkos.github.io/) 3.7+
+ CUDA Device Adapter
+ [Cuda Toolkit 9.2, >= 10.2](https://developer.nvidia.com/cuda-toolkit)
+ Note CUDA >= 10.2 is required on Windows
@@ -132,10 +134,10 @@ and then build. Here are some example *nix commands for the process
(individual commands may vary).
```sh
$ tar xvzf ~/Downloads/vtk-m-v1.4.0.tar.gz
$ tar xvzf ~/Downloads/vtk-m-v2.0.0.tar.gz
$ mkdir vtkm-build
$ cd vtkm-build
$ cmake-gui ../vtk-m-v1.4.0
$ cmake-gui ../vtk-m-v2.0.0
$ cmake --build . -j # Runs make (or other build program)
```
@@ -147,76 +149,109 @@ Users Guide].
The VTK-m source distribution includes a number of examples. The goal of the
VTK-m examples is to illustrate specific VTK-m concepts in a consistent and
simple format. However, these examples only cover a small part of the
simple format. However, these examples only cover a small portion of the
capabilities of VTK-m.
Below is a simple example of using VTK-m to load a VTK image file, run the
Marching Cubes algorithm on it, and render the results to an image:
Below is a simple example that uses VTK-m to create a small data set and uses VTK-m's rendering
engine to render an image of it and write that image to a file. It then computes an isosurface on
the input data set and renders this output data set in separate image files:
```cpp
#include <vtkm/Bounds.h>
#include <vtkm/Range.h>
#include <vtkm/cont/ColorTable.h>
#include <vtkm/filter/Contour.h>
#include <vtkm/io/VTKDataSetReader.h>
#include <vtkm/cont/Initialize.h>
#include <vtkm/source/Tangle.h>
#include <vtkm/rendering/Actor.h>
#include <vtkm/rendering/Camera.h>
#include <vtkm/rendering/CanvasRayTracer.h>
#include <vtkm/rendering/Color.h>
#include <vtkm/rendering/MapperRayTracer.h>
#include <vtkm/rendering/MapperVolume.h>
#include <vtkm/rendering/MapperWireframer.h>
#include <vtkm/rendering/Scene.h>
#include <vtkm/rendering/View3D.h>
vtkm::io::VTKDataSetReader reader("path/to/vtk_image_file.vtk");
vtkm::cont::DataSet inputData = reader.ReadDataSet();
std::string fieldName = "scalars";
#include <vtkm/filter/contour/Contour.h>
vtkm::Range range;
inputData.GetPointField(fieldName).GetRange(&range);
vtkm::Float64 isovalue = range.Center();
using vtkm::rendering::CanvasRayTracer;
using vtkm::rendering::MapperRayTracer;
using vtkm::rendering::MapperVolume;
using vtkm::rendering::MapperWireframer;
// Create an isosurface filter
vtkm::filter::Contour filter;
filter.SetIsoValue(0, isovalue);
filter.SetActiveField(fieldName);
vtkm::cont::DataSet outputData = filter.Execute(inputData);
int main(int argc, char* argv[])
{
vtkm::cont::Initialize(argc, argv, vtkm::cont::InitializeOptions::Strict);
// compute the bounds and extends of the input data
vtkm::Bounds coordsBounds = inputData.GetCoordinateSystem().GetBounds();
auto tangle = vtkm::source::Tangle(vtkm::Id3{ 50, 50, 50 });
vtkm::cont::DataSet tangleData = tangle.Execute();
std::string fieldName = "tangle";
// setup a camera and point it towards the center of the input data
vtkm::rendering::Camera camera;
camera.ResetToBounds(coordsBounds);
vtkm::cont::ColorTable colorTable("inferno");
// Set up a camera for rendering the input data
vtkm::rendering::Camera camera;
camera.SetLookAt(vtkm::Vec3f_32(0.5, 0.5, 0.5));
camera.SetViewUp(vtkm::make_Vec(0.f, 1.f, 0.f));
camera.SetClippingRange(1.f, 10.f);
camera.SetFieldOfView(60.f);
camera.SetPosition(vtkm::Vec3f_32(1.5, 1.5, 1.5));
vtkm::cont::ColorTable colorTable("inferno");
// Create a mapper, canvas and view that will be used to render the scene
vtkm::rendering::Scene scene;
vtkm::rendering::MapperRayTracer mapper;
vtkm::rendering::CanvasRayTracer canvas(512, 512);
vtkm::rendering::Color bg(0.2f, 0.2f, 0.2f, 1.0f);
// Background color:
vtkm::rendering::Color bg(0.2f, 0.2f, 0.2f, 1.0f);
vtkm::rendering::Actor actor(tangleData.GetCellSet(),
tangleData.GetCoordinateSystem(),
tangleData.GetField(fieldName),
colorTable);
vtkm::rendering::Scene scene;
scene.AddActor(actor);
// 2048x2048 pixels in the canvas:
CanvasRayTracer canvas(2048, 2048);
// Create a view and use it to render the input data using OS Mesa
// Render an image of the output isosurface
scene.AddActor(vtkm::rendering::Actor(outputData.GetCellSet(),
outputData.GetCoordinateSystem(),
outputData.GetField(fieldName),
colorTable));
vtkm::rendering::View3D view(scene, mapper, canvas, camera, bg);
view.Paint();
view.SaveAs("demo_output.png");
vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg);
view.Paint();
view.SaveAs("volume.png");
// Compute an isosurface:
vtkm::filter::contour::Contour filter;
// [min, max] of the tangle field is [-0.887, 24.46]:
filter.SetIsoValue(3.0);
filter.SetActiveField(fieldName);
vtkm::cont::DataSet isoData = filter.Execute(tangleData);
// Render a separate image with the output isosurface
vtkm::rendering::Actor isoActor(
isoData.GetCellSet(), isoData.GetCoordinateSystem(), isoData.GetField(fieldName), colorTable);
// By default, the actor will automatically scale the scalar range of the color table to match
// that of the data. However, we are coloring by the scalar that we just extracted a contour
// from, so we want the scalar range to match that of the previous image.
isoActor.SetScalarRange(actor.GetScalarRange());
vtkm::rendering::Scene isoScene;
isoScene.AddActor(isoActor);
// Wireframe surface:
vtkm::rendering::View3D isoView(isoScene, MapperWireframer(), canvas, camera, bg);
isoView.Paint();
isoView.SaveAs("isosurface_wireframer.png");
// Smooth surface:
vtkm::rendering::View3D solidView(isoScene, MapperRayTracer(), canvas, camera, bg);
solidView.Paint();
solidView.SaveAs("isosurface_raytracer.png");
return 0;
}
```
A minimal CMakeLists.txt such as the following one can be used to build this
example.
```CMake
project(example)
cmake_minimum_required(VERSION 3.12...3.15 FATAL_ERROR)
project(VTKmDemo CXX)
set(VTKm_DIR "/somepath/lib/cmake/vtkm-XYZ")
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
find_package(VTKm REQUIRED)
add_executable(example example.cxx)
target_link_libraries(example vtkm_cont vtkm_rendering)
if(TARGET vtkm::rendering)
add_executable(Demo Demo.cxx)
target_link_libraries(Demo PRIVATE vtkm::filter vtkm::rendering vtkm::source)
endif()
```
## License ##
@@ -229,11 +264,11 @@ See [LICENSE.txt](LICENSE.txt) for details.
[VTK-m Coding Conventions]: docs/CodingConventions.md
[VTK-m Doxygen latest]: https://docs-m.vtk.org/latest/index.html
[VTK-m Doxygen nightly]: https://docs-m.vtk.org/nightly/
[VTK-m download page]: http://m.vtk.org/index.php/VTK-m_Releases
[VTK-m download page]: https://gitlab.kitware.com/vtk/vtk-m/-/releases
[VTK-m git repository]: https://gitlab.kitware.com/vtk/vtk-m/
[VTK-m Issue Tracker]: https://gitlab.kitware.com/vtk/vtk-m/-/issues
[VTK-m Overview]: http://m.vtk.org/images/2/29/VTKmVis2016.pptx
[VTK-m Users Guide]: http://m.vtk.org/images/c/c8/VTKmUsersGuide.pdf
[VTK-m Users Guide]: https://gitlab.kitware.com/vtk/vtk-m-user-guide/-/wikis/home
[VTK-m users email list]: http://vtk.org/mailman/listinfo/vtkm
[VTK-m Wiki]: http://m.vtk.org/
[VTK-m Tutorial]: http://m.vtk.org/index.php/Tutorial

@@ -23,6 +23,7 @@
#include <vtkm/cont/DataSet.h>
#include <vtkm/cont/ErrorInternal.h>
#include <vtkm/cont/Logging.h>
#include <vtkm/cont/PartitionedDataSet.h>
#include <vtkm/cont/RuntimeDeviceTracker.h>
#include <vtkm/cont/Timer.h>
@@ -96,6 +97,17 @@ vtkm::cont::DataSet& GetUnstructuredInputDataSet()
return *UnstructuredInputDataSet;
}
vtkm::cont::PartitionedDataSet* InputPartitionedData;
vtkm::cont::PartitionedDataSet* UnstructuredInputPartitionedData;
vtkm::cont::PartitionedDataSet& GetInputPartitionedData()
{
return *InputPartitionedData;
}
vtkm::cont::PartitionedDataSet& GetUnstructuredInputPartitionedData()
{
return *UnstructuredInputPartitionedData;
}
// The point scalars to use:
static std::string PointScalarsName;
// The cell scalars to use:
@@ -120,7 +132,8 @@ enum GradOpts : int
Vorticity = 1 << 3,
QCriterion = 1 << 4,
RowOrdering = 1 << 5,
ScalarInput = 1 << 6
ScalarInput = 1 << 6,
PartitionedInput = 1 << 7
};
void BenchGradient(::benchmark::State& state, int options)
@@ -162,11 +175,21 @@ void BenchGradient(::benchmark::State& state, int options)
vtkm::cont::Timer timer{ device };
//vtkm::cont::DataSet input = static_cast<bool>(options & Structured) ? GetInputDataSet() : GetUnstructuredInputDataSet();
vtkm::cont::PartitionedDataSet input;
if (options & PartitionedInput)
{
input = GetInputPartitionedData();
}
else
{
input = GetInputDataSet();
}
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
@@ -179,7 +202,9 @@ void BenchGradient(::benchmark::State& state, int options)
VTKM_BENCHMARK(BenchGradient##Name)
VTKM_PRIVATE_GRADIENT_BENCHMARK(Scalar, Gradient | ScalarInput);
VTKM_PRIVATE_GRADIENT_BENCHMARK(ScalarPartitionedData, Gradient | ScalarInput | PartitionedInput);
VTKM_PRIVATE_GRADIENT_BENCHMARK(Vector, Gradient);
VTKM_PRIVATE_GRADIENT_BENCHMARK(VectorPartitionedData, Gradient | PartitionedInput);
VTKM_PRIVATE_GRADIENT_BENCHMARK(VectorRow, Gradient | RowOrdering);
VTKM_PRIVATE_GRADIENT_BENCHMARK(Point, PointGradient);
VTKM_PRIVATE_GRADIENT_BENCHMARK(Divergence, Divergence);
@@ -190,7 +215,7 @@ VTKM_PRIVATE_GRADIENT_BENCHMARK(All,
#undef VTKM_PRIVATE_GRADIENT_BENCHMARK
void BenchThreshold(::benchmark::State& state)
void BenchThreshold(::benchmark::State& state, bool partitionedInput)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
@@ -210,24 +235,33 @@ void BenchThreshold(::benchmark::State& state)
filter.SetLowerThreshold(mid - quarter);
filter.SetUpperThreshold(mid + quarter);
auto input = partitionedInput ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK(BenchThreshold);
#define VTKM_PRIVATE_THRESHOLD_BENCHMARK(Name, Opts) \
void BenchThreshold##Name(::benchmark::State& state) { BenchThreshold(state, Opts); } \
VTKM_BENCHMARK(BenchThreshold##Name)
VTKM_PRIVATE_THRESHOLD_BENCHMARK(BenchThreshold, false);
VTKM_PRIVATE_THRESHOLD_BENCHMARK(BenchThresholdPartitioned, true);
void BenchThresholdPoints(::benchmark::State& state)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
const bool compactPoints = static_cast<bool>(state.range(0));
const bool partitionedInput = static_cast<bool>(state.range(1));
// Lookup the point scalar range
const auto range = []() -> vtkm::Range {
@@ -246,19 +280,33 @@ void BenchThresholdPoints(::benchmark::State& state)
filter.SetUpperThreshold(mid + quarter);
filter.SetCompactPoints(compactPoints);
vtkm::cont::PartitionedDataSet input;
input = partitionedInput ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK_OPTS(BenchThresholdPoints, ->ArgName("CompactPts")->DenseRange(0, 1));
void BenchThresholdPointsGenerator(::benchmark::internal::Benchmark* bm)
{
bm->ArgNames({ "CompactPts", "PartitionedInput" });
bm->Args({ 0, 0 });
bm->Args({ 1, 0 });
bm->Args({ 0, 1 });
bm->Args({ 1, 1 });
}
VTKM_BENCHMARK_APPLY(BenchThresholdPoints, BenchThresholdPointsGenerator);
void BenchCellAverage(::benchmark::State& state)
{
@@ -284,68 +332,79 @@ VTKM_BENCHMARK(BenchCellAverage);
void BenchPointAverage(::benchmark::State& state)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
const bool isPartitioned = static_cast<bool>(state.range(0));
vtkm::filter::field_conversion::PointAverage filter;
filter.SetActiveField(CellScalarsName, vtkm::cont::Field::Association::Cells);
vtkm::cont::PartitionedDataSet input;
input = isPartitioned ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK(BenchPointAverage);
VTKM_BENCHMARK_OPTS(BenchPointAverage, ->ArgName("PartitionedInput")->DenseRange(0, 1));
void BenchWarpScalar(::benchmark::State& state)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
const bool isPartitioned = static_cast<bool>(state.range(0));
vtkm::filter::field_transform::WarpScalar filter{ 2. };
filter.SetUseCoordinateSystemAsField(true);
filter.SetNormalField(PointVectorsName, vtkm::cont::Field::Association::Points);
filter.SetScalarFactorField(PointScalarsName, vtkm::cont::Field::Association::Points);
vtkm::cont::PartitionedDataSet input;
input = isPartitioned ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK(BenchWarpScalar);
VTKM_BENCHMARK_OPTS(BenchWarpScalar, ->ArgName("PartitionedInput")->DenseRange(0, 1));
void BenchWarpVector(::benchmark::State& state)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
const bool isPartitioned = static_cast<bool>(state.range(0));
vtkm::filter::field_transform::WarpVector filter{ 2. };
filter.SetUseCoordinateSystemAsField(true);
filter.SetVectorField(PointVectorsName, vtkm::cont::Field::Association::Points);
vtkm::cont::PartitionedDataSet input;
input = isPartitioned ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK(BenchWarpVector);
VTKM_BENCHMARK_OPTS(BenchWarpVector, ->ArgName("PartitionedInput")->DenseRange(0, 1));
void BenchContour(::benchmark::State& state)
{
@@ -356,6 +415,7 @@ void BenchContour(::benchmark::State& state)
const bool mergePoints = static_cast<bool>(state.range(2));
const bool normals = static_cast<bool>(state.range(3));
const bool fastNormals = static_cast<bool>(state.range(4));
const bool isPartitioned = static_cast<bool>(state.range(5));
vtkm::filter::contour::Contour filter;
filter.SetActiveField(PointScalarsName, vtkm::cont::Field::Association::Points);
@@ -383,7 +443,15 @@ void BenchContour(::benchmark::State& state)
vtkm::cont::Timer timer{ device };
vtkm::cont::DataSet input = isStructured ? GetInputDataSet() : GetUnstructuredInputDataSet();
vtkm::cont::PartitionedDataSet input;
if (isPartitioned)
{
input = isStructured ? GetInputPartitionedData() : GetUnstructuredInputPartitionedData();
}
else
{
input = isStructured ? GetInputDataSet() : GetUnstructuredInputDataSet();
}
for (auto _ : state)
{
@@ -399,17 +467,31 @@ void BenchContour(::benchmark::State& state)
void BenchContourGenerator(::benchmark::internal::Benchmark* bm)
{
bm->ArgNames({ "IsStructuredDataSet", "NIsoVals", "MergePts", "GenNormals", "FastNormals" });
bm->ArgNames({ "IsStructuredDataSet",
"NIsoVals",
"MergePts",
"GenNormals",
"FastNormals",
"MultiPartitioned" });
auto helper = [&](const vtkm::Id numIsoVals) {
bm->Args({ 0, numIsoVals, 0, 0, 0 });
bm->Args({ 0, numIsoVals, 1, 0, 0 });
bm->Args({ 0, numIsoVals, 0, 1, 0 });
bm->Args({ 0, numIsoVals, 0, 1, 1 });
bm->Args({ 1, numIsoVals, 0, 0, 0 });
bm->Args({ 1, numIsoVals, 1, 0, 0 });
bm->Args({ 1, numIsoVals, 0, 1, 0 });
bm->Args({ 1, numIsoVals, 0, 1, 1 });
bm->Args({ 0, numIsoVals, 0, 0, 0, 0 });
bm->Args({ 0, numIsoVals, 1, 0, 0, 0 });
bm->Args({ 0, numIsoVals, 0, 1, 0, 0 });
bm->Args({ 0, numIsoVals, 0, 1, 1, 0 });
bm->Args({ 1, numIsoVals, 0, 0, 0, 0 });
bm->Args({ 1, numIsoVals, 1, 0, 0, 0 });
bm->Args({ 1, numIsoVals, 0, 1, 0, 0 });
bm->Args({ 1, numIsoVals, 0, 1, 1, 0 });
bm->Args({ 0, numIsoVals, 0, 0, 0, 1 });
bm->Args({ 0, numIsoVals, 1, 0, 0, 1 });
bm->Args({ 0, numIsoVals, 0, 1, 0, 1 });
bm->Args({ 0, numIsoVals, 0, 1, 1, 1 });
bm->Args({ 1, numIsoVals, 0, 0, 0, 1 });
bm->Args({ 1, numIsoVals, 1, 0, 0, 1 });
bm->Args({ 1, numIsoVals, 0, 1, 0, 1 });
bm->Args({ 1, numIsoVals, 0, 1, 1, 1 });
};
helper(1);
@@ -417,34 +499,48 @@ void BenchContourGenerator(::benchmark::internal::Benchmark* bm)
helper(12);
}
// :TODO: Disabled until SIGSEGV in Contour when passing fields is resolved
VTKM_BENCHMARK_APPLY(BenchContour, BenchContourGenerator);
void BenchExternalFaces(::benchmark::State& state)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
const bool compactPoints = static_cast<bool>(state.range(0));
const bool isPartitioned = false; // Partitioned input currently disabled; would be static_cast<bool>(state.range(1)).
vtkm::filter::entity_extraction::ExternalFaces filter;
filter.SetCompactPoints(compactPoints);
vtkm::cont::PartitionedDataSet input;
input = isPartitioned ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK_OPTS(BenchExternalFaces, ->ArgName("Compact")->DenseRange(0, 1));
void BenchExternalFacesGenerator(::benchmark::internal::Benchmark* bm)
{
bm->ArgNames({ "Compact", "PartitionedInput" });
bm->Args({ 0, 0 });
bm->Args({ 1, 0 });
bm->Args({ 0, 1 });
bm->Args({ 1, 1 });
}
VTKM_BENCHMARK_APPLY(BenchExternalFaces, BenchExternalFacesGenerator);
void BenchTetrahedralize(::benchmark::State& state)
{
const vtkm::cont::DeviceAdapterId device = Config.Device;
const bool isPartitioned = static_cast<bool>(state.range(0));
// This filter only supports structured datasets:
if (FileAsInput && !InputIsStructured())
@ -453,20 +549,23 @@ void BenchTetrahedralize(::benchmark::State& state)
}
vtkm::filter::geometry_refinement::Tetrahedralize filter;
vtkm::cont::PartitionedDataSet input;
input = isPartitioned ? GetInputPartitionedData() : GetInputDataSet();
vtkm::cont::Timer timer{ device };
for (auto _ : state)
{
(void)_;
timer.Start();
auto result = filter.Execute(GetInputDataSet());
auto result = filter.Execute(input);
::benchmark::DoNotOptimize(result);
timer.Stop();
state.SetIterationTime(timer.GetElapsedTime());
}
}
VTKM_BENCHMARK(BenchTetrahedralize);
VTKM_BENCHMARK_OPTS(BenchTetrahedralize, ->ArgName("PartitionedInput")->DenseRange(0, 1));
void BenchVertexClustering(::benchmark::State& state)
{
@ -803,6 +902,7 @@ enum optionIndex
CELL_SCALARS,
POINT_VECTORS,
WAVELET_DIM,
NUM_PARTITIONS,
TETRA
};
@ -810,6 +910,7 @@ void InitDataSet(int& argc, char** argv)
{
std::string filename;
vtkm::Id waveletDim = 256;
vtkm::Id numPartitions = 1;
bool tetra = false;
namespace option = vtkm::cont::internal::option;
@ -854,6 +955,12 @@ void InitDataSet(int& argc, char** argv)
Arg::Number,
" --wavelet-dim <N> \tThe size in each dimension of the wavelet grid "
"(if generated)." });
usage.push_back({ NUM_PARTITIONS,
0,
"",
"num-partitions",
Arg::Number,
" --num-partitions <N> \tThe number of partitions to create" });
usage.push_back({ TETRA,
0,
"",
@ -903,6 +1010,12 @@ void InitDataSet(int& argc, char** argv)
parse >> waveletDim;
}
if (options[NUM_PARTITIONS])
{
std::istringstream parse(options[NUM_PARTITIONS].arg);
parse >> numPartitions;
}
tetra = (options[TETRA] != nullptr);
// Now go back through the arg list and remove anything that is not in the list of
@ -992,6 +1105,19 @@ void InitDataSet(int& argc, char** argv)
GetInputDataSet() = GetUnstructuredInputDataSet();
}
//Create partitioned data.
if (numPartitions > 0)
{
std::cerr << "[InitDataSet] Creating " << numPartitions << " partitions." << std::endl;
InputPartitionedData = new vtkm::cont::PartitionedDataSet;
UnstructuredInputPartitionedData = new vtkm::cont::PartitionedDataSet;
for (vtkm::Id i = 0; i < numPartitions; i++)
{
GetInputPartitionedData().AppendPartition(GetInputDataSet());
GetUnstructuredInputPartitionedData().AppendPartition(GetUnstructuredInputDataSet());
}
}
inputGenTimer.Stop();
std::cerr << "[InitDataSet] DataSet initialization took " << inputGenTimer.GetElapsedTime()
@ -1027,4 +1153,6 @@ int main(int argc, char* argv[])
VTKM_EXECUTE_BENCHMARKS_PREAMBLE(argc, args.data(), dataSetSummary);
delete InputDataSet;
delete UnstructuredInputDataSet;
delete InputPartitionedData;
delete UnstructuredInputPartitionedData;
}

@ -118,7 +118,9 @@ void BuildInputDataSet(uint32_t cycle, bool isStructured, bool isMultiBlock, vtk
PointVectorsName = "perlinnoisegrad";
// Generate uniform dataset(s)
const vtkm::Id3 dims{ dim, dim, dim };
vtkm::source::PerlinNoise noise;
noise.SetPointDimensions({ dim, dim, dim });
noise.SetSeed(static_cast<vtkm::IdComponent>(cycle));
if (isMultiBlock)
{
for (auto i = 0; i < 2; ++i)
@ -127,12 +129,9 @@ void BuildInputDataSet(uint32_t cycle, bool isStructured, bool isMultiBlock, vtk
{
for (auto k = 0; k < 2; ++k)
{
const vtkm::Vec3f origin{ static_cast<vtkm::FloatDefault>(i),
static_cast<vtkm::FloatDefault>(j),
static_cast<vtkm::FloatDefault>(k) };
const vtkm::source::PerlinNoise noise{ dims,
origin,
static_cast<vtkm::IdComponent>(cycle) };
noise.SetOrigin({ static_cast<vtkm::FloatDefault>(i),
static_cast<vtkm::FloatDefault>(j),
static_cast<vtkm::FloatDefault>(k) });
const auto dataset = noise.Execute();
partitionedInputDataSet.AppendPartition(dataset);
}
@ -141,7 +140,6 @@ void BuildInputDataSet(uint32_t cycle, bool isStructured, bool isMultiBlock, vtk
}
else
{
const vtkm::source::PerlinNoise noise{ dims, static_cast<vtkm::IdComponent>(cycle) };
inputDataSet = noise.Execute();
}

@ -39,9 +39,8 @@ vtkm::cont::InitializeResult Config;
void BenchRayTracing(::benchmark::State& state)
{
const vtkm::Id3 dims(128, 128, 128);
vtkm::source::Tangle maker(dims);
vtkm::source::Tangle maker;
maker.SetPointDimensions({ 128, 128, 128 });
vtkm::cont::DataSet dataset = maker.Execute();
vtkm::cont::CoordinateSystem coords = dataset.GetCoordinateSystem();

@ -72,14 +72,35 @@ target_compile_definitions(BenchmarkDeviceAdapter PUBLIC VTKm_BENCHS_RANGE_UPPER
if(VTKm_ENABLE_PERFORMANCE_TESTING)
include("${VTKm_SOURCE_DIR}/CMake/testing/VTKmPerformanceTest.cmake")
add_benchmark_test(BenchmarkFilters
BenchThreshold
BenchContour/IsStructuredDataSet:1/NIsoVals:12/MergePts:1/GenNormals:0
BenchContour/IsStructuredDataSet:1/NIsoVals:12/MergePts:0/GenNormals:1/FastNormals:1
BenchContour/IsStructuredDataSet:0/NIsoVals:12/MergePts:1/GenNormals:0
BenchContour/IsStructuredDataSet:0/NIsoVals:12/MergePts:0/GenNormals:1/FastNormals:1
BenchTetrahedralize
BenchVertexClustering/NumDivs:256)
NAME BenchThreshold
REGEX BenchThreshold
)
add_benchmark_test(BenchmarkFilters
NAME BenchTetrahedralize
REGEX BenchTetrahedralize
)
add_benchmark_test(BenchmarkFilters
NAME BenchContour
REGEX
BenchContour/IsStructuredDataSet:1/NIsoVals:12/MergePts:1/GenNormals:0.*/MultiPartitioned:0
BenchContour/IsStructuredDataSet:1/NIsoVals:12/MergePts:0/GenNormals:1/FastNormals:1.*/MultiPartitioned:0
BenchContour/IsStructuredDataSet:0/NIsoVals:12/MergePts:1/GenNormals:0.*/MultiPartitioned:0
BenchContour/IsStructuredDataSet:0/NIsoVals:12/MergePts:0/GenNormals:1/FastNormals:1.*/MultiPartitioned:0
)
add_benchmark_test(BenchmarkFilters
NAME BenchContourPartitioned
ARGS --wavelet-dim=32 --num-partitions=128
REGEX
BenchContour/IsStructuredDataSet:1/NIsoVals:12/MergePts:1/GenNormals:0.*/MultiPartitioned:1
BenchContour/IsStructuredDataSet:1/NIsoVals:12/MergePts:0/GenNormals:1/FastNormals:1.*/MultiPartitioned:1
BenchContour/IsStructuredDataSet:0/NIsoVals:12/MergePts:1/GenNormals:0.*/MultiPartitioned:1
BenchContour/IsStructuredDataSet:0/NIsoVals:12/MergePts:0/GenNormals:1/FastNormals:1.*/MultiPartitioned:1
)
add_benchmark_test(BenchmarkFilters
NAME BenchVertexClustering
REGEX BenchVertexClustering/NumDivs:256
)
if(TARGET vtkm_rendering)
add_benchmark_test(BenchmarkInSitu "BenchContour")
add_benchmark_test(BenchmarkInSitu REGEX "BenchContour")
endif()
endif()

@ -120,6 +120,9 @@ $if(PATCH == 0 and RC == "")\
- [ ] Tag new version of the [VTK-m User Guide][2].
$endif\
- [ ] Post an [Email Announcements](#email-announcements) VTK-m mailing list.
$if(RC == "")\
- [ ] Update this release Date, Delay and End-of-Support fields in `docs/ReleaseRoadmap.md`.
$endif\
$if(PATCH == 0)\
- [ ] Ensure that the content of `version.txt` in master is `$(MAJOR).$(MINOR).9999`.
$endif\

@ -4,7 +4,7 @@
| --------- | ------------ | ------- | ----------- | ---------------- |
| 1.7.0 | 2021-12-01 | +8 | Long Term | 2022-12-01 |
| 1.8.0 | 2022-06-01 | +14 | Long Term | 2023-06-01 |
| 1.9.0 | 2022-09-01 | | Short Term* | TBD |
| 1.9.0 | 2022-09-01 | +41 | Short Term | next release |
| 2.0.0 | 2022-12-01 | | Long Term* | TBD |
| 2.1.0 | 2023-03-01 | | Short Term* | TBD |
| 2.2.0 | 2023-06-01 | | Long Term* | TBD |

@ -0,0 +1,377 @@
VTK-m 2.0 Release Notes
=======================
# Table of Contents
1. [Core](#Core)
- Added modules to the build system
- Remove deprecated features from VTK-m
2. [ArrayHandle](#ArrayHandle)
- Support providing a Token to ReadPortal and WritePortal
3. [Control Environment](#Control-Environment)
- Coordinate systems are stored as Fields
- Check to make sure that the fields in a DataSet are the proper length
- Change name of method to set the cell ghost levels in a DataSet
- Automatically make the fields with the global cell ghosts the cell ghosts
- Particle class members are hidden
- Allow FieldSelection to simultaneously include and exclude fields
- New partitioned cell locator class
- Fix reading global ids of permuted cells
- Setting source parameters is more clear
- Attach compressed ZFP data as WholeDataSet field
4. [Execution Environment](#Execution-Environment)
- Removed ExecutionWholeArray class
5. [Worklets and Filters](#Worklets-and-Filters)
- Correct particle density filter output field
- Rename NewFilter base classes to Filter
- Fix handling of cell fields in Tube filter
6. [Build](#Build)
- More performance test options
- Output complete list of libraries for external Makefiles
- VTK-m namespace for its exported CMake targets
7. [Other](#Other)
- Expose the Variant helper class
- Fix VTKM_LOG_SCOPE
# Core
## Added modules to the build system
VTK-m libraries and other targets can now be built as modules. The
advantage of modules is that you can selectively choose which
modules/libraries will be built. This makes it easy to create a more
stripped down compile of VTK-m. For example, you might want a reduced set
of libraries to save memory or you might want to turn off certain libraries
to save compile time.
The module system will automatically determine dependencies among the
modules. It is capable of weakly turning off a module where it will still
be compiled if needed. Likewise, it is capable of weakly turning on a
module where the build will still work if it cannot be created.
The use of modules is described in the `Modules.md` file in the `docs`
directory of the VTK-m source.
## Remove deprecated features from VTK-m
With the major revision 2.0 of VTK-m, many items previously marked as
deprecated were removed. If updating to a new version of VTK-m, it is
recommended to first update to VTK-m 1.9, which will include the deprecated
features but provide warnings (with the right compiler) that will point to
the replacement code. Once the deprecations have been fixed, updating to
2.0 should be smoother.
# ArrayHandle
## Support providing a Token to ReadPortal and WritePortal
When managing portals in the execution environment, `ArrayHandle` uses the
`Token` object to ensure that the memory associated with a portal exists
for the length of time that it is needed. This is done by creating the
portal with a `Token` object, and the associated portal objects are
guaranteed to be valid while that `Token` object exists. This is supported
by essentially locking the array from further changes.
`Token` objects are typically used when creating a control-side portal with
the `ReadPortal` or `WritePortal`. This is not to say that a `Token` would
not be useful; a control-side portal going out of scope is definitely a
problem. But the creation and destruction of portals in the control
environment is generally too much work for the possible benefits.
However, under certain circumstances it could be useful to use a `Token` to
get a control-side portal. For example, if the `PrepareForExecution` method
of an `ExecutionObjectBase` needs to fill a small `ArrayHandle` on the
control side to pass to the execution side, it would be better to use the
provided `Token` object when doing so. This change allows you to optionally
provide that `Token` when creating these control-side portals.
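For instance, a minimal sketch of filling a small control-side array while holding the provided `Token` (the helper name `FillValues` is illustrative):

```
void FillValues(vtkm::cont::ArrayHandle<vtkm::FloatDefault> array,
                vtkm::cont::Token& token)
{
  // The portal remains valid for the lifetime of 'token', not just
  // for the lifetime of the portal object itself.
  auto portal = array.WritePortal(token);
  for (vtkm::Id i = 0; i < portal.GetNumberOfValues(); ++i)
  {
    portal.Set(i, static_cast<vtkm::FloatDefault>(i));
  }
}
```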
# Control Environment
## Coordinate systems are stored as Fields
Previously, `DataSet` managed `CoordinateSystem`s separately from `Field`s.
However, a `CoordinateSystem` is really just a `Field` with some special
attributes. Thus, coordinate systems are now just listed along with the
rest of the fields, and the coordinate systems are simply strings that
point back to the appropriate field. (This was actually the original
concept for `DataSet`, but the coordinate systems were separated from
fields for some now obsolete reasons.)
This change should not be very noticeable, but there are a few consequences
that should be noted.
1. The `GetCoordinateSystem` methods no longer return a reference to a
`CoordinateSystem` object. This is because the `CoordinateSystem` object
is made on the fly from the field.
2. When mapping fields in filters, the coordinate systems get mapped as
part of this process. This has allowed us to remove some of the special
cases needed to set the coordinate system in the output.
3. If a filter is generating a coordinate system in a special way
(different than mapping other point fields), then it can use the special
`CreateResultCoordinateSystem` method to attach this custom coordinate
system to the output.
4. The `DataSet::GetCoordinateSystems()` method to get a `vector<>` of all
coordinate systems is removed. `DataSet` no longer internally has this
structure. Although it could be built, the only reason for its existence
was to support passing coordinate systems in filters. Now that this is
done automatically, the method is no longer needed.
## Check to make sure that the fields in a DataSet are the proper length
It is possible in a `DataSet` to add a point field (or coordinate system)
that has a different number of points than reported in the cell set.
Likewise for the number of cells in cell fields. This is very bad practice
because it is likely to lead to crashes in worklets that are expecting
arrays of an appropriate length.
Although `DataSet` will still allow this, a warning will be added to the
VTK-m logging to alert users of the inconsistency introduced into the
`DataSet`. Since warnings are by default printed to standard error, users
are likely to see it.
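For illustration, a snippet that would now produce such a warning (the sizes and names are hypothetical):

```
vtkm::cont::DataSet ds = MakeDataSetWith100Points(); // hypothetical helper
vtkm::cont::ArrayHandle<vtkm::FloatDefault> tooShort;
tooShort.Allocate(50);
// Still allowed, but a warning is written to the VTK-m log because the
// field has 50 values while the cell set reports 100 points.
ds.AddPointField("badField", tooShort);
```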
## Change name of method to set the cell ghost levels in a DataSet
Previously, the method was named `AddGhostCellField`. However, only one
ghost cell field can be marked at a time, so `SetGhostCellField` is more
appropriate.
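In code, the rename amounts to the following (assuming `ghostArray` holds the ghost levels):

```
// Before: ds.AddGhostCellField(ghostArray);
ds.SetGhostCellField(ghostArray);
```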
## Automatically make the fields with the global cell ghosts the cell ghosts
Previously, if you added a cell field to a `DataSet` with a name that was
the same as that returned from `GetGlobalCellFieldName`, it was still only
recognized as a normal field. Now, that field is automatically recognized
as the cell ghost levels (unless the global cell field name is changed or
a different field is explicitly set as the cell ghost levels).
## Particle class members are hidden
The member variables of the `vtkm::Particle` classes are now hidden. This
means that external code will not be directly able to access member
variables like `Pos`, `Time`, and `ID`. Instead, these need to be retrieved
and changed through accessor methods.
This follows standard C++ principles. It also helps us future-proof the
classes. It means that we can provide subclasses or alternate forms of
`Particle` that operate differently. It also makes it possible to change
interfaces while maintaining a deprecated interface.
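A sketch of the new usage, assuming accessor names that mirror the old members:

```
vtkm::Particle p;
// Direct member access such as p.Pos or p.Time no longer compiles.
p.SetPosition(vtkm::Vec3f{ 0, 0, 0 }); // was p.Pos = ...
vtkm::FloatDefault t = p.GetTime();    // was p.Time
```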
## Allow FieldSelection to simultaneously include and exclude fields
The basic use of `FieldSelection` is to construct the class with a mode
(`None`, `Any`, `Select`, `Exclude`), and then specify particular fields
based off of this mode. This works fine for basic uses where the same code
that constructs a `FieldSelection` sets all the fields.
But what happens, for example, if you have code that takes an existing
`FieldSelection` and wants to exclude the field named `foo`? If the
`FieldSelection` mode happens to be anything other than `Exclude`, the code
would have to go through several hoops to construct a new `FieldSelection`
object with this modified selection.
To make this case easier, `FieldSelection` now has the ability to specify
the mode independently for each field. The `AddField` method now has an
optional mode argument that specifies whether the mode for that field should
be `Select` or `Exclude`.
In the example above, the code can simply add the `foo` field with the
`Exclude` mode. Regardless of whatever state the `FieldSelection` was in
before, it will now report the `foo` field as not selected.
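For example, given a `selection` obtained from elsewhere:

```
// Whatever mode 'selection' was constructed with, "foo" is now
// reported as not selected.
selection.AddField("foo", vtkm::filter::FieldSelection::Mode::Exclude);
```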
## New partitioned cell locator class
A new version of a locator, `CellLocatorPartitioned`, is now available. This version of a
locator takes a `PartitionedDataSet` and builds a structure that will find the partition Ids and
cell Ids for the input array of locations. It runs CellLocatorGeneral for each partition. We
expect multiple hits and only return the first one (lowest partition Id) where the detected cell
is of type REGULAR (no ghost, not blanked) in the vtkGhostType array. If this array does not
exist in a partition, we assume that all cells are regular.
`vtkm::cont::CellLocatorPartitioned` produces an `ArrayHandle` of the size of the number of
partitions filled with the execution objects of `CellLocatorGeneral`. It further produces an
`ArrayHandle` filled with the `ReadPortal`s of the vtkGhostType arrays to then select the non-blanked
cells from the potentially multiple detected cells on the different partitions. Its counterpart
on the exec side, `vtkm::exec::CellLocatorPartitioned`, contains the actual `FindCell` function.
## Fix reading global ids of permuted cells
The legacy VTK reader sometimes has to permute cell data because some VTK
cells are not directly supported in VTK-m. (For example, triangle strips
are not supported. They have to be converted to triangles.)
The global and pedigree identifiers were not properly getting permuted.
This is now fixed.
## Setting source parameters is more clear
Originally, most of the sources used constructor parameters to set the
various options of the source. Although convenient, it was difficult to
keep track of what each parameter meant. To make the code more clear,
source parameters are now set with accessor functions (e.g.
`SetPointDimensions`). Although this makes code more verbose, it helps
prevent mistakes and makes the changes more resilient to future changes.
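For example, the `PerlinNoise` source is now configured as follows (mirroring the benchmark changes above; the concrete values are illustrative):

```
vtkm::source::PerlinNoise noise;
noise.SetPointDimensions({ 64, 64, 64 });
noise.SetSeed(42);
vtkm::cont::DataSet ds = noise.Execute();
```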
## Attach compressed ZFP data as WholeDataSet field
Previously, point fields compressed by ZFP were attached as point fields
on the output. However, using them as a point field would cause
problems. So, instead they are now attached as `WholeDataSet` fields.
Also fixed a problem where the 1D decompressor created an output of the
wrong size.
# Execution Environment
## Removed ExecutionWholeArray class
`ExecutionWholeArray` is an archaic class in VTK-m that is a thin wrapper
around an array portal. In the early days of VTK-m, this class was used to
transfer whole arrays to the execution environment. However, now the
supported method is to use `WholeArray*` tags in the `ControlSignature` of
a worklet.
Nevertheless, the `WholeArray*` tags caused the array portal transferred to
the worklet to be wrapped inside of an `ExecutionWholeArray` class. This
is unnecessary and can cause confusion about the types of data being used.
Most code is unaffected by this change. Some code that had to work around
the issue of the portal wrapped in another class used the `GetPortal`
method which is no longer needed (for obvious reasons). One extra feature
that `ExecutionWholeArray` had was that it provided a subscript operator
(somewhat incorrectly). Thus, any use of `[..]` to index the array portal
has to be changed to use the `Get` method.
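That is, inside a worklet whose `ControlSignature` uses a `WholeArray*` tag (sketch; `portal` and `index` are assumptions):

```
// Before: auto value = portal[index];
auto value = portal.Get(index);
portal.Set(index, value + 1);
```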
# Worklets and Filters
## Correct particle density filter output field
The field being created by `ParticleDensityNearestGridPoint` was supposed
to be associated with cells, but it was sized to the number of points.
Although the number of points will always be more than the number of cells
(so the array will be big enough), having inappropriately sized arrays can
cause further problems downstream.
## Rename NewFilter base classes to Filter
During the VTK-m 1.8 and 1.9 development, the filter infrastructure was
overhauled. Part of this created a completely new set of base classes. To
avoid confusion with the original filter base classes and ease transition,
the new filter base classes were named `NewFilter*`. Eventually after all
filters were transitioned, the old filter base classes were deprecated.
With the release of VTK-m 2.0, the old filter base classes are removed. The
"new" filter base classes are no longer new. Thus, they have been renamed
simply `Filter` (and `FilterField`).
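Schematically, for filter subclasses (the `DoExecute` signature is shown as an assumption of the current interface):

```
// Before: class MyFilter : public vtkm::filter::NewFilterField
class MyFilter : public vtkm::filter::FilterField
{
protected:
  VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& input) override;
};
```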
## Fix handling of cell fields in Tube filter
The `Tube` filter wraps a tube of polygons around poly line cells.
During this process it had a strange (and wrong) handling of cell data.
It assumed that each line had an independent field entry for each
segment of each line. It thus had lots of extra code to find the length
and offsets of the segment data in the cell data.
This is simply not how cell fields work in VTK-m. In VTK-m, each cell
has exactly one entry in the cell field array. Even if a polyline has
100 segments, it only gets one cell field value. This behavior is
consistent with how VTK treats cell field arrays.
The behavior the `Tube` filter was trying to implement was closer to an
"edge" field. However, edge fields are currently not supported in VTK-m.
The proper implementation would be to add edge fields to VTK-m. (This
would also get around some problems with the implementation that was
removed here when mixing polylines with other cell types and degenerate
lines.)
# Build
## More performance test options
More options are available for adding performance regression tests. These
options allow you to pass custom options to the benchmark test so that you
are not limited to the default values. They also allow multiple tests to be
created from the same benchmark executable. Separating out the benchmarks
allows the null hypothesis testing to better catch performance problems
when only one of the tested filters regresses. It also allows passing
different arguments to different benchmarks.
## Output complete list of libraries for external Makefiles
There is a Makefile include, `vtkm_config.mk`, and a package include,
`vtkm.pc`, that are configured so that external programs that do not use
CMake have a way of importing VTK-m's configuration. However, the set of
libraries was hardcoded. In particular, many of the new filter libraries
were missing.
Rather than try to maintain this list manually, the new module mechanism
in the CMake configuration is used to get a list of libraries built and
automatically build these lists.
## VTK-m namespace for its exported CMake targets
VTK-m exported CMake targets are now prefixed with the `vtkm::` namespace.
### What it means for VTK-m users
VTK-m users will now need to prepend a `vtkm::` prefix when they refer to a
VTK-m CMake target in their projects as shown below:
```
add_executable(example example.cxx)
## Before:
target_link_libraries(example vtkm_cont vtkm_rendering)
## Now:
target_link_libraries(example vtkm::cont vtkm::rendering)
```
For compatibility purposes we still provide additional exported targets with the
previous naming scheme, in the form of `vtkm_TARGET`, when VTK-m is found
using:
```
## With any version less than 2.0
find_package(VTK-m 1.9)
add_executable(example example.cxx)
## This is still valid
target_link_libraries(example vtkm_cont vtkm_rendering)
```
Use with care since we might remove those targets in future releases.
### What it means for VTK-m developers
While VTK-m exported targets are now prefixed with the `vtkm::` prefix, internal
target names are still in the form of `vtkm_TARGET`.
To perform this name transformation in VTK-m targets, a new CMake function has
been provided that decorates the canonical `install` routine. Use this function
instead of `install` when creating new VTK-m targets; further information can
be found in the `vtkm_install_targets` function header in
`CMake/VTKmWrappers.cmake`.
# Other
## Expose the Variant helper class
For several versions, VTK-m has had a `Variant` templated class. This acts
like a templated union where the object will store one of a list of types
specified as the template arguments. (There are actually 2 versions for the
control and execution environments, respectively.)
Because this is a complex class that required several iterations to work
through performance and compiler issues, `Variant` was placed in the
`internal` namespace to avoid complications with backward compatibility.
However, the class has been stable for a while, so let us expose this
helpful tool for wider use.
## Fix VTKM_LOG_SCOPE
The `VTKM_LOG_SCOPE` macro was not working as intended. It was supposed to
print a log message immediately and then print a second log message when
leaving the scope along with the number of seconds that elapsed between the
two messages.
This was not what was happening. The second log message was being printed
immediately after the first. This is because the scope was taken inside of
the `LogScope` method. The macro has been rewritten to put the tracking in
the right scope.
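Usage is unchanged; only the timing behavior is fixed (sketch with a hypothetical `DoExpensiveWork`):

```
{
  VTKM_LOG_SCOPE(vtkm::cont::LogLevel::Info, "expensive section");
  DoExpensiveWork();
} // The second message, with the elapsed seconds, is now emitted here.
```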

@ -1,16 +0,0 @@
# Added modules to the build system
VTK-m libraries and other targets can now be built as modules. The
advantage of modules is that you can selectively choose which
modules/libraries will be built. This makes it easy to create a more
stripped down compile of VTK-m. For example, you might want a reduced set
of libraries to save memory or you might want to turn off certain libraries
to save compile time.
The module system will automatically determine dependencies among the
modules. It is capable of weakly turning off a module where it will still
be compiled if needed. Likewise, it is capable of weakly turning on a
module where the build will still work if it cannot be created.
The use of modules is described in the `Modules.md` file in the `docs`
directory of the VTK-m source.

@ -0,0 +1,3 @@
# New Composite Vector filter
The composite vector filter combines multiple scalar fields into a single vector field. Scalar fields are selected as the active input fields, and the combined vector field is set at the output. The current composite vector filter only supports 2d and 3d scalar field composition. Users may use `vtkm::cont::make_ArrayHandleCompositeVector` to execute a more flexible scalar field composition.
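For reference, the array-level alternative mentioned above looks like this (`xArray`, `yArray`, and `zArray` are hypothetical scalar handles):

```
// Combine three scalar arrays into one Vec3 array without running a filter.
auto vectors = vtkm::cont::make_ArrayHandleCompositeVector(xArray, yArray, zArray);
```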

@ -1,21 +0,0 @@
# Support providing a Token to ReadPortal and WritePortal
When managing portals in the execution environment, `ArrayHandle` uses the
`Token` object to ensure that the memory associated with a portal exists
for the length of time that it is needed. This is done by creating the
portal with a `Token` object, and the associated portal objects are
guaranteed to be valid while that `Token` object exists. This is supported
by essentially locking the array from further changes.
`Token` objects are typically used when creating a control-side portal with
the `ReadPortal` or `WritePortal`. This is not to say that a `Token` would
not be useful; a control-side portal going out of scope is definitely a
problem. But the creation and destruction of portals in the control
environment is generally too much work for the possible benefits.
However, under certain circumstances it could be useful to use a `Token` to
get a control-side portal. For example, if the `PrepareForExecution` method
of an `ExecutionObjectBase` needs to fill a small `ArrayHandle` on the
control side to pass to the execution side, it would be better to use the
provided `Token` object when doing so. This change allows you to optionally
provide that `Token` when creating these control-side portals.

@ -1,29 +0,0 @@
# Coordinate systems are stored as Fields
Previously, `DataSet` managed `CoordinateSystem`s separately from `Field`s.
However, a `CoordinateSystem` is really just a `Field` with some special
attributes. Thus, coordinate systems are now just listed along with the
rest of the fields, and the coordinate systems are simply strings that
point back to the appropriate field. (This was actually the original
concept for `DataSet`, but the coordinate systems were separated from
fields for some now obsolete reasons.)
This change should not be very noticeable, but there are a few consequences
that should be noted.
1. The `GetCoordinateSystem` methods no longer return a reference to a
`CoordinateSystem` object. This is because the `CoordinateSystem` object
is made on the fly from the field.
2. When mapping fields in filters, the coordinate systems get mapped as
part of this process. This has allowed us to remove some of the special
cases needed to set the coordinate system in the output.
3. If a filter is generating a coordinate system in a special way
(different than mapping other point fields), then it can use the special
`CreateResultCoordinateSystem` method to attach this custom coordinate
system to the output.
4. The `DataSet::GetCoordinateSystems()` method to get a `vector<>` of all
coordinate systems is removed. `DataSet` no longer internally has this
structure. Although it could be built, the only reason for its existence
was to support passing coordinate systems in filters. Now that this is
done automatically, the method is no longer needed.

@ -1,7 +0,0 @@
# Delete deprecated features
With the major release of VTK-m 2.0, we are cleaning up the code by
removing deprecated features. For software using VTK-m 1.9, you should have
gotten deprecation warnings about any feature that is removed. Thus, to
ease porting to VTK-m 2.0, software should consider updating to VTK-m 1.9
first.

@ -0,0 +1,10 @@
# Clarify field index ordering in Doxygen
The fields in a `DataSet` are indexed from `0` to `GetNumberOfFields() - 1`.
It is natural to assume that the fields will be indexed in the order that
they are added, but they are not. Rather, the indexing is arbitrary and can
change any time a field is added to the dataset.
To make this more clear, Doxygen documentation is added to the `DataSet`
methods to inform users to not make any assumptions about the order of
field indexing.
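In practice, prefer name lookups over remembered indices (sketch; the field name is hypothetical):

```
// Indices are arbitrary and may change whenever a field is added:
for (vtkm::IdComponent i = 0; i < ds.GetNumberOfFields(); ++i)
{
  std::cout << ds.GetField(i).GetName() << std::endl;
}
// Robust: look the field up by name.
vtkm::cont::Field f = ds.GetField("pointvar");
```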

@ -1,12 +0,0 @@
# Expose the Variant helper class
For several versions, VTK-m has had a `Variant` templated class. This acts
like a templated union where the object will store one of a list of types
specified as the template arguments. (There are actually 2 versions for the
control and execution environments, respectively.)
Because this is a complex class that required several iterations to work
through performance and compiler issues, `Variant` was placed in the
`internal` namespace to avoid complications with backward compatibility.
However, the class has been stable for a while, so let us expose this
helpful tool for wider use.

@ -1,11 +0,0 @@
# Output complete list of libraries for external Makefiles
There is a Makefile include, `vtkm_config.mk`, and a package include,
`vtkm.pc`, that are configured so that external programs that do not use
CMake have a way of importing VTK-m's configuration. However, the set of
libraries was hardcoded. In particular, many of the new filter libraries
were missing.
Rather than try to maintain this list manually, the new module mechanism
in the CMake configuration is used to get a list of libraries built and
automatically build these lists.

@ -1,12 +0,0 @@
# Check to make sure that the fields in a DataSet are the proper length
It is possible in a `DataSet` to add a point field (or coordinate system)
that has a different number of points than reported in the cell set.
Likewise for the number of cells in cell fields. This is very bad practice
because it is likely to lead to crashes in worklets that are expecting
arrays of an appropriate length.
Although `DataSet` will still allow this, a warning will be added to the
VTK-m logging to alert users of the inconsistency introduced into the
`DataSet`. Since warnings are by default printed to standard error, users
are likely to see it.

@ -1,11 +0,0 @@
# Fix VTKM_LOG_SCOPE
The `VTKM_LOG_SCOPE` macro was not working as intended. It was supposed to
print a log message immediately and then print a second log message when
leaving the scope along with the number of seconds that elapsed between the
two messages.
This was not what was happening. The second log message was being printed
immediately after the first. This is because the scope was taken inside of
the `LogScope` method. The macro has been rewritten to put the tracking in
the right scope.

@ -1,5 +0,0 @@
# Change name of method to set the cell ghost levels in a DataSet
Previously, the method was named `AddGhostCellField`. However, only one
ghost cell field can be marked at a time, so `SetGhostCellField` is more
appropriate.

@ -1,7 +0,0 @@
# Automatically make the fields with the global cell ghosts the cell ghosts
Previously, if you added a cell field to a `DataSet` with a name that was
the same as that returned from `GetGlobalCellFieldName`, it was still only
recognized as a normal field. Now, that field is automatically recognized
as the cell ghost levels (unless the global cell field name is changed or
a different field is explicitly set as the cell ghost levels).

@ -1,21 +0,0 @@
# Allow FieldSelection to simultaneously include and exclude fields
The basic use of `FieldSelection` is to construct the class with a mode
(`None`, `Any`, `Select`, `Exclude`), and then specify particular fields
based off of this mode. This works fine for basic uses where the same code
that constructs a `FieldSelection` sets all the fields.
But what happens, for example, if you have code that takes an existing
`FieldSelection` and wants to exclude the field named `foo`? If the
`FieldSelection` mode happens to be anything other than `Exclude`, the code
would have to go through several hoops to construct a new `FieldSelection`
object with this modified selection.
To make this case easier, `FieldSelection` now has the ability to specify
the mode independently for each field. The `AddField` method now has an
optional mode argument that specifies whether the mode for that field should
be `Select` or `Exclude`.
In the example above, the code can simply add the `foo` field with the
`Exclude` mode. Regardless of whatever state the `FieldSelection` was in
before, it will now report the `foo` field as not selected.

@ -0,0 +1,27 @@
# Update filters' field map and execute to work on any field type
Several filters implemented their map field by checking for common field
types and interpolated those. Although there was a float fallback to catch
odd component types, there were still a couple of issues. First, it meant
that several types got converted to `vtkm::FloatDefault`, which is often at
odds with how VTK handles it. Second, it does not handle all `Vec` lengths,
so it is still possible to drop fields.
The map field functions for these filters have been changed to support all
possible types. This is done by using the extract component functionality
to get data from any type of array. The following filters have been
updated. In some circumstances where it makes sense, a simple float
fallback is used.
* `CleanGrid`
* `CellAverage`
* `ClipWithField`
* `ClipWithImplicitFunction`
* `Contour`
* `MIRFilter`
* `NDHistogram`
* `ParticleDensityCloudInCell`
* `ParticleDensityNearestGridPoint`
* `PointAverage`
* `Probe`
* `VectorMagnitude`

@ -1,18 +0,0 @@
# Removed ExecutionWholeArray class
`ExecutionWholeArray` is an archaic class in VTK-m that is a thin wrapper
around an array portal. In the early days of VTK-m, this class was used to
transfer whole arrays to the execution environment. However, now the
supported method is to use `WholeArray*` tags in the `ControlSignature` of
a worklet.
Nevertheless, the `WholeArray*` tags caused the array portal transferred to
the worklet to be wrapped inside of an `ExecutionWholeArray` class. This
is unnecessary and can cause confusion about the types of data being used.
Most code is unaffected by this change. Some code that had to work around
the issue of the portal wrapped in another class used the `GetPortal`
method which is no longer needed (for obvious reasons). One extra feature
that `ExecutionWholeArray` had was that it provided a subscript operator
(somewhat incorrectly). Thus, any use of `[..]` to index the array portal
has to be changed to use the `Get` method.

@ -0,0 +1,36 @@
# Support using arrays with dynamic Vec-likes as output arrays
When you use an `ArrayHandle` as an output array in a worklet (for example,
as a `FieldOut`), the fetch operation does not read values from the array
during the `Load`. Instead, it just constructs a new object. This makes
sense as an output array is expected to have garbage in it anyway.
This is a problem for some special arrays that contain `Vec`-like objects
that are sized dynamically. For example, if you use an
`ArrayHandleGroupVecVariable`, each entry is a dynamically sized `Vec`. The
array is referenced by creating a special version of `Vec` that holds a
reference to the array portal and an index. Components are retrieved and
set by accessing the memory in the array portal. This allows us to have a
dynamically sized `Vec` in the execution environment without having to
allocate within the worklet.
The problem comes when we want to use one of these arrays with `Vec`-like
objects for an output. The typical fetch fails because you cannot construct
one of these `Vec`-like objects without an array portal to bind it to. In
these cases, we need the fetch to create the `Vec`-like object by reading
it from the array. Even though the data will be garbage, you get the
necessary buffer into the array (and nothing more).
Previously, the problem was fixed by creating partial specializations of
the `Fetch` for these `ArrayHandle`s. This worked OK as long as you were
using the array directly. However, the approach failed if the `ArrayHandle`
was wrapped in another `ArrayHandle` (for example, if an `ArrayHandleView`
was applied to an `ArrayHandleGroupVecVariable`).
To get around this problem and simplify things, the basic `Fetch` for
direct output arrays is changed to handle all cases where the values in the
`ArrayHandle` cannot be directly constructed. A compile-time check of the
array's value type is checked with `std::is_default_constructible`. If it
can be constructed, then the array is not accessed. If it cannot be
constructed, then it grabs a value out of the array.
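For example, a view over a group-vec-variable array can now serve directly as an output (sketch; `MyWorklet`, `inputDomain`, `components`, `offsets`, and `numGroups` are assumptions):

```
auto groups = vtkm::cont::make_ArrayHandleGroupVecVariable(components, offsets);
// Wrapping the array in another fancy array no longer breaks the output Fetch:
auto view = vtkm::cont::make_ArrayHandleView(groups, 0, numGroups);
invoker(MyWorklet{}, inputDomain, view); // bound to a FieldOut parameter
```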

@ -1,7 +0,0 @@
# Correct particle density filter output field
The field being created by `ParticleDensityNearestGridPoint` was supposed
to be associated with cells, but it was sized to the number of points.
Although the number of points will always be more than the number of cells
(so the array will be big enough), having inappropriately sized arrays can
cause further problems downstream.

@ -1,14 +0,0 @@
# New partitioned cell locator class
A new version of a locator, `CellLocatorPartitioned`, is now available. This version of a
locator takes a `PartitionedDataSet` and builds a structure that will find the partition Ids and
cell Ids for the input array of locations. It runs CellLocatorGeneral for each partition. We
expect multiple hits and only return the first one (lowest partition Id) where the detected cell
is of type REGULAR (no ghost, not blanked) in the vtkGhostType array. If this array does not
exist in a partition, we assume that all cells are regular.
`vtkm::cont::CellLocatorPartitioned` produces an `ArrayHandle` of the size of the number of
partitions filled with the execution objects of `CellLocatorGeneral`. It further produces an
`ArrayHandle` filled with the `ReadPortal`s of the vtkGhostType arrays to then select the non-blanked
cells from the potentially multiple detected cells on the different partitions. Its counterpart
on the exec side, `vtkm::exec::CellLocatorPartitioned`, contains the actual `FindCell` function.

@ -1,8 +0,0 @@
# Fix reading global ids of permuted cells
The legacy VTK reader sometimes has to permute cell data because some VTK
cells are not directly supported in VTK-m. (For example, triangle strips
are not supported. They have to be converted to triangles.)
The global and pedigree identifiers were not properly getting permuted.
This is now fixed.

@ -1,8 +0,0 @@
# Remove deprecated features from VTK-m
With the major revision 2.0 of VTK-m, many items previously marked as
deprecated were removed. If updating to a new version of VTK-m, it is
recommended to first update to VTK-m 1.9, which will include the deprecated
features but provide warnings (with the right compiler) that will point to
the replacement code. Once the deprecations have been fixed, updating to
2.0 should be smoother.

@ -0,0 +1,4 @@
# Require Kokkos 3.7
The minimum version of Kokkos supported is now set to Kokkos 3.7. This is
to synchronize with the development of the Kokkos team.

@ -0,0 +1,18 @@
# Added ability to resize strided arrays from ArrayExtractComponent
Previously, it was not possible to resize an `ArrayHandleStride` because
the operation is a bit ambiguous. The actual array is likely to be padded
by some amount, and there could be an unknown amount of space skipped at
the beginning.
However, there is a good reason to want to resize `ArrayHandleStride`. This
is the array used to implement the `ArrayExtractComponent` feature, and
this in turn is used when extracting arrays from an `UnknownArrayHandle`
whether independent or as an `ArrayHandleRecombineVec`.
The problem really happens when you create an array of an unknown type in
an `UnknownArrayHandle` (such as with `NewInstance`) and then use that as
an output to a worklet. Sure, you could use `ArrayHandle::Allocate` to
resize before getting the array, but that is awkward for programmers.
Instead, allow the extracted arrays to be resized as normal output arrays
would be.
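A sketch of the motivating case (`invoker` and `CopyField` are hypothetical):

```
// 'input' is a vtkm::cont::UnknownArrayHandle of unknown value type.
vtkm::cont::UnknownArrayHandle output = input.NewInstanceBasic();
// No output.Allocate(...) needed anymore: the extracted strided arrays
// behave like normal output arrays and are resized on use.
invoker(CopyField{},
        input.ExtractArrayFromComponents<vtkm::FloatDefault>(),
        output.ExtractArrayFromComponents<vtkm::FloatDefault>());
```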

@ -0,0 +1,3 @@
# New Statistics filter
The statistics filter computes the descriptive statistics of the fields specified by users based on `DescriptiveStatistics`. Users can set `RequiredStatsList` to specify which statistics will be stored in the output data set.
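The underlying worklet can also be invoked directly; a minimal sketch, assuming `fieldArray` is an `ArrayHandle` of scalars:

```
#include <vtkm/worklet/DescriptiveStatistics.h>
// Computes count, mean, variance, and related moments in a single pass.
auto stats = vtkm::worklet::DescriptiveStatistics::Run(fieldArray);
vtkm::Float64 mean = stats.Mean();
```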

@ -1,19 +0,0 @@
# Fix handling of cell fields in Tube filter
The `Tube` filter wraps a tube of polygons around poly line cells.
During this process it had a strange (and wrong) handling of cell data.
It assumed that each line had an independent field entry for each
segment of each line. It thus had lots of extra code to find the length
and offsets of the segment data in the cell data.
This is simply not how cell fields work in VTK-m. In VTK-m, each cell
has exactly one entry in the cell field array. Even if a polyline has
100 segments, it only gets one cell field value. This behavior is
consistent with how VTK treats cell field arrays.
The behavior the `Tube` filter was trying to implement was closer to an
"edge" field. However, edge fields are currently not supported in VTK-m.
The proper implementation would be to add edge fields to VTK-m. (This
would also get around some problems with the implementation that was
removed here when mixing polylines with other cell types and degenerate
lines.)

@ -1,8 +0,0 @@
# Attach compressed ZFP data as WholeDataSet field
Previously, point fields compressed by ZFP were attached as point fields
on the output. However, using them as a point field would cause
problems. So, instead they are now attached as `WholeDataSet` fields.
Also fixed a problem where the 1D decompressor created an output of the
wrong size.

@ -8,10 +8,27 @@
## PURPOSE. See the above copyright notice for more information.
##============================================================================
#add the directory that contains the VTK-m config file to the cmake
#path so that our examples can find VTK-m
if(VTKm_ENABLE_EXAMPLES)
# VTK-m examples expect vtkm libraries to be namespaced with the prefix vtkm::.
# However, as the examples are also built as just another part of the VTK-m code,
# those prefixes are not added to the targets (this happens during the
# installation). To work around this issue, we create IMPORTED libs linking
# to the vtkm libraries used by the examples with the expected vtkm:: prefix.
vtkm_module_get_list(module_list)
foreach(tgt IN LISTS module_list)
if(TARGET ${tgt})
# The reason for creating these phony IMPORTED libraries instead of making
# ALIAS libraries is that ALIAS libraries are GLOBAL, whereas IMPORTED libraries
# are local to the directory level where they are created. We do not want these
# phony targets to be visible outside of the example directory.
vtkm_target_mangle(tgt_name_mangled ${tgt})
add_library("vtkm::${tgt_name_mangled}" INTERFACE IMPORTED)
target_link_libraries("vtkm::${tgt_name_mangled}" INTERFACE ${tgt})
endif()
endforeach()
#add the directory that contains the VTK-m config file to the cmake
#path so that our examples can find VTK-m
set(CMAKE_PREFIX_PATH ${VTKm_BINARY_DIR}/${VTKm_INSTALL_CONFIG_DIR})
add_subdirectory(clipping)
add_subdirectory(contour_tree)

@ -13,7 +13,7 @@ project(Clipping CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_filter_contour AND TARGET vtkm_io)
if(TARGET vtkm::filter_contour AND TARGET vtkm::io)
add_executable(Clipping Clipping.cxx)
target_link_libraries(Clipping PRIVATE vtkm_filter_contour vtkm_io)
target_link_libraries(Clipping PRIVATE vtkm::filter_contour vtkm::io)
endif()

@ -13,10 +13,10 @@ project(ContourTree CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if (TARGET vtkm_filter_scalar_topology)
if (TARGET vtkm::filter_scalar_topology)
add_executable(ContourTreeMesh2D ContourTreeMesh2D.cxx)
target_link_libraries(ContourTreeMesh2D vtkm_filter_core vtkm_filter_scalar_topology)
target_link_libraries(ContourTreeMesh2D vtkm::filter_core vtkm::filter_scalar_topology)
add_executable(ContourTreeMesh3D ContourTreeMesh3D.cxx)
target_link_libraries(ContourTreeMesh3D vtkm_filter_core vtkm_filter_scalar_topology)
target_link_libraries(ContourTreeMesh3D vtkm::filter_core vtkm::filter_scalar_topology)
endif()

@ -55,7 +55,7 @@ cmake_minimum_required(VERSION 3.12...3.15 FATAL_ERROR)
# Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(NOT TARGET vtkm_io OR NOT TARGET vtkm_filter_scalar_topology)
if(NOT TARGET vtkm::io OR NOT TARGET vtkm::filter_scalar_topology)
# Libraries needed are not built.
return()
endif()
@ -64,7 +64,7 @@ endif()
# Serial
####################################
add_executable(ContourTree_Augmented ContourTreeApp.cxx)
target_link_libraries(ContourTree_Augmented vtkm_filter_scalar_topology vtkm_io)
target_link_libraries(ContourTree_Augmented vtkm::filter_scalar_topology vtkm::io)
vtkm_add_target_information(ContourTree_Augmented
DROP_UNUSED_SYMBOLS MODIFY_CUDA_FLAGS
DEVICE_SOURCES ContourTreeApp.cxx)
@ -84,7 +84,7 @@ endif()
####################################
if (VTKm_ENABLE_MPI)
add_executable(ContourTree_Augmented_MPI ContourTreeApp.cxx)
target_link_libraries(ContourTree_Augmented_MPI vtkm_filter_scalar_topology vtkm_io MPI::MPI_CXX)
target_link_libraries(ContourTree_Augmented_MPI vtkm::filter_scalar_topology vtkm::io MPI::MPI_CXX)
vtkm_add_target_information(ContourTree_Augmented_MPI
MODIFY_CUDA_FLAGS
DEVICE_SOURCES ContourTreeApp.cxx)

@ -58,9 +58,9 @@ find_package(VTKm REQUIRED QUIET)
####################################
# MPI
####################################
if (VTKm_ENABLE_MPI AND TARGET vtkm_filter_scalar_topology AND TARGET vtkm_io)
add_executable(ContourTree_Distributed ContourTreeApp.cxx)
target_link_libraries(ContourTree_Distributed vtkm_filter_scalar_topology vtkm_io MPI::MPI_CXX)
if (VTKm_ENABLE_MPI AND TARGET vtkm::filter_scalar_topology AND TARGET vtkm::io)
add_executable(ContourTree_Distributed ContourTreeApp.cxx ContourTreeAppDataIO.h)
target_link_libraries(ContourTree_Distributed vtkm::filter_scalar_topology vtkm::io MPI::MPI_CXX)
vtkm_add_target_information(ContourTree_Distributed
MODIFY_CUDA_FLAGS
DEVICE_SOURCES ContourTreeApp.cxx)
@ -76,13 +76,18 @@ if (VTKm_ENABLE_MPI AND TARGET vtkm_filter_scalar_topology AND TARGET vtkm_io)
if (TARGET vtkm::tbb)
target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_SET_NUM_THREADS")
endif()
if (VTKm_ENABLE_HDF5_IO)
target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_HDFIO")
target_include_directories(ContourTree_Distributed PRIVATE ${HDF5_INCLUDE_DIR})
target_link_libraries(ContourTree_Distributed ${HDF5_LIBRARIES})
endif ()
add_executable(TreeCompiler TreeCompilerApp.cxx)
target_link_libraries(TreeCompiler vtkm_filter_core)
target_link_libraries(TreeCompiler vtkm::filter_core)
vtkm_add_target_information(TreeCompiler DROP_UNUSED_SYMBOLS)
add_executable(BranchCompiler BranchCompilerApp.cxx)
target_link_libraries(BranchCompiler vtkm_filter)
target_link_libraries(BranchCompiler vtkm::filter_scalar_topology)
vtkm_add_target_information(BranchCompiler DROP_UNUSED_SYMBOLS)
configure_file(split_data_2d.py split_data_2d.py COPYONLY)

@ -64,13 +64,14 @@
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/DataSet.h>
#include <vtkm/cont/DataSetBuilderUniform.h>
#include <vtkm/cont/DataSetFieldAdd.h>
#include <vtkm/cont/DeviceAdapterTag.h>
#include <vtkm/cont/Initialize.h>
#include <vtkm/cont/RuntimeDeviceTracker.h>
#include <vtkm/cont/Timer.h>
#include <vtkm/io/BOVDataSetReader.h>
#include "ContourTreeAppDataIO.h"
#include <vtkm/filter/scalar_topology/ContourTreeUniformDistributed.h>
#include <vtkm/filter/scalar_topology/DistributedBranchDecompositionFilter.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/HierarchicalVolumetricBranchDecomposer.h>
@ -98,7 +99,7 @@ VTKM_THIRDPARTY_POST_INCLUDE
#include <utility>
#include <vector>
using ValueType = vtkm::Float64;
using ValueType = vtkm::Float32;
#define SINGLE_FILE_STDOUT_STDERR
@ -272,6 +273,48 @@ int main(int argc, char* argv[])
}
}
#ifdef ENABLE_HDFIO
std::string dataset_name = "data";
if (parser.hasOption("--dataset"))
{
dataset_name = parser.getOption("--dataset");
}
vtkm::Id3 blocksPerDimIn(1, 1, size);
if (parser.hasOption("--blocksPerDim"))
{
std::string temp = parser.getOption("--blocksPerDim");
if (std::count(temp.begin(), temp.end(), ',') != 2)
{
std::cerr << "Invalid --blocksPerDim option. Expected string of the form 'x,y,z' got" << temp
<< std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
char* tempC = (char*)temp.c_str();
blocksPerDimIn[0] = std::stoi(std::strtok(tempC, ","));
blocksPerDimIn[1] = std::stoi(std::strtok(nullptr, ","));
blocksPerDimIn[2] = std::stoi(std::strtok(nullptr, ","));
}
vtkm::Id3 selectSize(-1, -1, -1);
if (parser.hasOption("--selectSize"))
{
std::string temp = parser.getOption("--selectSize");
if (std::count(temp.begin(), temp.end(), ',') != 2)
{
std::cerr << "Invalid --selectSize option. Expected string of the form 'x,y,z' got" << temp
<< std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
char* tempC = (char*)temp.c_str();
selectSize[0] = std::stoi(std::strtok(tempC, ","));
selectSize[1] = std::stoi(std::strtok(nullptr, ","));
selectSize[2] = std::stoi(std::strtok(nullptr, ","));
}
#endif
if (argc < 2 || parser.hasOption("--help") || parser.hasOption("-h"))
{
if (rank == 0)
@ -307,11 +350,24 @@ int main(int argc, char* argv[])
<< " computation (Default=False). " << std::endl;
std::cout << "--saveOutputData Save data files with hierarchical tree or volume data"
<< std::endl;
std::cout << "--numBlocks Number of blocks to use during computation "
std::cout << "--numBlocks Number of blocks to use during computation. (Sngle block "
"ASCII/BOV file reader only)"
<< "(Default=number of MPI ranks.)" << std::endl;
std::cout << "--forwardSummary Forward the summary timings also to the per-rank " << std::endl
<< " log files. Default is to round-robin print the " << std::endl
<< " summary instead" << std::endl;
#ifdef ENABLE_HDFIO
std::cout << "--dataset Name of the dataset to load (HDF5 reader only)(Default=data)"
<< std::endl;
std::cout << "--blocksPerDim Number of blocks to split the data into. This is a string of "
"the form 'x,y,z'."
"(HDF5 reader only)(Default='1,1,#ranks')"
<< std::endl;
std::cout
<< "--selectSize Size of the subblock to read. This is a string of the form 'x,y,z'."
"(HDF5 reader only)(Default='-1,-1,-1')"
<< std::endl;
#endif
std::cout << std::endl;
}
MPI_Finalize();
@ -334,7 +390,15 @@ int main(int argc, char* argv[])
<< computeHierarchicalVolumetricBranchDecomposition << std::endl
<< " saveOutputData=" << saveOutputData << std::endl
<< " forwardSummary=" << forwardSummary << std::endl
<< " nblocks=" << numBlocks << std::endl);
<< " nblocks=" << numBlocks << std::endl
#ifdef ENABLE_HDFIO
<< " dataset=" << dataset_name << " (HDF5 only)" << std::endl
<< " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << ","
<< blocksPerDimIn[2] << " (HDF5 only)" << std::endl
<< " selectSize=" << selectSize[0] << "," << selectSize[1] << ","
<< selectSize[2] << " (HDF5 only)" << std::endl
#endif
);
}
// Redirect stdout to file if we are using MPI with Debugging
@ -376,6 +440,30 @@ int main(int argc, char* argv[])
return 255;
}
VTKM_LOG_S(exampleLogLevel,
std::endl
<< " ------------ Settings -----------" << std::endl
<< " filename=" << filename << std::endl
<< " preSplitFiles=" << preSplitFiles << std::endl
<< " device=" << device.GetName() << std::endl
<< " mc=" << useMarchingCubes << std::endl
<< " useFullBoundary=" << !useBoundaryExtremaOnly << std::endl
<< " saveDot=" << saveDotFiles << std::endl
<< " saveOutputData=" << saveOutputData << std::endl
<< " forwardSummary=" << forwardSummary << std::endl
<< " numBlocks=" << numBlocks << std::endl
<< " augmentHierarchicalTree=" << augmentHierarchicalTree << std::endl
<< " numRanks=" << size << std::endl
<< " rank=" << rank << std::endl
#ifdef ENABLE_HDFIO
<< " dataset=" << dataset_name << " (HDF5 only)" << std::endl
<< " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << ","
<< blocksPerDimIn[2] << " (HDF5 only)" << std::endl
<< " selectSize=" << selectSize[0] << "," << selectSize[1] << "," << selectSize[2]
<< " (HDF5 only)" << std::endl
#endif
);
// Measure our time for startup
currTime = totalTime.GetElapsedTime();
vtkm::Float64 startUpTime = currTime - prevTime;
@ -405,429 +493,88 @@ int main(int argc, char* argv[])
auto localBlockIndicesPortal = localBlockIndices.WritePortal();
// Read the pre-split data files
bool readOk = true;
if (preSplitFiles)
{
for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo)
{
// Translate pattern into filename for this block
char block_filename[256];
snprintf(block_filename,
sizeof(block_filename),
filename.c_str(),
static_cast<int>(rank * blocksPerRank + blockNo));
std::cout << "Reading file " << block_filename << std::endl;
// Open file
std::ifstream inFile(block_filename);
if (!inFile.is_open() || inFile.bad())
{
std::cerr << "Error: Couldn't open file " << block_filename << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
// Read header with dimensions
std::string line;
std::string tag;
vtkm::Id dimVertices;
getline(inFile, line);
std::istringstream global_extents_stream(line);
global_extents_stream >> tag;
if (tag != "#GLOBAL_EXTENTS")
{
std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
std::vector<vtkm::Id> global_extents;
while (global_extents_stream >> dimVertices)
global_extents.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row, dims[2] ->slice
std::swap(global_extents[0], global_extents[1]);
if (blockNo == 0)
{ // First block: Set globalSize
globalSize =
vtkm::Id3{ static_cast<vtkm::Id>(global_extents[0]),
static_cast<vtkm::Id>(global_extents[1]),
static_cast<vtkm::Id>(global_extents.size() > 2 ? global_extents[2] : 1) };
}
else
{ // All other blocks: Consistency check of globalSize
if (globalSize !=
vtkm::Id3{ static_cast<vtkm::Id>(global_extents[0]),
static_cast<vtkm::Id>(global_extents[1]),
static_cast<vtkm::Id>(global_extents.size() > 2 ? global_extents[2] : 1) })
{
std::cerr << "Error: Global extents mismatch between blocks!" << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
}
getline(inFile, line);
std::istringstream offset_stream(line);
offset_stream >> tag;
if (tag != "#OFFSET")
{
std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
std::vector<vtkm::Id> offset;
while (offset_stream >> dimVertices)
offset.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row, dims[2] ->slice
std::swap(offset[0], offset[1]);
getline(inFile, line);
std::istringstream bpd_stream(line);
bpd_stream >> tag;
if (tag != "#BLOCKS_PER_DIM")
{
std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
std::vector<vtkm::Id> bpd;
while (bpd_stream >> dimVertices)
bpd.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row, dims[2] ->slice
std::swap(bpd[0], bpd[1]);
getline(inFile, line);
std::istringstream blockIndex_stream(line);
blockIndex_stream >> tag;
if (tag != "#BLOCK_INDEX")
{
std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
std::vector<vtkm::Id> blockIndex;
while (blockIndex_stream >> dimVertices)
blockIndex.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row, dims[2] ->slice
std::swap(blockIndex[0], blockIndex[1]);
getline(inFile, line);
std::istringstream linestream(line);
std::vector<vtkm::Id> dims;
while (linestream >> dimVertices)
{
dims.push_back(dimVertices);
}
if (dims.size() != global_extents.size() || dims.size() != offset.size())
{
std::cerr << "Error: Dimension mismatch" << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row, dims[2] ->slice
std::swap(dims[0], dims[1]);
// Compute the number of vertices, i.e., xdim * ydim * zdim
nDims = static_cast<unsigned short>(dims.size());
std::size_t numVertices = static_cast<std::size_t>(
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>()));
// Check for fatal input errors
// Check that the number of dimensions is either 2 (2D) or 3 (3D)
bool invalidNumDimensions = (nDims < 2 || nDims > 3);
// Log any errors if found on rank 0
VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error,
invalidNumDimensions && (rank == 0),
"The input mesh is " << nDims
<< "D. "
"The input data must be either 2D or 3D.");
// If we found any errors in the settings, then finalize MPI and exit the execution
if (invalidNumDimensions)
{
MPI_Finalize();
return EXIT_FAILURE;
}
// Read data
std::vector<ValueType> values(numVertices);
if (filename.compare(filename.length() - 5, 5, ".bdem") == 0)
{
inFile.read(reinterpret_cast<char*>(values.data()),
static_cast<std::streamsize>(numVertices * sizeof(ValueType)));
}
else
{
for (std::size_t vertex = 0; vertex < numVertices; ++vertex)
{
inFile >> values[vertex];
}
}
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
// Create vtk-m data set
vtkm::cont::DataSetBuilderUniform dsb;
vtkm::cont::DataSet ds;
if (nDims == 2)
{
const vtkm::Id2 v_dims{
static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
};
const vtkm::Vec<ValueType, 2> v_origin{ static_cast<ValueType>(offset[0]),
static_cast<ValueType>(offset[1]) };
const vtkm::Vec<ValueType, 2> v_spacing{ 1, 1 };
ds = dsb.Create(v_dims, v_origin, v_spacing);
vtkm::cont::CellSetStructured<2> cs;
cs.SetPointDimensions(v_dims);
cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] });
cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] });
ds.SetCellSet(cs);
}
else
{
VTKM_ASSERT(nDims == 3);
const vtkm::Id3 v_dims{ static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
static_cast<vtkm::Id>(dims[2]) };
const vtkm::Vec<ValueType, 3> v_origin{ static_cast<ValueType>(offset[0]),
static_cast<ValueType>(offset[1]),
static_cast<ValueType>(offset[2]) };
vtkm::Vec<ValueType, 3> v_spacing(1, 1, 1);
ds = dsb.Create(v_dims, v_origin, v_spacing);
vtkm::cont::CellSetStructured<3> cs;
cs.SetPointDimensions(v_dims);
cs.SetGlobalPointDimensions(globalSize);
cs.SetGlobalPointIndexStart(vtkm::Id3{ offset[0], offset[1], offset[2] });
ds.SetCellSet(cs);
}
ds.AddPointField("values", values);
// and add to partition
useDataSet.AppendPartition(ds);
localBlockIndicesPortal.Set(
blockNo,
vtkm::Id3{ static_cast<vtkm::Id>(blockIndex[0]),
static_cast<vtkm::Id>(blockIndex[1]),
static_cast<vtkm::Id>(nDims == 3 ? blockIndex[2] : 0) });
if (blockNo == 0)
{
blocksPerDim = vtkm::Id3{ static_cast<vtkm::Id>(bpd[0]),
static_cast<vtkm::Id>(bpd[1]),
static_cast<vtkm::Id>(nDims == 3 ? bpd[2] : 1) };
}
}
// Print the mesh metadata
if (rank == 0)
{
VTKM_LOG_S(exampleLogLevel,
std::endl
<< " ---------------- Input Mesh Properties --------------" << std::endl
<< " Number of dimensions: " << nDims << std::endl);
}
readOk = readPreSplitFiles<ValueType>(
// inputs
rank,
filename,
blocksPerRank,
// outputs
nDims,
useDataSet,
globalSize,
blocksPerDim,
localBlockIndices,
// output timers
dataReadTime,
buildDatasetTime);
}
// Read single-block data and split it for the ranks
else
{
vtkm::cont::DataSet inDataSet;
// Currently FloatDefault would be fine, but it could cause problems if we ever
// read binary files here.
std::vector<ValueType> values;
std::vector<vtkm::Id> dims;
// Read BOV data file
if (filename.compare(filename.length() - 3, 3, "bov") == 0)
bool isHDF5 = (0 == filename.compare(filename.length() - 3, 3, ".h5"));
if (isHDF5)
{
std::cout << "Reading BOV file" << std::endl;
vtkm::io::BOVDataSetReader reader(filename);
inDataSet = reader.ReadDataSet();
nDims = 3;
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
// Copy the data into the values array so we can construct a multiblock dataset
// TODO All we should need to do to implement BOV support is to copy the values
// in the values vector and copy the dimensions in the dims vector
vtkm::Id3 pointDimensions;
auto cellSet = inDataSet.GetCellSet();
vtkm::cont::CastAndCall(
cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions);
std::cout << "Point dimensions are " << pointDimensions << std::endl;
dims.resize(3);
dims[0] = pointDimensions[0];
dims[1] = pointDimensions[1];
dims[2] = pointDimensions[2];
auto tempFieldData = inDataSet.GetField(0).GetData();
values.resize(static_cast<std::size_t>(tempFieldData.GetNumberOfValues()));
auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off);
vtkm::cont::ArrayCopy(tempFieldData, valuesHandle);
valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU
#ifdef ENABLE_HDFIO
blocksPerDim = blocksPerDimIn;
readOk = read3DHDF5File<ValueType>(
// inputs (blocksPerDim is modified to swap dimensions to match our re-ordering of the dimensions)
rank,
filename,
dataset_name,
blocksPerRank,
blocksPerDim,
selectSize,
// outputs
nDims,
useDataSet,
globalSize,
localBlockIndices,
// output timers
dataReadTime,
buildDatasetTime);
#else
VTKM_LOG_S(vtkm::cont::LogLevel::Error,
"Can't read HDF5 file. HDF5 reader disabled for this build.");
readOk = false;
#endif
}
// Read ASCII data input
else
{
std::cout << "Reading ASCII file" << std::endl;
std::ifstream inFile(filename);
if (inFile.bad())
return EXIT_FAILURE;
// Read the dimensions of the mesh, i.e., the number of elements in x, y, and z
std::string line;
getline(inFile, line);
std::istringstream linestream(line);
vtkm::Id dimVertices;
while (linestream >> dimVertices)
{
dims.push_back(dimVertices);
}
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row, dims[2] ->slice
std::swap(dims[0], dims[1]);
// Compute the number of vertices, i.e., xdim * ydim * zdim
nDims = static_cast<unsigned short>(dims.size());
std::size_t numVertices = static_cast<std::size_t>(
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>()));
// Check that the number of dimensions is either 2 (2D) or 3 (3D)
bool invalidNumDimensions = (nDims < 2 || nDims > 3);
// Log any errors if found on rank 0
VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error,
invalidNumDimensions && (rank == 0),
"The input mesh is " << nDims << "D. The input data must be either 2D or 3D.");
// If we found any errors in the settings, then finalize MPI and exit the execution
if (invalidNumDimensions)
{
MPI_Finalize();
return EXIT_FAILURE;
}
// Read data
values.resize(numVertices);
for (std::size_t vertex = 0; vertex < numVertices; ++vertex)
{
inFile >> values[vertex];
}
// finish reading the data
inFile.close();
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
} // END ASCII Read
// Print the mesh metadata
if (rank == 0)
{
VTKM_LOG_S(exampleLogLevel,
std::endl
<< " ---------------- Input Mesh Properties --------------" << std::endl
<< " Number of dimensions: " << nDims);
readOk = readSingleBlockFile<ValueType>(
// inputs
rank,
size,
filename,
numBlocks,
blocksPerRank,
// outputs
nDims,
useDataSet,
globalSize,
blocksPerDim,
localBlockIndices,
// output timers
dataReadTime,
buildDatasetTime);
}
}
if (!readOk)
{
VTKM_LOG_S(vtkm::cont::LogLevel::Error, "Data read failed.");
MPI_Finalize();
return EXIT_FAILURE;
}
// Create a multi-block dataset for multi-block DIY-parallel processing
blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks)
: vtkm::Id3(1, numBlocks, 1); // Decompose the data into blocks along the slowest-varying dimension
globalSize = nDims == 3 ? vtkm::Id3(static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
static_cast<vtkm::Id>(dims[2]))
: vtkm::Id3(static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
static_cast<vtkm::Id>(1));
{
vtkm::Id lastDimSize =
(nDims == 2) ? static_cast<vtkm::Id>(dims[1]) : static_cast<vtkm::Id>(dims[2]);
if (size > (lastDimSize / 2.))
{
VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error,
rank == 0,
"Number of ranks too large for data. Use " << lastDimSize / 2
<< "or fewer ranks");
MPI_Finalize();
return EXIT_FAILURE;
}
vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks);
vtkm::Id blockSize = standardBlockSize;
vtkm::Id blockSliceSize =
nDims == 2 ? static_cast<vtkm::Id>(dims[0]) : static_cast<vtkm::Id>((dims[0] * dims[1]));
vtkm::Id blockNumValues = blockSize * blockSliceSize;
vtkm::Id startBlock = blocksPerRank * rank;
vtkm::Id endBlock = startBlock + blocksPerRank;
for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex)
{
vtkm::Id localBlockIndex = blockIndex - startBlock;
vtkm::Id blockStart = blockIndex * blockNumValues;
vtkm::Id blockEnd = blockStart + blockNumValues;
if (blockIndex < (numBlocks - 1)) // add overlap between regions
{
blockEnd += blockSliceSize;
}
else
{
blockEnd = lastDimSize * blockSliceSize;
}
vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize);
vtkm::cont::DataSetBuilderUniform dsb;
vtkm::cont::DataSet ds;
// 2D data
if (nDims == 2)
{
vtkm::Id2 vdims;
vdims[0] = static_cast<vtkm::Id>(dims[0]);
vdims[1] = static_cast<vtkm::Id>(currBlockSize);
vtkm::Vec<ValueType, 2> origin(0, blockIndex * blockSize);
vtkm::Vec<ValueType, 2> spacing(1, 1);
ds = dsb.Create(vdims, origin, spacing);
vtkm::cont::CellSetStructured<2> cs;
cs.SetPointDimensions(vdims);
cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] });
cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) });
ds.SetCellSet(cs);
localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0));
}
// 3D data
else
{
vtkm::Id3 vdims;
vdims[0] = static_cast<vtkm::Id>(dims[0]);
vdims[1] = static_cast<vtkm::Id>(dims[1]);
vdims[2] = static_cast<vtkm::Id>(currBlockSize);
vtkm::Vec<ValueType, 3> origin(0, 0, (blockIndex * blockSize));
vtkm::Vec<ValueType, 3> spacing(1, 1, 1);
ds = dsb.Create(vdims, origin, spacing);
vtkm::cont::CellSetStructured<3> cs;
cs.SetPointDimensions(vdims);
cs.SetGlobalPointDimensions(globalSize);
cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize));
ds.SetCellSet(cs);
localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex));
}
std::vector<vtkm::Float32> subValues((values.begin() + blockStart),
(values.begin() + blockEnd));
ds.AddPointField("values", subValues);
useDataSet.AppendPartition(ds);
}
}
// Print the mesh metadata
if (rank == 0)
{
VTKM_LOG_S(exampleLogLevel,
std::endl
<< " ---------------- Input Mesh Properties --------------" << std::endl
<< " Number of dimensions: " << nDims);
}
// Check if marching cubes is enabled for non-3D data
@ -838,16 +585,15 @@ int main(int argc, char* argv[])
<< nDims << "D. "
<< "Contour tree using marching cubes is only supported for 3D data.");
// If we found any errors in the setttings than finalize MPI and exit the execution
// If we found any errors in the settings, then finalize MPI and exit the execution
if (invalidMCOption)
{
MPI_Finalize();
return EXIT_FAILURE;
}
currTime = totalTime.GetElapsedTime();
buildDatasetTime = currTime - prevTime;
prevTime = currTime;
// Reset the timer after the read. The dataReadTime and buildDatasetTime are measured by the read functions
prevTime = totalTime.GetElapsedTime();
// Make sure that all ranks have started up before we start the data read
MPI_Barrier(comm);
@ -855,6 +601,20 @@ int main(int argc, char* argv[])
vtkm::Float64 dataReadSyncTime = currTime - prevTime;
prevTime = currTime;
// Log information of the (first) local data block
// TODO: Get localBlockSize and localBlockOrigins from the cell set to log results
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
"" //<< std::setw(42) << std::left << "blockSize"
//<< ":" << localBlockSizesPortal.Get(0) << std::endl
//<< std::setw(42) << std::left << "blockOrigin=" << localBlockOriginsPortal.Get(0)
//<< std::endl
<< std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0)
<< std::endl
<< std::setw(42) << std::left << "blocksPerDim=" << blocksPerDim << std::endl
<< std::setw(42) << std::left << "globalSize=" << globalSize << std::endl
);
// Convert the mesh of values into contour tree, pairs of vertex ids
vtkm::filter::scalar_topology::ContourTreeUniformDistributed filter(timingsLogLevel,
treeLogLevel);
@ -872,21 +632,30 @@ int main(int argc, char* argv[])
vtkm::Float64 computeContourTreeTime = currTime - prevTime;
prevTime = currTime;
// Make sure that all ranks have started up before we start the data read
// Record the time to synchronize after the filter has finished
MPI_Barrier(comm);
currTime = totalTime.GetElapsedTime();
vtkm::Float64 postFilterSyncTime = currTime - prevTime;
prevTime = currTime;
// Compute branch decomposition if requested
vtkm::cont::PartitionedDataSet bd_result;
if (computeHierarchicalVolumetricBranchDecomposition)
{
vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bd_filter;
bd_result = bd_filter.Execute(result);
}
currTime = totalTime.GetElapsedTime();
vtkm::Float64 branchDecompTime = currTime - prevTime;
prevTime = currTime;
// Save output
if (saveOutputData)
{
if (augmentHierarchicalTree)
{
if (computeHierarchicalVolumetricBranchDecomposition)
{
vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bd_filter;
auto bd_result = bd_filter.Execute(result);
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
{
auto ds = bd_result.GetPartition(ds_no);
@ -1003,6 +772,8 @@ int main(int argc, char* argv[])
<< ": " << computeContourTreeTime << " seconds" << std::endl
<< std::setw(42) << std::left << " Post filter Sync"
<< ": " << postFilterSyncTime << " seconds" << std::endl
<< std::setw(42) << std::left << " Branch Decomposition"
<< ": " << branchDecompTime << " seconds" << std::endl
<< std::setw(42) << std::left << " Save Tree Compiler Data"
<< ": " << saveOutputDataTime << " seconds" << std::endl
<< std::setw(42) << std::left << " Total Time"

@ -0,0 +1,853 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// I/O functions used by the ContourTreeApp for data read.
//==============================================================================
#ifndef vtk_m_examples_ContourTreeAppDataIO_hxx
#define vtk_m_examples_ContourTreeAppDataIO_hxx
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayCopy.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/CellSetStructured.h>
#include <vtkm/cont/DataSet.h>
#include <vtkm/cont/DataSetBuilderUniform.h>
#include <vtkm/cont/PartitionedDataSet.h>
#include <vtkm/cont/Timer.h>
#include <vtkm/io/BOVDataSetReader.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/Types.h>
#include <cstdio>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#ifdef ENABLE_HDFIO
// #include "H5Cpp.h"
#include "hdf5.h"
//using namespace H5;
#include <mpi.h>
/// Convert a 3D index of a cube to rank index
vtkm::Id to1DIndex(vtkm::Id3 idx, vtkm::Id3 dims)
{
// return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0];
// Swap first and second dimension
return (idx[2] * dims[0] * dims[1]) + (idx[0] * dims[1]) + idx[1];
}
/// Convert the rank index to the index of the cube
vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims)
{
vtkm::Id3 res;
res[2] = idx / (dims[0] * dims[1]);
idx -= (res[2] * dims[0] * dims[1]);
// Swap index 0 and 1
// res[0] = idx / dims[0];
// res[1] = idx % dims[0];
// Don't swap index here, because this function is used with the original
// HDF5 layout and the 3D index is swapped later on
res[1] = idx / dims[0];
res[0] = idx % dims[0];
return res;
}
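// A minimal illustration (assumed values, not part of the reader): the two helpers
// are deliberately not inverses of each other. to3DIndex works on the original HDF5
// layout, while to1DIndex already applies the row/column swap described above.
// vtkm::Id3 idx3 = to3DIndex(5, vtkm::Id3(2, 2, 2));   // yields (1, 0, 1)
// vtkm::Id idx1 = to1DIndex(idx3, vtkm::Id3(2, 2, 2)); // yields 6, not 5, due to the swap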
/// Read data from a 3D HDF5 file
/// @param[in] mpi_rank The current MPI rank the function is called from
/// @param[in] filename Name of the HDF5 file to read
/// @param[in] dataset_name Name of the dataset in the HDF5 file to read
/// @param[in] blocksPerRank Number of data blocks to process on each rank
/// @param[in,out] blocksPerDim Number of data blocks to use per dimension. On output the first two components are swapped to match the re-ordered mesh dimensions
/// @param[in] selectSize Select subset of this size from the dataset. Set to (-1,-1,-1) to select the full size
/// @param[out] nDims Number of data dimensions (i.e., 2 or 3)
/// @param[out] useDataSet VTKm partitioned dataset to be used with the distributed contour tree filter
/// @param[out] globalSize Global extents of the input mesh (i.e., number of mesh points in each dimension)
/// @param[out] localBlockIndices Array with the (x,y,z) index of each local data block
/// with respect to blocksPerDim
/// @param[out] dataReadTime Time to read the data
/// @param[out] buildDatasetTime Time to construct the VTKm datasets
/// @returns bool indicating whether the read was successful or not
template <typename ValueType>
bool read3DHDF5File(const int& mpi_rank,
const std::string& filename,
const std::string& dataset_name,
const int& blocksPerRank,
vtkm::Id3& blocksPerDim,
const vtkm::Id3& selectSize,
std::vector<vtkm::Float32>::size_type& nDims,
vtkm::cont::PartitionedDataSet& useDataSet,
vtkm::Id3& globalSize,
vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockIndices,
vtkm::Float64& dataReadTime,
vtkm::Float64& buildDatasetTime)
{
vtkm::cont::Timer totalTime;
totalTime.Start();
vtkm::Float64 prevTime = 0;
vtkm::Float64 currTime = 0;
// TODO not supported yet
if (blocksPerRank > 1)
{
VTKM_LOG_S(
vtkm::cont::LogLevel::Error,
"HDF5 reader for ContourTreeDistributed does not support multiple blocks per rank yet");
return false;
}
vtkm::Id blockNo = 0; // TODO: Update this if we have multiple blocks per rank
localBlockIndices.Allocate(blocksPerRank);
auto localBlockIndicesPortal = localBlockIndices.WritePortal();
herr_t status;
//Set up file access property list with parallel I/O access
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
H5Pset_fapl_mpio(plist_id, comm, info);
// Open the file and the dataset
//hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);//
hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT);
// Get filespace for rank and dimension
hid_t filespace = H5Dget_space(dataset);
// Get number of dimensions in the file dataspace
nDims = H5Sget_simple_extent_ndims(filespace);
if (nDims != 3)
{
VTKM_LOG_S(vtkm::cont::LogLevel::Error,
"HDF5 reader for ContourTreeDistributed requires 3D dataset");
return false;
}
hsize_t dims[3]; // dataset dimensions (nDims == 3 was checked above, so a fixed-size array avoids a variable-length array extension)
status = H5Sget_simple_extent_dims(filespace, dims, NULL);
globalSize[0] = selectSize[0] < 0 ? dims[0] : selectSize[0];
globalSize[1] = selectSize[1] < 0 ? dims[1] : selectSize[1];
globalSize[2] = selectSize[2] < 0 ? dims[2] : selectSize[2];
// Define the memory space to read dataset.
hid_t dataspace = H5Dget_space(dataset);
// Read a hyperslab
// Define the hyperslab
hsize_t count[3]; // size of the hyperslab in the file
hsize_t offset[3]; // hyperslab offset in the file
// Compute the origin and count
vtkm::Id3 blockSize(std::floor(vtkm::Id(globalSize[0] / blocksPerDim[0])),
std::floor(vtkm::Id(globalSize[1] / blocksPerDim[1])),
std::floor(vtkm::Id(globalSize[2] / blocksPerDim[2])));
vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim);
// compute the offset and count for the block for this rank
offset[0] = blockSize[0] * blockIndex[0];
offset[1] = blockSize[1] * blockIndex[1];
offset[2] = blockSize[2] * blockIndex[2];
count[0] = blockSize[0];
count[1] = blockSize[1];
count[2] = blockSize[2];
// add ghost zone on the left
if (blockIndex[0] > 0)
{
offset[0] = offset[0] - 1;
count[0] = count[0] + 1;
}
if (blockIndex[1] > 0)
{
offset[1] = offset[1] - 1;
count[1] = count[1] + 1;
}
if (blockIndex[2] > 0)
{
offset[2] = offset[2] - 1;
count[2] = count[2] + 1;
}
// Check that we are not running over the end of the dataset
if (vtkm::Id(offset[0] + count[0]) > globalSize[0])
{
count[0] = globalSize[0] - offset[0];
}
if (vtkm::Id(offset[1] + count[1]) > globalSize[1])
{
count[1] = globalSize[1] - offset[1];
}
if (vtkm::Id(offset[2] + count[2]) > globalSize[2])
{
count[2] = globalSize[2] - offset[2];
}
blockSize = vtkm::Id3{ static_cast<vtkm::Id>(count[0]),
static_cast<vtkm::Id>(count[1]),
static_cast<vtkm::Id>(count[2]) };
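// Worked example (assumed values, for illustration only): with globalSize = (64, 64, 64),
// blocksPerDim = (2, 2, 2), and mpi_rank = 3, to3DIndex yields blockIndex = (1, 1, 0).
// The base block is offset = (32, 32, 0) and count = (32, 32, 32); adding the one-layer
// ghost zone on the lower side of dimensions 0 and 1 gives offset = (31, 31, 0) and
// count = (33, 33, 32), which still fits within the 64^3 global extent.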
/*vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast<vtkm::Id>(offset[0]),
static_cast<vtkm::Id>(offset[1]),
static_cast<vtkm::Id>(offset[2]) };*/
// Define the hyperslab to read the data into memory
status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL);
// Define the memory space for reading
hid_t memspace = H5Screate_simple(nDims, count, NULL);
// Read data from the hyperslab in the file into memory
std::size_t numVertices = count[0] * count[1] * count[2];
std::vector<ValueType> values(numVertices);
{
if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_DOUBLE))
{
double data_out[count[0]][count[1]][count[2]]; // output buffer
status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out);
// Copy data to 1D array of the expected ValueType
for (hsize_t k = 0; k < count[0]; k++)
{
for (hsize_t j = 0; j < count[1]; j++)
{
for (hsize_t i = 0; i < count[2]; i++)
{
values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]);
}
}
}
}
else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_FLOAT))
{
float data_out[count[0]][count[1]][count[2]]; // output buffer
status = H5Dread(dataset, H5T_NATIVE_FLOAT, memspace, dataspace, H5P_DEFAULT, data_out);
// Copy data to 1D array of the expected ValueType
for (hsize_t k = 0; k < count[0]; k++)
{
for (hsize_t j = 0; j < count[1]; j++)
{
for (hsize_t i = 0; i < count[2]; i++)
{
values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]);
}
}
}
}
else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_INT))
{
int data_out[count[0]][count[1]][count[2]]; // output buffer
status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out);
// Copy data to 1D array of the expected ValueType
for (hsize_t k = 0; k < count[0]; k++)
{
for (hsize_t j = 0; j < count[1]; j++)
{
for (hsize_t i = 0; i < count[2]; i++)
{
values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]);
}
}
}
}
else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_UCHAR))
{
unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer
status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out);
// Copy data to 1D array of the expected ValueType
for (hsize_t k = 0; k < count[0]; k++)
{
for (hsize_t j = 0; j < count[1]; j++)
{
for (hsize_t i = 0; i < count[2]; i++)
{
values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]);
}
}
}
}
else
{
VTKM_LOG_S(vtkm::cont::LogLevel::Error, "Data type not supported by the example HDF5 reader");
throw "Data type not supported by the example HDF5 reader";
}
}
// Release HDF5 resources
H5Sclose(dataspace);
H5Dclose(dataset);
H5Fclose(file);
// Create vtk-m data set
vtkm::cont::DataSetBuilderUniform dsb;
vtkm::cont::DataSet ds;
VTKM_ASSERT(nDims == 3);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
// Swap the dimensions to match the pre-split file reader
globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]);
// Swap also the blocks per dimension accordingly
blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], blocksPerDim[2]);
// Swap first and second dimension here as well for consistency
const vtkm::Vec<ValueType, 3> v_origin{ static_cast<ValueType>(offset[1]),
static_cast<ValueType>(offset[0]),
static_cast<ValueType>(offset[2]) };
const vtkm::Id3 v_dims{ static_cast<vtkm::Id>(blockSize[1]),
static_cast<vtkm::Id>(blockSize[0]),
static_cast<vtkm::Id>(blockSize[2]) };
vtkm::Vec<ValueType, 3> v_spacing(1, 1, 1);
ds = dsb.Create(v_dims, v_origin, v_spacing);
vtkm::cont::CellSetStructured<3> cs;
cs.SetPointDimensions(v_dims);
cs.SetGlobalPointDimensions(globalSize);
cs.SetGlobalPointIndexStart(vtkm::Id3{ v_origin[0], v_origin[1], v_origin[2] });
ds.SetCellSet(cs);
ds.AddPointField("values", values);
// and add to partition
useDataSet.AppendPartition(ds);
// Swap order to match pre-split
localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2]));
// Log information of the (first) local data block
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
"" << std::setw(42) << std::left << "blockSize"
<< ":" << v_dims << std::endl
<< std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl
<< std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0)
<< std::endl
<< std::setw(42) << std::left << "globalSize=" << globalSize << std::endl);
// Finished data read
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
currTime = totalTime.GetElapsedTime();
buildDatasetTime = currTime - prevTime;
return true;
}
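// A hedged usage sketch (hypothetical file and dataset names; the real call site is
// in ContourTreeApp.cxx): read one block per rank from a dataset named "data",
// selecting the full extent in every dimension via selectSize = (-1, -1, -1).
// vtkm::cont::PartitionedDataSet ds;
// vtkm::Id3 globalSize;
// vtkm::Id3 blocksPerDim(2, 2, 2);
// vtkm::cont::ArrayHandle<vtkm::Id3> blockIndices;
// std::vector<vtkm::Float32>::size_type nDims;
// vtkm::Float64 readTime, buildTime;
// bool ok = read3DHDF5File<vtkm::Float64>(mpi_rank, "input.h5", "data", 1, blocksPerDim,
//                                         vtkm::Id3(-1, -1, -1), nDims, ds, globalSize,
//                                         blockIndices, readTime, buildTime);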
#endif
/// Read data from pre-split ASCII or binary (.bdem) files
/// @param[in] rank The current MPI rank the function is called from
/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block
/// @param[in] blocksPerRank Number of data blocks to process on each rank
/// @param[out] nDims Number of data dimensions (i.e., 2 or 3)
/// @param[out] useDataSet VTKm partitioned dataset to be used with the distributed contour tree filter
/// @param[out] globalSize Global extents of the input mesh (i.e., number of mesh points in each dimension)
/// @param[out] blocksPerDim Number of data blocks used in each data dimension
/// @param[out] localBlockIndices Array with the (x,y,z) index of each local data block
/// with respect to blocksPerDim
/// @param[out] dataReadTime Time to read the data
/// @param[out] buildDatasetTime Time to construct the VTKm datasets
/// @returns bool indicating whether the read was successful or not
template <typename ValueType>
bool readPreSplitFiles(const int& rank,
const std::string& filename,
const int& blocksPerRank,
std::vector<vtkm::Float32>::size_type& nDims,
vtkm::cont::PartitionedDataSet& useDataSet,
vtkm::Id3& globalSize,
vtkm::Id3& blocksPerDim,
vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockIndices,
vtkm::Float64& dataReadTime,
vtkm::Float64& buildDatasetTime)
{
vtkm::cont::Timer totalTime;
totalTime.Start();
vtkm::Float64 prevTime = 0;
vtkm::Float64 currTime = 0;
localBlockIndices.Allocate(blocksPerRank);
auto localBlockIndicesPortal = localBlockIndices.WritePortal();
for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo)
{
// Translate pattern into filename for this block
char block_filename[256];
snprintf(block_filename,
sizeof(block_filename),
filename.c_str(),
static_cast<int>(rank * blocksPerRank + blockNo));
std::cout << "Reading file " << block_filename << std::endl;
// Open file
std::ifstream inFile(block_filename);
if (!inFile.is_open() || inFile.bad())
{
std::cerr << "Error: Couldn't open file " << block_filename << std::endl;
return false;
}
// Read header with dimensions
std::string line;
std::string tag;
vtkm::Id dimVertices;
getline(inFile, line);
std::istringstream global_extents_stream(line);
global_extents_stream >> tag;
if (tag != "#GLOBAL_EXTENTS")
{
std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl;
return false;
}
std::vector<vtkm::Id> global_extents;
while (global_extents_stream >> dimVertices)
global_extents.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
std::swap(global_extents[0], global_extents[1]);
if (blockNo == 0)
{ // First block: Set globalSize
globalSize =
vtkm::Id3{ static_cast<vtkm::Id>(global_extents[0]),
static_cast<vtkm::Id>(global_extents[1]),
static_cast<vtkm::Id>(global_extents.size() > 2 ? global_extents[2] : 1) };
}
else
{ // All other blocks: Consistency check of globalSize
if (globalSize !=
vtkm::Id3{ static_cast<vtkm::Id>(global_extents[0]),
static_cast<vtkm::Id>(global_extents[1]),
static_cast<vtkm::Id>(global_extents.size() > 2 ? global_extents[2] : 1) })
{
std::cerr << "Error: Global extents mismatch between blocks!" << std::endl;
return false;
}
}
getline(inFile, line);
std::istringstream offset_stream(line);
offset_stream >> tag;
if (tag != "#OFFSET")
{
std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl;
return false;
}
std::vector<vtkm::Id> offset;
while (offset_stream >> dimVertices)
offset.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
std::swap(offset[0], offset[1]);
getline(inFile, line);
std::istringstream bpd_stream(line);
bpd_stream >> tag;
if (tag != "#BLOCKS_PER_DIM")
{
std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl;
return false;
}
std::vector<vtkm::Id> bpd;
while (bpd_stream >> dimVertices)
bpd.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
std::swap(bpd[0], bpd[1]);
getline(inFile, line);
std::istringstream blockIndex_stream(line);
blockIndex_stream >> tag;
if (tag != "#BLOCK_INDEX")
{
std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl;
return false;
}
std::vector<vtkm::Id> blockIndex;
while (blockIndex_stream >> dimVertices)
blockIndex.push_back(dimVertices);
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
std::swap(blockIndex[0], blockIndex[1]);
getline(inFile, line);
std::istringstream linestream(line);
std::vector<vtkm::Id> dims;
while (linestream >> dimVertices)
{
dims.push_back(dimVertices);
}
if (dims.size() != global_extents.size() || dims.size() != offset.size())
{
std::cerr << "Error: Dimension mismatch" << std::endl;
return false;
}
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
std::swap(dims[0], dims[1]);
// Compute the number of vertices, i.e., xdim * ydim * zdim
nDims = static_cast<unsigned short>(dims.size());
std::size_t numVertices = static_cast<std::size_t>(
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>()));
// Check for fatal input errors
// Check that the number of dimensions is either 2 (2D) or 3 (3D)
bool invalidNumDimensions = (nDims < 2 || nDims > 3);
// Log any errors if found on rank 0
VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error,
invalidNumDimensions && (rank == 0),
"The input mesh is " << nDims
<< "D. "
"The input data must be either 2D or 3D.");
// If we found any errors in the settings, then abort the read; the caller will finalize MPI and exit
if (invalidNumDimensions)
{
return false;
}
// Read data
std::vector<ValueType> values(numVertices);
if (filename.compare(filename.length() - 5, 5, ".bdem") == 0)
{
inFile.read(reinterpret_cast<char*>(values.data()),
static_cast<std::streamsize>(numVertices * sizeof(ValueType)));
}
else
{
for (std::size_t vertex = 0; vertex < numVertices; ++vertex)
{
inFile >> values[vertex];
}
}
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
// Create vtk-m data set
vtkm::cont::DataSetBuilderUniform dsb;
vtkm::cont::DataSet ds;
if (nDims == 2)
{
const vtkm::Id2 v_dims{
static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
};
const vtkm::Vec<ValueType, 2> v_origin{ static_cast<ValueType>(offset[0]),
static_cast<ValueType>(offset[1]) };
const vtkm::Vec<ValueType, 2> v_spacing{ 1, 1 };
ds = dsb.Create(v_dims, v_origin, v_spacing);
vtkm::cont::CellSetStructured<2> cs;
cs.SetPointDimensions(v_dims);
cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] });
cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] });
ds.SetCellSet(cs);
}
else
{
VTKM_ASSERT(nDims == 3);
const vtkm::Id3 v_dims{ static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
static_cast<vtkm::Id>(dims[2]) };
const vtkm::Vec<ValueType, 3> v_origin{ static_cast<ValueType>(offset[0]),
static_cast<ValueType>(offset[1]),
static_cast<ValueType>(offset[2]) };
vtkm::Vec<ValueType, 3> v_spacing(1, 1, 1);
ds = dsb.Create(v_dims, v_origin, v_spacing);
vtkm::cont::CellSetStructured<3> cs;
cs.SetPointDimensions(v_dims);
cs.SetGlobalPointDimensions(globalSize);
cs.SetGlobalPointIndexStart(vtkm::Id3{ offset[0], offset[1], offset[2] });
ds.SetCellSet(cs);
}
ds.AddPointField("values", values);
// and add to partition
useDataSet.AppendPartition(ds);
localBlockIndicesPortal.Set(blockNo,
vtkm::Id3{ static_cast<vtkm::Id>(blockIndex[0]),
static_cast<vtkm::Id>(blockIndex[1]),
static_cast<vtkm::Id>(nDims == 3 ? blockIndex[2] : 0) });
if (blockNo == 0)
{
blocksPerDim = vtkm::Id3{ static_cast<vtkm::Id>(bpd[0]),
static_cast<vtkm::Id>(bpd[1]),
static_cast<vtkm::Id>(nDims == 3 ? bpd[2] : 1) };
}
}
currTime = totalTime.GetElapsedTime();
buildDatasetTime = currTime - prevTime;
return true;
}
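// A hedged usage sketch (hypothetical file pattern): with blocksPerRank = 2 and the
// pattern "vortex_split_%d.txt", rank 1 reads vortex_split_2.txt and vortex_split_3.txt,
// because blocks are numbered rank * blocksPerRank + blockNo.
// vtkm::cont::PartitionedDataSet ds;
// vtkm::Id3 globalSize, blocksPerDim;
// vtkm::cont::ArrayHandle<vtkm::Id3> blockIndices;
// std::vector<vtkm::Float32>::size_type nDims;
// vtkm::Float64 readTime, buildTime;
// bool ok = readPreSplitFiles<vtkm::Float64>(rank, "vortex_split_%d.txt", 2, nDims, ds,
//                                            globalSize, blocksPerDim, blockIndices,
//                                            readTime, buildTime);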
/// Read data from a single file and split the data into blocks across ranks
/// This is a simple implementation that will read the full data on all ranks
/// and then extract only the relevant subblock for that rank.
/// The function supports reading from BOV as well as ASCII files
///
/// @param[in] rank The current MPI rank the function is called from
/// @param[in] size The number of MPI ranks
/// @param[in] filename Name of the single input file to read (BOV or ASCII)
/// @param[in] numBlocks Number of blocks to use during computation
/// @param[in] blocksPerRank Number of data blocks to process on each rank
/// @param[out] nDims Number of data dimensions (i.e., 2 or 3)
/// @param[out] useDataSet VTKm partitioned dataset to be used with the distributed contour tree filter
/// @param[out] globalSize Global extents of the input mesh (i.e., number of mesh points in each dimension)
/// @param[out] blocksPerDim Number of data blocks used in each data dimension
/// @param[out] localBlockIndices Array with the (x,y,z) index of each local data block
/// with respect to blocksPerDim
/// @param[out] dataReadTime Time to read the data
/// @param[out] buildDatasetTime Time to construct the VTKm datasets
/// @returns bool indicating whether the read was successful or not
template <typename ValueType>
bool readSingleBlockFile(const int& rank,
const int& size,
const std::string& filename,
const int& numBlocks,
const int& blocksPerRank,
std::vector<vtkm::Float32>::size_type& nDims,
vtkm::cont::PartitionedDataSet& useDataSet,
vtkm::Id3& globalSize,
vtkm::Id3& blocksPerDim,
vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockIndices,
vtkm::Float64& dataReadTime,
vtkm::Float64& buildDatasetTime)
{
vtkm::cont::Timer totalTime;
totalTime.Start();
vtkm::Float64 prevTime = 0;
vtkm::Float64 currTime = 0;
localBlockIndices.Allocate(blocksPerRank);
auto localBlockIndicesPortal = localBlockIndices.WritePortal();
vtkm::cont::DataSet inDataSet;
// TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here.
std::vector<ValueType> values;
std::vector<vtkm::Id> dims;
// Read BOV data file
if (filename.compare(filename.length() - 3, 3, "bov") == 0)
{
std::cout << "Reading BOV file" << std::endl;
vtkm::io::BOVDataSetReader reader(filename);
inDataSet = reader.ReadDataSet();
nDims = 3;
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
// Copy the data into the values array so we can construct a multiblock dataset
vtkm::Id3 pointDimensions;
auto cellSet = inDataSet.GetCellSet();
vtkm::cont::CastAndCall(
cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions);
std::cout << "Point dimensions are " << pointDimensions << std::endl;
dims.resize(3);
dims[0] = pointDimensions[0];
dims[1] = pointDimensions[1];
dims[2] = pointDimensions[2];
auto tempFieldData = inDataSet.GetField(0).GetData();
values.resize(static_cast<std::size_t>(tempFieldData.GetNumberOfValues()));
auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off);
vtkm::cont::ArrayCopy(tempFieldData, valuesHandle);
valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU
}
// Read ASCII data input
else
{
std::cout << "Reading ASCII file" << std::endl;
std::ifstream inFile(filename);
if (inFile.bad())
return false;
// Read the dimensions of the mesh, i.e., the number of elements in x, y, and z
std::string line;
getline(inFile, line);
std::istringstream linestream(line);
vtkm::Id dimVertices;
while (linestream >> dimVertices)
{
dims.push_back(dimVertices);
}
// Swap dimensions so that they are from fastest to slowest growing
// dims[0] -> col; dims[1] -> row; dims[2] -> slice
std::swap(dims[0], dims[1]);
// Compute the number of vertices, i.e., xdim * ydim * zdim
nDims = static_cast<unsigned short>(dims.size());
std::size_t numVertices = static_cast<std::size_t>(
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>()));
// Check that the number of dimensions is either 2 (2D) or 3 (3D)
bool invalidNumDimensions = (nDims < 2 || nDims > 3);
// Log any errors if found on rank 0
VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error,
invalidNumDimensions && (rank == 0),
"The input mesh is " << nDims << "D. The input data must be either 2D or 3D.");
// If we found any errors in the settings, then abort the read; the caller will finalize MPI and exit
if (invalidNumDimensions)
{
return false;
}
// Read data
values.resize(numVertices);
for (std::size_t vertex = 0; vertex < numVertices; ++vertex)
{
inFile >> values[vertex];
}
// finish reading the data
inFile.close();
currTime = totalTime.GetElapsedTime();
dataReadTime = currTime - prevTime;
prevTime = currTime;
} // END ASCII Read
// Create a multi-block dataset for multi-block DIY-parallel processing
blocksPerDim =
nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into blocks along the slowest-varying dimension
globalSize = nDims == 3 ? vtkm::Id3(static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
static_cast<vtkm::Id>(dims[2]))
: vtkm::Id3(static_cast<vtkm::Id>(dims[0]),
static_cast<vtkm::Id>(dims[1]),
static_cast<vtkm::Id>(1));
std::cout << "blocksPerDim=" << blocksPerDim << " globalSize=" << globalSize << std::endl;
{
vtkm::Id lastDimSize =
(nDims == 2) ? static_cast<vtkm::Id>(dims[1]) : static_cast<vtkm::Id>(dims[2]);
if (size > (lastDimSize / 2.))
{
VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error,
rank == 0,
"Number of ranks too large for data. Use " << lastDimSize / 2
<< "or fewer ranks");
return false;
}
vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks);
vtkm::Id blockSize = standardBlockSize;
vtkm::Id blockSliceSize =
nDims == 2 ? static_cast<vtkm::Id>(dims[0]) : static_cast<vtkm::Id>((dims[0] * dims[1]));
vtkm::Id blockNumValues = blockSize * blockSliceSize;
vtkm::Id startBlock = blocksPerRank * rank;
vtkm::Id endBlock = startBlock + blocksPerRank;
for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex)
{
vtkm::Id localBlockIndex = blockIndex - startBlock;
vtkm::Id blockStart = blockIndex * blockNumValues;
vtkm::Id blockEnd = blockStart + blockNumValues;
if (blockIndex < (numBlocks - 1)) // add overlap between regions
{
blockEnd += blockSliceSize;
}
else
{
blockEnd = lastDimSize * blockSliceSize;
}
vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize);
vtkm::cont::DataSetBuilderUniform dsb;
vtkm::cont::DataSet ds;
// 2D data
if (nDims == 2)
{
vtkm::Id2 vdims;
vdims[0] = static_cast<vtkm::Id>(dims[0]);
vdims[1] = static_cast<vtkm::Id>(currBlockSize);
vtkm::Vec<ValueType, 2> origin(0, blockIndex * blockSize);
vtkm::Vec<ValueType, 2> spacing(1, 1);
ds = dsb.Create(vdims, origin, spacing);
vtkm::cont::CellSetStructured<2> cs;
cs.SetPointDimensions(vdims);
cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] });
cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) });
ds.SetCellSet(cs);
localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0));
}
// 3D data
else
{
vtkm::Id3 vdims;
vdims[0] = static_cast<vtkm::Id>(dims[0]);
vdims[1] = static_cast<vtkm::Id>(dims[1]);
vdims[2] = static_cast<vtkm::Id>(currBlockSize);
vtkm::Vec<ValueType, 3> origin(0, 0, (blockIndex * blockSize));
vtkm::Vec<ValueType, 3> spacing(1, 1, 1);
ds = dsb.Create(vdims, origin, spacing);
vtkm::cont::CellSetStructured<3> cs;
cs.SetPointDimensions(vdims);
cs.SetGlobalPointDimensions(globalSize);
cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize));
ds.SetCellSet(cs);
localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex));
}
std::vector<vtkm::Float32> subValues((values.begin() + blockStart),
(values.begin() + blockEnd));
ds.AddPointField("values", subValues);
useDataSet.AppendPartition(ds);
}
}
currTime = totalTime.GetElapsedTime();
buildDatasetTime = currTime - prevTime;
return true;
}
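// Worked example of the decomposition above (assumed values): for a 3D mesh with
// dims = (10, 10, 20) after the swap and numBlocks = 4, lastDimSize = 20, so
// standardBlockSize = 5 slices and blockSliceSize = 10 * 10 = 100 values. Block 1
// then spans values [500, 1100): its own five slices plus one overlap slice shared
// with block 2, i.e., currBlockSize = 6.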
#endif

@ -6,16 +6,24 @@ import sys
# For readBOV
from functools import reduce
import operator
try:
import h5py
USE_HDF = True
except ImportError:
USE_HDF = False
# Read a 3D text file from disk into a NumPy array
# ... Plain text
def read_file(fn):
"""
Read a 3D plain text file from disk into a NumPy array
"""
data = np.fromfile(fn, dtype=float, sep=" ")
data = data[3:].reshape((int(data[2]),int(data[0]),int(data[1])))
return data
# ... VisItBOV
def readBOV(filename):
"""
Read data from a VisIt BOV file
"""
with open(filename, 'r') as f:
header = dict([(lambda x: (x[0].strip().lower(), x[1].strip()))(l.strip().split(':')) for l in f.readlines()])
if 'data_endian' in header:
@ -32,11 +40,22 @@ def readBOV(filename):
return (header['variable'], header['centering'].lower(), np.fromfile(dataname, dtype, count).reshape(tuple(reversed(shape))))
return None
# Save a block from a 3D NumPy array to disk
# Python order is slice, row, col
# TXT file order is row, col, slice
# offset and size are in file order
def save_piece(fn, array, offset, n_blocks, block_index, size):
"""
Save a block from a 3D NumPy array to disk.
Python order is slice, row, col
TXT file order is row, col, slice
offset and size are in file order
Args:
fn (str): filename
array (np.array) : Array with the full data
offset (tuple) : Tuple of int offsets
n_blocks (tuple) : Tuple of ints with the number of blocks per dimension
block_index (tuple) : Tuple of ints with index of the block
size (tuple) : Tuple of ints with the size of the block in each dimension
"""
with open(fn, 'w') as f:
perm = [1, 2, 0]
f.write('#GLOBAL_EXTENTS ' + ' '.join(map(str, [array.shape[i] for i in perm])) + '\n')
@ -51,51 +70,79 @@ def save_piece(fn, array, offset, n_blocks, block_index, size):
np.savetxt(f, array[s, offset[0]:offset[0]+size[0],offset[1]:offset[1]+size[1]], fmt='%.16g')
f.write('\n')
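# A hedged usage sketch (hypothetical names): write the first block of a 3D array
# with shape (4, 6, 8) in Python order (slice, row, col) as block (0, 0, 0) of a
# 2x2x2 split; offset and size are given in file order (row, col, slice).
# save_piece('out_split_0.txt', data, (0, 0, 0), (2, 2, 2), (0, 0, 0), (3, 4, 2))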
# Compute split points for splitting into n blocks
def split_points(shape, nblocks):
"""
Compute split points for splitting into n blocks:
Args:
shape (int): Length of the axis
nblocks (int): Number of blocks to split the axis into
Returns:
List of split points along the axis
"""
dx = float(shape-1) / nblocks
return [ math.floor(i*dx) for i in range(nblocks)] + [ shape - 1 ]
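# For example, split_points(101, 4) returns [0, 25, 50, 75, 100]; consecutive pairs
# overlap in one point, which provides the boundary layer shared between blocks.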
if len(sys.argv) < 2:
print("Error: Usage split_data_3d.py <filename> <outfilepattern> [<n_blocks_per_axis>|<n_blocks_x> <n_blocks_y> <n_blocks_z>]", file=sys.stderr)
sys.exit(1)
def save_hdf(filename, data, **kwargs):
"""
Save the data to HDF5.
The axes of the data will be transposed and reordered to match the order of the save_piece function.
# Parse parameters
in_filename = sys.argv[1]
Args:
filename (str) : Name of the HDF5 file
data (np.array): 3D array with the data
kwargs (dict) : Dict with keyword arguments for the h5py create_dataset function
"""
f = h5py.File(filename, 'w')
f.create_dataset(name='data', data=np.swapaxes(np.transpose(data), 0, 1), **kwargs)
name, ext = os.path.splitext(in_filename)
#out_filename_pattern = name + '_split_%d.txt'
out_filename_pattern = sys.argv[2]
if __name__ == '__main__':
n_blocks = (2, 2, 2)
if len(sys.argv) > 3:
if len(sys.argv) >= 6:
n_blocks = (int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
if len(sys.argv) < 2:
print("Error: Usage split_data_3d.py <filename> <outfilepattern> [<n_blocks_per_axis>|<n_blocks_x> <n_blocks_y> <n_blocks_z>]", file=sys.stderr)
sys.exit(1)
# Parse parameters
in_filename = sys.argv[1]
name, ext = os.path.splitext(in_filename)
#out_filename_pattern = name + '_split_%d.txt'
out_filename_pattern = sys.argv[2]
n_blocks = (2, 2, 2)
if len(sys.argv) > 3:
if len(sys.argv) >= 6:
n_blocks = (int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
else:
n_blocks = (int(sys.argv[3]), int(sys.argv[3]), int(sys.argv[3]))
# Read data
if ext == '.bov':
data = readBOV(in_filename)[2]
else:
n_blocks = (int(sys.argv[3]), int(sys.argv[3]), int(sys.argv[3]))
data = read_file(in_filename)
# Read data
if ext == '.bov':
data = readBOV(in_filename)[2]
else:
data = read_file(in_filename)
# export to hdf5 as well
if USE_HDF:
save_hdf((out_filename_pattern % 0).replace('.txt', '.h5'), data)
# Python order is slice, row, col
# Compute split points
split_points_s = split_points(data.shape[0], n_blocks[2])
split_points_r = split_points(data.shape[1], n_blocks[0])
split_points_c = split_points(data.shape[2], n_blocks[1])
# Python order is slice, row, col
# Compute split points
split_points_s = split_points(data.shape[0], n_blocks[2])
split_points_r = split_points(data.shape[1], n_blocks[0])
split_points_c = split_points(data.shape[2], n_blocks[1])
# Create the file that records the slice values
slice_filename = name + '_slices.txt'
# Create the file that records the slice values
slice_filename = name + '_slices.txt'
# Save blocks
block_no = 0
for block_index_s, (s_start, s_stop) in enumerate(zip(split_points_s, split_points_s[1:])):
for block_index_r, (r_start, r_stop) in enumerate(zip(split_points_r, split_points_r[1:])):
for block_index_c, (c_start, c_stop) in enumerate(zip(split_points_c, split_points_c[1:])):
n_s = s_stop - s_start + 1
n_r = r_stop - r_start + 1
n_c = c_stop - c_start + 1
save_piece(out_filename_pattern % block_no, data, (r_start, c_start, s_start), n_blocks, (block_index_r, block_index_c, block_index_s), (n_r, n_c, n_s))
block_no += 1
# Save blocks
block_no = 0
for block_index_s, (s_start, s_stop) in enumerate(zip(split_points_s, split_points_s[1:])):
for block_index_r, (r_start, r_stop) in enumerate(zip(split_points_r, split_points_r[1:])):
for block_index_c, (c_start, c_stop) in enumerate(zip(split_points_c, split_points_c[1:])):
n_s = s_stop - s_start + 1
n_r = r_stop - r_start + 1
n_c = c_stop - c_start + 1
save_piece(out_filename_pattern % block_no, data, (r_start, c_start, s_start), n_blocks, (block_index_r, block_index_c, block_index_s), (n_r, n_c, n_s))
block_no += 1

@ -15,8 +15,8 @@ find_package(VTKm REQUIRED QUIET)
add_executable(CosmoCenterFinder CosmoCenterFinder.cxx)
add_executable(CosmoHaloFinder CosmoHaloFinder.cxx)
target_link_libraries(CosmoCenterFinder PRIVATE vtkm_filter_core)
target_link_libraries(CosmoHaloFinder PRIVATE vtkm_filter_core)
target_link_libraries(CosmoCenterFinder PRIVATE vtkm::filter_core)
target_link_libraries(CosmoHaloFinder PRIVATE vtkm::filter_core)
vtkm_add_target_information(CosmoCenterFinder CosmoHaloFinder
DROP_UNUSED_SYMBOLS

@ -13,7 +13,7 @@ project(VTKmDemo CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_rendering AND TARGET vtkm_filter_contour AND TARGET vtkm_source)
if(TARGET vtkm::rendering AND TARGET vtkm::filter_contour AND TARGET vtkm::source)
add_executable(Demo Demo.cxx)
target_link_libraries(Demo PRIVATE vtkm_rendering vtkm_filter_contour vtkm_source)
target_link_libraries(Demo PRIVATE vtkm::rendering vtkm::filter_contour vtkm::source)
endif()

@ -34,7 +34,8 @@ int main(int argc, char* argv[])
{
vtkm::cont::Initialize(argc, argv, vtkm::cont::InitializeOptions::Strict);
auto tangle = vtkm::source::Tangle(vtkm::Id3{ 50, 50, 50 });
vtkm::source::Tangle tangle;
tangle.SetPointDimensions({ 50, 50, 50 });
vtkm::cont::DataSet tangleData = tangle.Execute();
std::string fieldName = "tangle";

@ -19,7 +19,7 @@ if(TARGET OpenGL::GL AND
TARGET GLEW::GLEW)
add_executable(GameOfLife GameOfLife.cxx LoadShaders.h)
target_link_libraries(GameOfLife PRIVATE vtkm_filter OpenGL::GL GLEW::GLEW GLUT::GLUT)
target_link_libraries(GameOfLife PRIVATE vtkm::filter_core OpenGL::GL GLEW::GLEW GLUT::GLUT)
vtkm_add_target_information(GameOfLife
DROP_UNUSED_SYMBOLS MODIFY_CUDA_FLAGS
DEVICE_SOURCES GameOfLife.cxx)

@ -26,7 +26,7 @@
#include <vtkm/interop/TransferToOpenGL.h>
#include <vtkm/filter/NewFilterField.h>
#include <vtkm/filter/FilterField.h>
#include <vtkm/worklet/WorkletPointNeighborhood.h>
#include <vtkm/cont/Invoker.h>
@ -95,7 +95,7 @@ struct UpdateLifeState : public vtkm::worklet::WorkletPointNeighborhood
};
class GameOfLife : public vtkm::filter::NewFilterField
class GameOfLife : public vtkm::filter::FilterField
{
public:
VTKM_CONT GameOfLife() { this->SetActiveField("state", vtkm::cont::Field::Association::Points); }

@ -13,9 +13,9 @@ project(HelloWorklet CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_io)
if(TARGET vtkm::io)
add_executable(HelloWorklet HelloWorklet.cxx)
target_link_libraries(HelloWorklet PRIVATE vtkm_filter_core vtkm_io)
target_link_libraries(HelloWorklet PRIVATE vtkm::filter_core vtkm::io)
vtkm_add_target_information(HelloWorklet
DROP_UNUSED_SYMBOLS

@ -10,7 +10,7 @@
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/filter/NewFilterField.h>
#include <vtkm/filter/FilterField.h>
#include <vtkm/io/VTKDataSetReader.h>
#include <vtkm/io/VTKDataSetWriter.h>
@ -43,7 +43,7 @@ namespace vtkm
namespace filter
{
class HelloField : public vtkm::filter::NewFilterField
class HelloField : public vtkm::filter::FilterField
{
public:
VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& inDataSet)

@ -12,12 +12,12 @@ project(Histogram CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if (VTKm_ENABLE_MPI AND vtkm_filter_density_estimate)
if (VTKm_ENABLE_MPI AND vtkm::filter_density_estimate)
# TODO: this example desperately needs to be updated. The histogram filter has
# improved immensely since this has originally been written, and the code can
# be simplified a lot, which will make it more illustrative of using VTK-m.
add_executable(Histogram Histogram.cxx HistogramMPI.h HistogramMPI.cxx)
target_link_libraries(Histogram PRIVATE vtkm_filter_density_estimate MPI::MPI_CXX)
target_link_libraries(Histogram PRIVATE vtkm::filter_density_estimate MPI::MPI_CXX)
vtkm_add_target_information(Histogram
DROP_UNUSED_SYMBOLS MODIFY_CUDA_FLAGS
DEVICE_SOURCES HistogramMPI.cxx)

@ -160,7 +160,7 @@ VTKM_CONT vtkm::cont::PartitionedDataSet HistogramMPI::DoExecutePartitions(
const vtkm::cont::PartitionedDataSet& input)
{
this->PreExecute(input);
auto result = this->NewFilter::DoExecutePartitions(input);
auto result = this->Filter::DoExecutePartitions(input);
this->PostExecute(input, result);
return result;
}

@ -10,7 +10,7 @@
#ifndef vtk_m_examples_histogram_HistogramMPI_h
#define vtk_m_examples_histogram_HistogramMPI_h
#include <vtkm/filter/NewFilterField.h>
#include <vtkm/filter/FilterField.h>
namespace example
{
@ -19,7 +19,7 @@ namespace example
///
/// Construct a HistogramMPI with a default of 10 bins.
///
class HistogramMPI : public vtkm::filter::NewFilterField
class HistogramMPI : public vtkm::filter::FilterField
{
public:
//currently the HistogramMPI filter only works on scalar data.

@ -13,9 +13,9 @@ project(IsingModel CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_rendering)
if(TARGET vtkm::rendering)
add_executable(Ising Ising.cxx)
target_link_libraries(Ising PRIVATE vtkm_worklet vtkm_rendering)
target_link_libraries(Ising PRIVATE vtkm::worklet vtkm::rendering)
vtkm_add_target_information(Ising
DROP_UNUSED_SYMBOLS

@ -12,7 +12,7 @@ cmake_minimum_required(VERSION 3.12...3.15 FATAL_ERROR)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_filter_flow)
if(TARGET vtkm::filter_flow)
add_executable(Lagrangian lagrangian.cxx ABCfield.h)
target_link_libraries(Lagrangian PRIVATE vtkm_filter_flow)
target_link_libraries(Lagrangian PRIVATE vtkm::filter_flow)
endif()

@ -16,7 +16,7 @@ project(ParticleAdvection CXX)
find_package(VTKm REQUIRED QUIET)
add_executable(ftle LagrangianStructures.cxx)
target_link_libraries(ftle PRIVATE vtkm_cont vtkm_worklet)
target_link_libraries(ftle PRIVATE vtkm::cont vtkm::worklet)
vtkm_add_target_information(ftle
DROP_UNUSED_SYMBOLS MODIFY_CUDA_FLAGS
DEVICE_SOURCES LagrangianStructures.cxx)

@ -12,9 +12,9 @@ project(LogisticMap CXX)
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_io)
if(TARGET vtkm::io)
add_executable(LogisticMap LogisticMap.cxx)
target_link_libraries(LogisticMap PRIVATE vtkm_io)
target_link_libraries(LogisticMap PRIVATE vtkm::io)
vtkm_add_target_information(LogisticMap
DROP_UNUSED_SYMBOLS

@ -25,7 +25,7 @@ project(MeshQuality CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_filter_mesh_info AND TARGET vtkm_io)
if(TARGET vtkm::filter_mesh_info AND TARGET vtkm::io)
add_executable(MeshQuality MeshQuality.cxx)
target_link_libraries(MeshQuality PRIVATE vtkm_filter_mesh_info vtkm_io)
target_link_libraries(MeshQuality PRIVATE vtkm::filter_mesh_info vtkm::io)
endif()

@ -26,9 +26,9 @@ set(srcs
IOGenerator.cxx
)
if(TARGET vtkm_filter_vector_analysis)
if(TARGET vtkm::filter_vector_analysis)
add_executable(MultiBackend ${srcs} ${headers})
target_link_libraries(MultiBackend PRIVATE vtkm_filter_vector_analysis Threads::Threads)
target_link_libraries(MultiBackend PRIVATE vtkm::filter_vector_analysis Threads::Threads)
vtkm_add_target_information(MultiBackend
DROP_UNUSED_SYMBOLS
MODIFY_CUDA_FLAGS

@ -10,7 +10,7 @@
#ifndef vtk_m_examples_multibackend_MultiDeviceGradient_h
#define vtk_m_examples_multibackend_MultiDeviceGradient_h
#include <vtkm/filter/NewFilterField.h>
#include <vtkm/filter/FilterField.h>
#include "TaskQueue.h"
@ -22,7 +22,7 @@ using RuntimeTaskQueue = TaskQueue<std::function<void()>>;
///
/// The Policy used with MultiDeviceGradient must include the TBB and CUDA
/// backends.
class MultiDeviceGradient : public vtkm::filter::NewFilterField
class MultiDeviceGradient : public vtkm::filter::FilterField
{
public:
//Construct a MultiDeviceGradient and worker pool

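As a hedged aside, a scoped runtime device tracker is one way to restrict which backends a filter like this may use while experimenting; the TBB tag and the field name below are assumptions:

#include <vtkm/cont/DataSet.h>
#include <vtkm/cont/DeviceAdapterTag.h>
#include <vtkm/cont/RuntimeDeviceTracker.h>

void RunPinnedToTBB(const vtkm::cont::DataSet& input)
{
  // Only the TBB backend is considered while this tracker is in scope.
  vtkm::cont::ScopedRuntimeDeviceTracker tracker(vtkm::cont::DeviceAdapterTagTBB{});
  MultiDeviceGradient gradient;
  gradient.SetActiveField("pointvar"); // assumed field name
  vtkm::cont::DataSet result = gradient.Execute(input);
  (void)result;
}
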
@ -13,7 +13,7 @@ project(Oscillator CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_source)
if(TARGET vtkm::source)
add_executable(Oscillator Oscillator.cxx)
target_link_libraries(Oscillator PRIVATE vtkm_source)
target_link_libraries(Oscillator PRIVATE vtkm::source)
endif()

@ -13,7 +13,7 @@ project(ParticleAdvection CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_filter_flow AND TARGET vtkm_io)
if(TARGET vtkm::filter_flow AND TARGET vtkm::io)
add_executable(Particle_Advection ParticleAdvection.cxx)
target_link_libraries(Particle_Advection PRIVATE vtkm_filter_flow vtkm_io)
target_link_libraries(Particle_Advection PRIVATE vtkm::filter_flow vtkm::io)
endif()

@ -66,10 +66,10 @@ int main(int argc, char** argv)
vtkm::FloatDefault rx = (vtkm::FloatDefault)rand() / (vtkm::FloatDefault)RAND_MAX;
vtkm::FloatDefault ry = (vtkm::FloatDefault)rand() / (vtkm::FloatDefault)RAND_MAX;
vtkm::FloatDefault rz = (vtkm::FloatDefault)rand() / (vtkm::FloatDefault)RAND_MAX;
p.Pos[0] = static_cast<vtkm::FloatDefault>(bounds.X.Min + rx * bounds.X.Length());
p.Pos[1] = static_cast<vtkm::FloatDefault>(bounds.Y.Min + ry * bounds.Y.Length());
p.Pos[2] = static_cast<vtkm::FloatDefault>(bounds.Z.Min + rz * bounds.Z.Length());
p.ID = i;
p.SetPosition({ static_cast<vtkm::FloatDefault>(bounds.X.Min + rx * bounds.X.Length()),
static_cast<vtkm::FloatDefault>(bounds.Y.Min + ry * bounds.Y.Length()),
static_cast<vtkm::FloatDefault>(bounds.Z.Min + rz * bounds.Z.Length()) });
p.SetID(i);
seeds.push_back(p);
}
auto seedArray = vtkm::cont::make_ArrayHandle(seeds, vtkm::CopyFlag::Off);

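For reference, a self-contained sketch of the accessor-based vtkm::Particle API used above; the position value is a placeholder:

#include <vtkm/Particle.h>

vtkm::Particle MakeSeed(vtkm::Id id)
{
  vtkm::Particle p;
  p.SetPosition({ 0.0f, 0.0f, 0.0f }); // placeholder position
  p.SetID(id);
  // The matching getters replace direct access to the old Pos/ID members.
  vtkm::Vec3f position = p.GetPosition();
  vtkm::Id particleId = p.GetID();
  (void)position;
  (void)particleId;
  return p;
}
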
@ -12,13 +12,13 @@ project(PolyLineArchimedeanHelix CXX)
find_package(VTKm REQUIRED QUIET)
if (TARGET vtkm_rendering)
if (TARGET vtkm::rendering)
# TODO: This example should be changed from using the Tube worklet to using
# the Tube filter (in the vtkm_filter_geometry_refinement library). Then
# the Tube filter (in the vtkm::filter_geometry_refinement library). Then
# compiling it would no longer require a device compiler and the example
# would generally be simpler.
add_executable(PolyLineArchimedeanHelix PolyLineArchimedeanHelix.cxx)
target_link_libraries(PolyLineArchimedeanHelix PRIVATE vtkm_rendering)
target_link_libraries(PolyLineArchimedeanHelix PRIVATE vtkm::rendering)
vtkm_add_target_information(PolyLineArchimedeanHelix
DROP_UNUSED_SYMBOLS
MODIFY_CUDA_FLAGS

@ -13,9 +13,9 @@ project(RedistributePoints CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_io AND TARGET vtkm_filter_entity_extraction)
if(TARGET vtkm::io AND TARGET vtkm::filter_entity_extraction)
add_executable(RedistributePoints RedistributePoints.cxx RedistributePoints.h main.cxx)
target_link_libraries(RedistributePoints PRIVATE vtkm_io vtkm_filter_entity_extraction)
target_link_libraries(RedistributePoints PRIVATE vtkm::io vtkm::filter_entity_extraction)
vtkm_add_target_information(RedistributePoints
DROP_UNUSED_SYMBOLS
MODIFY_CUDA_FLAGS

@ -10,12 +10,12 @@
#ifndef example_RedistributePoints_h
#define example_RedistributePoints_h
#include <vtkm/filter/NewFilter.h>
#include <vtkm/filter/Filter.h>
namespace example
{
class RedistributePoints : public vtkm::filter::NewFilter
class RedistributePoints : public vtkm::filter::Filter
{
public:
VTKM_CONT RedistributePoints() {}

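For context, a minimal sketch of a whole-data-set filter written against the renamed vtkm::filter::Filter base class; the pass-through body is illustrative only:

#include <vtkm/cont/DataSet.h>
#include <vtkm/filter/Filter.h>

class PassThroughSketch : public vtkm::filter::Filter
{
protected:
  VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& input) override
  {
    // A real filter (like RedistributePoints above) would build and
    // return a derived data set here.
    return input;
  }
};
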
@ -14,9 +14,9 @@ include(CTest)
find_package(VTKm REQUIRED)
if(TARGET vtkm_source)
if(TARGET vtkm::source)
add_executable(smoke_test smoke_test.cxx)
target_link_libraries(smoke_test PRIVATE vtkm_source)
target_link_libraries(smoke_test PRIVATE vtkm::source)
# Only add this test when this is a standalone project
if (PROJECT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)

@ -13,8 +13,8 @@ project(StreamlineMPI CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if (VTKm_ENABLE_MPI AND TARGET vtkm_io AND TARGET vtkm_filter_flow)
if (VTKm_ENABLE_MPI AND TARGET vtkm::io AND TARGET vtkm::filter_flow)
add_executable(StreamlineMPI StreamlineMPI.cxx)
target_compile_definitions(StreamlineMPI PRIVATE "MPI_ENABLED")
target_link_libraries(StreamlineMPI PRIVATE vtkm_filter_flow vtkm_io MPI::MPI_CXX)
target_link_libraries(StreamlineMPI PRIVATE vtkm::filter_flow vtkm::io MPI::MPI_CXX)
endif()

@ -18,7 +18,6 @@
#include <vtkm/filter/flow/ParticleAdvection.h>
#include <vtkm/io/VTKDataSetReader.h>
#include <vtkm/io/VTKDataSetWriter.h>
#include <vtkm/io/reader/VTKDataSetReader.h>
#include <mpi.h>
#include <vtkm/thirdparty/diy/diy.h>

@ -15,10 +15,10 @@ project(TemporalAdvection CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_filter_flow AND TARGET vtkm_io)
if(TARGET vtkm::filter_flow AND TARGET vtkm::io)
add_executable(Temporal_Advection TemporalAdvection.cxx)
vtkm_add_target_information(Temporal_Advection
DROP_UNUSED_SYMBOLS MODIFY_CUDA_FLAGS
DEVICE_SOURCES TemporalAdvection.cxx)
target_link_libraries(Temporal_Advection PRIVATE vtkm_filter_flow vtkm_io)
target_link_libraries(Temporal_Advection PRIVATE vtkm::filter_flow vtkm::io)
endif()

@ -87,8 +87,8 @@ int main(int argc, char** argv)
for (vtkm::Id i = 0; i < numPts; i++)
{
vtkm::Particle p;
p.Pos = ptsPortal.Get(i);
p.ID = i;
p.SetPosition(ptsPortal.Get(i));
p.SetID(i);
seedPortal.Set(i, p);
}

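A sketch of how the seed array and portal in the loop above are typically prepared; the allocation step is inferred from context and the seed position is a placeholder:

#include <vtkm/Particle.h>
#include <vtkm/cont/ArrayHandle.h>

vtkm::cont::ArrayHandle<vtkm::Particle> MakeSeedArray(vtkm::Id numPts)
{
  vtkm::cont::ArrayHandle<vtkm::Particle> seedArray;
  seedArray.Allocate(numPts); // size must be set before taking a write portal
  auto seedPortal = seedArray.WritePortal();
  for (vtkm::Id i = 0; i < numPts; i++)
  {
    vtkm::Particle p;
    p.SetPosition({ 0.0f, 0.0f, 0.0f }); // placeholder position
    p.SetID(i);
    seedPortal.Set(i, p);
  }
  return seedArray;
}
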
@ -13,10 +13,10 @@ project(Tetrahedra CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
if(TARGET vtkm_filter_geometry_refinement AND TARGET vtkm_io)
if(TARGET vtkm::filter_geometry_refinement AND TARGET vtkm::io)
add_executable(Tetrahedralize Tetrahedralize.cxx)
target_link_libraries(Tetrahedralize PRIVATE vtkm_filter_geometry_refinement vtkm_io)
target_link_libraries(Tetrahedralize PRIVATE vtkm::filter_geometry_refinement vtkm::io)
add_executable(Triangulate Triangulate.cxx)
target_link_libraries(Triangulate PRIVATE vtkm_filter_geometry_refinement vtkm_io)
target_link_libraries(Triangulate PRIVATE vtkm::filter_geometry_refinement vtkm::io)
endif()

@ -20,9 +20,9 @@
#include <vtkm/io/VTKDataSetReader.h>
#include <vtkm/io/VTKDataSetWriter.h>
#include <vtkm/filter/Filter.h>
#include <vtkm/filter/MapFieldMergeAverage.h>
#include <vtkm/filter/MapFieldPermutation.h>
#include <vtkm/filter/NewFilter.h>
#include <vtkm/filter/contour/Contour.h>
#include <vtkm/worklet/WorkletMapTopology.h>
@ -149,7 +149,7 @@ VTKM_CONT bool DoMapField(
} // anonymous namespace
class ExtractEdges : public vtkm::filter::NewFilter
class ExtractEdges : public vtkm::filter::Filter
{
public:
VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& inData) override;

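A hypothetical driver for the filter declared above, using the io headers this example already includes; the file names are placeholders:

#include <vtkm/io/VTKDataSetReader.h>
#include <vtkm/io/VTKDataSetWriter.h>

void WriteEdges()
{
  vtkm::io::VTKDataSetReader reader("input.vtk"); // placeholder file name
  vtkm::cont::DataSet data = reader.ReadDataSet();

  ExtractEdges extractor;
  vtkm::cont::DataSet edges = extractor.Execute(data);

  vtkm::io::VTKDataSetWriter writer("edges.vtk"); // placeholder file name
  writer.WriteDataSet(edges);
}
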
@ -12,7 +12,7 @@
#include <vtkm/cont/Initialize.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/filter/NewFilterField.h>
#include <vtkm/filter/FilterField.h>
#include <vtkm/filter/vector_analysis/Gradient.h>
#include <vtkm/io/VTKDataSetReader.h>
#include <vtkm/io/VTKDataSetWriter.h>
@ -32,7 +32,7 @@ struct ComputeMagnitude : vtkm::worklet::WorkletMapField
// The filter class used by external code to run the algorithm. Normally the class definition
// is in a separate header file.
class FieldMagnitude : public vtkm::filter::NewFilterField
class FieldMagnitude : public vtkm::filter::FilterField
{
protected:
VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& inDataSet) override;

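A hedged sketch chaining the built-in Gradient filter with the FieldMagnitude filter above; "velocity" and the default "Gradients" output field name are assumptions:

#include <vtkm/cont/DataSet.h>
#include <vtkm/filter/vector_analysis/Gradient.h>

vtkm::cont::DataSet ComputeGradientMagnitude(const vtkm::cont::DataSet& input)
{
  vtkm::filter::vector_analysis::Gradient gradient;
  gradient.SetActiveField("velocity"); // assumed input field name
  vtkm::cont::DataSet withGradient = gradient.Execute(input);

  FieldMagnitude magnitude;
  magnitude.SetActiveField("Gradients"); // assumed default output name of Gradient
  return magnitude.Execute(withGradient);
}
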
Some files were not shown because too many files have changed in this diff.