From 2a1e1dcfd9e201df4f44c326d1832e913a21fcb7 Mon Sep 17 00:00:00 2001 From: Sujin Philip Date: Wed, 28 Aug 2019 14:34:42 -0400 Subject: [PATCH] Parallel Contour Tree This work updates Gunther Contour tree work to be parallel. With contributions from: @oruebel, @dcamp and @sujin.philip. --- .../contour_tree_augmented/CMakeLists.txt | 48 +- .../contour_tree_augmented/ContourTreeApp.cxx | 651 ++++++++++++--- vtkm/filter/ContourTreeUniformAugmented.h | 62 +- vtkm/filter/ContourTreeUniformAugmented.hxx | 760 +++++++++++++++++- vtkm/thirdparty/diy/diy.h | 1 + vtkm/worklet/ContourTreeUniformAugmented.h | 68 +- .../contourtree_augmented/ActiveGraph.h | 13 +- .../contourtree_augmented/ArrayTransforms.h | 19 + .../contourtree_augmented/ContourTree.h | 34 +- .../contourtree_augmented/ContourTreeMaker.h | 119 ++- .../worklet/contourtree_augmented/MergeTree.h | 25 +- .../contourtree_augmented/MeshExtrema.h | 7 +- .../Mesh_DEM_Triangulation.h | 4 + .../contourtree_augmented/PointerDoubling.h | 14 +- .../contourtree_augmented/PrintVectors.h | 4 +- .../ProcessContourTree.h | 9 +- vtkm/worklet/contourtree_augmented/Types.h | 7 + .../activegraph/InitializeActiveEdges.h | 19 +- ...nitializeNeighbourhoodMasksAndOutDegrees.h | 14 +- .../ComputeRegularStructure_LocateSuperarcs.h | 378 +++++++++ .../ComputeRegularStructure_SetArcs.h | 79 ++ .../mesh_dem/CMakeLists.txt | 1 + .../mesh_dem/IdRelabler.h | 134 +++ .../mesh_dem/MeshStructure2D.h | 4 + .../mesh_dem/MeshStructure3D.h | 4 + .../mesh_dem_meshtypes/CMakeLists.txt | 5 + .../mesh_dem_meshtypes/ContourTreeMesh.h | 732 +++++++++++++++++ .../Freudenthal_2D_Triangulation.h | 1 + .../Freudenthal_3D_Triangulation.h | 1 + .../MarchingCubes_3D_Triangulation.h | 1 + .../MeshStructureContourTreeMesh.h | 200 +++++ .../MeshStructureFreudenthal2D.h | 93 +-- .../MeshStructureFreudenthal3D.h | 73 +- .../MeshStructureMarchingCubes.h | 102 ++- .../contourtreemesh/ArcComparator.h | 144 ++++ .../contourtreemesh/CMakeLists.txt | 80 ++ ...ombinedOtherStartIndexNNeighboursWorklet.h | 127 +++ ...mbinedSimulatedSimplicityIndexComparator.h | 161 ++++ .../contourtreemesh/CombinedVector.h | 139 ++++ .../CombinedVectorDifferentFromNext.h | 122 +++ .../CompressNeighboursWorklet.h | 126 +++ .../ComputeMaxNeighboursWorklet.h | 139 ++++ .../contourtreemesh/FindStartIndexWorklet.h | 139 ++++ .../InitToCombinedSortOrderArraysWorklet.h | 128 +++ .../MergeCombinedOtherStartIndexWorklet.h | 181 +++++ .../ReplaceArcNumWithToVertexWorklet.h | 112 +++ .../contourtreemesh/SubtractAssignWorklet.h | 106 +++ .../UpdateCombinedNeighboursWorklet.h | 155 ++++ .../meshextrema/SetStarts.h | 9 +- .../processcontourtree/Branch.h | 66 +- 50 files changed, 5239 insertions(+), 381 deletions(-) create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem/IdRelabler.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/ContourTreeMesh.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureContourTreeMesh.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ArcComparator.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CMakeLists.txt create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedOtherStartIndexNNeighboursWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedSimulatedSimplicityIndexComparator.h create mode 100644 
vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedVector.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedVectorDifferentFromNext.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CompressNeighboursWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ComputeMaxNeighboursWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/FindStartIndexWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/InitToCombinedSortOrderArraysWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/MergeCombinedOtherStartIndexWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ReplaceArcNumWithToVertexWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/SubtractAssignWorklet.h create mode 100644 vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/UpdateCombinedNeighboursWorklet.h diff --git a/examples/contour_tree_augmented/CMakeLists.txt b/examples/contour_tree_augmented/CMakeLists.txt index f6e2454c3..797f4b0fa 100644 --- a/examples/contour_tree_augmented/CMakeLists.txt +++ b/examples/contour_tree_augmented/CMakeLists.txt @@ -50,22 +50,46 @@ ## Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and ## Oliver Ruebel (LBNL) ##============================================================================== +cmake_minimum_required(VERSION 3.8...3.15 FATAL_ERROR) -#Find the VTK-m package +# Find the VTK-m package find_package(VTKm REQUIRED QUIET) -add_executable(ContourTree ContourTreeApp.cxx) -target_link_libraries(ContourTree vtkm_filter) -vtkm_add_target_information(ContourTree +#################################### +# Serial +#################################### +add_executable(ContourTree_Augmented ContourTreeApp.cxx) +target_link_libraries(ContourTree_Augmented vtkm_filter) +vtkm_add_target_information(ContourTree_Augmented DROP_UNUSED_SYMBOLS MODIFY_CUDA_FLAGS DEVICE_SOURCES ContourTreeApp.cxx) -#################################### -# Debug algorithm build -#################################### -# target_compile_definitions(ContourTree PRIVATE DEBUG_PRINT) - -if(TARGET vtkm::tbb) - # TBB 2D/3D/MC - target_compile_definitions(ContourTree PRIVATE ENABLE_SET_NUM_THREADS) +option (VTKM_EXAMPLE_CONTOURTREE_ENABLE_DEBUG_PRINT Off) +if (VTKM_EXAMPLE_CONTOURTREE_ENABLE_DEBUG_PRINT) + target_compile_definitions(ContourTree_Augmented PRIVATE "DEBUG_PRINT") +endif() + +if (TARGET vtkm::tbb) + target_compile_definitions(ContourTree_Augmented PRIVATE "ENABLE_SET_NUM_THREADS") +endif() + +#################################### +# MPI +#################################### +if (VTKm_ENABLE_MPI) + add_executable(ContourTree_Augmented_MPI ContourTreeApp.cxx) + target_link_libraries(ContourTree_Augmented_MPI vtkm_filter) + vtkm_add_target_information(ContourTree_Augmented_MPI + MODIFY_CUDA_FLAGS + DEVICE_SOURCES ContourTreeApp.cxx) + target_compile_definitions(ContourTree_Augmented_MPI PRIVATE "WITH_MPI") + + option (VTKM_EXAMPLE_CONTOURTREE_ENABLE_DEBUG_PRINT Off) + if (VTKM_EXAMPLE_CONTOURTREE_ENABLE_DEBUG_PRINT) + target_compile_definitions(ContourTree_Augmented_MPI PRIVATE "DEBUG_PRINT") + endif() + + if (TARGET vtkm::tbb) + target_compile_definitions(ContourTree_Augmented_MPI PRIVATE "ENABLE_SET_NUM_THREADS") + endif() 
endif() diff --git a/examples/contour_tree_augmented/ContourTreeApp.cxx b/examples/contour_tree_augmented/ContourTreeApp.cxx index 38d51d353..48cecb794 100644 --- a/examples/contour_tree_augmented/ContourTreeApp.cxx +++ b/examples/contour_tree_augmented/ContourTreeApp.cxx @@ -2,10 +2,20 @@ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. -// // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. //============================================================================ // Copyright (c) 2018, The Regents of the University of California, through // Lawrence Berkeley National Laboratory (subject to receipt of any required approvals @@ -50,32 +60,47 @@ // Oliver Ruebel (LBNL) //============================================================================== -#define DEBUG_TIMING - -#ifdef ENABLE_SET_NUM_THREADS -#include "tbb/task_scheduler_init.h" -#endif - #include #include #include #include #include -#include +#include #include - #include #include #include #include +#include + +#ifdef ENABLE_SET_NUM_THREADS +#include "tbb/task_scheduler_init.h" +#endif + +// clang-format off +VTKM_THIRDPARTY_PRE_INCLUDE +#include +#include +VTKM_THIRDPARTY_POST_INCLUDE +// clang-format on + +#ifdef WITH_MPI +#include +#endif #include #include #include +#include #include #include #include +#define DEBUG_TIMING + +using ValueType = vtkm::Float32; +using BranchType = vtkm::worklet::contourtree_augmented::process_contourtree_inc::Branch; + namespace cppp2_ns = vtkm::worklet::contourtree_augmented; // Simple helper class for parsing the command line options @@ -84,10 +109,10 @@ class ParseCL public: ParseCL() {} - void parse(std::vector::size_type argc, char** argv) + void parse(int& argc, char** argv) { - mCLOptions.resize(std::vector::size_type(argc)); - for (std::vector::size_type i = 1; i < argc; ++i) + mCLOptions.resize(static_cast(argc)); + for (std::size_t i = 1; i < static_cast(argc); ++i) { this->mCLOptions[i] = std::string(argv[i]); } @@ -105,7 +130,7 @@ public: } else { - return static_cast(it - this->mCLOptions.begin()); + return (it - this->mCLOptions.begin()); } } @@ -113,10 +138,10 @@ public: std::string getOption(const std::string& option) const { - vtkm::Id index = this->findOption(option); + auto index = this->findOption(option); if (index >= 0) { - std::string val = this->mCLOptions[std::vector::size_type(index)]; + std::string val = this->mCLOptions[static_cast(index)]; auto valPos = val.find("="); if (valPos) { @@ -137,19 +162,44 @@ private: // Compute and render an isosurface for a uniform grid example int main(int argc, char* argv[]) { - auto opts = vtkm::cont::InitializeOptions::DefaultAnyDevice; - vtkm::cont::InitializeResult config = vtkm::cont::Initialize(argc, argv, opts); +#ifdef WITH_MPI + // Setup the MPI environment. + MPI_Init(&argc, &argv); + auto comm = MPI_COMM_WORLD; + + // Tell VTK-m which communicator it should use. 
+ vtkm::cont::EnvironmentTracker::SetCommunicator(vtkmdiy::mpi::communicator(comm)); + + // get the rank and size + int rank, size; + MPI_Comm_rank(comm, &rank); + MPI_Comm_size(comm, &size); + int numBlocks = size; + int blocksPerRank = 1; + if (rank == 0) + { + std::cout << "Running with MPI. #ranks=" << size << std::endl; + } +#else + std::cout << "Single node run" << std::endl; + int rank = 0; +#endif - vtkm::cont::Timer totalTime; - totalTime.Start(); vtkm::Float64 prevTime = 0; vtkm::Float64 currTime = 0; + vtkm::cont::Timer totalTime; + + totalTime.Start(); + if (rank == 0) + { + std::cout << "ContourTreePPP2Mesh " << std::endl; + } //////////////////////////////////////////// // Parse the command line options //////////////////////////////////////////// ParseCL parser; - parser.parse(std::vector::size_type(argc), argv); + parser.parse(argc, argv); std::string filename = parser.getOptions().back(); bool computeRegularStructure = true; bool useMarchingCubes = false; @@ -163,14 +213,98 @@ int main(int argc, char* argv[]) printContourTree = true; if (parser.hasOption("--branchDecomp")) computeBranchDecomposition = std::stoi(parser.getOption("--branchDecomp")); + if (computeBranchDecomposition && (!computeRegularStructure)) + { + std::cout << "Regular structure is required for branch decomposition." + " Disabling branch decomposition" + << std::endl; + computeBranchDecomposition = false; + } + + std::string device("default"); + if (parser.hasOption("--device")) + { + device = parser.getOption("--device"); + auto& rtTracker = vtkm::cont::GetRuntimeDeviceTracker(); + if (device == "serial" && rtTracker.CanRunOn(vtkm::cont::DeviceAdapterTagSerial())) + { + rtTracker.ForceDevice(vtkm::cont::DeviceAdapterTagSerial()); + } + else if (device == "openmp" && rtTracker.CanRunOn(vtkm::cont::DeviceAdapterTagOpenMP())) + { + rtTracker.ForceDevice(vtkm::cont::DeviceAdapterTagOpenMP()); + } + else if (device == "tbb" && rtTracker.CanRunOn(vtkm::cont::DeviceAdapterTagTBB())) + { + rtTracker.ForceDevice(vtkm::cont::DeviceAdapterTagTBB()); + } + else if (device == "cuda" && rtTracker.CanRunOn(vtkm::cont::DeviceAdapterTagCuda())) + { + rtTracker.ForceDevice(vtkm::cont::DeviceAdapterTagCuda()); + } + else + { + std::cout << "Invalid or unavialable device adapter: " << device << std::endl; + return EXIT_FAILURE; + } + } + #ifdef ENABLE_SET_NUM_THREADS int numThreads = tbb::task_scheduler_init::default_num_threads(); if (parser.hasOption("--numThreads")) + { + if (device == "default" && + vtkm::cont::GetRuntimeDeviceTracker().CanRunOn(vtkm::cont::DeviceAdapterTagTBB())) + { + std::cout << "--numThreads specified without device. Forcing device as tbb."; + device = "tbb"; + vtkm::cont::GetRuntimeDeviceTracker().ForceDevice(vtkm::cont::DeviceAdapterTagTBB()); + } + numThreads = std::stoi(parser.getOption("--numThreads")); + if (device != "tbb") + { + std::cout << "numThreads will be ignored for devices other than tbb"; + } + } tbb::task_scheduler_init schedulerInit(numThreads); #endif - if (argc < 2 || parser.hasOption("--help") || parser.hasOption("-h")) + // Iso value selection parameters + // Approach to be used to select contours based on the tree + vtkm::Id contourType = 0; + // Error away from critical point + ValueType eps = 0.00001f; + // Number of iso levels to be selected. By default we disable the isovalue selection. 
+ vtkm::Id numLevels = 0; + // Number of components the tree should be simplified to + vtkm::Id numComp = numLevels + 1; + // Method to be used to compute the relevant iso values + vtkm::Id contourSelectMethod = 0; + bool usePersistenceSorter = true; + if (parser.hasOption("--levels")) + numLevels = std::stoi(parser.getOption("--levels")); + if (parser.hasOption("--type")) + contourType = std::stoi(parser.getOption("--type")); + if (parser.hasOption("--eps")) + eps = std::stof(parser.getOption("--eps")); + if (parser.hasOption("--method")) + contourSelectMethod = std::stoi(parser.getOption("--method")); + if (parser.hasOption("--comp")) + numComp = std::stoi(parser.getOption("--comp")); + if (contourSelectMethod == 0) + numComp = numLevels + 1; + if (parser.hasOption("--useVolumeSorter")) + usePersistenceSorter = false; + if ((numLevels > 0) && (!computeBranchDecomposition)) + { + std::cout << "Iso level selection only available when branch decomposition is enabled." + " Disabling iso value selection" + << std::endl; + numLevels = 0; + } + + if (rank == 0 && (argc < 2 || parser.hasOption("--help") || parser.hasOption("-h"))) { std::cout << "Parameter is " << std::endl; std::cout << "File is expected to be ASCII with either: " << std::endl; @@ -190,31 +324,106 @@ int main(int argc, char* argv[]) "Requires --augmentTree (Default=True)" << std::endl; std::cout << "--printCT Print the contour tree. (Default=False)" << std::endl; + std::cout << "--device Set the device to use (serial, openmp, tbb, cuda). " + "Use the default device if unspecified" + << std::endl; #ifdef ENABLE_SET_NUM_THREADS - std::cout << "--numThreads Specify the number of threads to use. Available only with TBB." + std::cout << "--numThreads Specifiy the number of threads to use. Available only with TBB." << std::endl; #endif - std::cout << config.Usage << std::endl; - return 0; + std::cout << std::endl; + std::cout << "Isovalue selection options: (require --branchDecomp=1 and augmentTree=1)" + << std::endl; + std::cout << "--levels= Number of iso-contour levels to be used (default=0, i.e., " + "disable isovalue computation)" + << std::endl; + std::cout << "--comp= Number of components the contour tree should be simplified to. " + "Only used if method==1. (default=0)" + << std::endl; + std::cout + << "--eps= Floating point offset awary from the critical point. (default=0.00001)" + << std::endl; + std::cout << "--type= Approach to be used for selection of iso-values. 0=saddle+-eps; " + "1=mid point between saddle and extremum, 2=extremum+-eps. (default=0)" + << std::endl; + std::cout << "--method= Method used for selecting relevant iso-values. 
(default=0)" + << std::endl; +#ifdef WITH_MPI + MPI_Finalize(); +#endif + return EXIT_SUCCESS; } - std::cout << "ContourTree " << std::endl; - std::cout << "Settings:" << std::endl; - std::cout << " filename=" << filename << std::endl; - std::cout << " mc=" << useMarchingCubes << std::endl; - std::cout << " augmentTree=" << computeRegularStructure << std::endl; - std::cout << " branchDecomp=" << computeBranchDecomposition << std::endl; -#ifdef ENABLE_SET_NUM_THREADS - std::cout << " numThreads=" << numThreads << std::endl; + if (rank == 0) + { + std::cout << "Settings:" << std::endl; + std::cout << " filename=" << filename << std::endl; + std::cout << " mc=" << useMarchingCubes << std::endl; + std::cout << " augmentTree=" << computeRegularStructure << std::endl; + std::cout << " branchDecomp=" << computeBranchDecomposition << std::endl; +#ifdef WITH_MPI + std::cout << " nblocks=" << numBlocks << std::endl; #endif - std::cout << config.Usage << std::endl; - std::cout << std::endl; - - +#ifdef ENABLE_SET_NUM_THREADS + std::cout << " numThreads=" << numThreads << std::endl; +#endif + std::cout << " computeIsovalyes=" << (numLevels > 0) << std::endl; + if (numLevels > 0) + { + std::cout << " levels=" << numLevels << std::endl; + std::cout << " eps=" << eps << std::endl; + std::cout << " comp" << numComp << std::endl; + std::cout << " type=" << contourType << std::endl; + std::cout << " method=" << contourSelectMethod << std::endl; + std::cout << " mc=" << useMarchingCubes << std::endl; + std::cout << " use" << (usePersistenceSorter ? "PersistenceSorter" : "VolumeSorter") + << std::endl; + } + std::cout << std::endl; + } currTime = totalTime.GetElapsedTime(); vtkm::Float64 startUpTime = currTime - prevTime; prevTime = currTime; +// Redirect stdout to file if we are using MPI with Debugging +#ifdef WITH_MPI +#ifdef DEBUG_PRINT + // From https://www.unix.com/302983597-post2.html + char* cstr_filename = new char[15]; + snprintf(cstr_filename, sizeof(filename), "cout_%d.log", rank); + int out = open(cstr_filename, O_RDWR | O_CREAT | O_APPEND, 0600); + if (-1 == out) + { + perror("opening cout.log"); + return 255; + } + + snprintf(cstr_filename, sizeof(cstr_filename), "cerr_%d.log", rank); + int err = open(cstr_filename, O_RDWR | O_CREAT | O_APPEND, 0600); + if (-1 == err) + { + perror("opening cerr.log"); + return 255; + } + + int save_out = dup(fileno(stdout)); + int save_err = dup(fileno(stderr)); + + if (-1 == dup2(out, fileno(stdout))) + { + perror("cannot redirect stdout"); + return 255; + } + if (-1 == dup2(err, fileno(stderr))) + { + perror("cannot redirect stderr"); + return 255; + } + + delete[] cstr_filename; +#endif +#endif + /////////////////////////////////////////////// // Read the input data /////////////////////////////////////////////// @@ -223,42 +432,54 @@ int main(int argc, char* argv[]) return 0; // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z - std::vector dims; + std::vector dims; std::string line; getline(inFile, line); std::istringstream linestream(line); - vtkm::Id dimVertices; + std::size_t dimVertices; while (linestream >> dimVertices) { dims.push_back(dimVertices); } // Compute the number of vertices, i.e., xdim * ydim * zdim - auto nDims = dims.size(); - std::vector::size_type nVertices = std::vector::size_type( - std::accumulate(dims.begin(), dims.end(), vtkm::Id(1), std::multiplies())); + unsigned short nDims = static_cast(dims.size()); + std::size_t nVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), 1, std::multiplies())); // Print the mesh metadata - std::cout << "Number of dimensions: " << nDims << std::endl; - std::cout << "Number of mesh vertices: " << nVertices << std::endl; - + if (rank == 0) + { + std::cout << " Number of dimensions: " << nDims << std::endl; + std::cout << "Number of mesh vertices: " << nVertices << std::endl; + } // Check the the number of dimensiosn is either 2D or 3D - if (nDims < 2 || nDims > 3) + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + bool invalidMCOption = (useMarchingCubes && nDims != 3); + if (rank == 0) { - std::cout << "The input mesh is " << nDims << "D. Input data must be either 2D or 3D." - << std::endl; - return 0; + if (invalidNumDimensions) + { + std::cout << "The input mesh is " << nDims << "D. Input data must be either 2D or 3D." + << std::endl; + } + if (invalidMCOption) + { + std::cout << "The input mesh is " << nDims + << "D. Contour tree using marching cubes only supported for 3D data." << std::endl; + } } - if (useMarchingCubes && nDims != 3) + if (invalidNumDimensions || invalidMCOption) { - std::cout << "The input mesh is " << nDims - << "D. Contour tree using marching cubes only supported for 3D data." << std::endl; - return 0; +#ifdef WITH_MPI + MPI_Finalize(); +#endif + return EXIT_SUCCESS; } - // read data - std::vector values(nVertices); - for (std::vector::size_type vertex = 0; vertex < nVertices; vertex++) + // Read data + std::vector values(nVertices); + for (std::size_t vertex = 0; vertex < nVertices; ++vertex) { inFile >> values[vertex]; } @@ -269,64 +490,200 @@ int main(int argc, char* argv[]) currTime = totalTime.GetElapsedTime(); vtkm::Float64 dataReadTime = currTime - prevTime; prevTime = currTime; - // build the input dataset + vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet inDataSet; - // 2D data - if (nDims == 2) +#ifndef WITH_MPI // construct regular, single-block VTK-M input dataset + vtkm::cont::DataSet inDataSet; // Single block dataset { - vtkm::Id2 vdims; - vdims[0] = dims[0]; - vdims[1] = dims[1]; - inDataSet = dsb.Create(vdims); + // build the input dataset + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + inDataSet = dsb.Create(vdims); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(dims[2]); + inDataSet = dsb.Create(vdims); + } + vtkm::cont::DataSetFieldAdd dsf; + dsf.AddPointField(inDataSet, "values", values); } - // 3D data - else +#else // Create a multi-block dataset for multi-block DIY-paralle processing + vtkm::cont::MultiBlock inDataSet; // Multi-block variant of the input dataset + vtkm::Id3 blocksPerDim = + nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + vtkm::Id3 globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(0)); + vtkm::cont::ArrayHandle localBlockIndices; + vtkm::cont::ArrayHandle localBlockOrigins; + vtkm::cont::ArrayHandle localBlockSizes; + localBlockIndices.Allocate(blocksPerRank); + localBlockOrigins.Allocate(blocksPerRank); + localBlockSizes.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.GetPortalControl(); + auto localBlockOriginsPortal = localBlockOrigins.GetPortalControl(); + auto localBlockSizesPortal = localBlockSizes.GetPortalControl(); + { - vtkm::Id3 vdims; - vdims[0] = dims[0]; - vdims[1] = dims[1]; - vdims[2] = dims[2]; - inDataSet = dsb.Create(vdims); + vtkm::Id lastDimSize = + (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + if (rank == 0) + { + std::cout << "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks" << std::endl; + } + MPI_Finalize(); + return EXIT_FAILURE; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = + nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + /*//Print the input values + for(auto i = values.begin(); i != values.end(); ++i) + std::cout << *i << ' '; + std::cout<(currBlockSize); + vdims[1] = static_cast(dims[0]); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(blockIndex, 0, 0)); + localBlockOriginsPortal.Set(localBlockIndex, + vtkm::Id3((blockStart / blockSliceSize), 0, 0)); + localBlockSizesPortal.Set(localBlockIndex, + vtkm::Id3(currBlockSize, static_cast(dims[0]), 0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, origin, spacing); + + //localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(blockIndex, 0, 0)); + //localBlockOriginsPortal.Set(localBlockIndex, vtkm::Id3((blockStart / blockSliceSize), 0, 0)); + //localBlockSizesPortal.Set(localBlockIndex, vtkm::Id3(currBlockSize, dims[1], dims[2])); + + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); + localBlockOriginsPortal.Set(localBlockIndex, + vtkm::Id3(0, 0, (blockStart / blockSliceSize))); + localBlockSizesPortal.Set( + localBlockIndex, + vtkm::Id3(static_cast(dims[0]), static_cast(dims[1]), currBlockSize)); + } + + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); + + vtkm::cont::DataSetFieldAdd dsf; + dsf.AddPointField(ds, "values", subValues); + inDataSet.AddBlock(ds); + } } - vtkm::cont::DataSetFieldAdd dsf; - dsf.AddPointField(inDataSet, "values", values); +#endif // WITH_MPI construct input dataset currTime = totalTime.GetElapsedTime(); vtkm::Float64 buildDatasetTime = currTime - prevTime; prevTime = currTime; - // Output data set is pairs of saddle and peak vertex IDs - vtkm::cont::DataSet result; - // Convert the mesh of values into contour tree, pairs of vertex ids vtkm::filter::ContourTreePPP2 filter(useMarchingCubes, computeRegularStructure); + +#ifdef WITH_MPI + filter.SetSpatialDecomposition( + blocksPerDim, globalSize, localBlockIndices, 
localBlockOrigins, localBlockSizes); +#endif filter.SetActiveField("values"); - result = filter.Execute(inDataSet); //, std::string("values")); + + // Execute the contour tree analysis. NOTE: If MPI is used the result will be + // a vtkm::cont::MultiBlock instead of a vtkm::cont::DataSet + auto result = filter.Execute(inDataSet); currTime = totalTime.GetElapsedTime(); vtkm::Float64 computeContourTreeTime = currTime - prevTime; prevTime = currTime; -#ifdef DEBUG_TIMING - std::cout << "-------------------------------------------------------------" << std::endl; - std::cout << "-------------------Contour Tree Timings----------------------" << std::endl; - // Get the timings from the contour tree computation - const std::vector>& contourTreeTimings = - filter.GetTimings(); - for (std::vector>::size_type i = 0; - i < contourTreeTimings.size(); - i++) - std::cout << std::setw(42) << std::left << contourTreeTimings[i].first << ": " - << contourTreeTimings[i].second << " seconds" << std::endl; +#ifdef WITH_MPI +#ifdef DEBUG_PRINT + if (rank == 0) + { + std::cout << "----- rank=" << rank << " ----Final Contour Tree Data----------------------------" + << std::endl; + filter.GetContourTree().PrintContent(); + vtkm::worklet::contourtree_augmented::printIndices("Mesh Sort Order", filter.GetSortOrder()); + } #endif +#endif + +#ifdef DEBUG_TIMING + if (rank == 0) + { + std::cout << "----------------------- " << rank << " --------------------------------------" + << std::endl; + std::cout << "-------------------Contour Tree Timings----------------------" << std::endl; + + // Get the timings from the contour tree computation + const std::vector>& contourTreeTimings = + filter.GetTimings(); + for (std::size_t i = 0; i < contourTreeTimings.size(); ++i) + std::cout << std::setw(42) << std::left << contourTreeTimings[i].first << ": " + << contourTreeTimings[i].second << " seconds" << std::endl; + } +#endif + //////////////////////////////////////////// // Compute the branch decomposition //////////////////////////////////////////// - if (computeBranchDecomposition) + if (rank == 0 && computeBranchDecomposition && computeRegularStructure) { - // TODO: Change timing to use logging in vtkm/cont/Logging.h vtkm::cont::Timer branchDecompTimer; branchDecompTimer.Start(); // compute the volume for each hyperarc and superarc @@ -343,7 +700,6 @@ int main(int argc, char* argv[]) hyperarcDependentWeight); // (output) std::cout << std::setw(42) << std::left << "Compute Volume Weights" << ": " << branchDecompTimer.GetElapsedTime() << " seconds" << std::endl; - branchDecompTimer.Reset(); branchDecompTimer.Start(); // compute the branch decomposition by volume @@ -363,7 +719,86 @@ int main(int argc, char* argv[]) branchParent); // (output) std::cout << std::setw(42) << std::left << "Compute Volume Branch Decomposition" << ": " << branchDecompTimer.GetElapsedTime() << " seconds" << std::endl; + + //----main branch decompostion end + //----Isovalue seleciton start + if (numLevels > 0) // if compute isovalues + { +// Get the data values for computing the explicit branch decomposition +// TODO Can we cast the handle we get from GetData() instead of doing a CopyTo? 
+#ifdef WITH_MPI + vtkm::cont::ArrayHandle dataField; + result.GetBlocks()[0].GetField(0).GetData().CopyTo(dataField); + bool dataFieldIsSorted = true; +#else + vtkm::cont::ArrayHandle dataField; + inDataSet.GetField(0).GetData().CopyTo(dataField); + bool dataFieldIsSorted = false; +#endif + + // create explicit representation of the branch decompostion from the array representation + BranchType* branchDecompostionRoot = + cppp2_ns::ProcessContourTree::ComputeBranchDecomposition( + filter.GetContourTree().superparents, + filter.GetContourTree().supernodes, + whichBranch, + branchMinimum, + branchMaximum, + branchSaddle, + branchParent, + filter.GetSortOrder(), + dataField, + dataFieldIsSorted); + +#ifdef DEBUG_PRINT + branchDecompostionRoot->print(std::cout); +#endif + + // Simplify the contour tree of the branch decompostion + branchDecompostionRoot->simplifyToSize(numComp, usePersistenceSorter); + + // Compute the relevant iso-values + std::vector isoValues; + switch (contourSelectMethod) + { + default: + case 0: + { + branchDecompostionRoot->getRelevantValues(static_cast(contourType), eps, isoValues); + } + break; + case 1: + { + vtkm::worklet::contourtree_augmented::process_contourtree_inc::PiecewiseLinearFunction< + ValueType> + plf; + branchDecompostionRoot->accumulateIntervals(static_cast(contourType), eps, plf); + isoValues = plf.nLargest(static_cast(numLevels)); + } + break; + } + + // Print the compute iso values + std::cout << std::endl; + std::cout << "Isovalue Suggestions" << std::endl; + std::cout << "====================" << std::endl; + std::sort(isoValues.begin(), isoValues.end()); + std::cout << "Isovalues: "; + for (ValueType val : isoValues) + std::cout << val << " "; + std::cout << std::endl; + + // Unique isovalues + std::vector::iterator it = std::unique(isoValues.begin(), isoValues.end()); + isoValues.resize(static_cast(std::distance(isoValues.begin(), it))); + std::cout << isoValues.size() << " Unique Isovalues: "; + for (ValueType val : isoValues) + std::cout << val << " "; + std::cout << std::endl; + std::cout << std::endl; + } //end if compute isovalue } + currTime = totalTime.GetElapsedTime(); vtkm::Float64 computeBranchDecompTime = currTime - prevTime; prevTime = currTime; @@ -372,8 +807,8 @@ int main(int argc, char* argv[]) //vtkm::cont::ArrayHandle > saddlePeak; //resultField.GetData().CopyTo(saddlePeak); - // dump out contour tree for comparison - if (printContourTree) + // Dump out contour tree for comparison + if (rank == 0 && printContourTree) { std::cout << "Contour Tree" << std::endl; std::cout << "============" << std::endl; @@ -384,7 +819,19 @@ int main(int argc, char* argv[]) } #ifdef DEBUG_TIMING - std::cout << "-------------------------------------------------------------" << std::endl; +#ifdef WITH_MPI + // Force a simple round-robin on the ranks for the summary prints. Its not perfect for MPI but + // it works well enough to sort the summaries from the ranks for small-scale debugging. 
+ if (rank > 0) + { + int temp; + MPI_Status status; + MPI_Recv(&temp, 1, MPI_INT, (rank - 1), 0, comm, &status); + } +#endif + + std::cout << "---------------------------" << rank << "----------------------------------" + << std::endl; std::cout << "--------------------------Totals-----------------------------" << std::endl; std::cout << std::setw(42) << std::left << "Start-up" << ": " << startUpTime << " seconds" << std::endl; @@ -401,14 +848,14 @@ int main(int argc, char* argv[]) } currTime = totalTime.GetElapsedTime(); //vtkm::Float64 miscTime = currTime - startUpTime - dataReadTime - buildDatasetTime - computeContourTreeTime; - //if (computeBranchDecomposition) miscTime -= computeBranchDecompTime; + //if(computeBranchDecomposition) miscTime -= computeBranchDecompTime; //std::cout< #include +#include #include namespace vtkm @@ -68,6 +69,11 @@ namespace vtkm namespace filter { +namespace detail +{ +class MultiBlockContourTreeHelper; +} // namespace detail + class ContourTreePPP2 : public vtkm::filter::FilterCell { @@ -76,6 +82,14 @@ public: VTKM_CONT ContourTreePPP2(bool useMarchingCubes = false, bool computeRegularStructure = true); + // Define the spatial decomposition of the data in case we run in parallel with a multi-block dataset + VTKM_CONT + void SetSpatialDecomposition(vtkm::Id3 blocksPerDim, + vtkm::Id3 globalSize, + const vtkm::cont::ArrayHandle& localBlockIndices, + const vtkm::cont::ArrayHandle& localBlockOrigins, + const vtkm::cont::ArrayHandle& localBlockSizes); + // Output field "saddlePeak" which is pairs of vertex ids indicating saddle and peak of contour template VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& input, @@ -83,20 +97,55 @@ public: const vtkm::filter::FieldMetadata& fieldMeta, vtkm::filter::PolicyBase policy); + //@{ + /// when operating on vtkm::cont::MultiBlock we want to + /// do processing across ranks as well. Just adding pre/post handles + /// for the same does the trick. 
+ template + VTKM_CONT void PreExecute(const vtkm::cont::PartitionedDataSet& input, + const vtkm::filter::PolicyBase& policy); + + template + VTKM_CONT void PostExecute(const vtkm::cont::PartitionedDataSet& input, + vtkm::cont::PartitionedDataSet& output, + const vtkm::filter::PolicyBase&); + + template + VTKM_CONT void DoPostExecute( + const vtkm::cont::PartitionedDataSet& input, + vtkm::cont::PartitionedDataSet& output, + const vtkm::filter::FieldMetadata& fieldMeta, + const vtkm::cont::ArrayHandle&, // dummy parameter to get the type + vtkm::filter::PolicyBase policy); + const vtkm::worklet::contourtree_augmented::ContourTree& GetContourTree() const; const vtkm::worklet::contourtree_augmented::IdArrayType& GetSortOrder() const; vtkm::Id GetNumIterations() const; const std::vector>& GetTimings() const; private: + // Given the input dataset determine the number of rows, cols, and slices + void getDims(const vtkm::cont::DataSet& input, + vtkm::Id& nrows, + vtkm::Id& ncols, + vtkm::Id nslices) const; + bool UseMarchingCubes; + // 0=no augmentation, 1=full augmentation bool ComputeRegularStructure; + // Store timings about the contour tree computation std::vector> Timings; - vtkm::worklet::contourtree_augmented::ContourTree ContourTreeData; // The contour tree - vtkm::Id NumIterations; // Number of iterations used to compute the contour tree - vtkm::worklet::contourtree_augmented::IdArrayType - MeshSortOrder; // Array with the sorted order of the mesh vertices + // TODO Should the additional fields below be add to the vtkm::filter::ResultField and what is the best way to represent them + // Additional result fields not included in the vtkm::filter::ResultField returned by DoExecute + // The contour tree + vtkm::worklet::contourtree_augmented::ContourTree ContourTreeData; + // Number of iterations used to compute the contour tree + vtkm::Id NumIterations; + // Array with the sorted order of the mesh vertices + vtkm::worklet::contourtree_augmented::IdArrayType MeshSortOrder; + // Helper object to help with the parallel merge when running with DIY in parallel with MulitBlock data + detail::MultiBlockContourTreeHelper* MultiBlockTreeHelper; }; @@ -133,8 +182,9 @@ struct GetRowsColsSlices throw vtkm::cont::ErrorBadValue("Expected 2D or 3D structured cell cet! 
"); } }; -} -} // namespace vtkm::filter + +} // namespace filter +} // namespace vtkm #include diff --git a/vtkm/filter/ContourTreeUniformAugmented.hxx b/vtkm/filter/ContourTreeUniformAugmented.hxx index 232ab5010..8d9fa7ce5 100644 --- a/vtkm/filter/ContourTreeUniformAugmented.hxx +++ b/vtkm/filter/ContourTreeUniformAugmented.hxx @@ -55,11 +55,455 @@ #include #include +#include + +#include +#include + +#include +//#include +#include +#include +#include +#include +#include +// clang-format off +VTKM_THIRDPARTY_PRE_INCLUDE +#include +#include +VTKM_THIRDPARTY_POST_INCLUDE +// clang-format on + + +// TODO, this should be moved into a namespace but gcc seems to have trouble with the template specialization +template +struct ContourTreeBlockData +{ + static void* create() { return new ContourTreeBlockData; } + static void destroy(void* b) { delete static_cast*>(b); } + + // ContourTreeMesh data + vtkm::Id nVertices; + vtkm::worklet::contourtree_augmented::IdArrayType + sortOrder; // TODO we should be able to remove this one, but we need to figure out what we need to return in the worklet instead + vtkm::cont::ArrayHandle sortedValues; + vtkm::worklet::contourtree_augmented::IdArrayType globalMeshIndex; + vtkm::worklet::contourtree_augmented::IdArrayType neighbours; + vtkm::worklet::contourtree_augmented::IdArrayType firstNeighbour; + vtkm::Id maxNeighbours; + + // Block metadata + vtkm::Id3 blockOrigin; // Origin of the data block + vtkm::Id3 blockSize; // Extends of the data block + vtkm::Id3 globalSize; // Extends of the global mesh +}; + +namespace vtkmdiy +{ + +// Struct to serialize ContourBlockData objects (i.e., load/save) needed in parralle for DIY +template +struct Serialization> +{ + static void save(vtkmdiy::BinaryBuffer& bb, const ContourTreeBlockData& block) + { + vtkmdiy::save(bb, block.nVertices); + vtkmdiy::save(bb, block.sortOrder); + vtkmdiy::save(bb, block.sortedValues); + vtkmdiy::save(bb, block.globalMeshIndex); + vtkmdiy::save(bb, block.neighbours); + vtkmdiy::save(bb, block.firstNeighbour); + vtkmdiy::save(bb, block.maxNeighbours); + vtkmdiy::save(bb, block.blockOrigin); + vtkmdiy::save(bb, block.blockSize); + vtkmdiy::save(bb, block.globalSize); + } + + static void load(vtkmdiy::BinaryBuffer& bb, ContourTreeBlockData& block) + { + vtkmdiy::load(bb, block.nVertices); + vtkmdiy::load(bb, block.sortOrder); + vtkmdiy::load(bb, block.sortedValues); + vtkmdiy::load(bb, block.globalMeshIndex); + vtkmdiy::load(bb, block.neighbours); + vtkmdiy::load(bb, block.firstNeighbour); + vtkmdiy::load(bb, block.maxNeighbours); + vtkmdiy::load(bb, block.blockOrigin); + vtkmdiy::load(bb, block.blockSize); + vtkmdiy::load(bb, block.globalSize); + } +}; + +} // namespace mangled_vtkmdiy_namespace + namespace vtkm { namespace filter { +namespace detail +{ +//----Helper struct to call DoPostExecute. 
This is used to be able to +// wrap the PostExecute work in a functor so that we can use VTK-M's +// vtkm::cont::CastAndCall to infer the FieldType template parameters +struct PostExecuteCaller +{ + template + VTKM_CONT void operator()(const vtkm::cont::ArrayHandle&, + ContourTreePPP2* self, + const vtkm::cont::PartitionedDataSet& input, + vtkm::cont::PartitionedDataSet& output, + const vtkm::filter::FieldMetadata& fieldMeta, + vtkm::filter::PolicyBase policy) const + { + vtkm::cont::ArrayHandle dummy; + self->DoPostExecute(input, output, fieldMeta, dummy, policy); + } +}; + + +// --- Helper class to store the spatial decomposition defined by the PartitionedDataSet input data +class SpatialDecomposition +{ +public: + VTKM_CONT + SpatialDecomposition(vtkm::Id3 blocksPerDim, + vtkm::Id3 globalSize, + const vtkm::cont::ArrayHandle& localBlockIndices, + const vtkm::cont::ArrayHandle& localBlockOrigins, + const vtkm::cont::ArrayHandle& localBlockSizes) + : mBlocksPerDimension(blocksPerDim) + , mGlobalSize(globalSize) + , mLocalBlockIndices(localBlockIndices) + , mLocalBlockOrigins(localBlockOrigins) + , mLocalBlockSizes(localBlockSizes) + { + } + + inline vtkmdiy::DiscreteBounds getvtkmdiyBounds() const + { + if (this->numberOfDimensions() == 2) + { + vtkmdiy::DiscreteBounds + domain; //(2); // may need to change back when porting ot later verison of VTKM/vtkmdiy + domain.min[0] = domain.min[1] = 0; + domain.max[0] = (int)this->mGlobalSize[0]; + domain.max[1] = (int)this->mGlobalSize[1]; + return domain; + } + else + { + vtkmdiy::DiscreteBounds + domain; //(3); // may need to change back when porting to later version of VTMK/vtkmdiy + domain.min[0] = domain.min[1] = domain.min[2] = 0; + domain.max[0] = (int)this->mGlobalSize[0]; + domain.max[1] = (int)this->mGlobalSize[1]; + domain.max[2] = (int)this->mGlobalSize[2]; + return domain; + } + } + + inline vtkm::Id numberOfDimensions() const { return mGlobalSize[2] > 1 ? 
3 : 2; } + + inline vtkm::Id getGlobalNumberOfBlocks() const + { + return mBlocksPerDimension[0] * mBlocksPerDimension[1] * mBlocksPerDimension[2]; + } + + inline vtkm::Id getLocalNumberOfBlocks() const { return mLocalBlockSizes.GetNumberOfValues(); } + + // Number of blocks along each dimension + vtkm::Id3 mBlocksPerDimension; + // Size of the global mesh + vtkm::Id3 mGlobalSize; + // Index of the local blocks in x,y,z, i.e., in i,j,k mesh coordinates + vtkm::cont::ArrayHandle mLocalBlockIndices; + // Origin of the local blocks in mesh index space + vtkm::cont::ArrayHandle mLocalBlockOrigins; + // Size of each local block in x, y,z + vtkm::cont::ArrayHandle mLocalBlockSizes; +}; + + +//--- Helper class to help with the contstuction of the GlobalContourTree +class MultiBlockContourTreeHelper +{ +public: + VTKM_CONT + MultiBlockContourTreeHelper(vtkm::Id3 blocksPerDim, + vtkm::Id3 globalSize, + const vtkm::cont::ArrayHandle& localBlockIndices, + const vtkm::cont::ArrayHandle& localBlockOrigins, + const vtkm::cont::ArrayHandle& localBlockSizes) + : mSpatialDecomposition(blocksPerDim, + globalSize, + localBlockIndices, + localBlockOrigins, + localBlockSizes) + { + vtkm::Id localNumBlocks = this->getLocalNumberOfBlocks(); + mLocalContourTrees.resize((std::size_t)localNumBlocks); + mLocalSortOrders.resize((std::size_t)localNumBlocks); + } + + VTKM_CONT + ~MultiBlockContourTreeHelper(void) + { + mLocalContourTrees.clear(); + mLocalSortOrders.clear(); + } + + inline static void GetGlobalMeshIndex( + vtkm::worklet::contourtree_augmented::IdArrayType& globalMeshIndex, + const vtkm::worklet::contourtree_augmented::IdArrayType& sortOrder, + vtkm::Id startRow, + vtkm::Id startCol, + vtkm::Id startSlice, + vtkm::Id numRows, + vtkm::Id numCols, + vtkm::Id totalNumRows, + vtkm::Id totalNumCols) + { + auto transformedIndex = + vtkm::cont::ArrayHandleTransform( + sortOrder, + vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabler( + startRow, startCol, startSlice, numRows, numCols, totalNumRows, totalNumCols)); + vtkm::cont::Algorithm::Copy(transformedIndex, globalMeshIndex); + } + + inline vtkm::Id getLocalNumberOfBlocks() const + { + return this->mSpatialDecomposition.getLocalNumberOfBlocks(); + } + + inline vtkm::Id getGlobalNumberOfBlocks() const + { + return this->mSpatialDecomposition.getGlobalNumberOfBlocks(); + } + + inline static vtkm::Id getGlobalNumberOfBlocks(const vtkm::cont::PartitionedDataSet& input) + { + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + vtkm::Id localSize = input.GetNumberOfPartitions(); + vtkm::Id globalSize = 0; + vtkmdiy::mpi::all_reduce(comm, localSize, globalSize, std::plus{}); + return globalSize; + } + + template + inline static vtkm::worklet::contourtree_augmented::ContourTreeMesh* + computeLocalContourTreeMesh(const vtkm::Id3 localBlockOrigin, + const vtkm::Id3 localBlockSize, + const vtkm::Id3 globalSize, + const vtkm::cont::ArrayHandle& field, + const vtkm::worklet::contourtree_augmented::ContourTree& contourTree, + const vtkm::worklet::contourtree_augmented::IdArrayType& sortOrder) + + { + vtkm::Id startRow = localBlockOrigin[0]; + vtkm::Id startCol = localBlockOrigin[1]; + vtkm::Id startSlice = localBlockOrigin[2]; + vtkm::Id numRows = localBlockSize[0]; + vtkm::Id numCols = localBlockSize[1]; + vtkm::Id totalNumRows = globalSize[0]; + vtkm::Id totalNumCols = globalSize[1]; + + // compute the global mesh index + vtkm::worklet::contourtree_augmented::IdArrayType localGlobalMeshIndex; + 
MultiBlockContourTreeHelper::GetGlobalMeshIndex(localGlobalMeshIndex, + sortOrder, + startRow, + startCol, + startSlice, + numRows, + numCols, + totalNumRows, + totalNumCols); + + // Compute the local contour tree mesh + auto localContourTreeMesh = new vtkm::worklet::contourtree_augmented::ContourTreeMesh( + contourTree.arcs, sortOrder, field, localGlobalMeshIndex); + return localContourTreeMesh; + } + + template + inline static vtkm::worklet::contourtree_augmented::ContourTreeMesh* + computeMergedContourTreeMesh( + const vtkm::worklet::contourtree_augmented::IdArrayType& contourTreeArcs, + const vtkm::worklet::contourtree_augmented::ContourTreeMesh& contourTreeMesh) + { + auto localContourTreeMesh = + new vtkm::worklet::contourtree_augmented::ContourTreeMesh(contourTreeArcs, + contourTreeMesh); + return localContourTreeMesh; + } + + SpatialDecomposition mSpatialDecomposition; + std::vector mLocalContourTrees; + std::vector mLocalSortOrders; + +}; // end MultiBlockContourTreeHelper + +// Functor needed so we can discover the FieldType and DeviceAdapter template parameters to call mergeWith +struct MergeFunctor +{ + template + bool operator()(DeviceAdapterTag, + vtkm::worklet::contourtree_augmented::ContourTreeMesh& in, + vtkm::worklet::contourtree_augmented::ContourTreeMesh& out) const + { + out.template mergeWith(in); + return true; + } +}; + +// Functor used by DIY reduce the merge data blocks in parallel +template +void merge_block_functor( + ContourTreeBlockData* block, // local Block. + const vtkmdiy::ReduceProxy& rp, // communication proxy + const vtkmdiy::RegularMergePartners& partners // partners of the current block + ) +{ //merge_block_functor + (void)partners; // Avoid unused parameter warning + + const auto selfid = rp.gid(); + + // TODO This should be changed so that we have the ContourTree itself as the block and then the + // ContourTreeMesh would still be used for exchange. In this case we would need to compute + // the ContourTreeMesh at the beginning of the function for the current block every time + // but then we would not need to compute those meshes when we initialize vtkmdiy + // and we don't need to have the special case for rank 0. + + // Here we do the deque first before the send due to the way the iteration is handled in DIY, i.e., in each iteration + // A block needs to first collect the data from its neighours and then send the combined block to its neighbours + // for the next iteration. + + // 1. 
dequeue the block and compute the new contour tree and contour tree mesh for the block if we have the hight GID + std::vector incoming; + rp.incoming(incoming); + for (const int ingid : incoming) + { + if (ingid != selfid) + { + ContourTreeBlockData recvblock; + rp.dequeue(ingid, recvblock); + + // Construct the two contour tree mesh by assignign the block data + vtkm::worklet::contourtree_augmented::ContourTreeMesh contourTreeMeshIn; + contourTreeMeshIn.nVertices = recvblock.nVertices; + contourTreeMeshIn.sortOrder = recvblock.sortOrder; + contourTreeMeshIn.sortedValues = recvblock.sortedValues; + contourTreeMeshIn.globalMeshIndex = recvblock.globalMeshIndex; + contourTreeMeshIn.neighbours = recvblock.neighbours; + contourTreeMeshIn.firstNeighbour = recvblock.firstNeighbour; + contourTreeMeshIn.maxNeighbours = recvblock.maxNeighbours; + + vtkm::worklet::contourtree_augmented::ContourTreeMesh contourTreeMeshOut; + contourTreeMeshOut.nVertices = block->nVertices; + contourTreeMeshOut.sortOrder = block->sortOrder; + contourTreeMeshOut.sortedValues = block->sortedValues; + contourTreeMeshOut.globalMeshIndex = block->globalMeshIndex; + contourTreeMeshOut.neighbours = block->neighbours; + contourTreeMeshOut.firstNeighbour = block->firstNeighbour; + contourTreeMeshOut.maxNeighbours = block->maxNeighbours; + + // Merge the two contour tree meshes + vtkm::cont::TryExecute(MergeFunctor{}, contourTreeMeshIn, contourTreeMeshOut); + + // Compute the origin and size of the new block + vtkm::Id3 globalSize = block->globalSize; + vtkm::Id3 currBlockOrigin; + currBlockOrigin[0] = std::min(recvblock.blockOrigin[0], block->blockOrigin[0]); + currBlockOrigin[1] = std::min(recvblock.blockOrigin[1], block->blockOrigin[1]); + currBlockOrigin[2] = std::min(recvblock.blockOrigin[2], block->blockOrigin[2]); + vtkm::Id3 currBlockMaxIndex; // Needed only to compute the block size + currBlockMaxIndex[0] = std::max(recvblock.blockOrigin[0] + recvblock.blockSize[0], + block->blockOrigin[0] + block->blockSize[0]); + currBlockMaxIndex[1] = std::max(recvblock.blockOrigin[1] + recvblock.blockSize[1], + block->blockOrigin[1] + block->blockSize[1]); + currBlockMaxIndex[2] = std::max(recvblock.blockOrigin[2] + recvblock.blockSize[2], + block->blockOrigin[2] + block->blockSize[2]); + vtkm::Id3 currBlockSize; + currBlockSize[0] = currBlockMaxIndex[0] - currBlockOrigin[0]; + currBlockSize[1] = currBlockMaxIndex[1] - currBlockOrigin[1]; + currBlockSize[2] = currBlockMaxIndex[2] - currBlockOrigin[2]; + + // On rank 0 we compute the contour tree at the end when the merge is done, so we don't need to do it here + if (selfid == 0) + { + // Copy the data from newContourTreeMesh into block + block->nVertices = contourTreeMeshOut.nVertices; + block->sortOrder = contourTreeMeshOut.sortOrder; + block->sortedValues = contourTreeMeshOut.sortedValues; + block->globalMeshIndex = contourTreeMeshOut.globalMeshIndex; + block->neighbours = contourTreeMeshOut.neighbours; + block->firstNeighbour = contourTreeMeshOut.firstNeighbour; + block->maxNeighbours = contourTreeMeshOut.maxNeighbours; + block->blockOrigin = currBlockOrigin; + block->blockSize = currBlockSize; + block->globalSize = globalSize; + } + else // If we are a block that will continue to be merged then we need compute the contour tree here + { + // Compute the contour tree from our merged mesh + std::vector> currTimings; + vtkm::Id currNumIterations; + vtkm::worklet::contourtree_augmented::ContourTree currContourTree; + vtkm::worklet::contourtree_augmented::IdArrayType currSortOrder; + 
vtkm::worklet::ContourTreePPP2 worklet; + vtkm::cont::ArrayHandle currField; + + // Run the contour tree compute. NOTE, the first paramerter is unused here. We need to provide + // something for the field to keep the API happy + worklet.Run( + contourTreeMeshOut.sortedValues, + contourTreeMeshOut, + currTimings, + currContourTree, + currSortOrder, + currNumIterations, + 1 // TODO eventually replace with boundary augmentation but for now compute the full regular structure + ); + auto newContourTreeMesh = + vtkm::filter::detail::MultiBlockContourTreeHelper::computeMergedContourTreeMesh( + currContourTree.arcs, contourTreeMeshOut); + + // Copy the data from newContourTreeMesh into block + block->nVertices = newContourTreeMesh->nVertices; + block->sortOrder = newContourTreeMesh->sortOrder; + block->sortedValues = newContourTreeMesh->sortedValues; + block->globalMeshIndex = newContourTreeMesh->globalMeshIndex; + block->neighbours = newContourTreeMesh->neighbours; + block->firstNeighbour = newContourTreeMesh->firstNeighbour; + block->maxNeighbours = newContourTreeMesh->maxNeighbours; + block->blockOrigin = currBlockOrigin; + block->blockSize = currBlockSize; + block->globalSize = globalSize; + + // TODO delete newContourTreeMesh + // TODO Clean up the pointers from the local data blocks from the previous iteration + } + } + } + + // Send our current block (which is either our original block or the one we just combined from the ones we received) to our next neighbour. + // Once a rank has send his block (either in its orignal or merged form) it is done with the reduce + for (int cc = 0; cc < rp.out_link().size(); ++cc) + { + auto target = rp.out_link().target(cc); + if (target.gid != selfid) + { + rp.enqueue(target, *block); + } + } +} //end merge_block_functor + +} // end namespace detail + + //----------------------------------------------------------------------------- ContourTreePPP2::ContourTreePPP2(bool useMarchingCubes, bool computeRegularStructure) @@ -67,8 +511,25 @@ ContourTreePPP2::ContourTreePPP2(bool useMarchingCubes, bool computeRegularStruc , UseMarchingCubes(useMarchingCubes) , ComputeRegularStructure(computeRegularStructure) , Timings() + , MultiBlockTreeHelper(nullptr) { - this->SetOutputFieldName("arcs"); + this->SetOutputFieldName("resultData"); +} + +void ContourTreePPP2::SetSpatialDecomposition( + vtkm::Id3 blocksPerDim, + vtkm::Id3 globalSize, + const vtkm::cont::ArrayHandle& localBlockIndices, + const vtkm::cont::ArrayHandle& localBlockOrigins, + const vtkm::cont::ArrayHandle& localBlockSizes) +{ + if (this->MultiBlockTreeHelper) + { + delete this->MultiBlockTreeHelper; + this->MultiBlockTreeHelper = nullptr; + } + this->MultiBlockTreeHelper = new detail::MultiBlockContourTreeHelper( + blocksPerDim, globalSize, localBlockIndices, localBlockOrigins, localBlockSizes); } const vtkm::worklet::contourtree_augmented::ContourTree& ContourTreePPP2::GetContourTree() const @@ -118,12 +579,16 @@ vtkm::cont::DataSet ContourTreePPP2::DoExecute(const vtkm::cont::DataSet& input, const auto& cells = input.GetCellSet(); vtkm::filter::ApplyPolicyCellSet(cells, policy) .CastAndCall(GetRowsColsSlices(), nRows, nCols, nSlices); + // TODO blockIndex=0 --- This needs to change if we have multiple blocks per MPI rank and DoExecute is called for multiple blocks + std::size_t blockIndex = 0; // Run the worklet worklet.Run(field, this->Timings, - this->ContourTreeData, - this->MeshSortOrder, + MultiBlockTreeHelper ? 
MultiBlockTreeHelper->mLocalContourTrees[blockIndex] + : this->ContourTreeData, + MultiBlockTreeHelper ? MultiBlockTreeHelper->mLocalSortOrders[blockIndex] + : this->MeshSortOrder, this->NumIterations, nRows, nCols, @@ -131,23 +596,296 @@ vtkm::cont::DataSet ContourTreePPP2::DoExecute(const vtkm::cont::DataSet& input, this->UseMarchingCubes, this->ComputeRegularStructure); - // Compute the saddle peak dataset for return - // vtkm::cont::ArrayHandle > saddlePeak; - // ProcessContourTree::CollectSortedSuperarcs(ContourTreeData, MeshSortOrder, saddlePeak); - // Update the total timings vtkm::Float64 totalTimeWorklet = 0; for (std::vector>::size_type i = 0; i < Timings.size(); i++) totalTimeWorklet += Timings[i].second; - std::cout << "Total time measured by worklet: " << totalTimeWorklet << std::endl; Timings.push_back(std::pair( "Others (ContourTreePPP2 Filter): ", timer.GetElapsedTime() - totalTimeWorklet)); - - return CreateResult(input, ContourTreeData.arcs, this->GetOutputFieldName(), fieldMeta); - + // If we run in parallel but with only one global block, then we need set our outputs correctly + // here to match the expected behavior in parallel + if (this->MultiBlockTreeHelper) + { + if (this->MultiBlockTreeHelper->getGlobalNumberOfBlocks() == 1) + { + // Copy the contour tree and mesh sort order to the output + this->ContourTreeData = this->MultiBlockTreeHelper->mLocalContourTrees[0]; + this->MeshSortOrder = this->MultiBlockTreeHelper->mLocalSortOrders[0]; + // In parallel we need the sorted values as output resulti + // Construct the sorted values by permutting the input field + auto fieldPermutted = vtkm::cont::make_ArrayHandlePermutation(this->MeshSortOrder, field); + vtkm::cont::ArrayHandle sortedValues; + vtkm::cont::Algorithm::Copy(fieldPermutted, sortedValues); + // Create the result object + vtkm::cont::DataSet result; + vtkm::cont::Field rfield( + this->GetOutputFieldName(), vtkm::cont::Field::Association::WHOLE_MESH, sortedValues); + result.AddField(rfield); + return result; + } + } + // Construct the expected result for serial execution. Note, in serial the result currently + // not actually being used, but in parallel we need the sorted mesh values as output + // This part is being hit when we run in serial or parallel with more then one rank + return CreateResultFieldPoint(input, ContourTreeData.arcs, this->GetOutputFieldName()); } // ContourTreePPP2::DoExecute +//----------------------------------------------------------------------------- +template +inline VTKM_CONT void ContourTreePPP2::PreExecute(const vtkm::cont::PartitionedDataSet& input, + const vtkm::filter::PolicyBase&) +{ + //if( input.GetNumberOfBlocks() != 1){ + // throw vtkm::cont::ErrorBadValue("Expected MultiBlock data with 1 block per rank "); + //} + if (this->MultiBlockTreeHelper) + { + if (this->MultiBlockTreeHelper->getGlobalNumberOfBlocks(input) != + this->MultiBlockTreeHelper->getGlobalNumberOfBlocks()) + { + throw vtkm::cont::ErrorFilterExecution( + "Global number of block in MultiBlock dataset does not match the SpatialDecomposition"); + } + if (this->MultiBlockTreeHelper->getLocalNumberOfBlocks() != input.GetNumberOfPartitions()) + { + throw vtkm::cont::ErrorFilterExecution( + "Global number of block in MultiBlock dataset does not match the SpatialDecomposition"); + } + } //else + //{ + // throw vtkm::cont::ErrorFilterExecution("Spatial decomposition not defined for MultiBlock execution. 
Use ContourTreePPP2::SetSpatialDecompoistion to define the domain decomposition."); + //} +} + + +//----------------------------------------------------------------------------- +template +VTKM_CONT void ContourTreePPP2::DoPostExecute( + const vtkm::cont::PartitionedDataSet& input, + vtkm::cont::PartitionedDataSet& output, + const vtkm::filter::FieldMetadata& fieldMeta, + const vtkm::cont::ArrayHandle&, // dummy parameter to get the type + vtkm::filter::PolicyBase policy) +{ + (void)fieldMeta; // avoid unused parameter warning + (void)policy; // avoid unused parameter warning + + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + vtkm::Id size = comm.size(); + vtkm::Id rank = comm.rank(); + + std::vector*> localContourTreeMeshes; + localContourTreeMeshes.resize(static_cast(input.GetNumberOfPartitions())); + // TODO need to allocate and free these ourselves. May need to update detail::MultiBlockContourTreeHelper::computeLocalContourTreeMesh + std::vector*> localDataBlocks; + localDataBlocks.resize(static_cast(input.GetNumberOfPartitions())); + std::vector localLinks; // dummy links needed to make DIY happy + localLinks.resize(static_cast(input.GetNumberOfPartitions())); + for (std::size_t bi = 0; bi < static_cast(input.GetNumberOfPartitions()); bi++) + { + // create the local contour tree mesh + localLinks[bi] = new vtkmdiy::Link; + auto currBlock = input.GetPartition(static_cast(bi)); + auto currField = + currBlock.GetField(this->GetActiveFieldName(), this->GetActiveFieldAssociation()); + //const vtkm::cont::ArrayHandle &fieldData = currField.GetData().Cast >(); + vtkm::cont::ArrayHandle fieldData; + vtkm::cont::ArrayCopy(currField.GetData().AsVirtual(), fieldData); + auto currContourTreeMesh = + vtkm::filter::detail::MultiBlockContourTreeHelper::computeLocalContourTreeMesh( + this->MultiBlockTreeHelper->mSpatialDecomposition.mLocalBlockOrigins.GetPortalConstControl() + .Get(static_cast(bi)), + this->MultiBlockTreeHelper->mSpatialDecomposition.mLocalBlockSizes.GetPortalConstControl() + .Get(static_cast(bi)), + this->MultiBlockTreeHelper->mSpatialDecomposition.mGlobalSize, + fieldData, + MultiBlockTreeHelper->mLocalContourTrees[bi], + MultiBlockTreeHelper->mLocalSortOrders[bi]); + localContourTreeMeshes[bi] = currContourTreeMesh; + // create the local data block structure + localDataBlocks[bi] = new ContourTreeBlockData(); + localDataBlocks[bi]->nVertices = currContourTreeMesh->nVertices; + localDataBlocks[bi]->sortOrder = currContourTreeMesh->sortOrder; + localDataBlocks[bi]->sortedValues = currContourTreeMesh->sortedValues; + localDataBlocks[bi]->globalMeshIndex = currContourTreeMesh->globalMeshIndex; + localDataBlocks[bi]->neighbours = currContourTreeMesh->neighbours; + localDataBlocks[bi]->firstNeighbour = currContourTreeMesh->firstNeighbour; + localDataBlocks[bi]->maxNeighbours = currContourTreeMesh->maxNeighbours; + localDataBlocks[bi]->blockOrigin = + this->MultiBlockTreeHelper->mSpatialDecomposition.mLocalBlockOrigins.GetPortalConstControl() + .Get(static_cast(bi)); + localDataBlocks[bi]->blockSize = + this->MultiBlockTreeHelper->mSpatialDecomposition.mLocalBlockSizes.GetPortalConstControl() + .Get(static_cast(bi)); + localDataBlocks[bi]->globalSize = this->MultiBlockTreeHelper->mSpatialDecomposition.mGlobalSize; + } + + // Setup vtkmdiy to do global binary reduction of neighbouring blocks. 
See also the merge_block_functor above, which implements the reduction operation. + + // Create the vtkmdiy master + vtkmdiy::Master master(comm, + 1, // Use 1 thread, VTK-M will do the threading + -1 // All blocks in memory + ); + + // Compute the gids for our local blocks + const detail::SpatialDecomposition& spatialDecomp = + this->MultiBlockTreeHelper->mSpatialDecomposition; + auto localBlockIndicesPortal = spatialDecomp.mLocalBlockIndices.GetPortalConstControl(); + std::vector vtkmdiyLocalBlockGids(static_cast(input.GetNumberOfPartitions())); + for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++) + { + std::vector tempCoords; // DivisionsVector type in DIY + std::vector tempDivisions; // DivisionsVector type in DIY + tempCoords.resize(static_cast(spatialDecomp.numberOfDimensions())); + tempDivisions.resize(static_cast(spatialDecomp.numberOfDimensions())); + auto currentCoords = localBlockIndicesPortal.Get(bi); + for (std::size_t di = 0; di < static_cast(spatialDecomp.numberOfDimensions()); di++) + { + tempCoords[di] = static_cast(currentCoords[static_cast(di)]); + tempDivisions[di] = + static_cast(spatialDecomp.mBlocksPerDimension[static_cast(di)]); + } + vtkmdiyLocalBlockGids[static_cast(bi)] = + vtkmdiy::RegularDecomposer::coords_to_gid(tempCoords, tempDivisions); + } + + // Add my local blocks to the vtkmdiy master. + for (std::size_t bi = 0; bi < static_cast(input.GetNumberOfPartitions()); bi++) + { + master.add(static_cast(vtkmdiyLocalBlockGids[bi]), // block id + localDataBlocks[bi], + localLinks[bi]); + } + + // Define the decomposition of the domain into regular blocks + vtkmdiy::RegularDecomposer decomposer( + static_cast(spatialDecomp.numberOfDimensions()), // number of dims + spatialDecomp.getvtkmdiyBounds(), + static_cast(spatialDecomp.getGlobalNumberOfBlocks())); + + // Define which blocks live on which rank so that vtkmdiy can manage them + vtkmdiy::DynamicAssigner assigner( + comm, static_cast(size), static_cast(spatialDecomp.getGlobalNumberOfBlocks())); + for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++) + { + assigner.set_rank(static_cast(rank), + static_cast(vtkmdiyLocalBlockGids[static_cast(bi)])); + } + + // Fix the vtkmdiy links. TODO: Remove changes to vtkmdiy in VTK-M when we update to the new VTK-M + vtkmdiy::fix_links(master, assigner); + + // Partners for the merge over the regular block grid + vtkmdiy::RegularMergePartners partners( + decomposer, // domain decomposition + 2, // radix of the k-ary reduction. TODO: check this value + true // contiguous: true=distance doubling, false=distance halving. TODO: check this value + ); + + // reduction + vtkmdiy::reduce(master, assigner, partners, &detail::merge_block_functor); + + comm.barrier(); // Be safe!
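/* Illustrative sketch (editorial, hedged): the spatialDecomp, decomposer, and assigner used above all
   derive from the SpatialDecomposition that the application registers before execution via
   SetSpatialDecomposition (defined earlier in this file). A minimal, hypothetical call for a rank that
   owns the single block (0,1,0) of a 1x2x1 split of a 16x16x1 global grid could look as follows; the
   vtkm::Id3 element type of the arrays and all numeric values are assumptions for illustration only.

     vtkm::filter::ContourTreePPP2 filter(false, true); // useMarchingCubes=false, computeRegularStructure=true
     vtkm::cont::ArrayHandle<vtkm::Id3> blockIndices, blockOrigins, blockSizes;
     blockIndices.Allocate(1);
     blockOrigins.Allocate(1);
     blockSizes.Allocate(1);
     blockIndices.GetPortalControl().Set(0, vtkm::Id3(0, 1, 0)); // position of this block in the block grid
     blockOrigins.GetPortalControl().Set(0, vtkm::Id3(0, 8, 0)); // first global vertex owned by this block
     blockSizes.GetPortalControl().Set(0, vtkm::Id3(16, 8, 1));  // number of vertices in this block
     filter.SetSpatialDecomposition(
       vtkm::Id3(1, 2, 1), vtkm::Id3(16, 16, 1), blockIndices, blockOrigins, blockSizes);

   After vtkmdiy::reduce above finishes, only one block still holds the fully merged
   ContourTreeBlockData; the code below relies on that block being localDataBlocks[0] on rank 0
   (see the if (rank == 0) branch). */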
+ + if (rank == 0) + { + // Now run the contour tree algorithm on the last block to compute the final tree + std::vector> currTimings; + vtkm::Id currNumIterations; + vtkm::worklet::contourtree_augmented::ContourTree currContourTree; + vtkm::worklet::contourtree_augmented::IdArrayType currSortOrder; + vtkm::worklet::ContourTreePPP2 worklet; + vtkm::cont::ArrayHandle currField; + // Construct the contour tree mesh from the last block + vtkm::worklet::contourtree_augmented::ContourTreeMesh contourTreeMeshOut; + contourTreeMeshOut.nVertices = localDataBlocks[0]->nVertices; + contourTreeMeshOut.sortOrder = localDataBlocks[0]->sortOrder; + contourTreeMeshOut.sortedValues = localDataBlocks[0]->sortedValues; + contourTreeMeshOut.globalMeshIndex = localDataBlocks[0]->globalMeshIndex; + contourTreeMeshOut.neighbours = localDataBlocks[0]->neighbours; + contourTreeMeshOut.firstNeighbour = localDataBlocks[0]->firstNeighbour; + contourTreeMeshOut.maxNeighbours = localDataBlocks[0]->maxNeighbours; + //auto fieldPermutted = vtkm::cont::make_ArrayHandlePermutation(contourTreeMeshOut.sortOrder, contourTreeMeshOut.sortedValues); + //vtkm::cont::Algorithm::Copy(fieldPermutted, currField); + // Run the worklet to compute the final contour tree + worklet.Run( + contourTreeMeshOut + .sortedValues, // This array is not really used; we just need to provide something to keep the API happy (currField), + contourTreeMeshOut, + currTimings, + this->ContourTreeData, + this->MeshSortOrder, + currNumIterations, + this + ->ComputeRegularStructure // true // TODO eventually replace with boundary augmentation but for now compute the full regular structure + ); + this->MeshSortOrder = + contourTreeMeshOut.globalMeshIndex; // Set the final mesh sort order we need to use + this->NumIterations = currNumIterations; // Remember the number of iterations for the output + + // Return the sorted values of the contour tree as the result + // TODO the results we return for the parallel and serial cases are different right now. This should be made consistent.
However, only in the parallel case are we useing the result output + vtkm::cont::DataSet temp; + vtkm::cont::Field rfield(this->GetOutputFieldName(), + vtkm::cont::Field::Association::WHOLE_MESH, + contourTreeMeshOut.sortedValues); + temp.AddField(rfield); + output = vtkm::cont::PartitionedDataSet(temp); + } + else + { + this->ContourTreeData = MultiBlockTreeHelper->mLocalContourTrees[0]; + this->MeshSortOrder = MultiBlockTreeHelper->mLocalSortOrders[0]; + + // Free allocated temporary pointers + for (std::size_t bi = 0; bi < static_cast(input.GetNumberOfPartitions()); bi++) + { + delete localContourTreeMeshes[bi]; + delete localDataBlocks[bi]; + // delete localLinks[bi]; + } + } + localContourTreeMeshes.clear(); + localDataBlocks.clear(); + localLinks.clear(); +} + + +//----------------------------------------------------------------------------- +template +inline VTKM_CONT void ContourTreePPP2::PostExecute( + const vtkm::cont::PartitionedDataSet& input, + vtkm::cont::PartitionedDataSet& result, + const vtkm::filter::PolicyBase& policy) +{ + if (this->MultiBlockTreeHelper) + { + // We are running in parallel and need to merge the contour tree in PostExecute + if (MultiBlockTreeHelper->getGlobalNumberOfBlocks() == 1) + { + return; + } + auto field = + input.GetPartition(0).GetField(this->GetActiveFieldName(), this->GetActiveFieldAssociation()); + vtkm::filter::FieldMetadata metaData(field); + + vtkm::filter::FilterTraits traits; + vtkm::cont::CastAndCall(vtkm::filter::ApplyPolicyFieldActive(field, policy, traits), + detail::PostExecuteCaller{}, + this, + input, + result, + metaData, + policy); + + delete this->MultiBlockTreeHelper; + this->MultiBlockTreeHelper = nullptr; + } +} + + } // namespace filter } // namespace vtkm::filter diff --git a/vtkm/thirdparty/diy/diy.h b/vtkm/thirdparty/diy/diy.h index c305e53c4..a0737eb37 100644 --- a/vtkm/thirdparty/diy/diy.h +++ b/vtkm/thirdparty/diy/diy.h @@ -34,6 +34,7 @@ VTKM_THIRDPARTY_PRE_INCLUDE #include VTKM_DIY_INCLUDE(partners/swap.hpp) #include VTKM_DIY_INCLUDE(reduce.hpp) #include VTKM_DIY_INCLUDE(reduce-operations.hpp) +#include VTKM_DIY_INCLUDE(resolve.hpp) #include VTKM_DIY_INCLUDE(serialization.hpp) #undef VTKM_DIY_INCLUDE VTKM_THIRDPARTY_POST_INCLUDE diff --git a/vtkm/worklet/ContourTreeUniformAugmented.h b/vtkm/worklet/ContourTreeUniformAugmented.h index bf9c9d0c7..aed702129 100644 --- a/vtkm/worklet/ContourTreeUniformAugmented.h +++ b/vtkm/worklet/ContourTreeUniformAugmented.h @@ -75,6 +75,7 @@ #include #include #include +#include namespace vtkm { @@ -84,6 +85,30 @@ namespace worklet class ContourTreePPP2 { public: + /*! + * Run the contour tree to merge an existing set of contour trees + */ + template + void Run(const vtkm::cont::ArrayHandle + fieldArray, // TODO: We really should not need this + contourtree_augmented::ContourTreeMesh& mesh, + std::vector>& timings, + contourtree_augmented::ContourTree& contourTree, + contourtree_augmented::IdArrayType sortOrder, + vtkm::Id& nIterations, + bool computeRegularStructure = true) + { + RunContourTree( + fieldArray, // Just a place-holder to fill the required field. Used when calling SortData on the contour tree which is a no-op + timings, + contourTree, + sortOrder, + nIterations, + mesh, + computeRegularStructure); + return; + } + /*! * Run the contour tree analysis. 
This helper function is used to * allow one to run the contour tree in a consistent fashion independent @@ -109,16 +134,9 @@ public: // Build the mesh and fill in the values Mesh_DEM_Triangulation_2D_Freudenthal mesh(nRows, nCols); // Run the contour tree on the mesh - return RunContourTree(fieldArray, - timings, - contourTree, - sortOrder, - nIterations, - nRows, - nCols, - 1, - mesh, - computeRegularStructure); + RunContourTree( + fieldArray, timings, contourTree, sortOrder, nIterations, mesh, computeRegularStructure); + return; } // 3D Contour Tree using marching cubes else if (useMarchingCubes) @@ -126,16 +144,9 @@ public: // Build the mesh and fill in the values Mesh_DEM_Triangulation_3D_MarchingCubes mesh(nRows, nCols, nSlices); // Run the contour tree on the mesh - return RunContourTree(fieldArray, - timings, - contourTree, - sortOrder, - nIterations, - nRows, - nCols, - nSlices, - mesh, - computeRegularStructure); + RunContourTree( + fieldArray, timings, contourTree, sortOrder, nIterations, mesh, computeRegularStructure); + return; } // 3D Contour Tree with Freudenthal else @@ -143,16 +154,9 @@ public: // Build the mesh and fill in the values Mesh_DEM_Triangulation_3D_Freudenthal mesh(nRows, nCols, nSlices); // Run the contour tree on the mesh - return RunContourTree(fieldArray, - timings, - contourTree, - sortOrder, - nIterations, - nRows, - nCols, - nSlices, - mesh, - computeRegularStructure); + RunContourTree( + fieldArray, timings, contourTree, sortOrder, nIterations, mesh, computeRegularStructure); + return; } } @@ -169,9 +173,6 @@ private: contourtree_augmented::ContourTree& contourTree, contourtree_augmented::IdArrayType& sortOrder, vtkm::Id& nIterations, - const vtkm::Id /*nRows*/, // FIXME: Remove unused parameter? - const vtkm::Id /*nCols*/, // FIXME: Remove unused parameter? - const vtkm::Id /*nSlices*/, // FIXME: Remove unused parameter? 
MeshClass& mesh, bool computeRegularStructure) { @@ -205,7 +206,6 @@ private: #ifdef DEBUG_PRINT joinGraph.DebugPrint("Active Graph Instantiated", __FILE__, __LINE__); - joinGraph.DebugPrint("Active Graph Instantiated", __FILE__, __LINE__); #endif timer.Start(); diff --git a/vtkm/worklet/contourtree_augmented/ActiveGraph.h b/vtkm/worklet/contourtree_augmented/ActiveGraph.h index 6d4313908..49191d236 100644 --- a/vtkm/worklet/contourtree_augmented/ActiveGraph.h +++ b/vtkm/worklet/contourtree_augmented/ActiveGraph.h @@ -257,11 +257,12 @@ void ActiveGraph::Initialise(Mesh& mesh, const MeshExtrema& meshExtrema) // Initialize the nerighborhoodMasks and outDegrees arrays mesh.setPrepareForExecutionBehavior(isJoinGraph); + vtkm::cont::ArrayHandleIndex sortIndexArray(mesh.nVertices); active_graph_inc_ns::InitializeNeighbourhoodMasksAndOutDegrees initNeighMasksAndOutDegWorklet( isJoinGraph); this->Invoke(initNeighMasksAndOutDegWorklet, - mesh.sortIndices, + sortIndexArray, mesh, neighbourhoodMasks, // output outDegrees); // output @@ -302,14 +303,14 @@ void ActiveGraph::Initialise(Mesh& mesh, const MeshExtrema& meshExtrema) // activeIndex gets the next available ID in the active graph (was called nearIndex before) // globalIndex stores the index in the join tree for later access IdArrayType activeIndices; - activeIndices.Allocate(mesh.sortIndices.GetNumberOfValues()); - vtkm::cont::ArrayHandleConstant noSuchElementArray( - (vtkm::Id)NO_SUCH_ELEMENT, mesh.sortIndices.GetNumberOfValues()); + activeIndices.Allocate(mesh.nVertices); + vtkm::cont::ArrayHandleConstant noSuchElementArray((vtkm::Id)NO_SUCH_ELEMENT, + mesh.nVertices); vtkm::cont::Algorithm::Copy(noSuchElementArray, activeIndices); active_graph_inc_ns::InitializeActiveGraphVertices initActiveGraphVerticesWorklet; this->Invoke(initActiveGraphVerticesWorklet, - mesh.sortIndices, + sortIndexArray, outDegrees, inverseIndex, extrema, @@ -337,8 +338,6 @@ void ActiveGraph::Initialise(Mesh& mesh, const MeshExtrema& meshExtrema) active_graph_inc_ns::InitializeActiveEdges initActiveEdgesWorklet; this->Invoke(initActiveEdgesWorklet, outdegree, - mesh.sortOrder, - mesh.sortIndices, mesh, firstEdge, globalIndex, diff --git a/vtkm/worklet/contourtree_augmented/ArrayTransforms.h b/vtkm/worklet/contourtree_augmented/ArrayTransforms.h index 88e78a0ca..8714eae45 100644 --- a/vtkm/worklet/contourtree_augmented/ArrayTransforms.h +++ b/vtkm/worklet/contourtree_augmented/ArrayTransforms.h @@ -121,6 +121,25 @@ void permuteIndices(IdArrayType &input, IdArrayType &permute, IdArrayType &outpu {} */ +// tranform functor needed for ScanExclusive calculation. Return 0 if noSuchElement else 1 +struct oneIfArcValid +{ + VTKM_EXEC_CONT + oneIfArcValid() {} + + VTKM_EXEC_CONT + vtkm::Id operator()(vtkm::Id a) const { return noSuchElement(a) ? 0 : 1; } +}; + +// transform functor used in ContourTreeMesh to flag indicies as other when using the CombinedVectorClass +struct markOther +{ + VTKM_EXEC_CONT + markOther() {} + + VTKM_EXEC_CONT + vtkm::Id operator()(vtkm::Id idx) const { return idx | CV_OTHER_FLAG; } +}; // transform functor needed for ScanExclusive calculation. Return 1 if vertex is critical else 0. 
struct onefIfCritical diff --git a/vtkm/worklet/contourtree_augmented/ContourTree.h b/vtkm/worklet/contourtree_augmented/ContourTree.h index 90a2e23d6..104626c19 100644 --- a/vtkm/worklet/contourtree_augmented/ContourTree.h +++ b/vtkm/worklet/contourtree_augmented/ContourTree.h @@ -126,6 +126,10 @@ public: // stored as supernode indices IdArrayType superarcs; + // for boundary augmented contour tree (note: these use the same convention as supernodes/superarcs) + IdArrayType augmentnodes; + IdArrayType augmentarcs; + // vector of hyperarcs to which each supernode/arc belongs IdArrayType hyperparents; @@ -159,6 +163,9 @@ public: // debug routine inline void DebugPrint(const char* message, const char* fileName, long lineNum); + // print contents + inline void PrintContent() const; + // print routines //void PrintDotHyperStructure(); inline void PrintDotSuperStructure(); @@ -189,17 +196,8 @@ void ContourTree::Init(vtkm::Id dataSize) } // Init() -void ContourTree::DebugPrint(const char* message, const char* fileName, long lineNum) -{ // DebugPrint() -#ifdef DEBUG_PRINT - std::cout << "---------------------------" << std::endl; - std::cout << std::setw(30) << std::left << fileName << ":" << std::right << std::setw(4) - << lineNum << std::endl; - std::cout << std::left << std::string(message) << std::endl; - std::cout << "Contour Tree Contains: " << std::endl; - std::cout << "---------------------------" << std::endl; - std::cout << std::endl; - +inline void ContourTree::PrintContent() const +{ printHeader(arcs.GetNumberOfValues()); printIndices("Arcs", arcs); printIndices("Superparents", superparents); @@ -214,6 +212,20 @@ void ContourTree::DebugPrint(const char* message, const char* fileName, long lin printIndices("Hypernodes", hypernodes); printIndices("Hyperarcs", hyperarcs); std::cout << std::endl; +} + +void ContourTree::DebugPrint(const char* message, const char* fileName, long lineNum) +{ // DebugPrint() +#ifdef DEBUG_PRINT + std::cout << "---------------------------" << std::endl; + std::cout << std::setw(30) << std::left << fileName << ":" << std::right << std::setw(4) + << lineNum << std::endl; + std::cout << std::left << std::string(message) << std::endl; + std::cout << "Contour Tree Contains: " << std::endl; + std::cout << "---------------------------" << std::endl; + std::cout << std::endl; + + this->PrintContent(); #else // Avoid unused parameter warnings (void)message; diff --git a/vtkm/worklet/contourtree_augmented/ContourTreeMaker.h b/vtkm/worklet/contourtree_augmented/ContourTreeMaker.h index e201c720c..2831e9af2 100644 --- a/vtkm/worklet/contourtree_augmented/ContourTreeMaker.h +++ b/vtkm/worklet/contourtree_augmented/ContourTreeMaker.h @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -141,6 +142,9 @@ public: // computes the regular arcs in the contour tree void ComputeRegularStructure(MeshExtrema& meshExtrema); + template + void ComputeRegularStructure(MeshExtrema& meshExtrema, const Mesh& mesh); + // routine that augments the join & split tree with each other's supernodes // the augmented trees will be stored in the joinSuperarcs / mergeSuperarcs arrays // the sort IDs will be stored in the ContourTree's arrays, &c. @@ -296,15 +300,6 @@ void ContourTreeMaker::ComputeHyperAndSuperStructure() vtkm::cont::Algorithm::Copy(permutedWhenTransferred, contourTree.whenTransferred); // now we compress both the hypernodes & hyperarcs - // The following commented code block is variant ported directly from PPP2 using std::partial_sum. 
This has been replaced here with vtkm's ScanExclusive. - /*IdArrayType newHypernodePosition; - newHypernodePosition.Allocate(contourTree.whenTransferred.GetNumberOfValues()); - newHypernodePosition.GetPortalControl().Set(0, 0); - auto oneIfHypernode = [](vtkm::Id v) { return isHypernode(v) ? 1 : 0; }; - std::partial_sum( - boost::make_transform_iterator(vtkm::cont::ArrayPortalToIteratorBegin(contourTree.whenTransferred.GetPortalControl()), oneIfHypernode), - boost::make_transform_iterator(vtkm::cont::ArrayPortalToIteratorEnd(contourTree.whenTransferred.GetPortalControl()) - 1, oneIfHypernode), - vtkm::cont::ArrayPortalToIteratorBegin(newHypernodePosition.GetPortalControl()) + 1);*/ IdArrayType newHypernodePosition; onefIfHypernode oneIfHypernodeFunctor; auto oneIfHypernodeArrayHandle = vtkm::cont::ArrayHandleTransform( @@ -422,6 +417,112 @@ void ContourTreeMaker::ComputeRegularStructure(MeshExtrema& meshExtrema) DebugPrint("Regular Structure Computed", __FILE__, __LINE__); } // ComputeRegularStructure() +struct ContourTreeNoSuchElementSuperParents +{ + template + VTKM_EXEC_CONT bool operator()(const T& x) const + { + return (!noSuchElement(x)); + } +}; + +void InitIdArrayTypeNoSuchElement(IdArrayType& idArray, vtkm::Id size) +{ + idArray.Allocate(size); + + vtkm::cont::ArrayHandleConstant noSuchElementArray((vtkm::Id)NO_SUCH_ELEMENT, size); + vtkm::cont::Algorithm::Copy(noSuchElementArray, idArray); +} + +template +void ContourTreeMaker::ComputeRegularStructure(MeshExtrema& meshExtrema, const Mesh& mesh) +{ // ComputeRegularStructure() + + // First step - use the superstructure to set the superparent for all supernodes + auto supernodesIndex = vtkm::cont::ArrayHandleIndex(contourTree.supernodes.GetNumberOfValues()); + IdArrayType superparents; + InitIdArrayTypeNoSuchElement(superparents, mesh.GetNumberOfVertices()); + // superparents array permmuted by the supernodes array + auto permutedSuperparents = + vtkm::cont::make_ArrayHandlePermutation(contourTree.supernodes, superparents); + vtkm::cont::Algorithm::Copy(supernodesIndex, permutedSuperparents); + // The above copy is equivlant to + // for (indexType supernode = 0; supernode < contourTree.supernodes.size(); supernode++) + // superparents[contourTree.supernodes[supernode]] = supernode; + + // Second step - for all remaining (regular) nodes, locate the superarc to which they belong + contourtree_maker_inc_ns::ComputeRegularStructure_LocateSuperarcsOnBoundary + locateSuperarcsOnBoundaryWorklet(contourTree.hypernodes.GetNumberOfValues(), + contourTree.supernodes.GetNumberOfValues(), + mesh.nRows, + mesh.nCols, + mesh.nSlices, + mesh.nDims); + this->Invoke(locateSuperarcsOnBoundaryWorklet, + superparents, // (input/output) + contourTree.whenTransferred, // (input) + contourTree.hyperparents, // (input) + contourTree.hyperarcs, // (input) + contourTree.hypernodes, // (input) + contourTree.supernodes, // (input) + meshExtrema.peaks, // (input) + meshExtrema.pits, // (input) + mesh.sortOrder); // (input) + + // We have now set the superparent correctly for each node, and need to sort them to get the correct regular arcs + // DAVID "ContourTreeMaker.h" line 338 + IdArrayType node; + vtkm::cont::Algorithm::Copy(vtkm::cont::ArrayHandleIndex(superparents.GetNumberOfValues()), + contourTree.augmentnodes); + vtkm::cont::Algorithm::Copy(vtkm::cont::ArrayHandleIndex(superparents.GetNumberOfValues()), node); + vtkm::cont::Algorithm::CopyIf( + node, superparents, contourTree.augmentnodes, ContourTreeNoSuchElementSuperParents()); + + IdArrayType 
toCompressed; + InitIdArrayTypeNoSuchElement(toCompressed, superparents.GetNumberOfValues()); + vtkm::cont::Algorithm::Copy( + vtkm::cont::ArrayHandleIndex(contourTree.augmentnodes.GetNumberOfValues()), node); + auto permutedToCompressed = + vtkm::cont::make_ArrayHandlePermutation(contourTree.augmentnodes, // index array + toCompressed); // value array + vtkm::cont::Algorithm::Copy(node, // source value array + permutedToCompressed); // target array + + // Make superparents correspond to nodes + IdArrayType tmpsuperparents; + vtkm::cont::Algorithm::CopyIf( + superparents, superparents, tmpsuperparents, ContourTreeNoSuchElementSuperParents()); + vtkm::cont::Algorithm::Copy(tmpsuperparents, superparents); + //superparents.erase(std::remove(superparents.begin(), superparents.end(), NO_SUCH_ELEMENT), superparents.end()); + + // Create array for sorting + IdArrayType augmentnodes_sorted; + vtkm::cont::Algorithm::Copy( + vtkm::cont::ArrayHandleIndex(contourTree.augmentnodes.GetNumberOfValues()), + augmentnodes_sorted); + + // use a comparator to do the sort + vtkm::cont::Algorithm::Sort( + augmentnodes_sorted, + contourtree_maker_inc_ns::ContourTreeNodeComparator(superparents, contourTree.superarcs)); + + // now set the arcs based on the array + InitIdArrayTypeNoSuchElement(contourTree.augmentarcs, + contourTree.augmentnodes.GetNumberOfValues()); + contourtree_maker_inc_ns::ComputeRegularStructure_SetAugmentArcs setAugmentArcsWorklet( + contourTree.augmentarcs.GetNumberOfValues()); + this->Invoke(setAugmentArcsWorklet, + augmentnodes_sorted, // (input) arcSorter array + superparents, // (input) + contourTree.superarcs, // (input) + contourTree.supernodes, // (input) + toCompressed, // (input) + contourTree.augmentarcs); // (output) + + DebugPrint("Regular Structure Computed", __FILE__, __LINE__); +} // ComputeRegularStructure() + + // routine that augments the join & split tree with each other's supernodes // the augmented trees will be stored in the joinSuperarcs / mergeSuperarcs arrays diff --git a/vtkm/worklet/contourtree_augmented/MergeTree.h b/vtkm/worklet/contourtree_augmented/MergeTree.h index 1e697c805..956b9bebb 100644 --- a/vtkm/worklet/contourtree_augmented/MergeTree.h +++ b/vtkm/worklet/contourtree_augmented/MergeTree.h @@ -59,6 +59,8 @@ // local includes #include #include +#include + //VTKM includes #include @@ -123,7 +125,13 @@ public: // debug routine void DebugPrint(const char* message, const char* fileName, long lineNum); - // debug routine for printing the tree + // debug routine for printing the tree for contourtree meshes + template + void DebugPrintTree(const char* message, + const char* fileName, + long lineNum, + const ContourTreeMesh& mesh); + // debug routine for printing the tree for regular meshes template void DebugPrintTree(const char* message, const char* fileName, @@ -191,6 +199,21 @@ inline void MergeTree::DebugPrint(const char* message, const char* fileName, lon } // DebugPrint() +template +inline void MergeTree::DebugPrintTree(const char* message, + const char* fileName, + long lineNum, + const ContourTreeMesh& mesh) +{ + (void)mesh; // prevent unused parameter warning + std::cout << std::setw(30) << std::left << fileName << ":" << std::right << std::setw(4) + << lineNum << std::endl; + std::cout << std::left << std::string(message) << std::endl; + std::cout << "MergeTree::DebugPrintTree not implemented for ContourTreeMesh" << std::endl; +} + + + template inline void MergeTree::DebugPrintTree(const char* message, const char* fileName, diff --git 
a/vtkm/worklet/contourtree_augmented/MeshExtrema.h b/vtkm/worklet/contourtree_augmented/MeshExtrema.h index 0a483330d..ea00421c7 100644 --- a/vtkm/worklet/contourtree_augmented/MeshExtrema.h +++ b/vtkm/worklet/contourtree_augmented/MeshExtrema.h @@ -134,7 +134,7 @@ inline void MeshExtrema::BuildRegularChains(bool isMaximal) IdArrayType& extrema = isMaximal ? peaks : pits; // Create the PointerDoubling worklet and corresponding dispatcher - PointerDoubling pointerDoubler; + vtkm::worklet::contourtree_augmented::PointerDoubling pointerDoubler; // Iterate to perform pointer-doubling to build chains to extrema (i.e., maxima or minima) // depending on whether we are computing a JoinTree or a SplitTree @@ -152,13 +152,14 @@ inline void MeshExtrema::SetStarts(MeshType& mesh, bool isMaximal) { mesh.setPrepareForExecutionBehavior(isMaximal); mesh_extrema_inc_ns::SetStarts setStartsWorklet; + vtkm::cont::ArrayHandleIndex sortIndexArray(mesh.nVertices); if (isMaximal) { - this->Invoke(setStartsWorklet, mesh.sortIndices, mesh, peaks); + this->Invoke(setStartsWorklet, sortIndexArray, mesh, peaks); } else { - this->Invoke(setStartsWorklet, mesh.sortIndices, mesh, pits); + this->Invoke(setStartsWorklet, sortIndexArray, mesh, pits); } DebugPrint("Regular Starts Set", __FILE__, __LINE__); } diff --git a/vtkm/worklet/contourtree_augmented/Mesh_DEM_Triangulation.h b/vtkm/worklet/contourtree_augmented/Mesh_DEM_Triangulation.h index c3f4e157c..a6f83fa09 100644 --- a/vtkm/worklet/contourtree_augmented/Mesh_DEM_Triangulation.h +++ b/vtkm/worklet/contourtree_augmented/Mesh_DEM_Triangulation.h @@ -82,6 +82,7 @@ #include #include +#include #include #include @@ -121,6 +122,9 @@ public: { } + // Getter function for nVertices + vtkm::Id GetNumberOfVertices() const { return nVertices; } + // sorts the data and initializes the sortIndex & indexReverse void SortData(const vtkm::cont::ArrayHandle& values); diff --git a/vtkm/worklet/contourtree_augmented/PointerDoubling.h b/vtkm/worklet/contourtree_augmented/PointerDoubling.h index e4cf840cb..e0428adf0 100644 --- a/vtkm/worklet/contourtree_augmented/PointerDoubling.h +++ b/vtkm/worklet/contourtree_augmented/PointerDoubling.h @@ -50,9 +50,8 @@ // Oliver Ruebel (LBNL) //============================================================================== - -#ifndef vtkm_worklet_contourtree_chain_doubler_h -#define vtkm_worklet_contourtree_chain_doubler_h +#ifndef vtkm_worklet_contourtree_augmented_pointer_doubling_h +#define vtkm_worklet_contourtree_augmented_pointer_doubling_h #include #include @@ -94,8 +93,9 @@ public: } // else, if the vertex is terminal then do nothing } }; // PointerDoubling -} -} -} -#endif +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif // vtkm_worklet_contourtree_augmented_pointer_doubling_h diff --git a/vtkm/worklet/contourtree_augmented/PrintVectors.h b/vtkm/worklet/contourtree_augmented/PrintVectors.h index dc2aeaa02..b2f274e29 100644 --- a/vtkm/worklet/contourtree_augmented/PrintVectors.h +++ b/vtkm/worklet/contourtree_augmented/PrintVectors.h @@ -217,7 +217,9 @@ inline void printSortedValues(std::string label, } // printValues() -inline void printIndices(std::string label, IdArrayType& iVec, vtkm::Id nIndices = -1) +inline void printIndices(std::string label, + const vtkm::cont::ArrayHandle& iVec, + vtkm::Id nIndices) { // printIndices() // -1 means full size if (nIndices == -1) diff --git a/vtkm/worklet/contourtree_augmented/ProcessContourTree.h b/vtkm/worklet/contourtree_augmented/ProcessContourTree.h 
index a3b0ba28e..1419e4fb7 100644 --- a/vtkm/worklet/contourtree_augmented/ProcessContourTree.h +++ b/vtkm/worklet/contourtree_augmented/ProcessContourTree.h @@ -137,7 +137,8 @@ public: const IdArrayType& branchSaddle, const IdArrayType& branchParent, const IdArrayType& sortOrder, - const vtkm::cont::ArrayHandle& dataField); + const vtkm::cont::ArrayHandle& dataField, + bool dataFieldIsSorted); }; // class ProcessContourTree @@ -160,7 +161,8 @@ process_contourtree_inc_ns::Branch* ProcessContourTree::ComputeBranchDecompos const IdArrayType& branchSaddle, const IdArrayType& branchParent, const IdArrayType& sortOrder, - const vtkm::cont::ArrayHandle& dataField) + const vtkm::cont::ArrayHandle& dataField, + bool dataFieldIsSorted) { return process_contourtree_inc_ns::Branch::ComputeBranchDecomposition(contourTreeSuperparents, contourTreeSupernodes, @@ -170,7 +172,8 @@ process_contourtree_inc_ns::Branch* ProcessContourTree::ComputeBranchDecompos branchSaddle, branchParent, sortOrder, - dataField); + dataField, + dataFieldIsSorted); } diff --git a/vtkm/worklet/contourtree_augmented/Types.h b/vtkm/worklet/contourtree_augmented/Types.h index ec954cb26..92318303a 100644 --- a/vtkm/worklet/contourtree_augmented/Types.h +++ b/vtkm/worklet/contourtree_augmented/Types.h @@ -117,6 +117,13 @@ inline vtkm::Id maskedIndex(vtkm::Id flaggedIndex) return (flaggedIndex & INDEX_MASK); } // maskedIndex() +// Used in the context of CombinedVector class used in ContourTreeMesh to merge the mesh of contour trees +VTKM_EXEC_CONT +inline bool isThis(vtkm::Id flaggedIndex) +{ // isThis + return ((flaggedIndex & CV_OTHER_FLAG) == 0); +} // isThis + template struct MaskedIndexFunctor { diff --git a/vtkm/worklet/contourtree_augmented/activegraph/InitializeActiveEdges.h b/vtkm/worklet/contourtree_augmented/activegraph/InitializeActiveEdges.h index eeee31bcf..4fe623d59 100644 --- a/vtkm/worklet/contourtree_augmented/activegraph/InitializeActiveEdges.h +++ b/vtkm/worklet/contourtree_augmented/activegraph/InitializeActiveEdges.h @@ -78,17 +78,16 @@ public: typedef void ControlSignature( FieldIn outdegree, // (input) outdegree - WholeArrayIn sortOrder, // (input) sort order - WholeArrayIn sortIndices, // (input) sort indices ExecObject meshStructure, // (input) execution object with the mesh structure FieldIn firstEdge, // (input) - FieldIn globalIndex, // (input) + FieldIn globalIndex, // (input) ActiveGraph.globalIndex WholeArrayIn extrema, // (input) WholeArrayIn neighbourhoodMasks, // (input) WholeArrayOut edgeNear, // (output) edgeNear WholeArrayOut edgeFar, // (output) edgeFar WholeArrayOut activeEdges); // (output) activeEdges - typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11); + typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6, _7, _8, _9); + using InputDomain = _1; // Default Constructor @@ -98,11 +97,9 @@ public: template VTKM_EXEC void operator()(const vtkm::Id& outDegree, const vtkm::Id activeIndex, - const InFieldPortalType& sortOrder, - const InFieldPortalType& sortIndices, const MeshStructureType& meshStructure, const vtkm::Id& firstEdgeIndex, - const vtkm::Id& sortIndex, + const vtkm::Id& sortIndex, // = globalIndex.Get(activeIndex) const InFieldPortalType& extrema, const InFieldPortalType& neighbourhoodMasks, const OutFieldPortalType& edgeNear, @@ -111,16 +108,13 @@ public: { if (outDegree != 0) { - vtkm::Id meshIndex = sortOrder.Get(sortIndex); - // temporary array for storing edges vtkm::Id neigbourComponents[MeshClassType::MAX_OUTDEGREE]; int 
currNbrNo = 0; for (vtkm::Id nbrNo = 0; nbrNo < meshStructure.GetMaxNumberOfNeighbours(); ++nbrNo) if (neighbourhoodMasks.Get(sortIndex) & (static_cast(1) << nbrNo)) { - neigbourComponents[currNbrNo++] = - sortIndices.Get(meshStructure.GetNeighbourIndex(meshIndex, nbrNo)); + neigbourComponents[currNbrNo++] = meshStructure.GetNeighbourIndex(sortIndex, nbrNo); } @@ -155,7 +149,6 @@ public: if (outDegree != 0) { indexType sortIndex = globalIndex[activeIndex]; - indexType meshIndex = mesh.sortOrder[sortIndex]; // temporary array for storing edges indexType neigbourComponents[Mesh::MAX_OUTDEGREE]; @@ -163,7 +156,7 @@ public: for (vtkm::Int32 nbrNo = 0; nbrNo < mesh.GetMaxNumberOfNeighbours(); ++nbrNo) if (neighbourhoodMasks[sortIndex] & 1 << nbrNo) { - neigbourComponents[currNbrNo++] = mesh.sortIndices[mesh.GetNeighbourIndex(meshIndex, nbrNo)]; + neigbourComponents[currNbrNo++] = mesh.GetNeighbourIndex(sortIndex, nbrNo); } diff --git a/vtkm/worklet/contourtree_augmented/activegraph/InitializeNeighbourhoodMasksAndOutDegrees.h b/vtkm/worklet/contourtree_augmented/activegraph/InitializeNeighbourhoodMasksAndOutDegrees.h index 98e8dc556..2e7ead695 100644 --- a/vtkm/worklet/contourtree_augmented/activegraph/InitializeNeighbourhoodMasksAndOutDegrees.h +++ b/vtkm/worklet/contourtree_augmented/activegraph/InitializeNeighbourhoodMasksAndOutDegrees.h @@ -74,7 +74,7 @@ public: ExecObject meshStructure, // (input) execution object with the mesh structure WholeArrayOut neighbourhoodMasks, // (output) neighbourhoodMask for each vertex WholeArrayOut outDegrees); // (outpur) outDegress for each vertex - typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4); + typedef void ExecutionSignature(_1, _2, _3, _4); using InputDomain = _1; // Default Constructor @@ -93,22 +93,20 @@ public: template VTKM_EXEC void operator()(const vtkm::Id& sortIndex, - const vtkm::Id vertexIndex, const MeshStructureType& meshStructure, const OutFieldPortalType& neighbourhoodMasksPortal, const OutFieldPortalType& outDegreesPortal) const { const vtkm::Pair& maskAndDegree = - meshStructure.GetNeighbourComponentsMaskAndDegree(vertexIndex, isJoinGraph); + meshStructure.GetNeighbourComponentsMaskAndDegree(sortIndex, isJoinGraph); neighbourhoodMasksPortal.Set(sortIndex, maskAndDegree.first); outDegreesPortal.Set(sortIndex, maskAndDegree.second); // In serial this worklet implements the following operation - // for (indexType vertex = 0; vertex < mesh.sortIndices.GetNumberOfValues(); ++vertex) - // { - // indexType sortIndex = mesh.sortIndices[vertex]; - // std::tie(neighbourhoodMasks[sortIndex], outDegrees[sortIndex]) = mesh.GetNeighbourComponentsMaskAndDegree(vertex, isJoinGraph); - // } + // for (indexType sortIndex = 0; sortIndex < mesh.GetNumberOfVertices(); ++sortIndex) + // { + // std::tie(neighbourhoodMasks[sortIndex], outDegrees[sortIndex]) = mesh.GetNeighbourComponentsMaskAndDegree(sortIndex, isJoinGraph); + // } } private: diff --git a/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_LocateSuperarcs.h b/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_LocateSuperarcs.h index a2256411c..1849e73a3 100644 --- a/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_LocateSuperarcs.h +++ b/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_LocateSuperarcs.h @@ -398,6 +398,384 @@ public: }; // ComputeRegularStructure_LocateSuperarcs.h +// Worklet for the second step of template void ContourTreeMaker::ComputeRegularStructure --- +// 
for all remaining (regular) nodes on the boundary, locate the superarc to which they belong +class ComputeRegularStructure_LocateSuperarcsOnBoundary : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature(WholeArrayInOut contourTreeSuperparents, // (input/output) + WholeArrayIn contourTreeWhenTransferred, // (input) + WholeArrayIn contourTreeHyperparents, // (input) + WholeArrayIn contourTreeHyperarcs, // (input) + WholeArrayIn contourTreeHypernodes, // (input) + WholeArrayIn contourTreeSupernodes, // (input) + WholeArrayIn meshExtremaPeaks, // (input) + WholeArrayIn meshExtremaPits, // (input) + FieldIn meshSortOrderPortal); // (input) + + typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6, _7, _8, _9); + using InputDomain = _1; + + vtkm::Id numHypernodes; // contourTree.hypernodes.GetNumberOfValues() + vtkm::Id numSupernodes; // contourTree.supernodes.GetNumberOfValues() + + vtkm::Id nRows, nCols, nSlices; // Mesh 2D or 3D - nRows, nCols, nSlices + vtkm::Id nDims; // Mesh 2D or 3D - 2 or 3 + + // Default Constructor + VTKM_EXEC_CONT + ComputeRegularStructure_LocateSuperarcsOnBoundary(vtkm::Id NumHypernodes, + vtkm::Id NumSupernodes, + vtkm::Id _nRows, + vtkm::Id _nCols, + vtkm::Id _nSlices, + vtkm::Id _nDims) + : numHypernodes(NumHypernodes) + , numSupernodes(NumSupernodes) + , nRows(_nRows) + , nCols(_nCols) + , nSlices(_nSlices) + , nDims(_nDims) + { + } + + vtkm::Id vertexRow(const vtkm::Id v) const { return (v % (nRows * nCols)) / nCols; } + vtkm::Id vertexColumn(const vtkm::Id v) const { return v % nCols; } + vtkm::Id vertexSlice(const vtkm::Id v) const { return v / (nRows * nCols); } + + bool liesOnBoundary(const vtkm::Id meshSortOrderValue) const + { + if (nDims == 2) + return liesOnBoundary2(meshSortOrderValue); + else + return liesOnBoundary3(meshSortOrderValue); + } + + bool liesOnBoundary2(const vtkm::Id meshSortOrderValue) const + { + const vtkm::Id v = meshSortOrderValue; + const vtkm::Id row = vertexRow(v); + const vtkm::Id col = vertexColumn(v); + return (row == 0) || (col == 0) || (row == nRows - 1) || (col == nCols - 1); + } + + bool liesOnBoundary3(const vtkm::Id meshSortOrderValue) const + { + const vtkm::Id v = meshSortOrderValue; + const vtkm::Id row = vertexRow(v); + const vtkm::Id col = vertexColumn(v); + const vtkm::Id sli = vertexSlice(v); + return (row == 0) || (col == 0) || (sli == 0) || (row == nRows - 1) || (col == nCols - 1) || + (sli == nSlices - 1); + } + + template + VTKM_EXEC void operator()(const InOutFieldPortalType& contourTreeSuperparentsPortal, + const vtkm::Id node, + const InFieldPortalType& contourTreeWhenTransferredPortal, + const InFieldPortalType& contourTreeHyperparentsPortal, + const InFieldPortalType& contourTreeHyperarcsPortal, + const InFieldPortalType& contourTreeHypernodesPortal, + const InFieldPortalType& contourTreeSupernodesPortal, + const InFieldPortalType& meshExtremaPeaksPortal, + const InFieldPortalType& meshExtremaPitsPortal, + const vtkm::Id& meshSortOrderValue) const + { + // per node + // if the superparent is already set, it's a supernode, so skip it. 
+ if (noSuchElement(contourTreeSuperparentsPortal.Get(node)) && + liesOnBoundary(meshSortOrderValue)) + { // regular nodes only + // we will need to prune top and bottom until one of them prunes past the node + vtkm::Id top = meshExtremaPeaksPortal.Get(node); + vtkm::Id bottom = meshExtremaPitsPortal.Get(node); + // these are the regular IDs of supernodes, so their superparents are already set + vtkm::Id topSuperparent = contourTreeSuperparentsPortal.Get(maskedIndex(top)); + vtkm::Id bottomSuperparent = contourTreeSuperparentsPortal.Get(maskedIndex(bottom)); + // and we can also find out when they transferred + vtkm::Id topWhen = contourTreeWhenTransferredPortal.Get(topSuperparent); + vtkm::Id bottomWhen = contourTreeWhenTransferredPortal.Get(bottomSuperparent); + // and their hyperparent + vtkm::Id topHyperparent = contourTreeHyperparentsPortal.Get(topSuperparent); + vtkm::Id bottomHyperparent = contourTreeHyperparentsPortal.Get(bottomSuperparent); + // our goal is to work out the true hyperparent of the node + vtkm::Id hyperparent = (vtkm::Id)NO_SUCH_ELEMENT; + + // now we loop until one of them goes past the vertex + // the invariant here is that the first direction to prune past the vertex prunes it + while (noSuchElement(hyperparent)) + { // loop to find pruner + // we test the one that prunes first + if (maskedIndex(topWhen) < maskedIndex(bottomWhen)) + { // top pruned first + // we prune down to the bottom of the hyperarc in either case, by updating the top superparent + topSuperparent = contourTreeHyperarcsPortal.Get(maskedIndex(topHyperparent)); + top = contourTreeSupernodesPortal.Get(maskedIndex(topSuperparent)); + + topWhen = contourTreeWhenTransferredPortal.Get(maskedIndex(topSuperparent)); + // test to see if we've passed the node + if (top < node) + { // just pruned past + hyperparent = topHyperparent; + } // just pruned past + // == is not possible, since node is regular + else // top < node + { // not pruned past + topHyperparent = contourTreeHyperparentsPortal.Get(maskedIndex(topSuperparent)); + } // not pruned past + } // top pruned first + else if (maskedIndex(topWhen) > maskedIndex(bottomWhen)) + { // bottom pruned first + // we prune up to the top of the hyperarc in either case, by updating the bottom superparent + bottomSuperparent = contourTreeHyperarcsPortal.Get(maskedIndex(bottomHyperparent)); + bottom = contourTreeSupernodesPortal.Get(maskedIndex(bottomSuperparent)); + bottomWhen = contourTreeWhenTransferredPortal.Get(maskedIndex(bottomSuperparent)); + // test to see if we've passed the node + if (bottom > node) + { // just pruned past + hyperparent = bottomHyperparent; + } // just pruned past + // == is not possible, since node is regular + else // bottom > node + { // not pruned past + bottomHyperparent = contourTreeHyperparentsPortal.Get(maskedIndex(bottomSuperparent)); + } // not pruned past + } // bottom pruned first + else + { // both prune simultaneously + // this can happen when both top & bottom prune in the same pass because they belong to the same hyperarc + // but this means that they must have the same hyperparent, so we know the correct hyperparent & can check whether it ascends + hyperparent = bottomHyperparent; + } // both prune simultaneously + } // loop to find pruner + // we have now set the hyperparent correctly, so we retrieve it's hyperarc to find whether it ascends or descends + if (isAscending(contourTreeHyperarcsPortal.Get(hyperparent))) + { // ascending hyperarc + // the supernodes on the hyperarc are in sorted low-high order + vtkm::Id 
lowSupernode = contourTreeHypernodesPortal.Get(hyperparent); + vtkm::Id highSupernode; + // if it's at the right hand end, take the last supernode in the array + if (maskedIndex(hyperparent) == numHypernodes - 1) + highSupernode = numSupernodes - 1; + // otherwise, take the supernode just before the next hypernode + else + highSupernode = contourTreeHypernodesPortal.Get(maskedIndex(hyperparent) + 1) - 1; + // now, the high supernode may be lower than the element, because the node belongs + // between it and the high end of the hyperarc + if (contourTreeSupernodesPortal.Get(highSupernode) < node) + contourTreeSuperparentsPortal.Set(node, highSupernode); + // otherwise, we do a binary search of the superarcs + else + { // node between high & low + // keep going until we span exactly + while (highSupernode - lowSupernode > 1) + { // binary search + // find the midway supernode + vtkm::Id midSupernode = (lowSupernode + highSupernode) / 2; + // test against the node + if (contourTreeSupernodesPortal.Get(midSupernode) > node) + highSupernode = midSupernode; + // == can't happen since node is regular + else + lowSupernode = midSupernode; + } // binary search + + // now we can use the low node as the superparent + contourTreeSuperparentsPortal.Set(node, lowSupernode); + } // node between high & low + } // ascending hyperarc + else + { // descending hyperarc + // the supernodes on the hyperarc are in sorted high-low order + vtkm::Id highSupernode = contourTreeHypernodesPortal.Get(hyperparent); + vtkm::Id lowSupernode; + // if it's at the right hand end, take the last supernode in the array + if (maskedIndex(hyperparent) == numHypernodes - 1) + { // last hyperarc + lowSupernode = numSupernodes - 1; + } // last hyperarc + // otherwise, take the supernode just before the next hypernode + else + { // other hyperarc + lowSupernode = contourTreeHypernodesPortal.Get(maskedIndex(hyperparent) + 1) - 1; + } // other hyperarc + // now, the low supernode may be higher than the element, because the node belongs + // between it and the low end of the hyperarc + if (contourTreeSupernodesPortal.Get(lowSupernode) > node) + contourTreeSuperparentsPortal.Set(node, lowSupernode); + // otherwise, we do a binary search of the superarcs + else + { // node between low & high + // keep going until we span exactly + while (lowSupernode - highSupernode > 1) + { // binary search + // find the midway supernode + vtkm::Id midSupernode = (highSupernode + lowSupernode) / 2; + // test against the node + if (contourTreeSupernodesPortal.Get(midSupernode) > node) + highSupernode = midSupernode; + // == can't happen since node is regular + else + lowSupernode = midSupernode; + } // binary search + // now we can use the high node as the superparent + contourTreeSuperparentsPortal.Set(node, highSupernode); + } // node between low & high + } // descending hyperarc + } // regular nodes only + /* + // In serial this worklet implements the following operation + for (indexType node = 0; node < contourTree.arcs.size(); node++) + { // per node + // if the superparent is already set, it's a supernode, so skip it. 
+ if (noSuchElement(contourTree.superparents[node]) && mesh.liesOnBoundary(node)) + { // regular nodes only + // we will need to prune top and bottom until one of them prunes past the node + indexType top = meshExtrema.peaks[node]; + indexType bottom = meshExtrema.pits[node]; + // these are the regular IDs of supernodes, so their superparents are already set + indexType topSuperparent = contourTree.superparents[top]; + indexType bottomSuperparent = contourTree.superparents[bottom]; + // and we can also find out when they transferred + indexType topWhen = contourTree.whenTransferred[topSuperparent]; + indexType bottomWhen = contourTree.whenTransferred[bottomSuperparent]; + // and their hyperparent + indexType topHyperparent = contourTree.hyperparents[topSuperparent]; + indexType bottomHyperparent = contourTree.hyperparents[bottomSuperparent]; + + // our goal is to work out the true hyperparent of the node + indexType hyperparent = NO_SUCH_ELEMENT; + + // now we loop until one of them goes past the vertex + // the invariant here is that the first direction to prune past the vertex prunes it + while (noSuchElement(hyperparent)) + { // loop to find pruner + + // we test the one that prunes first + if (maskedIndex(topWhen) < maskedIndex(bottomWhen)) + { // top pruned first + // we prune down to the bottom of the hyperarc in either case, by updating the top superparent + topSuperparent = contourTree.hyperarcs[maskedIndex(topHyperparent)]; + top = contourTree.supernodes[maskedIndex(topSuperparent)]; + + topWhen = contourTree.whenTransferred[maskedIndex(topSuperparent)]; + // test to see if we've passed the node + if (top < node) + { // just pruned past + hyperparent = topHyperparent; + } // just pruned past + // == is not possible, since node is regular + else // top < node + { // not pruned past + topHyperparent = contourTree.hyperparents[maskedIndex(topSuperparent)]; + } // not pruned past + } // top pruned first + else if (maskedIndex(topWhen) > maskedIndex(bottomWhen)) + { // bottom pruned first + // we prune up to the top of the hyperarc in either case, by updating the bottom superparent + bottomSuperparent = contourTree.hyperarcs[maskedIndex(bottomHyperparent)]; + bottom = contourTree.supernodes[maskedIndex(bottomSuperparent)]; + bottomWhen = contourTree.whenTransferred[maskedIndex(bottomSuperparent)]; + // test to see if we've passed the node + if (bottom > node) + { // just pruned past + hyperparent = bottomHyperparent; + } // just pruned past + // == is not possible, since node is regular + else // bottom > node + { // not pruned past + bottomHyperparent = contourTree.hyperparents[maskedIndex(bottomSuperparent)]; + } // not pruned past + } // bottom pruned first + else + { // both prune simultaneously + // this can happen when both top & bottom prune in the same pass because they belong to the same hyperarc + // but this means that they must have the same hyperparent, so we know the correct hyperparent & can check whether it ascends + hyperparent = bottomHyperparent; + } // both prune simultaneously + } // loop to find pruner + + + // we have now set the hyperparent correctly, so we retrieve it's hyperarc to find whether it ascends or descends + if (isAscending(contourTree.hyperarcs[hyperparent])) + { // ascending hyperarc + // the supernodes on the hyperarc are in sorted low-high order + indexType lowSupernode = contourTree.hypernodes[hyperparent]; + indexType highSupernode; + // if it's at the right hand end, take the last supernode in the array + if (maskedIndex(hyperparent) == 
contourTree.hypernodes.size() - 1) + highSupernode = contourTree.supernodes.size() - 1; + // otherwise, take the supernode just before the next hypernode + else + highSupernode = contourTree.hypernodes[maskedIndex(hyperparent) + 1] - 1; + // now, the high supernode may be lower than the element, because the node belongs + // between it and the high end of the hyperarc + if (contourTree.supernodes[highSupernode] < node) + contourTree.superparents[node] = highSupernode; + // otherwise, we do a binary search of the superarcs + else + { // node between high & low + // keep going until we span exactly + while (highSupernode - lowSupernode > 1) + { // binary search + // find the midway supernode + indexType midSupernode = (lowSupernode + highSupernode) / 2; + // test against the node + if (contourTree.supernodes[midSupernode] > node) + highSupernode = midSupernode; + // == can't happen since node is regular + else + lowSupernode = midSupernode; + } // binary search + // now we can use the low node as the superparent + contourTree.superparents[node] = lowSupernode; + } // node between high & low + } // ascending hyperarc + else + { // descending hyperarc + // the supernodes on the hyperarc are in sorted high-low order + indexType highSupernode = contourTree.hypernodes[hyperparent]; + indexType lowSupernode; + // if it's at the right hand end, take the last supernode in the array + if (maskedIndex(hyperparent) == contourTree.hypernodes.size() - 1) + { // last hyperarc + lowSupernode = contourTree.supernodes.size() - 1; + } // last hyperarc + // otherwise, take the supernode just before the next hypernode + else + { // other hyperarc + lowSupernode = contourTree.hypernodes[maskedIndex(hyperparent) + 1] - 1; + } // other hyperarc + // now, the low supernode may be higher than the element, because the node belongs + // between it and the low end of the hyperarc + if (contourTree.supernodes[lowSupernode] > node) + contourTree.superparents[node] = lowSupernode; + // otherwise, we do a binary search of the superarcs + else + { // node between low & high + // keep going until we span exactly + while (lowSupernode - highSupernode > 1) + { // binary search + // find the midway supernode + indexType midSupernode = (highSupernode + lowSupernode) / 2; + // test against the node + if (contourTree.supernodes[midSupernode] > node) + highSupernode = midSupernode; + // == can't happen since node is regular + else + lowSupernode = midSupernode; + } // binary search + // now we can use the high node as the superparent + contourTree.superparents[node] = highSupernode; + } // node between low & high + } // descending hyperarc + } // regular nodes only + } // per node + */ + } + +}; // ComputeRegularStructure_LocateSuperarcsOnBoundary.h + } // namespace contourtree_maker_inc } // namespace contourtree_augmented } // namespace worklet diff --git a/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_SetArcs.h b/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_SetArcs.h index 15e9f28ad..b14daea75 100644 --- a/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_SetArcs.h +++ b/vtkm/worklet/contourtree_augmented/contourtreemaker/ComputeRegularStructure_SetArcs.h @@ -176,6 +176,85 @@ public: }; // ComputeRegularStructure_SetArcs +// Worklet for setting the arcs of the contour tree based on the sorted augmented nodes +class ComputeRegularStructure_SetAugmentArcs : public vtkm::worklet::WorkletMapField +{ +public: + typedef void 
ControlSignature(WholeArrayIn arcSorter, // (input) arcSorter array + WholeArrayIn contourTreeSuperparents, // (input) + WholeArrayIn contourTreeSuperarcs, // (input) + WholeArrayIn contourTreeSupernodes, // (input) + WholeArrayIn toCompressed, // (input) + WholeArrayOut contourTreeArcs); // (output) + typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6); + using InputDomain = _1; + + vtkm::Id numArcs; // contourTree.arcs.GetNumberOfValues() + + // Default Constructor + VTKM_EXEC_CONT + ComputeRegularStructure_SetAugmentArcs(vtkm::Id NumArcs) + : numArcs(NumArcs) + { + } + + template + VTKM_EXEC void operator()(const InFieldPortalType& arcSorterPortal, + const vtkm::Id sortedNode, + const InFieldPortalType& contourTreeSuperparentsPortal, + const InFieldPortalType& contourTreeSuperarcsPortal, + const InFieldPortalType& contourTreeSupernodesPortal, + const InFieldPortalType& toCompressedPortal, + const OutFieldPortalType& contourTreeArcsPortal) const + { + // per node + // convert arcSorter to node ID + vtkm::Id nodeID = arcSorterPortal.Get(sortedNode); + vtkm::Id superparent = contourTreeSuperparentsPortal.Get(nodeID); + + // the end element is always the last + bool isLastOnSuperarc = false; + if (sortedNode == numArcs - 1) + { + isLastOnSuperarc = true; + } + // otherwise look for a change in the superparent + else + { + isLastOnSuperarc = + (superparent != contourTreeSuperparentsPortal.Get(arcSorterPortal.Get(sortedNode + 1))); + } + + // if it's the last on the superarc + if (isLastOnSuperarc) + { // last on superarc + // retrieve the superarc's far end + vtkm::Id superarcEnd = contourTreeSuperarcsPortal.Get(superparent); + // this only happens for the root of the tree, but is still needed + if (noSuchElement(superarcEnd)) + { + contourTreeArcsPortal.Set(nodeID, (vtkm::Id)NO_SUCH_ELEMENT); + } + else + { + contourTreeArcsPortal.Set( + nodeID, + toCompressedPortal.Get(contourTreeSupernodesPortal.Get(maskedIndex(superarcEnd))) | + (superarcEnd & IS_ASCENDING)); + } + } // last on superarc + else + { // not last on superarc + vtkm::Id neighbour = arcSorterPortal.Get(sortedNode + 1); + contourTreeArcsPortal.Set(nodeID, neighbour | ((neighbour > nodeID) ? IS_ASCENDING : 0)); + } // not last on superarc + } + +}; // ComputeRegularStructure_SetAugmentArcs + + + + } // namespace contourtree_maker_inc } // namespace contourtree_augmented } // namespace worklet diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem/CMakeLists.txt b/vtkm/worklet/contourtree_augmented/mesh_dem/CMakeLists.txt index 16201aaa8..6c0c6d442 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem/CMakeLists.txt +++ b/vtkm/worklet/contourtree_augmented/mesh_dem/CMakeLists.txt @@ -12,6 +12,7 @@ set(headers MeshStructure3D.h SimulatedSimplicityComperator.h SortIndices.h + IdRelabler.h ) #----------------------------------------------------------------------------- diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem/IdRelabler.h b/vtkm/worklet/contourtree_augmented/mesh_dem/IdRelabler.h new file mode 100644 index 000000000..6516fb96d --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem/IdRelabler.h @@ -0,0 +1,134 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_ppp2_contourtree_mesh_inc_id_relabler_h +#define vtkm_worklet_contourtree_ppp2_contourtree_mesh_inc_id_relabler_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem +{ + + +class IdRelabler +{ +public: + VTKM_EXEC_CONT + IdRelabler() + : inputStartRow(0) + , inputStartCol(0) + , inputStartSlice(0) + , inputNRows(1) + , inputNCols(1) + , outputNRows(1) + , outputNCols(1) + { + } + + VTKM_EXEC_CONT + IdRelabler(vtkm::Id iSR, + vtkm::Id iSC, + vtkm::Id iSS, + vtkm::Id iNR, + vtkm::Id iNC, + vtkm::Id oNR, + vtkm::Id oNC) + : inputStartRow(iSR) + , inputStartCol(iSC) + , inputStartSlice(iSS) + , inputNRows(iNR) + , inputNCols(iNC) + , outputNRows(oNR) + , outputNCols(oNC) + { + } + + VTKM_EXEC_CONT + vtkm::Id operator()(vtkm::Id v) const + { + vtkm::Id r = inputStartRow + ((v % (inputNRows * inputNCols)) / inputNCols); + vtkm::Id c = inputStartCol + (v % inputNCols); + vtkm::Id s = inputStartSlice + v / (inputNRows * inputNCols); + + return (s * outputNRows + r) * outputNCols + c; + } + +private: + vtkm::Id inputStartRow, inputStartCol, inputStartSlice; + vtkm::Id inputNRows, inputNCols; + vtkm::Id outputNRows, outputNCols; +}; + + +} // namespace mesh_dem +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure2D.h b/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure2D.h index aaecbd2bc..08e4bedfc 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure2D.h +++ b/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure2D.h @@ -84,6 +84,10 @@ public: { } + // number of mesh vertices + VTKM_EXEC_CONT + vtkm::Id GetNumberOfVertices() const { return (this->nRows * this->nCols); } + // vertex row - integer divide by columns VTKM_EXEC inline vtkm::Id vertexRow(vtkm::Id v) const { return v / nCols; } diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure3D.h b/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure3D.h index fbdd2dfdc..af5cb2033 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure3D.h +++ b/vtkm/worklet/contourtree_augmented/mesh_dem/MeshStructure3D.h @@ -86,6 +86,10 @@ public: { } + // number of mesh vertices + VTKM_EXEC_CONT + vtkm::Id GetNumberOfVertices() const { return (this->nRows * this->nCols * this->nSlices); } + // vertex row - integer modulus by (nRows&nCols) and integer divide by columns VTKM_EXEC vtkm::Id vertexRow(vtkm::Id v) const { return (v % (nRows * nCols)) / nCols; } diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/CMakeLists.txt b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/CMakeLists.txt index a92077886..eee0e5780 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/CMakeLists.txt +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/CMakeLists.txt @@ -11,11 +11,16 @@ set(headers Freudenthal_2D_Triangulation.h Freudenthal_3D_Triangulation.h MarchingCubes_3D_Triangulation.h + ContourTreeMesh.h MeshStructureFreudenthal2D.h MeshStructureFreudenthal3D.h MeshStructureMarchingCubes.h + MeshStructureContourTreeMesh.h ) +#---------------------------------------------------------------------------- +add_subdirectory(contourtreemesh) + #----------------------------------------------------------------------------- 
vtkm_declare_headers(${headers}) diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/ContourTreeMesh.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/ContourTreeMesh.h new file mode 100644 index 000000000..6796e2d8a --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/ContourTreeMesh.h @@ -0,0 +1,732 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. 
+// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_mesh_dem_contour_tree_mesh_h +#define vtkm_worklet_contourtree_augmented_mesh_dem_contour_tree_mesh_h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include // TODO remove should not be needed + +#include + +#include + +#include + + +VTKM_THIRDPARTY_PRE_INCLUDE +#include +#include +VTKM_THIRDPARTY_POST_INCLUDE +// clang-format on + +namespace contourtree_mesh_inc_ns = + vtkm::worklet::contourtree_augmented::mesh_dem_contourtree_mesh_inc; + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ + +template +class ContourTreeMesh : public vtkm::cont::ExecutionObjectBase +{ // class ContourTreeMesh +public: + //Mesh dependent helper functions + void setPrepareForExecutionBehavior(bool getMax); + + template + contourtree_mesh_inc_ns::MeshStructureContourTreeMesh PrepareForExecution( + DeviceTag) const; + + ContourTreeMesh() {} + + // Constructors + ContourTreeMesh(const IdArrayType& arcs, + const IdArrayType& inSortOrder, + const vtkm::cont::ArrayHandle& values, + const IdArrayType& inGlobalMeshIndex); + + // Construct a ContourTreeMesh from nodes/arcs and another ContourTreeMesh (instead of a Mesh_DEM_Triangulation) + // nodes/arcs: From the contour tree + // ContourTreeMesh: the contour tree mesh used to compute the contour tree described by nodes/arcs + ContourTreeMesh(const IdArrayType& nodes, + const IdArrayType& arcs, + const ContourTreeMesh& mesh); + + // Initalize contour tree mesh from mesh and arcs. For fully augmented contour tree with all + // mesh vertices as nodes. Same as using { 0, 1, ..., nodes.size()-1 } as nodes for the + // ContourTreeMeshh(nodes, arcsm mesh) constructor above + ContourTreeMesh(const IdArrayType& arcs, const ContourTreeMesh& mesh); + + // Load contour tree mesh from file + ContourTreeMesh(const char* filename) + { + load(filename); + this->nVertices = this->sortedValues.GetNumberOfValues(); + } + + // Combine two ContourTreeMeshes + template + void mergeWith(ContourTreeMesh& other); + + // Save/load the mesh helpers + void save(const char* filename) const; + void load(const char* filename); + + // Empty placeholder function to ensure compliance of this class with the interface + // the other mesh classes. This is a no-op here since this class is initalized + // from a known contour tree so sort is already done + template + void SortData(const vtkm::cont::ArrayHandle& values) const + { + (void)values; // Do nothink but avoid unsused param warning + } + + // Public fields + static const int MAX_OUTDEGREE = 20; + + vtkm::Id nVertices; + // TODO we should be able to remove this one, but we need to figure out what we need to return in the worklet instead + IdArrayType sortOrder; + vtkm::cont::ArrayHandle sortedValues; + IdArrayType globalMeshIndex; + // neighbours stores for each vertex the indices of its neighbours. For each vertex + // the indices are sorted by value, i.e, the first neighbour has the lowest and + // the last neighbour the highest value for the vertex. 
In the array we just + // concatinate the list of neighbours from all vertices, i.e., we first + // have the list of neighbours of the first vertex, then the second vertex and so on, i.e.: + // [ n_1_1, n_1_2, n_2_1, n_2_2, n_2_3, etc.] + IdArrayType neighbours; + // firstNeighour gives us for each vertex an index into the neighours array indicating + // the index where the list of neighbours for the vertex begins + IdArrayType firstNeighbour; + // the maximum number of neighbours of a vertex + vtkm::Id maxNeighbours; + + // Debug print routine + void DebugPrint(const char* message, const char* fileName, long lineNum); + +private: + vtkm::cont::Invoker Invoke; + + bool mGetMax; // Define the behavior for the PrepareForExecution function + + // Private init and helper functions + void InitialiseNeighboursFromArcs(const IdArrayType& arcs); + void ComputeNNeighboursVector(IdArrayType& nNeighbours) const; + void ComputeMaxNeighbours(); + + // Private helper functions for saving data vectors + template + void saveVector(std::ostream& os, const vtkm::cont::ArrayHandle& vec) + const; // Internal helper function to save 1D index array to file + template + void loadVector(std::istream& is, + const vtkm::cont::ArrayHandle& + vec); // Internal helper function to load 1D index array from file + + +}; // ContourTreeMesh + + +// debug routine +template +void ContourTreeMesh::DebugPrint(const char* message, const char* fileName, long lineNum) +{ // DebugPrint() +#ifdef DEBUG_PRINT + std::cout << "---------------------------" << std::endl; + std::cout << std::setw(30) << std::left << fileName << ":" << std::right << std::setw(4) + << lineNum << std::endl; + std::cout << std::left << std::string(message) << std::endl; + std::cout << "Contour Tree Mesh Contains: " << std::endl; + std::cout << "---------------------------" << std::endl; + std::cout << std::endl; + + printHeader(this->nVertices); + printIndices("sortOrder", sortOrder); + printValues("sortedValues", sortedValues); + printIndices("globalMeshIndex", globalMeshIndex); + printIndices("neighbours", neighbours); + printIndices("firstNeighbour", firstNeighbour); + std::cout << "maxNeighbours=" << maxNeighbours << std::endl; + std::cout << "mGetMax=" << mGetMax << std::endl; + +#else + (void)message; + (void)fileName; + (void)lineNum; +#endif +} // DebugPrint() + + + + +// create the contour tree mesh from contour tree data +template +ContourTreeMesh::ContourTreeMesh(const IdArrayType& arcs, + const IdArrayType& inSortOrder, + const vtkm::cont::ArrayHandle& values, + const IdArrayType& inGlobalMeshIndex) + : sortOrder(inSortOrder) + , globalMeshIndex(inGlobalMeshIndex) + , neighbours() + , firstNeighbour() +{ + this->nVertices = this->sortOrder.GetNumberOfValues(); + // values permuted by sortOrder to sort the values + auto permutedValues = vtkm::cont::make_ArrayHandlePermutation(this->sortOrder, values); + // TODO check if we actually need to make this copy here. 
we could just store the permutedValues array to save memory
+  vtkm::cont::Algorithm::Copy(permutedValues, this->sortedValues);
+  this->InitialiseNeighboursFromArcs(arcs);
+}
+
+template
+ContourTreeMesh::ContourTreeMesh(const IdArrayType& arcs,
+                                 const ContourTreeMesh& mesh)
+  : sortOrder(mesh.sortOrder)
+  , sortedValues(mesh.sortedValues)
+  , globalMeshIndex(mesh.globalMeshIndex)
+  , neighbours()
+  , firstNeighbour()
+{
+  this->nVertices = this->sortedValues.GetNumberOfValues();
+  this->InitialiseNeighboursFromArcs(arcs);
+}
+
+
+template
+ContourTreeMesh::ContourTreeMesh(const IdArrayType& nodes,
+                                 const IdArrayType& arcs,
+                                 const ContourTreeMesh& mesh)
+  : sortOrder(mesh.sortOrder)
+  , neighbours()
+  , firstNeighbour()
+{
+  // Initialize the global mesh index with the globalMeshIndex permuted by the nodes
+  vtkm::cont::ArrayHandlePermutation permutedGlobalMeshIndex(
+    nodes, mesh.globalMeshIndex);
+  vtkm::cont::Algorithm::Copy(permutedGlobalMeshIndex, this->globalMeshIndex);
+  // Initialize the sortedValues array with the sortedValues permuted by the nodes
+  vtkm::cont::ArrayHandlePermutation permutedSortedValues(
+    nodes, mesh.sortedValues);
+  vtkm::cont::Algorithm::Copy(permutedSortedValues, this->sortedValues);
+  // Initialize the neighbours from the arcs
+  this->nVertices = this->sortedValues.GetNumberOfValues();
+  this->InitialiseNeighboursFromArcs(arcs);
+}
+
+
+// Initialize the contour tree from the arcs array and sort order
+template
+void ContourTreeMesh::InitialiseNeighboursFromArcs(const IdArrayType& arcs)
+{
+  // Find target indices for valid arcs in neighbours array ...
+  IdArrayType arcTargetIndex;
+  arcTargetIndex.Allocate(arcs.GetNumberOfValues());
+  oneIfArcValid oneIfArcValidFunctor;
+  auto oneIfArcValidArrayHandle =
+    vtkm::cont::ArrayHandleTransform(arcs, oneIfArcValidFunctor);
+  vtkm::cont::Algorithm::ScanExclusive(oneIfArcValidArrayHandle, arcTargetIndex);
+  vtkm::Id nValidArcs =
+    arcTargetIndex.GetPortalConstControl().Get(arcTargetIndex.GetNumberOfValues() - 1) +
+    oneIfArcValidFunctor(arcs.GetPortalConstControl().Get(arcs.GetNumberOfValues() - 1));
+
+  // ... and compress array
+  this->neighbours.ReleaseResources();
+  this->neighbours.Allocate(2 * nValidArcs);
+
+  contourtree_mesh_inc_ns::CompressNeighboursWorklet compressNeighboursWorklet;
+  this->Invoke(compressNeighboursWorklet, arcs, arcTargetIndex, this->neighbours);
+
+  // Sort arcs so that all arcs from the same vertex are adjacent. All arcs are in neighbours array based on
+  // sort index of their 'from' vertex (and then within a run sorted by sort index of their 'to' vertex).
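+  // For example (hypothetical input, flag bits ignored for clarity): with four vertices and
+  // arcs = [1, 3, 3, NO_SUCH_ELEMENT] (three valid arcs 0-1, 1-3 and 2-3 in sort-index space),
+  // the sort, start-index and replace steps below yield a CSR-style adjacency list:
+  //   neighbours     = [ 1,  0, 3,  3,  1, 2 ]
+  //   firstNeighbour = [ 0,  1,     3,  4 ]
+  // i.e. firstNeighbour[v] is the start of vertex v's run in neighbours, and each run is
+  // sorted by the neighbour's sort index.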
+ vtkm::cont::Algorithm::Sort(this->neighbours, contourtree_mesh_inc_ns::ArcComparator(arcs)); + + // Find start index for each vertex into neighbours array + this->firstNeighbour.Allocate(this->nVertices); + + contourtree_mesh_inc_ns::FindStartIndexWorklet findStartIndexWorklet; + this->Invoke(findStartIndexWorklet, + this->neighbours, + arcs, + this->firstNeighbour // output + ); + + + // Replace arc number with 'to' vertex in neighbours array + contourtree_mesh_inc_ns::ReplaceArcNumWithToVertexWorklet replaceArcNumWithToVertexWorklet; + this->Invoke(replaceArcNumWithToVertexWorklet, + this->neighbours, // input/output + arcs // input + ); + + // Compute maximum number of neighbours + this->ComputeMaxNeighbours(); + +#ifdef DEBUG_PRINT + std::cout << std::setw(30) << std::left << __FILE__ << ":" << std::right << std::setw(4) + << __LINE__ << std::endl; + auto firstNeighbourPortal = this->firstNeighbour.GetPortalConstControl(); + auto neighboursPortal = this->neighbours.GetPortalConstControl(); + for (vtkm::Id vtx = 0; vtx < firstNeighbour.GetNumberOfValues(); ++vtx) + { + std::cout << vtx << ": "; + vtkm::Id neighboursBeginIndex = firstNeighbourPortal.Get(vtx); + vtkm::Id neighboursEndIndex = (vtx < this->nVertices - 1) ? firstNeighbourPortal.Get(vtx + 1) + : neighbours.GetNumberOfValues(); + + for (vtkm::Id ni = neighboursBeginIndex; ni < neighboursEndIndex; ++ni) + { + std::cout << neighboursPortal.Get(ni) << " "; + } + std::cout << std::endl; + } + std::cout << "Max neighbours: " << this->maxNeighbours << std::endl; +#endif +} + +template +void ContourTreeMesh::ComputeNNeighboursVector(IdArrayType& nNeighbours) const +{ + nNeighbours.Allocate(this->firstNeighbour.GetNumberOfValues()); // same as this->nVertices + contourtree_mesh_inc_ns::ComputeMaxNeighboursWorklet computeMaxNeighboursWorklet( + this->neighbours.GetNumberOfValues()); + this->Invoke(computeMaxNeighboursWorklet, this->firstNeighbour, nNeighbours); +} + +template +void ContourTreeMesh::ComputeMaxNeighbours() +{ + // Compute maximum number of neighbours + IdArrayType nNeighbours; + this->ComputeNNeighboursVector(nNeighbours); + vtkm::cont::ArrayHandle rangeArray = vtkm::cont::ArrayRangeCompute(nNeighbours); + this->maxNeighbours = static_cast(rangeArray.GetPortalConstControl().Get(0).Max); +} + +// Define the behavior for the execution object generate by the PrepareForExecution function +template +void ContourTreeMesh::setPrepareForExecutionBehavior(bool getMax) +{ + this->mGetMax = getMax; +} + +// Get VTKM execution object that represents the structure of the mesh and provides the mesh helper functions on the device +template +template +contourtree_mesh_inc_ns::MeshStructureContourTreeMesh + ContourTreeMesh::PrepareForExecution(DeviceTag) const +{ + return contourtree_mesh_inc_ns::MeshStructureContourTreeMesh( + this->neighbours, this->firstNeighbour, this->maxNeighbours, this->mGetMax); +} + +struct NotNoSuchElement +{ + VTKM_EXEC_CONT bool operator()(vtkm::Id x) const { return x != NO_SUCH_ELEMENT; } +}; + +// Combine two ContourTreeMeshes +template +template +void ContourTreeMesh::mergeWith(ContourTreeMesh& other) +{ +#ifdef DEBUG_PRINT + this->DebugPrint("THIS ContourTreeMesh", __FILE__, __LINE__); + other.DebugPrint("OTHER ContourTreeMesh", __FILE__, __LINE__); +#endif + + mesh_dem_contourtree_mesh_inc::CombinedVectorExecObj allGlobalIndicesExecObj( + this->globalMeshIndex, other.globalMeshIndex); + auto allGlobalIndices = allGlobalIndicesExecObj.PrepareForExecution(DeviceTag()); + 
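+  // Outline of the merge implemented below: (1) std::merge the two index spaces into
+  // overallSortOrder, ordered by simulated simplicity on (data value, global mesh index);
+  // (2) collapse entries with the same global mesh index via a scan to obtain
+  // overallSortIndex and the combined vertex count; (3) build thisToCombinedSortOrder and
+  // otherToCombinedSortOrder to map each input mesh into the combined space; (4) count,
+  // copy and merge the per-vertex neighbour runs; (5) compact out duplicates and rebuild
+  // firstNeighbour, globalMeshIndex and sortedValues.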
mesh_dem_contourtree_mesh_inc::CombinedVectorExecObj allSortedValuesExecObj( + this->sortedValues, other.sortedValues); + auto allSortedValues = allSortedValuesExecObj.PrepareForExecution(DeviceTag()); + //auto allGlobalIndices = CombinedVectorthisGlobalMeshIndex, other.globalMeshIndex); + + // Create combined sort order + IdArrayType + overallSortOrder; // TODO This vector could potentially be implemented purely as a smart array handle to reduce memory usage + overallSortOrder.Allocate(this->nVertices + other.nVertices); + + { // Create a new scope so that the following two vectors get deleted when leaving the scope + auto thisIndices = vtkm::cont::ArrayHandleIndex(this->nVertices); // A regular index array + markOther markOtherFunctor; + auto otherIndices = vtkm::cont::make_ArrayHandleTransform( + vtkm::cont::ArrayHandleIndex(other.nVertices), markOtherFunctor); + contourtree_mesh_inc_ns::CombinedSimulatedSimplicityIndexComparator + cssicFunctor(allSortedValues, allGlobalIndices); + std::merge(vtkm::cont::ArrayPortalToIteratorBegin(thisIndices.GetPortalConstControl()), + vtkm::cont::ArrayPortalToIteratorEnd(thisIndices.GetPortalConstControl()), + vtkm::cont::ArrayPortalToIteratorBegin(otherIndices.GetPortalConstControl()), + vtkm::cont::ArrayPortalToIteratorEnd(otherIndices.GetPortalConstControl()), + vtkm::cont::ArrayPortalToIteratorBegin(overallSortOrder.GetPortalControl()), + cssicFunctor); + } + +#ifdef DEBUG_PRINT + std::cout << "OverallSortOrder.size " << overallSortOrder.GetNumberOfValues() << std::endl; + printIndices("overallSortOrder", overallSortOrder); + std::cout << std::endl; +#endif + + IdArrayType overallSortIndex; + overallSortIndex.Allocate(overallSortOrder.GetNumberOfValues()); + { + // Functor return 0,1 for each element of a CombinedVector depending on whethern the current value is different from the next + mesh_dem_contourtree_mesh_inc::CombinedVectorDifferentFromNext + differentFromNextFunctor(&allGlobalIndices, overallSortOrder); + auto differentFromNextArr = vtkm::cont::make_ArrayHandleTransform( + vtkm::cont::ArrayHandleIndex(overallSortIndex.GetNumberOfValues() - 1), + differentFromNextFunctor); + + // Compute the exclusive scan of our transformed combined vector + overallSortIndex.GetPortalControl().Set(0, 0); + IdArrayType tempArr; + vtkm::cont::Algorithm::ScanInclusive(differentFromNextArr, tempArr); + vtkm::cont::Algorithm::CopySubRange( + tempArr, 0, tempArr.GetNumberOfValues(), overallSortIndex, 1); + } + vtkm::Id nVerticesCombined = + overallSortIndex.GetPortalConstControl().Get(overallSortIndex.GetNumberOfValues() - 1) + 1; +#ifdef DEBUG_PRINT + std::cout << "OverallSortIndex.size " << overallSortIndex.GetNumberOfValues() << std::endl; + printIndices("overallSortIndex", overallSortIndex); + std::cout << "nVerticesCombined: " << nVerticesCombined << std::endl; + std::cout << std::endl; +#endif + + // thisToCombinedSortOrder and otherToCombinedSortOrder + IdArrayType thisToCombinedSortOrder; + thisToCombinedSortOrder.Allocate(this->firstNeighbour.GetNumberOfValues()); + IdArrayType otherToCombinedSortOrder; + otherToCombinedSortOrder.Allocate(other.firstNeighbour.GetNumberOfValues()); + contourtree_mesh_inc_ns::InitToCombinedSortOrderArraysWorklet + initToCombinedSortOrderArraysWorklet; + this->Invoke(initToCombinedSortOrderArraysWorklet, + overallSortIndex, + overallSortOrder, + thisToCombinedSortOrder, + otherToCombinedSortOrder); + +#ifdef DEBUG_PRINT + printIndices("thisToCombinedSortOrder", thisToCombinedSortOrder); + 
printIndices("otherToCombinedSortOrder", otherToCombinedSortOrder); +#endif + + IdArrayType combinedNNeighbours; + vtkm::cont::Algorithm::Copy(vtkm::cont::ArrayHandleConstant(0, nVerticesCombined), + combinedNNeighbours); + { // New scope so that array gets deleted when leaving scope + IdArrayType nNeighbours; + this->ComputeNNeighboursVector(nNeighbours); + auto permutedCombinedNNeighbours = + vtkm::cont::make_ArrayHandlePermutation(thisToCombinedSortOrder, combinedNNeighbours); + vtkm::cont::Algorithm::Copy(nNeighbours, permutedCombinedNNeighbours); + } + + IdArrayType combinedOtherStartIndex; + vtkm::cont::Algorithm::Copy(vtkm::cont::ArrayHandleConstant(0, nVerticesCombined), + combinedOtherStartIndex); + { // New scope so that array gets deleted when leaving scope + IdArrayType nNeighbours; + other.ComputeNNeighboursVector(nNeighbours); + contourtree_mesh_inc_ns::CombinedOtherStartIndexNNeighboursWorklet + combinedOtherStartIndexNNeighboursWorklet; + this->Invoke(combinedOtherStartIndexNNeighboursWorklet, + nNeighbours, // input + otherToCombinedSortOrder, // input + combinedNNeighbours, // input/output + combinedOtherStartIndex // input/output + ); + } + +#ifdef DEBUG_PRINT + printIndices("combinedNNeighbours", combinedNNeighbours); + printIndices("combinedOtherStartIndex", combinedOtherStartIndex); +#endif + + IdArrayType combinedFirstNeighbour; + combinedFirstNeighbour.Allocate(nVerticesCombined); + vtkm::cont::Algorithm::ScanExclusive(combinedNNeighbours, combinedFirstNeighbour); + vtkm::Id nCombinedNeighbours = combinedFirstNeighbour.GetPortalConstControl().Get( + combinedFirstNeighbour.GetNumberOfValues() - 1) + + combinedNNeighbours.GetPortalConstControl().Get(combinedNNeighbours.GetNumberOfValues() - 1); + + IdArrayType combinedNeighbours; + combinedNeighbours.Allocate(nCombinedNeighbours); + + // Update combined neighbours + contourtree_mesh_inc_ns::UpdateCombinedNeighboursWorklet updateCombinedNeighboursWorklet; + // Updata neighbours from this + this->Invoke( + updateCombinedNeighboursWorklet, + this->firstNeighbour, + this->neighbours, + thisToCombinedSortOrder, + combinedFirstNeighbour, + vtkm::cont::ArrayHandleConstant( + 0, + nVerticesCombined), // Constant 0 array. Just needed so we can use the same worklet for both cases + combinedNeighbours); + // Update neighbours from other + this->Invoke(updateCombinedNeighboursWorklet, + other.firstNeighbour, + other.neighbours, + otherToCombinedSortOrder, + combinedFirstNeighbour, + combinedOtherStartIndex, + combinedNeighbours); + + // TODO VTKM -Version MergedCombinedOtherStartIndex. Replace 1r block with the 1s block. Need to check for Segfault in contourtree_mesh_inc_ns::MergeCombinedOtherStartIndexWorklet. This workloat also still uses a number of stl algorithms that should be replaced with VTKm code (which is porbably also why the worklet fails). 
+ /* // 1s--start + contourtree_mesh_inc_ns::MergeCombinedOtherStartIndexWorklet mergeCombinedOtherStartIndexWorklet; + vtkm::worklet::DispatcherMapField< contourtree_mesh_inc_ns::MergeCombinedOtherStartIndexWorklet> mergeCombinedOtherStartIndexDispatcher(mergeCombinedOtherStartIndexWorklet); + this->Invoke(mergeCombinedOtherStartIndexWorklet, + combinedOtherStartIndex, // (input/output) + combinedNeighbours, // (input/output) + combinedFirstNeighbour // (input) + ); + // 1s--end + */ + + // TODO Fix the MergedCombinedOtherStartIndex worklet and remove //1r block below + // 1r--start + auto combinedOtherStartIndexPortal = combinedOtherStartIndex.GetPortalControl(); + auto combinedFirstNeighbourPortal = combinedFirstNeighbour.GetPortalConstControl(); + auto combinedNeighboursPortal = combinedNeighbours.GetPortalControl(); + std::vector tempCombinedNeighours((std::size_t)combinedNeighbours.GetNumberOfValues()); + for (vtkm::Id vtx = 0; vtx < combinedNeighbours.GetNumberOfValues(); ++vtx) + { + tempCombinedNeighours[(std::size_t)vtx] = combinedNeighboursPortal.Get(vtx); + } + for (vtkm::Id vtx = 0; vtx < combinedFirstNeighbour.GetNumberOfValues(); ++vtx) + { + if (combinedOtherStartIndexPortal.Get(vtx)) // Needs merge + { + auto neighboursBegin = tempCombinedNeighours.begin() + combinedFirstNeighbourPortal.Get(vtx); + auto neighboursEnd = (vtx < combinedFirstNeighbour.GetNumberOfValues() - 1) + ? tempCombinedNeighours.begin() + combinedFirstNeighbourPortal.Get(vtx + 1) + : tempCombinedNeighours.end(); + std::inplace_merge( + neighboursBegin, neighboursBegin + combinedOtherStartIndexPortal.Get(vtx), neighboursEnd); + auto it = std::unique(neighboursBegin, neighboursEnd); + combinedOtherStartIndexPortal.Set(vtx, static_cast(neighboursEnd - it)); + while (it != neighboursEnd) + *(it++) = NO_SUCH_ELEMENT; + } + } + // copy the values back to VTKm + for (vtkm::Id vtx = 0; vtx < combinedNeighbours.GetNumberOfValues(); ++vtx) + { + combinedNeighboursPortal.Set(vtx, tempCombinedNeighours[(std::size_t)vtx]); + } + // 1r--end + + IdArrayType combinedFirstNeighbourShift; + combinedFirstNeighbourShift.Allocate(combinedFirstNeighbour.GetNumberOfValues()); + vtkm::cont::Algorithm::ScanExclusive(combinedOtherStartIndex, combinedFirstNeighbourShift); + + { + IdArrayType tempCombinedNeighbours; + vtkm::cont::Algorithm::CopyIf( + combinedNeighbours, combinedNeighbours, tempCombinedNeighbours, NotNoSuchElement()); + combinedNeighbours = tempCombinedNeighbours; // Swap the two arrays + } + + // Adjust firstNeigbour indices by deleted elements + { // make sure variables created are deleted after the context + contourtree_mesh_inc_ns::SubtractAssignWorklet subAssignWorklet; + this->Invoke(subAssignWorklet, combinedFirstNeighbour, combinedFirstNeighbourShift); + } + + // Compute combined global mesh index arrays + IdArrayType combinedGlobalMeshIndex; + combinedGlobalMeshIndex.Allocate(combinedFirstNeighbour.GetNumberOfValues()); + { // make sure arrays used for copy go out of scope + auto permutedCombinedGlobalMeshIndex = + vtkm::cont::make_ArrayHandlePermutation(thisToCombinedSortOrder, combinedGlobalMeshIndex); + vtkm::cont::Algorithm::Copy(globalMeshIndex, permutedCombinedGlobalMeshIndex); + } + { // make sure arrays used for copy go out of scope + auto permutedCombinedGlobalMeshIndex = + vtkm::cont::make_ArrayHandlePermutation(otherToCombinedSortOrder, combinedGlobalMeshIndex); + vtkm::cont::Algorithm::Copy(other.globalMeshIndex, permutedCombinedGlobalMeshIndex); + } + + // Compute combined sorted values + 
vtkm::cont::ArrayHandle combinedSortedValues;
+  combinedSortedValues.Allocate(combinedFirstNeighbour.GetNumberOfValues());
+  { // make sure arrays used for copy go out of scope
+    auto permutedCombinedSortedValues =
+      vtkm::cont::make_ArrayHandlePermutation(thisToCombinedSortOrder, combinedSortedValues);
+    vtkm::cont::Algorithm::Copy(sortedValues, permutedCombinedSortedValues);
+  }
+  { // make sure arrays used for copy go out of scope
+    auto permutedCombinedSortedValues =
+      vtkm::cont::make_ArrayHandlePermutation(otherToCombinedSortOrder, combinedSortedValues);
+    vtkm::cont::Algorithm::Copy(other.sortedValues, permutedCombinedSortedValues);
+  }
+
+  // Swap in combined version. VTKM ArrayHandles are smart so we can just swap in the new for the old
+  this->sortedValues = combinedSortedValues;
+  this->globalMeshIndex = combinedGlobalMeshIndex;
+  this->neighbours = combinedNeighbours;
+  this->firstNeighbour = combinedFirstNeighbour;
+  this->nVertices = sortedValues.GetNumberOfValues();
+  // TODO Do we need to set the sort order as well?
+
+  // Re-compute maximum number of neighbours
+  ComputeMaxNeighbours();
+
+#ifdef DEBUG_PRINT
+  // Print the contents of this for debugging
+  DebugPrint("ContourTreeMeshes merged", __FILE__, __LINE__);
+#endif
+}
+
+
+template
+void ContourTreeMesh::save(const char* filename) const
+{
+  std::ofstream os(filename);
+  saveVector(os, this->sortOrder);
+  saveVector(os, this->sortedValues);
+  saveVector(os, this->globalMeshIndex);
+  saveVector(os, this->neighbours);
+  saveVector(os, this->firstNeighbour);
+}
+
+template
+void ContourTreeMesh::load(const char* filename)
+{
+  std::ifstream is(filename);
+  loadVector(is, this->sortOrder);
+  loadVector(is, this->sortedValues);
+  loadVector(is, this->globalMeshIndex);
+  loadVector(is, this->neighbours);
+  loadVector(is, this->firstNeighbour);
+  this->ComputeMaxNeighbours();
+}
+
+template
+template
+void ContourTreeMesh::saveVector(std::ostream& os,
+                                 const vtkm::cont::ArrayHandle& vec) const
+{
+  vtkm::Id numVals = vec.GetNumberOfValues();
+  os.write(reinterpret_cast(&numVals), sizeof(ValueType));
+  auto vecPortal = vec.GetPortalConstControl();
+  for (vtkm::Id i = 0; i < numVals; ++i)
+    os.write(reinterpret_cast(vecPortal.Get(i)), sizeof(ValueType));
+}
+
+template
+template
+void ContourTreeMesh::loadVector(std::istream& is,
+                                 const vtkm::cont::ArrayHandle& vec)
+{
+  vtkm::Id numVals;
+  is.read(reinterpret_cast(&numVals), sizeof(ValueType));
+  vec.Allocate(numVals);
+  auto vecPortal = vec.GetPortalControl();
+  vtkm::Id val;
+  for (vtkm::Id i = 0; i < numVals; ++i)
+  {
+    is.read(reinterpret_cast(val), sizeof(ValueType));
+    vecPortal.Set(i, val);
+  }
+}
+
+} // namespace contourtree_augmented
+} // worklet
+} // vtkm
+
+#endif
diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_2D_Triangulation.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_2D_Triangulation.h
index 4907f2af4..79d11b5dd 100644
--- a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_2D_Triangulation.h
+++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_2D_Triangulation.h
@@ -118,6 +118,7 @@ MeshStructureFreudenthal2D
   m2d_freudenthal::N_INCIDENT_EDGES,
   this->useGetMax,
   this->sortIndices,
+  this->sortOrder,
   edgeBoundaryDetectionMasks);
 }
diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_3D_Triangulation.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_3D_Triangulation.h
index b3034fdcd..f20d7b3a3 100644
---
a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_3D_Triangulation.h +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/Freudenthal_3D_Triangulation.h @@ -131,6 +131,7 @@ MeshStructureFreudenthal3D m3d_freudenthal::N_INCIDENT_EDGES, this->useGetMax, this->sortIndices, + this->sortOrder, edgeBoundaryDetectionMasks, neighbourOffsets, linkComponentCaseTable); diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MarchingCubes_3D_Triangulation.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MarchingCubes_3D_Triangulation.h index f9ff12255..3de090a0c 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MarchingCubes_3D_Triangulation.h +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MarchingCubes_3D_Triangulation.h @@ -158,6 +158,7 @@ MeshStructureMarchingCubes this->nSlices, this->useGetMax, this->sortIndices, + this->sortOrder, edgeBoundaryDetectionMasks, cubeVertexPermutations, linkVertexConnectionsSix, diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureContourTreeMesh.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureContourTreeMesh.h new file mode 100644 index 000000000..a8fa2a287 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureContourTreeMesh.h @@ -0,0 +1,200 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_mesh_dem_triangulation_contourtree_mesh_execution_obect_mesh_structure_h +#define vtkm_worklet_contourtree_augmented_mesh_dem_triangulation_contourtree_mesh_execution_obect_mesh_structure_h + +#include +#include +#include +#include + + + +//Define namespace alias for the freudenthal types to make the code a bit more readable +namespace cpp2_ns = vtkm::worklet::contourtree_augmented; + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + +// Worklet for computing the sort indices from the sort order +template +class MeshStructureContourTreeMesh +{ +public: + typedef typename cpp2_ns::IdArrayType::template ExecutionTypes::PortalConst + IdArrayPortalType; + + // Default constucture. Needed for the CUDA built to work + VTKM_EXEC_CONT + MeshStructureContourTreeMesh() + : getMax(false) + { + } + + // Main constructure used in the code + VTKM_CONT + MeshStructureContourTreeMesh(const cpp2_ns::IdArrayType neighbours, + const cpp2_ns::IdArrayType firstNeighbour, + const vtkm::Id maxneighbours, + bool getmax) + : maxNeighbours(maxneighbours) + , getMax(getmax) + { + neighboursPortal = neighbours.PrepareForInput(DeviceAdapter()); + firstNeighbourPortal = firstNeighbour.PrepareForInput(DeviceAdapter()); + } + + VTKM_EXEC + vtkm::Id GetNumberOfVertices() const { return this->firstNeighbourPortal.GetNumberOfValues(); } + + VTKM_EXEC + constexpr vtkm::Id GetMaxNumberOfNeighbours() const { return this->maxNeighbours; } + + VTKM_EXEC + inline vtkm::Id GetNeighbourIndex(vtkm::Id sortIndex, vtkm::Id neighbourNo) const + { // GetNeighbourIndex + return neighboursPortal.Get(firstNeighbourPortal.Get(sortIndex) + neighbourNo); + } // GetNeighbourIndex + + // sets outgoing paths for saddles + VTKM_EXEC + inline vtkm::Id GetExtremalNeighbour(vtkm::Id sortIndex) const + { // GetExtremalNeighbour() + vtkm::Id neighboursBeginIndex = firstNeighbourPortal.Get(sortIndex); + vtkm::Id neighboursEndIndex = (sortIndex < this->GetNumberOfVertices() - 1) + ? 
(firstNeighbourPortal.Get(sortIndex + 1) - 1) + : (neighboursPortal.GetNumberOfValues() - 1); + vtkm::Id neighboursBegin = neighboursPortal.Get(neighboursBeginIndex); + vtkm::Id neighboursEnd = neighboursPortal.Get(neighboursEndIndex); + + if (neighboursBeginIndex == neighboursEndIndex + 1) + { // Empty list of neighbours, this should never happen + return sortIndex | TERMINAL_ELEMENT; + } + + vtkm::Id ret; + if (this->getMax) + { + ret = neighboursEnd; + if (ret < sortIndex) + ret = sortIndex | TERMINAL_ELEMENT; + } + else + { + ret = neighboursBegin; + if (ret > sortIndex) + ret = sortIndex | TERMINAL_ELEMENT; + } + return ret; + } // GetExtremalNeighbour() + + + // NOTE/FIXME: The following also iterates over all values and could be combined with GetExtremalNeighbour(). However, the + // results are needed at different places and splitting the two functions leads to a cleaner design + VTKM_EXEC + inline vtkm::Pair GetNeighbourComponentsMaskAndDegree( + vtkm::Id sortIndex, + bool getMaxComponents) const + { // GetNeighbourComponentsMaskAndDegree() + vtkm::Id neighboursBeginIndex = firstNeighbourPortal.Get(sortIndex); + vtkm::Id neighboursEndIndex = (sortIndex < this->GetNumberOfVertices() - 1) + ? firstNeighbourPortal.Get(sortIndex + 1) + : neighboursPortal.GetNumberOfValues(); + vtkm::Id numNeighbours = neighboursEndIndex - neighboursBeginIndex; + vtkm::Id outDegree = 0; + vtkm::Id neighbourComponentMask = 0; + vtkm::Id currNeighbour = 0; + for (vtkm::Id nbrNo = 0; nbrNo < numNeighbours; ++nbrNo) + { + currNeighbour = neighboursPortal.Get(neighboursBeginIndex + nbrNo); + if ((getMaxComponents && (currNeighbour > sortIndex)) || + (!getMaxComponents && (currNeighbour < sortIndex))) + { + ++outDegree; + neighbourComponentMask |= vtkm::Id{ 1 } << nbrNo; + } + } + vtkm::Pair re(neighbourComponentMask, outDegree); + return re; + } // GetNeighbourComponentsMaskAndDegree() + +private: + IdArrayPortalType neighboursPortal; + IdArrayPortalType firstNeighbourPortal; + vtkm::Id maxNeighbours; + bool getMax; + +}; // ExecutionObjec_MeshStructure_3Dt + +} // namespace mesh_dem_2d_freudenthal_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal2D.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal2D.h index d5bd48963..7ff704f8f 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal2D.h +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal2D.h @@ -95,12 +95,14 @@ public: vtkm::Int32 nincident_edges, bool getmax, const IdArrayType& sortIndices, + const IdArrayType& sortOrder, const m2d_freudenthal::edgeBoundaryDetectionMasksType& edgeBoundaryDetectionMasksIn) : mesh_dem::MeshStructure2D(nrows, ncols) , getMax(getmax) , nIncidentEdges(nincident_edges) { sortIndicesPortal = sortIndices.PrepareForInput(DeviceAdapter()); + sortOrderPortal = sortOrder.PrepareForInput(DeviceAdapter()); edgeBoundaryDetectionMasksPortal = edgeBoundaryDetectionMasksIn.PrepareForInput(DeviceAdapter()); } @@ -109,35 +111,28 @@ public: constexpr vtkm::Id GetMaxNumberOfNeighbours() const { return m2d_freudenthal::N_INCIDENT_EDGES; } VTKM_EXEC - inline vtkm::Id GetNeighbourIndex(vtkm::Id vertex, vtkm::Id edgeNo) const + inline vtkm::Id GetNeighbourIndex(vtkm::Id sortIndex, vtkm::Id edgeNo) const { // GetNeighbourIndex + vtkm::Id meshIndex = this->sortOrderPortal.Get(sortIndex); switch (edgeNo) { case 0: 
- return vertex + 1; - //break; // row , col + 1 + return this->sortIndicesPortal.Get(meshIndex + 1); // row , col + 1 case 1: - return vertex + this->nCols + 1; - //break; // row + 1, col + 1 + return this->sortIndicesPortal.Get(meshIndex + this->nCols + 1); // row + 1, col + 1 case 2: - return vertex + this->nCols; - //break; // row + 1, col + return this->sortIndicesPortal.Get(meshIndex + this->nCols); // row + 1, col case 3: - return vertex - 1; - //break; // row , col - 1 + return this->sortIndicesPortal.Get(meshIndex - 1); // row , col - 1 case 4: - return vertex - this->nCols - 1; - //break; // row - 1, col - 1 + return this->sortIndicesPortal.Get(meshIndex - this->nCols - 1); // row - 1, col - 1 case 5: - return vertex - this->nCols; - //break; // row - 1, col + return this->sortIndicesPortal.Get(meshIndex - this->nCols); // row - 1, col default: - VTKM_ASSERT(false); - return vertex; + return -1; // TODO How to generate a meaningful error message from a device (in particular when using CUDA?) } } // GetNeighbourIndex - // Disable conversion warnings for Add, Subtract, Multiply, Divide on GCC only. // GCC creates false positive warnings for signed/unsigned char* operations. // This occurs because the values are implicitly casted up to int's for the @@ -151,16 +146,15 @@ public: // sets outgoing paths for saddles VTKM_EXEC - inline vtkm::Id GetExtremalNeighbour(vtkm::Id vertex) const - { + inline vtkm::Id GetExtremalNeighbour(vtkm::Id sortIndex) const + { // GetExtremalNeighbour() using namespace m2d_freudenthal; - // operator() - // convert to a sort index - vtkm::Id sortIndex = sortIndicesPortal.Get(vertex); + // convert to a mesh index + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); // get the row and column - vtkm::Id row = this->vertexRow(vertex); - vtkm::Id col = this->vertexColumn(vertex); + vtkm::Id row = this->vertexRow(meshIndex); + vtkm::Id col = this->vertexColumn(meshIndex); vtkm::Int8 boundaryConfig = ((col == 0) ? leftBit : 0) | ((col == this->nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) | ((row == this->nRows - 1) ? bottomBit : 0); @@ -172,40 +166,35 @@ public: if (!(boundaryConfig & edgeBoundaryDetectionMasksPortal.Get(edgeNo))) { // calculate neighbour's ID and sort order - vtkm::Id nbr = GetNeighbourIndex(vertex, edgeNo); - - // retrieve the sort index - vtkm::Id nbrIndex = sortIndicesPortal.Get(nbr); + vtkm::Id nbrSortIndex = GetNeighbourIndex(sortIndex, edgeNo); // if it's not a valid destination, ignore it - if (getMax ? (nbrIndex > sortIndex) : (nbrIndex < sortIndex)) + if (getMax ? (nbrSortIndex > sortIndex) : (nbrSortIndex < sortIndex)) { // and save the destination - return nbrIndex; + return nbrSortIndex; } } } // per edge - return sortIndex | TERMINAL_ELEMENT; - } // operator() + } // GetExtremalNeighbour() // NOTE/FIXME: The following also iterates over all values and could be combined with GetExtremalNeighbour(). 
However, the // results are needed at different places and splitting the two functions leads to a cleaner design VTKM_EXEC inline vtkm::Pair GetNeighbourComponentsMaskAndDegree( - vtkm::Id vertex, + vtkm::Id sortIndex, bool getMaxComponents) const - { + { // GetNeighbourComponentsMaskAndDegree() using namespace m2d_freudenthal; - // GetNeighbourComponentsMaskAndDegree() // get data portals - // convert to a sort index - vtkm::Id sortIndex = sortIndicesPortal.Get(vertex); + // convert to a mesh index + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); // get the row and column - vtkm::Id row = this->vertexRow(vertex); - vtkm::Id col = this->vertexColumn(vertex); + vtkm::Id row = this->vertexRow(meshIndex); + vtkm::Id col = this->vertexColumn(meshIndex); vtkm::Int8 boundaryConfig = ((col == 0) ? leftBit : 0) | ((col == this->nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) | ((row == this->nRows - 1) ? bottomBit : 0); @@ -219,16 +208,13 @@ public: if (!(boundaryConfig & edgeBoundaryDetectionMasksPortal.Get(edgeNo))) { // calculate neighbour's ID and sort order - vtkm::Id nbr = GetNeighbourIndex(vertex, edgeNo); - - // retrieve the sort index - vtkm::Id nbrIndex = sortIndicesPortal.Get(nbr); + vtkm::Id nbrSortIndex = GetNeighbourIndex(sortIndex, edgeNo); // if it's not a valid destination, ignore it - if (getMaxComponents ? (nbrIndex > sortIndex) : (nbrIndex < sortIndex)) + if (getMaxComponents ? (nbrSortIndex > sortIndex) : (nbrSortIndex < sortIndex)) { // now set the flag in the neighbourhoodMask - neighbourhoodMask |= static_cast(1) << edgeNo; + neighbourhoodMask |= vtkm::Id{ 1 } << edgeNo; } } } // per edge @@ -238,42 +224,44 @@ public: vtkm::Id neighbourComponentMask = 0; // special case for local minimum if (neighbourhoodMask == 0x3F) + { outDegree = 1; + } else { // not a local minimum if ((neighbourhoodMask & 0x30) == 0x20) { ++outDegree; - neighbourComponentMask |= static_cast(1) << 5; + neighbourComponentMask |= vtkm::Id{ 1 } << 5; } if ((neighbourhoodMask & 0x18) == 0x10) { ++outDegree; - neighbourComponentMask |= static_cast(1) << 4; + neighbourComponentMask |= vtkm::Id{ 1 } << 4; } if ((neighbourhoodMask & 0x0C) == 0x08) { ++outDegree; - neighbourComponentMask |= static_cast(1) << 3; + neighbourComponentMask |= vtkm::Id{ 1 } << 3; } if ((neighbourhoodMask & 0x06) == 0x04) { ++outDegree; - neighbourComponentMask |= static_cast(1) << 2; + neighbourComponentMask |= vtkm::Id{ 1 } << 2; } if ((neighbourhoodMask & 0x03) == 0x02) { ++outDegree; - neighbourComponentMask |= static_cast(1) << 1; + neighbourComponentMask |= vtkm::Id{ 1 } << 1; } if ((neighbourhoodMask & 0x21) == 0x01) { ++outDegree; - neighbourComponentMask |= static_cast(1) << 0; + neighbourComponentMask |= vtkm::Id{ 1 } << 0; } } // not a local minimum - - return vtkm::make_Pair(neighbourComponentMask, outDegree); + vtkm::Pair re(neighbourComponentMask, outDegree); + return re; } // GetNeighbourComponentsMaskAndDegree() #if (defined(VTKM_GCC) || defined(VTKM_CLANG)) @@ -282,6 +270,7 @@ public: private: sortIndicesPortalType sortIndicesPortal; + sortIndicesPortalType sortOrderPortal; edgeBoundaryDetectionMasksPortalType edgeBoundaryDetectionMasksPortal; bool getMax; vtkm::Id nIncidentEdges; diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal3D.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal3D.h index 509a8bbdc..af7f0724e 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal3D.h +++ 
b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureFreudenthal3D.h @@ -106,6 +106,7 @@ public: vtkm::Id nincident_edges, bool getmax, const IdArrayType& sortIndices, + const IdArrayType& sortOrder, const m3d_freudenthal::edgeBoundaryDetectionMasksType& edgeBoundaryDetectionMasksIn, const m3d_freudenthal::neighbourOffsetsType& neighbourOffsetsIn, const m3d_freudenthal::linkComponentCaseTableType& linkComponentCaseTableIn) @@ -114,6 +115,7 @@ public: , nIncidentEdges(nincident_edges) { sortIndicesPortal = sortIndices.PrepareForInput(DeviceAdapter()); + sortOrderPortal = sortOrder.PrepareForInput(DeviceAdapter()); edgeBoundaryDetectionMasksPortal = edgeBoundaryDetectionMasksIn.PrepareForInput(DeviceAdapter()); neighbourOffsetsPortal = neighbourOffsetsIn.PrepareForInput(DeviceAdapter()); @@ -125,15 +127,17 @@ public: VTKM_EXEC - inline vtkm::Id GetNeighbourIndex(vtkm::Id vertex, vtkm::Id edgeNo) const + inline vtkm::Id GetNeighbourIndex(vtkm::Id sortIndex, vtkm::Id edgeNo) const { // GetNeighbourIndex - return vertex + - (neighbourOffsetsPortal.Get(edgeNo)[0] * this->nRows + - neighbourOffsetsPortal.Get(edgeNo)[1]) * - this->nCols + - neighbourOffsetsPortal.Get(edgeNo)[2]; + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); + return sortIndicesPortal.Get(meshIndex + + (neighbourOffsetsPortal.Get(edgeNo)[0] * this->nRows + + neighbourOffsetsPortal.Get(edgeNo)[1]) * + this->nCols + + neighbourOffsetsPortal.Get(edgeNo)[2]); } // GetNeighbourIndex + // Disable conversion warnings for Add, Subtract, Multiply, Divide on GCC only. // GCC creates false positive warnings for signed/unsigned char* operations. // This occurs because the values are implicitly casted up to int's for the @@ -147,17 +151,16 @@ public: // sets outgoing paths for saddles VTKM_EXEC - inline vtkm::Id GetExtremalNeighbour(vtkm::Id vertex) const - { + inline vtkm::Id GetExtremalNeighbour(vtkm::Id sortIndex) const + { // GetExtremalNeighbour() + // convert to a mesh index using namespace m3d_freudenthal; - // GetExtremalNeighbour() - // convert to a sort index - vtkm::Id sortIndex = sortIndicesPortal.Get(vertex); + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); + + vtkm::Id slice = this->vertexSlice(meshIndex); + vtkm::Id row = this->vertexRow(meshIndex); + vtkm::Id col = this->vertexColumn(meshIndex); - // get the row and column - vtkm::Id slice = this->vertexSlice(vertex); - vtkm::Id row = this->vertexRow(vertex); - vtkm::Id col = this->vertexColumn(vertex); vtkm::Int8 boundaryConfig = ((slice == 0) ? frontBit : 0) | ((slice == this->nSlices - 1) ? backBit : 0) | ((col == 0) ? leftBit : 0) | ((col == this->nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) | @@ -170,35 +173,33 @@ public: // only consider valid edges if (!(boundaryConfig & edgeBoundaryDetectionMasksPortal.Get(nbrNo))) { - vtkm::Id nbrIndex = sortIndicesPortal.Get(GetNeighbourIndex(vertex, nbrNo)); + vtkm::Id nbrSortIndex = GetNeighbourIndex(sortIndex, nbrNo); // explicit test allows reversal between join and split trees - if (getMax ? (nbrIndex > sortIndex) : (nbrIndex < sortIndex)) + if (getMax ? (nbrSortIndex > sortIndex) : (nbrSortIndex < sortIndex)) { // valid edge and outbound - return nbrIndex; + return nbrSortIndex; } // valid edge and outbound } } // per edge - return sortIndex | TERMINAL_ELEMENT; - } // GetExtremalNeighbour() + // NOTE/FIXME: The following also iterates over all values and could be combined with GetExtremalNeighbour(). 
However, the // results are needed at different places and splitting the two functions leads to a cleaner design VTKM_EXEC inline vtkm::Pair GetNeighbourComponentsMaskAndDegree( - vtkm::Id vertex, + vtkm::Id sortIndex, bool getMaxComponents) const - { + { // GetNeighbourComponentsMaskAndDegree() + // convert to a meshIndex using namespace m3d_freudenthal; - // GetNeighbourComponentsMaskAndDegree() - // convert to a sort index - vtkm::Id sortIndex = sortIndicesPortal.Get(vertex); + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); // get the row and column - vtkm::Id slice = this->vertexSlice(vertex); - vtkm::Id row = this->vertexRow(vertex); - vtkm::Id col = this->vertexColumn(vertex); + vtkm::Id slice = this->vertexSlice(meshIndex); + vtkm::Id row = this->vertexRow(meshIndex); + vtkm::Id col = this->vertexColumn(meshIndex); vtkm::Int8 boundaryConfig = ((slice == 0) ? frontBit : 0) | ((slice == this->nSlices - 1) ? backBit : 0) | ((col == 0) ? leftBit : 0) | ((col == this->nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) | @@ -212,11 +213,10 @@ public: { if (!(boundaryConfig & edgeBoundaryDetectionMasksPortal.Get(edgeNo))) { - vtkm::Id nbrIndex = sortIndicesPortal.Get(this->GetNeighbourIndex(vertex, edgeNo)); - - if (getMaxComponents ? (sortIndex < nbrIndex) : (sortIndex > nbrIndex)) + vtkm::Id nbrSortIndex = GetNeighbourIndex(sortIndex, edgeNo); + if (getMaxComponents ? (sortIndex < nbrSortIndex) : (sortIndex > nbrSortIndex)) { - caseNo |= static_cast(1) << edgeNo; + caseNo |= vtkm::Id{ 1 } << edgeNo; } } // inside grid } // for each edge @@ -226,15 +226,17 @@ public: vtkm::Id neighbourComponentMask = 0; for (int nbrNo = 0; nbrNo < N_INCIDENT_EDGES; ++nbrNo) - if (linkComponentCaseTablePortal.Get(caseNo) & (static_cast(1) << nbrNo)) + if (linkComponentCaseTablePortal.Get(caseNo) & (1 << nbrNo)) { outDegree++; - neighbourComponentMask |= static_cast(1) << nbrNo; + neighbourComponentMask |= vtkm::Id{ 1 } << nbrNo; } - return vtkm::make_Pair(neighbourComponentMask, outDegree); + vtkm::Pair re(neighbourComponentMask, outDegree); + return re; } // GetNeighbourComponentsMaskAndDegree() + #if (defined(VTKM_GCC) || defined(VTKM_CLANG)) #pragma GCC diagnostic pop #endif // gcc || clang @@ -242,6 +244,7 @@ public: private: sortIndicesPortalType sortIndicesPortal; + sortIndicesPortalType sortOrderPortal; edgeBoundaryDetectionMasksPortalType edgeBoundaryDetectionMasksPortal; neighbourOffsetsPortalType neighbourOffsetsPortal; linkComponentCaseTablePortalType linkComponentCaseTablePortal; diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureMarchingCubes.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureMarchingCubes.h index 97e8156cb..cd9001658 100644 --- a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureMarchingCubes.h +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/MeshStructureMarchingCubes.h @@ -112,6 +112,7 @@ public: vtkm::Id nslices, bool getmax, const IdArrayType& sortIndices, + const IdArrayType& sortOrder, const m3d_marchingcubes::edgeBoundaryDetectionMasksType& edgeBoundaryDetectionMasksIn, const m3d_marchingcubes::cubeVertexPermutationsType& cubeVertexPermutationsIn, const m3d_marchingcubes::linkVertexConnectionsType& linkVertexConnectionsSixIn, @@ -122,6 +123,7 @@ public: , getMax(getmax) { sortIndicesPortal = sortIndices.PrepareForInput(DeviceAdapter()); + sortOrderPortal = sortOrder.PrepareForInput(DeviceAdapter()); edgeBoundaryDetectionMasksPortal = 
edgeBoundaryDetectionMasksIn.PrepareForInput(DeviceAdapter()); cubeVertexPermutationsPortal = cubeVertexPermutationsIn.PrepareForInput(DeviceAdapter()); @@ -139,70 +141,83 @@ public: } VTKM_EXEC - inline vtkm::Id GetNeighbourIndex(vtkm::Id vertex, vtkm::Id nbrNo) const + inline vtkm::Id GetNeighbourIndex(vtkm::Id sortIndex, vtkm::Id nbrNo) const { using namespace m3d_marchingcubes; + vtkm::Id meshIndex = this->sortOrderPortal.Get(sortIndex); // GetNeighbourIndex switch (nbrNo) { // Edge connected neighbours case 0: - return vertex - (this->nRows * this->nCols); // { -1, 0, 0 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols)); // { -1, 0, 0 } case 1: - return vertex - this->nCols; // { 0, -1, 0 } + return sortIndicesPortal.Get(meshIndex - this->nCols); // { 0, -1, 0 } case 2: - return vertex - 1; // { 0, 0, -1 } + return sortIndicesPortal.Get(meshIndex - 1); // { 0, 0, -1 } case 3: - return vertex + 1; // { 0, 0, 1 } + return sortIndicesPortal.Get(meshIndex + 1); // { 0, 0, 1 } case 4: - return vertex + this->nCols; // { 0, 1, 0 } + return sortIndicesPortal.Get(meshIndex + this->nCols); // { 0, 1, 0 } // Face connected neighbours case 5: - return vertex + (this->nRows * this->nCols); // { 1, 0, 0 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols)); // { 1, 0, 0 } case 6: - return vertex - (this->nRows * this->nCols) - this->nCols; // { -1, -1, 0 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) - + this->nCols); // { -1, -1, 0 } case 7: - return vertex - (this->nRows * this->nCols) - 1; // { -1, 0, -1 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) - 1); // { -1, 0, -1 } case 8: - return vertex - (this->nRows * this->nCols) + 1; // { -1, 0, 1 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) + 1); // { -1, 0, 1 } case 9: - return vertex - (this->nRows * this->nCols) + this->nCols; // { -1, 1, 0 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) + + this->nCols); // { -1, 1, 0 } case 10: - return vertex - this->nCols - 1; // { 0, -1, -1 } + return sortIndicesPortal.Get(meshIndex - this->nCols - 1); // { 0, -1, -1 } case 11: - return vertex - this->nCols + 1; // { 0, -1, 1 } + return sortIndicesPortal.Get(meshIndex - this->nCols + 1); // { 0, -1, 1 } case 12: - return vertex + this->nCols - 1; // { 0, 1, -1 } + return sortIndicesPortal.Get(meshIndex + this->nCols - 1); // { 0, 1, -1 } case 13: - return vertex + this->nCols + 1; // { 0, 1, 1 } + return sortIndicesPortal.Get(meshIndex + this->nCols + 1); // { 0, 1, 1 } case 14: - return vertex + (this->nRows * this->nCols) - this->nCols; // { 1, -1, 0 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) - + this->nCols); // { 1, -1, 0 } case 15: - return vertex + (this->nRows * this->nCols) - 1; // { 1, 0, -1 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) - 1); // { 1, 0, -1 } case 16: - return vertex + (this->nRows * this->nCols) + 1; // { 1, 0, 1 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) + 1); // { 1, 0, 1 } case 17: - return vertex + (this->nRows * this->nCols) + this->nCols; // { 1, 1, 0 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) + + this->nCols); // { 1, 1, 0 } // Diagonal connected neighbours case 18: - return vertex - (this->nRows * this->nCols) - this->nCols - 1; // { -1, -1, -1 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) - this->nCols - + 1); // { -1, -1, -1 } case 
19: - return vertex - (this->nRows * this->nCols) - this->nCols + 1; // { -1, -1, 1 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) - this->nCols + + 1); // { -1, -1, 1 } case 20: - return vertex - (this->nRows * this->nCols) + this->nCols - 1; // { -1, 1, -1 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) + this->nCols - + 1); // { -1, 1, -1 } case 21: - return vertex - (this->nRows * this->nCols) + this->nCols + 1; // { -1, 1, 1 } + return sortIndicesPortal.Get(meshIndex - (this->nRows * this->nCols) + this->nCols + + 1); // { -1, 1, 1 } case 22: - return vertex + (this->nRows * this->nCols) - this->nCols - 1; // { 1, -1, -1 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) - this->nCols - + 1); // { 1, -1, -1 } case 23: - return vertex + (this->nRows * this->nCols) - this->nCols + 1; // { 1, -1, 1 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) - this->nCols + + 1); // { 1, -1, 1 } case 24: - return vertex + (this->nRows * this->nCols) + this->nCols - 1; // { 1, 1, -1 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) + this->nCols - + 1); // { 1, 1, -1 } case 25: - return vertex + (this->nRows * this->nCols) + this->nCols + 1; // { 1, 1, 1 } + return sortIndicesPortal.Get(meshIndex + (this->nRows * this->nCols) + this->nCols + + 1); // { 1, 1, 1 } default: VTKM_ASSERT(false); - return vertex; + return meshIndex; // Need to error out here } } // GetNeighbourIndex @@ -218,16 +233,16 @@ public: #endif // gcc || clang VTKM_EXEC - inline vtkm::Id GetExtremalNeighbour(vtkm::Id vertex) const + inline vtkm::Id GetExtremalNeighbour(vtkm::Id sortIndex) const { using namespace m3d_marchingcubes; // GetExtremalNeighbour() // convert to a sort index - vtkm::Id sortIndex = sortIndicesPortal.Get(vertex); + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); - vtkm::Id slice = this->vertexSlice(vertex); - vtkm::Id row = this->vertexRow(vertex); - vtkm::Id col = this->vertexColumn(vertex); + vtkm::Id slice = this->vertexSlice(meshIndex); + vtkm::Id row = this->vertexRow(meshIndex); + vtkm::Id col = this->vertexColumn(meshIndex); vtkm::Int8 boundaryConfig = ((slice == 0) ? frontBit : 0) | ((slice == this->nSlices - 1) ? backBit : 0) | ((col == 0) ? leftBit : 0) | ((col == this->nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) | @@ -241,11 +256,11 @@ public: // only consider valid edges if (!(boundaryConfig & edgeBoundaryDetectionMasksPortal.Get(nbrNo))) { - vtkm::Id nbrIndex = sortIndicesPortal.Get(GetNeighbourIndex(vertex, nbrNo)); + vtkm::Id nbrSortIndex = GetNeighbourIndex(sortIndex, nbrNo); // explicit test allows reversal between join and split trees - if (getMax ? (nbrIndex > sortIndex) : (nbrIndex < sortIndex)) + if (getMax ? 
(nbrSortIndex > sortIndex) : (nbrSortIndex < sortIndex)) { // valid edge and outbound - return nbrIndex; + return nbrSortIndex; } // valid edge and outbound } } // per edge @@ -255,17 +270,17 @@ public: VTKM_EXEC inline vtkm::Pair GetNeighbourComponentsMaskAndDegree( - vtkm::Id vertex, + vtkm::Id sortIndex, bool getMaxComponents) const { using namespace m3d_marchingcubes; // GetNeighbourComponentsMaskAndDegree() // convert to a sort index - vtkm::Id sortIndex = sortIndicesPortal.Get(vertex); + vtkm::Id meshIndex = sortOrderPortal.Get(sortIndex); - vtkm::Id slice = this->vertexSlice(vertex); - vtkm::Id row = this->vertexRow(vertex); - vtkm::Id col = this->vertexColumn(vertex); + vtkm::Id slice = this->vertexSlice(meshIndex); + vtkm::Id row = this->vertexRow(meshIndex); + vtkm::Id col = this->vertexColumn(meshIndex); vtkm::Int8 boundaryConfig = ((slice == 0) ? frontBit : 0) | ((slice == this->nSlices - 1) ? backBit : 0) | ((col == 0) ? leftBit : 0) | ((col == this->nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) | @@ -279,9 +294,9 @@ public: { if (!(boundaryConfig & edgeBoundaryDetectionMasksPortal.Get(edgeNo))) { - vtkm::Id nbrIndex = sortIndicesPortal.Get(GetNeighbourIndex(vertex, edgeNo)); + vtkm::Id nbrSortIndex = GetNeighbourIndex(sortIndex, edgeNo); - if (getMaxComponents ? (sortIndex < nbrIndex) : (sortIndex > nbrIndex)) + if (getMaxComponents ? (sortIndex < nbrSortIndex) : (sortIndex > nbrSortIndex)) { parentId[edgeNo] = edgeNo; } @@ -373,6 +388,7 @@ public: private: sortIndicesPortalType sortIndicesPortal; + sortIndicesPortalType sortOrderPortal; edgeBoundaryDetectionMasksPortalType edgeBoundaryDetectionMasksPortal; cubeVertexPermutationsPortalType cubeVertexPermutationsPortal; linkVertexConnectionsPortalType linkVertexConnectionsSixPortal; diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ArcComparator.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ArcComparator.h new file mode 100644 index 000000000..d80724ea4 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ArcComparator.h @@ -0,0 +1,144 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. 
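Both 3D mesh structures above guard each incident edge with the same boundary-configuration test before comparing sort indices. A hedged sketch of that test in isolation; the actual bit constants live in the m3d_freudenthal / m3d_marchingcubes namespaces and the bottom-row term is assumed symmetric to the top-row one, so the values below are illustrative only:

#include <vtkm/Types.h>

// Illustrative bit assignments; the real constants are defined per mesh type.
constexpr vtkm::Int8 frontBit = 1, backBit = 2, leftBit = 4,
                     rightBit = 8, topBit = 16, bottomBit = 32;

inline bool EdgeStaysInsideGrid(vtkm::Id slice, vtkm::Id row, vtkm::Id col,
                                vtkm::Id nSlices, vtkm::Id nRows, vtkm::Id nCols,
                                vtkm::Int8 edgeBoundaryDetectionMask)
{
  vtkm::Int8 boundaryConfig = ((slice == 0) ? frontBit : 0) |
    ((slice == nSlices - 1) ? backBit : 0) | ((col == 0) ? leftBit : 0) |
    ((col == nCols - 1) ? rightBit : 0) | ((row == 0) ? topBit : 0) |
    ((row == nRows - 1) ? bottomBit : 0);
  // an edge is considered only if it crosses none of the boundaries this vertex sits on
  return !(boundaryConfig & edgeBoundaryDetectionMask);
}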
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_arc_comparator_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_arc_comparator_h + +#include +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +// comparator used for initial sort of data values +template +class ArcComparatorImpl +{ +public: + using IdPortalType = + typename vtkm::cont::ArrayHandle::template ExecutionTypes::PortalConst; + + // constructor - takes vectors as parameters + VTKM_CONT + ArcComparatorImpl(const IdArrayType& ct_arcs) + { // constructor + arcsPortal = ct_arcs.PrepareForInput(DeviceAdapter()); + } // constructor + + // () operator - gets called to do comparison + VTKM_EXEC + bool operator()(const vtkm::Id& x, const vtkm::Id& y) const + { // operator() + vtkm::Id from1 = (x % 2 == 0) ? x / 2 : maskedIndex(arcsPortal.Get(x / 2)); + vtkm::Id from2 = (y % 2 == 0) ? y / 2 : maskedIndex(arcsPortal.Get(y / 2)); + if (from1 != from2) + { + return from1 < from2; + } + else + { + vtkm::Id to1 = (x % 2 == 0) ? maskedIndex(arcsPortal.Get(x / 2)) : x / 2; + vtkm::Id to2 = (y % 2 == 0) ? 
maskedIndex(arcsPortal.Get(y / 2)) : y / 2; + return to1 < to2; + } + } // operator() + +private: + IdPortalType arcsPortal; + +}; // ArcComparator + +class ArcComparator : public vtkm::cont::ExecutionObjectBase +{ +public: + // constructor - takes vectors as parameters + VTKM_CONT + ArcComparator(const IdArrayType& arcs) + : Arcs(arcs) + { + } + + template + VTKM_CONT ArcComparatorImpl PrepareForExecution(DeviceAdapter) const + { + return ArcComparatorImpl(this->Arcs); + } + +private: + IdArrayType Arcs; +}; // EdgePeakComparator + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CMakeLists.txt b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CMakeLists.txt new file mode 100644 index 000000000..c6d4b5e22 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CMakeLists.txt @@ -0,0 +1,80 @@ +##============================================================================ +## Copyright (c) Kitware, Inc. +## All rights reserved. +## See LICENSE.txt for details. +## This software is distributed WITHOUT ANY WARRANTY; without even +## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +## PURPOSE. See the above copyright notice for more information. +## +## Copyright 2016 Sandia Corporation. +## Copyright 2016 UT-Battelle, LLC. +## Copyright 2016 Los Alamos National Security. +## +## Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +## the U.S. Government retains certain rights in this software. +## +## Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +## Laboratory (LANL), the U.S. Government retains certain rights in +## this software. +##============================================================================ +## Copyright (c) 2018, The Regents of the University of California, through +## Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +## from the U.S. Dept. of Energy). All rights reserved. +## +## Redistribution and use in source and binary forms, with or without modification, +## are permitted provided that the following conditions are met: +## +## (1) Redistributions of source code must retain the above copyright notice, this +## list of conditions and the following disclaimer. +## +## (2) Redistributions in binary form must reproduce the above copyright notice, +## this list of conditions and the following disclaimer in the documentation +## and/or other materials provided with the distribution. +## +## (3) Neither the name of the University of California, Lawrence Berkeley National +## Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +## used to endorse or promote products derived from this software without +## specific prior written permission. +## +## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
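The ArcComparatorImpl added above sorts directed edge ends: entry 2*i stands for the 'from' end of arc i and entry 2*i+1 for its 'to' end, and entries are ordered by (from, to). A plain-array sketch of the decode it performs, assuming maskedIndex() from vtkm/worklet/contourtree_augmented/Types.h is in scope to strip the flag bits from the arc target:

#include <vector>
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>

using vtkm::worklet::contourtree_augmented::maskedIndex;

struct EdgeEnds
{
  vtkm::Id from, to;
};

// decode the (from, to) pair represented by directed edge end x
inline EdgeEnds DecodeEdgeEnd(const std::vector<vtkm::Id>& arcs, vtkm::Id x)
{
  vtkm::Id arc = x / 2;
  vtkm::Id target = maskedIndex(arcs[arc]);
  // even entries point away from the arc's source, odd entries away from its target
  return (x % 2 == 0) ? EdgeEnds{ arc, target } : EdgeEnds{ target, arc };
}

// the ordering the comparator applies: primarily by 'from', then by 'to'
inline bool ArcLess(const std::vector<vtkm::Id>& arcs, vtkm::Id x, vtkm::Id y)
{
  EdgeEnds a = DecodeEdgeEnd(arcs, x);
  EdgeEnds b = DecodeEdgeEnd(arcs, y);
  return (a.from != b.from) ? (a.from < b.from) : (a.to < b.to);
}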
+## IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +## OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +## OF THE POSSIBILITY OF SUCH DAMAGE. +## +##============================================================================= +## +## This code is an extension of the algorithm presented in the paper: +## Parallel Peak Pruning for Scalable SMP Contour Tree Computation +## Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +## Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +## (LDAV), October 2016, Baltimore, Maryland. +## +## The PPP2 algorithm and software were jointly developed by +## Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +## Oliver Ruebel (LBNL) +##============================================================================== + +set(headers + ArcComparator.h + CombinedVectorDifferentFromNext.h + CompressNeighboursWorklet.h + ComputeMaxNeighboursWorklet.h + FindStartIndexWorklet.h + ReplaceArcNumWithToVertexWorklet.h + CombinedVector.h + CombinedSimulatedSimplicityIndexComparator.h + InitToCombinedSortOrderArraysWorklet.h + CombinedOtherStartIndexNNeighboursWorklet.h + SubtractAssignWorklet.h + UpdateCombinedNeighboursWorklet.h + MergeCombinedOtherStartIndexWorklet.h + ) + +#----------------------------------------------------------------------------- +vtkm_declare_headers(${headers}) diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedOtherStartIndexNNeighboursWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedOtherStartIndexNNeighboursWorklet.h new file mode 100644 index 000000000..82f70a5ce --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedOtherStartIndexNNeighboursWorklet.h @@ -0,0 +1,127 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_combined_other_start_index_nneighbours_worklet_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_combined_other_start_index_nneighbours_worklet_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + +class CombinedOtherStartIndexNNeighboursWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature( + FieldIn nNeighbours, // (input) nNeighbours + FieldIn otherToCombinedSortOrder, // (input) otherToCombinedSortOrder + WholeArrayInOut combinedNNeighbours, // (input/output) combinedNNeighbours + WholeArrayInOut combinedOtherStartIndex // (input/output) combinedOthertStartIndex + ); + typedef void ExecutionSignature(_1, _2, _3, _4); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + CombinedOtherStartIndexNNeighboursWorklet() {} + + + template + VTKM_EXEC void operator()(const vtkm::Id& nneighboursVal, + const vtkm::Id& otherToCombinedSortOrderVal, + const InOutPortalType combinedNNeighboursPortal, + const InOutPortalType combinedOtherStartIndexPortal) const + { + combinedOtherStartIndexPortal.Set(otherToCombinedSortOrderVal, + combinedNNeighboursPortal.Get(otherToCombinedSortOrderVal)); + combinedNNeighboursPortal.Set(otherToCombinedSortOrderVal, + combinedNNeighboursPortal.Get(otherToCombinedSortOrderVal) + + nneighboursVal); + + // Implements in reference code + // #pragma omp parallel for + // The following is save since each global index is only written by one entry + // for (indexVector::size_type vtx = 0; vtx < nNeighbours.size(); ++vtx) + // { + // combinedOtherStartIndex[otherToCombinedSortOrder[vtx]] = combinedNNeighbours[otherToCombinedSortOrder[vtx]]; + // combinedNNeighbours[otherToCombinedSortOrder[vtx]] += nNeighbours[vtx]; + // } + } + + + +}; // CombinedOtherStartIndexNNeighboursWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedSimulatedSimplicityIndexComparator.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedSimulatedSimplicityIndexComparator.h new file mode 100644 index 000000000..2c3d95fa2 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedSimulatedSimplicityIndexComparator.h @@ -0,0 +1,161 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. 
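The CombinedOtherStartIndexNNeighboursWorklet above is a direct parallelisation of the serial loop quoted in its comment: for each vertex of the 'other' mesh it first records, at that vertex's slot in the combined order, how many combined neighbours are already present, then adds the other mesh's neighbour count on top. A serial sketch with std::vector standing in for the whole-array portals:

#include <vector>
#include <vtkm/Types.h>

void CombineNeighbourCounts(const std::vector<vtkm::Id>& nNeighbours,
                            const std::vector<vtkm::Id>& otherToCombinedSortOrder,
                            std::vector<vtkm::Id>& combinedNNeighbours,
                            std::vector<vtkm::Id>& combinedOtherStartIndex)
{
  for (std::size_t vtx = 0; vtx < nNeighbours.size(); ++vtx)
  {
    vtkm::Id pos = otherToCombinedSortOrder[vtx];
    // remember where the 'other' mesh's neighbours will start for this combined vertex ...
    combinedOtherStartIndex[pos] = combinedNNeighbours[pos];
    // ... and grow the combined neighbour count by the other mesh's contribution
    combinedNNeighbours[pos] += nNeighbours[vtx];
  }
}

Each combined slot is written by exactly one input vertex, which is why the parallel worklet can update the two whole arrays without synchronisation.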
+//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_combined_simulated_simplicity_index_comparator_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_combined_simulated_simplicity_index_comparator_h + +#include +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +// comparator used for initial sort of data values +template +class CombinedSimulatedSimplicityIndexComparator +{ +public: + typedef CombinedVector CombinedDataVector; + typedef CombinedVector CombinedIndexVector; + + VTKM_CONT + CombinedSimulatedSimplicityIndexComparator(const CombinedDataVector& val, + const CombinedIndexVector& idx) + : values(val) + , indices(idx) + { + } + + VTKM_EXEC_CONT + bool operator()(vtkm::Id i, vtkm::Id j) const + { // operator() + // get values + FieldType val_i = this->values[i]; + FieldType val_j = this->values[j]; + + // value comparison + if (val_i < val_j) + return true; + if (val_j < val_i) + return false; + + // get indices + vtkm::Id idx_i = this->indices[i]; + vtkm::Id idx_j = this->indices[j]; + // index comparison for simulated simplicity + if (idx_i < idx_j) + return true; + if (idx_j < idx_i) + return false; + + // fallback - always false + return false; + + /** //Original code + { // operator() + // get values + dataType val_i = values[i]; + dataType val_j = values[j]; + + // value comparison + if (val_i < val_j) + return true; + if (val_j < val_i) + return false; + + // get indices + indexType idx_i = indices[i]; + indexType idx_j = indices[j]; + // index comparison for simulated simplicity + if (idx_i < idx_j) + return true; + if (idx_j < idx_i) + return false; + + // fallback - always false + return false; + } // operator() + + */ + + + } // operator() + +private: + const CombinedDataVector& values; + const CombinedIndexVector& indices; +}; + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedVector.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedVector.h new file mode 100644 index 000000000..fce4a35b2 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CombinedVector.h @@ -0,0 +1,139 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. 
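The CombinedSimulatedSimplicityIndexComparator above is the usual simulated-simplicity predicate: order by data value first and break ties by global index, so the combined sort becomes a strict total order even with repeated values. The same decision, reduced to plain arguments as a sketch:

#include <vtkm/Types.h>

template <typename FieldType>
inline bool SimulatedSimplicityLess(FieldType valI, vtkm::Id idxI,
                                    FieldType valJ, vtkm::Id idxJ)
{
  if (valI < valJ) return true;  // primary key: the data value
  if (valJ < valI) return false;
  return idxI < idxJ;            // tie-break: the global index (equal indices compare false)
}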
+//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_combined_vector_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_combined_vector_h + +#include +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +template +class CombinedVector +{ +public: + typedef typename vtkm::cont::ArrayHandle::template ExecutionTypes::PortalConst + TArrayPortalType; + VTKM_CONT + CombinedVector(const vtkm::cont::ArrayHandle& thisVector, + const vtkm::cont::ArrayHandle& otherVector) + { + this->thisVectorPortal = thisVector.PrepareForInput(DeviceAdapter()); + this->otherVectorPortal = otherVector.PrepareForInput(DeviceAdapter()); + } + + // See contourtree_augmented/Types.h for definitions of isThis() and CV_OTHER_FLAG + + VTKM_EXEC_CONT + T operator[](vtkm::Id idx) const + { + return isThis(idx) ? 
this->thisVectorPortal.Get(maskedIndex(idx)) + : this->otherVectorPortal.Get(maskedIndex(idx)); + //std::cout<<"CV: idx="< +#include +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + +// transform functor to compute if element i is different from element i+1 in an arrays. The resulting array should hence +// be 1 element shorter than the input arrays +template +class CombinedVectorDifferentFromNext +{ +public: + typedef typename vtkm::worklet::contourtree_augmented::IdArrayType::template ExecutionTypes< + DeviceAdapter>::PortalConst sortOrderPortalType; + + VTKM_EXEC_CONT + CombinedVectorDifferentFromNext() + : dataArray() + { + } + + VTKM_CONT + CombinedVectorDifferentFromNext(CombinedVector* inDataArray, + const IdArrayType& sortOrder) + : dataArray(inDataArray) + { + overallSortOrderPortal = sortOrder.PrepareForInput(DeviceAdapter()); + } + + VTKM_EXEC_CONT + vtkm::Id operator()(vtkm::Id i) const + { + vtkm::Id currGlobalIdx = (*dataArray)[overallSortOrderPortal.Get(i)]; + vtkm::Id nextGlobalIdx = (*dataArray)[overallSortOrderPortal.Get(i + 1)]; + return (currGlobalIdx != nextGlobalIdx) ? 1 : 0; + } + +private: + sortOrderPortalType overallSortOrderPortal; + const CombinedVector* dataArray; +}; + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CompressNeighboursWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CompressNeighboursWorklet.h new file mode 100644 index 000000000..e06a99322 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/CompressNeighboursWorklet.h @@ -0,0 +1,126 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
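CombinedVector, added above, presents the two meshes' arrays as one logical array: indices carrying CV_OTHER_FLAG are looked up in the 'other' array, all others in 'this' array, after maskedIndex() strips the flag. A plain-vector sketch of that dispatch, assuming isThis() and maskedIndex() from vtkm/worklet/contourtree_augmented/Types.h are in scope:

#include <vector>
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>

using namespace vtkm::worklet::contourtree_augmented;

template <typename T>
inline T CombinedGet(const std::vector<T>& thisValues,
                     const std::vector<T>& otherValues,
                     vtkm::Id idx)
{
  // isThis(idx): CV_OTHER_FLAG not set; maskedIndex(idx): position within the chosen array
  return isThis(idx) ? thisValues[maskedIndex(idx)] : otherValues[maskedIndex(idx)];
}

CombinedVectorDifferentFromNext builds on this by comparing consecutive entries of the combined sort order and returning 1 where they differ.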
+// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_compress_neighbours_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_compress_neighbours_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +class CompressNeighboursWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature(FieldIn arcs, // (input) arcs + FieldIn arcTargetIndex, // (input) arcTargetIndex + WholeArrayOut neighbours); // (output) neighbours + typedef void ExecutionSignature(_1, InputIndex, _2, _3); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + CompressNeighboursWorklet() {} + + template + VTKM_EXEC void operator()(vtkm::Id& to, + vtkm::Id from, + vtkm::Id& arcTargetIndexFrom, + const OutFieldPortalType& neighboursPortal) const + { + if (!noSuchElement(to)) + { + neighboursPortal.Set(2 * arcTargetIndexFrom + 0, 2 * from + 0); + neighboursPortal.Set(2 * arcTargetIndexFrom + 1, 2 * from + 1); + } + + // In serial this worklet implements the following operation + // #pragma omp parallel for + // for (indexVector::size_type from = 0; from < arcs.size(); ++from) + // { + // indexType to = arcs[from]; + // if (!noSuchElement(to)) + // { + // assert(maskedIndex(to) != from); + // neighbours[2*arcTargetIndex[from]+0] = 2*from+0; + // neighbours[2*arcTargetIndex[from]+1] = 2*from+1; + // } + } + + +}; // ComputeMaxNeighboursWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ComputeMaxNeighboursWorklet.h 
b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ComputeMaxNeighboursWorklet.h new file mode 100644 index 000000000..389bdabdf --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ComputeMaxNeighboursWorklet.h @@ -0,0 +1,139 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. 
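CompressNeighboursWorklet, added just above, packs each real arc into the neighbours array as its two directed edge ends, 2*from and 2*from+1, at the slots reserved by arcTargetIndex; arcs flagged as 'no such element' contribute nothing. A serial sketch mirroring the reference loop quoted in the worklet, assuming noSuchElement() from vtkm/worklet/contourtree_augmented/Types.h and std::vector in place of the portals:

#include <vector>
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>

using vtkm::worklet::contourtree_augmented::noSuchElement;

void CompressNeighbours(const std::vector<vtkm::Id>& arcs,
                        const std::vector<vtkm::Id>& arcTargetIndex,
                        std::vector<vtkm::Id>& neighbours)
{
  for (std::size_t from = 0; from < arcs.size(); ++from)
  {
    vtkm::Id to = arcs[from];
    if (noSuchElement(to))
    {
      continue; // arcs without a target add no edge ends
    }
    neighbours[2 * arcTargetIndex[from] + 0] = 2 * static_cast<vtkm::Id>(from) + 0;
    neighbours[2 * arcTargetIndex[from] + 1] = 2 * static_cast<vtkm::Id>(from) + 1;
  }
}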
+// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_compute_max_neighbour_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_compute_max_neighbour_worklet_h + +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +// Worklet to update all of the edges so that the far end resets to the result of the ascent in the previous step +class ComputeMaxNeighboursWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature(WholeArrayIn firstNeighbour, // (input) firstNeighbour + WholeArrayOut nNeighbours); // (output) + typedef void ExecutionSignature(_1, InputIndex, _2); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + ComputeMaxNeighboursWorklet(const vtkm::Id neighboursSize) + : NeighboursSize(neighboursSize) + { + } + + template + VTKM_EXEC void operator()(const InFieldPortalType& firstNeighbourPortal, + vtkm::Id startVtxNo, + const OutFieldPortalType& nNeighboursPortal) const + { + if (startVtxNo < firstNeighbourPortal.GetNumberOfValues() - 1) + { + nNeighboursPortal.Set(startVtxNo, + firstNeighbourPortal.Get(startVtxNo + 1) - + firstNeighbourPortal.Get(startVtxNo)); + } + else + { + nNeighboursPortal.Set(startVtxNo, + NeighboursSize - + firstNeighbourPortal.Get(nNeighboursPortal.GetNumberOfValues() - 1)); + } + + // In serial this worklet implements the following operation + // #pragma omp parallel for + // for (indexVector::size_type startVtxNo = 0; startVtxNo < firstNeighbour.size()-1; ++startVtxNo) + // { + // nNeighbours[startVtxNo] = firstNeighbour[startVtxNo+1] - firstNeighbour[startVtxNo]; + // } + // nNeighbours[nNeighbours.size() - 1] = neighbours.size() - firstNeighbour[nNeighbours.size() - 1]; + // + // // NOTE: In the above we change the loop to run for the full length of the array and instead + // // then do a conditional assign for the last element directly within the loop, rather + // // than shortcutting the loop and doing a special assigne after the loop. This allows + // // us to process all elements on the device in parallel rather than having to pull + // // data back into the control area to do the last assignement + } + +private: + vtkm::Id NeighboursSize; + + +}; // ComputeMaxNeighboursWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/FindStartIndexWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/FindStartIndexWorklet.h new file mode 100644 index 000000000..424adeefd --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/FindStartIndexWorklet.h @@ -0,0 +1,139 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
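ComputeMaxNeighboursWorklet above derives each vertex's neighbour count from the CSR-style firstNeighbour offsets: interior vertices take the difference of consecutive offsets and the last vertex closes against the total size of the neighbours array, with the boundary case folded into the loop so every element can be handled in parallel. The equivalent serial computation as a sketch:

#include <vector>
#include <vtkm/Types.h>

void CountNeighbours(const std::vector<vtkm::Id>& firstNeighbour,
                     vtkm::Id neighboursSize,
                     std::vector<vtkm::Id>& nNeighbours)
{
  for (std::size_t v = 0; v < firstNeighbour.size(); ++v)
  {
    nNeighbours[v] = (v + 1 < firstNeighbour.size())
      ? firstNeighbour[v + 1] - firstNeighbour[v] // interior: difference of start offsets
      : neighboursSize - firstNeighbour[v];       // last vertex: up to the end of neighbours
  }
}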
+// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_find_start_index_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_find_start_index_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +class FindStartIndexWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature(WholeArrayIn neighbours, // (input) neighbours + WholeArrayIn arcs, // (input) arcs + WholeArrayOut firstNeighbour); // (output) firstNeighbours + typedef void ExecutionSignature(_1, InputIndex, _2, _3); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + FindStartIndexWorklet() {} + + template + VTKM_EXEC void operator()(const InFieldPortalType& neighboursPortal, + vtkm::Id sortedArcNo, + const InFieldPortalType& arcsPortal, + const OutFieldPortalType& firstNeighbourPortal) const + { + if (sortedArcNo > 0) + { + vtkm::Id prevFrom = (neighboursPortal.Get(sortedArcNo - 1) % 2 == 0) + ? neighboursPortal.Get(sortedArcNo - 1) / 2 + : maskedIndex(arcsPortal.Get(neighboursPortal.Get(sortedArcNo - 1) / 2)); + vtkm::Id currFrom = (neighboursPortal.Get(sortedArcNo) % 2 == 0) + ? neighboursPortal.Get(sortedArcNo) / 2 + : maskedIndex(arcsPortal.Get(neighboursPortal.Get(sortedArcNo) / 2)); + if (currFrom != prevFrom) + { + firstNeighbourPortal.Set(currFrom, sortedArcNo); + } + } + else // sortedArcNo == 0 + { + firstNeighbourPortal.Set(0, 0); + } + + // In serial this worklet implements the following operation + // #pragma omp parallel for + // for (indexVector::size_type sortedArcNo = 1; sortedArcNo < neighbours.size(); ++sortedArcNo) + // { + // indexType prevFrom = (neighbours[sortedArcNo-1] % 2 == 0) ? neighbours[sortedArcNo-1]/2 : maskedIndex(arcs[neighbours[sortedArcNo-1]/2]); + // indexType currFrom = (neighbours[sortedArcNo ] % 2 == 0) ? neighbours[sortedArcNo ]/2 : maskedIndex(arcs[neighbours[sortedArcNo ]/2]); + // if (currFrom != prevFrom) + // { + // assert(currFrom < firstNeighbour.size()); + // firstNeighbour[currFrom] = sortedArcNo; + // } + // } + } + + +}; // ComputeMaxNeighboursWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/InitToCombinedSortOrderArraysWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/InitToCombinedSortOrderArraysWorklet.h new file mode 100644 index 000000000..643997e33 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/InitToCombinedSortOrderArraysWorklet.h @@ -0,0 +1,128 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. 
Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_init_to_combined_sort_order_arrays_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_init_to_combined_sort_order_arrays_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +class InitToCombinedSortOrderArraysWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature( + FieldIn overallSortIndex, // (input) overallSortIndex + FieldIn overallSortOrder, // (input) overallSortOrder + WholeArrayOut thisToCombinedSortOrder, // (output) thisToCombinedSortOrder + WholeArrayOut otherToCombinedSortOrder // (output) otherToCombinedSortOrder + ); + typedef void ExecutionSignature(_1, _2, _3, _4); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + InitToCombinedSortOrderArraysWorklet() {} + + template + VTKM_EXEC void operator()(const vtkm::Id& overallSortIndexVal, + const vtkm::Id& overallSortOrderVal, + const OutFieldPortalType& thisToCombinedSortOrderPortal, + const OutFieldPortalType& otherToCombinedSortOrderPortal) const + { + if (isThis(overallSortOrderVal)) + { + thisToCombinedSortOrderPortal.Set(maskedIndex(overallSortOrderVal), overallSortIndexVal); + } + else + { + otherToCombinedSortOrderPortal.Set(maskedIndex(overallSortOrderVal), overallSortIndexVal); + } + + // In serial this worklet implements the following operation + // for (indexVector::size_type i = 0; i < overallSortOrder.size(); ++i) + // { + // if (CombinedIndexVector::isThis(overallSortOrder[i])) + // thisToCombinedSortOrder[maskedIndex(overallSortOrder[maskedIndex(i)])] = overallSortIndex[i]; + // else + // otherToCombinedSortOrder[maskedIndex(overallSortOrder[maskedIndex(i)])] = overallSortIndex[i]; + // } + } + +}; // InitToCombinedSortOrderArraysWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/MergeCombinedOtherStartIndexWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/MergeCombinedOtherStartIndexWorklet.h new file mode 100644 index 000000000..6799ed5d5 --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/MergeCombinedOtherStartIndexWorklet.h @@ -0,0 +1,181 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. 
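
For reference, the scatter performed by InitToCombinedSortOrderArraysWorklet above can be written serially as the sketch below. This is an illustration only, not code from this patch: the IS_OTHER_BIT constant and the trivial maskedIndex() are hypothetical stand-ins for the flag handling provided by Types.h and the CombinedVector helpers elsewhere in this change.

// Serial sketch of the InitToCombinedSortOrderArraysWorklet scatter (illustration only).
#include <cstddef>
#include <vector>

using Id = long long;
constexpr Id IS_OTHER_BIT = Id(1) << 62;                   // assumed "belongs to the other mesh" flag bit
inline Id maskedIndex(Id v) { return v & ~IS_OTHER_BIT; }  // strip the flag, keep the local index

void initToCombinedSortOrderArrays(const std::vector<Id>& overallSortIndex,
                                   const std::vector<Id>& overallSortOrder,
                                   std::vector<Id>& thisToCombinedSortOrder,
                                   std::vector<Id>& otherToCombinedSortOrder)
{
  // For each position i of the combined sort order, record the combined sort index
  // under the vertex's local index, in the table of whichever mesh the entry came from.
  for (std::size_t i = 0; i < overallSortOrder.size(); ++i)
  {
    const Id entry = overallSortOrder[i];
    if (entry & IS_OTHER_BIT)
      otherToCombinedSortOrder[maskedIndex(entry)] = overallSortIndex[i];
    else
      thisToCombinedSortOrder[maskedIndex(entry)] = overallSortIndex[i];
  }
}

Because each combined-order position writes to exactly one slot of exactly one table, the loop has no write conflicts and maps directly onto a WorkletMapField over the combined sort order.
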
+//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_merge_combined_other_start_index_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_merge_combined_other_start_index_worklet_h + +#include +#include +#include +#include +#include + +// STL +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + +template +class MergeCombinedOtherStartIndexWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature( + WholeArrayInOut combinedOtherStartIndex, // (input, output and input domain) + WholeArrayInOut combinedNeighbours, // (input, output) + WholeArrayIn combinedFirstNeighbour // (input) + ); + typedef void ExecutionSignature(InputIndex, _1, _2, _3); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + MergeCombinedOtherStartIndexWorklet() {} + + + template + VTKM_EXEC void operator()(const vtkm::Id vtx, + const InOutFieldPortalType combinedOtherStartIndexPortal, + const InOutFieldPortalType combinedNeighboursPortal, + const InFieldPortalType combinedFirstNeighbourPortal) const + { + // TODO Replace this to not use stl algorithms inside the worklet + if (combinedOtherStartIndexPortal.Get(vtx)) // Needs merge + { + vtkm::cont::ArrayPortalToIterators combinedNeighboursIterators( + combinedNeighboursPortal); + auto neighboursBegin = + combinedNeighboursIterators.GetBegin() + combinedFirstNeighbourPortal.Get(vtx); + auto neighboursEnd = (vtx < combinedFirstNeighbourPortal.GetNumberOfValues() - 1) + ? combinedNeighboursIterators.GetBegin() + combinedFirstNeighbourPortal.Get(vtx + 1) + : combinedNeighboursIterators.GetEnd(); + std::inplace_merge( + neighboursBegin, neighboursBegin + combinedOtherStartIndexPortal.Get(vtx), neighboursEnd); + auto it = std::unique(neighboursBegin, neighboursEnd); + combinedOtherStartIndexPortal.Set(vtx, neighboursEnd - it); + while (it != neighboursEnd) + { + *(it++) = NO_SUCH_ELEMENT; + } + } + + /* Reference code implemented by this worklet + + #pragma omp parallel for + for (indexVector::size_type vtx = 0; vtx < combinedFirstNeighbour.size(); ++vtx) + { + if (combinedOtherStartIndex[vtx]) // Needs merge + { + indexVector::iterator neighboursBegin = combinedNeighbours.begin() + combinedFirstNeighbour[vtx]; + indexVector::iterator neighboursEnd = (vtx < combinedFirstNeighbour.size() - 1) ? combinedNeighbours.begin() + combinedFirstNeighbour[vtx+1] : combinedNeighbours.end(); + std::inplace_merge(neighboursBegin, neighboursBegin + combinedOtherStartIndex[vtx], neighboursEnd); + indexVector::iterator it = std::unique(neighboursBegin, neighboursEnd); + combinedOtherStartIndex[vtx] = neighboursEnd - it; + while (it != neighboursEnd) *(it++) = NO_SUCH_ELEMENT; + } + }*/ + + + /* Attempt at porting the code without using STL + if (combinedOtherStartIndexPortal.Get(vtx)) + { + vtkm::Id combinedNeighboursBeginIndex = combinedFirstNeighbourPortal.Get(vtx); + vtkm::Id combinedNeighboursEndIndex = (vtx < combinedFirstNeighbourPortal.GetNumberOfValues() - 1) ? 
combinedFirstNeighbourPortal.Get(vtx+1) : combinedNeighboursPortal.GetNumberOfValues() -1; + vtkm::Id numSelectedVals = combinedNeighboursEndIndex- combinedNeighboursBeginIndex + 1; + vtkm::cont::ArrayHandleCounting selectSubRangeIndex (combinedNeighboursBeginIndex, 1, numSelectedVals); + vtkm::cont::ArrayHandlePermutation, IdArrayType> selectSubRangeArrayHandle( + selectSubRangeIndex, // index array to select the range of values + combinedNeighboursPortal // value array to select from. // TODO this won't work because this is an ArrayPortal not an ArrayHandle + ); + vtkm::cont::DeviceAdapterAlgorithm::Sort(selectSubRangeArrayHandle); + vtkm::Id numUniqueVals = 1; + for(vtkm::Id i=combinedNeighboursBeginIndex; i<=combinedNeighboursEndIndex; i++){ + if (combinedNeighboursPortal.Get(i) == combinedNeighboursPortal.Get(i-1)) + { + combinedNeighboursPortal.Set(i, (vtkm::Id) NO_SUCH_ELEMENT); + } + else + { + numUniqueVals += 1; + } + } + combinedOtherStartIndexPortal.Set(vtx, combinedNeighboursEndIndex - (combinedNeighboursBeginIndex + numUniqueVals + 1)); + } + */ + } + + +}; // MergeCombinedOtherStartIndexWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ReplaceArcNumWithToVertexWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ReplaceArcNumWithToVertexWorklet.h new file mode 100644 index 000000000..af0cbf63f --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/ReplaceArcNumWithToVertexWorklet.h @@ -0,0 +1,112 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. 
of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_replace_arc_num_with_to_vertex_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_replace_arc_num_with_to_vertex_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + + +class ReplaceArcNumWithToVertexWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature(FieldInOut neighbours, // (input) neighbours + WholeArrayIn arcs); // (input) arcs + typedef void ExecutionSignature(_1, _2); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + ReplaceArcNumWithToVertexWorklet() {} + + template + VTKM_EXEC void operator()(vtkm::Id& neighboursVal, const InFieldPortalType& arcsPortal) const + { + neighboursVal = + (neighboursVal % 2 == 0) ? maskedIndex(arcsPortal.Get(neighboursVal / 2)) : neighboursVal / 2; + // In serial this worklet implements + // for (indexVector::size_type sortedArcNo = 0; sortedArcNo < neighbours.size(); ++sortedArcNo) + // { + // neighbours[sortedArcNo] = (neighbours[sortedArcNo] % 2 == 0) ? maskedIndex(arcs[neighbours[sortedArcNo]/2]) : neighbours[sortedArcNo]/2; + // } + } + + +}; // ComputeMaxNeighboursWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/SubtractAssignWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/SubtractAssignWorklet.h new file mode 100644 index 000000000..4bf2cf3bb --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/SubtractAssignWorklet.h @@ -0,0 +1,106 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. 
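
Both FindStartIndexWorklet and ReplaceArcNumWithToVertexWorklet above rely on the same packed representation of the sorted arc list: each neighbours[] entry n encodes an arc index n/2 plus a direction bit n%2, where arcs[v] stores the target of the arc that starts at vertex v. The decode helpers below are an illustrative sketch, not code from this patch; the trivial maskedIndex() is a stand-in for the flag-stripping helper in Types.h.

// Illustrative decode of the packed neighbour entries (n = 2*arc + direction).
#include <vector>

using Id = long long;
inline Id maskedIndex(Id v) { return v; }  // stand-in: the real helper strips flag bits (Types.h)

// direction 0: the edge is stored as (arc -> arcs[arc]); direction 1: the same edge reversed.
inline Id arcFrom(Id n, const std::vector<Id>& arcs)
{
  return (n % 2 == 0) ? n / 2 : maskedIndex(arcs[n / 2]);
}

inline Id arcTo(Id n, const std::vector<Id>& arcs)
{
  return (n % 2 == 0) ? maskedIndex(arcs[n / 2]) : n / 2;
}

FindStartIndexWorklet compares arcFrom() of consecutive entries of the sorted list to detect where a new "from" vertex begins and records that position in firstNeighbour; ReplaceArcNumWithToVertexWorklet then overwrites each entry with its arcTo() value, leaving a plain adjacency list.
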
+// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_subtract_assign_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_subtract_assign_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + +class SubtractAssignWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature(FieldInOut leftArray, // (input) n + FieldIn rightArray); // (input) arcs + typedef void ExecutionSignature(_1, _2); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + SubtractAssignWorklet() {} + + + template + VTKM_EXEC void operator()(InValType& leftVal, const InValType& rightVal) const + { + leftVal -= rightVal; + } + + +}; // SubtractAssignWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/UpdateCombinedNeighboursWorklet.h b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/UpdateCombinedNeighboursWorklet.h new file mode 100644 index 000000000..3f865f2bd --- /dev/null +++ b/vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/UpdateCombinedNeighboursWorklet.h @@ -0,0 +1,155 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. +//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
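
SubtractAssignWorklet above is a generic element-wise "left[i] -= right[i]" over two equally sized arrays. A minimal invocation sketch follows, assuming the usual DispatcherMapField pattern; the actual call site in ContourTreeMesh.h is not shown in this hunk, so the function and argument names here are placeholders.

// Minimal usage sketch (illustration only, not code from this patch).
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/worklet/DispatcherMapField.h>
#include <vtkm/worklet/contourtree_augmented/mesh_dem_meshtypes/contourtreemesh/SubtractAssignWorklet.h>

void subtractAssign(vtkm::cont::ArrayHandle<vtkm::Id>& left,        // updated in place (FieldInOut)
                    const vtkm::cont::ArrayHandle<vtkm::Id>& right) // read only (FieldIn)
{
  using namespace vtkm::worklet::contourtree_augmented::mesh_dem_contourtree_mesh_inc;
  vtkm::worklet::DispatcherMapField<SubtractAssignWorklet> dispatcher;
  dispatcher.Invoke(left, right);
}
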
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. +// +//============================================================================= +// +// This code is an extension of the algorithm presented in the paper: +// Parallel Peak Pruning for Scalable SMP Contour Tree Computation. +// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. +// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization +// (LDAV), October 2016, Baltimore, Maryland. +// +// The PPP2 algorithm and software were jointly developed by +// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and +// Oliver Ruebel (LBNL) +//============================================================================== + +#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_update_combined_neighbours_worklet_h +#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_update_combined_neighbours_worklet_h + +#include +#include + +namespace vtkm +{ +namespace worklet +{ +namespace contourtree_augmented +{ +namespace mesh_dem_contourtree_mesh_inc +{ + +class UpdateCombinedNeighboursWorklet : public vtkm::worklet::WorkletMapField +{ +public: + typedef void ControlSignature( + WholeArrayIn firstNeighbour, // (input) this->firstNerighbour or other.firstNeighbour + WholeArrayIn neighbours, // (input) this->neighbours or other.neighbours array + WholeArrayIn + toCombinedSortOrder, // (input) thisToCombinedSortOrder or otherToCombinedSortOrder array + WholeArrayIn combinedFirstNeighbour, // (input) combinedFirstNeighbour array in both cases + WholeArrayIn + combinedOtherStartIndex, // (input) const 0 array of length combinedOtherStartIndex for this and combinedOtherStartIndex for other loop + WholeArrayOut combinedNeighbours); // (output) combinedNeighbours array in both cases + typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6); + typedef _1 InputDomain; + + // Default Constructor + VTKM_EXEC_CONT + UpdateCombinedNeighboursWorklet() {} + + template + VTKM_EXEC void operator()( + const InFieldPortalType& firstNeighbourPortal, + const vtkm::Id vtx, + const InFieldPortalType& neighboursPortal, + const InFieldPortalType& toCombinedSortOrderPortal, + const InFieldPortalType& combinedFirstNeighbourPortal, + const InFieldPortalType2& + combinedOtherStartIndexPortal, // We need another InFieldPortalType here to allow us to hand in a smart array handle instead of a VTKM array + const OutFieldPortalType& combinedNeighboursPortal) const + { + vtkm::Id totalNumNeighbours = neighboursPortal.GetNumberOfValues(); + vtkm::Id totalNumVertices = firstNeighbourPortal.GetNumberOfValues(); + vtkm::Id numNeighbours = (vtx < totalNumVertices - 1) + ? 
firstNeighbourPortal.Get(vtx + 1) - firstNeighbourPortal.Get(vtx) + : totalNumNeighbours - firstNeighbourPortal.Get(vtx); + for (vtkm::Id nbrNo = 0; nbrNo < numNeighbours; ++nbrNo) + { + combinedNeighboursPortal.Set( + combinedFirstNeighbourPortal.Get(toCombinedSortOrderPortal.Get(vtx)) + + combinedOtherStartIndexPortal.Get(toCombinedSortOrderPortal.Get(vtx)) + nbrNo, + toCombinedSortOrderPortal.Get(neighboursPortal.Get(firstNeighbourPortal.Get(vtx) + nbrNo))); + } + + /* + This worklet implemnts the following two loops from the original OpenMP code + The two loops are the same but the arrays required are different + + #pragma omp parallel for + for (indexVector::size_type vtx = 0; vtx < firstNeighbour.size(); ++vtx) + { + indexType numNeighbours = (vtx < GetNumberOfVertices() - 1) ? firstNeighbour[vtx+1] - firstNeighbour[vtx] : neighbours.size() - firstNeighbour[vtx]; + + for (indexType nbrNo = 0; nbrNo < numNeighbours; ++nbrNo) + { + combinedNeighbours[combinedFirstNeighbour[thisToCombinedSortOrder[vtx]] + nbrNo] = thisToCombinedSortOrder[neighbours[firstNeighbour[vtx] + nbrNo]]; + } + } + + #pragma omp parallel for + for (indexVector::size_type vtx = 0; vtx < other.firstNeighbour.size(); ++vtx) + { + indexType numNeighbours = (vtx < other.GetNumberOfVertices() - 1) ? other.firstNeighbour[vtx+1] - other.firstNeighbour[vtx] : other.neighbours.size() - other.firstNeighbour[vtx]; + for (indexType nbrNo = 0; nbrNo < numNeighbours; ++nbrNo) + { + combinedNeighbours[combinedFirstNeighbour[otherToCombinedSortOrder[vtx]] + combinedOtherStartIndex[otherToCombinedSortOrder[vtx]] + nbrNo] = otherToCombinedSortOrder[other.neighbours[other.firstNeighbour[vtx] + nbrNo]]; + } + } + */ + } +}; // AdditionAssignWorklet + + +} // namespace mesh_dem_contourtree_mesh_inc +} // namespace contourtree_augmented +} // namespace worklet +} // namespace vtkm + +#endif diff --git a/vtkm/worklet/contourtree_augmented/meshextrema/SetStarts.h b/vtkm/worklet/contourtree_augmented/meshextrema/SetStarts.h index fc6ef1289..b0e846af5 100644 --- a/vtkm/worklet/contourtree_augmented/meshextrema/SetStarts.h +++ b/vtkm/worklet/contourtree_augmented/meshextrema/SetStarts.h @@ -73,7 +73,7 @@ public: FieldIn sortIndices, // (input) index into active vertices ExecObject meshStructure, // (input) execution object with the mesh structure WholeArrayOut meshExtema); // (output) whether critical - typedef void ExecutionSignature(_1, InputIndex, _2, _3); + typedef void ExecutionSignature(_1, _2, _3); using InputDomain = _1; // Constructor @@ -82,16 +82,15 @@ public: template VTKM_EXEC void operator()(const vtkm::Id& sortIndex, - const vtkm::Id vertexIndex, const MeshStructureType& meshStructure, const OutFieldPortalType& meshExtrema) const { - meshExtrema.Set(sortIndex, meshStructure.GetExtremalNeighbour(vertexIndex)); + meshExtrema.Set(sortIndex, meshStructure.GetExtremalNeighbour(sortIndex)); // In serial this does - //for (indexType vertex = 0; vertex < mesh.sortIndices.size(); vertex++) + // for (indexType sortIndex = 0; sortIndex < mesh.sortIndices.size(); sortIndex++) // { // per vertex - // peaks[mesh.sortIndices[vertex]] = mesh.GetExtremalNeighbour(vertex, isMaximal); + // extrema[sortIndex] = mesh.GetExtremalNeighbour(sortIndex, isMaximal); // } // per vertex } }; // Mesh2D_DEM_VertexStarter diff --git a/vtkm/worklet/contourtree_augmented/processcontourtree/Branch.h b/vtkm/worklet/contourtree_augmented/processcontourtree/Branch.h index bd776b15d..15c15d250 100644 --- 
a/vtkm/worklet/contourtree_augmented/processcontourtree/Branch.h +++ b/vtkm/worklet/contourtree_augmented/processcontourtree/Branch.h @@ -92,7 +92,8 @@ public: const IdArrayType& branchSaddle, const IdArrayType& branchParent, const IdArrayType& sortOrder, - const vtkm::cont::ArrayHandle& dataField); + const vtkm::cont::ArrayHandle& dataField, + bool dataFieldIsSorted); // Simplify branch composition down to target size (i.e., consisting of targetSize branches) void simplifyToSize(vtkm::Id targetSize, bool usePersistenceSorter = true); @@ -179,7 +180,8 @@ Branch* Branch::ComputeBranchDecomposition( const IdArrayType& branchSaddle, const IdArrayType& branchParent, const IdArrayType& sortOrder, - const vtkm::cont::ArrayHandle& dataField) + const vtkm::cont::ArrayHandle& dataField, + bool dataFieldIsSorted) { // ComputeBranchDecomposition() auto branchMinimumPortal = branchMinimum.GetPortalConstControl(); auto branchMaximumPortal = branchMaximum.GetPortalConstControl(); @@ -190,23 +192,23 @@ Branch* Branch::ComputeBranchDecomposition( auto dataFieldPortal = dataField.GetPortalConstControl(); vtkm::Id nBranches = branchSaddle.GetNumberOfValues(); std::vector*> branches; - Branch* root; - branches.reserve(nBranches); + Branch* root = nullptr; + branches.reserve(static_cast(nBranches)); for (int branchID = 0; branchID < nBranches; ++branchID) branches.push_back(new Branch); // Reconstruct explicit branch decomposition from array representation - for (int branchID = 0; branchID < nBranches; ++branchID) + for (std::size_t branchID = 0; branchID < static_cast(nBranches); ++branchID) { - if (!noSuchElement(branchSaddlePortal.Get(branchID))) + if (!noSuchElement(branchSaddlePortal.Get(static_cast(branchID)))) { - branches[branchID]->saddle = - maskedIndex(supernodesPortal.Get(maskedIndex(branchSaddlePortal.Get(branchID)))); - vtkm::Id branchMin = - maskedIndex(supernodesPortal.Get(maskedIndex(branchMinimumPortal.Get(branchID)))); - vtkm::Id branchMax = - maskedIndex(supernodesPortal.Get(maskedIndex(branchMaximumPortal.Get(branchID)))); + branches[branchID]->saddle = maskedIndex( + supernodesPortal.Get(maskedIndex(branchSaddlePortal.Get(static_cast(branchID))))); + vtkm::Id branchMin = maskedIndex(supernodesPortal.Get( + maskedIndex(branchMinimumPortal.Get(static_cast(branchID))))); + vtkm::Id branchMax = maskedIndex(supernodesPortal.Get( + maskedIndex(branchMaximumPortal.Get(static_cast(branchID))))); if (branchMin < branches[branchID]->saddle) branches[branchID]->extremum = branchMin; else if (branchMax > branches[branchID]->saddle) @@ -220,23 +222,35 @@ Branch* Branch::ComputeBranchDecomposition( else { branches[branchID]->saddle = - supernodesPortal.Get(maskedIndex(branchMinimumPortal.Get(branchID))); + supernodesPortal.Get(maskedIndex(branchMinimumPortal.Get(static_cast(branchID)))); branches[branchID]->extremum = - supernodesPortal.Get(maskedIndex(branchMaximumPortal.Get(branchID))); + supernodesPortal.Get(maskedIndex(branchMaximumPortal.Get(static_cast(branchID)))); + } + + if (dataFieldIsSorted) + { + branches[branchID]->saddleVal = dataFieldPortal.Get(branches[branchID]->saddle); + branches[branchID]->extremumVal = dataFieldPortal.Get(branches[branchID]->extremum); + } + else + { + branches[branchID]->saddleVal = + dataFieldPortal.Get(sortOrderPortal.Get(branches[branchID]->saddle)); + branches[branchID]->extremumVal = + dataFieldPortal.Get(sortOrderPortal.Get(branches[branchID]->extremum)); } branches[branchID]->saddle = sortOrderPortal.Get(branches[branchID]->saddle); 
branches[branchID]->extremum = sortOrderPortal.Get(branches[branchID]->extremum); - branches[branchID]->saddleVal = dataFieldPortal.Get(branches[branchID]->saddle); - branches[branchID]->extremumVal = dataFieldPortal.Get(branches[branchID]->extremum); - if (noSuchElement(branchParentPortal.Get(branchID))) + if (noSuchElement(branchParentPortal.Get(static_cast(branchID)))) { root = branches[branchID]; // No parent -> this is the root branch } else { - branches[branchID]->parent = branches[maskedIndex(branchParentPortal.Get(branchID))]; + branches[branchID]->parent = branches[static_cast( + maskedIndex(branchParentPortal.Get(static_cast(branchID))))]; branches[branchID]->parent->children.push_back(branches[branchID]); } } @@ -247,10 +261,14 @@ Branch* Branch::ComputeBranchDecomposition( auto superparentsPortal = contourTreeSuperparents.GetPortalConstControl(); for (vtkm::Id i = 0; i < contourTreeSuperparents.GetNumberOfValues(); i++) { - branches[maskedIndex(whichBranchPortal.Get(maskedIndex(superparentsPortal.Get(i))))] + branches[static_cast( + maskedIndex(whichBranchPortal.Get(maskedIndex(superparentsPortal.Get(i)))))] ->volume++; // Increment volume } - root->removeSymbolicPerturbation(); + if (root) + { + root->removeSymbolicPerturbation(); + } return root; } // ComputeBranchDecomposition() @@ -267,7 +285,7 @@ void Branch::simplifyToSize(vtkm::Id targetSize, bool usePersistenceSorter) q.push_back(this); std::vector*> active; - while (active.size() < targetSize && !q.empty()) + while (active.size() < static_cast(targetSize) && !q.empty()) { if (usePersistenceSorter) { @@ -367,7 +385,7 @@ Branch::~Branch() template void Branch::getRelevantValues(int type, T eps, std::vector& values) const { // getRelevantValues() - double val; + T val; bool isMax = false; if (extremumVal > saddleVal) @@ -380,7 +398,7 @@ void Branch::getRelevantValues(int type, T eps, std::vector& values) const val = saddleVal + (isMax ? +eps : -eps); break; case 1: - val = 0.5 * (extremumVal + saddleVal); + val = T(0.5f) * (extremumVal + saddleVal); break; case 2: val = extremumVal + (isMax ? -eps : +eps); @@ -406,7 +424,7 @@ void Branch::accumulateIntervals(int type, T eps, PiecewiseLinearFunction& val = saddleVal + (isMax ? +eps : -eps); break; case 1: - val = 0.5 * (extremumVal + saddleVal); + val = T(0.5f) * (extremumVal + saddleVal); break; case 2: val = extremumVal + (isMax ? -eps : +eps);
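
The Branch.h hunks above change Branch<T>::ComputeBranchDecomposition() in two user-visible ways: root is now initialised to nullptr and only dereferenced when a root branch was actually found, and a new trailing bool parameter, dataFieldIsSorted, controls how the saddle and extremum data values are fetched. The values are now read while saddle/extremum are still sort indices, before they are remapped through sortOrder to mesh indices: with dataFieldIsSorted == false the field is indexed through sortOrder (equivalent to the previous behaviour, field stored in original mesh order), and with dataFieldIsSorted == true it is indexed by the sort index directly, presumably for the case where the field arrives already permuted into sort order. The small helper below is an illustrative sketch of that lookup, not code from this patch; saddleSortIndex is a placeholder for branches[branchID]->saddle before the remap.

// Sketch of the new value lookup controlled by dataFieldIsSorted (illustration only).
#include <vtkm/Types.h>

template <typename T, typename FieldPortal, typename IdPortal>
T lookupBranchValue(vtkm::Id saddleSortIndex,
                    const FieldPortal& dataFieldPortal,
                    const IdPortal& sortOrderPortal,
                    bool dataFieldIsSorted)
{
  // Sorted field: index by sort index directly.
  // Unsorted field: translate the sort index to a mesh index first.
  return dataFieldIsSorted ? dataFieldPortal.Get(saddleSortIndex)
                           : dataFieldPortal.Get(sortOrderPortal.Get(saddleSortIndex));
}

The remaining Branch.h edits in this hunk are type hygiene rather than behaviour changes: loop indices and branch lookups are cast explicitly to std::size_t, getRelevantValues()/accumulateIntervals() compute their midpoint value in T instead of double, and simplifyToSize() compares the queue size against a size_t-cast target.
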